repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/rnn/conv_lstm1d.py | keras/src/layers/rnn/conv_lstm1d.py | from keras.src.api_export import keras_export
from keras.src.layers.rnn.conv_lstm import ConvLSTM
@keras_export("keras.layers.ConvLSTM1D")
class ConvLSTM1D(ConvLSTM):
    """1D Convolutional LSTM.

    Behaves like an LSTM layer, except that both the input-to-state and the
    state-to-state transformations are 1D convolutions instead of dense
    matrix products.

    Args:
        filters: int, dimension of the output space (the number of filters
            in the convolution).
        kernel_size: int or tuple/list of 1 integer, size of the convolution
            window.
        strides: int or tuple/list of 1 integer, stride length of the
            convolution. `strides > 1` is incompatible with
            `dilation_rate > 1`.
        padding: string, `"valid"` or `"same"` (case-insensitive). `"valid"`
            means no padding; `"same"` pads evenly so the output keeps the
            same height/width dimension as the input.
        data_format: string, either `"channels_last"` or `"channels_first"`,
            the ordering of the dimensions in the inputs. `"channels_last"`
            corresponds to inputs with shape `(batch, steps, features)`;
            `"channels_first"` corresponds to inputs with shape
            `(batch, features, steps)`. Defaults to the `image_data_format`
            value found in your Keras config file at `~/.keras/keras.json`
            (`"channels_last"` if never set).
        dilation_rate: int or tuple/list of 1 integer, dilation rate to use
            for dilated convolution.
        activation: Activation function to use. Defaults to the hyperbolic
            tangent (`tanh(x)`).
        recurrent_activation: Activation function for the recurrent step.
        use_bias: Boolean, whether the layer uses a bias vector.
        kernel_initializer: Initializer for the `kernel` weights matrix,
            used for the linear transformation of the inputs.
        recurrent_initializer: Initializer for the `recurrent_kernel`
            weights matrix, used for the linear transformation of the
            recurrent state.
        bias_initializer: Initializer for the bias vector.
        unit_forget_bias: Boolean. If `True`, add 1 to the bias of the
            forget gate at initialization. Use in combination with
            `bias_initializer="zeros"`. This is recommended in
            [Jozefowicz et al., 2015](
            http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
        kernel_regularizer: Regularizer function applied to the `kernel`
            weights matrix.
        recurrent_regularizer: Regularizer function applied to the
            `recurrent_kernel` weights matrix.
        bias_regularizer: Regularizer function applied to the bias vector.
        activity_regularizer: Regularizer function applied to the output of
            the layer (its "activation").
        kernel_constraint: Constraint function applied to the `kernel`
            weights matrix.
        recurrent_constraint: Constraint function applied to the
            `recurrent_kernel` weights matrix.
        bias_constraint: Constraint function applied to the bias vector.
        dropout: Float between 0 and 1. Fraction of the units to drop for
            the linear transformation of the inputs.
        recurrent_dropout: Float between 0 and 1. Fraction of the units to
            drop for the linear transformation of the recurrent state.
        seed: Random seed for dropout.
        return_sequences: Boolean. Whether to return the last output in the
            output sequence, or the full sequence. Default: `False`.
        return_state: Boolean. Whether to return the last state in addition
            to the output. Default: `False`.
        go_backwards: Boolean (default: `False`). If `True`, process the
            input sequence backwards and return the reversed sequence.
        stateful: Boolean (default: `False`). If `True`, the last state for
            each sample at index i in a batch will be used as initial state
            for the sample of index i in the following batch.
        unroll: Boolean (default: `False`). If `True`, the network will be
            unrolled, else a symbolic loop will be used. Unrolling can
            speed up a RNN, although it tends to be more memory-intensive
            and is only suitable for short sequences.

    Call arguments:
        inputs: A 4D tensor.
        initial_state: List of initial state tensors to be passed to the
            first call of the cell.
        mask: Binary tensor of shape `(samples, timesteps)` indicating
            whether a given timestep should be masked.
        training: Python boolean indicating whether the layer should behave
            in training mode or in inference mode. Only relevant if
            `dropout` or `recurrent_dropout` are set.

    Input shape:
        - If `data_format="channels_first"`:
            4D tensor with shape: `(samples, time, channels, rows)`
        - If `data_format="channels_last"`:
            4D tensor with shape: `(samples, time, rows, channels)`

    Output shape:
        - If `return_state`: a list of tensors. The first tensor is the
            output. The remaining tensors are the last states, each a 3D
            tensor with shape `(samples, filters, new_rows)` if
            `data_format='channels_first'` or `(samples, new_rows, filters)`
            if `data_format='channels_last'`. `rows` values might have
            changed due to padding.
        - If `return_sequences`: 4D tensor with shape
            `(samples, timesteps, filters, new_rows)` if
            `data_format='channels_first'` or
            `(samples, timesteps, new_rows, filters)` if
            `data_format='channels_last'`.
        - Else, 3D tensor with shape `(samples, filters, new_rows)` if
            `data_format='channels_first'` or
            `(samples, new_rows, filters)` if `data_format='channels_last'`.

    References:
        - [Shi et al., 2015](http://arxiv.org/abs/1506.04214v1)
            (the current implementation does not include the feedback loop
            on the cells output).
    """

    def __init__(
        self,
        filters,
        kernel_size,
        strides=1,
        padding="valid",
        data_format=None,
        dilation_rate=1,
        activation="tanh",
        recurrent_activation="sigmoid",
        use_bias=True,
        kernel_initializer="glorot_uniform",
        recurrent_initializer="orthogonal",
        bias_initializer="zeros",
        unit_forget_bias=True,
        kernel_regularizer=None,
        recurrent_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        recurrent_constraint=None,
        bias_constraint=None,
        dropout=0.0,
        recurrent_dropout=0.0,
        seed=None,
        return_sequences=False,
        return_state=False,
        go_backwards=False,
        stateful=False,
        **kwargs,
    ):
        # This subclass only pins the convolution rank to 1; everything
        # else is handled by the rank-generic ConvLSTM base class.
        base_kwargs = dict(
            rank=1,
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            activation=activation,
            recurrent_activation=recurrent_activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            recurrent_initializer=recurrent_initializer,
            bias_initializer=bias_initializer,
            unit_forget_bias=unit_forget_bias,
            kernel_regularizer=kernel_regularizer,
            recurrent_regularizer=recurrent_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            recurrent_constraint=recurrent_constraint,
            bias_constraint=bias_constraint,
            return_sequences=return_sequences,
            return_state=return_state,
            go_backwards=go_backwards,
            stateful=stateful,
            dropout=dropout,
            recurrent_dropout=recurrent_dropout,
            seed=seed,
        )
        # Duplicated keys in `kwargs` raise TypeError, exactly as the
        # equivalent explicit keyword call would.
        super().__init__(**base_kwargs, **kwargs)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/rnn/bidirectional.py | keras/src/layers/rnn/bidirectional.py | import copy
from keras.src import ops
from keras.src import utils
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
from keras.src.saving import serialization_lib
@keras_export("keras.layers.Bidirectional")
class Bidirectional(Layer):
    """Bidirectional wrapper for RNNs.

    Args:
        layer: `keras.layers.RNN` instance, such as
            `keras.layers.LSTM` or `keras.layers.GRU`.
            It could also be a `keras.layers.Layer` instance
            that meets the following criteria:
            1. Be a sequence-processing layer (accepts 3D+ inputs).
            2. Have a `go_backwards`, `return_sequences` and `return_state`
            attribute (with the same semantics as for the `RNN` class).
            3. Have an `input_spec` attribute.
            4. Implement serialization via `get_config()` and `from_config()`.
            Note that the recommended way to create new RNN layers is to write a
            custom RNN cell and use it with `keras.layers.RNN`, instead of
            subclassing `keras.layers.Layer` directly.
            When `return_sequences` is `True`, the output of the masked
            timestep will be zero regardless of the layer's original
            `zero_output_for_mask` value.
        merge_mode: Mode by which outputs of the forward and backward RNNs
            will be combined. One of `{"sum", "mul", "concat", "ave", None}`.
            If `None`, the outputs will not be combined,
            they will be returned as a list. Defaults to `"concat"`.
        backward_layer: Optional `keras.layers.RNN`,
            or `keras.layers.Layer` instance to be used to handle
            backwards input processing.
            If `backward_layer` is not provided, the layer instance passed
            as the `layer` argument will be used to generate the backward layer
            automatically.
            Note that the provided `backward_layer` layer should have properties
            matching those of the `layer` argument, in particular
            it should have the same values for `stateful`, `return_states`,
            `return_sequences`, etc. In addition, `backward_layer`
            and `layer` should have different `go_backwards` argument values.
            A `ValueError` will be raised if these requirements are not met.

    Call arguments:
        The call arguments for this layer are the same as those of the
        wrapped RNN layer. Beware that when passing the `initial_state`
        argument during the call of this layer, the first half in the
        list of elements in the `initial_state` list will be passed to
        the forward RNN call and the last half in the list of elements
        will be passed to the backward RNN call.

    Note: instantiating a `Bidirectional` layer from an existing RNN layer
    instance will not reuse the weights state of the RNN layer instance -- the
    `Bidirectional` layer will have freshly initialized weights.

    Examples:

    ```python
    model = Sequential([
        Input(shape=(5, 10)),
        Bidirectional(LSTM(10, return_sequences=True)),
        Bidirectional(LSTM(10)),
        Dense(5, activation="softmax"),
    ])
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    # With custom backward layer
    forward_layer = LSTM(10, return_sequences=True)
    backward_layer = LSTM(10, activation='relu', return_sequences=True,
                          go_backwards=True)
    model = Sequential([
        Input(shape=(5, 10)),
        Bidirectional(forward_layer, backward_layer=backward_layer),
        Dense(5, activation="softmax"),
    ])
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    ```
    """

    def __init__(
        self,
        layer,
        merge_mode="concat",
        weights=None,
        backward_layer=None,
        **kwargs,
    ):
        if not isinstance(layer, Layer):
            raise ValueError(
                "Please initialize `Bidirectional` layer with a "
                f"`keras.layers.Layer` instance. Received: {layer}"
            )
        if backward_layer is not None and not isinstance(backward_layer, Layer):
            raise ValueError(
                "`backward_layer` need to be a `keras.layers.Layer` "
                f"instance. Received: {backward_layer}"
            )
        if merge_mode not in ["sum", "mul", "ave", "concat", None]:
            raise ValueError(
                f"Invalid merge mode. Received: {merge_mode}. "
                "Merge mode should be one of "
                '{"sum", "mul", "ave", "concat", None}'
            )
        super().__init__(**kwargs)
        # Recreate the forward layer from the original layer config, so that it
        # will not carry over any state from the layer.
        config = serialization_lib.serialize_keras_object(layer)
        config["config"]["name"] = (
            f"forward_{utils.removeprefix(layer.name, 'forward_')}"
        )
        self.forward_layer = serialization_lib.deserialize_keras_object(config)
        if backward_layer is None:
            # Derive the backward layer from the forward one, flipping
            # `go_backwards` so it consumes the sequence in reverse.
            config = serialization_lib.serialize_keras_object(layer)
            config["config"]["go_backwards"] = True
            config["config"]["name"] = (
                f"backward_{utils.removeprefix(layer.name, 'backward_')}"
            )
            self.backward_layer = serialization_lib.deserialize_keras_object(
                config
            )
        else:
            self.backward_layer = backward_layer
        # Keep the use_cudnn attribute if defined (not serialized).
        if hasattr(layer, "use_cudnn"):
            self.forward_layer.use_cudnn = layer.use_cudnn
            self.backward_layer.use_cudnn = layer.use_cudnn
        self._verify_layer_config()

        def force_zero_output_for_mask(layer):
            # Force the zero_output_for_mask to be True if returning
            # sequences, so masked timesteps merge cleanly across directions.
            if getattr(layer, "zero_output_for_mask", None) is not None:
                layer.zero_output_for_mask = layer.return_sequences

        force_zero_output_for_mask(self.forward_layer)
        force_zero_output_for_mask(self.backward_layer)
        self.merge_mode = merge_mode
        if weights:
            # Split the provided weight list evenly between the two wrapped
            # layers; they are applied when the layers build.
            nw = len(weights)
            self.forward_layer.initial_weights = weights[: nw // 2]
            self.backward_layer.initial_weights = weights[nw // 2 :]
        self.stateful = layer.stateful
        self.return_sequences = layer.return_sequences
        self.return_state = layer.return_state
        self.supports_masking = True
        self.input_spec = layer.input_spec

    def _verify_layer_config(self):
        """Ensure the forward and backward layers have valid common property."""
        if self.forward_layer.go_backwards == self.backward_layer.go_backwards:
            raise ValueError(
                "Forward layer and backward layer should have different "
                "`go_backwards` value. Received: "
                "forward_layer.go_backwards "
                f"{self.forward_layer.go_backwards}, "
                "backward_layer.go_backwards="
                f"{self.backward_layer.go_backwards}"
            )
        common_attributes = ("stateful", "return_sequences", "return_state")
        for a in common_attributes:
            forward_value = getattr(self.forward_layer, a)
            backward_value = getattr(self.backward_layer, a)
            if forward_value != backward_value:
                raise ValueError(
                    "Forward layer and backward layer are expected to have "
                    f'the same value for attribute "{a}", got '
                    f'"{forward_value}" for forward layer and '
                    f'"{backward_value}" for backward layer'
                )

    def compute_output_shape(self, sequences_shape, initial_state_shape=None):
        """Compute the output shape(s), merging per `merge_mode` and
        appending forward then backward state shapes if `return_state`."""
        output_shape = self.forward_layer.compute_output_shape(sequences_shape)
        if self.return_state:
            output_shape, state_shape = output_shape[0], output_shape[1:]
        if self.merge_mode == "concat":
            # Features of both directions are concatenated on the last axis.
            output_shape = list(output_shape)
            output_shape[-1] *= 2
            output_shape = tuple(output_shape)
        elif self.merge_mode is None:
            output_shape = [output_shape, output_shape]
        if self.return_state:
            if self.merge_mode is None:
                return tuple(output_shape) + state_shape + state_shape
            return (output_shape,) + state_shape + state_shape
        return tuple(output_shape)

    def call(
        self,
        sequences,
        initial_state=None,
        mask=None,
        training=None,
    ):
        kwargs = {}
        if self.forward_layer._call_has_training_arg:
            kwargs["training"] = training
        if self.forward_layer._call_has_mask_arg:
            kwargs["mask"] = mask

        if initial_state is not None:
            # initial_states are not keras tensors, eg eager tensor from np
            # array. They are only passed in from kwarg initial_state, and
            # should be passed to forward/backward layer via kwarg
            # initial_state as well.
            forward_inputs, backward_inputs = sequences, sequences
            half = len(initial_state) // 2
            forward_state = initial_state[:half]
            backward_state = initial_state[half:]
        else:
            forward_inputs, backward_inputs = sequences, sequences
            forward_state, backward_state = None, None

        y = self.forward_layer(
            forward_inputs, initial_state=forward_state, **kwargs
        )
        y_rev = self.backward_layer(
            backward_inputs, initial_state=backward_state, **kwargs
        )
        if self.return_state:
            states = tuple(y[1:] + y_rev[1:])
            y = y[0]
            y_rev = y_rev[0]

        y = ops.cast(y, self.compute_dtype)
        y_rev = ops.cast(y_rev, self.compute_dtype)
        if self.return_sequences:
            # The backward layer emitted its outputs in reverse time order;
            # flip them so both directions are time-aligned before merging.
            y_rev = ops.flip(y_rev, axis=1)

        if self.merge_mode == "concat":
            output = ops.concatenate([y, y_rev], axis=-1)
        elif self.merge_mode == "sum":
            output = y + y_rev
        elif self.merge_mode == "ave":
            output = (y + y_rev) / 2
        elif self.merge_mode == "mul":
            output = y * y_rev
        elif self.merge_mode is None:
            output = (y, y_rev)
        else:
            # BUGFIX: the two message fragments used to be concatenated with
            # no separator ("...merge_modeExpected...").
            raise ValueError(
                "Unrecognized value for `merge_mode`. "
                f"Received: {self.merge_mode}. "
                'Expected one of {"concat", "sum", "ave", "mul"}.'
            )
        if self.return_state:
            if self.merge_mode is None:
                return output + states
            return (output,) + states
        return output

    def reset_states(self):
        # Compatibility alias.
        self.reset_state()

    def reset_state(self):
        if not self.stateful:
            raise AttributeError("Layer must be stateful.")
        self.forward_layer.reset_state()
        self.backward_layer.reset_state()

    @property
    def states(self):
        # Combined forward + backward states, or None until both exist.
        if self.forward_layer.states and self.backward_layer.states:
            return tuple(self.forward_layer.states + self.backward_layer.states)
        return None

    def build(self, sequences_shape, initial_state_shape=None):
        if not self.forward_layer.built:
            self.forward_layer.build(sequences_shape)
        if not self.backward_layer.built:
            self.backward_layer.build(sequences_shape)

    def compute_mask(self, _, mask):
        """Compute the output mask; state outputs always get `None` masks."""
        if isinstance(mask, list):
            mask = mask[0]
        if self.return_sequences:
            if not self.merge_mode:
                output_mask = (mask, mask)
            else:
                output_mask = mask
        else:
            output_mask = (None, None) if not self.merge_mode else None

        if self.return_state and self.states is not None:
            # BUGFIX: this used to be a generator expression, which cannot
            # be repeated with `* 2` nor concatenated to a tuple and raised
            # TypeError. Use a concrete list instead.
            state_mask = [None] * len(self.states)
            if isinstance(output_mask, list):
                return output_mask + state_mask * 2
            return (output_mask,) + tuple(state_mask) * 2
        return output_mask

    def get_config(self):
        config = {"merge_mode": self.merge_mode}
        config["layer"] = serialization_lib.serialize_keras_object(
            self.forward_layer
        )
        config["backward_layer"] = serialization_lib.serialize_keras_object(
            self.backward_layer
        )
        base_config = super().get_config()
        return {**base_config, **config}

    @classmethod
    def from_config(cls, config, custom_objects=None):
        # Instead of updating the input, create a copy and use that.
        config = copy.deepcopy(config)
        config["layer"] = serialization_lib.deserialize_keras_object(
            config["layer"], custom_objects=custom_objects
        )
        # Handle (optional) backward layer instantiation.
        backward_layer_config = config.pop("backward_layer", None)
        if backward_layer_config is not None:
            backward_layer = serialization_lib.deserialize_keras_object(
                backward_layer_config, custom_objects=custom_objects
            )
            config["backward_layer"] = backward_layer
        # Instantiate the wrapper, adjust it and return it.
        layer = cls(**config)
        return layer
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/rnn/rnn.py | keras/src/layers/rnn/rnn.py | from keras.src import backend
from keras.src import ops
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
from keras.src.layers.rnn.dropout_rnn_cell import DropoutRNNCell
from keras.src.layers.rnn.stacked_rnn_cells import StackedRNNCells
from keras.src.saving import serialization_lib
from keras.src.utils import tracking
@keras_export("keras.layers.RNN")
class RNN(Layer):
    """Base class for recurrent layers.

    Args:
        cell: A RNN cell instance or a list of RNN cell instances.
            A RNN cell is a class that has:
            - A `call(input_at_t, states_at_t)` method, returning
            `(output_at_t, states_at_t_plus_1)`. The call method of the
            cell can also take the optional argument `constants`, see
            section "Note on passing external constants" below.
            - A `state_size` attribute. This can be a single integer
            (single state) in which case it is the size of the recurrent
            state. This can also be a list/tuple of integers
            (one size per state).
            - A `output_size` attribute, a single integer.
            - A `get_initial_state(batch_size=None)`
            method that creates a tensor meant to be fed to `call()` as the
            initial state, if the user didn't specify any initial state
            via other means. The returned initial state should have
            shape `(batch_size, cell.state_size)`.
            The cell might choose to create a tensor full of zeros,
            or other values based on the cell's implementation.
            `inputs` is the input tensor to the RNN layer, with shape
            `(batch_size, timesteps, features)`.
            If this method is not implemented
            by the cell, the RNN layer will create a zero filled tensor
            with shape `(batch_size, cell.state_size)`.
            In the case that `cell` is a list of RNN cell instances, the cells
            will be stacked on top of each other in the RNN, resulting in an
            efficient stacked RNN.
        return_sequences: Boolean (default `False`). Whether to return the last
            output in the output sequence, or the full sequence.
        return_state: Boolean (default `False`).
            Whether to return the last state in addition to the output.
        go_backwards: Boolean (default `False`).
            If `True`, process the input sequence backwards and return the
            reversed sequence.
        stateful: Boolean (default `False`). If True, the last state
            for each sample at index `i` in a batch will be used as initial
            state for the sample of index `i` in the following batch.
        unroll: Boolean (default `False`).
            If True, the network will be unrolled, else a symbolic loop will be
            used. Unrolling can speed-up a RNN, although it tends to be more
            memory-intensive. Unrolling is only suitable for short sequences.
        zero_output_for_mask: Boolean (default `False`).
            Whether the output should use zeros for the masked timesteps.
            Note that this field is only used when `return_sequences`
            is `True` and `mask` is provided.
            It can useful if you want to reuse the raw output sequence of
            the RNN without interference from the masked timesteps, e.g.,
            merging bidirectional RNNs.

    Call arguments:
        sequences: A 3-D tensor with shape `(batch_size, timesteps, features)`.
        initial_state: List of initial state tensors to be passed to the first
            call of the cell.
        mask: Binary tensor of shape `[batch_size, timesteps]`
            indicating whether a given timestep should be masked.
            An individual `True` entry indicates that the corresponding
            timestep should be utilized, while a `False` entry indicates
            that the corresponding timestep should be ignored.
        training: Python boolean indicating whether the layer should behave in
            training mode or in inference mode. This argument is passed
            to the cell when calling it.
            This is for use with cells that use dropout.

    Output shape:
        - If `return_state`: a list of tensors. The first tensor is
            the output. The remaining tensors are the last states,
            each with shape `(batch_size, state_size)`, where `state_size` could
            be a high dimension tensor shape.
        - If `return_sequences`: 3D tensor with shape
            `(batch_size, timesteps, output_size)`.

    Masking:
        This layer supports masking for input data with a variable number
        of timesteps. To introduce masks to your data,
        use a `keras.layers.Embedding` layer with the `mask_zero` parameter
        set to `True`.

    Note on using statefulness in RNNs:
        You can set RNN layers to be 'stateful', which means that the states
        computed for the samples in one batch will be reused as initial states
        for the samples in the next batch. This assumes a one-to-one mapping
        between samples in different successive batches.

        To enable statefulness:

        - Specify `stateful=True` in the layer constructor.
        - Specify a fixed batch size for your model, by passing
            `batch_size=...` to the `Input` layer(s) of your model.
            Remember to also specify the same `batch_size=...` when
            calling `fit()`, or otherwise use a generator-like
            data source like a `keras.utils.PyDataset` or a
            `tf.data.Dataset`.
        - Specify `shuffle=False` when calling `fit()`, since your
            batches are expected to be temporally ordered.

        To reset the states of your model, call `.reset_state()` on either
        a specific layer, or on your entire model.

    Note on specifying the initial state of RNNs:
        You can specify the initial state of RNN layers symbolically by
        calling them with the keyword argument `initial_state`. The value of
        `initial_state` should be a tensor or list of tensors representing
        the initial state of the RNN layer.
        You can specify the initial state of RNN layers numerically by
        calling `reset_state()` with the keyword argument `states`. The value of
        `states` should be a numpy array or list of numpy arrays representing
        the initial state of the RNN layer.

    Examples:

    ```python
    from keras.layers import RNN
    from keras import ops

    # First, let's define a RNN Cell, as a layer subclass.
    class MinimalRNNCell(keras.Layer):
        def __init__(self, units, **kwargs):
            super().__init__(**kwargs)
            self.units = units
            self.state_size = units

        def build(self, input_shape):
            self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
                                          initializer='uniform',
                                          name='kernel')
            self.recurrent_kernel = self.add_weight(
                shape=(self.units, self.units),
                initializer='uniform',
                name='recurrent_kernel')

        def call(self, inputs, states):
            prev_output = states[0]
            h = ops.matmul(inputs, self.kernel)
            output = h + ops.matmul(prev_output, self.recurrent_kernel)
            return output, [output]

    # Let's use this cell in a RNN layer:
    cell = MinimalRNNCell(32)
    x = keras.Input((None, 5))
    layer = RNN(cell)
    y = layer(x)

    # Here's how to use the cell to build a stacked RNN:
    cells = [MinimalRNNCell(32), MinimalRNNCell(64)]
    x = keras.Input((None, 5))
    layer = RNN(cells)
    y = layer(x)
    ```
    """

    def __init__(
        self,
        cell,
        return_sequences=False,
        return_state=False,
        go_backwards=False,
        stateful=False,
        unroll=False,
        zero_output_for_mask=False,
        **kwargs,
    ):
        # A list/tuple of cells is wrapped into a single stacked cell so the
        # rest of the layer can treat `self.cell` uniformly.
        if isinstance(cell, (list, tuple)):
            cell = StackedRNNCells(cell)
        # Validate the minimal cell contract (`call` + `state_size`) up
        # front so misuse fails at construction time, not at first call.
        if "call" not in dir(cell):
            raise ValueError(
                "Argument `cell` should have a `call` method. "
                f"Received: cell={cell}"
            )
        if "state_size" not in dir(cell):
            raise ValueError(
                "The RNN cell should have a `state_size` attribute "
                "(single integer or list of integers, "
                "one integer per RNN state). "
                f"Received: cell={cell}"
            )
        super().__init__(**kwargs)
        # If True, the output for masked timestep will be zeros, whereas in the
        # False case, output from previous timestep is returned for masked
        # timestep.
        self.zero_output_for_mask = zero_output_for_mask
        self.cell = cell
        self.return_sequences = return_sequences
        self.return_state = return_state
        self.go_backwards = go_backwards
        self.stateful = stateful
        self.unroll = unroll
        self.supports_masking = True
        self.input_spec = None
        # `states` holds backend variables only when `stateful=True`;
        # `_expected_batch_size` is fixed at build time in that case.
        self.states = None
        self._expected_batch_size = None
        state_size = getattr(self.cell, "state_size", None)
        if state_size is None:
            raise ValueError(
                "state_size must be specified as property on the RNN cell."
            )
        if not isinstance(state_size, (list, tuple, int)):
            raise ValueError(
                "state_size must be an integer, or a list/tuple of integers "
                "(one for each state tensor)."
            )
        # Normalize `state_size` to a list; `single_state` remembers whether
        # the cell originally declared a single integer state.
        if isinstance(state_size, int):
            self.state_size = [state_size]
            self.single_state = True
        else:
            self.state_size = list(state_size)
            self.single_state = False

    def compute_output_shape(self, sequences_shape, initial_state_shape=None):
        """Compute output (and, if `return_state`, state) shapes.

        Falls back to `state_size[0]` when the cell declares no
        `output_size`.
        """
        batch_size = sequences_shape[0]
        length = sequences_shape[1]
        states_shape = []
        for state_size in self.state_size:
            if isinstance(state_size, int):
                states_shape.append((batch_size, state_size))
            elif isinstance(state_size, (list, tuple)):
                states_shape.append([(batch_size, s) for s in state_size])
        output_size = getattr(self.cell, "output_size", None)
        if output_size is None:
            output_size = self.state_size[0]
        if not isinstance(output_size, int):
            raise ValueError("output_size must be an integer.")
        if self.return_sequences:
            output_shape = (batch_size, length, output_size)
        else:
            output_shape = (batch_size, output_size)
        if self.return_state:
            return output_shape, *states_shape
        return output_shape

    def compute_mask(self, _, mask):
        """Propagate the timestep mask; state outputs get `None` masks."""
        # Time step masks must be the same for each input.
        # This is because the mask for an RNN is of size [batch, time_steps, 1],
        # and specifies which time steps should be skipped, and a time step
        # must be skipped for all inputs.
        mask = tree.flatten(mask)[0]
        output_mask = mask if self.return_sequences else None
        if self.return_state:
            state_mask = [None for _ in self.state_size]
            return [output_mask] + state_mask
        else:
            return output_mask

    def build(self, sequences_shape, initial_state_shape=None):
        """Build the wrapped cell and, if stateful, the state variables."""
        # Build cell (if layer). The cell sees per-timestep input shapes
        # (time axis dropped).
        step_input_shape = (sequences_shape[0],) + tuple(sequences_shape[2:])
        if isinstance(self.cell, Layer) and not self.cell.built:
            self.cell.build(step_input_shape)
            self.cell.built = True
        if self.stateful:
            if self.states is not None:
                self.reset_state()
            else:
                # Stateful RNNs carry state across batches, so the batch
                # size must be known now to size the state variables.
                if sequences_shape[0] is None:
                    raise ValueError(
                        "When using `stateful=True` in a RNN, the "
                        "batch size must be static. Found dynamic "
                        f"batch size: sequence.shape={sequences_shape}"
                    )
                self._create_state_variables(sequences_shape[0])
                self._expected_batch_size = ops.shape(
                    tree.flatten(self.states)[0]
                )[0]

    @tracking.no_automatic_dependency_tracking
    def _create_state_variables(self, batch_size):
        """Create non-trainable backend variables holding the RNN state."""
        with backend.name_scope(self.name, caller=self):
            self.states = tree.map_structure(
                lambda value: backend.Variable(
                    value,
                    trainable=False,
                    dtype=self.variable_dtype,
                    name="rnn_state",
                ),
                self.get_initial_state(batch_size),
            )

    def get_initial_state(self, batch_size):
        """Return the initial state list, delegating to the cell if it
        implements `get_initial_state`, else zero-filled tensors."""
        get_initial_state_fn = getattr(self.cell, "get_initial_state", None)
        if get_initial_state_fn:
            init_state = get_initial_state_fn(batch_size=batch_size)
        else:
            return [
                ops.zeros((batch_size, d), dtype=self.cell.compute_dtype)
                for d in self.state_size
            ]
        # RNN expect the states in a list, even if single state.
        if not tree.is_nested(init_state):
            init_state = [init_state]
        # Force the state to be a list in case it is a namedtuple eg
        # LSTMStateTuple.
        return list(init_state)

    def reset_states(self):
        # Compatibility alias.
        self.reset_state()

    def reset_state(self):
        """Zero out the stateful state variables (no-op if none exist)."""
        if self.states is not None:
            for v in self.states:
                v.assign(ops.zeros_like(v.value))

    def inner_loop(self, sequences, initial_state, mask, training=False):
        """Run the cell over the time axis via `backend.rnn`."""
        cell_kwargs = {}
        if isinstance(self.cell, Layer) and self.cell._call_has_training_arg:
            cell_kwargs["training"] = training

        def step(inputs, states):
            # Create new tensor copies when using PyTorch backend
            # with stateful=True. This prevents in-place modifications
            # that would otherwise break PyTorch's autograd functionality
            # by modifying tensors needed for gradient computation.
            if backend.backend() == "torch" and self.stateful:
                states = tree.map_structure(ops.copy, states)
            output, new_states = self.cell(inputs, states, **cell_kwargs)
            if not tree.is_nested(new_states):
                new_states = [new_states]
            return output, new_states

        if not tree.is_nested(initial_state):
            initial_state = [initial_state]
        return backend.rnn(
            step,
            sequences,
            initial_state,
            go_backwards=self.go_backwards,
            mask=mask,
            unroll=self.unroll,
            input_length=sequences.shape[1],
            zero_output_for_mask=self.zero_output_for_mask,
            return_all_outputs=self.return_sequences,
        )

    def call(
        self,
        sequences,
        initial_state=None,
        mask=None,
        training=False,
    ):
        """Run the RNN over `sequences` and return output (and states)."""
        timesteps = sequences.shape[1]
        if self.unroll and timesteps is None:
            raise ValueError(
                "Cannot unroll a RNN if the "
                "time dimension is undefined. \n"
                "- If using a Sequential model, "
                "specify the time dimension by passing "
                "an `Input()` as your first layer.\n"
                "- If using the functional API, specify "
                "the time dimension by passing a `shape` "
                "or `batch_shape` argument to your `Input()`."
            )
        if initial_state is None:
            # Stateful layers resume from their stored variables; otherwise
            # a fresh initial state is created per call.
            if self.stateful:
                initial_state = self.states
            else:
                initial_state = self.get_initial_state(
                    batch_size=ops.shape(sequences)[0]
                )
        if self.stateful:
            actual_batch_size = sequences.shape[0]
            if (
                self._expected_batch_size is not None
                and actual_batch_size is not None
                and actual_batch_size != self._expected_batch_size
            ):
                raise ValueError(
                    f"If an RNN is stateful, the batch size of the "
                    f"input sequences must be the same as the batch "
                    f"size of the initial state. \n"
                    f"- Expected batch size: {self._expected_batch_size}\n"
                    f"- Received batch size: {actual_batch_size}"
                )
        # RNN expect the states in a list, even if single state.
        if not tree.is_nested(initial_state):
            initial_state = [initial_state]
        initial_state = list(initial_state)
        # Cast states to compute dtype.
        # Note that states may be deeply nested
        # (e.g. in the stacked cells case).
        initial_state = tree.map_structure(
            lambda x: backend.convert_to_tensor(
                x, dtype=self.cell.compute_dtype
            ),
            initial_state,
        )
        # Prepopulate the dropout state so that the inner_loop is stateless
        # this is particularly important for JAX backend.
        self._maybe_config_dropout_masks(
            self.cell, sequences[:, 0, :], initial_state
        )
        last_output, outputs, states = self.inner_loop(
            sequences=sequences,
            initial_state=initial_state,
            mask=mask,
            training=training,
        )
        last_output = ops.cast(last_output, self.compute_dtype)
        outputs = ops.cast(outputs, self.compute_dtype)
        states = tree.map_structure(
            lambda x: ops.cast(x, dtype=self.compute_dtype), states
        )
        self._maybe_reset_dropout_masks(self.cell)
        if self.stateful:
            # Persist the final states into the backend variables for the
            # next batch.
            for self_state, state in zip(
                tree.flatten(self.states), tree.flatten(states)
            ):
                self_state.assign(state)
        if self.return_sequences:
            output = outputs
        else:
            output = last_output
        if self.return_state:
            return output, *states
        return output

    def _maybe_config_dropout_masks(self, cell, input_sequence, input_state):
        """Precompute dropout masks on `cell` (recursing into stacks)."""
        state = (
            input_state[0]
            if isinstance(input_state, (list, tuple))
            else input_state
        )
        if isinstance(cell, DropoutRNNCell):
            cell.get_dropout_mask(input_sequence)
            cell.get_recurrent_dropout_mask(state)
        if isinstance(cell, StackedRNNCells):
            for c, s in zip(cell.cells, input_state):
                self._maybe_config_dropout_masks(c, input_sequence, s)
                # Replicate the behavior of `StackedRNNCells.call` to compute
                # the inputs for the next cell.
                s = list(s) if tree.is_nested(s) else [s]
                cell_call_fn = c.__call__ if callable(c) else c.call
                input_sequence, _ = cell_call_fn(input_sequence, s)

    def _maybe_reset_dropout_masks(self, cell):
        """Clear cached dropout masks on `cell` (recursing into stacks)."""
        if isinstance(cell, DropoutRNNCell):
            cell.reset_dropout_mask()
            cell.reset_recurrent_dropout_mask()
        if isinstance(cell, StackedRNNCells):
            for c in cell.cells:
                self._maybe_reset_dropout_masks(c)

    def get_config(self):
        """Return the layer config, with the cell serialized recursively."""
        config = {
            "return_sequences": self.return_sequences,
            "return_state": self.return_state,
            "go_backwards": self.go_backwards,
            "stateful": self.stateful,
            "unroll": self.unroll,
            "zero_output_for_mask": self.zero_output_for_mask,
        }
        config["cell"] = serialization_lib.serialize_keras_object(self.cell)
        base_config = super().get_config()
        return {**base_config, **config}

    @classmethod
    def from_config(cls, config, custom_objects=None):
        """Rebuild the layer, deserializing the cell first."""
        cell = serialization_lib.deserialize_keras_object(
            config.pop("cell"), custom_objects=custom_objects
        )
        layer = cls(cell, **config)
        return layer
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/rnn/lstm.py | keras/src/layers/rnn/lstm.py | from keras.src import activations
from keras.src import backend
from keras.src import constraints
from keras.src import initializers
from keras.src import ops
from keras.src import regularizers
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.layers.rnn.dropout_rnn_cell import DropoutRNNCell
from keras.src.layers.rnn.rnn import RNN
@keras_export("keras.layers.LSTMCell")
class LSTMCell(Layer, DropoutRNNCell):
"""Cell class for the LSTM layer.
This class processes one step within the whole time sequence input, whereas
`keras.layer.LSTM` processes the whole sequence.
Args:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use. Default: hyperbolic tangent
(`tanh`). If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use for the recurrent step.
Default: sigmoid (`sigmoid`). If you pass `None`, no activation is
applied (ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, (default `True`), whether the layer
should use a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs. Default:
`"glorot_uniform"`.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix, used for the linear transformation
of the recurrent state. Default: `"orthogonal"`.
bias_initializer: Initializer for the bias vector. Default: `"zeros"`.
unit_forget_bias: Boolean (default `True`). If `True`,
add 1 to the bias of the forget gate at initialization.
Setting it to `True` will also force `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et al.](
https://github.com/mlresearch/v37/blob/gh-pages/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_regularizer: Regularizer function applied to the
`recurrent_kernel` weights matrix. Default: `None`.
bias_regularizer: Regularizer function applied to the bias vector.
Default: `None`.
kernel_constraint: Constraint function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_constraint: Constraint function applied to the
`recurrent_kernel` weights matrix. Default: `None`.
bias_constraint: Constraint function applied to the bias vector.
Default: `None`.
dropout: Float between 0 and 1. Fraction of the units to drop for the
linear transformation of the inputs. Default: 0.
recurrent_dropout: Float between 0 and 1. Fraction of the units to drop
for the linear transformation of the recurrent state. Default: 0.
seed: Random seed for dropout.
Call arguments:
inputs: A 2D tensor, with shape `(batch, features)`.
states: A 2D tensor with shape `(batch, units)`, which is the state
from the previous time step.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. Only relevant when `dropout` or
`recurrent_dropout` is used.
Example:
>>> inputs = np.random.random((32, 10, 8))
>>> rnn = keras.layers.RNN(keras.layers.LSTMCell(4))
>>> output = rnn(inputs)
>>> output.shape
(32, 4)
>>> rnn = keras.layers.RNN(
... keras.layers.LSTMCell(4),
... return_sequences=True,
... return_state=True)
>>> whole_sequence_output, final_state = rnn(inputs)
>>> whole_sequence_output.shape
(32, 10, 4)
>>> final_state.shape
(32, 4)
"""
def __init__(
self,
units,
activation="tanh",
recurrent_activation="sigmoid",
use_bias=True,
kernel_initializer="glorot_uniform",
recurrent_initializer="orthogonal",
bias_initializer="zeros",
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.0,
recurrent_dropout=0.0,
seed=None,
**kwargs,
):
if units <= 0:
raise ValueError(
"Received an invalid value for argument `units`, "
f"expected a positive integer, got {units}."
)
implementation = kwargs.pop("implementation", 2)
super().__init__(**kwargs)
self.implementation = implementation
self.units = units
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1.0, max(0.0, dropout))
self.recurrent_dropout = min(1.0, max(0.0, recurrent_dropout))
if self.recurrent_dropout != 0.0:
self.implementation = 1
if self.implementation == 1:
self.dropout_mask_count = 4
self.seed = seed
self.seed_generator = backend.random.SeedGenerator(seed=seed)
self.unit_forget_bias = unit_forget_bias
self.state_size = [self.units, self.units]
self.output_size = self.units
def build(self, input_shape):
super().build(input_shape)
input_dim = input_shape[-1]
self.kernel = self.add_weight(
shape=(input_dim, self.units * 4),
name="kernel",
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units * 4),
name="recurrent_kernel",
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint,
)
if self.use_bias:
if self.unit_forget_bias:
def bias_initializer(_, *args, **kwargs):
return ops.concatenate(
[
self.bias_initializer(
(self.units,), *args, **kwargs
),
initializers.get("ones")(
(self.units,), *args, **kwargs
),
self.bias_initializer(
(self.units * 2,), *args, **kwargs
),
]
)
else:
bias_initializer = self.bias_initializer
self.bias = self.add_weight(
shape=(self.units * 4,),
name="bias",
initializer=bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
)
else:
self.bias = None
def _compute_carry_and_output(self, x, h_tm1, c_tm1):
"""Computes carry and output using split kernels."""
x_i, x_f, x_c, x_o = x
h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o = h_tm1
i = self.recurrent_activation(
x_i + ops.matmul(h_tm1_i, self.recurrent_kernel[:, : self.units])
)
f = self.recurrent_activation(
x_f
+ ops.matmul(
h_tm1_f, self.recurrent_kernel[:, self.units : self.units * 2]
)
)
c = f * c_tm1 + i * self.activation(
x_c
+ ops.matmul(
h_tm1_c,
self.recurrent_kernel[:, self.units * 2 : self.units * 3],
)
)
o = self.recurrent_activation(
x_o
+ ops.matmul(h_tm1_o, self.recurrent_kernel[:, self.units * 3 :])
)
return c, o
def _compute_carry_and_output_fused(self, z, c_tm1):
"""Computes carry and output using fused kernels."""
z0, z1, z2, z3 = z
i = self.recurrent_activation(z0)
f = self.recurrent_activation(z1)
c = f * c_tm1 + i * self.activation(z2)
o = self.recurrent_activation(z3)
return c, o
def call(self, inputs, states, training=False):
h_tm1 = states[0] # previous memory state
c_tm1 = states[1] # previous carry state
if self.implementation == 1:
if training and 0.0 < self.dropout < 1.0:
dp_mask = self.get_dropout_mask(inputs)
inputs_i = inputs * dp_mask[0]
inputs_f = inputs * dp_mask[1]
inputs_c = inputs * dp_mask[2]
inputs_o = inputs * dp_mask[3]
else:
inputs_i = inputs
inputs_f = inputs
inputs_c = inputs
inputs_o = inputs
k_i, k_f, k_c, k_o = ops.split(self.kernel, 4, axis=1)
x_i = ops.matmul(inputs_i, k_i)
x_f = ops.matmul(inputs_f, k_f)
x_c = ops.matmul(inputs_c, k_c)
x_o = ops.matmul(inputs_o, k_o)
if self.use_bias:
b_i, b_f, b_c, b_o = ops.split(self.bias, 4, axis=0)
x_i += b_i
x_f += b_f
x_c += b_c
x_o += b_o
if training and 0.0 < self.recurrent_dropout < 1.0:
rec_dp_mask = self.get_recurrent_dropout_mask(h_tm1)
h_tm1_i = h_tm1 * rec_dp_mask[0]
h_tm1_f = h_tm1 * rec_dp_mask[1]
h_tm1_c = h_tm1 * rec_dp_mask[2]
h_tm1_o = h_tm1 * rec_dp_mask[3]
else:
h_tm1_i = h_tm1
h_tm1_f = h_tm1
h_tm1_c = h_tm1
h_tm1_o = h_tm1
x = (x_i, x_f, x_c, x_o)
h_tm1 = (h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o)
c, o = self._compute_carry_and_output(x, h_tm1, c_tm1)
else:
if training and 0.0 < self.dropout < 1.0:
dp_mask = self.get_dropout_mask(inputs)
inputs = inputs * dp_mask
z = ops.matmul(inputs, self.kernel)
z = ops.add(z, ops.matmul(h_tm1, self.recurrent_kernel))
if self.use_bias:
z = ops.add(z, self.bias)
z = ops.split(z, 4, axis=1)
c, o = self._compute_carry_and_output_fused(z, c_tm1)
h = o * self.activation(c)
return h, [h, c]
def get_config(self):
config = {
"units": self.units,
"activation": activations.serialize(self.activation),
"recurrent_activation": activations.serialize(
self.recurrent_activation
),
"use_bias": self.use_bias,
"unit_forget_bias": self.unit_forget_bias,
"kernel_initializer": initializers.serialize(
self.kernel_initializer
),
"recurrent_initializer": initializers.serialize(
self.recurrent_initializer
),
"bias_initializer": initializers.serialize(self.bias_initializer),
"kernel_regularizer": regularizers.serialize(
self.kernel_regularizer
),
"recurrent_regularizer": regularizers.serialize(
self.recurrent_regularizer
),
"bias_regularizer": regularizers.serialize(self.bias_regularizer),
"kernel_constraint": constraints.serialize(self.kernel_constraint),
"recurrent_constraint": constraints.serialize(
self.recurrent_constraint
),
"bias_constraint": constraints.serialize(self.bias_constraint),
"dropout": self.dropout,
"recurrent_dropout": self.recurrent_dropout,
"seed": self.seed,
}
base_config = super().get_config()
return {**base_config, **config}
def get_initial_state(self, batch_size=None):
return [
ops.zeros((batch_size, d), dtype=self.compute_dtype)
for d in self.state_size
]
@keras_export("keras.layers.LSTM")
class LSTM(RNN):
"""Long Short-Term Memory layer - Hochreiter 1997.
Based on available runtime hardware and constraints, this layer
will choose different implementations (cuDNN-based or backend-native)
to maximize the performance. If a GPU is available and all
the arguments to the layer meet the requirement of the cuDNN kernel
(see below for details), the layer will use a fast cuDNN implementation
when using the TensorFlow backend.
The requirements to use the cuDNN implementation are:
1. `activation` == `tanh`
2. `recurrent_activation` == `sigmoid`
3. `recurrent_dropout` == 0
4. `unroll` is `False`
5. `use_bias` is `True`
6. Inputs, if use masking, are strictly right-padded.
7. Eager execution is enabled in the outermost context.
For example:
>>> inputs = np.random.random((32, 10, 8))
>>> lstm = keras.layers.LSTM(4)
>>> output = lstm(inputs)
>>> output.shape
(32, 4)
>>> lstm = keras.layers.LSTM(
... 4, return_sequences=True, return_state=True)
>>> whole_seq_output, final_memory_state, final_carry_state = lstm(inputs)
>>> whole_seq_output.shape
(32, 10, 4)
>>> final_memory_state.shape
(32, 4)
>>> final_carry_state.shape
(32, 4)
Args:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
Default: sigmoid (`sigmoid`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, (default `True`), whether the layer
should use a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs. Default:
`"glorot_uniform"`.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix, used for the linear transformation of the recurrent
state. Default: `"orthogonal"`.
bias_initializer: Initializer for the bias vector. Default: `"zeros"`.
unit_forget_bias: Boolean (default `True`). If `True`,
add 1 to the bias of the forget gate at initialization.
Setting it to `True` will also force `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et al.](
https://github.com/mlresearch/v37/blob/gh-pages/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_regularizer: Regularizer function applied to the
`recurrent_kernel` weights matrix. Default: `None`.
bias_regularizer: Regularizer function applied to the bias vector.
Default: `None`.
activity_regularizer: Regularizer function applied to the output of the
layer (its "activation"). Default: `None`.
kernel_constraint: Constraint function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_constraint: Constraint function applied to the
`recurrent_kernel` weights matrix. Default: `None`.
bias_constraint: Constraint function applied to the bias vector.
Default: `None`.
dropout: Float between 0 and 1. Fraction of the units to drop for the
linear transformation of the inputs. Default: 0.
recurrent_dropout: Float between 0 and 1. Fraction of the units to drop
for the linear transformation of the recurrent state. Default: 0.
seed: Random seed for dropout.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence. Default: `False`.
return_state: Boolean. Whether to return the last state in addition
to the output. Default: `False`.
go_backwards: Boolean (default: `False`).
If `True`, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default: `False`). If `True`, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If `True`, the network will be unrolled,
else a symbolic loop will be used.
Unrolling can speed-up a RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
use_cudnn: Whether to use a cuDNN-backed implementation. `"auto"` will
attempt to use cuDNN when feasible, and will fallback to the
default implementation if not.
Call arguments:
inputs: A 3D tensor, with shape `(batch, timesteps, feature)`.
mask: Binary tensor of shape `(samples, timesteps)` indicating whether
a given timestep should be masked (optional).
An individual `True` entry indicates that the corresponding timestep
should be utilized, while a `False` entry indicates that the
corresponding timestep should be ignored. Defaults to `None`.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed to the
cell when calling it. This is only relevant if `dropout` or
`recurrent_dropout` is used (optional). Defaults to `None`.
initial_state: List of initial state tensors to be passed to the first
call of the cell (optional, `None` causes creation
of zero-filled initial state tensors). Defaults to `None`.
"""
def __init__(
self,
units,
activation="tanh",
recurrent_activation="sigmoid",
use_bias=True,
kernel_initializer="glorot_uniform",
recurrent_initializer="orthogonal",
bias_initializer="zeros",
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.0,
recurrent_dropout=0.0,
seed=None,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
use_cudnn="auto",
**kwargs,
):
cell = LSTMCell(
units,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
unit_forget_bias=unit_forget_bias,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
dtype=kwargs.get("dtype", None),
trainable=kwargs.get("trainable", True),
name="lstm_cell",
seed=seed,
implementation=kwargs.pop("implementation", 2),
)
super().__init__(
cell,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
unroll=unroll,
activity_regularizer=activity_regularizer,
**kwargs,
)
self.input_spec = InputSpec(ndim=3)
if use_cudnn not in ("auto", True, False):
raise ValueError(
"Invalid valid received for argument `use_cudnn`. "
"Expected one of {'auto', True, False}. "
f"Received: use_cudnn={use_cudnn}"
)
self.use_cudnn = use_cudnn
if (
backend.backend() == "tensorflow"
and backend.cudnn_ok(
cell.activation,
cell.recurrent_activation,
self.unroll,
cell.use_bias,
)
and use_cudnn in (True, "auto")
):
self.supports_jit = False
def inner_loop(self, sequences, initial_state, mask, training=False):
if tree.is_nested(mask):
mask = mask[0]
if self.use_cudnn in ("auto", True):
if not self.recurrent_dropout:
try:
if training and self.dropout:
dp_mask = self.cell.get_dropout_mask(sequences[:, 0, :])
dp_mask = ops.expand_dims(dp_mask, axis=1)
dp_mask = ops.broadcast_to(
dp_mask, ops.shape(sequences)
)
dp_sequences = sequences * dp_mask
else:
dp_sequences = sequences
# Backends are allowed to specify (optionally) optimized
# implementation of the inner LSTM loop. In the case of
# TF for instance, it will leverage cuDNN when feasible, and
# it will raise NotImplementedError otherwise.
out = backend.lstm(
dp_sequences,
initial_state[0],
initial_state[1],
mask,
kernel=self.cell.kernel,
recurrent_kernel=self.cell.recurrent_kernel,
bias=self.cell.bias,
activation=self.cell.activation,
recurrent_activation=self.cell.recurrent_activation,
return_sequences=self.return_sequences,
go_backwards=self.go_backwards,
unroll=self.unroll,
)
# We disable jit_compile for the model in this case,
# since cuDNN ops aren't XLA compatible.
if backend.backend() == "tensorflow":
self.supports_jit = False
return out
except NotImplementedError:
pass
if self.use_cudnn is True:
raise ValueError(
"use_cudnn=True was specified, "
"but cuDNN is not supported for this layer configuration "
"with this backend. Pass use_cudnn='auto' to fallback "
"to a non-cuDNN implementation."
)
return super().inner_loop(
sequences, initial_state, mask=mask, training=training
)
def call(self, sequences, initial_state=None, mask=None, training=False):
return super().call(
sequences, mask=mask, training=training, initial_state=initial_state
)
@property
def units(self):
return self.cell.units
@property
def activation(self):
return self.cell.activation
@property
def recurrent_activation(self):
return self.cell.recurrent_activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def unit_forget_bias(self):
return self.cell.unit_forget_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
def get_config(self):
config = {
"units": self.units,
"activation": activations.serialize(self.activation),
"recurrent_activation": activations.serialize(
self.recurrent_activation
),
"use_bias": self.use_bias,
"kernel_initializer": initializers.serialize(
self.kernel_initializer
),
"recurrent_initializer": initializers.serialize(
self.recurrent_initializer
),
"bias_initializer": initializers.serialize(self.bias_initializer),
"unit_forget_bias": self.unit_forget_bias,
"kernel_regularizer": regularizers.serialize(
self.kernel_regularizer
),
"recurrent_regularizer": regularizers.serialize(
self.recurrent_regularizer
),
"bias_regularizer": regularizers.serialize(self.bias_regularizer),
"activity_regularizer": regularizers.serialize(
self.activity_regularizer
),
"kernel_constraint": constraints.serialize(self.kernel_constraint),
"recurrent_constraint": constraints.serialize(
self.recurrent_constraint
),
"bias_constraint": constraints.serialize(self.bias_constraint),
"dropout": self.dropout,
"recurrent_dropout": self.recurrent_dropout,
"seed": self.cell.seed,
}
base_config = super().get_config()
del base_config["cell"]
return {**base_config, **config}
@classmethod
def from_config(cls, config):
return cls(**config)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/rnn/conv_lstm2d.py | keras/src/layers/rnn/conv_lstm2d.py | from keras.src.api_export import keras_export
from keras.src.layers.rnn.conv_lstm import ConvLSTM
@keras_export("keras.layers.ConvLSTM2D")
class ConvLSTM2D(ConvLSTM):
"""2D Convolutional LSTM.
Similar to an LSTM layer, but the input transformations
and recurrent transformations are both convolutional.
Args:
filters: int, the dimension of the output space (the number of filters
in the convolution).
kernel_size: int or tuple/list of 2 integers, specifying the size of the
convolution window.
strides: int or tuple/list of 2 integers, specifying the stride length
of the convolution. `strides > 1` is incompatible with
`dilation_rate > 1`.
padding: string, `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding evenly to
the left/right or up/down of the input such that output has the same
height/width dimension as the input.
data_format: string, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, steps, features)`
while `"channels_first"` corresponds to inputs with shape
`(batch, features, steps)`. It defaults to the `image_data_format`
value found in your Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be `"channels_last"`.
dilation_rate: int or tuple/list of 2 integers, specifying the dilation
rate to use for dilated convolution.
activation: Activation function to use. By default hyperbolic tangent
activation function is applied (`tanh(x)`).
recurrent_activation: Activation function to use for the recurrent step.
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel` weights
matrix, used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
unit_forget_bias: Boolean. If `True`, add 1 to the bias of the forget
gate at initialization.
Use in combination with `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et al., 2015](
http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix.
recurrent_regularizer: Regularizer function applied to the
`recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to.
kernel_constraint: Constraint function applied to the `kernel` weights
matrix.
recurrent_constraint: Constraint function applied to the
`recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1. Fraction of the units to drop for the
linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1. Fraction of the units to drop
for the linear transformation of the recurrent state.
seed: Random seed for dropout.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence. Default: `False`.
return_state: Boolean. Whether to return the last state in addition
to the output. Default: `False`.
go_backwards: Boolean (default: `False`).
If `True`, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If `True`, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default: `False`).
If `True`, the network will be unrolled,
else a symbolic loop will be used.
Unrolling can speed-up a RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
Call arguments:
inputs: A 5D tensor.
mask: Binary tensor of shape `(samples, timesteps)` indicating whether a
given timestep should be masked.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode.
This is only relevant if `dropout` or `recurrent_dropout` are set.
initial_state: List of initial state tensors to be passed to the first
call of the cell.
Input shape:
- If `data_format='channels_first'`:
5D tensor with shape: `(samples, time, channels, rows, cols)`
- If `data_format='channels_last'`:
5D tensor with shape: `(samples, time, rows, cols, channels)`
Output shape:
- If `return_state`: a list of tensors. The first tensor is the output.
The remaining tensors are the last states,
each 4D tensor with shape: `(samples, filters, new_rows, new_cols)` if
`data_format='channels_first'`
or shape: `(samples, new_rows, new_cols, filters)` if
`data_format='channels_last'`. `rows` and `cols` values might have
changed due to padding.
- If `return_sequences`: 5D tensor with shape: `(samples, timesteps,
filters, new_rows, new_cols)` if data_format='channels_first'
or shape: `(samples, timesteps, new_rows, new_cols, filters)` if
`data_format='channels_last'`.
- Else, 4D tensor with shape: `(samples, filters, new_rows, new_cols)` if
`data_format='channels_first'`
or shape: `(samples, new_rows, new_cols, filters)` if
`data_format='channels_last'`.
References:
- [Shi et al., 2015](http://arxiv.org/abs/1506.04214v1)
(the current implementation does not include the feedback loop on the
cells output).
"""
def __init__(
self,
filters,
kernel_size,
strides=1,
padding="valid",
data_format=None,
dilation_rate=1,
activation="tanh",
recurrent_activation="sigmoid",
use_bias=True,
kernel_initializer="glorot_uniform",
recurrent_initializer="orthogonal",
bias_initializer="zeros",
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.0,
recurrent_dropout=0.0,
seed=None,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
**kwargs,
):
super().__init__(
rank=2,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
unit_forget_bias=unit_forget_bias,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
seed=seed,
**kwargs,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/rnn/dropout_rnn_cell.py | keras/src/layers/rnn/dropout_rnn_cell.py | from keras.src import backend
from keras.src import ops
class DropoutRNNCell:
"""Object that holds dropout-related functionality for RNN cells.
This class is not a standalone RNN cell. It suppose to be used with a RNN
cell by multiple inheritance. Any cell that mix with class should have
following fields:
- `dropout`: a float number in the range `[0, 1]`.
Dropout rate for the input tensor.
- `recurrent_dropout`: a float number in the range `[0, 1]`.
Dropout rate for the recurrent connections.
- `seed_generator`, an instance of `backend.random.SeedGenerator`.
This object will create and cache dropout masks, and reuse them for
all incoming steps, so that the same mask is used for every step.
"""
def _create_dropout_mask(self, step_input, dropout_rate):
count = getattr(self, "dropout_mask_count", None)
ones = ops.ones_like(step_input)
if count is None:
return backend.random.dropout(
ones, rate=dropout_rate, seed=self.seed_generator
)
else:
return [
backend.random.dropout(
ones, rate=dropout_rate, seed=self.seed_generator
)
for _ in range(count)
]
def get_dropout_mask(self, step_input):
if not hasattr(self, "_dropout_mask"):
self._dropout_mask = None
if self._dropout_mask is None and self.dropout > 0:
self._dropout_mask = self._create_dropout_mask(
step_input, self.dropout
)
return self._dropout_mask
def get_recurrent_dropout_mask(self, step_input):
if not hasattr(self, "_recurrent_dropout_mask"):
self._recurrent_dropout_mask = None
if self._recurrent_dropout_mask is None and self.recurrent_dropout > 0:
self._recurrent_dropout_mask = self._create_dropout_mask(
step_input, self.recurrent_dropout
)
return self._recurrent_dropout_mask
def reset_dropout_mask(self):
"""Reset the cached dropout mask if any.
The RNN layer invokes this in the `call()` method
so that the cached mask is cleared after calling `cell.call()`. The
mask should be cached across all timestep within the same batch, but
shouldn't be cached between batches.
"""
self._dropout_mask = None
def reset_recurrent_dropout_mask(self):
self._recurrent_dropout_mask = None
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/rnn/conv_lstm2d_test.py | keras/src/layers/rnn/conv_lstm2d_test.py | import numpy as np
import pytest
from keras.src import backend
from keras.src import initializers
from keras.src import layers
from keras.src import testing
class ConvLSTM2DTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_basics(self):
channels_last = backend.config.image_data_format() == "channels_last"
self.run_layer_test(
layers.ConvLSTM2D,
init_kwargs={"filters": 5, "kernel_size": 3, "padding": "same"},
input_shape=(3, 2, 4, 4, 3) if channels_last else (3, 2, 3, 4, 4),
expected_output_shape=(
(3, 4, 4, 5) if channels_last else (3, 5, 4, 4)
),
expected_num_trainable_weights=3,
expected_num_non_trainable_weights=0,
supports_masking=True,
)
self.run_layer_test(
layers.ConvLSTM2D,
init_kwargs={
"filters": 5,
"kernel_size": 3,
"padding": "valid",
"recurrent_dropout": 0.5,
},
input_shape=(3, 2, 8, 8, 3) if channels_last else (3, 2, 3, 8, 8),
call_kwargs={"training": True},
expected_output_shape=(
(3, 6, 6, 5) if channels_last else (3, 5, 6, 6)
),
expected_num_trainable_weights=3,
expected_num_non_trainable_weights=0,
supports_masking=True,
)
self.run_layer_test(
layers.ConvLSTM2D,
init_kwargs={
"filters": 5,
"kernel_size": 3,
"padding": "valid",
"return_sequences": True,
},
input_shape=(3, 2, 8, 8, 3) if channels_last else (3, 2, 3, 8, 8),
expected_output_shape=(
(3, 2, 6, 6, 5) if channels_last else (3, 2, 5, 6, 6)
),
expected_num_trainable_weights=3,
expected_num_non_trainable_weights=0,
supports_masking=True,
)
def test_correctness(self):
sequence = (
np.arange(480).reshape((2, 3, 4, 4, 5)).astype("float32") / 100
)
expected_output = np.array(
[
[
[[0.48694518, 0.48694518], [0.50237733, 0.50237733]],
[[0.5461202, 0.5461202], [0.5598283, 0.5598283]],
],
[
[[0.8661607, 0.8661607], [0.86909103, 0.86909103]],
[[0.8774414, 0.8774414], [0.8800861, 0.8800861]],
],
]
)
if backend.config.image_data_format() == "channels_first":
sequence = sequence.transpose((0, 1, 4, 2, 3))
expected_output = expected_output.transpose((0, 3, 1, 2))
layer = layers.ConvLSTM2D(
filters=2,
kernel_size=3,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
)
output = layer(sequence)
self.assertAllClose(
expected_output,
output,
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/rnn/__init__.py | keras/src/layers/rnn/__init__.py | python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false | |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/rnn/stacked_rnn_cells_test.py | keras/src/layers/rnn/stacked_rnn_cells_test.py | import numpy as np
import pytest
from keras.src import layers
from keras.src import testing
from keras.src.layers.rnn.rnn_test import OneStateRNNCell
from keras.src.layers.rnn.rnn_test import TwoStatesRNNCell
class StackedRNNTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_basics(self):
self.run_layer_test(
layers.RNN,
init_kwargs={
"cell": [
OneStateRNNCell(3),
OneStateRNNCell(4),
OneStateRNNCell(5),
],
},
input_shape=(2, 3, 4),
expected_output_shape=(2, 5),
expected_num_trainable_weights=6,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
supports_masking=True,
custom_objects={"OneStateRNNCell": OneStateRNNCell},
)
self.run_layer_test(
layers.RNN,
init_kwargs={
"cell": [
OneStateRNNCell(3),
OneStateRNNCell(4),
OneStateRNNCell(5),
],
"return_sequences": True,
},
input_shape=(2, 3, 4),
expected_output_shape=(2, 3, 5),
expected_num_trainable_weights=6,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
supports_masking=True,
custom_objects={"OneStateRNNCell": OneStateRNNCell},
)
# Two-state case.
self.run_layer_test(
layers.RNN,
init_kwargs={
"cell": [
TwoStatesRNNCell(3),
TwoStatesRNNCell(4),
TwoStatesRNNCell(5),
],
},
input_shape=(2, 3, 4),
expected_output_shape=(2, 5),
expected_num_trainable_weights=9,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
supports_masking=True,
custom_objects={"TwoStatesRNNCell": TwoStatesRNNCell},
)
self.run_layer_test(
layers.RNN,
init_kwargs={
"cell": [
TwoStatesRNNCell(3),
TwoStatesRNNCell(4),
TwoStatesRNNCell(5),
],
"return_sequences": True,
},
input_shape=(2, 3, 4),
expected_output_shape=(2, 3, 5),
expected_num_trainable_weights=9,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
supports_masking=True,
custom_objects={"TwoStatesRNNCell": TwoStatesRNNCell},
)
self.run_layer_test(
layers.RNN,
init_kwargs={
"cell": [
layers.SimpleRNNCell(3, dropout=0.1, recurrent_dropout=0.1),
layers.SimpleRNNCell(4, dropout=0.1, recurrent_dropout=0.1),
layers.SimpleRNNCell(5, dropout=0.1, recurrent_dropout=0.1),
],
"return_sequences": True,
},
input_shape=(2, 3, 4),
expected_output_shape=(2, 3, 5),
expected_num_trainable_weights=9,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=3,
supports_masking=True,
)
self.run_layer_test(
layers.RNN,
init_kwargs={
"cell": [
layers.GRUCell(3, dropout=0.1, recurrent_dropout=0.1),
layers.GRUCell(4, dropout=0.1, recurrent_dropout=0.1),
layers.GRUCell(5, dropout=0.1, recurrent_dropout=0.1),
],
"return_sequences": True,
},
input_shape=(2, 3, 4),
expected_output_shape=(2, 3, 5),
expected_num_trainable_weights=9,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=3,
supports_masking=True,
)
self.run_layer_test(
layers.RNN,
init_kwargs={
"cell": [
layers.LSTMCell(3, dropout=0.1, recurrent_dropout=0.1),
layers.LSTMCell(4, dropout=0.1, recurrent_dropout=0.1),
layers.LSTMCell(5, dropout=0.1, recurrent_dropout=0.1),
],
"return_sequences": True,
},
input_shape=(2, 3, 4),
expected_output_shape=(2, 3, 5),
expected_num_trainable_weights=9,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=3,
supports_masking=True,
)
def test_correctness_single_state_stack(self):
sequence = np.arange(24).reshape((2, 3, 4)).astype("float32")
layer = layers.RNN([OneStateRNNCell(3), OneStateRNNCell(2)])
output = layer(sequence)
self.assertAllClose(
np.array([[786.0, 786.0], [4386.0, 4386.0]]),
output,
tpu_atol=1e-2,
tpu_rtol=1e-2,
)
layer = layers.RNN(
[OneStateRNNCell(3), OneStateRNNCell(2)], return_sequences=True
)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[[18.0, 18.0], [156.0, 156.0], [786.0, 786.0]],
[[162.0, 162.0], [1020.0, 1020.0], [4386.0, 4386.0]],
]
),
output,
tpu_atol=1e-2,
tpu_rtol=1e-2,
)
layer = layers.RNN(
[OneStateRNNCell(3), OneStateRNNCell(2)], return_state=True
)
output, state_1, state_2 = layer(sequence)
self.assertAllClose(
np.array([[786.0, 786.0], [4386.0, 4386.0]]),
output,
tpu_atol=1e-2,
tpu_rtol=1e-2,
)
self.assertAllClose(
np.array([[158.0, 158.0, 158.0], [782.0, 782.0, 782.0]]), state_1
)
self.assertAllClose(
np.array([[786.0, 786.0], [4386.0, 4386.0]]),
state_2,
tpu_atol=1e-2,
tpu_rtol=1e-2,
)
layer = layers.RNN(
[OneStateRNNCell(3), OneStateRNNCell(2)],
return_sequences=True,
return_state=True,
)
output, state_1, state_2 = layer(sequence)
self.assertAllClose(
np.array(
[
[[18.0, 18.0], [156.0, 156.0], [786.0, 786.0]],
[[162.0, 162.0], [1020.0, 1020.0], [4386.0, 4386.0]],
]
),
output,
tpu_atol=1e-2,
tpu_rtol=1e-2,
)
self.assertAllClose(
np.array([[158.0, 158.0, 158.0], [782.0, 782.0, 782.0]]),
state_1,
tpu_atol=1e-2,
tpu_rtol=1e-2,
)
self.assertAllClose(
np.array([[786.0, 786.0], [4386.0, 4386.0]]),
state_2,
tpu_atol=1e-2,
tpu_rtol=1e-2,
)
def test_correctness_two_states_stack(self):
sequence = np.arange(24).reshape((2, 3, 4)).astype("float32")
layer = layers.RNN([TwoStatesRNNCell(3), TwoStatesRNNCell(2)])
output = layer(sequence)
self.assertAllClose(
np.array([[3144.0, 3144.0], [17544.0, 17544.0]]),
output,
tpu_atol=1e-2,
tpu_rtol=1e-2,
)
layer = layers.RNN(
[TwoStatesRNNCell(3), TwoStatesRNNCell(2)], return_sequences=True
)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[[72.0, 72.0], [624.0, 624.0], [3144.0, 3144.0]],
[[648.0, 648.0], [4080.0, 4080.0], [17544.0, 17544.0]],
]
),
output,
tpu_atol=1e-2,
tpu_rtol=1e-2,
)
layer = layers.RNN(
[TwoStatesRNNCell(3), TwoStatesRNNCell(2)], return_state=True
)
output, state_1, state_2 = layer(sequence)
self.assertAllClose(
np.array([[3144.0, 3144.0], [17544.0, 17544.0]]),
output,
tpu_atol=1e-2,
tpu_rtol=1e-2,
)
self.assertAllClose(
np.array([[158.0, 158.0, 158.0], [782.0, 782.0, 782.0]]),
state_1[0],
tpu_atol=1e-2,
tpu_rtol=1e-2,
)
self.assertAllClose(
np.array([[158.0, 158.0, 158.0], [782.0, 782.0, 782.0]]),
state_1[1],
tpu_atol=1e-2,
tpu_rtol=1e-2,
)
self.assertAllClose(
np.array([[1572.0, 1572.0], [8772.0, 8772.0]]),
state_2[0],
tpu_atol=1e-2,
tpu_rtol=1e-2,
)
self.assertAllClose(
np.array([[1572.0, 1572.0], [8772.0, 8772.0]]),
state_2[1],
tpu_atol=1e-2,
tpu_rtol=1e-2,
)
def test_statefullness_single_state_stack(self):
sequence = np.arange(24).reshape((2, 3, 4)).astype("float32")
layer = layers.RNN(
[OneStateRNNCell(3), OneStateRNNCell(2)], stateful=True
)
layer(sequence)
output = layer(sequence)
self.assertAllClose(
np.array([[34092.0, 34092.0], [173196.0, 173196.0]]),
output,
tpu_atol=1e-2,
tpu_rtol=1e-2,
)
def test_statefullness_two_states_stack(self):
sequence = np.arange(24).reshape((2, 3, 4)).astype("float32")
layer = layers.RNN(
[TwoStatesRNNCell(3), TwoStatesRNNCell(2)], stateful=True
)
layer(sequence)
output = layer(sequence)
self.assertAllClose(
np.array([[136368.0, 136368.0], [692784.0, 692784.0]]),
output,
tpu_atol=1e-2,
tpu_rtol=1e-2,
)
def test_return_state_stacked_lstm_cell(self):
layer = layers.RNN(
[layers.LSTMCell(10), layers.LSTMCell(10)], return_state=True
)
out = layer(np.zeros((2, 3, 5)))
self.assertLen(out, 3)
self.assertEqual(out[0].shape, (2, 10))
self.assertEqual(out[1][0].shape, (2, 10))
self.assertEqual(out[1][1].shape, (2, 10))
self.assertEqual(out[2][0].shape, (2, 10))
self.assertEqual(out[2][1].shape, (2, 10))
shape = layer.compute_output_shape((2, 3, 5))
self.assertLen(shape, 3)
self.assertEqual(shape[0], (2, 10))
self.assertEqual(shape[1][0], (2, 10))
self.assertEqual(shape[1][1], (2, 10))
self.assertEqual(shape[2][0], (2, 10))
self.assertEqual(shape[2][1], (2, 10))
def test_stacked_lstm_cell_mask(self):
sequence = np.ones((2, 3, 4))
mask = np.array([[True, True, True], [True, True, False]])
cell_kwargs = dict(
units=1, kernel_initializer="ones", recurrent_initializer="ones"
)
rnn_cells = [layers.LSTMCell(**cell_kwargs) for _ in range(2)]
stacked_rnn = layers.RNN(rnn_cells)
output = stacked_rnn(sequence, mask=mask)
self.assertAllClose(np.array([[0.7793], [0.5998]]), output, atol=1e-4)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/rnn/lstm_test.py | keras/src/layers/rnn/lstm_test.py | import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import initializers
from keras.src import layers
from keras.src import testing
class LSTMTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_basics(self):
self.run_layer_test(
layers.LSTM,
init_kwargs={"units": 3, "dropout": 0.5},
input_shape=(3, 2, 4),
call_kwargs={"training": True},
expected_output_shape=(3, 3),
expected_num_trainable_weights=3,
expected_num_non_trainable_weights=0,
supports_masking=True,
)
self.run_layer_test(
layers.LSTM,
init_kwargs={"units": 3, "dropout": 0.5, "recurrent_dropout": 0.5},
input_shape=(3, 2, 4),
call_kwargs={"training": True},
expected_output_shape=(3, 3),
expected_num_trainable_weights=3,
expected_num_non_trainable_weights=0,
supports_masking=True,
)
self.run_layer_test(
layers.LSTM,
init_kwargs={
"units": 3,
"return_sequences": True,
"bias_regularizer": "l1",
"kernel_regularizer": "l2",
"recurrent_regularizer": "l2",
},
input_shape=(3, 2, 4),
expected_output_shape=(3, 2, 3),
expected_num_losses=3,
expected_num_trainable_weights=3,
expected_num_non_trainable_weights=0,
supports_masking=True,
)
@parameterized.parameters([1, 2])
def test_correctness(self, implementation):
sequence = np.arange(72).reshape((3, 6, 4)).astype("float32")
layer = layers.LSTM(
3,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
implementation=implementation,
)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[0.6288687, 0.6288687, 0.6288687],
[0.86899155, 0.86899155, 0.86899155],
[0.9460773, 0.9460773, 0.9460773],
]
),
output,
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
layer = layers.LSTM(
3,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
go_backwards=True,
implementation=implementation,
)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[0.35622165, 0.35622165, 0.35622165],
[0.74789524, 0.74789524, 0.74789524],
[0.8872726, 0.8872726, 0.8872726],
]
),
output,
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
layer = layers.LSTM(
3,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
unroll=True,
implementation=implementation,
)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[0.6288687, 0.6288687, 0.6288687],
[0.86899155, 0.86899155, 0.86899155],
[0.9460773, 0.9460773, 0.9460773],
]
),
output,
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
layer = layers.LSTM(
3,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
unit_forget_bias=False,
implementation=implementation,
)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[0.57019705, 0.57019705, 0.57019705],
[0.8661914, 0.8661914, 0.8661914],
[0.9459622, 0.9459622, 0.9459622],
]
),
output,
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
layer = layers.LSTM(
3,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
use_bias=False,
implementation=implementation,
)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[0.54986924, 0.54986924, 0.54986924],
[0.86226785, 0.86226785, 0.86226785],
[0.9443936, 0.9443936, 0.9443936],
]
),
output,
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
def test_statefulness(self):
sequence = np.arange(24).reshape((2, 3, 4)).astype("float32")
layer = layers.LSTM(
4,
stateful=True,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
)
layer(sequence)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[0.3124785, 0.3124785, 0.3124785, 0.3124785],
[0.6863672, 0.6863672, 0.6863672, 0.6863672],
]
),
output,
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
layer.reset_state()
layer(sequence)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[0.3124785, 0.3124785, 0.3124785, 0.3124785],
[0.6863672, 0.6863672, 0.6863672, 0.6863672],
]
),
output,
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
def test_pass_initial_state(self):
sequence = np.arange(24).reshape((2, 4, 3)).astype("float32")
initial_state = [
np.arange(4).reshape((2, 2)).astype("float32"),
np.arange(4).reshape((2, 2)).astype("float32"),
]
layer = layers.LSTM(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
)
output = layer(sequence, initial_state=initial_state)
self.assertAllClose(
np.array([[0.20574439, 0.3558822], [0.64930826, 0.66276]]),
output,
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
layer = layers.LSTM(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
go_backwards=True,
)
output = layer(sequence, initial_state=initial_state)
self.assertAllClose(
np.array([[0.13281618, 0.2790356], [0.5839337, 0.5992567]]),
output,
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
def test_masking(self):
sequence = np.arange(24).reshape((2, 4, 3)).astype("float32")
mask = np.array([[True, True, False, True], [True, False, False, True]])
layer = layers.LSTM(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
unroll=True,
)
output = layer(sequence, mask=mask)
self.assertAllClose(
np.array([[0.1524914, 0.1524914], [0.35969394, 0.35969394]]),
output,
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
layer = layers.LSTM(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
return_sequences=True,
)
output = layer(sequence, mask=mask)
self.assertAllClose(
np.array(
[
[0.0158891, 0.0158891],
[0.05552047, 0.05552047],
[0.05552047, 0.05552047],
[0.1524914, 0.1524914],
],
),
output[0],
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
self.assertAllClose(
np.array(
[
[0.14185596, 0.14185596],
[0.14185596, 0.14185596],
[0.14185596, 0.14185596],
[0.35969394, 0.35969394],
],
),
output[1],
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
layer = layers.LSTM(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
return_sequences=True,
zero_output_for_mask=True,
)
output = layer(sequence, mask=mask)
self.assertAllClose(
np.array(
[
[0.0158891, 0.0158891],
[0.05552047, 0.05552047],
[0.0, 0.0],
[0.1524914, 0.1524914],
],
),
output[0],
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
self.assertAllClose(
np.array(
[
[0.14185596, 0.14185596],
[0.0, 0.0],
[0.0, 0.0],
[0.35969394, 0.35969394],
],
),
output[1],
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
layer = layers.LSTM(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
go_backwards=True,
)
output = layer(sequence, mask=mask)
self.assertAllClose(
np.array([[0.10056866, 0.10056866], [0.31006062, 0.31006062]]),
output,
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/rnn/simple_rnn_test.py | keras/src/layers/rnn/simple_rnn_test.py | import numpy as np
import pytest
from keras.src import initializers
from keras.src import layers
from keras.src import testing
class SimpleRNNTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_basics(self):
self.run_layer_test(
layers.SimpleRNN,
init_kwargs={"units": 3, "dropout": 0.5, "recurrent_dropout": 0.5},
input_shape=(3, 2, 4),
call_kwargs={"training": True},
expected_output_shape=(3, 3),
expected_num_trainable_weights=3,
expected_num_non_trainable_weights=0,
expected_num_non_trainable_variables=1,
supports_masking=True,
)
self.run_layer_test(
layers.SimpleRNN,
init_kwargs={
"units": 3,
"return_sequences": True,
"bias_regularizer": "l1",
"kernel_regularizer": "l2",
"recurrent_regularizer": "l2",
},
input_shape=(3, 2, 4),
expected_output_shape=(3, 2, 3),
expected_num_losses=3,
expected_num_trainable_weights=3,
expected_num_non_trainable_weights=0,
supports_masking=True,
)
def test_correctness(self):
sequence = np.arange(24).reshape((2, 3, 4)).astype("float32")
layer = layers.SimpleRNN(
4,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[0.405432, 0.405432, 0.405432, 0.405432],
[0.73605347, 0.73605347, 0.73605347, 0.73605347],
]
),
output,
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
layer = layers.SimpleRNN(
4,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
unroll=True,
)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[0.405432, 0.405432, 0.405432, 0.405432],
[0.73605347, 0.73605347, 0.73605347, 0.73605347],
]
),
output,
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
layer = layers.SimpleRNN(
4,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
go_backwards=True,
)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[0.11144729, 0.11144729, 0.11144729, 0.11144729],
[0.5528889, 0.5528889, 0.5528889, 0.5528889],
]
),
output,
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
layer = layers.SimpleRNN(
4,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
go_backwards=True,
unroll=True,
)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[0.11144729, 0.11144729, 0.11144729, 0.11144729],
[0.5528889, 0.5528889, 0.5528889, 0.5528889],
]
),
output,
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
def test_statefulness(self):
sequence = np.arange(24).reshape((2, 3, 4)).astype("float32")
layer = layers.SimpleRNN(
4,
stateful=True,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
)
layer(sequence)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[0.40559256, 0.40559256, 0.40559256, 0.40559256],
[0.7361247, 0.7361247, 0.7361247, 0.7361247],
]
),
output,
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
layer.reset_state()
layer(sequence)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[0.40559256, 0.40559256, 0.40559256, 0.40559256],
[0.7361247, 0.7361247, 0.7361247, 0.7361247],
]
),
output,
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
def test_pass_initial_state(self):
sequence = np.arange(24).reshape((2, 4, 3)).astype("float32")
initial_state = np.arange(8).reshape((2, 4)).astype("float32")
layer = layers.SimpleRNN(
4,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
)
output = layer(sequence, initial_state=initial_state)
self.assertAllClose(
np.array(
[
[0.33621645, 0.33621645, 0.33621645, 0.33621645],
[0.6262637, 0.6262637, 0.6262637, 0.6262637],
]
),
output,
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
layer = layers.SimpleRNN(
4,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
go_backwards=True,
)
output = layer(sequence, initial_state=initial_state)
self.assertAllClose(
np.array(
[
[0.07344437, 0.07344437, 0.07344437, 0.07344437],
[0.43043602, 0.43043602, 0.43043602, 0.43043602],
]
),
output,
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
def test_masking(self):
sequence = np.arange(24).reshape((2, 4, 3)).astype("float32")
mask = np.array([[True, True, False, True], [True, False, False, True]])
layer = layers.SimpleRNN(
4,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
unroll=True,
)
output = layer(sequence, mask=mask)
self.assertAllClose(
np.array(
[
[0.32951632, 0.32951632, 0.32951632, 0.32951632],
[0.61799484, 0.61799484, 0.61799484, 0.61799484],
]
),
output,
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
layer = layers.SimpleRNN(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
return_sequences=True,
)
output = layer(sequence, mask=mask)
self.assertAllClose(
np.array(
[
[0.0599281, 0.0599281],
[0.15122814, 0.15122814],
[0.15122814, 0.15122814],
[0.32394567, 0.32394567],
],
),
output[0],
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
self.assertAllClose(
np.array(
[
[0.3969304, 0.3969304],
[0.3969304, 0.3969304],
[0.3969304, 0.3969304],
[0.608085, 0.608085],
],
),
output[1],
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
layer = layers.SimpleRNN(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
return_sequences=True,
zero_output_for_mask=True,
)
output = layer(sequence, mask=mask)
self.assertAllClose(
np.array(
[
[0.0599281, 0.0599281],
[0.15122814, 0.15122814],
[0.0, 0.0],
[0.32394567, 0.32394567],
],
),
output[0],
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
self.assertAllClose(
np.array(
[
[0.3969304, 0.3969304],
[0.0, 0.0],
[0.0, 0.0],
[0.608085, 0.608085],
],
),
output[1],
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
layer = layers.SimpleRNN(
4,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
go_backwards=True,
)
output = layer(sequence, mask=mask)
self.assertAllClose(
np.array(
[
[0.07376196, 0.07376196, 0.07376196, 0.07376196],
[0.43645123, 0.43645123, 0.43645123, 0.43645123],
]
),
output,
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/rnn/bidirectional_test.py | keras/src/layers/rnn/bidirectional_test.py | import numpy as np
import pytest
from keras.src import initializers
from keras.src import layers
from keras.src import testing
class SimpleRNNTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_basics(self):
self.run_layer_test(
layers.Bidirectional,
init_kwargs={"layer": layers.SimpleRNN(4)},
input_shape=(3, 2, 4),
expected_output_shape=(3, 8),
expected_num_trainable_weights=6,
expected_num_non_trainable_weights=0,
supports_masking=True,
)
self.run_layer_test(
layers.Bidirectional,
init_kwargs={
"layer": layers.SimpleRNN(4),
"backward_layer": layers.SimpleRNN(4, go_backwards=True),
"merge_mode": "sum",
},
input_shape=(3, 2, 4),
expected_output_shape=(3, 4),
expected_num_trainable_weights=6,
expected_num_non_trainable_weights=0,
supports_masking=True,
)
def test_correctness(self):
sequence = np.arange(24).reshape((2, 3, 4)).astype("float32")
forward_layer = layers.SimpleRNN(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
)
layer = layers.Bidirectional(
layer=forward_layer,
)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[0.39687276, 0.39687276, 0.10004295, 0.10004295],
[0.7237238, 0.7237238, 0.53391594, 0.53391594],
]
),
output,
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
layer = layers.Bidirectional(layer=forward_layer, merge_mode="ave")
output = layer(sequence)
self.assertAllClose(
np.array([[0.24845785, 0.24845785], [0.6288199, 0.6288199]]),
output,
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
layer = layers.Bidirectional(layer=forward_layer, merge_mode=None)
output1, output2 = layer(sequence)
self.assertAllClose(
np.array([[0.39687276, 0.39687276], [0.7237238, 0.7237238]]),
output1,
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
self.assertAllClose(
np.array([[0.10004295, 0.10004295], [0.53391594, 0.53391594]]),
output2,
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
backward_layer = layers.SimpleRNN(
2,
kernel_initializer=initializers.Constant(0.03),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.01),
go_backwards=True,
)
layer = layers.Bidirectional(
layer=forward_layer, backward_layer=backward_layer, merge_mode="mul"
)
output = layer(sequence)
self.assertAllClose(
np.array([[0.08374989, 0.08374989], [0.6740834, 0.6740834]]),
output,
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
forward_layer = layers.GRU(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
return_sequences=True,
)
layer = layers.Bidirectional(layer=forward_layer, merge_mode="sum")
output = layer(sequence)
self.assertAllClose(
np.array(
[
[
[0.20937867, 0.20937867],
[0.34462988, 0.34462988],
[0.40290534, 0.40290534],
],
[
[0.59829646, 0.59829646],
[0.6734641, 0.6734641],
[0.6479671, 0.6479671],
],
]
),
output,
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
def test_statefulness(self):
sequence = np.arange(24).reshape((2, 4, 3)).astype("float32")
forward_layer = layers.LSTM(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
stateful=True,
)
layer = layers.Bidirectional(layer=forward_layer)
layer(sequence)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[0.26234663, 0.26234663, 0.16959146, 0.16959146],
[0.6137073, 0.6137073, 0.5381646, 0.5381646],
]
),
output,
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
layer.reset_state()
layer(sequence)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[0.26234663, 0.26234663, 0.16959146, 0.16959146],
[0.6137073, 0.6137073, 0.5381646, 0.5381646],
]
),
output,
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
def test_pass_initial_state(self):
sequence = np.arange(24).reshape((2, 4, 3)).astype("float32")
initial_state = [
np.arange(4).reshape((2, 2)).astype("float32") * 1,
np.arange(4).reshape((2, 2)).astype("float32") * 2,
np.arange(4).reshape((2, 2)).astype("float32") * 3,
np.arange(4).reshape((2, 2)).astype("float32") * 4,
]
forward_layer = layers.LSTM(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
)
layer = layers.Bidirectional(
layer=forward_layer,
)
output = layer(sequence, initial_state=initial_state)
self.assertAllClose(
np.array(
[
[0.20794602, 0.4577124, 0.14046375, 0.48191673],
[0.6682636, 0.6711909, 0.60943645, 0.60950446],
]
),
output,
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
def test_masking(self):
sequence = np.arange(24).reshape((2, 4, 3)).astype("float32")
forward_layer = layers.GRU(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
)
layer = layers.Bidirectional(layer=forward_layer)
mask = np.array([[True, True, False, True], [True, False, False, True]])
output = layer(sequence, mask=mask)
self.assertAllClose(
np.array(
[
[0.19393763, 0.19393763, 0.11669192, 0.11669192],
[0.30818558, 0.30818558, 0.28380975, 0.28380975],
]
),
output,
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
def test_return_state(self):
sequence = np.arange(24).reshape((2, 4, 3)).astype("float32")
forward_layer = layers.LSTM(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
return_state=True,
)
layer = layers.Bidirectional(layer=forward_layer)
output, h1, c1, h2, c2 = layer(sequence)
self.assertAllClose(
np.array(
[
[0.1990008, 0.1990008, 0.12659755, 0.12659755],
[0.52335435, 0.52335435, 0.44717982, 0.44717982],
]
),
output,
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
self.assertAllClose(
np.array([[0.1990008, 0.1990008], [0.52335435, 0.52335435]]),
h1,
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
self.assertAllClose(
np.array([[0.35567185, 0.35567185], [1.0492687, 1.0492687]]),
c1,
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
self.assertAllClose(
np.array([[0.12659755, 0.12659755], [0.44717982, 0.44717982]]),
h2,
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
self.assertAllClose(
np.array([[0.2501858, 0.2501858], [0.941473, 0.941473]]),
c2,
tpu_atol=1e-3,
tpu_rtol=1e-3,
)
@pytest.mark.requires_trainable_backend
def test_output_shape(self):
x = np.array([[[101, 202], [303, 404]]])
for merge_mode in ["ave", "concat", "mul", "sum", None]:
sub_layer = layers.LSTM(2, return_state=True)
layer = layers.Bidirectional(sub_layer, merge_mode=merge_mode)
output = layer(x)
output_shape = layer.compute_output_shape(x.shape)
for out, shape in zip(output, output_shape):
self.assertEqual(out.shape, shape)
for merge_mode in ["concat", "ave", "mul", "sum"]:
sub_layer = layers.LSTM(2, return_state=False)
layer = layers.Bidirectional(sub_layer, merge_mode=merge_mode)
output = layer(x)
output_shape = layer.compute_output_shape(x.shape)
self.assertEqual(output.shape, output_shape)
# return_state=False & merge_mode=None
sub_layer = layers.LSTM(2, return_state=False)
layer = layers.Bidirectional(sub_layer, merge_mode=None)
output = layer(x)
output_shape = layer.compute_output_shape(x.shape)
for out, shape in zip(output, output_shape):
self.assertEqual(out.shape, shape)
def test_keeps_use_cudnn(self):
# keep use_cudnn if the layer has it
for rnn_class in [layers.GRU, layers.LSTM]:
for use_cudnn in [True, False, "auto"]:
rnn = rnn_class(1, use_cudnn=use_cudnn)
bidi = layers.Bidirectional(rnn)
self.assertEqual(bidi.forward_layer.use_cudnn, use_cudnn)
self.assertEqual(bidi.backward_layer.use_cudnn, use_cudnn)
# otherwise ignore it
rnn = layers.SimpleRNN(1)
bidi = layers.Bidirectional(rnn)
self.assertFalse(hasattr(bidi.forward_layer, "use_cudnn"))
self.assertFalse(hasattr(bidi.backward_layer, "use_cudnn"))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/rnn/conv_lstm.py | keras/src/layers/rnn/conv_lstm.py | from keras.src import activations
from keras.src import backend
from keras.src import constraints
from keras.src import initializers
from keras.src import ops
from keras.src import regularizers
from keras.src import tree
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.layers.rnn.dropout_rnn_cell import DropoutRNNCell
from keras.src.layers.rnn.rnn import RNN
from keras.src.ops import operation_utils
from keras.src.utils import argument_validation
class ConvLSTMCell(Layer, DropoutRNNCell):
    """Cell class for the ConvLSTM layer.

    Args:
        rank: Integer, rank of the convolution, e.g. "2" for 2D convolutions.
        filters: Integer, the dimensionality of the output space
            (i.e. the number of output filters in the convolution).
        kernel_size: An integer or tuple/list of n integers, specifying the
            dimensions of the convolution window.
        strides: An integer or tuple/list of n integers, specifying the strides
            of the convolution. Specifying any stride value != 1
            is incompatible with specifying any `dilation_rate` value != 1.
        padding: One of `"valid"` or `"same"` (case-insensitive).
            `"valid"` means no padding. `"same"` results in padding evenly
            to the left/right or up/down of the input such that output
            has the same height/width dimension as the input.
        data_format: A string, one of `channels_last` (default) or
            `channels_first`. When unspecified, uses
            `image_data_format` value found in your Keras config file at
            `~/.keras/keras.json` (if exists) else 'channels_last'.
            Defaults to `'channels_last'`.
        dilation_rate: An integer or tuple/list of n integers, specifying the
            dilation rate to use for dilated convolution.
            Currently, specifying any `dilation_rate` value != 1 is
            incompatible with specifying any `strides` value != 1.
        activation: Activation function. If `None`, no activation is applied.
        recurrent_activation: Activation function to use for the recurrent step.
        use_bias: Boolean, (default `True`), whether the layer
            should use a bias vector.
        kernel_initializer: Initializer for the `kernel` weights matrix,
            used for the linear transformation of the inputs. Default:
            `"glorot_uniform"`.
        recurrent_initializer: Initializer for the `recurrent_kernel`
            weights matrix, used for the linear transformation of the recurrent
            state. Default: `"orthogonal"`.
        bias_initializer: Initializer for the bias vector. Default: `"zeros"`.
        unit_forget_bias: Boolean (default `True`). If `True`,
            add 1 to the bias of the forget gate at initialization.
            Setting it to `True` will also force `bias_initializer="zeros"`.
            This is recommended in [Jozefowicz et al.](
            https://github.com/mlresearch/v37/blob/gh-pages/jozefowicz15.pdf)
        kernel_regularizer: Regularizer function applied to the `kernel` weights
            matrix. Default: `None`.
        recurrent_regularizer: Regularizer function applied to the
            `recurrent_kernel` weights matrix. Default: `None`.
        bias_regularizer: Regularizer function applied to the bias vector.
            Default: `None`.
        activity_regularizer: Regularizer function applied to the output of the
            layer (its "activation"). Default: `None`.
        kernel_constraint: Constraint function applied to the `kernel` weights
            matrix. Default: `None`.
        recurrent_constraint: Constraint function applied to the
            `recurrent_kernel` weights matrix. Default: `None`.
        bias_constraint: Constraint function applied to the bias vector.
            Default: `None`.
        dropout: Float between 0 and 1. Fraction of the units to drop for the
            linear transformation of the inputs. Default: 0.
        recurrent_dropout: Float between 0 and 1. Fraction of the units to drop
            for the linear transformation of the recurrent state. Default: 0.
        seed: Random seed for dropout.

    Call arguments:
        inputs: A (2+ `rank`)D tensor.
        states: List of state tensors corresponding to the previous timestep.
        training: Python boolean indicating whether the layer should behave in
            training mode or in inference mode. Only relevant when `dropout` or
            `recurrent_dropout` is used.
    """

    def __init__(
        self,
        rank,
        filters,
        kernel_size,
        strides=1,
        padding="valid",
        data_format=None,
        dilation_rate=1,
        activation="tanh",
        recurrent_activation="sigmoid",
        use_bias=True,
        kernel_initializer="glorot_uniform",
        recurrent_initializer="orthogonal",
        bias_initializer="zeros",
        unit_forget_bias=True,
        kernel_regularizer=None,
        recurrent_regularizer=None,
        bias_regularizer=None,
        kernel_constraint=None,
        recurrent_constraint=None,
        bias_constraint=None,
        dropout=0.0,
        recurrent_dropout=0.0,
        seed=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.seed = seed
        # Per-layer generator so dropout masks are reproducible and
        # independent of other layers' randomness.
        self.seed_generator = backend.random.SeedGenerator(seed=seed)
        self.rank = rank
        if self.rank > 3:
            raise ValueError(
                f"Rank {rank} convolutions are not currently "
                f"implemented. Received: rank={rank}"
            )
        self.filters = filters
        # Normalize scalar/tuple arguments to rank-length tuples.
        self.kernel_size = argument_validation.standardize_tuple(
            kernel_size, self.rank, "kernel_size"
        )
        self.strides = argument_validation.standardize_tuple(
            strides, self.rank, "strides", allow_zero=True
        )
        self.padding = argument_validation.standardize_padding(padding)
        self.data_format = backend.standardize_data_format(data_format)
        self.dilation_rate = argument_validation.standardize_tuple(
            dilation_rate, self.rank, "dilation_rate"
        )
        self.activation = activations.get(activation)
        self.recurrent_activation = activations.get(recurrent_activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.unit_forget_bias = unit_forget_bias
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        # Clamp rates into [0, 1] rather than raising on out-of-range values.
        self.dropout = min(1.0, max(0.0, dropout))
        self.recurrent_dropout = min(1.0, max(0.0, recurrent_dropout))
        # One independent dropout mask per LSTM gate (i, f, c, o); consumed
        # by the DropoutRNNCell mixin.
        self.dropout_mask_count = 4
        self.input_spec = InputSpec(ndim=rank + 2)
        self.state_size = -1  # Custom, defined in methods

    def build(self, inputs_shape, states_shape=None):
        # Locate the channel axis and record the static spatial dims; both
        # are needed later by `get_initial_state`.
        if self.data_format == "channels_first":
            channel_axis = 1
            self.spatial_dims = inputs_shape[2:]
        else:
            channel_axis = -1
            self.spatial_dims = inputs_shape[1:-1]
        if None in self.spatial_dims:
            raise ValueError(
                "ConvLSTM layers only support static "
                "input shapes for the spatial dimension. "
                f"Received invalid input shape: input_shape={inputs_shape}"
            )
        if inputs_shape[channel_axis] is None:
            raise ValueError(
                "The channel dimension of the inputs (last axis) should be "
                "defined. Found None. Full input shape received: "
                f"input_shape={inputs_shape}"
            )
        # NOTE(review): `__init__` declares `ndim=rank + 2` but this resets
        # it to `rank + 3` — presumably to match the time-major shape seen
        # by the wrapping RNN layer; confirm against the caller.
        self.input_spec = InputSpec(
            ndim=self.rank + 3, shape=(None,) + inputs_shape[1:]
        )
        input_dim = inputs_shape[channel_axis]
        self.input_dim = input_dim
        # The four gate kernels (i, f, c, o) are stacked along the output
        # channel axis, hence `filters * 4`.
        self.kernel_shape = self.kernel_size + (input_dim, self.filters * 4)
        recurrent_kernel_shape = self.kernel_size + (
            self.filters,
            self.filters * 4,
        )
        self.kernel = self.add_weight(
            shape=self.kernel_shape,
            initializer=self.kernel_initializer,
            name="kernel",
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
        )
        self.recurrent_kernel = self.add_weight(
            shape=recurrent_kernel_shape,
            initializer=self.recurrent_initializer,
            name="recurrent_kernel",
            regularizer=self.recurrent_regularizer,
            constraint=self.recurrent_constraint,
        )
        if self.use_bias:
            if self.unit_forget_bias:
                # Forget-gate bias (the second `filters`-sized slice) is
                # initialized to ones; the other three gates use the
                # user-provided initializer.
                def bias_initializer(_, *args, **kwargs):
                    return ops.concatenate(
                        [
                            self.bias_initializer(
                                (self.filters,), *args, **kwargs
                            ),
                            initializers.get("ones")(
                                (self.filters,), *args, **kwargs
                            ),
                            self.bias_initializer(
                                (self.filters * 2,), *args, **kwargs
                            ),
                        ]
                    )

            else:
                bias_initializer = self.bias_initializer
            self.bias = self.add_weight(
                shape=(self.filters * 4,),
                name="bias",
                initializer=bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
            )
        else:
            self.bias = None

    def call(self, inputs, states, training=False):
        h_tm1 = states[0]  # previous memory state
        c_tm1 = states[1]  # previous carry state

        # Apply one dropout mask per gate to the inputs (training only).
        if training and 0.0 < self.dropout < 1.0:
            dp_mask = self.get_dropout_mask(inputs)
            inputs_i = inputs * dp_mask[0]
            inputs_f = inputs * dp_mask[1]
            inputs_c = inputs * dp_mask[2]
            inputs_o = inputs * dp_mask[3]
        else:
            inputs_i = inputs
            inputs_f = inputs
            inputs_c = inputs
            inputs_o = inputs

        # Same per-gate masking for the recurrent state.
        if training and 0.0 < self.recurrent_dropout < 1.0:
            rec_dp_mask = self.get_recurrent_dropout_mask(h_tm1)
            h_tm1_i = h_tm1 * rec_dp_mask[0]
            h_tm1_f = h_tm1 * rec_dp_mask[1]
            h_tm1_c = h_tm1 * rec_dp_mask[2]
            h_tm1_o = h_tm1 * rec_dp_mask[3]
        else:
            h_tm1_i = h_tm1
            h_tm1_f = h_tm1
            h_tm1_c = h_tm1
            h_tm1_o = h_tm1

        # Split the stacked kernels back into the four gates along the
        # output-channel axis (axis rank + 1, see `build`).
        (kernel_i, kernel_f, kernel_c, kernel_o) = ops.split(
            self.kernel, 4, axis=self.rank + 1
        )
        (
            recurrent_kernel_i,
            recurrent_kernel_f,
            recurrent_kernel_c,
            recurrent_kernel_o,
        ) = ops.split(self.recurrent_kernel, 4, axis=self.rank + 1)

        if self.use_bias:
            bias_i, bias_f, bias_c, bias_o = ops.split(self.bias, 4)
        else:
            bias_i, bias_f, bias_c, bias_o = None, None, None, None

        x_i = self.input_conv(inputs_i, kernel_i, bias_i, padding=self.padding)
        x_f = self.input_conv(inputs_f, kernel_f, bias_f, padding=self.padding)
        x_c = self.input_conv(inputs_c, kernel_c, bias_c, padding=self.padding)
        x_o = self.input_conv(inputs_o, kernel_o, bias_o, padding=self.padding)
        h_i = self.recurrent_conv(h_tm1_i, recurrent_kernel_i)
        h_f = self.recurrent_conv(h_tm1_f, recurrent_kernel_f)
        h_c = self.recurrent_conv(h_tm1_c, recurrent_kernel_c)
        h_o = self.recurrent_conv(h_tm1_o, recurrent_kernel_o)

        # Standard LSTM gate equations, with convolutions in place of the
        # dense matmuls.
        i = self.recurrent_activation(x_i + h_i)
        f = self.recurrent_activation(x_f + h_f)
        c = f * c_tm1 + i * self.activation(x_c + h_c)
        o = self.recurrent_activation(x_o + h_o)
        h = o * self.activation(c)
        return h, [h, c]

    def compute_output_shape(self, inputs_shape, states_shape=None):
        # Both states (h, c) share the conv output shape.
        conv_output_shape = operation_utils.compute_conv_output_shape(
            inputs_shape,
            self.filters,
            self.kernel_size,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate,
        )
        return conv_output_shape, [conv_output_shape, conv_output_shape]

    def get_initial_state(self, batch_size=None):
        # Reconstruct a single-step input shape from the dims recorded in
        # `build`, then zero-fill states of the matching output shape.
        if self.data_format == "channels_last":
            input_shape = (batch_size,) + self.spatial_dims + (self.input_dim,)
        else:
            input_shape = (batch_size, self.input_dim) + self.spatial_dims
        state_shape = self.compute_output_shape(input_shape)[0]
        return [
            ops.zeros(state_shape, dtype=self.compute_dtype),
            ops.zeros(state_shape, dtype=self.compute_dtype),
        ]

    def input_conv(self, x, w, b=None, padding="valid"):
        # Convolution over the input, honoring the user-configured strides,
        # padding and dilation; bias is broadcast over the spatial dims.
        conv_out = ops.conv(
            x,
            w,
            strides=self.strides,
            padding=padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate,
        )
        if b is not None:
            if self.data_format == "channels_last":
                bias_shape = (1,) * (self.rank + 1) + (self.filters,)
            else:
                bias_shape = (1, self.filters) + (1,) * self.rank
            bias = ops.reshape(b, bias_shape)
            conv_out += bias
        return conv_out

    def recurrent_conv(self, x, w):
        # Recurrent convolution is always stride-1 with "same" padding so
        # the hidden state keeps its spatial shape across timesteps.
        strides = argument_validation.standardize_tuple(
            1, self.rank, "strides", allow_zero=True
        )
        conv_out = ops.conv(
            x, w, strides=strides, padding="same", data_format=self.data_format
        )
        return conv_out

    def get_config(self):
        config = {
            "filters": self.filters,
            "kernel_size": self.kernel_size,
            "strides": self.strides,
            "padding": self.padding,
            "data_format": self.data_format,
            "dilation_rate": self.dilation_rate,
            "activation": activations.serialize(self.activation),
            "recurrent_activation": activations.serialize(
                self.recurrent_activation
            ),
            "use_bias": self.use_bias,
            "kernel_initializer": initializers.serialize(
                self.kernel_initializer
            ),
            "recurrent_initializer": initializers.serialize(
                self.recurrent_initializer
            ),
            "bias_initializer": initializers.serialize(self.bias_initializer),
            "unit_forget_bias": self.unit_forget_bias,
            "kernel_regularizer": regularizers.serialize(
                self.kernel_regularizer
            ),
            "recurrent_regularizer": regularizers.serialize(
                self.recurrent_regularizer
            ),
            "bias_regularizer": regularizers.serialize(self.bias_regularizer),
            "kernel_constraint": constraints.serialize(self.kernel_constraint),
            "recurrent_constraint": constraints.serialize(
                self.recurrent_constraint
            ),
            "bias_constraint": constraints.serialize(self.bias_constraint),
            "dropout": self.dropout,
            "recurrent_dropout": self.recurrent_dropout,
            "seed": self.seed,
        }
        base_config = super().get_config()
        return {**base_config, **config}
class ConvLSTM(RNN):
    """Abstract N-D Convolutional LSTM layer (used as implementation base).

    Similar to an LSTM layer, but the input transformations
    and recurrent transformations are both convolutional.

    Args:
        rank: Integer, rank of the convolution, e.g. "2" for 2D convolutions.
        filters: Integer, the dimensionality of the output space
            (i.e. the number of output filters in the convolution).
        kernel_size: An integer or tuple/list of n integers, specifying the
            dimensions of the convolution window.
        strides: An integer or tuple/list of n integers,
            specifying the strides of the convolution.
            Specifying any stride value != 1 is incompatible with specifying
            any `dilation_rate` value != 1.
        padding: One of `"valid"` or `"same"` (case-insensitive).
            `"valid"` means no padding. `"same"` results in padding evenly to
            the left/right or up/down of the input such that output has the same
            height/width dimension as the input.
        data_format: A string,
            one of `channels_last` (default) or `channels_first`.
            The ordering of the dimensions in the inputs.
            `channels_last` corresponds to inputs with shape
            `(batch, time, ..., channels)`
            while `channels_first` corresponds to
            inputs with shape `(batch, time, channels, ...)`.
            When unspecified, uses
            `image_data_format` value found in your Keras config file at
            `~/.keras/keras.json` (if exists) else 'channels_last'.
            Defaults to `'channels_last'`.
        dilation_rate: An integer or tuple/list of n integers, specifying
            the dilation rate to use for dilated convolution.
            Currently, specifying any `dilation_rate` value != 1 is
            incompatible with specifying any `strides` value != 1.
        activation: Activation function to use.
            By default hyperbolic tangent activation function is applied
            (`tanh(x)`).
        recurrent_activation: Activation function to use
            for the recurrent step.
        use_bias: Boolean, whether the layer uses a bias vector.
        kernel_initializer: Initializer for the `kernel` weights matrix,
            used for the linear transformation of the inputs.
        recurrent_initializer: Initializer for the `recurrent_kernel`
            weights matrix,
            used for the linear transformation of the recurrent state.
        bias_initializer: Initializer for the bias vector.
        unit_forget_bias: Boolean.
            If True, add 1 to the bias of the forget gate at initialization.
            Use in combination with `bias_initializer="zeros"`.
            This is recommended in [Jozefowicz et al., 2015](
            http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
        kernel_regularizer: Regularizer function applied to
            the `kernel` weights matrix.
        recurrent_regularizer: Regularizer function applied to
            the `recurrent_kernel` weights matrix.
        bias_regularizer: Regularizer function applied to the bias vector.
        activity_regularizer: Regularizer function applied to.
        kernel_constraint: Constraint function applied to
            the `kernel` weights matrix.
        recurrent_constraint: Constraint function applied to
            the `recurrent_kernel` weights matrix.
        bias_constraint: Constraint function applied to the bias vector.
        dropout: Float between 0 and 1.
            Fraction of the units to drop for
            the linear transformation of the inputs.
        recurrent_dropout: Float between 0 and 1.
            Fraction of the units to drop for
            the linear transformation of the recurrent state.
        seed: Random seed for dropout.
        return_sequences: Boolean. Whether to return the last output
            in the output sequence, or the full sequence. (default False)
        return_state: Boolean Whether to return the last state
            in addition to the output. (default False)
        go_backwards: Boolean (default False).
            If True, process the input sequence backwards.
        stateful: Boolean (default False). If True, the last state
            for each sample at index i in a batch will be used as initial
            state for the sample of index i in the following batch.
    """

    def __init__(
        self,
        rank,
        filters,
        kernel_size,
        strides=1,
        padding="valid",
        data_format=None,
        dilation_rate=1,
        activation="tanh",
        recurrent_activation="sigmoid",
        use_bias=True,
        kernel_initializer="glorot_uniform",
        recurrent_initializer="orthogonal",
        bias_initializer="zeros",
        unit_forget_bias=True,
        kernel_regularizer=None,
        recurrent_regularizer=None,
        bias_regularizer=None,
        kernel_constraint=None,
        recurrent_constraint=None,
        bias_constraint=None,
        dropout=0.0,
        recurrent_dropout=0.0,
        seed=None,
        return_sequences=False,
        return_state=False,
        go_backwards=False,
        stateful=False,
        **kwargs,
    ):
        # All per-step computation lives in the cell; this class only wires
        # it into the generic RNN driver.
        cell = ConvLSTMCell(
            rank=rank,
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            activation=activation,
            recurrent_activation=recurrent_activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            recurrent_initializer=recurrent_initializer,
            bias_initializer=bias_initializer,
            unit_forget_bias=unit_forget_bias,
            kernel_regularizer=kernel_regularizer,
            recurrent_regularizer=recurrent_regularizer,
            bias_regularizer=bias_regularizer,
            kernel_constraint=kernel_constraint,
            recurrent_constraint=recurrent_constraint,
            bias_constraint=bias_constraint,
            dropout=dropout,
            recurrent_dropout=recurrent_dropout,
            seed=seed,
            name="conv_lstm_cell",
            dtype=kwargs.get("dtype"),
        )
        super().__init__(
            cell,
            return_sequences=return_sequences,
            return_state=return_state,
            go_backwards=go_backwards,
            stateful=stateful,
            **kwargs,
        )
        # Sequence input: (batch, time) + rank spatial dims + channels.
        self.input_spec = InputSpec(ndim=rank + 3)

    def call(self, sequences, initial_state=None, mask=None, training=False):
        # Pure delegation to RNN.call; kept explicit so the signature is
        # documented on this class.
        return super().call(
            sequences, initial_state=initial_state, mask=mask, training=training
        )

    def compute_output_shape(self, sequences_shape, initial_state_shape=None):
        # Drop the time axis to get a single-step shape, ask the cell for
        # its output shape, then re-insert batch (and time if returning
        # full sequences).
        batch_size = sequences_shape[0]
        steps = sequences_shape[1]
        step_shape = (batch_size,) + sequences_shape[2:]
        state_shape = self.cell.compute_output_shape(step_shape)[0][1:]

        if self.return_sequences:
            output_shape = (
                batch_size,
                steps,
            ) + state_shape
        else:
            output_shape = (batch_size,) + state_shape

        if self.return_state:
            # Two extra outputs: final h and final c, same shape each.
            batched_state_shape = (batch_size,) + state_shape
            return output_shape, batched_state_shape, batched_state_shape
        return output_shape

    def compute_mask(self, _, mask):
        # The output mask propagates only when full sequences are returned;
        # the returned states never carry a mask.
        mask = tree.flatten(mask)[0]
        output_mask = mask if self.return_sequences else None
        if self.return_state:
            state_mask = [None, None]
            return [output_mask] + state_mask
        else:
            return output_mask

    # The properties below are read-only views of the wrapped cell's
    # configuration, exposed for API parity with non-cell RNN layers.

    @property
    def filters(self):
        return self.cell.filters

    @property
    def kernel_size(self):
        return self.cell.kernel_size

    @property
    def strides(self):
        return self.cell.strides

    @property
    def padding(self):
        return self.cell.padding

    @property
    def data_format(self):
        return self.cell.data_format

    @property
    def dilation_rate(self):
        return self.cell.dilation_rate

    @property
    def activation(self):
        return self.cell.activation

    @property
    def recurrent_activation(self):
        return self.cell.recurrent_activation

    @property
    def use_bias(self):
        return self.cell.use_bias

    @property
    def kernel_initializer(self):
        return self.cell.kernel_initializer

    @property
    def recurrent_initializer(self):
        return self.cell.recurrent_initializer

    @property
    def bias_initializer(self):
        return self.cell.bias_initializer

    @property
    def unit_forget_bias(self):
        return self.cell.unit_forget_bias

    @property
    def kernel_regularizer(self):
        return self.cell.kernel_regularizer

    @property
    def recurrent_regularizer(self):
        return self.cell.recurrent_regularizer

    @property
    def bias_regularizer(self):
        return self.cell.bias_regularizer

    @property
    def kernel_constraint(self):
        return self.cell.kernel_constraint

    @property
    def recurrent_constraint(self):
        return self.cell.recurrent_constraint

    @property
    def bias_constraint(self):
        return self.cell.bias_constraint

    @property
    def dropout(self):
        return self.cell.dropout

    @property
    def recurrent_dropout(self):
        return self.cell.recurrent_dropout

    def get_config(self):
        config = {
            "filters": self.filters,
            "kernel_size": self.kernel_size,
            "strides": self.strides,
            "padding": self.padding,
            "data_format": self.data_format,
            "dilation_rate": self.dilation_rate,
            "activation": activations.serialize(self.activation),
            "recurrent_activation": activations.serialize(
                self.recurrent_activation
            ),
            "use_bias": self.use_bias,
            "kernel_initializer": initializers.serialize(
                self.kernel_initializer
            ),
            "recurrent_initializer": initializers.serialize(
                self.recurrent_initializer
            ),
            "bias_initializer": initializers.serialize(self.bias_initializer),
            "unit_forget_bias": self.unit_forget_bias,
            "kernel_regularizer": regularizers.serialize(
                self.kernel_regularizer
            ),
            "recurrent_regularizer": regularizers.serialize(
                self.recurrent_regularizer
            ),
            "bias_regularizer": regularizers.serialize(self.bias_regularizer),
            "activity_regularizer": regularizers.serialize(
                self.activity_regularizer
            ),
            "kernel_constraint": constraints.serialize(self.kernel_constraint),
            "recurrent_constraint": constraints.serialize(
                self.recurrent_constraint
            ),
            "bias_constraint": constraints.serialize(self.bias_constraint),
            "dropout": self.dropout,
            "recurrent_dropout": self.recurrent_dropout,
            "seed": self.cell.seed,
        }
        base_config = super().get_config()
        # The cell is rebuilt from these flat kwargs in __init__, so the
        # nested cell config from RNN.get_config is redundant.
        del base_config["cell"]
        return {**base_config, **config}

    @classmethod
    def from_config(cls, config):
        # The flat config maps directly onto __init__'s signature.
        return cls(**config)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/rnn/gru.py | keras/src/layers/rnn/gru.py | from keras.src import activations
from keras.src import backend
from keras.src import constraints
from keras.src import initializers
from keras.src import ops
from keras.src import regularizers
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.layers.rnn.dropout_rnn_cell import DropoutRNNCell
from keras.src.layers.rnn.rnn import RNN
@keras_export("keras.layers.GRUCell")
class GRUCell(Layer, DropoutRNNCell):
"""Cell class for the GRU layer.
This class processes one step within the whole time sequence input, whereas
`keras.layer.GRU` processes the whole sequence.
Args:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use. Default: hyperbolic tangent
(`tanh`). If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use for the recurrent step.
Default: sigmoid (`sigmoid`). If you pass `None`, no activation is
applied (ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, (default `True`), whether the layer
should use a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs. Default:
`"glorot_uniform"`.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix, used for the linear transformation
of the recurrent state. Default: `"orthogonal"`.
bias_initializer: Initializer for the bias vector. Default: `"zeros"`.
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_regularizer: Regularizer function applied to the
`recurrent_kernel` weights matrix. Default: `None`.
bias_regularizer: Regularizer function applied to the bias vector.
Default: `None`.
kernel_constraint: Constraint function applied to the `kernel` weights
matrix. Default: `None`.
recurrent_constraint: Constraint function applied to the
`recurrent_kernel` weights matrix. Default: `None`.
bias_constraint: Constraint function applied to the bias vector.
Default: `None`.
dropout: Float between 0 and 1. Fraction of the units to drop for the
linear transformation of the inputs. Default: 0.
recurrent_dropout: Float between 0 and 1. Fraction of the units to drop
for the linear transformation of the recurrent state. Default: 0.
reset_after: GRU convention (whether to apply reset gate after or
before matrix multiplication). False = "before",
True = "after" (default and cuDNN compatible).
seed: Random seed for dropout.
Call arguments:
inputs: A 2D tensor, with shape `(batch, features)`.
states: A 2D tensor with shape `(batch, units)`, which is the state
from the previous time step.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. Only relevant when `dropout` or
`recurrent_dropout` is used.
Example:
>>> inputs = np.random.random((32, 10, 8))
>>> rnn = keras.layers.RNN(keras.layers.GRUCell(4))
>>> output = rnn(inputs)
>>> output.shape
(32, 4)
>>> rnn = keras.layers.RNN(
... keras.layers.GRUCell(4),
... return_sequences=True,
... return_state=True)
>>> whole_sequence_output, final_state = rnn(inputs)
>>> whole_sequence_output.shape
(32, 10, 4)
>>> final_state.shape
(32, 4)
"""
def __init__(
self,
units,
activation="tanh",
recurrent_activation="sigmoid",
use_bias=True,
kernel_initializer="glorot_uniform",
recurrent_initializer="orthogonal",
bias_initializer="zeros",
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.0,
recurrent_dropout=0.0,
reset_after=True,
seed=None,
**kwargs,
):
if units <= 0:
raise ValueError(
"Received an invalid value for argument `units`, "
f"expected a positive integer, got {units}."
)
implementation = kwargs.pop("implementation", 2)
super().__init__(**kwargs)
self.implementation = implementation
self.units = units
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1.0, max(0.0, dropout))
self.recurrent_dropout = min(1.0, max(0.0, recurrent_dropout))
if self.recurrent_dropout != 0.0:
self.implementation = 1
if self.implementation == 1:
self.dropout_mask_count = 3
self.seed = seed
self.seed_generator = backend.random.SeedGenerator(seed=seed)
self.reset_after = reset_after
self.state_size = self.units
self.output_size = self.units
def build(self, input_shape):
super().build(input_shape)
input_dim = input_shape[-1]
self.kernel = self.add_weight(
shape=(input_dim, self.units * 3),
name="kernel",
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units * 3),
name="recurrent_kernel",
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint,
)
if self.use_bias:
if not self.reset_after:
bias_shape = (3 * self.units,)
else:
# separate biases for input and recurrent kernels
# Note: the shape is intentionally different from CuDNNGRU
# biases `(2 * 3 * self.units,)`, so that we can distinguish the
# classes when loading and converting saved weights.
bias_shape = (2, 3 * self.units)
self.bias = self.add_weight(
shape=bias_shape,
name="bias",
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
)
else:
self.bias = None
def call(self, inputs, states, training=False):
h_tm1 = (
states[0] if tree.is_nested(states) else states
) # previous state
if self.use_bias:
if not self.reset_after:
input_bias, recurrent_bias = self.bias, None
else:
input_bias, recurrent_bias = (
ops.squeeze(e, axis=0)
for e in ops.split(self.bias, self.bias.shape[0], axis=0)
)
if self.implementation == 1:
if training and 0.0 < self.dropout < 1.0:
dp_mask = self.get_dropout_mask(inputs)
inputs_z = inputs * dp_mask[0]
inputs_r = inputs * dp_mask[1]
inputs_h = inputs * dp_mask[2]
else:
inputs_z = inputs
inputs_r = inputs
inputs_h = inputs
x_z = ops.matmul(inputs_z, self.kernel[:, : self.units])
x_r = ops.matmul(
inputs_r, self.kernel[:, self.units : self.units * 2]
)
x_h = ops.matmul(inputs_h, self.kernel[:, self.units * 2 :])
if self.use_bias:
x_z += input_bias[: self.units]
x_r += input_bias[self.units : self.units * 2]
x_h += input_bias[self.units * 2 :]
if training and 0.0 < self.recurrent_dropout < 1.0:
rec_dp_mask = self.get_recurrent_dropout_mask(h_tm1)
h_tm1_z = h_tm1 * rec_dp_mask[0]
h_tm1_r = h_tm1 * rec_dp_mask[1]
h_tm1_h = h_tm1 * rec_dp_mask[2]
else:
h_tm1_z = h_tm1
h_tm1_r = h_tm1
h_tm1_h = h_tm1
recurrent_z = ops.matmul(
h_tm1_z, self.recurrent_kernel[:, : self.units]
)
recurrent_r = ops.matmul(
h_tm1_r, self.recurrent_kernel[:, self.units : self.units * 2]
)
if self.reset_after and self.use_bias:
recurrent_z += recurrent_bias[: self.units]
recurrent_r += recurrent_bias[self.units : self.units * 2]
z = self.recurrent_activation(x_z + recurrent_z)
r = self.recurrent_activation(x_r + recurrent_r)
# reset gate applied after/before matrix multiplication
if self.reset_after:
recurrent_h = ops.matmul(
h_tm1_h, self.recurrent_kernel[:, self.units * 2 :]
)
if self.use_bias:
recurrent_h += recurrent_bias[self.units * 2 :]
recurrent_h = r * recurrent_h
else:
recurrent_h = ops.matmul(
r * h_tm1_h, self.recurrent_kernel[:, self.units * 2 :]
)
hh = self.activation(x_h + recurrent_h)
else:
if training and 0.0 < self.dropout < 1.0:
dp_mask = self.get_dropout_mask(inputs)
inputs = inputs * dp_mask
# inputs projected by all gate matrices at once
matrix_x = ops.matmul(inputs, self.kernel)
if self.use_bias:
# biases: bias_z_i, bias_r_i, bias_h_i
matrix_x = ops.add(matrix_x, input_bias)
x_z, x_r, x_h = ops.split(matrix_x, 3, axis=-1)
if self.reset_after:
# hidden state projected by all gate matrices at once
matrix_inner = ops.matmul(h_tm1, self.recurrent_kernel)
if self.use_bias:
matrix_inner += recurrent_bias
else:
# hidden state projected separately for update/reset and new
matrix_inner = ops.matmul(
h_tm1, self.recurrent_kernel[:, : 2 * self.units]
)
recurrent_z = matrix_inner[:, : self.units]
recurrent_r = matrix_inner[:, self.units : self.units * 2]
recurrent_h = matrix_inner[:, self.units * 2 :]
z = self.recurrent_activation(x_z + recurrent_z)
r = self.recurrent_activation(x_r + recurrent_r)
if self.reset_after:
recurrent_h = r * recurrent_h
else:
recurrent_h = ops.matmul(
r * h_tm1, self.recurrent_kernel[:, 2 * self.units :]
)
hh = self.activation(x_h + recurrent_h)
# previous and candidate state mixed by update gate
h = z * h_tm1 + (1 - z) * hh
new_state = [h] if tree.is_nested(states) else h
return h, new_state
    def get_config(self):
        """Return a JSON-serializable dict of the cell's constructor arguments.

        Together with `from_config`, this allows the cell to be saved and
        re-created with identical hyperparameters. Callables (activations,
        initializers, regularizers, constraints) are converted to their
        serialized identifiers.
        """
        config = {
            "units": self.units,
            "activation": activations.serialize(self.activation),
            "recurrent_activation": activations.serialize(
                self.recurrent_activation
            ),
            "use_bias": self.use_bias,
            "kernel_initializer": initializers.serialize(
                self.kernel_initializer
            ),
            "recurrent_initializer": initializers.serialize(
                self.recurrent_initializer
            ),
            "bias_initializer": initializers.serialize(self.bias_initializer),
            "kernel_regularizer": regularizers.serialize(
                self.kernel_regularizer
            ),
            "recurrent_regularizer": regularizers.serialize(
                self.recurrent_regularizer
            ),
            "bias_regularizer": regularizers.serialize(self.bias_regularizer),
            "kernel_constraint": constraints.serialize(self.kernel_constraint),
            "recurrent_constraint": constraints.serialize(
                self.recurrent_constraint
            ),
            "bias_constraint": constraints.serialize(self.bias_constraint),
            "dropout": self.dropout,
            "recurrent_dropout": self.recurrent_dropout,
            "reset_after": self.reset_after,
            "seed": self.seed,
        }
        base_config = super().get_config()
        # Merge with the base layer config; this cell's keys take precedence.
        return {**base_config, **config}
def get_initial_state(self, batch_size=None):
return [
ops.zeros((batch_size, self.state_size), dtype=self.compute_dtype)
]
@keras_export("keras.layers.GRU")
class GRU(RNN):
    """Gated Recurrent Unit - Cho et al. 2014.

    Based on available runtime hardware and constraints, this layer
    will choose different implementations (cuDNN-based or backend-native)
    to maximize the performance. If a GPU is available and all
    the arguments to the layer meet the requirement of the cuDNN kernel
    (see below for details), the layer will use a fast cuDNN implementation
    when using the TensorFlow backend.

    The requirements to use the cuDNN implementation are:

    1. `activation` == `tanh`
    2. `recurrent_activation` == `sigmoid`
    3. `recurrent_dropout` == 0
    4. `unroll` is `False`
    5. `use_bias` is `True`
    6. `reset_after` is `True`
    7. Inputs, if use masking, are strictly right-padded.
    8. Eager execution is enabled in the outermost context.

    There are two variants of the GRU implementation. The default one is based
    on [v3](https://arxiv.org/abs/1406.1078v3) and has reset gate applied to
    hidden state before matrix multiplication. The other one is based on
    [original](https://arxiv.org/abs/1406.1078v1) and has the order reversed.

    The second variant is compatible with CuDNNGRU (GPU-only) and allows
    inference on CPU. Thus it has separate biases for `kernel` and
    `recurrent_kernel`. To use this variant, set `reset_after=True` and
    `recurrent_activation='sigmoid'`.

    For example:

    >>> inputs = np.random.random((32, 10, 8))
    >>> gru = keras.layers.GRU(4)
    >>> output = gru(inputs)
    >>> output.shape
    (32, 4)
    >>> gru = keras.layers.GRU(4, return_sequences=True, return_state=True)
    >>> whole_sequence_output, final_state = gru(inputs)
    >>> whole_sequence_output.shape
    (32, 10, 4)
    >>> final_state.shape
    (32, 4)

    Args:
        units: Positive integer, dimensionality of the output space.
        activation: Activation function to use.
            Default: hyperbolic tangent (`tanh`).
            If you pass `None`, no activation is applied
            (ie. "linear" activation: `a(x) = x`).
        recurrent_activation: Activation function to use
            for the recurrent step.
            Default: sigmoid (`sigmoid`).
            If you pass `None`, no activation is applied
            (ie. "linear" activation: `a(x) = x`).
        use_bias: Boolean, (default `True`), whether the layer
            should use a bias vector.
        kernel_initializer: Initializer for the `kernel` weights matrix,
            used for the linear transformation of the inputs. Default:
            `"glorot_uniform"`.
        recurrent_initializer: Initializer for the `recurrent_kernel`
            weights matrix, used for the linear transformation of the recurrent
            state. Default: `"orthogonal"`.
        bias_initializer: Initializer for the bias vector. Default: `"zeros"`.
        kernel_regularizer: Regularizer function applied to the `kernel` weights
            matrix. Default: `None`.
        recurrent_regularizer: Regularizer function applied to the
            `recurrent_kernel` weights matrix. Default: `None`.
        bias_regularizer: Regularizer function applied to the bias vector.
            Default: `None`.
        activity_regularizer: Regularizer function applied to the output of the
            layer (its "activation"). Default: `None`.
        kernel_constraint: Constraint function applied to the `kernel` weights
            matrix. Default: `None`.
        recurrent_constraint: Constraint function applied to the
            `recurrent_kernel` weights matrix. Default: `None`.
        bias_constraint: Constraint function applied to the bias vector.
            Default: `None`.
        dropout: Float between 0 and 1. Fraction of the units to drop for the
            linear transformation of the inputs. Default: 0.
        recurrent_dropout: Float between 0 and 1. Fraction of the units to drop
            for the linear transformation of the recurrent state. Default: 0.
        seed: Random seed for dropout.
        return_sequences: Boolean. Whether to return the last output
            in the output sequence, or the full sequence. Default: `False`.
        return_state: Boolean. Whether to return the last state in addition
            to the output. Default: `False`.
        go_backwards: Boolean (default `False`).
            If `True`, process the input sequence backwards and return the
            reversed sequence.
        stateful: Boolean (default: `False`). If `True`, the last state
            for each sample at index i in a batch will be used as initial
            state for the sample of index i in the following batch.
        unroll: Boolean (default: `False`).
            If `True`, the network will be unrolled,
            else a symbolic loop will be used.
            Unrolling can speed-up a RNN,
            although it tends to be more memory-intensive.
            Unrolling is only suitable for short sequences.
        reset_after: GRU convention (whether to apply reset gate after or
            before matrix multiplication). `False` is `"before"`,
            `True` is `"after"` (default and cuDNN compatible).
        use_cudnn: Whether to use a cuDNN-backed implementation. `"auto"` will
            attempt to use cuDNN when feasible, and will fallback to the
            default implementation if not.

    Call arguments:
        inputs: A 3D tensor, with shape `(batch, timesteps, feature)`.
        mask: Binary tensor of shape `(samples, timesteps)` indicating whether
            a given timestep should be masked (optional).
            An individual `True` entry indicates that the corresponding timestep
            should be utilized, while a `False` entry indicates that the
            corresponding timestep should be ignored. Defaults to `None`.
        training: Python boolean indicating whether the layer should behave in
            training mode or in inference mode. This argument is passed to the
            cell when calling it. This is only relevant if `dropout` or
            `recurrent_dropout` is used (optional). Defaults to `None`.
        initial_state: List of initial state tensors to be passed to the first
            call of the cell (optional, `None` causes creation
            of zero-filled initial state tensors). Defaults to `None`.
    """

    def __init__(
        self,
        units,
        activation="tanh",
        recurrent_activation="sigmoid",
        use_bias=True,
        kernel_initializer="glorot_uniform",
        recurrent_initializer="orthogonal",
        bias_initializer="zeros",
        kernel_regularizer=None,
        recurrent_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        recurrent_constraint=None,
        bias_constraint=None,
        dropout=0.0,
        recurrent_dropout=0.0,
        seed=None,
        return_sequences=False,
        return_state=False,
        go_backwards=False,
        stateful=False,
        unroll=False,
        reset_after=True,
        use_cudnn="auto",
        **kwargs,
    ):
        cell = GRUCell(
            units,
            activation=activation,
            recurrent_activation=recurrent_activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            recurrent_initializer=recurrent_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            recurrent_regularizer=recurrent_regularizer,
            bias_regularizer=bias_regularizer,
            kernel_constraint=kernel_constraint,
            recurrent_constraint=recurrent_constraint,
            bias_constraint=bias_constraint,
            dropout=dropout,
            recurrent_dropout=recurrent_dropout,
            reset_after=reset_after,
            dtype=kwargs.get("dtype", None),
            trainable=kwargs.get("trainable", True),
            name="gru_cell",
            seed=seed,
            implementation=kwargs.pop("implementation", 2),
        )
        super().__init__(
            cell,
            return_sequences=return_sequences,
            return_state=return_state,
            go_backwards=go_backwards,
            stateful=stateful,
            unroll=unroll,
            activity_regularizer=activity_regularizer,
            **kwargs,
        )
        self.input_spec = InputSpec(ndim=3)
        if use_cudnn not in ("auto", True, False):
            # Fixed message: previously read "Invalid valid received".
            raise ValueError(
                "Invalid value received for argument `use_cudnn`. "
                "Expected one of {'auto', True, False}. "
                f"Received: use_cudnn={use_cudnn}"
            )
        self.use_cudnn = use_cudnn
        if (
            backend.backend() == "tensorflow"
            and backend.cudnn_ok(
                cell.activation,
                cell.recurrent_activation,
                self.unroll,
                cell.use_bias,
                reset_after=reset_after,
            )
            and use_cudnn in (True, "auto")
        ):
            # cuDNN ops aren't XLA compatible, so JIT must be disabled.
            self.supports_jit = False

    def inner_loop(self, sequences, initial_state, mask, training=False):
        """Run the recurrence, preferring a backend-optimized (cuDNN) path.

        Falls back to the generic `RNN.inner_loop` when the backend raises
        `NotImplementedError` for its fused GRU, unless `use_cudnn=True`
        was explicitly requested, in which case a `ValueError` is raised.
        """
        if tree.is_nested(initial_state):
            initial_state = initial_state[0]
        if tree.is_nested(mask):
            mask = mask[0]
        if self.use_cudnn in ("auto", True):
            if not self.recurrent_dropout:
                try:
                    if training and self.dropout:
                        # The same dropout mask must be applied at every
                        # timestep, so build it once and broadcast over time.
                        dp_mask = self.cell.get_dropout_mask(sequences[:, 0, :])
                        dp_mask = ops.expand_dims(dp_mask, axis=1)
                        dp_mask = ops.broadcast_to(
                            dp_mask, ops.shape(sequences)
                        )
                        dp_sequences = sequences * dp_mask
                    else:
                        dp_sequences = sequences
                    # Backends are allowed to specify (optionally) optimized
                    # implementation of the inner GRU loop. In the case of
                    # TF for instance, it will leverage cuDNN when feasible, and
                    # it will raise NotImplementedError otherwise.
                    out = backend.gru(
                        dp_sequences,
                        initial_state,
                        mask,
                        kernel=self.cell.kernel,
                        recurrent_kernel=self.cell.recurrent_kernel,
                        bias=self.cell.bias,
                        activation=self.cell.activation,
                        recurrent_activation=self.cell.recurrent_activation,
                        return_sequences=self.return_sequences,
                        go_backwards=self.go_backwards,
                        unroll=self.unroll,
                        reset_after=self.cell.reset_after,
                    )
                    # We disable jit_compile for the model in this case,
                    # since cuDNN ops aren't XLA compatible.
                    if backend.backend() == "tensorflow":
                        self.supports_jit = False
                    return out
                except NotImplementedError:
                    pass
        if self.use_cudnn is True:
            raise ValueError(
                "use_cudnn=True was specified, "
                "but cuDNN is not supported for this layer configuration "
                "with this backend. Pass use_cudnn='auto' to fallback "
                "to a non-cuDNN implementation."
            )
        return super().inner_loop(
            sequences, initial_state, mask=mask, training=training
        )

    def call(self, sequences, initial_state=None, mask=None, training=False):
        """Forward pass. See the class docstring for argument details."""
        return super().call(
            sequences, mask=mask, training=training, initial_state=initial_state
        )

    @property
    def units(self):
        return self.cell.units

    @property
    def activation(self):
        return self.cell.activation

    @property
    def recurrent_activation(self):
        return self.cell.recurrent_activation

    @property
    def use_bias(self):
        return self.cell.use_bias

    @property
    def kernel_initializer(self):
        return self.cell.kernel_initializer

    @property
    def recurrent_initializer(self):
        return self.cell.recurrent_initializer

    @property
    def bias_initializer(self):
        return self.cell.bias_initializer

    @property
    def kernel_regularizer(self):
        return self.cell.kernel_regularizer

    @property
    def recurrent_regularizer(self):
        return self.cell.recurrent_regularizer

    @property
    def bias_regularizer(self):
        return self.cell.bias_regularizer

    @property
    def kernel_constraint(self):
        return self.cell.kernel_constraint

    @property
    def recurrent_constraint(self):
        return self.cell.recurrent_constraint

    @property
    def bias_constraint(self):
        return self.cell.bias_constraint

    @property
    def dropout(self):
        return self.cell.dropout

    @property
    def recurrent_dropout(self):
        return self.cell.recurrent_dropout

    @property
    def reset_after(self):
        return self.cell.reset_after

    def get_config(self):
        """Return a JSON-serializable dict of the layer's constructor args."""
        config = {
            "units": self.units,
            "activation": activations.serialize(self.activation),
            "recurrent_activation": activations.serialize(
                self.recurrent_activation
            ),
            "use_bias": self.use_bias,
            "kernel_initializer": initializers.serialize(
                self.kernel_initializer
            ),
            "recurrent_initializer": initializers.serialize(
                self.recurrent_initializer
            ),
            "bias_initializer": initializers.serialize(self.bias_initializer),
            "kernel_regularizer": regularizers.serialize(
                self.kernel_regularizer
            ),
            "recurrent_regularizer": regularizers.serialize(
                self.recurrent_regularizer
            ),
            "bias_regularizer": regularizers.serialize(self.bias_regularizer),
            "activity_regularizer": regularizers.serialize(
                self.activity_regularizer
            ),
            "kernel_constraint": constraints.serialize(self.kernel_constraint),
            "recurrent_constraint": constraints.serialize(
                self.recurrent_constraint
            ),
            "bias_constraint": constraints.serialize(self.bias_constraint),
            "dropout": self.dropout,
            "recurrent_dropout": self.recurrent_dropout,
            "reset_after": self.reset_after,
            "seed": self.cell.seed,
        }
        base_config = super().get_config()
        # The cell is fully described by the hyperparameters above, so the
        # nested cell config is dropped to keep the layer config flat.
        del base_config["cell"]
        return {**base_config, **config}

    @classmethod
    def from_config(cls, config):
        """Re-create a `GRU` layer from its `get_config` output."""
        return cls(**config)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/rnn/conv_lstm3d.py | keras/src/layers/rnn/conv_lstm3d.py | from keras.src.api_export import keras_export
from keras.src.layers.rnn.conv_lstm import ConvLSTM
@keras_export("keras.layers.ConvLSTM3D")
class ConvLSTM3D(ConvLSTM):
    """3D Convolutional LSTM.
    Similar to an LSTM layer, but the input transformations
    and recurrent transformations are both convolutional.
    Args:
        filters: int, the dimension of the output space (the number of filters
            in the convolution).
        kernel_size: int or tuple/list of 3 integers, specifying the size of the
            convolution window.
        strides: int or tuple/list of 3 integers, specifying the stride length
            of the convolution. `strides > 1` is incompatible with
            `dilation_rate > 1`.
        padding: string, `"valid"` or `"same"` (case-insensitive).
            `"valid"` means no padding. `"same"` results in padding evenly to
            the left/right or up/down of the input such that output has the same
            height/width dimension as the input.
        data_format: string, either `"channels_last"` or `"channels_first"`.
            The ordering of the dimensions in the inputs. `"channels_last"`
            corresponds to inputs with shape
            `(batch, time, *spatial_dims, channels)` while `"channels_first"`
            corresponds to inputs with shape
            `(batch, time, channels, *spatial_dims)`. It defaults to the
            `image_data_format` value found in your Keras config file at
            `~/.keras/keras.json`. If you never set it, then it will be
            `"channels_last"`.
        dilation_rate: int or tuple/list of 3 integers, specifying the dilation
            rate to use for dilated convolution.
        activation: Activation function to use. By default hyperbolic tangent
            activation function is applied (`tanh(x)`).
        recurrent_activation: Activation function to use for the recurrent step.
        use_bias: Boolean, whether the layer uses a bias vector.
        kernel_initializer: Initializer for the `kernel` weights matrix,
            used for the linear transformation of the inputs.
        recurrent_initializer: Initializer for the `recurrent_kernel` weights
            matrix, used for the linear transformation of the recurrent state.
        bias_initializer: Initializer for the bias vector.
        unit_forget_bias: Boolean. If `True`, add 1 to the bias of the forget
            gate at initialization.
            Use in combination with `bias_initializer="zeros"`.
            This is recommended in [Jozefowicz et al., 2015](
            http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
        kernel_regularizer: Regularizer function applied to the `kernel` weights
            matrix.
        recurrent_regularizer: Regularizer function applied to the
            `recurrent_kernel` weights matrix.
        bias_regularizer: Regularizer function applied to the bias vector.
        activity_regularizer: Regularizer function applied to.
        kernel_constraint: Constraint function applied to the `kernel` weights
            matrix.
        recurrent_constraint: Constraint function applied to the
            `recurrent_kernel` weights matrix.
        bias_constraint: Constraint function applied to the bias vector.
        dropout: Float between 0 and 1. Fraction of the units to drop for the
            linear transformation of the inputs.
        recurrent_dropout: Float between 0 and 1. Fraction of the units to drop
            for the linear transformation of the recurrent state.
        seed: Random seed for dropout.
        return_sequences: Boolean. Whether to return the last output
            in the output sequence, or the full sequence. Default: `False`.
        return_state: Boolean. Whether to return the last state in addition
            to the output. Default: `False`.
        go_backwards: Boolean (default: `False`).
            If `True`, process the input sequence backwards and return the
            reversed sequence.
        stateful: Boolean (default False). If `True`, the last state
            for each sample at index i in a batch will be used as initial
            state for the sample of index i in the following batch.
        unroll: Boolean (default: `False`).
            If `True`, the network will be unrolled,
            else a symbolic loop will be used.
            Unrolling can speed-up a RNN,
            although it tends to be more memory-intensive.
            Unrolling is only suitable for short sequences.
    Call arguments:
        inputs: A 6D tensor.
        mask: Binary tensor of shape `(samples, timesteps)` indicating whether a
            given timestep should be masked.
        training: Python boolean indicating whether the layer should behave in
            training mode or in inference mode.
            This is only relevant if `dropout` or `recurrent_dropout` are set.
        initial_state: List of initial state tensors to be passed to the first
            call of the cell.
    Input shape:
    - If `data_format='channels_first'`:
        6D tensor with shape: `(samples, time, channels, *spatial_dims)`
    - If `data_format='channels_last'`:
        6D tensor with shape: `(samples, time, *spatial_dims, channels)`
    Output shape:
    - If `return_state`: a list of tensors. The first tensor is the output.
        The remaining tensors are the last states,
        each 5D tensor with shape: `(samples, filters, *spatial_dims)` if
        `data_format='channels_first'`
        or shape: `(samples, *spatial_dims, filters)` if
        `data_format='channels_last'`.
    - If `return_sequences`: 6D tensor with shape: `(samples, timesteps,
        filters, *spatial_dims)` if data_format='channels_first'
        or shape: `(samples, timesteps, *spatial_dims, filters)` if
        `data_format='channels_last'`.
    - Else, 5D tensor with shape: `(samples, filters, *spatial_dims)` if
        `data_format='channels_first'`
        or shape: `(samples, *spatial_dims, filters)` if
        `data_format='channels_last'`.
    References:
    - [Shi et al., 2015](http://arxiv.org/abs/1506.04214v1)
    (the current implementation does not include the feedback loop on the
    cells output).
    """
    def __init__(
        self,
        filters,
        kernel_size,
        strides=1,
        padding="valid",
        data_format=None,
        dilation_rate=1,
        activation="tanh",
        recurrent_activation="sigmoid",
        use_bias=True,
        kernel_initializer="glorot_uniform",
        recurrent_initializer="orthogonal",
        bias_initializer="zeros",
        unit_forget_bias=True,
        kernel_regularizer=None,
        recurrent_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        recurrent_constraint=None,
        bias_constraint=None,
        dropout=0.0,
        recurrent_dropout=0.0,
        seed=None,
        return_sequences=False,
        return_state=False,
        go_backwards=False,
        stateful=False,
        **kwargs,
    ):
        # rank=3 selects the 3D convolution path in the shared ConvLSTM base;
        # everything else is a straight pass-through of the constructor args.
        super().__init__(
            rank=3,
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            activation=activation,
            recurrent_activation=recurrent_activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            recurrent_initializer=recurrent_initializer,
            bias_initializer=bias_initializer,
            unit_forget_bias=unit_forget_bias,
            kernel_regularizer=kernel_regularizer,
            recurrent_regularizer=recurrent_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            recurrent_constraint=recurrent_constraint,
            bias_constraint=bias_constraint,
            return_sequences=return_sequences,
            return_state=return_state,
            go_backwards=go_backwards,
            stateful=stateful,
            dropout=dropout,
            recurrent_dropout=recurrent_dropout,
            seed=seed,
            **kwargs,
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/rnn/conv_lstm3d_test.py | keras/src/layers/rnn/conv_lstm3d_test.py | import numpy as np
import pytest
from keras.src import backend
from keras.src import initializers
from keras.src import layers
from keras.src import testing
class ConvLSTM1DTest(testing.TestCase):
    # NOTE(review): every test in this class exercises `layers.ConvLSTM3D`;
    # the class name `ConvLSTM1DTest` looks like a copy-paste leftover from
    # the 1D test file and should probably be `ConvLSTM3DTest`.
    @pytest.mark.requires_trainable_backend
    def test_basics(self):
        # Expected shapes depend on the globally configured image data format.
        channels_last = backend.config.image_data_format() == "channels_last"
        # Default config: "same" padding preserves the spatial dims.
        self.run_layer_test(
            layers.ConvLSTM3D,
            init_kwargs={"filters": 5, "kernel_size": 3, "padding": "same"},
            input_shape=(
                (3, 2, 4, 4, 4, 3) if channels_last else (3, 2, 3, 4, 4, 4)
            ),
            expected_output_shape=(
                (3, 4, 4, 4, 5) if channels_last else (3, 5, 4, 4, 4)
            ),
            expected_num_trainable_weights=3,
            expected_num_non_trainable_weights=0,
            supports_masking=True,
        )
        # "valid" padding with recurrent dropout (training=True path).
        self.run_layer_test(
            layers.ConvLSTM3D,
            init_kwargs={
                "filters": 5,
                "kernel_size": 3,
                "padding": "valid",
                "recurrent_dropout": 0.5,
            },
            input_shape=(
                (3, 2, 8, 8, 8, 3) if channels_last else (3, 2, 3, 8, 8, 8)
            ),
            call_kwargs={"training": True},
            expected_output_shape=(
                (3, 6, 6, 6, 5) if channels_last else (3, 5, 6, 6, 6)
            ),
            expected_num_trainable_weights=3,
            expected_num_non_trainable_weights=0,
            supports_masking=True,
        )
        # return_sequences=True keeps the time dimension in the output.
        self.run_layer_test(
            layers.ConvLSTM3D,
            init_kwargs={
                "filters": 5,
                "kernel_size": 3,
                "padding": "valid",
                "return_sequences": True,
            },
            input_shape=(
                (3, 2, 8, 8, 8, 3) if channels_last else (3, 2, 3, 8, 8, 8)
            ),
            expected_output_shape=(
                (3, 2, 6, 6, 6, 5) if channels_last else (3, 2, 5, 6, 6, 6)
            ),
            expected_num_trainable_weights=3,
            expected_num_non_trainable_weights=0,
            supports_masking=True,
        )
    def test_correctness(self):
        # Deterministic input plus constant initializers make the output a
        # fixed golden value (computed once and hard-coded below).
        sequence = (
            np.arange(1920).reshape((2, 3, 4, 4, 4, 5)).astype("float32") / 100
        )
        expected_output = np.array(
            [
                [
                    [
                        [[0.99149036, 0.99149036], [0.99180907, 0.99180907]],
                        [[0.99258363, 0.99258363], [0.9927925, 0.9927925]],
                    ],
                    [
                        [[0.99413764, 0.99413764], [0.99420583, 0.99420583]],
                        [[0.9943788, 0.9943788], [0.9944278, 0.9944278]],
                    ],
                ],
                [
                    [
                        [[0.9950547, 0.9950547], [0.9950547, 0.9950547]],
                        [[0.9950547, 0.9950547], [0.9950547, 0.9950547]],
                    ],
                    [
                        [[0.9950547, 0.9950547], [0.9950547, 0.9950547]],
                        [[0.9950547, 0.9950547], [0.9950547, 0.9950547]],
                    ],
                ],
            ]
        )
        if backend.config.image_data_format() == "channels_first":
            # Golden values were computed in channels_last; transpose both
            # the input and the expectation to match.
            sequence = sequence.transpose((0, 1, 5, 2, 3, 4))
            expected_output = expected_output.transpose((0, 4, 1, 2, 3))
        layer = layers.ConvLSTM3D(
            filters=2,
            kernel_size=3,
            kernel_initializer=initializers.Constant(0.01),
            recurrent_initializer=initializers.Constant(0.02),
            bias_initializer=initializers.Constant(0.03),
        )
        output = layer(sequence)
        # Looser tolerances on TPU, where matmul precision differs.
        self.assertAllClose(
            expected_output,
            output,
            tpu_atol=1e-4,
            tpu_rtol=1e-4,
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/rnn/time_distributed_test.py | keras/src/layers/rnn/time_distributed_test.py | import numpy as np
import pytest
from keras.src import backend
from keras.src import initializers
from keras.src import layers
from keras.src import ops
from keras.src import testing
from keras.src.models import Sequential
class TimeDistributedTest(testing.TestCase):
    @pytest.mark.requires_trainable_backend
    def test_basics(self):
        # Dense(1) applied per timestep: (3, 2, 4) -> (3, 2, 1).
        self.run_layer_test(
            layers.TimeDistributed,
            init_kwargs={"layer": layers.Dense(1, use_bias=False)},
            input_shape=(3, 2, 4),
            expected_output_shape=(3, 2, 1),
            expected_num_trainable_weights=1,
            expected_num_non_trainable_weights=0,
            supports_masking=True,
        )
    def test_build(self):
        # Shape bookkeeping only: the wrapped Conv2D must see per-frame
        # inputs, with the channel axis depending on the data format.
        if backend.config.image_data_format() == "channels_last":
            input_shape = (10, 128, 128, 3)
            output_shape = (32, 10, 126, 126, 64)
        else:
            input_shape = (10, 3, 128, 128)
            output_shape = (32, 10, 64, 126, 126)
        inputs = layers.Input(shape=input_shape, batch_size=32)
        conv_2d_layer = layers.Conv2D(64, (3, 3))
        outputs = layers.TimeDistributed(conv_2d_layer)(inputs)
        self.assertEqual(outputs.shape, output_shape)
    def test_correctness(self):
        # With a constant 0.01 kernel, each output is 0.01 * sum of the
        # timestep's features — hence the hard-coded expectation below.
        sequence = np.arange(24).reshape((3, 2, 4)).astype("float32")
        layer = layers.Dense(
            1,
            kernel_initializer=initializers.Constant(0.01),
            use_bias=False,
        )
        layer = layers.TimeDistributed(layer=layer)
        output = layer(sequence)
        self.assertAllClose(
            np.array(
                [[[0.06], [0.22]], [[0.38], [0.53999996]], [[0.7], [0.86]]]
            ),
            output,
        )
    def test_masking(self):
        # Wrapper that zeroes out the output of masked timesteps, so the
        # effect of mask propagation is directly observable in the values.
        class MaskedDense(layers.Wrapper):
            def __init__(self, units, **kwargs):
                layer = layers.Dense(
                    units,
                    kernel_initializer=initializers.Constant(0.01),
                    use_bias=False,
                )
                super().__init__(layer, **kwargs)
                self.supports_masking = True
            def call(self, inputs, training=False, mask=None):
                unmasked = self.layer.call(inputs)
                if mask is None:
                    return unmasked
                else:
                    # Transpose trick broadcasts the (batch,) mask over the
                    # trailing feature axis.
                    return ops.transpose(
                        ops.transpose(unmasked) * ops.cast(mask, inputs.dtype)
                    )
        sequence = np.arange(24).reshape((3, 2, 4)).astype("float32")
        layer = layers.TimeDistributed(layer=MaskedDense(1))
        mask = np.array([[False, True], [True, False], [True, True]])
        output = layer(sequence, mask=mask)
        self.assertAllClose(
            np.array([[[0], [0.22]], [[0.38], [0]], [[0.7], [0.86]]]),
            output,
        )
    @pytest.mark.requires_trainable_backend
    def test_with_mask_zero(self):
        # End-to-end fit with mask_zero Embedding feeding TimeDistributed.
        model = Sequential(
            [
                layers.Input(shape=(20,)),
                layers.Embedding(input_dim=10, output_dim=5, mask_zero=True),
                layers.TimeDistributed(
                    layers.Dense(units=5, activation="softmax")
                ),
            ]
        )
        model.compile(
            optimizer="adam",
            loss="sparse_categorical_crossentropy",
            metrics=["accuracy"],
        )
        X_train = np.random.uniform(1, 10, size=(22, 20))
        Y_train = np.random.randint(1, 2, size=(22, 20))
        model.fit(X_train, Y_train, epochs=1, batch_size=16)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/convolutional/conv_test.py | keras/src/layers/convolutional/conv_test.py | import os
import numpy as np
import pytest
from absl.testing import parameterized
from numpy.lib.stride_tricks import as_strided
from keras.src import backend
from keras.src import constraints
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import saving
from keras.src import testing
def _same_padding(input_size, kernel_size, stride):
if input_size % stride == 0:
padding = max(kernel_size - stride, 0)
else:
padding = max(kernel_size - (input_size % stride), 0)
return padding // 2, padding - padding // 2
def np_conv1d(
    x,
    kernel_weights,
    bias_weights,
    strides,
    padding,
    data_format,
    dilation_rate,
    groups,
):
    """NumPy reference implementation of a 1D convolution.

    Mirrors `Conv1D` semantics — `"valid"`, `"same"` and `"causal"` padding,
    dilation and grouped convolution — using explicit strided windows, so
    that layer outputs can be verified against a straightforward computation.
    """
    if data_format == "channels_first":
        # Work in channels_last internally; swap back at the end.
        x = x.swapaxes(1, 2)
    h_stride = strides[0] if isinstance(strides, (tuple, list)) else strides
    if isinstance(dilation_rate, (tuple, list)):
        dilation_rate = dilation_rate[0]
    kernel_size, ch_in, ch_out = kernel_weights.shape
    if dilation_rate > 1:
        # Dilation == interleaving zero taps into the kernel.
        dilated_size = kernel_size + (dilation_rate - 1) * (kernel_size - 1)
        dilated = np.zeros(
            (dilated_size, ch_in, ch_out), dtype=kernel_weights.dtype
        )
        dilated[::dilation_rate] = kernel_weights
        kernel_weights = dilated
        kernel_size = dilated_size
    if padding != "valid":
        _, h_x, _ = x.shape
        pad_lo, pad_hi = _same_padding(h_x, kernel_size, h_stride)
        pad_spec = [(0, 0)] * x.ndim
        if padding == "causal":
            # Causal padding puts everything on the left (past) side.
            pad_spec[1] = (pad_lo + pad_hi, 0)
        else:
            pad_spec[1] = (pad_lo, pad_hi)
        x = np.pad(x, pad_width=pad_spec, mode="constant", constant_values=0)
    n_batch, h_x, _ = x.shape
    h_out = (h_x - kernel_size) // h_stride + 1
    kernel_flat = kernel_weights.reshape(-1, ch_out)
    bias_flat = bias_weights.reshape(1, ch_out)
    ch_out_grp = ch_out // groups
    group_outputs = []
    for g in range(groups):
        x_g = x[..., g * ch_in : (g + 1) * ch_in]
        window_shape = (n_batch, h_out, kernel_size, ch_in)
        window_strides = (
            x_g.strides[0],
            h_stride * x_g.strides[1],
            x_g.strides[1],
            x_g.strides[2],
        )
        # Zero-copy view of all sliding windows, flattened per position.
        patches = as_strided(
            x_g, shape=window_shape, strides=window_strides
        ).reshape(n_batch, h_out, kernel_size * ch_in)
        k_g = kernel_flat[..., g * ch_out_grp : (g + 1) * ch_out_grp]
        b_g = bias_flat[..., g * ch_out_grp : (g + 1) * ch_out_grp]
        group_outputs.append(patches @ k_g + b_g)
    out = np.concatenate(group_outputs, axis=-1)
    if data_format == "channels_first":
        out = out.swapaxes(1, 2)
    return out
def np_conv2d(
    x,
    kernel_weights,
    bias_weights,
    strides,
    padding,
    data_format,
    dilation_rate,
    groups,
):
    """NumPy reference implementation of a 2D convolution.

    Mirrors `Conv2D` semantics — `"valid"`/`"same"` padding, dilation and
    grouped convolution — using explicit strided windows.
    """
    if data_format == "channels_first":
        # Work in channels_last internally; transpose back at the end.
        x = x.transpose((0, 2, 3, 1))
    if isinstance(strides, (tuple, list)):
        h_stride, w_stride = strides
    else:
        h_stride = w_stride = strides
    if isinstance(dilation_rate, (tuple, list)):
        h_dilation, w_dilation = dilation_rate
    else:
        h_dilation = w_dilation = dilation_rate
    h_kernel, w_kernel, ch_in, ch_out = kernel_weights.shape
    if h_dilation > 1 or w_dilation > 1:
        # Dilation == interleaving zero taps into the kernel.
        new_h = h_kernel + (h_dilation - 1) * (h_kernel - 1)
        new_w = w_kernel + (w_dilation - 1) * (w_kernel - 1)
        dilated = np.zeros(
            (new_h, new_w, ch_in, ch_out), dtype=kernel_weights.dtype
        )
        dilated[::h_dilation, ::w_dilation] = kernel_weights
        kernel_weights = dilated
        h_kernel, w_kernel = new_h, new_w
    if padding == "same":
        _, h_x, w_x, _ = x.shape
        pad_spec = [(0, 0)] * x.ndim
        pad_spec[1] = _same_padding(h_x, h_kernel, h_stride)
        pad_spec[2] = _same_padding(w_x, w_kernel, w_stride)
        x = np.pad(x, pad_width=pad_spec, mode="constant", constant_values=0)
    n_batch, h_x, w_x, _ = x.shape
    h_out = (h_x - h_kernel) // h_stride + 1
    w_out = (w_x - w_kernel) // w_stride + 1
    ch_out_grp = ch_out // groups
    group_outputs = []
    for g in range(groups):
        x_g = x[..., g * ch_in : (g + 1) * ch_in]
        window_shape = (n_batch, h_out, w_out, h_kernel, w_kernel, ch_in)
        window_strides = (
            x_g.strides[0],
            h_stride * x_g.strides[1],
            w_stride * x_g.strides[2],
            x_g.strides[1],
            x_g.strides[2],
            x_g.strides[3],
        )
        # Zero-copy view of all sliding windows, flattened per position.
        patches = as_strided(
            x_g, shape=window_shape, strides=window_strides
        ).reshape(-1, h_kernel * w_kernel * ch_in)
        k_g = kernel_weights[
            ..., g * ch_out_grp : (g + 1) * ch_out_grp
        ].reshape(-1, ch_out_grp)
        b_g = bias_weights[..., g * ch_out_grp : (g + 1) * ch_out_grp]
        group_outputs.append(patches @ k_g + b_g)
    out = np.concatenate(group_outputs, axis=-1).reshape(
        n_batch, h_out, w_out, ch_out
    )
    if data_format == "channels_first":
        out = out.transpose((0, 3, 1, 2))
    return out
def np_conv3d(
    x,
    kernel_weights,
    bias_weights,
    strides,
    padding,
    data_format,
    dilation_rate,
    groups,
):
    """NumPy reference implementation of a 3D convolution.

    Mirrors `Conv3D` semantics — `"valid"`/`"same"` padding, dilation and
    grouped convolution — using explicit strided windows.
    """
    if data_format == "channels_first":
        # Work in channels_last internally; transpose back at the end.
        x = x.transpose((0, 2, 3, 4, 1))
    if isinstance(strides, (tuple, list)):
        h_stride, w_stride, d_stride = strides
    else:
        h_stride = w_stride = d_stride = strides
    if isinstance(dilation_rate, (tuple, list)):
        h_dilation, w_dilation, d_dilation = dilation_rate
    else:
        h_dilation = w_dilation = d_dilation = dilation_rate
    h_kernel, w_kernel, d_kernel, ch_in, ch_out = kernel_weights.shape
    if h_dilation > 1 or w_dilation > 1 or d_dilation > 1:
        # Dilation == interleaving zero taps into the kernel.
        new_h = h_kernel + (h_dilation - 1) * (h_kernel - 1)
        new_w = w_kernel + (w_dilation - 1) * (w_kernel - 1)
        new_d = d_kernel + (d_dilation - 1) * (d_kernel - 1)
        dilated = np.zeros(
            (new_h, new_w, new_d, ch_in, ch_out),
            dtype=kernel_weights.dtype,
        )
        dilated[::h_dilation, ::w_dilation, ::d_dilation] = kernel_weights
        kernel_weights = dilated
        h_kernel, w_kernel, d_kernel = new_h, new_w, new_d
    if padding == "same":
        _, h_x, w_x, d_x, _ = x.shape
        pad_spec = [(0, 0)] * x.ndim
        pad_spec[1] = _same_padding(h_x, h_kernel, h_stride)
        pad_spec[2] = _same_padding(w_x, w_kernel, w_stride)
        pad_spec[3] = _same_padding(d_x, d_kernel, d_stride)
        x = np.pad(x, pad_width=pad_spec, mode="constant", constant_values=0)
    n_batch, h_x, w_x, d_x, _ = x.shape
    h_out = (h_x - h_kernel) // h_stride + 1
    w_out = (w_x - w_kernel) // w_stride + 1
    d_out = (d_x - d_kernel) // d_stride + 1
    ch_out_grp = ch_out // groups
    group_outputs = []
    for g in range(groups):
        x_g = x[..., g * ch_in : (g + 1) * ch_in]
        window_shape = (
            n_batch,
            h_out,
            w_out,
            d_out,
            h_kernel,
            w_kernel,
            d_kernel,
            ch_in,
        )
        window_strides = (
            x_g.strides[0],
            h_stride * x_g.strides[1],
            w_stride * x_g.strides[2],
            d_stride * x_g.strides[3],
            x_g.strides[1],
            x_g.strides[2],
            x_g.strides[3],
            x_g.strides[4],
        )
        # Zero-copy view of all sliding windows, flattened per position.
        patches = as_strided(
            x_g, shape=window_shape, strides=window_strides
        ).reshape(-1, h_kernel * w_kernel * d_kernel * ch_in)
        k_g = kernel_weights[
            ..., g * ch_out_grp : (g + 1) * ch_out_grp
        ].reshape(-1, ch_out_grp)
        b_g = bias_weights[..., g * ch_out_grp : (g + 1) * ch_out_grp]
        group_outputs.append(patches @ k_g + b_g)
    out = np.concatenate(group_outputs, axis=-1).reshape(
        n_batch, h_out, w_out, d_out, ch_out
    )
    if data_format == "channels_first":
        out = out.transpose((0, 4, 1, 2, 3))
    return out
class ConvBasicTest(testing.TestCase):
    """Config-level tests for `Conv1D`/`Conv2D`/`Conv3D`.

    Covers output-shape inference and weight bookkeeping via
    `run_layer_test`, constructor argument validation, and the LoRA
    (low-rank adaptation) workflow: enabling LoRA, training the LoRA
    factors, and saving/loading both full models and weights-only
    checkpoints.
    """

    @parameterized.parameters(
        {
            "filters": 5,
            "kernel_size": 2,
            "strides": 1,
            "padding": "valid",
            "data_format": "channels_last",
            "dilation_rate": 1,
            "groups": 1,
            "input_shape": (3, 5, 4),
            "output_shape": (3, 4, 5),
        },
        {
            "filters": 6,
            "kernel_size": 2,
            "strides": 1,
            "padding": "same",
            "data_format": "channels_last",
            "dilation_rate": (2,),
            "groups": 2,
            "input_shape": (3, 4, 4),
            "output_shape": (3, 4, 6),
        },
        {
            "filters": 6,
            "kernel_size": 2,
            "strides": 1,
            "padding": "causal",
            "data_format": "channels_last",
            "dilation_rate": (2,),
            "groups": 2,
            "input_shape": (3, 4, 4),
            "output_shape": (3, 4, 6),
        },
        {
            "filters": 6,
            "kernel_size": 2,
            "strides": (2,),
            "padding": "valid",
            "data_format": "channels_last",
            "dilation_rate": 1,
            "groups": 2,
            "input_shape": (3, 5, 4),
            "output_shape": (3, 2, 6),
        },
    )
    @pytest.mark.requires_trainable_backend
    def test_conv1d_basic(
        self,
        filters,
        kernel_size,
        strides,
        padding,
        data_format,
        dilation_rate,
        groups,
        input_shape,
        output_shape,
    ):
        """`Conv1D` builds cleanly and yields the expected output shape."""
        self.run_layer_test(
            layers.Conv1D,
            init_kwargs={
                "filters": filters,
                "kernel_size": kernel_size,
                "strides": strides,
                "padding": padding,
                "data_format": data_format,
                "dilation_rate": dilation_rate,
                "groups": groups,
            },
            input_shape=input_shape,
            expected_output_shape=output_shape,
            # Kernel + bias (see ConvCorrectnessTest, which assigns both).
            expected_num_trainable_weights=2,
            expected_num_non_trainable_weights=0,
            expected_num_losses=0,
            supports_masking=False,
        )
    @parameterized.parameters(
        {
            "filters": 5,
            "kernel_size": 2,
            "strides": 1,
            "padding": "valid",
            "data_format": "channels_last",
            "dilation_rate": 1,
            "groups": 1,
            "input_shape": (3, 5, 5, 4),
            "output_shape": (3, 4, 4, 5),
        },
        {
            "filters": 6,
            "kernel_size": 2,
            "strides": 1,
            "padding": "same",
            "data_format": "channels_last",
            "dilation_rate": (2, 2),
            "groups": 2,
            "input_shape": (3, 4, 4, 4),
            "output_shape": (3, 4, 4, 6),
        },
        {
            "filters": 6,
            "kernel_size": (2, 2),
            "strides": (2, 1),
            "padding": "valid",
            "data_format": "channels_last",
            "dilation_rate": (1, 1),
            "groups": 2,
            "input_shape": (3, 5, 5, 4),
            "output_shape": (3, 2, 4, 6),
        },
    )
    @pytest.mark.requires_trainable_backend
    def test_conv2d_basic(
        self,
        filters,
        kernel_size,
        strides,
        padding,
        data_format,
        dilation_rate,
        groups,
        input_shape,
        output_shape,
    ):
        """`Conv2D` builds cleanly and yields the expected output shape."""
        self.run_layer_test(
            layers.Conv2D,
            init_kwargs={
                "filters": filters,
                "kernel_size": kernel_size,
                "strides": strides,
                "padding": padding,
                "data_format": data_format,
                "dilation_rate": dilation_rate,
                "groups": groups,
            },
            input_shape=input_shape,
            expected_output_shape=output_shape,
            expected_num_trainable_weights=2,
            expected_num_non_trainable_weights=0,
            expected_num_losses=0,
            supports_masking=False,
        )
    @parameterized.parameters(
        {
            "filters": 5,
            "kernel_size": 2,
            "strides": 1,
            "padding": "valid",
            "data_format": "channels_last",
            "dilation_rate": 1,
            "groups": 1,
            "input_shape": (3, 5, 5, 5, 4),
            "output_shape": (3, 4, 4, 4, 5),
        },
        {
            "filters": 6,
            "kernel_size": 2,
            "strides": 1,
            "padding": "same",
            "data_format": "channels_last",
            "dilation_rate": (2, 2, 2),
            "groups": 2,
            "input_shape": (3, 4, 4, 4, 4),
            "output_shape": (3, 4, 4, 4, 6),
        },
        {
            "filters": 6,
            "kernel_size": (2, 2, 3),
            "strides": (2, 1, 2),
            "padding": "valid",
            "data_format": "channels_last",
            "dilation_rate": (1, 1, 1),
            "groups": 2,
            "input_shape": (3, 5, 5, 5, 4),
            "output_shape": (3, 2, 4, 2, 6),
        },
    )
    @pytest.mark.requires_trainable_backend
    def test_conv3d_basic(
        self,
        filters,
        kernel_size,
        strides,
        padding,
        data_format,
        dilation_rate,
        groups,
        input_shape,
        output_shape,
    ):
        """`Conv3D` builds cleanly and yields the expected output shape."""
        self.run_layer_test(
            layers.Conv3D,
            init_kwargs={
                "filters": filters,
                "kernel_size": kernel_size,
                "strides": strides,
                "padding": padding,
                "data_format": data_format,
                "dilation_rate": dilation_rate,
                "groups": groups,
            },
            input_shape=input_shape,
            expected_output_shape=output_shape,
            expected_num_trainable_weights=2,
            expected_num_non_trainable_weights=0,
            expected_num_losses=0,
            supports_masking=False,
        )
    def test_bad_init_args(self):
        """Invalid constructor arguments raise `ValueError` with a clear
        message."""
        # `filters` is not positive.
        with self.assertRaisesRegex(
            ValueError,
            "Invalid value for argument `filters`. Expected a "
            "strictly positive value. Received filters=0.",
        ):
            layers.Conv1D(filters=0, kernel_size=1)
        # `kernel_size` has 0.
        with self.assertRaisesRegex(
            ValueError,
            r"The `kernel_size` argument must be a tuple of \d+ "
            r"integers. Received kernel_size=\(1, 0\), including values \{0\} "
            r"that do not satisfy `value > 0`",
        ):
            layers.Conv2D(filters=2, kernel_size=(1, 0))
        # `strides` has 0.
        with self.assertRaisesRegex(
            ValueError,
            r"The `strides` argument must be a tuple of \d+ "
            r"integers. Received strides=\(1, 0\), including values \{0\} that "
            r"do not satisfy `value > 0`",
        ):
            layers.Conv2D(filters=2, kernel_size=(2, 2), strides=(1, 0))
        # `dilation_rate > 1` while `strides > 1`.
        with self.assertRaisesRegex(
            ValueError,
            r"`strides > 1` not supported in conjunction with "
            r"`dilation_rate > 1`. Received: strides=\(2, 2\) and "
            r"dilation_rate=\(2, 1\)",
        ):
            layers.Conv2D(
                filters=2, kernel_size=(2, 2), strides=2, dilation_rate=(2, 1)
            )
        # `groups` is not strictly positive.
        with self.assertRaisesRegex(
            ValueError,
            "The number of groups must be a positive integer. "
            "Received: groups=0.",
        ):
            layers.Conv2D(filters=5, kernel_size=(2, 2), groups=0)
        # `filters` cannot be divided by `groups`.
        with self.assertRaisesRegex(
            ValueError,
            "The number of filters must be evenly divisible by the"
            " number of groups. Received: groups=2, filters=5.",
        ):
            layers.Conv2D(filters=5, kernel_size=(2, 2), groups=2)
    @parameterized.named_parameters(
        {
            "testcase_name": "conv1d_kernel_size3_strides1",
            "conv_cls": layers.Conv1D,
            "filters": 6,
            "kernel_size": 3,
            "strides": 1,
            "padding": "valid",
            "data_format": "channels_last",
            "dilation_rate": 1,
            "groups": 1,
            "input_shape": (None, 5, 4),
            "output_shape": (None, 3, 6),
        },
        {
            "testcase_name": "conv1d_kernel_size2_strides2",
            "conv_cls": layers.Conv1D,
            "filters": 6,
            "kernel_size": 2,
            "strides": 2,
            "padding": "valid",
            "data_format": "channels_last",
            "dilation_rate": 1,
            "groups": 2,
            "input_shape": (None, 5, 4),
            "output_shape": (None, 2, 6),
        },
        {
            "testcase_name": "conv2d_kernel_size3_strides1",
            "conv_cls": layers.Conv2D,
            "filters": 6,
            "kernel_size": 3,
            "strides": 1,
            "padding": "valid",
            "data_format": "channels_last",
            "dilation_rate": 1,
            "groups": 1,
            "input_shape": (None, 5, 5, 4),
            "output_shape": (None, 3, 3, 6),
        },
        {
            "testcase_name": "conv2d_kernel_size2_strides2",
            "conv_cls": layers.Conv2D,
            "filters": 6,
            "kernel_size": 2,
            "strides": 2,
            "padding": "valid",
            "data_format": "channels_last",
            "dilation_rate": 1,
            "groups": 2,
            "input_shape": (None, 5, 5, 4),
            "output_shape": (None, 2, 2, 6),
        },
        {
            "testcase_name": "conv3d_kernel_size3_strides1",
            "conv_cls": layers.Conv3D,
            "filters": 6,
            "kernel_size": 3,
            "strides": 1,
            "padding": "valid",
            "data_format": "channels_last",
            "dilation_rate": 1,
            "groups": 1,
            "input_shape": (None, 5, 5, 5, 4),
            "output_shape": (None, 3, 3, 3, 6),
        },
        {
            "testcase_name": "conv3d_kernel_size2_strides2",
            "conv_cls": layers.Conv3D,
            "filters": 6,
            "kernel_size": 2,
            "strides": 2,
            "padding": "valid",
            "data_format": "channels_last",
            "dilation_rate": 1,
            "groups": 2,
            "input_shape": (None, 5, 5, 5, 4),
            "output_shape": (None, 2, 2, 2, 6),
        },
    )
    @pytest.mark.requires_trainable_backend
    def test_enable_lora(
        self,
        conv_cls,
        filters,
        kernel_size,
        strides,
        padding,
        data_format,
        dilation_rate,
        groups,
        input_shape,
        output_shape,
    ):
        """End-to-end LoRA workflow: enable, train, save/load model and
        weights, and load LoRA/non-LoRA checkpoints interchangeably."""
        # Guard against a misconfigured parameterization entry.
        if conv_cls not in (layers.Conv1D, layers.Conv2D, layers.Conv3D):
            raise TypeError
        layer = conv_cls(
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            groups=groups,
        )
        layer.build(input_shape)
        layer.enable_lora(2)
        # LoRA freezes the base kernel (non-trainable) and adds the two
        # trainable low-rank factors, leaving bias trainable as well.
        self.assertLen(layer.trainable_weights, 3)
        self.assertLen(layer.non_trainable_weights, 1)
        if backend.backend() == "torch":
            self.assertLen(layer.torch_params, 4)
        # Try eager call
        x = np.random.random((64,) + input_shape[1:])
        y = np.random.random((64,) + output_shape[1:])
        _ = layer(x[:2])
        init_lora_a_kernel_value = layer.lora_kernel_a.numpy()
        init_lora_b_kernel_value = layer.lora_kernel_b.numpy()
        # Try calling fit()
        model = models.Sequential([layer])
        model.compile(optimizer="sgd", loss="mse")
        model.fit(x, y)
        final_lora_a_kernel_value = layer.lora_kernel_a.numpy()
        final_lora_b_kernel_value = layer.lora_kernel_b.numpy()
        diff_a = np.max(
            np.abs(init_lora_a_kernel_value - final_lora_a_kernel_value)
        )
        diff_b = np.max(
            np.abs(init_lora_b_kernel_value - final_lora_b_kernel_value)
        )
        # Both LoRA factors must have received gradient updates.
        self.assertGreater(diff_a, 0.0)
        self.assertGreater(diff_b, 0.0)
        # Try saving and reloading the model
        temp_filepath = os.path.join(self.get_temp_dir(), "lora_model.keras")
        model.save(temp_filepath)
        new_model = saving.load_model(temp_filepath)
        self.assertTrue(new_model.layers[0].lora_enabled)
        self.assertAllClose(model.predict(x), new_model.predict(x))
        # Try saving and reloading the model's weights only
        temp_filepath = os.path.join(
            self.get_temp_dir(), "lora_model.weights.h5"
        )
        model.save_weights(temp_filepath)
        # Load the file into a fresh, non-lora model
        new_model = models.Sequential(
            [
                conv_cls(
                    filters=filters,
                    kernel_size=kernel_size,
                    strides=strides,
                    padding=padding,
                    data_format=data_format,
                    dilation_rate=dilation_rate,
                    groups=groups,
                )
            ]
        )
        new_model.build(input_shape)
        new_model.load_weights(temp_filepath)
        self.assertAllClose(model.predict(x), new_model.predict(x))
        # Try loading a normal checkpoint into a lora model
        new_model.save_weights(temp_filepath)
        model.load_weights(temp_filepath)
        self.assertAllClose(model.predict(x), new_model.predict(x))
    @pytest.mark.requires_trainable_backend
    def test_lora_weight_name(self):
        """LoRA factor variables use the `<model>/<layer>/<name>` path."""
        class MyModel(models.Model):
            def __init__(self):
                super().__init__(name="mymodel")
                self.conv2d = layers.Conv2D(4, 3, name="conv2d")
            def build(self, input_shape):
                self.conv2d.build(input_shape)
            def call(self, x):
                return self.conv2d(x)
        model = MyModel()
        model.build((None, 5, 5, 4))
        model.conv2d.enable_lora(2)
        self.assertEqual(
            model.conv2d.lora_kernel_a.path, "mymodel/conv2d/lora_kernel_a"
        )
    @pytest.mark.requires_trainable_backend
    def test_enable_lora_with_alpha(self):
        """Effective kernel is `base + (alpha / rank) * (A @ B)` when a
        custom `lora_alpha` is used."""
        # Create a `Conv2D` layer with a small kernel for simplicity.
        layer = layers.Conv2D(filters=3, kernel_size=(2, 2), padding="valid")
        # Use a fixed input shape: batch size 1, height=4, width=4, channels=3.
        input_shape = (1, 4, 4, 3)
        layer.build(input_shape)
        # Set the base kernel to known, deterministic values.
        base_kernel = np.linspace(
            0, 1, num=np.prod(layer.kernel.shape), dtype=np.float32
        )
        base_kernel = base_kernel.reshape(layer.kernel.shape)
        layer.kernel.assign(base_kernel)
        # Enable LoRA with `rank`=2 and a custom `lora_alpha` value (e.g. 3.0).
        layer.enable_lora(rank=2, lora_alpha=3.0)
        self.assertEqual(layer.lora_rank, 2)
        self.assertEqual(layer.lora_alpha, 3.0)
        # For `Conv2D`, assume the LoRA weights have shapes:
        # `lora_kernel_a`: (kernel_height, kernel_width, in_channels, rank)
        # `lora_kernel_b`: (rank, out_channels)
        lora_a_shape = layer.lora_kernel_a.shape
        lora_b_shape = layer.lora_kernel_b.shape
        # Assign known constant values to LoRA weights.
        lora_a = np.full(lora_a_shape, 0.1, dtype=np.float32)
        lora_b = np.full(lora_b_shape, 0.2, dtype=np.float32)
        layer.lora_kernel_a.assign(lora_a)
        layer.lora_kernel_b.assign(lora_b)
        # Compute the expected delta.
        # Flatten `lora_kernel_a` to shape (-1, `rank`),
        # multiply with `lora_kernel_b`,
        # then reshape to the kernel's shape.
        scaling = 3.0 / 2  # `lora_alpha / lora_rank`
        delta = np.matmul(lora_a.reshape(-1, 2), lora_b)
        delta = delta.reshape(base_kernel.shape)
        expected_effective_kernel = base_kernel + scaling * delta
        # Compare the effective kernel computed via the property.
        actual_effective_kernel = ops.convert_to_numpy(layer.kernel)
        self.assertAllClose(
            actual_effective_kernel,
            expected_effective_kernel,
            tpu_atol=1e-3,
            tpu_rtol=1e-3,
        )
    @pytest.mark.requires_trainable_backend
    def test_lora_rank_argument(self):
        """Passing `lora_rank` at construction enables LoRA from the start."""
        self.run_layer_test(
            layers.Conv2D,
            init_kwargs={
                "filters": 5,
                "kernel_size": 3,
                "activation": "sigmoid",
                "data_format": "channels_last",
                "kernel_regularizer": "l2",
                "lora_rank": 2,
            },
            input_shape=(2, 5, 5, 4),
            expected_output_shape=(2, 3, 3, 5),
            expected_num_trainable_weights=3,
            expected_num_non_trainable_weights=1,
            expected_num_seed_generators=0,
            expected_num_losses=2,  # we have 2 regularizers.
            supports_masking=False,
        )
class ConvCorrectnessTest(testing.TestCase):
@parameterized.parameters(
{
"filters": 5,
"kernel_size": 2,
"strides": 1,
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": 1,
"groups": 1,
},
{
"filters": 6,
"kernel_size": 2,
"strides": 1,
"padding": "same",
"data_format": "channels_last",
"dilation_rate": (2,),
"groups": 2,
},
{
"filters": 6,
"kernel_size": 2,
"strides": 1,
"padding": "causal",
"data_format": "channels_last",
"dilation_rate": (2,),
"groups": 2,
},
{
"filters": 6,
"kernel_size": (2,),
"strides": (2,),
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": 1,
"groups": 2,
},
{
"filters": 6,
"kernel_size": (2,),
"strides": (2,),
"padding": "valid",
"data_format": "channels_first",
"dilation_rate": 1,
"groups": 2,
},
)
def test_conv1d(
self,
filters,
kernel_size,
strides,
padding,
data_format,
dilation_rate,
groups,
):
layer = layers.Conv1D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
groups=groups,
)
inputs = np.random.normal(size=[2, 8, 4])
layer.build(input_shape=inputs.shape)
kernel_shape = layer.kernel.shape
kernel_weights = np.random.normal(size=kernel_shape)
bias_weights = np.random.normal(size=(filters,))
layer.kernel.assign(kernel_weights)
layer.bias.assign(bias_weights)
outputs = layer(inputs)
expected = np_conv1d(
inputs,
kernel_weights,
bias_weights,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
groups=groups,
)
self.assertAllClose(outputs, expected, tpu_atol=1e-1, tpu_rtol=1e-1)
@parameterized.parameters(
{
"filters": 5,
"kernel_size": 2,
"strides": 1,
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": 1,
"groups": 1,
},
{
"filters": 4,
"kernel_size": 3,
"strides": 2,
"padding": "same",
"data_format": "channels_last",
"dilation_rate": 1,
"groups": 1,
},
{
"filters": 6,
"kernel_size": 2,
"strides": 1,
"padding": "same",
"data_format": "channels_last",
"dilation_rate": (2, 2),
"groups": 2,
},
{
"filters": 6,
"kernel_size": 2,
"strides": 1,
"padding": "same",
"data_format": "channels_last",
"dilation_rate": (2, 3),
"groups": 2,
},
{
"filters": 6,
"kernel_size": (4, 3),
"strides": (2, 1),
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": (1, 1),
"groups": 2,
},
{
"filters": 6,
"kernel_size": (4, 3),
"strides": (2, 1),
"padding": "valid",
"data_format": "channels_first",
"dilation_rate": (1, 1),
"groups": 2,
},
)
def test_conv2d(
self,
filters,
kernel_size,
strides,
padding,
data_format,
dilation_rate,
groups,
):
layer = layers.Conv2D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
groups=groups,
)
inputs = np.random.normal(size=[2, 8, 8, 4])
layer.build(input_shape=inputs.shape)
kernel_shape = layer.kernel.shape
kernel_weights = np.random.normal(size=kernel_shape)
bias_weights = np.random.normal(size=(filters,))
layer.kernel.assign(kernel_weights)
layer.bias.assign(bias_weights)
outputs = layer(inputs)
expected = np_conv2d(
inputs,
kernel_weights,
bias_weights,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
groups=groups,
)
self.assertAllClose(
outputs, expected, rtol=5e-4, tpu_atol=1e-1, tpu_rtol=1e-1
)
@parameterized.parameters(
{
"filters": 5,
"kernel_size": 2,
"strides": 1,
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": 1,
"groups": 1,
},
{
"filters": 6,
"kernel_size": 2,
"strides": 1,
"padding": "same",
"data_format": "channels_last",
"dilation_rate": (2, 2, 2),
"groups": 2,
},
{
"filters": 6,
"kernel_size": 2,
"strides": 1,
"padding": "same",
"data_format": "channels_last",
"dilation_rate": (2, 3, 4),
"groups": 2,
},
{
"filters": 6,
"kernel_size": (2, 2, 3),
"strides": (2, 1, 2),
"padding": "valid",
"data_format": "channels_last",
"dilation_rate": (1, 1, 1),
"groups": 2,
},
{
"filters": 6,
"kernel_size": (2, 2, 3),
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/convolutional/base_conv_transpose.py | keras/src/layers/convolutional/base_conv_transpose.py | """Keras base class for transpose convolution layers."""
from keras.src import activations
from keras.src import constraints
from keras.src import initializers
from keras.src import ops
from keras.src import regularizers
from keras.src.backend import standardize_data_format
from keras.src.backend.common.backend_utils import (
compute_conv_transpose_output_shape,
)
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.utils.argument_validation import standardize_padding
from keras.src.utils.argument_validation import standardize_tuple
class BaseConvTranspose(Layer):
    """Abstract N-D transposed convolution layer.

    The need for transposed convolutions generally arises from the desire to use
    a transformation going in the opposite direction of a normal convolution,
    i.e., from something that has the shape of the output of some convolution to
    something that has the shape of its input while maintaining a connectivity
    pattern that is compatible with said convolution.

    Args:
        rank: int, the rank of the transposed convolution, e.g. 2 for 2D
            transposed convolution.
        filters: int, the dimension of the output space (the number of filters
            in the transposed convolution).
        kernel_size: int or tuple/list of `rank` integers, specifying the size
            of the transposed convolution window.
        strides: int or tuple/list of `rank` integers, specifying the stride
            length of the transposed convolution. If only one int is specified,
            the same stride size will be used for all dimensions.
            `strides > 1` is incompatible with `dilation_rate > 1`.
        padding: string, either `"valid"` or `"same"` (case-insensitive).
            `"valid"` means no padding. `"same"` results in padding evenly to
            the left/right or up/down of the input such that output has the same
            height/width dimension as the input.
        data_format: string, either `"channels_last"` or `"channels_first"`.
            The ordering of the dimensions in the inputs. `"channels_last"`
            corresponds to inputs with shape `(batch, steps, features)`
            while `"channels_first"` corresponds to inputs with shape
            `(batch, features, steps)`. It defaults to the `image_data_format`
            value found in your Keras config file at `~/.keras/keras.json`.
            If you never set it, then it will be `"channels_last"`.
        dilation_rate: int or tuple/list of `rank` integers, specifying the
            dilation rate to use for dilated convolution. If only one int is
            specified, the same dilation rate will be used for all dimensions.
        activation: Activation function. If `None`, no activation is applied.
        use_bias: bool, if `True`, bias will be added to the output.
        kernel_initializer: Initializer for the convolution kernel. If `None`,
            the default initializer (`"glorot_uniform"`) will be used.
        bias_initializer: Initializer for the bias vector. If `None`, the
            default initializer (`"zeros"`) will be used.
        kernel_regularizer: Optional regularizer for the convolution kernel.
        bias_regularizer: Optional regularizer for the bias vector.
        activity_regularizer: Optional regularizer function for the output.
        kernel_constraint: Optional projection function to be applied to the
            kernel after being updated by an `Optimizer` (e.g. used to implement
            norm constraints or value constraints for layer weights). The
            function must take as input the unprojected variable and must return
            the projected variable (which must have the same shape). Constraints
            are not safe to use when doing asynchronous distributed training.
        bias_constraint: Optional projection function to be applied to the
            bias after being updated by an `Optimizer`.
    """

    def __init__(
        self,
        rank,
        filters,
        kernel_size,
        strides=1,
        padding="valid",
        output_padding=None,
        data_format=None,
        dilation_rate=1,
        activation=None,
        use_bias=True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        trainable=True,
        name=None,
        **kwargs,
    ):
        super().__init__(
            trainable=trainable,
            name=name,
            activity_regularizer=activity_regularizer,
            **kwargs,
        )
        self.rank = rank
        self.filters = filters
        # Normalize scalar-or-sequence arguments to `rank`-length tuples.
        self.kernel_size = standardize_tuple(kernel_size, rank, "kernel_size")
        self.strides = standardize_tuple(strides, rank, "strides")
        self.dilation_rate = standardize_tuple(
            dilation_rate, rank, "dilation_rate"
        )
        self.padding = standardize_padding(padding)
        if output_padding is None:
            self.output_padding = None
        else:
            # Zero is a valid output padding value (it only disambiguates
            # the output shape), hence `allow_zero=True`.
            self.output_padding = standardize_tuple(
                output_padding,
                rank,
                "output_padding",
                allow_zero=True,
            )
        self.data_format = standardize_data_format(data_format)
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(min_ndim=self.rank + 2)

        # Validate arguments after standardization so error messages show
        # the normalized tuple forms.
        if self.filters is not None and self.filters <= 0:
            raise ValueError(
                "Invalid value for argument `filters`. Expected a strictly "
                f"positive value. Received filters={self.filters}."
            )
        if not all(self.kernel_size):
            raise ValueError(
                "The argument `kernel_size` cannot contain 0. Received "
                f"kernel_size={self.kernel_size}."
            )
        if not all(self.strides):
            raise ValueError(
                "The argument `strides` cannot contain 0. Received "
                f"strides={self.strides}."
            )
        if max(self.strides) > 1 and max(self.dilation_rate) > 1:
            raise ValueError(
                "`strides > 1` not supported in conjunction with "
                f"`dilation_rate > 1`. Received: strides={self.strides} and "
                f"dilation_rate={self.dilation_rate}"
            )

    def build(self, input_shape):
        """Create the `kernel` (and optional `bias`) variables.

        The kernel is stored with shape `kernel_size + (filters,
        input_channels)`.
        """
        if self.data_format == "channels_last":
            channel_axis = -1
            input_channel = input_shape[-1]
        else:
            channel_axis = 1
            input_channel = input_shape[1]
        # Pin the channel count so later calls with a different number of
        # input channels raise a clear spec error.
        self.input_spec = InputSpec(
            min_ndim=self.rank + 2, axes={channel_axis: input_channel}
        )
        kernel_shape = self.kernel_size + (
            self.filters,
            input_channel,
        )
        self.kernel = self.add_weight(
            name="kernel",
            shape=kernel_shape,
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
            trainable=True,
            dtype=self.dtype,
        )
        if self.use_bias:
            self.bias = self.add_weight(
                name="bias",
                shape=(self.filters,),
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
                trainable=True,
                dtype=self.dtype,
            )
        else:
            self.bias = None

    def call(self, inputs):
        """Apply the transposed convolution, bias and activation."""
        outputs = ops.conv_transpose(
            inputs,
            self.kernel,
            strides=list(self.strides),
            padding=self.padding,
            output_padding=self.output_padding,
            dilation_rate=self.dilation_rate,
            data_format=self.data_format,
        )
        if self.use_bias:
            # Reshape the bias so it broadcasts across the spatial axes for
            # either data format.
            if self.data_format == "channels_last":
                bias_shape = (1,) * (self.rank + 1) + (self.filters,)
            else:
                bias_shape = (1, self.filters) + (1,) * self.rank
            bias = ops.reshape(self.bias, bias_shape)
            outputs = ops.add(outputs, bias)
        if self.activation is not None:
            return self.activation(outputs)
        return outputs

    def compute_output_shape(self, input_shape):
        """Infer the output shape from the transpose-conv geometry."""
        return compute_conv_transpose_output_shape(
            input_shape,
            self.kernel_size,
            self.filters,
            strides=self.strides,
            padding=self.padding,
            output_padding=self.output_padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate,
        )

    def get_config(self):
        """Serialize constructor arguments for layer reconstruction."""
        config = super().get_config()
        config.update(
            {
                "filters": self.filters,
                "kernel_size": self.kernel_size,
                "strides": self.strides,
                "padding": self.padding,
                "data_format": self.data_format,
                "dilation_rate": self.dilation_rate,
                "activation": activations.serialize(self.activation),
                "use_bias": self.use_bias,
                "kernel_initializer": initializers.serialize(
                    self.kernel_initializer
                ),
                "bias_initializer": initializers.serialize(
                    self.bias_initializer
                ),
                "kernel_regularizer": regularizers.serialize(
                    self.kernel_regularizer
                ),
                "bias_regularizer": regularizers.serialize(
                    self.bias_regularizer
                ),
                "activity_regularizer": regularizers.serialize(
                    self.activity_regularizer
                ),
                "kernel_constraint": constraints.serialize(
                    self.kernel_constraint
                ),
                "bias_constraint": constraints.serialize(self.bias_constraint),
            }
        )
        return config
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/convolutional/conv2d_transpose.py | keras/src/layers/convolutional/conv2d_transpose.py | from keras.src.api_export import keras_export
from keras.src.layers.convolutional.base_conv_transpose import BaseConvTranspose
@keras_export(
[
"keras.layers.Conv2DTranspose",
"keras.layers.Convolution2DTranspose",
]
)
class Conv2DTranspose(BaseConvTranspose):
    """2D transposed convolution layer.

    The need for transposed convolutions generally arises from the desire to
    use a transformation going in the opposite direction of a normal
    convolution, i.e., from something that has the shape of the output of some
    convolution to something that has the shape of its input while maintaining
    a connectivity pattern that is compatible with said convolution.

    Args:
        filters: int, the dimension of the output space (the number of filters
            in the transposed convolution).
        kernel_size: int or tuple/list of 2 integers, specifying the size of
            the transposed convolution window.
        strides: int or tuple/list of 2 integers, specifying the stride length
            of the transposed convolution. `strides > 1` is incompatible with
            `dilation_rate > 1`.
        padding: string, either `"valid"` or `"same"` (case-insensitive).
            `"valid"` means no padding. `"same"` results in padding evenly to
            the left/right or up/down of the input. When `padding="same"` and
            `strides=1`, the output has the same size as the input.
        output_padding: An integer or tuple/list of 2 integers,
            specifying the amount of padding along the height and width
            of the output tensor.
            Can be a single integer to specify the same value for all
            spatial dimensions.
            The amount of output padding along a given dimension must be
            lower than the stride along that same dimension.
            If set to `None` (default), the output shape is inferred.
        data_format: string, either `"channels_last"` or `"channels_first"`.
            The ordering of the dimensions in the inputs. `"channels_last"`
            corresponds to inputs with shape
            `(batch_size, height, width, channels)`
            while `"channels_first"` corresponds to inputs with shape
            `(batch_size, channels, height, width)`. It defaults to the
            `image_data_format` value found in your Keras config file at
            `~/.keras/keras.json`. If you never set it, then it will be
            `"channels_last"`.
        dilation_rate: An integer or tuple/list of 2 integers,
            specifying the dilation rate for
            all spatial dimensions for dilated convolution.
            Specifying different dilation rates
            for different dimensions is not supported.
            Currently, specifying any `dilation_rate` value != 1 is
            incompatible with specifying any stride value != 1.
        activation: Activation function. If `None`, no activation is applied.
        use_bias: bool, if `True`, bias will be added to the output.
        kernel_initializer: Initializer for the convolution kernel. If `None`,
            the default initializer (`"glorot_uniform"`) will be used.
        bias_initializer: Initializer for the bias vector. If `None`, the
            default initializer (`"zeros"`) will be used.
        kernel_regularizer: Optional regularizer for the convolution kernel.
        bias_regularizer: Optional regularizer for the bias vector.
        activity_regularizer: Optional regularizer function for the output.
        kernel_constraint: Optional projection function to be applied to the
            kernel after being updated by an `Optimizer` (e.g. used to implement
            norm constraints or value constraints for layer weights). The
            function must take as input the unprojected variable and must return
            the projected variable (which must have the same shape). Constraints
            are not safe to use when doing asynchronous distributed training.
        bias_constraint: Optional projection function to be applied to the
            bias after being updated by an `Optimizer`.

    Input shape:

    - If `data_format="channels_last"`:
        A 4D tensor with shape: `(batch_size, height, width, channels)`
    - If `data_format="channels_first"`:
        A 4D tensor with shape: `(batch_size, channels, height, width)`

    Output shape:

    - If `data_format="channels_last"`:
        A 4D tensor with shape: `(batch_size, new_height, new_width, filters)`
    - If `data_format="channels_first"`:
        A 4D tensor with shape: `(batch_size, filters, new_height, new_width)`

    Returns:
        A 4D tensor representing
        `activation(conv2d_transpose(inputs, kernel) + bias)`.

    Raises:
        ValueError: when both `strides > 1` and `dilation_rate > 1`.

    References:
    - [A guide to convolution arithmetic for deep learning](
        https://arxiv.org/abs/1603.07285v1)
    - [Deconvolutional Networks](
        https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)

    Example:

    >>> x = np.random.rand(4, 10, 8, 128)
    >>> y = keras.layers.Conv2DTranspose(32, 2, 2, activation='relu')(x)
    >>> print(y.shape)
    (4, 20, 16, 32)
    """

    def __init__(
        self,
        filters,
        kernel_size,
        strides=(1, 1),
        padding="valid",
        output_padding=None,
        data_format=None,
        dilation_rate=(1, 1),
        activation=None,
        use_bias=True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        # Delegate everything to the rank-generic base class with rank=2.
        super().__init__(
            rank=2,
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            output_padding=output_padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/convolutional/separable_conv1d.py | keras/src/layers/convolutional/separable_conv1d.py | from keras.src.api_export import keras_export
from keras.src.layers.convolutional.base_separable_conv import BaseSeparableConv
@keras_export(
    [
        "keras.layers.SeparableConv1D",
        "keras.layers.SeparableConvolution1D",
    ]
)
class SeparableConv1D(BaseSeparableConv):
    """1D separable convolution layer.

    This layer performs a depthwise convolution that acts separately on
    channels, followed by a pointwise convolution that mixes channels.
    If `use_bias` is True and a bias initializer is provided,
    it adds a bias vector to the output. It then optionally applies an
    activation function to produce the final output.

    Args:
        filters: int, the dimensionality of the output space (i.e. the number
            of filters in the pointwise convolution).
        kernel_size: int or tuple/list of 1 integers, specifying the size of the
            depthwise convolution window.
        strides: int or tuple/list of 1 integers, specifying the stride length
            of the depthwise convolution. If only one int is specified, the same
            stride size will be used for all dimensions. `strides > 1` is
            incompatible with `dilation_rate > 1`.
        padding: string, either `"valid"` or `"same"` (case-insensitive).
            `"valid"` means no padding. `"same"` results in padding evenly to
            the left/right or up/down of the input. When `padding="same"` and
            `strides=1`, the output has the same size as the input.
        data_format: string, either `"channels_last"` or `"channels_first"`.
            The ordering of the dimensions in the inputs. `"channels_last"`
            corresponds to inputs with shape `(batch, steps, features)`
            while `"channels_first"` corresponds to inputs with shape
            `(batch, features, steps)`. It defaults to the `image_data_format`
            value found in your Keras config file at `~/.keras/keras.json`.
            If you never set it, then it will be `"channels_last"`.
        dilation_rate: int or tuple/list of 1 integers, specifying the dilation
            rate to use for dilated convolution. If only one int is specified,
            the same dilation rate will be used for all dimensions.
        depth_multiplier: The number of depthwise convolution output channels
            for each input channel. The total number of depthwise convolution
            output channels will be equal to `input_channel * depth_multiplier`.
        activation: Activation function. If `None`, no activation is applied.
        use_bias: bool, if `True`, bias will be added to the output.
        depthwise_initializer: An initializer for the depthwise convolution
            kernel. If None, then the default initializer (`"glorot_uniform"`)
            will be used.
        pointwise_initializer: An initializer for the pointwise convolution
            kernel. If None, then the default initializer (`"glorot_uniform"`)
            will be used.
        bias_initializer: An initializer for the bias vector. If None, the
            default initializer (`"zeros"`) will be used.
        depthwise_regularizer: Optional regularizer for the depthwise
            convolution kernel.
        pointwise_regularizer: Optional regularizer for the pointwise
            convolution kernel.
        bias_regularizer: Optional regularizer for the bias vector.
        activity_regularizer: Optional regularizer function for the output.
        depthwise_constraint: Optional projection function to be applied to the
            depthwise kernel after being updated by an `Optimizer` (e.g. used
            for norm constraints or value constraints for layer weights). The
            function must take as input the unprojected variable and must return
            the projected variable (which must have the same shape).
        pointwise_constraint: Optional projection function to be applied to the
            pointwise kernel after being updated by an `Optimizer`.
        bias_constraint: Optional projection function to be applied to the
            bias after being updated by an `Optimizer`.

    Input shape:
        - If `data_format="channels_last"`:
            A 3D tensor with shape: `(batch_shape, steps, channels)`
        - If `data_format="channels_first"`:
            A 3D tensor with shape: `(batch_shape, channels, steps)`

    Output shape:
        - If `data_format="channels_last"`:
            A 3D tensor with shape: `(batch_shape, new_steps, filters)`
        - If `data_format="channels_first"`:
            A 3D tensor with shape: `(batch_shape, filters, new_steps)`

    Returns:
        A 3D tensor representing
        `activation(separable_conv1d(inputs, kernel) + bias)`.

    Example:

    >>> x = np.random.rand(4, 10, 12)
    >>> y = keras.layers.SeparableConv1D(3, 4, 3, 2, activation='relu')(x)
    >>> print(y.shape)
    (4, 4, 4)
    """

    def __init__(
        self,
        filters,
        kernel_size,
        strides=1,
        padding="valid",
        data_format=None,
        dilation_rate=1,
        depth_multiplier=1,
        activation=None,
        use_bias=True,
        depthwise_initializer="glorot_uniform",
        pointwise_initializer="glorot_uniform",
        bias_initializer="zeros",
        depthwise_regularizer=None,
        pointwise_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        depthwise_constraint=None,
        pointwise_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        # Pure pass-through: all behavior lives in `BaseSeparableConv`;
        # `rank=1` selects the 1D specialization.
        super().__init__(
            rank=1,
            depth_multiplier=depth_multiplier,
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            activation=activation,
            use_bias=use_bias,
            depthwise_initializer=depthwise_initializer,
            pointwise_initializer=pointwise_initializer,
            bias_initializer=bias_initializer,
            depthwise_regularizer=depthwise_regularizer,
            pointwise_regularizer=pointwise_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            depthwise_constraint=depthwise_constraint,
            pointwise_constraint=pointwise_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/convolutional/conv_transpose_test.py | keras/src/layers/convolutional/conv_transpose_test.py | import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import testing
from keras.src.backend.common.backend_utils import (
_convert_conv_transpose_padding_args_from_keras_to_torch,
)
from keras.src.backend.common.backend_utils import (
compute_conv_transpose_output_shape,
)
from keras.src.backend.common.backend_utils import (
compute_conv_transpose_padding_args_for_jax,
)
def np_conv1d_transpose(
    x,
    kernel_weights,
    bias_weights,
    strides,
    padding,
    output_padding,
    data_format,
    dilation_rate,
):
    """NumPy reference implementation of a 1D transposed convolution.

    Used by the tests below as ground truth for `Conv1DTranspose`.

    Args:
        x: input array, `(batch, steps, channels)` for `"channels_last"`
            or `(batch, channels, steps)` for `"channels_first"`.
        kernel_weights: array of shape `(kernel, ch_out, ch_in)`.
        bias_weights: array of shape `(ch_out,)`.
        strides: int or tuple/list of 1 int.
        padding: `"valid"` or `"same"`.
        output_padding: optional int, extra length appended to the output.
        data_format: `"channels_last"` or `"channels_first"`.
        dilation_rate: int or tuple/list of 1 int.

    Returns:
        The transposed-convolution output, in the same data format as `x`.
    """
    # Normalize to channels_last internally; transpose back at the end.
    if data_format == "channels_first":
        x = x.transpose((0, 2, 1))
    if isinstance(strides, (tuple, list)):
        h_stride = strides[0]
    else:
        h_stride = strides
    if isinstance(dilation_rate, (tuple, list)):
        h_dilation = dilation_rate[0]
    else:
        h_dilation = dilation_rate
    h_kernel, ch_out, ch_in = kernel_weights.shape
    n_batch, h_x, _ = x.shape
    # Get output shape and padding. `x` is channels_last at this point,
    # hence the literal "channels_last" below.
    _, h_out, _ = compute_conv_transpose_output_shape(
        x.shape,
        kernel_weights.shape,
        ch_out,
        strides,
        padding,
        output_padding,
        "channels_last",
        dilation_rate,
    )
    jax_padding = compute_conv_transpose_padding_args_for_jax(
        input_shape=x.shape,
        kernel_shape=kernel_weights.shape,
        strides=strides,
        padding=padding,
        output_padding=output_padding,
        dilation_rate=dilation_rate,
    )
    # Amount to crop from the left of the over-allocated output buffer.
    h_pad_side1 = h_kernel - 1 - jax_padding[0][0]
    if h_dilation > 1:
        # Increase kernel size: insert zeros between taps so a dilated
        # kernel becomes an equivalent larger dense kernel.
        new_h_kernel = h_kernel + (h_dilation - 1) * (h_kernel - 1)
        new_kernel_size_tuple = (new_h_kernel,)
        new_kernel_weights = np.zeros(
            (*new_kernel_size_tuple, ch_out, ch_in),
            dtype=kernel_weights.dtype,
        )
        new_kernel_weights[::h_dilation] = kernel_weights
        kernel_weights = new_kernel_weights
        h_kernel = kernel_weights.shape[0]
    # Compute output: scatter-add one kernel-sized patch per input step.
    output = np.zeros([n_batch, h_out + h_kernel, ch_out])
    for nb in range(n_batch):
        for h_x_idx in range(h_x):
            h_out_idx = h_x_idx * h_stride  # Index in output
            output[nb, h_out_idx : h_out_idx + h_kernel, :] += np.sum(
                kernel_weights[:, :, :] * x[nb, h_x_idx, :], axis=-1
            )
    output = output + bias_weights
    # Cut padding results from output
    output = output[:, h_pad_side1 : h_out + h_pad_side1]
    if data_format == "channels_first":
        output = output.transpose((0, 2, 1))
    return output
def np_conv2d_transpose(
    x,
    kernel_weights,
    bias_weights,
    strides,
    padding,
    output_padding,
    data_format,
    dilation_rate,
):
    """NumPy reference implementation of a 2D transposed convolution.

    2D analogue of `np_conv1d_transpose`; see that function for the
    parameter contract. `kernel_weights` has shape
    `(h_kernel, w_kernel, ch_out, ch_in)`.
    """
    # Normalize to channels_last internally; transpose back at the end.
    if data_format == "channels_first":
        x = x.transpose((0, 2, 3, 1))
    if isinstance(strides, (tuple, list)):
        h_stride, w_stride = strides
    else:
        h_stride = strides
        w_stride = strides
    if isinstance(dilation_rate, (tuple, list)):
        h_dilation, w_dilation = dilation_rate
    else:
        h_dilation = dilation_rate
        w_dilation = dilation_rate
    h_kernel, w_kernel, ch_out, ch_in = kernel_weights.shape
    n_batch, h_x, w_x, _ = x.shape
    # Get output shape and padding. `x` is channels_last at this point,
    # hence the literal "channels_last" below.
    _, h_out, w_out, _ = compute_conv_transpose_output_shape(
        x.shape,
        kernel_weights.shape,
        ch_out,
        strides,
        padding,
        output_padding,
        "channels_last",
        dilation_rate,
    )
    jax_padding = compute_conv_transpose_padding_args_for_jax(
        input_shape=x.shape,
        kernel_shape=kernel_weights.shape,
        strides=strides,
        padding=padding,
        output_padding=output_padding,
        dilation_rate=dilation_rate,
    )
    # Amounts to crop from the top/left of the over-allocated output.
    h_pad_side1 = h_kernel - 1 - jax_padding[0][0]
    w_pad_side1 = w_kernel - 1 - jax_padding[1][0]
    if h_dilation > 1 or w_dilation > 1:
        # Increase kernel size: insert zeros between taps so a dilated
        # kernel becomes an equivalent larger dense kernel.
        new_h_kernel = h_kernel + (h_dilation - 1) * (h_kernel - 1)
        new_w_kernel = w_kernel + (w_dilation - 1) * (w_kernel - 1)
        new_kernel_size_tuple = (new_h_kernel, new_w_kernel)
        new_kernel_weights = np.zeros(
            (*new_kernel_size_tuple, ch_out, ch_in),
            dtype=kernel_weights.dtype,
        )
        new_kernel_weights[::h_dilation, ::w_dilation] = kernel_weights
        kernel_weights = new_kernel_weights
        h_kernel, w_kernel = kernel_weights.shape[:2]
    # Compute output: scatter-add one kernel-sized patch per input pixel.
    output = np.zeros([n_batch, h_out + h_kernel, w_out + w_kernel, ch_out])
    for nb in range(n_batch):
        for h_x_idx in range(h_x):
            h_out_idx = h_x_idx * h_stride  # Index in output
            for w_x_idx in range(w_x):
                w_out_idx = w_x_idx * w_stride
                output[
                    nb,
                    h_out_idx : h_out_idx + h_kernel,
                    w_out_idx : w_out_idx + w_kernel,
                    :,
                ] += np.sum(
                    kernel_weights[:, :, :, :] * x[nb, h_x_idx, w_x_idx, :],
                    axis=-1,
                )
    output = output + bias_weights
    # Cut padding results from output
    output = output[
        :,
        h_pad_side1 : h_out + h_pad_side1,
        w_pad_side1 : w_out + w_pad_side1,
    ]
    if data_format == "channels_first":
        output = output.transpose((0, 3, 1, 2))
    return output
def np_conv3d_transpose(
    x,
    kernel_weights,
    bias_weights,
    strides,
    padding,
    output_padding,
    data_format,
    dilation_rate,
):
    """NumPy reference implementation of a 3D transposed convolution.

    3D analogue of `np_conv1d_transpose`; see that function for the
    parameter contract. `kernel_weights` has shape
    `(h_kernel, w_kernel, d_kernel, ch_out, ch_in)`.
    """
    # Normalize to channels_last internally; transpose back at the end.
    if data_format == "channels_first":
        x = x.transpose((0, 2, 3, 4, 1))
    if isinstance(strides, (tuple, list)):
        h_stride, w_stride, d_stride = strides
    else:
        h_stride = strides
        w_stride = strides
        d_stride = strides
    if isinstance(dilation_rate, (tuple, list)):
        h_dilation, w_dilation, d_dilation = dilation_rate
    else:
        h_dilation = dilation_rate
        w_dilation = dilation_rate
        d_dilation = dilation_rate
    h_kernel, w_kernel, d_kernel, ch_out, ch_in = kernel_weights.shape
    n_batch, h_x, w_x, d_x, _ = x.shape
    # Get output shape and padding. `x` has already been transposed to
    # channels_last above, so its shape must be interpreted as
    # channels_last here (matches the 1D/2D helpers). Previously the
    # caller's `data_format` was forwarded, which would miscompute the
    # shape for channels_first inputs.
    _, h_out, w_out, d_out, _ = compute_conv_transpose_output_shape(
        x.shape,
        kernel_weights.shape,
        ch_out,
        strides,
        padding,
        output_padding,
        "channels_last",
        dilation_rate,
    )
    jax_padding = compute_conv_transpose_padding_args_for_jax(
        input_shape=x.shape,
        kernel_shape=kernel_weights.shape,
        strides=strides,
        padding=padding,
        output_padding=output_padding,
        dilation_rate=dilation_rate,
    )
    # Amounts to crop from the leading side of each spatial dimension of
    # the over-allocated output buffer.
    h_pad_side1 = h_kernel - 1 - jax_padding[0][0]
    w_pad_side1 = w_kernel - 1 - jax_padding[1][0]
    d_pad_side1 = d_kernel - 1 - jax_padding[2][0]
    if h_dilation > 1 or w_dilation > 1 or d_dilation > 1:
        # Increase kernel size: insert zeros between taps so a dilated
        # kernel becomes an equivalent larger dense kernel.
        new_h_kernel = h_kernel + (h_dilation - 1) * (h_kernel - 1)
        new_w_kernel = w_kernel + (w_dilation - 1) * (w_kernel - 1)
        new_d_kernel = d_kernel + (d_dilation - 1) * (d_kernel - 1)
        new_kernel_size_tuple = (new_h_kernel, new_w_kernel, new_d_kernel)
        new_kernel_weights = np.zeros(
            (*new_kernel_size_tuple, ch_out, ch_in),
            dtype=kernel_weights.dtype,
        )
        new_kernel_weights[::h_dilation, ::w_dilation, ::d_dilation] = (
            kernel_weights
        )
        kernel_weights = new_kernel_weights
        h_kernel, w_kernel, d_kernel = kernel_weights.shape[:3]
    # Compute output: scatter-add one kernel-sized patch per input voxel.
    output = np.zeros(
        [
            n_batch,
            h_out + h_kernel,
            w_out + w_kernel,
            d_out + d_kernel,
            ch_out,
        ]
    )
    for nb in range(n_batch):
        for h_x_idx in range(h_x):
            h_out_idx = h_x_idx * h_stride  # Index in output
            for w_x_idx in range(w_x):
                w_out_idx = w_x_idx * w_stride
                for d_x_idx in range(d_x):
                    d_out_idx = d_x_idx * d_stride
                    output[
                        nb,
                        h_out_idx : h_out_idx + h_kernel,
                        w_out_idx : w_out_idx + w_kernel,
                        d_out_idx : d_out_idx + d_kernel,
                        :,
                    ] += np.sum(
                        kernel_weights[:, :, :, :, :]
                        * x[nb, h_x_idx, w_x_idx, d_x_idx, :],
                        axis=-1,
                    )
    output = output + bias_weights
    # Cut padding results from output
    output = output[
        :,
        h_pad_side1 : h_out + h_pad_side1,
        w_pad_side1 : w_out + w_pad_side1,
        d_pad_side1 : d_out + d_pad_side1,
    ]
    if data_format == "channels_first":
        output = output.transpose((0, 4, 1, 2, 3))
    return output
class ConvTransposeBasicTest(testing.TestCase):
    """Smoke tests for `Conv{1,2,3}DTranspose`.

    Checks output shapes, weight counts, and constructor argument
    validation. The parameterized dicts below are the test coverage
    matrix; their literal values are load-bearing.
    """

    @parameterized.parameters(
        {
            "filters": 5,
            "kernel_size": 2,
            "strides": 2,
            "padding": "valid",
            "output_padding": None,
            "data_format": "channels_last",
            "dilation_rate": 1,
            "input_shape": (2, 8, 4),
            "output_shape": (2, 16, 5),
        },
        {
            "filters": 6,
            "kernel_size": 2,
            "strides": 3,
            "padding": "same",
            "output_padding": 2,
            "data_format": "channels_last",
            "dilation_rate": (1,),
            "input_shape": (2, 8, 4),
            "output_shape": (2, 23, 6),
        },
        {
            "filters": 6,
            "kernel_size": (2,),
            "strides": (2,),
            "padding": "valid",
            "output_padding": None,
            "data_format": "channels_last",
            "dilation_rate": 1,
            "input_shape": (2, 8, 4),
            "output_shape": (2, 16, 6),
        },
    )
    @pytest.mark.requires_trainable_backend
    def test_conv1d_transpose_basic(
        self,
        filters,
        kernel_size,
        strides,
        padding,
        output_padding,
        data_format,
        dilation_rate,
        input_shape,
        output_shape,
    ):
        self.run_layer_test(
            layers.Conv1DTranspose,
            init_kwargs={
                "filters": filters,
                "kernel_size": kernel_size,
                "strides": strides,
                "padding": padding,
                "output_padding": output_padding,
                "data_format": data_format,
                "dilation_rate": dilation_rate,
            },
            input_shape=input_shape,
            expected_output_shape=output_shape,
            # kernel + bias
            expected_num_trainable_weights=2,
            expected_num_non_trainable_weights=0,
            expected_num_losses=0,
            supports_masking=False,
        )

    @parameterized.parameters(
        {
            "filters": 5,
            "kernel_size": 2,
            "strides": 2,
            "padding": "valid",
            "output_padding": None,
            "data_format": "channels_last",
            "dilation_rate": 1,
            "input_shape": (2, 8, 8, 4),
            "output_shape": (2, 16, 16, 5),
        },
        {
            "filters": 6,
            "kernel_size": 2,
            "strides": 3,
            "padding": "same",
            "output_padding": 2,
            "data_format": "channels_last",
            "dilation_rate": (1, 1),
            "input_shape": (2, 8, 8, 4),
            "output_shape": (2, 23, 23, 6),
        },
        {
            "filters": 6,
            "kernel_size": (2, 3),
            "strides": (2, 1),
            "padding": "valid",
            "output_padding": None,
            "data_format": "channels_first",
            "dilation_rate": (1, 1),
            "input_shape": (2, 4, 8, 8),
            "output_shape": (2, 6, 16, 10),
        },
        {
            "filters": 2,
            "kernel_size": (7, 7),
            "strides": (16, 16),
            "padding": "valid",
            "output_padding": None,
            "data_format": "channels_last",
            "dilation_rate": (1, 1),
            "input_shape": (1, 14, 14, 2),
            "output_shape": (1, 224, 224, 2),
        },
    )
    @pytest.mark.requires_trainable_backend
    def test_conv2d_transpose_basic(
        self,
        filters,
        kernel_size,
        strides,
        padding,
        output_padding,
        data_format,
        dilation_rate,
        input_shape,
        output_shape,
    ):
        # TF's CPU kernels do not implement channels_first conv transpose.
        if (
            data_format == "channels_first"
            and backend.backend() == "tensorflow"
        ):
            pytest.skip("channels_first unsupported on CPU with TF")
        self.run_layer_test(
            layers.Conv2DTranspose,
            init_kwargs={
                "filters": filters,
                "kernel_size": kernel_size,
                "strides": strides,
                "padding": padding,
                "output_padding": output_padding,
                "data_format": data_format,
                "dilation_rate": dilation_rate,
            },
            input_shape=input_shape,
            expected_output_shape=output_shape,
            expected_num_trainable_weights=2,
            expected_num_non_trainable_weights=0,
            expected_num_losses=0,
            supports_masking=False,
        )

    @parameterized.parameters(
        {
            "filters": 5,
            "kernel_size": 2,
            "strides": 2,
            "padding": "valid",
            "output_padding": None,
            "data_format": "channels_last",
            "dilation_rate": 1,
            "input_shape": (2, 8, 8, 8, 4),
            "output_shape": (2, 16, 16, 16, 5),
        },
        {
            "filters": 6,
            "kernel_size": 2,
            "strides": 3,
            "padding": "same",
            "output_padding": 2,
            "data_format": "channels_last",
            "dilation_rate": (1, 1, 1),
            "input_shape": (2, 8, 8, 8, 4),
            "output_shape": (2, 23, 23, 23, 6),
        },
        {
            "filters": 6,
            "kernel_size": (2, 2, 3),
            "strides": (2, 1, 2),
            "padding": "valid",
            "output_padding": None,
            "data_format": "channels_last",
            "dilation_rate": (1, 1, 1),
            "input_shape": (2, 8, 8, 8, 4),
            "output_shape": (2, 16, 9, 17, 6),
        },
    )
    @pytest.mark.requires_trainable_backend
    def test_conv3d_transpose_basic(
        self,
        filters,
        kernel_size,
        strides,
        padding,
        output_padding,
        data_format,
        dilation_rate,
        input_shape,
        output_shape,
    ):
        self.run_layer_test(
            layers.Conv3DTranspose,
            init_kwargs={
                "filters": filters,
                "kernel_size": kernel_size,
                "strides": strides,
                "padding": padding,
                "output_padding": output_padding,
                "data_format": data_format,
                "dilation_rate": dilation_rate,
            },
            input_shape=input_shape,
            expected_output_shape=output_shape,
            expected_num_trainable_weights=2,
            expected_num_non_trainable_weights=0,
            expected_num_losses=0,
            supports_masking=False,
        )

    def test_bad_init_args(self):
        """Constructor must reject invalid argument combinations."""
        # `filters` is not positive.
        with self.assertRaisesRegex(
            ValueError,
            "Invalid value for argument `filters`. Expected a "
            "strictly positive value. Received filters=0.",
        ):
            layers.Conv1DTranspose(filters=0, kernel_size=1)
        # `kernel_size` has 0.
        with self.assertRaisesRegex(
            ValueError,
            r"The `kernel_size` argument must be a tuple of "
            r"\d+ integers. Received kernel_size=\(1, 0\), including values"
            r" \{0\} that do not satisfy `value > 0`",
        ):
            layers.Conv2DTranspose(filters=2, kernel_size=(1, 0))
        # `strides` has 0.
        with self.assertRaisesRegex(
            ValueError,
            r"The `strides` argument must be a tuple of \d+ "
            r"integers. Received strides=\(1, 0\), including values \{0\} "
            r"that do not satisfy `value > 0`",
        ):
            layers.Conv2DTranspose(
                filters=2, kernel_size=(2, 2), strides=(1, 0)
            )
        # `dilation_rate > 1` while `strides > 1`.
        with self.assertRaisesRegex(
            ValueError,
            r"`strides > 1` not supported in conjunction with "
            r"`dilation_rate > 1`. Received: strides=\(2, 2\) and "
            r"dilation_rate=\(2, 1\)",
        ):
            layers.Conv2DTranspose(
                filters=2, kernel_size=(2, 2), strides=2, dilation_rate=(2, 1)
            )
class ConvTransposeCorrectnessTest(testing.TestCase):
    """Numerical-correctness tests for `Conv{1,2,3}DTranspose`.

    Layer outputs are compared against the pure-NumPy reference
    implementations (`np_conv{1,2,3}d_transpose`) defined above. The
    consistency test additionally exercises the Torch-specific padding
    conversion edge cases.
    """

    @parameterized.parameters(
        {
            "filters": 5,
            "kernel_size": 2,
            "strides": 2,
            "padding": "valid",
            "output_padding": None,
            "data_format": "channels_last",
            "dilation_rate": 1,
        },
        {
            "filters": 6,
            "kernel_size": 2,
            "strides": 3,
            "padding": "same",
            "output_padding": 2,
            "data_format": "channels_last",
            "dilation_rate": (1,),
        },
        {
            "filters": 6,
            "kernel_size": (2,),
            "strides": (2,),
            "padding": "valid",
            "output_padding": None,
            "data_format": "channels_last",
            "dilation_rate": 1,
        },
    )
    def test_conv1d_transpose(
        self,
        filters,
        kernel_size,
        strides,
        padding,
        output_padding,
        data_format,
        dilation_rate,
    ):
        layer = layers.Conv1DTranspose(
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            output_padding=output_padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
        )
        inputs = np.random.normal(size=[2, 8, 4])
        layer.build(input_shape=inputs.shape)
        # Assign known random weights so the NumPy reference sees the
        # exact same parameters as the layer.
        kernel_shape = layer.kernel.shape
        kernel_weights = np.random.normal(size=kernel_shape)
        bias_weights = np.random.normal(size=(filters,))
        layer.kernel.assign(kernel_weights)
        layer.bias.assign(bias_weights)
        outputs = layer(inputs)
        expected = np_conv1d_transpose(
            inputs,
            kernel_weights,
            bias_weights,
            strides,
            padding,
            output_padding,
            data_format,
            dilation_rate,
        )
        self.assertAllClose(
            outputs, expected, atol=1e-5, tpu_atol=1e-1, tpu_rtol=1e-1
        )

    @parameterized.parameters(
        {
            "filters": 5,
            "kernel_size": 2,
            "strides": 2,
            "padding": "valid",
            "output_padding": None,
            "data_format": "channels_last",
            "dilation_rate": 1,
        },
        {
            "filters": 6,
            "kernel_size": 7,
            "strides": 16,
            "padding": "same",
            "output_padding": 2,
            "data_format": "channels_last",
            "dilation_rate": (1, 1),
        },
        {
            "filters": 6,
            "kernel_size": (2, 3),
            "strides": (2, 1),
            "padding": "valid",
            "output_padding": None,
            "data_format": "channels_last",
            "dilation_rate": (1, 1),
        },
        {
            "filters": 2,
            "kernel_size": (7, 7),
            "strides": (16, 16),
            "padding": "valid",
            "output_padding": None,
            "data_format": "channels_last",
            "dilation_rate": (1, 1),
        },
    )
    def test_conv2d_transpose(
        self,
        filters,
        kernel_size,
        strides,
        padding,
        output_padding,
        data_format,
        dilation_rate,
    ):
        layer = layers.Conv2DTranspose(
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            output_padding=output_padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
        )
        inputs = np.random.normal(size=[2, 14, 14, 4])
        layer.build(input_shape=inputs.shape)
        kernel_shape = layer.kernel.shape
        kernel_weights = np.random.normal(size=kernel_shape)
        bias_weights = np.random.normal(size=(filters,))
        layer.kernel.assign(kernel_weights)
        layer.bias.assign(bias_weights)
        outputs = layer(inputs)
        expected = np_conv2d_transpose(
            inputs,
            kernel_weights,
            bias_weights,
            strides,
            padding,
            output_padding,
            data_format,
            dilation_rate,
        )
        self.assertAllClose(
            outputs, expected, atol=1e-5, tpu_atol=1e-1, tpu_rtol=1e-1
        )

    @parameterized.parameters(
        {
            "filters": 5,
            "kernel_size": 2,
            "strides": 2,
            "padding": "valid",
            "output_padding": None,
            "data_format": "channels_last",
            "dilation_rate": 1,
        },
        {
            "filters": 6,
            "kernel_size": 2,
            "strides": 3,
            "padding": "same",
            "output_padding": 2,
            "data_format": "channels_last",
            "dilation_rate": (1, 1, 1),
        },
        {
            "filters": 6,
            "kernel_size": (2, 2, 3),
            "strides": (2, 1, 2),
            "padding": "valid",
            "output_padding": None,
            "data_format": "channels_last",
            "dilation_rate": (1, 1, 1),
        },
    )
    def test_conv3d_transpose(
        self,
        filters,
        kernel_size,
        strides,
        padding,
        output_padding,
        data_format,
        dilation_rate,
    ):
        layer = layers.Conv3DTranspose(
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            output_padding=output_padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
        )
        inputs = np.random.normal(size=[2, 8, 8, 8, 4])
        layer.build(input_shape=inputs.shape)
        kernel_shape = layer.kernel.shape
        kernel_weights = np.random.normal(size=kernel_shape)
        bias_weights = np.random.normal(size=(filters,))
        layer.kernel.assign(kernel_weights)
        layer.bias.assign(bias_weights)
        outputs = layer(inputs)
        expected = np_conv3d_transpose(
            inputs,
            kernel_weights,
            bias_weights,
            strides,
            padding,
            output_padding,
            data_format,
            dilation_rate,
        )
        self.assertAllClose(
            outputs, expected, atol=1e-5, tpu_atol=1e-1, tpu_rtol=1e-1
        )

    @parameterized.product(
        kernel_size=list(range(1, 5)),
        strides=list(range(1, 5)),
        padding=["same", "valid"],
        output_padding=[None] + list(range(1, 5)),
    )
    def test_conv1d_transpose_consistency(
        self, kernel_size, strides, padding, output_padding
    ):
        """Test conv transpose, on an 1D array of size 3, against several
        convolution parameters. In particular, tests if Torch inconsistencies
        are raised.
        """
        # output_padding cannot be greater than strides
        if isinstance(output_padding, int) and output_padding >= strides:
            pytest.skip(
                "`output_padding` greater than `strides` is not supported"
            )
        if backend.config.image_data_format() == "channels_last":
            input_shape = (1, 3, 1)
        else:
            input_shape = (1, 1, 3)
        input = np.ones(shape=input_shape)
        kernel_weights = np.arange(1, kernel_size + 1).reshape(
            (kernel_size, 1, 1)
        )
        # Expected result
        expected_res = np_conv1d_transpose(
            x=input,
            kernel_weights=kernel_weights,
            bias_weights=np.zeros(shape=(1,)),
            strides=strides,
            padding=padding,
            output_padding=output_padding,
            data_format=backend.config.image_data_format(),
            dilation_rate=1,
        )
        # keras layer
        kc_layer = layers.Conv1DTranspose(
            filters=1,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            output_padding=output_padding,
            dilation_rate=1,
        )
        kc_layer.build(input_shape=input_shape)
        kc_layer.kernel.assign(kernel_weights)
        # Special cases for Torch
        if backend.backend() == "torch":
            # Args that cause output_padding >= strides
            # are clamped with a warning.
            if (kernel_size, strides, padding, output_padding) in [
                (2, 1, "same", None),
                (4, 1, "same", None),
            ]:
                clamped_output_padding = strides - 1  # usually 0 when stride=1
                expected_res = np_conv1d_transpose(
                    x=input,
                    kernel_weights=kernel_weights,
                    bias_weights=np.zeros(shape=(1,)),
                    strides=strides,
                    padding=padding,
                    output_padding=clamped_output_padding,
                    data_format=backend.config.image_data_format(),
                    dilation_rate=1,
                )
                with pytest.warns(UserWarning):
                    kc_res = kc_layer(input)
                self.assertAllClose(expected_res, kc_res, atol=1e-5)
                return
            # torch_padding > 0 and torch_output_padding > 0 case
            # Torch output differs from TF.
            (
                torch_padding,
                torch_output_padding,
            ) = _convert_conv_transpose_padding_args_from_keras_to_torch(
                kernel_size=kernel_size,
                stride=strides,
                dilation_rate=1,
                padding=padding,
                output_padding=output_padding,
            )
            if torch_padding > 0 and torch_output_padding > 0:
                with pytest.raises(AssertionError):
                    kc_res = kc_layer(input)
                    self.assertAllClose(expected_res, kc_res, atol=1e-5)
                return
        # Compare results
        kc_res = kc_layer(input)
        self.assertAllClose(expected_res, kc_res, atol=1e-5)

    @parameterized.product(
        kernel_size=list(range(1, 5)),
        strides=list(range(1, 5)),
        padding=["same", "valid"],
        output_padding=[None] + list(range(1, 5)),
    )
    def test_shape_inference_static_unknown_shape(
        self, kernel_size, strides, padding, output_padding
    ):
        # With unknown spatial dims, the inferred output shape must keep
        # them unknown while fixing batch/channels.
        if backend.config.image_data_format() == "channels_last":
            input_shape = (None, None, 3)
            output_tensor_shape = (None, None, None, 2)
        else:
            input_shape = (3, None, None)
            output_tensor_shape = (None, 2, None, None)
        x = layers.Input(shape=input_shape)
        x = layers.Conv2DTranspose(
            filters=2,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            output_padding=output_padding,
            dilation_rate=1,
        )(x)
        self.assertEqual(x.shape, output_tensor_shape)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/convolutional/depthwise_conv2d.py | keras/src/layers/convolutional/depthwise_conv2d.py | from keras.src.api_export import keras_export
from keras.src.layers.convolutional.base_depthwise_conv import BaseDepthwiseConv
@keras_export("keras.layers.DepthwiseConv2D")
class DepthwiseConv2D(BaseDepthwiseConv):
    """2D depthwise convolution layer.

    Depthwise convolution is a type of convolution in which each input channel
    is convolved with a different kernel (called a depthwise kernel). You can
    understand depthwise convolution as the first step in a depthwise separable
    convolution.

    It is implemented via the following steps:

    - Split the input into individual channels.
    - Convolve each channel with an individual depthwise kernel with
      `depth_multiplier` output channels.
    - Concatenate the convolved outputs along the channels axis.

    Unlike a regular 2D convolution, depthwise convolution does not mix
    information across different input channels.

    The `depth_multiplier` argument determines how many filters are applied to
    one input channel. As such, it controls the amount of output channels that
    are generated per input channel in the depthwise step.

    Args:
        kernel_size: int or tuple/list of 2 integer, specifying the size of the
            depthwise convolution window.
        strides: int or tuple/list of 2 integer, specifying the stride length
            of the depthwise convolution. `strides > 1` is incompatible with
            `dilation_rate > 1`.
        padding: string, either `"valid"` or `"same"` (case-insensitive).
            `"valid"` means no padding. `"same"` results in padding evenly to
            the left/right or up/down of the input. When `padding="same"` and
            `strides=1`, the output has the same size as the input.
        depth_multiplier: The number of depthwise convolution output channels
            for each input channel. The total number of depthwise convolution
            output channels will be equal to `input_channel * depth_multiplier`.
        data_format: string, either `"channels_last"` or `"channels_first"`.
            The ordering of the dimensions in the inputs. `"channels_last"`
            corresponds to inputs with shape `(batch, height, width, channels)`
            while `"channels_first"` corresponds to inputs with shape
            `(batch, channels, height, width)`. It defaults to the
            `image_data_format` value found in your Keras config file
            at `~/.keras/keras.json`.
            If you never set it, then it will be `"channels_last"`.
        dilation_rate: int or tuple/list of 2 integers, specifying the dilation
            rate to use for dilated convolution.
        activation: Activation function. If `None`, no activation is applied.
        use_bias: bool, if `True`, bias will be added to the output.
        depthwise_initializer: Initializer for the convolution kernel.
            If `None`, the default initializer (`"glorot_uniform"`)
            will be used.
        bias_initializer: Initializer for the bias vector. If `None`, the
            default initializer (`"zeros"`) will be used.
        depthwise_regularizer: Optional regularizer for the convolution kernel.
        bias_regularizer: Optional regularizer for the bias vector.
        activity_regularizer: Optional regularizer function for the output.
        depthwise_constraint: Optional projection function to be applied to the
            kernel after being updated by an `Optimizer` (e.g. used to implement
            norm constraints or value constraints for layer weights). The
            function must take as input the unprojected variable and must return
            the projected variable (which must have the same shape). Constraints
            are not safe to use when doing asynchronous distributed training.
        bias_constraint: Optional projection function to be applied to the
            bias after being updated by an `Optimizer`.

    Input shape:
        - If `data_format="channels_last"`:
            A 4D tensor with shape: `(batch_size, height, width, channels)`
        - If `data_format="channels_first"`:
            A 4D tensor with shape: `(batch_size, channels, height, width)`

    Output shape:
        - If `data_format="channels_last"`:
            A 4D tensor with shape:
            `(batch_size, new_height, new_width, channels * depth_multiplier)`
        - If `data_format="channels_first"`:
            A 4D tensor with shape:
            `(batch_size, channels * depth_multiplier, new_height, new_width)`

    Returns:
        A 4D tensor representing
        `activation(depthwise_conv2d(inputs, kernel) + bias)`.

    Raises:
        ValueError: when both `strides > 1` and `dilation_rate > 1`.

    Example:

    >>> x = np.random.rand(4, 10, 10, 12)
    >>> y = keras.layers.DepthwiseConv2D(kernel_size=3, activation='relu')(x)
    >>> print(y.shape)
    (4, 8, 8, 12)
    """

    def __init__(
        self,
        kernel_size,
        strides=(1, 1),
        padding="valid",
        depth_multiplier=1,
        data_format=None,
        dilation_rate=(1, 1),
        activation=None,
        use_bias=True,
        depthwise_initializer="glorot_uniform",
        bias_initializer="zeros",
        depthwise_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        depthwise_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        # Pure pass-through: all behavior lives in `BaseDepthwiseConv`;
        # `rank=2` selects the 2D specialization.
        super().__init__(
            rank=2,
            depth_multiplier=depth_multiplier,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            activation=activation,
            use_bias=use_bias,
            depthwise_initializer=depthwise_initializer,
            bias_initializer=bias_initializer,
            depthwise_regularizer=depthwise_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            depthwise_constraint=depthwise_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/convolutional/depthwise_conv1d.py | keras/src/layers/convolutional/depthwise_conv1d.py | from keras.src.api_export import keras_export
from keras.src.layers.convolutional.base_depthwise_conv import BaseDepthwiseConv
@keras_export("keras.layers.DepthwiseConv1D")
class DepthwiseConv1D(BaseDepthwiseConv):
    """1D depthwise convolution layer.

    Depthwise convolution is a type of convolution in which each input channel
    is convolved with a different kernel (called a depthwise kernel). You can
    understand depthwise convolution as the first step in a depthwise separable
    convolution.

    It is implemented via the following steps:

    - Split the input into individual channels.
    - Convolve each channel with an individual depthwise kernel with
      `depth_multiplier` output channels.
    - Concatenate the convolved outputs along the channels axis.

    Unlike a regular 1D convolution, depthwise convolution does not mix
    information across different input channels.

    The `depth_multiplier` argument determines how many filters are applied to
    one input channel. As such, it controls the amount of output channels that
    are generated per input channel in the depthwise step.

    Args:
        kernel_size: int or tuple/list of 1 integer, specifying the size of the
            depthwise convolution window.
        strides: int or tuple/list of 1 integer, specifying the stride length
            of the convolution. `strides > 1` is incompatible with
            `dilation_rate > 1`.
        padding: string, either `"valid"` or `"same"` (case-insensitive).
            `"valid"` means no padding. `"same"` results in padding evenly to
            the left/right or up/down of the input. When `padding="same"` and
            `strides=1`, the output has the same size as the input.
        depth_multiplier: The number of depthwise convolution output channels
            for each input channel. The total number of depthwise convolution
            output channels will be equal to `input_channel * depth_multiplier`.
        data_format: string, either `"channels_last"` or `"channels_first"`.
            The ordering of the dimensions in the inputs. `"channels_last"`
            corresponds to inputs with shape `(batch, steps, features)`
            while `"channels_first"` corresponds to inputs with shape
            `(batch, features, steps)`. It defaults to the `image_data_format`
            value found in your Keras config file at `~/.keras/keras.json`.
            If you never set it, then it will be `"channels_last"`.
        dilation_rate: int or tuple/list of 1 integers, specifying the dilation
            rate to use for dilated convolution.
        activation: Activation function. If `None`, no activation is applied.
        use_bias: bool, if `True`, bias will be added to the output.
        depthwise_initializer: Initializer for the convolution kernel.
            If `None`, the default initializer (`"glorot_uniform"`)
            will be used.
        bias_initializer: Initializer for the bias vector. If `None`, the
            default initializer (`"zeros"`) will be used.
        depthwise_regularizer: Optional regularizer for the convolution kernel.
        bias_regularizer: Optional regularizer for the bias vector.
        activity_regularizer: Optional regularizer function for the output.
        depthwise_constraint: Optional projection function to be applied to the
            kernel after being updated by an `Optimizer` (e.g. used to implement
            norm constraints or value constraints for layer weights). The
            function must take as input the unprojected variable and must return
            the projected variable (which must have the same shape). Constraints
            are not safe to use when doing asynchronous distributed training.
        bias_constraint: Optional projection function to be applied to the
            bias after being updated by an `Optimizer`.

    Input shape:

    - If `data_format="channels_last"`:
        A 3D tensor with shape: `(batch_shape, steps, channels)`
    - If `data_format="channels_first"`:
        A 3D tensor with shape: `(batch_shape, channels, steps)`

    Output shape:

    - If `data_format="channels_last"`:
        A 3D tensor with shape:
        `(batch_shape, new_steps, channels * depth_multiplier)`
    - If `data_format="channels_first"`:
        A 3D tensor with shape:
        `(batch_shape, channels * depth_multiplier, new_steps)`

    Returns:
        A 3D tensor representing
        `activation(depthwise_conv1d(inputs, kernel) + bias)`.

    Raises:
        ValueError: when both `strides > 1` and `dilation_rate > 1`.

    Example:

    >>> x = np.random.rand(4, 10, 12)
    >>> y = keras.layers.DepthwiseConv1D(
    ...     3, strides=2, depth_multiplier=3, activation='relu'
    ... )(x)
    >>> print(y.shape)
    (4, 4, 36)
    """

    def __init__(
        self,
        kernel_size,
        strides=1,
        padding="valid",
        depth_multiplier=1,
        data_format=None,
        dilation_rate=1,
        activation=None,
        use_bias=True,
        depthwise_initializer="glorot_uniform",
        bias_initializer="zeros",
        depthwise_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        depthwise_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        # Thin wrapper: pin `rank=1` and delegate all logic to the base class.
        super().__init__(
            rank=1,
            depth_multiplier=depth_multiplier,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            activation=activation,
            use_bias=use_bias,
            depthwise_initializer=depthwise_initializer,
            bias_initializer=bias_initializer,
            depthwise_regularizer=depthwise_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            depthwise_constraint=depthwise_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/convolutional/separable_conv2d.py | keras/src/layers/convolutional/separable_conv2d.py | from keras.src.api_export import keras_export
from keras.src.layers.convolutional.base_separable_conv import BaseSeparableConv
@keras_export(
[
"keras.layers.SeparableConv2D",
"keras.layers.SeparableConvolution2D",
]
)
class SeparableConv2D(BaseSeparableConv):
    """2D separable convolution layer.

    This layer performs a depthwise convolution that acts separately on
    channels, followed by a pointwise convolution that mixes channels.
    If `use_bias` is True and a bias initializer is provided,
    it adds a bias vector to the output. It then optionally applies an
    activation function to produce the final output.

    Args:
        filters: int, the dimensionality of the output space (i.e. the number
            of filters in the pointwise convolution).
        kernel_size: int or tuple/list of 2 integers, specifying the size of the
            depthwise convolution window.
        strides: int or tuple/list of 2 integers, specifying the stride length
            of the depthwise convolution. If only one int is specified, the same
            stride size will be used for all dimensions. `strides > 1` is
            incompatible with `dilation_rate > 1`.
        padding: string, either `"valid"` or `"same"` (case-insensitive).
            `"valid"` means no padding. `"same"` results in padding evenly to
            the left/right or up/down of the input. When `padding="same"` and
            `strides=1`, the output has the same size as the input.
        data_format: string, either `"channels_last"` or `"channels_first"`.
            The ordering of the dimensions in the inputs. `"channels_last"`
            corresponds to inputs with shape `(batch, height, width, channels)`
            while `"channels_first"` corresponds to inputs with shape
            `(batch, channels, height, width)`. It defaults to the
            `image_data_format` value found in your Keras config file
            at `~/.keras/keras.json`.
            If you never set it, then it will be `"channels_last"`.
        dilation_rate: int or tuple/list of 2 integers, specifying the dilation
            rate to use for dilated convolution. If only one int is specified,
            the same dilation rate will be used for all dimensions.
        depth_multiplier: The number of depthwise convolution output channels
            for each input channel. The total number of depthwise convolution
            output channels will be equal to `input_channel * depth_multiplier`.
        activation: Activation function. If `None`, no activation is applied.
        use_bias: bool, if `True`, bias will be added to the output.
        depthwise_initializer: An initializer for the depthwise convolution
            kernel. If None, then the default initializer (`"glorot_uniform"`)
            will be used.
        pointwise_initializer: An initializer for the pointwise convolution
            kernel. If None, then the default initializer (`"glorot_uniform"`)
            will be used.
        bias_initializer: An initializer for the bias vector. If None, the
            default initializer ('"zeros"') will be used.
        depthwise_regularizer: Optional regularizer for the depthwise
            convolution kernel.
        pointwise_regularizer: Optional regularizer for the pointwise
            convolution kernel.
        bias_regularizer: Optional regularizer for the bias vector.
        activity_regularizer: Optional regularizer function for the output.
        depthwise_constraint: Optional projection function to be applied to the
            depthwise kernel after being updated by an `Optimizer` (e.g. used
            for norm constraints or value constraints for layer weights). The
            function must take as input the unprojected variable and must return
            the projected variable (which must have the same shape).
        pointwise_constraint: Optional projection function to be applied to the
            pointwise kernel after being updated by an `Optimizer`.
        bias_constraint: Optional projection function to be applied to the
            bias after being updated by an `Optimizer`.

    Input shape:

    - If `data_format="channels_last"`:
        A 4D tensor with shape: `(batch_size, height, width, channels)`
    - If `data_format="channels_first"`:
        A 4D tensor with shape: `(batch_size, channels, height, width)`

    Output shape:

    - If `data_format="channels_last"`:
        A 4D tensor with shape: `(batch_size, new_height, new_width, filters)`
    - If `data_format="channels_first"`:
        A 4D tensor with shape: `(batch_size, filters, new_height, new_width)`

    Returns:
        A 4D tensor representing
        `activation(separable_conv2d(inputs, kernel) + bias)`.

    Example:

    >>> x = np.random.rand(4, 10, 10, 12)
    >>> y = keras.layers.SeparableConv2D(4, 3, strides=2, activation='relu')(x)
    >>> print(y.shape)
    (4, 4, 4, 4)
    """

    def __init__(
        self,
        filters,
        kernel_size,
        strides=(1, 1),
        padding="valid",
        data_format=None,
        dilation_rate=(1, 1),
        depth_multiplier=1,
        activation=None,
        use_bias=True,
        depthwise_initializer="glorot_uniform",
        pointwise_initializer="glorot_uniform",
        bias_initializer="zeros",
        depthwise_regularizer=None,
        pointwise_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        depthwise_constraint=None,
        pointwise_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        # Thin wrapper: pin `rank=2` and delegate all logic to the base class.
        super().__init__(
            rank=2,
            depth_multiplier=depth_multiplier,
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            activation=activation,
            use_bias=use_bias,
            depthwise_initializer=depthwise_initializer,
            pointwise_initializer=pointwise_initializer,
            bias_initializer=bias_initializer,
            depthwise_regularizer=depthwise_regularizer,
            pointwise_regularizer=pointwise_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            depthwise_constraint=depthwise_constraint,
            pointwise_constraint=pointwise_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/convolutional/separable_conv_test.py | keras/src/layers/convolutional/separable_conv_test.py | import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import layers
from keras.src import testing
from keras.src.layers.convolutional.conv_test import np_conv1d
from keras.src.layers.convolutional.conv_test import np_conv2d
from keras.src.layers.convolutional.depthwise_conv_test import (
np_depthwise_conv1d,
)
from keras.src.layers.convolutional.depthwise_conv_test import (
np_depthwise_conv2d,
)
class SeparableConvBasicTest(testing.TestCase):
    """Smoke tests for `SeparableConv1D` / `SeparableConv2D`.

    `run_layer_test` exercises output shapes, weight counts (depthwise
    kernel, pointwise kernel, bias), serialization and masking support;
    `test_bad_init_args` checks constructor argument validation.
    """

    @parameterized.parameters(
        {
            "depth_multiplier": 5,
            "filters": 5,
            "kernel_size": 2,
            "strides": 1,
            "padding": "valid",
            "data_format": "channels_last",
            "dilation_rate": 1,
            "input_shape": (3, 5, 4),
            "output_shape": (3, 4, 5),
        },
        {
            "depth_multiplier": 6,
            "filters": 6,
            "kernel_size": 2,
            "strides": 1,
            "padding": "same",
            "data_format": "channels_last",
            "dilation_rate": (2,),
            "input_shape": (3, 4, 4),
            "output_shape": (3, 4, 6),
        },
        {
            "depth_multiplier": 6,
            "filters": 6,
            "kernel_size": 2,
            "strides": (2,),
            "padding": "valid",
            "data_format": "channels_last",
            "dilation_rate": 1,
            "input_shape": (3, 5, 4),
            "output_shape": (3, 2, 6),
        },
    )
    @pytest.mark.requires_trainable_backend
    def test_separable_conv1d_basic(
        self,
        depth_multiplier,
        filters,
        kernel_size,
        strides,
        padding,
        data_format,
        dilation_rate,
        input_shape,
        output_shape,
    ):
        """Shape/weight-count/serialization checks for `SeparableConv1D`."""
        self.run_layer_test(
            layers.SeparableConv1D,
            init_kwargs={
                "depth_multiplier": depth_multiplier,
                "filters": filters,
                "kernel_size": kernel_size,
                "strides": strides,
                "padding": padding,
                "data_format": data_format,
                "dilation_rate": dilation_rate,
            },
            input_shape=input_shape,
            expected_output_shape=output_shape,
            # Depthwise kernel, pointwise kernel, and bias.
            expected_num_trainable_weights=3,
            expected_num_non_trainable_weights=0,
            expected_num_losses=0,
            supports_masking=False,
        )
    @parameterized.parameters(
        {
            "depth_multiplier": 5,
            "filters": 5,
            "kernel_size": 2,
            "strides": 1,
            "padding": "valid",
            "data_format": "channels_last",
            "dilation_rate": 1,
            "input_shape": (3, 5, 5, 4),
            "output_shape": (3, 4, 4, 5),
        },
        {
            "depth_multiplier": 6,
            "filters": 6,
            "kernel_size": 2,
            "strides": 1,
            "padding": "same",
            "data_format": "channels_last",
            "dilation_rate": (2, 2),
            "input_shape": (3, 4, 4, 4),
            "output_shape": (3, 4, 4, 6),
        },
        {
            "depth_multiplier": 6,
            "filters": 6,
            "kernel_size": (2, 2),
            "strides": (2, 2),
            "padding": "valid",
            "data_format": "channels_last",
            "dilation_rate": (1, 1),
            "input_shape": (3, 5, 5, 4),
            "output_shape": (3, 2, 2, 6),
        },
    )
    @pytest.mark.requires_trainable_backend
    def test_separable_conv2d_basic(
        self,
        depth_multiplier,
        filters,
        kernel_size,
        strides,
        padding,
        data_format,
        dilation_rate,
        input_shape,
        output_shape,
    ):
        """Shape/weight-count/serialization checks for `SeparableConv2D`."""
        self.run_layer_test(
            layers.SeparableConv2D,
            init_kwargs={
                "depth_multiplier": depth_multiplier,
                "filters": filters,
                "kernel_size": kernel_size,
                "strides": strides,
                "padding": padding,
                "data_format": data_format,
                "dilation_rate": dilation_rate,
            },
            input_shape=input_shape,
            expected_output_shape=output_shape,
            # Depthwise kernel, pointwise kernel, and bias.
            expected_num_trainable_weights=3,
            expected_num_non_trainable_weights=0,
            expected_num_losses=0,
            supports_masking=False,
        )
    def test_bad_init_args(self):
        """Invalid constructor arguments must raise `ValueError`."""
        # `depth_multiplier` is not positive.
        with self.assertRaisesRegex(
            ValueError,
            "Invalid value for argument `depth_multiplier`. "
            "Expected a strictly positive value. Received "
            "depth_multiplier=0.",
        ):
            layers.SeparableConv1D(depth_multiplier=0, filters=1, kernel_size=1)
        # `filters` is not positive.
        with self.assertRaisesRegex(
            ValueError,
            "Invalid value for argument `filters`. Expected a "
            "strictly positive value. Received filters=0.",
        ):
            layers.SeparableConv1D(depth_multiplier=1, filters=0, kernel_size=1)
        # `kernel_size` has 0.
        with self.assertRaisesRegex(
            ValueError,
            r"The `kernel_size` argument must be a tuple of "
            r"\d+ integers. Received kernel_size=\(1, 0\), including values"
            r" \{0\} that do not satisfy `value > 0`",
        ):
            layers.SeparableConv2D(
                depth_multiplier=2, filters=2, kernel_size=(1, 0)
            )
        # `strides` has 0.
        with self.assertRaisesRegex(
            ValueError,
            r"The `strides` argument must be a tuple of \d+ "
            r"integers. Received strides=\(1, 0\), including values \{0\} "
            r"that do not satisfy `value > 0`",
        ):
            layers.SeparableConv2D(
                depth_multiplier=2,
                filters=2,
                kernel_size=(2, 2),
                strides=(1, 0),
            )
        # `dilation_rate > 1` while `strides > 1`.
        with self.assertRaisesRegex(
            ValueError,
            r"`strides > 1` not supported in conjunction with "
            r"`dilation_rate > 1`. Received: strides=\(2, 2\) and "
            r"dilation_rate=\(2, 1\)",
        ):
            layers.SeparableConv2D(
                depth_multiplier=2,
                filters=2,
                kernel_size=(2, 2),
                strides=2,
                dilation_rate=(2, 1),
            )
class SeparableConvCorrectnessTest(testing.TestCase):
    """Numerical checks for separable conv layers.

    Each layer output is compared against a NumPy reference built from a
    depthwise stage (`np_depthwise_conv*`, zero bias) followed by a
    pointwise stage (`np_conv*`, which applies the real bias).
    """

    @parameterized.parameters(
        {
            "depth_multiplier": 5,
            "filters": 5,
            "kernel_size": 2,
            "strides": 1,
            "padding": "valid",
            "data_format": "channels_last",
            "dilation_rate": 1,
        },
        {
            "depth_multiplier": 6,
            "filters": 6,
            "kernel_size": 2,
            "strides": 1,
            "padding": "same",
            "data_format": "channels_last",
            "dilation_rate": (2,),
        },
        {
            "depth_multiplier": 6,
            "filters": 6,
            "kernel_size": (2,),
            "strides": (2,),
            "padding": "valid",
            "data_format": "channels_last",
            "dilation_rate": 1,
        },
    )
    def test_separable_conv1d(
        self,
        depth_multiplier,
        filters,
        kernel_size,
        strides,
        padding,
        data_format,
        dilation_rate,
    ):
        """Compare `SeparableConv1D` output to the NumPy reference."""
        layer = layers.SeparableConv1D(
            depth_multiplier=depth_multiplier,
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
        )
        inputs = np.random.normal(size=[2, 8, 4])
        layer.build(input_shape=inputs.shape)
        # Assign known random weights so the NumPy reference can reuse them.
        depthwise_kernel_shape = layer.depthwise_kernel.shape
        depthwise_kernel_weights = np.random.normal(size=depthwise_kernel_shape)
        layer.depthwise_kernel.assign(depthwise_kernel_weights)
        pointwise_kernel_shape = layer.pointwise_kernel.shape
        pointwise_kernel_weights = np.random.normal(size=pointwise_kernel_shape)
        layer.pointwise_kernel.assign(pointwise_kernel_weights)
        bias_weights = np.random.normal(size=(filters,))
        layer.bias.assign(bias_weights)
        outputs = layer(inputs)
        # Depthwise stage uses zero bias; the bias belongs to the pointwise
        # stage (input has 4 channels, hence 4 * depth_multiplier zeros).
        expected_depthwise = np_depthwise_conv1d(
            inputs,
            depthwise_kernel_weights,
            np.zeros(4 * depth_multiplier),
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
        )
        # Pointwise stage: a 1x1 convolution with stride 1 and no dilation.
        expected = np_conv1d(
            expected_depthwise,
            pointwise_kernel_weights,
            bias_weights,
            strides=1,
            padding=padding,
            data_format=data_format,
            dilation_rate=1,
            groups=1,
        )
        self.assertAllClose(outputs.shape, expected.shape)
        self.assertAllClose(
            outputs,
            expected,
            rtol=1e-5,
            atol=1e-5,
            tpu_atol=1e-1,
            tpu_rtol=1e-1,
        )
    @parameterized.parameters(
        {
            "depth_multiplier": 5,
            "filters": 5,
            "kernel_size": 2,
            "strides": 1,
            "padding": "valid",
            "data_format": "channels_last",
            "dilation_rate": 1,
        },
        {
            "depth_multiplier": 6,
            "filters": 6,
            "kernel_size": 2,
            "strides": 1,
            "padding": "same",
            "data_format": "channels_last",
            "dilation_rate": (2, 2),
        },
        {
            "depth_multiplier": 6,
            "filters": 6,
            "kernel_size": (2, 2),
            "strides": (2, 2),
            "padding": "valid",
            "data_format": "channels_last",
            "dilation_rate": (1, 1),
        },
    )
    def test_separable_conv2d(
        self,
        depth_multiplier,
        filters,
        kernel_size,
        strides,
        padding,
        data_format,
        dilation_rate,
    ):
        """Compare `SeparableConv2D` output to the NumPy reference."""
        layer = layers.SeparableConv2D(
            depth_multiplier=depth_multiplier,
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
        )
        inputs = np.random.normal(size=[2, 8, 8, 4])
        layer.build(input_shape=inputs.shape)
        # Assign known random weights so the NumPy reference can reuse them.
        depthwise_kernel_shape = layer.depthwise_kernel.shape
        depthwise_kernel_weights = np.random.normal(size=depthwise_kernel_shape)
        layer.depthwise_kernel.assign(depthwise_kernel_weights)
        pointwise_kernel_shape = layer.pointwise_kernel.shape
        pointwise_kernel_weights = np.random.normal(size=pointwise_kernel_shape)
        layer.pointwise_kernel.assign(pointwise_kernel_weights)
        bias_weights = np.random.normal(size=(filters,))
        layer.bias.assign(bias_weights)
        outputs = layer(inputs)
        # Depthwise stage uses zero bias; the bias belongs to the pointwise
        # stage (input has 4 channels, hence 4 * depth_multiplier zeros).
        expected_depthwise = np_depthwise_conv2d(
            inputs,
            depthwise_kernel_weights,
            np.zeros(4 * depth_multiplier),
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
        )
        # Pointwise stage: a 1x1 convolution with stride 1 and no dilation.
        expected = np_conv2d(
            expected_depthwise,
            pointwise_kernel_weights,
            bias_weights,
            strides=1,
            padding=padding,
            data_format=data_format,
            dilation_rate=1,
            groups=1,
        )
        self.assertAllClose(outputs.shape, expected.shape)
        self.assertAllClose(
            outputs,
            expected,
            rtol=1e-5,
            atol=1e-5,
            tpu_atol=1e-1,
            tpu_rtol=1e-1,
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/convolutional/depthwise_conv_test.py | keras/src/layers/convolutional/depthwise_conv_test.py | import numpy as np
import pytest
from absl.testing import parameterized
from numpy.lib.stride_tricks import as_strided
from keras.src import layers
from keras.src import testing
def _same_padding(input_size, kernel_size, stride):
if input_size % stride == 0:
padding = max(kernel_size - stride, 0)
else:
padding = max(kernel_size - (input_size % stride), 0)
return padding // 2, padding - padding // 2
def np_depthwise_conv1d(
    x,
    kernel_weights,
    bias_weights,
    strides,
    padding,
    data_format,
    dilation_rate,
):
    """NumPy reference implementation of a 1D depthwise convolution.

    Args:
        x: Input array, `(batch, steps, channels)` for `"channels_last"` or
            `(batch, channels, steps)` for `"channels_first"`.
        kernel_weights: Depthwise kernel of shape
            `(kernel_size, channels_in, depth_multiplier)`.
        bias_weights: Bias with `channels_in * depth_multiplier` elements.
        strides: int or tuple/list of 1 int.
        padding: `"valid"` or `"same"`.
        data_format: `"channels_last"` or `"channels_first"`.
        dilation_rate: int or tuple/list of 1 int.

    Returns:
        The convolved array, in the same data format as the input.
    """
    if data_format == "channels_first":
        # Work in channels_last internally; transpose back at the end.
        x = x.transpose((0, 2, 1))
    h_stride = strides[0] if isinstance(strides, (tuple, list)) else strides
    if isinstance(dilation_rate, (tuple, list)):
        h_dilation = dilation_rate[0]
    else:
        h_dilation = dilation_rate
    h_kernel, ch_in, ch_out = kernel_weights.shape
    if h_dilation > 1:
        # Insert zeros between kernel taps: a dilated convolution is then
        # equivalent to a plain convolution with the enlarged kernel.
        new_h_kernel = h_kernel + (h_dilation - 1) * (h_kernel - 1)
        new_kernel_weights = np.zeros(
            (new_h_kernel, ch_in, ch_out),
            dtype=kernel_weights.dtype,
        )
        new_kernel_weights[::h_dilation] = kernel_weights
        kernel_weights = new_kernel_weights
        h_kernel = kernel_weights.shape[0]
    if padding == "same":
        n_batch, h_x, _ = x.shape
        h_pad = _same_padding(h_x, h_kernel, h_stride)
        npad = [(0, 0)] * x.ndim
        npad[1] = h_pad
        x = np.pad(x, pad_width=npad, mode="constant", constant_values=0)
    n_batch, h_x, _ = x.shape
    h_out = int((h_x - h_kernel) / h_stride) + 1
    out_grps = []
    bias_weights = bias_weights.reshape(ch_in, ch_out)
    for ch_in_idx in range(ch_in):
        for ch_out_idx in range(ch_out):
            x_in = np.ascontiguousarray(x[..., ch_in_idx])
            # Zero-copy view of shape (batch, h_out, h_kernel): each output
            # position sees its receptive field. (Renamed from `strides` to
            # avoid shadowing the function parameter of the same name.)
            window_shape = (n_batch, h_out, h_kernel)
            window_strides = (
                x_in.strides[0],
                h_stride * x_in.strides[1],
                x_in.strides[1],
            )
            x_strided = as_strided(
                x_in, shape=window_shape, strides=window_strides
            ).reshape(-1, h_kernel)
            kernel_weights_grp = kernel_weights[
                ..., ch_in_idx, ch_out_idx
            ].reshape(-1, 1)
            bias_weights_grp = bias_weights[..., ch_in_idx, ch_out_idx]
            # One matmul computes this (in, out) channel pair for all
            # batches and output positions at once.
            out_grps.append(
                (x_strided @ kernel_weights_grp + bias_weights_grp).reshape(
                    n_batch, h_out, 1
                )
            )
    out = np.concatenate(out_grps, axis=-1)
    if data_format == "channels_first":
        out = out.transpose((0, 2, 1))
    return out
def np_depthwise_conv2d(
    x,
    kernel_weights,
    bias_weights,
    strides,
    padding,
    data_format,
    dilation_rate,
):
    """NumPy reference implementation of a 2D depthwise convolution.

    Args:
        x: Input array, `(batch, height, width, channels)` for
            `"channels_last"` or `(batch, channels, height, width)` for
            `"channels_first"`.
        kernel_weights: Depthwise kernel of shape
            `(kernel_h, kernel_w, channels_in, depth_multiplier)`.
        bias_weights: Bias with `channels_in * depth_multiplier` elements.
        strides: int or tuple/list of 2 ints.
        padding: `"valid"` or `"same"`.
        data_format: `"channels_last"` or `"channels_first"`.
        dilation_rate: int or tuple/list of 2 ints.

    Returns:
        The convolved array, in the same data format as the input.
    """
    if data_format == "channels_first":
        # Work in channels_last internally; transpose back at the end.
        x = x.transpose((0, 2, 3, 1))
    if isinstance(strides, (tuple, list)):
        h_stride, w_stride = strides
    else:
        h_stride = w_stride = strides
    if isinstance(dilation_rate, (tuple, list)):
        h_dilation, w_dilation = dilation_rate
    else:
        h_dilation = w_dilation = dilation_rate
    h_kernel, w_kernel, ch_in, ch_out = kernel_weights.shape
    if h_dilation > 1 or w_dilation > 1:
        # Insert zeros between kernel taps: a dilated convolution is then
        # equivalent to a plain convolution with the enlarged kernel.
        new_h_kernel = h_kernel + (h_dilation - 1) * (h_kernel - 1)
        new_w_kernel = w_kernel + (w_dilation - 1) * (w_kernel - 1)
        new_kernel_size = (new_h_kernel, new_w_kernel)
        new_kernel_weights = np.zeros(
            (*new_kernel_size, ch_in, ch_out),
            dtype=kernel_weights.dtype,
        )
        new_kernel_weights[::h_dilation, ::w_dilation] = kernel_weights
        kernel_weights = new_kernel_weights
        h_kernel, w_kernel = kernel_weights.shape[:2]
    if padding == "same":
        n_batch, h_x, w_x, _ = x.shape
        h_pad = _same_padding(h_x, h_kernel, h_stride)
        w_pad = _same_padding(w_x, w_kernel, w_stride)
        npad = [(0, 0)] * x.ndim
        npad[1] = h_pad
        npad[2] = w_pad
        x = np.pad(x, pad_width=npad, mode="constant", constant_values=0)
    n_batch, h_x, w_x, _ = x.shape
    h_out = int((h_x - h_kernel) / h_stride) + 1
    w_out = int((w_x - w_kernel) / w_stride) + 1
    out_grps = []
    bias_weights = bias_weights.reshape(ch_in, ch_out)
    for ch_in_idx in range(ch_in):
        for ch_out_idx in range(ch_out):
            x_in = np.ascontiguousarray(x[..., ch_in_idx])
            # Zero-copy view of shape (batch, h_out, w_out, h_kernel,
            # w_kernel): each output position sees its receptive field.
            # (Renamed from `strides` to avoid shadowing the parameter.)
            window_shape = (n_batch, h_out, w_out, h_kernel, w_kernel)
            window_strides = (
                x_in.strides[0],
                h_stride * x_in.strides[1],
                w_stride * x_in.strides[2],
                x_in.strides[1],
                x_in.strides[2],
            )
            inner_dim = h_kernel * w_kernel
            x_strided = as_strided(
                x_in, shape=window_shape, strides=window_strides
            ).reshape(-1, inner_dim)
            kernel_weights_grp = kernel_weights[
                ..., ch_in_idx, ch_out_idx
            ].reshape(-1, 1)
            bias_weights_grp = bias_weights[..., ch_in_idx, ch_out_idx]
            # One matmul computes this (in, out) channel pair for all
            # batches and output positions at once.
            out_grps.append(
                (x_strided @ kernel_weights_grp + bias_weights_grp).reshape(
                    n_batch, h_out, w_out, 1
                )
            )
    out = np.concatenate(out_grps, axis=-1)
    if data_format == "channels_first":
        out = out.transpose((0, 3, 1, 2))
    return out
class DepthwiseConvBasicTest(testing.TestCase):
    """Smoke tests for `DepthwiseConv1D` / `DepthwiseConv2D`.

    `run_layer_test` exercises output shapes, weight counts (depthwise
    kernel and bias), serialization and masking support;
    `test_bad_init_args` checks constructor argument validation.
    """

    @parameterized.parameters(
        {
            "depth_multiplier": 5,
            "kernel_size": 2,
            "strides": 1,
            "padding": "valid",
            "data_format": "channels_last",
            "dilation_rate": 1,
            "input_shape": (3, 5, 4),
            "output_shape": (3, 4, 20),
        },
        {
            "depth_multiplier": 6,
            "kernel_size": 2,
            "strides": 1,
            "padding": "same",
            "data_format": "channels_last",
            "dilation_rate": (2,),
            "input_shape": (3, 4, 4),
            "output_shape": (3, 4, 24),
        },
        {
            "depth_multiplier": 6,
            "kernel_size": 2,
            "strides": (2,),
            "padding": "valid",
            "data_format": "channels_last",
            "dilation_rate": 1,
            "input_shape": (3, 5, 4),
            "output_shape": (3, 2, 24),
        },
    )
    @pytest.mark.requires_trainable_backend
    def test_depthwise_conv1d_basic(
        self,
        depth_multiplier,
        kernel_size,
        strides,
        padding,
        data_format,
        dilation_rate,
        input_shape,
        output_shape,
    ):
        """Shape/weight-count/serialization checks for `DepthwiseConv1D`."""
        self.run_layer_test(
            layers.DepthwiseConv1D,
            init_kwargs={
                "depth_multiplier": depth_multiplier,
                "kernel_size": kernel_size,
                "strides": strides,
                "padding": padding,
                "data_format": data_format,
                "dilation_rate": dilation_rate,
            },
            input_shape=input_shape,
            expected_output_shape=output_shape,
            # Depthwise kernel and bias.
            expected_num_trainable_weights=2,
            expected_num_non_trainable_weights=0,
            expected_num_losses=0,
            supports_masking=False,
        )
    @parameterized.parameters(
        {
            "depth_multiplier": 5,
            "kernel_size": 2,
            "strides": 1,
            "padding": "valid",
            "data_format": "channels_last",
            "dilation_rate": 1,
            "input_shape": (3, 5, 5, 4),
            "output_shape": (3, 4, 4, 20),
        },
        {
            "depth_multiplier": 6,
            "kernel_size": 2,
            "strides": 1,
            "padding": "same",
            "data_format": "channels_last",
            "dilation_rate": (2, 2),
            "input_shape": (3, 4, 4, 4),
            "output_shape": (3, 4, 4, 24),
        },
        {
            "depth_multiplier": 6,
            "kernel_size": (2, 2),
            "strides": (2, 2),
            "padding": "valid",
            "data_format": "channels_last",
            "dilation_rate": (1, 1),
            "input_shape": (3, 5, 5, 4),
            "output_shape": (3, 2, 2, 24),
        },
    )
    @pytest.mark.requires_trainable_backend
    def test_depthwise_conv2d_basic(
        self,
        depth_multiplier,
        kernel_size,
        strides,
        padding,
        data_format,
        dilation_rate,
        input_shape,
        output_shape,
    ):
        """Shape/weight-count/serialization checks for `DepthwiseConv2D`."""
        self.run_layer_test(
            layers.DepthwiseConv2D,
            init_kwargs={
                "depth_multiplier": depth_multiplier,
                "kernel_size": kernel_size,
                "strides": strides,
                "padding": padding,
                "data_format": data_format,
                "dilation_rate": dilation_rate,
            },
            input_shape=input_shape,
            expected_output_shape=output_shape,
            # Depthwise kernel and bias.
            expected_num_trainable_weights=2,
            expected_num_non_trainable_weights=0,
            expected_num_losses=0,
            supports_masking=False,
        )
    def test_bad_init_args(self):
        """Invalid constructor arguments must raise `ValueError`."""
        # `depth_multiplier` is not positive.
        with self.assertRaisesRegex(
            ValueError,
            "Invalid value for argument `depth_multiplier`. "
            "Expected a strictly positive value. Received "
            "depth_multiplier=0.",
        ):
            layers.DepthwiseConv1D(depth_multiplier=0, kernel_size=1)
        # `kernel_size` has 0.
        with self.assertRaisesRegex(
            ValueError,
            r"The `kernel_size` argument must be a tuple of 2 "
            r"integers. Received kernel_size=\(1, 0\), including values "
            r"\{0\} that do not satisfy `value > 0`",
        ):
            layers.DepthwiseConv2D(depth_multiplier=2, kernel_size=(1, 0))
        # `strides` has 0.
        with self.assertRaisesRegex(
            ValueError,
            r"The `strides` argument must be a tuple of \d+ "
            r"integers. Received strides=\(1, 0\), including values \{0\} "
            r"that do not satisfy `value > 0`",
        ):
            layers.DepthwiseConv2D(
                depth_multiplier=2, kernel_size=(2, 2), strides=(1, 0)
            )
        # `dilation_rate > 1` while `strides > 1`.
        with self.assertRaisesRegex(
            ValueError,
            r"`strides > 1` not supported in conjunction with "
            r"`dilation_rate > 1`. Received: strides=\(2, 2\) and "
            r"dilation_rate=\(2, 1\)",
        ):
            layers.DepthwiseConv2D(
                depth_multiplier=2,
                kernel_size=(2, 2),
                strides=2,
                dilation_rate=(2, 1),
            )
class DepthwiseConvCorrectnessTest(testing.TestCase):
    """Numerical checks for depthwise conv layers.

    Each layer output is compared against the NumPy reference
    implementations (`np_depthwise_conv1d` / `np_depthwise_conv2d`)
    using identical randomly-drawn weights.
    """

    @parameterized.parameters(
        {
            "depth_multiplier": 5,
            "kernel_size": 2,
            "strides": 1,
            "padding": "valid",
            "data_format": "channels_last",
            "dilation_rate": 1,
        },
        {
            "depth_multiplier": 6,
            "kernel_size": 2,
            "strides": 1,
            "padding": "same",
            "data_format": "channels_last",
            "dilation_rate": (2,),
        },
        {
            "depth_multiplier": 6,
            "kernel_size": (2,),
            "strides": (2,),
            "padding": "valid",
            "data_format": "channels_last",
            "dilation_rate": 1,
        },
    )
    def test_depthwise_conv1d(
        self,
        depth_multiplier,
        kernel_size,
        strides,
        padding,
        data_format,
        dilation_rate,
    ):
        """Compare `DepthwiseConv1D` output to the NumPy reference."""
        layer = layers.DepthwiseConv1D(
            depth_multiplier=depth_multiplier,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
        )
        inputs = np.random.normal(size=[2, 8, 4])
        layer.build(input_shape=inputs.shape)
        # Assign known random weights so the NumPy reference can reuse them.
        # Bias has one value per (input channel, depthwise filter) pair.
        kernel_shape = layer.kernel.shape
        kernel_weights = np.random.normal(size=kernel_shape)
        bias_weights = np.random.normal(size=(depth_multiplier * 4,))
        layer.kernel.assign(kernel_weights)
        layer.bias.assign(bias_weights)
        outputs = layer(inputs)
        expected = np_depthwise_conv1d(
            inputs,
            kernel_weights,
            bias_weights,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
        )
        self.assertAllClose(outputs, expected, tpu_atol=1e-2, tpu_rtol=1e-2)
    @parameterized.parameters(
        {
            "depth_multiplier": 5,
            "kernel_size": 2,
            "strides": 1,
            "padding": "valid",
            "data_format": "channels_last",
            "dilation_rate": 1,
        },
        {
            "depth_multiplier": 6,
            "kernel_size": 2,
            "strides": 1,
            "padding": "same",
            "data_format": "channels_last",
            "dilation_rate": (2, 2),
        },
        {
            "depth_multiplier": 6,
            "kernel_size": (2, 2),
            "strides": (2, 2),
            "padding": "valid",
            "data_format": "channels_last",
            "dilation_rate": (1, 1),
        },
    )
    def test_depthwise_conv2d(
        self,
        depth_multiplier,
        kernel_size,
        strides,
        padding,
        data_format,
        dilation_rate,
    ):
        """Compare `DepthwiseConv2D` output to the NumPy reference."""
        layer = layers.DepthwiseConv2D(
            depth_multiplier=depth_multiplier,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
        )
        inputs = np.random.normal(size=[2, 8, 8, 4])
        layer.build(input_shape=inputs.shape)
        # Assign known random weights so the NumPy reference can reuse them.
        # Bias has one value per (input channel, depthwise filter) pair.
        kernel_shape = layer.kernel.shape
        kernel_weights = np.random.normal(size=kernel_shape)
        bias_weights = np.random.normal(size=(depth_multiplier * 4,))
        layer.kernel.assign(kernel_weights)
        layer.bias.assign(bias_weights)
        outputs = layer(inputs)
        expected = np_depthwise_conv2d(
            inputs,
            kernel_weights,
            bias_weights,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
        )
        # NOTE(review): comparing shapes via assertAllClose with TPU
        # tolerances looks odd (shapes are exact integers) — confirm intent.
        self.assertAllClose(
            outputs.shape, expected.shape, tpu_atol=1e-2, tpu_rtol=1e-2
        )
        self.assertAllClose(
            outputs, expected, atol=1e-5, tpu_atol=1e-1, tpu_rtol=1e-1
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/convolutional/base_depthwise_conv.py | keras/src/layers/convolutional/base_depthwise_conv.py | """Keras base class for depthwise convolution layers."""
from keras.src import activations
from keras.src import constraints
from keras.src import initializers
from keras.src import ops
from keras.src import regularizers
from keras.src.backend import standardize_data_format
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.ops.operation_utils import compute_conv_output_shape
from keras.src.utils.argument_validation import standardize_padding
from keras.src.utils.argument_validation import standardize_tuple
class BaseDepthwiseConv(Layer):
    """Abstract N-D depthwise convolution layer.

    Depthwise convolution is a type of convolution in which each input channel
    is convolved with a different kernel (called a depthwise kernel). You can
    understand depthwise convolution as the first step in a depthwise separable
    convolution.

    It is implemented via the following steps:

    - Split the input into individual channels.
    - Convolve each channel with an individual depthwise kernel with
      `depth_multiplier` output channels.
    - Concatenate the convolved outputs along the channels axis.

    Unlike a regular convolution, depthwise convolution does not mix information
    across different input channels.

    The `depth_multiplier` argument determines how many filter are applied to
    one input channel. As such, it controls the amount of output channels that
    are generated per input channel in the depthwise step.

    Args:
        rank: int, the rank of the convolution, e.g. 2 for 2D convolution.
        depth_multiplier: The number of depthwise convolution output channels
            for each input channel. The total number of depthwise convolution
            output channels will be equal to `input_channel * depth_multiplier`.
        kernel_size: int or tuple/list of `rank` integers, specifying the size
            of the depthwise convolution window.
        strides: int or tuple/list of `rank` integers, specifying the stride
            length of the depthwise convolution. If only one int is specified,
            the same stride size will be used for all dimensions.
            `strides > 1` is incompatible with `dilation_rate > 1`.
        padding: string, either `"valid"` or `"same"` (case-insensitive).
            `"valid"` means no padding. `"same"` results in padding evenly to
            the left/right or up/down of the input. When `padding="same"` and
            `strides=1`, the output has the same size as the input.
        data_format: string, either `"channels_last"` or `"channels_first"`.
            The ordering of the dimensions in the inputs. `"channels_last"`
            corresponds to inputs with shape `(batch, steps, features)`
            while `"channels_first"` corresponds to inputs with shape
            `(batch, features, steps)`. It defaults to the `image_data_format`
            value found in your Keras config file at `~/.keras/keras.json`.
            If you never set it, then it will be `"channels_last"`.
        dilation_rate: int or tuple/list of `rank` integers, specifying the
            dilation rate to use for dilated convolution. If only one int is
            specified, the same dilation rate will be used for all dimensions.
        activation: Activation function. If `None`, no activation is applied.
        use_bias: bool, if `True`, bias will be added to the output.
        depthwise_initializer: Initializer for the depthwise convolution
            kernel. If `None`, the default initializer (`"glorot_uniform"`)
            will be used.
        bias_initializer: Initializer for the bias vector. If `None`, the
            default initializer (`"zeros"`) will be used.
        depthwise_regularizer: Optional regularizer for the convolution kernel.
        bias_regularizer: Optional regularizer for the bias vector.
        activity_regularizer: Optional regularizer function for the output.
        depthwise_constraint: Optional projection function to be applied to the
            kernel after being updated by an `Optimizer` (e.g. used to implement
            norm constraints or value constraints for layer weights). The
            function must take as input the unprojected variable and must return
            the projected variable (which must have the same shape). Constraints
            are not safe to use when doing asynchronous distributed training.
        bias_constraint: Optional projection function to be applied to the
            bias after being updated by an `Optimizer`.
    """

    def __init__(
        self,
        rank,
        depth_multiplier,
        kernel_size,
        strides=1,
        padding="valid",
        data_format=None,
        dilation_rate=1,
        activation=None,
        use_bias=True,
        depthwise_initializer="glorot_uniform",
        bias_initializer="zeros",
        depthwise_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        depthwise_constraint=None,
        bias_constraint=None,
        trainable=True,
        name=None,
        **kwargs,
    ):
        super().__init__(
            trainable=trainable,
            name=name,
            activity_regularizer=regularizers.get(activity_regularizer),
            **kwargs,
        )
        self.rank = rank
        self.depth_multiplier = depth_multiplier
        # Normalize scalar arguments into length-`rank` tuples.
        self.kernel_size = standardize_tuple(kernel_size, rank, "kernel_size")
        self.strides = standardize_tuple(strides, rank, "strides")
        self.dilation_rate = standardize_tuple(
            dilation_rate, rank, "dilation_rate"
        )
        self.padding = standardize_padding(padding)
        self.data_format = standardize_data_format(data_format)
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.depthwise_initializer = initializers.get(depthwise_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.depthwise_constraint = constraints.get(depthwise_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(min_ndim=self.rank + 2)
        # Validate arguments eagerly so misconfiguration fails at construction
        # time rather than at the first call.
        if self.depth_multiplier is not None and self.depth_multiplier <= 0:
            raise ValueError(
                "Invalid value for argument `depth_multiplier`. Expected a "
                "strictly positive value. Received "
                f"depth_multiplier={self.depth_multiplier}."
            )
        if not all(self.kernel_size):
            raise ValueError(
                "The argument `kernel_size` cannot contain 0. Received "
                f"kernel_size={self.kernel_size}."
            )
        if not all(self.strides):
            raise ValueError(
                "The argument `strides` cannot contain 0. Received "
                f"strides={self.strides}."
            )
        if max(self.strides) > 1 and max(self.dilation_rate) > 1:
            raise ValueError(
                "`strides > 1` not supported in conjunction with "
                f"`dilation_rate > 1`. Received: strides={self.strides} and "
                f"dilation_rate={self.dilation_rate}"
            )

    def build(self, input_shape):
        """Create the depthwise kernel (and optional bias) variables.

        The depthwise kernel has shape
        `kernel_size + (input_channel, depth_multiplier)`; the bias (if any)
        has `depth_multiplier * input_channel` entries, one per output channel.
        """
        input_channel = self._get_input_channel(input_shape)
        channel_axis = -1 if self.data_format == "channels_last" else 1
        # Pin the channel dimension so subsequent calls are shape-checked.
        self.input_spec = InputSpec(
            min_ndim=self.rank + 2, axes={channel_axis: input_channel}
        )
        depthwise_shape = self.kernel_size + (
            input_channel,
            self.depth_multiplier,
        )
        self.kernel = self.add_weight(
            name="kernel",
            shape=depthwise_shape,
            initializer=self.depthwise_initializer,
            regularizer=self.depthwise_regularizer,
            constraint=self.depthwise_constraint,
            trainable=True,
            dtype=self.dtype,
        )
        if self.use_bias:
            self.bias = self.add_weight(
                name="bias",
                shape=(self.depth_multiplier * input_channel,),
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
                trainable=True,
                dtype=self.dtype,
            )
        else:
            self.bias = None

    def _get_input_channel(self, input_shape):
        """Return the channel count from `input_shape` per `data_format`."""
        if self.data_format == "channels_last":
            return input_shape[-1]
        return input_shape[1]

    def call(self, inputs):
        """Apply the depthwise convolution, bias add, and activation."""
        input_channel = self._get_input_channel(inputs.shape)
        outputs = ops.depthwise_conv(
            inputs,
            self.kernel,
            strides=self.strides,
            padding=self.padding,
            dilation_rate=self.dilation_rate,
            data_format=self.data_format,
        )
        if self.use_bias:
            # Reshape the flat bias so it broadcasts over the spatial
            # dimensions along the channel axis of the chosen data format.
            if self.data_format == "channels_last":
                bias_shape = (1,) * (self.rank + 1) + (
                    self.depth_multiplier * input_channel,
                )
            else:
                bias_shape = (1, self.depth_multiplier * input_channel) + (
                    1,
                ) * self.rank
            bias = ops.reshape(self.bias, bias_shape)
            outputs = ops.add(outputs, bias)
        if self.activation is not None:
            return self.activation(outputs)
        return outputs

    def compute_output_shape(self, input_shape):
        """Compute the output shape; channel count is scaled by
        `depth_multiplier`."""
        input_channel = self._get_input_channel(input_shape)
        return compute_conv_output_shape(
            input_shape,
            self.depth_multiplier * input_channel,
            self.kernel_size,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate,
        )

    def get_config(self):
        """Return the serializable layer configuration."""
        config = super().get_config()
        config.update(
            {
                "depth_multiplier": self.depth_multiplier,
                "kernel_size": self.kernel_size,
                "strides": self.strides,
                "padding": self.padding,
                "data_format": self.data_format,
                "dilation_rate": self.dilation_rate,
                "activation": activations.serialize(self.activation),
                "use_bias": self.use_bias,
                "depthwise_initializer": initializers.serialize(
                    self.depthwise_initializer
                ),
                "bias_initializer": initializers.serialize(
                    self.bias_initializer
                ),
                "depthwise_regularizer": regularizers.serialize(
                    self.depthwise_regularizer
                ),
                "bias_regularizer": regularizers.serialize(
                    self.bias_regularizer
                ),
                "activity_regularizer": regularizers.serialize(
                    self.activity_regularizer
                ),
                "depthwise_constraint": constraints.serialize(
                    self.depthwise_constraint
                ),
                "bias_constraint": constraints.serialize(self.bias_constraint),
            }
        )
        return config
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/convolutional/__init__.py | keras/src/layers/convolutional/__init__.py | python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false | |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/convolutional/conv1d_transpose.py | keras/src/layers/convolutional/conv1d_transpose.py | from keras.src.api_export import keras_export
from keras.src.layers.convolutional.base_conv_transpose import BaseConvTranspose
@keras_export(
    [
        "keras.layers.Conv1DTranspose",
        "keras.layers.Convolution1DTranspose",
    ]
)
class Conv1DTranspose(BaseConvTranspose):
    """1D transposed convolution layer.
    The need for transposed convolutions generally arises from the desire to
    use a transformation going in the opposite direction of a normal
    convolution, i.e., from something that has the shape of the output of some
    convolution to something that has the shape of its input while maintaining
    a connectivity pattern that is compatible with said convolution.
    Args:
        filters: int, the dimension of the output space (the number of filters
            in the transpose convolution).
        kernel_size: int or tuple/list of 1 integer, specifying the size of the
            transposed convolution window.
        strides: int or tuple/list of 1 integer, specifying the stride length
            of the transposed convolution. `strides > 1` is incompatible with
            `dilation_rate > 1`.
        padding: string, either `"valid"` or `"same"` (case-insensitive).
            `"valid"` means no padding. `"same"` results in padding evenly to
            the left/right or up/down of the input such that output has the same
            height/width dimension as the input.
        output_padding: An integer tuple/list of 1 integer specifying the
            amount of padding along the time dimension of the output tensor.
            The amount of output padding must be lower than the stride.
            If set to `None` (default), the output shape is inferred.
        data_format: string, either `"channels_last"` or `"channels_first"`.
            The ordering of the dimensions in the inputs. `"channels_last"`
            corresponds to inputs with shape `(batch, steps, features)`
            while `"channels_first"` corresponds to inputs with shape
            `(batch, features, steps)`. It defaults to the `image_data_format`
            value found in your Keras config file at `~/.keras/keras.json`.
            If you never set it, then it will be `"channels_last"`.
        dilation_rate: An integer tuple/list of 1 integer, specifying
            the dilation rate to use for dilated convolution.
            Currently, specifying a `dilation_rate` value != 1 is
            incompatible with specifying a stride value != 1.
            Also dilation rate larger than 1 is not currently supported.
        activation: Activation function. If `None`, no activation is applied.
        use_bias: bool, if `True`, bias will be added to the output.
        kernel_initializer: Initializer for the convolution kernel. If `None`,
            the default initializer (`"glorot_uniform"`) will be used.
        bias_initializer: Initializer for the bias vector. If `None`, the
            default initializer (`"zeros"`) will be used.
        kernel_regularizer: Optional regularizer for the convolution kernel.
        bias_regularizer: Optional regularizer for the bias vector.
        activity_regularizer: Optional regularizer function for the output.
        kernel_constraint: Optional projection function to be applied to the
            kernel after being updated by an `Optimizer` (e.g. used to implement
            norm constraints or value constraints for layer weights). The
            function must take as input the unprojected variable and must return
            the projected variable (which must have the same shape). Constraints
            are not safe to use when doing asynchronous distributed training.
        bias_constraint: Optional projection function to be applied to the
            bias after being updated by an `Optimizer`.
    Input shape:
    - If `data_format="channels_last"`:
        A 3D tensor with shape: `(batch_shape, steps, channels)`
    - If `data_format="channels_first"`:
        A 3D tensor with shape: `(batch_shape, channels, steps)`
    Output shape:
    - If `data_format="channels_last"`:
        A 3D tensor with shape: `(batch_shape, new_steps, filters)`
    - If `data_format="channels_first"`:
        A 3D tensor with shape: `(batch_shape, filters, new_steps)`
    Returns:
        A 3D tensor representing
        `activation(conv1d_transpose(inputs, kernel) + bias)`.
    Raises:
        ValueError: when both `strides > 1` and `dilation_rate > 1`.
    References:
    - [A guide to convolution arithmetic for deep learning](
        https://arxiv.org/abs/1603.07285v1)
    - [Deconvolutional Networks](
        https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)
    Example:
    >>> x = np.random.rand(4, 10, 128)
    >>> y = keras.layers.Conv1DTranspose(32, 3, 2, activation='relu')(x)
    >>> print(y.shape)
    (4, 21, 32)
    """
    def __init__(
        self,
        filters,
        kernel_size,
        strides=1,
        padding="valid",
        output_padding=None,
        data_format=None,
        dilation_rate=1,
        activation=None,
        use_bias=True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        # Thin wrapper: all logic lives in the rank-generic BaseConvTranspose;
        # this class only pins `rank=1` and forwards every argument unchanged.
        super().__init__(
            rank=1,
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            output_padding=output_padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/convolutional/conv3d_transpose.py | keras/src/layers/convolutional/conv3d_transpose.py | from keras.src.api_export import keras_export
from keras.src.layers.convolutional.base_conv_transpose import BaseConvTranspose
@keras_export(
    [
        "keras.layers.Conv3DTranspose",
        "keras.layers.Convolution3DTranspose",
    ]
)
class Conv3DTranspose(BaseConvTranspose):
    """3D transposed convolution layer.
    The need for transposed convolutions generally arises from the desire to
    use a transformation going in the opposite direction of a normal
    convolution, i.e., from something that has the shape of the output of some
    convolution to something that has the shape of its input while maintaining
    a connectivity pattern that is compatible with said convolution.
    Args:
        filters: int, the dimension of the output space (the number of filters
            in the transposed convolution).
        kernel_size: int or tuple/list of 3 integers, specifying the size of
            the transposed convolution window.
        strides: int or tuple/list of 3 integers, specifying the stride length
            of the transposed convolution. `strides > 1` is incompatible with
            `dilation_rate > 1`.
        padding: string, either `"valid"` or `"same"` (case-insensitive).
            `"valid"` means no padding. `"same"` results in padding evenly to
            the left/right or up/down of the input. When `padding="same"` and
            `strides=1`, the output has the same size as the input.
        output_padding: An integer or tuple/list of 3 integers,
            specifying the amount of padding along the depth, height, and
            width.
            Can be a single integer to specify the same value for all
            spatial dimensions.
            The amount of output padding along a given dimension must be
            lower than the stride along that same dimension.
            If set to `None` (default), the output shape is inferred.
        data_format: string, either `"channels_last"` or `"channels_first"`.
            The ordering of the dimensions in the inputs. `"channels_last"`
            corresponds to inputs with shape
            `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
            while `"channels_first"` corresponds to inputs with shape
            `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
            It defaults to the `image_data_format` value found in your Keras
            config file at `~/.keras/keras.json`. If you never set it, then it
            will be `"channels_last"`.
        dilation_rate: an integer or tuple/list of 3 integers, specifying
            the dilation rate to use for dilated convolution.
            Can be a single integer to specify the same value for
            all spatial dimensions.
            Currently, specifying any `dilation_rate` value != 1 is
            incompatible with specifying any stride value != 1.
        activation: Activation function. If `None`, no activation is applied.
        use_bias: bool, if `True`, bias will be added to the output.
        kernel_initializer: Initializer for the convolution kernel. If `None`,
            the default initializer (`"glorot_uniform"`) will be used.
        bias_initializer: Initializer for the bias vector. If `None`, the
            default initializer (`"zeros"`) will be used.
        kernel_regularizer: Optional regularizer for the convolution kernel.
        bias_regularizer: Optional regularizer for the bias vector.
        activity_regularizer: Optional regularizer function for the output.
        kernel_constraint: Optional projection function to be applied to the
            kernel after being updated by an `Optimizer` (e.g. used to implement
            norm constraints or value constraints for layer weights). The
            function must take as input the unprojected variable and must return
            the projected variable (which must have the same shape). Constraints
            are not safe to use when doing asynchronous distributed training.
        bias_constraint: Optional projection function to be applied to the
            bias after being updated by an `Optimizer`.
    Input shape:
    - If `data_format="channels_last"`:
        5D tensor with shape:
        `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
    - If `data_format="channels_first"`:
        5D tensor with shape:
        `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`
    Output shape:
    - If `data_format="channels_last"`:
        5D tensor with shape:
        `(batch_size, new_spatial_dim1, new_spatial_dim2, new_spatial_dim3,
        filters)`
    - If `data_format="channels_first"`:
        5D tensor with shape:
        `(batch_size, filters, new_spatial_dim1, new_spatial_dim2,
        new_spatial_dim3)`
    Returns:
        A 5D tensor representing `activation(conv3d(inputs, kernel) + bias)`.
    Raises:
        ValueError: when both `strides > 1` and `dilation_rate > 1`.
    References:
    - [A guide to convolution arithmetic for deep learning](
        https://arxiv.org/abs/1603.07285v1)
    - [Deconvolutional Networks](
        https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)
    Example:
    >>> x = np.random.rand(4, 10, 8, 12, 128)
    >>> y = keras.layers.Conv3DTranspose(32, 2, 2, activation='relu')(x)
    >>> print(y.shape)
    (4, 20, 16, 24, 32)
    """
    def __init__(
        self,
        filters,
        kernel_size,
        strides=(1, 1, 1),
        padding="valid",
        data_format=None,
        output_padding=None,
        dilation_rate=(1, 1, 1),
        activation=None,
        use_bias=True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        # Thin wrapper: all logic lives in the rank-generic BaseConvTranspose;
        # this class only pins `rank=3` and forwards every argument unchanged.
        super().__init__(
            rank=3,
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            output_padding=output_padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/convolutional/conv1d.py | keras/src/layers/convolutional/conv1d.py | from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.convolutional.base_conv import BaseConv
@keras_export(["keras.layers.Conv1D", "keras.layers.Convolution1D"])
class Conv1D(BaseConv):
    """1D convolution layer (e.g. temporal convolution).
    This layer creates a convolution kernel that is convolved with the layer
    input over a single spatial (or temporal) dimension to produce a tensor of
    outputs. If `use_bias` is True, a bias vector is created and added to the
    outputs. Finally, if `activation` is not `None`, it is applied to the
    outputs as well.
    Args:
        filters: int, the dimension of the output space (the number of filters
            in the convolution).
        kernel_size: int or tuple/list of 1 integer, specifying the size of the
            convolution window.
        strides: int or tuple/list of 1 integer, specifying the stride length
            of the convolution. `strides > 1` is incompatible with
            `dilation_rate > 1`.
        padding: string, `"valid"`, `"same"` or `"causal"` (case-insensitive).
            `"valid"` means no padding. `"same"` results in padding evenly to
            the left/right or up/down of the input. When `padding="same"` and
            `strides=1`, the output has the same size as the input.
            `"causal"` results in causal (dilated) convolutions, e.g.
            `output[t]` does not depend on `input[t+1:]`. Useful when modeling
            temporal data where the model should not violate the temporal
            order. See [WaveNet: A Generative Model for Raw Audio, section
            2.1](https://arxiv.org/abs/1609.03499).
        data_format: string, either `"channels_last"` or `"channels_first"`.
            The ordering of the dimensions in the inputs. `"channels_last"`
            corresponds to inputs with shape `(batch, steps, features)`
            while `"channels_first"` corresponds to inputs with shape
            `(batch, features, steps)`. It defaults to the `image_data_format`
            value found in your Keras config file at `~/.keras/keras.json`.
            If you never set it, then it will be `"channels_last"`.
        dilation_rate: int or tuple/list of 1 integers, specifying the dilation
            rate to use for dilated convolution.
        groups: A positive int specifying the number of groups in which the
            input is split along the channel axis. Each group is convolved
            separately with `filters // groups` filters. The output is the
            concatenation of all the `groups` results along the channel axis.
            Input channels and `filters` must both be divisible by `groups`.
        activation: Activation function. If `None`, no activation is applied.
        use_bias: bool, if `True`, bias will be added to the output.
        kernel_initializer: Initializer for the convolution kernel. If `None`,
            the default initializer (`"glorot_uniform"`) will be used.
        bias_initializer: Initializer for the bias vector. If `None`, the
            default initializer (`"zeros"`) will be used.
        kernel_regularizer: Optional regularizer for the convolution kernel.
        bias_regularizer: Optional regularizer for the bias vector.
        activity_regularizer: Optional regularizer function for the output.
        kernel_constraint: Optional projection function to be applied to the
            kernel after being updated by an `Optimizer` (e.g. used to implement
            norm constraints or value constraints for layer weights). The
            function must take as input the unprojected variable and must return
            the projected variable (which must have the same shape). Constraints
            are not safe to use when doing asynchronous distributed training.
        bias_constraint: Optional projection function to be applied to the
            bias after being updated by an `Optimizer`.
    Input shape:
    - If `data_format="channels_last"`:
        A 3D tensor with shape: `(batch_shape, steps, channels)`
    - If `data_format="channels_first"`:
        A 3D tensor with shape: `(batch_shape, channels, steps)`
    Output shape:
    - If `data_format="channels_last"`:
        A 3D tensor with shape: `(batch_shape, new_steps, filters)`
    - If `data_format="channels_first"`:
        A 3D tensor with shape: `(batch_shape, filters, new_steps)`
    Returns:
        A 3D tensor representing `activation(conv1d(inputs, kernel) + bias)`.
    Raises:
        ValueError: when both `strides > 1` and `dilation_rate > 1`.
    Example:
    >>> # The inputs are 128-length vectors with 10 timesteps, and the
    >>> # batch size is 4.
    >>> x = np.random.rand(4, 10, 128)
    >>> y = keras.layers.Conv1D(32, 3, activation='relu')(x)
    >>> print(y.shape)
    (4, 8, 32)
    """
    def __init__(
        self,
        filters,
        kernel_size,
        strides=1,
        padding="valid",
        data_format=None,
        dilation_rate=1,
        groups=1,
        activation=None,
        use_bias=True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        # Delegate to the rank-generic BaseConv with `rank=1`; this subclass
        # only adds support for `padding="causal"` (see `call` below).
        super().__init__(
            rank=1,
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            groups=groups,
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
    def _compute_causal_padding(self):
        """Return the left-only pad amounts that make the convolution causal.

        Pads `dilation_rate * (kernel_size - 1)` zeros on the left of the
        time axis so `output[t]` never depends on `input[t+1:]`.
        """
        left_pad = self.dilation_rate[0] * (self.kernel_size[0] - 1)
        if self.data_format == "channels_last":
            causal_padding = [[0, 0], [left_pad, 0], [0, 0]]
        else:
            causal_padding = [[0, 0], [0, 0], [left_pad, 0]]
        return causal_padding
    def call(self, inputs):
        padding = self.padding
        if self.padding == "causal":
            # Apply causal padding to inputs.
            inputs = ops.pad(inputs, self._compute_causal_padding())
            padding = "valid"
        outputs = ops.conv(
            inputs,
            self.kernel,
            strides=list(self.strides),
            padding=padding,
            dilation_rate=self.dilation_rate,
            data_format=self.data_format,
        )
        if self.use_bias:
            # Reshape the bias so it broadcasts along the channel axis of the
            # chosen data format.
            if self.data_format == "channels_last":
                bias_shape = (1,) * (self.rank + 1) + (self.filters,)
            else:
                bias_shape = (1, self.filters) + (1,) * self.rank
            bias = ops.reshape(self.bias, bias_shape)
            outputs = ops.add(outputs, bias)
        if self.activation is not None:
            return self.activation(outputs)
        return outputs
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/convolutional/conv2d.py | keras/src/layers/convolutional/conv2d.py | from keras.src.api_export import keras_export
from keras.src.layers.convolutional.base_conv import BaseConv
@keras_export(["keras.layers.Conv2D", "keras.layers.Convolution2D"])
class Conv2D(BaseConv):
    """2D convolution layer.
    This layer creates a convolution kernel that is convolved with the layer
    input over a 2D spatial (or temporal) dimension (height and width) to
    produce a tensor of outputs. If `use_bias` is True, a bias vector is created
    and added to the outputs. Finally, if `activation` is not `None`, it is
    applied to the outputs as well.
    Note on numerical precision: While in general Keras operation execution
    results are identical across backends up to 1e-7 precision in float32,
    `Conv2D` operations may show larger variations. Due to the large
    number of element-wise multiplications and additions in convolution
    operations, especially with large inputs or kernel sizes, accumulated
    floating-point differences can exceed this 1e-7 threshold. These variations
    are particularly noticeable when using different backends (e.g., TensorFlow
    vs JAX) or different hardware.
    Args:
        filters: int, the dimension of the output space (the number of filters
            in the convolution).
        kernel_size: int or tuple/list of 2 integer, specifying the size of the
            convolution window.
        strides: int or tuple/list of 2 integer, specifying the stride length
            of the convolution. `strides > 1` is incompatible with
            `dilation_rate > 1`.
        padding: string, either `"valid"` or `"same"` (case-insensitive).
            `"valid"` means no padding. `"same"` results in padding evenly to
            the left/right or up/down of the input. When `padding="same"` and
            `strides=1`, the output has the same size as the input.
        data_format: string, either `"channels_last"` or `"channels_first"`.
            The ordering of the dimensions in the inputs. `"channels_last"`
            corresponds to inputs with shape
            `(batch_size, height, width, channels)`
            while `"channels_first"` corresponds to inputs with shape
            `(batch_size, channels, height, width)`. It defaults to the
            `image_data_format` value found in your Keras config file at
            `~/.keras/keras.json`. If you never set it, then it will be
            `"channels_last"`.
        dilation_rate: int or tuple/list of 2 integers, specifying the dilation
            rate to use for dilated convolution.
        groups: A positive int specifying the number of groups in which the
            input is split along the channel axis. Each group is convolved
            separately with `filters // groups` filters. The output is the
            concatenation of all the `groups` results along the channel axis.
            Input channels and `filters` must both be divisible by `groups`.
        activation: Activation function. If `None`, no activation is applied.
        use_bias: bool, if `True`, bias will be added to the output.
        kernel_initializer: Initializer for the convolution kernel. If `None`,
            the default initializer (`"glorot_uniform"`) will be used.
        bias_initializer: Initializer for the bias vector. If `None`, the
            default initializer (`"zeros"`) will be used.
        kernel_regularizer: Optional regularizer for the convolution kernel.
        bias_regularizer: Optional regularizer for the bias vector.
        activity_regularizer: Optional regularizer function for the output.
        kernel_constraint: Optional projection function to be applied to the
            kernel after being updated by an `Optimizer` (e.g. used to implement
            norm constraints or value constraints for layer weights). The
            function must take as input the unprojected variable and must return
            the projected variable (which must have the same shape). Constraints
            are not safe to use when doing asynchronous distributed training.
        bias_constraint: Optional projection function to be applied to the
            bias after being updated by an `Optimizer`.
    Input shape:
    - If `data_format="channels_last"`:
        A 4D tensor with shape: `(batch_size, height, width, channels)`
    - If `data_format="channels_first"`:
        A 4D tensor with shape: `(batch_size, channels, height, width)`
    Output shape:
    - If `data_format="channels_last"`:
        A 4D tensor with shape: `(batch_size, new_height, new_width, filters)`
    - If `data_format="channels_first"`:
        A 4D tensor with shape: `(batch_size, filters, new_height, new_width)`
    Returns:
        A 4D tensor representing `activation(conv2d(inputs, kernel) + bias)`.
    Raises:
        ValueError: when both `strides > 1` and `dilation_rate > 1`.
    Example:
    >>> x = np.random.rand(4, 10, 10, 128)
    >>> y = keras.layers.Conv2D(32, 3, activation='relu')(x)
    >>> print(y.shape)
    (4, 8, 8, 32)
    """
    def __init__(
        self,
        filters,
        kernel_size,
        strides=(1, 1),
        padding="valid",
        data_format=None,
        dilation_rate=(1, 1),
        groups=1,
        activation=None,
        use_bias=True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        # Thin wrapper: all logic lives in the rank-generic BaseConv; this
        # class only pins `rank=2` and forwards every argument unchanged.
        super().__init__(
            rank=2,
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            groups=groups,
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/convolutional/conv3d.py | keras/src/layers/convolutional/conv3d.py | from keras.src.api_export import keras_export
from keras.src.layers.convolutional.base_conv import BaseConv
@keras_export(["keras.layers.Conv3D", "keras.layers.Convolution3D"])
class Conv3D(BaseConv):
    """3D convolution layer.

    Slides a 3D convolution kernel over the three spatial (or
    spatio-temporal) dimensions of the input volume to produce an output
    tensor. When `use_bias` is `True`, a bias vector is created and added
    to the outputs; when `activation` is not `None`, it is applied to the
    result as well.

    Args:
        filters: int, the dimension of the output space (the number of
            filters in the convolution).
        kernel_size: int or tuple/list of 3 integers giving the size of
            the convolution window.
        strides: int or tuple/list of 3 integers giving the stride of the
            convolution. Using `strides > 1` together with
            `dilation_rate > 1` is not supported.
        padding: string, `"valid"` (no padding) or `"same"` (pad evenly so
            that, with `strides=1`, the output has the same spatial size
            as the input). Case-insensitive.
        data_format: string, `"channels_last"` (inputs shaped
            `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3,
            channels)`) or `"channels_first"` (inputs shaped
            `(batch_size, channels, spatial_dim1, spatial_dim2,
            spatial_dim3)`). Defaults to the `image_data_format` value in
            your Keras config file at `~/.keras/keras.json`; if you never
            set it, it is `"channels_last"`.
        dilation_rate: int or tuple/list of 3 integers giving the dilation
            rate to use for dilated convolution.
        groups: positive int, the number of groups the input is split into
            along the channel axis. Each group is convolved separately
            with `filters // groups` filters and the results are
            concatenated along the channel axis. Both the input channels
            and `filters` must be divisible by `groups`.
        activation: activation function. If `None`, no activation is
            applied.
        use_bias: bool, whether to add a bias vector to the output.
        kernel_initializer: initializer for the convolution kernel. If
            `None`, the default (`"glorot_uniform"`) is used.
        bias_initializer: initializer for the bias vector. If `None`, the
            default (`"zeros"`) is used.
        kernel_regularizer: optional regularizer for the convolution
            kernel.
        bias_regularizer: optional regularizer for the bias vector.
        activity_regularizer: optional regularizer for the output.
        kernel_constraint: optional projection function applied to the
            kernel after each optimizer update (e.g. to implement norm or
            value constraints). It takes the unprojected variable and must
            return a projected variable of the same shape. Constraints are
            not safe with asynchronous distributed training.
        bias_constraint: optional projection function applied to the bias
            after each optimizer update.

    Input shape:
    - If `data_format="channels_last"`:
        5D tensor with shape:
        `(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
    - If `data_format="channels_first"`:
        5D tensor with shape:
        `(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`

    Output shape:
    - If `data_format="channels_last"`:
        5D tensor with shape:
        `(batch_size, new_spatial_dim1, new_spatial_dim2, new_spatial_dim3,
        filters)`
    - If `data_format="channels_first"`:
        5D tensor with shape:
        `(batch_size, filters, new_spatial_dim1, new_spatial_dim2,
        new_spatial_dim3)`

    Returns:
        A 5D tensor representing `activation(conv3d(inputs, kernel) + bias)`.

    Raises:
        ValueError: when both `strides > 1` and `dilation_rate > 1`.

    Example:

    >>> x = np.random.rand(4, 10, 10, 10, 128)
    >>> y = keras.layers.Conv3D(32, 3, activation='relu')(x)
    >>> print(y.shape)
    (4, 8, 8, 8, 32)
    """

    def __init__(
        self,
        filters,
        kernel_size,
        strides=(1, 1, 1),
        padding="valid",
        data_format=None,
        dilation_rate=(1, 1, 1),
        groups=1,
        activation=None,
        use_bias=True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs,
    ):
        # Conv3D is simply the rank-3 specialization of BaseConv; every
        # argument is forwarded verbatim.
        super().__init__(
            rank=3,
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            groups=groups,
            activation=activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_regularizer=bias_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=kernel_constraint,
            bias_constraint=bias_constraint,
            **kwargs,
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/convolutional/base_conv.py | keras/src/layers/convolutional/base_conv.py | """Keras base class for convolution layers."""
from keras.src import activations
from keras.src import constraints
from keras.src import initializers
from keras.src import ops
from keras.src import regularizers
from keras.src.backend import standardize_data_format
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.ops.operation_utils import compute_conv_output_shape
from keras.src.utils.argument_validation import standardize_padding
from keras.src.utils.argument_validation import standardize_tuple
class BaseConv(Layer):
    """Abstract N-D convolution layer (private, used as implementation base).

    This layer creates a convolution kernel that is convolved (actually
    cross-correlated) with the layer input to produce a tensor of outputs. If
    `use_bias` is True (and a `bias_initializer` is provided), a bias vector is
    created and added to the outputs. Finally, if `activation` is not `None`, it
    is applied to the outputs as well.

    Note: layer attributes cannot be modified after the layer has been called
    once (except the `trainable` attribute).

    Args:
        rank: int, the rank of the convolution, e.g. 2 for 2D convolution.
        filters: int, the dimension of the output space (the number of filters
            in the convolution).
        kernel_size: int or tuple/list of `rank` integers, specifying the size
            of the convolution window.
        strides: int or tuple/list of `rank` integers, specifying the stride
            length of the convolution. If only one int is specified, the same
            stride size will be used for all dimensions. `strides > 1` is
            incompatible with `dilation_rate > 1`.
        padding: string, either `"valid"` or `"same"` (case-insensitive).
            `"valid"` means no padding. `"same"` results in padding evenly to
            the left/right or up/down of the input. When `padding="same"` and
            `strides=1`, the output has the same size as the input.
        data_format: string, either `"channels_last"` or `"channels_first"`.
            The ordering of the dimensions in the inputs. `"channels_last"`
            corresponds to inputs with shape `(batch, steps, features)`
            while `"channels_first"` corresponds to inputs with shape
            `(batch, features, steps)`. It defaults to the `image_data_format`
            value found in your Keras config file at `~/.keras/keras.json`.
            If you never set it, then it will be `"channels_last"`.
        dilation_rate: int or tuple/list of `rank` integers, specifying the
            dilation rate to use for dilated convolution. If only one int is
            specified, the same dilation rate will be used for all dimensions.
        groups: A positive int specifying the number of groups in which the
            input is split along the channel axis. Each group is convolved
            separately with `filters // groups` filters. The output is the
            concatenation of all the `groups` results along the channel axis.
            Input channels and `filters` must both be divisible by `groups`.
        activation: Activation function. If `None`, no activation is applied.
        use_bias: bool, if `True`, bias will be added to the output.
        kernel_initializer: Initializer for the convolution kernel. If `None`,
            the default initializer (`"glorot_uniform"`) will be used.
        bias_initializer: Initializer for the bias vector. If `None`, the
            default initializer (`"zeros"`) will be used.
        kernel_regularizer: Optional regularizer for the convolution kernel.
        bias_regularizer: Optional regularizer for the bias vector.
        activity_regularizer: Optional regularizer function for the output.
        kernel_constraint: Optional projection function to be applied to the
            kernel after being updated by an `Optimizer` (e.g. used to implement
            norm constraints or value constraints for layer weights). The
            function must take as input the unprojected variable and must return
            the projected variable (which must have the same shape). Constraints
            are not safe to use when doing asynchronous distributed training.
        bias_constraint: Optional projection function to be applied to the
            bias after being updated by an `Optimizer`.
        lora_rank: Optional integer. If set, the layer's forward pass
            will implement LoRA (Low-Rank Adaptation)
            with the provided rank. LoRA sets the layer's kernel
            to non-trainable and replaces it with a delta over the
            original kernel, obtained via multiplying two lower-rank
            trainable matrices. This can be useful to reduce the
            computation cost of fine-tuning large dense layers.
            You can also enable LoRA on an existing layer by calling
            `layer.enable_lora(rank)`.
        lora_alpha: Optional integer. If set, this parameter scales the
            low-rank adaptation delta (computed as the product of two lower-rank
            trainable matrices) during the forward pass. The delta is scaled by
            `lora_alpha / lora_rank`, allowing you to fine-tune the strength of
            the LoRA adjustment independently of `lora_rank`.
    """

    def __init__(
        self,
        rank,
        filters,
        kernel_size,
        strides=1,
        padding="valid",
        data_format=None,
        dilation_rate=1,
        groups=1,
        activation=None,
        use_bias=True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        lora_rank=None,
        lora_alpha=None,
        **kwargs,
    ):
        super().__init__(activity_regularizer=activity_regularizer, **kwargs)
        self.rank = rank
        self.filters = filters
        self.groups = groups
        # Scalars are broadcast to `rank`-length tuples so the rest of the
        # class can always assume tuple-valued hyperparameters.
        self.kernel_size = standardize_tuple(kernel_size, rank, "kernel_size")
        self.strides = standardize_tuple(strides, rank, "strides")
        self.dilation_rate = standardize_tuple(
            dilation_rate, rank, "dilation_rate"
        )
        # "causal" padding is only meaningful for temporal (rank-1) conv.
        self.padding = standardize_padding(padding, allow_causal=rank == 1)
        self.data_format = standardize_data_format(data_format)
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.lora_rank = lora_rank
        # `lora_alpha` defaults to the rank so the LoRA delta scale
        # `lora_alpha / lora_rank` starts out at 1.
        self.lora_alpha = lora_alpha if lora_alpha is not None else lora_rank
        self.lora_enabled = False
        self.input_spec = InputSpec(min_ndim=self.rank + 2)
        # (Removed a redundant no-op `self.data_format = self.data_format`
        # self-assignment that was previously here.)
        if self.filters is not None and self.filters <= 0:
            raise ValueError(
                "Invalid value for argument `filters`. Expected a strictly "
                f"positive value. Received filters={self.filters}."
            )
        if self.groups <= 0:
            raise ValueError(
                "The number of groups must be a positive integer. "
                f"Received: groups={self.groups}."
            )
        if self.filters is not None and self.filters % self.groups != 0:
            raise ValueError(
                "The number of filters must be evenly divisible by the "
                f"number of groups. Received: groups={self.groups}, "
                f"filters={self.filters}."
            )
        if not all(self.kernel_size):
            raise ValueError(
                "The argument `kernel_size` cannot contain 0. Received "
                f"kernel_size={self.kernel_size}."
            )
        if not all(self.strides):
            raise ValueError(
                "The argument `strides` cannot contain 0. Received "
                f"strides={self.strides}"
            )
        if max(self.strides) > 1 and max(self.dilation_rate) > 1:
            raise ValueError(
                "`strides > 1` not supported in conjunction with "
                f"`dilation_rate > 1`. Received: strides={self.strides} and "
                f"dilation_rate={self.dilation_rate}"
            )

    def build(self, input_shape):
        """Create the convolution kernel and (optionally) the bias.

        Also pins the channel dimension in `input_spec` so subsequent calls
        are checked against the build-time channel count.
        """
        if self.data_format == "channels_last":
            channel_axis = -1
            input_channel = input_shape[-1]
        else:
            channel_axis = 1
            input_channel = input_shape[1]
        self.input_spec = InputSpec(
            min_ndim=self.rank + 2, axes={channel_axis: input_channel}
        )
        if input_channel % self.groups != 0:
            raise ValueError(
                "The number of input channels must be evenly divisible by "
                f"the number of groups. Received groups={self.groups}, but the "
                f"input has {input_channel} channels (full input shape is "
                f"{input_shape})."
            )
        # Grouped convolution: each group only sees its slice of the input
        # channels, hence `input_channel // self.groups`.
        kernel_shape = self.kernel_size + (
            input_channel // self.groups,
            self.filters,
        )

        # compute_output_shape contains some validation logic for the input
        # shape, and make sure the output shape has all positive dimensions.
        self.compute_output_shape(input_shape)

        self._kernel = self.add_weight(
            name="kernel",
            shape=kernel_shape,
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
            trainable=True,
            dtype=self.dtype,
        )
        if self.use_bias:
            self.bias = self.add_weight(
                name="bias",
                shape=(self.filters,),
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
                trainable=True,
                dtype=self.dtype,
            )
        else:
            self.bias = None
        self.built = True
        # Deferred LoRA: a `lora_rank` given at construction time can only
        # take effect once the base kernel exists.
        if self.lora_rank:
            self.enable_lora(self.lora_rank, lora_alpha=self.lora_alpha)

    @property
    def kernel(self):
        """The effective kernel: base kernel plus the scaled LoRA delta."""
        if not self.built:
            raise AttributeError(
                "You must build the layer before accessing `kernel`."
            )
        if self.lora_enabled:
            return self._kernel + (
                self.lora_alpha / self.lora_rank
            ) * ops.matmul(self.lora_kernel_a, self.lora_kernel_b)
        return self._kernel

    def convolution_op(self, inputs, kernel):
        """Apply the raw N-D convolution (no bias, no activation)."""
        return ops.conv(
            inputs,
            kernel,
            strides=list(self.strides),
            padding=self.padding,
            dilation_rate=self.dilation_rate,
            data_format=self.data_format,
        )

    def call(self, inputs):
        """Compute `activation(conv(inputs, kernel) + bias)`."""
        outputs = self.convolution_op(
            inputs,
            self.kernel,
        )
        if self.use_bias:
            # Reshape the bias so it broadcasts along the channel axis for
            # either data format.
            if self.data_format == "channels_last":
                bias_shape = (1,) * (self.rank + 1) + (self.filters,)
            else:
                bias_shape = (1, self.filters) + (1,) * self.rank
            bias = ops.reshape(self.bias, bias_shape)
            outputs = ops.add(outputs, bias)
        if self.activation is not None:
            return self.activation(outputs)
        return outputs

    def compute_output_shape(self, input_shape):
        """Return the conv output shape for `input_shape` (also validates)."""
        return compute_conv_output_shape(
            input_shape,
            self.filters,
            self.kernel_size,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate,
        )

    def enable_lora(
        self,
        rank,
        lora_alpha=None,
        a_initializer="he_uniform",
        b_initializer="zeros",
    ):
        """Freeze the base kernel and add trainable low-rank LoRA factors."""
        if self.kernel_constraint:
            raise ValueError(
                "Lora is incompatible with kernel constraints. "
                "In order to enable lora on this layer, remove the "
                "`kernel_constraint` argument."
            )
        if not self.built:
            raise ValueError(
                "Cannot enable lora on a layer that isn't yet built."
            )
        if self.lora_enabled:
            raise ValueError(
                "lora is already enabled. This can only be done once per layer."
            )
        # The tracker is locked after build; unlock it to register the two
        # new LoRA variables, then re-lock.
        self._tracker.unlock()
        self.lora_kernel_a = self.add_weight(
            name="lora_kernel_a",
            shape=self._kernel.shape[:-1] + (rank,),
            initializer=initializers.get(a_initializer),
            regularizer=self.kernel_regularizer,
        )
        self.lora_kernel_b = self.add_weight(
            name="lora_kernel_b",
            shape=(rank, self.filters),
            initializer=initializers.get(b_initializer),
            regularizer=self.kernel_regularizer,
        )
        self._kernel.trainable = False
        self._tracker.lock()
        self.lora_enabled = True
        self.lora_rank = rank
        self.lora_alpha = lora_alpha if lora_alpha is not None else rank

    def save_own_variables(self, store):
        """Save variables; the *effective* (LoRA-merged) kernel is stored."""
        # Do nothing if the layer isn't yet built
        if not self.built:
            return
        target_variables = [self.kernel]
        if self.use_bias:
            target_variables.append(self.bias)
        for i, variable in enumerate(target_variables):
            store[str(i)] = variable

    def load_own_variables(self, store):
        """Restore variables saved by `save_own_variables`."""
        if not self.lora_enabled:
            self._check_load_own_variables(store)
        # Do nothing if the layer isn't yet built
        if not self.built:
            return
        target_variables = [self._kernel]
        if self.use_bias:
            target_variables.append(self.bias)
        for i, variable in enumerate(target_variables):
            variable.assign(store[str(i)])
        if self.lora_enabled:
            # The stored kernel already includes any merged LoRA delta, so
            # the low-rank factors restart from zero.
            self.lora_kernel_a.assign(ops.zeros(self.lora_kernel_a.shape))
            self.lora_kernel_b.assign(ops.zeros(self.lora_kernel_b.shape))

    def get_config(self):
        """Return the serializable config of this layer."""
        config = super().get_config()
        config.update(
            {
                "filters": self.filters,
                "kernel_size": self.kernel_size,
                "strides": self.strides,
                "padding": self.padding,
                "data_format": self.data_format,
                "dilation_rate": self.dilation_rate,
                "groups": self.groups,
                "activation": activations.serialize(self.activation),
                "use_bias": self.use_bias,
                "kernel_initializer": initializers.serialize(
                    self.kernel_initializer
                ),
                "bias_initializer": initializers.serialize(
                    self.bias_initializer
                ),
                "kernel_regularizer": regularizers.serialize(
                    self.kernel_regularizer
                ),
                "bias_regularizer": regularizers.serialize(
                    self.bias_regularizer
                ),
                "activity_regularizer": regularizers.serialize(
                    self.activity_regularizer
                ),
                "kernel_constraint": constraints.serialize(
                    self.kernel_constraint
                ),
                "bias_constraint": constraints.serialize(self.bias_constraint),
            }
        )
        if self.lora_rank:
            config["lora_rank"] = self.lora_rank
            config["lora_alpha"] = self.lora_alpha
        return config

    def _check_load_own_variables(self, store):
        """Raise a descriptive error when the weight store doesn't match."""
        all_vars = self._trainable_variables + self._non_trainable_variables
        if len(store.keys()) != len(all_vars):
            if len(all_vars) == 0 and not self.built:
                raise ValueError(
                    f"Layer '{self.name}' was never built "
                    "and thus it doesn't have any variables. "
                    f"However the weights file lists {len(store.keys())} "
                    "variables for this layer.\n"
                    "In most cases, this error indicates that either:\n\n"
                    "1. The layer is owned by a parent layer that "
                    "implements a `build()` method, but calling the "
                    "parent's `build()` method did NOT create the state of "
                    f"the child layer '{self.name}'. A `build()` method "
                    "must create ALL state for the layer, including "
                    "the state of any children layers.\n\n"
                    "2. You need to implement "
                    "the `def build_from_config(self, config)` method "
                    f"on layer '{self.name}', to specify how to rebuild "
                    "it during loading. "
                    "In this case, you might also want to implement the "
                    "method that generates the build config at saving time, "
                    "`def get_build_config(self)`. "
                    "The method `build_from_config()` is meant "
                    "to create the state "
                    "of the layer (i.e. its variables) upon deserialization.",
                )
            raise ValueError(
                f"Layer '{self.name}' expected {len(all_vars)} variables, "
                "but received "
                f"{len(store.keys())} variables during loading. "
                f"Expected: {[v.name for v in all_vars]}"
            )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/convolutional/base_separable_conv.py | keras/src/layers/convolutional/base_separable_conv.py | """Keras abstract base layer for separable convolution."""
from keras.src import activations
from keras.src import constraints
from keras.src import initializers
from keras.src import ops
from keras.src import regularizers
from keras.src.backend import standardize_data_format
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.ops.operation_utils import compute_conv_output_shape
from keras.src.utils.argument_validation import standardize_padding
from keras.src.utils.argument_validation import standardize_tuple
class BaseSeparableConv(Layer):
    """Abstract base layer for separable convolution.

    This layer performs a depthwise convolution that acts separately on
    channels, followed by a pointwise convolution that mixes channels. If
    `use_bias` is True and a bias initializer is provided, it adds a bias vector
    to the output.

    Args:
        rank: int, the rank of the convolution, e.g. 2 for 2D convolution.
        depth_multiplier: The number of depthwise convolution output channels
            for each input channel. The total number of depthwise convolution
            output channels will be equal to `input_channel * depth_multiplier`.
        filters: int, the dimensionality of the output space (i.e. the number
            of filters in the pointwise convolution).
        kernel_size: int or tuple/list of `rank` integers, specifying the size
            of the depthwise convolution window.
        strides: int or tuple/list of `rank` integers, specifying the stride
            length of the depthwise convolution. If only one int is specified,
            the same stride size will be used for all dimensions.
            `stride value != 1` is incompatible with `dilation_rate != 1`.
        padding: string, either `"valid"` or `"same"` (case-insensitive).
            `"valid"` means no padding. `"same"` results in padding evenly to
            the left/right or up/down of the input. When `padding="same"` and
            `strides=1`, the output has the same size as the input.
        data_format: string, either `"channels_last"` or `"channels_first"`.
            The ordering of the dimensions in the inputs. `"channels_last"`
            corresponds to inputs with shape `(batch, steps, features)`
            while `"channels_first"` corresponds to inputs with shape
            `(batch, features, steps)`. It defaults to the `image_data_format`
            value found in your Keras config file at `~/.keras/keras.json`.
            If you never set it, then it will be `"channels_last"`.
        dilation_rate: int or tuple/list of `rank` integers, specifying the
            dilation rate to use for dilated convolution. If only one int is
            specified, the same dilation rate will be used for all dimensions.
        activation: Activation function. If `None`, no activation is applied.
        use_bias: bool, if `True`, bias will be added to the output.
        depthwise_initializer: An initializer for the depthwise convolution
            kernel. If None, then the default initializer (`"glorot_uniform"`)
            will be used.
        pointwise_initializer: An initializer for the pointwise convolution
            kernel. If None, then the default initializer (`"glorot_uniform"`)
            will be used.
        bias_initializer: An initializer for the bias vector. If None, the
            default initializer ('"zeros"') will be used.
        depthwise_regularizer: Optional regularizer for the depthwise
            convolution kernel.
        pointwise_regularizer: Optional regularizer for the pointwise
            convolution kernel.
        bias_regularizer: Optional regularizer for the bias vector.
        activity_regularizer: Optional regularizer function for the output.
        depthwise_constraint: Optional projection function to be applied to the
            depthwise kernel after being updated by an `Optimizer` (e.g. used
            for norm constraints or value constraints for layer weights). The
            function must take as input the unprojected variable and must return
            the projected variable (which must have the same shape).
        pointwise_constraint: Optional projection function to be applied to the
            pointwise kernel after being updated by an `Optimizer`.
        bias_constraint: Optional projection function to be applied to the
            bias after being updated by an `Optimizer`.
    """

    def __init__(
        self,
        rank,
        depth_multiplier,
        filters,
        kernel_size,
        strides=1,
        padding="valid",
        data_format=None,
        dilation_rate=1,
        activation=None,
        use_bias=True,
        depthwise_initializer="glorot_uniform",
        pointwise_initializer="glorot_uniform",
        bias_initializer="zeros",
        depthwise_regularizer=None,
        pointwise_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        depthwise_constraint=None,
        pointwise_constraint=None,
        bias_constraint=None,
        trainable=True,
        name=None,
        **kwargs,
    ):
        super().__init__(
            trainable=trainable,
            name=name,
            activity_regularizer=regularizers.get(activity_regularizer),
            **kwargs,
        )
        self.rank = rank
        self.depth_multiplier = depth_multiplier
        self.filters = filters
        # Scalars are broadcast to `rank`-length tuples so the rest of the
        # class can always assume tuple-valued hyperparameters.
        self.kernel_size = standardize_tuple(kernel_size, rank, "kernel_size")
        self.strides = standardize_tuple(strides, rank, "strides")
        self.dilation_rate = standardize_tuple(
            dilation_rate, rank, "dilation_rate"
        )
        self.padding = standardize_padding(padding)
        self.data_format = standardize_data_format(data_format)
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.depthwise_initializer = initializers.get(depthwise_initializer)
        self.pointwise_initializer = initializers.get(pointwise_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
        self.pointwise_regularizer = regularizers.get(pointwise_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.depthwise_constraint = constraints.get(depthwise_constraint)
        self.pointwise_constraint = constraints.get(pointwise_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        # (Removed a redundant no-op `self.data_format = self.data_format`
        # self-assignment that was previously here.)
        self.input_spec = InputSpec(min_ndim=self.rank + 2)
        if self.depth_multiplier is not None and self.depth_multiplier <= 0:
            raise ValueError(
                "Invalid value for argument `depth_multiplier`. Expected a "
                "strictly positive value. Received "
                f"depth_multiplier={self.depth_multiplier}."
            )
        if self.filters is not None and self.filters <= 0:
            raise ValueError(
                "Invalid value for argument `filters`. Expected a strictly "
                f"positive value. Received filters={self.filters}."
            )
        if not all(self.kernel_size):
            raise ValueError(
                "The argument `kernel_size` cannot contain 0. Received: "
                f"kernel_size={self.kernel_size}."
            )
        if not all(self.strides):
            raise ValueError(
                "The argument `strides` cannot contain 0(s). Received: "
                f"strides={self.strides}"
            )
        if max(self.strides) > 1 and max(self.dilation_rate) > 1:
            raise ValueError(
                "`strides > 1` not supported in conjunction with "
                f"`dilation_rate > 1`. Received: strides={self.strides} and "
                f"dilation_rate={self.dilation_rate}"
            )

    def build(self, input_shape):
        """Create the depthwise kernel, pointwise kernel and optional bias."""
        if self.data_format == "channels_last":
            channel_axis = -1
            input_channel = input_shape[-1]
        else:
            channel_axis = 1
            input_channel = input_shape[1]
        self.input_spec = InputSpec(
            min_ndim=self.rank + 2, axes={channel_axis: input_channel}
        )
        # The depthwise step expands each input channel into
        # `depth_multiplier` channels ...
        depthwise_kernel_shape = self.kernel_size + (
            input_channel,
            self.depth_multiplier,
        )
        # ... and the pointwise step (a 1x...x1 conv) mixes all
        # `depth_multiplier * input_channel` channels down to `filters`.
        pointwise_kernel_shape = (1,) * self.rank + (
            self.depth_multiplier * input_channel,
            self.filters,
        )

        self.depthwise_kernel = self.add_weight(
            name="depthwise_kernel",
            shape=depthwise_kernel_shape,
            initializer=self.depthwise_initializer,
            regularizer=self.depthwise_regularizer,
            constraint=self.depthwise_constraint,
            trainable=True,
            dtype=self.dtype,
        )
        self.pointwise_kernel = self.add_weight(
            name="pointwise_kernel",
            shape=pointwise_kernel_shape,
            initializer=self.pointwise_initializer,
            regularizer=self.pointwise_regularizer,
            constraint=self.pointwise_constraint,
            trainable=True,
            dtype=self.dtype,
        )
        if self.use_bias:
            self.bias = self.add_weight(
                name="bias",
                shape=(self.filters,),
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
                trainable=True,
                dtype=self.dtype,
            )
        else:
            self.bias = None

    def call(self, inputs):
        """Compute `activation(separable_conv(inputs) + bias)`."""
        outputs = ops.separable_conv(
            inputs,
            self.depthwise_kernel,
            self.pointwise_kernel,
            strides=self.strides,
            padding=self.padding,
            dilation_rate=self.dilation_rate,
            data_format=self.data_format,
        )
        if self.use_bias:
            # Reshape the bias so it broadcasts along the channel axis for
            # either data format.
            if self.data_format == "channels_last":
                bias_shape = (1,) * (self.rank + 1) + (self.filters,)
            else:
                bias_shape = (1, self.filters) + (1,) * self.rank
            bias = ops.reshape(self.bias, bias_shape)
            outputs = ops.add(outputs, bias)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs

    def compute_output_shape(self, input_shape):
        """Return the conv output shape for `input_shape` (also validates)."""
        return compute_conv_output_shape(
            input_shape,
            self.filters,
            self.kernel_size,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate,
        )

    def get_config(self):
        """Return the serializable config of this layer."""
        config = super().get_config()
        config.update(
            {
                "depth_multiplier": self.depth_multiplier,
                "filters": self.filters,
                "kernel_size": self.kernel_size,
                "strides": self.strides,
                "padding": self.padding,
                "data_format": self.data_format,
                "dilation_rate": self.dilation_rate,
                "activation": activations.serialize(self.activation),
                "use_bias": self.use_bias,
                "depthwise_initializer": initializers.serialize(
                    self.depthwise_initializer
                ),
                "pointwise_initializer": initializers.serialize(
                    self.pointwise_initializer
                ),
                "bias_initializer": initializers.serialize(
                    self.bias_initializer
                ),
                "depthwise_regularizer": regularizers.serialize(
                    self.depthwise_regularizer
                ),
                "pointwise_regularizer": regularizers.serialize(
                    self.pointwise_regularizer
                ),
                "bias_regularizer": regularizers.serialize(
                    self.bias_regularizer
                ),
                "activity_regularizer": regularizers.serialize(
                    self.activity_regularizer
                ),
                "depthwise_constraint": constraints.serialize(
                    self.depthwise_constraint
                ),
                "pointwise_constraint": constraints.serialize(
                    self.pointwise_constraint
                ),
                "bias_constraint": constraints.serialize(self.bias_constraint),
            }
        )
        return config
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/normalization/rms_normalization.py | keras/src/layers/normalization/rms_normalization.py | from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.RMSNormalization")
class RMSNormalization(Layer):
    """Root Mean Square (RMS) Normalization layer.

    Normalizes the input tensor by its RMS statistic, as described in
    [Root Mean Square Layer Normalization](https://arxiv.org/pdf/1910.07467)
    by Biao Zhang et al. A learnable `scale` factor is then applied to the
    normalized output, so the full operation is:

    ```python
    rms_normalization(x) = x * rsqrt(mean(square(x))) * scale
    ```

    For example:

    >>> layer = keras.layers.RMSNormalization()
    >>> layer.build([5, 20, 30, 10])
    >>> print(layer.scale.shape)
    (10,)
    >>> layer(np.random.rand(1, 10)).numpy()
    array([[0.35098287, 1.0495652 , 1.4645109 , 1.2944688 , 0.31124955,
        1.2768592 , 1.184331 , 0.17474432, 0.49955517, 1.2428929 ]],
        dtype=float32)

    Args:
        axis: int. The axis on which to perform the normalization.
        epsilon: float. A small number to add to avoid division by zero.
    """

    def __init__(self, axis=-1, epsilon=1e-6, **kwargs):
        super().__init__(**kwargs)
        self.axis = axis
        self.epsilon = epsilon

    def build(self, input_shape):
        if isinstance(self.axis, list):
            scale_shape = tuple(input_shape[dim] for dim in self.axis)
        else:
            scale_shape = (input_shape[self.axis],)
            # Canonicalize `axis` to list form so `call` always passes a
            # list to the backend op.
            self.axis = [self.axis]
        self.scale = self.add_weight(
            name="scale", shape=scale_shape, initializer="ones"
        )
        self.built = True

    def call(self, x):
        """Applies RMS normalization to the input tensor.

        Args:
            x: Input tensor of shape (batch_size, input_dim).

        Returns:
            The RMS-normalized tensor of the same shape (batch_size, input_dim),
            scaled by the learned `scale` parameter.
        """
        return ops.rms_normalization(
            x, scale=self.scale, axis=self.axis, epsilon=self.epsilon
        )

    def compute_output_shape(self, input_shape):
        # Shape-preserving layer; only validate that every axis is within
        # the input's rank.
        axes = [self.axis] if isinstance(self.axis, int) else self.axis
        rank = len(input_shape)
        for ax in axes:
            if ax >= rank or ax < -rank:
                raise ValueError(
                    f"Axis {ax} is out of bounds for "
                    f"input shape {input_shape}. "
                    f"Received: axis={self.axis}"
                )
        return input_shape

    def get_config(self):
        base_config = super().get_config()
        return {**base_config, "axis": self.axis, "epsilon": self.epsilon}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/normalization/batch_normalization_test.py | keras/src/layers/normalization/batch_normalization_test.py | import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import ops
from keras.src import testing
from keras.src.losses import MeanSquaredError
from keras.src.models import Model
class BatchNormalizationTest(testing.TestCase):
    """Unit tests for `keras.layers.BatchNormalization`."""

    @pytest.mark.requires_trainable_backend
    def test_bn_basics(self):
        # vector case
        self.run_layer_test(
            layers.BatchNormalization,
            init_kwargs={
                "center": True,
                "scale": True,
            },
            call_kwargs={"training": True},
            input_shape=(2, 3),
            expected_output_shape=(2, 3),
            expected_num_trainable_weights=2,
            expected_num_non_trainable_weights=2,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=True,
        )
        self.run_layer_test(
            layers.BatchNormalization,
            init_kwargs={
                "center": False,
                "scale": False,
            },
            call_kwargs={"training": True},
            input_shape=(2, 3),
            expected_output_shape=(2, 3),
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=2,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=True,
        )
        # image case, with regularizers
        self.run_layer_test(
            layers.BatchNormalization,
            init_kwargs={
                "center": True,
                "scale": True,
                "beta_regularizer": "l2",
                "gamma_regularizer": "l2",
            },
            call_kwargs={"training": True},
            input_shape=(2, 4, 4, 3),
            expected_output_shape=(2, 4, 4, 3),
            expected_num_trainable_weights=2,
            expected_num_non_trainable_weights=2,
            expected_num_seed_generators=0,
            expected_num_losses=2,  # we have 2 regularizers.
            supports_masking=True,
        )

    @parameterized.product(
        axis=(-1, 1),
        input_shape=((5, 2, 3), (5, 3, 3, 2)),
        moving_mean_initializer=("zeros", "ones"),
        moving_variance_initializer=("zeros", "ones"),
    )
    def test_correctness(
        self,
        axis,
        input_shape,
        moving_mean_initializer,
        moving_variance_initializer,
    ):
        """Checks normalization statistics, moving averages and masking."""
        # Training
        layer = layers.BatchNormalization(
            axis=axis,
            momentum=0,
            moving_mean_initializer=moving_mean_initializer,
            moving_variance_initializer=moving_variance_initializer,
        )
        # Random data centered on 5.0, variance 10.0
        x = np.random.normal(loc=5.0, scale=10.0, size=input_shape)
        out = x
        for _ in range(3):
            out = layer(out, training=True)

        # Assert the normalization is correct: undo beta/gamma and verify
        # the result is standardized along the non-normalized axes.
        broadcast_shape = [1] * len(input_shape)
        broadcast_shape[axis] = input_shape[axis]
        out = backend.convert_to_numpy(out)
        out = out - np.reshape(
            backend.convert_to_numpy(layer.beta), broadcast_shape
        )
        out = out / np.reshape(
            backend.convert_to_numpy(layer.gamma), broadcast_shape
        )

        reduction_axes = list(range(len(input_shape)))
        del reduction_axes[axis]
        reduction_axes = tuple(reduction_axes)
        self.assertAllClose(np.mean(out, axis=reduction_axes), 0.0, atol=1e-3)
        self.assertAllClose(np.std(out, axis=reduction_axes), 1.0, atol=1e-3)
        self.assertAllClose(layer.moving_mean, 0.0, atol=1e-3)
        self.assertAllClose(layer.moving_variance, 1.0, atol=1e-3)

        # Inference done before training shouldn't match.
        inference_out = layer(x, training=False)
        training_out = layer(x, training=True)
        self.assertNotAllClose(inference_out, training_out)

        # Since momentum is zero, inference after training should match.
        training_out = layer(x, training=True)
        inference_out = layer(x, training=False)
        self.assertAllClose(inference_out, training_out)

        # Masked result with no training should not differ
        x[:, 1, :] = 0.0
        unmasked_out = layer(x, training=False)
        masked = layers.Masking()(x)
        masked_out = layer(masked, training=False)
        self.assertAllClose(unmasked_out, masked_out)

        # Masked result should differ from unmasked result
        unmasked_out = layer(x, training=False)
        x[:, 1, :] = 0.0
        masked = layers.Masking()(x)
        masked_out = layer(masked, training=True)
        self.assertNotAllClose(unmasked_out, masked_out)

    @parameterized.product(
        synchronized=(
            # Bug fix: `backend.backend` is a function. The previous
            # comparison `backend.backend == "tensorflow"` compared the
            # function object to a string, which is always False, so the
            # `synchronized=True` variant was never tested on the
            # TensorFlow backend. Calling it returns the backend name.
            (False, True) if backend.backend() == "tensorflow" else (False,)
        ),
    )
    def test_input_fully_masked(self, synchronized):
        """A fully-masked batch must produce all-zero outputs, not NaNs."""
        norm = layers.BatchNormalization(
            scale=False,
            center=False,
            synchronized=synchronized,
        )
        x = np.zeros((4, 5))
        mask = np.zeros((4,), dtype=np.float32)
        y = norm(x, mask=mask, training=True)
        self.assertAllClose(y, np.zeros_like(x, dtype=np.float32))

    @parameterized.product(run_eagerly=(True, False), mask_value=(0.0, 0.1, 1))
    @pytest.mark.requires_trainable_backend
    def test_bachnorm_ignore_masked_values(self, run_eagerly, mask_value):
        """Masked timesteps must not contribute to the moving statistics."""
        # NOTE(review): "bachnorm" is a typo, but the method name is kept
        # for backward compatibility with test selection by name.
        padded_data = np.array(
            [
                [
                    [1, 5],
                    [2, 5],
                    [mask_value, mask_value],
                    [mask_value, mask_value],
                ]
                for _ in range(10)
            ],
            dtype="float32",
        )

        inputs = layers.Input((None, 2))
        masked = layers.Masking(mask_value=mask_value)(inputs)
        normed = layers.BatchNormalization(momentum=0.0)(masked)
        model = Model(inputs, normed)
        loss = MeanSquaredError()
        model.compile(
            "rmsprop",
            loss=loss,
            run_eagerly=run_eagerly,
        )
        model.fit(x=padded_data, y=padded_data, batch_size=10, epochs=5)
        self.assertAllClose(model.layers[2].moving_mean.numpy(), [1.5, 5.0])
        self.assertAllClose(
            model.layers[2].moving_variance.numpy(), [0.25, 0.0]
        )

    def test_trainable_behavior(self):
        """`trainable=False` must freeze stats and make the layer a no-op."""
        layer = layers.BatchNormalization(axis=-1, momentum=0.8, epsilon=1e-7)
        layer.build((1, 4, 4, 3))
        layer.trainable = False
        self.assertEqual(len(layer.weights), 4)
        self.assertEqual(len(layer.trainable_weights), 0)
        self.assertEqual(len(layer.non_trainable_weights), 4)

        # Random data centered on 5.0, variance 10.0
        x = np.random.normal(loc=5.0, scale=10.0, size=(200, 4, 4, 3))
        out = layer(x, training=True)
        self.assertAllClose(out, x)

        layer.trainable = True
        self.assertEqual(len(layer.weights), 4)
        self.assertEqual(len(layer.trainable_weights), 2)
        self.assertEqual(len(layer.non_trainable_weights), 2)

        for _ in range(10):
            out = layer(x, training=True)

        out = backend.convert_to_numpy(out)
        out = out - np.reshape(
            backend.convert_to_numpy(layer.beta), (1, 1, 1, 3)
        )
        out = out / np.reshape(
            backend.convert_to_numpy(layer.gamma), (1, 1, 1, 3)
        )

        self.assertAllClose(np.mean(out, axis=(0, 1, 2)), 0.0, atol=1e-3)
        self.assertAllClose(np.std(out, axis=(0, 1, 2)), 1.0, atol=1e-3)

    def test_large_value_within_autocast_scope(self):
        """Moving-variance assignment must not overflow under autocast."""
        layer = layers.BatchNormalization()
        layer.build((1, 4, 4, 3))
        # Use 70000 to trigger overflow for float16
        large_value = ops.full(layer.moving_variance.shape, 70000)
        with backend.AutocastScope("float16"):
            layer.moving_variance.assign(large_value)
        self.assertAllClose(layer.moving_variance.value, large_value)

    def test_masked_broadcast_normalization(self):
        """A broadcastable mask of ones behaves like no mask at all."""
        input_shape = (1, 2, 3, 4)
        mask_shape = (1, 2, 1)
        x = ops.ones(input_shape)
        mask = ops.ones(mask_shape)

        layer = layers.BatchNormalization(axis=-1, momentum=0.0, epsilon=1e-3)
        y = layer(x, training=True, mask=mask)

        mean_y = ops.mean(y, axis=[0, 1, 2])
        self.assertAllClose(mean_y, ops.zeros((4,)), atol=1e-6)
        self.assertAllClose(y, ops.zeros_like(y), atol=1e-6)
        self.assertAllClose(layer.moving_mean, ops.ones((4,)), atol=1e-6)
        self.assertAllClose(layer.moving_variance, ops.zeros((4,)), atol=1e-6)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/normalization/rms_normalization_test.py | keras/src/layers/normalization/rms_normalization_test.py | import numpy as np
import pytest
from keras.src import layers
from keras.src import ops
from keras.src import testing
class RMSNormalizationTest(testing.TestCase):
    """Unit tests for `keras.layers.RMSNormalization`."""

    @pytest.mark.requires_trainable_backend
    def test_ln_basics(self):
        # Standard layer checks (serialization, weight counts, output shape)
        # with default args and with an explicit `axis`.
        self.run_layer_test(
            layers.RMSNormalization,
            init_kwargs={},
            input_shape=(4, 2),
            expected_output_shape=(4, 2),
            expected_num_trainable_weights=1,
            expected_num_seed_generators=0,
        )
        self.run_layer_test(
            layers.RMSNormalization,
            init_kwargs={
                "axis": -1,
            },
            input_shape=(4, 2),
            expected_output_shape=(4, 2),
            expected_num_trainable_weights=1,
            expected_num_seed_generators=0,
        )

    def test_correctness(self):
        # Compare the layer output against a manual reference computation:
        # x * rsqrt(mean(x^2, last axis)) * scale.
        layer = layers.RMSNormalization()
        layer.build(input_shape=(2, 2, 2))

        inputs = np.random.normal(
            loc=5.0, scale=10.0, size=(1000, 2, 2, 2)
        ).astype("float32")
        inputs = ops.convert_to_tensor(inputs)
        out = layer(inputs)

        expected = ops.multiply(
            ops.multiply(
                inputs,
                ops.rsqrt(ops.mean(ops.square(inputs), axis=-1, keepdims=True)),
            ),
            layer.scale,
        )

        self.assertAllClose(out, expected, atol=1e-1)

    def test_output(self):
        # Golden-value check on a small deterministic input (scale is all
        # ones after build, so the output is x / rms(x)).
        layer = layers.RMSNormalization()
        inputs = np.arange(10).astype("float32")[None, :]
        out = layer(inputs)
        self.assertAllClose(
            out,
            [
                [
                    0.0,
                    0.18731716,
                    0.37463433,
                    0.5619515,
                    0.74926865,
                    0.9365858,
                    1.123903,
                    1.3112202,
                    1.4985373,
                    1.6858544,
                ]
            ],
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/normalization/unit_normalization.py | keras/src/layers/normalization/unit_normalization.py | from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.UnitNormalization")
class UnitNormalization(Layer):
    """Unit normalization layer.

    Rescales each input in the batch so that its L2 norm across the axes
    given by `axis` equals 1.

    Example:

    >>> data = np.arange(6).reshape(2, 3)
    >>> normalized_data = keras.layers.UnitNormalization()(data)
    >>> np.sum(normalized_data[0, :] ** 2)
    1.0

    Args:
        axis: Integer or list/tuple. The axis or axes to normalize across.
            Typically, this is the features axis or axes. The left-out axes
            are typically the batch axis or axes. `-1` is the last dimension
            in the input. Defaults to `-1`.
    """

    def __init__(self, axis=-1, **kwargs):
        super().__init__(**kwargs)
        if isinstance(axis, int):
            self.axis = axis
        elif isinstance(axis, (list, tuple)):
            # Normalize to a plain list for serialization consistency.
            self.axis = list(axis)
        else:
            raise TypeError(
                "Invalid value for `axis` argument: "
                "expected an int or a list/tuple of ints. "
                f"Received: axis={axis}"
            )
        self.supports_masking = True
        self._build_at_init()

    def call(self, inputs):
        # L2-normalize; the epsilon guards against division by zero.
        return ops.normalize(inputs, axis=self.axis, order=2, epsilon=1e-12)

    def compute_output_shape(self, input_shape):
        """Validate the configured axes; shape is unchanged by this layer."""
        axes = [self.axis] if isinstance(self.axis, int) else self.axis
        rank = len(input_shape)
        for ax in axes:
            if ax >= rank or ax < -rank:
                raise ValueError(
                    f"Axis {self.axis} is out of bounds for "
                    f"input shape {input_shape}."
                )
        return input_shape

    def get_config(self):
        base = super().get_config()
        return {**base, "axis": self.axis}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/normalization/unit_normalization_test.py | keras/src/layers/normalization/unit_normalization_test.py | import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src import testing
def squared_l2_norm(x):
    """Return the sum of squared entries of `x` as a numpy scalar."""
    arr = backend.convert_to_numpy(x)
    return np.sum(np.square(arr))
class UnitNormalizationTest(testing.TestCase):
    """Unit tests for `keras.layers.UnitNormalization`."""

    @pytest.mark.requires_trainable_backend
    def test_un_basics(self):
        # Standard layer checks for a single axis and for a tuple of axes.
        self.run_layer_test(
            layers.UnitNormalization,
            init_kwargs={"axis": -1},
            input_shape=(2, 3),
            expected_output_shape=(2, 3),
            supports_masking=True,
            assert_built_after_instantiation=True,
        )
        self.run_layer_test(
            layers.UnitNormalization,
            init_kwargs={"axis": (1, 2)},
            input_shape=(1, 3, 3),
            expected_output_shape=(1, 3, 3),
            supports_masking=True,
            assert_built_after_instantiation=True,
        )

    def test_invalid_axis(self):
        # A non-int, non-list/tuple axis must be rejected at construction.
        with self.assertRaisesRegex(
            TypeError,
            (
                "Invalid value for `axis` argument: expected an int or a "
                "list/tuple of ints."
            ),
        ):
            layers.UnitNormalization(axis={"axis": -1})

    def test_correctness(self):
        # After normalization, the squared L2 norm over the normalized axes
        # must equal 1 for every sample (and every non-normalized slice).
        layer = layers.UnitNormalization(axis=-1)
        inputs = np.random.normal(size=(2, 3))
        outputs = layer(inputs)
        self.assertAllClose(squared_l2_norm(outputs[0, :]), 1.0)
        self.assertAllClose(squared_l2_norm(outputs[1, :]), 1.0)

        layer = layers.UnitNormalization(axis=(1, 2))
        inputs = np.random.normal(size=(2, 3, 3))
        outputs = layer(inputs)
        self.assertAllClose(squared_l2_norm(outputs[0, :, :]), 1.0)
        self.assertAllClose(squared_l2_norm(outputs[1, :, :]), 1.0)

        layer = layers.UnitNormalization(axis=1)
        inputs = np.random.normal(size=(2, 3, 2))
        outputs = layer(inputs)
        self.assertAllClose(squared_l2_norm(outputs[0, :, 0]), 1.0)
        self.assertAllClose(squared_l2_norm(outputs[1, :, 0]), 1.0)
        self.assertAllClose(squared_l2_norm(outputs[0, :, 1]), 1.0)
        self.assertAllClose(squared_l2_norm(outputs[1, :, 1]), 1.0)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/normalization/layer_normalization_test.py | keras/src/layers/normalization/layer_normalization_test.py | import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src import ops
from keras.src import regularizers
from keras.src import testing
class LayerNormalizationTest(testing.TestCase):
    """Unit tests for `keras.layers.LayerNormalization`."""

    @pytest.mark.requires_trainable_backend
    def test_ln_basics(self):
        # Standard layer checks across several configurations: regularizers,
        # custom initializers, disabled scale/center, rms_scaling, multiple
        # axes, and a zero-length time dimension.
        self.run_layer_test(
            layers.LayerNormalization,
            init_kwargs={
                "gamma_regularizer": regularizers.L2(0.01),
                "beta_regularizer": regularizers.L2(0.01),
            },
            input_shape=(3, 4, 2),
            expected_output_shape=(3, 4, 2),
            expected_num_trainable_weights=2,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=2,
            supports_masking=True,
        )
        self.run_layer_test(
            layers.LayerNormalization,
            init_kwargs={
                "gamma_initializer": "ones",
                "beta_initializer": "ones",
            },
            input_shape=(3, 4, 2),
            expected_output_shape=(3, 4, 2),
            expected_num_trainable_weights=2,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=True,
        )
        self.run_layer_test(
            layers.LayerNormalization,
            init_kwargs={"scale": False, "center": False},
            input_shape=(3, 3),
            expected_output_shape=(3, 3),
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=True,
        )
        self.run_layer_test(
            layers.LayerNormalization,
            init_kwargs={"rms_scaling": True},
            input_shape=(3, 3),
            expected_output_shape=(3, 3),
            expected_num_trainable_weights=1,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=True,
        )
        self.run_layer_test(
            layers.LayerNormalization,
            init_kwargs={"axis": (-3, -2, -1)},
            input_shape=(2, 8, 8, 3),
            expected_output_shape=(2, 8, 8, 3),
            expected_num_trainable_weights=2,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=True,
        )
        self.run_layer_test(
            layers.LayerNormalization,
            init_kwargs={},
            input_shape=(1, 0, 10),
            expected_output_shape=(1, 0, 10),
            expected_num_trainable_weights=2,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=True,
        )

    def test_invalid_axis(self):
        # A dict is neither an int nor a list/tuple of ints.
        with self.assertRaisesRegex(
            TypeError,
            ("Expected an int or a list/tuple of ints for the argument 'axis'"),
        ):
            layers.LayerNormalization(axis={"axis": -1})

    def test_correctness(self):
        # After undoing beta/gamma, the output should be standardized
        # (zero mean, unit std) per example.
        layer = layers.LayerNormalization(dtype="float32")
        layer.build(input_shape=(2, 2, 2))
        inputs = np.random.normal(
            loc=5.0, scale=10.0, size=(1000, 2, 2, 2)
        ).astype("float32")

        out = layer(inputs)
        out = ops.subtract(out, layer.beta)
        out = ops.divide(out, layer.gamma)

        self.assertAllClose(ops.mean(out), 0.0, atol=1e-1)
        self.assertAllClose(ops.std(out), 1.0, atol=1e-1)

    def test_output(self):
        # Golden-value check with beta and gamma initialized to ones.
        layer = layers.LayerNormalization(
            dtype="float32",
            beta_initializer="ones",
            gamma_initializer="ones",
        )
        inputs = np.arange(5).astype("float32")[None, :]
        out = layer(inputs)
        self.assertAllClose(out, [[-0.41386, 0.29307, 1.0, 1.70693, 2.41386]])

    def test_output_with_rms_scaling(self):
        # Golden-value check for the deprecated `rms_scaling` code path.
        layer = layers.LayerNormalization(
            dtype="float32",
            rms_scaling=True,
            gamma_initializer="ones",
        )
        inputs = np.arange(5).astype("float32")[None, :]
        out = layer(inputs)
        self.assertAllClose(out, [[0.0, 0.70693, 1.41386, 2.12079, 2.82772]])

    def test_large_value_within_autocast_scope(self):
        # gamma is created with autocast=False, so assigning a value that
        # would overflow float16 must still succeed under autocast.
        layer = layers.LayerNormalization()
        layer.build((1, 4, 4, 3))
        # Use 70000 to trigger overflow for float16
        large_value = ops.full(layer.gamma.shape, 70000)
        with backend.AutocastScope("float16"):
            layer.gamma.assign(large_value)
        self.assertAllClose(layer.gamma.value, large_value)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/normalization/group_normalization.py | keras/src/layers/normalization/group_normalization.py | from keras.src import backend
from keras.src import constraints
from keras.src import initializers
from keras.src import ops
from keras.src import regularizers
from keras.src.api_export import keras_export
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
@keras_export("keras.layers.GroupNormalization")
class GroupNormalization(Layer):
    """Group normalization layer.

    Group Normalization divides the channels into groups and computes
    within each group the mean and variance for normalization.
    Empirically, its accuracy is more stable than batch norm in a wide
    range of small batch sizes, if learning rate is adjusted linearly
    with batch sizes.

    Relation to Layer Normalization:
    If the number of groups is set to 1, then this operation becomes nearly
    identical to Layer Normalization (see Layer Normalization docs for details).

    Relation to Instance Normalization:
    If the number of groups is set to the input dimension (number of groups is
    equal to number of channels), then this operation becomes identical to
    Instance Normalization. You can achieve this via `groups=-1`.

    Args:
        groups: Integer, the number of groups for Group Normalization. Can be in
            the range `[1, N]` where N is the input dimension. The input
            dimension must be divisible by the number of groups.
            Defaults to 32.
        axis: Integer or List/Tuple. The axis or axes to normalize across.
            Typically, this is the features axis/axes. The left-out axes are
            typically the batch axis/axes. -1 is the last dimension in the
            input. Defaults to `-1`.
        epsilon: Small float added to variance to avoid dividing by zero.
            Defaults to 1e-3.
        center: If `True`, add offset of `beta` to normalized tensor.
            If `False`, `beta` is ignored. Defaults to `True`.
        scale: If `True`, multiply by `gamma`. If `False`, `gamma` is not used.
            When the next layer is linear (also e.g. `relu`), this can be
            disabled since the scaling will be done by the next layer.
            Defaults to `True`.
        beta_initializer: Initializer for the beta weight. Defaults to zeros.
        gamma_initializer: Initializer for the gamma weight. Defaults to ones.
        beta_regularizer: Optional regularizer for the beta weight. None by
            default.
        gamma_regularizer: Optional regularizer for the gamma weight. None by
            default.
        beta_constraint: Optional constraint for the beta weight.
            None by default.
        gamma_constraint: Optional constraint for the gamma weight. None by
            default. Input shape: Arbitrary. Use the keyword argument
            `input_shape` (tuple of integers, does not include the samples
            axis) when using this layer as the first layer in a model.
            Output shape: Same shape as input.
        **kwargs: Base layer keyword arguments (e.g. `name` and `dtype`).

    Reference:

    - [Yuxin Wu & Kaiming He, 2018](https://arxiv.org/abs/1803.08494)
    """

    def __init__(
        self,
        groups=32,
        axis=-1,
        epsilon=1e-3,
        center=True,
        scale=True,
        beta_initializer="zeros",
        gamma_initializer="ones",
        beta_regularizer=None,
        gamma_regularizer=None,
        beta_constraint=None,
        gamma_constraint=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.supports_masking = True
        self.groups = groups
        self.axis = axis
        self.epsilon = epsilon
        self.center = center
        self.scale = scale
        self.beta_initializer = initializers.get(beta_initializer)
        self.gamma_initializer = initializers.get(gamma_initializer)
        self.beta_regularizer = regularizers.get(beta_regularizer)
        self.gamma_regularizer = regularizers.get(gamma_regularizer)
        self.beta_constraint = constraints.get(beta_constraint)
        self.gamma_constraint = constraints.get(gamma_constraint)

    def build(self, input_shape):
        """Validate `groups` against the channel count and create weights.

        Raises:
            ValueError: if the channel axis has no static dimension, or if
                the channel count is smaller than or not divisible by
                `groups`.
        """
        dim = input_shape[self.axis]
        if dim is None:
            raise ValueError(
                f"Axis {self.axis} of input tensor should have a defined "
                "dimension but the layer received an input with shape "
                f"{input_shape}."
            )

        # `groups=-1` means one group per channel (instance normalization).
        if self.groups == -1:
            self.groups = dim

        if dim < self.groups:
            raise ValueError(
                f"Number of groups ({self.groups}) cannot be more than the "
                f"number of channels ({dim})."
            )

        if dim % self.groups != 0:
            raise ValueError(
                f"Number of groups ({self.groups}) must be a multiple "
                f"of the number of channels ({dim})."
            )

        self.input_spec = InputSpec(
            ndim=len(input_shape), axes={self.axis: dim}
        )

        # gamma/beta span the full channel dimension (not per-group).
        if self.scale:
            self.gamma = self.add_weight(
                shape=(dim,),
                name="gamma",
                initializer=self.gamma_initializer,
                regularizer=self.gamma_regularizer,
                constraint=self.gamma_constraint,
            )
        else:
            self.gamma = None

        if self.center:
            self.beta = self.add_weight(
                shape=(dim,),
                name="beta",
                initializer=self.beta_initializer,
                regularizer=self.beta_regularizer,
                constraint=self.beta_constraint,
            )
        else:
            self.beta = None

        super().build(input_shape)

    def call(self, inputs):
        """Normalize `inputs` group-wise and restore the original shape."""
        reshaped_inputs = self._reshape_into_groups(inputs)
        normalized_inputs = self._apply_normalization(
            reshaped_inputs, inputs.shape
        )
        return ops.reshape(normalized_inputs, ops.shape(inputs))

    def _reshape_into_groups(self, inputs):
        """Split the channel axis into (groups, channels_per_group).

        Static dims are kept where known; unknown dims fall back to the
        dynamic shape, and the batch dim is left as -1.
        """
        input_shape = ops.shape(inputs)
        group_shape = list(inputs.shape)
        group_shape[0] = -1
        for i, e in enumerate(group_shape[1:]):
            if e is None:
                group_shape[i + 1] = input_shape[i + 1]

        group_shape[self.axis] = input_shape[self.axis] // self.groups
        group_shape.insert(self.axis, self.groups)
        reshaped_inputs = ops.reshape(inputs, group_shape)
        return reshaped_inputs

    def _apply_normalization(self, reshaped_inputs, input_shape):
        """Normalize the grouped tensor; returns it still in grouped shape."""
        inputs_dtype = reshaped_inputs.dtype
        compute_dtype = backend.result_type(inputs_dtype, "float32")
        # GN is prone to overflow with float16/bfloat16 inputs, so we upcast to
        # float32 for the subsequent computations.
        reshaped_inputs = ops.cast(reshaped_inputs, compute_dtype)

        group_reduction_axes = list(range(1, len(reshaped_inputs.shape)))
        # The grouped tensor has an extra `groups` dim inserted before the
        # channel axis, so the group axis to keep is shifted by one.
        axis = -2 if self.axis == -1 else self.axis - 1
        group_reduction_axes.pop(axis)

        broadcast_shape = self._create_broadcast_shape(input_shape)
        mean, variance = ops.moments(
            reshaped_inputs, axes=group_reduction_axes, keepdims=True
        )

        # Compute the batch normalization.
        inv = ops.rsqrt(variance + self.epsilon)
        if self.scale:
            gamma = ops.reshape(self.gamma, broadcast_shape)
            gamma = ops.cast(gamma, reshaped_inputs.dtype)
            inv = inv * gamma

        res = -mean * inv
        if self.center:
            beta = ops.reshape(self.beta, broadcast_shape)
            beta = ops.cast(beta, reshaped_inputs.dtype)
            res = res + beta

        # x * inv + (beta - mean * inv)  ==  (x - mean) * inv + beta
        normalized_inputs = reshaped_inputs * inv + res
        normalized_inputs = ops.cast(normalized_inputs, inputs_dtype)
        return normalized_inputs

    def _create_broadcast_shape(self, input_shape):
        """Shape that broadcasts gamma/beta against the grouped tensor."""
        broadcast_shape = [1] * len(input_shape)
        broadcast_shape[self.axis] = input_shape[self.axis] // self.groups
        broadcast_shape.insert(self.axis, self.groups)
        return broadcast_shape

    def compute_output_shape(self, input_shape):
        """Validate the configured axes; the output shape equals the input."""
        if isinstance(self.axis, int):
            axes = [self.axis]
        else:
            axes = self.axis

        for axis in axes:
            if axis >= len(input_shape) or axis < -len(input_shape):
                raise ValueError(
                    f"Axis {axis} is out of bounds for "
                    f"input shape {input_shape}. "
                    f"Received: axis={self.axis}"
                )
        return input_shape

    def get_config(self):
        """Serialize constructor arguments for layer reconstruction."""
        config = {
            "groups": self.groups,
            "axis": self.axis,
            "epsilon": self.epsilon,
            "center": self.center,
            "scale": self.scale,
            "beta_initializer": initializers.serialize(self.beta_initializer),
            "gamma_initializer": initializers.serialize(self.gamma_initializer),
            "beta_regularizer": regularizers.serialize(self.beta_regularizer),
            "gamma_regularizer": regularizers.serialize(self.gamma_regularizer),
            "beta_constraint": constraints.serialize(self.beta_constraint),
            "gamma_constraint": constraints.serialize(self.gamma_constraint),
        }
        base_config = super().get_config()
        return {**base_config, **config}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/normalization/layer_normalization.py | keras/src/layers/normalization/layer_normalization.py | import warnings
from keras.src import constraints
from keras.src import initializers
from keras.src import ops
from keras.src import regularizers
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.LayerNormalization")
class LayerNormalization(Layer):
    """Layer normalization layer (Ba et al., 2016).

    Normalize the activations of the previous layer for each given example in a
    batch independently, rather than across a batch like Batch Normalization.
    i.e. applies a transformation that maintains the mean activation within each
    example close to 0 and the activation standard deviation close to 1.

    If `scale` or `center` are enabled, the layer will scale the normalized
    outputs by broadcasting them with a trainable variable `gamma`, and center
    the outputs by broadcasting with a trainable variable `beta`. `gamma` will
    default to a ones tensor and `beta` will default to a zeros tensor, so that
    centering and scaling are no-ops before training has begun.

    So, with scaling and centering enabled the normalization equations
    are as follows:

    Let the intermediate activations for a mini-batch to be the `inputs`.

    For each sample `x_i` in `inputs` with `k` features, we compute the mean and
    variance of the sample:

    ```python
    mean_i = sum(x_i[j] for j in range(k)) / k
    var_i = sum((x_i[j] - mean_i) ** 2 for j in range(k)) / k
    ```

    and then compute a normalized `x_i_normalized`, including a small factor
    `epsilon` for numerical stability.

    ```python
    x_i_normalized = (x_i - mean_i) / sqrt(var_i + epsilon)
    ```

    And finally `x_i_normalized ` is linearly transformed by `gamma` and `beta`,
    which are learned parameters:

    ```python
    output_i = x_i_normalized * gamma + beta
    ```

    `gamma` and `beta` will span the axes of `inputs` specified in `axis`, and
    this part of the inputs' shape must be fully defined.

    For example:

    >>> layer = keras.layers.LayerNormalization(axis=[1, 2, 3])
    >>> layer.build([5, 20, 30, 40])
    >>> print(layer.beta.shape)
    (20, 30, 40)
    >>> print(layer.gamma.shape)
    (20, 30, 40)

    Note that other implementations of layer normalization may choose to define
    `gamma` and `beta` over a separate set of axes from the axes being
    normalized across. For example, Group Normalization
    ([Wu et al. 2018](https://arxiv.org/abs/1803.08494)) with group size of 1
    corresponds to a Layer Normalization that normalizes across height, width,
    and channel and has `gamma` and `beta` span only the channel dimension.
    So, this Layer Normalization implementation will not match a Group
    Normalization layer with group size set to 1.

    Args:
        axis: Integer or List/Tuple. The axis or axes to normalize across.
            Typically, this is the features axis/axes. The left-out axes are
            typically the batch axis/axes. `-1` is the last dimension in the
            input. Defaults to `-1`.
        epsilon: Small float added to variance to avoid dividing by zero.
            Defaults to 1e-3.
        center: If True, add offset of `beta` to normalized tensor. If False,
            `beta` is ignored. Defaults to `True`.
        scale: If True, multiply by `gamma`. If False, `gamma` is not used.
            When the next layer is linear (also e.g. `nn.relu`), this can be
            disabled since the scaling will be done by the next layer.
            Defaults to `True`.
        beta_initializer: Initializer for the beta weight. Defaults to zeros.
        gamma_initializer: Initializer for the gamma weight. Defaults to ones.
        beta_regularizer: Optional regularizer for the beta weight.
            None by default.
        gamma_regularizer: Optional regularizer for the gamma weight.
            None by default.
        beta_constraint: Optional constraint for the beta weight.
            None by default.
        gamma_constraint: Optional constraint for the gamma weight.
            None by default.
        **kwargs: Base layer keyword arguments (e.g. `name` and `dtype`).


    Reference:

    - [Lei Ba et al., 2016](https://arxiv.org/abs/1607.06450).
    """

    def __init__(
        self,
        axis=-1,
        epsilon=1e-3,
        center=True,
        scale=True,
        beta_initializer="zeros",
        gamma_initializer="ones",
        beta_regularizer=None,
        gamma_regularizer=None,
        beta_constraint=None,
        gamma_constraint=None,
        **kwargs,
    ):
        # `rms_scaling` is accepted only for backward compatibility; it is
        # deprecated in favor of `keras.layers.RMSNormalization`.
        rms_scaling = kwargs.pop("rms_scaling", False)
        if rms_scaling:
            warnings.warn(
                "You passed `rms_scaling=True`, which is deprecated. This "
                "argument incorrectly scales the input by the variance, not "
                "the root mean square. To correctly use RMS Normalization, "
                "please use `keras.layers.RMSNormalization` instead."
            )

        super().__init__(**kwargs)
        if isinstance(axis, (list, tuple)):
            self.axis = list(axis)
        elif isinstance(axis, int):
            self.axis = axis
        else:
            raise TypeError(
                "Expected an int or a list/tuple of ints for the "
                "argument 'axis', but received: %r" % axis
            )

        self.epsilon = epsilon
        self.center = center
        self.scale = scale
        self.rms_scaling = rms_scaling
        self.beta_initializer = initializers.get(beta_initializer)
        self.gamma_initializer = initializers.get(gamma_initializer)
        self.beta_regularizer = regularizers.get(beta_regularizer)
        self.gamma_regularizer = regularizers.get(gamma_regularizer)
        self.beta_constraint = constraints.get(beta_constraint)
        self.gamma_constraint = constraints.get(gamma_constraint)

        self.supports_masking = True
        # The normalization math is done in a wider dtype inside `call`;
        # disable automatic input casting.
        self.autocast = False

    def build(self, input_shape):
        """Create gamma/beta spanning the normalized axes.

        An integer `self.axis` is rewritten here as a one-element list, so
        after `build` the axis is always stored as a list.
        """
        if isinstance(self.axis, list):
            shape = tuple([input_shape[dim] for dim in self.axis])
        else:
            shape = (input_shape[self.axis],)
            self.axis = [self.axis]
        # Under the deprecated rms_scaling path only gamma is created.
        if self.scale or self.rms_scaling:
            self.gamma = self.add_weight(
                name="gamma",
                shape=shape,
                initializer=self.gamma_initializer,
                regularizer=self.gamma_regularizer,
                constraint=self.gamma_constraint,
                trainable=True,
                autocast=False,
            )
        else:
            self.gamma = None

        if self.center and not self.rms_scaling:
            self.beta = self.add_weight(
                name="beta",
                shape=shape,
                initializer=self.beta_initializer,
                regularizer=self.beta_regularizer,
                constraint=self.beta_constraint,
                trainable=True,
                autocast=False,
            )
        else:
            self.beta = None

    def call(self, inputs):
        """Apply layer normalization and cast back to the compute dtype."""
        outputs = ops.layer_normalization(
            inputs,
            self.gamma,
            self.beta,
            self.axis,
            self.epsilon,
            rms_scaling=self.rms_scaling,
        )
        return ops.cast(outputs, self.compute_dtype)

    def compute_output_shape(self, input_shape):
        """Validate the configured axes; the output shape equals the input."""
        if isinstance(self.axis, int):
            axes = [self.axis]
        else:
            axes = self.axis

        for axis in axes:
            if axis >= len(input_shape) or axis < -len(input_shape):
                raise ValueError(
                    f"Axis {axis} is out of bounds for "
                    f"input shape {input_shape}. "
                    f"Received: axis={self.axis}"
                )
        return input_shape

    def get_config(self):
        """Serialize constructor arguments (including deprecated flag)."""
        config = {
            "axis": self.axis,
            "epsilon": self.epsilon,
            "center": self.center,
            "scale": self.scale,
            "rms_scaling": self.rms_scaling,
            "beta_initializer": initializers.serialize(self.beta_initializer),
            "gamma_initializer": initializers.serialize(self.gamma_initializer),
            "beta_regularizer": regularizers.serialize(self.beta_regularizer),
            "gamma_regularizer": regularizers.serialize(self.gamma_regularizer),
            "beta_constraint": constraints.serialize(self.beta_constraint),
            "gamma_constraint": constraints.serialize(self.gamma_constraint),
        }
        base_config = super().get_config()
        return {**base_config, **config}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/normalization/spectral_normalization.py | keras/src/layers/normalization/spectral_normalization.py | from keras.src import initializers
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers import Wrapper
from keras.src.layers.input_spec import InputSpec
from keras.src.utils.numerical_utils import normalize
@keras_export("keras.layers.SpectralNormalization")
class SpectralNormalization(Wrapper):
    """Performs spectral normalization on the weights of a target layer.

    This wrapper controls the Lipschitz constant of the weights of a layer by
    constraining their spectral norm, which can stabilize the training of
    GANs.

    Args:
        layer: A `keras.layers.Layer` instance that
            has either a `kernel` (e.g. `Conv2D`, `Dense`...)
            or an `embeddings` attribute (`Embedding` layer).
        power_iterations: int, the number of iterations during normalization.
        **kwargs: Base wrapper keyword arguments.

    Examples:

    Wrap `keras.layers.Conv2D`:

    >>> x = np.random.rand(1, 10, 10, 1)
    >>> conv2d = SpectralNormalization(keras.layers.Conv2D(2, 2))
    >>> y = conv2d(x)
    >>> y.shape
    (1, 9, 9, 2)

    Wrap `keras.layers.Dense`:

    >>> x = np.random.rand(1, 10, 10, 1)
    >>> dense = SpectralNormalization(keras.layers.Dense(10))
    >>> y = dense(x)
    >>> y.shape
    (1, 10, 10, 10)

    Reference:

    - [Spectral Normalization for GAN](https://arxiv.org/abs/1802.05957).
    """

    def __init__(self, layer, power_iterations=1, **kwargs):
        super().__init__(layer, **kwargs)
        if power_iterations <= 0:
            raise ValueError(
                "`power_iterations` should be greater than zero. Received: "
                f"`power_iterations={power_iterations}`"
            )
        self.power_iterations = power_iterations

    def build(self, input_shape):
        """Build the wrapped layer and create the power-iteration vector."""
        super().build(input_shape)
        self.input_spec = InputSpec(min_ndim=1, axes={-1: input_shape[-1]})

        # Locate the weight to normalize: `kernel` for conv/dense-style
        # layers, `embeddings` for `Embedding`.
        if hasattr(self.layer, "kernel"):
            self.kernel = self.layer.kernel
        elif hasattr(self.layer, "embeddings"):
            self.kernel = self.layer.embeddings
        else:
            raise ValueError(
                f"{type(self.layer).__name__} object has no attribute 'kernel' "
                "nor 'embeddings'"
            )

        self.kernel_shape = self.kernel.shape

        # `vector_u` persists the left singular vector estimate across calls
        # so the power iteration warm-starts at every training step.
        self.vector_u = self.add_weight(
            shape=(1, self.kernel_shape[-1]),
            initializer=initializers.TruncatedNormal(stddev=0.02),
            trainable=False,
            name="vector_u",
            dtype=self.kernel.dtype,
        )

    def call(self, inputs, training=False):
        if training:
            # When the kernel is all zeros, keep the current values —
            # presumably to avoid dividing by a zero `sigma` inside
            # `normalized_weights`. Otherwise normalize and write back.
            new_vector_u, new_kernel = ops.cond(
                ops.all(ops.equal(self.kernel.value, 0)),
                lambda: (self.vector_u.value, self.kernel.value),
                self.normalized_weights,
            )
            self.vector_u.assign(new_vector_u)
            self.kernel.assign(new_kernel)

        output = self.layer(inputs)
        return ops.cast(output, inputs.dtype)

    def compute_output_shape(self, input_shape):
        # The wrapper does not change the wrapped layer's output shape.
        return self.layer.compute_output_shape(input_shape)

    def normalized_weights(self):
        """Generate spectral normalized weights.

        This method returns the updated value for `self.kernel` with the
        spectral normalized value, so that the layer is ready for `call()`.
        """
        # Flatten the kernel to 2D so its spectral norm is a matrix norm.
        weights = ops.reshape(self.kernel, [-1, self.kernel_shape[-1]])
        vector_u = self.vector_u.value

        # Power iteration to estimate the dominant singular vectors.
        for _ in range(self.power_iterations):
            vector_v = normalize(
                ops.matmul(vector_u, ops.transpose(weights)), axis=None
            )
            vector_u = normalize(ops.matmul(vector_v, weights), axis=None)
        # Do not backpropagate through the iteration itself.
        vector_u = ops.stop_gradient(vector_u)
        vector_v = ops.stop_gradient(vector_v)
        # `sigma` approximates the largest singular value of `weights`.
        sigma = ops.matmul(
            ops.matmul(vector_v, weights), ops.transpose(vector_u)
        )
        kernel = ops.reshape(ops.divide(self.kernel, sigma), self.kernel_shape)
        return ops.cast(vector_u, self.vector_u.dtype), ops.cast(
            kernel, self.kernel.dtype
        )

    def get_config(self):
        """Return the wrapper config for serialization."""
        config = {"power_iterations": self.power_iterations}
        base_config = super().get_config()
        return {**base_config, **config}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/normalization/group_normalization_test.py | keras/src/layers/normalization/group_normalization_test.py | import numpy as np
import pytest
from keras.src import constraints
from keras.src import layers
from keras.src import regularizers
from keras.src import testing
class GroupNormalizationTest(testing.TestCase):
    """Tests for `GroupNormalization`, incl. its instance-norm special case."""

    @pytest.mark.requires_trainable_backend
    def test_groupnorm(self):
        # Regularizers on gamma/beta should each contribute one loss.
        self.run_layer_test(
            layers.GroupNormalization,
            init_kwargs={
                "gamma_regularizer": regularizers.L2(0.01),
                "beta_regularizer": regularizers.L2(0.01),
            },
            input_shape=(3, 4, 32),
            expected_output_shape=(3, 4, 32),
            expected_num_trainable_weights=2,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=2,
            supports_masking=True,
        )
        # Constraints instead of regularizers: no losses expected.
        self.run_layer_test(
            layers.GroupNormalization,
            init_kwargs={
                "groups": 4,
                "gamma_constraint": constraints.UnitNorm(),
                "beta_constraint": constraints.UnitNorm(),
            },
            input_shape=(3, 4, 4),
            expected_output_shape=(3, 4, 4),
            expected_num_trainable_weights=2,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=True,
        )

    def test_undefined_dim_error(self):
        # The normalized axis must have a statically known dimension.
        inputs = layers.Input(shape=(2, 2, 2, None))
        layer = layers.GroupNormalization()
        with self.assertRaisesRegex(
            ValueError,
            (
                "input tensor should have a defined dimension but the layer "
                "received an input with shape"
            ),
        ):
            _ = layer(inputs)

    def test_groups_bigger_than_dim_error(self):
        # 5 groups cannot partition 4 channels.
        inputs = np.ones(shape=(2, 2, 2, 4))
        layer = layers.GroupNormalization(groups=5)
        with self.assertRaisesRegex(
            ValueError,
            "cannot be more than the number of channels",
        ):
            _ = layer(inputs)

    def test_groups_not_a_multiple_of_dim_error(self):
        # 3 groups do not evenly divide 4 channels.
        inputs = np.ones(shape=(2, 2, 2, 4))
        layer = layers.GroupNormalization(groups=3)
        with self.assertRaisesRegex(
            ValueError,
            "must be a multiple of the number of channels",
        ):
            _ = layer(inputs)

    def test_groups_instance_norm(self):
        # GroupNormalization with groups=-1 will become InstanceNormalization
        instance_norm_layer_1 = layers.GroupNormalization(
            groups=-1, axis=-1, scale=False, center=False
        )
        # With 4 channels, groups=4 puts one channel per group — equivalent.
        instance_norm_layer_2 = layers.GroupNormalization(
            groups=4, axis=-1, scale=False, center=False
        )
        inputs = np.array([[[-1.0, 1.0, 0, 2.0], [1.0, 3.0, -4, -2.0]]])

        outputs_1 = instance_norm_layer_1(inputs)
        outputs_2 = instance_norm_layer_2(inputs)
        self.assertAllClose(outputs_1, outputs_2)

    def test_correctness_instance_norm(self):
        instance_norm_layer = layers.GroupNormalization(
            groups=4, axis=-1, scale=False, center=False
        )
        inputs = np.array([[[-1.0, 1.0, 0, 2.0], [1.0, 3.0, -4, -2.0]]])
        expected_instance_norm_output = np.array(
            [[[-1.0, -1.0, 1.0, 1.0], [1.0, 1.0, -1.0, -1.0]]]
        )
        self.assertAllClose(
            instance_norm_layer(inputs),
            expected_instance_norm_output,
            atol=1e-3,
        )

    def test_correctness_1d(self):
        layer_with_1_group = layers.GroupNormalization(
            groups=1, axis=-1, scale=False, center=False
        )
        layer_with_2_groups = layers.GroupNormalization(
            groups=2, axis=1, scale=False, center=False
        )
        inputs = np.array([[-1.0, -1.0, 1.0, 1.0, 2.0, 2.0, 0, -2.0]])

        # One group: statistics are computed over the whole feature vector.
        expected_output_1_group = np.array(
            [[-0.898, -0.898, 0.539, 0.539, 1.257, 1.257, -0.180, -1.616]],
        )
        self.assertAllClose(
            layer_with_1_group(inputs),
            expected_output_1_group,
            atol=1e-3,
        )

        # Two groups: each half is normalized independently.
        expected_output_2_groups = np.array(
            [[-1.0, -1.0, 1.0, 1.0, 0.904, 0.904, -0.301, -1.507]]
        )
        self.assertAllClose(
            layer_with_2_groups(inputs),
            expected_output_2_groups,
            atol=1e-3,
        )

    def test_correctness_2d(self):
        layer_with_1_group = layers.GroupNormalization(
            groups=1, axis=-1, scale=False, center=False
        )
        layer_with_2_groups = layers.GroupNormalization(
            groups=2, axis=2, scale=False, center=False
        )
        inputs = np.array([[[-1.0, -1.0, 2.0, 2.0], [1.0, 1.0, 0, -2.0]]])

        expected_output_1_group = np.array(
            [[[-0.898, -0.898, 1.257, 1.257], [0.539, 0.539, -0.180, -1.616]]]
        )
        self.assertAllClose(
            layer_with_1_group(inputs),
            expected_output_1_group,
            atol=1e-3,
        )

        expected_output_2_groups = np.array(
            [[[-1.0, -1.0, 0.904, 0.904], [1.0, 1.0, -0.301, -1.507]]]
        )
        self.assertAllClose(
            layer_with_2_groups(inputs),
            expected_output_2_groups,
            atol=1e-3,
        )

    def test_broadcasting_2d_channels_first(self):
        # Channels-first input (axis=1): statistics must broadcast over the
        # spatial axes.
        x = np.arange(16).reshape((1, 4, 2, 2)).astype("float32")
        x = layers.GroupNormalization(groups=2, axis=1)(x)
        self.assertAllClose(
            x,
            np.array(
                [
                    [
                        [[-1.5274, -1.0910], [-0.6546, -0.2182]],
                        [[0.2182, 0.6546], [1.0910, 1.5274]],
                        [[-1.5274, -1.0910], [-0.6546, -0.2182]],
                        [[0.2182, 0.6546], [1.0910, 1.5274]],
                    ]
                ]
            ),
            atol=1e-3,
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/normalization/__init__.py | keras/src/layers/normalization/__init__.py | python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false | |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/normalization/spectral_normalization_test.py | keras/src/layers/normalization/spectral_normalization_test.py | import numpy as np
import pytest
from keras.src import backend
from keras.src import initializers
from keras.src import layers
from keras.src import models
from keras.src import testing
class SpectralNormalizationTest(testing.TestCase):
    """Tests for the `SpectralNormalization` wrapper."""

    @pytest.mark.requires_trainable_backend
    def test_basic_spectralnorm(self):
        # Dense: kernel + bias trainable, plus the wrapper's non-trainable
        # `vector_u`.
        self.run_layer_test(
            layers.SpectralNormalization,
            init_kwargs={"layer": layers.Dense(2)},
            input_data=np.random.uniform(size=(10, 3, 4)),
            expected_output_shape=(10, 3, 2),
            expected_num_trainable_weights=2,
            expected_num_non_trainable_weights=1,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=False,
        )
        # Embedding path: the wrapper normalizes `embeddings`, not `kernel`.
        self.run_layer_test(
            layers.SpectralNormalization,
            init_kwargs={"layer": layers.Embedding(10, 4)},
            input_data=np.random.randint(10, size=(10,)).astype("float32"),
            expected_output_shape=(10, 4),
            expected_num_trainable_weights=1,
            expected_num_non_trainable_weights=1,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=False,
            run_training_check=False,
        )

    @pytest.mark.requires_trainable_backend
    def test_spectralnorm_higher_dim(self):
        # Rank-4 inputs through a wrapped Dense still flatten correctly.
        self.run_layer_test(
            layers.SpectralNormalization,
            init_kwargs={"layer": layers.Dense(2)},
            input_data=np.random.uniform(size=(10, 3, 4, 5)),
            expected_output_shape=(10, 3, 4, 2),
            expected_num_trainable_weights=2,
            expected_num_non_trainable_weights=1,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=False,
        )

    def test_invalid_power_iterations(self):
        with self.assertRaisesRegex(
            ValueError, "`power_iterations` should be greater than zero."
        ):
            layers.SpectralNormalization(layers.Dense(2), power_iterations=0)

    def test_invalid_layer(self):
        # Layers without `kernel` or `embeddings` cannot be wrapped.
        layer = layers.SpectralNormalization(layers.ReLU())
        inputs = np.ones(shape=(4, 2))
        with self.assertRaisesRegex(
            ValueError, "object has no attribute 'kernel' nor 'embeddings'"
        ):
            layer(inputs)

    def test_apply_layer(self):
        if backend.config.image_data_format() == "channels_last":
            images = np.ones((1, 2, 2, 1))
        else:
            images = np.ones((1, 1, 2, 2))
        sn_wrapper = layers.SpectralNormalization(
            layers.Conv2D(
                1, (2, 2), kernel_initializer=initializers.Constant(value=1)
            ),
            power_iterations=8,
        )

        # Inference leaves the kernel untouched; training normalizes it.
        result = sn_wrapper(images, training=False)
        result_train = sn_wrapper(images, training=True)
        expected_output = np.array([[[[4.0]]]], dtype=np.float32)
        self.assertAllClose(result, expected_output)
        # max eigen value of 2x2 matrix of ones is 2
        self.assertAllClose(result_train, expected_output / 2)

    @pytest.mark.requires_trainable_backend
    def test_end_to_end(self):
        # Smoke test: the wrapper trains inside a compiled model.
        sn_wrapper = layers.SpectralNormalization(
            layers.Conv2D(
                3, (2, 2), padding="same", data_format="channels_last"
            ),
            power_iterations=2,
        )
        model = models.Sequential([sn_wrapper])
        model.compile("rmsprop", loss="mse")
        x = np.random.random((4, 8, 8, 3))
        y = np.random.random((4, 8, 8, 3))
        model.fit(x, y)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/normalization/batch_normalization.py | keras/src/layers/normalization/batch_normalization.py | from keras.src import backend
from keras.src import constraints
from keras.src import initializers
from keras.src import ops
from keras.src import regularizers
from keras.src.api_export import keras_export
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
@keras_export("keras.layers.BatchNormalization")
class BatchNormalization(Layer):
    """Layer that normalizes its inputs.

    Batch normalization applies a transformation that maintains the mean
    output close to 0 and the output standard deviation close to 1.

    Importantly, batch normalization works differently during training and
    during inference.

    **During training** (i.e. when using `fit()` or when calling the
    layer/model with the argument `training=True`), the layer normalizes its
    output using the mean and standard deviation of the current batch of
    inputs. That is to say, for each channel being normalized, the layer
    returns
    `gamma * (batch - mean(batch)) / sqrt(var(batch) + epsilon) + beta`,
    where:

    - `epsilon` is small constant (configurable as part of the constructor
    arguments)
    - `gamma` is a learned scaling factor (initialized as 1), which
    can be disabled by passing `scale=False` to the constructor.
    - `beta` is a learned offset factor (initialized as 0), which
    can be disabled by passing `center=False` to the constructor.

    **During inference** (i.e. when using `evaluate()` or `predict()` or when
    calling the layer/model with the argument `training=False` (which is the
    default), the layer normalizes its output using a moving average of the
    mean and standard deviation of the batches it has seen during training.
    That is to say, it returns
    `gamma * (batch - self.moving_mean) / sqrt(self.moving_var+epsilon) + beta`.

    `self.moving_mean` and `self.moving_var` are non-trainable variables that
    are updated each time the layer in called in training mode, as such:

    - `moving_mean = moving_mean * momentum + mean(batch) * (1 - momentum)`
    - `moving_var = moving_var * momentum + var(batch) * (1 - momentum)`

    As such, the layer will only normalize its inputs during inference
    *after having been trained on data that has similar statistics as the
    inference data*.

    Args:
        axis: Integer, the axis that should be normalized
            (typically the features axis). For instance, after a `Conv2D`
            layer with `data_format="channels_first"`, use `axis=1`.
        momentum: Momentum for the moving average.
        epsilon: Small float added to variance to avoid dividing by zero.
        center: If `True`, add offset of `beta` to normalized tensor.
            If `False`, `beta` is ignored.
        scale: If `True`, multiply by `gamma`. If `False`, `gamma` is not
            used. When the next layer is linear this can be disabled
            since the scaling will be done by the next layer.
        beta_initializer: Initializer for the beta weight.
        gamma_initializer: Initializer for the gamma weight.
        moving_mean_initializer: Initializer for the moving mean.
        moving_variance_initializer: Initializer for the moving variance.
        beta_regularizer: Optional regularizer for the beta weight.
        gamma_regularizer: Optional regularizer for the gamma weight.
        beta_constraint: Optional constraint for the beta weight.
        gamma_constraint: Optional constraint for the gamma weight.
        synchronized: Only applicable with the TensorFlow backend.
            If `True`, synchronizes the global batch statistics (mean and
            variance) for the layer across all devices at each training step
            in a distributed training strategy.
            If `False`, each replica uses its own local batch statistics.
        **kwargs: Base layer keyword arguments (e.g. `name` and `dtype`).

    Call arguments:
        inputs: Input tensor (of any rank).
        training: Python boolean indicating whether the layer should behave
            in training mode or in inference mode.
            - `training=True`: The layer will normalize its inputs using
            the mean and variance of the current batch of inputs.
            - `training=False`: The layer will normalize its inputs using
            the mean and variance of its moving statistics, learned during
            training.
        mask: Binary tensor of shape broadcastable to `inputs` tensor, with
            `True` values indicating the positions for which mean and
            variance should be computed. Masked elements of the current
            inputs are not taken into account for mean and variance
            computation during training. Any prior unmasked element values
            will be taken into account until their momentum expires.

    Reference:

    - [Ioffe and Szegedy, 2015](https://arxiv.org/abs/1502.03167).

    **About setting `layer.trainable = False` on a `BatchNormalization`
    layer:**

    The meaning of setting `layer.trainable = False` is to freeze the layer,
    i.e. its internal state will not change during training:
    its trainable weights will not be updated
    during `fit()` or `train_on_batch()`, and its state updates will not be
    run.

    Usually, this does not necessarily mean that the layer is run in
    inference mode (which is normally controlled by the `training` argument
    that can be passed when calling a layer). "Frozen state" and "inference
    mode" are two separate concepts.

    However, in the case of the `BatchNormalization` layer, **setting
    `trainable = False` on the layer means that the layer will be
    subsequently run in inference mode** (meaning that it will use
    the moving mean and the moving variance to normalize the current batch,
    rather than using the mean and variance of the current batch).

    Note that:

    - Setting `trainable` on an model containing other layers will
    recursively set the `trainable` value of all inner layers.
    - If the value of the `trainable` attribute is changed after calling
    `compile()` on a model, the new value doesn't take effect for this model
    until `compile()` is called again.
    """

    def __init__(
        self,
        axis=-1,
        momentum=0.99,
        epsilon=1e-3,
        center=True,
        scale=True,
        beta_initializer="zeros",
        gamma_initializer="ones",
        moving_mean_initializer="zeros",
        moving_variance_initializer="ones",
        beta_regularizer=None,
        gamma_regularizer=None,
        beta_constraint=None,
        gamma_constraint=None,
        synchronized=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.axis = int(axis)

        # Cross-replica statistics are only implemented for TensorFlow.
        if synchronized and backend.backend() != "tensorflow":
            raise ValueError(
                "Argument synchronized=True is only supported "
                "with the TensorFlow backend."
            )
        self.synchronized = synchronized

        self.momentum = float(momentum)
        self.epsilon = float(epsilon)
        self.center = center
        self.scale = scale
        self.beta_initializer = initializers.get(beta_initializer)
        self.gamma_initializer = initializers.get(gamma_initializer)
        self.moving_mean_initializer = initializers.get(moving_mean_initializer)
        self.moving_variance_initializer = initializers.get(
            moving_variance_initializer
        )
        self.beta_regularizer = regularizers.get(beta_regularizer)
        self.gamma_regularizer = regularizers.get(gamma_regularizer)
        self.beta_constraint = constraints.get(beta_constraint)
        self.gamma_constraint = constraints.get(gamma_constraint)
        self.supports_masking = True

        # Weights are created in `build()`.
        self.gamma = None
        self.beta = None
        self.moving_mean = None
        self.moving_variance = None
        self._reduction_axes = None

    def build(self, input_shape):
        """Create gamma/beta and the moving statistics along `self.axis`."""
        shape = (input_shape[self.axis],)
        if self.scale:
            self.gamma = self.add_weight(
                shape=shape,
                name="gamma",
                initializer=self.gamma_initializer,
                regularizer=self.gamma_regularizer,
                constraint=self.gamma_constraint,
                trainable=True,
                # Keep full precision regardless of mixed-precision policy;
                # `call()` casts explicitly.
                autocast=False,
            )
        if self.center:
            self.beta = self.add_weight(
                shape=shape,
                name="beta",
                initializer=self.beta_initializer,
                regularizer=self.beta_regularizer,
                constraint=self.beta_constraint,
                trainable=True,
                autocast=False,
            )
        self.moving_mean = self.add_weight(
            shape=shape,
            name="moving_mean",
            initializer=self.moving_mean_initializer,
            trainable=False,
            autocast=False,
        )
        self.moving_variance = self.add_weight(
            shape=shape,
            name="moving_variance",
            initializer=self.moving_variance_initializer,
            trainable=False,
            autocast=False,
        )
        self.input_spec = InputSpec(
            ndim=len(input_shape), axes={self.axis: input_shape[self.axis]}
        )
        # Batch statistics are reduced over every axis except `self.axis`.
        reduction_axes = list(range(len(input_shape)))
        del reduction_axes[self.axis]
        self._reduction_axes = reduction_axes

    def compute_output_shape(self, input_shape):
        """Validate `self.axis` against `input_shape`; shape is unchanged."""
        # `self.axis` is always an int after `__init__` coerces it, but a
        # list/tuple is handled defensively here.
        if isinstance(self.axis, int):
            axes = [self.axis]
        else:
            axes = self.axis

        for axis in axes:
            if axis >= len(input_shape) or axis < -len(input_shape):
                raise ValueError(
                    f"Axis {axis} is out of bounds for "
                    f"input shape {input_shape}. "
                    f"Received: axis={self.axis}"
                )
        return input_shape

    def call(self, inputs, training=None, mask=None):
        """Normalize `inputs`, updating the moving statistics when training."""
        # Check if the mask has one less dimension than the inputs.
        if mask is not None:
            if len(mask.shape) != len(inputs.shape) - 1:
                # Raise a value error
                raise ValueError(
                    "The mask provided should be one dimension less "
                    "than the inputs. Received: "
                    f"mask.shape={mask.shape}, inputs.shape={inputs.shape}"
                )

        compute_dtype = backend.result_type(inputs.dtype, "float32")
        # BN is prone to overflow with float16/bfloat16 inputs, so we upcast
        # to float32 for the subsequent computations.
        inputs = ops.cast(inputs, compute_dtype)

        moving_mean = ops.cast(self.moving_mean, inputs.dtype)
        moving_variance = ops.cast(self.moving_variance, inputs.dtype)

        # Frozen layers (`trainable=False`) always run in inference mode,
        # using the moving statistics. `training=None` also falls through to
        # the inference branch.
        if training and self.trainable:
            mean, variance = self._moments(inputs, mask)
            # Exponential moving-average update of the inference statistics.
            self.moving_mean.assign(
                moving_mean * self.momentum + mean * (1.0 - self.momentum)
            )
            self.moving_variance.assign(
                moving_variance * self.momentum
                + variance * (1.0 - self.momentum)
            )
        else:
            mean = moving_mean
            variance = moving_variance

        if self.scale:
            gamma = ops.cast(self.gamma, inputs.dtype)
        else:
            gamma = None

        if self.center:
            beta = ops.cast(self.beta, inputs.dtype)
        else:
            beta = None

        outputs = ops.batch_normalization(
            x=inputs,
            mean=mean,
            variance=variance,
            axis=self.axis,
            offset=beta,
            scale=gamma,
            epsilon=self.epsilon,
        )
        # Cast back from the float32 working dtype to the compute dtype.
        return ops.cast(outputs, self.compute_dtype)

    def get_config(self):
        """Return the layer config for serialization."""
        base_config = super().get_config()
        config = {
            "axis": self.axis,
            "momentum": self.momentum,
            "epsilon": self.epsilon,
            "center": self.center,
            "scale": self.scale,
            "beta_initializer": initializers.serialize(self.beta_initializer),
            "gamma_initializer": initializers.serialize(self.gamma_initializer),
            "moving_mean_initializer": initializers.serialize(
                self.moving_mean_initializer
            ),
            "moving_variance_initializer": initializers.serialize(
                self.moving_variance_initializer
            ),
            "beta_regularizer": regularizers.serialize(self.beta_regularizer),
            "gamma_regularizer": regularizers.serialize(self.gamma_regularizer),
            "beta_constraint": constraints.serialize(self.beta_constraint),
            "gamma_constraint": constraints.serialize(self.gamma_constraint),
            "synchronized": self.synchronized,
        }
        return {**base_config, **config}

    def _moments(self, inputs, mask):
        """Return `(mean, variance)` of `inputs`, optionally mask-weighted."""
        if mask is None:
            return ops.moments(
                inputs,
                axes=self._reduction_axes,
                synchronized=self.synchronized,
            )

        # Weighted moments: masked positions get zero weight.
        mask_weights = ops.cast(mask, inputs.dtype)
        mask_weights_broadcasted = ops.expand_dims(mask_weights, axis=-1)
        broadcasted_mask = ops.broadcast_to(
            mask_weights_broadcasted, ops.shape(inputs)
        )

        weighted_inputs = broadcasted_mask * inputs
        weighted_input_sum = ops.sum(
            weighted_inputs,
            self._reduction_axes,
            keepdims=True,
        )
        sum_of_weights = ops.sum(
            broadcasted_mask,
            self._reduction_axes,
            keepdims=True,
        )
        # `backend.epsilon()` guards against division by an all-zero mask.
        mean = weighted_input_sum / (sum_of_weights + backend.epsilon())

        # The difference uses `weighted_inputs` (zeros at masked positions);
        # multiplying by the mask again below zeroes those contributions, so
        # masked elements never affect the variance.
        difference = weighted_inputs - mean
        squared_difference = ops.square(difference)
        weighted_distsq = ops.sum(
            broadcasted_mask * squared_difference,
            self._reduction_axes,
            keepdims=True,
        )
        variance = weighted_distsq / (sum_of_weights + backend.epsilon())

        return ops.squeeze(mean), ops.squeeze(variance)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/core/input_layer.py | keras/src/layers/core/input_layer.py | import warnings
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
from keras.src.ops.node import Node
@keras_export("keras.layers.InputLayer")
class InputLayer(Layer):
    """Entry-point layer holding a symbolic `KerasTensor` placeholder.

    Usually created indirectly via `keras.Input`. Either wraps an existing
    `input_tensor` or creates a new placeholder from `shape`/`batch_shape`.
    """

    def __init__(
        self,
        shape=None,
        batch_size=None,
        dtype=None,
        sparse=None,
        ragged=None,
        batch_shape=None,
        input_tensor=None,
        optional=False,
        name=None,
        **kwargs,
    ):
        super().__init__(name=name)
        # Legacy Keras 2 argument names.
        if "input_shape" in kwargs:
            warnings.warn(
                "Argument `input_shape` is deprecated. Use `shape` instead."
            )
            shape = kwargs.pop("input_shape")
        if "batch_input_shape" in kwargs:
            batch_shape = kwargs.pop("batch_input_shape")

        if input_tensor is not None:
            # When wrapping an existing KerasTensor, any explicitly passed
            # attribute must be consistent with the tensor's own metadata.
            if not isinstance(input_tensor, backend.KerasTensor):
                raise ValueError(
                    "Argument `input_tensor` must be a KerasTensor. "
                    f"Received invalid type: input_tensor={input_tensor} "
                    f"(of type {type(input_tensor)})"
                )
            if batch_size is not None:
                if (
                    len(input_tensor.shape) < 1
                    or input_tensor.shape[0] != batch_size
                ):
                    raise ValueError(
                        "When providing the `input_tensor` argument, you "
                        "cannot provide an incompatible `batch_size` argument."
                    )
            if shape is not None:
                # `shape` excludes the batch dimension, hence the `[1:]`.
                if (
                    len(shape) != len(input_tensor.shape) - 1
                    or shape != input_tensor.shape[1:]
                ):
                    raise ValueError(
                        "When providing the `input_tensor` argument, you "
                        "cannot provide an incompatible `shape` argument."
                    )
            if batch_shape is not None and batch_shape != input_tensor.shape:
                raise ValueError(
                    "When providing the `input_tensor` argument, you "
                    "cannot provide an incompatible `batch_shape` argument."
                )
            if dtype is not None and input_tensor.dtype != dtype:
                raise ValueError(
                    "When providing the `input_tensor` argument, you "
                    "cannot provide an incompatible `dtype` argument."
                )
            if sparse is not None and input_tensor.sparse != sparse:
                raise ValueError(
                    "When providing the `input_tensor` argument, you "
                    "cannot provide an incompatible `sparse` argument."
                )
            # NOTE(review): unlike `sparse`/`dtype`, `ragged` is neither
            # validated against nor inherited from `input_tensor` here —
            # confirm whether that is intentional.
            batch_shape = input_tensor.shape
            dtype = input_tensor.dtype
            sparse = input_tensor.sparse
        else:
            # Creating a fresh placeholder: `shape` and `batch_shape` (and
            # `batch_size` with `batch_shape`) are mutually exclusive.
            if shape is not None and batch_shape is not None:
                raise ValueError(
                    "You cannot pass both `shape` and `batch_shape` at the "
                    "same time."
                )
            if batch_size is not None and batch_shape is not None:
                raise ValueError(
                    "You cannot pass both `batch_size` and `batch_shape` "
                    "at the same time."
                )
            if shape is None and batch_shape is None:
                raise ValueError("You must pass a `shape` argument.")

            if shape is not None:
                shape = backend.standardize_shape(shape)
                # `batch_size` may be None, yielding a dynamic batch dim.
                batch_shape = (batch_size,) + shape
        self._batch_shape = backend.standardize_shape(batch_shape)
        self._dtype = backend.standardize_dtype(dtype)

        self.sparse = bool(sparse)
        if self.sparse and not backend.SUPPORTS_SPARSE_TENSORS:
            raise ValueError(
                f"`sparse=True` is not supported with the {backend.backend()} "
                "backend"
            )
        self.ragged = bool(ragged)
        if self.ragged and not backend.SUPPORTS_RAGGED_TENSORS:
            raise ValueError(
                f"`ragged=True` is not supported with the {backend.backend()} "
                "backend"
            )

        if input_tensor is None:
            input_tensor = backend.KerasTensor(
                shape=batch_shape,
                dtype=dtype,
                sparse=sparse,
                ragged=ragged,
                name=name,
            )
        self._input_tensor = input_tensor
        # Register the tensor as this layer's output in the functional graph.
        Node(operation=self, call_args=(), call_kwargs={}, outputs=input_tensor)
        self.built = True
        self.optional = optional

    def call(self):
        # An InputLayer performs no computation; its output is the
        # placeholder created in `__init__`.
        return

    @property
    def batch_shape(self):
        """Full input shape, including the batch dimension."""
        return self._batch_shape

    @property
    def dtype(self):
        """Dtype of the input tensor."""
        return self._dtype

    def get_config(self):
        """Return the layer config for serialization."""
        return {
            "batch_shape": self.batch_shape,
            "dtype": self.dtype,
            "sparse": self.sparse,
            "ragged": self.ragged,
            "name": self.name,
            "optional": self.optional,
        }
@keras_export(["keras.layers.Input", "keras.Input"])
def Input(
    shape=None,
    batch_size=None,
    dtype=None,
    sparse=None,
    ragged=None,
    batch_shape=None,
    name=None,
    tensor=None,
    optional=False,
):
    """Used to instantiate a Keras tensor.

    A Keras tensor is a symbolic tensor-like object augmented with metadata
    that lets a Keras model be built from its inputs and outputs alone: if
    `a`, `b` and `c` are Keras tensors, then
    `model = Model(input=[a, b], output=c)` is possible.

    Args:
        shape: Shape tuple (integers or `None` entries), excluding the batch
            size. For instance, `shape=(32,)` indicates that the expected
            input will be batches of 32-dimensional vectors. `None` entries
            denote dimensions whose size is unknown and may vary (e.g.
            sequence length).
        batch_size: Optional static batch size (integer).
        dtype: The data type expected by the input, as a string
            (e.g. `"float32"`, `"int32"`...)
        sparse: Boolean, whether the expected input will be sparse tensors.
            If `sparse` is `False`, sparse tensors can still be passed in —
            they will be densified with a default value of 0. Only supported
            by the TensorFlow and JAX backends. Defaults to `False`.
        ragged: Boolean, whether the expected input will be ragged tensors.
            If `ragged` is `False`, ragged tensors can still be passed in —
            they will be densified with a default value of 0. Only supported
            by the TensorFlow backend. Defaults to `False`.
        batch_shape: Optional shape tuple (integers or `None` entries),
            including the batch size.
        name: Optional name string for the layer, unique within a model;
            autogenerated if not provided.
        tensor: Optional existing tensor to wrap into the `Input` layer.
            If set, the layer uses this tensor rather than creating a new
            placeholder tensor.
        optional: Boolean, whether the input is optional or not.
            An optional input can accept `None` values.

    Returns:
        A Keras tensor.

    Example:

    ```python
    # This is a logistic regression in Keras
    x = Input(shape=(32,))
    y = Dense(16, activation='softmax')(x)
    model = Model(x, y)
    ```
    """
    # All validation and placeholder creation happens inside `InputLayer`;
    # this function only forwards the arguments and exposes the layer's
    # symbolic output tensor.
    input_layer = InputLayer(
        shape=shape,
        batch_size=batch_size,
        dtype=dtype,
        sparse=sparse,
        ragged=ragged,
        batch_shape=batch_shape,
        name=name,
        input_tensor=tensor,
        optional=optional,
    )
    return input_layer.output
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/core/identity_test.py | keras/src/layers/core/identity_test.py | import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import testing
class IdentityTest(testing.TestCase):
    """Tests for the `Identity` pass-through layer."""

    @parameterized.named_parameters(
        [
            {"testcase_name": "dense", "sparse": False},
            {"testcase_name": "sparse", "sparse": True},
        ]
    )
    @pytest.mark.requires_trainable_backend
    def test_identity_basics(self, sparse):
        # Sparse inputs only make sense on backends that support them.
        if sparse and not backend.SUPPORTS_SPARSE_TENSORS:
            pytest.skip("Backend does not support sparse tensors.")
        self.run_layer_test(
            layers.Identity,
            init_kwargs={},
            input_shape=(2, 3),
            input_sparse=sparse,
            expected_output_shape=(2, 3),
            expected_output_sparse=sparse,
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            # NOTE(review): the training check is skipped for sparse inputs —
            # presumably `fit()` does not accept them here; confirm.
            run_training_check=not sparse,
            supports_masking=True,
            assert_built_after_instantiation=True,
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/core/masking.py | keras/src/layers/core/masking.py | from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
from keras.src.saving.serialization_lib import deserialize_keras_object
@keras_export("keras.layers.Masking")
class Masking(Layer):
    """Masks a sequence by using a mask value to skip timesteps.

    A timestep (dimension #1 of the input tensor) is masked — i.e. skipped
    by all downstream mask-consuming layers — when every feature value at
    that timestep equals `mask_value`. If a downstream layer does not
    support masking yet receives such a mask, an exception will be raised.

    Example:

    Given a NumPy array `x` of shape `(samples, timesteps, features)` fed to
    an LSTM, timesteps #3 and #5 can be excluded (e.g. because data for them
    is missing) either by setting `x[:, 3, :] = 0.` and `x[:, 5, :] = 0.`
    directly, or by inserting a `Masking` layer with `mask_value=0.` before
    the LSTM:

    ```python
    samples, timesteps, features = 32, 10, 8
    inputs = np.random.random([samples, timesteps, features]).astype(np.float32)
    inputs[:, 3, :] = 0.
    inputs[:, 5, :] = 0.

    model = keras.models.Sequential()
    model.add(keras.layers.Masking(mask_value=0.0))
    model.add(keras.layers.LSTM(32))
    output = model(inputs)
    # The time step 3 and 5 will be skipped from LSTM calculation.
    ```

    Note: in the Keras masking convention, a masked timestep is denoted by
    a mask value of `False`, while a non-masked (i.e. usable) timestep
    is denoted by a mask value of `True`.
    """

    def __init__(self, mask_value=0.0, **kwargs):
        super().__init__(**kwargs)
        # `mask_value` may arrive as a serialized tensor; deserialize it.
        if isinstance(mask_value, dict) and mask_value.get("config", None):
            mask_value = deserialize_keras_object(mask_value)
        self.mask_value = mask_value
        self.supports_masking = True
        self._build_at_init()

    def compute_mask(self, inputs, mask=None):
        # A timestep is kept (True) when at least one of its features
        # differs from `mask_value`.
        return ops.any(ops.not_equal(inputs, self.mask_value), axis=-1)

    def call(self, inputs):
        keep = ops.any(
            ops.not_equal(inputs, self.mask_value), axis=-1, keepdims=True
        )
        # Zero out fully-masked timesteps.
        outputs = inputs * backend.cast(keep, dtype=inputs.dtype)
        # Attach the boolean mask to the outputs so downstream layers can
        # consume it without recomputing.
        backend.set_keras_mask(outputs, mask=ops.squeeze(keep, axis=-1))
        return outputs

    def compute_output_shape(self, input_shape):
        # Masking never changes the shape of the data.
        return input_shape

    def get_config(self):
        config = {"mask_value": self.mask_value}
        return {**super().get_config(), **config}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/core/masking_test.py | keras/src/layers/core/masking_test.py | import os
import numpy as np
import pytest
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import testing
from keras.src.saving import load_model
class MaskingTest(testing.TestCase):
    """Tests for the `Masking` layer."""

    @pytest.mark.requires_trainable_backend
    def test_masking_basics(self):
        # Smoke test: standard layer checks (shape, weight counts, masking).
        self.run_layer_test(
            layers.Masking,
            init_kwargs={"mask_value": 0.0},
            input_shape=(2, 3, 2),
            expected_output_shape=(2, 3, 2),
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=True,
            assert_built_after_instantiation=True,
        )

    @pytest.mark.requires_trainable_backend
    def test_masking_correctness(self):
        # Timesteps whose features all equal `mask_value` (0.0 here) must be
        # masked out (False in the computed mask).
        x = np.array(
            [
                [[0.0, 0.0], [1.0, 2.0], [0.0, 0.0]],
                [[2.0, 2.0], [0.0, 0.0], [2.0, 1.0]],
            ]
        )
        expected_mask = [[False, True, False], [True, False, True]]

        layer = layers.Masking(mask_value=0.0)
        self.assertAllClose(layer.compute_mask(x), expected_mask)

        test_obj = self

        # Mask-consuming layer that asserts the mask propagated by `Masking`
        # matches the expected mask.
        class TestLayer(layers.Layer):
            def __init__(self, **kwargs):
                super().__init__(**kwargs)
                self.supports_masking = True

            def compute_output_shape(self, input_shape):
                return input_shape

            def call(self, inputs, mask=None):
                assert mask is not None
                test_obj.assertAllClose(mask, expected_mask)
                return inputs

        model = models.Sequential(
            [
                layers.Masking(mask_value=0.0),
                TestLayer(),
            ]
        )
        model(x)

    @pytest.mark.requires_trainable_backend
    def test_masking_with_tensor(self):
        # A tensor-valued `mask_value` must survive a save/load roundtrip.
        model = models.Sequential(
            [
                layers.Masking(mask_value=ops.convert_to_tensor([0.0])),
                layers.LSTM(1),
            ]
        )
        x = np.array(
            [
                [[0.0, 0.0], [1.0, 2.0], [0.0, 0.0]],
                [[2.0, 2.0], [0.0, 0.0], [2.0, 1.0]],
            ]
        )
        model(x)
        temp_filepath = os.path.join(self.get_temp_dir(), "model.keras")
        model.save(temp_filepath)
        reload_model = load_model(temp_filepath)
        reload_model(x)
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/core/identity.py | keras/src/layers/core/identity.py | from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.backend import KerasTensor
from keras.src.layers.layer import Layer
@keras_export("keras.layers.Identity")
class Identity(Layer):
"""Identity layer.
This layer should be used as a placeholder when no operation is to be
performed. The layer just returns its `inputs` argument as output.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.supports_masking = True
self._build_at_init()
def call(self, inputs):
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def compute_output_spec(self, inputs):
return tree.map_structure(
lambda x: KerasTensor(x.shape, dtype=x.dtype, sparse=x.sparse),
inputs,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/core/lambda_layer_test.py | keras/src/layers/core/lambda_layer_test.py | import numpy as np
import pytest
from keras.src import layers
from keras.src import ops
from keras.src import testing
class LambdaTest(testing.TestCase):
    """Tests for the `Lambda` layer."""

    @pytest.mark.requires_trainable_backend
    def test_lambda_basics(self):
        # Shape-preserving function; output shape is inferred.
        self.run_layer_test(
            layers.Lambda,
            init_kwargs={
                "function": ops.square,
            },
            input_shape=(2, 3),
            expected_output_shape=(2, 3),
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=False,
            custom_objects={"square": ops.square},
        )
        # Supplying a static `mask` turns on mask support.
        self.run_layer_test(
            layers.Lambda,
            init_kwargs={"function": ops.square, "mask": ops.ones((2, 3))},
            input_shape=(2, 3, 4),
            expected_output_shape=(2, 3, 4),
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=True,
            custom_objects={"square": ops.square},
        )

        def stacker(x):
            # Doubles axis 1 by concatenating the input with itself.
            return ops.concatenate([x, x], axis=1)

        # Shape-changing function with an explicit static `output_shape`.
        self.run_layer_test(
            layers.Lambda,
            init_kwargs={"function": stacker, "output_shape": (6,)},
            input_shape=(2, 3),
            expected_output_shape=(2, 6),
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=False,
            custom_objects={"stacker": stacker},
        )

        def stacker_shape(s):
            # Computes the output shape from the input shape `s`.
            return (s[0], s[1] * 2)

        # Shape-changing function with a callable `output_shape`.
        self.run_layer_test(
            layers.Lambda,
            init_kwargs={
                "function": stacker,
                "output_shape": stacker_shape,
            },
            input_shape=(2, 3),
            expected_output_shape=(2, 6),
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=False,
            custom_objects={"stacker": stacker, "stacker_shape": stacker_shape},
        )

    def test_correctness(self):
        layer = layers.Lambda(lambda x: x**2)
        output = layer(2 * np.ones((2, 3)))
        self.assertAllClose(4 * np.ones((2, 3)), output)

        # Test serialization roundtrip
        # (`safe_mode=False` is needed to deserialize a lambda).
        config = layer.get_config()
        layer = layers.Lambda.from_config(config, safe_mode=False)
        output = layer(2 * np.ones((2, 3)))
        self.assertAllClose(4 * np.ones((2, 3)), output)

    def test_correctness_lambda_shape(self):
        # Same as above, with a callable `output_shape`.
        layer = layers.Lambda(lambda x: x**2, output_shape=lambda x: x)
        output = layer(2 * np.ones((2, 3)))
        self.assertAllClose(4 * np.ones((2, 3)), output)

        # Test serialization roundtrip
        # (`safe_mode=False` is needed to deserialize a lambda).
        config = layer.get_config()
        layer = layers.Lambda.from_config(config, safe_mode=False)
        output = layer(2 * np.ones((2, 3)))
        self.assertAllClose(4 * np.ones((2, 3)), output)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/core/einsum_dense.py | keras/src/layers/core/einsum_dense.py | import math
import re
import string
import ml_dtypes
import numpy as np
from keras.src import activations
from keras.src import backend
from keras.src import constraints
from keras.src import dtype_policies
from keras.src import initializers
from keras.src import ops
from keras.src import quantizers
from keras.src import regularizers
from keras.src.api_export import keras_export
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.quantizers.quantization_config import QuantizationConfig
from keras.src.quantizers.quantizers import dequantize_with_sz_map
from keras.src.saving import serialization_lib
@keras_export("keras.layers.EinsumDense")
class EinsumDense(Layer):
"""A layer that uses `einsum` as the backing computation.
This layer can perform einsum calculations of arbitrary dimensionality.
Args:
equation: An equation describing the einsum to perform.
This equation must be a valid einsum string of the form
`ab,bc->ac`, `...ab,bc->...ac`, or
`ab...,bc->ac...` where 'ab', 'bc', and 'ac' can be any valid einsum
axis expression sequence.
output_shape: The expected shape of the output tensor
(excluding the batch dimension and any dimensions
represented by ellipses). You can specify `None` for any dimension
that is unknown or can be inferred from the input shape.
activation: Activation function to use. If you don't specify anything,
no activation is applied
(that is, a "linear" activation: `a(x) = x`).
bias_axes: A string containing the output dimension(s)
to apply a bias to. Each character in the `bias_axes` string
should correspond to a character in the output portion
of the `equation` string.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix.
bias_regularizer: Regularizer function applied to the bias vector.
kernel_constraint: Constraint function applied to the `kernel` weights
matrix.
bias_constraint: Constraint function applied to the bias vector.
lora_rank: Optional integer. If set, the layer's forward pass
will implement LoRA (Low-Rank Adaptation)
with the provided rank. LoRA sets the layer's kernel
to non-trainable and replaces it with a delta over the
original kernel, obtained via multiplying two lower-rank
trainable matrices
(the factorization happens on the last dimension).
This can be useful to reduce the
computation cost of fine-tuning large dense layers.
You can also enable LoRA on an existing
`EinsumDense` layer by calling `layer.enable_lora(rank)`.
lora_alpha: Optional integer. If set, this parameter scales the
low-rank adaptation delta (computed as the product of two lower-rank
trainable matrices) during the forward pass. The delta is scaled by
`lora_alpha / lora_rank`, allowing you to fine-tune the strength of
the LoRA adjustment independently of `lora_rank`.
**kwargs: Base layer keyword arguments, such as `name` and `dtype`.
Examples:
**Biased dense layer with einsums**
This example shows how to instantiate a standard Keras dense layer using
einsum operations. This example is equivalent to
`keras.layers.Dense(64, use_bias=True)`.
>>> layer = keras.layers.EinsumDense("ab,bc->ac",
... output_shape=64,
... bias_axes="c")
>>> input_tensor = keras.Input(shape=[32])
>>> output_tensor = layer(input_tensor)
>>> output_tensor.shape
(None, 64)
**Applying a dense layer to a sequence**
This example shows how to instantiate a layer that applies the same dense
operation to every element in a sequence. Here, the `output_shape` has two
values (since there are two non-batch dimensions in the output); the first
dimension in the `output_shape` is `None`, because the sequence dimension
`b` has an unknown shape.
>>> layer = keras.layers.EinsumDense("abc,cd->abd",
... output_shape=(None, 64),
... bias_axes="d")
>>> input_tensor = keras.Input(shape=[32, 128])
>>> output_tensor = layer(input_tensor)
>>> output_tensor.shape
(None, 32, 64)
**Applying a dense layer to a sequence using ellipses**
This example shows how to instantiate a layer that applies the same dense
operation to every element in a sequence, but uses the ellipsis notation
instead of specifying the batch and sequence dimensions.
Because we are using ellipsis notation and have specified only one axis, the
`output_shape` arg is a single value. When instantiated in this way, the
layer can handle any number of sequence dimensions - including the case
where no sequence dimension exists.
>>> layer = keras.layers.EinsumDense("...x,xy->...y",
... output_shape=64,
... bias_axes="y")
>>> input_tensor = keras.Input(shape=[32, 128])
>>> output_tensor = layer(input_tensor)
>>> output_tensor.shape
(None, 32, 64)
"""
def __init__(
    self,
    equation,
    output_shape,
    activation=None,
    bias_axes=None,
    kernel_initializer="glorot_uniform",
    bias_initializer="zeros",
    kernel_regularizer=None,
    bias_regularizer=None,
    kernel_constraint=None,
    bias_constraint=None,
    lora_rank=None,
    lora_alpha=None,
    gptq_unpacked_column_size=None,
    quantization_config=None,
    **kwargs,
):
    """Initialize the layer; see the class docstring for argument docs."""
    super().__init__(**kwargs)
    self.equation = equation
    # Normalize `output_shape` to a tuple (an int means a single dim).
    if isinstance(output_shape, int):
        self.partial_output_shape = (output_shape,)
    else:
        self.partial_output_shape = tuple(output_shape)
    self.bias_axes = bias_axes
    # `get(...)` resolves string identifiers / serialized configs.
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.lora_rank = lora_rank
    # LoRA delta is scaled by `lora_alpha / lora_rank`; defaulting alpha
    # to rank gives a scale of 1.
    self.lora_alpha = lora_alpha if lora_alpha is not None else lora_rank
    self.lora_enabled = False
    # Number of output columns before 4-bit packing (GPTQ bookkeeping).
    self.gptq_unpacked_column_size = gptq_unpacked_column_size
    self.quantization_config = quantization_config
def build(self, input_shape):
    """Create kernel/bias variables from the einsum equation and shapes."""
    # Resolve kernel, bias and full output shapes from the equation.
    shape_data = _analyze_einsum_string(
        self.equation,
        self.bias_axes,
        input_shape,
        self.partial_output_shape,
    )
    kernel_shape, bias_shape, full_output_shape = shape_data
    self.full_output_shape = tuple(full_output_shape)
    self.input_spec = InputSpec(ndim=len(input_shape))
    if self.quantization_mode is not None:
        self.quantized_build(
            kernel_shape,
            mode=self.quantization_mode,
            config=self.quantization_config,
        )
    # Skip creating a duplicate kernel variable when the layer is already
    # quantized to int8 or int4, because `quantized_build` has created the
    # appropriate kernel variable. For other modes (e.g., float8 or no
    # quantization), we still need the floating-point kernel.
    if self.quantization_mode not in ("int8", "int4", "gptq"):
        # If the layer is quantized to int8, `self._kernel` will be added
        # in `self._int8_build`. Therefore, we skip it here.
        self._kernel = self.add_weight(
            name="kernel",
            shape=tuple(kernel_shape),
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
            dtype=self.dtype,
            trainable=True,
        )
    if bias_shape is not None:
        self.bias = self.add_weight(
            name="bias",
            shape=tuple(bias_shape),
            initializer=self.bias_initializer,
            regularizer=self.bias_regularizer,
            constraint=self.bias_constraint,
            dtype=self.dtype,
            trainable=True,
        )
    else:
        # No `bias_axes` were requested.
        self.bias = None
    self.built = True
    # LoRA requested at construction time can only be enabled after build.
    if self.lora_rank:
        self.enable_lora(self.lora_rank, lora_alpha=self.lora_alpha)
@property
def kernel(self):
    """Return the effective kernel in its original (unpacked) shape.

    Depending on the quantization mode this is the plain stored kernel,
    an int4 kernel unpacked along the pack axis, or the GPTQ-quantized
    kernel (unpacked when stored as packed 4-bit values). If LoRA is
    enabled, the scaled low-rank delta is added on top.

    Raises:
        AttributeError: If the layer has not been built yet.
    """
    from keras.src.quantizers import gptq_core

    if not self.built:
        raise AttributeError(
            "You must build the layer before accessing `kernel`."
        )
    mode = self.quantization_mode
    is_gptq = mode == "gptq"
    is_int4 = mode == "int4"
    calibrated = bool(getattr(self, "is_gptq_calibrated", False))
    gptq_bits = (
        gptq_core.get_weight_bits_for_layer(self, None) if is_gptq else None
    )
    # Decide the source tensor first (packed vs already-quantized vs plain
    # kernel)
    if is_gptq and calibrated and gptq_bits != 4:
        # calibrated GPTQ, not 4-bit, no unpacking needed
        kernel = self.quantized_kernel
    else:
        # Start with the stored kernel
        kernel = getattr(self, "_kernel", None)
        # Handle int4 unpacking cases in one place
        if is_int4:
            kernel = quantizers.unpack_int4(
                kernel,
                self._orig_length_along_pack_axis,
                self._int4_pack_axis,
            )
        elif is_gptq and calibrated and gptq_bits == 4:
            # Calibrated 4-bit GPTQ: unpack the packed uint8 storage.
            kernel = quantizers.unpack_int4(
                self.quantized_kernel,
                orig_len=self.gptq_unpacked_column_size,
                axis=0,
                dtype="uint8",
            )
    # Apply LoRA if enabled
    if self.lora_enabled:
        kernel = kernel + (self.lora_alpha / self.lora_rank) * ops.matmul(
            self.lora_kernel_a, self.lora_kernel_b
        )
    return kernel
def compute_output_shape(self, _):
    # The full output shape was resolved in `build`; the input shape
    # argument is ignored.
    return self.full_output_shape
def call(self, inputs, training=None):
    """Apply the einsum contraction, then optional bias and activation."""
    outputs = ops.einsum(self.equation, inputs, self.kernel)
    if self.bias is not None:
        outputs = ops.add(outputs, self.bias)
    if self.activation is None:
        return outputs
    return self.activation(outputs)
def enable_lora(
    self,
    rank,
    lora_alpha=None,
    a_initializer="he_uniform",
    b_initializer="zeros",
):
    """Enable LoRA on the (already built) layer.

    Freezes the base kernel and adds two trainable low-rank factors,
    `lora_kernel_a` and `lora_kernel_b`, whose product (scaled by
    `lora_alpha / rank`) is added to the kernel in the forward pass.

    Args:
        rank: Rank of the low-rank factorization.
        lora_alpha: Scaling numerator for the LoRA delta. Defaults to
            `rank`, i.e. a scale of 1.
        a_initializer: Initializer for `lora_kernel_a`.
        b_initializer: Initializer for `lora_kernel_b`.

    Raises:
        ValueError: If a kernel constraint is set, the layer is not yet
            built, or LoRA is already enabled.
        NotImplementedError: If the layer is GPTQ-quantized.
    """
    if self.kernel_constraint:
        raise ValueError(
            "Lora is incompatible with kernel constraints. "
            "In order to enable lora on this layer, remove the "
            "`kernel_constraint` argument."
        )
    if not self.built:
        raise ValueError(
            "Cannot enable lora on a layer that isn't yet built."
        )
    if self.lora_enabled:
        raise ValueError(
            "lora is already enabled. This can only be done once per layer."
        )
    if self.quantization_mode == "gptq":
        raise NotImplementedError(
            "lora is not currently supported with GPTQ quantization."
        )
    self._tracker.unlock()
    # Determine the appropriate (unpacked) kernel shape for LoRA.
    if self.quantization_mode == "int4":
        # When int4-quantized, `self._kernel` is packed along
        # `self._int4_pack_axis` and its length equals
        # `(orig_len + 1) // 2`. Recover the original length so that
        # the LoRA matrices operate in the full-precision space.
        kernel_shape_for_lora = list(self._kernel.shape)
        pack_axis = getattr(self, "_int4_pack_axis", 0)
        orig_len = getattr(self, "_orig_length_along_pack_axis", None)
        if orig_len is not None:
            kernel_shape_for_lora[pack_axis] = orig_len
        kernel_shape_for_lora = tuple(kernel_shape_for_lora)
    else:
        kernel_shape_for_lora = self.kernel.shape
    self.lora_kernel_a = self.add_weight(
        name="lora_kernel_a",
        shape=(kernel_shape_for_lora[:-1] + (rank,)),
        initializer=initializers.get(a_initializer),
        regularizer=self.kernel_regularizer,
    )
    self.lora_kernel_b = self.add_weight(
        name="lora_kernel_b",
        shape=(rank, self.kernel.shape[-1]),
        initializer=initializers.get(b_initializer),
        regularizer=self.kernel_regularizer,
    )
    # Freeze the base kernel; only the LoRA factors remain trainable.
    self._kernel.trainable = False
    self._tracker.lock()
    self.lora_enabled = True
    self.lora_rank = rank
    self.lora_alpha = lora_alpha if lora_alpha is not None else rank
def save_own_variables(self, store):
    """Save variables into `store` in the mode-specific order.

    The ordering comes from `variable_serialization_spec` for the
    current quantization mode; `load_own_variables` consumes the same
    order.
    """
    # Do nothing if the layer isn't yet built
    if not self.built:
        return
    mode = self.quantization_mode
    if mode not in self.variable_serialization_spec:
        raise self._quantization_mode_error(mode)
    # Kernel plus optional merged LoRA-aware scale (returns (kernel, None)
    # for None/gptq)
    kernel_value, merged_kernel_scale = self._get_kernel_with_merged_lora()
    idx = 0
    for name in self.variable_serialization_spec[mode]:
        if name == "kernel":
            store[str(idx)] = kernel_value
        elif name == "bias" and self.bias is None:
            # No bias variable exists: skip without consuming an index.
            continue
        elif name == "kernel_scale" and mode in ("int4", "int8"):
            # For int4/int8, the merged LoRA scale (if any) comes from
            # `_get_kernel_with_merged_lora()`
            store[str(idx)] = merged_kernel_scale
        else:
            store[str(idx)] = getattr(self, name)
        idx += 1
def load_own_variables(self, store):
    """Load variables from `store` in the mode-specific order.

    Mirrors the ordering written by `save_own_variables`.
    """
    if not self.lora_enabled:
        self._check_load_own_variables(store)
    # Do nothing if the layer isn't yet built
    if not self.built:
        return
    mode = self.quantization_mode
    if mode not in self.variable_serialization_spec:
        raise self._quantization_mode_error(mode)
    # A saved GPTQ quantized model will always be calibrated.
    self.is_gptq_calibrated = mode == "gptq"
    idx = 0
    for name in self.variable_serialization_spec[mode]:
        if name == "kernel":
            self._kernel.assign(store[str(idx)])
        elif name == "bias" and self.bias is None:
            # No bias variable was saved: skip without consuming an index.
            continue
        else:
            getattr(self, name).assign(store[str(idx)])
        idx += 1
    if self.lora_enabled:
        # The saved kernel had the LoRA delta merged in (see
        # `_get_kernel_with_merged_lora`), so reset the factors to zero.
        self.lora_kernel_a.assign(ops.zeros(self.lora_kernel_a.shape))
        self.lora_kernel_b.assign(ops.zeros(self.lora_kernel_b.shape))
def get_config(self):
    """Return the serializable config of the layer."""
    config = super().get_config()
    config.update(
        {
            "output_shape": self.partial_output_shape,
            "equation": self.equation,
            "activation": activations.serialize(self.activation),
            "bias_axes": self.bias_axes,
            "kernel_initializer": initializers.serialize(
                self.kernel_initializer
            ),
            "bias_initializer": initializers.serialize(self.bias_initializer),
            "kernel_regularizer": regularizers.serialize(
                self.kernel_regularizer
            ),
            "bias_regularizer": regularizers.serialize(self.bias_regularizer),
            "activity_regularizer": regularizers.serialize(
                self.activity_regularizer
            ),
            "kernel_constraint": constraints.serialize(self.kernel_constraint),
            "bias_constraint": constraints.serialize(self.bias_constraint),
            "quantization_config": serialization_lib.serialize_keras_object(
                self.quantization_config
            ),
        }
    )
    # Only persist optional fields when they are actually set.
    if self.lora_rank:
        config["lora_rank"] = self.lora_rank
        config["lora_alpha"] = self.lora_alpha
    if self.gptq_unpacked_column_size:
        config["gptq_unpacked_column_size"] = self.gptq_unpacked_column_size
    return config
@classmethod
def from_config(cls, config):
    """Recreate the layer, deserializing `quantization_config` first."""
    config = dict(config)
    quant_cfg = config.get("quantization_config", None)
    config["quantization_config"] = (
        serialization_lib.deserialize_keras_object(quant_cfg)
    )
    return super().from_config(config)
@property
def variable_serialization_spec(self):
    """Returns a dict mapping quantization modes to variable names in order.

    This spec is used by `save_own_variables` and `load_own_variables` to
    determine the correct ordering of variables during serialization for
    each quantization mode. `None` means no quantization.
    """
    return {
        None: [
            "kernel",
            "bias",
        ],
        "int8": [
            "kernel",
            "bias",
            "kernel_scale",
        ],
        "int4": [
            "kernel",
            "bias",
            "kernel_scale",
        ],
        "float8": [
            "kernel",
            "bias",
            "inputs_scale",
            "inputs_amax_history",
            "kernel_scale",
            "kernel_amax_history",
            "outputs_grad_scale",
            "outputs_grad_amax_history",
        ],
        # GPTQ stores the quantized kernel and its grouping metadata.
        "gptq": [
            "bias",
            "quantized_kernel",
            "kernel_scale",
            "kernel_zero",
            "g_idx",
        ],
    }
def quantized_build(self, kernel_shape, mode, config=None):
    """Create the variables required for quantization mode `mode`."""
    shape_builders = {
        "int8": self._int8_build,
        "int4": self._int4_build,
        "gptq": self._gptq_build,
    }
    if mode == "float8":
        # float8 variables do not depend on the kernel shape.
        self._float8_build()
    elif mode in shape_builders:
        shape_builders[mode](kernel_shape, config)
    else:
        raise self._quantization_mode_error(mode)
    self._is_quantized = True
def _int8_build(self, kernel_shape, config=None):
    """Create the int8 kernel, its scale, and the input quantizer."""
    self._set_quantization_info()
    self.inputs_quantizer = (
        QuantizationConfig.activation_quantizer_or_default(
            config,
            quantizers.AbsMaxQuantizer(),
        )
    )
    # If the config provided a default AbsMaxQuantizer, we need to
    # override the axis to match the equation's reduction axes.
    self.quantization_axis = tuple(self._input_reduced_axes)
    # Quantized kernel stored as int8; not trainable.
    self._kernel = self.add_weight(
        name="kernel",
        shape=kernel_shape,
        initializer="zeros",
        dtype="int8",
        trainable=False,
    )
    # Per-axis scale used to dequantize the kernel.
    kernel_scale_shape = self._get_kernel_scale_shape(kernel_shape)
    self.kernel_scale = self.add_weight(
        name="kernel_scale",
        shape=kernel_scale_shape,
        initializer="ones",
        trainable=False,
    )
def _gptq_build(self, kernel_shape, config):
    """Allocate the quantized kernel and GPTQ parameters for EinsumDense.

    Args:
        kernel_shape: tuple/list; the layer's original kernel shape, e.g.
            `[in_features, out_features]` (rank 2) or
            `[in_features, heads, head_dim]` (rank 3).
        config: quantization config used to resolve the group size and
            weight bit-width for this layer. A group size of -1 means
            per-output-channel quantization with no grouping.

    Raises:
        ValueError: If `kernel_shape` is not rank 2 or 3, or the
            row/column split cannot be determined.
    """
    from keras.src.quantizers import gptq_core

    # Ensures the forward pass uses the original high-precision kernel
    # until calibration has been performed.
    self.is_gptq_calibrated = False
    self.original_kernel_shape = kernel_shape
    if len(kernel_shape) == 2:
        rows = kernel_shape[0]
        columns = kernel_shape[1]
    elif len(kernel_shape) == 3:
        shape = list(self.original_kernel_shape)
        # Heuristic: the largest dimension is taken to be the model dim.
        d_model_dim_index = shape.index(max(shape))
        if d_model_dim_index == 0:  # QKV projection case
            in_features, heads, head_dim = shape
            rows, columns = (
                in_features,
                heads * head_dim,
            )
        elif d_model_dim_index in [1, 2]:  # Attention Output case
            heads, head_dim, out_features = shape
            rows, columns = (
                heads * head_dim,
                out_features,
            )
        else:
            raise ValueError("Could not determine row/column split.")
    else:
        # Previously this case fell through with `rows`/`columns`
        # undefined, producing an opaque NameError below. Fail loudly
        # with an actionable message instead.
        raise ValueError(
            "GPTQ quantization supports only kernels of rank 2 or 3. "
            f"Received kernel_shape={kernel_shape}."
        )

    group_size = gptq_core.get_group_size_for_layer(self, config)
    n_groups = 1 if group_size == -1 else math.ceil(rows / group_size)
    # Remember the unpacked column count for later 4-bit unpacking.
    self.gptq_unpacked_column_size = columns

    weight_bits = gptq_core.get_weight_bits_for_layer(self, config)
    # For 4-bit weights, we pack two values per byte.
    kernel_columns = (columns + 1) // 2 if weight_bits == 4 else columns

    self._set_quantization_info()
    # Quantized kernel is stored transposed: (columns, rows).
    self.quantized_kernel = self.add_weight(
        name="kernel",
        shape=(kernel_columns, rows),
        initializer="zeros",
        dtype="uint8",
        trainable=False,
    )
    # Per-group scale and zero-point for dequantization.
    self.kernel_scale = self.add_weight(
        name="kernel_scale",
        shape=(columns, n_groups),
        initializer="ones",
        trainable=False,
    )
    self.kernel_zero = self.add_weight(
        name="zero_point",
        shape=(columns, n_groups),
        initializer="zeros",
        dtype="uint8",
        trainable=False,
    )
    # Maps each input row to its quantization group.
    self.g_idx = self.add_weight(
        name="g_idx",
        shape=(rows,),
        initializer="zeros",
        dtype="float32",
        trainable=False,
    )
def _gptq_call(self, inputs, training=False):
    """Forward pass for GPTQ mode.

    Before calibration, the original high-precision kernel is used.
    After calibration, the stored quantized kernel is unpacked (for
    4-bit weights), dequantized with the per-group scale/zero map, and
    reshaped back to the original kernel layout before the einsum.
    """
    from keras.src.quantizers import gptq_core

    if not self.is_gptq_calibrated:
        # Calibration hasn't run yet: fall back to the float kernel.
        W = self._kernel
    else:
        should_unpack = (
            gptq_core.get_weight_bits_for_layer(self, config=None) == 4
        )
        W = (
            quantizers.unpack_int4(
                self.quantized_kernel,
                orig_len=self.gptq_unpacked_column_size,
                axis=0,
                dtype="uint8",
            )
            if should_unpack
            else self.quantized_kernel
        )
        # Dequantize using the per-group scale / zero-point map.
        W = dequantize_with_sz_map(
            W,
            self.kernel_scale,
            self.kernel_zero,
            self.g_idx,
        )
        # Stored layout is (columns, rows); restore the original kernel
        # layout expected by the einsum equation.
        W = ops.transpose(W)
        W = ops.reshape(W, self.original_kernel_shape)

    y = ops.einsum(self.equation, inputs, W)
    if self.bias is not None:
        y = ops.add(y, self.bias)
    if self.activation is not None:
        y = self.activation(y)
    return y
def _int4_build(self, kernel_shape, config=None):
    """Build variables for int4 quantization.

    The packed int4 kernel stores two int4 values within a single int8
    byte. Packing is performed along the first axis contained in
    `self._kernel_reduced_axes` (which is the axis that gets reduced in
    the einsum and thus analogous to the input-dim axis of a `Dense`
    layer).
    """
    self._set_quantization_info()
    # Quantizer for the inputs (per the reduced axes)
    self.inputs_quantizer = (
        QuantizationConfig.activation_quantizer_or_default(
            config,
            quantizers.AbsMaxQuantizer(),
        )
    )
    # If the config provided a default AbsMaxQuantizer, we need to
    # override the axis to match the equation's reduction axes.
    self.quantization_axis = tuple(self._input_reduced_axes)
    # Choose the axis to perform int4 packing - use the first reduced axis
    # for the kernel (analogous to the input dimension of a Dense layer).
    self._int4_pack_axis = (
        self._kernel_reduced_axes[0] if self._kernel_reduced_axes else 0
    )
    # Original length along the packing axis (needed for unpacking).
    self._orig_length_along_pack_axis = kernel_shape[self._int4_pack_axis]
    # Packed length (ceil division by 2). Note: assumes static integer.
    packed_len = (self._orig_length_along_pack_axis + 1) // 2
    # Derive packed kernel shape by replacing the pack axis dimension.
    packed_kernel_shape = list(kernel_shape)
    packed_kernel_shape[self._int4_pack_axis] = packed_len
    packed_kernel_shape = tuple(packed_kernel_shape)
    # Add packed int4 kernel variable (stored as int8 dtype).
    self._kernel = self.add_weight(
        name="kernel",
        shape=packed_kernel_shape,
        initializer="zeros",
        dtype="int8",
        trainable=False,
    )
    # Kernel scale (used to dequantize the packed kernel).
    kernel_scale_shape = self._get_kernel_scale_shape(kernel_shape)
    self.kernel_scale = self.add_weight(
        name="kernel_scale",
        shape=kernel_scale_shape,
        initializer="ones",
        trainable=False,
    )
def _float8_build(self):
    """Create float8 scale and amax-history variables.

    One (scale, amax_history) pair is created for each of: the inputs,
    the kernel, and the output gradients.
    """
    from keras.src.dtype_policies import QuantizedFloat8DTypePolicy

    # If `self.dtype_policy` is not QuantizedFloat8DTypePolicy, then set
    # `amax_history_length` to its default value.
    amax_history_length = getattr(
        self.dtype_policy,
        "amax_history_length",
        QuantizedFloat8DTypePolicy.default_amax_history_length,
    )
    # We set `trainable=True` because we will use the gradients to overwrite
    # these variables
    scale_kwargs = {
        "shape": (),
        "initializer": "ones",
        "dtype": "float32",  # Always be float32
        "trainable": True,
        "autocast": False,
        "overwrite_with_gradient": True,
    }
    amax_history_kwargs = {
        "shape": (amax_history_length,),
        "initializer": "zeros",
        "dtype": "float32",  # Always be float32
        "trainable": True,
        "autocast": False,
        "overwrite_with_gradient": True,
    }
    self.inputs_scale = self.add_weight(name="inputs_scale", **scale_kwargs)
    self.inputs_amax_history = self.add_weight(
        name="inputs_amax_history", **amax_history_kwargs
    )
    self.kernel_scale = self.add_weight(name="kernel_scale", **scale_kwargs)
    self.kernel_amax_history = self.add_weight(
        name="kernel_amax_history", **amax_history_kwargs
    )
    self.outputs_grad_scale = self.add_weight(
        name="outputs_grad_scale", **scale_kwargs
    )
    self.outputs_grad_amax_history = self.add_weight(
        name="outputs_grad_amax_history", **amax_history_kwargs
    )
def _int8_call(self, inputs, training=None):
    """Forward pass for int8 mode, with a custom input gradient."""

    @ops.custom_gradient
    def einsum_with_inputs_gradient(inputs, kernel, kernel_scale):
        """Performs int8 quantized einsum with a custom gradient.

        Computes the einsum operation with quantized inputs and a quantized
        kernel, then de-quantizes the result.

        Also computes the gradient with respect to the original,
        full-precision inputs by using a de-quantized kernel.

        Args:
            inputs: The full-precision input tensor.
            kernel: The int8 quantized kernel tensor.
            kernel_scale: The float32 scale factor for the kernel.

        Returns:
            A tuple `(output, grad_fn)`:
                `output`: The de-quantized result of the einsum operation.
                `grad_fn`: The custom gradient function for the backward
                    pass.

        Raises:
            ValueError: If the quantization mode is not supported.
        """

        def grad_fn(*args, upstream=None):
            if upstream is None:
                (upstream,) = args
            # De-scale kernel
            _kernel_scale = kernel_scale
            _kernel_scale = self._adjust_scale_for_dequant(_kernel_scale)
            float_kernel = ops.divide(
                ops.cast(kernel, dtype=self.compute_dtype),
                _kernel_scale,
            )
            # From https://stackoverflow.com/a/47609896
            inputs_grad = ops.einsum(
                self._custom_gradient_equation, upstream, float_kernel
            )
            return (inputs_grad, None, None)

        if self.inputs_quantizer:
            inputs, inputs_scale = self.inputs_quantizer(
                inputs, axis=self.quantization_axis
            )
            # Align `inputs_scale` axes with the output
            # for correct broadcasting
            inputs_scale = self._adjust_scale_for_quant(
                inputs_scale, "input"
            )
            x = ops.einsum(self.equation, inputs, kernel)
            # De-scale outputs
            x = ops.cast(x, self.compute_dtype)
            x = ops.divide(x, ops.multiply(inputs_scale, kernel_scale))
        else:
            # Weight-only quantization: dequantize kernel and use float
            # einsum. This is a workaround for PyTorch's einsum which
            # doesn't support mixed-precision inputs (float input,
            # int8 kernel).
            if backend.backend() == "torch":
                kernel_scale = self._adjust_scale_for_dequant(kernel_scale)
                float_kernel = ops.divide(
                    ops.cast(kernel, dtype=self.compute_dtype),
                    kernel_scale,
                )
                x = ops.einsum(self.equation, inputs, float_kernel)
            else:
                x = ops.einsum(self.equation, inputs, kernel)
                # De-scale outputs
                x = ops.cast(x, self.compute_dtype)
                x = ops.divide(x, kernel_scale)
        return x, grad_fn

    x = einsum_with_inputs_gradient(
        inputs,
        ops.convert_to_tensor(self._kernel),
        ops.convert_to_tensor(self.kernel_scale),
    )
    if self.lora_enabled:
        # Add the scaled LoRA delta computed on the full-precision inputs.
        lora_x = ops.einsum(self.equation, inputs, self.lora_kernel_a)
        lora_x = ops.matmul(lora_x, self.lora_kernel_b)
        x = ops.add(x, (self.lora_alpha / self.lora_rank) * lora_x)
    if self.bias is not None:
        x = ops.add(x, self.bias)
    if self.activation is not None:
        x = self.activation(x)
    return x
def _int4_call(self, inputs, training=None):
"""Forward pass for int4 quantized `EinsumDense`."""
pack_axis = getattr(self, "_int4_pack_axis", 0)
orig_len = getattr(self, "_orig_length_along_pack_axis", None)
@ops.custom_gradient
def einsum_with_inputs_gradient(inputs, packed_kernel, kernel_scale):
"""Performs int4 quantized einsum with a custom gradient.
Computes the einsum operation with quantized inputs and a quantized
kernel, then de-quantizes the result.
Also computes the gradient with respect to the original,
full-precision inputs by using a de-quantized kernel.
Args:
inputs: The full-precision input tensor.
packed_kernel: The int4-packed kernel tensor.
kernel_scale: The float32 scale factor for the kernel.
Returns:
A tuple `(output, grad_fn)`:
`output`: The de-quantized result of the einsum operation.
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/core/reversible_embedding_test.py | keras/src/layers/core/reversible_embedding_test.py | import os
import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import saving
from keras.src.quantizers.quantization_config import Int4QuantizationConfig
from keras.src.quantizers.quantization_config import Int8QuantizationConfig
from keras.src.quantizers.quantizers import AbsMaxQuantizer
from keras.src.testing import test_case
from keras.src.testing.test_utils import named_product
class ReversibleEmbeddingTest(test_case.TestCase):
@parameterized.named_parameters(
("int8", "int8", {"axis": -1}, {"axis": -1}),
(
"int4",
"int4",
{"axis": -1, "value_range": (-8, 7), "output_dtype": "int8"},
{"axis": -1},
),
("int8_weight_only", "int8", {"axis": -1}, None),
)
def test_reversible_embedding_quantize(
self, mode, weight_quantizer_args, activation_quantizer_args
):
"""Test ReversibleEmbedding quantization with QuantizationConfig."""
layer = layers.ReversibleEmbedding(
input_dim=10, output_dim=6, tie_weights=True
)
layer.build((None,))
weight_quantizer = AbsMaxQuantizer(**weight_quantizer_args)
if activation_quantizer_args is not None:
activation_quantizer = AbsMaxQuantizer(**activation_quantizer_args)
else:
activation_quantizer = None
if mode == "int8":
config = Int8QuantizationConfig(
weight_quantizer=weight_quantizer,
activation_quantizer=activation_quantizer,
)
elif mode == "int4":
config = Int4QuantizationConfig(
weight_quantizer=weight_quantizer,
activation_quantizer=activation_quantizer,
)
layer.quantize(mode, config=config)
if activation_quantizer_args is not None:
# Verify inputs_quantizer is set correctly
self.assertIsInstance(layer.inputs_quantizer, AbsMaxQuantizer)
else:
# Verify inputs_quantizer is None
self.assertIsNone(layer.inputs_quantizer)
# Verify reverse call works
x = np.random.random((2, 6)).astype("float32")
y = layer(x, reverse=True)
self.assertEqual(y.shape, (2, 10))
@parameterized.named_parameters(
("tie_weights", True),
("untie_weights", False),
)
@pytest.mark.requires_trainable_backend
def test_reversible_embedding_basics(self, tie_weights):
self.run_layer_test(
layers.ReversibleEmbedding,
init_kwargs={
"input_dim": 100,
"output_dim": 32,
"tie_weights": tie_weights,
"embeddings_initializer": "HeNormal",
"logit_soft_cap": 50,
},
input_data=np.random.randint(low=0, high=100, size=(4, 10)),
expected_output_shape=(4, 10, 32),
expected_num_trainable_weights=1 if tie_weights else 2,
)
@parameterized.named_parameters(
("tie_weights", True),
("untie_weights", False),
)
def test_saving(self, tie_weights):
input_data = np.random.randint(low=0, high=100, size=(4, 10))
model = models.Sequential(
[
layers.ReversibleEmbedding(
input_dim=100,
output_dim=32,
tie_weights=tie_weights,
)
]
)
path = os.path.join(self.get_temp_dir(), "model.keras")
model_output = model(input_data)
model.save(path)
restored_model = saving.load_model(path)
restored_output = restored_model(input_data)
self.assertAllClose(model_output, restored_output)
def test_correctness(self):
layer = layers.ReversibleEmbedding(input_dim=3, output_dim=2)
layer.build()
layer.embeddings.assign(np.array([[0.0, 0.0], [2.0, 2.0], [3.0, 3.0]]))
out = layer(np.array(([2, 1, 0])))
self.assertAllClose(out, np.array([[3.0, 3.0], [2.0, 2.0], [0.0, 0.0]]))
layer = layers.ReversibleEmbedding(input_dim=3, output_dim=2)
layer.build()
layer.embeddings.assign(np.array([[0.0, 0.0], [2.0, 2.0], [3.0, 3.0]]))
out = layer(np.array(([[1.0, 1.0]])), reverse=True)
self.assertAllClose(out, np.array([[0.0, 4.0, 6.0]]))
layer = layers.ReversibleEmbedding(
input_dim=3, output_dim=2, logit_soft_cap=5
)
layer.build()
layer.embeddings.assign(np.array([[0.0, 0.0], [2.0, 2.0], [3.0, 3.0]]))
out = layer(np.array(([[1.0, 1.0]])), reverse=True)
self.assertAllClose(out, np.array([[0.0, 3.320184, 4.168273]]))
def test_reverse_dtype(self):
embedding = layers.ReversibleEmbedding(100, 16, reverse_dtype="float32")
input_data = ops.ones(shape=(4, 10, 16))
output_data = embedding(input_data, reverse=True)
self.assertEqual(output_data.shape, (4, 10, 100))
self.assertDType(output_data, "float32")
if backend.backend() == "torch":
import torch
if not torch.cuda.is_available():
self.skipTest("Torch CPU does not support float16")
embedding = layers.ReversibleEmbedding(100, 16, reverse_dtype="float16")
input_data = ops.ones(shape=(4, 10, 16))
output_data = embedding(input_data, reverse=True)
self.assertEqual(output_data.shape, (4, 10, 100))
self.assertDType(output_data, "float16")
@parameterized.named_parameters(
named_product(mode=("int4", "int8"), tie_weights=(False, True))
)
def test_quantize_int(self, mode, tie_weights):
layer = layers.ReversibleEmbedding(10, 16, tie_weights=tie_weights)
layer.build()
x = np.random.randint(0, 9, size=(64, 3))
x_reverse = np.random.uniform(size=(64, 16))
y_float = layer(x)
y_reverse_float = layer(x_reverse, reverse=True)
layer.quantize(mode)
# Verify the dtype of the weights.
if not tie_weights:
# The reverse_embeddings's dtype is int8, despite the int4
# quantization, because we pack the int4 values into int8.
self.assertDType(layer.reverse_embeddings, "int8")
self.assertDType(
layer.reverse_embeddings_scale, layer.variable_dtype
)
# Verify the correctness of the outputs.
y_quantized = layer(x)
y_reverse_quantized = layer(x_reverse, reverse=True)
mse = ops.mean(ops.square(y_float - y_quantized))
mse_reverse = ops.mean(
ops.square(y_reverse_float - y_reverse_quantized)
)
self.assertLess(mse, 1e-3) # A weak correctness test
self.assertLess(mse_reverse, 1e-3) # A weak correctness test
# Check model save / load round-trip.
model = models.Sequential([layer])
temp_filepath = os.path.join(
self.get_temp_dir(), "quantized_model.keras"
)
model.save(temp_filepath)
new_model = saving.load_model(temp_filepath)
self.assertAllClose(model.predict(x), new_model.predict(x))
# Check weights-only save / load round-trip.
temp_filepath = os.path.join(
self.get_temp_dir(), "quantized_model.weights.h5"
)
model.save_weights(temp_filepath)
new_model = models.Sequential(
[layers.ReversibleEmbedding(10, 16, tie_weights=tie_weights)]
)
new_model.build((None, 3))
new_model.quantize(mode)
new_model.load_weights(temp_filepath)
self.assertAllClose(model.predict(x), new_model.predict(x))
@parameterized.named_parameters(
("int8_tie_weights", "int8_from_mixed_bfloat16", True, 0, 2),
("int8_untie_weights", "int8_from_mixed_bfloat16", False, 0, 4),
("int4_tie_weights", "int4_from_mixed_bfloat16", True, 0, 2),
("int4_untie_weights", "int4_from_mixed_bfloat16", False, 0, 4),
)
@pytest.mark.requires_trainable_backend
def test_quantize_dtype_argument(
self,
dtype,
tie_weights,
num_trainable_weights,
num_non_trainable_weights,
):
self.run_layer_test(
layers.ReversibleEmbedding,
init_kwargs={
"input_dim": 100,
"output_dim": 32,
"tie_weights": tie_weights,
"embeddings_initializer": "HeNormal",
"dtype": dtype,
},
input_data=np.random.randint(low=0, high=100, size=(4, 10)),
expected_output_shape=(4, 10, 32),
expected_num_trainable_weights=num_trainable_weights,
expected_num_non_trainable_weights=num_non_trainable_weights,
expected_num_non_trainable_variables=num_non_trainable_weights,
)
def test_reversible_embedding_int8_custom_quantizer(self):
"""
Test custom quantizer serialization for reversible embedding layer with
int8 quantization.
"""
# Setup
weight_range = (-20, 20)
config = Int8QuantizationConfig(
weight_quantizer=AbsMaxQuantizer(axis=-1, value_range=weight_range),
)
# Build & Quantize
layer = layers.ReversibleEmbedding(input_dim=100, output_dim=16)
layer.build(None)
layer.quantize("int8", config=config)
# Serialize & Deserialize
serialized = layer.get_config()
new_layer = layers.ReversibleEmbedding.from_config(serialized)
# Verify
self.assertIsInstance(
new_layer.quantization_config, Int8QuantizationConfig
)
quantizer = new_layer.quantization_config.weight_quantizer
self.assertIsInstance(quantizer, AbsMaxQuantizer)
self.assertAllEqual(quantizer.value_range, weight_range)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/core/wrapper_test.py | keras/src/layers/core/wrapper_test.py | import pytest
from keras.src import layers
from keras.src import ops
from keras.src import testing
class ExampleWrapper(layers.Wrapper):
    """Minimal concrete `Wrapper` used to exercise the base class."""

    def call(self, inputs, **kwargs):
        # Delegate to the wrapped layer, then cast the result to this
        # wrapper's compute dtype.
        wrapped_output = self.layer(inputs, **kwargs)
        return ops.cast(wrapped_output, self.compute_dtype)
class WrapperTest(testing.TestCase):
    """Unit tests for the `Wrapper` base class, via `ExampleWrapper`."""

    @pytest.mark.requires_trainable_backend
    def test_wrapper_basics(self):
        # Wrapping a plain Dense(2): kernel + bias -> 2 trainable weights,
        # nothing non-trainable, no losses.
        self.run_layer_test(
            ExampleWrapper,
            init_kwargs={
                "layer": layers.Dense(2),
            },
            input_shape=(2, 3),
            expected_output_shape=(2, 2),
            expected_num_trainable_weights=2,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=False,
        )
        # An activity regularizer on the *wrapped* layer must surface as one
        # loss on the wrapper.
        self.run_layer_test(
            ExampleWrapper,
            init_kwargs={
                "layer": layers.Dense(2, activity_regularizer="l2"),
            },
            input_shape=(2, 3),
            expected_output_shape=(2, 2),
            expected_num_trainable_weights=2,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=1,
            supports_masking=False,
        )
        # An activity regularizer on the wrapper itself also yields one loss.
        self.run_layer_test(
            ExampleWrapper,
            init_kwargs={
                "layer": layers.Dense(2),
                "activity_regularizer": "l2",
            },
            input_shape=(2, 3),
            expected_output_shape=(2, 2),
            expected_num_trainable_weights=2,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=1,
            supports_masking=False,
        )
        # BatchNormalization: both its trainable and its 2 non-trainable
        # weights must be tracked through the wrapper.
        self.run_layer_test(
            ExampleWrapper,
            init_kwargs={
                "layer": layers.BatchNormalization(),
            },
            input_shape=(2, 3),
            expected_output_shape=(2, 3),
            expected_num_trainable_weights=2,
            expected_num_non_trainable_weights=2,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=False,
        )

    def test_wrapper_invalid_layer(self):
        # Non-layer objects must be rejected with a descriptive ValueError.
        invalid_layer = "This is not a valid Keras layer."
        with self.assertRaisesRegex(
            ValueError,
            "Layer .* supplied to Wrapper isn't a supported layer type. "
            "Please ensure wrapped layer is a valid Keras layer.",
        ):
            layers.Wrapper(invalid_layer)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/core/wrapper.py | keras/src/layers/core/wrapper.py | from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
from keras.src.saving import serialization_lib
@keras_export("keras.layers.Wrapper")
class Wrapper(Layer):
    """Abstract wrapper base class.

    Wrappers take another layer and augment it in various ways.
    Do not use this class as a layer, it is only an abstract base class.
    Two usable wrappers are the `TimeDistributed` and `Bidirectional` layers.

    Args:
        layer: The layer to be wrapped.
    """

    def __init__(self, layer, **kwargs):
        # Validate with an explicit `isinstance` check rather than an
        # `assert` inside `try/except`: `assert` statements are stripped
        # when Python runs with the `-O` flag, which would silently let
        # invalid objects through.
        if not isinstance(layer, Layer):
            raise ValueError(
                f"Layer {layer} supplied to Wrapper isn't "
                "a supported layer type. Please "
                "ensure wrapped layer is a valid Keras layer."
            )
        super().__init__(**kwargs)
        self.layer = layer

    def build(self, input_shape=None):
        """Build the wrapped layer if it hasn't been built yet."""
        if not self.layer.built:
            self.layer.build(input_shape)
            self.layer.built = True

    def get_config(self):
        """Return the config, including the serialized wrapped layer."""
        config = {"layer": serialization_lib.serialize_keras_object(self.layer)}
        base_config = super().get_config()
        return {**base_config, **config}

    @classmethod
    def from_config(cls, config, custom_objects=None):
        """Recreate the wrapper, deserializing the wrapped layer first."""
        layer = serialization_lib.deserialize_keras_object(
            config.pop("layer"),
            custom_objects=custom_objects,
        )
        return cls(layer, **config)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/core/dense.py | keras/src/layers/core/dense.py | import math
import ml_dtypes
from keras.src import activations
from keras.src import constraints
from keras.src import initializers
from keras.src import ops
from keras.src import quantizers
from keras.src import regularizers
from keras.src.api_export import keras_export
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.quantizers.quantization_config import QuantizationConfig
from keras.src.quantizers.quantizers import dequantize_with_sz_map
from keras.src.saving import serialization_lib
@keras_export("keras.layers.Dense")
class Dense(Layer):
"""Just your regular densely-connected NN layer.
`Dense` implements the operation:
`output = activation(dot(input, kernel) + bias)`
where `activation` is the element-wise activation function
passed as the `activation` argument, `kernel` is a weights matrix
created by the layer, and `bias` is a bias vector created by the layer
(only applicable if `use_bias` is `True`). When this layer is
followed by a `BatchNormalization` layer, it is recommended to set
`use_bias=False` as `BatchNormalization` has its own bias term.
Note: If the input to the layer has a rank greater than 2, `Dense`
computes the dot product between the `inputs` and the `kernel` along the
last axis of the `inputs` and axis 0 of the `kernel` (using `tf.tensordot`).
For example, if input has dimensions `(batch_size, d0, d1)`, then we create
a `kernel` with shape `(d1, units)`, and the `kernel` operates along axis 2
of the `input`, on every sub-tensor of shape `(1, 1, d1)` (there are
`batch_size * d0` such sub-tensors). The output in this case will have
shape `(batch_size, d0, units)`.
Args:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
lora_rank: Optional integer. If set, the layer's forward pass
will implement LoRA (Low-Rank Adaptation)
with the provided rank. LoRA sets the layer's kernel
to non-trainable and replaces it with a delta over the
original kernel, obtained via multiplying two lower-rank
trainable matrices. This can be useful to reduce the
computation cost of fine-tuning large dense layers.
You can also enable LoRA on an existing
`Dense` layer by calling `layer.enable_lora(rank)`.
lora_alpha: Optional integer. If set, this parameter scales the
low-rank adaptation delta (computed as the product of two lower-rank
trainable matrices) during the forward pass. The delta is scaled by
`lora_alpha / lora_rank`, allowing you to fine-tune the strength of
the LoRA adjustment independently of `lora_rank`.
Input shape:
N-D tensor with shape: `(batch_size, ..., input_dim)`.
The most common situation would be
a 2D input with shape `(batch_size, input_dim)`.
Output shape:
N-D tensor with shape: `(batch_size, ..., units)`.
For instance, for a 2D input with shape `(batch_size, input_dim)`,
the output would have shape `(batch_size, units)`.
"""
def __init__(
    self,
    units,
    activation=None,
    use_bias=True,
    kernel_initializer="glorot_uniform",
    bias_initializer="zeros",
    kernel_regularizer=None,
    bias_regularizer=None,
    activity_regularizer=None,
    kernel_constraint=None,
    bias_constraint=None,
    lora_rank=None,
    lora_alpha=None,
    quantization_config=None,
    **kwargs,
):
    """Initialize the `Dense` layer.

    Validates `units` and resolves string identifiers (activation,
    initializers, regularizers, constraints) into callables. Weight
    creation is deferred to `build()`.
    """
    # `units` must be a positive int; fail fast on anything else.
    if not isinstance(units, int) or units <= 0:
        raise ValueError(
            "Received an invalid value for `units`, expected a positive "
            f"integer. Received: units={units}"
        )
    super().__init__(activity_regularizer=activity_regularizer, **kwargs)
    self.units = units
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
    self.lora_rank = lora_rank
    # LoRA alpha defaults to the rank itself, making the delta scale
    # `lora_alpha / lora_rank` equal to 1 unless overridden.
    self.lora_alpha = lora_alpha if lora_alpha is not None else lora_rank
    self.lora_enabled = False
    self.quantization_config = quantization_config
    # Inputs must be at least rank 2: (batch, ..., features).
    self.input_spec = InputSpec(min_ndim=2)
    self.supports_masking = True
def build(self, input_shape):
    """Create the layer's weights.

    For quantized modes ("int8", "int4", "gptq"), the quantized kernel
    variables are created by `quantized_build()` and the float kernel is
    skipped; "float8" still uses a float kernel plus extra scale
    variables.
    """
    kernel_shape = (input_shape[-1], self.units)
    if self.quantization_mode:
        self.quantized_build(
            kernel_shape,
            mode=self.quantization_mode,
            config=self.quantization_config,
        )
    if self.quantization_mode not in ("int8", "int4", "gptq"):
        # If the layer is quantized to int8 or int4, `self._kernel` will be
        # added in `self._int8_build` or `_int4_build`. Therefore, we skip
        # it here.
        self._kernel = self.add_weight(
            name="kernel",
            shape=kernel_shape,
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
        )
    if self.use_bias:
        self.bias = self.add_weight(
            name="bias",
            shape=(self.units,),
            initializer=self.bias_initializer,
            regularizer=self.bias_regularizer,
            constraint=self.bias_constraint,
        )
    else:
        self.bias = None
    # Pin the feature dimension so later calls are shape-checked.
    self.input_spec = InputSpec(min_ndim=2, axes={-1: input_shape[-1]})
    self.built = True
    if self.lora_rank:
        self.enable_lora(self.lora_rank)
@property
def kernel(self):
    """Return the effective (dequantized-layout, LoRA-adjusted) kernel.

    Depending on the quantization mode this unpacks int4-packed storage
    back to the original row count, or selects the GPTQ quantized kernel
    (unpacking when the GPTQ weights are 4-bit). If LoRA is enabled, the
    scaled low-rank delta is added on top.

    Raises:
        AttributeError: If the layer has not been built yet.
    """
    from keras.src.quantizers import gptq_core

    if not self.built:
        raise AttributeError(
            "You must build the layer before accessing `kernel`."
        )
    mode = self.quantization_mode
    is_gptq = mode == "gptq"
    is_int4 = mode == "int4"
    # Before calibration, GPTQ layers still use the float kernel.
    calibrated = bool(getattr(self, "is_gptq_calibrated", False))
    gptq_bits = (
        gptq_core.get_weight_bits_for_layer(self, None) if is_gptq else None
    )
    # Decide the source tensor first (packed vs already-quantized vs plain
    # kernel)
    if is_gptq and calibrated and gptq_bits != 4:
        # calibrated GPTQ, not 4-bit, no unpacking needed
        kernel = self.quantized_kernel
    else:
        # Start with the stored kernel
        kernel = getattr(self, "_kernel", None)
        # Handle int4 unpacking cases in one place
        if is_int4:
            # int4 storage packs two values per int8 byte; restore the
            # original input dimension recorded at quantization time.
            kernel = quantizers.unpack_int4(kernel, self._orig_input_dim)
        elif is_gptq and calibrated and gptq_bits == 4:
            kernel = quantizers.unpack_int4(
                self.quantized_kernel,
                orig_len=self.units,
                axis=0,
                dtype="uint8",
            )
    # Apply LoRA once at the end.
    if self.lora_enabled:
        kernel = kernel + (self.lora_alpha / self.lora_rank) * ops.matmul(
            self.lora_kernel_a, self.lora_kernel_b
        )
    return kernel
def call(self, inputs, training=None):
    """Apply the dense transformation: activation(inputs @ kernel + bias)."""
    outputs = ops.matmul(inputs, self.kernel)
    bias = self.bias
    if bias is not None:
        outputs = ops.add(outputs, bias)
    activation = self.activation
    return outputs if activation is None else activation(outputs)
def compute_output_shape(self, input_shape):
    """Same shape as the input, with the last dimension replaced by `units`."""
    return tuple(input_shape[:-1]) + (self.units,)
def enable_lora(
    self,
    rank,
    lora_alpha=None,
    a_initializer="he_uniform",
    b_initializer="zeros",
):
    """Enable LoRA (Low-Rank Adaptation) on this built layer.

    Freezes the stored kernel and adds two trainable low-rank factors,
    `lora_kernel_a` (input_dim x rank) and `lora_kernel_b`
    (rank x units), whose product scaled by `lora_alpha / rank` acts as
    a delta on the kernel.

    Args:
        rank: Rank of the low-rank decomposition.
        lora_alpha: Scaling numerator for the LoRA delta. Defaults to
            `rank` (i.e. an effective scale of 1).
        a_initializer: Initializer for `lora_kernel_a`.
        b_initializer: Initializer for `lora_kernel_b`.

    Raises:
        ValueError: If a kernel constraint is set, the layer isn't
            built, or LoRA is already enabled.
        NotImplementedError: If the layer is GPTQ-quantized.
    """
    if self.kernel_constraint:
        raise ValueError(
            "Lora is incompatible with kernel constraints. "
            "In order to enable lora on this layer, remove the "
            "`kernel_constraint` argument."
        )
    if not self.built:
        raise ValueError(
            "Cannot enable lora on a layer that isn't yet built."
        )
    if self.lora_enabled:
        raise ValueError(
            "lora is already enabled. This can only be done once per layer."
        )
    if self.quantization_mode == "gptq":
        raise NotImplementedError(
            "lora is not currently supported with GPTQ quantization."
        )
    # Unlock the variable tracker so new weights may be added post-build.
    self._tracker.unlock()
    # Determine the correct input dimension for the LoRA A matrix. When
    # the layer has been int4-quantized, `self._kernel` stores a *packed*
    # representation whose first dimension is `ceil(input_dim/2)`. We
    # saved the true, *unpacked* input dimension in `self._orig_input_dim`
    # during quantization. Use it if available; otherwise fall back to the
    # first dimension of `self.kernel`.
    if self.quantization_mode == "int4" and hasattr(
        self, "_orig_input_dim"
    ):
        input_dim_for_lora = self._orig_input_dim
    else:
        input_dim_for_lora = self.kernel.shape[0]
    self.lora_kernel_a = self.add_weight(
        name="lora_kernel_a",
        shape=(input_dim_for_lora, rank),
        initializer=initializers.get(a_initializer),
        regularizer=self.kernel_regularizer,
    )
    self.lora_kernel_b = self.add_weight(
        name="lora_kernel_b",
        shape=(rank, self.kernel.shape[1]),
        initializer=initializers.get(b_initializer),
        regularizer=self.kernel_regularizer,
    )
    # Only the LoRA factors remain trainable from here on.
    self._kernel.trainable = False
    self._tracker.lock()
    self.lora_enabled = True
    self.lora_rank = rank
    self.lora_alpha = lora_alpha if lora_alpha is not None else rank
def save_own_variables(self, store):
    """Write this layer's variables into `store` in spec order.

    Variables are keyed by their ordinal index (as a string), following
    `variable_serialization_spec` for the active quantization mode. A
    `None` bias is skipped without consuming an index, so indices stay
    contiguous.
    """
    # Do nothing if the layer isn't yet built
    if not self.built:
        return
    mode = self.quantization_mode
    if mode not in self.variable_serialization_spec:
        raise self._quantization_mode_error(mode)
    # Kernel plus optional merged LoRA-aware scale (returns (kernel, None)
    # for None/gptq)
    kernel_value, merged_kernel_scale = self._get_kernel_with_merged_lora()
    idx = 0
    for name in self.variable_serialization_spec[mode]:
        if name == "kernel":
            store[str(idx)] = kernel_value
        elif name == "bias" and self.bias is None:
            # Skipped entirely: no index is consumed for a missing bias.
            continue
        elif name == "kernel_scale" and mode in ("int4", "int8"):
            # For int4/int8, the merged LoRA scale (if any) comes from
            # `_get_kernel_with_merged_lora()`
            store[str(idx)] = merged_kernel_scale
        else:
            store[str(idx)] = getattr(self, name)
        idx += 1
def load_own_variables(self, store):
    """Restore this layer's variables from `store` in spec order.

    Mirrors `save_own_variables`. When LoRA is enabled, the LoRA factors
    are reset to zero after loading, since the saved kernel was produced
    by `_get_kernel_with_merged_lora()` (see the save path).
    """
    if not self.lora_enabled:
        self._check_load_own_variables(store)
    # Do nothing if the layer isn't yet built
    if not self.built:
        return
    mode = self.quantization_mode
    if mode not in self.variable_serialization_spec:
        raise self._quantization_mode_error(mode)
    # A saved GPTQ quantized model will always be calibrated.
    self.is_gptq_calibrated = mode == "gptq"
    idx = 0
    for name in self.variable_serialization_spec[mode]:
        if name == "kernel":
            self._kernel.assign(store[str(idx)])
        elif name == "bias" and self.bias is None:
            # Missing bias consumed no index on save; skip it here too.
            continue
        else:
            getattr(self, name).assign(store[str(idx)])
        idx += 1
    if self.lora_enabled:
        self.lora_kernel_a.assign(ops.zeros(self.lora_kernel_a.shape))
        self.lora_kernel_b.assign(ops.zeros(self.lora_kernel_b.shape))
def get_config(self):
    """Return the serializable configuration of this layer."""
    config = super().get_config()
    # Layer-specific entries override any colliding base-config keys,
    # matching a `{**base, **specific}` merge.
    config["units"] = self.units
    config["activation"] = activations.serialize(self.activation)
    config["use_bias"] = self.use_bias
    config["kernel_initializer"] = initializers.serialize(
        self.kernel_initializer
    )
    config["bias_initializer"] = initializers.serialize(
        self.bias_initializer
    )
    config["kernel_regularizer"] = regularizers.serialize(
        self.kernel_regularizer
    )
    config["bias_regularizer"] = regularizers.serialize(
        self.bias_regularizer
    )
    config["kernel_constraint"] = constraints.serialize(
        self.kernel_constraint
    )
    config["bias_constraint"] = constraints.serialize(self.bias_constraint)
    config["quantization_config"] = (
        serialization_lib.serialize_keras_object(self.quantization_config)
    )
    # LoRA settings are only serialized when LoRA is in use.
    if self.lora_rank:
        config["lora_rank"] = self.lora_rank
        config["lora_alpha"] = self.lora_alpha
    return config
@classmethod
def from_config(cls, config):
    """Recreate the layer from its config, deserializing the quantization
    config first."""
    config = dict(config)
    serialized_quant_config = config.get("quantization_config", None)
    config["quantization_config"] = (
        serialization_lib.deserialize_keras_object(serialized_quant_config)
    )
    return super().from_config(config)
@property
def variable_serialization_spec(self):
    """Returns a dict mapping quantization modes to variable names in order.

    This spec is used by `save_own_variables` and `load_own_variables` to
    determine the correct ordering of variables during serialization for
    each quantization mode. `None` means no quantization.
    """
    return {
        None: [
            "kernel",
            "bias",
        ],
        # int8/int4 additionally persist the per-unit kernel scale.
        "int8": [
            "kernel",
            "bias",
            "kernel_scale",
        ],
        "int4": [
            "kernel",
            "bias",
            "kernel_scale",
        ],
        # float8 keeps a float kernel plus FP8 scale/amax-history state.
        "float8": [
            "kernel",
            "bias",
            "inputs_scale",
            "inputs_amax_history",
            "kernel_scale",
            "kernel_amax_history",
            "outputs_grad_scale",
            "outputs_grad_amax_history",
        ],
        # gptq stores the packed kernel under `quantized_kernel` (no plain
        # "kernel" entry) together with its scale/zero/group-index maps.
        "gptq": [
            "bias",
            "quantized_kernel",
            "kernel_scale",
            "kernel_zero",
            "g_idx",
        ],
    }
def quantized_build(self, kernel_shape, mode, config=None):
    """Create the mode-specific quantization variables for this layer.

    Dispatches to the per-mode builder and marks the layer as quantized.

    Args:
        kernel_shape: The original float kernel shape
            `(input_dim, units)`.
        mode: One of `"int8"`, `"int4"`, `"float8"`, or `"gptq"`.
        config: Optional quantization config forwarded to the builder.

    Raises:
        The layer's quantization-mode error for an unknown `mode`.
    """
    builders = {
        "int8": lambda: self._int8_build(kernel_shape, config),
        "int4": lambda: self._int4_build(kernel_shape, config),
        "float8": lambda: self._float8_build(),
        "gptq": lambda: self._gptq_build(kernel_shape, config),
    }
    builder = builders.get(mode)
    if builder is None:
        raise self._quantization_mode_error(mode)
    builder()
    self._is_quantized = True
def _int8_build(self, kernel_shape, config=None):
    """Create int8 quantization variables.

    Adds an int8 kernel of the original shape, a per-output-unit float
    scale (`kernel_scale`), and an activation quantizer (from `config`,
    or an abs-max default).
    """
    self.inputs_quantizer = (
        QuantizationConfig.activation_quantizer_or_default(
            config, quantizers.AbsMaxQuantizer()
        )
    )
    # Quantized weights are frozen; training happens via LoRA if at all.
    self._kernel = self.add_weight(
        name="kernel",
        shape=kernel_shape,
        initializer="zeros",
        dtype="int8",
        trainable=False,
    )
    # One scale per output unit (per-channel quantization).
    self.kernel_scale = self.add_weight(
        name="kernel_scale",
        shape=(self.units,),
        initializer="ones",
        trainable=False,
    )
def _gptq_build(self, kernel_shape, config):
    """Create GPTQ quantization variables.

    The quantized kernel is stored transposed relative to the float
    kernel, with shape `(units_or_packed_units, input_dim)`; for 4-bit
    weights two values are packed per byte along the units axis.
    Per-group scale/zero-point maps and a per-row group index (`g_idx`)
    accompany it.
    """
    from keras.src.quantizers import gptq_core

    # Ensures the forward pass uses the original high-precision kernel
    # until calibration has been performed.
    self.is_gptq_calibrated = False
    self.kernel_shape = kernel_shape
    weight_bits = gptq_core.get_weight_bits_for_layer(self, config)
    # For 4-bit weights, we pack two values per byte.
    units = (
        (kernel_shape[1] + 1) // 2 if weight_bits == 4 else kernel_shape[1]
    )
    self.quantized_kernel = self.add_weight(
        name="kernel",
        shape=(units, kernel_shape[0]),
        initializer="zeros",
        dtype="uint8",
        trainable=False,
    )
    # group_size == -1 means one group covering all input rows.
    group_size = gptq_core.get_group_size_for_layer(self, config)
    n_groups = (
        1
        if group_size == -1
        else math.ceil(self.kernel_shape[0] / group_size)
    )
    self.kernel_scale = self.add_weight(
        name="kernel_scale",
        shape=(self.units, n_groups),
        initializer="ones",
        trainable=False,
    )
    self.kernel_zero = self.add_weight(
        name="kernel_zero",
        shape=(self.units, n_groups),
        initializer="zeros",
        dtype="uint8",
        trainable=False,
    )
    # Per-input-row group index, consumed by `dequantize_with_sz_map`.
    self.g_idx = self.add_weight(
        name="g_idx",
        shape=(self.kernel_shape[0],),
        initializer="zeros",
        dtype="float32",
        trainable=False,
    )
def _gptq_call(self, inputs, training=False):
    """Forward pass for GPTQ-quantized `Dense`.

    Before calibration, the original float kernel is used unchanged.
    After calibration, the stored kernel is unpacked (4-bit only),
    dequantized via the per-group scale/zero maps, and transposed back
    to the `(input_dim, units)` layout for the matmul.
    """
    from keras.src.quantizers import gptq_core

    if not self.is_gptq_calibrated:
        # Calibration hasn't run yet: fall back to the float kernel.
        W = self._kernel
    else:
        # 4-bit weights are stored packed (two values per byte).
        should_unpack = (
            gptq_core.get_weight_bits_for_layer(self, config=None) == 4
        )
        W = (
            quantizers.unpack_int4(
                self.quantized_kernel,
                orig_len=self.units,
                axis=0,
                dtype="uint8",
            )
            if should_unpack
            else self.quantized_kernel
        )
        # The stored kernel is transposed relative to the float layout,
        # so transpose after dequantizing.
        W = ops.transpose(
            dequantize_with_sz_map(
                W,
                self.kernel_scale,
                self.kernel_zero,
                self.g_idx,
            )
        )
    y = ops.matmul(inputs, W)
    if self.bias is not None:
        y = ops.add(y, self.bias)
    if self.activation is not None:
        y = self.activation(y)
    return y
def _int4_build(self, kernel_shape, config=None):
    """Build variables for int4 quantization.

    `kernel_shape` is the *original* float32 kernel shape
    `(input_dim, units)`. We allocate the stored kernel with rows
    `ceil(input_dim/2)` because two int4 values are packed into a single
    int8 byte.
    """
    # Per-channel int8 quantizer for the last axis (features).
    self.inputs_quantizer = (
        QuantizationConfig.activation_quantizer_or_default(
            config, quantizers.AbsMaxQuantizer()
        )
    )
    input_dim, output_dim = kernel_shape
    packed_rows = (input_dim + 1) // 2  # ceil for odd dims
    # Kernel is stored *packed*: each int8 byte contains two int4 values.
    self._kernel = self.add_weight(
        name="kernel",
        shape=(packed_rows, output_dim),
        initializer="zeros",
        dtype="int8",
        trainable=False,
    )
    # One scale per output unit (per-channel).
    self.kernel_scale = self.add_weight(
        name="kernel_scale",
        shape=(self.units,),
        initializer="ones",
        trainable=False,
    )
    # Record original input_dim for unpacking at runtime.
    self._orig_input_dim = input_dim
def _float8_build(self):
    """Create float8 (FP8) scaling state.

    Adds per-tensor scale and amax-history variables for the inputs,
    the kernel, and the output gradients. These are float32, trainable
    only so gradients can overwrite them (`overwrite_with_gradient`),
    and never autocast.
    """
    from keras.src.dtype_policies import QuantizedFloat8DTypePolicy

    # If `self.dtype_policy` is not QuantizedFloat8DTypePolicy, then set
    # `amax_history_length` to its default value.
    amax_history_length = getattr(
        self.dtype_policy,
        "amax_history_length",
        QuantizedFloat8DTypePolicy.default_amax_history_length,
    )
    # We set `trainable=True` because we will use the gradients to overwrite
    # these variables
    scale_kwargs = {
        "shape": (),
        "initializer": "ones",
        "dtype": "float32",  # Always be float32
        "trainable": True,
        "autocast": False,
        "overwrite_with_gradient": True,
    }
    amax_history_kwargs = {
        "shape": (amax_history_length,),
        "initializer": "zeros",
        "dtype": "float32",  # Always be float32
        "trainable": True,
        "autocast": False,
        "overwrite_with_gradient": True,
    }
    self.inputs_scale = self.add_weight(name="inputs_scale", **scale_kwargs)
    self.inputs_amax_history = self.add_weight(
        name="inputs_amax_history", **amax_history_kwargs
    )
    self.kernel_scale = self.add_weight(name="kernel_scale", **scale_kwargs)
    self.kernel_amax_history = self.add_weight(
        name="kernel_amax_history", **amax_history_kwargs
    )
    self.outputs_grad_scale = self.add_weight(
        name="outputs_grad_scale", **scale_kwargs
    )
    self.outputs_grad_amax_history = self.add_weight(
        name="outputs_grad_amax_history", **amax_history_kwargs
    )
def _int8_call(self, inputs, training=None):
    """Forward pass for int8 quantized `Dense` (with optional LoRA)."""

    @ops.custom_gradient
    def matmul_with_inputs_gradient(inputs, kernel, kernel_scale):
        """Custom gradient function to handle the int8 quantized weights.

        Automatic differentiation will not know how to handle the int8
        quantized weights. So a custom gradient function is needed to
        handle the int8 quantized weights.

        The custom gradient function will use the dequantized kernel to
        compute the gradient.
        """

        def grad_fn(*args, upstream=None):
            if upstream is None:
                (upstream,) = args
            # Dequantize the kernel to compute the input gradient.
            float_kernel = ops.divide(
                ops.cast(kernel, dtype=self.compute_dtype),
                kernel_scale,
            )
            inputs_grad = ops.matmul(upstream, ops.transpose(float_kernel))
            # No gradients for the frozen quantized kernel or its scale.
            return (inputs_grad, None, None)

        output_scale = kernel_scale
        if self.inputs_quantizer:
            # Quantize activations too, and fold both scales into one so a
            # single divide de-scales the matmul output.
            inputs, inputs_scale = self.inputs_quantizer(inputs, axis=-1)
            output_scale = ops.multiply(output_scale, inputs_scale)
        x = ops.matmul(inputs, kernel)
        # De-scale outputs
        x = ops.cast(x, self.compute_dtype)
        x = ops.divide(x, output_scale)
        return x, grad_fn

    x = matmul_with_inputs_gradient(
        inputs,
        ops.convert_to_tensor(self._kernel),
        ops.convert_to_tensor(self.kernel_scale),
    )
    if self.lora_enabled:
        # The LoRA delta runs in float on the original (unquantized) inputs.
        lora_x = ops.matmul(inputs, self.lora_kernel_a)
        lora_x = ops.matmul(lora_x, self.lora_kernel_b)
        x = ops.add(x, (self.lora_alpha / self.lora_rank) * lora_x)
    if self.bias is not None:
        x = ops.add(x, self.bias)
    if self.activation is not None:
        x = self.activation(x)
    return x
def _int4_call(self, inputs, training=None):
    """Forward pass for int4 quantized Dense layer."""

    @ops.custom_gradient
    def matmul_with_inputs_gradient(inputs, kernel, kernel_scale):
        """Custom gradient function for int4 quantized weights.

        Automatic differentiation will not know how to handle the
        int4 quantized weights. So a custom gradient function is needed
        to handle the int4 quantized weights.

        The custom gradient function will use the dequantized kernel to
        compute the gradient.
        """
        # Two int4 values are packed per int8 byte; unpack back to the
        # original `input_dim` rows before any matmul.
        unpacked_kernel = quantizers.unpack_int4(
            kernel, self._orig_input_dim
        )

        def grad_fn(*args, upstream=None):
            if upstream is None:
                (upstream,) = args
            # Dequantize the (unpacked) kernel for the input gradient.
            float_kernel = ops.divide(
                ops.cast(unpacked_kernel, dtype=self.compute_dtype),
                kernel_scale,
            )
            inputs_grad = ops.matmul(upstream, ops.transpose(float_kernel))
            # No gradients for the frozen quantized kernel or its scale.
            return (inputs_grad, None, None)

        output_scale = kernel_scale
        if self.inputs_quantizer:
            # Quantize activations too, folding both scales into one.
            inputs, inputs_scale = self.inputs_quantizer(inputs, axis=-1)
            output_scale = ops.multiply(output_scale, inputs_scale)
        x = ops.matmul(inputs, unpacked_kernel)
        x = ops.cast(x, self.compute_dtype)
        x = ops.divide(x, output_scale)
        return x, grad_fn

    x = matmul_with_inputs_gradient(
        inputs,
        ops.convert_to_tensor(self._kernel),
        ops.convert_to_tensor(self.kernel_scale),
    )
    if self.lora_enabled:
        # The LoRA delta runs in float on the original (unquantized) inputs.
        lora_x = ops.matmul(inputs, self.lora_kernel_a)
        lora_x = ops.matmul(lora_x, self.lora_kernel_b)
        x = ops.add(x, (self.lora_alpha / self.lora_rank) * lora_x)
    # Add bias and activation
    if self.bias is not None:
        x = ops.add(x, self.bias)
    if self.activation is not None:
        x = self.activation(x)
    return x
def _float8_call(self, inputs, training=None):
if self.lora_enabled:
raise NotImplementedError(
"Currently, `_float8_call` doesn't support LoRA"
)
@ops.custom_gradient
def quantized_dequantize_inputs(inputs, scale, amax_history):
if training:
new_scale = quantizers.compute_float8_scale(
ops.max(amax_history, axis=0),
scale,
ops.cast(
float(ml_dtypes.finfo("float8_e4m3fn").max), "float32"
),
)
new_amax_history = quantizers.compute_float8_amax_history(
inputs, amax_history
)
else:
new_scale = None
new_amax_history = None
qdq_inputs = quantizers.quantize_and_dequantize(
inputs, scale, "float8_e4m3fn", self.compute_dtype
)
def grad(*args, upstream=None, variables=None):
if upstream is None:
(upstream,) = args
return upstream, new_scale, new_amax_history
return qdq_inputs, grad
@ops.custom_gradient
def quantized_dequantize_outputs(outputs, scale, amax_history):
"""Quantize-dequantize the output gradient but not the output."""
def grad(*args, upstream=None, variables=None):
if upstream is None:
(upstream,) = args
new_scale = quantizers.compute_float8_scale(
ops.max(amax_history, axis=0),
scale,
ops.cast(
float(ml_dtypes.finfo("float8_e5m2").max), "float32"
),
)
qdq_upstream = quantizers.quantize_and_dequantize(
upstream, scale, "float8_e5m2", self.compute_dtype
)
new_amax_history = quantizers.compute_float8_amax_history(
upstream, amax_history
)
return qdq_upstream, new_scale, new_amax_history
return outputs, grad
x = ops.matmul(
quantized_dequantize_inputs(
inputs,
ops.convert_to_tensor(self.inputs_scale),
ops.convert_to_tensor(self.inputs_amax_history),
),
quantized_dequantize_inputs(
ops.convert_to_tensor(self._kernel),
ops.convert_to_tensor(self.kernel_scale),
ops.convert_to_tensor(self.kernel_amax_history),
),
)
# `quantized_dequantize_outputs` is placed immediately after
# `ops.matmul` for the sake of pattern matching in gemm_rewrite. That
# way, the qdq will be adjacent to the corresponding matmul_bprop in the
# bprop.
x = quantized_dequantize_outputs(
x,
ops.convert_to_tensor(self.outputs_grad_scale),
ops.convert_to_tensor(self.outputs_grad_amax_history),
)
if self.bias is not None:
# Under non-mixed precision cases, F32 bias has to be converted to
# BF16 first to get the biasAdd fusion support. ref. PR
# https://github.com/tensorflow/tensorflow/pull/60306
bias = self.bias
if self.dtype_policy.compute_dtype == "float32":
bias_bf16 = ops.cast(bias, "bfloat16")
bias = ops.cast(bias_bf16, bias.dtype)
x = ops.add(x, bias)
if self.activation is not None:
x = self.activation(x)
return x
def quantize(self, mode=None, type_check=True, config=None):
# Prevent quantization of the subclasses
if type_check and (type(self) is not Dense):
raise self._not_implemented_error(self.quantize)
self.quantization_config = config
kernel_shape = self._kernel.shape
if mode == "int8":
weight_quantizer = QuantizationConfig.weight_quantizer_or_default(
self.quantization_config, quantizers.AbsMaxQuantizer(axis=0)
)
kernel_value, kernel_scale = weight_quantizer(
self._kernel, to_numpy=True
)
kernel_scale = ops.squeeze(kernel_scale, axis=0)
del self._kernel
# Build variables for int8 mode
self.quantized_build(kernel_shape, mode, self.quantization_config)
self._kernel.assign(kernel_value)
self.kernel_scale.assign(kernel_scale)
elif mode == "int4":
# 1. Quantize to int4 values (still int8 dtype, range [-8,7])
weight_quantizer = QuantizationConfig.weight_quantizer_or_default(
self.quantization_config,
quantizers.AbsMaxQuantizer(
axis=0, value_range=(-8, 7), output_dtype="int8"
),
)
kernel_value_int4, kernel_scale = weight_quantizer(
self._kernel, to_numpy=True
)
kernel_scale = ops.squeeze(kernel_scale, axis=0)
# 2. Pack two int4 values into a single int8 byte.
packed_kernel_value, _, _ = quantizers.pack_int4(kernel_value_int4)
del self._kernel
# Build variables using the original kernel shape; _int4_build will
# compute the packed shape internally.
self.quantized_build(kernel_shape, mode, self.quantization_config)
# Assign packed values.
self._kernel.assign(packed_kernel_value)
self.kernel_scale.assign(kernel_scale)
elif mode == "gptq":
self.quantized_build(kernel_shape, mode, self.quantization_config)
elif mode == "float8":
self.quantized_build(kernel_shape, mode)
else:
raise self._quantization_mode_error(mode)
# Set new dtype policy only for modes that already have a policy.
if self.dtype_policy.quantization_mode is None:
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/core/lambda_layer.py | keras/src/layers/core/lambda_layer.py | import inspect
import types
from keras.src import backend
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
from keras.src.saving import serialization_lib
from keras.src.utils import python_utils
@keras_export("keras.layers.Lambda")
class Lambda(Layer):
"""Wraps arbitrary expressions as a `Layer` object.
The `Lambda` layer exists so that arbitrary expressions can be used
as a `Layer` when constructing Sequential
and Functional API models. `Lambda` layers are best suited for simple
operations or quick experimentation. For more advanced use cases,
prefer writing new subclasses of `Layer`.
WARNING: `Lambda` layers have (de)serialization limitations!
The main reason to subclass `Layer` instead of using a
`Lambda` layer is saving and inspecting a model. `Lambda` layers
are saved by serializing the Python bytecode, which is fundamentally
non-portable and potentially unsafe.
They should only be loaded in the same environment where
they were saved. Subclassed layers can be saved in a more portable way
by overriding their `get_config()` method. Models that rely on
subclassed Layers are also often easier to visualize and reason about.
Example:
```python
# add a x -> x^2 layer
model.add(Lambda(lambda x: x ** 2))
```
Args:
function: The function to be evaluated. Takes input tensor as first
argument.
output_shape: Expected output shape from function. This argument
can usually be inferred if not explicitly provided.
Can be a tuple or function. If a tuple, it only specifies
the first dimension onward; sample dimension is assumed
either the same as the input:
`output_shape = (input_shape[0], ) + output_shape` or,
the input is `None` and the sample dimension is also `None`:
`output_shape = (None, ) + output_shape`.
If a function, it specifies the
entire shape as a function of the input shape:
`output_shape = f(input_shape)`.
mask: Either None (indicating no masking) or a callable with the same
signature as the `compute_mask` layer method, or a tensor
that will be returned as output mask regardless
of what the input is.
arguments: Optional dictionary of keyword arguments to be passed to the
function.
"""
def __init__(
self, function, output_shape=None, mask=None, arguments=None, **kwargs
):
super().__init__(**kwargs)
self.arguments = arguments or {}
self.function = function
if mask is not None:
self.supports_masking = True
else:
self.supports_masking = False
self.mask = mask
self._output_shape = output_shape
# Warning on every invocation will be quite irksome in Eager mode.
self._already_warned = False
function_args = inspect.getfullargspec(function).args
self._fn_expects_training_arg = "training" in function_args
self._fn_expects_mask_arg = "mask" in function_args
def compute_output_shape(self, input_shape):
if self._output_shape is None:
# Leverage backend shape inference
try:
inputs = tree.map_shape_structure(
lambda x: backend.KerasTensor(x, dtype=self.compute_dtype),
input_shape,
)
output_spec = backend.compute_output_spec(self.call, inputs)
return tree.map_structure(lambda x: x.shape, output_spec)
except:
raise NotImplementedError(
"We could not automatically infer the shape of "
"the Lambda's output. Please specify the `output_shape` "
"argument for this Lambda layer."
)
if callable(self._output_shape):
return self._output_shape(input_shape)
# Output shapes are passed directly and don't include batch dimension.
batch_size = tree.flatten(input_shape)[0]
def _add_batch(shape):
return (batch_size,) + shape
return tree.map_shape_structure(_add_batch, self._output_shape)
def call(self, inputs, mask=None, training=None):
# We must copy for thread safety,
# but it only needs to be a shallow copy.
kwargs = {k: v for k, v in self.arguments.items()}
if self._fn_expects_mask_arg:
kwargs["mask"] = mask
if self._fn_expects_training_arg:
kwargs["training"] = training
return self.function(inputs, **kwargs)
def compute_mask(self, inputs, mask=None):
if callable(self.mask):
return self.mask(inputs, mask)
return self.mask
def get_config(self):
config = {
"function": self._serialize_function_to_config(self.function),
}
if self._output_shape is not None:
if callable(self._output_shape):
output_shape = self._serialize_function_to_config(
self._output_shape
)
else:
output_shape = self._output_shape
config["output_shape"] = output_shape
if self.mask is not None:
if callable(self.mask):
mask = self._serialize_function_to_config(self.mask)
else:
mask = serialization_lib.serialize_keras_object(self.mask)
config["mask"] = mask
config["arguments"] = serialization_lib.serialize_keras_object(
self.arguments
)
base_config = super().get_config()
return {**base_config, **config}
def _serialize_function_to_config(self, fn):
if isinstance(fn, types.LambdaType) and fn.__name__ == "<lambda>":
code, defaults, closure = python_utils.func_dump(fn)
return {
"class_name": "__lambda__",
"config": {
"code": code,
"defaults": defaults,
"closure": closure,
},
}
elif callable(fn):
return serialization_lib.serialize_keras_object(fn)
raise ValueError(
"Invalid input type for serialization. "
f"Received: {fn} of type {type(fn)}."
)
@staticmethod
def _raise_for_lambda_deserialization(safe_mode):
if safe_mode:
raise ValueError(
"Requested the deserialization of a `Lambda` layer whose "
"`function` is a Python lambda. This carries a potential risk "
"of arbitrary code execution and thus it is disallowed by "
"default. If you trust the source of the artifact, you can "
"override this error by passing `safe_mode=False` to the "
"loading function, or calling "
"`keras.config.enable_unsafe_deserialization()."
)
@classmethod
def from_config(cls, config, custom_objects=None, safe_mode=None):
safe_mode = safe_mode or serialization_lib.in_safe_mode()
fn_config = config["function"]
if (
isinstance(fn_config, dict)
and "class_name" in fn_config
and fn_config["class_name"] == "__lambda__"
):
cls._raise_for_lambda_deserialization(safe_mode)
inner_config = fn_config["config"]
fn = python_utils.func_load(
inner_config["code"],
defaults=inner_config["defaults"],
closure=inner_config["closure"],
)
config["function"] = fn
else:
config["function"] = serialization_lib.deserialize_keras_object(
fn_config, custom_objects=custom_objects
)
if "output_shape" in config:
fn_config = config["output_shape"]
if (
isinstance(fn_config, dict)
and "class_name" in fn_config
and fn_config["class_name"] == "__lambda__"
):
cls._raise_for_lambda_deserialization(safe_mode)
inner_config = fn_config["config"]
fn = python_utils.func_load(
inner_config["code"],
defaults=inner_config["defaults"],
closure=inner_config["closure"],
)
config["output_shape"] = fn
else:
output_shape = serialization_lib.deserialize_keras_object(
fn_config, custom_objects=custom_objects
)
if isinstance(output_shape, list) and all(
isinstance(e, (int, type(None))) for e in output_shape
):
output_shape = tuple(output_shape)
config["output_shape"] = output_shape
if "arguments" in config:
config["arguments"] = serialization_lib.deserialize_keras_object(
config["arguments"], custom_objects=custom_objects
)
return cls(**config)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/core/__init__.py | keras/src/layers/core/__init__.py | python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false | |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/core/embedding.py | keras/src/layers/core/embedding.py | import warnings
from keras.src import backend
from keras.src import constraints
from keras.src import dtype_policies
from keras.src import initializers
from keras.src import ops
from keras.src import quantizers
from keras.src import regularizers
from keras.src.api_export import keras_export
from keras.src.backend import KerasTensor
from keras.src.layers.layer import Layer
from keras.src.quantizers.quantization_config import QuantizationConfig
from keras.src.saving import serialization_lib
@keras_export("keras.layers.Embedding")
class Embedding(Layer):
"""Turns nonnegative integers (indexes) into dense vectors of fixed size.
e.g. `[[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]`
This layer can only be used on nonnegative integer inputs of a fixed range.
Example:
>>> model = keras.Sequential()
>>> model.add(keras.layers.Embedding(1000, 64))
>>> # The model will take as input an integer matrix of size (batch,
>>> # input_length), and the largest integer (i.e. word index) in the input
>>> # should be no larger than 999 (vocabulary size).
>>> # Now model.output_shape is (None, 10, 64), where `None` is the batch
>>> # dimension.
>>> input_array = np.random.randint(1000, size=(32, 10))
>>> model.compile('rmsprop', 'mse')
>>> output_array = model.predict(input_array)
>>> print(output_array.shape)
(32, 10, 64)
Args:
input_dim: Integer. Size of the vocabulary,
i.e. maximum integer index + 1.
output_dim: Integer. Dimension of the dense embedding.
embeddings_initializer: Initializer for the `embeddings`
matrix (see `keras.initializers`).
embeddings_regularizer: Regularizer function applied to
the `embeddings` matrix (see `keras.regularizers`).
embeddings_constraint: Constraint function applied to
the `embeddings` matrix (see `keras.constraints`).
mask_zero: Boolean, whether or not the input value 0 is a special
"padding" value that should be masked out.
This is useful when using recurrent layers which
may take variable length input. If this is `True`,
then all subsequent layers in the model need
to support masking or an exception will be raised.
If `mask_zero` is set to `True`, as a consequence,
index 0 cannot be used in the vocabulary (`input_dim` should
equal size of vocabulary + 1).
weights: Optional floating-point matrix of size
`(input_dim, output_dim)`. The initial embeddings values
to use.
lora_rank: Optional integer. If set, the layer's forward pass
will implement LoRA (Low-Rank Adaptation)
with the provided rank. LoRA sets the layer's embeddings
matrix to non-trainable and replaces it with a delta over the
original matrix, obtained via multiplying two lower-rank
trainable matrices. This can be useful to reduce the
computation cost of fine-tuning large embedding layers.
You can also enable LoRA on an existing
`Embedding` layer by calling `layer.enable_lora(rank)`.
lora_alpha: Optional integer. If set, this parameter scales the
low-rank adaptation delta (computed as the product of two lower-rank
trainable matrices) during the forward pass. The delta is scaled by
`lora_alpha / lora_rank`, allowing you to fine-tune the strength of
the LoRA adjustment independently of `lora_rank`.
Input shape:
2D tensor with shape: `(batch_size, input_length)`.
Output shape:
3D tensor with shape: `(batch_size, input_length, output_dim)`.
"""
def __init__(
self,
input_dim,
output_dim,
embeddings_initializer="uniform",
embeddings_regularizer=None,
embeddings_constraint=None,
mask_zero=False,
weights=None,
lora_rank=None,
lora_alpha=None,
quantization_config=None,
**kwargs,
):
input_length = kwargs.pop("input_length", None)
if input_length is not None:
warnings.warn(
"Argument `input_length` is deprecated. Just remove it."
)
super().__init__(**kwargs)
self.input_dim = input_dim
self.output_dim = output_dim
self.embeddings_initializer = initializers.get(embeddings_initializer)
self.embeddings_regularizer = regularizers.get(embeddings_regularizer)
self.embeddings_constraint = constraints.get(embeddings_constraint)
self.mask_zero = mask_zero
self.supports_masking = mask_zero
self.autocast = False
self.lora_rank = lora_rank
self.lora_alpha = lora_alpha if lora_alpha is not None else lora_rank
self.lora_enabled = False
self.quantization_config = quantization_config
if weights is not None:
self.build()
if not (isinstance(weights, list) and len(weights) == 1):
weights = [weights]
self.set_weights(weights)
def build(self, input_shape=None):
if self.built:
return
embeddings_shape = (self.input_dim, self.output_dim)
if self.quantization_mode:
self.quantized_build(
embeddings_shape,
mode=self.quantization_mode,
config=self.quantization_config,
)
if self.quantization_mode not in ("int8", "int4"):
self._embeddings = self.add_weight(
shape=embeddings_shape,
initializer=self.embeddings_initializer,
name="embeddings",
regularizer=self.embeddings_regularizer,
constraint=self.embeddings_constraint,
trainable=True,
)
self.built = True
if self.lora_rank:
self.enable_lora(self.lora_rank)
@property
def embeddings(self):
if not self.built:
raise AttributeError(
"You must build the layer before accessing `embeddings`."
)
embeddings = self._embeddings
if self.quantization_mode == "int4":
embeddings = quantizers.unpack_int4(
embeddings, self._orig_output_dim, axis=-1
)
if self.lora_enabled:
return embeddings + (self.lora_alpha / self.lora_rank) * ops.matmul(
self.lora_embeddings_a, self.lora_embeddings_b
)
return embeddings
def call(self, inputs):
if inputs.dtype != "int32" and inputs.dtype != "int64":
inputs = ops.cast(inputs, "int32")
outputs = ops.take(self.embeddings, inputs, axis=0)
return ops.cast(outputs, dtype=self.compute_dtype)
def compute_mask(self, inputs, mask=None):
if not self.mask_zero:
return None
return ops.not_equal(inputs, 0)
def compute_output_shape(self, input_shape):
return (*input_shape, self.output_dim)
def compute_output_spec(self, inputs):
output_shape = self.compute_output_shape(inputs.shape)
ragged = getattr(inputs, "ragged", False)
return KerasTensor(
output_shape, dtype=self.compute_dtype, ragged=ragged
)
def enable_lora(
self,
rank,
lora_alpha=None,
a_initializer="he_uniform",
b_initializer="zeros",
):
if self.embeddings_constraint:
raise ValueError(
"Lora is incompatible with embedding constraints. "
"In order to enable lora on this layer, remove the "
"`embeddings_constraint` argument."
)
if not self.built:
raise ValueError(
"Cannot enable lora on a layer that isn't yet built."
)
if self.lora_enabled:
raise ValueError(
"lora is already enabled. This can only be done once per layer."
)
self._tracker.unlock()
self.lora_embeddings_a = self.add_weight(
name="lora_embeddings_a",
shape=(self.input_dim, rank),
initializer=initializers.get(a_initializer),
regularizer=self.embeddings_regularizer,
)
self.lora_embeddings_b = self.add_weight(
name="lora_embeddings_b",
shape=(rank, self.output_dim),
initializer=initializers.get(b_initializer),
regularizer=self.embeddings_regularizer,
)
self.embeddings.trainable = False
self._tracker.lock()
self.lora_enabled = True
self.lora_rank = rank
self.lora_alpha = lora_alpha if lora_alpha is not None else rank
def save_own_variables(self, store):
# Do nothing if the layer isn't yet built
if not self.built:
return
mode = self.quantization_mode
if mode not in self.variable_serialization_spec:
raise self._quantization_mode_error(mode)
# Embeddings plus optional merged LoRA-aware scale
# (returns (embeddings, None) for `None` mode).
embeddings_value, merged_kernel_scale = (
self._get_embeddings_with_merged_lora()
)
idx = 0
for name in self.variable_serialization_spec[mode]:
if name == "embeddings":
store[str(idx)] = embeddings_value
elif name == "embeddings_scale" and mode in ("int4", "int8"):
# For int4/int8, the merged LoRA scale (if any) comes from
# `_get_embeddings_with_merged_lora()`
store[str(idx)] = merged_kernel_scale
else:
store[str(idx)] = getattr(self, name)
idx += 1
def load_own_variables(self, store):
if not self.lora_enabled:
self._check_load_own_variables(store)
# Do nothing if the layer isn't yet built
if not self.built:
return
mode = self.quantization_mode
if mode not in self.variable_serialization_spec:
raise self._quantization_mode_error(mode)
idx = 0
for name in self.variable_serialization_spec[mode]:
if name == "embeddings":
self._embeddings.assign(store[str(idx)])
else:
getattr(self, name).assign(store[str(idx)])
idx += 1
if self.lora_enabled:
self.lora_embeddings_a.assign(
ops.zeros(self.lora_embeddings_a.shape)
)
self.lora_embeddings_b.assign(
ops.zeros(self.lora_embeddings_b.shape)
)
def get_config(self):
base_config = super().get_config()
config = {
"input_dim": self.input_dim,
"output_dim": self.output_dim,
"embeddings_initializer": initializers.serialize(
self.embeddings_initializer
),
"embeddings_regularizer": regularizers.serialize(
self.embeddings_regularizer
),
"activity_regularizer": regularizers.serialize(
self.activity_regularizer
),
"embeddings_constraint": constraints.serialize(
self.embeddings_constraint
),
"mask_zero": self.mask_zero,
"quantization_config": serialization_lib.serialize_keras_object(
self.quantization_config
),
}
if self.lora_rank:
config["lora_rank"] = self.lora_rank
config["lora_alpha"] = self.lora_alpha
return {**base_config, **config}
@classmethod
def from_config(cls, config):
config = config.copy()
config["quantization_config"] = (
serialization_lib.deserialize_keras_object(
config.get("quantization_config", None)
)
)
return super().from_config(config)
def _quantization_mode_error(self, mode):
return NotImplementedError(
"Invalid quantization mode. Expected one of ('int8', 'int4'). "
f"Received: quantization_mode={mode}"
)
@property
def variable_serialization_spec(self):
"""Returns a dict mapping quantization modes to variable names in order.
This spec is used by `save_own_variables` and `load_own_variables` to
determine the correct ordering of variables during serialization for
each quantization mode. `None` means no quantization.
"""
return {
None: [
"embeddings",
],
"int8": [
"embeddings",
"embeddings_scale",
],
"int4": [
"embeddings",
"embeddings_scale",
],
}
def quantized_build(self, embeddings_shape, mode, config=None):
if mode == "int8":
self._int8_build(embeddings_shape, config)
elif mode == "int4":
self._int4_build(embeddings_shape, config)
else:
raise self._quantization_mode_error(mode)
self._is_quantized = True
def _int8_build(self, embeddings_shape, config=None):
self._embeddings = self.add_weight(
name="embeddings",
shape=embeddings_shape,
initializer="zeros",
dtype="int8",
trainable=False,
)
# We choose to reduce the axis of `output_dim` because, typically,
# `input_dim` is larger than `output_dim`. This reduces quantization
# error.
self.embeddings_scale = self.add_weight(
name="embeddings_scale",
shape=(self.input_dim,),
initializer="ones",
trainable=False,
)
def _int4_build(self, embeddings_shape, config=None):
input_dim, output_dim = embeddings_shape
packed_rows = (output_dim + 1) // 2 # ceil for odd dims
# Embeddings are stored *packed*: each int8 byte contains two int4
# values.
self._embeddings = self.add_weight(
name="embeddings",
shape=(input_dim, packed_rows),
initializer="zeros",
dtype="int8",
trainable=False,
)
self.embeddings_scale = self.add_weight(
name="embeddings_scale",
shape=(self.input_dim,),
initializer="ones",
trainable=False,
)
# Record original output_dim for unpacking at runtime.
self._orig_output_dim = output_dim
def _int8_call(self, inputs, training=None):
# We cannot update quantized self._embeddings, so the custom gradient is
# not needed
if backend.standardize_dtype(inputs.dtype) not in ("int32", "int64"):
inputs = ops.cast(inputs, "int32")
embeddings_scale = ops.take(self.embeddings_scale, inputs, axis=0)
outputs = ops.take(self._embeddings, inputs, axis=0)
# De-scale outputs
outputs = ops.divide(
ops.cast(outputs, dtype=self.compute_dtype),
ops.expand_dims(embeddings_scale, axis=-1),
)
if self.lora_enabled:
lora_outputs = ops.take(self.lora_embeddings_a, inputs, axis=0)
lora_outputs = ops.matmul(lora_outputs, self.lora_embeddings_b)
outputs = ops.add(
outputs, (self.lora_alpha / self.lora_rank) * lora_outputs
)
return outputs
def _int4_call(self, inputs, training=None):
# We cannot update quantized self._embeddings, so the custom gradient is
# not needed
if backend.standardize_dtype(inputs.dtype) not in ("int32", "int64"):
inputs = ops.cast(inputs, "int32")
embeddings_scale = ops.take(self.embeddings_scale, inputs, axis=0)
unpacked_embeddings = quantizers.unpack_int4(
self._embeddings, self._orig_output_dim, axis=-1
)
outputs = ops.take(unpacked_embeddings, inputs, axis=0)
# De-scale outputs
outputs = ops.divide(
ops.cast(outputs, dtype=self.compute_dtype),
ops.expand_dims(embeddings_scale, axis=-1),
)
if self.lora_enabled:
lora_outputs = ops.take(self.lora_embeddings_a, inputs, axis=0)
lora_outputs = ops.matmul(lora_outputs, self.lora_embeddings_b)
outputs = ops.add(
outputs, (self.lora_alpha / self.lora_rank) * lora_outputs
)
return outputs
def quantize(self, mode=None, type_check=True, config=None):
# Prevent quantization of the subclasses.
if type_check and (type(self) is not Embedding):
raise self._not_implemented_error(self.quantize)
self.quantization_config = config
embeddings_shape = (self.input_dim, self.output_dim)
if mode == "int8":
# Quantize `self._embeddings` to int8 and compute corresponding
# scale.
weight_quantizer = QuantizationConfig.weight_quantizer_or_default(
self.quantization_config,
quantizers.AbsMaxQuantizer(axis=-1),
)
embeddings_value, embeddings_scale = weight_quantizer(
self._embeddings, to_numpy=True
)
embeddings_scale = ops.squeeze(embeddings_scale, axis=-1)
del self._embeddings
self.quantized_build(
embeddings_shape, mode, self.quantization_config
)
self._embeddings.assign(embeddings_value)
self.embeddings_scale.assign(embeddings_scale)
elif mode == "int4":
# Quantize to int4 values (stored in int8 dtype, range [-8, 7]).
weight_quantizer = QuantizationConfig.weight_quantizer_or_default(
self.quantization_config,
quantizers.AbsMaxQuantizer(
axis=-1,
value_range=(-8, 7),
output_dtype="int8",
),
)
embeddings_value, embeddings_scale = weight_quantizer(
self._embeddings, to_numpy=True
)
embeddings_scale = ops.squeeze(embeddings_scale, axis=-1)
# 2. Pack two int4 values into a single int8 byte.
packed_embeddings_value, _, _ = quantizers.pack_int4(
embeddings_value, axis=-1
)
del self._embeddings
self.quantized_build(
embeddings_shape, mode, self.quantization_config
)
self._embeddings.assign(packed_embeddings_value)
self.embeddings_scale.assign(embeddings_scale)
else:
raise self._quantization_mode_error(mode)
# Set new dtype policy.
if self.dtype_policy.quantization_mode is None:
policy = dtype_policies.get(f"{mode}_from_{self.dtype_policy.name}")
self.dtype_policy = policy
def _get_embeddings_with_merged_lora(self):
"""Returns the embeddings with LoRA matrices merged, for serialization.
This method is called by `save_own_variables` to produce a single
embeddings tensor that includes the adaptations from LoRA. This is
useful for deploying the model or for continuing training after
permanently applying the LoRA update.
If the layer is quantized (`int8` or `int4`), the process is:
1. Dequantize the base embeddings to float.
2. Compute the LoRA delta (`lora_embeddings_a @ lora_embeddings_b`) and
add it to the dequantized embeddings.
3. Re-quantize the merged result back to the original quantized
type (`int8` or packed `int4`), calculating a new scale factor.
If the layer is not quantized, this method returns the result of the
`embeddings` property (which computes the merge in floating-point) and a
scale of `None`.
If LoRA is not enabled, it returns the original embeddings and scale
without modification.
Returns:
A tuple `(embeddings_value, embeddings_scale)`:
`embeddings_value`: The merged embeddings. A quantized tensor if
quantization is active, otherwise a high precision tensor.
`embeddings_scale`: The quantization scale for the merged
embeddings. This is `None` if the layer is not quantized.
"""
if self.dtype_policy.quantization_mode in (None, "gptq"):
return self.embeddings, None
embeddings_value = self._embeddings
embeddings_scale = self.embeddings_scale
if not self.lora_enabled:
return embeddings_value, embeddings_scale
# Dequantize embeddings to float.
if self.quantization_mode == "int4":
unpacked_embeddings = quantizers.unpack_int4(
embeddings_value, self._orig_output_dim, axis=-1
)
float_embeddings = ops.divide(
ops.cast(unpacked_embeddings, self.compute_dtype),
ops.expand_dims(embeddings_scale, axis=-1),
)
quant_range = (-8, 7)
elif self.quantization_mode == "int8":
float_embeddings = ops.divide(
ops.cast(embeddings_value, self.compute_dtype),
ops.expand_dims(embeddings_scale, axis=-1),
)
quant_range = (-127, 127)
else:
raise ValueError(
f"Unsupported quantization mode: {self.quantization_mode}"
)
# Merge LoRA weights in float domain.
lora_delta = (self.lora_alpha / self.lora_rank) * ops.matmul(
self.lora_embeddings_a, self.lora_embeddings_b
)
merged_float_embeddings = ops.add(float_embeddings, lora_delta)
# Requantize.
requantized_embeddings, embeddings_scale = quantizers.abs_max_quantize(
merged_float_embeddings,
axis=-1,
value_range=quant_range,
dtype="int8",
to_numpy=True,
)
embeddings_scale = ops.squeeze(embeddings_scale, axis=-1)
# Pack if int4.
if self.quantization_mode == "int4":
embeddings_value, _, _ = quantizers.pack_int4(
requantized_embeddings, axis=-1
)
else:
embeddings_value = requantized_embeddings
return embeddings_value, embeddings_scale
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/core/input_layer_test.py | keras/src/layers/core/input_layer_test.py | import numpy as np
from absl.testing import parameterized
from keras.src import backend
from keras.src import testing
from keras.src.backend import KerasTensor
from keras.src.layers import InputLayer
class InputLayerTest(testing.TestCase):
    """Tests for `InputLayer`: construction, validation, and output specs."""

    # Testing happy path for layer without input tensor
    @parameterized.named_parameters(
        [
            {"testcase_name": "dense"},
            {"testcase_name": "sparse", "sparse": True},
            {"testcase_name": "ragged", "ragged": True},
        ]
    )
    def test_input_basic(self, sparse=False, ragged=False):
        input_shape = (2, 3)
        batch_size = 4
        dtype = "float32"
        ndim = len(tuple((batch_size,) + input_shape))
        init_kwargs = {
            "shape": input_shape,
            "batch_size": batch_size,
            "dtype": dtype,
            "sparse": sparse,
            "ragged": ragged,
        }
        # Backends without sparse/ragged support must reject the flag at
        # construction time rather than failing later.
        if sparse and not backend.SUPPORTS_SPARSE_TENSORS:
            with self.assertRaisesRegex(
                ValueError, "`sparse=True` is not supported"
            ):
                InputLayer(**init_kwargs)
            return
        if ragged and not backend.SUPPORTS_RAGGED_TENSORS:
            with self.assertRaisesRegex(
                ValueError, "`ragged=True` is not supported"
            ):
                InputLayer(**init_kwargs)
            return
        values = InputLayer(**init_kwargs)
        self.assertEqual(values.dtype, dtype)
        self.assertEqual(values.batch_shape[0], batch_size)
        self.assertEqual(values.batch_shape[1:], input_shape)
        self.assertEqual(values.sparse, sparse)
        self.assertEqual(values.ragged, ragged)
        self.assertEqual(values.trainable, True)
        self.assertIsInstance(values.output, KerasTensor)
        self.assertEqual(values.output.ndim, ndim)
        self.assertEqual(values.output.dtype, dtype)
        self.assertEqual(values.output.sparse, sparse)
        self.assertEqual(values.output.ragged, ragged)

    # Testing shape is not None and batch_shape is not None condition
    def test_input_error1(self):
        input_shape = (2, 3)
        with self.assertRaisesRegex(
            ValueError, "cannot pass both `shape` and `batch_shape`"
        ):
            InputLayer(shape=input_shape, batch_shape=input_shape)

    # Testing batch_size is not None and batch_shape is not None
    def test_input_error2(self):
        input_shape = (2, 3)
        batch_size = 4
        with self.assertRaisesRegex(
            ValueError, "cannot pass both `batch_size` and `batch_shape`"
        ):
            InputLayer(batch_size=batch_size, batch_shape=input_shape)

    # Testing shape is None and batch_shape is None
    def test_input_error3(self):
        with self.assertRaisesRegex(ValueError, "pass a `shape` argument."):
            InputLayer(shape=None, batch_shape=None)

    # Testing Input tensor is not Keras tensor
    def test_input_tensor_error(self):
        input_shape = (2, 3)
        batch_size = 4
        input_tensor = np.zeros(input_shape)
        with self.assertRaisesRegex(
            ValueError, "Argument `input_tensor` must be a KerasTensor"
        ):
            InputLayer(
                shape=input_shape,
                batch_size=batch_size,
                input_tensor=input_tensor,
            )

    # Testing happy path for layer with input tensor
    # Renamed from `testing_input_tensor` so the name follows the
    # `test_*` convention used by every other case in this file (the
    # non-conforming name is invisible to pytest's default
    # `python_functions = "test_*"` discovery pattern).
    def test_input_tensor(self):
        input_shape = (2, 3)
        dtype = "float32"
        input_tensor = KerasTensor(shape=input_shape, dtype=dtype)
        layer = InputLayer(
            input_tensor=input_tensor,
        )
        self.assertEqual(layer.dtype, dtype)
        self.assertEqual(layer.batch_shape, (2, 3))
        self.assertEqual(layer.trainable, True)
        self.assertIsInstance(layer.output, KerasTensor)
        self.assertEqual(layer.output, input_tensor)
        self.assertEqual(layer.output.ndim, input_tensor.ndim)
        self.assertEqual(layer.output.dtype, dtype)

    def test_input_shape_deprecated(self):
        input_shape = (2, 3)
        batch_size = 4
        dtype = "float32"
        with self.assertWarnsRegex(
            UserWarning,
            "Argument `input_shape` is deprecated. Use `shape` instead.",
        ):
            layer = InputLayer(
                input_shape=input_shape, batch_size=batch_size, dtype=dtype
            )
        self.assertEqual(layer.batch_shape[0], batch_size)
        self.assertEqual(layer.batch_shape[1:], input_shape)
        self.assertEqual(layer.dtype, dtype)
        self.assertIsInstance(layer.output, KerasTensor)

    def test_call_method(self):
        # `call()` is a no-op for InputLayer and returns None.
        layer = InputLayer(shape=(32,))
        output = layer.call()
        self.assertIsNone(output)

    def test_numpy_shape(self):
        # non-python int type shapes should be ok
        InputLayer(shape=(np.int64(32),))

    def test_invalid_arg_combinations(self):
        input_tensor = KerasTensor(shape=(2, 3), dtype="float32")
        with self.assertRaisesRegex(
            ValueError, "cannot provide an incompatible `shape`"
        ):
            _ = InputLayer(
                shape=(2, 4),
                input_tensor=input_tensor,
            )
        with self.assertRaisesRegex(
            ValueError, "cannot provide an incompatible `batch_shape`"
        ):
            _ = InputLayer(
                batch_shape=(2, 4),
                input_tensor=input_tensor,
            )
        with self.assertRaisesRegex(
            ValueError, "cannot provide an incompatible `batch_size`"
        ):
            _ = InputLayer(
                batch_size=5,
                input_tensor=input_tensor,
            )
        with self.assertRaisesRegex(
            ValueError, "cannot provide an incompatible `dtype`"
        ):
            _ = InputLayer(
                dtype="float16",
                input_tensor=input_tensor,
            )
        with self.assertRaisesRegex(
            ValueError, "cannot provide an incompatible `sparse`"
        ):
            _ = InputLayer(
                sparse=True,
                input_tensor=input_tensor,
            )
        # This works
        _ = InputLayer(
            shape=(3,),
            batch_size=2,
            sparse=False,
            dtype="float32",
            input_tensor=input_tensor,
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/core/einsum_dense_test.py | keras/src/layers/core/einsum_dense_test.py | import os
import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import constraints
from keras.src import export
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import optimizers
from keras.src import quantizers
from keras.src import random
from keras.src import saving
from keras.src import testing
from keras.src.quantizers.gptq_config import GPTQConfig
from keras.src.quantizers.quantization_config import Int4QuantizationConfig
from keras.src.quantizers.quantization_config import Int8QuantizationConfig
from keras.src.quantizers.quantizers import AbsMaxQuantizer
class EinsumDenseTest(testing.TestCase):
    @parameterized.named_parameters(
        ("int8", "int8", {"axis": 0}, {"axis": -1}),
        (
            "int4",
            "int4",
            {"axis": 0, "value_range": (-8, 7), "output_dtype": "int8"},
            {"axis": -1},
        ),
        ("int8_weight_only", "int8", {"axis": 0}, None),
        (
            "int4_weight_only",
            "int4",
            {"axis": 0, "value_range": (-8, 7), "output_dtype": "int8"},
            None,
        ),
    )
    def test_einsum_dense_quantize(
        self, mode, weight_quantizer_args, activation_quantizer_args
    ):
        """Test EinsumDense quantization with QuantizationConfig.

        Covers int8/int4, each with and without an activation quantizer
        (`activation_quantizer_args is None` means weight-only).
        """
        layer = layers.EinsumDense(
            equation="ab,bcd->acd",
            output_shape=(8, 32),
            bias_axes="d",
        )
        layer.build((None, 3))
        weight_quantizer = AbsMaxQuantizer(**weight_quantizer_args)
        if activation_quantizer_args is not None:
            activation_quantizer = AbsMaxQuantizer(**activation_quantizer_args)
        else:
            activation_quantizer = None
        # `mode` is always "int8" or "int4" per the parameterization above,
        # so one of these branches always assigns `config`.
        if mode == "int8":
            config = Int8QuantizationConfig(
                weight_quantizer=weight_quantizer,
                activation_quantizer=activation_quantizer,
            )
        elif mode == "int4":
            config = Int4QuantizationConfig(
                weight_quantizer=weight_quantizer,
                activation_quantizer=activation_quantizer,
            )
        layer.quantize(mode, config=config)
        if activation_quantizer_args is not None:
            # Verify inputs_quantizer is set correctly
            self.assertIsInstance(layer.inputs_quantizer, AbsMaxQuantizer)
        else:
            # Verify inputs_quantizer is None
            self.assertIsNone(layer.inputs_quantizer)
        # Verify call works
        x = np.random.random((2, 3)).astype("float32")
        y = layer(x)
        self.assertEqual(y.shape, (2, 8, 32))
        if mode == "int4":
            # Verify kernel is int8 (packed int4)
            self.assertEqual(
                backend.standardize_dtype(layer._kernel.dtype), "int8"
            )
    @parameterized.named_parameters(
        {
            "testcase_name": "_1d_end_weight",
            "equation": "ab,b->a",
            "bias_axes": None,
            "input_shape": (2, 32),
            "output_shape": (),
            "expected_kernel_shape": (32,),
            "expected_bias_shape": None,
            "expected_output_shape": (2,),
        },
        {
            "testcase_name": "_2d_middle_weight",
            "equation": "ab,bc->ac",
            "bias_axes": None,
            "input_shape": (2, 32),
            "output_shape": (64),
            "expected_kernel_shape": (32, 64),
            "expected_bias_shape": None,
            "expected_output_shape": (2, 64),
        },
        {
            "testcase_name": "_3d_bert",
            "equation": "abc,cde->abde",
            "bias_axes": None,
            "input_shape": (2, 1, 2),
            "output_shape": (1, 3, 4),
            "expected_kernel_shape": (2, 3, 4),
            "expected_bias_shape": None,
            "expected_output_shape": (2, 1, 3, 4),
        },
        {
            "testcase_name": "_3d_3_bias",
            "equation": "abc,cde->abde",
            "bias_axes": "e",
            "input_shape": (2, 1, 2),
            "output_shape": (1, 3, 4),
            "expected_kernel_shape": (2, 3, 4),
            "expected_bias_shape": (4,),
            "expected_output_shape": (2, 1, 3, 4),
        },
        {
            "testcase_name": "_3d_2_bias",
            "equation": "abc,cde->abde",
            "bias_axes": "d",
            "input_shape": (2, 1, 2),
            "output_shape": (1, 3, 4),
            "expected_kernel_shape": (2, 3, 4),
            "expected_bias_shape": (3, 1),
            "expected_output_shape": (2, 1, 3, 4),
        },
        {
            "testcase_name": "_3d_1_3_bias",
            "equation": "abc,cde->abde",
            "bias_axes": "be",
            "input_shape": (2, 7, 2),
            "output_shape": (7, 3, 4),
            "expected_kernel_shape": (2, 3, 4),
            "expected_bias_shape": (7, 1, 4),
            "expected_output_shape": (2, 7, 3, 4),
        },
        {
            "testcase_name": "_3d_bert_projection",
            "equation": "BFNH,NHD->BFD",
            "bias_axes": None,
            "input_shape": (2, 1, 2, 3),
            "output_shape": (1, 4),
            "expected_kernel_shape": (2, 3, 4),
            "expected_bias_shape": None,
            "expected_output_shape": (2, 1, 4),
        },
        {
            "testcase_name": "_2d_bert",
            "equation": "abc,cd->abd",
            "bias_axes": None,
            "input_shape": (2, 1, 2),
            "output_shape": (1, 4),
            "expected_kernel_shape": (2, 4),
            "expected_bias_shape": None,
            "expected_output_shape": (2, 1, 4),
        },
        {
            "testcase_name": "_embedding_1d",
            "equation": "i,d->id",
            "bias_axes": None,
            "input_shape": (2,),
            "output_shape": (2,),
            "expected_kernel_shape": (2,),
            "expected_bias_shape": None,
            "expected_output_shape": (2, 2),
        },
        {
            "testcase_name": "_xlnet_lm",
            "equation": "ibd,nd->ibn",
            "bias_axes": None,
            "input_shape": (2, 2, 1),
            "output_shape": (2, 2),
            "expected_kernel_shape": (2, 1),
            "expected_bias_shape": None,
            "expected_output_shape": (2, 2, 2),
        },
        {
            "testcase_name": "_2d_precast",
            "equation": "...b,bc->...c",
            "bias_axes": None,
            "input_shape": (2, 32),
            "output_shape": (64,),
            "expected_kernel_shape": (32, 64),
            "expected_bias_shape": None,
            "expected_output_shape": (2, 64),
        },
        {
            "testcase_name": "_2d_precast_elided_input_used_in_output",
            "equation": "...bc,bc->...b",
            "bias_axes": None,
            "input_shape": (2, 32, 64),
            "output_shape": (32,),
            "expected_kernel_shape": (32, 64),
            "expected_bias_shape": None,
            "expected_output_shape": (2, 32),
        },
        {
            "testcase_name": "_2d_precast_multiple_elided_dims",
            "equation": "...b,bc->...c",
            "bias_axes": None,
            "input_shape": (2, 3, 32),
            "output_shape": (64,),
            "expected_kernel_shape": (32, 64),
            "expected_bias_shape": None,
            "expected_output_shape": (2, 3, 64),
        },
        {
            "testcase_name": "_3d_precast",
            "equation": "...c,cde->...de",
            "bias_axes": None,
            "input_shape": (2, 1, 2),
            "output_shape": (3, 4),
            "expected_kernel_shape": (2, 3, 4),
            "expected_bias_shape": None,
            "expected_output_shape": (2, 1, 3, 4),
        },
        {
            "testcase_name": "_3d_precast_3_bias",
            "equation": "...c,cde->...de",
            "bias_axes": "e",
            "input_shape": (2, 1, 2),
            "output_shape": (3, 4),
            "expected_kernel_shape": (2, 3, 4),
            "expected_bias_shape": (4,),
            "expected_output_shape": (2, 1, 3, 4),
        },
        {
            "testcase_name": "_3d_precast_2_bias",
            "equation": "...c,cde->...de",
            "bias_axes": "d",
            "input_shape": (2, 1, 2),
            "output_shape": (3, 4),
            "expected_kernel_shape": (2, 3, 4),
            "expected_bias_shape": (3, 1),
            "expected_output_shape": (2, 1, 3, 4),
        },
        {
            "testcase_name": "_3d_precast_2_3_bias",
            "equation": "...c,cde->...de",
            "bias_axes": "de",
            "input_shape": (2, 1, 2),
            "output_shape": (3, 4),
            "expected_kernel_shape": (2, 3, 4),
            "expected_bias_shape": (3, 4),
            "expected_output_shape": (2, 1, 3, 4),
        },
        {
            "testcase_name": "_2d_postcast",
            "equation": "bc...,cd->bd...",
            "bias_axes": None,
            "input_shape": (2, 1, 2, 3),
            "output_shape": (4,),
            "expected_kernel_shape": (1, 4),
            "expected_bias_shape": None,
            "expected_output_shape": (2, 4, 2, 3),
        },
        {
            "testcase_name": "_3d_postcast",
            "equation": "bc...,cde->bde...",
            "bias_axes": None,
            "input_shape": (2, 1, 2),
            "output_shape": (3, 4),
            "expected_kernel_shape": (1, 3, 4),
            "expected_bias_shape": None,
            "expected_output_shape": (2, 3, 4, 2),
        },
        {
            "testcase_name": "_3d_postcast_1_bias",
            "equation": "bc...,cde->bde...",
            "bias_axes": "d",
            "input_shape": (2, 1, 2),
            "output_shape": (3, 4),
            "expected_kernel_shape": (1, 3, 4),
            "expected_bias_shape": (3, 1, 1),
            "expected_output_shape": (2, 3, 4, 2),
        },
        {
            "testcase_name": "_3d_postcast_2_bias",
            "equation": "bc...,cde->bde...",
            "bias_axes": "e",
            "input_shape": (2, 1, 2),
            "output_shape": (3, 4),
            "expected_kernel_shape": (1, 3, 4),
            "expected_bias_shape": (4, 1),
            "expected_output_shape": (2, 3, 4, 2),
        },
        {
            "testcase_name": "_3d_postcast_1_2_bias",
            "equation": "bc...,cde->bde...",
            "bias_axes": "de",
            "input_shape": (2, 1, 2),
            "output_shape": (3, 4),
            "expected_kernel_shape": (1, 3, 4),
            "expected_bias_shape": (3, 4, 1),
            "expected_output_shape": (2, 3, 4, 2),
        },
    )
    @pytest.mark.requires_trainable_backend
    def test_einsum_dense_basics(
        self,
        equation,
        bias_axes,
        input_shape,
        output_shape,
        expected_kernel_shape,
        expected_bias_shape,
        expected_output_shape,
    ):
        """Shape behavior of EinsumDense across representative equations.

        Each named case above pins, for one einsum equation and bias
        configuration, the expected kernel/bias variable shapes and the
        layer's output shape (cases cover elided `...` dims before and
        after the reduced axes, and bias broadcast across 1..3 axes).
        """
        # Generic layer contract (weight counts, masking, serialization).
        self.run_layer_test(
            layers.EinsumDense,
            init_kwargs={
                "equation": equation,
                "output_shape": output_shape,
                "bias_axes": bias_axes,
            },
            input_shape=input_shape,
            expected_output_shape=expected_output_shape,
            expected_num_trainable_weights=(
                2 if expected_bias_shape is not None else 1
            ),
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=False,
        )
        # Exact variable shapes after a manual build.
        layer = layers.EinsumDense(
            equation, output_shape=output_shape, bias_axes=bias_axes
        )
        layer.build(input_shape)
        self.assertEqual(layer.kernel.shape, expected_kernel_shape)
        if expected_bias_shape is not None:
            self.assertEqual(layer.bias.shape, expected_bias_shape)
def test_einsum_dense_constraints(self):
layer = layers.EinsumDense(
"abc,cde->abde", (1, 3, 4), kernel_constraint="non_neg"
)
layer.build((2, 1, 2))
self.assertIsInstance(layer.kernel.constraint, constraints.NonNeg)
layer = layers.EinsumDense(
"ab,b->a", (1, 3, 4), bias_axes="a", bias_constraint="non_neg"
)
layer.build((2, 1, 2))
self.assertIsInstance(layer.bias.constraint, constraints.NonNeg)
    @pytest.mark.requires_trainable_backend
    def test_enable_lora(self):
        """End-to-end LoRA flow: enable, train, and save/load round trips.

        Checks weight partitioning after `enable_lora`, that both LoRA
        factors receive gradient updates during `fit`, and that LoRA
        checkpoints interoperate with non-LoRA models in both directions.
        """
        layer = layers.EinsumDense(
            equation="ab,bcd->acd",
            output_shape=(8, 32),
            bias_axes=None,
        )
        layer.build((None, 3))
        layer.enable_lora(2)
        # Base kernel becomes non-trainable; the two LoRA kernels train.
        self.assertLen(layer.trainable_weights, 2)
        self.assertLen(layer.non_trainable_weights, 1)
        if backend.backend() == "torch":
            self.assertLen(layer.torch_params, 3)
        # Try eager call
        x = np.random.random((64, 3))
        y = np.random.random((64, 8, 32))
        _ = layer(x[:2])
        init_lora_a_kernel_value = layer.lora_kernel_a.numpy()
        init_lora_b_kernel_value = layer.lora_kernel_b.numpy()
        # Try calling fit()
        model = models.Sequential(
            [
                layer,
            ]
        )
        model.compile(optimizer="sgd", loss="mse")
        model.fit(x, y, epochs=2)
        final_lora_a_kernel_value = layer.lora_kernel_a.numpy()
        final_lora_b_kernel_value = layer.lora_kernel_b.numpy()
        diff_a = np.max(
            np.abs(init_lora_a_kernel_value - final_lora_a_kernel_value)
        )
        diff_b = np.max(
            np.abs(init_lora_b_kernel_value - final_lora_b_kernel_value)
        )
        # Both LoRA factors must have moved during training.
        self.assertGreater(diff_a, 0.0)
        self.assertGreater(diff_b, 0.0)
        # Try saving and reloading the model
        temp_filepath = os.path.join(self.get_temp_dir(), "lora_model.keras")
        model.save(temp_filepath)
        new_model = saving.load_model(temp_filepath)
        self.assertTrue(new_model.layers[0].lora_enabled)
        self.assertAllClose(model.predict(x), new_model.predict(x))
        # Try saving and reloading the model's weights only
        temp_filepath = os.path.join(
            self.get_temp_dir(), "lora_model.weights.h5"
        )
        model.save_weights(temp_filepath)
        # Load the file into a fresh, non-lora model
        new_model = models.Sequential(
            [
                layers.EinsumDense(
                    equation="ab,bcd->acd",
                    output_shape=(8, 32),
                    bias_axes=None,
                ),
            ]
        )
        new_model.build((None, 3))
        new_model.load_weights(temp_filepath)
        self.assertAllClose(model.predict(x), new_model.predict(x))
        # Try loading a normal checkpoint into a lora model
        new_model.save_weights(temp_filepath)
        model.load_weights(temp_filepath)
        self.assertAllClose(model.predict(x), new_model.predict(x))
@pytest.mark.requires_trainable_backend
def test_enable_lora_with_alpha(self):
# Use a simple equation that mimics a `Dense` layer behavior.
equation = "ab,bc->ac"
output_shape = 3 # This means the kernel shape will be (input_dim, 3).
bias_axes = None
# Create and build the `EinsumDense` layer
# with an input shape (None, 2).
layer = layers.EinsumDense(
equation=equation, output_shape=output_shape, bias_axes=bias_axes
)
# Build the layer with an input shape of (batch, 2).
layer.build((None, 2))
# Set the base kernel weights to a known value.
base_kernel = np.array(
[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=np.float32
)
layer._kernel.assign(base_kernel)
# Enable LoRA with `rank`=2 and a custom `lora_alpha`=3.0.
layer.enable_lora(rank=2, lora_alpha=3.0)
self.assertEqual(layer.lora_rank, 2)
self.assertEqual(layer.lora_alpha, 3.0)
# The expected shapes are:
# `base_kernel`: (2, 3)
# `lora_kernel_a`: (2, 2) and `lora_kernel_b`: (2, 3)
a_val = np.array([[0.1, 0.2], [0.3, 0.4]], dtype=np.float32)
b_val = np.array([[0.5, 0.6, 0.7], [0.8, 0.9, 1.0]], dtype=np.float32)
layer.lora_kernel_a.assign(a_val)
layer.lora_kernel_b.assign(b_val)
# Compute expected effective kernel.
# Scaling factor is `lora_alpha / lora_rank` = 3.0 / 2 = 1.5
expected_delta = 1.5 * np.matmul(a_val, b_val)
expected_kernel = base_kernel + expected_delta
# Verify that the effective kernel property returns the expected value.
actual_kernel = ops.convert_to_numpy(layer.kernel)
self.assertAllClose(
actual_kernel, expected_kernel, tpu_atol=1e-3, tpu_rtol=1e-3
)
@pytest.mark.requires_trainable_backend
def test_lora_rank_argument(self):
self.run_layer_test(
layers.EinsumDense,
init_kwargs={
"equation": "ab,bcd->acd",
"output_shape": (8, 32),
"bias_axes": None,
"lora_rank": 2,
},
input_shape=(2, 3),
expected_output_shape=(2, 8, 32),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=1,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
    # Test quantization-related methods.
    @parameterized.named_parameters(
        ("int8", "int8", 1e-3),
        ("int4", "int4", 3e-3),
    )
    def test_quantize_int(self, mode, error_threshold):
        """Int quantization keeps outputs close and sets expected dtypes.

        Also exercises save/load of a quantized model and direct
        construction from a quantized dtype policy, for equations that
        reduce/transpose, expand, and squeeze dimensions.
        """
        layer = layers.EinsumDense(
            equation="ab,bcd->acd",
            output_shape=(8, 32),
            bias_axes="d",
        )
        layer.build((None, 3))
        x = np.random.random((2, 3))
        y_float = layer(x)
        layer.quantize(mode)
        # Verify weights dtype
        # (int4 kernels are stored packed inside an int8 buffer, so the
        # expected kernel dtype is "int8" for both modes.)
        self.assertEqual(backend.standardize_dtype(layer._kernel.dtype), "int8")
        self.assertEqual(
            backend.standardize_dtype(layer.kernel_scale.dtype),
            layer.variable_dtype,
        )
        # Try eager call and verify output correctness
        y_quantized = layer(x)
        mse = ops.mean(ops.square(y_float - y_quantized))
        self.assertLess(mse, error_threshold)  # A weak correctness test
        # Try saving and reloading the model
        model = models.Sequential([layer])
        temp_filepath = os.path.join(
            self.get_temp_dir(), "quantized_model.keras"
        )
        model.save(temp_filepath)
        new_model = saving.load_model(temp_filepath)
        self.assertAllClose(model.predict(x), new_model.predict(x))
        # Try saving and reloading the model's weights only
        # NOTE(review): the saved weights are never reloaded below — this
        # only checks that `save_weights` succeeds on a quantized model.
        temp_filepath = os.path.join(
            self.get_temp_dir(), "quantized_model.weights.h5"
        )
        model.save_weights(temp_filepath)
        # Try building with quantized dtype policy
        layer = layers.EinsumDense(
            equation="abcde,afce->acdbf",  # Test reduce and transpose
            output_shape=(2, 4, 8, 16),
            bias_axes="d",
            dtype=f"{mode}_from_mixed_bfloat16",
        )
        layer.build((1, 8, 2, 4, 32))
        self.assertEqual(backend.standardize_dtype(layer._kernel.dtype), "int8")
        self.assertEqual(
            backend.standardize_dtype(layer.kernel_scale.dtype), "float32"
        )
        layer = layers.EinsumDense(
            equation="a,b->ab",  # Test expand
            output_shape=(4,),
            dtype=f"{mode}_from_float32",
        )
        layer.build((None,))
        self.assertEqual(backend.standardize_dtype(layer._kernel.dtype), "int8")
        self.assertEqual(
            backend.standardize_dtype(layer.kernel_scale.dtype), "float32"
        )
        layer = layers.EinsumDense(
            equation="ab,ab->a",  # Test squeeze
            output_shape=(2,),
            dtype="int8_from_float32",
        )
        layer.build((2, 4))
        self.assertEqual(backend.standardize_dtype(layer._kernel.dtype), "int8")
        self.assertEqual(
            backend.standardize_dtype(layer.kernel_scale.dtype), "float32"
        )
    @parameterized.named_parameters(
        (
            "int8_btnh,nhd->btd",
            "int8",
            "btnh,nhd->btd",
            (None, 8),
            (1, 2, 2, 4),
            1e-3,
        ),
        (
            "int8_btd,ndh->btnh",
            "int8",
            "btd,ndh->btnh",
            (None, 2, 8),
            (1, 2, 4),
            1e-3,
        ),
        ("int8_btd,df->btf", "int8", "btd,df->btf", (None, 4), (1, 2, 4), 1e-3),
        (
            "int4_btnh,nhd->btd",
            "int4",
            "btnh,nhd->btd",
            (None, 8),
            (1, 2, 2, 4),
            3e-3,
        ),
        (
            "int4_btd,ndh->btnh",
            "int4",
            "btd,ndh->btnh",
            (None, 2, 8),
            (1, 2, 4),
            3e-3,
        ),
        (
            "int4_btd,df->btf",
            "int4",
            "btd,df->btf",
            (None, 4),
            (1, 2, 4),
            3e-3,
        ),
    )
    def test_quantize_with_specific_equations(
        self,
        quantization_mode,
        equation,
        output_shape,
        input_shape,
        error_threshold,
    ):
        """Quantized output stays within `error_threshold` MSE of float.

        Covers attention-style einsum equations for int8 and int4 (int4
        gets a looser threshold due to its coarser precision).
        """
        layer = layers.EinsumDense(equation=equation, output_shape=output_shape)
        layer.build(input_shape)
        x = ops.random.uniform(input_shape)
        y_float = layer(x)
        layer.quantize(quantization_mode)
        y_quantized = layer(x)
        mse = ops.mean(ops.square(y_float - y_quantized))
        self.assertLess(mse, error_threshold)  # A weak correctness test
@parameterized.named_parameters(
("int8", "int8"),
("float8", "float8"),
("int4", "int4"),
)
def test_quantize_on_unbuilt_layer(self, mode):
layer = layers.EinsumDense(
equation="ab,bcd->acd",
output_shape=(8, 32),
bias_axes="d",
)
with self.assertRaisesRegex(
ValueError, "Cannot quantize a layer that isn't yet built."
):
layer.quantize(mode)
@parameterized.named_parameters(
("int8", "int8"),
("float8", "float8"),
("int4", "int4"),
)
def test_quantize_on_subclass(self, mode):
class MyEinsumDense(layers.EinsumDense):
pass
layer = MyEinsumDense(
equation="ab,bcd->acd",
output_shape=(8, 32),
bias_axes="d",
)
layer.build((None, 3))
with self.assertRaises(NotImplementedError):
layer.quantize(mode)
layer.quantize(mode, type_check=False) # No error
@parameterized.named_parameters(
("int8", "int8"),
("float8", "float8"),
("int4", "int4"),
)
def test_quantize_when_already_quantized(self, mode):
layer = layers.EinsumDense(
equation="ab,bcd->acd",
output_shape=(8, 16),
bias_axes="d",
)
layer.build((None, 3))
layer.quantize(mode)
for m in ["int8", "float8"]:
with self.assertRaisesRegex(
ValueError, "is already quantized with dtype_policy="
):
layer.quantize(m)
layer = layers.EinsumDense(
equation="ab,bcd->acd",
output_shape=(8, 16),
bias_axes="d",
dtype=f"{mode}_from_float32",
)
layer.build((None, 3))
for m in ["int8", "float8"]:
with self.assertRaisesRegex(
ValueError, "is already quantized with dtype_policy="
):
layer.quantize(m)
@parameterized.named_parameters(
("int8", "int8_from_float32", 3),
("float8", "float8_from_float32", 8),
("int4", "int4_from_float32", 3),
)
def test_quantize_by_setting_dtype_policy(
self, policy, expected_num_variables
):
layer = layers.EinsumDense(
equation="ab,bcd->acd",
output_shape=(8, 32),
bias_axes="d",
)
layer.build((None, 3))
layer.dtype_policy = policy
self.assertLen(layer.variables, expected_num_variables)
    @parameterized.named_parameters(
        ("int7", "int7"),
        ("float7", "float7"),
        ("int3", "int3"),
    )
    def test_quantize_invalid_mode(self, mode):
        """Unknown quantization modes fail without corrupting the policy."""
        layer = layers.EinsumDense(
            equation="ab,bcd->acd",
            output_shape=(8, 32),
            bias_axes="d",
        )
        layer.build((None, 3))
        x = np.random.random((1, 3))
        # dtype_policy should not be altered by failed quantization
        original_dtype_policy = layer.dtype_policy
        # Test quantize
        with self.assertRaisesRegex(ValueError, "Invalid quantization mode."):
            layer.quantize(mode)
        self.assertEqual(layer.dtype_policy, original_dtype_policy)
        # Test quantized_build
        with self.assertRaisesRegex(
            NotImplementedError, "Invalid quantization mode."
        ):
            layer.quantized_build((None, 2), mode)
        self.assertEqual(layer.dtype_policy, original_dtype_policy)
        # Test quantized_call
        with self.assertRaisesRegex(
            NotImplementedError, "Invalid quantization mode."
        ):
            # Explicitly set quantization_mode
            # (reaches into the private policy to force the invalid mode)
            layer._dtype_policy._quantization_mode = mode
            layer.quantized_call(x)
        self.assertEqual(layer.dtype_policy, original_dtype_policy)
@parameterized.named_parameters(
("int8", "int8_from_mixed_bfloat16", 1, 2),
("float8", "float8_from_mixed_bfloat16", 8, 0),
("int4", "int4_from_mixed_bfloat16", 1, 2),
)
@pytest.mark.requires_trainable_backend
def test_quantize_dtype_argument(
self, dtype, num_trainable_weights, num_non_trainable_weights
):
self.run_layer_test(
layers.EinsumDense,
init_kwargs={
"equation": "ab,bcd->acd",
"output_shape": (8, 32),
"bias_axes": "d",
"dtype": dtype,
},
input_shape=(2, 3),
expected_output_shape=(2, 8, 32),
expected_num_trainable_weights=num_trainable_weights,
expected_num_non_trainable_weights=num_non_trainable_weights,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=False,
)
    @parameterized.named_parameters(
        ("int8_ab,bcd->acd", "int8", "ab,bcd->acd", (64, 3), (64, 8, 32)),
        (
            "int8_btd,ndh->btnh",
            "int8",
            "btd,ndh->btnh",
            (1, 4, 32),
            (1, 4, 8, 16),
        ),
        ("int4_ab,bcd->acd", "int4", "ab,bcd->acd", (64, 3), (64, 8, 32)),
        (
            "int4_btd,ndh->btnh",
            "int4",
            "btd,ndh->btnh",
            (1, 4, 32),
            (1, 4, 8, 16),
        ),
    )
    @pytest.mark.requires_trainable_backend
    def test_quantize_lora_integration(
        self, quantization_mode, equation, input_shape, output_shape
    ):
        """LoRA combined with quantization: train, save/load, export.

        Enables LoRA then quantizes, verifies the LoRA factors still
        receive gradient updates, round-trips full-model and weights-only
        checkpoints, and (TensorFlow backend only) exports to SavedModel
        and reloads via `TFSMLayer`.
        """
        config = dict(
            equation=equation, output_shape=output_shape[1:], bias_axes=None
        )
        layer = layers.EinsumDense(**config)
        layer.build(input_shape)
        layer.enable_lora(2)
        layer.quantize(quantization_mode)
        self.assertLen(layer.trainable_weights, 2)
        self.assertLen(layer.non_trainable_weights, 2)
        if backend.backend() == "torch":
            self.assertLen(layer.torch_params, 4)
        # Try calling fit()
        init_lora_a_kernel_value = layer.lora_kernel_a.numpy()
        init_lora_b_kernel_value = layer.lora_kernel_b.numpy()
        x = np.random.random(input_shape)
        y = np.random.random(output_shape)
        model = models.Sequential([layer])
        model.compile(optimizer="sgd", loss="mse")
        model.fit(x, y, epochs=2)
        final_lora_a_kernel_value = layer.lora_kernel_a.numpy()
        final_lora_b_kernel_value = layer.lora_kernel_b.numpy()
        diff_a = np.max(
            np.abs(init_lora_a_kernel_value - final_lora_a_kernel_value)
        )
        diff_b = np.max(
            np.abs(init_lora_b_kernel_value - final_lora_b_kernel_value)
        )
        self.assertGreater(diff_a, 0.0)
        self.assertGreater(diff_b, 0.0)
        # Try saving and reloading the model
        temp_filepath = os.path.join(
            self.get_temp_dir(), "quantized_lora_model.keras"
        )
        model.save(temp_filepath)
        new_model = saving.load_model(temp_filepath)
        self.assertTrue(new_model.layers[0].lora_enabled)
        self.assertAllClose(model.predict(x), new_model.predict(x), atol=0.5)
        # Try saving and reloading the model's weights only
        temp_filepath = os.path.join(
            self.get_temp_dir(), "quantized_lora_model.weights.h5"
        )
        model.save_weights(temp_filepath)
        new_model = models.Sequential([layers.EinsumDense(**config)])
        new_model.build(input_shape)
        new_model.quantize(quantization_mode)
        new_model.load_weights(temp_filepath)
        self.assertFalse(new_model.layers[0].lora_enabled)
        self.assertAllClose(model.predict(x), new_model.predict(x), atol=0.5)
        # Try loading a normal checkpoint into a lora model
        new_model.save_weights(temp_filepath)
        model.load_weights(temp_filepath)
        self.assertAllClose(model.predict(x), new_model.predict(x), atol=0.5)
        # Test export and TFSMLayer reloading when using tensorflow backend
        if backend.backend() == "tensorflow":
            import tensorflow as tf

            temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
            ref_input = tf.random.normal(input_shape)
            ref_output = model(ref_input)
            model.export(temp_filepath, format="tf_saved_model")
            reloaded_layer = export.TFSMLayer(temp_filepath)
            self.assertAllClose(
                reloaded_layer(ref_input), ref_output, atol=1e-7
            )
            self.assertLen(reloaded_layer.weights, len(model.weights))
            self.assertLen(
                reloaded_layer.trainable_weights, len(model.trainable_weights)
            )
            self.assertLen(
                reloaded_layer.non_trainable_weights,
                len(model.non_trainable_weights),
            )
@pytest.mark.requires_trainable_backend
def test_quantize_float8(self):
import ml_dtypes
from keras.src import quantizers
layer = layers.EinsumDense(
"ab,bc->ac",
output_shape=[32],
bias_axes="c",
)
layer.build((None, 16))
layer.quantize("float8")
optimizer = optimizers.AdamW(learning_rate=0.1)
optimizer.build(layer.trainable_variables)
def loss_fn(x, dy):
y = layer(x, training=True)
loss = y * ops.cast(dy, y.dtype)
return ops.sum(loss)
if backend.backend() == "tensorflow":
import tensorflow as tf
@tf.function(jit_compile=True)
def train_one_step(x, dy):
with tf.GradientTape() as tape:
loss = loss_fn(x, dy)
grads = tape.gradient(loss, layer.trainable_variables)
optimizer.apply(grads, layer.trainable_variables)
elif backend.backend() == "jax":
import jax
def stateless_loss_fn(trainable_variables, x, dy):
y = layer.stateless_call(
trainable_variables, [], x, training=True
)[0]
loss = y * ops.cast(dy, y.dtype)
return ops.sum(loss)
grad_fn = jax.jit(jax.grad(stateless_loss_fn))
def train_one_step(x, dy):
trainable_variables = [
v.value for v in layer.trainable_variables
]
optimizer_variables = [v.value for v in optimizer.variables]
grads = grad_fn(trainable_variables, x, dy)
trainable_variables, optimizer_variables = (
optimizer.stateless_apply(
optimizer_variables, grads, trainable_variables
)
)
for variable, value in zip(
layer.trainable_variables, trainable_variables
):
variable.assign(value)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/core/dense_test.py | keras/src/layers/core/dense_test.py | import os
import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import constraints
from keras.src import export
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import optimizers
from keras.src import quantizers
from keras.src import random
from keras.src import saving
from keras.src import testing
from keras.src.backend.common import keras_tensor
from keras.src.quantizers.gptq_config import GPTQConfig
from keras.src.quantizers.quantization_config import Int4QuantizationConfig
from keras.src.quantizers.quantization_config import Int8QuantizationConfig
from keras.src.quantizers.quantizers import AbsMaxQuantizer
class DenseTest(testing.TestCase):
    @parameterized.named_parameters(
        ("int8", "int8", {"axis": 0}, {}),
        (
            "int4",
            "int4",
            {"axis": 0, "value_range": (-8, 7), "output_dtype": "int8"},
            {"axis": -1},
        ),
        ("int8_weight_only", "int8", {"axis": 0}, None),
    )
    def test_dense_quantize_config(
        self, mode, weight_quantizer_args, activation_quantizer_args
    ):
        """Test Dense quantization with QuantizationConfig.

        `activation_quantizer_args is None` means weight-only
        quantization (no input quantizer attached to the layer).
        """
        layer = layers.Dense(units=32)
        layer.build((None, 8))
        weight_quantizer = AbsMaxQuantizer(**weight_quantizer_args)
        if activation_quantizer_args is not None:
            activation_quantizer = AbsMaxQuantizer(**activation_quantizer_args)
        else:
            activation_quantizer = None
        # `mode` is always "int8" or "int4" per the parameterization above,
        # so one of these branches always assigns `config`.
        if mode == "int8":
            config = Int8QuantizationConfig(
                weight_quantizer=weight_quantizer,
                activation_quantizer=activation_quantizer,
            )
        elif mode == "int4":
            config = Int4QuantizationConfig(
                weight_quantizer=weight_quantizer,
                activation_quantizer=activation_quantizer,
            )
        layer.quantize(mode, config=config)
        if activation_quantizer_args is not None:
            # Verify inputs_quantizer is set correctly
            self.assertIsInstance(layer.inputs_quantizer, AbsMaxQuantizer)
        else:
            # Verify inputs_quantizer is None
            self.assertIsNone(layer.inputs_quantizer)
        # Verify call works
        x = np.random.random((2, 8)).astype("float32")
        y = layer(x)
        self.assertEqual(y.shape, (2, 32))
        if mode == "int4":
            # Verify kernel is int8 (packed int4)
            self.assertEqual(
                backend.standardize_dtype(layer._kernel.dtype), "int8"
            )
@pytest.mark.requires_trainable_backend
def test_dense_basics(self):
# 2D case, no bias.
self.run_layer_test(
layers.Dense,
init_kwargs={
"units": 4,
"activation": "relu",
"kernel_initializer": "random_uniform",
"bias_initializer": "ones",
"use_bias": False,
},
input_shape=(2, 3),
expected_output_shape=(2, 4),
expected_num_trainable_weights=1,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
supports_masking=True,
)
# 3D case, some regularizers.
self.run_layer_test(
layers.Dense,
init_kwargs={
"units": 5,
"activation": "sigmoid",
"kernel_regularizer": "l2",
"bias_regularizer": "l2",
},
input_shape=(2, 3, 4),
expected_output_shape=(2, 3, 5),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=2, # we have 2 regularizers.
supports_masking=True,
)
@parameterized.named_parameters(
("zero", 0),
("negative", -3),
("float", 2.5),
("none", None),
("string", "64"),
)
def test_dense_invalid_units_raises(self, units):
with self.assertRaisesRegex(ValueError, "positive integer"):
layers.Dense(units)
def test_dense_correctness(self):
# With bias and activation.
layer = layers.Dense(units=2, activation="relu")
layer.build((1, 2))
layer.set_weights(
[
np.array([[1.0, -2.0], [3.0, -4.0]]),
np.array([5.0, -6.0]),
]
)
inputs = np.array(
[[-1.0, 2.0]],
)
self.assertAllClose(layer(inputs), [[10.0, 0.0]])
# Just a kernel matmul.
layer = layers.Dense(units=2, use_bias=False)
layer.build((1, 2))
layer.set_weights(
[
np.array([[1.0, -2.0], [3.0, -4.0]]),
]
)
inputs = np.array(
[[-1.0, 2.0]],
)
self.assertEqual(layer.bias, None)
self.assertAllClose(layer(inputs), [[5.0, -6.0]])
def test_dense_errors(self):
with self.assertRaisesRegex(ValueError, "incompatible with the layer"):
layer = layers.Dense(units=2, activation="relu")
layer(keras_tensor.KerasTensor((1, 2)))
layer(keras_tensor.KerasTensor((1, 3)))
@pytest.mark.skipif(
not backend.SUPPORTS_SPARSE_TENSORS,
reason="Backend does not support sparse tensors.",
)
def test_dense_sparse(self):
import tensorflow as tf
self.run_layer_test(
layers.Dense,
init_kwargs={
"units": 4,
},
input_shape=(2, 3),
input_sparse=True,
expected_output_shape=(2, 4),
expected_num_trainable_weights=2,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=0,
expected_num_losses=0,
)
inputs = 4 * backend.random.uniform((10, 10))
inputs = tf.sparse.from_dense(tf.nn.dropout(inputs, 0.8))
inputs = np.random.random((10, 10)).astype("float32")
inputs = np.multiply(inputs, inputs >= 0.8)
if backend.backend() == "tensorflow":
import tensorflow as tf
inputs = tf.sparse.from_dense(inputs)
elif backend.backend() == "jax":
import jax.experimental.sparse as jax_sparse
inputs = jax_sparse.BCOO.fromdense(inputs)
else:
self.fail(f"Sparse is unsupported with backend {backend.backend()}")
layer = layers.Dense(units=10)
outputs = layer(inputs)
# Verify the computation is the same as if it had been a dense tensor
expected_outputs = ops.add(
ops.matmul(
backend.convert_to_tensor(inputs, sparse=False), layer.kernel
),
layer.bias,
)
self.assertAllClose(
outputs, expected_outputs, tpu_atol=1e-2, tpu_rtol=1e-2
)
# Verify the gradient is sparse
if backend.backend() == "tensorflow":
import tensorflow as tf
with tf.GradientTape() as g:
outputs = layer(inputs)
self.assertIsInstance(
g.gradient(outputs, layer.kernel), tf.IndexedSlices
)
def test_dense_no_activation(self):
layer = layers.Dense(units=2, use_bias=False, activation=None)
layer.build((1, 2))
layer.set_weights(
[
np.array([[1.0, -2.0], [3.0, -4.0]]),
]
)
inputs = np.array(
[[-1.0, 2.0]],
)
self.assertEqual(layer.bias, None)
self.assertAllClose(layer(inputs), [[5.0, -6.0]])
def test_dense_without_activation_set(self):
layer = layers.Dense(units=2, use_bias=False)
layer.build((1, 2))
layer.set_weights(
[
np.array([[1.0, -2.0], [3.0, -4.0]]),
]
)
layer.activation = None
inputs = np.array(
[[-1.0, 2.0]],
)
self.assertEqual(layer.bias, None)
self.assertAllClose(layer(inputs), [[5.0, -6.0]])
def test_dense_with_activation(self):
layer = layers.Dense(units=2, use_bias=False, activation="relu")
layer.build((1, 2))
layer.set_weights(
[
np.array([[1.0, -2.0], [3.0, -4.0]]),
]
)
inputs = np.array(
[[-1.0, 2.0]],
)
output = layer(inputs)
expected_output = np.array([[5.0, 0.0]])
self.assertAllClose(output, expected_output)
def test_dense_constraints(self):
layer = layers.Dense(units=2, kernel_constraint="non_neg")
layer.build((None, 2))
self.assertIsInstance(layer.kernel.constraint, constraints.NonNeg)
layer = layers.Dense(units=2, bias_constraint="non_neg")
layer.build((None, 2))
self.assertIsInstance(layer.bias.constraint, constraints.NonNeg)
    @pytest.mark.requires_trainable_backend
    def test_enable_lora(self):
        """End-to-end LoRA test for Dense: weight counts after enabling,
        training updates both LoRA factors, and whole-model / weights-only
        save-load round-trips preserve predictions."""
        layer = layers.Dense(units=16)
        layer.build((None, 8))
        layer.enable_lora(4)
        # After enabling LoRA: 3 trainable weights, 1 non-trainable.
        self.assertLen(layer.trainable_weights, 3)
        self.assertLen(layer.non_trainable_weights, 1)
        if backend.backend() == "torch":
            self.assertLen(layer.torch_params, 4)
        # Try eager call
        x = np.random.random((64, 8))
        y = np.random.random((64, 16))
        _ = layer(x[:2])
        init_lora_a_kernel_value = layer.lora_kernel_a.numpy()
        init_lora_b_kernel_value = layer.lora_kernel_b.numpy()
        # Try calling fit()
        model = models.Sequential(
            [
                layer,
            ]
        )
        model.compile(optimizer="sgd", loss="mse")
        model.fit(x, y)
        final_lora_a_kernel_value = layer.lora_kernel_a.numpy()
        final_lora_b_kernel_value = layer.lora_kernel_b.numpy()
        diff_a = np.max(
            np.abs(init_lora_a_kernel_value - final_lora_a_kernel_value)
        )
        diff_b = np.max(
            np.abs(init_lora_b_kernel_value - final_lora_b_kernel_value)
        )
        # Both LoRA factors must have moved during training.
        self.assertGreater(diff_a, 0.0)
        self.assertGreater(diff_b, 0.0)
        # Try saving and reloading the model
        temp_filepath = os.path.join(self.get_temp_dir(), "lora_model.keras")
        model.save(temp_filepath)
        new_model = saving.load_model(temp_filepath)
        self.assertTrue(new_model.layers[0].lora_enabled)
        self.assertAllClose(model.predict(x), new_model.predict(x))
        # Try saving and reloading the model's weights only
        temp_filepath = os.path.join(
            self.get_temp_dir(), "lora_model.weights.h5"
        )
        model.save_weights(temp_filepath)
        # Load the file into a fresh, non-lora model
        new_model = models.Sequential(
            [
                layers.Dense(units=16),
            ]
        )
        new_model.build((None, 8))
        new_model.load_weights(temp_filepath)
        self.assertAllClose(model.predict(x), new_model.predict(x))
        # Try loading a normal checkpoint into a lora model
        new_model.save_weights(temp_filepath)
        model.load_weights(temp_filepath)
        self.assertAllClose(model.predict(x), new_model.predict(x))
    @pytest.mark.requires_trainable_backend
    def test_enable_lora_with_alpha(self):
        """The effective kernel equals base kernel plus the LoRA delta
        scaled by `lora_alpha / lora_rank`."""
        # Create a `Dense` layer and build it.
        layer = layers.Dense(units=8)
        layer.build((None, 4))
        # Enable LoRA with `rank`=2 and `lora_alpha`=3.0.
        layer.enable_lora(2, lora_alpha=3.0)
        self.assertEqual(layer.lora_rank, 2)
        self.assertEqual(layer.lora_alpha, 3.0)
        # Manually compute the expected effective kernel:
        # `effective_kernel_expected` = `base_kernel` +
        # `lora_alpha / lora_rank` * `lora_kernel_a @ lora_kernel_b`
        base_kernel = ops.convert_to_numpy(layer._kernel)
        lora_update = np.matmul(
            ops.convert_to_numpy(layer.lora_kernel_a),
            ops.convert_to_numpy(layer.lora_kernel_b),
        )
        effective_kernel_expected = base_kernel + (3.0 / 2) * lora_update
        # Verify that the effective kernel matches expectation.
        self.assertAllClose(
            ops.convert_to_numpy(layer.kernel), effective_kernel_expected
        )
    @pytest.mark.requires_trainable_backend
    def test_lora_weight_name(self):
        """LoRA variables get a path prefixed by the owning model and layer."""

        class MyModel(models.Model):
            # Minimal model wrapping a single named Dense layer so that the
            # variable path includes both the model and layer names.
            def __init__(self):
                super().__init__(name="mymodel")
                self.dense = layers.Dense(16, name="dense")

            def build(self, input_shape):
                self.dense.build(input_shape)

            def call(self, x):
                return self.dense(x)

        model = MyModel()
        model.build((None, 8))
        model.dense.enable_lora(4)
        self.assertEqual(
            model.dense.lora_kernel_a.path, "mymodel/dense/lora_kernel_a"
        )
    @pytest.mark.requires_trainable_backend
    def test_lora_rank_argument(self):
        """Passing `lora_rank` at construction enables LoRA: 3 trainable
        weights (bias + two LoRA factors) and 1 non-trainable weight."""
        self.run_layer_test(
            layers.Dense,
            init_kwargs={
                "units": 5,
                "activation": "sigmoid",
                "kernel_regularizer": "l2",
                "lora_rank": 2,
            },
            input_shape=(2, 3, 4),
            expected_output_shape=(2, 3, 5),
            expected_num_trainable_weights=3,
            expected_num_non_trainable_weights=1,
            expected_num_seed_generators=0,
            expected_num_losses=2,  # we have 2 regularizers.
            supports_masking=True,
        )
def test_enable_lora_with_kernel_constraint(self):
layer = layers.Dense(units=2, kernel_constraint="max_norm")
with self.assertRaisesRegex(
ValueError, "incompatible with kernel constraints"
):
layer.enable_lora(rank=2)
def test_enable_lora_on_unbuilt_layer(self):
layer = layers.Dense(units=2)
with self.assertRaisesRegex(
ValueError, "Cannot enable lora on a layer that isn't yet built"
):
layer.enable_lora(rank=2)
def test_enable_lora_when_already_enabled(self):
layer = layers.Dense(units=2)
layer.build((None, 2))
layer.enable_lora(rank=2)
with self.assertRaisesRegex(ValueError, "lora is already enabled"):
layer.enable_lora(rank=2)
    # Test quantization-related methods.
    @parameterized.named_parameters(
        ("int8", "int8", 1e-3),
        ("int4", "int4", 2e-3),
    )
    def test_quantize_int(self, mode, error_threshold):
        """Int8/int4 quantization: weight dtypes, output closeness to the
        float layer, and save/load round-trips (model and weights-only)."""
        if mode == "int4" and testing.tensorflow_uses_gpu():
            self.skipTest("Segfault")
        layer = layers.Dense(units=16)
        layer.build((None, 8))
        x = np.random.random((2, 8))
        y_float = layer(x)
        layer.quantize(mode)
        # Verify the dtype of the weights.
        # The kernel's data type is int8, despite the int4 quantization, because
        # we pack the int4 values into int8.
        self.assertEqual(backend.standardize_dtype(layer._kernel.dtype), "int8")
        self.assertEqual(
            backend.standardize_dtype(layer.kernel_scale.dtype),
            layer.variable_dtype,
        )
        # Verify the correctness of the outputs.
        y_quantized = layer(x)
        mse = ops.mean(ops.square(y_float - y_quantized))
        self.assertLess(mse, error_threshold)  # A weak correctness test
        # Check model save / load round-trip.
        model = models.Sequential([layer])
        temp_filepath = os.path.join(
            self.get_temp_dir(), "quantized_model.keras"
        )
        model.save(temp_filepath)
        new_model = saving.load_model(temp_filepath)
        self.assertAllClose(model.predict(x), new_model.predict(x))
        # Check weights-only save / load round-trip.
        temp_filepath = os.path.join(
            self.get_temp_dir(), "quantized_model.weights.h5"
        )
        model.save_weights(temp_filepath)
        # The target model must be quantized before loading the checkpoint.
        new_model = models.Sequential([layers.Dense(units=16)])
        new_model.build((None, 8))
        new_model.quantize(mode)
        new_model.load_weights(temp_filepath)
        self.assertAllClose(model.predict(x), new_model.predict(x))
@parameterized.named_parameters(
("int8", "int8"),
("int4", "int4"),
("float8", "float8"),
)
def test_quantize_on_unbuilt_layer(self, mode):
layer = layers.Dense(units=2)
with self.assertRaisesRegex(
ValueError, "Cannot quantize a layer that isn't yet built."
):
layer.quantize(mode)
@parameterized.named_parameters(
("int8", "int8"),
("int4", "int4"),
("float8", "float8"),
)
def test_quantize_on_subclass(self, mode):
class MyDense(layers.Dense):
pass
layer = MyDense(units=16)
layer.build((None, 8))
with self.assertRaises(NotImplementedError):
layer.quantize(mode)
layer.quantize(mode, type_check=False) # No error
@parameterized.named_parameters(
("int8", "int8"),
("int4", "int4"),
("float8", "float8"),
)
def test_quantize_when_already_quantized(self, mode):
layer = layers.Dense(units=2)
layer.build((None, 2))
layer.quantize(mode)
for m in ["int8", "int4", "float8"]:
with self.assertRaisesRegex(
ValueError, "is already quantized with dtype_policy="
):
layer.quantize(m)
layer = layers.Dense(units=2, dtype=f"{mode}_from_float32")
layer.build((None, 2))
for m in ["int8", "int4", "float8"]:
with self.assertRaisesRegex(
ValueError, "is already quantized with dtype_policy="
):
layer.quantize(m)
    @parameterized.named_parameters(
        ("int8", "int8_from_float32", 3),
        ("int4", "int4_from_float32", 3),  # bias + packed kernel + scale
        ("float8", "float8_from_float32", 8),
    )
    @pytest.mark.skipif(testing.tensorflow_uses_gpu(), reason="Segfault")
    def test_quantize_by_setting_dtype_policy(
        self, policy, expected_num_variables
    ):
        """Assigning a quantized dtype policy changes the layer's variables
        to the quantized set (count checked per mode above)."""
        layer = layers.Dense(units=2)
        layer.build((None, 2))
        layer.dtype_policy = policy
        self.assertLen(layer.variables, expected_num_variables)
    @parameterized.named_parameters(
        ("int7", "int7"),
        ("float7", "float7"),
    )
    def test_quantize_invalid_mode(self, mode):
        """Invalid quantization modes raise from `quantize`, `quantized_build`
        and `quantized_call`, and never mutate the dtype policy."""
        layer = layers.Dense(units=2)
        layer.build((None, 2))
        x = np.random.random((1, 2))
        # dtype_policy should not be altered by failed quantization
        original_dtype_policy = layer.dtype_policy
        # Test quantize
        with self.assertRaisesRegex(ValueError, "Invalid quantization mode."):
            layer.quantize(mode)
        self.assertEqual(layer.dtype_policy, original_dtype_policy)
        # Test quantized_build
        with self.assertRaisesRegex(
            NotImplementedError, "Invalid quantization mode."
        ):
            layer.quantized_build((None, 2), mode)
        self.assertEqual(layer.dtype_policy, original_dtype_policy)
        # Test quantized_call
        with self.assertRaisesRegex(
            NotImplementedError, "Invalid quantization mode."
        ):
            # Explicitly set quantization_mode
            layer._dtype_policy._quantization_mode = mode
            layer.quantized_call(x)
        self.assertEqual(layer.dtype_policy, original_dtype_policy)
    @parameterized.named_parameters(
        ("int8", "int8_from_mixed_bfloat16", 1, 2),
        ("int4", "int4_from_mixed_bfloat16", 1, 2),
        ("float8", "float8_from_mixed_bfloat16", 8, 0),
    )
    @pytest.mark.requires_trainable_backend
    @pytest.mark.skipif(testing.tensorflow_uses_gpu(), reason="Segfault")
    def test_quantize_dtype_argument(
        self, dtype, num_trainable_weights, num_non_trainable_weights
    ):
        """Passing a quantized dtype policy string through `dtype=` produces
        the expected trainable/non-trainable weight counts per mode."""
        self.run_layer_test(
            layers.Dense,
            init_kwargs={"units": 5, "dtype": dtype},
            input_shape=(2, 3, 4),
            expected_output_shape=(2, 3, 5),
            expected_num_trainable_weights=num_trainable_weights,
            expected_num_non_trainable_weights=num_non_trainable_weights,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=True,
        )
    @parameterized.named_parameters(
        ("int8", "int8", 3, 2, 5),
        ("int4", "int4", 3, 2, 5),
    )
    @pytest.mark.requires_trainable_backend
    @pytest.mark.skipif(testing.tensorflow_uses_gpu(), reason="Segfault")
    def test_quantize_lora_integration(
        self,
        mode,
        num_trainable_weights,
        num_non_trainable_weights,
        num_torch_params,
    ):
        """LoRA + int quantization combined: weight counts, training updates
        the LoRA factors, save/load round-trips, and (on TF) export/reload
        via TFSMLayer."""
        # Note that saving and loading with lora_enabled and quantized are
        # lossy, so we use a weak correctness test for model outputs (atol=0.5).
        config = dict(units=16)
        layer = layers.Dense(**config)
        layer.build((None, 8))
        layer.enable_lora(4)
        layer.quantize(mode)
        self.assertLen(layer.trainable_weights, num_trainable_weights)
        self.assertLen(layer.non_trainable_weights, num_non_trainable_weights)
        if backend.backend() == "torch":
            self.assertLen(layer.torch_params, num_torch_params)
        # Try calling fit()
        init_lora_a_kernel_value = layer.lora_kernel_a.numpy()
        init_lora_b_kernel_value = layer.lora_kernel_b.numpy()
        x = np.random.random((64, 8))
        y = np.random.random((64, 16))
        model = models.Sequential([layer])
        model.compile(optimizer="sgd", loss="mse")
        model.fit(x, y, epochs=2)
        final_lora_a_kernel_value = layer.lora_kernel_a.numpy()
        final_lora_b_kernel_value = layer.lora_kernel_b.numpy()
        diff_a = np.max(
            np.abs(init_lora_a_kernel_value - final_lora_a_kernel_value)
        )
        diff_b = np.max(
            np.abs(init_lora_b_kernel_value - final_lora_b_kernel_value)
        )
        # Training must have moved both LoRA factors.
        self.assertGreater(diff_a, 0.0)
        self.assertGreater(diff_b, 0.0)
        # Try saving and reloading the model
        temp_filepath = os.path.join(
            self.get_temp_dir(), "quantized_lora_model.keras"
        )
        model.save(temp_filepath)
        new_model = saving.load_model(temp_filepath)
        self.assertTrue(new_model.layers[0].lora_enabled)
        self.assertAllClose(model.predict(x), new_model.predict(x), atol=0.5)
        # Try saving and reloading the model's weights only
        temp_filepath = os.path.join(
            self.get_temp_dir(), "quantized_lora_model.weights.h5"
        )
        model.save_weights(temp_filepath)
        new_model = models.Sequential([layers.Dense(**config)])
        new_model.build((None, 8))
        new_model.quantize(mode)
        new_model.load_weights(temp_filepath)
        # Weights-only loading does not re-enable LoRA on the fresh model.
        self.assertFalse(new_model.layers[0].lora_enabled)
        self.assertAllClose(model.predict(x), new_model.predict(x), atol=0.5)
        # Try loading a normal checkpoint into a lora model
        new_model.save_weights(temp_filepath)
        model.load_weights(temp_filepath)
        self.assertAllClose(model.predict(x), new_model.predict(x), atol=0.5)
        # Test export and TFSMLayer reloading when using tensorflow backend
        if backend.backend() == "tensorflow":
            import tensorflow as tf

            temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
            ref_input = tf.random.normal((2, 8))
            ref_output = model(ref_input)
            model.export(temp_filepath, format="tf_saved_model")
            reloaded_layer = export.TFSMLayer(temp_filepath)
            self.assertAllClose(
                reloaded_layer(ref_input), ref_output, atol=1e-7
            )
            self.assertLen(reloaded_layer.weights, len(model.weights))
            self.assertLen(
                reloaded_layer.trainable_weights, len(model.trainable_weights)
            )
            self.assertLen(
                reloaded_layer.non_trainable_weights,
                len(model.non_trainable_weights),
            )
    @pytest.mark.requires_trainable_backend
    @pytest.mark.skipif(testing.tensorflow_uses_gpu(), reason="Segfault")
    def test_quantize_float8(self):
        """Float8 training: run a few optimizer steps per backend and check
        the layer's amax histories and scales track a manual reimplementation
        of the float8 bookkeeping (via `quantizers.compute_float8_*`)."""
        import ml_dtypes

        from keras.src import quantizers

        layer = layers.Dense(units=32)
        layer.build((None, 16))
        layer.quantize("float8")
        optimizer = optimizers.AdamW(learning_rate=0.1)
        optimizer.build(layer.trainable_variables)

        # Loss is a weighted sum of outputs so that `dy` becomes the output
        # gradient.
        def loss_fn(x, dy):
            y = layer(x, training=True)
            loss = y * ops.cast(dy, y.dtype)
            return ops.sum(loss)

        # Backend-specific one-step training loops; each computes gradients
        # of `loss_fn` and applies them with the optimizer.
        if backend.backend() == "tensorflow":
            import tensorflow as tf

            @tf.function(jit_compile=True)
            def train_one_step(x, dy):
                with tf.GradientTape() as tape:
                    loss = loss_fn(x, dy)
                grads = tape.gradient(loss, layer.trainable_variables)
                optimizer.apply(grads, layer.trainable_variables)

        elif backend.backend() == "jax":
            import jax

            def stateless_loss_fn(trainable_variables, x, dy):
                y = layer.stateless_call(
                    trainable_variables, [], x, training=True
                )[0]
                loss = y * ops.cast(dy, y.dtype)
                return ops.sum(loss)

            grad_fn = jax.jit(jax.grad(stateless_loss_fn))

            def train_one_step(x, dy):
                trainable_variables = [
                    v.value for v in layer.trainable_variables
                ]
                optimizer_variables = [v.value for v in optimizer.variables]
                grads = grad_fn(trainable_variables, x, dy)
                trainable_variables, optimizer_variables = (
                    optimizer.stateless_apply(
                        optimizer_variables, grads, trainable_variables
                    )
                )
                # Write the functional update results back into the
                # stateful variables.
                for variable, value in zip(
                    layer.trainable_variables, trainable_variables
                ):
                    variable.assign(value)
                for variable, value in zip(
                    optimizer.variables, optimizer_variables
                ):
                    variable.assign(value)

        elif backend.backend() == "torch":

            def train_one_step(x, dy):
                layer.zero_grad()
                loss = loss_fn(x, dy)
                loss.backward()
                grads = [v.value.grad for v in layer.trainable_variables]
                optimizer.apply(grads, layer.trainable_variables)

        # Reference state for inputs (x), kernel (k) and output grads (g).
        scale_x, amax_history_x = ops.ones(()), ops.zeros((1024,))
        scale_k, amax_history_k = ops.ones(()), ops.zeros((1024,))
        scale_g, amax_history_g = ops.ones(()), ops.zeros((1024,))
        e4m3_max = ops.cast(
            float(ml_dtypes.finfo("float8_e4m3fn").max), "float32"
        )
        e5m2_max = ops.cast(
            float(ml_dtypes.finfo("float8_e5m2").max), "float32"
        )
        for _ in range(3):
            x = random.normal((16, 16), dtype="float32")
            g = random.normal((16, 32), dtype="float32")
            k = ops.convert_to_tensor(layer._kernel)
            # Manually compute the expected amax history and scaling factors.
            amax_from_history_x = ops.max(amax_history_x)
            amax_from_history_k = ops.max(amax_history_k)
            amax_from_history_g = ops.max(amax_history_g)
            scale_x = quantizers.compute_float8_scale(
                amax_from_history_x, scale_x, e4m3_max
            )
            scale_k = quantizers.compute_float8_scale(
                amax_from_history_k, scale_k, e4m3_max
            )
            scale_g = quantizers.compute_float8_scale(
                amax_from_history_g, scale_g, e5m2_max
            )
            amax_history_x = quantizers.compute_float8_amax_history(
                x, amax_history_x
            )
            amax_history_k = quantizers.compute_float8_amax_history(
                k, amax_history_k
            )
            amax_history_g = quantizers.compute_float8_amax_history(
                g, amax_history_g
            )
            train_one_step(x, g)
        self.assertAllClose(layer.inputs_amax_history, amax_history_x)
        self.assertAllClose(layer.kernel_amax_history, amax_history_k)
        self.assertAllClose(layer.outputs_grad_amax_history, amax_history_g)
        self.assertAllClose(layer.inputs_scale, scale_x)
        self.assertAllClose(layer.kernel_scale, scale_k)
        self.assertAllClose(layer.outputs_grad_scale, scale_g)
    @pytest.mark.requires_trainable_backend
    def test_quantize_float8_fitting(self):
        """Float8-quantized Dense trains with fit(), round-trips through
        save/load, and (on TF) exports to SavedModel and reloads."""
        config = dict(units=16)
        layer = layers.Dense(**config)
        layer.build((None, 8))
        layer.quantize("float8")
        # All 8 float8 bookkeeping variables are trainable.
        self.assertLen(layer.trainable_weights, 8)
        self.assertLen(layer.non_trainable_weights, 0)
        # Try calling fit()
        x = np.random.random((64, 8))
        y = np.random.random((64, 16))
        model = models.Sequential([layer])
        model.compile(optimizer="sgd", loss="mse")
        model.fit(x, y, epochs=2)
        # Try saving and reloading the model
        temp_filepath = os.path.join(
            self.get_temp_dir(), "quantized_float8_model.keras"
        )
        model.save(temp_filepath)
        new_model = saving.load_model(temp_filepath)
        self.assertAllClose(model.predict(x), new_model.predict(x))
        # Try saving and reloading the model's weights only
        temp_filepath = os.path.join(
            self.get_temp_dir(), "quantized_float8_model.weights.h5"
        )
        model.save_weights(temp_filepath)
        # The fresh model must be quantized before loading the checkpoint.
        new_model = models.Sequential([layers.Dense(**config)])
        new_model.build((None, 8))
        new_model.quantize("float8")
        new_model.load_weights(temp_filepath)
        self.assertAllClose(model.predict(x), new_model.predict(x))
        # Test export and TFSMLayer reloading when using tensorflow backend
        if backend.backend() == "tensorflow":
            import tensorflow as tf

            temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
            ref_input = tf.random.normal((2, 8))
            ref_output = model(ref_input)
            model.export(temp_filepath, format="tf_saved_model")
            reloaded_layer = export.TFSMLayer(temp_filepath)
            self.assertAllClose(reloaded_layer(ref_input), ref_output)
            self.assertLen(reloaded_layer.weights, len(model.weights))
            self.assertLen(
                reloaded_layer.trainable_weights, len(model.trainable_weights)
            )
            self.assertLen(
                reloaded_layer.non_trainable_weights,
                len(model.non_trainable_weights),
            )
def test_quantize_float8_inference(self):
config = dict(units=16)
layer = layers.Dense(**config)
layer.build((None, 8))
layer.quantize("float8")
# Try calling with `training=False` and the result must match
# `training=True` because there is no update.
x = np.random.random((64, 8))
y_inference = layer(x, training=False)
y_training = layer(x, training=True)
self.assertAllClose(y_inference, y_training)
def test_gptq_serialization(self):
"""Test that a GPTQ-quantized layer can be serialized and deserialized
correctly."""
layer = layers.Dense(units=16)
layer.build((None, 8))
layer.quantize(
"gptq",
config=GPTQConfig(
dataset=None, tokenizer=None, weight_bits=4, group_size=8
),
)
config = layer.get_config()
new_layer = layers.Dense.from_config(config)
new_layer.build((None, 8))
self.assertEqual(new_layer.quantization_mode, "gptq")
def test_int4_kernel_returns_unpacked_form(self):
"""Test that the `kernel` property returns the unpacked int4 kernel."""
layer = layers.Dense(units=2)
layer.build((None, 2))
layer.quantize("int4")
packed_kernel = layer._kernel
self.assertAllClose(
layer.kernel, quantizers.unpack_int4(packed_kernel, 2)
)
def test_legacy_load_own_variables(self):
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/core/embedding_test.py | keras/src/layers/core/embedding_test.py | import os
import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import constraints
from keras.src import export
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import quantizers
from keras.src import saving
from keras.src.quantizers.quantization_config import Int4QuantizationConfig
from keras.src.quantizers.quantization_config import Int8QuantizationConfig
from keras.src.quantizers.quantizers import AbsMaxQuantizer
from keras.src.testing import test_case
class EmbeddingTest(test_case.TestCase):
    @parameterized.named_parameters(
        ("int8", "int8", {"axis": -1}),
        (
            "int4",
            "int4",
            {"axis": -1, "value_range": (-8, 7), "output_dtype": "int8"},
        ),
        ("int8_custom", "int8", {"axis": -1}),
    )
    def test_embedding_quantize_config(self, mode, weight_quantizer_args):
        """Test Embedding quantization with QuantizationConfig.

        Quantizes with a custom `AbsMaxQuantizer` wrapped in the
        mode-specific config class, then checks the stored weights are int8
        and that a lookup call still works.
        """
        layer = layers.Embedding(input_dim=10, output_dim=6)
        layer.build((None,))
        weight_quantizer = AbsMaxQuantizer(**weight_quantizer_args)
        if mode == "int8":
            config = Int8QuantizationConfig(
                weight_quantizer=weight_quantizer, activation_quantizer=None
            )
        elif mode == "int4":
            config = Int4QuantizationConfig(
                weight_quantizer=weight_quantizer, activation_quantizer=None
            )
        layer.quantize(mode, config=config)
        # Verify weights are quantized (int4 values are packed into int8).
        self.assertEqual(
            backend.standardize_dtype(layer._embeddings.dtype), "int8"
        )
        self.assertTrue(hasattr(layer, "embeddings_scale"))
        # Verify call works
        x = np.random.randint(0, 10, size=(2, 3))
        y = layer(x)
        self.assertEqual(y.shape, (2, 3, 6))
    @pytest.mark.requires_trainable_backend
    def test_embedding_basics(self):
        """Standard layer checks, with and without `mask_zero`."""
        self.run_layer_test(
            layers.Embedding,
            {"input_dim": 4, "output_dim": 3},
            input_shape=(2,),
            input_dtype="int32",
            expected_output_shape=(2, 3),
            expected_num_trainable_weights=1,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=False,
        )
        # `mask_zero=True` enables masking support.
        self.run_layer_test(
            layers.Embedding,
            {"input_dim": 5, "output_dim": 4, "mask_zero": True},
            input_shape=(2, 3),
            input_dtype="int64",
            expected_output_shape=(2, 3, 4),
            expected_num_trainable_weights=1,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=True,
        )
    @pytest.mark.skipif(
        not backend.SUPPORTS_SPARSE_TENSORS,
        reason="Backend does not support sparse tensors.",
    )
    def test_sparse(self):
        """Standard layer checks with a sparse integer input."""
        self.run_layer_test(
            layers.Embedding,
            {"input_dim": 5, "output_dim": 4},
            input_shape=(2, 3),
            input_dtype="int32",
            input_sparse=True,
            expected_output_shape=(2, 3, 4),
            expected_num_trainable_weights=1,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=False,
        )
    @pytest.mark.skipif(
        not backend.SUPPORTS_RAGGED_TENSORS,
        reason="Backend does not support ragged tensors.",
    )
    def test_ragged(self):
        """Standard layer checks with a ragged input; output stays ragged."""
        self.run_layer_test(
            layers.Embedding,
            {"input_dim": 5, "output_dim": 4},
            input_shape=(2, 3),
            input_dtype="int32",
            input_ragged=True,
            expected_output_shape=(2, None, 4),
            expected_output_ragged=True,
            expected_num_trainable_weights=1,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=False,
            # run_training_check=False,
        )
def test_correctness(self):
layer = layers.Embedding(input_dim=3, output_dim=2)
layer.build()
layer.embeddings.assign(np.array([[0.0, 0.0], [2.0, 2.0], [3.0, 3.0]]))
out = layer(np.array([2, 1, 0]))
self.assertAllClose(out, np.array([[3.0, 3.0], [2.0, 2.0], [0.0, 0.0]]))
    @pytest.mark.skipif(
        not backend.SUPPORTS_SPARSE_TENSORS,
        reason="Backend does not support sparse tensors.",
    )
    def test_correctness_sparse(self):
        """Lookups on a sparse input: implicit zeros map to row 0."""
        layer = layers.Embedding(input_dim=3, output_dim=2)
        layer.build()
        layer.embeddings.assign(np.array([[0.0, 0.0], [2.0, 2.0], [3.0, 3.0]]))
        # Same 2x3 sparse input in each backend's native sparse format:
        # index 2 at (0, 0) and index 1 at (1, 2).
        if backend.backend() == "tensorflow":
            import tensorflow as tf

            x = tf.SparseTensor([[0, 0], [1, 2]], [2, 1], (2, 3))
        elif backend.backend() == "jax":
            import jax.experimental.sparse as jax_sparse

            x = jax_sparse.BCOO(([2, 1], [[0, 0], [1, 2]]), shape=(2, 3))
        else:
            self.fail(f"Sparse is unsupported with backend {backend.backend()}")
        self.assertAllClose(
            layer(x),
            np.array(
                [
                    [[3.0, 3.0], [0.0, 0.0], [0.0, 0.0]],
                    [[0.0, 0.0], [0.0, 0.0], [2.0, 2.0]],
                ]
            ),
        )
def test_masking(self):
layer = layers.Embedding(input_dim=3, output_dim=2, mask_zero=True)
layer.build()
out = layer.compute_mask(np.array(([2, 1, 0])))
self.assertAllClose(out, np.array([True, True, False]))
def test_compute_mask_no_masking(self):
layer = layers.Embedding(input_dim=3, output_dim=2, mask_zero=False)
input_data = np.array([2, 1, 0])
mask = layer.compute_mask(input_data)
self.assertIsNone(mask)
def test_embedding_constraints(self):
layer = layers.Embedding(3, 2, embeddings_constraint="non_neg")
layer.build((None, 2))
self.assertIsInstance(layer.embeddings.constraint, constraints.NonNeg)
def test_weights_constructor_arg(self):
layer = layers.Embedding(3, 4, weights=np.ones((3, 4)))
self.assertAllClose(layer.embeddings.numpy(), np.ones((3, 4)))
layer = layers.Embedding(3, 4, weights=[np.ones((3, 4))])
self.assertAllClose(layer.embeddings.numpy(), np.ones((3, 4)))
    @pytest.mark.requires_trainable_backend
    def test_enable_lora(self):
        """End-to-end LoRA test for Embedding: weight counts, training
        updates both LoRA factors, and whole-model / weights-only save-load
        round-trips preserve predictions."""
        layer = layers.Embedding(10, 16)
        layer.build()
        layer.enable_lora(4)
        # After enabling LoRA: 2 trainable weights, 1 non-trainable.
        self.assertLen(layer.trainable_weights, 2)
        self.assertLen(layer.non_trainable_weights, 1)
        if backend.backend() == "torch":
            self.assertLen(layer.torch_params, 3)
        # Try eager call
        x = np.random.randint(0, 9, size=(64, 3))
        y = np.random.random((64, 3, 16))
        _ = layer(x[:2])
        init_lora_a_embeddings_value = layer.lora_embeddings_a.numpy()
        init_lora_b_embeddings_value = layer.lora_embeddings_b.numpy()
        # Try calling fit()
        model = models.Sequential(
            [
                layer,
            ]
        )
        model.compile(optimizer="sgd", loss="mse")
        model.fit(x, y)
        final_lora_a_embeddings_value = layer.lora_embeddings_a.numpy()
        final_lora_b_embeddings_value = layer.lora_embeddings_b.numpy()
        diff_a = np.max(
            np.abs(init_lora_a_embeddings_value - final_lora_a_embeddings_value)
        )
        diff_b = np.max(
            np.abs(init_lora_b_embeddings_value - final_lora_b_embeddings_value)
        )
        # Both LoRA factors must have moved during training.
        self.assertGreater(diff_a, 0.0)
        self.assertGreater(diff_b, 0.0)
        # Try saving and reloading the model
        temp_filepath = os.path.join(self.get_temp_dir(), "lora_model.keras")
        model.save(temp_filepath)
        new_model = saving.load_model(temp_filepath)
        self.assertTrue(new_model.layers[0].lora_enabled)
        self.assertAllClose(model.predict(x), new_model.predict(x))
        # Try saving and reloading the model's weights only
        temp_filepath = os.path.join(
            self.get_temp_dir(), "lora_model.weights.h5"
        )
        model.save_weights(temp_filepath)
        # Load the file into a fresh, non-lora model
        new_model = models.Sequential(
            [
                layers.Input((3,), dtype="int32"),
                layers.Embedding(10, 16),
            ]
        )
        new_model.load_weights(temp_filepath)
        self.assertAllClose(model.predict(x), new_model.predict(x))
        # Try loading a normal checkpoint into a lora model
        new_model.save_weights(temp_filepath)
        model.load_weights(temp_filepath)
        self.assertAllClose(model.predict(x), new_model.predict(x))
    @pytest.mark.requires_trainable_backend
    def test_enable_lora_with_alpha(self):
        """The effective embeddings equal the base table plus the LoRA delta
        scaled by `lora_alpha / lora_rank`."""
        # Create an `Embedding` layer without specifying `lora_rank`
        layer = layers.Embedding(input_dim=3, output_dim=2)
        layer.build((None,))  # Build the layer
        # Set the base embeddings to known values.
        base_emb = np.array(
            [[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]], dtype=np.float32
        )
        layer.embeddings.assign(base_emb)
        # Enable LoRA with a custom alpha: `rank`=2, `lora_alpha`=3.0.
        layer.enable_lora(2, lora_alpha=3.0)
        self.assertEqual(layer.lora_rank, 2)
        self.assertEqual(layer.lora_alpha, 3.0)
        # Manually assign known values to lora weights.
        a_val = np.array([[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], dtype=np.float32)
        b_val = np.array([[0.5, 0.5], [0.6, 0.6]], dtype=np.float32)
        layer.lora_embeddings_a.assign(a_val)
        layer.lora_embeddings_b.assign(b_val)
        # Compute the expected delta.
        # Scaling factor: (3.0 / 2) = 1.5
        effective_delta = 1.5 * np.matmul(a_val, b_val)
        expected_embeddings = base_emb + effective_delta
        # Verify that the effective embeddings match expectation.
        actual_embeddings = ops.convert_to_numpy(layer.embeddings)
        self.assertAllClose(
            actual_embeddings, expected_embeddings, tpu_atol=1e-3, tpu_rtol=1e-3
        )
    @pytest.mark.requires_trainable_backend
    def test_lora_rank_argument(self):
        """Passing `lora_rank` at construction enables LoRA: 2 trainable
        weights (the LoRA factors) and 1 non-trainable weight."""
        self.run_layer_test(
            layers.Embedding,
            init_kwargs={"input_dim": 5, "output_dim": 4, "lora_rank": 2},
            input_shape=(2, 3),
            input_dtype="int32",
            expected_output_shape=(2, 3, 4),
            expected_num_trainable_weights=2,
            expected_num_non_trainable_weights=1,
            expected_num_seed_generators=0,
            expected_num_losses=0,
            supports_masking=False,
        )
def test_enable_lora_with_embeddings_constraint(self):
layer = layers.Embedding(
input_dim=10, output_dim=16, embeddings_constraint="max_norm"
)
with self.assertRaisesRegex(
ValueError, "incompatible with embedding constraints"
):
layer.enable_lora(rank=2)
def test_enable_lora_when_already_enabled(self):
layer = layers.Embedding(input_dim=10, output_dim=16)
layer.build()
layer.enable_lora(rank=2)
with self.assertRaisesRegex(ValueError, "lora is already enabled"):
layer.enable_lora(rank=2)
    # Test quantization-related methods.
    @parameterized.named_parameters(
        ("int8", "int8"),
        ("int4", "int4"),
    )
    def test_quantize_int(self, mode):
        """Int8/int4 quantization of Embedding: weight dtypes, unpacked
        int4 embeddings, output closeness, and save/load round-trips."""
        layer = layers.Embedding(10, 16)
        layer.build()
        x = np.random.randint(0, 9, size=(64, 3))
        y_float = layer(x)
        layer.quantize(mode)
        # Verify the dtype of the weights.
        # The embeddings's dtype is int8, despite the int4 quantization, because
        # we pack the int4 values into int8.
        self.assertEqual(
            backend.standardize_dtype(layer._embeddings.dtype), "int8"
        )
        self.assertEqual(
            backend.standardize_dtype(layer.embeddings_scale.dtype),
            layer.variable_dtype,
        )
        # Verify the unpacked embeddings for int4 quantization.
        if mode == "int4":
            self.assertAllClose(
                layer.embeddings,
                quantizers.unpack_int4(
                    layer._embeddings, layer.output_dim, axis=-1
                ),
            )
        # Verify the correctness of the outputs.
        y_quantized = layer(x)
        mse = ops.mean(ops.square(y_float - y_quantized))
        self.assertLess(mse, 1e-3)  # A weak correctness test
        # Check model save / load round-trip.
        model = models.Sequential([layer])
        temp_filepath = os.path.join(
            self.get_temp_dir(), "quantized_model.keras"
        )
        model.save(temp_filepath)
        new_model = saving.load_model(temp_filepath)
        self.assertAllClose(model.predict(x), new_model.predict(x))
        # Check weights-only save / load round-trip.
        temp_filepath = os.path.join(
            self.get_temp_dir(), "quantized_model.weights.h5"
        )
        model.save_weights(temp_filepath)
        # The target model must be quantized before loading the checkpoint.
        new_model = models.Sequential([layers.Embedding(10, 16)])
        new_model.build((None, 3))
        new_model.quantize(mode)
        new_model.load_weights(temp_filepath)
        self.assertAllClose(model.predict(x), new_model.predict(x))
@parameterized.named_parameters(
("int8", "int8"),
("int4", "int4"),
)
def test_quantize_on_unbuilt_layer(self, mode):
layer = layers.Embedding(10, 16)
with self.assertRaisesRegex(
ValueError, "Cannot quantize a layer that isn't yet built."
):
layer.quantize(mode)
@parameterized.named_parameters(
("int8", "int8"),
("int4", "int4"),
)
def test_quantize_on_subclass(self, mode):
class MyEmbedding(layers.Embedding):
pass
layer = MyEmbedding(10, 16)
layer.build()
with self.assertRaises(NotImplementedError):
layer.quantize(mode)
layer.quantize(mode, type_check=False) # No error
@parameterized.named_parameters(
    ("int8", "int8"),
    ("int4", "int4"),
)
def test_quantize_when_already_quantized(self, mode):
    """Re-quantizing an already-quantized layer raises for every mode."""
    error_regex = "is already quantized with dtype_policy="

    # Case 1: quantized through an explicit `quantize()` call.
    quantized_layer = layers.Embedding(10, 16)
    quantized_layer.build()
    quantized_layer.quantize(mode)
    for requested_mode in ("int8", "int4"):
        with self.assertRaisesRegex(ValueError, error_regex):
            quantized_layer.quantize(requested_mode)

    # Case 2: quantized at construction time via the dtype policy.
    policy_layer = layers.Embedding(10, 16, dtype=f"{mode}_from_float32")
    policy_layer.build()
    for requested_mode in ("int8", "int4"):
        with self.assertRaisesRegex(ValueError, error_regex):
            policy_layer.quantize(requested_mode)
@parameterized.named_parameters(
    ("int8", "int8_from_float32", 2),
    ("int4", "int4_from_float32", 2),
)
def test_quantize_by_setting_dtype_policy(
    self, policy, expected_num_variables
):
    """Assigning a quantized dtype policy rebuilds quantized variables."""
    embedding = layers.Embedding(10, 16)
    embedding.build()
    embedding.dtype_policy = policy
    # Quantized embeddings plus their scale: two variables in total.
    self.assertLen(embedding.variables, expected_num_variables)
@parameterized.named_parameters(
    ("int7", "int7"),
    ("float7", "float7"),
)
def test_quantize_invalid_mode(self, mode):
    """Unsupported quantization modes must fail at every entry point.

    Checks `quantize`, `quantized_build`, and `quantized_call`, and that a
    failed quantization never mutates the layer's dtype policy.
    """
    layer = layers.Embedding(10, 16)
    layer.build()
    x = np.random.randint(0, 9, size=(1, 3))
    # dtype_policy should not be altered by failed quantization
    original_dtype_policy = layer.dtype_policy

    # Test quantize
    with self.assertRaisesRegex(ValueError, "Invalid quantization mode."):
        layer.quantize(mode)
    self.assertEqual(layer.dtype_policy, original_dtype_policy)

    # Test quantized_build
    with self.assertRaisesRegex(
        NotImplementedError, "Invalid quantization mode."
    ):
        layer.quantized_build((None, 2), mode)
    self.assertEqual(layer.dtype_policy, original_dtype_policy)

    # Test quantized_call
    with self.assertRaisesRegex(
        NotImplementedError, "Invalid quantization mode."
    ):
        # Explicitly set quantization_mode on the private policy so that
        # `quantized_call` takes the quantized dispatch path.
        layer._dtype_policy._quantization_mode = mode
        layer.quantized_call(x)
    self.assertEqual(layer.dtype_policy, original_dtype_policy)
@parameterized.named_parameters(
    ("int8", "int8_from_mixed_bfloat16", 0, 2),
    ("int4", "int4_from_mixed_bfloat16", 0, 2),
)
@pytest.mark.requires_trainable_backend
def test_quantize_dtype_argument(
    self, dtype, num_trainable_weights, num_non_trainable_weights
):
    """A quantized dtype policy can be passed directly via `dtype`."""
    # Plain embedding (no masking).
    self.run_layer_test(
        layers.Embedding,
        {"input_dim": 4, "output_dim": 3, "dtype": dtype},
        input_shape=(2,),
        input_dtype="int32",
        expected_output_shape=(2, 3),
        expected_num_trainable_weights=num_trainable_weights,
        expected_num_non_trainable_weights=num_non_trainable_weights,
        expected_num_seed_generators=0,
        expected_num_losses=0,
        supports_masking=False,
    )
    # With `mask_zero=True`, masking must still be supported.
    self.run_layer_test(
        layers.Embedding,
        {
            "input_dim": 5,
            "output_dim": 4,
            "mask_zero": True,
            "dtype": dtype,
        },
        input_shape=(2, 3),
        input_dtype="int64",
        expected_output_shape=(2, 3, 4),
        expected_num_trainable_weights=num_trainable_weights,
        expected_num_non_trainable_weights=num_non_trainable_weights,
        expected_num_seed_generators=0,
        expected_num_losses=0,
        supports_masking=True,
    )
@parameterized.named_parameters(
    ("int8", "int8", 2, 2, 4),
    ("int4", "int4", 2, 2, 4),
)
@pytest.mark.requires_trainable_backend
def test_quantize_lora_integration(
    self,
    mode,
    num_trainable_weights,
    num_non_trainable_weights,
    num_torch_params,
):
    """End-to-end check that LoRA and quantization compose.

    Covers weight counts, training updates to the LoRA factors, model and
    weights-only save/load round-trips, and (on the TensorFlow backend)
    SavedModel export via `TFSMLayer`.
    """
    layer = layers.Embedding(10, 16)
    layer.build()
    layer.enable_lora(4)
    layer.quantize(mode)
    self.assertLen(layer.trainable_weights, num_trainable_weights)
    self.assertLen(layer.non_trainable_weights, num_non_trainable_weights)
    if backend.backend() == "torch":
        self.assertLen(layer.torch_params, num_torch_params)

    # Try calling fit()
    init_lora_a_embeddings_value = layer.lora_embeddings_a.numpy()
    init_lora_b_embeddings_value = layer.lora_embeddings_b.numpy()
    x = np.random.randint(0, 9, size=(64, 3))
    y = np.random.random((64, 3, 16))
    model = models.Sequential([layer])
    model.compile(optimizer="sgd", loss="mse")
    model.fit(x, y)
    final_lora_a_embeddings_value = layer.lora_embeddings_a.numpy()
    final_lora_b_embeddings_value = layer.lora_embeddings_b.numpy()
    diff_a = np.max(
        np.abs(init_lora_a_embeddings_value - final_lora_a_embeddings_value)
    )
    diff_b = np.max(
        np.abs(init_lora_b_embeddings_value - final_lora_b_embeddings_value)
    )
    # Training must have moved both LoRA factors.
    self.assertGreater(diff_a, 0.0)
    self.assertGreater(diff_b, 0.0)

    # Try saving and reloading the model
    temp_filepath = os.path.join(
        self.get_temp_dir(), "quantized_lora_model.keras"
    )
    model.save(temp_filepath)
    new_model = saving.load_model(temp_filepath)
    self.assertTrue(new_model.layers[0].lora_enabled)
    self.assertAllClose(model.predict(x), new_model.predict(x), atol=0.5)

    # Try saving and reloading the model's weights only
    temp_filepath = os.path.join(
        self.get_temp_dir(), "quantized_lora_model.weights.h5"
    )
    model.save_weights(temp_filepath)
    new_model = models.Sequential(
        [layers.Input((3,), dtype="int32"), layers.Embedding(10, 16)]
    )
    new_model.quantize(mode)
    new_model.load_weights(temp_filepath)
    # A weights-only reload does not restore the LoRA-enabled state.
    self.assertFalse(new_model.layers[0].lora_enabled)
    self.assertAllClose(model.predict(x), new_model.predict(x), atol=0.5)

    # Try loading a normal checkpoint into a lora model
    new_model.save_weights(temp_filepath)
    model.load_weights(temp_filepath)
    self.assertAllClose(model.predict(x), new_model.predict(x), atol=0.5)

    # Test export and TFSMLayer reloading when using tensorflow backend
    if backend.backend() == "tensorflow":
        import tensorflow as tf

        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
        ref_input = tf.random.normal((32, 3))
        ref_output = model(ref_input)
        model.export(temp_filepath, format="tf_saved_model")
        reloaded_layer = export.TFSMLayer(temp_filepath)
        self.assertAllClose(
            reloaded_layer(ref_input), ref_output, atol=1e-7
        )
        self.assertLen(reloaded_layer.weights, len(model.weights))
        self.assertLen(
            reloaded_layer.trainable_weights, len(model.trainable_weights)
        )
        self.assertLen(
            reloaded_layer.non_trainable_weights,
            len(model.non_trainable_weights),
        )
def test_legacy_load_own_variables(self):
    """Checkpoints saved with legacy numeric variable keys still load."""
    # In previous versions, `load_own_variables` accepted a store with
    # numeric keys.
    float32_store = {
        "0": np.random.random((10, 16)).astype("float32"),
    }
    int8_store = {
        "0": np.random.randint(-128, 127, size=(10, 16), dtype="int8"),
        "1": np.random.random((10,)).astype("float32"),
    }
    # int4 weights are stored packed (two nibbles per byte), hence the
    # halved second dimension (8 instead of 16).
    int4_store = {
        "0": np.random.randint(-128, 127, size=(10, 8), dtype="int8"),
        "1": np.random.random((10,)).astype("float32"),
    }

    # Test float32 layer.
    layer = layers.Embedding(10, 16)
    layer.build()
    layer.load_own_variables(float32_store)
    self.assertAllClose(layer._embeddings, float32_store["0"])

    # Test int8-quantized layer.
    layer = layers.Embedding(10, 16, dtype="int8_from_float32")
    layer.build()
    layer.load_own_variables(int8_store)
    self.assertAllClose(layer._embeddings, int8_store["0"])
    self.assertAllClose(layer.embeddings_scale, int8_store["1"])

    # Test int4-quantized layer.
    layer = layers.Embedding(10, 16, dtype="int4_from_float32")
    layer.build()
    layer.load_own_variables(int4_store)
    self.assertAllClose(layer._embeddings, int4_store["0"])
    self.assertAllClose(layer.embeddings_scale, int4_store["1"])
def test_embedding_int8_custom_quantizer(self):
    """Round-trip a custom int8 weight quantizer through layer config."""
    value_range = (-50, 50)
    quant_config = Int8QuantizationConfig(
        weight_quantizer=AbsMaxQuantizer(axis=-1, value_range=value_range),
    )

    # Build a layer and quantize it with the custom config.
    original = layers.Embedding(input_dim=100, output_dim=16)
    original.build(None)
    original.quantize("int8", config=quant_config)

    # Serialize, then rebuild a fresh layer from the config.
    restored = layers.Embedding.from_config(original.get_config())

    # The restored layer must carry an equivalent quantization config.
    self.assertIsInstance(
        restored.quantization_config, Int8QuantizationConfig
    )
    restored_quantizer = restored.quantization_config.weight_quantizer
    self.assertIsInstance(restored_quantizer, AbsMaxQuantizer)
    self.assertEqual(restored_quantizer.axis, (-1,))
    self.assertAllEqual(restored_quantizer.value_range, value_range)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/core/reversible_embedding.py | keras/src/layers/core/reversible_embedding.py | import copy
from keras.src import dtype_policies
from keras.src import layers
from keras.src import ops
from keras.src import quantizers
from keras.src.api_export import keras_export
from keras.src.backend import KerasTensor
from keras.src.quantizers.quantization_config import QuantizationConfig
@keras_export("keras.layers.ReversibleEmbedding")
class ReversibleEmbedding(layers.Embedding):
    """An embedding layer which can project backwards to the input dim.

    This layer is an extension of `keras.layers.Embedding` for language models.
    This layer can be called "in reverse" with `reverse=True`, in which case
    the layer will linearly project from `output_dim` back to `input_dim`.

    By default, the reverse projection will use the transpose of the
    `embeddings` weights to project to `input_dim` (weights are "tied"). If
    `tie_weights=False`, the model will use a separate, trainable variable for
    reverse projection.

    This layer has no bias terms.

    Args:
        input_dim: Integer. Size of the vocabulary,
            i.e. maximum integer index + 1.
        output_dim: Integer. Dimension of the dense embedding.
        tie_weights: Boolean, whether or not the matrix for embedding and
            the matrix for the `reverse` projection should share the same
            weights.
        embeddings_initializer: Initializer for the `embeddings`
            matrix (see `keras.initializers`).
        embeddings_regularizer: Regularizer function applied to
            the `embeddings` matrix (see `keras.regularizers`).
        embeddings_constraint: Constraint function applied to
            the `embeddings` matrix (see `keras.constraints`).
        mask_zero: Boolean, whether or not the input value 0 is a special
            "padding" value that should be masked out.
        reverse_dtype: The dtype for the reverse projection computation.
            Defaults to the `compute_dtype` of the layer.
        logit_soft_cap: If `logit_soft_cap` is set and `reverse=True`, the
            output logits will be scaled by
            `tanh(logits / logit_soft_cap) * logit_soft_cap`. This narrows the
            range of output logits and can improve training.
        **kwargs: other keyword arguments passed to `keras.layers.Embedding`,
            including `name`, `trainable`, `dtype` etc.

    Call arguments:
        inputs: The tensor inputs to the layer.
        reverse: Boolean. If `True` the layer will perform a linear projection
            from `output_dim` to `input_dim`, instead of a normal embedding
            call. Default to `False`.

    Example:
    ```python
    batch_size = 16
    vocab_size = 100
    hidden_dim = 32
    seq_length = 50

    # Generate random inputs.
    token_ids = np.random.randint(vocab_size, size=(batch_size, seq_length))

    embedding = keras.layers.ReversibleEmbedding(vocab_size, hidden_dim)
    # Embed tokens to shape `(batch_size, seq_length, hidden_dim)`.
    hidden_states = embedding(token_ids)
    # Project hidden states to shape `(batch_size, seq_length, vocab_size)`.
    logits = embedding(hidden_states, reverse=True)
    ```

    References:
    - [Vaswani et al., 2017](https://arxiv.org/abs/1706.03762)
    - [Press and Wolf, 2016](https://arxiv.org/abs/1608.05859)
    """

    def __init__(
        self,
        input_dim,
        output_dim,
        tie_weights=True,
        embeddings_initializer="uniform",
        embeddings_regularizer=None,
        embeddings_constraint=None,
        mask_zero=False,
        reverse_dtype=None,
        logit_soft_cap=None,
        **kwargs,
    ):
        super().__init__(
            input_dim,
            output_dim,
            embeddings_initializer=embeddings_initializer,
            embeddings_regularizer=embeddings_regularizer,
            embeddings_constraint=embeddings_constraint,
            mask_zero=mask_zero,
            **kwargs,
        )
        self.tie_weights = tie_weights
        self.reverse_dtype = reverse_dtype
        self.logit_soft_cap = logit_soft_cap

    def build(self, inputs_shape=None):
        """Create weights; adds `reverse_embeddings` when weights are untied.

        For int8/int4 quantization modes the reverse weights are created by
        `quantized_build()` instead, so they are skipped here.
        """
        super().build(inputs_shape)
        if not self.tie_weights and self.quantization_mode not in (
            "int8",
            "int4",
        ):
            self.reverse_embeddings = self.add_weight(
                shape=(self.output_dim, self.input_dim),
                initializer=self.embeddings_initializer,
                name="reverse_embeddings",
                trainable=True,
            )

    def call(self, inputs, reverse=False):
        """Embed token ids, or project hidden states back to logits.

        With `reverse=True`, computes `inputs @ kernel` where `kernel` is
        the transposed embedding matrix (tied) or `reverse_embeddings`.
        """
        if not reverse:
            return super().call(inputs)
        else:
            if self.tie_weights:
                kernel = ops.transpose(ops.convert_to_tensor(self.embeddings))
            else:
                kernel = self.reverse_embeddings
            if self.reverse_dtype is not None:
                inputs = ops.cast(inputs, self.reverse_dtype)
                kernel = ops.cast(kernel, self.reverse_dtype)
            logits = ops.matmul(inputs, kernel)
            # Optionally soft-cap logits.
            if self.logit_soft_cap is not None:
                soft_cap = self.logit_soft_cap
                logits = ops.multiply(
                    ops.tanh(ops.divide(logits, soft_cap)), soft_cap
                )
            return logits

    def compute_output_shape(self, input_shape, reverse=False):
        output_shape = list(input_shape)
        if reverse:
            # Reverse projection maps the feature axis back to `input_dim`.
            output_shape[-1] = self.input_dim
        else:
            # Forward embedding appends an `output_dim` feature axis.
            output_shape += [self.output_dim]
        return output_shape

    def compute_output_spec(self, inputs, reverse=False):
        output_shape = list(inputs.shape)
        if reverse:
            output_shape[-1] = self.input_dim
        else:
            output_shape += [self.output_dim]
        return KerasTensor(output_shape, dtype=self.compute_dtype)

    def get_config(self):
        config = super().get_config()
        config.update(
            {
                "tie_weights": self.tie_weights,
                "reverse_dtype": self.reverse_dtype,
                "logit_soft_cap": self.logit_soft_cap,
            }
        )
        return config

    @property
    def variable_serialization_spec(self):
        # Avoid modifying the parent's spec.
        _spec = copy.deepcopy(super().variable_serialization_spec)
        if not self.tie_weights:
            # Untied reverse weights (and their scales under int4/int8
            # quantization) also take part in (de)serialization.
            for mode, variable_spec in _spec.items():
                variable_spec.append("reverse_embeddings")
                if mode in ("int4", "int8"):
                    variable_spec.append("reverse_embeddings_scale")
        return _spec

    def quantized_build(self, embeddings_shape, mode, config=None):
        """Create quantized variables for `mode` ("int8" or "int4")."""
        if mode == "int8":
            self._int8_build(embeddings_shape, config)
        elif mode == "int4":
            self._int4_build(embeddings_shape, config)
        else:
            raise self._quantization_mode_error(mode)
        self._is_quantized = True

    def _int8_build(self, embeddings_shape, config=None):
        # Create int8 storage for the embeddings plus an input quantizer,
        # and (when untied) int8 reverse weights with per-column scales.
        if embeddings_shape is None:
            embeddings_shape = (self.input_dim, self.output_dim)
        # NOTE(review): unlike `_int4_build` below, `config` is not
        # forwarded to the parent's build here — confirm this is intentional.
        super()._int8_build(embeddings_shape=embeddings_shape)
        self.inputs_quantizer = (
            QuantizationConfig.activation_quantizer_or_default(
                config, quantizers.AbsMaxQuantizer(axis=-1)
            )
        )
        if not self.tie_weights:
            self.reverse_embeddings = self.add_weight(
                name="reverse_embeddings",
                shape=(self.output_dim, self.input_dim),
                initializer="zeros",
                dtype="int8",
                trainable=False,
            )
            self.reverse_embeddings_scale = self.add_weight(
                name="reverse_embeddings_scale",
                shape=(self.input_dim,),
                initializer="ones",
                trainable=False,
            )

    def _int4_build(self, embeddings_shape, config=None):
        # Like `_int8_build`, but reverse weights are stored packed: two
        # int4 values per int8 byte along axis 0.
        if embeddings_shape is None:
            embeddings_shape = (self.input_dim, self.output_dim)
        super()._int4_build(embeddings_shape=embeddings_shape, config=config)
        self.inputs_quantizer = (
            QuantizationConfig.activation_quantizer_or_default(
                config, quantizers.AbsMaxQuantizer(axis=-1)
            )
        )
        if not self.tie_weights:
            packed_rows = (self.output_dim + 1) // 2  # ceil for odd dims
            self.reverse_embeddings = self.add_weight(
                name="reverse_embeddings",
                shape=(packed_rows, self.input_dim),
                initializer="zeros",
                dtype="int8",
                trainable=False,
            )
            self.reverse_embeddings_scale = self.add_weight(
                name="reverse_embeddings_scale",
                shape=(self.input_dim,),
                initializer="ones",
                trainable=False,
            )

    def _int8_call(self, inputs, reverse=False):
        # Forward pass delegates to the parent; reverse pass does a
        # quantized matmul followed by de-scaling with both the input and
        # the weight scales.
        if not reverse:
            return super()._int8_call(inputs)
        else:
            if self.tie_weights:
                kernel = ops.transpose(self._embeddings)
                scale = ops.transpose(self.embeddings_scale)
            else:
                kernel = self.reverse_embeddings
                scale = self.reverse_embeddings_scale
            if self.inputs_quantizer:
                inputs, inputs_scale = self.inputs_quantizer(inputs)
            else:
                inputs_scale = ops.ones((1,), dtype=self.compute_dtype)
            logits = ops.matmul(inputs, kernel)
            # De-scale outputs
            logits = ops.cast(logits, self.compute_dtype)
            logits = ops.divide(logits, ops.multiply(inputs_scale, scale))
            # Optionally soft-cap logits.
            if self.logit_soft_cap is not None:
                soft_cap = self.logit_soft_cap
                logits = ops.multiply(
                    ops.tanh(ops.divide(logits, soft_cap)), soft_cap
                )
            return logits

    def _int4_call(self, inputs, reverse=False):
        # Same structure as `_int8_call`, with an extra unpacking step to
        # recover int4 values from their packed int8 storage.
        if not reverse:
            return super()._int4_call(inputs)
        else:
            if self.tie_weights:
                embeddings = ops.transpose(self._embeddings)
                scale = ops.transpose(self.embeddings_scale)
            else:
                embeddings = self.reverse_embeddings
                scale = self.reverse_embeddings_scale
            unpacked_embeddings = quantizers.unpack_int4(
                embeddings, self.output_dim, axis=0
            )
            if self.inputs_quantizer:
                inputs, inputs_scale = self.inputs_quantizer(inputs)
            else:
                inputs_scale = ops.ones((1,), dtype=self.compute_dtype)
            logits = ops.matmul(inputs, unpacked_embeddings)
            # De-scale outputs
            logits = ops.cast(logits, self.compute_dtype)
            logits = ops.divide(logits, ops.multiply(inputs_scale, scale))
            # Optionally soft-cap logits.
            if self.logit_soft_cap is not None:
                soft_cap = self.logit_soft_cap
                logits = ops.multiply(
                    ops.tanh(ops.divide(logits, soft_cap)), soft_cap
                )
            return logits

    def quantize(self, mode=None, type_check=True, config=None):
        """Quantize the embedding (and reverse) weights in place.

        Replaces the float weights with quantized variables plus scales,
        then switches the layer's dtype policy to the quantized policy.
        """
        if type_check and type(self) is not ReversibleEmbedding:
            raise self._not_implemented_error(self.quantize)

        self.quantization_config = config
        embeddings_shape = (self.input_dim, self.output_dim)
        if mode == "int8":
            # Quantize `self._embeddings` to int8 and compute corresponding
            # scale.
            weight_quantizer = QuantizationConfig.weight_quantizer_or_default(
                self.quantization_config, quantizers.AbsMaxQuantizer(axis=-1)
            )
            embeddings_value, embeddings_scale = weight_quantizer(
                self._embeddings, to_numpy=True
            )
            embeddings_scale = ops.squeeze(embeddings_scale, axis=-1)
            del self._embeddings
            if not self.tie_weights:
                reverse_weight_quantizer = (
                    QuantizationConfig.weight_quantizer_or_default(
                        self.quantization_config,
                        quantizers.AbsMaxQuantizer(axis=0),
                    )
                )
                reverse_embeddings_value, reverse_embeddings_scale = (
                    reverse_weight_quantizer(
                        self.reverse_embeddings, to_numpy=True
                    )
                )
                reverse_embeddings_scale = ops.squeeze(
                    reverse_embeddings_scale, axis=0
                )
                del self.reverse_embeddings
            self.quantized_build(
                embeddings_shape, mode, self.quantization_config
            )
            self._embeddings.assign(embeddings_value)
            self.embeddings_scale.assign(embeddings_scale)
            if not self.tie_weights:
                self.reverse_embeddings.assign(reverse_embeddings_value)
                self.reverse_embeddings_scale.assign(reverse_embeddings_scale)
        elif mode == "int4":
            # Quantize to int4 values (stored in int8 dtype, range [-8, 7]).
            weight_quantizer = QuantizationConfig.weight_quantizer_or_default(
                self.quantization_config,
                quantizers.AbsMaxQuantizer(
                    axis=-1,
                    value_range=(-8, 7),
                    output_dtype="int8",
                ),
            )
            embeddings_value, embeddings_scale = weight_quantizer(
                self._embeddings, to_numpy=True
            )
            embeddings_scale = ops.squeeze(embeddings_scale, axis=-1)
            # 2. Pack two int4 values into a single int8 byte.
            packed_embeddings_value, _, _ = quantizers.pack_int4(
                embeddings_value, axis=-1
            )
            del self._embeddings
            if not self.tie_weights:
                reverse_weight_quantizer = (
                    QuantizationConfig.weight_quantizer_or_default(
                        self.quantization_config,
                        quantizers.AbsMaxQuantizer(
                            axis=0,
                            value_range=(-8, 7),
                            output_dtype="int8",
                        ),
                    )
                )
                reverse_embeddings_value, reverse_embeddings_scale = (
                    reverse_weight_quantizer(
                        self.reverse_embeddings, to_numpy=True
                    )
                )
                reverse_embeddings_scale = ops.squeeze(
                    reverse_embeddings_scale, axis=0
                )
                # Pack two int4 values into a single int8 byte.
                packed_reverse_embeddings_value, _, _ = quantizers.pack_int4(
                    reverse_embeddings_value, axis=0
                )
                del self.reverse_embeddings
            self.quantized_build(
                embeddings_shape, mode, self.quantization_config
            )
            self._embeddings.assign(packed_embeddings_value)
            self.embeddings_scale.assign(embeddings_scale)
            if not self.tie_weights:
                self.reverse_embeddings.assign(packed_reverse_embeddings_value)
                self.reverse_embeddings_scale.assign(reverse_embeddings_scale)
        else:
            raise self._quantization_mode_error(mode)

        # Set new dtype policy.
        if self.dtype_policy.quantization_mode is None:
            policy = dtype_policies.get(f"{mode}_from_{self.dtype_policy.name}")
            self.dtype_policy = policy
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/regularization/gaussian_noise.py | keras/src/layers/regularization/gaussian_noise.py | from keras.src import backend
from keras.src import layers
from keras.src import ops
from keras.src.api_export import keras_export
@keras_export("keras.layers.GaussianNoise")
class GaussianNoise(layers.Layer):
    """Apply additive zero-centered Gaussian noise.

    This is useful to mitigate overfitting
    (you could see it as a form of random data augmentation).
    Gaussian Noise (GS) is a natural choice as corruption process
    for real valued inputs.

    As it is a regularization layer, it is only active at training time.

    Args:
        stddev: Float, standard deviation of the noise distribution.
        seed: Integer, optional random seed to enable deterministic behavior.

    Call arguments:
        inputs: Input tensor (of any rank).
        training: Python boolean indicating whether the layer should behave in
            training mode (adding noise) or in inference mode (doing nothing).
    """

    def __init__(self, stddev, seed=None, **kwargs):
        super().__init__(**kwargs)
        if not 0 <= stddev <= 1:
            # Fix: the first two message fragments carried spurious `f`
            # prefixes (no placeholders, ruff F541); the text is unchanged.
            raise ValueError(
                "Invalid value received for argument "
                "`stddev`. Expected a float value between 0 and 1. "
                f"Received: stddev={stddev}"
            )
        self.stddev = stddev
        self.seed = seed
        # A seed generator is only needed when noise will actually be added.
        if stddev > 0:
            self.seed_generator = backend.random.SeedGenerator(seed)
        self.supports_masking = True
        self._build_at_init()

    def call(self, inputs, training=False):
        # Noise is added only in training mode; inference is the identity.
        if training and self.stddev > 0:
            return inputs + backend.random.normal(
                shape=ops.shape(inputs),
                mean=0.0,
                stddev=self.stddev,
                dtype=self.compute_dtype,
                seed=self.seed_generator,
            )
        return inputs

    def compute_output_shape(self, input_shape):
        # Additive noise preserves the input shape.
        return input_shape

    def get_config(self):
        base_config = super().get_config()
        config = {
            "stddev": self.stddev,
            "seed": self.seed,
        }
        return {**base_config, **config}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/regularization/alpha_dropout.py | keras/src/layers/regularization/alpha_dropout.py | from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.AlphaDropout")
class AlphaDropout(Layer):
    """Applies Alpha Dropout to the input.

    Alpha Dropout is a `Dropout` that keeps mean and variance of inputs
    to their original values, in order to ensure the self-normalizing property
    even after this dropout.
    Alpha Dropout fits well to Scaled Exponential Linear Units (SELU) by
    randomly setting activations to the negative saturation value.

    Args:
        rate: Float between 0 and 1. The multiplicative noise will have
            standard deviation `sqrt(rate / (1 - rate))`.
        noise_shape: 1D integer tensor representing the shape of the
            binary alpha dropout mask that will be multiplied with the input.
            For instance, if your inputs have shape
            `(batch_size, timesteps, features)` and
            you want the alpha dropout mask to be the same for all timesteps,
            you can use `noise_shape=(batch_size, 1, features)`.
        seed: A Python integer to use as random seed.

    Call arguments:
        inputs: Input tensor (of any rank).
        training: Python boolean indicating whether the layer should behave in
            training mode (adding alpha dropout) or in inference mode
            (doing nothing).
    """

    def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
        super().__init__(**kwargs)
        if not 0 <= rate <= 1:
            # Fix: the first two message fragments carried spurious `f`
            # prefixes (no placeholders, ruff F541); the text is unchanged.
            raise ValueError(
                "Invalid value received for argument "
                "`rate`. Expected a float value between 0 and 1. "
                f"Received: rate={rate}"
            )
        self.rate = rate
        self.seed = seed
        self.noise_shape = noise_shape
        # A seed generator is only needed when dropout can actually occur.
        if rate > 0:
            self.seed_generator = backend.random.SeedGenerator(seed)
        self.supports_masking = True
        self._build_at_init()

    def call(self, inputs, training=False):
        if training and self.rate > 0:
            noise_shape = self._get_concrete_noise_shape(
                inputs, self.noise_shape
            )
            # SELU constants (presumably from Klambauer et al., 2017);
            # `alpha_p` is the negative saturation value dropped units take.
            alpha = 1.6732632423543772848170429916717
            scale = 1.0507009873554804934193349852946
            alpha_p = -alpha * scale

            # Units survive with probability `1 - rate`.
            kept_idx = ops.greater_equal(
                ops.random.uniform(noise_shape, seed=self.seed_generator),
                self.rate,
            )
            kept_idx = ops.cast(kept_idx, inputs.dtype)

            # Compute affine transformation parameters
            a = ((1 - self.rate) * (1 + self.rate * alpha_p**2)) ** -0.5
            b = -a * alpha_p * self.rate

            # Apply mask
            x = inputs * kept_idx + alpha_p * (1 - kept_idx)
            # Affine correction restores the input's mean and variance.
            return a * x + b
        return inputs

    def compute_output_shape(self, input_shape):
        return input_shape

    def _get_concrete_noise_shape(self, inputs, noise_shape):
        # Resolve `None` entries in `noise_shape` against the actual input
        # shape so the mask can be sampled.
        if noise_shape is None:
            return ops.shape(inputs)

        concrete_inputs_shape = ops.shape(inputs)
        concrete_noise_shape = []
        for i, value in enumerate(noise_shape):
            concrete_noise_shape.append(
                concrete_inputs_shape[i] if value is None else value
            )
        return concrete_noise_shape

    def get_config(self):
        base_config = super().get_config()
        config = {
            "rate": self.rate,
            "seed": self.seed,
            "noise_shape": self.noise_shape,
        }
        return {**base_config, **config}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/regularization/activity_regularization_test.py | keras/src/layers/regularization/activity_regularization_test.py | import numpy as np
import pytest
from keras.src import layers
from keras.src.testing import test_case
class ActivityRegularizationTest(test_case.TestCase):
    def test_correctness(self):
        """Loss equals `l1 * |x| + l2 * x**2` for a scalar activation."""
        regularizing_layer = layers.ActivityRegularization(l1=0.2, l2=0.3)
        regularizing_layer(2 * np.ones((1,)))
        self.assertLen(regularizing_layer.losses, 1)
        expected_loss = 4 * 0.3 + 2 * 0.2
        self.assertAllClose(regularizing_layer.losses[0], expected_loss)

    @pytest.mark.requires_trainable_backend
    def test_activity_regularization_basics(self):
        """The layer satisfies the generic layer contract."""
        self.run_layer_test(
            layers.ActivityRegularization,
            {"l1": 0.1, "l2": 0.2},
            input_shape=(2, 3),
            input_dtype="float32",
            expected_output_shape=(2, 3),
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=0,
            expected_num_losses=1,
            supports_masking=True,
            assert_built_after_instantiation=True,
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/regularization/alpha_dropout_test.py | keras/src/layers/regularization/alpha_dropout_test.py | import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src import testing
class AlphaDropoutTest(testing.TestCase):
    @pytest.mark.requires_trainable_backend
    def test_alpha_dropout_basics(self):
        """AlphaDropout satisfies the generic layer contract."""
        self.run_layer_test(
            layers.AlphaDropout,
            init_kwargs={"rate": 0.2},
            input_shape=(2, 3),
            call_kwargs={"training": True},
            expected_output_shape=(2, 3),
            expected_num_trainable_weights=0,
            expected_num_non_trainable_weights=0,
            expected_num_seed_generators=1,
            expected_num_losses=0,
            supports_masking=True,
            assert_built_after_instantiation=True,
        )

    def test_alpha_dropout_correctness(self):
        """Unit-variance inputs keep roughly unit variance after dropout."""
        ones = np.ones((20, 500)).astype("float32")
        dropped = layers.AlphaDropout(0.3, seed=1337)(ones, training=True)
        self.assertAllClose(
            np.std(backend.convert_to_numpy(dropped)), 1.0, atol=1e-1
        )

    def test_alpha_dropout_partial_noise_shape_dynamic(self):
        """A broadcast (dynamic) noise shape shares the mask across axis 1."""
        inputs = np.ones((20, 5, 10))
        outputs = layers.AlphaDropout(0.5, noise_shape=(None, 1, None))(
            inputs, training=True
        )
        self.assertAllClose(outputs[:, 0, :], outputs[:, 1, :])

    def test_alpha_dropout_partial_noise_shape_static(self):
        """A fully static noise shape shares the mask across axis 1."""
        inputs = np.ones((20, 5, 10))
        outputs = layers.AlphaDropout(0.5, noise_shape=(20, 1, 10))(
            inputs, training=True
        )
        self.assertAllClose(outputs[:, 0, :], outputs[:, 1, :])

    def test_alpha_dropout_negative_rate(self):
        """Rates below 0 are rejected at construction time."""
        with self.assertRaisesRegex(
            ValueError,
            "Invalid value received for argument `rate`. "
            "Expected a float value between 0 and 1.",
        ):
            _ = layers.AlphaDropout(rate=-0.5)

    def test_alpha_dropout_rate_greater_than_one(self):
        """Rates above 1 are rejected at construction time."""
        with self.assertRaisesRegex(
            ValueError,
            "Invalid value received for argument `rate`. "
            "Expected a float value between 0 and 1.",
        ):
            _ = layers.AlphaDropout(rate=1.5)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/regularization/dropout.py | keras/src/layers/regularization/dropout.py | from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.Dropout")
class Dropout(Layer):
    """Applies dropout to the input.

    The `Dropout` layer randomly sets input units to 0 with a frequency of
    `rate` at each step during training time, which helps prevent overfitting.
    Inputs not set to 0 are scaled up by `1 / (1 - rate)` such that the sum
    over all inputs is unchanged.

    Note that the `Dropout` layer only applies when `training` is set to
    `True` in `call()`, such that no values are dropped during inference.
    When using `model.fit`, `training` will be appropriately set to `True`
    automatically. In other contexts, you can set the argument explicitly
    to `True` when calling the layer.

    (This is in contrast to setting `trainable=False` for a `Dropout` layer.
    `trainable` does not affect the layer's behavior, as `Dropout` does
    not have any variables/weights that can be frozen during training.)

    Args:
        rate: Float between 0 and 1. Fraction of the input units to drop.
        noise_shape: 1D integer tensor representing the shape of the
            binary dropout mask that will be multiplied with the input.
            For instance, if your inputs have shape
            `(batch_size, timesteps, features)` and
            you want the dropout mask to be the same for all timesteps,
            you can use `noise_shape=(batch_size, 1, features)`.
        seed: A Python integer to use as random seed.

    Call arguments:
        inputs: Input tensor (of any rank).
        training: Python boolean indicating whether the layer should behave in
            training mode (adding dropout) or in inference mode (doing
            nothing).
    """

    def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
        super().__init__(**kwargs)
        if not 0 <= rate <= 1:
            # Fix: the first two message fragments carried spurious `f`
            # prefixes (no placeholders, ruff F541); the text is unchanged.
            raise ValueError(
                "Invalid value received for argument "
                "`rate`. Expected a float value between 0 and 1. "
                f"Received: rate={rate}"
            )
        self.rate = rate
        self.seed = seed
        self.noise_shape = self._validate_noise_shape(noise_shape)
        # A seed generator is only needed when dropout can actually occur.
        if rate > 0:
            self.seed_generator = backend.random.SeedGenerator(seed)
        self.supports_masking = True
        self._build_at_init()

    def _validate_noise_shape(self, noise_shape):
        """Normalize `noise_shape` to a tuple, rejecting invalid values.

        Returns `None` unchanged; otherwise returns a tuple whose entries
        are positive integers or `None` (broadcast dimensions).

        Raises:
            ValueError: If `noise_shape` is a string, is not iterable, or
                contains non-integer / non-positive entries.
        """
        if noise_shape is None:
            return None
        # Strings are iterable but never a valid shape; reject them before
        # the generic tuple() conversion below would "succeed".
        if isinstance(noise_shape, str):
            raise ValueError(
                "Invalid value received for argument `noise_shape`. "
                "Expected a tuple or list of integers. "
                f"Received: noise_shape={noise_shape}"
            )
        if not isinstance(noise_shape, tuple):
            try:
                noise_shape = tuple(noise_shape)
            except TypeError as exc:
                # Chain the original TypeError for easier debugging (B904).
                raise ValueError(
                    "Invalid value received for argument `noise_shape`. "
                    "Expected an iterable of integers "
                    "(e.g., a tuple or list). "
                    f"Received: noise_shape={noise_shape}"
                ) from exc
        for i, dim in enumerate(noise_shape):
            if dim is not None:
                if not isinstance(dim, int):
                    raise ValueError(
                        "Invalid value received for argument `noise_shape`. "
                        "Expected all elements to be integers or None. "
                        f"Received element at index {i}: {dim} "
                        f"(type: {type(dim).__name__})"
                    )
                if dim <= 0:
                    raise ValueError(
                        "Invalid value received for argument `noise_shape`. "
                        "Expected all dimensions to be positive integers "
                        "or None. "
                        f"Received negative or zero value at index {i}: {dim}"
                    )
        return noise_shape

    def call(self, inputs, training=False):
        # Dropout only applies in training mode; inference is the identity.
        if training and self.rate > 0:
            return backend.random.dropout(
                inputs,
                self.rate,
                noise_shape=self.noise_shape,
                seed=self.seed_generator,
            )
        return inputs

    def compute_output_shape(self, input_shape):
        return input_shape

    def get_config(self):
        base_config = super().get_config()
        config = {
            "rate": self.rate,
            "seed": self.seed,
            "noise_shape": self.noise_shape,
        }
        return {**base_config, **config}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/regularization/activity_regularization.py | keras/src/layers/regularization/activity_regularization.py | from keras.src import regularizers
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
@keras_export("keras.layers.ActivityRegularization")
class ActivityRegularization(Layer):
"""Layer that applies an update to the cost function based input activity.
Args:
l1: L1 regularization factor (positive float).
l2: L2 regularization factor (positive float).
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
"""
def __init__(self, l1=0.0, l2=0.0, **kwargs):
super().__init__(
activity_regularizer=regularizers.L1L2(l1=l1, l2=l2), **kwargs
)
self.supports_masking = True
self.l1 = l1
self.l2 = l2
self._build_at_init()
def call(self, inputs):
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
base_config.pop("activity_regularizer", None)
config = {"l1": self.l1, "l2": self.l2}
return {**base_config, **config}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/regularization/spatial_dropout_test.py | keras/src/layers/regularization/spatial_dropout_test.py | import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src.testing import test_case
class SpatialDropoutTest(test_case.TestCase):
@pytest.mark.requires_trainable_backend
def test_spatial_dropout_1d(self):
self.run_layer_test(
layers.SpatialDropout1D,
init_kwargs={"rate": 0.5},
call_kwargs={"training": True},
input_shape=(2, 3, 4),
assert_built_after_instantiation=True,
)
self.run_layer_test(
layers.SpatialDropout1D,
init_kwargs={"rate": 0.5},
call_kwargs={"training": False},
input_shape=(2, 3, 4),
assert_built_after_instantiation=True,
)
@pytest.mark.requires_trainable_backend
def test_spatial_dropout_2d(self):
self.run_layer_test(
layers.SpatialDropout2D,
init_kwargs={"rate": 0.5},
call_kwargs={"training": True},
input_shape=(2, 3, 4, 5),
assert_built_after_instantiation=True,
)
self.run_layer_test(
layers.SpatialDropout2D,
init_kwargs={"rate": 0.5, "data_format": "channels_first"},
call_kwargs={"training": True},
input_shape=(2, 3, 4, 5),
assert_built_after_instantiation=True,
)
@pytest.mark.requires_trainable_backend
def test_spatial_dropout_3d(self):
self.run_layer_test(
layers.SpatialDropout3D,
init_kwargs={"rate": 0.5},
call_kwargs={"training": True},
input_shape=(2, 3, 4, 4, 5),
assert_built_after_instantiation=True,
)
self.run_layer_test(
layers.SpatialDropout3D,
init_kwargs={"rate": 0.5, "data_format": "channels_first"},
call_kwargs={"training": True},
input_shape=(2, 3, 4, 4, 5),
assert_built_after_instantiation=True,
)
def test_spatial_dropout_1D_dynamic(self):
inputs = layers.Input((3, 2))
layer = layers.SpatialDropout1D(0.5)
layer(inputs, training=True)
def test_spatial_dropout_1D_correctness(self):
inputs = np.ones((10, 3, 10))
layer = layers.SpatialDropout1D(0.5)
outputs = layer(inputs, training=True)
self.assertAllClose(outputs[:, 0, :], outputs[:, 1, :])
def test_spatial_dropout_2D_dynamic(self):
inputs = layers.Input((3, 2, 4))
layer = layers.SpatialDropout2D(0.5)
layer(inputs, training=True)
def test_spatial_dropout_2D_correctness(self):
if backend.config.image_data_format() == "channels_last":
inputs = np.ones((10, 3, 3, 10))
else:
inputs = np.ones((10, 10, 3, 3))
layer = layers.SpatialDropout2D(0.5)
outputs = layer(inputs, training=True)
if backend.config.image_data_format() == "channels_last":
self.assertAllClose(outputs[:, 0, 0, :], outputs[:, 1, 1, :])
else:
self.assertAllClose(outputs[:, :, 0, 0], outputs[:, :, 1, 1])
def test_spatial_dropout_3D_dynamic(self):
inputs = layers.Input((3, 2, 4, 2))
layer = layers.SpatialDropout3D(0.5)
layer(inputs, training=True)
def test_spatial_dropout_3D_correctness(self):
if backend.config.image_data_format() == "channels_last":
inputs = np.ones((10, 3, 3, 3, 10))
else:
inputs = np.ones((10, 10, 3, 3, 3))
layer = layers.SpatialDropout3D(0.5)
outputs = layer(inputs, training=True)
if backend.config.image_data_format() == "channels_last":
self.assertAllClose(outputs[:, 0, 0, 0, :], outputs[:, 1, 1, 1, :])
else:
self.assertAllClose(outputs[:, :, 0, 0, 0], outputs[:, :, 1, 1, 1])
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/regularization/gaussian_noise_test.py | keras/src/layers/regularization/gaussian_noise_test.py | import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src import testing
class GaussianNoiseTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_gaussian_noise_basics(self):
self.run_layer_test(
layers.GaussianNoise,
init_kwargs={
"stddev": 0.2,
},
input_shape=(2, 3),
call_kwargs={"training": True},
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=1,
expected_num_losses=0,
supports_masking=True,
assert_built_after_instantiation=True,
)
def test_gaussian_noise_correctness(self):
inputs = np.ones((20, 500))
layer = layers.GaussianNoise(0.3, seed=1337)
outputs = layer(inputs, training=True)
self.assertAllClose(
np.std(backend.convert_to_numpy(outputs)), 0.3, atol=0.02
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/regularization/__init__.py | keras/src/layers/regularization/__init__.py | python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false | |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/regularization/dropout_test.py | keras/src/layers/regularization/dropout_test.py | import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src import testing
class DropoutTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_dropout_basics(self):
self.run_layer_test(
layers.Dropout,
init_kwargs={
"rate": 0.2,
},
input_shape=(2, 3),
call_kwargs={"training": True},
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=1,
expected_num_losses=0,
supports_masking=True,
assert_built_after_instantiation=True,
)
def test_dropout_rescaling(self):
inputs = np.ones((20, 500))
layer = layers.Dropout(0.5, seed=1337)
outputs = layer(inputs, training=True)
outputs = backend.convert_to_numpy(outputs)
self.assertAllClose(np.mean(outputs), 1.0, atol=0.02)
self.assertAllClose(np.max(outputs), 2.0)
def test_dropout_partial_noise_shape_dynamic(self):
inputs = np.ones((20, 5, 10))
layer = layers.Dropout(0.5, noise_shape=(None, 1, None))
outputs = layer(inputs, training=True)
self.assertAllClose(outputs[:, 0, :], outputs[:, 1, :])
def test_dropout_partial_noise_shape_static(self):
inputs = np.ones((20, 5, 10))
layer = layers.Dropout(0.5, noise_shape=(20, 1, 10))
outputs = layer(inputs, training=True)
self.assertAllClose(outputs[:, 0, :], outputs[:, 1, :])
def test_dropout_negative_rate(self):
with self.assertRaisesRegex(
ValueError,
"Invalid value received for argument `rate`. "
"Expected a float value between 0 and 1.",
):
_ = layers.Dropout(rate=-0.5)
def test_dropout_rate_greater_than_one(self):
with self.assertRaisesRegex(
ValueError,
"Invalid value received for argument `rate`. "
"Expected a float value between 0 and 1.",
):
_ = layers.Dropout(rate=1.5)
def test_validate_noise_shape_none(self):
layer = layers.Dropout(0.5, noise_shape=None)
self.assertIsNone(layer.noise_shape)
def test_validate_noise_shape_integer_tuple(self):
layer = layers.Dropout(0.5, noise_shape=(20, 1, 10))
self.assertEqual(layer.noise_shape, (20, 1, 10))
def test_validate_noise_shape_none_values(self):
layer = layers.Dropout(0.5, noise_shape=(None, 1, None))
self.assertEqual(layer.noise_shape, (None, 1, None))
def test_validate_noise_shape_cast_to_a_tuple(self):
layer = layers.Dropout(0.5, noise_shape=[20, 1, 10])
self.assertEqual(layer.noise_shape, (20, 1, 10))
self.assertIsInstance(layer.noise_shape, tuple)
def test_validate_noise_shape_non_iterable(self):
with self.assertRaisesRegex(
ValueError,
"Invalid value received for argument `noise_shape`. "
"Expected a tuple or list of integers.",
):
layers.Dropout(0.5, noise_shape="Invalid")
def test_validate_noise_shape_invalid_type(self):
with self.assertRaisesRegex(
ValueError,
"Invalid value received for argument `noise_shape`. "
"Expected all elements to be integers or None.",
):
layers.Dropout(0.5, noise_shape=(20, 1.5, 10))
def test_validate_noise_shape_negative_value(self):
with self.assertRaisesRegex(
ValueError,
"Invalid value received for argument `noise_shape`. "
"Expected all dimensions to be positive integers or None.",
):
layers.Dropout(0.5, noise_shape=(20, -1, 10))
def test_validate_noise_shape_zero_value(self):
with self.assertRaisesRegex(
ValueError,
"Invalid value received for argument `noise_shape`. "
"Expected all dimensions to be positive integers or None.",
):
layers.Dropout(0.5, noise_shape=(20, 0, 10))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/regularization/gaussian_dropout.py | keras/src/layers/regularization/gaussian_dropout.py | import math
from keras.src import backend
from keras.src import layers
from keras.src import ops
from keras.src.api_export import keras_export
@keras_export("keras.layers.GaussianDropout")
class GaussianDropout(layers.Layer):
"""Apply multiplicative 1-centered Gaussian noise.
As it is a regularization layer, it is only active at training time.
Args:
rate: Float, drop probability (as with `Dropout`).
The multiplicative noise will have
standard deviation `sqrt(rate / (1 - rate))`.
seed: Integer, optional random seed to enable deterministic behavior.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
"""
def __init__(self, rate, seed=None, **kwargs):
super().__init__(**kwargs)
if not 0 <= rate <= 1:
raise ValueError(
f"Invalid value received for argument "
"`rate`. Expected a float value between 0 and 1. "
f"Received: rate={rate}"
)
self.rate = rate
self.seed = seed
if rate > 0:
self.seed_generator = backend.random.SeedGenerator(seed)
self.supports_masking = True
self._build_at_init()
def call(self, inputs, training=False):
if training and self.rate > 0:
stddev = math.sqrt(self.rate / (1.0 - self.rate))
return inputs * backend.random.normal(
shape=ops.shape(inputs),
mean=1.0,
stddev=stddev,
dtype=self.compute_dtype,
seed=self.seed_generator,
)
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
config = {
"rate": self.rate,
"seed": self.seed,
}
return {**base_config, **config}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/regularization/spatial_dropout.py | keras/src/layers/regularization/spatial_dropout.py | from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.regularization.dropout import Dropout
class BaseSpatialDropout(Dropout):
def __init__(self, rate, seed=None, name=None, dtype=None):
super().__init__(rate, seed=seed, name=name, dtype=dtype)
def call(self, inputs, training=False):
if training and self.rate > 0:
return backend.random.dropout(
inputs,
self.rate,
noise_shape=self._get_noise_shape(inputs),
seed=self.seed_generator,
)
return inputs
def get_config(self):
return {
"rate": self.rate,
"seed": self.seed,
"name": self.name,
"dtype": self.dtype,
}
@keras_export("keras.layers.SpatialDropout1D")
class SpatialDropout1D(BaseSpatialDropout):
"""Spatial 1D version of Dropout.
This layer performs the same function as Dropout, however, it drops
entire 1D feature maps instead of individual elements. If adjacent frames
within feature maps are strongly correlated (as is normally the case in
early convolution layers) then regular dropout will not regularize the
activations and will otherwise just result in an effective learning rate
decrease. In this case, `SpatialDropout1D` will help promote independence
between feature maps and should be used instead.
Args:
rate: Float between 0 and 1. Fraction of the input units to drop.
Call arguments:
inputs: A 3D tensor.
training: Python boolean indicating whether the layer
should behave in training mode (applying dropout)
or in inference mode (pass-through).
Input shape:
3D tensor with shape: `(samples, timesteps, channels)`
Output shape: Same as input.
Reference:
- [Tompson et al., 2014](https://arxiv.org/abs/1411.4280)
"""
def __init__(self, rate, seed=None, name=None, dtype=None):
super().__init__(rate, seed=seed, name=name, dtype=dtype)
self.input_spec = InputSpec(ndim=3)
def _get_noise_shape(self, inputs):
input_shape = ops.shape(inputs)
return (input_shape[0], 1, input_shape[2])
@keras_export("keras.layers.SpatialDropout2D")
class SpatialDropout2D(BaseSpatialDropout):
"""Spatial 2D version of Dropout.
This version performs the same function as Dropout, however, it drops
entire 2D feature maps instead of individual elements. If adjacent pixels
within feature maps are strongly correlated (as is normally the case in
early convolution layers) then regular dropout will not regularize the
activations and will otherwise just result in an effective learning rate
decrease. In this case, `SpatialDropout2D` will help promote independence
between feature maps and should be used instead.
Args:
rate: Float between 0 and 1. Fraction of the input units to drop.
data_format: `"channels_first"` or `"channels_last"`.
In `"channels_first"` mode, the channels dimension (the depth)
is at index 1, in `"channels_last"` mode is it at index 3.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be `"channels_last"`.
Call arguments:
inputs: A 4D tensor.
training: Python boolean indicating whether the layer
should behave in training mode (applying dropout)
or in inference mode (pass-through).
Input shape:
4D tensor with shape: `(samples, channels, rows, cols)` if
data_format='channels_first'
or 4D tensor with shape: `(samples, rows, cols, channels)` if
data_format='channels_last'.
Output shape: Same as input.
Reference:
- [Tompson et al., 2014](https://arxiv.org/abs/1411.4280)
"""
def __init__(
self, rate, data_format=None, seed=None, name=None, dtype=None
):
super().__init__(rate, seed=seed, name=name, dtype=dtype)
self.data_format = backend.standardize_data_format(data_format)
self.input_spec = InputSpec(ndim=4)
def _get_noise_shape(self, inputs):
input_shape = ops.shape(inputs)
if self.data_format == "channels_first":
return (input_shape[0], input_shape[1], 1, 1)
elif self.data_format == "channels_last":
return (input_shape[0], 1, 1, input_shape[3])
def get_config(self):
base_config = super().get_config()
config = {
"data_format": self.data_format,
}
return {**base_config, **config}
@keras_export("keras.layers.SpatialDropout3D")
class SpatialDropout3D(BaseSpatialDropout):
"""Spatial 3D version of Dropout.
This version performs the same function as Dropout, however, it drops
entire 3D feature maps instead of individual elements. If adjacent voxels
within feature maps are strongly correlated (as is normally the case in
early convolution layers) then regular dropout will not regularize the
activations and will otherwise just result in an effective learning rate
decrease. In this case, SpatialDropout3D will help promote independence
between feature maps and should be used instead.
Args:
rate: Float between 0 and 1. Fraction of the input units to drop.
data_format: `"channels_first"` or `"channels_last"`.
In `"channels_first"` mode, the channels dimension (the depth)
is at index 1, in `"channels_last"` mode is it at index 4.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be `"channels_last"`.
Call arguments:
inputs: A 5D tensor.
training: Python boolean indicating whether the layer
should behave in training mode (applying dropout)
or in inference mode (pass-through).
Input shape:
5D tensor with shape: `(samples, channels, dim1, dim2, dim3)` if
data_format='channels_first'
or 5D tensor with shape: `(samples, dim1, dim2, dim3, channels)` if
data_format='channels_last'.
Output shape: Same as input.
Reference:
- [Tompson et al., 2014](https://arxiv.org/abs/1411.4280)
"""
def __init__(
self, rate, data_format=None, seed=None, name=None, dtype=None
):
super().__init__(rate, seed=seed, name=name, dtype=dtype)
self.data_format = backend.standardize_data_format(data_format)
self.input_spec = InputSpec(ndim=5)
def _get_noise_shape(self, inputs):
input_shape = ops.shape(inputs)
if self.data_format == "channels_first":
return (input_shape[0], input_shape[1], 1, 1, 1)
elif self.data_format == "channels_last":
return (input_shape[0], 1, 1, 1, input_shape[4])
def get_config(self):
base_config = super().get_config()
config = {
"data_format": self.data_format,
}
return {**base_config, **config}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/regularization/gaussian_dropout_test.py | keras/src/layers/regularization/gaussian_dropout_test.py | import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src import testing
class GaussianDropoutTest(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_gaussian_dropout_basics(self):
self.run_layer_test(
layers.GaussianDropout,
init_kwargs={
"rate": 0.2,
},
input_shape=(2, 3),
call_kwargs={"training": True},
expected_output_shape=(2, 3),
expected_num_trainable_weights=0,
expected_num_non_trainable_weights=0,
expected_num_seed_generators=1,
expected_num_losses=0,
supports_masking=True,
assert_built_after_instantiation=True,
)
def test_gaussian_dropout_correctness(self):
inputs = np.ones((20, 500))
layer = layers.GaussianDropout(0.3, seed=1337)
outputs = layer(inputs, training=True)
self.assertAllClose(
np.std(backend.convert_to_numpy(outputs)),
np.sqrt(0.3 / (1 - 0.3)),
atol=0.02,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/legacy/losses.py | keras/src/legacy/losses.py | from keras.src.api_export import keras_export
@keras_export("keras._legacy.losses.Reduction")
class Reduction:
AUTO = "auto"
NONE = "none"
SUM = "sum"
SUM_OVER_BATCH_SIZE = "sum_over_batch_size"
@classmethod
def all(cls):
return (cls.AUTO, cls.NONE, cls.SUM, cls.SUM_OVER_BATCH_SIZE)
@classmethod
def validate(cls, key):
if key not in cls.all():
raise ValueError(
f'Invalid Reduction Key: {key}. Expected keys are "{cls.all()}"'
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/legacy/__init__.py | keras/src/legacy/__init__.py | python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false | |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/legacy/backend.py | keras/src/legacy/backend.py | """Legacy Keras 1/2 backend functions."""
import itertools
import numpy as np
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.utils.module_utils import tensorflow as tf
py_any = any
py_all = all
@keras_export("keras._legacy.backend.abs")
def abs(x):
"""DEPRECATED."""
return tf.abs(x)
@keras_export("keras._legacy.backend.all")
def all(x, axis=None, keepdims=False):
"""DEPRECATED."""
x = tf.cast(x, tf.bool)
return tf.reduce_all(x, axis, keepdims)
@keras_export("keras._legacy.backend.any")
def any(x, axis=None, keepdims=False):
"""DEPRECATED."""
x = tf.cast(x, tf.bool)
return tf.reduce_any(x, axis, keepdims)
@keras_export("keras._legacy.backend.argmax")
def argmax(x, axis=-1):
"""DEPRECATED."""
return tf.argmax(x, axis)
@keras_export("keras._legacy.backend.argmin")
def argmin(x, axis=-1):
"""DEPRECATED."""
return tf.argmin(x, axis)
@keras_export("keras._legacy.backend.arange")
def arange(start, stop=None, step=1, dtype="int32"):
"""DEPRECATED."""
if stop is None and start < 0:
start = 0
result = tf.range(start, limit=stop, delta=step, name="arange")
if dtype != "int32":
result = tf.cast(result, dtype)
return result
@keras_export("keras._legacy.backend.batch_dot")
def batch_dot(x, y, axes=None):
"""DEPRECATED."""
x_shape = x.shape
y_shape = y.shape
x_ndim = len(x_shape)
y_ndim = len(y_shape)
if x_ndim < 2 or y_ndim < 2:
raise ValueError(
"Cannot do batch_dot on inputs "
"with rank < 2. "
f"Received inputs with tf.shapes {x_shape} and {y_shape}."
)
x_batch_size = x_shape[0]
y_batch_size = y_shape[0]
if x_batch_size is not None and y_batch_size is not None:
if x_batch_size != y_batch_size:
raise ValueError(
"Cannot do batch_dot on inputs "
"with different batch sizes. "
"Received inputs with tf.shapes "
f"{x_shape} and {y_shape}."
)
if isinstance(axes, int):
axes = [axes, axes]
if axes is None:
if y_ndim == 2:
axes = [x_ndim - 1, y_ndim - 1]
else:
axes = [x_ndim - 1, y_ndim - 2]
if py_any(isinstance(a, (list, tuple)) for a in axes):
raise ValueError(
"Multiple target dimensions are not supported. "
"Expected: None, int, (int, int), "
f"Provided: {axes}"
)
# if tuple, convert to list.
axes = list(axes)
# convert negative indices.
if axes[0] < 0:
axes[0] += x_ndim
if axes[1] < 0:
axes[1] += y_ndim
# sanity checks
if 0 in axes:
raise ValueError(
"Cannot perform batch_dot over axis 0. "
"If your inputs are not batched, "
"add a dummy batch dimension to your "
"inputs using K.expand_dims(x, 0)"
)
a0, a1 = axes
d1 = x_shape[a0]
d2 = y_shape[a1]
if d1 is not None and d2 is not None and d1 != d2:
raise ValueError(
"Cannot do batch_dot on inputs with tf.shapes "
f"{x_shape} and {y_shape} with axes={axes}. "
"x.shape[%d] != y.shape[%d] (%d != %d)."
% (axes[0], axes[1], d1, d2)
)
# backup ndims. Need them later.
orig_x_ndim = x_ndim
orig_y_ndim = y_ndim
# if rank is 2, expand to 3.
if x_ndim == 2:
x = tf.expand_dims(x, 1)
a0 += 1
x_ndim += 1
if y_ndim == 2:
y = tf.expand_dims(y, 2)
y_ndim += 1
# bring x's dimension to be reduced to last axis.
if a0 != x_ndim - 1:
pattern = list(range(x_ndim))
for i in range(a0, x_ndim - 1):
pattern[i] = pattern[i + 1]
pattern[-1] = a0
x = tf.transpose(x, pattern)
# bring y's dimension to be reduced to axis 1.
if a1 != 1:
pattern = list(range(y_ndim))
for i in range(a1, 1, -1):
pattern[i] = pattern[i - 1]
pattern[1] = a1
y = tf.transpose(y, pattern)
# normalize both inputs to rank 3.
if x_ndim > 3:
# squash middle dimensions of x.
x_shape = tf.shape(x)
x_mid_dims = x_shape[1:-1]
x_squashed_shape = tf.stack([x_shape[0], -1, x_shape[-1]])
x = tf.reshape(x, x_squashed_shape)
x_squashed = True
else:
x_squashed = False
if y_ndim > 3:
# squash trailing dimensions of y.
y_shape = tf.shape(y)
y_trail_dims = y_shape[2:]
y_squashed_shape = tf.stack([y_shape[0], y_shape[1], -1])
y = tf.reshape(y, y_squashed_shape)
y_squashed = True
else:
y_squashed = False
result = tf.matmul(x, y)
# if inputs were squashed, we have to reshape the matmul output.
output_shape = tf.shape(result)
do_reshape = False
if x_squashed:
output_shape = tf.concat(
[output_shape[:1], x_mid_dims, output_shape[-1:]], 0
)
do_reshape = True
if y_squashed:
output_shape = tf.concat([output_shape[:-1], y_trail_dims], 0)
do_reshape = True
if do_reshape:
result = tf.reshape(result, output_shape)
# if the inputs were originally rank 2, we remove the added 1 dim.
if orig_x_ndim == 2:
result = tf.squeeze(result, 1)
elif orig_y_ndim == 2:
result = tf.squeeze(result, -1)
return result
@keras_export("keras._legacy.backend.batch_flatten")
def batch_flatten(x):
"""DEPRECATED."""
x = tf.reshape(x, tf.stack([-1, prod(tf.shape(x)[1:])]))
return x
@keras_export("keras._legacy.backend.batch_get_value")
def batch_get_value(tensors):
"""DEPRECATED."""
return [x.numpy() for x in tensors]
@keras_export("keras._legacy.backend.batch_set_value")
def batch_set_value(tuples):
"""DEPRECATED."""
if tf.executing_eagerly() or tf.inside_function():
for x, value in tuples:
value = np.asarray(value, dtype=x.dtype.name)
x.assign(value)
@keras_export("keras._legacy.backend.batch_normalization")
def batch_normalization(x, mean, var, beta, gamma, axis=-1, epsilon=1e-3):
"""DEPRECATED."""
return tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
@keras_export("keras._legacy.backend.bias_add")
def bias_add(x, bias, data_format=None):
"""DEPRECATED."""
if data_format is None:
data_format = backend.image_data_format()
if data_format not in {"channels_first", "channels_last"}:
raise ValueError(f"Unknown data_format: {data_format}")
bias_shape = bias.shape
if len(bias_shape) != 1 and len(bias_shape) != ndim(x) - 1:
raise ValueError(
f"Unexpected bias dimensions {len(bias_shape)}. "
f"Expected it to be 1 or {ndim(x) - 1} dimensions"
)
if len(bias_shape) == 1:
if data_format == "channels_first":
return tf.nn.bias_add(x, bias, data_format="NCHW")
return tf.nn.bias_add(x, bias, data_format="NHWC")
if ndim(x) in (3, 4, 5):
if data_format == "channels_first":
bias_reshape_axis = (1, bias_shape[-1]) + bias_shape[:-1]
return x + reshape(bias, bias_reshape_axis)
return x + reshape(bias, (1,) + bias_shape)
return tf.nn.bias_add(x, bias)
@keras_export("keras._legacy.backend.binary_crossentropy")
def binary_crossentropy(target, output, from_logits=False):
"""DEPRECATED."""
target = tf.convert_to_tensor(target)
output = tf.convert_to_tensor(output)
if from_logits:
return tf.nn.sigmoid_cross_entropy_with_logits(
labels=target, logits=output
)
epsilon_ = tf.convert_to_tensor(backend.epsilon(), output.dtype)
output = tf.clip_by_value(output, epsilon_, 1.0 - epsilon_)
# Compute cross entropy from probabilities.
bce = target * tf.math.log(output + backend.epsilon())
bce += (1 - target) * tf.math.log(1 - output + backend.epsilon())
return -bce
@keras_export("keras._legacy.backend.binary_focal_crossentropy")
def binary_focal_crossentropy(
target,
output,
apply_class_balancing=False,
alpha=0.25,
gamma=2.0,
from_logits=False,
):
"""DEPRECATED."""
sigmoidal = tf.sigmoid(output) if from_logits else output
p_t = target * sigmoidal + (1 - target) * (1 - sigmoidal)
# Calculate focal factor
focal_factor = tf.pow(1.0 - p_t, gamma)
# Binary crossentropy
bce = binary_crossentropy(
target=target,
output=output,
from_logits=from_logits,
)
focal_bce = focal_factor * bce
if apply_class_balancing:
weight = target * alpha + (1 - target) * (1 - alpha)
focal_bce = weight * focal_bce
return focal_bce
@keras_export("keras._legacy.backend.cast")
def cast(x, dtype):
"""DEPRECATED."""
return tf.cast(x, dtype)
@keras_export("keras._legacy.backend.cast_to_floatx")
def cast_to_floatx(x):
"""DEPRECATED."""
if isinstance(x, (tf.Tensor, tf.Variable, tf.SparseTensor)):
return tf.cast(x, dtype=backend.floatx())
return np.asarray(x, dtype=backend.floatx())
@keras_export("keras._legacy.backend.categorical_crossentropy")
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
"""DEPRECATED."""
target = tf.convert_to_tensor(target)
output = tf.convert_to_tensor(output)
target.shape.assert_is_compatible_with(output.shape)
if from_logits:
return tf.nn.softmax_cross_entropy_with_logits(
labels=target, logits=output, axis=axis
)
# Adjust the predictions so that the probability of
# each class for every sample adds up to 1
# This is needed to ensure that the cross entropy is
# computed correctly.
output = output / tf.reduce_sum(output, axis, True)
# Compute cross entropy from probabilities.
epsilon_ = tf.convert_to_tensor(backend.epsilon(), output.dtype)
output = tf.clip_by_value(output, epsilon_, 1.0 - epsilon_)
return -tf.reduce_sum(target * tf.math.log(output), axis)
@keras_export("keras._legacy.backend.categorical_focal_crossentropy")
def categorical_focal_crossentropy(
target,
output,
alpha=0.25,
gamma=2.0,
from_logits=False,
axis=-1,
):
"""DEPRECATED."""
target = tf.convert_to_tensor(target)
output = tf.convert_to_tensor(output)
target.shape.assert_is_compatible_with(output.shape)
if from_logits:
output = tf.nn.softmax(output, axis=axis)
# Adjust the predictions so that the probability of
# each class for every sample adds up to 1
# This is needed to ensure that the cross entropy is
# computed correctly.
output = output / tf.reduce_sum(output, axis=axis, keepdims=True)
epsilon_ = tf.convert_to_tensor(backend.epsilon(), output.dtype)
output = tf.clip_by_value(output, epsilon_, 1.0 - epsilon_)
# Calculate cross entropy
cce = -target * tf.math.log(output)
# Calculate factors
modulating_factor = tf.pow(1.0 - output, gamma)
weighting_factor = tf.multiply(modulating_factor, alpha)
# Apply weighting factor
focal_cce = tf.multiply(weighting_factor, cce)
focal_cce = tf.reduce_sum(focal_cce, axis=axis)
return focal_cce
@keras_export("keras._legacy.backend.clip")
def clip(x, min_value, max_value):
"""DEPRECATED."""
if isinstance(min_value, (int, float)) and isinstance(
max_value, (int, float)
):
if max_value < min_value:
max_value = min_value
if min_value is None:
min_value = -np.inf
if max_value is None:
max_value = np.inf
return tf.clip_by_value(x, min_value, max_value)
@keras_export("keras._legacy.backend.concatenate")
def concatenate(tensors, axis=-1):
"""DEPRECATED."""
if axis < 0:
rank = ndim(tensors[0])
if rank:
axis %= rank
else:
axis = 0
if py_all(is_sparse(x) for x in tensors):
return tf.compat.v1.sparse_concat(axis, tensors)
elif py_all(isinstance(x, tf.RaggedTensor) for x in tensors):
return tf.concat(tensors, axis)
else:
return tf.concat([to_dense(x) for x in tensors], axis)
@keras_export("keras._legacy.backend.constant")
def constant(value, dtype=None, shape=None, name=None):
"""DEPRECATED."""
if dtype is None:
dtype = backend.floatx()
return tf.constant(value, dtype=dtype, shape=shape, name=name)
def _preprocess_conv1d_input(x, data_format):
tf_data_format = "NWC" # to pass TF Conv2dNative operations
if data_format == "channels_first":
tf_data_format = "NCW"
return x, tf_data_format
def _preprocess_conv2d_input(x, data_format, force_transpose=False):
tf_data_format = "NHWC"
if data_format == "channels_first":
if force_transpose:
x = tf.transpose(x, (0, 2, 3, 1)) # NCHW -> NHWC
else:
tf_data_format = "NCHW"
return x, tf_data_format
def _preprocess_conv3d_input(x, data_format):
tf_data_format = "NDHWC"
if data_format == "channels_first":
tf_data_format = "NCDHW"
return x, tf_data_format
def _preprocess_padding(padding):
if padding == "same":
padding = "SAME"
elif padding == "valid":
padding = "VALID"
else:
raise ValueError(f"Invalid padding: {padding}")
return padding
@keras_export("keras._legacy.backend.conv1d")
def conv1d(
    x, kernel, strides=1, padding="valid", data_format=None, dilation_rate=1
):
    """DEPRECATED.

    1D convolution. `padding="causal"` is implemented by left-padding the
    time axis with `dilation_rate * (kernel_width - 1)` zeros and then
    running a "valid" convolution. Channels-first output is transposed
    back from the NWC layout used internally.
    """
    if data_format is None:
        data_format = backend.image_data_format()
    if data_format not in {"channels_first", "channels_last"}:
        raise ValueError(f"Unknown data_format: {data_format}")
    kernel_shape = kernel.shape.as_list()
    if padding == "causal":
        # causal (dilated) convolution:
        left_pad = dilation_rate * (kernel_shape[0] - 1)
        x = temporal_padding(x, (left_pad, 0))
        padding = "valid"
    padding = _preprocess_padding(padding)
    x, tf_data_format = _preprocess_conv1d_input(x, data_format)
    x = tf.compat.v1.nn.convolution(
        input=x,
        filter=kernel,
        dilation_rate=dilation_rate,
        strides=strides,
        padding=padding,
        data_format=tf_data_format,
    )
    if data_format == "channels_first" and tf_data_format == "NWC":
        x = tf.transpose(x, (0, 2, 1))  # NWC -> NCW
    return x
@keras_export("keras._legacy.backend.conv2d")
def conv2d(
    x,
    kernel,
    strides=(1, 1),
    padding="valid",
    data_format=None,
    dilation_rate=(1, 1),
):
    """DEPRECATED.

    2D convolution via `tf.compat.v1.nn.convolution`. If the input had to
    be converted to NHWC internally, the result is transposed back to
    NCHW for channels-first callers.
    """
    if data_format is None:
        data_format = backend.image_data_format()
    if data_format not in {"channels_first", "channels_last"}:
        raise ValueError(f"Unknown data_format: {data_format}")
    x, tf_data_format = _preprocess_conv2d_input(x, data_format)
    padding = _preprocess_padding(padding)
    x = tf.compat.v1.nn.convolution(
        input=x,
        filter=kernel,
        dilation_rate=dilation_rate,
        strides=strides,
        padding=padding,
        data_format=tf_data_format,
    )
    if data_format == "channels_first" and tf_data_format == "NHWC":
        x = tf.transpose(x, (0, 3, 1, 2))  # NHWC -> NCHW
    return x
@keras_export("keras._legacy.backend.conv2d_transpose")
def conv2d_transpose(
    x,
    kernel,
    output_shape,
    strides=(1, 1),
    padding="valid",
    data_format=None,
    dilation_rate=(1, 1),
):
    """DEPRECATED.

    2D transposed (deconvolution-style) convolution.

    When `dilation_rate != (1, 1)` the op is dispatched to
    `tf.nn.atrous_conv2d_transpose`, which only supports NHWC - hence the
    forced physical transpose for channels-first inputs in that case.
    `output_shape` is given in the caller's data_format and converted to
    NHWC ordering if needed; an unknown batch dim is taken from `x`.
    """
    if data_format is None:
        data_format = backend.image_data_format()
    if data_format not in {"channels_first", "channels_last"}:
        raise ValueError(f"Unknown data_format: {data_format}")
    # `atrous_conv2d_transpose` only supports NHWC format, even on GPU.
    if data_format == "channels_first" and dilation_rate != (1, 1):
        force_transpose = True
    else:
        force_transpose = False
    x, tf_data_format = _preprocess_conv2d_input(
        x, data_format, force_transpose
    )
    # Reorder the requested output shape from NCHW to NHWC to match the
    # layout the data was converted to.
    if data_format == "channels_first" and tf_data_format == "NHWC":
        output_shape = (
            output_shape[0],
            output_shape[2],
            output_shape[3],
            output_shape[1],
        )
    if output_shape[0] is None:
        # Unknown batch size: take it dynamically from the input tensor.
        output_shape = (tf.shape(x)[0],) + tuple(output_shape[1:])
    if isinstance(output_shape, (tuple, list)):
        output_shape = tf.stack(list(output_shape))
    padding = _preprocess_padding(padding)
    if tf_data_format == "NHWC":
        strides = (1,) + strides + (1,)
    else:
        strides = (1, 1) + strides
    if dilation_rate == (1, 1):
        x = tf.compat.v1.nn.conv2d_transpose(
            x,
            kernel,
            output_shape,
            strides,
            padding=padding,
            data_format=tf_data_format,
        )
    else:
        if dilation_rate[0] != dilation_rate[1]:
            raise ValueError(
                "Expected the 2 dimensions of the `dilation_rate` argument "
                "to be equal to each other. "
                f"Received: dilation_rate={dilation_rate}"
            )
        x = tf.nn.atrous_conv2d_transpose(
            x, kernel, output_shape, rate=dilation_rate[0], padding=padding
        )
    if data_format == "channels_first" and tf_data_format == "NHWC":
        x = tf.transpose(x, (0, 3, 1, 2))  # NHWC -> NCHW
    return x
@keras_export("keras._legacy.backend.conv3d")
def conv3d(
    x,
    kernel,
    strides=(1, 1, 1),
    padding="valid",
    data_format=None,
    dilation_rate=(1, 1, 1),
):
    """DEPRECATED.

    3D convolution via `tf.compat.v1.nn.convolution`; channels-first
    output is transposed back from the internal NDHWC layout.
    """
    if data_format is None:
        data_format = backend.image_data_format()
    if data_format not in {"channels_first", "channels_last"}:
        raise ValueError(f"Unknown data_format: {data_format}")
    x, tf_data_format = _preprocess_conv3d_input(x, data_format)
    padding = _preprocess_padding(padding)
    x = tf.compat.v1.nn.convolution(
        input=x,
        filter=kernel,
        dilation_rate=dilation_rate,
        strides=strides,
        padding=padding,
        data_format=tf_data_format,
    )
    if data_format == "channels_first" and tf_data_format == "NDHWC":
        # NDHWC -> NCDHW
        x = tf.transpose(x, (0, 4, 1, 2, 3))
    return x
@keras_export("keras._legacy.backend.cos")
def cos(x):
    """DEPRECATED.

    Element-wise cosine; thin wrapper over `tf.cos`.
    """
    return tf.cos(x)
@keras_export("keras._legacy.backend.count_params")
def count_params(x):
    """DEPRECATED.

    Number of scalar elements in `x`, computed from its static shape.
    NOTE(review): `shape.as_list()` requires a fully-known static shape;
    a `None` dim would make `np.prod` return a non-integer.
    """
    return np.prod(x.shape.as_list())
@keras_export("keras._legacy.backend.ctc_batch_cost")
def ctc_batch_cost(y_true, y_pred, input_length, label_length):
    """DEPRECATED.

    Per-sample CTC loss. `input_length`/`label_length` are expected with
    a trailing singleton axis (it is squeezed away). `y_pred` is converted
    to time-major log-probabilities (epsilon added before the log) as
    required by `tf.compat.v1.nn.ctc_loss`, and the dense labels are
    converted to the sparse form that op expects. Returns shape
    `(batch, 1)`.
    """
    label_length = tf.cast(tf.squeeze(label_length, axis=-1), tf.int32)
    input_length = tf.cast(tf.squeeze(input_length, axis=-1), tf.int32)
    sparse_labels = tf.cast(
        ctc_label_dense_to_sparse(y_true, label_length), tf.int32
    )
    y_pred = tf.math.log(
        tf.transpose(y_pred, perm=[1, 0, 2]) + backend.epsilon()
    )
    return tf.expand_dims(
        tf.compat.v1.nn.ctc_loss(
            inputs=y_pred, labels=sparse_labels, sequence_length=input_length
        ),
        1,
    )
@keras_export("keras._legacy.backend.ctc_label_dense_to_sparse")
def ctc_label_dense_to_sparse(labels, label_lengths):
    """DEPRECATED.

    Convert a dense `(batch, max_len)` label matrix into the
    `tf.SparseTensor` form consumed by `ctc_loss`, keeping only the first
    `label_lengths[i]` entries of each row.
    """
    label_shape = tf.shape(labels)
    num_batches_tns = tf.stack([label_shape[0]])
    max_num_labels_tns = tf.stack([label_shape[1]])

    def range_less_than(old_input, current_input):
        # Row mask: positions < current row's label length are valid.
        return tf.expand_dims(tf.range(tf.shape(old_input)[1]), 0) < tf.fill(
            max_num_labels_tns, current_input
        )

    init = tf.cast(tf.fill([1, label_shape[1]], 0), tf.bool)
    # Build one boolean mask row per batch element via scan over lengths.
    dense_mask = tf.compat.v1.scan(
        range_less_than, label_lengths, initializer=init, parallel_iterations=1
    )
    dense_mask = dense_mask[:, 0, :]
    # Column indices (position within each label sequence).
    label_array = tf.reshape(
        tf.tile(tf.range(0, label_shape[1]), num_batches_tns), label_shape
    )
    label_ind = tf.compat.v1.boolean_mask(label_array, dense_mask)
    # Row indices (batch id), laid out to align with the same mask.
    batch_array = tf.transpose(
        tf.reshape(
            tf.tile(tf.range(0, label_shape[0]), max_num_labels_tns),
            reverse(label_shape, 0),
        )
    )
    batch_ind = tf.compat.v1.boolean_mask(batch_array, dense_mask)
    indices = tf.transpose(
        tf.reshape(concatenate([batch_ind, label_ind], axis=0), [2, -1])
    )
    vals_sparse = tf.compat.v1.gather_nd(labels, indices)
    return tf.SparseTensor(
        tf.cast(indices, tf.int64), vals_sparse, tf.cast(label_shape, tf.int64)
    )
@keras_export("keras._legacy.backend.ctc_decode")
def ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1):
    """DEPRECATED.

    Decode CTC output: greedy (best path) or beam search. `y_pred` is
    converted to time-major log-probabilities first. Each sparse decode
    result is densified to `(num_samples, num_steps)` with `-1` padding.
    Returns `(list_of_dense_decoded, log_prob)`.
    """
    input_shape = tf.shape(y_pred)
    num_samples, num_steps = input_shape[0], input_shape[1]
    y_pred = tf.math.log(
        tf.transpose(y_pred, perm=[1, 0, 2]) + backend.epsilon()
    )
    input_length = tf.cast(input_length, tf.int32)
    if greedy:
        (decoded, log_prob) = tf.nn.ctc_greedy_decoder(
            inputs=y_pred, sequence_length=input_length
        )
    else:
        (decoded, log_prob) = tf.compat.v1.nn.ctc_beam_search_decoder(
            inputs=y_pred,
            sequence_length=input_length,
            beam_width=beam_width,
            top_paths=top_paths,
        )
    decoded_dense = []
    for st in decoded:
        st = tf.SparseTensor(st.indices, st.values, (num_samples, num_steps))
        decoded_dense.append(tf.sparse.to_dense(sp_input=st, default_value=-1))
    return (decoded_dense, log_prob)
@keras_export("keras._legacy.backend.cumsum")
def cumsum(x, axis=0):
    """DEPRECATED.

    Cumulative sum along `axis`; thin wrapper over `tf.cumsum`.
    """
    return tf.cumsum(x, axis=axis)
@keras_export("keras._legacy.backend.cumprod")
def cumprod(x, axis=0):
    """DEPRECATED.

    Cumulative product along `axis`; thin wrapper over `tf.math.cumprod`.
    """
    return tf.math.cumprod(x, axis=axis)
@keras_export("keras._legacy.backend.depthwise_conv2d")
def depthwise_conv2d(
    x,
    depthwise_kernel,
    strides=(1, 1),
    padding="valid",
    data_format=None,
    dilation_rate=(1, 1),
):
    """DEPRECATED.

    Depthwise 2D convolution. Strides are expanded to the 4-element form
    expected by `tf.nn.depthwise_conv2d` depending on the internal
    layout; channels-first output is transposed back from NHWC if needed.
    """
    if data_format is None:
        data_format = backend.image_data_format()
    if data_format not in {"channels_first", "channels_last"}:
        raise ValueError(f"Unknown data_format: {data_format}")
    x, tf_data_format = _preprocess_conv2d_input(x, data_format)
    padding = _preprocess_padding(padding)
    if tf_data_format == "NHWC":
        strides = (1,) + strides + (1,)
    else:
        strides = (1, 1) + strides
    x = tf.nn.depthwise_conv2d(
        x,
        depthwise_kernel,
        strides=strides,
        padding=padding,
        dilations=dilation_rate,
        data_format=tf_data_format,
    )
    if data_format == "channels_first" and tf_data_format == "NHWC":
        x = tf.transpose(x, (0, 3, 1, 2))  # NHWC -> NCHW
    return x
@keras_export("keras._legacy.backend.dot")
def dot(x, y):
    """DEPRECATED.

    Tensor dot product. For rank > 2 operands, contracts the last axis of
    `x` with the second-to-last axis of `y` (theano-style `dot`) by
    flattening both to matrices, matmul-ing, and reshaping back. Static
    dims are kept where known, dynamic dims taken from `tf.shape`.
    Sparse `x` uses `sparse_dense_matmul`.
    """
    if ndim(x) is not None and (ndim(x) > 2 or ndim(y) > 2):
        # Mixed static/dynamic shape: prefer the known int, else the
        # runtime dimension.
        x_shape = []
        for i, s in zip(x.shape, tf.unstack(tf.shape(x))):
            if i is not None:
                x_shape.append(i)
            else:
                x_shape.append(s)
        x_shape = tuple(x_shape)
        y_shape = []
        for i, s in zip(y.shape, tf.unstack(tf.shape(y))):
            if i is not None:
                y_shape.append(i)
            else:
                y_shape.append(s)
        y_shape = tuple(y_shape)
        # Move y's contraction axis (second-to-last) to the front.
        y_permute_dim = list(range(ndim(y)))
        y_permute_dim = [y_permute_dim.pop(-2)] + y_permute_dim
        xt = tf.reshape(x, [-1, x_shape[-1]])
        yt = tf.reshape(tf.transpose(y, perm=y_permute_dim), [y_shape[-2], -1])
        return tf.reshape(
            tf.matmul(xt, yt), x_shape[:-1] + y_shape[:-2] + y_shape[-1:]
        )
    if is_sparse(x):
        out = tf.sparse.sparse_dense_matmul(x, y)
    else:
        out = tf.matmul(x, y)
    return out
@keras_export("keras._legacy.backend.dropout")
def dropout(x, level, noise_shape=None, seed=None):
    """DEPRECATED.

    Randomly drop a fraction `level` of entries of `x` (the kept entries
    are scaled by `1 / (1 - level)`, per `tf.nn.dropout`).

    Args:
        x: Input tensor.
        level: Fraction of entries to drop, in `[0, 1)`.
        noise_shape: Optional shape of the keep/drop mask, broadcastable
            to the shape of `x`.
        seed: Optional integer seed; a random one is drawn when omitted.
    """
    if seed is None:
        # Use an integer bound: `10e6` is a float, and passing floats to
        # `np.random.randint` relies on deprecated implicit truncation.
        seed = np.random.randint(10000000)
    return tf.nn.dropout(x, rate=level, noise_shape=noise_shape, seed=seed)
@keras_export("keras._legacy.backend.dtype")
def dtype(x):
    """DEPRECATED.

    Dtype name of `x` as a string (reference variants are collapsed via
    `base_dtype`).
    """
    return x.dtype.base_dtype.name
@keras_export("keras._legacy.backend.elu")
def elu(x, alpha=1.0):
    """DEPRECATED.

    Exponential linear unit. For `alpha != 1`, the negative branch of the
    standard ELU is rescaled by `alpha` (positive values pass through).
    """
    res = tf.nn.elu(x)
    if alpha == 1:
        return res
    else:
        return tf.where(x > 0, res, alpha * res)
@keras_export("keras._legacy.backend.equal")
def equal(x, y):
    """DEPRECATED.

    Element-wise equality; thin wrapper over `tf.equal`.
    """
    return tf.equal(x, y)
@keras_export("keras._legacy.backend.eval")
def eval(x):
    """DEPRECATED.

    Evaluate `x` to a numpy value (densifying sparse tensors first).
    NOTE: the name shadows the `eval` builtin, but is kept for legacy
    API compatibility.
    """
    return get_value(to_dense(x))
@keras_export("keras._legacy.backend.exp")
def exp(x):
    """DEPRECATED.

    Element-wise exponential; thin wrapper over `tf.exp`.
    """
    return tf.exp(x)
@keras_export("keras._legacy.backend.expand_dims")
def expand_dims(x, axis=-1):
    """DEPRECATED.

    Insert a size-1 axis at `axis`; thin wrapper over `tf.expand_dims`.
    """
    return tf.expand_dims(x, axis)
@keras_export("keras._legacy.backend.eye")
def eye(size, dtype=None, name=None):
    """DEPRECATED.

    Identity matrix of side `size`, wrapped in a Keras variable;
    `dtype` defaults to the Keras floatx.
    """
    if dtype is None:
        dtype = backend.floatx()
    tf_dtype = tf.as_dtype(dtype)
    return variable(tf.eye(size, dtype=tf_dtype), dtype, name)
@keras_export("keras._legacy.backend.flatten")
def flatten(x):
    """DEPRECATED.

    Reshape `x` to a 1D tensor.
    """
    return tf.reshape(x, [-1])
@keras_export("keras._legacy.backend.foldl")
def foldl(fn, elems, initializer=None, name=None):
    """DEPRECATED.

    Left fold of `fn` over the first axis of `elems`.
    """
    return tf.compat.v1.foldl(fn, elems, initializer=initializer, name=name)
@keras_export("keras._legacy.backend.foldr")
def foldr(fn, elems, initializer=None, name=None):
    """DEPRECATED.

    Right fold of `fn` over the first axis of `elems`.
    """
    return tf.compat.v1.foldr(fn, elems, initializer=initializer, name=name)
@keras_export("keras._legacy.backend.gather")
def gather(reference, indices):
    """DEPRECATED.

    Gather slices of `reference` at `indices` along axis 0.
    """
    return tf.compat.v1.gather(reference, indices)
@keras_export("keras._legacy.backend.get_value")
def get_value(x):
    """DEPRECATED.

    Fetch the value of `x` as a numpy array. Non-tensors are returned
    unchanged; eager tensors are converted directly; graph-mode cases
    hop into the appropriate eager/init scope before calling `.numpy()`.
    """
    if not tf.is_tensor(x):
        return x
    if tf.executing_eagerly() or isinstance(x, tf.__internal__.EagerTensor):
        return x.numpy()
    if not getattr(x, "_in_graph_mode", True):
        # This is a variable which was created in an eager context, but is being
        # evaluated from a Graph.
        with tf.__internal__.eager_context.eager_mode():
            return x.numpy()
    with tf.init_scope():
        return x.numpy()
@keras_export("keras._legacy.backend.gradients")
def gradients(loss, variables):
    """DEPRECATED.

    Symbolic gradients of `loss` w.r.t. `variables` (graph mode,
    gradients colocated with their forward ops).
    """
    return tf.compat.v1.gradients(
        loss, variables, colocate_gradients_with_ops=True
    )
@keras_export("keras._legacy.backend.greater")
def greater(x, y):
    """DEPRECATED.

    Element-wise `x > y`; thin wrapper over `tf.greater`.
    """
    return tf.greater(x, y)
@keras_export("keras._legacy.backend.greater_equal")
def greater_equal(x, y):
    """DEPRECATED.

    Element-wise `x >= y`; thin wrapper over `tf.greater_equal`.
    """
    return tf.greater_equal(x, y)
@keras_export("keras._legacy.backend.hard_sigmoid")
def hard_sigmoid(x):
    """DEPRECATED.

    Piecewise-linear sigmoid approximation:
    `clip(0.2 * x + 0.5, 0, 1)`.
    """
    point_two = tf.convert_to_tensor(0.2, dtype=x.dtype)
    point_five = tf.convert_to_tensor(0.5, dtype=x.dtype)
    x = tf.multiply(x, point_two)
    x = tf.add(x, point_five)
    x = tf.clip_by_value(x, 0.0, 1.0)
    return x
@keras_export("keras._legacy.backend.in_top_k")
def in_top_k(predictions, targets, k):
    """DEPRECATED.

    Whether each target is within the top-`k` predictions.
    """
    return tf.compat.v1.math.in_top_k(predictions, targets, k)
@keras_export("keras._legacy.backend.int_shape")
def int_shape(x):
"""DEPRECATED."""
try:
shape = x.shape
if not isinstance(shape, tuple):
shape = tuple(shape.as_list())
return shape
except ValueError:
return None
@keras_export("keras._legacy.backend.is_sparse")
def is_sparse(tensor):
    """DEPRECATED.

    True if `tensor` is a `tf.SparseTensor` (checked via its type spec
    when available, so symbolic/composite values are handled too).
    """
    spec = getattr(tensor, "_type_spec", None)
    if spec is not None:
        return isinstance(spec, tf.SparseTensorSpec)
    return isinstance(tensor, tf.SparseTensor)
@keras_export("keras._legacy.backend.l2_normalize")
def l2_normalize(x, axis=None):
    """DEPRECATED.

    L2-normalize `x` along `axis`.
    """
    return tf.linalg.l2_normalize(x, axis=axis)
@keras_export("keras._legacy.backend.less")
def less(x, y):
    """DEPRECATED.

    Element-wise `x < y`; thin wrapper over `tf.less`.
    """
    return tf.less(x, y)
@keras_export("keras._legacy.backend.less_equal")
def less_equal(x, y):
    """DEPRECATED.

    Element-wise `x <= y`; thin wrapper over `tf.less_equal`.
    """
    return tf.less_equal(x, y)
@keras_export("keras._legacy.backend.log")
def log(x):
    """DEPRECATED.

    Element-wise natural logarithm; thin wrapper over `tf.math.log`.
    """
    return tf.math.log(x)
@keras_export("keras._legacy.backend.map_fn")
def map_fn(fn, elems, name=None, dtype=None):
    """DEPRECATED.

    Map `fn` over the first axis of `elems`.
    """
    return tf.compat.v1.map_fn(fn, elems, name=name, dtype=dtype)
@keras_export("keras._legacy.backend.max")
def max(x, axis=None, keepdims=False):
    """DEPRECATED.

    Maximum over `axis`. NOTE: the name shadows the `max` builtin, kept
    for legacy API compatibility.
    """
    return tf.reduce_max(x, axis, keepdims)
@keras_export("keras._legacy.backend.maximum")
def maximum(x, y):
    """DEPRECATED.

    Element-wise maximum; thin wrapper over `tf.maximum`.
    """
    return tf.maximum(x, y)
@keras_export("keras._legacy.backend.mean")
def mean(x, axis=None, keepdims=False):
    """DEPRECATED.

    Mean over `axis`; boolean inputs are cast to floatx first so the
    result is a float fraction rather than a boolean reduction.
    """
    if x.dtype.base_dtype == tf.bool:
        x = tf.cast(x, backend.floatx())
    return tf.reduce_mean(x, axis, keepdims)
@keras_export("keras._legacy.backend.min")
def min(x, axis=None, keepdims=False):
    """DEPRECATED.

    Minimum over `axis`. NOTE: the name shadows the `min` builtin, kept
    for legacy API compatibility.
    """
    return tf.reduce_min(x, axis, keepdims)
@keras_export("keras._legacy.backend.minimum")
def minimum(x, y):
    """DEPRECATED.

    Element-wise minimum; thin wrapper over `tf.minimum`.
    """
    return tf.minimum(x, y)
@keras_export("keras._legacy.backend.moving_average_update")
def moving_average_update(x, value, momentum):
    """DEPRECATED.

    In-place exponential moving average:
    `x -= (x - value) * (1 - momentum)` (i.e. `x = momentum * x +
    (1 - momentum) * value`). Mutates the variable `x`.
    """
    momentum = tf.cast(momentum, x.dtype)
    value = tf.cast(value, x.dtype)
    return x.assign_sub((x - value) * (1 - momentum))
@keras_export("keras._legacy.backend.name_scope")
def name_scope(name):
    """DEPRECATED.

    Context manager prefixing op names with `name`.
    """
    return tf.name_scope(name)
@keras_export("keras._legacy.backend.ndim")
def ndim(x):
    """DEPRECATED.

    Static rank of `x` (may be `None` if unknown).
    """
    return x.shape.rank
@keras_export("keras._legacy.backend.not_equal")
def not_equal(x, y):
    """DEPRECATED.

    Element-wise inequality; thin wrapper over `tf.not_equal`.
    """
    return tf.not_equal(x, y)
@keras_export("keras._legacy.backend.one_hot")
def one_hot(indices, num_classes):
    """DEPRECATED.

    One-hot encode `indices` into `num_classes` classes on a new last
    axis.
    """
    return tf.one_hot(indices, depth=num_classes, axis=-1)
@keras_export("keras._legacy.backend.ones")
def ones(shape, dtype=None, name=None):
    """DEPRECATED.

    All-ones tensor of `shape`; wrapped in a Keras variable only when
    the shape is fully defined (no `None`/0 dims after `as_list()`),
    otherwise the plain tensor is returned.
    """
    with tf.init_scope():
        if dtype is None:
            dtype = backend.floatx()
        tf_dtype = tf.as_dtype(dtype)
        v = tf.ones(shape=shape, dtype=tf_dtype, name=name)
        if py_all(v.shape.as_list()):
            return variable(v, dtype=dtype, name=name)
        return v
@keras_export("keras._legacy.backend.ones_like")
def ones_like(x, dtype=None, name=None):
    """DEPRECATED.

    All-ones tensor with the same shape (and, by default, dtype) as `x`.
    """
    return tf.ones_like(x, dtype=dtype, name=name)
@keras_export("keras._legacy.backend.permute_dimensions")
def permute_dimensions(x, pattern):
    """DEPRECATED.

    Transpose `x` according to the axis permutation `pattern`.
    """
    return tf.transpose(x, perm=pattern)
@keras_export("keras._legacy.backend.pool2d")
def pool2d(
    x,
    pool_size,
    strides=(1, 1),
    padding="valid",
    data_format=None,
    pool_mode="max",
):
    """DEPRECATED.

    2D max/average pooling. `pool_size` and `strides` are expanded to
    the 4-element NHWC/NCHW form the TF ops expect; channels-first
    output is transposed back from NHWC if needed.

    Raises:
        ValueError: on unknown data_format, wrong-length pool_size or
            strides, or an invalid pool_mode.
    """
    if data_format is None:
        data_format = backend.image_data_format()
    if data_format not in {"channels_first", "channels_last"}:
        raise ValueError(f"Unknown data_format: {data_format}")
    if len(pool_size) != 2:
        raise ValueError("`pool_size` must be a tuple of 2 integers.")
    if len(strides) != 2:
        raise ValueError("`strides` must be a tuple of 2 integers.")
    x, tf_data_format = _preprocess_conv2d_input(x, data_format)
    padding = _preprocess_padding(padding)
    if tf_data_format == "NHWC":
        strides = (1,) + strides + (1,)
        pool_size = (1,) + pool_size + (1,)
    else:
        strides = (1, 1) + strides
        pool_size = (1, 1) + pool_size
    if pool_mode == "max":
        x = tf.compat.v1.nn.max_pool(
            x, pool_size, strides, padding=padding, data_format=tf_data_format
        )
    elif pool_mode == "avg":
        x = tf.compat.v1.nn.avg_pool(
            x, pool_size, strides, padding=padding, data_format=tf_data_format
        )
    else:
        raise ValueError(f"Invalid pooling mode: {str(pool_mode)}")
    if data_format == "channels_first" and tf_data_format == "NHWC":
        x = tf.transpose(x, (0, 3, 1, 2))  # NHWC -> NCHW
    return x
@keras_export("keras._legacy.backend.pool3d")
def pool3d(
x,
pool_size,
strides=(1, 1, 1),
padding="valid",
data_format=None,
pool_mode="max",
):
"""DEPRECATED."""
if data_format is None:
data_format = backend.image_data_format()
if data_format not in {"channels_first", "channels_last"}:
raise ValueError(f"Unknown data_format: {data_format}")
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/legacy/layers.py | keras/src/legacy/layers.py | """Legacy Keras 1/2 layers.
AlphaDropout
RandomHeight
RandomWidth
ThresholdedReLU
"""
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.layers.layer import Layer
from keras.src.utils.module_utils import tensorflow as tf
@keras_export("keras._legacy.layers.AlphaDropout")
class AlphaDropout(Layer):
    """DEPRECATED.

    Self-normalizing dropout (from the SELU paper): instead of zeroing
    dropped units, they are set to `alpha_p = -alpha * scale` and the
    output is affinely rescaled so activations keep their mean/variance.
    """

    def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
        super().__init__(**kwargs)
        # rate: fraction of units dropped during training.
        self.rate = rate
        self.seed = seed
        # noise_shape: optional static shape for the keep/drop mask;
        # defaults to the dynamic shape of the inputs.
        self.noise_shape = noise_shape
        self.seed_generator = backend.random.SeedGenerator(seed)
        self.supports_masking = True
        self.built = True

    def call(self, inputs, training=False):
        if training and self.rate > 0:
            # SELU constants.
            alpha = 1.6732632423543772848170429916717
            scale = 1.0507009873554804934193349852946
            alpha_p = -alpha * scale
            if self.noise_shape is None:
                noise_shape = tf.shape(inputs)
            else:
                noise_shape = self.noise_shape
            # kept_idx is 1.0 where the unit is kept, 0.0 where dropped.
            kept_idx = tf.greater_equal(
                backend.random.uniform(noise_shape, seed=self.seed_generator),
                self.rate,
            )
            kept_idx = tf.cast(kept_idx, inputs.dtype)
            # Get affine transformation params
            a = ((1 - self.rate) * (1 + self.rate * alpha_p**2)) ** -0.5
            b = -a * alpha_p * self.rate
            # Apply mask
            x = inputs * kept_idx + alpha_p * (1 - kept_idx)
            # Do affine transformation
            return a * x + b
        return inputs

    def get_config(self):
        config = {"rate": self.rate, "seed": self.seed}
        base_config = super().get_config()
        return {**base_config, **config}

    def compute_output_shape(self, input_shape):
        return input_shape
@keras_export("keras._legacy.layers.RandomHeight")
class RandomHeight(Layer):
    """DEPRECATED.

    During training, rescales image height by a random factor drawn from
    `[1 + height_lower, 1 + height_upper]` (width is unchanged). At
    inference time inputs pass through untouched.
    """

    def __init__(self, factor, interpolation="bilinear", seed=None, **kwargs):
        super().__init__(**kwargs)
        self.seed_generator = backend.random.SeedGenerator(seed)
        self.factor = factor
        if isinstance(factor, (tuple, list)):
            self.height_lower = factor[0]
            self.height_upper = factor[1]
        else:
            # A scalar factor means a symmetric range [-factor, factor].
            self.height_lower = -factor
            self.height_upper = factor
        if self.height_upper < self.height_lower:
            raise ValueError(
                "`factor` argument cannot have an upper bound lesser than the "
                f"lower bound. Received: factor={factor}"
            )
        if self.height_lower < -1.0 or self.height_upper < -1.0:
            raise ValueError(
                "`factor` argument must have values larger than -1. "
                f"Received: factor={factor}"
            )
        self.interpolation = interpolation
        self.seed = seed

    def call(self, inputs, training=True):
        inputs = tf.convert_to_tensor(inputs, dtype=self.compute_dtype)

        def random_height_inputs(inputs):
            """Inputs height-adjusted with random ops."""
            inputs_shape = tf.shape(inputs)
            img_hd = tf.cast(inputs_shape[-3], tf.float32)
            img_wd = inputs_shape[-2]
            height_factor = backend.random.uniform(
                shape=[],
                minval=(1.0 + self.height_lower),
                maxval=(1.0 + self.height_upper),
                seed=self.seed_generator,
            )
            adjusted_height = tf.cast(height_factor * img_hd, tf.int32)
            adjusted_size = tf.stack([adjusted_height, img_wd])
            output = tf.image.resize(
                images=inputs,
                size=adjusted_size,
                method=self.interpolation,
            )
            # tf.resize will output float32 regardless of input type.
            output = tf.cast(output, self.compute_dtype)
            output_shape = inputs.shape.as_list()
            # Height becomes dynamic after the random resize.
            output_shape[-3] = None
            output.set_shape(output_shape)
            return output

        if training:
            return random_height_inputs(inputs)
        else:
            return inputs

    def compute_output_shape(self, input_shape):
        input_shape = list(input_shape)
        input_shape[-3] = None
        return tuple(input_shape)

    def get_config(self):
        config = {
            "factor": self.factor,
            "interpolation": self.interpolation,
            "seed": self.seed,
        }
        base_config = super().get_config()
        return {**base_config, **config}
@keras_export("keras._legacy.layers.RandomWidth")
class RandomWidth(Layer):
    """DEPRECATED.

    During training, rescales image width by a random factor drawn from
    `[1 + width_lower, 1 + width_upper]` (height is unchanged). At
    inference time inputs pass through untouched. Mirrors `RandomHeight`
    with the roles of the height/width axes swapped.
    """

    def __init__(self, factor, interpolation="bilinear", seed=None, **kwargs):
        super().__init__(**kwargs)
        self.seed_generator = backend.random.SeedGenerator(seed)
        self.factor = factor
        if isinstance(factor, (tuple, list)):
            self.width_lower = factor[0]
            self.width_upper = factor[1]
        else:
            # A scalar factor means a symmetric range [-factor, factor].
            self.width_lower = -factor
            self.width_upper = factor
        if self.width_upper < self.width_lower:
            raise ValueError(
                "`factor` argument cannot have an upper bound less than the "
                f"lower bound. Received: factor={factor}"
            )
        if self.width_lower < -1.0 or self.width_upper < -1.0:
            raise ValueError(
                "`factor` argument must have values larger than -1. "
                f"Received: factor={factor}"
            )
        self.interpolation = interpolation
        self.seed = seed

    def call(self, inputs, training=True):
        inputs = tf.convert_to_tensor(inputs, dtype=self.compute_dtype)

        def random_width_inputs(inputs):
            """Inputs width-adjusted with random ops."""
            inputs_shape = tf.shape(inputs)
            img_hd = inputs_shape[-3]
            img_wd = tf.cast(inputs_shape[-2], tf.float32)
            width_factor = backend.random.uniform(
                shape=[],
                minval=(1.0 + self.width_lower),
                maxval=(1.0 + self.width_upper),
                seed=self.seed_generator,
            )
            adjusted_width = tf.cast(width_factor * img_wd, tf.int32)
            adjusted_size = tf.stack([img_hd, adjusted_width])
            output = tf.image.resize(
                images=inputs,
                size=adjusted_size,
                method=self.interpolation,
            )
            # tf.resize will output float32 regardless of input type.
            output = tf.cast(output, self.compute_dtype)
            output_shape = inputs.shape.as_list()
            # Width becomes dynamic after the random resize.
            output_shape[-2] = None
            output.set_shape(output_shape)
            return output

        if training:
            return random_width_inputs(inputs)
        else:
            return inputs

    def compute_output_shape(self, input_shape):
        input_shape = list(input_shape)
        input_shape[-2] = None
        return tuple(input_shape)

    def get_config(self):
        config = {
            "factor": self.factor,
            "interpolation": self.interpolation,
            "seed": self.seed,
        }
        base_config = super().get_config()
        return {**base_config, **config}
@keras_export("keras._legacy.layers.ThresholdedReLU")
class ThresholdedReLU(Layer):
    """DEPRECATED.

    Thresholded ReLU: `f(x) = x` for `x > theta`, else `0`.
    """

    def __init__(self, theta=1.0, **kwargs):
        super().__init__(**kwargs)
        if theta is None:
            raise ValueError(
                "Theta of a Thresholded ReLU layer cannot be None, expecting a "
                f"float. Received: {theta}"
            )
        if theta < 0:
            raise ValueError(
                "The theta value of a Thresholded ReLU layer "
                f"should be >=0. Received: {theta}"
            )
        self.supports_masking = True
        # Stored as a tensor; `get_config` converts it back via float().
        self.theta = tf.convert_to_tensor(theta, dtype=self.compute_dtype)

    def call(self, inputs):
        dtype = self.compute_dtype
        # Boolean mask cast to the compute dtype zeroes values <= theta.
        return inputs * tf.cast(tf.greater(inputs, self.theta), dtype)

    def get_config(self):
        config = {"theta": float(self.theta)}
        base_config = super().get_config()
        return {**base_config, **config}

    def compute_output_shape(self, input_shape):
        return input_shape
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/legacy/saving/legacy_h5_format_test.py | keras/src/legacy/saving/legacy_h5_format_test.py | import os
import numpy as np
import pytest
import keras
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import testing
from keras.src.legacy.saving import legacy_h5_format
from keras.src.saving import object_registration
from keras.src.saving import serialization_lib
# TODO: more thorough testing. Correctness depends
# on exact weight ordering for each layer, so we need
# to test across all types of layers.
try:
    import tf_keras
except ImportError:
    # Optional dependency: tests needing tf_keras are skipped when it is
    # absent. Catch only ImportError - the previous bare `except:` would
    # also have masked unrelated failures raised during import.
    tf_keras = None
def get_sequential_model(keras):
    """Build a small Sequential model from the given `keras` module.

    Parameterized over the module (Keras 3 or tf_keras) so structurally
    identical models can be built in both frameworks for comparison.
    Non-default initializers ensure BatchNorm weights are distinguishable.
    """
    return keras.Sequential(
        [
            keras.layers.Input((3,), batch_size=2),
            keras.layers.Dense(4, activation="relu"),
            keras.layers.BatchNormalization(
                moving_mean_initializer="uniform", gamma_initializer="uniform"
            ),
            keras.layers.Dense(5, activation="softmax"),
        ]
    )
def get_functional_model(keras):
    """Build a small functional model (with a residual add) from the
    given `keras` module (Keras 3 or tf_keras)."""
    inputs = keras.Input((3,), batch_size=2)
    x = keras.layers.Dense(4, activation="relu")(inputs)
    residual = x
    x = keras.layers.BatchNormalization(
        moving_mean_initializer="uniform", gamma_initializer="uniform"
    )(x)
    x = keras.layers.Dense(4, activation="relu")(x)
    x = keras.layers.add([x, residual])
    outputs = keras.layers.Dense(5, activation="softmax")(x)
    return keras.Model(inputs, outputs)
def get_subclassed_model(keras):
    """Build (and trace once) a small subclassed model from the given
    `keras` module, including a top-level weight outside any sublayer."""

    class MyModel(keras.Model):
        def __init__(self, **kwargs):
            super().__init__(**kwargs)
            self.dense_1 = keras.layers.Dense(3, activation="relu")
            self.dense_2 = keras.layers.Dense(1, activation="sigmoid")
            # top_level_model_weights
            self.bias = self.add_weight(
                name="bias",
                shape=[1],
                trainable=True,
                initializer=keras.initializers.Zeros(),
            )

        def call(self, x):
            x = self.dense_1(x)
            x = self.dense_2(x)
            # top_level_model_weights
            x += ops.cast(self.bias, x.dtype)
            return x

    model = MyModel()
    # Call once so all weights get created before weights are loaded.
    model(np.random.random((2, 3)))
    return model
@pytest.mark.requires_trainable_backend
@pytest.mark.skipif(tf_keras is None, reason="Test requires tf_keras")
class LegacyH5WeightsTest(testing.TestCase):
    """Load weights saved by tf_keras (.h5) into equivalent Keras 3 models."""

    def _check_reloading_weights(self, ref_input, model, tf_keras_model):
        # The tf_keras model provides the reference output; after loading
        # its saved weights, the Keras 3 model must reproduce it.
        ref_output = tf_keras_model(ref_input)
        initial_weights = model.get_weights()
        # Check weights only file
        temp_filepath = os.path.join(self.get_temp_dir(), "weights.h5")
        tf_keras_model.save_weights(temp_filepath)
        model.load_weights(temp_filepath)
        output = model(ref_input)
        self.assertAllClose(ref_output, output, atol=1e-5)
        # Reset to the original weights and reload to check that loading
        # is repeatable.
        model.set_weights(initial_weights)
        model.load_weights(temp_filepath)
        output = model(ref_input)
        self.assertAllClose(ref_output, output, atol=1e-5)

    def test_sequential_model_weights(self):
        model = get_sequential_model(keras)
        tf_keras_model = get_sequential_model(tf_keras)
        ref_input = np.random.random((2, 3))
        self._check_reloading_weights(ref_input, model, tf_keras_model)

    def test_functional_model_weights(self):
        model = get_functional_model(keras)
        tf_keras_model = get_functional_model(tf_keras)
        ref_input = np.random.random((2, 3))
        self._check_reloading_weights(ref_input, model, tf_keras_model)

    def test_subclassed_model_weights(self):
        model = get_subclassed_model(keras)
        tf_keras_model = get_subclassed_model(tf_keras)
        ref_input = np.random.random((2, 3))
        self._check_reloading_weights(ref_input, model, tf_keras_model)
@pytest.mark.requires_trainable_backend
class LegacyH5WholeModelTest(testing.TestCase):
    """Round-trip whole Keras 3 models through the legacy HDF5 format."""

    def _check_reloading_model(self, ref_input, model):
        # Whole model file
        ref_output = model(ref_input)
        temp_filepath = os.path.join(self.get_temp_dir(), "model.h5")
        legacy_h5_format.save_model_to_hdf5(model, temp_filepath)
        loaded = legacy_h5_format.load_model_from_hdf5(temp_filepath)
        output = loaded(ref_input)
        self.assertAllClose(ref_output, output, atol=1e-5)

    def test_sequential_model(self):
        model = get_sequential_model(keras)
        ref_input = np.random.random((2, 3))
        self._check_reloading_model(ref_input, model)

    def test_functional_model(self):
        model = get_functional_model(keras)
        ref_input = np.random.random((2, 3))
        self._check_reloading_model(ref_input, model)

    def test_compiled_model_with_various_layers(self):
        model = models.Sequential()
        model.add(layers.Dense(2, input_shape=(3,)))
        model.add(layers.RepeatVector(3))
        model.add(layers.TimeDistributed(layers.Dense(3)))
        model.compile(optimizer="rmsprop", loss="mean_squared_error")
        ref_input = np.random.random((1, 3))
        self._check_reloading_model(ref_input, model)

    def test_saving_lambda(self):
        # Lambda layers serialize arbitrary code, so loading them must be
        # gated behind safe_mode=False.
        mean = ops.random.uniform((4, 2, 3))
        std = ops.abs(ops.random.uniform((4, 2, 3))) + 1e-5
        inputs = layers.Input(shape=(4, 2, 3))
        output = layers.Lambda(
            lambda image, mu, std: (image - mu) / std,
            arguments={"mu": mean, "std": std},
        )(inputs)
        model = models.Model(inputs, output)
        model.compile(
            loss="mean_squared_error", optimizer="sgd", metrics=["acc"]
        )
        temp_filepath = os.path.join(self.get_temp_dir(), "lambda_model.h5")
        legacy_h5_format.save_model_to_hdf5(model, temp_filepath)
        # Default (safe) loading must refuse to deserialize the lambda.
        with self.assertRaisesRegex(ValueError, "arbitrary code execution"):
            legacy_h5_format.load_model_from_hdf5(temp_filepath)
        loaded = legacy_h5_format.load_model_from_hdf5(
            temp_filepath, safe_mode=False
        )
        self.assertAllClose(mean, loaded.layers[1].arguments["mu"])
        self.assertAllClose(std, loaded.layers[1].arguments["std"])

    def test_saving_include_optimizer_false(self):
        model = models.Sequential()
        model.add(layers.Dense(1))
        model.compile("adam", loss="mean_squared_error")
        x, y = np.ones((10, 10)), np.ones((10, 1))
        model.fit(x, y)
        ref_output = model(x)
        temp_filepath = os.path.join(self.get_temp_dir(), "model.h5")
        legacy_h5_format.save_model_to_hdf5(
            model, temp_filepath, include_optimizer=False
        )
        loaded = legacy_h5_format.load_model_from_hdf5(temp_filepath)
        output = loaded(x)
        # Assert that optimizer does not exist in loaded model
        with self.assertRaises(AttributeError):
            _ = loaded.optimizer
        # Compare output
        self.assertAllClose(ref_output, output, atol=1e-5)

    def test_custom_sequential_registered_no_scope(self):
        # Registered custom layers should round-trip without an explicit
        # custom_objects scope.
        @object_registration.register_keras_serializable(package="my_package")
        class MyDense(layers.Dense):
            def __init__(self, units, **kwargs):
                super().__init__(units, **kwargs)

        inputs = layers.Input(shape=[1])
        custom_layer = MyDense(1)
        model = models.Sequential(layers=[inputs, custom_layer])
        ref_input = np.array([5])
        self._check_reloading_model(ref_input, model)

    def test_custom_functional_registered_no_scope(self):
        @object_registration.register_keras_serializable(package="my_package")
        class MyDense(layers.Dense):
            def __init__(self, units, **kwargs):
                super().__init__(units, **kwargs)

        inputs = layers.Input(shape=[1])
        outputs = MyDense(1)(inputs)
        model = models.Model(inputs, outputs)
        ref_input = np.array([5])
        self._check_reloading_model(ref_input, model)

    def test_nested_layers(self):
        # A custom layer holding a list of sublayers, serialized through
        # get_config/from_config; one sublayer is globally registered.
        class MyLayer(layers.Layer):
            def __init__(self, sublayers, **kwargs):
                super().__init__(**kwargs)
                self.sublayers = sublayers

            def call(self, x):
                prev_input = x
                for layer in self.sublayers:
                    prev_input = layer(prev_input)
                return prev_input

            def get_config(self):
                config = super().get_config()
                config["sublayers"] = serialization_lib.serialize_keras_object(
                    self.sublayers
                )
                return config

            @classmethod
            def from_config(cls, config):
                config["sublayers"] = (
                    serialization_lib.deserialize_keras_object(
                        config["sublayers"]
                    )
                )
                return cls(**config)

        @object_registration.register_keras_serializable(package="Foo")
        class RegisteredSubLayer(layers.Layer):
            pass

        layer = MyLayer(
            [
                layers.Dense(2, name="MyDense"),
                RegisteredSubLayer(name="MySubLayer"),
            ]
        )
        model = models.Sequential([layer])
        with self.subTest("test_JSON"):
            from keras.src.models.model import model_from_json

            model_json = model.to_json()
            # The registered layer must be referenced by its package name.
            self.assertIn("Foo>RegisteredSubLayer", model_json)
            loaded_model = model_from_json(
                model_json, custom_objects={"MyLayer": MyLayer}
            )
            loaded_layer = loaded_model.layers[0]
            self.assertIsInstance(loaded_layer.sublayers[0], layers.Dense)
            self.assertEqual(loaded_layer.sublayers[0].name, "MyDense")
            self.assertIsInstance(loaded_layer.sublayers[1], RegisteredSubLayer)
            self.assertEqual(loaded_layer.sublayers[1].name, "MySubLayer")
        with self.subTest("test_H5"):
            temp_filepath = os.path.join(self.get_temp_dir(), "model.h5")
            legacy_h5_format.save_model_to_hdf5(model, temp_filepath)
            loaded_model = legacy_h5_format.load_model_from_hdf5(
                temp_filepath, custom_objects={"MyLayer": MyLayer}
            )
            loaded_layer = loaded_model.layers[0]
            self.assertIsInstance(loaded_layer.sublayers[0], layers.Dense)
            self.assertEqual(loaded_layer.sublayers[0].name, "MyDense")
            self.assertIsInstance(loaded_layer.sublayers[1], RegisteredSubLayer)
            self.assertEqual(loaded_layer.sublayers[1].name, "MySubLayer")

    def test_model_loading_with_axis_arg(self):
        # Regression-style check: a Concatenate layer with an `axis`
        # argument must save and load without error.
        input1 = layers.Input(shape=(1, 4), name="input1")
        input2 = layers.Input(shape=(1, 4), name="input2")
        concat1 = layers.Concatenate(axis=1)([input1, input2])
        output = layers.Dense(1, activation="sigmoid")(concat1)
        model = models.Model(inputs=[input1, input2], outputs=output)
        model.compile(
            optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
        )
        temp_filepath = os.path.join(
            self.get_temp_dir(), "model_with_axis_arg.h5"
        )
        legacy_h5_format.save_model_to_hdf5(model, temp_filepath)
        legacy_h5_format.load_model_from_hdf5(temp_filepath)
@pytest.mark.requires_trainable_backend
@pytest.mark.skipif(tf_keras is None, reason="Test requires tf_keras")
class LegacyH5BackwardsCompatTest(testing.TestCase):
    """H5 files written by legacy `tf_keras` must load in Keras 3.

    Each test builds the same architecture in both `tf_keras` and Keras 3,
    saves the `tf_keras` model to an H5 file, reloads it via
    `legacy_h5_format.load_model_from_hdf5`, and compares outputs.
    """

    def _check_reloading_model(self, ref_input, model, tf_keras_model):
        """Save `tf_keras_model` to H5, reload in Keras 3, compare outputs."""
        # Whole model file
        ref_output = tf_keras_model(ref_input)
        temp_filepath = os.path.join(self.get_temp_dir(), "model.h5")
        tf_keras_model.save(temp_filepath)
        loaded = legacy_h5_format.load_model_from_hdf5(temp_filepath)
        output = loaded(ref_input)
        self.assertAllClose(ref_output, output, atol=1e-5)

    def test_sequential_model(self):
        model = get_sequential_model(keras)
        tf_keras_model = get_sequential_model(tf_keras)
        ref_input = np.random.random((2, 3))
        self._check_reloading_model(ref_input, model, tf_keras_model)

    def test_functional_model(self):
        tf_keras_model = get_functional_model(tf_keras)
        model = get_functional_model(keras)
        ref_input = np.random.random((2, 3))
        self._check_reloading_model(ref_input, model, tf_keras_model)

    def test_compiled_model_with_various_layers(self):
        """Compiled models with RepeatVector/TimeDistributed layers reload."""
        model = models.Sequential()
        model.add(layers.Dense(2, input_shape=(3,)))
        model.add(layers.RepeatVector(3))
        model.add(layers.TimeDistributed(layers.Dense(3)))
        model.compile(optimizer="rmsprop", loss="mse")

        tf_keras_model = tf_keras.Sequential()
        tf_keras_model.add(tf_keras.layers.Dense(2, input_shape=(3,)))
        tf_keras_model.add(tf_keras.layers.RepeatVector(3))
        tf_keras_model.add(
            tf_keras.layers.TimeDistributed(tf_keras.layers.Dense(3))
        )
        tf_keras_model.compile(optimizer="rmsprop", loss="mean_squared_error")

        ref_input = np.random.random((1, 3))
        self._check_reloading_model(ref_input, model, tf_keras_model)

    def test_saving_lambda(self):
        """Lambda layers require `safe_mode=False` to deserialize."""
        mean = np.random.random((4, 2, 3))
        std = np.abs(np.random.random((4, 2, 3))) + 1e-5
        inputs = tf_keras.layers.Input(shape=(4, 2, 3))
        output = tf_keras.layers.Lambda(
            lambda image, mu, std: (image - mu) / std,
            arguments={"mu": mean, "std": std},
            output_shape=inputs.shape,
        )(inputs)
        tf_keras_model = tf_keras.Model(inputs, output)
        tf_keras_model.compile(
            loss="mean_squared_error", optimizer="sgd", metrics=["acc"]
        )

        temp_filepath = os.path.join(self.get_temp_dir(), "lambda_model.h5")
        tf_keras_model.save(temp_filepath)

        # Default safe mode must refuse to deserialize the lambda.
        with self.assertRaisesRegex(ValueError, "arbitrary code execution"):
            legacy_h5_format.load_model_from_hdf5(temp_filepath)

        loaded = legacy_h5_format.load_model_from_hdf5(
            temp_filepath, safe_mode=False
        )
        self.assertAllClose(mean, loaded.layers[1].arguments["mu"])
        self.assertAllClose(std, loaded.layers[1].arguments["std"])

    def test_saving_include_optimizer_false(self):
        """Saving with `include_optimizer=False` drops optimizer state."""
        tf_keras_model = tf_keras.Sequential()
        tf_keras_model.add(tf_keras.layers.Dense(1))
        tf_keras_model.compile("adam", loss="mse")
        x, y = np.ones((10, 10)), np.ones((10, 1))
        tf_keras_model.fit(x, y)
        ref_output = tf_keras_model(x)

        temp_filepath = os.path.join(self.get_temp_dir(), "model.h5")
        tf_keras_model.save(temp_filepath, include_optimizer=False)
        loaded = legacy_h5_format.load_model_from_hdf5(temp_filepath)
        output = loaded(x)

        # Assert that optimizer does not exist in loaded model
        with self.assertRaises(AttributeError):
            _ = loaded.optimizer

        # Compare output
        self.assertAllClose(ref_output, output, atol=1e-5)

    def test_custom_sequential_registered_no_scope(self):
        """Registered custom layers resolve without `custom_objects`."""
        @tf_keras.saving.register_keras_serializable(package="my_package")
        class MyDense(tf_keras.layers.Dense):
            def __init__(self, units, **kwargs):
                super().__init__(units, **kwargs)

        inputs = tf_keras.layers.Input(shape=[1])
        custom_layer = MyDense(1)
        tf_keras_model = tf_keras.Sequential(layers=[inputs, custom_layer])

        # Re-implement and re-register in Keras 3
        @object_registration.register_keras_serializable(package="my_package")
        class MyDense(layers.Dense):
            def __init__(self, units, **kwargs):
                super().__init__(units, **kwargs)

        inputs = layers.Input(shape=[1])
        custom_layer = MyDense(1)
        model = models.Sequential(layers=[inputs, custom_layer])

        ref_input = np.array([5])
        self._check_reloading_model(ref_input, model, tf_keras_model)

    def test_custom_functional_registered_no_scope(self):
        """Same as above but for the functional-model code path."""
        @tf_keras.saving.register_keras_serializable(package="my_package")
        class MyDense(tf_keras.layers.Dense):
            def __init__(self, units, **kwargs):
                super().__init__(units, **kwargs)

        inputs = tf_keras.layers.Input(shape=[1])
        outputs = MyDense(1)(inputs)
        tf_keras_model = tf_keras.Model(inputs, outputs)

        # Re-implement and re-register in Keras 3
        @object_registration.register_keras_serializable(package="my_package")
        class MyDense(layers.Dense):
            def __init__(self, units, **kwargs):
                super().__init__(units, **kwargs)

        inputs = layers.Input(shape=[1])
        outputs = MyDense(1)(inputs)
        model = models.Model(inputs, outputs)

        ref_input = np.array([5])
        self._check_reloading_model(ref_input, model, tf_keras_model)

    def test_nested_layers(self):
        """Custom layer with manually serialized sublayers loads from H5."""
        class MyLayer(tf_keras.layers.Layer):
            def __init__(self, sublayers, **kwargs):
                super().__init__(**kwargs)
                self.sublayers = sublayers

            def call(self, x):
                prev_input = x
                for layer in self.sublayers:
                    prev_input = layer(prev_input)
                return prev_input

            def get_config(self):
                config = super().get_config()
                config["sublayers"] = tf_keras.saving.serialize_keras_object(
                    self.sublayers
                )
                return config

            @classmethod
            def from_config(cls, config):
                config["sublayers"] = tf_keras.saving.deserialize_keras_object(
                    config["sublayers"]
                )
                return cls(**config)

        @tf_keras.saving.register_keras_serializable(package="Foo")
        class RegisteredSubLayer(layers.Layer):
            def call(self, x):
                return x

        layer = MyLayer(
            [
                tf_keras.layers.Dense(2, name="MyDense"),
                RegisteredSubLayer(name="MySubLayer"),
            ]
        )
        tf_keras_model = tf_keras.Sequential([layer])

        x = np.random.random((4, 2))
        ref_output = tf_keras_model(x)

        # Save TF Keras model to H5 file
        temp_filepath = os.path.join(self.get_temp_dir(), "model.h5")
        tf_keras_model.save(temp_filepath)

        # Re-implement in Keras 3
        class MyLayer(layers.Layer):
            def __init__(self, sublayers, **kwargs):
                super().__init__(**kwargs)
                self.sublayers = sublayers

            def call(self, x):
                prev_input = x
                for layer in self.sublayers:
                    prev_input = layer(prev_input)
                return prev_input

            def get_config(self):
                config = super().get_config()
                config["sublayers"] = serialization_lib.serialize_keras_object(
                    self.sublayers
                )
                return config

            @classmethod
            def from_config(cls, config):
                config["sublayers"] = (
                    serialization_lib.deserialize_keras_object(
                        config["sublayers"]
                    )
                )
                return cls(**config)

        # Re-implement and re-register in Keras 3
        @object_registration.register_keras_serializable(package="Foo")
        class RegisteredSubLayer(layers.Layer):
            def call(self, x):
                return x

        # Load in Keras 3
        loaded_model = legacy_h5_format.load_model_from_hdf5(
            temp_filepath, custom_objects={"MyLayer": MyLayer}
        )
        loaded_layer = loaded_model.layers[0]
        output = loaded_model(x)

        # Ensure nested layer structure
        self.assertIsInstance(loaded_layer.sublayers[0], layers.Dense)
        self.assertEqual(loaded_layer.sublayers[0].name, "MyDense")
        self.assertIsInstance(loaded_layer.sublayers[1], RegisteredSubLayer)
        self.assertEqual(loaded_layer.sublayers[1].name, "MySubLayer")

        # Compare output
        self.assertAllClose(ref_output, output, atol=1e-5)
@pytest.mark.requires_trainable_backend
class DirectoryCreationTest(testing.TestCase):
    """Saving to a path under missing directories must create them."""

    def test_directory_creation_on_save(self):
        """Test if directory is created on model save."""
        model = get_sequential_model(keras)
        target_dir = os.path.join(self.get_temp_dir(), "dir1", "dir2", "dir3")
        h5_path = os.path.join(target_dir, "model.h5")
        # Precondition: the nested directory chain does not exist yet.
        self.assertFalse(os.path.exists(target_dir))
        legacy_h5_format.save_model_to_hdf5(model, h5_path)
        self.assertTrue(os.path.exists(target_dir))
        # Round-trip: the reloaded model's config matches the original's.
        reloaded = legacy_h5_format.load_model_from_hdf5(h5_path)
        self.assertEqual(model.to_json(), reloaded.to_json())
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/legacy/saving/serialization.py | keras/src/legacy/saving/serialization.py | """Legacy serialization logic for Keras models."""
import contextlib
import inspect
import threading
import weakref
# isort: off
from keras.src.api_export import keras_export
from keras.src.saving import object_registration
# Flag that determines whether to skip the NotImplementedError when calling
# get_config in custom models and layers. This is only enabled when saving to
# SavedModel, when the config isn't required.
_SKIP_FAILED_SERIALIZATION = False
# If a layer does not have a defined config, then the returned config will be a
# dictionary with the below key.
_LAYER_UNDEFINED_CONFIG_KEY = "layer was saved without config"
# Store a unique, per-object ID for shared objects.
#
# We store a unique ID for each object so that we may, at loading time,
# re-create the network properly. Without this ID, we would have no way of
# determining whether a config is a description of a new object that
# should be created or is merely a reference to an already-created object.
SHARED_OBJECT_KEY = "shared_object_id"
SHARED_OBJECT_DISABLED = threading.local()
SHARED_OBJECT_LOADING = threading.local()
SHARED_OBJECT_SAVING = threading.local()
# Attributes on the threadlocal variable must be set per-thread, thus we
# cannot initialize these globally. Instead, we have accessor functions with
# default values.
def _shared_object_disabled():
    """Return the thread-local flag disabling shared object handling.

    Defaults to `False` when the current thread never set the flag.
    """
    flag = getattr(SHARED_OBJECT_DISABLED, "disabled", False)
    return flag
def _shared_object_loading_scope():
    """Return this thread's active loading scope (no-op scope by default)."""
    fallback = NoopLoadingScope()
    return getattr(SHARED_OBJECT_LOADING, "scope", fallback)
def _shared_object_saving_scope():
    """Return this thread's active saving scope, or `None` if unset."""
    if hasattr(SHARED_OBJECT_SAVING, "scope"):
        return SHARED_OBJECT_SAVING.scope
    return None
class DisableSharedObjectScope:
    """A context manager for disabling handling of shared objects.

    Disables shared object handling for both saving and loading.

    Created primarily for use with `clone_model`, which does extra surgery that
    is incompatible with shared objects.
    """

    def __enter__(self):
        SHARED_OBJECT_DISABLED.disabled = True
        # Remember the scopes active at entry so they can be restored on
        # exit, regardless of what happened while disabled.
        self._orig_loading_scope = _shared_object_loading_scope()
        self._orig_saving_scope = _shared_object_saving_scope()

    def __exit__(self, *args, **kwargs):
        # Re-enable shared object handling and restore the saved scopes.
        SHARED_OBJECT_DISABLED.disabled = False
        SHARED_OBJECT_LOADING.scope = self._orig_loading_scope
        SHARED_OBJECT_SAVING.scope = self._orig_saving_scope
class NoopLoadingScope:
    """Default shared-object loading scope; it tracks nothing.

    Lets serialization code that does not care about shared objects
    (e.g. when serializing a single object) use a uniform scope API.
    """

    def get(self, unused_object_id):
        # Nothing is ever recorded, so every lookup misses.
        return None

    def set(self, object_id, obj):
        # Intentionally a no-op: nothing is stored.
        return None
class SharedObjectLoadingScope:
    """A context manager for keeping track of loaded objects.

    During the deserialization process, we may come across objects that are
    shared across multiple layers. In order to accurately restore the network
    structure to its original state, `SharedObjectLoadingScope` allows us to
    re-use shared objects rather than cloning them.
    """

    def __enter__(self):
        # When shared-object handling is globally disabled, hand back a
        # no-op scope instead of installing this one.
        if _shared_object_disabled():
            return NoopLoadingScope()
        global SHARED_OBJECT_LOADING
        SHARED_OBJECT_LOADING.scope = self
        # Maps shared object IDs -> already-instantiated objects.
        self._obj_ids_to_obj = {}
        return self

    def get(self, object_id):
        """Given a shared object ID, returns a previously instantiated object.

        Args:
            object_id: shared object ID to use when attempting to find
                already-loaded object.

        Returns:
            The object, if we've seen this ID before. Else, `None`.
        """
        # Explicitly check for `None` internally to make external calling code a
        # bit cleaner.
        if object_id is None:
            return
        return self._obj_ids_to_obj.get(object_id)

    def set(self, object_id, obj):
        """Stores an instantiated object for future lookup and sharing."""
        if object_id is None:
            return
        self._obj_ids_to_obj[object_id] = obj

    def __exit__(self, *args, **kwargs):
        # Reinstall the default no-op scope on exit.
        global SHARED_OBJECT_LOADING
        SHARED_OBJECT_LOADING.scope = NoopLoadingScope()
class SharedObjectConfig(dict):
    """A configuration container that keeps track of references.

    `SharedObjectConfig` automatically attaches a shared object ID to any
    config that is referenced more than once, allowing proper shared object
    reconstruction at load time.

    Subclassing `dict` directly (rather than `collections.UserDict` or
    `collections.abc.Mapping`) is deliberate: Python's json encoder does not
    support `Mapping`s, and JSON serialization must keep working. No core
    `dict` methods are overridden; we only add reference counting.
    """

    def __init__(self, base_config, object_id, **kwargs):
        super().__init__(base_config, **kwargs)
        self.ref_count = 1
        self.object_id = object_id

    def increment_ref_count(self):
        # Attach the shared object ID only once we know the object really
        # is shared (i.e. on the second sighting). Configs of unshared
        # objects thus stay byte-compatible with older Keras versions.
        first_extra_reference = self.ref_count == 1
        if first_extra_reference:
            self[SHARED_OBJECT_KEY] = self.object_id
        self.ref_count += 1
class SharedObjectSavingScope:
    """Keeps track of shared object configs when serializing."""

    def __enter__(self):
        # When shared-object handling is globally disabled, install nothing.
        if _shared_object_disabled():
            return None
        global SHARED_OBJECT_SAVING

        # Serialization can happen at a number of layers for a number of
        # reasons. We may end up with a case where we're opening a saving scope
        # within another saving scope. In that case, we'd like to use the
        # outermost scope available and ignore inner scopes, since there is not
        # (yet) a reasonable use case for having these nested and distinct.
        if _shared_object_saving_scope() is not None:
            self._passthrough = True
            return _shared_object_saving_scope()
        else:
            self._passthrough = False

        SHARED_OBJECT_SAVING.scope = self
        # Weak keys so tracked objects can still be garbage collected while
        # the scope is open.
        self._shared_objects_config = weakref.WeakKeyDictionary()
        self._next_id = 0
        return self

    def get_config(self, obj):
        """Gets a `SharedObjectConfig` if one has already been seen for `obj`.

        Args:
            obj: The object for which to retrieve the `SharedObjectConfig`.

        Returns:
            The SharedObjectConfig for a given object, if already seen. Else,
                `None`.
        """
        try:
            shared_object_config = self._shared_objects_config[obj]
        except (TypeError, KeyError):
            # If the object is unhashable (e.g. a subclass of
            # `AbstractBaseClass` that has not overridden `__hash__`), a
            # `TypeError` will be thrown. We'll just continue on without shared
            # object support.
            return None
        shared_object_config.increment_ref_count()
        return shared_object_config

    def create_config(self, base_config, obj):
        """Create a new SharedObjectConfig for a given object."""
        shared_object_config = SharedObjectConfig(base_config, self._next_id)
        self._next_id += 1
        try:
            self._shared_objects_config[obj] = shared_object_config
        except TypeError:
            # If the object is unhashable (e.g. a subclass of
            # `AbstractBaseClass` that has not overridden `__hash__`), a
            # `TypeError` will be thrown. We'll just continue on without shared
            # object support.
            pass
        return shared_object_config

    def __exit__(self, *args, **kwargs):
        # Only tear down the global scope if this instance installed it
        # (i.e. we were not merely passing through to an outer scope).
        if not getattr(self, "_passthrough", False):
            global SHARED_OBJECT_SAVING
            SHARED_OBJECT_SAVING.scope = None
def serialize_keras_class_and_config(
    cls_name, cls_config, obj=None, shared_object_id=None
):
    """Returns the serialization of the class with the given config."""
    serialized = {"class_name": cls_name, "config": cls_config}

    # Some branches of the load path re-serialize; if they already have a
    # shared object ID, keep it.
    if shared_object_id is not None:
        serialized[SHARED_OBJECT_KEY] = shared_object_id

    # Inside an active saving scope, reuse (and reference-count) any config
    # already produced for this object. The extra ID field stored in the
    # config lets the shared-object relationship be re-created at load time.
    saving_scope = _shared_object_saving_scope()
    if saving_scope is not None and obj is not None:
        existing_config = saving_scope.get_config(obj)
        if existing_config is not None:
            return existing_config
        return saving_scope.create_config(serialized, obj)

    return serialized
@contextlib.contextmanager
def skip_failed_serialization():
    """Tolerate layers whose `get_config` raises `NotImplementedError`.

    While active, such layers serialize to a placeholder config instead of
    propagating the error.
    """
    global _SKIP_FAILED_SERIALIZATION
    previous_value = _SKIP_FAILED_SERIALIZATION
    _SKIP_FAILED_SERIALIZATION = True
    try:
        yield
    finally:
        # Always restore the flag, even if the body raised.
        _SKIP_FAILED_SERIALIZATION = previous_value
@keras_export(
    [
        "keras.legacy.saving.serialize_keras_object",
        "keras.utils.legacy.serialize_keras_object",
    ]
)
def serialize_keras_object(instance):
    """Serialize a Keras object into a JSON-compatible representation.

    Calls to `serialize_keras_object` while underneath the
    `SharedObjectSavingScope` context manager will cause any objects re-used
    across multiple layers to be saved with a special shared object ID. This
    allows the network to be re-created properly during deserialization.

    Args:
        instance: The object to serialize.

    Returns:
        A dict-like, JSON-compatible representation of the object's config.

    Raises:
        ValueError: If `instance` implements neither `get_config()` nor
            has a `__name__` (i.e. it cannot be serialized).
    """
    # _, instance = tf.__internal__.decorator.unwrap(instance)
    # Strip any decorator wrappers so we serialize the underlying object.
    instance = inspect.unwrap(instance)
    if instance is None:
        return None

    if hasattr(instance, "get_config"):
        name = object_registration.get_registered_name(instance.__class__)
        try:
            config = instance.get_config()
        except NotImplementedError as e:
            # Under `skip_failed_serialization()`, emit a placeholder config
            # instead of failing (used when the config is not required).
            if _SKIP_FAILED_SERIALIZATION:
                return serialize_keras_class_and_config(
                    name, {_LAYER_UNDEFINED_CONFIG_KEY: True}
                )
            raise e
        serialization_config = {}
        for key, item in config.items():
            if isinstance(item, str):
                serialization_config[key] = item
                continue

            # Any object of a different type needs to be converted to string or
            # dict for serialization (e.g. custom functions, custom classes)
            try:
                serialized_item = serialize_keras_object(item)
                if isinstance(serialized_item, dict) and not isinstance(
                    item, dict
                ):
                    # Mark configs produced here so the deserializer knows
                    # to recursively deserialize them.
                    serialized_item["__passive_serialization__"] = True
                serialization_config[key] = serialized_item
            except ValueError:
                # Unserializable values are passed through unchanged.
                serialization_config[key] = item

        name = object_registration.get_registered_name(instance.__class__)
        return serialize_keras_class_and_config(
            name, serialization_config, instance
        )
    if hasattr(instance, "__name__"):
        # Functions serialize to their (registered) name only.
        return object_registration.get_registered_name(instance)
    raise ValueError(
        f"Cannot serialize {instance} because it doesn't implement "
        "`get_config()`."
    )
def class_and_config_for_serialized_keras_object(
    config,
    module_objects=None,
    custom_objects=None,
    printable_module_name="object",
):
    """Returns the class name and config for a serialized keras object.

    Args:
        config: Serialized object config: a dict containing `class_name`
            and `config` keys.
        module_objects: A dictionary of built-in objects to look the class
            name up in.
        custom_objects: A dictionary of custom (user-provided) objects to
            look the class name up in.
        printable_module_name: A human-readable string representing the type
            of the object. Printed in case of exception.

    Returns:
        A `(cls, cls_config)` tuple: the resolved class and its (partially
        deserialized) config.

    Raises:
        ValueError: If `config` is not a dict with the expected keys, or if
            the class name cannot be resolved.
    """
    if (
        not isinstance(config, dict)
        or "class_name" not in config
        or "config" not in config
    ):
        raise ValueError(
            f"Improper config format for {config}. "
            "Expecting python dict contains `class_name` and `config` as keys"
        )

    class_name = config["class_name"]
    cls = object_registration.get_registered_object(
        class_name, custom_objects, module_objects
    )
    if cls is None:
        raise ValueError(
            f"Unknown {printable_module_name}: '{class_name}'. "
            "Please ensure you are using a `keras.utils.custom_object_scope` "
            "and that this object is included in the scope. See "
            "https://www.tensorflow.org/guide/keras/save_and_serialize"
            "#registering_the_custom_object for details."
        )

    cls_config = config["config"]

    # Check if `cls_config` is a list. If it is a list, return the class and
    # the associated class configs for recursively deserialization. This case
    # will happen on the old version of sequential model (e.g. `keras_version`
    # == "2.0.6"), which is serialized in a different structure, for example
    # "{'class_name': 'Sequential',
    #   'config': [{'class_name': 'Embedding', 'config': ...}, {}, ...]}".
    if isinstance(cls_config, list):
        return (cls, cls_config)

    deserialized_objects = {}
    for key, item in cls_config.items():
        if key == "name":
            # Assume that the value of 'name' is a string that should not be
            # deserialized as a function. This avoids the corner case where
            # cls_config['name'] has an identical name to a custom function
            # and gets converted into that function.
            deserialized_objects[key] = item
        elif isinstance(item, dict) and "__passive_serialization__" in item:
            # Configs marked by `serialize_keras_object` are recursively
            # deserialized.
            deserialized_objects[key] = deserialize_keras_object(
                item,
                module_objects=module_objects,
                custom_objects=custom_objects,
                printable_module_name="config_item",
            )
        # TODO(momernick): Should this also have 'module_objects'?
        elif isinstance(item, str) and inspect.isfunction(
            object_registration.get_registered_object(item, custom_objects)
        ):
            # Handle custom functions here. When saving functions, we only
            # save the function's name as a string. If we find a matching
            # string in the custom objects during deserialization, we convert
            # the string back to the original function.
            # Note that a potential issue is that a string field could have a
            # naming conflict with a custom function name, but this should be
            # a rare case. This issue does not occur if a string field has a
            # naming conflict with a custom object, since the config of an
            # object will always be a dict.
            deserialized_objects[key] = (
                object_registration.get_registered_object(item, custom_objects)
            )

    # Merge the deserialized values back into the config in one pass. (The
    # previous implementation looped over `.items()` but ignored the value
    # and re-indexed the dict on every iteration.)
    cls_config.update(deserialized_objects)

    return (cls, cls_config)
@keras_export(
    [
        "keras.legacy.saving.deserialize_keras_object",
        "keras.utils.legacy.deserialize_keras_object",
    ]
)
def deserialize_keras_object(
    identifier,
    module_objects=None,
    custom_objects=None,
    printable_module_name="object",
):
    """Turns the serialized form of a Keras object back into an actual object.

    This function is for mid-level library implementers rather than end users.

    Importantly, this utility requires you to provide the dict of
    `module_objects` to use for looking up the object config; this is not
    populated by default. If you need a deserialization utility that has
    preexisting knowledge of built-in Keras objects, use e.g.
    `keras.layers.deserialize(config)`, `keras.metrics.deserialize(config)`,
    etc.

    Calling `deserialize_keras_object` while underneath the
    `SharedObjectLoadingScope` context manager will cause any already-seen
    shared objects to be returned as-is rather than creating a new object.

    Args:
        identifier: the serialized form of the object.
        module_objects: A dictionary of built-in objects to look the name up in.
            Generally, `module_objects` is provided by midlevel library
            implementers.
        custom_objects: A dictionary of custom objects to look the name up in.
            Generally, `custom_objects` is provided by the end user.
        printable_module_name: A human-readable string representing the type of
            the object. Printed in case of exception.

    Returns:
        The deserialized object.

    Example:

    A mid-level library implementer might want to implement a utility for
    retrieving an object from its config, as such:

    ```python
    def deserialize(config, custom_objects=None):
        return deserialize_keras_object(
            config,
            module_objects=globals(),
            custom_objects=custom_objects,
            printable_module_name="MyObjectType",
        )
    ```

    This is how e.g. `keras.layers.deserialize()` is implemented.
    """
    if identifier is None:
        return None

    if isinstance(identifier, dict):
        # In this case we are dealing with a Keras config dictionary.
        config = identifier
        (cls, cls_config) = class_and_config_for_serialized_keras_object(
            config, module_objects, custom_objects, printable_module_name
        )

        # If this object has already been loaded (i.e. it's shared between
        # multiple objects), return the already-loaded object.
        shared_object_id = config.get(SHARED_OBJECT_KEY)
        shared_object = _shared_object_loading_scope().get(shared_object_id)
        if shared_object is not None:
            return shared_object

        if hasattr(cls, "from_config"):
            # Only pass `custom_objects` through when the target
            # `from_config` accepts it.
            arg_spec = inspect.getfullargspec(cls.from_config)
            custom_objects = custom_objects or {}

            if "custom_objects" in arg_spec.args:
                deserialized_obj = cls.from_config(
                    cls_config,
                    custom_objects={
                        **object_registration.GLOBAL_CUSTOM_OBJECTS,
                        **custom_objects,
                    },
                )
            else:
                with object_registration.CustomObjectScope(custom_objects):
                    deserialized_obj = cls.from_config(cls_config)
        else:
            # Then `cls` may be a function returning a class.
            # in this case by convention `config` holds
            # the kwargs of the function.
            custom_objects = custom_objects or {}
            with object_registration.CustomObjectScope(custom_objects):
                deserialized_obj = cls(**cls_config)

        # Add object to shared objects, in case we find it referenced again.
        _shared_object_loading_scope().set(shared_object_id, deserialized_obj)

        return deserialized_obj

    elif isinstance(identifier, str):
        object_name = identifier
        # Lookup order: user-supplied custom objects, then thread-local
        # registrations, then global registrations, then `module_objects`.
        if custom_objects and object_name in custom_objects:
            obj = custom_objects.get(object_name)
        elif (
            object_name
            in object_registration._THREAD_LOCAL_CUSTOM_OBJECTS.__dict__
        ):
            obj = object_registration._THREAD_LOCAL_CUSTOM_OBJECTS.__dict__[
                object_name
            ]
        elif object_name in object_registration._GLOBAL_CUSTOM_OBJECTS:
            obj = object_registration._GLOBAL_CUSTOM_OBJECTS[object_name]
        else:
            obj = module_objects.get(object_name)
            if obj is None:
                raise ValueError(
                    f"Unknown {printable_module_name}: '{object_name}'. "
                    "Please ensure you are using a "
                    "`keras.utils.custom_object_scope` "
                    "and that this object is included in the scope. See "
                    "https://www.tensorflow.org/guide/keras/save_and_serialize"
                    "#registering_the_custom_object for details."
                )

        # Classes passed by name are instantiated with no args, functions are
        # returned as-is.
        if inspect.isclass(obj):
            return obj()
        return obj

    elif inspect.isfunction(identifier):
        # If a function has already been deserialized, return as is.
        return identifier
    else:
        raise ValueError(
            "Could not interpret serialized "
            f"{printable_module_name}: {identifier}"
        )
def validate_config(config):
    """Determines whether config appears to be a valid layer config."""
    if not isinstance(config, dict):
        return False
    # A config written under `skip_failed_serialization()` carries the
    # undefined-config marker and is not usable.
    return _LAYER_UNDEFINED_CONFIG_KEY not in config
def is_default(method):
    """Check if a method is decorated with the `default` wrapper."""
    marker = getattr(method, "_is_default", False)
    return marker
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/legacy/saving/json_utils_test.py | keras/src/legacy/saving/json_utils_test.py | import enum
import pytest
from keras.src import backend
from keras.src import testing
from keras.src.legacy.saving import json_utils
if backend.backend() == "tensorflow":
import tensorflow as tf
class JsonUtilsTestAllBackends(testing.TestCase):
    """Round-trip tests for the JSON encoder that run on every backend."""

    def test_encode_decode_tuple(self):
        payload = {"key1": (3, 5), "key2": [(1, (3, 4)), (1,)]}
        roundtripped = json_utils.decode(json_utils.Encoder().encode(payload))
        self.assertEqual(set(roundtripped.keys()), {"key1", "key2"})
        self.assertAllEqual(roundtripped["key1"], (3, 5))
        self.assertAllEqual(roundtripped["key2"], [(1, (3, 4)), (1,)])

    def test_encode_decode_enum(self):
        class Enum(enum.Enum):
            CLASS_A = "a"
            CLASS_B = "b"

        payload = {"key": Enum.CLASS_A, "key2": Enum.CLASS_B}
        roundtripped = json_utils.decode(json_utils.Encoder().encode(payload))
        # Enum members decode to their underlying values.
        self.assertAllEqual({"key": "a", "key2": "b"}, roundtripped)

    def test_encode_decode_bytes(self):
        payload = b"abc"
        roundtripped = json_utils.decode(json_utils.Encoder().encode(payload))
        self.assertAllEqual(payload, roundtripped)
@pytest.mark.skipif(
    backend.backend() != "tensorflow",
    reason="These JSON serialization tests are specific to TF components.",
)
class JsonUtilsTestTF(testing.TestCase):
    """JSON round-trip tests for TensorFlow-specific types."""

    def test_encode_decode_tensor_shape(self):
        metadata = {
            "key1": tf.TensorShape(None),
            "key2": [tf.TensorShape([None]), tf.TensorShape([3, None, 5])],
        }
        string = json_utils.Encoder().encode(metadata)
        loaded = json_utils.decode(string)

        self.assertEqual(set(loaded.keys()), {"key1", "key2"})
        # Unknown-rank and partially-known shapes must survive the trip.
        self.assertEqual(loaded["key1"].rank, None)
        self.assertAllEqual(loaded["key2"][0].as_list(), [None])
        self.assertAllEqual(loaded["key2"][1].as_list(), [3, None, 5])

    def test_encode_decode_type_spec(self):
        spec = tf.TensorSpec((1, 5), tf.float32)
        string = json_utils.Encoder().encode(spec)
        loaded = json_utils.decode(string)
        self.assertEqual(spec, loaded)

        # Decoding a spec whose TypeSpec class was never registered must
        # fail loudly rather than return garbage.
        invalid_type_spec = {
            "class_name": "TypeSpec",
            "type_spec": "Invalid Type",
            "serialized": None,
        }
        string = json_utils.Encoder().encode(invalid_type_spec)
        with self.assertRaisesRegex(
            ValueError, "No TypeSpec has been registered"
        ):
            loaded = json_utils.decode(string)

    def test_encode_decode_ragged_tensor(self):
        x = tf.ragged.constant([[1.0, 2.0], [3.0]])
        string = json_utils.Encoder().encode(x)
        loaded = json_utils.decode(string)
        self.assertAllClose(loaded.values, x.values)

    def test_encode_decode_extension_type_tensor(self):
        class MaskedTensor(tf.experimental.ExtensionType):
            __name__ = "MaskedTensor"
            values: tf.Tensor
            mask: tf.Tensor

        x = MaskedTensor(
            values=[[1, 2, 3], [4, 5, 6]],
            mask=[[True, True, False], [True, False, True]],
        )
        string = json_utils.Encoder().encode(x)
        loaded = json_utils.decode(string)
        self.assertAllClose(loaded.values, x.values)
        self.assertAllClose(loaded.mask, x.mask)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/legacy/saving/legacy_h5_format.py | keras/src/legacy/saving/legacy_h5_format.py | import json
import os
import warnings
import numpy as np
from absl import logging
from keras.src import backend
from keras.src.backend.common import global_state
from keras.src.legacy.saving import json_utils
from keras.src.legacy.saving import saving_options
from keras.src.legacy.saving import saving_utils
from keras.src.saving import object_registration
from keras.src.saving import serialization_lib
from keras.src.utils import io_utils
try:
import h5py
except ImportError:
h5py = None
HDF5_OBJECT_HEADER_LIMIT = 64512
def save_model_to_hdf5(model, filepath, overwrite=True, include_optimizer=True):
    """Saves a model to an HDF5 file in the legacy Keras H5 format.

    Args:
        model: Keras model instance to be saved.
        filepath: One of the following:
            - String, path where to save the model (missing parent
              directories are created)
            - `h5py.File` object where to save the model (not closed here)
        overwrite: Whether to silently overwrite any existing file at the
            target location, or ask the user interactively.
        include_optimizer: If True, also save the optimizer's state.

    Raises:
        ImportError: if h5py is not available.
    """
    if h5py is None:
        raise ImportError(
            "`save_model()` using h5 format requires h5py. Could not "
            "import h5py."
        )

    if not isinstance(filepath, h5py.File):
        # If file exists and should not be overwritten.
        if not overwrite and os.path.isfile(filepath):
            proceed = io_utils.ask_to_proceed_with_overwrite(filepath)
            if not proceed:
                return

        # Create any missing parent directories before opening the file.
        dirpath = os.path.dirname(filepath)
        if dirpath and not os.path.exists(dirpath):
            os.makedirs(dirpath, exist_ok=True)

        f = h5py.File(filepath, mode="w")
        opened_new_file = True
    else:
        f = filepath
        opened_new_file = False
    try:
        # Legacy-config scope so objects serialize with Keras 2-era configs.
        with saving_options.keras_option_scope(use_legacy_config=True):
            model_metadata = saving_utils.model_metadata(
                model, include_optimizer
            )
            for k, v in model_metadata.items():
                # Structured metadata is stored as JSON-encoded attributes.
                if isinstance(v, (dict, list, tuple)):
                    f.attrs[k] = json.dumps(
                        v, default=json_utils.get_json_type
                    ).encode("utf8")
                else:
                    f.attrs[k] = v

            model_weights_group = f.create_group("model_weights")
            save_weights_to_hdf5_group(model_weights_group, model)

            # TODO(b/128683857): Add integration tests between tf.keras and
            # external Keras, to avoid breaking TF.js users.
            if include_optimizer and hasattr(model, "optimizer"):
                save_optimizer_weights_to_hdf5_group(f, model.optimizer)

        f.flush()
    finally:
        # Only close handles we opened ourselves; caller-provided handles
        # stay open.
        if opened_new_file:
            f.close()
def load_model_from_hdf5(
    filepath, custom_objects=None, compile=True, safe_mode=True
):
    """Loads a model saved via `save_model_to_hdf5`.

    Args:
        filepath: One of the following:
            - String, path to the saved model
            - `h5py.File` object from which to load the model
        custom_objects: Optional dictionary mapping names
            (strings) to custom classes or functions to be
            considered during deserialization.
        compile: Boolean, whether to compile the model
            after loading.
        safe_mode: Boolean, whether to disallow unsafe deserialization
            while rebuilding the model config.

    Returns:
        A Keras model instance. If an optimizer was found
        as part of the saved model, the model is already
        compiled. Otherwise, the model is uncompiled and
        a warning will be displayed. When `compile` is set
        to `False`, the compilation is omitted without any
        warning.

    Raises:
        ImportError: if h5py is not available.
        ValueError: In case of an invalid savefile.
    """
    if h5py is None:
        raise ImportError(
            "`load_model()` using h5 format requires h5py. Could not "
            "import h5py."
        )
    if not custom_objects:
        custom_objects = {}
    # Merge user-supplied custom objects with globally registered ones and
    # with any active custom-object scope.
    gco = object_registration.GLOBAL_CUSTOM_OBJECTS
    tlco = global_state.get_global_attribute("custom_objects_scope_dict", {})
    custom_objects = {**custom_objects, **gco, **tlco}
    opened_new_file = not isinstance(filepath, h5py.File)
    if opened_new_file:
        f = h5py.File(filepath, mode="r")
    else:
        f = filepath
    model = None
    try:
        # instantiate model
        model_config = f.attrs.get("model_config")
        if model_config is None:
            raise ValueError(
                f"No model config found in the file at {filepath}."
            )
        if hasattr(model_config, "decode"):
            model_config = model_config.decode("utf-8")
        model_config = json_utils.decode(model_config)
        legacy_scope = saving_options.keras_option_scope(use_legacy_config=True)
        safe_mode_scope = serialization_lib.SafeModeScope(safe_mode)
        with legacy_scope, safe_mode_scope:
            model = saving_utils.model_from_config(
                model_config, custom_objects=custom_objects
            )
            # set weights
            load_weights_from_hdf5_group(f["model_weights"], model)
        if compile:
            # instantiate optimizer
            training_config = f.attrs.get("training_config")
            if hasattr(training_config, "decode"):
                training_config = training_config.decode("utf-8")
            if training_config is None:
                logging.warning(
                    "No training configuration found in the save file, so "
                    "the model was *not* compiled. Compile it manually."
                )
                return model
            training_config = json_utils.decode(training_config)
            # Compile model.
            model.compile(
                **saving_utils.compile_args_from_training_config(
                    training_config, custom_objects
                )
            )
            saving_utils.try_build_compiled_arguments(model)
            # Set optimizer weights.
            if "optimizer_weights" in f:
                try:
                    from keras.src import optimizers

                    if isinstance(model.optimizer, optimizers.Optimizer):
                        model.optimizer.build(model._trainable_variables)
                    else:
                        model.optimizer._create_all_weights(
                            model._trainable_variables
                        )
                except (NotImplementedError, AttributeError):
                    # Bug fix: the original message contained a bare `{}`
                    # placeholder that was never formatted; pass the
                    # optimizer via lazy %-formatting instead.
                    logging.warning(
                        "Error when creating the weights of optimizer %s, "
                        "making it impossible to restore the saved optimizer "
                        "state. As a result, your model is starting with "
                        "a freshly initialized optimizer.",
                        model.optimizer,
                    )
                optimizer_weight_values = (
                    load_optimizer_weights_from_hdf5_group(f)
                )
                try:
                    model.optimizer.set_weights(optimizer_weight_values)
                except ValueError:
                    logging.warning(
                        "Error in loading the saved optimizer "
                        "state. As a result, your model is "
                        "starting with a freshly initialized "
                        "optimizer."
                    )
    finally:
        if opened_new_file:
            f.close()
    return model
def save_weights_to_hdf5_group(f, model):
    """Saves the weights of a model's layers into a HDF5 group.

    Args:
        f: HDF5 group.
        model: Model instance.
    """
    from keras.src import __version__ as keras_version

    layer_name_bytes = [layer.name.encode("utf8") for layer in model.layers]
    save_attributes_to_hdf5_group(f, "layer_names", layer_name_bytes)
    f.attrs["backend"] = backend.backend().encode("utf8")
    f.attrs["keras_version"] = str(keras_version).encode("utf8")
    # Sort model layers by layer name to ensure that group names are strictly
    # growing to avoid prefix issues.
    for layer in sorted(model.layers, key=lambda layer: layer.name):
        layer_group = f.create_group(layer.name)
        save_subset_weights_to_hdf5_group(layer_group, _legacy_weights(layer))
    # Top-level (model-owned) variables: keep only those that are part of
    # `model.weights`.
    top_level_weights = [
        v
        for v in model._trainable_variables + model._non_trainable_variables
        if v in model.weights
    ]
    top_group = f.create_group("top_level_model_weights")
    save_subset_weights_to_hdf5_group(top_group, top_level_weights)
def save_subset_weights_to_hdf5_group(f, weights):
    """Save a list of weight variables into a HDF5 group.

    Args:
        f: HDF5 group.
        weights: List of weight variables.
    """
    names = [str(w.path).encode("utf8") for w in weights]
    values = [backend.convert_to_numpy(w) for w in weights]
    save_attributes_to_hdf5_group(f, "weight_names", names)
    for weight_name, value in zip(names, values):
        dataset = f.create_dataset(weight_name, value.shape, dtype=value.dtype)
        if value.shape:
            dataset[:] = value
        else:
            # Scalars must be written through the `()` index.
            dataset[()] = value
def save_optimizer_weights_to_hdf5_group(hdf5_group, optimizer):
    """Saves optimizer weights of an optimizer to a HDF5 group.

    Args:
        hdf5_group: HDF5 group.
        optimizer: optimizer instance.
    """
    from keras.src import optimizers

    if isinstance(optimizer, optimizers.Optimizer):
        symbolic_weights = optimizer.variables
    else:
        # Legacy (pre-Keras-3) optimizers expose their state as `weights`.
        symbolic_weights = getattr(optimizer, "weights")
    if symbolic_weights:
        weights_group = hdf5_group.create_group("optimizer_weights")
        weight_names = [str(w.path).encode("utf8") for w in symbolic_weights]
        save_attributes_to_hdf5_group(
            weights_group, "weight_names", weight_names
        )
        weight_values = [backend.convert_to_numpy(w) for w in symbolic_weights]
        for name, val in zip(weight_names, weight_values):
            param_dset = weights_group.create_dataset(
                name, val.shape, dtype=val.dtype
            )
            if not val.shape:
                # scalar: h5py requires the `()` index for 0-d datasets
                param_dset[()] = val
            else:
                param_dset[:] = val
def save_attributes_to_hdf5_group(group, name, data):
    """Saves attributes (data) of the specified name into the HDF5 group.

    HDF5 cannot store attribute data larger than HDF5_OBJECT_HEADER_LIMIT
    bytes, so large attribute lists are split into numbered chunks
    (`"<name>0"`, `"<name>1"`, ...).

    Args:
        group: A pointer to a HDF5 group.
        name: A name of the attributes to save.
        data: Attributes data to store.

    Raises:
        RuntimeError: If any single attribute is too large to be saved.
    """
    # Even chunking cannot help if one single item exceeds the limit.
    oversized = [item for item in data if len(item) > HDF5_OBJECT_HEADER_LIMIT]
    if oversized:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {oversized}"
        )
    as_array = np.asarray(data)
    chunk_count = 1
    pieces = np.array_split(as_array, chunk_count)
    # Grow the number of chunks until each piece fits; guaranteed to
    # terminate by the per-item check above.
    while any(piece.nbytes > HDF5_OBJECT_HEADER_LIMIT for piece in pieces):
        chunk_count += 1
        pieces = np.array_split(as_array, chunk_count)
    if chunk_count == 1:
        group.attrs[name] = data
    else:
        for chunk_id, piece in enumerate(pieces):
            group.attrs["%s%d" % (name, chunk_id)] = piece
def load_weights_from_hdf5_group(f, model, skip_mismatch=False):
    """Implements topological (order-based) weight loading.

    Args:
        f: A pointer to a HDF5 group.
        model: Model instance.
        skip_mismatch: Boolean, whether to skip loading of weights
            where there is a mismatch in the shape of the weights,

    Raises:
        ValueError: in case of mismatch between provided layers
            and weights file.
    """
    # Decode provenance metadata; note it is not otherwise used below.
    if "keras_version" in f.attrs:
        original_keras_version = f.attrs["keras_version"]
        if hasattr(original_keras_version, "decode"):
            original_keras_version = original_keras_version.decode("utf8")
    else:
        original_keras_version = "1"
    if "backend" in f.attrs:
        original_backend = f.attrs["backend"]
        if hasattr(original_backend, "decode"):
            original_backend = original_backend.decode("utf8")
    else:
        original_backend = None
    # Only layers that actually own weights participate in the
    # position-based matching below.
    filtered_layers = []
    for layer in model.layers:
        weights = _legacy_weights(layer)
        if weights:
            filtered_layers.append(layer)
    layer_names = load_attributes_from_hdf5_group(f, "layer_names")
    # Likewise, drop saved layer groups that contain no weights so both
    # sequences line up one-to-one.
    filtered_layer_names = []
    for name in layer_names:
        g = f[name]
        weight_names = load_attributes_from_hdf5_group(g, "weight_names")
        if weight_names:
            filtered_layer_names.append(name)
    layer_names = filtered_layer_names
    if len(layer_names) != len(filtered_layers):
        raise ValueError(
            "Layer count mismatch when loading weights from file. "
            f"Model expected {len(filtered_layers)} layers, found "
            f"{len(layer_names)} saved layers."
        )
    # Match saved groups to model layers purely by position.
    for k, name in enumerate(layer_names):
        g = f[name]
        layer = filtered_layers[k]
        symbolic_weights = _legacy_weights(layer)
        weight_values = load_subset_weights_from_hdf5_group(g)
        if len(weight_values) != len(symbolic_weights):
            raise ValueError(
                f"Weight count mismatch for layer #{k} (named {layer.name} in "
                f"the current model, {name} in the save file). "
                f"Layer expects {len(symbolic_weights)} weight(s). Received "
                f"{len(weight_values)} saved weight(s)"
            )
        _set_weights(
            layer,
            symbolic_weights,
            weight_values,
            skip_mismatch=skip_mismatch,
            name=f"layer #{k} (named {layer.name})",
        )
    if "top_level_model_weights" in f:
        symbolic_weights = list(
            # model.weights
            v
            for v in model._trainable_variables + model._non_trainable_variables
            if v in model.weights
        )
        weight_values = load_subset_weights_from_hdf5_group(
            f["top_level_model_weights"]
        )
        if len(weight_values) != len(symbolic_weights):
            raise ValueError(
                "Weight count mismatch for top-level weights when loading "
                "weights from file. "
                f"Model expects {len(symbolic_weights)} top-level weight(s). "
                f"Received {len(weight_values)} saved top-level weight(s)"
            )
        _set_weights(
            model,
            symbolic_weights,
            weight_values,
            skip_mismatch=skip_mismatch,
            name="top-level model",
        )
def _set_weights(
instance, symbolic_weights, weight_values, name, skip_mismatch=False
):
"""Safely set weights into a model or a layer.
Args:
instance: Model or layer instance,
symbolic_weights: symbolic tensors representing
the weights of the variables to load,
weight_values: values of the weights to load,
skip_mismatch: Boolean, whether to skip loading of weights
where there is a mismatch in the shape of the weights,
name: name used to identify the group.
Raises:
ValueError: in case of mismatch between provided
model/layer and weights.
"""
for i, weight_value in enumerate(weight_values):
expected_shape = symbolic_weights[i].shape
received_shape = weight_value.shape
if expected_shape != received_shape:
if skip_mismatch:
warnings.warn(
f"Skipping loading weights for {name}"
f"due to mismatch in shape for "
f"weight {symbolic_weights[i].path}. "
f"Weight expects shape {expected_shape}. "
"Received saved weight "
f"with shape {received_shape}",
stacklevel=2,
)
continue
raise ValueError(
f"Shape mismatch in {name}"
f"for weight {symbolic_weights[i].path}. "
f"Weight expects shape {expected_shape}. "
"Received saved weight "
f"with shape {received_shape}"
)
symbolic_weights[i].assign(weight_value)
if hasattr(instance, "finalize_state") and symbolic_weights:
instance.finalize_state()
def load_weights_from_hdf5_group_by_name(f, model, skip_mismatch=False):
    """Implements name-based weight loading (instead of topological loading).

    Layers that have no matching name are skipped.

    Args:
        f: A pointer to a HDF5 group.
        model: Model instance.
        skip_mismatch: Boolean, whether to skip loading of layers
            where there is a mismatch in the number of weights,
            or a mismatch in the shape of the weights.

    Raises:
        ValueError: in case of mismatch between provided layers
            and weights file and skip_match=False.
    """
    # Decode provenance metadata; note it is not otherwise used below.
    if "keras_version" in f.attrs:
        original_keras_version = f.attrs["keras_version"]
        if hasattr(original_keras_version, "decode"):
            original_keras_version = original_keras_version.decode("utf8")
    else:
        original_keras_version = "1"
    if "backend" in f.attrs:
        original_backend = f.attrs["backend"]
        if hasattr(original_backend, "decode"):
            original_backend = original_backend.decode("utf8")
    else:
        original_backend = None
    # New file format.
    layer_names = load_attributes_from_hdf5_group(f, "layer_names")
    # Reverse index of layer name to list of layers with name.
    index = {}
    for layer in model.layers:
        if layer.name:
            index.setdefault(layer.name, []).append(layer)
    for k, name in enumerate(layer_names):
        g = f[name]
        weight_values = load_subset_weights_from_hdf5_group(g)
        # All model layers sharing this name receive the same saved values.
        for layer in index.get(name, []):
            symbolic_weights = _legacy_weights(layer)
            if len(weight_values) != len(symbolic_weights):
                if skip_mismatch:
                    warnings.warn(
                        f"Skipping loading of weights for layer #{k} (named "
                        f"{layer.name}) due to mismatch in number of weights. "
                        f"Layer expects {len(symbolic_weights)} weight(s). "
                        f"Received {len(weight_values)} saved weight(s)",
                        stacklevel=2,
                    )
                    continue
                raise ValueError(
                    f"Weight count mismatch for layer #{k} "
                    f"(named {layer.name}). "
                    f"Layer expects {len(symbolic_weights)} weight(s). "
                    f"Received {len(weight_values)} saved weight(s)"
                )
            # Set values.
            _set_weights(
                layer,
                symbolic_weights,
                weight_values,
                skip_mismatch=skip_mismatch,
                name=f"layer #{k} (named {layer.name})",
            )
    if "top_level_model_weights" in f:
        # NOTE(review): unlike the topological loader, this does not filter
        # by membership in `model.weights` — presumably intentional, but
        # worth confirming against the topological path.
        symbolic_weights = (
            model._trainable_variables + model._non_trainable_variables
        )
        weight_values = load_subset_weights_from_hdf5_group(
            f["top_level_model_weights"]
        )
        if len(weight_values) != len(symbolic_weights):
            if skip_mismatch:
                warnings.warn(
                    "Skipping loading top-level weights for model due to "
                    "mismatch in number of weights. "
                    f"Model expects {len(symbolic_weights)} "
                    "top-level weight(s). "
                    f"Received {len(weight_values)} saved top-level weight(s)",
                    stacklevel=2,
                )
            else:
                raise ValueError(
                    "Weight count mismatch for top-level weights of model. "
                    f"Model expects {len(symbolic_weights)} "
                    "top-level weight(s). "
                    f"Received {len(weight_values)} saved top-level weight(s)"
                )
        else:
            _set_weights(
                model,
                symbolic_weights,
                weight_values,
                skip_mismatch=skip_mismatch,
                name="top-level model",
            )
def load_subset_weights_from_hdf5_group(f):
    """Load the weight values stored in a single HDF5 group.

    Args:
        f: A pointer to a HDF5 group.

    Returns:
        List of NumPy arrays of the weight values, in saved order.
    """
    names = load_attributes_from_hdf5_group(f, "weight_names")
    return [np.asarray(f[key]) for key in names]
def load_optimizer_weights_from_hdf5_group(hdf5_group):
    """Load optimizer weights from a HDF5 group.

    Args:
        hdf5_group: A pointer to a HDF5 group.

    Returns:
        List of optimizer weight values (h5py datasets, not NumPy
        arrays), in saved order.
    """
    weights_group = hdf5_group["optimizer_weights"]
    optimizer_weight_names = load_attributes_from_hdf5_group(
        weights_group, "weight_names"
    )
    return [
        weights_group[weight_name] for weight_name in optimizer_weight_names
    ]
def load_attributes_from_hdf5_group(group, name):
    """Loads attributes of the specified name from the HDF5 group.

    HDF5 cannot store attribute data larger than HDF5_OBJECT_HEADER_LIMIT
    bytes, so large attribute lists may have been saved as numbered chunks
    (`"<name>0"`, `"<name>1"`, ...); this reassembles them.

    Args:
        group: A pointer to a HDF5 group.
        name: A name of the attributes to load.

    Returns:
        Attributes data, with byte strings decoded to `str`.
    """

    def _as_str(value):
        return value.decode("utf8") if hasattr(value, "decode") else value

    if name in group.attrs:
        return [_as_str(item) for item in group.attrs[name]]
    collected = []
    chunk_id = 0
    while f"{name}{chunk_id}" in group.attrs:
        collected.extend(
            _as_str(item) for item in group.attrs[f"{name}{chunk_id}"]
        )
        chunk_id += 1
    return collected
def _legacy_weights(layer):
"""Legacy weight order converter.
For legacy reason, the layer.weights was in the order of
[self.trainable_weights + self.non_trainable_weights], and this order was
used for preserving the weights in h5 format. The new order of layer.weights
are the same as layer.get_weights() which is more intuitive for user. To
keep supporting the existing saved h5 file, this method should be used to
save/load weights.
Args:
layer: a `Model` or `Layer` instance.
Returns:
A list of variables with the legacy weight order.
"""
return layer.trainable_weights + layer.non_trainable_weights
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/legacy/saving/saving_options.py | keras/src/legacy/saving/saving_options.py | import contextlib
from keras.src.backend.common import global_state
@contextlib.contextmanager
def keras_option_scope(use_legacy_config=True):
    """Context manager that temporarily toggles `use_legacy_config`.

    The previous value of the global attribute is restored on exit, even
    if the wrapped block raises.
    """
    previous = global_state.get_global_attribute("use_legacy_config", None)
    global_state.set_global_attribute("use_legacy_config", use_legacy_config)
    try:
        yield
    finally:
        global_state.set_global_attribute("use_legacy_config", previous)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/legacy/saving/json_utils.py | keras/src/legacy/saving/json_utils.py | """JSON utilities for legacy saving formats (h5 and SavedModel)"""
import collections
import enum
import functools
import json
import numpy as np
from keras.src.legacy.saving import serialization
from keras.src.saving import serialization_lib
from keras.src.utils.module_utils import tensorflow as tf
_EXTENSION_TYPE_SPEC = "_EXTENSION_TYPE_SPEC"
class Encoder(json.JSONEncoder):
    """JSON encoder and decoder that handles TensorShapes and tuples."""

    def default(self, obj):
        """Encodes objects for types that aren't handled by the default
        encoder."""
        # A TensorShape of unknown rank serializes its items as None.
        if tf.available and isinstance(obj, tf.TensorShape):
            items = obj.as_list() if obj.rank is not None else None
            return {"class_name": "TensorShape", "items": items}
        # Everything else falls through to the shared Keras JSON fallback.
        return get_json_type(obj)

    def encode(self, obj):
        # Tuples are wrapped in a marker dict before encoding, since JSON
        # would otherwise flatten them into lists.
        return super().encode(_encode_tuple(obj))
def _encode_tuple(x):
if isinstance(x, tuple):
return {
"class_name": "__tuple__",
"items": tuple(_encode_tuple(i) for i in x),
}
elif isinstance(x, list):
return [_encode_tuple(i) for i in x]
elif isinstance(x, dict):
return {key: _encode_tuple(value) for key, value in x.items()}
else:
return x
def decode(json_string):
    """Decodes a JSON string, restoring tuples/TensorShapes via the
    TF-aware `_decode_helper` hook."""
    return json.loads(json_string, object_hook=_decode_helper)
def decode_and_deserialize(
    json_string, module_objects=None, custom_objects=None
):
    """Decodes the JSON and deserializes any Keras objects found in the dict.

    Args:
        json_string: JSON string to decode.
        module_objects: Optional dict of built-in objects to look names up
            in during deserialization.
        custom_objects: Optional dict of user-provided custom objects.

    Returns:
        The decoded structure, with serialized Keras objects deserialized.
    """
    return json.loads(
        json_string,
        object_hook=functools.partial(
            _decode_helper,
            deserialize=True,
            module_objects=module_objects,
            custom_objects=custom_objects,
        ),
    )
def _decode_helper(
    obj, deserialize=False, module_objects=None, custom_objects=None
):
    """A decoding helper that is TF-object aware.

    Args:
        obj: A decoded dictionary that may represent an object.
        deserialize: Boolean. When True, deserializes any Keras
            objects found in `obj`. Defaults to `False`.
        module_objects: A dictionary of built-in objects to look the name up in.
            Generally, `module_objects` is provided by midlevel library
            implementers.
        custom_objects: A dictionary of custom objects to look the name up in.
            Generally, `custom_objects` is provided by the end user.

    Returns:
        The decoded object.
    """
    if isinstance(obj, dict) and "class_name" in obj:
        # TF-specific markers are only handled when TF is importable.
        if tf.available:
            if obj["class_name"] == "TensorShape":
                return tf.TensorShape(obj["items"])
            elif obj["class_name"] == "TypeSpec":
                from tensorflow.python.framework import type_spec_registry

                return type_spec_registry.lookup(obj["type_spec"])._deserialize(
                    _decode_helper(obj["serialized"])
                )
            elif obj["class_name"] == "CompositeTensor":
                spec = obj["spec"]
                tensors = []
                for dtype, tensor in obj["tensors"]:
                    tensors.append(
                        tf.constant(tensor, dtype=tf.dtypes.as_dtype(dtype))
                    )
                return tf.nest.pack_sequence_as(
                    _decode_helper(spec), tensors, expand_composites=True
                )
        if obj["class_name"] == "__tuple__":
            return tuple(_decode_helper(i) for i in obj["items"])
        elif obj["class_name"] == "__ellipsis__":
            return Ellipsis
        elif deserialize and "__passive_serialization__" in obj:
            # __passive_serialization__ is added by the JSON encoder when
            # encoding an object that has a `get_config()` method.
            try:
                if (
                    "module" not in obj
                ): # TODO(nkovela): Add TF SavedModel scope
                    return serialization.deserialize_keras_object(
                        obj,
                        module_objects=module_objects,
                        custom_objects=custom_objects,
                    )
                else:
                    return serialization_lib.deserialize_keras_object(
                        obj,
                        module_objects=module_objects,
                        custom_objects=custom_objects,
                    )
            # Deserialization failures fall through and return the raw dict.
            except ValueError:
                pass
        elif obj["class_name"] == "__bytes__":
            return obj["value"].encode("utf-8")
    return obj
def get_json_type(obj):
    """Serializes any object to a JSON-serializable structure.

    Args:
        obj: the object to serialize

    Returns:
        JSON-serializable structure representing `obj`.

    Raises:
        TypeError: if `obj` cannot be serialized.
    """
    # NOTE: branch order matters — e.g. the `get_config` check must run
    # before the generic `callable` check.
    # if obj is a serializable Keras class instance
    # e.g. optimizer, layer
    if hasattr(obj, "get_config"):
        # TODO(nkovela): Replace with legacy serialization
        serialized = serialization.serialize_keras_object(obj)
        serialized["__passive_serialization__"] = True
        return serialized
    # if obj is any numpy type
    if type(obj).__module__ == np.__name__:
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            return obj.item()
    # misc functions (e.g. loss function)
    if callable(obj):
        return obj.__name__
    # if obj is a python 'type'
    if type(obj).__name__ == type.__name__:
        return obj.__name__
    if tf.available and isinstance(obj, tf.compat.v1.Dimension):
        return obj.value
    if tf.available and isinstance(obj, tf.TensorShape):
        return obj.as_list()
    if tf.available and isinstance(obj, tf.DType):
        return obj.name
    if isinstance(obj, collections.abc.Mapping):
        return dict(obj)
    if obj is Ellipsis:
        return {"class_name": "__ellipsis__"}
    # if isinstance(obj, wrapt.ObjectProxy):
    #     return obj.__wrapped__
    if tf.available and isinstance(obj, tf.TypeSpec):
        from tensorflow.python.framework import type_spec_registry

        try:
            type_spec_name = type_spec_registry.get_name(type(obj))
            return {
                "class_name": "TypeSpec",
                "type_spec": type_spec_name,
                "serialized": obj._serialize(),
            }
        except ValueError:
            raise ValueError(
                f"Unable to serialize {obj} to JSON, because the TypeSpec "
                f"class {type(obj)} has not been registered."
            )
    if tf.available and isinstance(obj, tf.__internal__.CompositeTensor):
        spec = tf.type_spec_from_value(obj)
        tensors = []
        for tensor in tf.nest.flatten(obj, expand_composites=True):
            tensors.append((tensor.dtype.name, tensor.numpy().tolist()))
        return {
            "class_name": "CompositeTensor",
            "spec": get_json_type(spec),
            "tensors": tensors,
        }
    if isinstance(obj, enum.Enum):
        return obj.value
    if isinstance(obj, bytes):
        return {"class_name": "__bytes__", "value": obj.decode("utf-8")}
    raise TypeError(
        f"Unable to serialize {obj} to JSON. Unrecognized type {type(obj)}."
    )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/legacy/saving/__init__.py | keras/src/legacy/saving/__init__.py | python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false | |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/legacy/saving/saving_utils.py | keras/src/legacy/saving/saving_utils.py | import threading
from absl import logging
from keras.src import backend
from keras.src import losses
from keras.src import metrics as metrics_module
from keras.src import tree
from keras.src.legacy.saving import serialization
from keras.src.saving import object_registration
MODULE_OBJECTS = threading.local()
# Legacy lambda arguments not found in Keras 3
LAMBDA_DEP_ARGS = (
"module",
"function_type",
"output_shape_type",
"output_shape_module",
)
def model_from_config(config, custom_objects=None):
    """Instantiates a Keras model from its config.

    Args:
        config: Configuration dictionary.
        custom_objects: Optional dictionary mapping names
            (strings) to custom classes or functions to be
            considered during deserialization.

    Returns:
        A Keras model instance (uncompiled).

    Raises:
        TypeError: if `config` is not a dictionary.
    """
    if isinstance(config, list):
        raise TypeError(
            "`model_from_config` expects a dictionary, not a list. "
            f"Received: config={config}. Did you meant to use "
            "`Sequential.from_config(config)`?"
        )
    global MODULE_OBJECTS
    # Lazily populate the thread-local registry of deserializable classes.
    if not hasattr(MODULE_OBJECTS, "ALL_OBJECTS"):
        from keras.src import layers
        from keras.src import models

        MODULE_OBJECTS.ALL_OBJECTS = layers.__dict__
        MODULE_OBJECTS.ALL_OBJECTS["InputLayer"] = layers.InputLayer
        MODULE_OBJECTS.ALL_OBJECTS["Functional"] = models.Functional
        MODULE_OBJECTS.ALL_OBJECTS["Model"] = models.Model
        MODULE_OBJECTS.ALL_OBJECTS["Sequential"] = models.Sequential
    # Legacy `batch_input_shape` maps to `batch_shape` on InputLayer and
    # `input_shape` elsewhere.
    batch_input_shape = config["config"].pop("batch_input_shape", None)
    if batch_input_shape is not None:
        if config["class_name"] == "InputLayer":
            config["config"]["batch_shape"] = batch_input_shape
        else:
            config["config"]["input_shape"] = batch_input_shape
    # Legacy configs may store `axis` as a one-element list.
    axis = config["config"].pop("axis", None)
    if axis is not None:
        if isinstance(axis, list) and len(axis) == 1:
            config["config"]["axis"] = int(axis[0])
        elif isinstance(axis, (int, float)):
            config["config"]["axis"] = int(axis)
    # Handle backwards compatibility for Keras lambdas
    if config["class_name"] == "Lambda":
        for dep_arg in LAMBDA_DEP_ARGS:
            _ = config["config"].pop(dep_arg, None)
        function_config = config["config"]["function"]
        # Old format stored the lambda as a [code, defaults, closure] list.
        if isinstance(function_config, list):
            function_dict = {"class_name": "__lambda__", "config": {}}
            function_dict["config"]["code"] = function_config[0]
            function_dict["config"]["defaults"] = function_config[1]
            function_dict["config"]["closure"] = function_config[2]
            config["config"]["function"] = function_dict
    return serialization.deserialize_keras_object(
        config,
        module_objects=MODULE_OBJECTS.ALL_OBJECTS,
        custom_objects=custom_objects,
        printable_module_name="layer",
    )
def model_metadata(model, include_optimizer=True, require_config=True):
    """Returns a dictionary containing the model metadata.

    Args:
        model: Model instance to describe.
        include_optimizer: Boolean, whether to include the optimizer and
            compile configuration in the metadata.
        require_config: Boolean. If `True`, a model whose `get_config()`
            raises `NotImplementedError` propagates the error; otherwise
            the config is simply omitted.

    Returns:
        Dict with keys `keras_version`, `backend`, `model_config` and,
        for compiled models with an optimizer, `training_config`.
    """
    from keras.src import __version__ as keras_version

    model_config = {"class_name": model.__class__.__name__}
    try:
        model_config["config"] = model.get_config()
    except NotImplementedError as e:
        if require_config:
            raise e
    metadata = dict(
        keras_version=str(keras_version),
        backend=backend.backend(),
        model_config=model_config,
    )
    if getattr(model, "optimizer", False) and include_optimizer:
        if model.compiled:
            training_config = model._compile_config.config
            training_config.pop("optimizer", None)  # Handled separately.
            metadata["training_config"] = _serialize_nested_config(
                training_config
            )
            optimizer_config = {
                "class_name": object_registration.get_registered_name(
                    model.optimizer.__class__
                ),
                "config": model.optimizer.get_config(),
            }
            metadata["training_config"]["optimizer_config"] = optimizer_config
    return metadata
def compile_args_from_training_config(training_config, custom_objects=None):
    """Return model.compile arguments from training config.

    Args:
        training_config: Dict produced by `model_metadata`, holding the
            serialized optimizer, loss, metrics and loss weights.
        custom_objects: Optional dict mapping names to custom classes or
            functions used during deserialization.

    Returns:
        Dict of keyword arguments suitable for `model.compile(**kwargs)`.
    """
    if custom_objects is None:
        custom_objects = {}
    with object_registration.CustomObjectScope(custom_objects):
        from keras.src import optimizers

        optimizer_config = training_config["optimizer_config"]
        optimizer = optimizers.deserialize(optimizer_config)
        # Ensure backwards compatibility for optimizers in legacy H5 files
        optimizer = _resolve_compile_arguments_compat(
            optimizer, optimizer_config, optimizers
        )
        # Recover losses.
        loss = None
        loss_config = training_config.get("loss", None)
        if loss_config is not None:
            loss = _deserialize_nested_config(losses.deserialize, loss_config)
            # Ensure backwards compatibility for losses in legacy H5 files
            loss = _resolve_compile_arguments_compat(loss, loss_config, losses)
        # Recover metrics.
        metrics = None
        metrics_config = training_config.get("metrics", None)
        if metrics_config is not None:
            metrics = _deserialize_nested_config(
                _deserialize_metric, metrics_config
            )
            # Ensure backwards compatibility for metrics in legacy H5 files
            metrics = _resolve_compile_arguments_compat(
                metrics, metrics_config, metrics_module
            )
        # Recover weighted metrics.
        weighted_metrics = None
        weighted_metrics_config = training_config.get("weighted_metrics", None)
        if weighted_metrics_config is not None:
            weighted_metrics = _deserialize_nested_config(
                _deserialize_metric, weighted_metrics_config
            )
        loss_weights = training_config["loss_weights"]
    return dict(
        optimizer=optimizer,
        loss=loss,
        metrics=metrics,
        weighted_metrics=weighted_metrics,
        loss_weights=loss_weights,
    )
def _serialize_nested_config(config):
    """Serialize a nested structure of Keras objects.

    Callable leaves are replaced by their serialized form; every other
    leaf is returned unchanged.
    """

    def _leaf(value):
        if callable(value):
            return serialization.serialize_keras_object(value)
        return value

    return tree.map_structure(_leaf, config)
def _deserialize_nested_config(deserialize_fn, config):
"""Deserializes arbitrary Keras `config` using `deserialize_fn`."""
def _is_single_object(obj):
if isinstance(obj, dict) and "class_name" in obj:
return True # Serialized Keras object.
if isinstance(obj, str):
return True # Serialized function or string.
return False
if config is None:
return None
if _is_single_object(config):
return deserialize_fn(config)
elif isinstance(config, dict):
return {
k: _deserialize_nested_config(deserialize_fn, v)
for k, v in config.items()
}
elif isinstance(config, (tuple, list)):
return [
_deserialize_nested_config(deserialize_fn, obj) for obj in config
]
raise ValueError(
"Saved configuration not understood. Configuration should be a "
f"dictionary, string, tuple or list. Received: config={config}."
)
def _deserialize_metric(metric_config):
"""Deserialize metrics, leaving special strings untouched."""
if metric_config in ["accuracy", "acc", "crossentropy", "ce"]:
# Do not deserialize accuracy and cross-entropy strings as we have
# special case handling for these in compile, based on model output
# shape.
return metric_config
return metrics_module.deserialize(metric_config)
def _resolve_compile_arguments_compat(obj, obj_config, module):
"""Resolves backwards compatibility issues with training config arguments.
This helper function accepts built-in Keras modules such as optimizers,
losses, and metrics to ensure an object being deserialized is compatible
with Keras 3 built-ins. For legacy H5 files saved within Keras 3,
this does nothing.
"""
if isinstance(obj, str) and obj not in module.ALL_OBJECTS_DICT:
obj = module.get(obj_config["config"]["name"])
return obj
def try_build_compiled_arguments(model):
    """Best-effort build of a loaded model's compiled loss/metrics.

    Failures are logged and swallowed: the compiled containers are built
    lazily the first time the model is trained or evaluated.
    """
    try:
        if not model.compiled_loss.built:
            model.compiled_loss.build(model.outputs)
        if not model.compiled_metrics.built:
            model.compiled_metrics.build(model.outputs, model.outputs)
    except Exception:
        # Fixed: was a bare `except:`, which also swallowed
        # KeyboardInterrupt and SystemExit.
        logging.warning(
            "Compiled the loaded model, but the compiled metrics have "
            "yet to be built. `model.compile_metrics` will be empty "
            "until you train or evaluate the model."
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/legacy/preprocessing/image.py | keras/src/legacy/preprocessing/image.py | """Deprecated image preprocessing APIs from Keras 1."""
import collections
import multiprocessing
import os
import threading
import warnings
import numpy as np
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.trainers.data_adapters.py_dataset_adapter import PyDataset
from keras.src.utils import image_utils
from keras.src.utils import io_utils
from keras.src.utils.module_utils import scipy
@keras_export("keras._legacy.preprocessing.image.Iterator")
class Iterator(PyDataset):
"""Base class for image data iterators.
DEPRECATED.
Every `Iterator` must implement the `_get_batches_of_transformed_samples`
method.
Args:
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
**kwargs: Additional keyword arguments for the `PyDataset` base class,
such as `workers`, `use_multiprocessing`, and `max_queue_size`.
"""
white_list_formats = ("png", "jpg", "jpeg", "bmp", "ppm", "tif", "tiff")
def __init__(self, n, batch_size, shuffle, seed, **kwargs):
super().__init__(**kwargs)
self.n = n
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_array = None
self.index_generator = self._flow_index()
def _set_index_array(self):
self.index_array = np.arange(self.n)
if self.shuffle:
self.index_array = np.random.permutation(self.n)
def __getitem__(self, idx):
if idx >= len(self):
raise ValueError(
"Asked to retrieve element {idx}, "
"but the Sequence "
"has length {length}".format(idx=idx, length=len(self))
)
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
self.total_batches_seen += 1
if self.index_array is None:
self._set_index_array()
index_array = self.index_array[
self.batch_size * idx : self.batch_size * (idx + 1)
]
return self._get_batches_of_transformed_samples(index_array)
def __len__(self):
return (self.n + self.batch_size - 1) // self.batch_size # round up
def on_epoch_end(self):
self._set_index_array()
def reset(self):
self.batch_index = 0
def _flow_index(self):
# Ensure self.batch_index is 0.
self.reset()
while 1:
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
if self.batch_index == 0:
self._set_index_array()
if self.n == 0:
# Avoiding modulo by zero error
current_index = 0
else:
current_index = (self.batch_index * self.batch_size) % self.n
if self.n > current_index + self.batch_size:
self.batch_index += 1
else:
self.batch_index = 0
self.total_batches_seen += 1
yield self.index_array[
current_index : current_index + self.batch_size
]
def __iter__(self):
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self):
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
Args:
index_array: Array of sample indices to include in batch.
Returns:
A batch of transformed samples.
"""
raise NotImplementedError
def _iter_valid_files(directory, white_list_formats, follow_links):
"""Iterates on files with extension.
Args:
directory: Absolute path to the directory
containing files to be counted
white_list_formats: Set of strings containing allowed extensions for
the files to be counted.
follow_links: Boolean, follow symbolic links to subdirectories.
Yields:
Tuple of (root, filename) with extension in `white_list_formats`.
"""
def _recursive_list(subpath):
return sorted(
os.walk(subpath, followlinks=follow_links), key=lambda x: x[0]
)
for root, _, files in _recursive_list(directory):
for fname in sorted(files):
if fname.lower().endswith(".tiff"):
warnings.warn(
'Using ".tiff" files with multiple bands '
"will cause distortion. Please verify your output."
)
if fname.lower().endswith(white_list_formats):
yield root, fname
def _list_valid_filenames_in_directory(
    directory, white_list_formats, split, class_indices, follow_links
):
    """List valid files in `directory` together with their class indices.

    Args:
        directory: absolute path to a directory containing the files to
            list. The directory name is used as class label and must be a
            key of `class_indices`.
        white_list_formats: set of strings containing allowed extensions
            for the files to be counted.
        split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
            account that fraction of files in the directory.
        class_indices: dictionary mapping a class name to its index.
        follow_links: boolean, follow symbolic links to subdirectories.

    Returns:
        classes: a list of class indices.
        filenames: paths of valid files, relative to `directory`'s parent
            (e.g. `["class1/file1.jpg", "class1/file2.jpg", ...]` when
            `directory` is "dataset/class1").
    """
    dirname = os.path.basename(directory)
    if split:
        # Materialize to know the total count, then keep only the
        # requested fraction of the (deterministically ordered) listing.
        all_entries = list(
            _iter_valid_files(directory, white_list_formats, follow_links)
        )
        total = len(all_entries)
        begin, end = int(split[0] * total), int(split[1] * total)
        entries = all_entries[begin:end]
    else:
        entries = _iter_valid_files(
            directory, white_list_formats, follow_links
        )
    classes = []
    filenames = []
    for root, fname in entries:
        classes.append(class_indices[dirname])
        abs_path = os.path.join(root, fname)
        filenames.append(
            os.path.join(dirname, os.path.relpath(abs_path, directory))
        )
    return classes, filenames
class BatchFromFilesMixin:
    """Adds methods related to getting batches from filenames.

    It includes the logic to transform image files to batches.
    """

    def set_processing_attrs(
        self,
        image_data_generator,
        target_size,
        color_mode,
        data_format,
        save_to_dir,
        save_prefix,
        save_format,
        subset,
        interpolation,
        keep_aspect_ratio,
    ):
        """Sets attributes to use later for processing files into a batch.

        Args:
            image_data_generator: Instance of `ImageDataGenerator`
                to use for random transformations and normalization.
            target_size: tuple of integers, dimensions to resize input
                images to.
            color_mode: One of `"rgb"`, `"rgba"`, `"grayscale"`.
                Color mode to read images.
            data_format: String, one of `channels_first`, `channels_last`.
            save_to_dir: Optional directory where to save the pictures
                being yielded, in a viewable format. This is useful
                for visualizing the random transformations being
                applied, for debugging purposes.
            save_prefix: String prefix to use for saving sample
                images (if `save_to_dir` is set).
            save_format: Format to use for saving sample images
                (if `save_to_dir` is set).
            subset: Subset of data (`"training"` or `"validation"`) if
                validation_split is set in ImageDataGenerator.
            interpolation: Interpolation method used to resample the image
                if the target size is different from that of the loaded
                image. Supported methods are "nearest", "bilinear", and
                "bicubic". If PIL version 1.1.3 or newer is installed,
                "lanczos" is also supported. If PIL version 3.4.0 or newer
                is installed, "box" and "hamming" are also supported.
                By default, "nearest" is used.
            keep_aspect_ratio: Boolean, whether to resize images to a
                target size without aspect ratio distortion. The image is
                cropped in the center with target aspect ratio before
                resizing.
        """
        self.image_data_generator = image_data_generator
        self.target_size = tuple(target_size)
        self.keep_aspect_ratio = keep_aspect_ratio
        if color_mode not in {"rgb", "rgba", "grayscale"}:
            raise ValueError(
                f"Invalid color mode: {color_mode}"
                '; expected "rgb", "rgba", or "grayscale".'
            )
        self.color_mode = color_mode
        self.data_format = data_format
        # Derive the per-sample array shape from color mode + data format
        # (channel count: rgba=4, rgb=3, grayscale=1).
        if self.color_mode == "rgba":
            if self.data_format == "channels_last":
                self.image_shape = self.target_size + (4,)
            else:
                self.image_shape = (4,) + self.target_size
        elif self.color_mode == "rgb":
            if self.data_format == "channels_last":
                self.image_shape = self.target_size + (3,)
            else:
                self.image_shape = (3,) + self.target_size
        else:
            if self.data_format == "channels_last":
                self.image_shape = self.target_size + (1,)
            else:
                self.image_shape = (1,) + self.target_size
        self.save_to_dir = save_to_dir
        self.save_prefix = save_prefix
        self.save_format = save_format
        self.interpolation = interpolation
        # Translate the subset name into a (start, stop) fraction pair used
        # by `_list_valid_filenames_in_directory` to slice the file list.
        if subset is not None:
            validation_split = self.image_data_generator._validation_split
            if subset == "validation":
                split = (0, validation_split)
            elif subset == "training":
                split = (validation_split, 1)
            else:
                raise ValueError(
                    f"Invalid subset name: {subset};"
                    'expected "training" or "validation"'
                )
        else:
            split = None
        self.split = split
        self.subset = subset

    def _get_batches_of_transformed_samples(self, index_array):
        """Gets a batch of transformed samples.

        Args:
            index_array: Array of sample indices to include in batch.

        Returns:
            `batch_x` alone when `class_mode` is None; otherwise
            `(batch_x, batch_y)` plus sample weights when available.
        """
        batch_x = np.zeros(
            (len(index_array),) + self.image_shape, dtype=self.dtype
        )
        # build batch of image data
        # self.filepaths is dynamic, is better to call it once outside the loop
        filepaths = self.filepaths
        for i, j in enumerate(index_array):
            img = image_utils.load_img(
                filepaths[j],
                color_mode=self.color_mode,
                target_size=self.target_size,
                interpolation=self.interpolation,
                keep_aspect_ratio=self.keep_aspect_ratio,
            )
            x = image_utils.img_to_array(img, data_format=self.data_format)
            # Pillow images should be closed after `load_img`,
            # but not PIL images.
            if hasattr(img, "close"):
                img.close()
            if self.image_data_generator:
                params = self.image_data_generator.get_random_transform(x.shape)
                x = self.image_data_generator.apply_transform(x, params)
                x = self.image_data_generator.standardize(x)
            batch_x[i] = x
        # optionally save augmented images to disk for debugging purposes
        if self.save_to_dir:
            for i, j in enumerate(index_array):
                img = image_utils.array_to_img(
                    batch_x[i], self.data_format, scale=True
                )
                # Random hash suffix avoids filename collisions across calls.
                fname = "{prefix}_{index}_{hash}.{format}".format(
                    prefix=self.save_prefix,
                    index=j,
                    hash=np.random.randint(1e7),
                    format=self.save_format,
                )
                img.save(os.path.join(self.save_to_dir, fname))
        # build batch of labels
        if self.class_mode == "input":
            batch_y = batch_x.copy()
        elif self.class_mode in {"binary", "sparse"}:
            batch_y = np.empty(len(batch_x), dtype=self.dtype)
            for i, n_observation in enumerate(index_array):
                batch_y[i] = self.classes[n_observation]
        elif self.class_mode == "categorical":
            # One-hot encode class labels.
            batch_y = np.zeros(
                (len(batch_x), len(self.class_indices)), dtype=self.dtype
            )
            for i, n_observation in enumerate(index_array):
                batch_y[i, self.classes[n_observation]] = 1.0
        elif self.class_mode == "multi_output":
            batch_y = [output[index_array] for output in self.labels]
        elif self.class_mode == "raw":
            batch_y = self.labels[index_array]
        else:
            # class_mode is None: yield inputs only.
            return batch_x
        if self.sample_weight is None:
            return batch_x, batch_y
        else:
            return batch_x, batch_y, self.sample_weight[index_array]

    @property
    def filepaths(self):
        """List of absolute paths to image files."""
        raise NotImplementedError(
            "`filepaths` property method has not "
            "been implemented in {}.".format(type(self).__name__)
        )

    @property
    def labels(self):
        """Class labels of every observation."""
        raise NotImplementedError(
            "`labels` property method has not been implemented in {}.".format(
                type(self).__name__
            )
        )

    @property
    def sample_weight(self):
        # Subclasses that support per-sample weighting must override this.
        raise NotImplementedError(
            "`sample_weight` property method has not "
            "been implemented in {}.".format(type(self).__name__)
        )
@keras_export("keras._legacy.preprocessing.image.DirectoryIterator")
class DirectoryIterator(BatchFromFilesMixin, Iterator):
"""Iterator capable of reading images from a directory on disk.
DEPRECATED.
"""
allowed_class_modes = {"categorical", "binary", "sparse", "input", None}
def __init__(
self,
directory,
image_data_generator,
target_size=(256, 256),
color_mode="rgb",
classes=None,
class_mode="categorical",
batch_size=32,
shuffle=True,
seed=None,
data_format=None,
save_to_dir=None,
save_prefix="",
save_format="png",
follow_links=False,
subset=None,
interpolation="nearest",
keep_aspect_ratio=False,
dtype=None,
):
if data_format is None:
data_format = backend.image_data_format()
if dtype is None:
dtype = backend.floatx()
super().set_processing_attrs(
image_data_generator,
target_size,
color_mode,
data_format,
save_to_dir,
save_prefix,
save_format,
subset,
interpolation,
keep_aspect_ratio,
)
self.directory = directory
self.classes = classes
if class_mode not in self.allowed_class_modes:
raise ValueError(
"Invalid class_mode: {}; expected one of: {}".format(
class_mode, self.allowed_class_modes
)
)
self.class_mode = class_mode
self.dtype = dtype
# First, count the number of samples and classes.
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_classes = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
pool = multiprocessing.pool.ThreadPool()
# Second, build an index of the images
# in the different class subfolders.
results = []
self.filenames = []
i = 0
for dirpath in (os.path.join(directory, subdir) for subdir in classes):
results.append(
pool.apply_async(
_list_valid_filenames_in_directory,
(
dirpath,
self.white_list_formats,
self.split,
self.class_indices,
follow_links,
),
)
)
classes_list = []
for res in results:
classes, filenames = res.get()
classes_list.append(classes)
self.filenames += filenames
self.samples = len(self.filenames)
self.classes = np.zeros((self.samples,), dtype="int32")
for classes in classes_list:
self.classes[i : i + len(classes)] = classes
i += len(classes)
io_utils.print_msg(
f"Found {self.samples} images belonging to "
f"{self.num_classes} classes."
)
pool.close()
pool.join()
self._filepaths = [
os.path.join(self.directory, fname) for fname in self.filenames
]
super().__init__(self.samples, batch_size, shuffle, seed)
@property
def filepaths(self):
return self._filepaths
@property
def labels(self):
return self.classes
@property # mixin needs this property to work
def sample_weight(self):
# no sample weights will be returned
return None
@keras_export("keras._legacy.preprocessing.image.NumpyArrayIterator")
class NumpyArrayIterator(Iterator):
"""Iterator yielding data from a Numpy array.
DEPRECATED.
"""
def __init__(
self,
x,
y,
image_data_generator,
batch_size=32,
shuffle=False,
sample_weight=None,
seed=None,
data_format=None,
save_to_dir=None,
save_prefix="",
save_format="png",
subset=None,
ignore_class_split=False,
dtype=None,
):
if data_format is None:
data_format = backend.image_data_format()
if dtype is None:
dtype = backend.floatx()
self.dtype = dtype
if isinstance(x, tuple) or isinstance(x, list):
if not isinstance(x[1], list):
x_misc = [np.asarray(x[1])]
else:
x_misc = [np.asarray(xx) for xx in x[1]]
x = x[0]
for xx in x_misc:
if len(x) != len(xx):
raise ValueError(
"All of the arrays in `x` "
"should have the same length. "
"Found a pair with: "
f"len(x[0]) = {len(x)}, len(x[?]) = {len(xx)}"
)
else:
x_misc = []
if y is not None and len(x) != len(y):
raise ValueError(
"`x` (images tensor) and `y` (labels) "
"should have the same length. "
f"Found: x.shape = {np.asarray(x).shape}, "
f"y.shape = {np.asarray(y).shape}"
)
if sample_weight is not None and len(x) != len(sample_weight):
raise ValueError(
"`x` (images tensor) and `sample_weight` "
"should have the same length. "
f"Found: x.shape = {np.asarray(x).shape}, "
f"sample_weight.shape = {np.asarray(sample_weight).shape}"
)
if subset is not None:
if subset not in {"training", "validation"}:
raise ValueError(
f"Invalid subset name: {subset}"
'; expected "training" or "validation".'
)
split_idx = int(len(x) * image_data_generator._validation_split)
if (
y is not None
and not ignore_class_split
and not np.array_equal(
np.unique(y[:split_idx]), np.unique(y[split_idx:])
)
):
raise ValueError(
"Training and validation subsets "
"have different number of classes after "
"the split. If your numpy arrays are "
"sorted by the label, you might want "
"to shuffle them."
)
if subset == "validation":
x = x[:split_idx]
x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]
if y is not None:
y = y[:split_idx]
else:
x = x[split_idx:]
x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]
if y is not None:
y = y[split_idx:]
self.x = np.asarray(x, dtype=self.dtype)
self.x_misc = x_misc
if self.x.ndim != 4:
raise ValueError(
"Input data in `NumpyArrayIterator` "
"should have rank 4. You passed an array "
f"with shape {self.x.shape}"
)
channels_axis = 3 if data_format == "channels_last" else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
warnings.warn(
f"NumpyArrayIterator is set to use the data format convention"
f' "{data_format}" (channels on axis {channels_axis})'
", i.e. expected either 1, 3, or 4 channels "
f"on axis {channels_axis}. "
f"However, it was passed an array with shape {self.x.shape}"
f" ({self.x.shape[channels_axis]} channels)."
)
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
if sample_weight is not None:
self.sample_weight = np.asarray(sample_weight)
else:
self.sample_weight = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super().__init__(x.shape[0], batch_size, shuffle, seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(
tuple([len(index_array)] + list(self.x.shape)[1:]), dtype=self.dtype
)
for i, j in enumerate(index_array):
x = self.x[j]
params = self.image_data_generator.get_random_transform(x.shape)
x = self.image_data_generator.apply_transform(
x.astype(self.dtype), params
)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = image_utils.array_to_img(
batch_x[i], self.data_format, scale=True
)
fname = "{prefix}_{index}_{hash}.{format}".format(
prefix=self.save_prefix,
index=j,
hash=np.random.randint(1e4),
format=self.save_format,
)
img.save(os.path.join(self.save_to_dir, fname))
batch_x_miscs = [xx[index_array] for xx in self.x_misc]
output = (batch_x if not batch_x_miscs else [batch_x] + batch_x_miscs,)
if self.y is None:
return output[0]
output += (self.y[index_array],)
if self.sample_weight is not None:
output += (self.sample_weight[index_array],)
return output
def validate_filename(filename, white_list_formats):
    """Check if a filename refers to a valid file.

    Args:
        filename: String, absolute path to a file.
        white_list_formats: Set/tuple of allowed lowercase extensions.

    Returns:
        True when the (lowercased) filename carries an allowed extension
        and points at an existing regular file.
    """
    has_valid_extension = filename.lower().endswith(white_list_formats)
    return has_valid_extension and os.path.isfile(filename)
class DataFrameIterator(BatchFromFilesMixin, Iterator):
"""Iterator capable of reading images from a directory as a dataframe."""
allowed_class_modes = {
"binary",
"categorical",
"input",
"multi_output",
"raw",
"sparse",
None,
}
    def __init__(
        self,
        dataframe,
        directory=None,
        image_data_generator=None,
        x_col="filename",
        y_col="class",
        weight_col=None,
        target_size=(256, 256),
        color_mode="rgb",
        classes=None,
        class_mode="categorical",
        batch_size=32,
        shuffle=True,
        seed=None,
        data_format="channels_last",
        save_to_dir=None,
        save_prefix="",
        save_format="png",
        subset=None,
        interpolation="nearest",
        keep_aspect_ratio=False,
        dtype="float32",
        validate_filenames=True,
    ):
        """Build an iterator over image filenames/labels from `dataframe`.

        Filenames come from column `x_col` (joined onto `directory` when
        given), labels from `y_col` according to `class_mode`, and optional
        per-sample weights from `weight_col`.
        """
        super().set_processing_attrs(
            image_data_generator,
            target_size,
            color_mode,
            data_format,
            save_to_dir,
            save_prefix,
            save_format,
            subset,
            interpolation,
            keep_aspect_ratio,
        )
        # Work on a copy so the caller's dataframe is never mutated.
        df = dataframe.copy()
        self.directory = directory or ""
        self.class_mode = class_mode
        self.dtype = dtype
        # check that inputs match the required class_mode
        self._check_params(df, x_col, y_col, weight_col, classes)
        if (
            validate_filenames
        ):  # check which image files are valid and keep them
            df = self._filter_valid_filepaths(df, x_col)
        if class_mode not in ["input", "multi_output", "raw", None]:
            df, classes = self._filter_classes(df, y_col, classes)
            num_classes = len(classes)
            # build an index of all the unique classes
            self.class_indices = dict(zip(classes, range(len(classes))))
        # retrieve only training or validation set
        if self.split:
            num_files = len(df)
            start = int(self.split[0] * num_files)
            stop = int(self.split[1] * num_files)
            df = df.iloc[start:stop, :]
        # get labels for each observation
        if class_mode not in ["input", "multi_output", "raw", None]:
            self.classes = self.get_classes(df, y_col)
        self.filenames = df[x_col].tolist()
        self._sample_weight = df[weight_col].values if weight_col else None
        if class_mode == "multi_output":
            # `y_col` is a list of column names in this mode.
            self._targets = [np.array(df[col].tolist()) for col in y_col]
        if class_mode == "raw":
            self._targets = df[y_col].values
        self.samples = len(self.filenames)
        validated_string = (
            "validated" if validate_filenames else "non-validated"
        )
        if class_mode in ["input", "multi_output", "raw", None]:
            io_utils.print_msg(
                f"Found {self.samples} {validated_string} image filenames."
            )
        else:
            io_utils.print_msg(
                f"Found {self.samples} {validated_string} image filenames "
                f"belonging to {num_classes} classes."
            )
        self._filepaths = [
            os.path.join(self.directory, fname) for fname in self.filenames
        ]
        super().__init__(self.samples, batch_size, shuffle, seed)
    def _check_params(self, df, x_col, y_col, weight_col, classes):
        """Validate dataframe columns and arguments against `class_mode`.

        Raises:
            ValueError: for an unsupported `class_mode` or a wrong number
                of classes in `"binary"` mode.
            TypeError: for column values of the wrong type, or a non-list
                `y_col` in `"multi_output"` mode.
        """
        # check class mode is one of the currently supported
        if self.class_mode not in self.allowed_class_modes:
            raise ValueError(
                "Invalid class_mode: {}; expected one of: {}".format(
                    self.class_mode, self.allowed_class_modes
                )
            )
        # check that y_col has several column names if class_mode is
        # multi_output
        if (self.class_mode == "multi_output") and not isinstance(y_col, list):
            raise TypeError(
                'If class_mode="{}", y_col must be a list. Received {}.'.format(
                    self.class_mode, type(y_col).__name__
                )
            )
        # check that filenames/filepaths column values are all strings
        if not all(df[x_col].apply(lambda x: isinstance(x, str))):
            raise TypeError(
                f"All values in column x_col={x_col} must be strings."
            )
        # check labels are string if class_mode is binary or sparse
        if self.class_mode in {"binary", "sparse"}:
            if not all(df[y_col].apply(lambda x: isinstance(x, str))):
                raise TypeError(
                    'If class_mode="{}", y_col="{}" column '
                    "values must be strings.".format(self.class_mode, y_col)
                )
        # check that if binary there are only 2 different classes
        if self.class_mode == "binary":
            if classes:
                classes = set(classes)
                if len(classes) != 2:
                    raise ValueError(
                        'If class_mode="binary" there must be 2 '
                        "classes. {} class/es were given.".format(len(classes))
                    )
            elif df[y_col].nunique() != 2:
                raise ValueError(
                    'If class_mode="binary" there must be 2 classes. '
                    "Found {} classes.".format(df[y_col].nunique())
                )
        # check values are string, list or tuple if class_mode is categorical
        if self.class_mode == "categorical":
            types = (str, list, tuple)
            if not all(df[y_col].apply(lambda x: isinstance(x, types))):
                raise TypeError(
                    'If class_mode="{}", y_col="{}" column '
                    "values must be type string, list or tuple.".format(
                        self.class_mode, y_col
                    )
                )
        # raise warning if classes are given but will be unused
        if classes and self.class_mode in {
            "input",
            "multi_output",
            "raw",
            None,
        }:
            warnings.warn(
                '`classes` will be ignored given the class_mode="{}"'.format(
                    self.class_mode
                )
            )
        # check that if weight column that the values are numerical
        if weight_col and not issubclass(df[weight_col].dtype.type, np.number):
            raise TypeError(f"Column weight_col={weight_col} must be numeric.")
def get_classes(self, df, y_col):
labels = []
for label in df[y_col]:
if isinstance(label, (list, tuple)):
labels.append([self.class_indices[lbl] for lbl in label])
else:
labels.append(self.class_indices[label])
return labels
@staticmethod
def _filter_classes(df, y_col, classes):
df = df.copy()
def remove_classes(labels, classes):
if isinstance(labels, (list, tuple)):
labels = [cls for cls in labels if cls in classes]
return labels or None
elif isinstance(labels, str):
return labels if labels in classes else None
else:
raise TypeError(
"Expect string, list or tuple "
"but found {} in {} column ".format(type(labels), y_col)
)
if classes:
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/legacy/preprocessing/sequence.py | keras/src/legacy/preprocessing/sequence.py | """Deprecated sequence preprocessing APIs from Keras 1."""
import json
import random
import numpy as np
from keras.src.api_export import keras_export
from keras.src.trainers.data_adapters.py_dataset_adapter import PyDataset
@keras_export("keras._legacy.preprocessing.sequence.TimeseriesGenerator")
class TimeseriesGenerator(PyDataset):
"""Utility class for generating batches of temporal data.
DEPRECATED.
This class takes in a sequence of data-points gathered at
equal intervals, along with time series parameters such as
stride, length of history, etc., to produce batches for
training/validation.
Arguments:
data: Indexable generator (such as list or Numpy array)
containing consecutive data points (timesteps).
The data should be at 2D, and axis 0 is expected
to be the time dimension.
targets: Targets corresponding to timesteps in `data`.
It should have same length as `data`.
length: Length of the output sequences (in number of timesteps).
sampling_rate: Period between successive individual timesteps
within sequences. For rate `r`, timesteps
`data[i]`, `data[i-r]`, ... `data[i - length]`
are used for create a sample sequence.
stride: Period between successive output sequences.
For stride `s`, consecutive output samples would
be centered around `data[i]`, `data[i+s]`, `data[i+2*s]`, etc.
start_index: Data points earlier than `start_index` will not be used
in the output sequences. This is useful to reserve part of the
data for test or validation.
end_index: Data points later than `end_index` will not be used
in the output sequences. This is useful to reserve part of the
data for test or validation.
shuffle: Whether to shuffle output samples,
or instead draw them in chronological order.
reverse: Boolean: if `true`, timesteps in each output sample will be
in reverse chronological order.
batch_size: Number of timeseries samples in each batch
(except maybe the last one).
**kwargs: Additional keyword arguments for the `PyDataset` base class,
such as `workers`, `use_multiprocessing`, and `max_queue_size`.
Returns:
A PyDataset instance.
"""
def __init__(
self,
data,
targets,
length,
sampling_rate=1,
stride=1,
start_index=0,
end_index=None,
shuffle=False,
reverse=False,
batch_size=128,
**kwargs,
):
super().__init__(**kwargs)
if len(data) != len(targets):
raise ValueError(
"Data and targets have to be "
f"of same length. Data length is {len(data)} "
f"while target length is {len(targets)}"
)
self.data = data
self.targets = targets
self.length = length
self.sampling_rate = sampling_rate
self.stride = stride
self.start_index = start_index + length
if end_index is None:
end_index = len(data) - 1
self.end_index = end_index
self.shuffle = shuffle
self.reverse = reverse
self.batch_size = batch_size
if self.start_index > self.end_index:
raise ValueError(
f"`start_index+length={self.start_index} "
f"> end_index={self.end_index}` "
"is disallowed, as no part of the sequence "
"would be left to be used as current step."
)
def __len__(self):
return (
self.end_index - self.start_index + self.batch_size * self.stride
) // (self.batch_size * self.stride)
def __getitem__(self, index):
if self.shuffle:
rows = np.random.randint(
self.start_index, self.end_index + 1, size=self.batch_size
)
else:
i = self.start_index + self.batch_size * self.stride * index
rows = np.arange(
i,
min(i + self.batch_size * self.stride, self.end_index + 1),
self.stride,
)
samples = np.array(
[
self.data[row - self.length : row : self.sampling_rate]
for row in rows
]
)
targets = np.array([self.targets[row] for row in rows])
if self.reverse:
return samples[:, ::-1, ...], targets
return samples, targets
def get_config(self):
"""Returns the TimeseriesGenerator configuration as Python dictionary.
Returns:
A Python dictionary with the TimeseriesGenerator configuration.
"""
data = self.data
if type(self.data).__module__ == np.__name__:
data = self.data.tolist()
try:
json_data = json.dumps(data)
except TypeError as e:
raise TypeError(f"Data not JSON Serializable: {data}") from e
targets = self.targets
if type(self.targets).__module__ == np.__name__:
targets = self.targets.tolist()
try:
json_targets = json.dumps(targets)
except TypeError as e:
raise TypeError(f"Targets not JSON Serializable: {targets}") from e
config = super().get_config()
config.update(
{
"data": json_data,
"targets": json_targets,
"length": self.length,
"sampling_rate": self.sampling_rate,
"stride": self.stride,
"start_index": self.start_index,
"end_index": self.end_index,
"shuffle": self.shuffle,
"reverse": self.reverse,
"batch_size": self.batch_size,
}
)
return config
def to_json(self, **kwargs):
"""Returns a JSON string containing the generator's configuration.
Args:
**kwargs: Additional keyword arguments to be passed
to `json.dumps()`.
Returns:
A JSON string containing the tokenizer configuration.
"""
config = self.get_config()
timeseries_generator_config = {
"class_name": self.__class__.__name__,
"config": config,
}
return json.dumps(timeseries_generator_config, **kwargs)
@keras_export("keras._legacy.preprocessing.sequence.make_sampling_table")
def make_sampling_table(size, sampling_factor=1e-5):
    """Generates a word rank-based probabilistic sampling table.

    DEPRECATED.

    Used for generating the `sampling_table` argument for `skipgrams`.
    Entry `i` is the probability of sampling the i-th most common word in
    a dataset (more common words should be sampled less frequently, for
    balance), following the word2vec subsampling distribution:

    ```
    p(word) = (min(1, sqrt(word_frequency / sampling_factor) /
               (word_frequency / sampling_factor)))
    ```

    Word frequencies are assumed to follow Zipf's law (s=1), using the
    numerical approximation
    `frequency(rank) ~ 1/(rank * (log(rank) + gamma) + 1/2 - 1/(12*rank))`
    with `gamma` the Euler-Mascheroni constant.

    Args:
        size: Int, number of possible words to sample.
        sampling_factor: The sampling factor in the word2vec formula.

    Returns:
        A 1D Numpy array of length `size` where the ith entry
        is the probability that a word of rank i should be sampled.
    """
    euler_mascheroni = 0.577
    ranks = np.arange(size)
    ranks[0] = 1  # avoid log(0) for the rank-0 slot
    inverse_frequency = (
        ranks * (np.log(ranks) + euler_mascheroni)
        + 0.5
        - 1.0 / (12.0 * ranks)
    )
    scaled = sampling_factor * inverse_frequency
    return np.minimum(1.0, scaled / np.sqrt(scaled))
@keras_export("keras._legacy.preprocessing.sequence.skipgrams")
def skipgrams(
    sequence,
    vocabulary_size,
    window_size=4,
    negative_samples=1.0,
    shuffle=True,
    categorical=False,
    sampling_table=None,
    seed=None,
):
    """Generates skipgram word pairs.

    DEPRECATED.

    This function transforms a sequence of word indexes (list of integers)
    into tuples of words of the form:

    - (word, word in the same window), with label 1 (positive samples).
    - (word, random word from the vocabulary), with label 0
      (negative samples).

    Read more about Skipgram in this gnomic paper by Mikolov et al.:
    [Efficient Estimation of Word Representations in
    Vector Space](http://arxiv.org/pdf/1301.3781v3.pdf)

    Args:
        sequence: A word sequence (sentence), encoded as a list
            of word indices (integers). If using a `sampling_table`,
            word indices are expected to match the rank
            of the words in a reference dataset (e.g. 10 would encode
            the 10-th most frequently occurring token).
            Note that index 0 is expected to be a non-word and will be
            skipped.
        vocabulary_size: Int, maximum possible word index + 1
        window_size: Int, size of sampling windows (technically
            half-window). The window of a word `w_i` will be
            `[i - window_size, i + window_size+1]`.
        negative_samples: Float >= 0. 0 for no negative (i.e. random)
            samples. 1 for same number as positive samples.
        shuffle: Whether to shuffle the word couples before returning them.
        categorical: bool. if False, labels will be
            integers (eg. `[0, 1, 1 .. ]`),
            if `True`, labels will be categorical, e.g.
            `[[1,0],[0,1],[0,1] .. ]`.
        sampling_table: 1D array of size `vocabulary_size` where the entry i
            encodes the probability to sample a word of rank i.
        seed: Random seed.

    Returns:
        couples, labels: where `couples` are int pairs and
            `labels` are either 0 or 1.

    Note:
        By convention, index 0 in the vocabulary is
        a non-word and will be skipped.
    """
    couples = []
    labels = []
    # Positive samples: all in-window pairs around each (non-zero) word.
    for i, wi in enumerate(sequence):
        if not wi:
            continue
        if sampling_table is not None:
            # Probabilistically drop frequent words.
            if sampling_table[wi] < random.random():
                continue
        window_start = max(0, i - window_size)
        window_end = min(len(sequence), i + window_size + 1)
        for j in range(window_start, window_end):
            if j != i:
                wj = sequence[j]
                if not wj:
                    continue
                couples.append([wi, wj])
                if categorical:
                    labels.append([0, 1])
                else:
                    labels.append(1)
    # Negative samples: pair observed center words with random vocabulary
    # indices (never index 0).
    if negative_samples > 0:
        num_negative_samples = int(len(labels) * negative_samples)
        words = [c[0] for c in couples]
        random.shuffle(words)
        couples += [
            [words[i % len(words)], random.randint(1, vocabulary_size - 1)]
            for i in range(num_negative_samples)
        ]
        if categorical:
            labels += [[1, 0]] * num_negative_samples
        else:
            labels += [0] * num_negative_samples
    if shuffle:
        if seed is None:
            # Must be an integer bound: the legacy literal `10e6` is a
            # float, and `random.randint` rejects non-integer arguments
            # on Python >= 3.12 (ValueError).
            seed = random.randint(0, 10_000_000)
        random.seed(seed)
        random.shuffle(couples)
        # Re-seed so labels are permuted identically to couples.
        random.seed(seed)
        random.shuffle(labels)
    return couples, labels
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/legacy/preprocessing/__init__.py | keras/src/legacy/preprocessing/__init__.py | python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false | |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/legacy/preprocessing/text.py | keras/src/legacy/preprocessing/text.py | """Deprecated text preprocessing APIs from Keras 1."""
import collections
import hashlib
import json
import warnings
import numpy as np
from keras.src.api_export import keras_export
@keras_export("keras._legacy.preprocessing.text.text_to_word_sequence")
def text_to_word_sequence(
    input_text,
    filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
    lower=True,
    split=" ",
):
    """DEPRECATED. Split `input_text` into a list of word tokens.

    Every character in `filters` is replaced by `split` before
    splitting, and empty tokens are dropped.
    """
    text = input_text.lower() if lower else input_text
    # Map each filter character onto the split token in a single C-level pass.
    table = str.maketrans({ch: split for ch in filters})
    tokens = text.translate(table).split(split)
    return [token for token in tokens if token]
@keras_export("keras._legacy.preprocessing.text.one_hot")
def one_hot(
    input_text,
    n,
    filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
    lower=True,
    split=" ",
    analyzer=None,
):
    """DEPRECATED. Hash each word of `input_text` into `[1, n)`.

    Thin wrapper around `hashing_trick` using the builtin `hash`
    (note: builtin `hash` of strings is not stable across processes).
    """
    tokenize_kwargs = dict(
        filters=filters, lower=lower, split=split, analyzer=analyzer
    )
    return hashing_trick(input_text, n, hash_function=hash, **tokenize_kwargs)
@keras_export("keras._legacy.preprocessing.text.hashing_trick")
def hashing_trick(
    text,
    n,
    hash_function=None,
    filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
    lower=True,
    split=" ",
    analyzer=None,
):
    """DEPRECATED. Map each token of `text` to an integer in `[1, n)`.

    `hash_function` may be a callable, `None` (builtin `hash`) or the
    string `"md5"`. Index 0 is reserved and never produced.
    """
    if hash_function == "md5":
        # Stable across processes, unlike the builtin `hash`.
        def hash_function(w):
            return int(hashlib.md5(w.encode()).hexdigest(), 16)

    elif hash_function is None:
        hash_function = hash
    if analyzer is not None:
        seq = analyzer(text)
    else:
        seq = text_to_word_sequence(
            text, filters=filters, lower=lower, split=split
        )
    return [hash_function(w) % (n - 1) + 1 for w in seq]
@keras_export("keras._legacy.preprocessing.text.Tokenizer")
class Tokenizer:
    """DEPRECATED. Keras 1 text tokenization utility.

    Builds a word-frequency vocabulary from texts (`fit_on_texts`) and
    converts texts to integer sequences or document-term matrices.
    By convention index 0 is reserved and never assigned to a word; if
    `oov_token` is set, it is forced to index 1.
    """

    def __init__(
        self,
        num_words=None,
        filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
        lower=True,
        split=" ",
        char_level=False,
        oov_token=None,
        analyzer=None,
        **kwargs,
    ):
        # Legacy support: `nb_words` was renamed to `num_words`.
        if "nb_words" in kwargs:
            warnings.warn(
                "The `nb_words` argument in `Tokenizer` "
                "has been renamed `num_words`."
            )
            num_words = kwargs.pop("nb_words")
        # `document_count` is accepted as a kwarg so `tokenizer_from_json`
        # can restore it through the constructor.
        document_count = kwargs.pop("document_count", 0)
        if kwargs:
            raise TypeError(f"Unrecognized keyword arguments: {str(kwargs)}")
        # word -> total occurrence count, insertion-ordered.
        self.word_counts = collections.OrderedDict()
        # word -> number of documents containing that word.
        self.word_docs = collections.defaultdict(int)
        self.filters = filters
        self.split = split
        self.lower = lower
        self.num_words = num_words
        self.document_count = document_count
        self.char_level = char_level
        self.oov_token = oov_token
        # word index -> number of documents containing that word.
        self.index_docs = collections.defaultdict(int)
        # word -> index (built by `fit_on_texts`), and its inverse.
        self.word_index = {}
        self.index_word = {}
        self.analyzer = analyzer

    def fit_on_texts(self, texts):
        """Update the vocabulary statistics from an iterable of texts.

        Each element of `texts` may be a string, or (when `char_level`
        or pre-tokenized input) a sequence of tokens.
        """
        for text in texts:
            self.document_count += 1
            # char_level / pre-tokenized input: each element is a token.
            if self.char_level or isinstance(text, list):
                if self.lower:
                    if isinstance(text, list):
                        text = [text_elem.lower() for text_elem in text]
                    else:
                        text = text.lower()
                seq = text
            else:
                if self.analyzer is None:
                    seq = text_to_word_sequence(
                        text,
                        filters=self.filters,
                        lower=self.lower,
                        split=self.split,
                    )
                else:
                    seq = self.analyzer(text)
            for w in seq:
                if w in self.word_counts:
                    self.word_counts[w] += 1
                else:
                    self.word_counts[w] = 1
            for w in set(seq):
                # In how many documents each word occurs
                self.word_docs[w] += 1
        # Rebuild the index: most frequent word gets the lowest index.
        wcounts = list(self.word_counts.items())
        wcounts.sort(key=lambda x: x[1], reverse=True)
        # forcing the oov_token to index 1 if it exists
        if self.oov_token is None:
            sorted_voc = []
        else:
            sorted_voc = [self.oov_token]
        sorted_voc.extend(wc[0] for wc in wcounts)
        # note that index 0 is reserved, never assigned to an existing word
        self.word_index = dict(
            zip(sorted_voc, list(range(1, len(sorted_voc) + 1)))
        )
        self.index_word = {c: w for w, c in self.word_index.items()}
        # Mirror document frequencies onto the new indices.
        for w, c in list(self.word_docs.items()):
            self.index_docs[self.word_index[w]] = c

    def fit_on_sequences(self, sequences):
        """Update document statistics from already-indexed sequences."""
        self.document_count += len(sequences)
        for seq in sequences:
            seq = set(seq)
            for i in seq:
                self.index_docs[i] += 1

    def texts_to_sequences(self, texts):
        """Convert texts to lists of word indices (see generator variant)."""
        return list(self.texts_to_sequences_generator(texts))

    def texts_to_sequences_generator(self, texts):
        """Yield one list of word indices per input text.

        Words outside the top `num_words`, or unknown words, are mapped
        to the OOV index when `oov_token` is set, otherwise dropped.
        """
        num_words = self.num_words
        oov_token_index = self.word_index.get(self.oov_token)
        for text in texts:
            if self.char_level or isinstance(text, list):
                if self.lower:
                    if isinstance(text, list):
                        text = [text_elem.lower() for text_elem in text]
                    else:
                        text = text.lower()
                seq = text
            else:
                if self.analyzer is None:
                    seq = text_to_word_sequence(
                        text,
                        filters=self.filters,
                        lower=self.lower,
                        split=self.split,
                    )
                else:
                    seq = self.analyzer(text)
            vect = []
            for w in seq:
                i = self.word_index.get(w)
                if i is not None:
                    if num_words and i >= num_words:
                        if oov_token_index is not None:
                            vect.append(oov_token_index)
                    else:
                        vect.append(i)
                elif self.oov_token is not None:
                    vect.append(oov_token_index)
            yield vect

    def sequences_to_texts(self, sequences):
        """Inverse of `texts_to_sequences` (see generator variant)."""
        return list(self.sequences_to_texts_generator(sequences))

    def sequences_to_texts_generator(self, sequences):
        """Yield one space-joined string per input index sequence."""
        num_words = self.num_words
        oov_token_index = self.word_index.get(self.oov_token)
        for seq in sequences:
            vect = []
            for num in seq:
                word = self.index_word.get(num)
                if word is not None:
                    if num_words and num >= num_words:
                        if oov_token_index is not None:
                            vect.append(self.index_word[oov_token_index])
                    else:
                        vect.append(word)
                elif self.oov_token is not None:
                    vect.append(self.index_word[oov_token_index])
            vect = " ".join(vect)
            yield vect

    def texts_to_matrix(self, texts, mode="binary"):
        """Vectorize texts into a (num_texts, num_words) matrix."""
        sequences = self.texts_to_sequences(texts)
        return self.sequences_to_matrix(sequences, mode=mode)

    def sequences_to_matrix(self, sequences, mode="binary"):
        """Vectorize index sequences into a (num_sequences, num_words) matrix.

        `mode` is one of "binary", "count", "freq" or "tfidf".
        """
        if not self.num_words:
            if self.word_index:
                num_words = len(self.word_index) + 1
            else:
                raise ValueError(
                    "Specify a dimension (`num_words` argument), "
                    "or fit on some text data first."
                )
        else:
            num_words = self.num_words
        if mode == "tfidf" and not self.document_count:
            raise ValueError(
                "Fit the Tokenizer on some data before using tfidf mode."
            )
        x = np.zeros((len(sequences), num_words))
        for i, seq in enumerate(sequences):
            if not seq:
                continue
            counts = collections.defaultdict(int)
            for j in seq:
                # Indices beyond the matrix width are silently ignored.
                if j >= num_words:
                    continue
                counts[j] += 1
            for j, c in list(counts.items()):
                if mode == "count":
                    x[i][j] = c
                elif mode == "freq":
                    x[i][j] = c / len(seq)
                elif mode == "binary":
                    x[i][j] = 1
                elif mode == "tfidf":
                    # Use weighting scheme 2 in
                    # https://en.wikipedia.org/wiki/Tf%E2%80%93idf
                    tf = 1 + np.log(c)
                    idf = np.log(
                        1
                        + self.document_count / (1 + self.index_docs.get(j, 0))
                    )
                    x[i][j] = tf * idf
                else:
                    raise ValueError("Unknown vectorization mode:", mode)
        return x

    def get_config(self):
        """Return a JSON-serializable config including vocabulary state.

        The dict-valued attributes are themselves JSON-encoded strings;
        `tokenizer_from_json` decodes them again.
        """
        json_word_counts = json.dumps(self.word_counts)
        json_word_docs = json.dumps(self.word_docs)
        json_index_docs = json.dumps(self.index_docs)
        json_word_index = json.dumps(self.word_index)
        json_index_word = json.dumps(self.index_word)
        return {
            "num_words": self.num_words,
            "filters": self.filters,
            "lower": self.lower,
            "split": self.split,
            "char_level": self.char_level,
            "oov_token": self.oov_token,
            "document_count": self.document_count,
            "word_counts": json_word_counts,
            "word_docs": json_word_docs,
            "index_docs": json_index_docs,
            "index_word": json_index_word,
            "word_index": json_word_index,
        }

    def to_json(self, **kwargs):
        """Serialize the tokenizer to a JSON string (see
        `tokenizer_from_json` for the inverse). `kwargs` are passed to
        `json.dumps`."""
        config = self.get_config()
        tokenizer_config = {
            "class_name": self.__class__.__name__,
            "config": config,
        }
        return json.dumps(tokenizer_config, **kwargs)
@keras_export("keras._legacy.preprocessing.text.tokenizer_from_json")
def tokenizer_from_json(json_string):
    """DEPRECATED. Revive a `Tokenizer` from its `to_json` payload."""
    config = json.loads(json_string).get("config")
    # The vocabulary statistics are stored as nested JSON strings; pop
    # them out so only constructor arguments remain in `config`.
    state = {
        key: json.loads(config.pop(key))
        for key in (
            "word_counts",
            "word_docs",
            "index_docs",
            "index_word",
            "word_index",
        )
    }
    # json.dumps() stringifies integer keys; restore them.
    state["index_docs"] = {int(k): v for k, v in state["index_docs"].items()}
    state["index_word"] = {int(k): v for k, v in state["index_word"].items()}
    tokenizer = Tokenizer(**config)
    tokenizer.word_counts = state["word_counts"]
    tokenizer.word_docs = state["word_docs"]
    tokenizer.index_docs = state["index_docs"]
    tokenizer.word_index = state["word_index"]
    tokenizer.index_word = state["index_word"]
    return tokenizer
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/export/litert_test.py | keras/src/export/litert_test.py | import os
import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import testing
from keras.src import tree
from keras.src.saving import saving_lib
from keras.src.testing.test_utils import named_product
from keras.src.utils.module_utils import litert
from keras.src.utils.module_utils import tensorflow
# Set up LiteRT interpreter with fallback logic:
# 1. Try AI Edge LiteRT interpreter (preferred)
# 2. Fall back to TensorFlow Lite interpreter if AI Edge LiteRT unavailable
AI_EDGE_LITERT_AVAILABLE = False
LiteRTInterpreter = None
if backend.backend() == "tensorflow":
    if litert.available:
        try:
            from ai_edge_litert.interpreter import (
                Interpreter as LiteRTInterpreter,
            )

            AI_EDGE_LITERT_AVAILABLE = True
        except (ImportError, OSError):
            # The package is reported available but failed to load
            # (e.g. OSError from missing native libraries) -- fall back.
            LiteRTInterpreter = tensorflow.lite.Interpreter
    else:
        LiteRTInterpreter = tensorflow.lite.Interpreter

# Model types to test (LSTM only if AI Edge LiteRT is available)
model_types = ["sequential", "functional"]
# TODO(#21914): `"lstm"` does not work with ai-edge-litert==1.3.0.
# Unfortunately, for TF 2.20.0, this is the only version which works. Uncomment
# this part when we upgrade TF and ai-edge-litert.
# if AI_EDGE_LITERT_AVAILABLE:
#     model_types.append("lstm")
class CustomModel(models.Model):
    """Minimal subclassed model: applies `layer_list` sequentially."""

    def __init__(self, layer_list):
        super().__init__()
        self.layer_list = layer_list

    def call(self, input):
        x = input
        for transform in self.layer_list:
            x = transform(x)
        return x
def get_model(type="sequential", input_shape=(10,), layer_list=None):
    """Build a small Keras model of the requested architecture `type`.

    Supported types: "sequential", "functional", "subclass", "lstm",
    "multi_input", "multi_output". `layer_list` overrides the default
    Dense/BatchNorm/Dense stack for the first three types; the LSTM and
    multi-input/output variants use their own fixed topologies.
    """
    layer_list = layer_list or [
        layers.Dense(10, activation="relu"),
        layers.BatchNormalization(),
        layers.Dense(1, activation="sigmoid"),
    ]
    if type == "sequential":
        model = models.Sequential(layer_list)
        model.build(input_shape=(None,) + input_shape)
        return model
    if type == "functional":
        # `input_shape` may be a nested structure; build an Input per leaf.
        input = output = tree.map_shape_structure(layers.Input, input_shape)
        for layer in layer_list:
            output = layer(output)
        return models.Model(inputs=input, outputs=output)
    if type == "subclass":
        model = CustomModel(layer_list)
        model.build(input_shape=(None,) + input_shape)
        # Trace the model with dummy data to ensure it's properly built for
        # export
        dummy_input = np.zeros((1,) + input_shape, dtype=np.float32)
        _ = model(dummy_input)  # This traces the model
        return model
    if type == "lstm":
        # Two stacked bidirectional LSTMs over (time=4, features=10).
        inputs = layers.Input((4, 10))
        x = layers.Bidirectional(
            layers.LSTM(
                10,
                kernel_initializer="he_normal",
                return_sequences=True,
                kernel_regularizer=None,
            ),
            merge_mode="sum",
        )(inputs)
        outputs = layers.Bidirectional(
            layers.LSTM(
                10,
                kernel_initializer="he_normal",
                return_sequences=True,
                kernel_regularizer=None,
            ),
            merge_mode="concat",
        )(x)
        return models.Model(inputs=inputs, outputs=outputs)
    if type == "multi_input":
        input1 = layers.Input(shape=input_shape, name="input1")
        input2 = layers.Input(shape=input_shape, name="input2")
        x1 = layers.Dense(10, activation="relu")(input1)
        x2 = layers.Dense(10, activation="relu")(input2)
        combined = layers.concatenate([x1, x2])
        output = layers.Dense(1, activation="sigmoid")(combined)
        return models.Model(inputs=[input1, input2], outputs=output)
    if type == "multi_output":
        inputs = layers.Input(shape=input_shape)
        shared = layers.Dense(20, activation="relu")(inputs)
        output1 = layers.Dense(1, activation="sigmoid", name="output1")(shared)
        output2 = layers.Dense(3, activation="softmax", name="output2")(shared)
        return models.Model(inputs=inputs, outputs=[output1, output2])
    raise ValueError(f"Unknown model type: {type}")
def _convert_to_numpy(structure):
    """Convert every leaf of a (possibly nested) structure to a NumPy array."""

    def to_array(value):
        # Tensors expose `.numpy()`; anything else goes through np.array.
        return value.numpy() if hasattr(value, "numpy") else np.array(value)

    return tree.map_structure(to_array, structure)
def _normalize_name(name):
normalized = name.split(":")[0]
if normalized.startswith("serving_default_"):
normalized = normalized[len("serving_default_") :]
return normalized
def _set_interpreter_inputs(interpreter, inputs):
input_details = interpreter.get_input_details()
if isinstance(inputs, dict):
for detail in input_details:
key = _normalize_name(detail["name"])
if key in inputs:
value = inputs[key]
else:
matched_key = None
for candidate in inputs:
if key.endswith(candidate) or candidate.endswith(key):
matched_key = candidate
break
if matched_key is None:
raise KeyError(
f"Unable to match input '{detail['name']}' in provided "
f"inputs"
)
value = inputs[matched_key]
interpreter.set_tensor(detail["index"], value)
else:
values = inputs
if not isinstance(values, (list, tuple)):
values = [values]
if len(values) != len(input_details):
raise ValueError(
"Number of provided inputs does not match interpreter signature"
)
for detail, value in zip(input_details, values):
interpreter.set_tensor(detail["index"], value)
def _get_interpreter_outputs(interpreter):
output_details = interpreter.get_output_details()
outputs = [
interpreter.get_tensor(detail["index"]) for detail in output_details
]
return outputs[0] if len(outputs) == 1 else outputs
@pytest.mark.skipif(
backend.backend() != "tensorflow",
reason="`export_litert` currently supports the TensorFlow backend only.",
)
class ExportLitertTest(testing.TestCase):
"""Test suite for LiteRT (TFLite) model export functionality.
Tests use AI Edge LiteRT interpreter when available, otherwise fall back
to TensorFlow Lite interpreter for validation.
"""
    @parameterized.named_parameters(named_product(model_type=model_types))
    def test_standard_model_export(self, model_type):
        """Test exporting standard model types to LiteRT format."""
        if model_type == "lstm" and not AI_EDGE_LITERT_AVAILABLE:
            self.skipTest("LSTM models require AI Edge LiteRT interpreter.")
        temp_filepath = os.path.join(
            self.get_temp_dir(), "exported_model.tflite"
        )
        model = get_model(model_type)
        batch_size = 1  # LiteRT expects batch_size=1
        # LSTM models take (batch, time, features); the others take (batch, 10).
        if model_type == "lstm":
            ref_input = np.random.normal(size=(batch_size, 4, 10))
        else:
            ref_input = np.random.normal(size=(batch_size, 10))
        ref_input = ref_input.astype("float32")
        # Reference output from the in-memory Keras model.
        ref_output = _convert_to_numpy(model(ref_input))
        # Test with model.export()
        model.export(temp_filepath, format="litert")
        self.assertTrue(os.path.exists(temp_filepath))
        # Run the exported flatbuffer and compare against the reference.
        interpreter = LiteRTInterpreter(model_path=temp_filepath)
        interpreter.allocate_tensors()
        _set_interpreter_inputs(interpreter, ref_input)
        interpreter.invoke()
        litert_output = _get_interpreter_outputs(interpreter)
        self.assertAllClose(ref_output, litert_output, atol=1e-4, rtol=1e-4)
    @parameterized.named_parameters(
        named_product(struct_type=["tuple", "array", "dict"])
    )
    def test_model_with_input_structure(self, struct_type):
        """Test exporting models with structured inputs (tuple/array/dict)."""
        batch_size = 1  # LiteRT expects batch_size=1
        base_input = np.random.normal(size=(batch_size, 10)).astype("float32")
        # All three variants are the same elementwise-add model over two
        # 10-dim inputs; only the Python structure of `ref_input` differs.
        if struct_type == "tuple":
            # Use Functional API for proper Input layer handling
            input1 = layers.Input(shape=(10,), name="input_1")
            input2 = layers.Input(shape=(10,), name="input_2")
            output = layers.Add()([input1, input2])
            model = models.Model(inputs=[input1, input2], outputs=output)
            ref_input = (base_input, base_input * 2)
        elif struct_type == "array":
            # Use Functional API for proper Input layer handling
            input1 = layers.Input(shape=(10,), name="input_1")
            input2 = layers.Input(shape=(10,), name="input_2")
            output = layers.Add()([input1, input2])
            model = models.Model(inputs=[input1, input2], outputs=output)
            ref_input = [base_input, base_input * 2]
        elif struct_type == "dict":
            # Use Functional API for proper Input layer handling
            input1 = layers.Input(shape=(10,), name="x")
            input2 = layers.Input(shape=(10,), name="y")
            output = layers.Add()([input1, input2])
            model = models.Model(
                inputs={"x": input1, "y": input2}, outputs=output
            )
            ref_input = {"x": base_input, "y": base_input * 2}
        else:
            raise AssertionError("Unexpected structure type")
        temp_filepath = os.path.join(
            self.get_temp_dir(), "exported_model.tflite"
        )
        ref_output = _convert_to_numpy(
            model(tree.map_structure(ops.convert_to_tensor, ref_input))
        )
        # Test with model.export()
        model.export(temp_filepath, format="litert")
        export_path = temp_filepath
        interpreter = LiteRTInterpreter(model_path=export_path)
        interpreter.allocate_tensors()
        feed_inputs = ref_input
        # Tuples are fed positionally, same as lists.
        if isinstance(feed_inputs, tuple):
            feed_inputs = list(feed_inputs)
        _set_interpreter_inputs(interpreter, feed_inputs)
        interpreter.invoke()
        litert_output = _get_interpreter_outputs(interpreter)
        self.assertAllClose(ref_output, litert_output, atol=1e-4, rtol=1e-4)
        # Verify export still works after saving/loading via saving_lib.
        archive_path = os.path.join(self.get_temp_dir(), "revived.keras")
        saving_lib.save_model(model, archive_path)
        revived_model = saving_lib.load_model(archive_path)
        revived_output = _convert_to_numpy(revived_model(ref_input))
        self.assertAllClose(ref_output, revived_output)
    def test_model_with_multiple_inputs(self):
        """Test exporting models with multiple inputs and batch resizing."""
        temp_filepath = os.path.join(
            self.get_temp_dir(), "exported_model.tflite"
        )
        # Use Functional API for proper Input layer handling
        input_x = layers.Input(shape=(10,), name="x")
        input_y = layers.Input(shape=(10,), name="y")
        output = layers.Add()([input_x, input_y])
        model = models.Model(inputs=[input_x, input_y], outputs=output)
        batch_size = 1  # LiteRT expects batch_size=1
        ref_input_x = np.random.normal(size=(batch_size, 10)).astype("float32")
        ref_input_y = np.random.normal(size=(batch_size, 10)).astype("float32")
        ref_output = _convert_to_numpy(model([ref_input_x, ref_input_y]))
        # Test with model.export()
        model.export(temp_filepath, format="litert")
        export_path = temp_filepath
        interpreter = LiteRTInterpreter(model_path=export_path)
        interpreter.allocate_tensors()
        _set_interpreter_inputs(interpreter, [ref_input_x, ref_input_y])
        interpreter.invoke()
        litert_output = _get_interpreter_outputs(interpreter)
        self.assertAllClose(ref_output, litert_output, atol=1e-4, rtol=1e-4)
        # Test with a different batch size by resizing interpreter inputs.
        larger_x = np.concatenate([ref_input_x, ref_input_x], axis=0)
        larger_y = np.concatenate([ref_input_y, ref_input_y], axis=0)
        input_details = interpreter.get_input_details()
        # Tensors must be re-allocated after every resize, before set/invoke.
        interpreter.resize_tensor_input(
            input_details[0]["index"], larger_x.shape
        )
        interpreter.resize_tensor_input(
            input_details[1]["index"], larger_y.shape
        )
        interpreter.allocate_tensors()
        _set_interpreter_inputs(interpreter, [larger_x, larger_y])
        interpreter.invoke()
        larger_output = _get_interpreter_outputs(interpreter)
        larger_ref_output = _convert_to_numpy(model([larger_x, larger_y]))
        self.assertAllClose(
            larger_ref_output, larger_output, atol=1e-4, rtol=1e-4
        )
    def test_export_with_custom_input_signature(self):
        """Test exporting with custom input signature specification."""
        model = get_model("sequential")
        temp_filepath = os.path.join(
            self.get_temp_dir(), "exported_model.tflite"
        )
        # Explicit spec: dynamic batch, 10 features, float32.
        input_signature = [layers.InputSpec(shape=(None, 10), dtype="float32")]
        # Test with model.export()
        model.export(
            temp_filepath,
            format="litert",
            input_signature=input_signature,
        )
        export_path = temp_filepath
        self.assertTrue(os.path.exists(export_path))
        # Check that the signature made it into the exported interpreter:
        # one input whose non-batch shape is (10,).
        interpreter = LiteRTInterpreter(model_path=export_path)
        interpreter.allocate_tensors()
        input_details = interpreter.get_input_details()
        self.assertEqual(len(input_details), 1)
        self.assertEqual(tuple(input_details[0]["shape"][1:]), (10,))
    def test_multi_output_model_export(self):
        """Test exporting multi-output models."""
        model = get_model("multi_output")
        # Build the model
        ref_input = np.random.normal(size=(3, 10)).astype("float32")
        model(ref_input)
        temp_filepath = os.path.join(
            self.get_temp_dir(), "exported_model.tflite"
        )
        model.export(temp_filepath, format="litert")
        tflite_path = temp_filepath
        self.assertTrue(os.path.exists(tflite_path))
        # Test inference
        interpreter = LiteRTInterpreter(model_path=tflite_path)
        interpreter.allocate_tensors()
        input_details = interpreter.get_input_details()
        output_details = interpreter.get_output_details()
        # The "multi_output" model has two heads (sigmoid and softmax).
        self.assertEqual(len(output_details), 2)
        test_input = np.random.random(input_details[0]["shape"]).astype(
            np.float32
        )
        interpreter.set_tensor(input_details[0]["index"], test_input)
        interpreter.invoke()
        # Only checks that each output materializes as an ndarray; exact
        # values are covered by the other comparison tests.
        for detail in output_details:
            output = interpreter.get_tensor(detail["index"])
            self.assertIsInstance(output, np.ndarray)
    def test_export_with_verbose(self):
        """Test export with verbose output."""
        model = get_model("sequential")
        # Trace once so the model is built before export.
        dummy_input = np.random.random((3, 10)).astype(np.float32)
        model(dummy_input)
        temp_filepath = os.path.join(
            self.get_temp_dir(), "exported_model.tflite"
        )
        # Export with verbose=True
        model.export(temp_filepath, format="litert", verbose=True)
        tflite_path = temp_filepath
        self.assertTrue(os.path.exists(tflite_path))
        # Verify the exported model works
        interpreter = LiteRTInterpreter(model_path=tflite_path)
        interpreter.allocate_tensors()
        input_details = interpreter.get_input_details()
        self.assertEqual(len(input_details), 1)
    def test_export_error_handling(self):
        """Test error handling in export API."""
        model = get_model("sequential")
        # Trace once so the model is built before export.
        dummy_input = np.random.random((3, 10)).astype(np.float32)
        model(dummy_input)
        temp_filepath = os.path.join(
            self.get_temp_dir(), "exported_model.tflite"
        )
        # Test with invalid format
        with self.assertRaises(ValueError):
            model.export(temp_filepath, format="invalid_format")
    def test_export_invalid_filepath(self):
        """Test that export fails with invalid file extension."""
        model = get_model("sequential")
        # Trace once so the model is built before export.
        dummy_input = np.random.random((3, 10)).astype(np.float32)
        model(dummy_input)
        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model.txt")
        # Should raise ValueError for wrong extension (.txt, not .tflite)
        with self.assertRaises(ValueError):
            model.export(temp_filepath, format="litert")
    def test_export_subclass_model(self):
        """Test exporting subclass models (uses wrapper conversion path)."""
        if LiteRTInterpreter is None:
            self.skipTest("No LiteRT interpreter available")
        model = get_model("subclass")
        temp_filepath = os.path.join(
            self.get_temp_dir(), "exported_model.tflite"
        )
        batch_size = 1
        ref_input = np.random.normal(size=(batch_size, 10)).astype("float32")
        # Reference output from the in-memory Keras model.
        ref_output = _convert_to_numpy(model(ref_input))
        # Export subclass model - this tests wrapper-based conversion
        model.export(temp_filepath, format="litert")
        self.assertTrue(os.path.exists(temp_filepath))
        # Verify inference
        interpreter = LiteRTInterpreter(model_path=temp_filepath)
        interpreter.allocate_tensors()
        _set_interpreter_inputs(interpreter, ref_input)
        interpreter.invoke()
        litert_output = _get_interpreter_outputs(interpreter)
        self.assertAllClose(ref_output, litert_output, atol=1e-4, rtol=1e-4)
    def test_export_with_optimizations_default(self):
        """Test export with DEFAULT optimization."""
        if LiteRTInterpreter is None:
            self.skipTest("No LiteRT interpreter available")
        model = get_model("sequential")
        temp_filepath = os.path.join(
            self.get_temp_dir(), "optimized_default.tflite"
        )
        batch_size = 1
        ref_input = np.random.normal(size=(batch_size, 10)).astype("float32")
        ref_output = _convert_to_numpy(model(ref_input))
        # Export with DEFAULT optimization (forwarded to the TFLite
        # converter's `optimizations` option).
        model.export(
            temp_filepath,
            format="litert",
            optimizations=[tensorflow.lite.Optimize.DEFAULT],
        )
        self.assertTrue(os.path.exists(temp_filepath))
        # Verify inference still works
        interpreter = LiteRTInterpreter(model_path=temp_filepath)
        interpreter.allocate_tensors()
        _set_interpreter_inputs(interpreter, ref_input)
        interpreter.invoke()
        litert_output = _get_interpreter_outputs(interpreter)
        # Quantized model should be close but not exact, hence the looser
        # tolerances than the non-optimized tests.
        self.assertAllClose(ref_output, litert_output, atol=1e-2, rtol=1e-2)
    def test_export_with_optimizations_sparsity(self):
        """Test export with EXPERIMENTAL_SPARSITY optimization."""
        if LiteRTInterpreter is None:
            self.skipTest("No LiteRT interpreter available")
        model = get_model("functional")
        temp_filepath = os.path.join(
            self.get_temp_dir(), "optimized_sparsity.tflite"
        )
        batch_size = 1
        ref_input = np.random.normal(size=(batch_size, 10)).astype("float32")
        # Export with EXPERIMENTAL_SPARSITY optimization
        model.export(
            temp_filepath,
            format="litert",
            optimizations=[tensorflow.lite.Optimize.EXPERIMENTAL_SPARSITY],
        )
        self.assertTrue(os.path.exists(temp_filepath))
        # Verify the model can run inference
        interpreter = LiteRTInterpreter(model_path=temp_filepath)
        interpreter.allocate_tensors()
        _set_interpreter_inputs(interpreter, ref_input)
        interpreter.invoke()
        litert_output = _get_interpreter_outputs(interpreter)
        # Output should have valid shape; values are not compared since
        # the optimization may alter numerics.
        self.assertEqual(litert_output.shape, (batch_size, 1))
    def test_export_with_optimizations_size(self):
        """Test export with OPTIMIZE_FOR_SIZE optimization."""
        if LiteRTInterpreter is None:
            self.skipTest("No LiteRT interpreter available")
        model = get_model("sequential")
        temp_filepath = os.path.join(
            self.get_temp_dir(), "optimized_size.tflite"
        )
        batch_size = 1
        ref_input = np.random.normal(size=(batch_size, 10)).astype("float32")
        # Export with OPTIMIZE_FOR_SIZE
        model.export(
            temp_filepath,
            format="litert",
            optimizations=[tensorflow.lite.Optimize.OPTIMIZE_FOR_SIZE],
        )
        self.assertTrue(os.path.exists(temp_filepath))
        # Verify the model can run inference; only the output shape is
        # checked, not the values.
        interpreter = LiteRTInterpreter(model_path=temp_filepath)
        interpreter.allocate_tensors()
        _set_interpreter_inputs(interpreter, ref_input)
        interpreter.invoke()
        litert_output = _get_interpreter_outputs(interpreter)
        self.assertEqual(litert_output.shape, (batch_size, 1))
    def test_export_with_optimizations_latency(self):
        """Test export with OPTIMIZE_FOR_LATENCY optimization."""
        if LiteRTInterpreter is None:
            self.skipTest("No LiteRT interpreter available")
        model = get_model("functional")
        temp_filepath = os.path.join(
            self.get_temp_dir(), "optimized_latency.tflite"
        )
        batch_size = 1
        ref_input = np.random.normal(size=(batch_size, 10)).astype("float32")
        # Export with OPTIMIZE_FOR_LATENCY
        model.export(
            temp_filepath,
            format="litert",
            optimizations=[tensorflow.lite.Optimize.OPTIMIZE_FOR_LATENCY],
        )
        self.assertTrue(os.path.exists(temp_filepath))
        # Verify the model can run inference; only the output shape is
        # checked, not the values.
        interpreter = LiteRTInterpreter(model_path=temp_filepath)
        interpreter.allocate_tensors()
        _set_interpreter_inputs(interpreter, ref_input)
        interpreter.invoke()
        litert_output = _get_interpreter_outputs(interpreter)
        self.assertEqual(litert_output.shape, (batch_size, 1))
    def test_export_with_multiple_optimizations(self):
        """Test export with multiple optimization options combined."""
        if LiteRTInterpreter is None:
            self.skipTest("No LiteRT interpreter available")
        model = get_model("sequential")
        temp_filepath = os.path.join(
            self.get_temp_dir(), "optimized_multiple.tflite"
        )
        batch_size = 1
        ref_input = np.random.normal(size=(batch_size, 10)).astype("float32")
        # Export with multiple optimizations passed as a single list.
        model.export(
            temp_filepath,
            format="litert",
            optimizations=[
                tensorflow.lite.Optimize.DEFAULT,
                tensorflow.lite.Optimize.EXPERIMENTAL_SPARSITY,
            ],
        )
        self.assertTrue(os.path.exists(temp_filepath))
        # Verify the model can run inference; only the output shape is
        # checked, not the values.
        interpreter = LiteRTInterpreter(model_path=temp_filepath)
        interpreter.allocate_tensors()
        _set_interpreter_inputs(interpreter, ref_input)
        interpreter.invoke()
        litert_output = _get_interpreter_outputs(interpreter)
        self.assertEqual(litert_output.shape, (batch_size, 1))
    def test_export_with_representative_dataset(self):
        """Test export with representative dataset for better quantization."""
        if LiteRTInterpreter is None:
            self.skipTest("No LiteRT interpreter available")
        model = get_model("functional")
        temp_filepath = os.path.join(
            self.get_temp_dir(), "quantized_model.tflite"
        )

        # Create representative dataset: a generator yielding sample input
        # batches (each a list with one array matching the model input).
        def representative_dataset():
            for _ in range(10):
                yield [np.random.normal(size=(1, 10)).astype("float32")]

        # Export with optimizations and representative dataset
        model.export(
            temp_filepath,
            format="litert",
            optimizations=[tensorflow.lite.Optimize.DEFAULT],
            representative_dataset=representative_dataset,
        )
        self.assertTrue(os.path.exists(temp_filepath))
        # Verify the model can run inference
        interpreter = LiteRTInterpreter(model_path=temp_filepath)
        interpreter.allocate_tensors()
        batch_size = 1
        ref_input = np.random.normal(size=(batch_size, 10)).astype("float32")
        _set_interpreter_inputs(interpreter, ref_input)
        interpreter.invoke()
        litert_output = _get_interpreter_outputs(interpreter)
        # Output should have valid shape; values are not compared since
        # quantization alters numerics.
        self.assertEqual(litert_output.shape, (batch_size, 1))
    def test_export_with_multiple_kwargs(self):
        """Test export with multiple converter kwargs."""
        if LiteRTInterpreter is None:
            self.skipTest("No LiteRT interpreter available")
        # Create a larger model for quantization testing
        inputs = layers.Input(shape=(28, 28, 3))
        x = layers.Conv2D(32, 3, activation="relu")(inputs)
        x = layers.MaxPooling2D()(x)
        x = layers.Flatten()(x)
        x = layers.Dense(10, activation="softmax")(x)
        model = models.Model(inputs, x)
        temp_filepath = os.path.join(
            self.get_temp_dir(), "multi_kwargs_model.tflite"
        )

        # Create representative dataset
        def representative_dataset():
            for _ in range(5):
                yield [np.random.normal(size=(1, 28, 28, 3)).astype("float32")]

        # Export with multiple kwargs forwarded to the TFLite converter.
        model.export(
            temp_filepath,
            format="litert",
            optimizations=[tensorflow.lite.Optimize.DEFAULT],
            representative_dataset=representative_dataset,
            experimental_new_quantizer=True,
        )
        self.assertTrue(os.path.exists(temp_filepath))
        # Only checks that a non-empty file was produced; size comparison
        # against a non-quantized export is covered by the next test.
        file_size = os.path.getsize(temp_filepath)
        self.assertGreater(file_size, 0)
    def test_export_optimization_file_size_comparison(self):
        """Test that optimizations reduce file size."""
        if LiteRTInterpreter is None:
            self.skipTest("No LiteRT interpreter available")
        # Create a larger model to see size differences
        inputs = layers.Input(shape=(28, 28, 3))
        x = layers.Conv2D(64, 3, activation="relu")(inputs)
        x = layers.Conv2D(64, 3, activation="relu")(x)
        x = layers.MaxPooling2D()(x)
        x = layers.Flatten()(x)
        x = layers.Dense(128, activation="relu")(x)
        x = layers.Dense(10, activation="softmax")(x)
        model = models.Model(inputs, x)
        # Export without optimization
        filepath_no_opt = os.path.join(
            self.get_temp_dir(), "model_no_opt.tflite"
        )
        model.export(filepath_no_opt, format="litert")
        # Export with optimization
        filepath_with_opt = os.path.join(
            self.get_temp_dir(), "model_with_opt.tflite"
        )
        model.export(
            filepath_with_opt,
            format="litert",
            optimizations=[tensorflow.lite.Optimize.DEFAULT],
        )
        # Optimized model should be smaller
        size_no_opt = os.path.getsize(filepath_no_opt)
        size_with_opt = os.path.getsize(filepath_with_opt)
        self.assertLess(
            size_with_opt,
            size_no_opt,
            f"Optimized model ({size_with_opt} bytes) should be smaller "
            f"than non-optimized ({size_no_opt} bytes)",
        )
        # Typically expect ~75% size reduction with quantization
        reduction_ratio = size_with_opt / size_no_opt
        self.assertLess(
            reduction_ratio,
            0.5,  # Should be less than 50% of original size
            f"Expected significant size reduction, got {reduction_ratio:.2%}",
        )
    def test_signature_def_with_named_model(self):
        """Test that exported models have SignatureDef with input names.

        Exports a Functional model with explicitly named layers, then
        checks that the .tflite artifact exposes a `serving_default`
        SignatureDef whose input names match the Keras input name, and
        that inference through the signature runner reproduces the
        original model's output.
        """
        if LiteRTInterpreter is None:
            self.skipTest("No LiteRT interpreter available")
        # Build a model with explicit layer names
        inputs = layers.Input(shape=(10,), name="feature_input")
        x = layers.Dense(32, activation="relu", name="encoder")(inputs)
        x = layers.Dense(16, activation="relu", name="bottleneck")(x)
        outputs = layers.Dense(
            1, activation="sigmoid", name="prediction_output"
        )(x)
        model = models.Model(inputs=inputs, outputs=outputs, name="named_model")
        temp_filepath = os.path.join(self.get_temp_dir(), "named_model.tflite")
        # Export the model
        model.export(temp_filepath, format="litert")
        self.assertTrue(os.path.exists(temp_filepath))
        # Load and check SignatureDef
        interpreter = LiteRTInterpreter(model_path=temp_filepath)
        interpreter.allocate_tensors()
        # Get SignatureDef information
        signature_defs = interpreter.get_signature_list()
        self.assertIn("serving_default", signature_defs)
        serving_sig = signature_defs["serving_default"]
        sig_inputs = serving_sig.get("inputs", [])
        sig_outputs = serving_sig.get("outputs", [])
        # Verify SignatureDef has inputs and outputs
        self.assertGreater(
            len(sig_inputs), 0, "Should have at least one input in SignatureDef"
        )
        self.assertGreater(
            len(sig_outputs),
            0,
            "Should have at least one output in SignatureDef",
        )
        # Verify input names are preserved (they should match Keras input names)
        self.assertIn(
            "feature_input",
            sig_inputs,
            f"Input name 'feature_input' should be in SignatureDef inputs: "
            f"{sig_inputs}",
        )
        # Verify inference works using signature runner
        batch_size = 1
        ref_input = np.random.normal(size=(batch_size, 10)).astype("float32")
        ref_output = _convert_to_numpy(model(ref_input))
        # Note: For single-output Functional models, Keras returns a tensor
        # (not dict). SignatureDef will have generic output names like
        # 'output_0'.
        # Only multi-output models or models with explicit dict returns have
        # named outputs
        # Test inference using signature runner for better output name handling
        signature_runner = interpreter.get_signature_runner("serving_default")
        # The runner takes keyword arguments keyed by the signature's
        # input names (hence `feature_input=`).
        sig_output = signature_runner(feature_input=ref_input)
        # sig_output should be a dict with meaningful output names
        self.assertIsInstance(sig_output, dict)
        self.assertGreater(
            len(sig_output), 0, "Should have at least one output"
        )
        # For single output, extract the value
        if len(sig_output) == 1:
            litert_output = list(sig_output.values())[0]
        else:
            litert_output = list(sig_output.values())
        self.assertAllClose(ref_output, litert_output, atol=1e-4, rtol=1e-4)
def test_signature_def_with_functional_model(self):
"""Test that SignatureDef preserves input/output names for
Functional models."""
if LiteRTInterpreter is None:
self.skipTest("No LiteRT interpreter available")
# Create a Functional model with named inputs and outputs
inputs = layers.Input(shape=(10,), name="input_layer")
x = layers.Dense(32, activation="relu", name="hidden_layer")(inputs)
outputs = layers.Dense(1, activation="sigmoid", name="output_layer")(x)
model = models.Model(
inputs=inputs, outputs=outputs, name="functional_model"
)
temp_filepath = os.path.join(
self.get_temp_dir(), "functional_model.tflite"
)
# Export the model
model.export(temp_filepath, format="litert")
self.assertTrue(os.path.exists(temp_filepath))
# Load and check SignatureDef
interpreter = LiteRTInterpreter(model_path=temp_filepath)
interpreter.allocate_tensors()
# Get SignatureDef information
signature_defs = interpreter.get_signature_list()
self.assertIn("serving_default", signature_defs)
serving_sig = signature_defs["serving_default"]
sig_inputs = serving_sig.get("inputs", [])
sig_outputs = serving_sig.get("outputs", [])
# Verify SignatureDef has inputs and outputs
self.assertGreater(
len(sig_inputs), 0, "Should have at least one input in SignatureDef"
)
self.assertGreater(
len(sig_outputs),
0,
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/export/tfsm_layer_test.py | keras/src/export/tfsm_layer_test.py | import os
import numpy as np
import pytest
import tensorflow as tf
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import testing
from keras.src import utils
from keras.src.export import saved_model
from keras.src.export import tfsm_layer
from keras.src.export.saved_model_test import get_model
from keras.src.saving import saving_lib
@pytest.mark.skipif(
    backend.backend() != "tensorflow",
    reason="TFSM Layer reloading is only for the TF backend.",
)
class TestTFSMLayer(testing.TestCase):
    """Tests for reloading exported SavedModels as `TFSMLayer` layers."""

    def test_reloading_export_archive(self):
        """Reload a model exported via `export_saved_model` and compare.

        Checks that outputs match the original model and that the weight
        counts (total / trainable / non-trainable) are preserved.
        """
        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
        model = get_model()
        ref_input = tf.random.normal((3, 10))
        ref_output = model(ref_input)

        saved_model.export_saved_model(model, temp_filepath)
        reloaded_layer = tfsm_layer.TFSMLayer(temp_filepath)
        self.assertAllClose(reloaded_layer(ref_input), ref_output, atol=1e-7)
        self.assertLen(reloaded_layer.weights, len(model.weights))
        self.assertLen(
            reloaded_layer.trainable_weights, len(model.trainable_weights)
        )
        self.assertLen(
            reloaded_layer.non_trainable_weights,
            len(model.non_trainable_weights),
        )

    def test_reloading_default_saved_model(self):
        """Reload a plain `tf.saved_model.save` artifact through TFSMLayer.

        Uses the `serving_default` endpoint, whose output is a dict keyed
        by output name (SavedModel convention), and checks that reloaded
        variables are exposed as Keras `backend.Variable`s.
        """
        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
        model = get_model()
        ref_input = tf.random.normal((3, 10))
        ref_output = model(ref_input)

        tf.saved_model.save(model, temp_filepath)
        reloaded_layer = tfsm_layer.TFSMLayer(
            temp_filepath, call_endpoint="serving_default"
        )
        # The output is a dict, due to the nature of SavedModel saving.
        new_output = reloaded_layer(ref_input)
        self.assertAllClose(
            new_output[list(new_output.keys())[0]],
            ref_output,
            atol=1e-7,
        )
        self.assertLen(reloaded_layer.weights, len(model.weights))
        self.assertLen(
            reloaded_layer.trainable_weights, len(model.trainable_weights)
        )
        self.assertLen(
            reloaded_layer.non_trainable_weights,
            len(model.non_trainable_weights),
        )
        for keras_var in reloaded_layer.weights:
            self.assertIsInstance(keras_var, backend.Variable)

    def test_call_training(self):
        """`training=True/False` should route to the matching endpoint.

        The model ends in a near-1.0-rate Dropout, so the training
        endpoint's output mean is ~0 while the inference endpoint's is
        not — distinguishing which endpoint was invoked.
        """
        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
        utils.set_random_seed(1337)
        model = models.Sequential(
            [
                layers.Input((10,)),
                layers.Dense(10),
                layers.Dropout(0.99999),
            ]
        )
        export_archive = saved_model.ExportArchive()
        export_archive.track(model)
        export_archive.add_endpoint(
            name="call_inference",
            fn=lambda x: model(x, training=False),
            input_signature=[tf.TensorSpec(shape=(None, 10), dtype=tf.float32)],
        )
        export_archive.add_endpoint(
            name="call_training",
            fn=lambda x: model(x, training=True),
            input_signature=[tf.TensorSpec(shape=(None, 10), dtype=tf.float32)],
        )
        export_archive.write_out(temp_filepath)
        reloaded_layer = tfsm_layer.TFSMLayer(
            temp_filepath,
            call_endpoint="call_inference",
            call_training_endpoint="call_training",
        )
        inference_output = reloaded_layer(
            tf.random.normal((1, 10)), training=False
        )
        training_output = reloaded_layer(
            tf.random.normal((1, 10)), training=True
        )
        # Dropout(0.99999) zeros out (nearly) everything in training mode.
        self.assertAllClose(np.mean(training_output), 0.0, atol=1e-7)
        self.assertNotAllClose(np.mean(inference_output), 0.0, atol=1e-7)

    def test_serialization(self):
        """TFSMLayer round-trips through get_config/from_config and .keras."""
        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
        model = get_model()
        ref_input = tf.random.normal((3, 10))
        ref_output = model(ref_input)

        saved_model.export_saved_model(model, temp_filepath)
        reloaded_layer = tfsm_layer.TFSMLayer(temp_filepath)

        # Test reinstantiation from config
        config = reloaded_layer.get_config()
        rereloaded_layer = tfsm_layer.TFSMLayer.from_config(config)
        self.assertAllClose(rereloaded_layer(ref_input), ref_output, atol=1e-7)

        # Test whole model saving with reloaded layer inside
        model = models.Sequential([reloaded_layer])
        temp_model_filepath = os.path.join(self.get_temp_dir(), "m.keras")
        model.save(temp_model_filepath, save_format="keras_v3")
        reloaded_model = saving_lib.load_model(
            temp_model_filepath,
            custom_objects={"TFSMLayer": tfsm_layer.TFSMLayer},
        )
        self.assertAllClose(reloaded_model(ref_input), ref_output, atol=1e-7)

    def test_errors(self):
        """Nonexistent endpoints should raise a descriptive ValueError."""
        # Test missing call endpoint
        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
        model = models.Sequential([layers.Input((2,)), layers.Dense(3)])
        saved_model.export_saved_model(model, temp_filepath)
        with self.assertRaisesRegex(ValueError, "The endpoint 'wrong'"):
            tfsm_layer.TFSMLayer(temp_filepath, call_endpoint="wrong")

        # Test missing call training endpoint
        with self.assertRaisesRegex(ValueError, "The endpoint 'wrong'"):
            tfsm_layer.TFSMLayer(
                temp_filepath,
                call_endpoint="serve",
                call_training_endpoint="wrong",
            )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/export/export_utils.py | keras/src/export/export_utils.py | from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import tree
from keras.src.utils.module_utils import tensorflow as tf
def get_input_signature(model):
    """Get input signature for model export.

    Args:
        model: A built `keras.Model` instance. Subclassed models must
            additionally have been called at least once so that their
            input shapes have been recorded.

    Returns:
        Input signature suitable for model export (always a tuple or list).

    Raises:
        TypeError: If `model` is not a `keras.Model`.
        ValueError: If the model has not been built, or if a subclassed
            model has never been called.
    """
    if not isinstance(model, models.Model):
        raise TypeError(
            "The model must be a `keras.Model`. "
            f"Received: model={model} of the type {type(model)}"
        )
    if not model.built:
        raise ValueError(
            "The model provided has not yet been built. It must be built "
            "before export."
        )
    if isinstance(model, models.Functional):
        # Functional models expect a single positional argument `inputs`
        # containing the full nested input structure. We keep the
        # original behavior of returning a single-element list that
        # wraps the mapped structure so that downstream exporters
        # build a tf.function with one positional argument.
        input_signature = [
            tree.map_structure(make_input_spec, model._inputs_struct)
        ]
    elif isinstance(model, models.Sequential):
        input_signature = tree.map_structure(make_input_spec, model.inputs)
    else:
        # Subclassed models: rely on recorded shapes from the first call.
        input_signature = _infer_input_signature_from_model(model)
        if not input_signature or not model._called:
            # Grammar fix of the error message: "has never called" ->
            # "has never been called".
            raise ValueError(
                "The model provided has never been called. "
                "It must be called at least once before export."
            )
    return input_signature
def _infer_input_signature_from_model(model):
    """Infer an input signature from shapes recorded when the model built.

    Returns a flat list (preserving the recorded order) of nested
    `InputSpec` structures, or `None` when no build shapes were recorded.
    """
    shapes_dict = getattr(model, "_build_shapes_dict", None)
    if not shapes_dict:
        return None

    def _is_plain_shape(candidate):
        # A shape is a sequence whose entries are ints or None dims.
        return all(isinstance(dim, (int, type(None))) for dim in candidate)

    def _to_spec(structure):
        # We need to turn wrapper structures like TrackingDict or
        # _DictWrapper into plain Python structures because they don't
        # work with jax2tf/JAX.
        if isinstance(structure, dict):
            return {key: _to_spec(value) for key, value in structure.items()}
        if isinstance(structure, tuple):
            if _is_plain_shape(structure):
                return layers.InputSpec(
                    shape=(None,) + structure[1:], dtype=model.input_dtype
                )
            return tuple(_to_spec(value) for value in structure)
        if isinstance(structure, list):
            if _is_plain_shape(structure):
                return layers.InputSpec(
                    shape=[None] + structure[1:], dtype=model.input_dtype
                )
            return [_to_spec(value) for value in structure]
        raise ValueError(
            f"Unsupported type {type(structure)} for {structure}"
        )

    # Always return a flat list preserving the order of shapes_dict values
    return [_to_spec(value) for value in shapes_dict.values()]
def make_input_spec(x):
    """Normalize `x` into a `keras.layers.InputSpec`.

    Accepts an existing `InputSpec` (validated and passed through), a
    `KerasTensor`, or a backend tensor. For tensors, the batch (leading)
    dimension is relaxed to `None`.

    Raises:
        ValueError: If an `InputSpec` is missing `shape` or `dtype`.
        TypeError: If `x` is of an unsupported type.
    """
    if isinstance(x, layers.InputSpec):
        # Pass through, but only if the spec is fully specified.
        if x.shape is None or x.dtype is None:
            raise ValueError(
                f"The `shape` and `dtype` must be provided. Received: x={x}"
            )
        return x
    if isinstance(x, backend.KerasTensor):
        batch_free_shape = (None,) + backend.standardize_shape(x.shape)[1:]
        return layers.InputSpec(
            dtype=backend.standardize_dtype(x.dtype),
            shape=batch_free_shape,
            name=x.name,
        )
    if backend.is_tensor(x):
        # Plain backend tensors carry no usable name.
        batch_free_shape = (None,) + backend.standardize_shape(x.shape)[1:]
        return layers.InputSpec(
            dtype=backend.standardize_dtype(x.dtype),
            shape=batch_free_shape,
            name=None,
        )
    raise TypeError(
        f"Unsupported x={x} of the type ({type(x)}). Supported types are: "
        "`keras.InputSpec`, `keras.KerasTensor` and backend tensor."
    )
def make_tf_tensor_spec(x, dynamic_batch=False):
    """Create a `tf.TensorSpec` from various input types.

    Args:
        x: Input to convert (`tf.TensorSpec`, `KerasTensor`, `InputSpec`,
            or backend tensor).
        dynamic_batch: If True, relax the leading (batch) dimension to
            `None`.

    Returns:
        A `tf.TensorSpec` instance.
    """

    def _relax_batch(shape):
        # Replace only axis 0 with None, keeping all other dims intact.
        return tuple(
            None if axis == 0 else dim for axis, dim in enumerate(shape)
        )

    if isinstance(x, tf.TensorSpec):
        # Already a TensorSpec; only rebuild it if the batch dim must be
        # made dynamic (scalar specs have no batch dim to relax).
        if dynamic_batch and len(x.shape) > 0:
            return tf.TensorSpec(
                _relax_batch(x.shape), dtype=x.dtype, name=x.name
            )
        return x

    spec = make_input_spec(x)
    shape = spec.shape
    if dynamic_batch and shape is not None and len(shape) > 0:
        shape = _relax_batch(shape)
    return tf.TensorSpec(shape, dtype=spec.dtype, name=spec.name)
def convert_spec_to_tensor(spec, replace_none_number=None):
    """Materialize `spec` as an all-ones tensor of its shape and dtype.

    If `replace_none_number` is given, unknown (`None`) dimensions in the
    spec's shape are replaced by that value (cast to int) before the
    tensor is created.
    """
    shape = backend.standardize_shape(spec.shape)
    if replace_none_number is not None:
        fill_dim = int(replace_none_number)
        shape = tuple(fill_dim if dim is None else dim for dim in shape)
    return ops.ones(shape, spec.dtype)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.