repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/export/saved_model.py | keras/src/export/saved_model.py | """Library for exporting SavedModel for Keras models/layers."""
from keras.src import backend
from keras.src import layers
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.export.export_utils import get_input_signature
from keras.src.export.export_utils import make_tf_tensor_spec
from keras.src.utils import io_utils
from keras.src.utils.module_utils import tensorflow as tf
# Select the backend-specific export implementation. `ExportArchive` below
# subclasses whichever `BackendExportArchive` is imported here, so the
# tracing/variable-tracking details are delegated to the active backend.
if backend.backend() == "tensorflow":
    from keras.src.backend.tensorflow.export import (
        TFExportArchive as BackendExportArchive,
    )
elif backend.backend() == "jax":
    from keras.src.backend.jax.export import (
        JaxExportArchive as BackendExportArchive,
    )
elif backend.backend() == "torch":
    from keras.src.backend.torch.export import (
        TorchExportArchive as BackendExportArchive,
    )
elif backend.backend() == "numpy":
    from keras.src.backend.numpy.export import (
        NumpyExportArchive as BackendExportArchive,
    )
elif backend.backend() == "openvino":
    from keras.src.backend.openvino.export import (
        OpenvinoExportArchive as BackendExportArchive,
    )
else:
    # A backend with no export implementation cannot provide `ExportArchive`.
    raise RuntimeError(
        f"Backend '{backend.backend()}' must implement ExportArchive."
    )

# Name under which `export_saved_model()` registers the default endpoint.
DEFAULT_ENDPOINT_NAME = "serve"
@keras_export("keras.export.ExportArchive")
class ExportArchive(BackendExportArchive):
    """ExportArchive is used to write SavedModel artifacts (e.g. for inference).

    If you have a Keras model or layer that you want to export as SavedModel for
    serving (e.g. via TensorFlow-Serving), you can use `ExportArchive`
    to configure the different serving endpoints you need to make available,
    as well as their signatures. Simply instantiate an `ExportArchive`,
    use `track()` to register the layer(s) or model(s) to be used,
    then use the `add_endpoint()` method to register a new serving endpoint.
    When done, use the `write_out()` method to save the artifact.

    The resulting artifact is a SavedModel and can be reloaded via
    `tf.saved_model.load`.

    Examples:

    Here's how to export a model for inference.

    ```python
    export_archive = ExportArchive()
    export_archive.track(model)
    export_archive.add_endpoint(
        name="serve",
        fn=model.call,
        input_signature=[keras.InputSpec(shape=(None, 3), dtype="float32")],
    )
    export_archive.write_out("path/to/location")

    # Elsewhere, we can reload the artifact and serve it.
    # The endpoint we added is available as a method:
    serving_model = tf.saved_model.load("path/to/location")
    outputs = serving_model.serve(inputs)
    ```

    Here's how to export a model with one endpoint for inference and one
    endpoint for a training-mode forward pass (e.g. with dropout on).

    ```python
    export_archive = ExportArchive()
    export_archive.track(model)
    export_archive.add_endpoint(
        name="call_inference",
        fn=lambda x: model.call(x, training=False),
        input_signature=[keras.InputSpec(shape=(None, 3), dtype="float32")],
    )
    export_archive.add_endpoint(
        name="call_training",
        fn=lambda x: model.call(x, training=True),
        input_signature=[keras.InputSpec(shape=(None, 3), dtype="float32")],
    )
    export_archive.write_out("path/to/location")
    ```

    **Note on resource tracking:**

    `ExportArchive` is able to automatically track all `keras.Variables` used
    by its endpoints, so most of the time calling `.track(model)`
    is not strictly required. However, if your model uses lookup layers such
    as `IntegerLookup`, `StringLookup`, or `TextVectorization`,
    it will need to be tracked explicitly via `.track(model)`.

    Explicit tracking is also required if you need to be able to access
    the properties `variables`, `trainable_variables`, or
    `non_trainable_variables` on the revived archive.
    """

    def __init__(self):
        super().__init__()
        if backend.backend() not in ("tensorflow", "jax", "torch"):
            raise NotImplementedError(
                "`ExportArchive` is only compatible with TensorFlow, JAX and "
                "Torch backends."
            )

        self._endpoint_names = []
        self._endpoint_signatures = {}
        self.tensorflow_version = tf.__version__

        # All tracked resources and endpoint functions are attached to this
        # trackable object, which is the object actually saved to disk.
        self._tf_trackable = tf.__internal__.tracking.AutoTrackable()
        self._tf_trackable.variables = []
        self._tf_trackable.trainable_variables = []
        self._tf_trackable.non_trainable_variables = []

    @property
    def variables(self):
        return self._tf_trackable.variables

    @property
    def trainable_variables(self):
        return self._tf_trackable.trainable_variables

    @property
    def non_trainable_variables(self):
        return self._tf_trackable.non_trainable_variables

    def track(self, resource):
        """Track the variables (of a layer or model) and other assets.

        By default, all variables used by an endpoint function are automatically
        tracked when you call `add_endpoint()`. However, non-variables assets
        such as lookup tables need to be tracked manually. Note that lookup
        tables used by built-in Keras layers (`TextVectorization`,
        `IntegerLookup`, `StringLookup`) are automatically tracked by
        `add_endpoint()`.

        Args:
            resource: A layer, model or a TensorFlow trackable resource.
        """
        if isinstance(resource, layers.Layer) and not resource.built:
            raise ValueError(
                "The layer provided has not yet been built. "
                "It must be built before export."
            )

        # Note: with the TensorFlow backend, Layers and Models fall into both
        # the Layer case and the Trackable case. The Trackable case is needed
        # for preprocessing layers in order to track lookup tables.
        if isinstance(resource, tf.__internal__.tracking.Trackable):
            if not hasattr(self, "_tracked"):
                self._tracked = []
            self._tracked.append(resource)

        if isinstance(resource, layers.Layer):
            self._track_layer(resource)
        elif not isinstance(resource, tf.__internal__.tracking.Trackable):
            raise ValueError(
                "Invalid resource type. Expected a Keras `Layer` or `Model` "
                "or a TensorFlow `Trackable` object. "
                f"Received object {resource} of type '{type(resource)}'. "
            )

    def add_endpoint(self, name, fn, input_signature=None, **kwargs):
        """Register a new serving endpoint.

        Args:
            name: `str`. The name of the endpoint.
            fn: A callable. It should only leverage resources
                (e.g. `keras.Variable` objects or `tf.lookup.StaticHashTable`
                objects) that are available on the models/layers tracked by the
                `ExportArchive` (you can call `.track(model)` to track a new
                model).
                The shape and dtype of the inputs to the function must be
                known. For that purpose, you can either 1) make sure that `fn`
                is a `tf.function` that has been called at least once, or 2)
                provide an `input_signature` argument that specifies the shape
                and dtype of the inputs (see below).
            input_signature: Optional. Specifies the shape and dtype of `fn`.
                Can be a structure of `keras.InputSpec`, `tf.TensorSpec`,
                `backend.KerasTensor`, or backend tensor (see below for an
                example showing a `Functional` model with 2 input arguments). If
                not provided, `fn` must be a `tf.function` that has been called
                at least once. Defaults to `None`.
            **kwargs: Additional keyword arguments:
                - Specific to the JAX backend:
                    - `is_static`: Optional `bool`. Indicates whether `fn` is
                        static. Set to `False` if `fn` involves state updates
                        (e.g., RNG seeds).
                    - `jax2tf_kwargs`: Optional `dict`. Arguments for
                        `jax2tf.convert`. See [`jax2tf.convert`](
                            https://github.com/google/jax/blob/main/jax/experimental/jax2tf/README.md).
                        If `native_serialization` and `polymorphic_shapes` are
                        not provided, they are automatically computed.

        Returns:
            The `tf.function` wrapping `fn` that was added to the archive.

        Example:

        Adding an endpoint using the `input_signature` argument when the
        model has a single input argument:

        ```python
        export_archive = ExportArchive()
        export_archive.track(model)
        export_archive.add_endpoint(
            name="serve",
            fn=model.call,
            input_signature=[keras.InputSpec(shape=(None, 3), dtype="float32")],
        )
        ```

        Adding an endpoint using the `input_signature` argument when the
        model has two positional input arguments:

        ```python
        export_archive = ExportArchive()
        export_archive.track(model)
        export_archive.add_endpoint(
            name="serve",
            fn=model.call,
            input_signature=[
                keras.InputSpec(shape=(None, 3), dtype="float32"),
                keras.InputSpec(shape=(None, 4), dtype="float32"),
            ],
        )
        ```

        Adding an endpoint using the `input_signature` argument when the
        model has one input argument that is a list of 2 tensors (e.g.
        a Functional model with 2 inputs):

        ```python
        model = keras.Model(inputs=[x1, x2], outputs=outputs)

        export_archive = ExportArchive()
        export_archive.track(model)
        export_archive.add_endpoint(
            name="serve",
            fn=model.call,
            input_signature=[
                [
                    keras.InputSpec(shape=(None, 3), dtype="float32"),
                    keras.InputSpec(shape=(None, 4), dtype="float32"),
                ],
            ],
        )
        ```

        This also works with dictionary inputs:

        ```python
        model = keras.Model(inputs={"x1": x1, "x2": x2}, outputs=outputs)

        export_archive = ExportArchive()
        export_archive.track(model)
        export_archive.add_endpoint(
            name="serve",
            fn=model.call,
            input_signature=[
                {
                    "x1": keras.InputSpec(shape=(None, 3), dtype="float32"),
                    "x2": keras.InputSpec(shape=(None, 4), dtype="float32"),
                },
            ],
        )
        ```

        Adding an endpoint that is a `tf.function`:

        ```python
        @tf.function()
        def serving_fn(x):
            return model(x)

        # The function must be traced, i.e. it must be called at least once.
        serving_fn(tf.random.normal(shape=(2, 3)))

        export_archive = ExportArchive()
        export_archive.track(model)
        export_archive.add_endpoint(name="serve", fn=serving_fn)
        ```

        Combining a model with some TensorFlow preprocessing, which can use
        TensorFlow resources:

        ```python
        lookup_table = tf.lookup.StaticHashTable(initializer, default_value=0.0)

        export_archive = ExportArchive()
        model_fn = export_archive.track_and_add_endpoint(
            "model_fn",
            model,
            input_signature=[tf.TensorSpec(shape=(None, 5), dtype=tf.float32)],
        )
        export_archive.track(lookup_table)

        @tf.function()
        def serving_fn(x):
            x = lookup_table.lookup(x)
            return model_fn(x)

        export_archive.add_endpoint(name="serve", fn=serving_fn)
        ```
        """
        if name in self._endpoint_names:
            raise ValueError(f"Endpoint name '{name}' is already taken.")

        if backend.backend() != "jax":
            if "jax2tf_kwargs" in kwargs or "is_static" in kwargs:
                raise ValueError(
                    "'jax2tf_kwargs' and 'is_static' are only supported with "
                    f"the jax backend. Current backend: {backend.backend()}"
                )

        # The fast path if `fn` is already a `tf.function`.
        if input_signature is None:
            if isinstance(fn, tf.types.experimental.GenericFunction):
                # Without an input signature, `fn` must already have been
                # traced so that the shapes/dtypes of its arguments are known.
                if not fn._list_all_concrete_functions():
                    raise ValueError(
                        f"The provided tf.function '{fn}' "
                        "has never been called. "
                        "To specify the expected shape and dtype "
                        "of the function's arguments, "
                        "you must either provide a function that "
                        "has been called at least once, or alternatively pass "
                        "an `input_signature` argument in `add_endpoint()`."
                    )
                decorated_fn = fn
            else:
                raise ValueError(
                    "If the `fn` argument provided is not a `tf.function`, "
                    "you must provide an `input_signature` argument to "
                    "specify the shape and dtype of the function arguments. "
                    "Example:\n\n"
                    "export_archive.add_endpoint(\n"
                    "    name='call',\n"
                    "    fn=model.call,\n"
                    "    input_signature=[\n"
                    "        keras.InputSpec(\n"
                    "            shape=(None, 224, 224, 3),\n"
                    "            dtype='float32',\n"
                    "        )\n"
                    "    ],\n"
                    ")"
                )
            setattr(self._tf_trackable, name, decorated_fn)
            self._endpoint_names.append(name)
            return decorated_fn

        # Normalize the signature to `tf.TensorSpec`s before delegating the
        # actual endpoint creation to the backend implementation.
        input_signature = tree.map_structure(
            make_tf_tensor_spec, input_signature
        )
        decorated_fn = super().add_endpoint(name, fn, input_signature, **kwargs)
        self._endpoint_signatures[name] = input_signature
        setattr(self._tf_trackable, name, decorated_fn)
        self._endpoint_names.append(name)
        return decorated_fn

    def track_and_add_endpoint(self, name, resource, input_signature, **kwargs):
        """Track the variables and register a new serving endpoint.

        This function combines the functionality of `track` and `add_endpoint`.
        It tracks the variables of the `resource` (either a layer or a model)
        and registers a serving endpoint using `resource.__call__`.

        Args:
            name: `str`. The name of the endpoint.
            resource: A trackable Keras resource, such as a layer or model.
            input_signature: Optional. Specifies the shape and dtype of `fn`.
                Can be a structure of `keras.InputSpec`, `tf.TensorSpec`,
                `backend.KerasTensor`, or backend tensor (see below for an
                example showing a `Functional` model with 2 input arguments). If
                not provided, `fn` must be a `tf.function` that has been called
                at least once. Defaults to `None`.
            **kwargs: Additional keyword arguments:
                - Specific to the JAX backend:
                    - `is_static`: Optional `bool`. Indicates whether `fn` is
                        static. Set to `False` if `fn` involves state updates
                        (e.g., RNG seeds).
                    - `jax2tf_kwargs`: Optional `dict`. Arguments for
                        `jax2tf.convert`. See [`jax2tf.convert`](
                            https://github.com/google/jax/blob/main/jax/experimental/jax2tf/README.md).
                        If `native_serialization` and `polymorphic_shapes` are
                        not provided, they are automatically computed.
        """
        if name in self._endpoint_names:
            raise ValueError(f"Endpoint name '{name}' is already taken.")
        if not isinstance(resource, layers.Layer):
            raise ValueError(
                "Invalid resource type. Expected an instance of a Keras "
                "`Layer` or `Model`. "
                f"Received: resource={resource} (of type {type(resource)})"
            )
        if not resource.built:
            raise ValueError(
                "The layer provided has not yet been built. "
                "It must be built before export."
            )
        if backend.backend() != "jax":
            if "jax2tf_kwargs" in kwargs or "is_static" in kwargs:
                raise ValueError(
                    "'jax2tf_kwargs' and 'is_static' are only supported with "
                    f"the jax backend. Current backend: {backend.backend()}"
                )

        input_signature = tree.map_structure(
            make_tf_tensor_spec, input_signature
        )
        if not hasattr(BackendExportArchive, "track_and_add_endpoint"):
            # Default behavior.
            self.track(resource)
            return self.add_endpoint(
                name, resource.__call__, input_signature, **kwargs
            )
        else:
            # Special case for the torch backend.
            decorated_fn = super().track_and_add_endpoint(
                name, resource, input_signature, **kwargs
            )
            self._endpoint_signatures[name] = input_signature
            setattr(self._tf_trackable, name, decorated_fn)
            self._endpoint_names.append(name)
            return decorated_fn

    def add_variable_collection(self, name, variables):
        """Register a set of variables to be retrieved after reloading.

        Arguments:
            name: The string name for the collection.
            variables: A tuple/list/set of `keras.Variable` instances.

        Example:

        ```python
        export_archive = ExportArchive()
        export_archive.track(model)
        # Register an endpoint
        export_archive.add_endpoint(
            name="serve",
            fn=model.call,
            input_signature=[keras.InputSpec(shape=(None, 3), dtype="float32")],
        )
        # Save a variable collection
        export_archive.add_variable_collection(
            name="optimizer_variables", variables=model.optimizer.variables)
        export_archive.write_out("path/to/location")

        # Reload the object
        revived_object = tf.saved_model.load("path/to/location")
        # Retrieve the variables
        optimizer_variables = revived_object.optimizer_variables
        ```
        """
        if not isinstance(variables, (list, tuple, set)):
            raise ValueError(
                "Expected `variables` to be a list/tuple/set. "
                f"Received instead object of type '{type(variables)}'."
            )
        # Ensure that all variables added are either tf.Variables
        # or Variables created by Keras 3 with the TF or JAX backends.
        if not all(
            isinstance(v, (tf.Variable, backend.Variable)) for v in variables
        ):
            raise ValueError(
                "Expected all elements in `variables` to be "
                "`tf.Variable` instances. Found instead the following types: "
                f"{list(set(type(v) for v in variables))}"
            )
        if backend.backend() == "jax":
            # JAX variables must be converted to `tf.Variable`s before they
            # can be attached to the SavedModel trackable.
            variables = tree.flatten(
                tree.map_structure(self._convert_to_tf_variable, variables)
            )
        setattr(self._tf_trackable, name, list(variables))

    def write_out(self, filepath, options=None, verbose=True):
        """Write the corresponding SavedModel to disk.

        Arguments:
            filepath: `str` or `pathlib.Path` object.
                Path where to save the artifact.
            options: `tf.saved_model.SaveOptions` object that specifies
                SavedModel saving options.
            verbose: whether to print all the variables of an
                exported SavedModel.

        **Note on TF-Serving**: all endpoints registered via `add_endpoint()`
        are made visible for TF-Serving in the SavedModel artifact. In addition,
        the first endpoint registered is made visible under the alias
        `"serving_default"` (unless an endpoint with the name
        `"serving_default"` was already registered manually),
        since TF-Serving requires this endpoint to be set.
        """
        if not self._endpoint_names:
            raise ValueError(
                "No endpoints have been set yet. Call add_endpoint()."
            )
        self._filter_and_track_resources()

        signatures = {}
        for name in self._endpoint_names:
            signatures[name] = self._get_concrete_fn(name)
        # Add "serving_default" signature key for TFServing
        if "serving_default" not in self._endpoint_names:
            signatures["serving_default"] = self._get_concrete_fn(
                self._endpoint_names[0]
            )
        tf.saved_model.save(
            self._tf_trackable,
            filepath,
            options=options,
            signatures=signatures,
        )

        # Print out available endpoints
        if verbose:
            endpoints = "\n\n".join(
                _print_signature(
                    getattr(self._tf_trackable, name), name, verbose=verbose
                )
                for name in self._endpoint_names
            )
            io_utils.print_msg(
                f"Saved artifact at '{filepath}'. "
                "The following endpoints are available:\n\n"
                f"{endpoints}"
            )

    def _convert_to_tf_variable(self, backend_variable):
        """Create a `tf.Variable` mirroring a Keras backend variable."""
        if not isinstance(backend_variable, backend.Variable):
            raise TypeError(
                "`backend_variable` must be a `backend.Variable`. "
                # Fixed typo: "Recevied" -> "Received".
                f"Received: backend_variable={backend_variable} of type "
                f"({type(backend_variable)})"
            )
        return tf.Variable(
            backend_variable.value,
            dtype=backend_variable.dtype,
            trainable=backend_variable.trainable,
            name=backend_variable.name,
        )

    def _get_concrete_fn(self, endpoint):
        """Workaround for some SavedModel quirks."""
        if endpoint in self._endpoint_signatures:
            return getattr(self._tf_trackable, endpoint)
        else:
            traces = getattr(self._tf_trackable, endpoint)._trackable_children(
                "saved_model"
            )
            return list(traces.values())[0]

    def _get_variables_used_by_endpoints(self):
        fns = [self._get_concrete_fn(name) for name in self._endpoint_names]
        return _list_variables_used_by_fns(fns)

    def _filter_and_track_resources(self):
        """Track resources used by endpoints / referenced in `track()` calls."""
        # Start by extracting variables from endpoints.
        fns = [self._get_concrete_fn(name) for name in self._endpoint_names]
        tvs, ntvs = _list_variables_used_by_fns(fns)
        self._tf_trackable._all_variables = list(tvs + ntvs)

        # Next, track lookup tables.
        # Hopefully, one day this will be automated at the tf.function level.
        self._tf_trackable._misc_assets = []
        from tensorflow.saved_model.experimental import TrackableResource

        if hasattr(self, "_tracked"):
            for root in self._tracked:
                descendants = tf.train.TrackableView(root).descendants()
                for trackable in descendants:
                    if isinstance(trackable, TrackableResource):
                        self._tf_trackable._misc_assets.append(trackable)
def export_saved_model(
    model, filepath, verbose=None, input_signature=None, **kwargs
):
    """Export a model as a TensorFlow SavedModel artifact for inference.

    Saves a lightweight SavedModel containing only the model's forward pass
    (its `call()` method), served under the endpoint name `serve`. The
    artifact is fully standalone: the original model code (including custom
    layers) is not needed to reload it, e.g. via `tf.saved_model.load` or
    TensorFlow Serving.

    Args:
        model: The Keras model to export.
        filepath: `str` or `pathlib.Path` object. The path to save the
            artifact.
        verbose: `bool`. Whether to print a message during export. Defaults
            to `None`, which uses the backend/format default.
        input_signature: Optional. Specifies the shape and dtype of the model
            inputs. Can be a structure of `keras.InputSpec`, `tf.TensorSpec`,
            `backend.KerasTensor`, or backend tensor. If not provided, it is
            computed automatically from the model. Defaults to `None`.
        **kwargs: Additional keyword arguments forwarded to
            `ExportArchive.track_and_add_endpoint()`:
            - Specific to the JAX backend:
                - `is_static`: Optional `bool`. Whether `fn` is static.
                - `jax2tf_kwargs`: Optional `dict`. Arguments for
                    `jax2tf.convert`.

    **Note:** Supported only with the TensorFlow, JAX and Torch backends
    (Torch support is experimental, and dynamic shapes are not yet supported
    there).

    Example:

    ```python
    model.export("path/to/location", format="tf_saved_model")

    reloaded_artifact = tf.saved_model.load("path/to/location")
    predictions = reloaded_artifact.serve(input_data)
    ```

    For custom serving endpoints, use the lower-level
    `keras.export.ExportArchive` class, on which this function relies.
    """
    # `None` means "use the default verbosity", which is `True` everywhere.
    show_progress = True if verbose is None else verbose
    if input_signature is None:
        input_signature = get_input_signature(model)

    archive = ExportArchive()
    archive.track_and_add_endpoint(
        DEFAULT_ENDPOINT_NAME, model, input_signature, **kwargs
    )
    archive.write_out(filepath, verbose=show_progress)
def _print_signature(fn, name, verbose=True):
concrete_fn = fn._list_all_concrete_functions()[0]
pprinted_signature = concrete_fn.pretty_printed_signature(verbose=verbose)
lines = pprinted_signature.split("\n")
lines = [f"* Endpoint '{name}'"] + lines[1:]
endpoint = "\n".join(lines)
return endpoint
def _list_variables_used_by_fns(fns):
trainable_variables = []
non_trainable_variables = []
trainable_variables_ids = set()
non_trainable_variables_ids = set()
for fn in fns:
if hasattr(fn, "concrete_functions"):
concrete_functions = fn.concrete_functions
elif hasattr(fn, "get_concrete_function"):
concrete_functions = [fn.get_concrete_function()]
else:
concrete_functions = [fn]
for concrete_fn in concrete_functions:
for v in concrete_fn.trainable_variables:
if id(v) not in trainable_variables_ids:
trainable_variables.append(v)
trainable_variables_ids.add(id(v))
for v in concrete_fn.variables:
if (
id(v) not in trainable_variables_ids
and id(v) not in non_trainable_variables_ids
):
non_trainable_variables.append(v)
non_trainable_variables_ids.add(id(v))
return trainable_variables, non_trainable_variables
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/export/onnx_test.py | keras/src/export/onnx_test.py | """Tests for ONNX exporting utilities."""
import os
import numpy as np
import onnxruntime
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import testing
from keras.src import tree
from keras.src.export import onnx
from keras.src.layers.input_spec import InputSpec as InputSpec
from keras.src.saving import saving_lib
from keras.src.testing.test_utils import named_product
class CustomModel(models.Model):
    """Minimal subclassed model that applies a list of layers in sequence."""

    def __init__(self, layer_list):
        super().__init__()
        self.layer_list = layer_list

    def call(self, input):
        # Thread the input through each layer in order.
        result = input
        for sub_layer in self.layer_list:
            result = sub_layer(result)
        return result
def get_model(type="sequential", input_shape=(10,), layer_list=None):
    """Build a small test model of the requested flavor.

    Supported `type` values: "sequential", "functional", "subclass", and
    "lstm" (a bidirectional-LSTM regression model). Returns `None` for any
    other value. `layer_list` defaults to a Dense/BatchNorm/Dense stack.
    """
    if not layer_list:
        layer_list = [
            layers.Dense(10, activation="relu"),
            layers.BatchNormalization(),
            layers.Dense(1, activation="sigmoid"),
        ]

    if type == "sequential":
        return models.Sequential(layer_list)
    if type == "functional":
        inputs = tree.map_shape_structure(layers.Input, input_shape)
        outputs = inputs
        for current_layer in layer_list:
            outputs = current_layer(outputs)
        return models.Model(inputs=inputs, outputs=outputs)
    if type == "subclass":
        return CustomModel(layer_list)
    if type == "lstm":
        # Regression-test model, see
        # https://github.com/keras-team/keras/issues/21390
        seq_inputs = layers.Input((4, 10))
        hidden = layers.Bidirectional(
            layers.LSTM(
                10,
                kernel_initializer="he_normal",
                return_sequences=True,
                kernel_regularizer=None,
            ),
            merge_mode="sum",
        )(seq_inputs)
        seq_outputs = layers.Bidirectional(
            layers.LSTM(
                10,
                kernel_initializer="he_normal",
                return_sequences=True,
                kernel_regularizer=None,
            ),
            merge_mode="concat",
        )(hidden)
        return models.Model(inputs=seq_inputs, outputs=seq_outputs)
@pytest.mark.skipif(
    backend.backend() not in ("tensorflow", "jax", "torch"),
    reason=(
        "`export_onnx` only currently supports the tensorflow, jax and torch "
        "backends."
    ),
)
@pytest.mark.skipif(
    testing.jax_uses_gpu()
    or testing.tensorflow_uses_gpu()
    or testing.torch_uses_gpu(),
    reason="Fails on GPU",
)
@pytest.mark.skipif(
    np.version.version.startswith("2."),
    reason="ONNX export is currently incompatible with NumPy 2.0",
)
class ExportONNXTest(testing.TestCase):
    """End-to-end tests for `onnx.export_onnx`.

    Each test exports a model to ONNX, reloads it with `onnxruntime`, and
    compares the runtime outputs against the in-framework reference outputs.
    """

    @parameterized.named_parameters(
        named_product(
            model_type=["sequential", "functional", "subclass", "lstm"]
        )
    )
    def test_standard_model_export(self, model_type):
        """Export each model flavor and check ONNX outputs match Keras."""
        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
        model = get_model(model_type)
        # The torch exporter does not support dynamic batch sizes, so use 1.
        batch_size = 3 if backend.backend() != "torch" else 1
        if model_type == "lstm":
            ref_input = np.random.normal(size=(batch_size, 4, 10))
        else:
            ref_input = np.random.normal(size=(batch_size, 10))
        ref_input = ref_input.astype("float32")
        ref_output = model(ref_input)

        onnx.export_onnx(model, temp_filepath)
        ort_session = onnxruntime.InferenceSession(temp_filepath)
        ort_inputs = {
            k.name: v for k, v in zip(ort_session.get_inputs(), [ref_input])
        }
        self.assertAllClose(ref_output, ort_session.run(None, ort_inputs)[0])
        # Test with a different batch size
        if backend.backend() == "torch":
            # TODO: Dynamic shape is not supported yet in the torch backend
            return
        ort_inputs = {
            k.name: v
            for k, v in zip(
                ort_session.get_inputs(),
                [np.concatenate([ref_input, ref_input], axis=0)],
            )
        }
        ort_session.run(None, ort_inputs)

    @parameterized.named_parameters(
        named_product(struct_type=["tuple", "array", "dict"])
    )
    def test_model_with_input_structure(self, struct_type):
        """Export models whose single input is a tuple/list/dict of tensors."""
        if backend.backend() == "torch" and struct_type == "dict":
            self.skipTest("The torch backend doesn't support the dict model.")

        class TupleModel(models.Model):
            def call(self, inputs):
                x, y = inputs
                return ops.add(x, y)

        class ArrayModel(models.Model):
            def call(self, inputs):
                x = inputs[0]
                y = inputs[1]
                return ops.add(x, y)

        class DictModel(models.Model):
            def call(self, inputs):
                x = inputs["x"]
                y = inputs["y"]
                return ops.add(x, y)

        batch_size = 3 if backend.backend() != "torch" else 1
        ref_input = np.random.normal(size=(batch_size, 10)).astype("float32")
        if struct_type == "tuple":
            model = TupleModel()
            ref_input = (ref_input, ref_input * 2)
        elif struct_type == "array":
            model = ArrayModel()
            ref_input = [ref_input, ref_input * 2]
        elif struct_type == "dict":
            model = DictModel()
            ref_input = {"x": ref_input, "y": ref_input * 2}

        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
        ref_output = model(tree.map_structure(ops.convert_to_tensor, ref_input))

        onnx.export_onnx(model, temp_filepath)
        ort_session = onnxruntime.InferenceSession(temp_filepath)
        if isinstance(ref_input, dict):
            ort_inputs = {
                k.name: v
                for k, v in zip(ort_session.get_inputs(), ref_input.values())
            }
        else:
            ort_inputs = {
                k.name: v for k, v in zip(ort_session.get_inputs(), ref_input)
            }
        self.assertAllClose(ref_output, ort_session.run(None, ort_inputs)[0])

        # Test with keras.saving_lib
        temp_filepath = os.path.join(
            self.get_temp_dir(), "exported_model.keras"
        )
        saving_lib.save_model(model, temp_filepath)
        revived_model = saving_lib.load_model(
            temp_filepath,
            {
                "TupleModel": TupleModel,
                "ArrayModel": ArrayModel,
                "DictModel": DictModel,
            },
        )
        self.assertAllClose(ref_output, revived_model(ref_input))
        # The round-tripped model must also be exportable.
        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model2")
        onnx.export_onnx(revived_model, temp_filepath)

        # Test with a different batch size
        if backend.backend() == "torch":
            # TODO: Dynamic shape is not supported yet in the torch backend
            return
        bigger_ref_input = tree.map_structure(
            lambda x: np.concatenate([x, x], axis=0), ref_input
        )
        if isinstance(bigger_ref_input, dict):
            bigger_ort_inputs = {
                k.name: v
                for k, v in zip(
                    ort_session.get_inputs(), bigger_ref_input.values()
                )
            }
        else:
            bigger_ort_inputs = {
                k.name: v
                for k, v in zip(ort_session.get_inputs(), bigger_ref_input)
            }
        ort_session.run(None, bigger_ort_inputs)

    def test_model_with_multiple_inputs(self):
        """Export a model whose `call()` takes two positional tensors."""

        class TwoInputsModel(models.Model):
            def call(self, x, y):
                return x + y

            def build(self, y_shape, x_shape):
                self.built = True

        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
        model = TwoInputsModel()
        batch_size = 3 if backend.backend() != "torch" else 1
        ref_input_x = np.random.normal(size=(batch_size, 10)).astype("float32")
        ref_input_y = np.random.normal(size=(batch_size, 10)).astype("float32")
        ref_output = model(ref_input_x, ref_input_y)

        onnx.export_onnx(model, temp_filepath)
        ort_session = onnxruntime.InferenceSession(temp_filepath)
        ort_inputs = {
            k.name: v
            for k, v in zip(
                ort_session.get_inputs(), [ref_input_x, ref_input_y]
            )
        }
        self.assertAllClose(ref_output, ort_session.run(None, ort_inputs)[0])
        # Test with a different batch size
        if backend.backend() == "torch":
            # TODO: Dynamic shape is not supported yet in the torch backend
            return
        ort_inputs = {
            k.name: v
            for k, v in zip(
                ort_session.get_inputs(),
                [
                    np.concatenate([ref_input_x, ref_input_x], axis=0),
                    np.concatenate([ref_input_y, ref_input_y], axis=0),
                ],
            )
        }
        ort_session.run(None, ort_inputs)

    @parameterized.named_parameters(named_product(opset_version=[None, 17]))
    def test_export_with_opset_version(self, opset_version):
        """Check that `opset_version` is honored in the exported graph."""
        import onnx as onnx_lib

        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
        model = get_model("sequential")
        batch_size = 3 if backend.backend() != "torch" else 1
        ref_input = np.random.normal(size=(batch_size, 10))
        ref_input = ref_input.astype("float32")
        ref_output = model(ref_input)

        onnx.export_onnx(
            model, temp_filepath, opset_version=opset_version, verbose=True
        )
        ort_session = onnxruntime.InferenceSession(temp_filepath)
        ort_inputs = {
            k.name: v for k, v in zip(ort_session.get_inputs(), [ref_input])
        }
        self.assertAllClose(ref_output, ort_session.run(None, ort_inputs)[0])
        if opset_version is not None:
            # Verify the requested opset was recorded in the ONNX model.
            onnx_model = onnx_lib.load(temp_filepath)
            self.assertEqual(onnx_model.opset_import[0].version, opset_version)

    def test_export_with_input_names(self):
        """Test ONNX export uses InputSpec.name for input names."""
        import onnx as onnx_lib

        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
        model = get_model("sequential")
        batch_size = 3 if backend.backend() != "torch" else 1
        ref_input = np.random.normal(size=(batch_size, 10)).astype("float32")
        ref_output = model(ref_input)

        # Test with custom input name
        input_spec = [
            InputSpec(
                name="custom_input", shape=(batch_size, 10), dtype="float32"
            )
        ]
        onnx.export_onnx(model, temp_filepath, input_signature=input_spec)
        onnx_model = onnx_lib.load(temp_filepath)
        input_names = [input.name for input in onnx_model.graph.input]
        self.assertIn("custom_input", input_names)
        ort_session = onnxruntime.InferenceSession(temp_filepath)
        ort_inputs = {
            k.name: v for k, v in zip(ort_session.get_inputs(), [ref_input])
        }
        self.assertAllClose(ref_output, ort_session.run(None, ort_inputs)[0])
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/export/tfsm_layer.py | keras/src/export/tfsm_layer.py | from keras.src import backend
from keras.src import layers
from keras.src.api_export import keras_export
from keras.src.export.saved_model import _list_variables_used_by_fns
from keras.src.utils.module_utils import tensorflow as tf
@keras_export("keras.layers.TFSMLayer")
class TFSMLayer(layers.Layer):
    """Reload a Keras model/layer that was saved via SavedModel / ExportArchive.
    Arguments:
        filepath: `str` or `pathlib.Path` object. The path to the SavedModel.
        call_endpoint: Name of the endpoint to use as the `call()` method
            of the reloaded layer. If the SavedModel was created
            via `model.export()`,
            then the default endpoint name is `'serve'`. In other cases
            it may be named `'serving_default'`.
    Example:
    ```python
    model.export("path/to/artifact")
    reloaded_layer = TFSMLayer("path/to/artifact")
    outputs = reloaded_layer(inputs)
    ```
    The reloaded object can be used like a regular Keras layer, and supports
    training/fine-tuning of its trainable weights. Note that the reloaded
    object retains none of the internal structure or custom methods of the
    original object -- it's a brand new layer created around the saved
    function.
    **Limitations:**
    * Only call endpoints with a single `inputs` tensor argument
    (which may optionally be a dict/tuple/list of tensors) are supported.
    For endpoints with multiple separate input tensor arguments, consider
    subclassing `TFSMLayer` and implementing a `call()` method with a
    custom signature.
    * If you need training-time behavior to differ from inference-time behavior
    (i.e. if you need the reloaded object to support a `training=True` argument
    in `__call__()`), make sure that the training-time call function is
    saved as a standalone endpoint in the artifact, and provide its name
    to the `TFSMLayer` via the `call_training_endpoint` argument.
    """
    def __init__(
        self,
        filepath,
        call_endpoint="serve",
        call_training_endpoint=None,
        trainable=True,
        name=None,
        dtype=None,
    ):
        # The restored endpoints are TF functions, so only the TensorFlow
        # backend can execute them.
        if backend.backend() != "tensorflow":
            raise NotImplementedError(
                "The TFSMLayer is only currently supported with the "
                "TensorFlow backend."
            )
        # Initialize an empty layer, then add_weight() etc. as needed.
        super().__init__(trainable=trainable, name=name, dtype=dtype)
        # Load the SavedModel once; endpoints and variables are resolved
        # from this restored object below.
        self._reloaded_obj = tf.saved_model.load(filepath)
        self.filepath = filepath
        self.call_endpoint = call_endpoint
        self.call_training_endpoint = call_training_endpoint
        # Resolve the call function.
        if hasattr(self._reloaded_obj, call_endpoint):
            # Case 1: it's set as an attribute.
            self.call_endpoint_fn = getattr(self._reloaded_obj, call_endpoint)
        elif call_endpoint in self._reloaded_obj.signatures:
            # Case 2: it's listed in the `signatures` field.
            self.call_endpoint_fn = self._reloaded_obj.signatures[call_endpoint]
        else:
            raise ValueError(
                f"The endpoint '{call_endpoint}' "
                "is neither an attribute of the reloaded SavedModel, "
                "nor an entry in the `signatures` field of "
                "the reloaded SavedModel. Select another endpoint via "
                "the `call_endpoint` argument. Available endpoints for "
                "this SavedModel: "
                f"{list(self._reloaded_obj.signatures.keys())}"
            )
        # Resolving the training function.
        # Same attribute-then-signatures lookup as for the call endpoint.
        if call_training_endpoint:
            if hasattr(self._reloaded_obj, call_training_endpoint):
                self.call_training_endpoint_fn = getattr(
                    self._reloaded_obj, call_training_endpoint
                )
            elif call_training_endpoint in self._reloaded_obj.signatures:
                self.call_training_endpoint_fn = self._reloaded_obj.signatures[
                    call_training_endpoint
                ]
            else:
                raise ValueError(
                    f"The endpoint '{call_training_endpoint}' "
                    "is neither an attribute of the reloaded SavedModel, "
                    "nor an entry in the `signatures` field of "
                    "the reloaded SavedModel. Available endpoints for "
                    "this SavedModel: "
                    f"{list(self._reloaded_obj.signatures.keys())}"
                )
        # Add trainable and non-trainable weights from the call_endpoint_fn.
        all_fns = [self.call_endpoint_fn]
        if call_training_endpoint:
            all_fns.append(self.call_training_endpoint_fn)
        tvs, ntvs = _list_variables_used_by_fns(all_fns)
        for v in tvs:
            self._add_existing_weight(v)
        for v in ntvs:
            self._add_existing_weight(v)
        # All weights were registered above, so the layer is fully built
        # at construction time.
        self._build_at_init()
    def _add_existing_weight(self, weight):
        """Tracks an existing weight."""
        # Wrap the restored variable in a Keras variable so it participates
        # in Keras weight tracking (trainable_weights, saving, etc.).
        variable = backend.Variable(
            initializer=weight,
            trainable=weight.trainable,
            dtype=weight.dtype,
            shape=weight.shape,
            # Keras variable names cannot contain slashes.
            name=weight.name.replace("/", "_"),
        )
        self._track_variable(variable)
    def call(self, inputs, training=False, **kwargs):
        # Dispatch to the training endpoint when one was provided and
        # `training=True`; otherwise fall back to the inference endpoint.
        if training:
            if self.call_training_endpoint:
                return self.call_training_endpoint_fn(inputs, **kwargs)
        return self.call_endpoint_fn(inputs, **kwargs)
    def get_config(self):
        base_config = super().get_config()
        config = {
            # Note: this is not intended to be portable.
            "filepath": self.filepath,
            "call_endpoint": self.call_endpoint,
            "call_training_endpoint": self.call_training_endpoint,
        }
        return {**base_config, **config}
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/export/tf2onnx_lib.py | keras/src/export/tf2onnx_lib.py | import copy
import functools
import logging
import traceback
import numpy as np
@functools.lru_cache()
def patch_tf2onnx():
    """Patches `tf2onnx` to ensure compatibility with numpy>=2.0.0."""
    # lru_cache ensures the patching runs at most once per process.
    from onnx import AttributeProto
    from onnx import TensorProto
    from keras.src.utils.module_utils import tf2onnx
    logger = logging.getLogger(tf2onnx.__name__)
    # numpy 2.x removed the deprecated `np.object` alias, which tf2onnx
    # still references; restore it when missing.
    if not hasattr(np, "object"):
        np.object = object
    def patched_rewrite_constant_fold(g, ops):
        """
        We call tensorflow transform with constant folding but in some cases
        tensorflow does fold all constants. Since there are a bunch of ops in
        onnx that use attributes where tensorflow has dynamic inputs, we badly
        want constant folding to work. For cases where tensorflow missed
        something, make another pass over the graph and fix want we care about.
        """
        # Numpy equivalents for the TF op types we are willing to fold.
        func_map = {
            "Add": np.add,
            "GreaterEqual": np.greater_equal,
            "Cast": np.asarray,
            "ConcatV2": np.concatenate,
            "Less": np.less,
            "ListDiff": np.setdiff1d,
            "Mul": np.multiply,
            "Pack": np.stack,
            "Range": np.arange,
            "Sqrt": np.sqrt,
            "Sub": np.subtract,
        }
        ops = list(ops)
        keep_looking = True
        while keep_looking:
            keep_looking = False
            for idx, op in enumerate(ops):
                func = func_map.get(op.type)
                if func is None:
                    continue
                # Never fold a node that produces a graph output.
                if set(op.output) & set(g.outputs):
                    continue
                try:
                    # Collect constant input values; stop early if any
                    # input is non-constant (the length check below then
                    # skips the fold).
                    inputs = []
                    for node in op.inputs:
                        if not node.is_const():
                            break
                        inputs.append(node.get_tensor_value(as_list=False))
                    logger.debug(
                        "op name %s, %s, %s",
                        op.name,
                        len(op.input),
                        len(inputs),
                    )
                    if inputs and len(op.input) == len(inputs):
                        logger.info(
                            "folding node type=%s, name=%s" % (op.type, op.name)
                        )
                        if op.type == "Cast":
                            dst = op.get_attr_int("to")
                            np_type = tf2onnx.utils.map_onnx_to_numpy_type(dst)
                            val = np.asarray(*inputs, dtype=np_type)
                        elif op.type == "ConcatV2":
                            axis = inputs[-1]
                            values = inputs[:-1]
                            val = func(tuple(values), axis)
                        elif op.type == "ListDiff":
                            out_type = op.get_attr_int("out_idx")
                            np_type = tf2onnx.utils.map_onnx_to_numpy_type(
                                out_type
                            )
                            val = func(*inputs)
                            val = val.astype(np_type)
                        elif op.type in ["Pack"]:
                            # handle ops that need input array and axis
                            axis = op.get_attr_int("axis")
                            val = func(inputs, axis=axis)
                        elif op.type == "Range":
                            dtype = op.get_attr_int("Tidx")
                            np_type = tf2onnx.utils.map_onnx_to_numpy_type(
                                dtype
                            )
                            val = func(*inputs, dtype=np_type)
                        else:
                            val = func(*inputs)
                        new_node_name = tf2onnx.utils.make_name(op.name)
                        new_output_name = new_node_name
                        old_output_name = op.output[0]
                        old_node_name = op.name
                        logger.debug(
                            "create const node [%s] replacing [%s]",
                            new_node_name,
                            old_node_name,
                        )
                        ops[idx] = g.make_const(new_node_name, val)
                        logger.debug(
                            "replace old output [%s] with new output [%s]",
                            old_output_name,
                            new_output_name,
                        )
                        # need to re-write the consumers input name to use the
                        # const name
                        consumers = g.find_output_consumers(old_output_name)
                        if consumers:
                            for consumer in consumers:
                                g.replace_input(
                                    consumer, old_output_name, new_output_name
                                )
                        # keep looking until there is nothing we can fold.
                        # We keep the graph in topological order so if we
                        # folded, the result might help a following op.
                        keep_looking = True
                except Exception as ex:
                    tb = traceback.format_exc()
                    logger.info("exception: %s, details: %s", ex, tb)
                    # ignore errors
        return ops
    def patched_get_value_attr(self, external_tensor_storage=None):
        """
        Return onnx attr for value property of node.
        Attr is modified to point to external tensor data stored in
        external_tensor_storage, if included.
        """
        a = self._attr["value"]
        if (
            external_tensor_storage is not None
            and self in external_tensor_storage.node_to_modified_value_attr
        ):
            return external_tensor_storage.node_to_modified_value_attr[self]
        if external_tensor_storage is None or a.type != AttributeProto.TENSOR:
            return a
        def prod(x):
            # np.product was removed in numpy 2.x; fall back to np.prod.
            if hasattr(np, "product"):
                return np.product(x)
            else:
                return np.prod(x)
        if (
            prod(a.t.dims)
            > external_tensor_storage.external_tensor_size_threshold
        ):
            # Move the raw tensor bytes out of the proto and reference them
            # via an external-data location entry instead.
            a = copy.deepcopy(a)
            tensor_name = (
                f"{self.name.strip()}_{external_tensor_storage.name_counter}"
            )
            # Sanitize characters that are unsafe in file names.
            for c in '~"#%&*:<>?/\\{|}':
                tensor_name = tensor_name.replace(c, "_")
            external_tensor_storage.name_counter += 1
            external_tensor_storage.name_to_tensor_data[tensor_name] = (
                a.t.raw_data
            )
            external_tensor_storage.node_to_modified_value_attr[self] = a
            a.t.raw_data = b""
            a.t.ClearField("raw_data")
            location = a.t.external_data.add()
            location.key = "location"
            location.value = tensor_name
            a.t.data_location = TensorProto.EXTERNAL
        return a
    # Install the patched implementations on the tf2onnx module.
    tf2onnx.tfonnx.rewrite_constant_fold = patched_rewrite_constant_fold
    tf2onnx.graph.Node.get_value_attr = patched_get_value_attr
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/export/litert.py | keras/src/export/litert.py | from keras.src import layers
from keras.src import models
from keras.src import tree
from keras.src.export.export_utils import get_input_signature
from keras.src.utils import io_utils
from keras.src.utils.module_utils import tensorflow as tf
def export_litert(
    model,
    filepath,
    input_signature=None,
    **kwargs,
):
    """Export the model as a LiteRT artifact for inference.
    Args:
        model: The Keras model to export.
        filepath: The path to save the exported artifact.
        input_signature: Optional input signature specification. If
            `None`, it will be inferred.
        **kwargs: Additional keyword arguments passed to the exporter.
    """
    # Delegate the conversion work to the LiteRT exporter.
    LiteRTExporter(
        model=model,
        input_signature=input_signature,
        **kwargs,
    ).export(filepath)
    io_utils.print_msg(f"Saved artifact at '{filepath}'.")
class LiteRTExporter:
    """Exporter for the LiteRT (TFLite) format.
    This class handles the conversion of Keras models for LiteRT runtime and
    generates a `.tflite` model file. For efficient inference on mobile and
    embedded devices, it creates a single callable signature based on the
    model's `call()` method.
    """
    def __init__(
        self,
        model,
        input_signature=None,
        **kwargs,
    ):
        """Initialize the LiteRT exporter.
        Args:
            model: The Keras model to export
            input_signature: Input signature specification (e.g., TensorFlow
                TensorSpec or list of TensorSpec)
            **kwargs: Additional export parameters
        """
        self.model = model
        self.input_signature = input_signature
        self.kwargs = kwargs
    def export(self, filepath):
        """Exports the Keras model to a TFLite file.
        Args:
            filepath: Output path for the exported model
        Returns:
            Path to exported model
        Raises:
            ValueError: If `filepath` does not end with `.tflite`.
        """
        # 1. Resolve / infer input signature
        if self.input_signature is None:
            # Use the standard get_input_signature which handles all model types
            # and preserves nested structures (dicts, lists, etc.)
            self.input_signature = get_input_signature(self.model)
        # 2. Determine input structure and create adapter if needed
        # There are 3 cases:
        # Case 1: Single input (not nested)
        # Case 2: Flat list of inputs (list where flattened == original)
        # Case 3: Nested structure (dicts, nested lists, etc.)
        # Special handling for Functional models: get_input_signature wraps
        # the structure in a list, so unwrap it for analysis
        input_struct = self.input_signature
        if (
            isinstance(self.input_signature, list)
            and len(self.input_signature) == 1
        ):
            input_struct = self.input_signature[0]
        if not tree.is_nested(input_struct):
            # Case 1: Single input - use as-is
            model_to_convert = self.model
            signature_for_conversion = self.input_signature
        elif isinstance(input_struct, list) and len(input_struct) == len(
            tree.flatten(input_struct)
        ):
            # Case 2: Flat list of inputs - use as-is
            model_to_convert = self.model
            signature_for_conversion = self.input_signature
        else:
            # Case 3: Nested structure (dict, nested lists, etc.)
            # Create adapter model that converts flat list to nested structure
            # (TFLite only supports positional/list inputs).
            adapted_model = self._create_nested_inputs_adapter(input_struct)
            # Flatten signature for TFLite conversion
            signature_for_conversion = tree.flatten(input_struct)
            # Use adapted model and flat list signature for conversion
            model_to_convert = adapted_model
        # Store original model reference for later use
        original_model = self.model
        # Temporarily replace self.model with the model to convert
        self.model = model_to_convert
        try:
            # Convert the model to TFLite.
            tflite_model = self._convert_to_tflite(signature_for_conversion)
        finally:
            # Restore original model
            self.model = original_model
        # Save the TFLite model to the specified file path.
        if not filepath.endswith(".tflite"):
            raise ValueError(
                f"The LiteRT export requires the filepath to end with "
                f"'.tflite'. Got: {filepath}"
            )
        with open(filepath, "wb") as f:
            f.write(tflite_model)
        return filepath
    def _create_nested_inputs_adapter(self, input_signature_struct):
        """Create an adapter model that converts flat list inputs to nested
        structure.
        This adapter allows models expecting nested inputs (dicts, lists, etc.)
        to be exported to TFLite format (which only supports positional/list
        inputs).
        Args:
            input_signature_struct: Nested structure of InputSpecs (dict, list,
                etc.)
        Returns:
            A Functional model that accepts flat list inputs and converts to
            nested
        """
        # Get flat paths to preserve names and print input mapping
        paths_and_specs = tree.flatten_with_path(input_signature_struct)
        paths = [".".join(str(e) for e in p) for p, v in paths_and_specs]
        io_utils.print_msg(f"Creating adapter for inputs: {paths}")
        # Create Input layers for TFLite (flat list-based)
        input_layers = []
        for path, spec in paths_and_specs:
            # Extract the input name from spec or path
            name = (
                spec.name
                if hasattr(spec, "name") and spec.name
                else (str(path[-1]) if path else "input")
            )
            input_layer = layers.Input(
                shape=spec.shape[1:],  # Remove batch dimension
                dtype=spec.dtype,
                name=name,
            )
            input_layers.append(input_layer)
        # Reconstruct the nested structure from flat list
        inputs_structure = tree.pack_sequence_as(
            input_signature_struct, input_layers
        )
        # Call the original model with nested inputs
        outputs = self.model(inputs_structure)
        # Build as Functional model (flat list inputs -> nested -> model ->
        # output)
        adapted_model = models.Model(inputs=input_layers, outputs=outputs)
        # Preserve the original model's variables
        # NOTE(review): this assigns private attributes and relies on Keras
        # internals — confirm it is still required by the converter.
        adapted_model._variables = self.model.variables
        adapted_model._trainable_variables = self.model.trainable_variables
        adapted_model._non_trainable_variables = (
            self.model.non_trainable_variables
        )
        return adapted_model
    def _convert_to_tflite(self, input_signature):
        """Converts the Keras model to TFLite format.
        Returns:
            A bytes object containing the serialized TFLite model.
        Raises:
            RuntimeError: If the TFLite conversion fails.
        """
        # Try direct conversion first for all models
        try:
            converter = tf.lite.TFLiteConverter.from_keras_model(self.model)
            # Allow TF ops that have no TFLite builtin equivalent.
            converter.target_spec.supported_ops = [
                tf.lite.OpsSet.TFLITE_BUILTINS,
                tf.lite.OpsSet.SELECT_TF_OPS,
            ]
            # Keras 3 only supports resource variables
            converter.experimental_enable_resource_variables = True
            # Apply any additional converter settings from kwargs
            self._apply_converter_kwargs(converter)
            tflite_model = converter.convert()
            return tflite_model
        except Exception as e:
            # If direct conversion fails, raise the error with helpful message
            raise RuntimeError(
                f"Direct TFLite conversion failed. This may be due to model "
                f"complexity or unsupported operations. Error: {e}"
            ) from e
    def _apply_converter_kwargs(self, converter):
        """Apply additional converter settings from kwargs.
        Args:
            converter: tf.lite.TFLiteConverter instance to configure
        Raises:
            ValueError: If any kwarg is not a valid converter attribute
        """
        for attr, value in self.kwargs.items():
            if attr == "target_spec" and isinstance(value, dict):
                # Handle nested target_spec settings
                for spec_key, spec_value in value.items():
                    if hasattr(converter.target_spec, spec_key):
                        setattr(converter.target_spec, spec_key, spec_value)
                    else:
                        raise ValueError(
                            f"Unknown target_spec attribute '{spec_key}'"
                        )
            elif hasattr(converter, attr):
                setattr(converter, attr, value)
            else:
                raise ValueError(f"Unknown converter attribute '{attr}'")
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/export/saved_model_test.py | keras/src/export/saved_model_test.py | """Tests for SavedModel exporting utilities."""
import os
import numpy as np
import pytest
import tensorflow as tf
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import random
from keras.src import testing
from keras.src import tree
from keras.src.export import saved_model
from keras.src.saving import saving_lib
from keras.src.testing.test_utils import named_product
class CustomModel(models.Model):
    """Minimal subclassed model that applies `layer_list` sequentially."""

    def __init__(self, layer_list):
        super().__init__()
        self.layer_list = layer_list

    def call(self, input):
        # Thread the value through each layer in order.
        result = input
        for current_layer in self.layer_list:
            result = current_layer(result)
        return result
def get_model(type="sequential", input_shape=(10,), layer_list=None):
    """Build a small test model of the requested kind.

    Args:
        type: One of `"sequential"`, `"functional"` or `"subclass"`.
        input_shape: Shape (without batch dim) for functional inputs; may be
            a nested structure of shapes.
        layer_list: Optional list of layers; defaults to a small MLP.

    Returns:
        A Keras model.

    Raises:
        ValueError: If `type` is not one of the supported model kinds.
    """
    layer_list = layer_list or [
        layers.Dense(10, activation="relu"),
        layers.BatchNormalization(),
        layers.Dense(1, activation="sigmoid"),
    ]
    if type == "sequential":
        return models.Sequential(layer_list)
    elif type == "functional":
        input = output = tree.map_shape_structure(layers.Input, input_shape)
        for layer in layer_list:
            output = layer(output)
        return models.Model(inputs=input, outputs=output)
    elif type == "subclass":
        return CustomModel(layer_list)
    # Previously an unrecognized `type` silently fell through and returned
    # `None`; fail loudly instead so typos in tests surface immediately.
    raise ValueError(
        f"Unknown model type: {type!r}. Expected one of 'sequential', "
        "'functional' or 'subclass'."
    )
@pytest.mark.skipif(
    backend.backend() not in ("tensorflow", "jax", "torch"),
    reason=(
        "`export_saved_model` only currently supports the tensorflow, jax and "
        "torch backends."
    ),
)
@pytest.mark.skipif(testing.jax_uses_gpu(), reason="Leads to core dumps on CI")
@pytest.mark.skipif(
    testing.torch_uses_gpu(), reason="Leads to core dumps on CI"
)
@pytest.mark.skipif(
    backend.backend() == "torch" and np.version.version.startswith("2."),
    reason="Torch backend export (via torch_xla) is incompatible with np 2.0",
)
class ExportSavedModelTest(testing.TestCase):
    """End-to-end tests for `saved_model.export_saved_model`."""
    @parameterized.named_parameters(
        named_product(model_type=["sequential", "functional", "subclass"])
    )
    def test_standard_model_export(self, model_type):
        """Basic round trip: export, reload with TF, compare outputs."""
        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
        model = get_model(model_type)
        batch_size = 3 if backend.backend() != "torch" else 1
        ref_input = np.random.normal(size=(batch_size, 10)).astype("float32")
        ref_output = model(ref_input)
        saved_model.export_saved_model(model, temp_filepath)
        revived_model = tf.saved_model.load(temp_filepath)
        self.assertAllClose(ref_output, revived_model.serve(ref_input))
        # Test with a different batch size
        if backend.backend() == "torch":
            # TODO: Dynamic shape is not supported yet in the torch backend
            return
        revived_model.serve(tf.random.normal((6, 10)))
    @parameterized.named_parameters(
        named_product(model_type=["sequential", "functional", "subclass"])
    )
    @pytest.mark.skipif(
        backend.backend() == "torch",
        reason=(
            "RuntimeError: mutating a non-functional tensor with a "
            "functional tensor is not allowed in the torch backend."
        ),
    )
    def test_model_with_rng_export(self, model_type):
        """Models holding a `SeedGenerator` keep producing randomness after
        export (repeated calls must differ)."""
        class RandomLayer(layers.Layer):
            def __init__(self):
                super().__init__()
                self.seed_generator = backend.random.SeedGenerator()
            def call(self, inputs):
                return inputs + random.uniform(
                    ops.shape(inputs), seed=self.seed_generator
                )
        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
        model = get_model(model_type, layer_list=[RandomLayer()])
        ref_input = tf.random.normal((3, 10))
        ref_output = model(ref_input)
        saved_model.export_saved_model(model, temp_filepath)
        revived_model = tf.saved_model.load(temp_filepath)
        self.assertEqual(ref_output.shape, revived_model.serve(ref_input).shape)
        # Test with a different batch size
        input = tf.random.normal((6, 10))
        output1 = revived_model.serve(input)
        output2 = revived_model.serve(input)
        # Verify RNG seeding works and produces random outputs
        self.assertNotAllClose(output1, output2)
    @parameterized.named_parameters(
        named_product(model_type=["sequential", "functional", "subclass"])
    )
    @pytest.mark.skipif(
        backend.backend() == "torch",
        reason=(
            "RuntimeError: mutating a non-functional tensor with a "
            "functional tensor is not allowed in the torch backend."
        ),
    )
    def test_model_with_non_trainable_state_export(self, model_type):
        """Non-trainable variables keep mutating in the exported artifact."""
        class StateLayer(layers.Layer):
            def __init__(self):
                super().__init__()
                self.counter = self.add_variable(
                    (), "zeros", "int32", trainable=False
                )
            def call(self, inputs):
                self.counter.assign_add(1)
                return ops.array(inputs), ops.array(self.counter.value)
        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
        model = get_model(model_type, layer_list=[StateLayer()])
        # One call before export, so the restored counter starts at 1.
        model(tf.random.normal((3, 10)))
        saved_model.export_saved_model(model, temp_filepath)
        revived_model = tf.saved_model.load(temp_filepath)
        # The non-trainable counter is expected to increment
        input = tf.random.normal((6, 10))
        output1, counter1 = revived_model.serve(input)
        self.assertAllClose(output1, input)
        self.assertAllClose(counter1, 2)
        output2, counter2 = revived_model.serve(input)
        self.assertAllClose(output2, input)
        self.assertAllClose(counter2, 3)
    @parameterized.named_parameters(
        named_product(model_type=["sequential", "functional", "subclass"])
    )
    def test_model_with_tf_data_layer(self, model_type):
        """Models containing a tf.data preprocessing layer export cleanly."""
        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
        model = get_model(model_type, layer_list=[layers.Rescaling(scale=2.0)])
        batch_size = 3 if backend.backend() != "torch" else 1
        ref_input = np.random.normal(size=(batch_size, 10)).astype("float32")
        ref_output = model(ref_input)
        saved_model.export_saved_model(model, temp_filepath)
        revived_model = tf.saved_model.load(temp_filepath)
        self.assertAllClose(ref_output, revived_model.serve(ref_input))
        # Test with a different batch size
        if backend.backend() == "torch":
            # TODO: Dynamic shape is not supported yet in the torch backend
            return
        revived_model.serve(tf.random.normal((6, 10)))
    @parameterized.named_parameters(
        named_product(struct_type=["tuple", "array", "dict"])
    )
    def test_model_with_input_structure(self, struct_type):
        """Nested input structures (tuple/list/dict) survive export and
        `.keras` save/load."""
        class TupleModel(models.Model):
            def call(self, inputs):
                x, y = inputs
                return ops.add(x, y)
        class ArrayModel(models.Model):
            def call(self, inputs):
                x = inputs[0]
                y = inputs[1]
                return ops.add(x, y)
        class DictModel(models.Model):
            def call(self, inputs):
                x = inputs["x"]
                y = inputs["y"]
                return ops.add(x, y)
        batch_size = 3 if backend.backend() != "torch" else 1
        ref_input = np.random.normal(size=(batch_size, 10)).astype("float32")
        if struct_type == "tuple":
            model = TupleModel()
            ref_input = (ref_input, ref_input * 2)
        elif struct_type == "array":
            model = ArrayModel()
            ref_input = [ref_input, ref_input * 2]
        elif struct_type == "dict":
            model = DictModel()
            ref_input = {"x": ref_input, "y": ref_input * 2}
        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
        ref_output = model(tree.map_structure(ops.convert_to_tensor, ref_input))
        saved_model.export_saved_model(model, temp_filepath)
        revived_model = tf.saved_model.load(temp_filepath)
        self.assertAllClose(ref_output, revived_model.serve(ref_input))
        # Test with keras.saving_lib
        temp_filepath = os.path.join(
            self.get_temp_dir(), "exported_model.keras"
        )
        saving_lib.save_model(model, temp_filepath)
        revived_model = saving_lib.load_model(
            temp_filepath,
            {
                "TupleModel": TupleModel,
                "ArrayModel": ArrayModel,
                "DictModel": DictModel,
            },
        )
        self.assertAllClose(ref_output, revived_model(ref_input))
        saved_model.export_saved_model(revived_model, self.get_temp_dir())
        # Test with a different batch size
        if backend.backend() == "torch":
            # TODO: Dynamic shape is not supported yet in the torch backend
            return
        bigger_input = tree.map_structure(
            lambda x: tf.concat([x, x], axis=0), ref_input
        )
        revived_model(bigger_input)
    def test_model_with_multiple_inputs(self):
        """Models whose `call()` takes several positional tensors export
        with a matching multi-argument serving endpoint."""
        class TwoInputsModel(models.Model):
            def call(self, x, y):
                return x + y
            def build(self, y_shape, x_shape):
                self.built = True
        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
        model = TwoInputsModel()
        batch_size = 3 if backend.backend() != "torch" else 1
        ref_input_x = np.random.normal(size=(batch_size, 10)).astype("float32")
        ref_input_y = np.random.normal(size=(batch_size, 10)).astype("float32")
        ref_output = model(ref_input_x, ref_input_y)
        saved_model.export_saved_model(model, temp_filepath)
        revived_model = tf.saved_model.load(temp_filepath)
        self.assertAllClose(
            ref_output, revived_model.serve(ref_input_x, ref_input_y)
        )
        # Test with a different batch size
        if backend.backend() == "torch":
            # TODO: Dynamic shape is not supported yet in the torch backend
            return
        revived_model.serve(
            tf.random.normal((6, 10)), tf.random.normal((6, 10))
        )
    @parameterized.named_parameters(
        named_product(
            model_type=["sequential", "functional", "subclass"],
            input_signature=[
                layers.InputSpec(
                    dtype="float32", shape=(None, 10), name="inputs"
                ),
                tf.TensorSpec((None, 10), dtype="float32", name="inputs"),
                backend.KerasTensor((None, 10), dtype="float32", name="inputs"),
                "backend_tensor",
            ],
        )
    )
    def test_input_signature(self, model_type, input_signature):
        """All supported signature spec types produce a working export."""
        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
        model = get_model(model_type)
        batch_size = 3 if backend.backend() != "torch" else 1
        ref_input = ops.random.normal((batch_size, 10))
        ref_output = model(ref_input)
        if input_signature == "backend_tensor":
            # Use a concrete backend tensor as the signature spec.
            input_signature = (ref_input,)
        else:
            input_signature = (input_signature,)
        saved_model.export_saved_model(
            model, temp_filepath, input_signature=input_signature
        )
        revived_model = tf.saved_model.load(temp_filepath)
        self.assertAllClose(
            ref_output, revived_model.serve(ops.convert_to_numpy(ref_input))
        )
    def test_input_signature_error(self):
        """An unsupported signature spec type raises a TypeError."""
        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
        model = get_model("functional")
        with self.assertRaisesRegex(TypeError, "Unsupported x="):
            input_signature = (123,)
            saved_model.export_saved_model(
                model, temp_filepath, input_signature=input_signature
            )
    @parameterized.named_parameters(
        named_product(
            model_type=["sequential", "functional", "subclass"],
            is_static=(True, False),
            jax2tf_kwargs=(
                None,
                {"enable_xla": True, "native_serialization": True},
            ),
        )
    )
    @pytest.mark.skipif(
        backend.backend() != "jax",
        reason="This test is only for the jax backend.",
    )
    def test_jax_specific_kwargs(self, model_type, is_static, jax2tf_kwargs):
        """JAX-only export options (`is_static`, `jax2tf_kwargs`) are
        accepted and produce a working artifact."""
        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
        model = get_model(model_type)
        ref_input = ops.random.uniform((3, 10))
        ref_output = model(ref_input)
        saved_model.export_saved_model(
            model,
            temp_filepath,
            is_static=is_static,
            jax2tf_kwargs=jax2tf_kwargs,
        )
        revived_model = tf.saved_model.load(temp_filepath)
        self.assertAllClose(ref_output, revived_model.serve(ref_input))
@pytest.mark.skipif(
backend.backend()
not in (
"tensorflow",
"jax",
# "torch", # TODO: Support low-level operations in the torch backend.
),
reason="Export only currently supports the TF and JAX backends.",
)
@pytest.mark.skipif(testing.jax_uses_gpu(), reason="Leads to core dumps on CI")
@pytest.mark.skipif(
testing.torch_uses_gpu(), reason="Leads to core dumps on CI"
)
class ExportArchiveTest(testing.TestCase):
    @parameterized.named_parameters(
        named_product(model_type=["sequential", "functional", "subclass"])
    )
    def test_low_level_model_export(self, model_type):
        """Export via the low-level `ExportArchive` API and reload."""
        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
        model = get_model(model_type)
        ref_input = tf.random.normal((3, 10))
        ref_output = model(ref_input)
        # Test variable tracking
        export_archive = saved_model.ExportArchive()
        export_archive.track(model)
        self.assertLen(export_archive.variables, 8)
        self.assertLen(export_archive.trainable_variables, 6)
        self.assertLen(export_archive.non_trainable_variables, 2)
        # Use a fresh archive for the actual export.
        export_archive = saved_model.ExportArchive()
        export_archive.track(model)
        export_archive.add_endpoint(
            "call",
            model.__call__,
            input_signature=[tf.TensorSpec(shape=(None, 10), dtype=tf.float32)],
        )
        export_archive.write_out(temp_filepath)
        revived_model = tf.saved_model.load(temp_filepath)
        self.assertAllClose(ref_output, revived_model.call(ref_input))
        # Test with a different batch size
        revived_model.call(tf.random.normal((6, 10)))
    def test_low_level_model_export_with_alias(self):
        """Function aliases passed via SaveOptions survive save and load."""
        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
        model = get_model()
        ref_input = tf.random.normal((3, 10))
        ref_output = model(ref_input)
        export_archive = saved_model.ExportArchive()
        export_archive.track(model)
        fn = export_archive.add_endpoint(
            "call",
            model.__call__,
            input_signature=[tf.TensorSpec(shape=(None, 10), dtype=tf.float32)],
        )
        # Alias the endpoint function when writing the artifact.
        export_archive.write_out(
            temp_filepath,
            tf.saved_model.SaveOptions(function_aliases={"call_alias": fn}),
        )
        revived_model = tf.saved_model.load(
            temp_filepath,
            options=tf.saved_model.LoadOptions(
                experimental_load_function_aliases=True
            ),
        )
        self.assertAllClose(
            ref_output, revived_model.function_aliases["call_alias"](ref_input)
        )
        # Test with a different batch size
        revived_model.function_aliases["call_alias"](tf.random.normal((6, 10)))
    @parameterized.named_parameters(
        named_product(model_type=["sequential", "functional", "subclass"])
    )
    def test_low_level_model_export_with_dynamic_dims(self, model_type):
        """Endpoints declared with fully dynamic (None) dims accept inputs
        of varying batch and feature sizes after reload."""
        class ReductionLayer(layers.Layer):
            def call(self, inputs):
                return ops.max(inputs, axis=1)
        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
        model = get_model(
            model_type,
            input_shape=[(None,), (None,)],
            layer_list=[layers.Concatenate(), ReductionLayer()],
        )
        ref_input = [tf.random.normal((3, 8)), tf.random.normal((3, 6))]
        ref_output = model(ref_input)
        export_archive = saved_model.ExportArchive()
        export_archive.track(model)
        export_archive.add_endpoint(
            "call",
            model.__call__,
            input_signature=[
                [
                    tf.TensorSpec(shape=(None, None), dtype=tf.float32),
                    tf.TensorSpec(shape=(None, None), dtype=tf.float32),
                ]
            ],
        )
        export_archive.write_out(temp_filepath)
        revived_model = tf.saved_model.load(temp_filepath)
        self.assertAllClose(ref_output, revived_model.call(ref_input))
        # Test with a different batch size
        revived_model.call([tf.random.normal((6, 8)), tf.random.normal((6, 6))])
        # Test with a different batch size and different dynamic sizes
        revived_model.call([tf.random.normal((6, 3)), tf.random.normal((6, 5))])
    @pytest.mark.skipif(
        backend.backend() != "jax",
        reason="This test is only for the JAX backend.",
    )
    def test_low_level_model_export_with_jax2tf_kwargs(self):
        """jax2tf kwargs are forwarded; invalid platforms raise ValueError."""
        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
        model = get_model()
        ref_input = tf.random.normal((3, 10))
        ref_output = model(ref_input)
        export_archive = saved_model.ExportArchive()
        export_archive.track(model)
        export_archive.add_endpoint(
            "call",
            model.__call__,
            input_signature=[tf.TensorSpec(shape=(None, 10), dtype=tf.float32)],
            jax2tf_kwargs={
                "native_serialization": True,
                "native_serialization_platforms": ("cpu", "tpu"),
            },
        )
        # An unknown serialization platform should be rejected eagerly.
        with self.assertRaisesRegex(
            ValueError, "native_serialization_platforms.*bogus"
        ):
            export_archive.add_endpoint(
                "call2",
                model.__call__,
                input_signature=[
                    tf.TensorSpec(shape=(None, 10), dtype=tf.float32)
                ],
                jax2tf_kwargs={
                    "native_serialization": True,
                    "native_serialization_platforms": ("cpu", "bogus"),
                },
            )
        export_archive.write_out(temp_filepath)
        revived_model = tf.saved_model.load(temp_filepath)
        self.assertAllClose(ref_output, revived_model.call(ref_input))
    @pytest.mark.skipif(
        backend.backend() != "jax",
        reason="This test is only for the JAX backend.",
    )
    def test_low_level_model_export_with_jax2tf_polymorphic_shapes(self):
        """Explicit `polymorphic_shapes` are needed for tied dynamic dims."""
        class SquareLayer(layers.Layer):
            def call(self, inputs):
                return ops.matmul(inputs, inputs)
        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
        model = CustomModel([SquareLayer()])
        ref_input = tf.random.normal((3, 10, 10))
        ref_output = model(ref_input)
        signature = [tf.TensorSpec(shape=(None, None, None), dtype=tf.float32)]
        with self.assertRaises(TypeError):
            # This will fail because the polymorphic_shapes that is
            # automatically generated will not account for the fact that
            # dynamic dimensions 1 and 2 must have the same value.
            export_archive = saved_model.ExportArchive()
            export_archive.track(model)
            export_archive.add_endpoint(
                "call",
                model.__call__,
                input_signature=signature,
                jax2tf_kwargs={},
            )
            export_archive.write_out(temp_filepath)
        # Spelling out that dims 1 and 2 are equal makes the export succeed.
        export_archive = saved_model.ExportArchive()
        export_archive.track(model)
        export_archive.add_endpoint(
            "call",
            model.__call__,
            input_signature=signature,
            jax2tf_kwargs={"polymorphic_shapes": ["(batch, a, a)"]},
        )
        export_archive.write_out(temp_filepath)
        revived_model = tf.saved_model.load(temp_filepath)
        self.assertAllClose(ref_output, revived_model.call(ref_input))
    @pytest.mark.skipif(
        backend.backend() != "tensorflow",
        reason="This test is native to the TF backend.",
    )
    def test_endpoint_registration_tf_function(self):
        """An already-traced `tf.function` can be registered directly as
        an endpoint without passing an `input_signature`, and variable
        tracking survives the export/reload round trip."""
        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
        model = get_model()
        ref_input = tf.random.normal((3, 10))
        ref_output = model(ref_input)
        # Test variable tracking
        export_archive = saved_model.ExportArchive()
        export_archive.track(model)
        self.assertLen(export_archive.variables, 8)
        self.assertLen(export_archive.trainable_variables, 6)
        self.assertLen(export_archive.non_trainable_variables, 2)
        @tf.function()
        def my_endpoint(x):
            return model(x)
        # Test registering an endpoint that is a tf.function (called)
        my_endpoint(ref_input)  # Trace fn before registration
        export_archive.add_endpoint(
            "call",
            my_endpoint,
        )
        export_archive.write_out(temp_filepath)
        revived_model = tf.saved_model.load(temp_filepath)
        # The internal `_tracked` attribute must not leak into the
        # reloaded artifact.
        self.assertFalse(hasattr(revived_model, "_tracked"))
        self.assertAllClose(ref_output, revived_model.call(ref_input))
        self.assertLen(revived_model.variables, 8)
        self.assertLen(revived_model.trainable_variables, 6)
        self.assertLen(revived_model.non_trainable_variables, 2)
    @pytest.mark.skipif(
        backend.backend() != "jax",
        reason="This test is native to the JAX backend.",
    )
    def test_jax_endpoint_registration_tf_function(self):
        """A jax2tf-converted function wrapped in a `tf.function` can be
        registered as an endpoint, with variables exposed as
        `tf.Variable` on the archive."""
        model = get_model()
        ref_input = np.random.normal(size=(3, 10))
        model(ref_input)
        # build a JAX function
        def model_call(x):
            return model(x)
        from jax import default_backend as jax_device
        from jax.experimental import jax2tf
        # Disable native serialization when JAX runs on GPU but TF sees
        # no GPU. NOTE(review): presumably the two setups are
        # incompatible in that configuration — confirm.
        native_jax_compatible = not (
            jax_device() == "gpu"
            and len(tf.config.list_physical_devices("GPU")) == 0
        )
        # now, convert JAX function
        converted_model_call = jax2tf.convert(
            model_call,
            native_serialization=native_jax_compatible,
            polymorphic_shapes=["(b, 10)"],
        )
        # you can now build a TF inference function
        @tf.function(
            input_signature=[tf.TensorSpec(shape=(None, 10), dtype=tf.float32)],
            autograph=False,
        )
        def infer_fn(x):
            return converted_model_call(x)
        ref_output = infer_fn(ref_input)
        # Export with TF inference function as endpoint
        temp_filepath = os.path.join(self.get_temp_dir(), "my_model")
        export_archive = saved_model.ExportArchive()
        export_archive.track(model)
        export_archive.add_endpoint("serve", infer_fn)
        export_archive.write_out(temp_filepath)
        # Reload and verify outputs
        revived_model = tf.saved_model.load(temp_filepath)
        self.assertFalse(hasattr(revived_model, "_tracked"))
        self.assertAllClose(ref_output, revived_model.serve(ref_input))
        self.assertLen(revived_model.variables, 8)
        self.assertLen(revived_model.trainable_variables, 6)
        self.assertLen(revived_model.non_trainable_variables, 2)
        # Assert all variables wrapped as `tf.Variable`
        assert isinstance(export_archive.variables[0], tf.Variable)
        assert isinstance(export_archive.trainable_variables[0], tf.Variable)
        assert isinstance(
            export_archive.non_trainable_variables[0], tf.Variable
        )
    @pytest.mark.skipif(
        backend.backend() != "jax",
        reason="This test is native to the JAX backend.",
    )
    def test_jax_multi_unknown_endpoint_registration(self):
        """Endpoint registration with two unknown dynamic dims (batch
        and time) via jax2tf `polymorphic_shapes=["(b, t, 1)"]`."""
        window_size = 100
        X = np.random.random((1024, window_size, 1))
        Y = np.random.random((1024, window_size, 1))
        model = models.Sequential(
            [
                layers.Dense(128, activation="relu"),
                layers.Dense(64, activation="relu"),
                layers.Dense(1, activation="relu"),
            ]
        )
        model.compile(optimizer="adam", loss="mse")
        model.fit(X, Y, batch_size=32)
        # build a JAX function
        def model_call(x):
            return model(x)
        from jax import default_backend as jax_device
        from jax.experimental import jax2tf
        # Disable native serialization when JAX runs on GPU but TF sees
        # no GPU. NOTE(review): presumably incompatible in that setup.
        native_jax_compatible = not (
            jax_device() == "gpu"
            and len(tf.config.list_physical_devices("GPU")) == 0
        )
        # now, convert JAX function
        converted_model_call = jax2tf.convert(
            model_call,
            native_serialization=native_jax_compatible,
            polymorphic_shapes=["(b, t, 1)"],
        )
        # you can now build a TF inference function
        @tf.function(
            input_signature=[
                tf.TensorSpec(shape=(None, None, 1), dtype=tf.float32)
            ],
            autograph=False,
        )
        def infer_fn(x):
            return converted_model_call(x)
        ref_input = np.random.random((1024, window_size, 1))
        ref_output = infer_fn(ref_input)
        # Export with TF inference function as endpoint
        temp_filepath = os.path.join(self.get_temp_dir(), "my_model")
        export_archive = saved_model.ExportArchive()
        export_archive.track(model)
        export_archive.add_endpoint("serve", infer_fn)
        export_archive.write_out(temp_filepath)
        # Reload and verify outputs
        revived_model = tf.saved_model.load(temp_filepath)
        self.assertFalse(hasattr(revived_model, "_tracked"))
        self.assertAllClose(ref_output, revived_model.serve(ref_input))
        # Three Dense layers -> 6 weights, all trainable (no BatchNorm).
        self.assertLen(revived_model.variables, 6)
        self.assertLen(revived_model.trainable_variables, 6)
        self.assertLen(revived_model.non_trainable_variables, 0)
        # Assert all variables wrapped as `tf.Variable`
        assert isinstance(export_archive.variables[0], tf.Variable)
        assert isinstance(export_archive.trainable_variables[0], tf.Variable)
def test_layer_export(self):
temp_filepath = os.path.join(self.get_temp_dir(), "exported_layer")
layer = layers.BatchNormalization()
ref_input = tf.random.normal((3, 10))
ref_output = layer(ref_input) # Build layer (important)
export_archive = saved_model.ExportArchive()
export_archive.track(layer)
export_archive.add_endpoint(
"call",
layer.call,
input_signature=[tf.TensorSpec(shape=(None, 10), dtype=tf.float32)],
)
export_archive.write_out(temp_filepath)
revived_layer = tf.saved_model.load(temp_filepath)
self.assertAllClose(ref_output, revived_layer.call(ref_input))
    def test_multi_input_output_functional_model(self):
        """`model.export()` handles multi-input/multi-output functional
        models with both list and dict input structures."""
        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
        x1 = layers.Input((2,))
        x2 = layers.Input((2,))
        y1 = layers.Dense(3)(x1)
        y2 = layers.Dense(3)(x2)
        model = models.Model([x1, x2], [y1, y2])
        ref_inputs = [tf.random.normal((3, 2)), tf.random.normal((3, 2))]
        ref_outputs = model(ref_inputs)
        # `export()` registers the default "serve" endpoint.
        model.export(temp_filepath)
        revived_model = tf.saved_model.load(temp_filepath)
        self.assertAllClose(ref_outputs[0], revived_model.serve(ref_inputs)[0])
        self.assertAllClose(ref_outputs[1], revived_model.serve(ref_inputs)[1])
        # Test with a different batch size
        revived_model.serve(
            [tf.random.normal((6, 2)), tf.random.normal((6, 2))]
        )
        # Now test dict inputs
        model = models.Model({"x1": x1, "x2": x2}, [y1, y2])
        ref_inputs = {
            "x1": tf.random.normal((3, 2)),
            "x2": tf.random.normal((3, 2)),
        }
        ref_outputs = model(ref_inputs)
        model.export(temp_filepath)
        revived_model = tf.saved_model.load(temp_filepath)
        self.assertAllClose(ref_outputs[0], revived_model.serve(ref_inputs)[0])
        self.assertAllClose(ref_outputs[1], revived_model.serve(ref_inputs)[1])
        # Test with a different batch size
        revived_model.serve(
            {
                "x1": tf.random.normal((6, 2)),
                "x2": tf.random.normal((6, 2)),
            }
        )
    @pytest.mark.skipif(
        backend.backend() != "tensorflow",
        reason="String lookup requires TensorFlow backend",
    )
    def test_model_with_lookup_table(self):
        """A model holding a TF lookup-table resource (adapted
        `TextVectorization`) exports and reloads with identical
        outputs."""
        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
        text_vectorization = layers.TextVectorization()
        text_vectorization.adapt(["one two", "three four", "five six"])
        model = models.Sequential(
            [
                layers.Input(shape=(), dtype="string"),
                text_vectorization,
                layers.Embedding(10, 32),
                layers.Dense(1),
            ]
        )
        ref_input = tf.convert_to_tensor(["one two three four"])
        ref_output = model(ref_input)
        saved_model.export_saved_model(model, temp_filepath)
        revived_model = tf.saved_model.load(temp_filepath)
        self.assertAllClose(ref_output, revived_model.serve(ref_input))
def test_track_multiple_layers(self):
temp_filepath = os.path.join(self.get_temp_dir(), "exported_model")
layer_1 = layers.Dense(2)
ref_input_1 = tf.random.normal((3, 4))
ref_output_1 = layer_1(ref_input_1)
layer_2 = layers.Dense(3)
ref_input_2 = tf.random.normal((3, 5))
ref_output_2 = layer_2(ref_input_2)
export_archive = saved_model.ExportArchive()
export_archive.add_endpoint(
"call_1",
layer_1.call,
input_signature=[tf.TensorSpec(shape=(None, 4), dtype=tf.float32)],
)
export_archive.add_endpoint(
"call_2",
layer_2.call,
input_signature=[tf.TensorSpec(shape=(None, 5), dtype=tf.float32)],
)
export_archive.write_out(temp_filepath)
revived_layer = tf.saved_model.load(temp_filepath)
self.assertAllClose(ref_output_1, revived_layer.call_1(ref_input_1))
self.assertAllClose(ref_output_2, revived_layer.call_2(ref_input_2))
def test_non_standard_layer_signature(self):
temp_filepath = os.path.join(self.get_temp_dir(), "exported_layer")
layer = layers.MultiHeadAttention(2, 2)
x1 = tf.random.normal((3, 2, 2))
x2 = tf.random.normal((3, 2, 2))
ref_output = layer(x1, x2) # Build layer (important)
export_archive = saved_model.ExportArchive()
export_archive.track(layer)
export_archive.add_endpoint(
"call",
layer.call,
input_signature=[
tf.TensorSpec(shape=(None, 2, 2), dtype=tf.float32),
tf.TensorSpec(shape=(None, 2, 2), dtype=tf.float32),
],
)
export_archive.write_out(temp_filepath)
revived_layer = tf.saved_model.load(temp_filepath)
self.assertAllClose(ref_output, revived_layer.call(x1, x2))
    def test_non_standard_layer_signature_with_kwargs(self):
        """After reload, endpoint arguments can also be passed by
        keyword (`query=`/`value=`), matching the layer's signature."""
        temp_filepath = os.path.join(self.get_temp_dir(), "exported_layer")
        layer = layers.MultiHeadAttention(2, 2)
        x1 = tf.random.normal((3, 2, 2))
        x2 = tf.random.normal((3, 2, 2))
        ref_output = layer(x1, x2)  # Build layer (important)
        export_archive = saved_model.ExportArchive()
        export_archive.track(layer)
        export_archive.add_endpoint(
            "call",
            layer.call,
            input_signature=[
                tf.TensorSpec(shape=(None, 2, 2), dtype=tf.float32),
                tf.TensorSpec(shape=(None, 2, 2), dtype=tf.float32),
            ],
        )
        export_archive.write_out(temp_filepath)
        revived_layer = tf.saved_model.load(temp_filepath)
        self.assertAllClose(ref_output, revived_layer.call(query=x1, value=x2))
        # Test with a different batch size
        revived_layer.call(
            query=tf.random.normal((6, 2, 2)), value=tf.random.normal((6, 2, 2))
        )
def test_variable_collection(self):
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/export/__init__.py | keras/src/export/__init__.py | from keras.src.export.litert import LiteRTExporter
from keras.src.export.litert import export_litert
from keras.src.export.onnx import export_onnx
from keras.src.export.openvino import export_openvino
from keras.src.export.saved_model import ExportArchive
from keras.src.export.saved_model import export_saved_model
from keras.src.export.tfsm_layer import TFSMLayer
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/export/openvino.py | keras/src/export/openvino.py | import warnings
from keras.src import backend
from keras.src import tree
from keras.src.export.export_utils import convert_spec_to_tensor
from keras.src.export.export_utils import get_input_signature
from keras.src.export.export_utils import make_tf_tensor_spec
from keras.src.export.saved_model import DEFAULT_ENDPOINT_NAME
from keras.src.export.saved_model import ExportArchive
from keras.src.utils import io_utils
def export_openvino(
    model, filepath, verbose=None, input_signature=None, **kwargs
):
    """Export the model as an OpenVINO IR artifact for inference.
    This method exports the model to the OpenVINO IR format,
    which includes two files:
    a `.xml` file containing the model structure and a `.bin` file
    containing the weights.
    The exported model contains only the forward pass
    (i.e., the model's `call()` method), and can be deployed with the
    OpenVINO Runtime for fast inference on CPU and other Intel hardware.
    Args:
        filepath: `str` or `pathlib.Path`. Path to the output `.xml` file.
            The corresponding `.bin` file will be saved alongside it.
        verbose: Optional `bool`. Whether to print a confirmation message
            after export. If `None`, it uses the default verbosity configured
            by the backend.
        input_signature: Optional. Specifies the shape and dtype of the
            model inputs. If not provided, it will be inferred.
        **kwargs: Additional keyword arguments.
    Example:
    ```python
    import keras
    # Define or load a Keras model
    model = keras.models.Sequential([
        keras.layers.Input(shape=(128,)),
        keras.layers.Dense(64, activation="relu"),
        keras.layers.Dense(10)
    ])
    # Export to OpenVINO IR
    model.export("model.xml", format="openvino")
    ```
    """
    # NOTE(review): `assert` is stripped under `python -O`; consider
    # raising ValueError for this input validation instead — TODO confirm
    # no caller depends on AssertionError.
    assert filepath.endswith(".xml"), (
        "The OpenVINO export requires the filepath to end with '.xml'. "
        f"Got: {filepath}"
    )
    import openvino as ov
    import openvino.opset14 as ov_opset
    from keras.src.backend.openvino.core import OPENVINO_DTYPES
    from keras.src.backend.openvino.core import OpenVINOKerasTensor
    actual_verbose = verbose if verbose is not None else True
    if input_signature is None:
        input_signature = get_input_signature(model)
    if backend.backend() == "openvino":
        # Native OpenVINO backend: build the ov.Model graph directly by
        # calling the model on OpenVINO parameter nodes.
        import inspect
        def parameterize_inputs(inputs, prefix=""):
            # Replace each leaf tensor in the (possibly nested) input
            # structure with a named OpenVINO parameter node.
            if isinstance(inputs, (list, tuple)):
                return [
                    parameterize_inputs(e, f"{prefix}{i}")
                    for i, e in enumerate(inputs)
                ]
            elif isinstance(inputs, dict):
                return {k: parameterize_inputs(v, k) for k, v in inputs.items()}
            elif isinstance(inputs, OpenVINOKerasTensor):
                ov_type = OPENVINO_DTYPES[str(inputs.dtype)]
                ov_shape = list(inputs.shape)
                param = ov_opset.parameter(shape=ov_shape, dtype=ov_type)
                param.set_friendly_name(prefix)
                return OpenVINOKerasTensor(param.output(0))
            else:
                raise TypeError(f"Unknown input type: {type(inputs)}")
        if isinstance(input_signature, list) and len(input_signature) == 1:
            input_signature = input_signature[0]
        # Dynamic (None) dimensions are materialized as 1 for tracing.
        sample_inputs = tree.map_structure(
            lambda x: convert_spec_to_tensor(x, replace_none_number=1),
            input_signature,
        )
        params = parameterize_inputs(sample_inputs)
        signature = inspect.signature(model.call)
        # Multi-parameter `call` signatures receive the inputs unpacked.
        if len(signature.parameters) > 1 and isinstance(params, (list, tuple)):
            outputs = model(*params)
        else:
            outputs = model(params)
        parameters = [p.output.get_node() for p in tree.flatten(params)]
        results = [ov_opset.result(r.output) for r in tree.flatten(outputs)]
        ov_model = ov.Model(results=results, parameters=parameters)
        flat_specs = tree.flatten(input_signature)
        for ov_input, spec in zip(ov_model.inputs, flat_specs):
            # Respect the dynamic axes from the original input signature.
            dynamic_shape_dims = [
                -1 if dim is None else dim for dim in spec.shape
            ]
            dynamic_shape = ov.PartialShape(dynamic_shape_dims)
            ov_input.get_node().set_partial_shape(dynamic_shape)
    elif backend.backend() in ("tensorflow", "jax"):
        # Trace the model into a concrete tf.function, then let
        # OpenVINO convert it.
        inputs = tree.map_structure(make_tf_tensor_spec, input_signature)
        decorated_fn = get_concrete_fn(model, inputs, **kwargs)
        ov_model = ov.convert_model(decorated_fn)
        set_names(ov_model, inputs)
    elif backend.backend() == "torch":
        import torch
        # TorchScript-trace the model on concrete sample tensors
        # (dynamic dims replaced with 1), then convert the trace.
        sample_inputs = tree.map_structure(
            lambda x: convert_spec_to_tensor(x, replace_none_number=1),
            input_signature,
        )
        sample_inputs = tuple(sample_inputs)
        if hasattr(model, "eval"):
            model.eval()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=torch.jit.TracerWarning)
            traced = torch.jit.trace(model, sample_inputs)
        ov_model = ov.convert_model(traced)
        set_names(ov_model, sample_inputs)
    else:
        raise NotImplementedError(
            "`export_openvino` is only compatible with OpenVINO, "
            "TensorFlow, JAX and Torch backends."
        )
    ov.serialize(ov_model, filepath)
    if actual_verbose:
        io_utils.print_msg(f"Saved OpenVINO IR at '{filepath}'.")
def collect_names(structure):
    """Yield a flat sequence of input names for a (possibly nested)
    structure of specs/tensors.

    Dict keys take precedence over a leaf's own `name` attribute; a
    leaf with no truthy `name` falls back to the literal "input".
    """
    if isinstance(structure, dict):
        for key, value in structure.items():
            if isinstance(value, (dict, list, tuple)):
                yield from collect_names(value)
            else:
                yield key
    elif isinstance(structure, (list, tuple)):
        for item in structure:
            yield from collect_names(item)
    elif getattr(structure, "name", None):
        yield structure.name
    else:
        yield "input"
def set_names(model, inputs):
    """Propagate the names collected from `inputs` onto the OpenVINO
    model's input nodes (both friendly name and tensor names)."""
    for ov_input, name in zip(model.inputs, list(collect_names(inputs))):
        node = ov_input.get_node()
        node.set_friendly_name(name)
        ov_input.tensor.set_names({name})
def _check_jax_kwargs(kwargs):
    """Return a validated copy of `kwargs` for exporting on the JAX
    backend.

    Fills in the only supported defaults (`is_static=True`, jax2tf with
    XLA and native serialization disabled) and raises `ValueError` when
    the caller has overridden them with unsupported values.
    """
    kwargs = dict(kwargs)
    kwargs.setdefault("is_static", True)
    if "jax2tf_kwargs" not in kwargs:
        kwargs["jax2tf_kwargs"] = {
            "enable_xla": False,
            "native_serialization": False,
        }
    if kwargs["is_static"] is not True:
        raise ValueError(
            "`is_static` must be `True` in `kwargs` when using the jax backend."
        )
    jax2tf_kwargs = kwargs["jax2tf_kwargs"]
    if jax2tf_kwargs["enable_xla"] is not False:
        raise ValueError(
            "`enable_xla` must be `False` in `kwargs['jax2tf_kwargs']` "
            "when using the jax backend."
        )
    if jax2tf_kwargs["native_serialization"] is not False:
        raise ValueError(
            "`native_serialization` must be `False` in "
            "`kwargs['jax2tf_kwargs']` when using the jax backend."
        )
    return kwargs
def get_concrete_fn(model, input_signature, **kwargs):
    """Build an `ExportArchive` for `model` and return the concrete
    (traced) function for the default serving endpoint."""
    if backend.backend() == "jax":
        kwargs = _check_jax_kwargs(kwargs)
    archive = ExportArchive()
    archive.track_and_add_endpoint(
        DEFAULT_ENDPOINT_NAME, model, input_signature, **kwargs
    )
    if backend.backend() == "tensorflow":
        # Track TF resources (e.g. lookup tables) before tracing.
        archive._filter_and_track_resources()
    return archive._get_concrete_fn(DEFAULT_ENDPOINT_NAME)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/export/openvino_test.py | keras/src/export/openvino_test.py | import os
import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import testing
from keras.src import tree
from keras.src.export import openvino
from keras.src.saving import saving_lib
from keras.src.testing.test_utils import named_product
try:
import openvino as ov
except ImportError:
ov = None
class CustomModel(models.Model):
    """Minimal subclassed model that applies `layer_list` in order."""

    def __init__(self, layer_list):
        super().__init__()
        self.layer_list = layer_list

    def call(self, input):
        x = input
        for layer in self.layer_list:
            x = layer(x)
        return x
def get_model(type="sequential", input_shape=(10,), layer_list=None):
    """Build a small test model of the requested `type`.

    Args:
        type: One of `"sequential"`, `"functional"`, `"subclass"` or
            `"lstm"`.
        input_shape: Shape (or nested structure of shapes) used to build
            the inputs of the functional variant.
        layer_list: Optional list of layers; defaults to a small MLP.

    Returns:
        A Keras model instance.

    Raises:
        ValueError: If `type` is not one of the supported values.
    """
    layer_list = layer_list or [
        layers.Dense(10, activation="relu"),
        layers.BatchNormalization(),
        layers.Dense(1, activation="sigmoid"),
    ]
    if type == "sequential":
        return models.Sequential(layer_list)
    elif type == "functional":
        input = output = tree.map_shape_structure(layers.Input, input_shape)
        for layer in layer_list:
            output = layer(output)
        return models.Model(inputs=input, outputs=output)
    elif type == "subclass":
        return CustomModel(layer_list)
    elif type == "lstm":
        # https://github.com/keras-team/keras/issues/21390
        inputs = layers.Input((4, 10))
        x = layers.Bidirectional(
            layers.LSTM(
                10,
                kernel_initializer="he_normal",
                return_sequences=True,
                kernel_regularizer=None,
            ),
            merge_mode="sum",
        )(inputs)
        outputs = layers.Bidirectional(
            layers.LSTM(
                10,
                kernel_initializer="he_normal",
                return_sequences=True,
                kernel_regularizer=None,
            ),
            merge_mode="concat",
        )(x)
        return models.Model(inputs=inputs, outputs=outputs)
    # Previously an unknown `type` silently returned `None`; fail loudly
    # so typos in parameterized tests are caught immediately.
    raise ValueError(
        f"Unknown model type: {type!r}. Expected one of 'sequential', "
        "'functional', 'subclass', 'lstm'."
    )
@pytest.mark.skipif(ov is None, reason="OpenVINO is not installed")
@pytest.mark.skipif(
    backend.backend() not in ("tensorflow", "openvino", "jax", "torch"),
    reason=(
        "`export_openvino` only currently supports"
        "the tensorflow, jax, torch and openvino backends."
    ),
)
@pytest.mark.skipif(testing.jax_uses_gpu(), reason="Leads to core dumps on CI")
@pytest.mark.skipif(
    testing.tensorflow_uses_gpu(), reason="Leads to core dumps on CI"
)
class ExportOpenVINOTest(testing.TestCase):
    """End-to-end tests for `openvino.export_openvino`: export a model
    to OpenVINO IR, reload with the OpenVINO runtime, and compare the
    compiled model's outputs against the original Keras model."""
    @parameterized.named_parameters(
        named_product(
            model_type=["sequential", "functional", "subclass", "lstm"]
        )
    )
    def test_standard_model_export(self, model_type):
        if model_type == "lstm":
            self.skipTest(
                "LSTM export not supported - unimplemented QR operation"
            )
        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model.xml")
        model = get_model(model_type)
        batch_size = 3
        if model_type == "lstm":
            ref_input = np.random.normal(size=(batch_size, 4, 10))
        else:
            ref_input = np.random.normal(size=(batch_size, 10))
        ref_input = ref_input.astype("float32")
        ref_output = model(ref_input)
        try:
            openvino.export_openvino(model, temp_filepath)
        except Exception as e:
            if "XlaCallModule" in str(e):
                self.skipTest("OpenVINO does not support XlaCallModule yet")
            raise e
        # Load and run inference with OpenVINO
        core = ov.Core()
        ov_model = core.read_model(temp_filepath)
        compiled_model = core.compile_model(ov_model, "CPU")
        ov_output = compiled_model([ref_input])[compiled_model.output(0)]
        self.assertAllClose(ref_output, ov_output)
        # The exported model must accept a different batch size
        # (dynamic batch dimension).
        larger_input = np.concatenate([ref_input, ref_input], axis=0)
        compiled_model([larger_input])
    @parameterized.named_parameters(
        named_product(struct_type=["tuple", "array", "dict"])
    )
    def test_model_with_input_structure(self, struct_type):
        class TupleModel(models.Model):
            def call(self, inputs):
                x, y = inputs
                return ops.add(x, y)
        class ArrayModel(models.Model):
            def call(self, inputs):
                x = inputs[0]
                y = inputs[1]
                return ops.add(x, y)
        class DictModel(models.Model):
            def call(self, inputs):
                x = inputs["x"]
                y = inputs["y"]
                return ops.add(x, y)
        batch_size = 3
        ref_input = np.random.normal(size=(batch_size, 10)).astype("float32")
        if struct_type == "tuple":
            model = TupleModel()
            ref_input = (ref_input, ref_input * 2)
        elif struct_type == "array":
            model = ArrayModel()
            ref_input = [ref_input, ref_input * 2]
        elif struct_type == "dict":
            model = DictModel()
            ref_input = {"x": ref_input, "y": ref_input * 2}
        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model.xml")
        ref_output = model(tree.map_structure(ops.convert_to_tensor, ref_input))
        try:
            openvino.export_openvino(model, temp_filepath)
        except Exception as e:
            if "XlaCallModule" in str(e):
                self.skipTest("OpenVINO does not support XlaCallModule yet")
            raise e
        # Load and run inference with OpenVINO
        core = ov.Core()
        ov_model = core.read_model(temp_filepath)
        compiled_model = core.compile_model(ov_model, "CPU")
        # OpenVINO takes a flat list of inputs regardless of the
        # original structure.
        if isinstance(ref_input, dict):
            ov_inputs = [ref_input[key] for key in ref_input.keys()]
        else:
            ov_inputs = list(ref_input)
        ov_output = compiled_model(ov_inputs)[compiled_model.output(0)]
        self.assertAllClose(ref_output, ov_output)
        # Test with keras.saving_lib
        temp_filepath = os.path.join(
            self.get_temp_dir(), "exported_model.keras"
        )
        saving_lib.save_model(model, temp_filepath)
        revived_model = saving_lib.load_model(
            temp_filepath,
            {
                "TupleModel": TupleModel,
                "ArrayModel": ArrayModel,
                "DictModel": DictModel,
            },
        )
        self.assertAllClose(ref_output, revived_model(ref_input))
        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model2.xml")
        try:
            openvino.export_openvino(revived_model, temp_filepath)
        except Exception as e:
            if "XlaCallModule" in str(e):
                self.skipTest("OpenVINO does not support XlaCallModule yet")
            raise e
        # The dynamic batch dimension must also work on the first
        # compiled model with doubled inputs.
        bigger_ref_input = tree.map_structure(
            lambda x: np.concatenate([x, x], axis=0), ref_input
        )
        if isinstance(bigger_ref_input, dict):
            bigger_ov_inputs = [
                bigger_ref_input[key] for key in bigger_ref_input.keys()
            ]
        else:
            bigger_ov_inputs = list(bigger_ref_input)
        compiled_model(bigger_ov_inputs)
    def test_model_with_multiple_inputs(self):
        class TwoInputsModel(models.Model):
            def call(self, x, y):
                return x + y
            def build(self, y_shape, x_shape):
                self.built = True
        temp_filepath = os.path.join(self.get_temp_dir(), "exported_model.xml")
        model = TwoInputsModel()
        batch_size = 3
        ref_input_x = np.random.normal(size=(batch_size, 10)).astype("float32")
        ref_input_y = np.random.normal(size=(batch_size, 10)).astype("float32")
        ref_output = model(ref_input_x, ref_input_y)
        try:
            openvino.export_openvino(model, temp_filepath)
        except Exception as e:
            if "XlaCallModule" in str(e):
                self.skipTest("OpenVINO does not support XlaCallModule yet")
            raise e
        # Load and run inference with OpenVINO
        core = ov.Core()
        ov_model = core.read_model(temp_filepath)
        compiled_model = core.compile_model(ov_model, "CPU")
        ov_output = compiled_model([ref_input_x, ref_input_y])[
            compiled_model.output(0)
        ]
        self.assertAllClose(ref_output, ov_output)
        # Dynamic batch dimension: a doubled batch must also run.
        larger_input_x = np.concatenate([ref_input_x, ref_input_x], axis=0)
        larger_input_y = np.concatenate([ref_input_y, ref_input_y], axis=0)
        compiled_model([larger_input_x, larger_input_y])
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/export/onnx.py | keras/src/export/onnx.py | import warnings
from keras.src import backend
from keras.src import tree
from keras.src.export.export_utils import convert_spec_to_tensor
from keras.src.export.export_utils import get_input_signature
from keras.src.export.export_utils import make_tf_tensor_spec
from keras.src.export.saved_model import DEFAULT_ENDPOINT_NAME
from keras.src.export.saved_model import ExportArchive
from keras.src.export.tf2onnx_lib import patch_tf2onnx
from keras.src.utils import io_utils
def export_onnx(
    model,
    filepath,
    verbose=None,
    input_signature=None,
    opset_version=None,
    **kwargs,
):
    """Export the model as a ONNX artifact for inference.
    This method lets you export a model to a lightweight ONNX artifact
    that contains the model's forward pass only (its `call()` method)
    and can be served via e.g. ONNX Runtime.
    The original code of the model (including any custom layers you may
    have used) is *no longer* necessary to reload the artifact -- it is
    entirely standalone.
    Args:
        filepath: `str` or `pathlib.Path` object. The path to save the artifact.
        verbose: `bool`. Whether to print a message during export. Defaults to
            `None`, which uses the default value set by different backends and
            formats.
        input_signature: Optional. Specifies the shape and dtype of the model
            inputs. Can be a structure of `keras.InputSpec`, `tf.TensorSpec`,
            `backend.KerasTensor`, or backend tensor. If not provided, it will
            be automatically computed. Defaults to `None`.
        opset_version: Optional. An integer value that specifies the ONNX opset
            version. If not provided, the default version for the backend will
            be used. Defaults to `None`.
        **kwargs: Additional keyword arguments.
    **Note:** This feature is currently supported only with TensorFlow, JAX and
    Torch backends.
    **Note:** The dtype policy must be "float32" for the model. You can further
    optimize the ONNX artifact using the ONNX toolkit. Learn more here:
    [https://onnxruntime.ai/docs/performance/](https://onnxruntime.ai/docs/performance/).
    **Note:** The dynamic shape feature is not yet supported with Torch
    backend. As a result, you must fully define the shapes of the inputs using
    `input_signature`. If `input_signature` is not provided, all instances of
    `None` (such as the batch size) will be replaced with `1`.
    Example:
    ```python
    # Export the model as a ONNX artifact
    model.export("path/to/location", format="onnx")
    # Load the artifact in a different process/environment
    ort_session = onnxruntime.InferenceSession("path/to/location")
    ort_inputs = {
        k.name: v for k, v in zip(ort_session.get_inputs(), input_data)
    }
    predictions = ort_session.run(None, ort_inputs)
    ```
    """
    actual_verbose = verbose
    if actual_verbose is None:
        actual_verbose = True  # Defaults to `True` for all backends.
    if input_signature is None:
        input_signature = get_input_signature(model)
        if not input_signature or not model._called:
            raise ValueError(
                "The model provided has never called. "
                "It must be called at least once before export."
            )
    # Give every input a stable name, falling back to positional names.
    input_names = [
        getattr(spec, "name", None) or f"input_{i}"
        for i, spec in enumerate(input_signature)
    ]
    if backend.backend() in ("tensorflow", "jax"):
        from keras.src.utils.module_utils import tf2onnx
        input_signature = tree.map_structure(
            make_tf_tensor_spec, input_signature
        )
        decorated_fn = get_concrete_fn(model, input_signature, **kwargs)
        # Use `tf2onnx` to convert the `decorated_fn` to the ONNX format.
        patch_tf2onnx()  # TODO: Remove this once `tf2onnx` supports numpy 2.
        tf2onnx.convert.from_function(
            decorated_fn,
            input_signature,
            opset=opset_version,
            output_path=filepath,
        )
    elif backend.backend() == "torch":
        import torch

        # Materialize concrete sample tensors (dynamic dims -> 1) for
        # tracing.
        sample_inputs = tree.map_structure(
            lambda x: convert_spec_to_tensor(x, replace_none_number=1),
            input_signature,
        )
        sample_inputs = tuple(sample_inputs)
        # TODO: Make dict model exportable.
        if any(isinstance(x, dict) for x in sample_inputs):
            raise ValueError(
                "Currently, `export_onnx` in the torch backend doesn't support "
                "dictionaries as inputs."
            )
        if hasattr(model, "eval"):
            model.eval()
        with warnings.catch_warnings():
            # Suppress some unuseful warnings.
            warnings.filterwarnings(
                "ignore",
                message=r".*\n.*\n*.*\n*.*export will treat it as a constant.*",
            )
            warnings.filterwarnings(
                "ignore",
                message=r".*not properly registered as a submodule,.*",
            )
            warnings.filterwarnings(
                "ignore",
                message=r".*which is what 'get_attr' Nodes typically target.*",
            )
            warnings.filterwarnings(
                "ignore",
                message=r".*underlying reference in the owning GraphModule.*",
            )
            warnings.filterwarnings(
                "ignore", message=r".*suppressed about get_attr references.*"
            )
            try:
                # Try the TorchDynamo-based ONNX exporter first.
                onnx_program = torch.onnx.export(
                    model,
                    sample_inputs,
                    verbose=actual_verbose,
                    opset_version=opset_version,
                    input_names=input_names,
                    dynamo=True,
                )
                if hasattr(onnx_program, "optimize"):
                    onnx_program.optimize()  # Only supported by torch>=2.6.0.
                onnx_program.save(filepath)
            except Exception:
                # Fix: this was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit raised during export.
                # Only fall back on genuine errors.
                if verbose is None:
                    # Set to `False` due to file system leakage issue:
                    # https://github.com/keras-team/keras/issues/20826
                    actual_verbose = False
                # Fall back to the TorchScript-based ONNX exporter.
                torch.onnx.export(
                    model,
                    sample_inputs,
                    filepath,
                    verbose=actual_verbose,
                    opset_version=opset_version,
                    input_names=input_names,
                )
    else:
        raise NotImplementedError(
            "`export_onnx` is only compatible with TensorFlow, JAX and "
            "Torch backends."
        )
    if actual_verbose:
        io_utils.print_msg(f"Saved artifact at '{filepath}'.")
def _check_jax_kwargs(kwargs):
    """Return a validated copy of `kwargs` for ONNX export on JAX.

    Fills in the only supported defaults (`is_static=True`, jax2tf with
    XLA and native serialization disabled) and rejects any override of
    those values with a `ValueError`.
    """
    checked = dict(kwargs)
    checked.setdefault("is_static", True)
    if "jax2tf_kwargs" not in checked:
        # TODO: These options will be deprecated in JAX. We need to
        # find another way to export ONNX.
        checked["jax2tf_kwargs"] = {
            "enable_xla": False,
            "native_serialization": False,
        }
    if checked["is_static"] is not True:
        raise ValueError(
            "`is_static` must be `True` in `kwargs` when using the jax backend."
        )
    if checked["jax2tf_kwargs"]["enable_xla"] is not False:
        raise ValueError(
            "`enable_xla` must be `False` in `kwargs['jax2tf_kwargs']` "
            "when using the jax backend."
        )
    if checked["jax2tf_kwargs"]["native_serialization"] is not False:
        raise ValueError(
            "`native_serialization` must be `False` in "
            "`kwargs['jax2tf_kwargs']` when using the jax backend."
        )
    return checked
def get_concrete_fn(model, input_signature, **kwargs):
    """Get the `tf.function` associated with the model."""
    if backend.backend() == "jax":
        kwargs = _check_jax_kwargs(kwargs)
    archive = ExportArchive()
    archive.track_and_add_endpoint(
        DEFAULT_ENDPOINT_NAME, model, input_signature, **kwargs
    )
    if backend.backend() == "tensorflow":
        # Track TF resources (e.g. lookup tables) before retrieval.
        archive._filter_and_track_resources()
    return archive._get_concrete_fn(DEFAULT_ENDPOINT_NAME)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/random/seed_generator_test.py | keras/src/random/seed_generator_test.py | import numpy as np
import pytest
from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.random import seed_generator
class SeedGeneratorTest(testing.TestCase):
    """Unit tests for `keras.src.random.seed_generator`."""
    def test_seed_generator_initialization(self):
        # Default construction creates state; an explicit seed lands in
        # state[0]; non-integer seeds are rejected.
        gen = seed_generator.SeedGenerator()
        self.assertIsNotNone(gen.state)
        seed = 12345
        gen = seed_generator.SeedGenerator(seed=seed)
        self.assertEqual(ops.convert_to_numpy(gen.state)[0], seed)
        with self.assertRaisesRegex(
            ValueError, "Argument `seed` must be an integer"
        ):
            seed_generator.SeedGenerator(seed="invalid_seed")
    def test_seed_generator_next(self):
        # Successive `next()` calls must yield different seed tensors.
        gen = seed_generator.SeedGenerator(seed=42)
        seed1 = ops.convert_to_numpy(gen.next())
        seed2 = ops.convert_to_numpy(gen.next())
        self.assertFalse(np.array_equal(seed1, seed2))
    def test_global_seed_generator(self):
        # The global generator is a process-wide singleton.
        gen1 = seed_generator.global_seed_generator()
        gen2 = seed_generator.global_seed_generator()
        self.assertEqual(gen1, gen2)
    def test_make_default_seed(self):
        # Two fresh default seeds should (with overwhelming probability)
        # differ.
        seed1 = seed_generator.make_default_seed()
        seed2 = seed_generator.make_default_seed()
        self.assertNotEqual(seed1, seed2)
    def test_seed_generator_dtype(self):
        # Both the state and drawn seeds use the backend seed dtype.
        gen = seed_generator.SeedGenerator(seed=42)
        self.assertEqual(gen.state.dtype, backend.random_seed_dtype())
        seed = gen.next()
        self.assertEqual(gen.state.dtype, backend.random_seed_dtype())
        self.assertEqual(
            backend.standardize_dtype(seed.dtype), backend.random_seed_dtype()
        )
    def test_draw_seed_from_seed_generator(self):
        # `draw_seed` consumes a SeedGenerator and returns a tensor.
        gen = seed_generator.SeedGenerator(seed=42)
        seed1 = seed_generator.draw_seed(gen)
        self.assertTrue(backend.is_tensor(seed1))
    def test_draw_seed_from_integer(self):
        # Integer seeds are converted to a seed tensor of the backend dtype.
        seed2 = seed_generator.draw_seed(12345)
        self.assertTrue(backend.is_tensor(seed2))
        self.assertEqual(
            backend.standardize_dtype(seed2.dtype), backend.random_seed_dtype()
        )
    def test_draw_seed_from_none(self):
        # `None` falls back to the global generator.
        seed3 = seed_generator.draw_seed(None)
        self.assertTrue(backend.is_tensor(seed3))
    def test_draw_seed_invalid(self):
        with self.assertRaisesRegex(
            ValueError, "Argument `seed` must be either an integer"
        ):
            seed_generator.draw_seed("invalid_seed")
    def test_seed_generator_unexpected_kwargs(self):
        with self.assertRaisesRegex(
            ValueError, "Unrecognized keyword arguments"
        ):
            seed_generator.SeedGenerator(invalid_arg="unexpected_value")
    @pytest.mark.skipif(
        backend.backend() != "jax", reason="This test requires the JAX backend"
    )
    def test_jax_tracing_with_global_seed_generator(self):
        # Unseeded use of the global generator inside a traced JAX
        # function must raise rather than silently bake in a constant.
        import jax
        @jax.jit
        def traced_function():
            return seed_generator.global_seed_generator().next()
        with self.assertRaisesRegex(
            ValueError,
            "When tracing a JAX function, you should only use seeded random",
        ):
            traced_function()
    def test_seed_generator_serialization(self):
        # Round-trips through get_config()/from_config().
        random_generator = seed_generator.SeedGenerator(seed=42, name="sg")
        self.run_class_serialization_test(random_generator)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/random/seed_generator.py | keras/src/random/seed_generator.py | import random as python_random
import numpy as np
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.backend.common import global_state
from keras.src.utils import jax_utils
from keras.src.utils.naming import auto_name
GLOBAL_SEED_GENERATOR = "global_seed_generator"
@keras_export("keras.random.SeedGenerator")
class SeedGenerator:
    """Generates variable seeds upon each call to a function generating
    random numbers.
    In Keras, all random number generators (such as
    `keras.random.normal()`) are stateless, meaning that if you pass an
    integer seed to them (such as `seed=42`), they will return the same
    values for repeated calls. To get different values for each
    call, a `SeedGenerator` providing the state of the random generator
    has to be used.
    Note that all the random number generators have a default seed of None,
    which implies that an internal global SeedGenerator is used.
    If you need to decouple the RNG from the global state you can provide
    a local `SeedGenerator` with either a deterministic or random initial
    state.
    Remark concerning the JAX backend: Note that the use of a local
    `SeedGenerator` as seed argument is required for JIT compilation of
    RNG with the JAX backend, because the use of global state is not
    supported.
    Example:
    ```python
    seed_gen = keras.random.SeedGenerator(seed=42)
    values = keras.random.normal(shape=(2, 3), seed=seed_gen)
    new_values = keras.random.normal(shape=(2, 3), seed=seed_gen)
    ```
    Usage in a layer:
    ```python
    class Dropout(keras.Layer):
        def __init__(self, **kwargs):
            super().__init__(**kwargs)
            self.seed_generator = keras.random.SeedGenerator(1337)
        def call(self, x, training=False):
            if training:
                return keras.random.dropout(
                    x, rate=0.5, seed=self.seed_generator
                )
            return x
    ```
    """
    def __init__(self, seed=None, name=None, **kwargs):
        # Auto-generate a layer-style unique name when none is given.
        if name is None:
            name = auto_name(self.__class__.__name__)
        self.name = name
        # Private hook: a custom backend shim may be injected via kwargs
        # (used internally, e.g. for tf.data-compatible layers).
        custom_backend = kwargs.pop("backend", None)
        if kwargs:
            raise ValueError(f"Unrecognized keyword arguments: {kwargs}")
        if custom_backend is not None:
            self.backend = custom_backend
        else:
            self.backend = backend
        # Remember the user-supplied seed (possibly None) so that
        # get_config() serializes the constructor argument, not the
        # mutated runtime state.
        self._initial_seed = seed
        if seed is None:
            seed = make_default_seed()
        if not isinstance(seed, int):
            raise ValueError(
                f"Argument `seed` must be an integer. Received: seed={seed}"
            )
        def seed_initializer(*args, **kwargs):
            # State layout is [seed, counter]; the counter starts at 0.
            dtype = kwargs.get("dtype", None)
            return self.backend.convert_to_tensor([seed, 0], dtype=dtype)
        with self.backend.name_scope(self.name, caller=self):
            # Non-trainable variable so the state survives tracing and
            # checkpointing without participating in gradients.
            self.state = self.backend.Variable(
                seed_initializer,
                shape=(2,),
                dtype=self.backend.random_seed_dtype(),
                trainable=False,
                aggregation="none",
                name="seed_generator_state",
            )
    def next(self, ordered=True):
        """Return the current seed state and advance it in place.
        When `ordered=True` the counter element is incremented by one,
        yielding a deterministic ordered sequence. When `ordered=False`
        the state is scrambled to produce near-unique values (used for
        unseeded global draws).
        """
        seed_state = self.state
        # Use * 1 to create a copy
        new_seed_value = seed_state.value * 1
        if ordered:
            increment = self.backend.convert_to_tensor(
                np.array([0, 1]), dtype=seed_state.dtype
            )
            self.state.assign(self.backend.numpy.add(seed_state, increment))
        else:
            # This produces a sequence of near-unique numbers
            # between 0 and 1M
            self.state.assign((seed_state + 1) * 5387 % 933199)
        return new_seed_value
    def get_config(self):
        # Serializes the *initial* seed, not the current mutable state.
        return {"seed": self._initial_seed, "name": self.name}
    @classmethod
    def from_config(cls, config):
        return cls(**config)
def global_seed_generator():
    """Return the process-wide `SeedGenerator`, creating it on first use.
    Raises a `ValueError` inside a JAX tracing scope, where global-state
    RNG is unsupported.
    """
    if jax_utils.is_in_jax_tracing_scope():
        raise ValueError(
            "[JAX RNG] When tracing a JAX function, "
            "you should only use seeded random ops, e.g. "
            "you should create a `SeedGenerator` instance, attach it "
            "to your layer/model, and pass the instance as the `seed` "
            "argument when calling random ops. Unseeded random ops "
            "would get incorrectly traced by JAX and would become constant "
            "after tracing. Example:\n\n"
            "```\n"
            "# Make sure to set the seed generator as a layer attribute\n"
            "self.seed_generator = keras.random.SeedGenerator(seed=1337)\n"
            "...\n"
            "out = keras.random.normal(shape=(1,), seed=self.seed_generator)\n"
            "```"
        )
    generator = global_state.get_global_attribute(GLOBAL_SEED_GENERATOR)
    if generator is not None:
        return generator
    generator = SeedGenerator()
    global_state.set_global_attribute(GLOBAL_SEED_GENERATOR, generator)
    return generator
def make_default_seed():
    """Draw a fresh random integer seed in the inclusive range [1, 1e9]."""
    return python_random.randint(1, 1_000_000_000)
def draw_seed(seed):
    """Convert `seed` (int, `SeedGenerator`, or `None`) to a seed tensor."""
    from keras.src.backend import convert_to_tensor
    from keras.src.backend import random_seed_dtype

    if isinstance(seed, SeedGenerator):
        return seed.next()
    if isinstance(seed, int):
        return convert_to_tensor([seed, 0], dtype=random_seed_dtype())
    if seed is None:
        # Unseeded draws pull from (and scramble) the global generator.
        return global_seed_generator().next(ordered=False)
    raise ValueError(
        "Argument `seed` must be either an integer "
        "or an instance of `SeedGenerator`. "
        f"Received: seed={seed} (of type {type(seed)})"
    )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/random/random_test.py | keras/src/random/random_test.py | import numpy as np
import pytest
from absl.testing import parameterized
import keras
from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.backend.common import dtypes
from keras.src.backend.common import standardize_dtype
from keras.src.random import random
from keras.src.random import seed_generator
from keras.src.testing.test_utils import named_product
from keras.src.utils.rng_utils import set_random_seed
class RandomCorrectnessTest(testing.TestCase):
    """Shape and statistical correctness checks for `keras.random` ops."""
    @parameterized.parameters(
        {"seed": 10, "shape": (5,), "mean": 0, "stddev": 1},
        {"seed": 10, "shape": (2, 3), "mean": 0, "stddev": 1},
        {"seed": 10, "shape": (2, 3, 4), "mean": 0, "stddev": 1},
        {"seed": 10, "shape": (2, 3), "mean": 10, "stddev": 1},
        {"seed": 10, "shape": (2, 3), "mean": 10, "stddev": 3},
    )
    def test_normal(self, seed, shape, mean, stddev):
        # Only the output shape is compared against the NumPy reference;
        # exact values are backend-specific.
        np.random.seed(seed)
        np_res = np.random.normal(loc=mean, scale=stddev, size=shape)
        res = random.normal(shape, mean=mean, stddev=stddev, seed=seed)
        self.assertEqual(res.shape, shape)
        self.assertEqual(res.shape, np_res.shape)
    @parameterized.parameters(
        {"seed": 10, "shape": (5,), "minval": 0, "maxval": 1},
        {"seed": 10, "shape": (2, 3), "minval": 0, "maxval": 1},
        {"seed": 10, "shape": (2, 3, 4), "minval": 0, "maxval": 2},
        {"seed": 10, "shape": (2, 3), "minval": -1, "maxval": 1},
        {"seed": 10, "shape": (2, 3), "minval": 1, "maxval": 3},
    )
    def test_uniform(self, seed, shape, minval, maxval):
        # Checks shape plus that the sample max lies in [minval, maxval].
        np.random.seed(seed)
        np_res = np.random.uniform(low=minval, high=maxval, size=shape)
        res = random.uniform(shape, minval=minval, maxval=maxval, seed=seed)
        self.assertEqual(res.shape, shape)
        self.assertEqual(res.shape, np_res.shape)
        self.assertLessEqual(ops.max(res), maxval)
        self.assertGreaterEqual(ops.max(res), minval)
    @parameterized.parameters(
        {"seed": 10, "num_samples": 1, "batch_size": 1},
        {"seed": 10, "num_samples": 5, "batch_size": 2},
        {"seed": 10, "num_samples": 10, "batch_size": 4},
        {"seed": 10, "num_samples": 15, "batch_size": 8},
    )
    def test_categorical(self, seed, num_samples, batch_size):
        np.random.seed(seed)
        # Create logits that definitely favors the batch index after a softmax
        # is applied. Without a softmax, this would be close to random.
        logits = np.eye(batch_size) * 1e5 + 1e6
        res = random.categorical(logits, num_samples, seed=seed)
        # Outputs should have shape `(batch_size, num_samples)`, where each
        # output index matches the batch index.
        self.assertEqual(res.shape, (batch_size, num_samples))
        expected = np.tile(np.arange(batch_size)[:, None], (1, num_samples))
        self.assertAllClose(res, expected)
    @parameterized.parameters(
        {"seed": 10, "shape": (5,), "min": 0, "max": 10, "dtype": "uint16"},
        {"seed": 10, "shape": (2, 3), "min": 0, "max": 10, "dtype": "uint32"},
        {"seed": 10, "shape": (2, 3, 4), "min": 0, "max": 2, "dtype": "int8"},
        {"seed": 10, "shape": (2, 3), "min": -1, "max": 1, "dtype": "int16"},
        {"seed": 10, "shape": (2, 3), "min": 1, "max": 3, "dtype": "int32"},
    )
    def test_randint(self, seed, shape, min, max, dtype):
        # Checks shape, value bounds, and (where supported) dtype.
        np.random.seed(seed)
        np_res = np.random.randint(low=min, high=max, size=shape)
        res = random.randint(
            shape, minval=min, maxval=max, seed=seed, dtype=dtype
        )
        self.assertEqual(res.shape, shape)
        self.assertEqual(res.shape, np_res.shape)
        self.assertLessEqual(ops.max(res), max)
        self.assertGreaterEqual(ops.max(res), min)
        # Torch has incomplete dtype support for uints; will remap some dtypes.
        if keras.backend.backend() != "torch":
            self.assertEqual(backend.standardize_dtype(res.dtype), dtype)
    @parameterized.parameters(
        {"seed": 10, "shape": (5,), "mean": 0, "stddev": 1},
        {"seed": 10, "shape": (2, 3), "mean": 0, "stddev": 1},
        {"seed": 10, "shape": (2, 3, 4), "mean": 0, "stddev": 1},
        {"seed": 10, "shape": (2, 3), "mean": 10, "stddev": 1},
        {"seed": 10, "shape": (2, 3), "mean": 10, "stddev": 3},
        # Test list shapes.
        {"seed": 10, "shape": [2, 3], "mean": 10, "stddev": 3},
    )
    def test_truncated_normal(self, seed, shape, mean, stddev):
        # Samples must lie within two standard deviations of the mean.
        np.random.seed(seed)
        np_res = np.random.normal(loc=mean, scale=stddev, size=shape)
        res = random.truncated_normal(
            shape, mean=mean, stddev=stddev, seed=seed
        )
        self.assertEqual(res.shape, tuple(shape))
        self.assertEqual(res.shape, np_res.shape)
        self.assertLessEqual(ops.max(res), mean + 2 * stddev)
        self.assertGreaterEqual(ops.max(res), mean - 2 * stddev)
    def test_dropout(self):
        # rate=0 is the identity; rate=0.8 must zero out elements and
        # scale the survivors up (so the max grows).
        x = ops.ones((3, 5))
        self.assertAllClose(random.dropout(x, rate=0, seed=0), x)
        x_res = random.dropout(x, rate=0.8, seed=0)
        self.assertGreater(ops.max(x_res), ops.max(x))
        self.assertGreater(ops.sum(x_res == 0), 2)
    def test_dropout_noise_shape(self):
        # `None` entries in noise_shape broadcast over those input axes;
        # output shape is unchanged.
        inputs = ops.ones((2, 3, 5, 7))
        x = random.dropout(
            inputs, rate=0.3, noise_shape=[None, 3, 5, None], seed=0
        )
        self.assertEqual(x.shape, (2, 3, 5, 7))
    def test_global_seed_generator(self):
        # Check that unseeded RNG calls use and update global_rng_state()
        def random_numbers(seed):
            rng_state = seed_generator.global_seed_generator().state
            rng_state.assign(seed)
            x = random.normal((), seed=None)
            y = random.normal((), seed=None)
            return x, y, rng_state.value
        if backend.backend() == "tensorflow":
            import tensorflow as tf
            random_numbers = tf.function(jit_compile=True)(random_numbers)
        seed = ops.zeros((2,))
        seed0 = ops.convert_to_numpy(seed)
        x1, y1, seed = random_numbers(seed)
        x1 = ops.convert_to_numpy(x1)
        y1 = ops.convert_to_numpy(y1)
        seed1 = ops.convert_to_numpy(seed)
        x2, y2, seed = random_numbers(seed)
        x2 = ops.convert_to_numpy(x2)
        y2 = ops.convert_to_numpy(y2)
        seed2 = ops.convert_to_numpy(seed)
        x3, y3, seed = random_numbers(seed)
        x3 = ops.convert_to_numpy(x3)
        y3 = ops.convert_to_numpy(y3)
        seed3 = ops.convert_to_numpy(seed)
        # The state counter (element 1) must advance across calls...
        self.assertNotEqual(seed0[1], seed1[1])
        self.assertNotEqual(seed1[1], seed2[1])
        self.assertNotEqual(seed2[1], seed3[1])
        # ...and every drawn value must be distinct from the others.
        self.assertGreater(np.abs(x1 - y1), 1e-4)
        # NOTE(review): the next line duplicates the assertion above
        # (x1 vs y1 checked twice); it was likely intended for a
        # different pair.
        self.assertGreater(np.abs(x1 - y1), 1e-4)
        self.assertGreater(np.abs(x2 - y2), 1e-4)
        self.assertGreater(np.abs(x3 - y3), 1e-4)
        self.assertGreater(np.abs(x1 - x2), 1e-4)
        self.assertGreater(np.abs(x1 - x3), 1e-4)
        self.assertGreater(np.abs(x2 - x3), 1e-4)
        self.assertGreater(np.abs(y1 - y2), 1e-4)
        self.assertGreater(np.abs(y1 - y3), 1e-4)
        self.assertGreater(np.abs(y2 - y3), 1e-4)
        seed_generator.global_seed_generator().state.assign(seed)
    def test_shuffle(self):
        # Shuffling must permute only along the requested axis: sums along
        # that axis are preserved, sums along the other axis change.
        x = np.arange(100).reshape(10, 10)
        # Test axis=0
        y = random.shuffle(x, seed=0)
        self.assertFalse(np.all(x == ops.convert_to_numpy(y)))
        self.assertAllClose(np.sum(x, axis=0), ops.sum(y, axis=0))
        self.assertNotAllClose(np.sum(x, axis=1), ops.sum(y, axis=1))
        # Test axis=1
        y = random.shuffle(x, axis=1, seed=0)
        self.assertFalse(np.all(x == ops.convert_to_numpy(y)))
        self.assertAllClose(np.sum(x, axis=1), ops.sum(y, axis=1))
        self.assertNotAllClose(np.sum(x, axis=0), ops.sum(y, axis=0))
    @parameterized.parameters(
        {"seed": 10, "shape": (5, 2), "alpha": 2.0, "dtype": "float16"},
        {"seed": 10, "shape": (2,), "alpha": 1.5, "dtype": "float32"},
        {"seed": 10, "shape": (2, 3), "alpha": 0.5, "dtype": "float32"},
    )
    def test_gamma(self, seed, shape, alpha, dtype):
        # Gamma samples are strictly positive.
        values = random.gamma(shape, alpha=alpha, seed=seed, dtype=dtype)
        self.assertEqual(ops.shape(values), shape)
        self.assertEqual(backend.standardize_dtype(values.dtype), dtype)
        self.assertGreater(np.min(ops.convert_to_numpy(values)), 0.0)
    @parameterized.parameters(
        {
            "seed": 10,
            "shape": (5, 2),
            "counts": 5e4,
            "probabilities": 0.5,
            "dtype": "float16",
        },
        {
            "seed": 10,
            "shape": (2,),
            "counts": 1e5,
            "probabilities": 0.5,
            "dtype": "float32",
        },
        {
            "seed": 10,
            "shape": (2, 3),
            "counts": [[1e5, 2e5, 3e5], [4e5, 5e5, 6e5]],
            "probabilities": [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]],
            "dtype": "float32",
        },
    )
    def test_binomial(self, seed, shape, counts, probabilities, dtype):
        set_random_seed(1337)
        values = random.binomial(
            shape=shape,
            counts=counts,
            probabilities=probabilities,
            seed=seed,
            dtype=dtype,
        )
        self.assertEqual(ops.shape(values), shape)
        self.assertEqual(backend.standardize_dtype(values.dtype), dtype)
        # Ensure the number of times each event occurs doesn't exceed the
        # total input count specified by the user for that event, via an
        # element-wise comparison between the `counts` array and the
        # generated `values` array.
        values_np = ops.convert_to_numpy(values)
        assert np.greater_equal(np.array(counts), values_np).all()
        # Compute the empirical probability of each event (the generated
        # value divided by the corresponding total count) and make sure it
        # approximates the input probabilities.
        generated_probabilities = values_np / np.array(counts)
        probabilities = np.ones(shape) * np.array(probabilities)
        self.assertAllClose(
            probabilities, generated_probabilities, rtol=0.005, atol=0.005
        )
    @parameterized.parameters(
        {
            "seed": 10,
            "shape": (10000,),
            "alpha": 3.0,
            "beta": 2.0,
            "dtype": "float16",
        },
        {
            "seed": 10,
            "shape": (10000, 3),
            "alpha": [[7.0, 0.5, 1.5]],
            "beta": [[15.0, 0.9, 4.5]],
            "dtype": "float32",
        },
        {
            "seed": 10,
            "shape": (10000, 30),
            "alpha": 1.0,
            "beta": 1.0,
            "dtype": "float32",
        },
    )
    def test_beta(self, seed, shape, alpha, beta, dtype):
        # Beta samples must lie in [0, 1]; the empirical mean and variance
        # are compared against the analytic Beta distribution moments.
        set_random_seed(1337)
        values = random.beta(
            shape=shape, alpha=alpha, beta=beta, seed=seed, dtype=dtype
        )
        self.assertEqual(ops.shape(values), shape)
        self.assertEqual(backend.standardize_dtype(values.dtype), dtype)
        values_np = ops.convert_to_numpy(values)
        self.assertGreaterEqual(np.min(values_np), b=0.0)
        self.assertLessEqual(np.max(values_np), b=1.0)
        _alpha_is_an_array = False
        if isinstance(alpha, list):
            alpha = np.array(alpha)
            beta = np.array(beta)
            _alpha_is_an_array = True
        # Mean check:
        # For a beta distributed random variable,
        # mean = alpha / (alpha + beta)
        expected_mean = alpha / (alpha + beta)
        if _alpha_is_an_array:
            actual_mean = np.mean(values_np, axis=0)
            self.assertAllClose(
                expected_mean.flatten(), actual_mean, atol=0.005, rtol=0.005
            )
        else:
            actual_mean = np.mean(values_np.flatten())
            self.assertAlmostEqual(expected_mean, actual_mean, decimal=2)
        # Variance check:
        # For a beta distributed random variable,
        # variance = (alpha * beta) / ((alpha + beta)^2)(alpha + beta + 1)
        expected_variance = (alpha * beta) / (
            np.square(alpha + beta) * (alpha + beta + 1)
        )
        if _alpha_is_an_array:
            actual_variance = np.var(values_np, axis=0)
            self.assertAllClose(
                expected_variance.flatten(),
                actual_variance,
                atol=0.005,
                rtol=0.005,
            )
        else:
            actual_variance = np.var(values_np.flatten())
            self.assertAlmostEqual(
                expected_variance, actual_variance, decimal=2
            )
class RandomBehaviorTest(testing.TestCase):
    """Behavioral edge cases: tf.data interop, validation, JAX tracing."""
    def test_beta_tf_data_compatibility(self):
        # `random.beta` must be usable from a DataLayer mapped over a
        # tf.data pipeline regardless of the active Keras backend.
        import tensorflow as tf
        from keras.src.layers.preprocessing.data_layer import DataLayer
        from keras.src.random.seed_generator import SeedGenerator
        class BetaLayer(DataLayer):
            def __init__(self, seed=None, **kwargs):
                super().__init__(**kwargs)
                self.seed = seed
                self.generator = SeedGenerator(seed)
            def compute_output_shape(self, input_shape):
                return input_shape
            def call(self, inputs):
                seed_generator = self._get_seed_generator(self.backend._backend)
                noise = self.backend.random.beta(
                    self.backend.shape(inputs),
                    alpha=0.5,
                    beta=0.5,
                    seed=seed_generator,
                )
                inputs = inputs + noise
                return inputs
        layer = BetaLayer()
        input_data = np.random.random([2, 4, 4, 3])
        ds = tf.data.Dataset.from_tensor_slices(input_data).batch(2).map(layer)
        for output in ds.take(1):
            output = ops.convert_to_numpy(output)
            self.assertEqual(output.shape, (2, 4, 4, 3))
    def test_categorical_errors(self):
        # `categorical` rejects logits that are not 2-D.
        with self.assertRaises(ValueError):
            random.categorical(np.ones((5,)), 5)
        with self.assertRaises(ValueError):
            random.categorical(np.ones((5, 5, 5)), 5)
    def test_randint_dtype_validation(self):
        with self.assertRaisesRegex(
            ValueError, "`keras.random.randint` requires an integer `dtype`."
        ):
            random.randint((3, 4), minval=0, maxval=10, dtype="float64")
    def test_uniform_dtype_validation(self):
        with self.assertRaisesRegex(
            ValueError,
            "`keras.random.uniform` requires a floating point `dtype`.",
        ):
            random.uniform((3, 4), minval=0, maxval=10, dtype="int64")
    @pytest.mark.skipif(
        keras.backend.backend() != "jax",
        reason="This test requires `jax` as the backend.",
    )
    def test_dropout_jax_jit_stateless(self):
        # Dropout must remain stateless (traceable) under jax.jit when
        # wrapped in a StatelessScope.
        import jax
        import jax.numpy as jnp
        x = ops.ones(3)
        @jax.jit
        def train_step(x):
            with keras.src.backend.StatelessScope():
                x = keras.layers.Dropout(rate=0.1)(x, training=True)
            return x
        x = train_step(x)
        self.assertIsInstance(x, jnp.ndarray)
    @pytest.mark.skipif(
        keras.backend.backend() != "jax",
        reason="This test requires `jax` as the backend.",
    )
    def test_jax_rngkey_seed(self):
        # A raw jax.random.PRNGKey is accepted directly as `seed`.
        import jax
        import jax.numpy as jnp
        seed = 1234
        rng = jax.random.PRNGKey(seed)
        self.assertEqual(rng.shape, (2,))
        self.assertEqual(rng.dtype, jnp.uint32)
        x = random.randint((3, 5), 0, 10, seed=rng)
        self.assertIsInstance(x, jnp.ndarray)
    @pytest.mark.skipif(
        keras.backend.backend() != "jax",
        reason="This test requires `jax` as the backend.",
    )
    def test_jax_unseed_disallowed_during_tracing(self):
        # Unseeded draws inside jax.jit must raise rather than silently
        # trace to a constant.
        import jax
        @jax.jit
        def jit_fn():
            return random.randint((2, 2), 0, 10, seed=None)
        with self.assertRaisesRegex(
            ValueError, "you should only use seeded random ops"
        ):
            jit_fn()
    @pytest.mark.skipif(
        backend.backend() != "tensorflow",
        reason="This test requires `tensorflow` as the backend.",
    )
    def test_tf_cast_seed(self):
        # Integer seeds beyond int32 range are apparently wrapped with a
        # floormod so this overflowing seed matches seed=1001.
        import tensorflow as tf
        inputs = tf.ones([2, 3], dtype="float32")
        seed = tf.int32.max + 1000  # Test floormod operation
        outputs_mod = random.categorical(inputs, 2, seed=seed)
        outputs_nomod = random.categorical(inputs, 2, seed=1001)
        self.assertAllClose(outputs_mod, outputs_nomod)
class RandomDTypeTest(testing.TestCase):
    """Test the dtype to verify that the behavior matches JAX."""
    # 64-bit dtypes are excluded: they are not uniformly supported
    # across backends.
    INT_DTYPES = [x for x in dtypes.INT_TYPES if x not in ("uint64", "int64")]
    FLOAT_DTYPES = [x for x in dtypes.FLOAT_TYPES if x not in ("float64",)]
    if backend.backend() == "torch":
        # Torch lacks uint16/uint32 support, so skip those there.
        INT_DTYPES = [x for x in INT_DTYPES if x not in ("uint16", "uint32")]
    @parameterized.named_parameters(named_product(dtype=FLOAT_DTYPES))
    def test_normal(self, dtype):
        res = random.normal((2, 3), dtype=dtype)
        self.assertEqual(standardize_dtype(res.dtype), dtype)
    @parameterized.named_parameters(named_product(dtype=INT_DTYPES))
    def test_categorical(self, dtype):
        logits = np.eye(4) * 1e5 + 1e6
        res = random.categorical(logits, 10, dtype=dtype)
        self.assertEqual(standardize_dtype(res.dtype), dtype)
    @parameterized.named_parameters(named_product(dtype=FLOAT_DTYPES))
    def test_uniform(self, dtype):
        res = random.uniform((2, 3), dtype=dtype)
        self.assertEqual(standardize_dtype(res.dtype), dtype)
    @parameterized.named_parameters(named_product(dtype=INT_DTYPES))
    def test_randint(self, dtype):
        res = random.randint((2, 3), 0, 10, dtype=dtype)
        self.assertEqual(standardize_dtype(res.dtype), dtype)
    @parameterized.named_parameters(named_product(dtype=FLOAT_DTYPES))
    def test_truncated_normal(self, dtype):
        res = random.truncated_normal((2, 3), dtype=dtype)
        self.assertEqual(standardize_dtype(res.dtype), dtype)
    @parameterized.named_parameters(named_product(dtype=FLOAT_DTYPES))
    def test_dropout(self, dtype):
        # Dropout must preserve the input dtype.
        x = ops.ones((3, 5), dtype=dtype)
        res = random.dropout(x, rate=0.8, seed=0)
        self.assertEqual(standardize_dtype(res.dtype), dtype)
    @parameterized.named_parameters(named_product(dtype=FLOAT_DTYPES))
    def test_gamma(self, dtype):
        res = random.gamma((2, 3), 2.0, dtype=dtype)
        self.assertEqual(standardize_dtype(res.dtype), dtype)
    @parameterized.named_parameters(named_product(dtype=FLOAT_DTYPES))
    def test_binomial(self, dtype):
        res = random.binomial((2,), 1e5, 0.5, dtype=dtype)
        self.assertEqual(standardize_dtype(res.dtype), dtype)
    @parameterized.named_parameters(named_product(dtype=FLOAT_DTYPES))
    def test_beta(self, dtype):
        res = random.beta((2, 3), 2.0, 3.0, dtype=dtype)
        self.assertEqual(standardize_dtype(res.dtype), dtype)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/random/random.py | keras/src/random/random.py | from keras.src import backend
from keras.src.api_export import keras_export
@keras_export("keras.random.normal")
def normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
    """Sample from a normal (Gaussian) distribution.
    Args:
        shape: Shape of the output tensor.
        mean: Float. Mean of the distribution. Defaults to `0.0`.
        stddev: Float. Standard deviation of the distribution. Defaults
            to `1.0`.
        dtype: Optional floating point dtype. When unset,
            `keras.config.floatx()` is used (`"float32"` unless changed
            via `keras.config.set_floatx(float_dtype)`).
        seed: Optional Python integer or `keras.random.SeedGenerator`
            instance. With the default `seed=None`, an internal global
            `SeedGenerator` is used. An integer seed produces the same
            values on every call; pass a `SeedGenerator` to obtain fresh
            values per call. Note: when tracing a function with the JAX
            backend the global generator is unsupported, so `seed=None`
            raises an error and an explicit `seed` must be provided.
    """
    return backend.random.normal(
        shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed
    )
@keras_export("keras.random.categorical")
def categorical(logits, num_samples, dtype="int32", seed=None):
    """Sample class indices from per-row categorical distributions.
    `logits` must be 2-D with shape `(batch_size, num_classes)`; each
    row holds the unnormalized log-probabilities of one distribution.
    The output has shape `(batch_size, num_samples)`, where each row
    contains `num_samples` independent draws from the corresponding
    input row.
    Args:
        logits: 2-D tensor of unnormalized log-probabilities.
        num_samples: Int. Number of independent samples to draw per row;
            becomes the second dimension of the output.
        dtype: Optional dtype of the output tensor. Defaults to `"int32"`.
        seed: Optional Python integer or `keras.random.SeedGenerator`
            instance. With the default `seed=None`, an internal global
            `SeedGenerator` is used. An integer seed produces the same
            values on every call; pass a `SeedGenerator` for fresh values
            per call. When tracing with the JAX backend, `seed=None`
            raises and an explicit `seed` must be provided.
    Returns:
        A 2-D tensor of shape `(batch_size, num_samples)`.
    Raises:
        ValueError: If `logits` is not 2-D.
    """
    rank = len(list(backend.convert_to_tensor(logits).shape))
    if rank != 2:
        raise ValueError(
            "`logits` should be a 2-D tensor with shape "
            f"[batch_size, num_classes]. Received: logits={logits}"
        )
    return backend.random.categorical(
        logits, num_samples, dtype=dtype, seed=seed
    )
@keras_export("keras.random.uniform")
def uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
    """Sample from a uniform distribution over `[minval, maxval)`.
    The lower bound `minval` is included in the range; the upper bound
    `maxval` is excluded. `dtype` must be a floating point type; the
    default range is `[0, 1)`.
    Args:
        shape: Shape of the output tensor.
        minval: Float. Inclusive lower bound. Defaults to `0.0`.
        maxval: Float. Exclusive upper bound. Defaults to `1.0`.
        dtype: Optional floating point dtype. When unset,
            `keras.config.floatx()` is used (`"float32"` unless changed
            via `keras.config.set_floatx(float_dtype)`).
        seed: Optional Python integer or `keras.random.SeedGenerator`
            instance. With the default `seed=None`, an internal global
            `SeedGenerator` is used. An integer seed produces the same
            values on every call; pass a `SeedGenerator` for fresh values
            per call. When tracing with the JAX backend, `seed=None`
            raises and an explicit `seed` must be provided.
    Raises:
        ValueError: If `dtype` is given and not a floating point type.
    """
    if dtype:
        if not backend.is_float_dtype(dtype):
            raise ValueError(
                "`keras.random.uniform` requires a floating point `dtype`. "
                f"Received: dtype={dtype} "
            )
    return backend.random.uniform(
        shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed
    )
@keras_export("keras.random.randint")
def randint(shape, minval, maxval, dtype="int32", seed=None):
    """Draw random integers from a uniform distribution.
    The generated values follow a uniform distribution in the range
    `[minval, maxval)`. The lower bound `minval` is included in the range,
    while the upper bound `maxval` is excluded.
    `dtype` must be an integer type.
    Args:
        shape: The shape of the random values to generate.
        minval: Int. Lower bound of the range of
            random values to generate (inclusive).
        maxval: Int. Upper bound of the range of
            random values to generate (exclusive).
        dtype: Optional dtype of the tensor. Only integer types are
            supported. Defaults to `"int32"`.
        seed: Optional Python integer or instance of
            `keras.random.SeedGenerator`.
            By default, the `seed` argument is `None`, and an internal global
            `keras.random.SeedGenerator` is used. The `seed` argument can be
            used to ensure deterministic (repeatable) random number generation.
            Note that passing an integer as the `seed` value will produce the
            same random values for each call. To generate different random
            values for repeated calls, an instance of
            `keras.random.SeedGenerator` must be provided as the `seed` value.
            Remark concerning the JAX backend: When tracing functions with the
            JAX backend the global `keras.random.SeedGenerator` is not
            supported. Therefore, during tracing the default value seed=None
            will produce an error, and a `seed` argument must be provided.
    Raises:
        ValueError: If `dtype` is given and not an integer type.
    """
    if dtype and not backend.is_int_dtype(dtype):
        raise ValueError(
            "`keras.random.randint` requires an integer `dtype`. "
            f"Received: dtype={dtype} "
        )
    return backend.random.randint(
        shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed
    )
@keras_export("keras.random.truncated_normal")
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
    """Sample from a normal distribution truncated at two standard deviations.
    Values are drawn from a normal distribution with the given mean and
    standard deviation; any sample falling more than two standard
    deviations from the mean is discarded and redrawn.
    Args:
        shape: Shape of the output tensor.
        mean: Float. Mean of the underlying distribution. Defaults to
            `0.0`.
        stddev: Float. Standard deviation of the underlying distribution.
            Defaults to `1.0`.
        dtype: Optional floating point dtype. When unset,
            `keras.config.floatx()` is used (`"float32"` unless changed
            via `keras.config.set_floatx(float_dtype)`).
        seed: Optional Python integer or `keras.random.SeedGenerator`
            instance. With the default `seed=None`, an internal global
            `SeedGenerator` is used. An integer seed produces the same
            values on every call; pass a `SeedGenerator` for fresh values
            per call. When tracing with the JAX backend, `seed=None`
            raises and an explicit `seed` must be provided.
    """
    return backend.random.truncated_normal(
        shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed
    )
@keras_export("keras.random.dropout")
def dropout(inputs, rate, noise_shape=None, seed=None):
    """Apply random dropout to a tensor.

    Entries of `inputs` are randomly zeroed out with probability `rate`
    (inverted dropout; presumably surviving entries are scaled by
    `1 / (1 - rate)` — see the backend implementation for the exact
    scaling behavior).

    Args:
        inputs: The tensor to apply dropout to.
        rate: Float between 0 and 1, the fraction of entries to drop.
        noise_shape: Optional shape for the dropout mask, broadcast
            against `inputs` (e.g. to share the mask along some axes).
        seed: Optional Python integer or instance of
            `keras.random.SeedGenerator` (same semantics as the other
            `keras.random` functions).

    Returns:
        A tensor with the same shape as `inputs` with dropout applied.
    """
    return backend.random.dropout(
        inputs, rate, noise_shape=noise_shape, seed=seed
    )
@keras_export("keras.random.shuffle")
def shuffle(x, axis=0, seed=None):
    """Shuffle the elements of a tensor uniformly at random along an axis.

    Args:
        x: The tensor to be shuffled.
        axis: An integer specifying the axis along which to shuffle.
            Defaults to `0`.
        seed: Optional Python integer or instance of
            `keras.random.SeedGenerator`.
            When `None` (the default), an internal global
            `keras.random.SeedGenerator` is used. Passing an integer
            produces the same values on every call; pass a
            `keras.random.SeedGenerator` instance to obtain different
            values across repeated calls. Note for the JAX backend: the
            global seed generator is not supported while tracing, so an
            explicit `seed` must be provided there.
    """
    return backend.random.shuffle(x, axis=axis, seed=seed)
@keras_export("keras.random.gamma")
def gamma(shape, alpha, dtype=None, seed=None):
    """Draw random samples from the Gamma distribution.

    Args:
        shape: The shape of the random values to generate.
        alpha: Float, the parameter of the distribution.
        dtype: Optional dtype of the tensor. Only floating point types are
            supported. If not specified, `keras.config.floatx()` is used,
            which defaults to `float32` unless configured otherwise (via
            `keras.config.set_floatx(float_dtype)`).
        seed: Optional Python integer or instance of
            `keras.random.SeedGenerator`.
            When `None` (the default), an internal global
            `keras.random.SeedGenerator` is used. Passing an integer
            produces the same values on every call; pass a
            `keras.random.SeedGenerator` instance to obtain different
            values across repeated calls. Note for the JAX backend: the
            global seed generator is not supported while tracing, so an
            explicit `seed` must be provided there.
    """
    return backend.random.gamma(shape, alpha=alpha, dtype=dtype, seed=seed)
@keras_export("keras.random.binomial")
def binomial(shape, counts, probabilities, dtype=None, seed=None):
    """Draw samples from a Binomial distribution.

    Each sample is drawn from a Binomial distribution with the specified
    trial count and per-trial probability of success.

    Args:
        shape: The shape of the random values to generate.
        counts: A number or array of numbers representing the number of
            trials. It must be broadcastable with `probabilities`.
        probabilities: A float or array of floats representing the
            probability of success of an individual event. It must be
            broadcastable with `counts`.
        dtype: Optional dtype of the tensor. Only floating point types are
            supported. If not specified, `keras.config.floatx()` is used,
            which defaults to `float32` unless configured otherwise (via
            `keras.config.set_floatx(float_dtype)`).
        seed: Optional Python integer or instance of
            `keras.random.SeedGenerator`.
            When `None` (the default), an internal global
            `keras.random.SeedGenerator` is used. Passing an integer
            produces the same values on every call; pass a
            `keras.random.SeedGenerator` instance to obtain different
            values across repeated calls. Note for the JAX backend: the
            global seed generator is not supported while tracing, so an
            explicit `seed` must be provided there.
    """
    return backend.random.binomial(
        shape,
        counts=counts,
        probabilities=probabilities,
        dtype=dtype,
        seed=seed,
    )
@keras_export("keras.random.beta")
def beta(shape, alpha, beta, dtype=None, seed=None):
    """Draw samples from a Beta distribution.

    The samples are drawn from a Beta distribution parametrized by
    `alpha` and `beta`.

    Args:
        shape: The shape of the random values to generate.
        alpha: Float or an array of floats representing the first
            parameter alpha. Must be broadcastable with `beta` and
            `shape`.
        beta: Float or an array of floats representing the second
            parameter beta. Must be broadcastable with `alpha` and
            `shape`.
        dtype: Optional dtype of the tensor. Only floating point types are
            supported. If not specified, `keras.config.floatx()` is used,
            which defaults to `float32` unless configured otherwise (via
            `keras.config.set_floatx(float_dtype)`).
        seed: Optional Python integer or instance of
            `keras.random.SeedGenerator`.
            When `None` (the default), an internal global
            `keras.random.SeedGenerator` is used. Passing an integer
            produces the same values on every call; pass a
            `keras.random.SeedGenerator` instance to obtain different
            values across repeated calls. Note for the JAX backend: the
            global seed generator is not supported while tracing, so an
            explicit `seed` must be provided there.
    """
    return backend.random.beta(
        shape=shape, alpha=alpha, beta=beta, dtype=dtype, seed=seed
    )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/random/__init__.py | keras/src/random/__init__.py | from keras.src.random.random import categorical
from keras.src.random.random import dropout
from keras.src.random.random import gamma
from keras.src.random.random import normal
from keras.src.random.random import randint
from keras.src.random.random import shuffle
from keras.src.random.random import truncated_normal
from keras.src.random.random import uniform
from keras.src.random.seed_generator import SeedGenerator
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/tree/tree_api.py | keras/src/tree/tree_api.py | import warnings
from keras.src.api_export import keras_export
from keras.src.backend.config import backend
from keras.src.utils.module_utils import dmtree
from keras.src.utils.module_utils import optree
if backend() == "torch":
# torchtree_impl is especially used for Torch backend, as it works better
# with torch.compile.
from keras.src.tree import torchtree_impl as tree_impl
elif optree.available:
from keras.src.tree import optree_impl as tree_impl
elif dmtree.available:
from keras.src.tree import dmtree_impl as tree_impl
else:
raise ImportError(
"To use Keras, you need to have `optree` installed. "
"Install it via `pip install optree`"
)
def register_tree_node_class(cls):
    """Class decorator registering `cls` as a custom tree node type.

    Delegates to the active tree backend implementation and returns
    `cls`, so it can be used directly as a decorator.
    """
    return tree_impl.register_tree_node_class(cls)
@keras_export("keras.tree.MAP_TO_NONE")
class MAP_TO_NONE:
    """Special value for use with `traverse()`.

    Return this class from the traversal function to replace a sub-tree
    with `None` (returning `None` itself means "no replacement").
    """

    pass
@keras_export("keras.tree.is_nested")
def is_nested(structure):
    """Check whether `structure` is a nested structure.

    A structure counts as nested when it is a sequence, a mapping, or a
    namedtuple; atoms (numbers, strings, `None`, ...) are not nested.

    Examples:

    >>> keras.tree.is_nested(42)
    False
    >>> keras.tree.is_nested({"foo": 42})
    True

    Args:
        structure: A structure to check.

    Returns:
        `True` if `structure` is nested, `False` otherwise.
    """
    return tree_impl.is_nested(structure)
@keras_export("keras.tree.traverse")
def traverse(func, structure, top_down=True):
    """Traverse `structure` depth-first, applying `func` to sub-trees.

    With `top_down=True` (the default), parents are visited before their
    children, which makes it possible to avoid descending into a
    sub-tree:

    * `func(subtree) is None`: traversal continues into the sub-tree.
    * `func(subtree) is not None`: traversal stops there; the sub-tree
      is replaced by `func(subtree)` in the result (use the special
      value `MAP_TO_NONE` to replace it with `None`).

    With `top_down=False`, children are visited before their parents:

    * `func(subtree) is None`: the traversed sub-tree is kept unaltered.
    * `func(subtree) is not None`: the sub-tree is replaced by
      `func(subtree)` in the result (use `MAP_TO_NONE` for `None`).

    Examples:

    >>> v = []
    >>> keras.tree.traverse(v.append, [(1, 2), [3], {"a": 4}], top_down=True)
    [(1, 2), [3], {'a': 4}]
    >>> v
    [[(1, 2), [3], {'a': 4}], (1, 2), 1, 2, [3], 3, {'a': 4}, 4]

    >>> v = []
    >>> keras.tree.traverse(v.append, [(1, 2), [3], {"a": 4}], top_down=False)
    [(1, 2), [3], {'a': 4}]
    >>> v
    [1, 2, (1, 2), 3, [3], 4, {'a': 4}, [(1, 2), [3], {'a': 4}]]

    Args:
        func: The function applied to each sub-nest of the structure.
        structure: The structure to traverse.
        top_down: If `True`, parents are visited before their children.

    Returns:
        The structured output from the traversal.

    Raises:
        TypeError: If `func` is not callable.
    """
    return tree_impl.traverse(func, structure, top_down=top_down)
@keras_export("keras.tree.flatten")
def flatten(structure):
    """Flatten a possibly nested structure into a list of leaves.

    Dict values are emitted in sorted-key order for determinism, except
    for `collections.OrderedDict` instances, whose insertion order is
    used instead. `pack_sequence_as` follows the same convention, so the
    two functions correctly round-trip. Dictionaries with non-sortable
    keys are not supported.

    Examples:

    >>> keras.tree.flatten([[1, 2, 3], [4, [5], [[6]]]])
    [1, 2, 3, 4, 5, 6]

    >>> keras.tree.flatten(None)
    [None]

    >>> keras.tree.flatten(1)
    [1]

    >>> keras.tree.flatten({100: 'world!', 6: 'Hello'})
    ['Hello', 'world!']

    Args:
        structure: An arbitrarily nested structure.

    Returns:
        A list, the flattened version of the input `structure`.
    """
    return tree_impl.flatten(structure)
@keras_export("keras.tree.flatten_with_path")
def flatten_with_path(structure):
    """Flattens a possibly nested structure into a list.

    This is a variant of `flatten()` which produces a
    list of pairs: `(path, item)`. A path is a tuple of indices and/or keys
    which uniquely identifies the position of the corresponding item.

    Dictionaries with non-sortable keys are not supported.

    Examples:

    >>> keras.tree.flatten_with_path([{"foo": 42}])
    [((0, 'foo'), 42)]

    Args:
        structure: An arbitrarily nested structure.

    Returns:
        A list of `(path, item)` pairs corresponding to the flattened
        version of the input `structure`.
    """
    return tree_impl.flatten_with_path(structure)
@keras_export("keras.tree.map_structure")
def map_structure(func, *structures, none_is_leaf=True):
    """Apply `func` to every leaf of the given structures.

    All structures must share the same layout; `func` receives the
    corresponding leaf from each structure, and its results are packed
    back into that same layout.

    Examples:

    >>> structure = [[1], [2], [3]]
    >>> keras.tree.map_structure(lambda v: v**2, structure)
    [[1], [4], [9]]
    >>> keras.tree.map_structure(lambda x, y: x * y, structure, structure)
    [[1], [4], [9]]

    >>> Foo = collections.namedtuple('Foo', ['a', 'b'])
    >>> structure = Foo(a=1, b=2)
    >>> keras.tree.map_structure(lambda v: v * 2, structure)
    Foo(a=2, b=4)

    Args:
        func: A callable that accepts as many arguments as there are
            structures.
        *structures: Arbitrarily nested structures of the same layout.
        none_is_leaf: If `True`, `func` is called on `None` leaves. If
            `False`, `None` values are not passed to `func` and appear
            unchanged in the output.

    Returns:
        A new structure with the same layout as the given ones.

    Raises:
        TypeError: If `structures` is empty or `func` is not callable.
        ValueError: If there is more than one item in `structures` and
            some of the nested structures don't match according to the
            rules of `assert_same_structure`.
    """
    return tree_impl.map_structure(func, *structures, none_is_leaf=none_is_leaf)
@keras_export("keras.tree.map_structure_up_to")
def map_structure_up_to(shallow_structure, func, *structures):
    """Apply `func` through `structures`, descending only to `shallow_structure`.

    This is a variant of `map_structure` that only maps the given
    structures down to the depth of `shallow_structure`; any deeper
    nesting is passed to `func` as-is.

    Examples:

    >>> shallow_structure = [None, None]
    >>> structure = [[1, 1], [2, 2]]
    >>> keras.tree.map_structure_up_to(shallow_structure, len, structure)
    [2, 2]

    >>> shallow_structure = [None, [None, None]]
    >>> keras.tree.map_structure_up_to(shallow_structure, str, structure)
    ['[1, 1]', ['2', '2']]

    Args:
        shallow_structure: A structure with layout common to all
            `structures`.
        func: A callable that accepts as many arguments as there are
            structures.
        *structures: Arbitrarily nested structures of the same layout.

    Returns:
        A new structure with the same layout as `shallow_structure`.

    Raises:
        TypeError: If `structures` is empty or `func` is not callable.
        ValueError: If one of the items in `structures` doesn't match
            the nested structure of `shallow_structure` according to the
            rules of `assert_same_structure`. Items in `structures` may
            be nested deeper than `shallow_structure`, but never
            shallower.
    """
    return tree_impl.map_structure_up_to(shallow_structure, func, *structures)
@keras_export("keras.tree.assert_same_structure")
def assert_same_structure(a, b, check_types=None):
    """Assert that two structures are nested in the same way.

    The leaves can be of any type. At each level, the structures must be
    of the same type and have the same number of elements. `dict`,
    `OrderedDict` and `defaultdict` instances are all considered
    equivalent as long as they have the same set of keys. In contrast,
    `list`, `tuple`, `namedtuple` and `deque` are distinct structures —
    even two namedtuples with identical fields and names differ.

    Examples:

    >>> keras.tree.assert_same_structure([(0, 1)], [(2, 3)])

    >>> Foo = collections.namedtuple('Foo', ['a', 'b'])
    >>> AlsoFoo = collections.namedtuple('Foo', ['a', 'b'])
    >>> keras.tree.assert_same_structure(Foo(0, 1), Foo(2, 3))
    >>> keras.tree.assert_same_structure(Foo(0, 1), AlsoFoo(2, 3))
    Traceback (most recent call last):
        ...
    ValueError: The two structures don't have the same nested structure.
    ...

    Args:
        a: an arbitrarily nested structure.
        b: an arbitrarily nested structure.
        check_types: Deprecated. The behavior of this flag was
            inconsistent and it no longer has any effect. For a looser
            check, use `assert_same_paths` instead, which considers
            `list`, `tuple`, `namedtuple` and `deque` as matching
            structures.

    Raises:
        ValueError: If the two structures `a` and `b` don't match.
    """
    if check_types is not None:
        # Emit a deprecation warning tailored to the value that was passed;
        # the flag itself is ignored.
        if check_types:
            message = (
                "The `check_types` argument is deprecated and no longer has "
                "any effect, please remove."
            )
        else:
            message = (
                "The `check_types` argument is deprecated and no longer has "
                "any effect. For a looser check, use "
                "`keras.tree.assert_same_paths()`, which considers `list`, "
                "`tuple`, `namedtuple` and `deque` as matching"
            )
        warnings.warn(message, DeprecationWarning, stacklevel=2)
    return tree_impl.assert_same_structure(a, b)
@keras_export("keras.tree.assert_same_paths")
def assert_same_paths(a, b):
    """Asserts that two structures have identical paths in their tree structure.

    This function verifies that two nested structures have the same paths.
    Unlike `assert_same_structure`, this function only checks the paths
    and ignores the collection types.

    For Sequences, the path is the index: 0, 1, 2, etc. For Mappings, the
    path is the key, for instance "a", "b", "c". Note that namedtuples also
    use indices and not field names for the path.

    Examples:

    >>> keras.tree.assert_same_paths([0, 1], (2, 3))
    >>> Point1 = collections.namedtuple('Point1', ['x', 'y'])
    >>> Point2 = collections.namedtuple('Point2', ['x', 'y'])
    >>> keras.tree.assert_same_paths(Point1(0, 1), Point2(2, 3))

    Args:
        a: an arbitrarily nested structure.
        b: an arbitrarily nested structure.

    Raises:
        ValueError: If the paths in structure `a` don't match the paths in
            structure `b`. The error message will include the specific paths
            that differ.
    """
    return tree_impl.assert_same_paths(a, b)
@keras_export("keras.tree.pack_sequence_as")
def pack_sequence_as(structure, flat_sequence):
    """Pack a flat sequence back into the layout of `structure`.

    If `structure` is an atom, `flat_sequence` must be a single-item
    list; in that case `flat_sequence[0]` is returned.

    Dicts consume leaves in sorted-key order for determinism, except
    for `collections.OrderedDict` instances, whose insertion order is
    used instead. This is the same convention as `flatten`, so
    flattening followed by repacking (or vice-versa) round-trips
    correctly. Dictionaries with non-sortable keys are not supported.

    Examples:

    >>> structure = {"key3": "", "key1": "", "key2": ""}
    >>> flat_sequence = ["value1", "value2", "value3"]
    >>> keras.tree.pack_sequence_as(structure, flat_sequence)
    {"key3": "value3", "key1": "value1", "key2": "value2"}

    >>> structure = (("a", "b"), ("c", "d", "e"), "f")
    >>> flat_sequence = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
    >>> keras.tree.pack_sequence_as(structure, flat_sequence)
    ((1.0, 2.0), (3.0, 4.0, 5.0), 6.0)

    >>> structure = {"key3": {"c": ("alpha", "beta"), "a": ("gamma")},
    ... "key1": {"e": "val1", "d": "val2"}}
    >>> flat_sequence = ["val2", "val1", 3.0, 1.0, 2.0]
    >>> keras.tree.pack_sequence_as(structure, flat_sequence)
    {'key3': {'c': (1.0, 2.0), 'a': 3.0}, 'key1': {'e': 'val1', 'd': 'val2'}}

    Args:
        structure: Arbitrarily nested structure.
        flat_sequence: Flat sequence to pack.

    Returns:
        `flat_sequence` converted to have the same recursive structure
        as `structure`.

    Raises:
        TypeError: If `flat_sequence` is not iterable.
        ValueError: If `flat_sequence` cannot be repacked as
            `structure`; for instance, if it has too few or too many
            elements.
    """
    return tree_impl.pack_sequence_as(structure, flat_sequence)
@keras_export("keras.tree.lists_to_tuples")
def lists_to_tuples(structure):
    """Return `structure` with every `list` instance replaced by a `tuple`.

    Args:
        structure: Arbitrarily nested structure.

    Returns:
        The same structure but with tuples instead of lists.
    """
    return tree_impl.lists_to_tuples(structure)
@keras_export("keras.tree.map_shape_structure")
def map_shape_structure(func, structure):
    """Variant of `keras.tree.map_structure` that operates on shape tuples.

    Tuples containing ints and `None`s are considered shapes and are
    passed to `func` as a whole instead of being traversed into.

    Args:
        func: The callable to apply to each shape (and non-shape leaf)
            found in `structure`.
        structure: Arbitrarily nested structure.

    Returns:
        The same structure with `func` applied.
    """
    return tree_impl.map_shape_structure(func, structure)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/tree/dmtree_impl.py | keras/src/tree/dmtree_impl.py | import collections
import collections.abc
import itertools
from keras.src.backend.config import backend
from keras.src.utils.module_utils import dmtree
# NOTE: There are two known discrepancies between this `dmtree` implementation
# of the tree API and the `optree` implementation:
#
# 1. `map_structure` with *multiple* structures and `map_structure_up_to` do not
# use the object registration (they use the raw `dmtree.map_structure` and
# `dmtree.map_structure_up_to`). This only has consequences with two types of
# structures:
# - `TrackedSet` will not explored (considered as a leaf).
# - `OrderedDict` will be traversed in the order of sorted keys, not the
# order of the items. This is typically inconsequential because functions
# used with `map_structure` and `map_structure_up_to` are typically not
# order dependent and are, in fact, stateless.
#
# 2. The handling of non-sortable keys in dictionaries in inconsistent. `optree`
# uses the iteration order while `dmtree` raises an error. This is not an
# issue as keys are always strings. But this is the reason why we document
# non-sortable keys as unsupported (meaning behavior is undefined).
# Maps each registered class to its (flatten, unflatten) functions.
REGISTERED_CLASSES = {}

# Pair of functions describing how to take a registered class apart and
# put it back together: `flatten(obj)` and `unflatten(metadata, children)`.
ClassRegistration = collections.namedtuple(
    "ClassRegistration", ["flatten", "unflatten"]
)
class TypeErrorRemapping:
    """Context manager re-raising any `TypeError` as a `ValueError`.

    The original traceback is preserved; all other exceptions (and the
    no-exception case) propagate unchanged.
    """

    def __enter__(self):
        return None

    def __exit__(self, exc_type, exc_value, traceback):
        if exc_type is not TypeError:
            return False
        raise ValueError(exc_value).with_traceback(traceback)
def register_tree_node(
    cls,
    flatten_func=None,
    unflatten_func=None,
):
    """Register `cls` with custom flatten/unflatten functions.

    When the functions are omitted, the class is expected to provide a
    `tree_flatten()` method and a `tree_unflatten` callable (e.g. a
    classmethod) that are used instead.
    """
    if flatten_func is None:
        flatten_func = lambda x: x.tree_flatten()
    if unflatten_func is None:
        unflatten_func = cls.tree_unflatten
    REGISTERED_CLASSES[cls] = ClassRegistration(flatten_func, unflatten_func)
def register_tree_node_class(cls):
    """Class decorator form of `register_tree_node`; returns `cls`."""
    register_tree_node(cls)
    return cls
# Register OrderedDict so it flattens in insertion order (plain dicts are
# flattened by dmtree in sorted-key order); the keys serve as both the
# metadata and the path entries.
register_tree_node(
    collections.OrderedDict,
    lambda d: (d.values(), list(d.keys()), d.keys()),
    lambda metadata, children: collections.OrderedDict(zip(metadata, children)),
)
if backend() == "tensorflow":
    # TensorFlow wraps tracked lists/dicts in ListWrapper/_DictWrapper;
    # register both so they are traversed like plain lists/dicts.
    from tensorflow.python.trackable.data_structures import ListWrapper
    from tensorflow.python.trackable.data_structures import _DictWrapper

    register_tree_node(
        ListWrapper,
        lambda x: (x, None),
        lambda metadata, children: ListWrapper(list(children)),
    )

    def sorted_keys_and_values(d):
        # Flatten in sorted-key order to match the plain-dict convention;
        # the sorted keys double as metadata and path entries.
        keys = sorted(list(d.keys()))
        values = [d[k] for k in keys]
        return values, keys, keys

    register_tree_node(
        _DictWrapper,
        sorted_keys_and_values,
        lambda metadata, children: _DictWrapper(
            {key: child for key, child in zip(metadata, children)}
        ),
    )
def is_nested(structure):
    """Return `True` if `structure` is a registered node type or is
    considered nested by `dmtree`."""
    return type(structure) in REGISTERED_CLASSES or dmtree.is_nested(structure)
def traverse(func, structure, top_down=True):
    """dmtree-based implementation of `keras.tree.traverse`.

    Applies `func` to every sub-structure depth-first. A non-`None`
    return value from `func` replaces the sub-tree, with the Keras
    `MAP_TO_NONE` sentinel mapping to `None`.
    """
    if not callable(func):
        raise TypeError(
            f"`func` must be callable, got {func} of type {type(func)}"
        )

    def remap_map_to_none(value, new_value):
        # Keras' MAP_TO_NONE sentinel is detected by class name so this
        # module doesn't need to import it; translate it to `new_value`
        # (dmtree's own marker when going top-down, plain None bottom-up).
        if isinstance(value, type) and value.__name__ == "MAP_TO_NONE":
            return new_value
        return value

    def traverse_top_down(s):
        ret = func(s)
        if ret is not None:
            # `func` replaced this sub-tree; do not descend into it.
            return remap_map_to_none(ret, dmtree.MAP_TO_NONE)
        registration = REGISTERED_CLASSES.get(type(s), None)
        if registration is None:
            # Not a registered class: let dmtree recurse normally.
            return None
        # Registered class: flatten manually, recurse into the children,
        # then rebuild the node from its metadata.
        flat_meta_s = registration.flatten(s)
        flat_s = [
            dmtree.traverse(traverse_top_down, x, top_down=True)
            for x in list(flat_meta_s[0])
        ]
        return registration.unflatten(flat_meta_s[1], flat_s)

    def traverse_bottom_up(s):
        # Recurse into children first, then apply `func` to the rebuilt
        # node. Mappings are visited in sorted-key order for determinism.
        registration = REGISTERED_CLASSES.get(type(s), None)
        if registration is not None:
            flat_meta_s = registration.flatten(s)
            ret = [traverse_bottom_up(x) for x in list(flat_meta_s[0])]
            ret = registration.unflatten(flat_meta_s[1], ret)
        elif not dmtree.is_nested(s):
            ret = s
        elif isinstance(s, collections.abc.Mapping):
            ret = [traverse_bottom_up(s[key]) for key in sorted(s)]
            ret = dmtree._sequence_like(s, ret)
        else:
            ret = [traverse_bottom_up(x) for x in s]
            ret = dmtree._sequence_like(s, ret)
        func_ret = func(ret)
        return ret if func_ret is None else remap_map_to_none(func_ret, None)

    if top_down:
        return dmtree.traverse(traverse_top_down, structure, top_down=True)
    else:
        return traverse_bottom_up(structure)
def flatten(structure):
    """dmtree-based implementation of `keras.tree.flatten`.

    Leaves are accumulated into a list as a side effect of a top-down
    traversal.
    """
    if not is_nested(structure):
        return [structure]

    flattened = []

    def flatten_func(s):
        registration = REGISTERED_CLASSES.get(type(s), None)
        if registration is not None:
            # Registered class: flatten manually and recurse into the
            # resulting children.
            flat_s = list(registration.flatten(s)[0])
            return dmtree.traverse(flatten_func, flat_s, top_down=True)
        if not is_nested(s):
            flattened.append(s)
            # Returning non-None stops the traversal here; `None` leaves
            # must be remapped to dmtree's MAP_TO_NONE marker.
            return dmtree.MAP_TO_NONE if s is None else s
        return None

    dmtree.traverse(flatten_func, structure, top_down=True)
    return flattened
def _recursive_flatten_with_path(path, structure, flattened):
    """Append `(path, leaf)` pairs for every leaf of `structure`.

    `path` is the tuple of keys/indices leading to `structure`; results
    are accumulated into the `flattened` list.
    """
    registration = REGISTERED_CLASSES.get(type(structure), None)
    if registration is not None:
        flat_meta_paths = registration.flatten(structure)
        flat = flat_meta_paths[0]
        # A registration's flatten result may carry an optional third
        # element providing path entries; fall back to integer indices.
        paths = (
            flat_meta_paths[2]
            if len(flat_meta_paths) >= 3
            else itertools.count()
        )
        for key, value in zip(paths, flat):
            _recursive_flatten_with_path(path + (key,), value, flattened)
    elif not dmtree.is_nested(structure):
        flattened.append((path, structure))
    elif isinstance(structure, collections.abc.Mapping):
        # Mappings are visited in sorted-key order for determinism.
        for key in sorted(structure):
            _recursive_flatten_with_path(
                path + (key,), structure[key], flattened
            )
    else:
        for key, value in enumerate(structure):
            _recursive_flatten_with_path(path + (key,), value, flattened)
def flatten_with_path(structure):
    """dmtree-based implementation of `keras.tree.flatten_with_path`.

    Fully reimplemented in Python to handle registered classes,
    `OrderedDict` and namedtuples the same way as optree.
    """
    if not is_nested(structure):
        return [((), structure)]
    results = []
    _recursive_flatten_with_path((), structure, results)
    return results
def map_structure(func, *structures, none_is_leaf=True):
    """dmtree-based implementation of `keras.tree.map_structure`.

    With a single structure, the traversal honors registered classes;
    with multiple structures, raw `dmtree.map_structure` is used (see
    the module NOTE about the resulting discrepancies).
    """
    if not callable(func):
        raise TypeError(
            f"`func` must be callable, got {func} of type {type(func)}"
        )
    map_func = func
    if not none_is_leaf:

        def func_skipping_none(*args):
            # Check if the reference entry (first one) is None
            if args[0] is None:
                if not all(s is None for s in args):
                    raise ValueError(
                        "Structure mismatch: some arguments are None, others "
                        f"are not. Received arguments: {args}."
                    )
                return None
            return func(*args)

        map_func = func_skipping_none

    def func_traverse_wrapper(s):
        if is_nested(s):
            return None
        ret = map_func(s)
        if ret is None:
            # `None` results must be remapped to dmtree's marker so the
            # traversal does not treat them as "keep going".
            return dmtree.MAP_TO_NONE
        return ret

    if len(structures) == 1:
        return traverse(func_traverse_wrapper, structures[0])
    with TypeErrorRemapping():
        # Remap dmtree's TypeError to ValueError for optree consistency.
        return dmtree.map_structure(map_func, *structures)
def map_structure_up_to(shallow_structure, func, *structures):
    """dmtree-based implementation of `keras.tree.map_structure_up_to`.

    Note: delegates to raw `dmtree.map_structure_up_to`, which does not
    honor registered classes (see the module NOTE).
    """
    if not callable(func):
        raise TypeError(
            f"`func` must be callable, got {func} of type {type(func)}"
        )
    # Remap dmtree's TypeError to ValueError for optree consistency.
    with TypeErrorRemapping():
        return dmtree.map_structure_up_to(shallow_structure, func, *structures)
def assert_same_structure(a, b):
    """dmtree-based implementation of `keras.tree.assert_same_structure`.

    Raises `ValueError` if `a` and `b` are not nested in the same way.
    """
    # Fully reimplemented in Python to handle registered classes.
    # Don't handle OrderedDict as a registered class, use the normal dict path
    # so that OrderedDict is equivalent to dict per optree behavior.
    a_registration = REGISTERED_CLASSES.get(type(a), None)
    if isinstance(a, collections.OrderedDict):
        a_registration = None
    b_registration = REGISTERED_CLASSES.get(type(b), None)
    if isinstance(b, collections.OrderedDict):
        b_registration = None
    if a_registration != b_registration:
        raise ValueError(
            f"Custom node type mismatch; "
            f"expected type: {type(a)}, got type: {type(b)} "
            f"while comparing {a} and {b}."
        )
    if a_registration is not None:
        # Registered class: metadata must match, then compare children
        # pairwise.
        a_flat_meta = a_registration.flatten(a)
        b_flat_meta = b_registration.flatten(b)
        a_flat = list(a_flat_meta[0])
        b_flat = list(b_flat_meta[0])
        if not a_flat_meta[1] == b_flat_meta[1]:
            raise ValueError(
                f"Mismatch custom node data; "
                f"expected: {a_flat_meta[1]}, got: {b_flat_meta[1]} "
                f"while comparing {a} and {b}."
            )
        if len(a_flat) != len(b_flat):
            raise ValueError(
                f"Arity mismatch; expected: {len(a)}, got: {len(b)} "
                f"while comparing {a} and {b}."
            )
        for sub_a, sub_b in zip(a_flat, b_flat):
            assert_same_structure(sub_a, sub_b)
    elif not dmtree.is_nested(a):
        # `a` is a leaf; `b` must be one as well.
        if dmtree.is_nested(b):
            raise ValueError(
                f"Structures don't have the same nested structure: {a}, {b}."
            )
    elif isinstance(
        a, (dict, collections.OrderedDict, collections.defaultdict)
    ):
        # All dict variants are interchangeable as long as keys match.
        if not isinstance(
            b, (dict, collections.OrderedDict, collections.defaultdict)
        ):
            raise ValueError(
                f"Expected an instance of dict, collections.OrderedDict, or "
                f"collections.defaultdict, got {type(b)} "
                f"while comparing {a} and {b}."
            )
        a_keys = sorted(a)
        b_keys = sorted(b)
        if not a_keys == b_keys:
            raise ValueError(
                f"Dictionary key mismatch; "
                f"expected key(s): {a_keys}, got key(s): {b_keys} "
                f"while comparing {a} and {b}."
            )
        for key in a_keys:
            assert_same_structure(a[key], b[key])
    elif isinstance(a, collections.abc.Mapping):
        raise ValueError(
            f"Encountered unregistered collections.abc.Mapping type: {type(a)} "
            f"while comparing {a} and {b}."
        )
    else:
        # Sequences (incl. namedtuples): require exactly the same type.
        if type(a) is not type(b):
            raise ValueError(
                f"Expected an instance of {type(a)}, got {type(b)} "
                f"while comparing {a} and {b}."
            )
        if not len(a) == len(b):
            raise ValueError(
                f"Arity mismatch; expected: {len(a)}, got: {len(b)} "
                f"while comparing {a} and {b}."
            )
        for sub_a, sub_b in zip(a, b):
            assert_same_structure(sub_a, sub_b)
def assert_same_paths(a, b):
    """dmtree-based implementation of `keras.tree.assert_same_paths`.

    Raises `ValueError` (listing the differing paths) when the leaf
    paths of `a` and `b` are not identical.
    """
    a_paths = {path for path, _ in flatten_with_path(a)}
    b_paths = {path for path, _ in flatten_with_path(b)}
    if a_paths == b_paths:
        return
    msg = "`a` and `b` don't have the same paths."
    a_diff = a_paths.difference(b_paths)
    if a_diff:
        msg += f"\nPaths in `a` missing in `b`:\n{a_diff}"
    b_diff = b_paths.difference(a_paths)
    if b_diff:
        msg += f"\nPaths in `b` missing in `a`:\n{b_diff}"
    raise ValueError(msg)
def pack_sequence_as(structure, flat_sequence):
    """dmtree-based implementation of `keras.tree.pack_sequence_as`.

    Walks `structure` and consumes one element of `flat_sequence` per
    leaf, validating that the count matches exactly.
    """
    # This is not just an optimization for the case when structure is a leaf.
    # This is required to avoid Torch Dynamo failures.
    if not is_nested(structure):
        if len(flat_sequence) == 1:
            return flat_sequence[0]
        else:
            raise ValueError(
                "Incorrect number of leaves provided by `flat_sequence` for "
                f"`structure`; expected: 1, got {len(flat_sequence)}."
            )

    # Shared iterator: each leaf encountered during traversal consumes
    # the next flat value.
    flat_sequence_it = enumerate(flat_sequence)

    def unflatten_func(s):
        registration = REGISTERED_CLASSES.get(type(s), None)
        if registration is not None:
            # Registered class: rebuild from its recursively unflattened
            # children and its metadata.
            flat_meta_s = registration.flatten(s)
            flat_s = dmtree.traverse(
                unflatten_func, list(flat_meta_s[0]), top_down=True
            )
            return registration.unflatten(flat_meta_s[1], flat_s)
        elif not dmtree.is_nested(s):
            # Leaf: consume the next value from the flat sequence;
            # `None` values are remapped to dmtree's marker.
            try:
                _, value = next(flat_sequence_it)
                return dmtree.MAP_TO_NONE if value is None else value
            except StopIteration:
                raise ValueError(
                    "Too few leaves provided by `flat_sequence` for "
                    f"`structure`. Got {len(flat_sequence)}."
                )
        return None

    ret = dmtree.traverse(unflatten_func, structure, top_down=True)
    # All flat values must have been consumed by the traversal.
    try:
        index, _ = next(flat_sequence_it)
        raise ValueError(
            "Too many leaves provided by `flat_sequence` for `structure`; "
            f"expected: {index}, got {len(flat_sequence)}."
        )
    except StopIteration:
        return ret
def lists_to_tuples(structure):
    """Return a copy of `structure` where every list has become a tuple."""
    def swap(node):
        if isinstance(node, list):
            return tuple(node)
        return None
    # Bottom-up: children are converted before their enclosing containers.
    return traverse(swap, structure, top_down=False)
def map_shape_structure(func, structure):
    """Map `func` over `structure`, treating shape tuples/lists as leaves."""
    if not callable(func):
        raise TypeError(
            f"`func` must be callable, got {func} of type {type(func)}"
        )
    def visit(node):
        is_shape = isinstance(node, (list, tuple)) and all(
            isinstance(item, (int, type(None))) for item in node
        )
        if not is_shape and is_nested(node):
            # Regular container: keep recursing.
            return None
        mapped = func(node)
        # dmtree.traverse interprets None as "recurse", so a genuine None
        # result must be encoded as the MAP_TO_NONE sentinel.
        return dmtree.MAP_TO_NONE if mapped is None else mapped
    return traverse(visit, structure, top_down=True)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/tree/optree_impl.py | keras/src/tree/optree_impl.py | import optree
import optree.utils
from keras.src.backend.config import backend
def register_tree_node_class(cls):
    """Class decorator registering `cls` as an optree node in the "keras" namespace."""
    return optree.register_pytree_node_class(cls, namespace="keras")
# Register backend-specific node classes
if backend() == "tensorflow":
    from tensorflow.python.trackable.data_structures import ListWrapper
    from tensorflow.python.trackable.data_structures import _DictWrapper
    try:
        # Make TF's trackable list wrapper traversable like a plain list.
        optree.register_pytree_node(
            ListWrapper,
            lambda x: (x, None),
            lambda metadata, children: ListWrapper(list(children)),
            namespace="keras",
        )
        def sorted_keys_and_values(d):
            # Flatten dict wrappers with sorted keys so leaf order is
            # deterministic (matching how plain dicts are flattened).
            keys = sorted(list(d.keys()))
            values = [d[k] for k in keys]
            return values, keys, keys
        optree.register_pytree_node(
            _DictWrapper,
            sorted_keys_and_values,
            lambda metadata, children: _DictWrapper(
                {key: child for key, child in zip(metadata, children)}
            ),
            namespace="keras",
        )
    except ValueError:
        pass  # We may have already registered if we are reimporting keras.
def is_nested(structure):
    """Return True when `structure` is a pytree node rather than a leaf."""
    leaf = optree.tree_is_leaf(structure, none_is_leaf=True, namespace="keras")
    return not leaf
def traverse(func, structure, top_down=True):
    """Traverse `structure`, calling `func` on every node.

    A `None` return from `func` means "no substitution, keep recursing";
    returning the `MAP_TO_NONE` sentinel class maps the node to `None`; any
    other return value replaces the node (and, with `top_down=True`, stops
    recursion below it).
    """
    # From https://github.com/google/jax/pull/19695
    def traverse_children():
        # Flatten exactly one level: anything that is not `structure` itself
        # is treated as a leaf, so `children` are the immediate children.
        children, treedef = optree.tree_flatten(
            structure,
            is_leaf=lambda x: x is not structure,
            none_is_leaf=True,
            namespace="keras",
        )
        if treedef.num_nodes == 1 and treedef.num_leaves == 1:
            # `structure` is itself a leaf: nothing to recurse into.
            return structure
        else:
            return optree.tree_unflatten(
                treedef,
                [traverse(func, c, top_down=top_down) for c in children],
            )
    if top_down:
        ret = func(structure)
        if ret is None:
            return traverse_children()
    else:
        traversed_structure = traverse_children()
        ret = func(traversed_structure)
        if ret is None:
            return traversed_structure
    # Detect MAP_TO_NONE without tree_api import to avoid circular import.
    if isinstance(ret, type) and ret.__name__ == "MAP_TO_NONE":
        return None
    return ret
def flatten(structure):
    """Return a flat list containing every leaf of `structure`."""
    # Index [0] of the (leaves, treespec) pair returned by tree_flatten.
    return optree.tree_flatten(
        structure, none_is_leaf=True, namespace="keras"
    )[0]
def flatten_with_path(structure):
    """Return a list of `(path, leaf)` pairs for every leaf of `structure`."""
    paths, leaves, _treespec = optree.tree_flatten_with_path(
        structure, none_is_leaf=True, namespace="keras"
    )
    return [(path, leaf) for path, leaf in zip(paths, leaves)]
def map_structure(func, *structures, none_is_leaf=True):
    """Apply `func` to corresponding leaves of one or more structures."""
    if len(structures) == 0:
        raise ValueError("Must provide at least one structure")
    def checked_func(*leaves):
        # optree maps over the shallowest structure; reject any argument
        # that is not a leaf so mismatched structures raise instead of
        # being silently truncated.
        for leaf in leaves:
            if not optree.tree_is_leaf(
                leaf, none_is_leaf=none_is_leaf, namespace="keras"
            ):
                raise ValueError(
                    "Structures don't have the same nested structure."
                )
        return func(*leaves)
    # The structural check is only needed when mapping several structures.
    mapped = checked_func if len(structures) > 1 else func
    return optree.tree_map(
        mapped, *structures, none_is_leaf=none_is_leaf, namespace="keras"
    )
def map_structure_up_to(shallow_structure, func, *structures):
    """Apply `func` to `structures`, recursing only as deep as `shallow_structure`."""
    if not structures:
        raise ValueError("Must provide at least one structure")
    # Add check that `shallow_structure` really is the shallowest.
    # Also only call `func` on `structures` and not `shallow_structure`.
    def func_with_check_without_shallow_structure(shallow, *args):
        # Bug fix: the leaf check must use the same flattening options as
        # the `tree_map` call below (`none_is_leaf=True` and the "keras"
        # namespace).  With the defaults, a `None` leaf in
        # `shallow_structure` was classified as a non-leaf node and raised
        # a spurious ValueError (the torch backend's `_tree_is_leaf`
        # treats `None` as a leaf, so the backends disagreed).
        if not optree.tree_is_leaf(
            shallow, none_is_leaf=True, namespace="keras"
        ):
            raise ValueError("Structures don't have the same nested structure.")
        return func(*args)
    return optree.tree_map(
        func_with_check_without_shallow_structure,
        shallow_structure,
        *structures,
        none_is_leaf=True,
        namespace="keras",
    )
def assert_same_structure(a, b):
    """Raise `ValueError` when `a` and `b` differ in nested structure."""
    def check_pair(a_leaf, b_leaf):
        a_is_leaf = optree.tree_is_leaf(
            a_leaf, none_is_leaf=True, namespace="keras"
        )
        b_is_leaf = optree.tree_is_leaf(
            b_leaf, none_is_leaf=True, namespace="keras"
        )
        if not (a_is_leaf and b_is_leaf):
            raise ValueError("Structures don't have the same nested structure.")
        return None
    # tree_map itself raises on gross mismatches; the pairwise check above
    # catches the cases where one side is deeper than the other.
    optree.tree_map(check_pair, a, b, none_is_leaf=True, namespace="keras")
def assert_same_paths(a, b):
    """Raise `ValueError` unless `a` and `b` have exactly the same leaf paths."""
    paths_a = set(optree.tree_paths(a, none_is_leaf=True, namespace="keras"))
    paths_b = set(optree.tree_paths(b, none_is_leaf=True, namespace="keras"))
    if paths_a == paths_b:
        return
    msg = "`a` and `b` don't have the same paths."
    only_in_a = paths_a.difference(paths_b)
    if only_in_a:
        msg += f"\nPaths in `a` missing in `b`:\n{only_in_a}"
    only_in_b = paths_b.difference(paths_a)
    if only_in_b:
        msg += f"\nPaths in `b` missing in `a`:\n{only_in_b}"
    raise ValueError(msg)
def pack_sequence_as(structure, flat_sequence):
    """Pack the values of `flat_sequence` into the layout of `structure`."""
    # Only the treespec (index [1]) of the flattened structure is needed.
    treespec = optree.tree_flatten(
        structure, none_is_leaf=True, namespace="keras"
    )[1]
    return optree.tree_unflatten(treespec, flat_sequence)
def lists_to_tuples(structure):
    """Convert every list in `structure` (at any depth) into a tuple."""
    def maybe_tuple(node):
        if isinstance(node, list):
            return tuple(node)
        return None
    # Bottom-up so inner lists are converted before their parents.
    return traverse(maybe_tuple, structure, top_down=False)
def map_shape_structure(func, structure):
    """Map `func` over `structure`, treating shape tuples/lists as leaves."""
    def shape_like(x):
        # A "shape" is a list/tuple containing only ints and Nones.
        if not isinstance(x, (list, tuple)):
            return False
        return all(isinstance(e, (int, type(None))) for e in x)
    return optree.tree_map(
        func,
        structure,
        is_leaf=shape_like,
        none_is_leaf=True,
        namespace="keras",
    )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/tree/torchtree_impl.py | keras/src/tree/torchtree_impl.py | from collections import defaultdict
from torch.utils import _pytree as torch_tree
def register_tree_node_class(cls):
torch_tree.register_pytree_node(
cls,
flatten_fn=lambda x: x.torchtree_flatten(),
unflatten_fn=cls.torchtree_unflatten,
serialized_type_name=f"{cls.__name__}",
flatten_with_keys_fn=lambda x: x.torchtree_flatten_with_keys(),
)
return cls
def _tree_is_leaf(tree, is_leaf=None):
if is_leaf is not None and is_leaf(tree):
return True
return torch_tree._get_node_type(tree) not in torch_tree.SUPPORTED_NODES
def _dict_to_ordered_dict(structure):
# We need to sort dict and defaultdict to ensure a deterministic order that
# that is consistent with other tree implementations.
def func(x):
if type(x) is dict:
return {k: x[k] for k in sorted(x.keys())}
elif type(x) is defaultdict:
return defaultdict(
x.default_factory,
{k: x[k] for k in sorted(x.keys())},
)
return None
def traverse_children():
children, treedef = torch_tree.tree_flatten(
structure,
is_leaf=lambda x: x is not structure,
)
if treedef.num_nodes == 1 and treedef.num_leaves == 1:
return structure
else:
return torch_tree.tree_unflatten(
[_dict_to_ordered_dict(c) for c in children],
treedef,
)
ret = func(structure)
if ret is None:
return traverse_children()
if isinstance(ret, type) and ret.__name__ == "MAP_TO_NONE":
return None
return ret
def is_nested(structure):
    """Return True if torch pytree knows how to decompose `structure`."""
    # Inlined equivalent of `not _tree_is_leaf(structure)`.
    return torch_tree._get_node_type(structure) in torch_tree.SUPPORTED_NODES
def traverse(func, structure, top_down=True):
    """Traverse `structure`, calling `func` on every node.

    A `None` return from `func` means "no substitution, keep recursing";
    returning the `MAP_TO_NONE` sentinel class maps the node to `None`; any
    other return value replaces the node (and, with `top_down=True`, stops
    recursion below it).
    """
    def traverse_children():
        # Flatten exactly one level: anything that is not `structure` itself
        # is treated as a leaf, so `children` are the immediate children.
        # Note this closure reads `structure` after the rebinding below.
        children, treedef = torch_tree.tree_flatten(
            structure,
            is_leaf=lambda x: x is not structure,
        )
        if treedef.num_nodes == 1 and treedef.num_leaves == 1:
            # `structure` is itself a leaf: nothing to recurse into.
            return structure
        else:
            return torch_tree.tree_unflatten(
                [traverse(func, c, top_down=top_down) for c in children],
                treedef,
            )
    # Sort dict keys up front so visitation order matches the other backends.
    structure = _dict_to_ordered_dict(structure)
    if top_down:
        ret = func(structure)
        if ret is None:
            return traverse_children()
    else:
        traversed_structure = traverse_children()
        ret = func(traversed_structure)
        if ret is None:
            return traversed_structure
    # Detect MAP_TO_NONE without tree_api import to avoid circular import.
    if isinstance(ret, type) and ret.__name__ == "MAP_TO_NONE":
        return None
    return ret
def flatten(structure):
    """Return the leaves of `structure`, with dict keys visited in sorted order."""
    # Normalize dict ordering first so results match the other tree
    # implementations (torch flattens dicts in insertion order).
    normalized = _dict_to_ordered_dict(structure)
    return torch_tree.tree_flatten(normalized)[0]
def flatten_with_path(structure):
    """Return `(path, leaf)` pairs, mapping namedtuple fields to sorted indices."""
    # Normalize dict ordering first so results match the other tree
    # implementations (torch flattens dicts in insertion order).
    structure = _dict_to_ordered_dict(structure)
    leaves_with_path, _ = torch_tree.tree_flatten_with_path(structure)
    # Collect every attribute-access key name, then number the names in
    # sorted order so attribute paths become stable integer indices.
    attr_names = set()
    for key_path, _leaf in leaves_with_path:
        for entry in key_path:
            if isinstance(entry, torch_tree.GetAttrKey):
                attr_names.add(entry.name)
    attr_index = {name: i for i, name in enumerate(sorted(attr_names))}
    def to_path(key_path):
        converted = []
        for entry in key_path:
            if isinstance(entry, torch_tree.SequenceKey):
                converted.append(entry.idx)
            elif isinstance(entry, torch_tree.MappingKey):
                converted.append(entry.key)
            elif isinstance(entry, torch_tree.GetAttrKey):
                converted.append(attr_index[entry.name])
        return tuple(converted)
    return [(to_path(key_path), leaf) for key_path, leaf in leaves_with_path]
def map_structure(func, *structures, none_is_leaf=True):
if not structures:
raise ValueError("Must provide at least one structure")
map_func = func
if not none_is_leaf:
def func_skipping_none(*args):
# Check if the reference entry (first one) is None
if args[0] is None:
if not all(s is None for s in args):
raise ValueError(
"Structure mismatch: some arguments are None, others "
f"are not. Received arguments: {args}."
)
return None
return func(*args)
map_func = func_skipping_none
return torch_tree.tree_map(map_func, *structures)
def map_structure_up_to(shallow_structure, func, *structures):
    """Apply `func` to `structures`, recursing only as deep as `shallow_structure`."""
    if not structures:
        raise ValueError("Must provide at least one structure")
    # `shallow_structure` only guides the recursion depth: every visited
    # position must be one of its leaves, and it is not passed to `func`.
    def check_and_apply(shallow, *leaves):
        if not _tree_is_leaf(shallow):
            raise ValueError("Structures don't have the same nested structure.")
        return func(*leaves)
    return torch_tree.tree_map(check_and_apply, shallow_structure, *structures)
def assert_same_structure(a, b):
    """Raise `ValueError` when `a` and `b` differ in nested structure."""
    def check_pair(a_leaf, b_leaf):
        if _tree_is_leaf(a_leaf) and _tree_is_leaf(b_leaf):
            return None
        raise ValueError("Structures don't have the same nested structure.")
    # tree_map itself raises on gross mismatches; the pairwise check above
    # catches the cases where one side is deeper than the other.
    torch_tree.tree_map(check_pair, a, b)
def assert_same_paths(a, b):
    """Raise `ValueError` unless `a` and `b` have exactly the same leaf paths."""
    paths_a = {path for path, _ in flatten_with_path(a)}
    paths_b = {path for path, _ in flatten_with_path(b)}
    if paths_a == paths_b:
        return
    msg = "`a` and `b` don't have the same paths."
    only_in_a = paths_a.difference(paths_b)
    if only_in_a:
        msg += f"\nPaths in `a` missing in `b`:\n{only_in_a}"
    only_in_b = paths_b.difference(paths_a)
    if only_in_b:
        msg += f"\nPaths in `b` missing in `a`:\n{only_in_b}"
    raise ValueError(msg)
def pack_sequence_as(structure, flat_sequence):
    """Pack the values of `flat_sequence` into the layout of `structure`."""
    # Sort dict keys first so the leaf order matches `flatten`.
    treespec = torch_tree.tree_flatten(_dict_to_ordered_dict(structure))[1]
    return torch_tree.tree_unflatten(flat_sequence, treespec)
def lists_to_tuples(structure):
    """Return a copy of `structure` in which every list became a tuple."""
    def as_tuple(node):
        if isinstance(node, list):
            return tuple(node)
        return None
    # Bottom-up so inner lists are converted before their parents.
    return traverse(as_tuple, structure, top_down=False)
def map_shape_structure(func, structure):
    """Map `func` over `structure`, treating shape tuples/lists as leaves."""
    def shape_like(x):
        # A "shape" is a list/tuple containing only ints and Nones.
        if not isinstance(x, (list, tuple)):
            return False
        return all(isinstance(e, (int, type(None))) for e in x)
    # Sort dict keys first so results match the other tree implementations.
    ordered = _dict_to_ordered_dict(structure)
    return torch_tree.tree_map(func, ordered, is_leaf=shape_like)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/tree/tree_test.py | keras/src/tree/tree_test.py | import functools
from collections import OrderedDict
from collections import defaultdict
from collections import deque
from collections import namedtuple
import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.tree.tree_api import MAP_TO_NONE
from keras.src.utils.module_utils import dmtree
from keras.src.utils.module_utils import optree
from keras.src.utils.tracking import TrackedDict
from keras.src.utils.tracking import TrackedList
from keras.src.utils.tracking import TrackedSet
# One parameterized case per tree implementation available in this
# environment; each case injects the implementation module as `t`.
TEST_CASES = []
if dmtree.available:
    from keras.src.tree import dmtree_impl
    TEST_CASES += [
        {
            "testcase_name": "dmtree",
            "t": dmtree_impl,
        }
    ]
if backend.backend() != "torch" and optree.available:
    from keras.src.tree import optree_impl
    TEST_CASES += [
        {
            "testcase_name": "optree",
            "t": optree_impl,
        },
    ]
if backend.backend() == "torch":
    from keras.src.tree import torchtree_impl
    TEST_CASES += [
        {
            "testcase_name": "torchtree",
            "t": torchtree_impl,
        },
    ]
# Small namedtuple fixtures used throughout the tests below.
Empty = namedtuple("Empty", [])
Point = namedtuple("Point", ["x", "y"])
OtherPoint = namedtuple("OtherPoint", ["x", "y"])
def default_value():
    """Default factory shared by the `defaultdict` fixtures in these tests."""
    return None
class Visitor:
    """Callable wrapper that records every argument it is invoked with."""

    def __init__(self, func):
        self.func = func
        self.visited_list = []

    def __call__(self, x):
        self.visited_list.append(x)
        return self.func(x)

    def visited(self):
        """Return the arguments seen so far and reset the record."""
        seen, self.visited_list = self.visited_list, []
        return seen
@parameterized.named_parameters(TEST_CASES)
class TreeTest(testing.TestCase):
    def setUp(self):
        """Make the Keras tracking wrappers visible to dm-tree when needed."""
        if dmtree.available and optree.available:
            # If both are available, the annotation on the Keras tracking
            # wrappers will have used optree. For testing purposes, we need to
            # also register them with dm-tree.
            from keras.src.tree import dmtree_impl
            dmtree_impl.register_tree_node_class(TrackedList)
            dmtree_impl.register_tree_node_class(TrackedSet)
            dmtree_impl.register_tree_node_class(TrackedDict)
        super().setUp()
def assertEqualStrict(self, a, b):
self.assertEqual(a, b)
self.assertEqual(type(a), type(b))
if isinstance(a, OrderedDict):
# Verify order.
self.assertEqual(a.items(), b.items())
elif isinstance(a, defaultdict):
self.assertEqual(a.default_factory, b.default_factory)
# Recurse
if isinstance(a, (tuple, list, deque)):
for sub_a, sub_b in zip(a, b):
self.assertEqualStrict(sub_a, sub_b)
elif isinstance(a, dict):
for k in a:
self.assertEqualStrict(a[k], b[k])
def is_dmtree(self, tree_impl):
if dmtree.available:
from keras.src.tree import dmtree_impl
return tree_impl is dmtree_impl
return False
    def test_is_nested(self, t):
        """`is_nested` is False for leaves and True for every container kind."""
        # Non-nested.
        self.assertFalse(t.is_nested(1))
        self.assertFalse(t.is_nested("1234"))
        self.assertFalse(t.is_nested(b"1234"))
        self.assertFalse(t.is_nested(bytearray("1234", "ascii")))
        self.assertFalse(t.is_nested(np.ones((4, 5))))
        self.assertFalse(t.is_nested(ops.ones((4, 5))))
        self.assertFalse(t.is_nested(set([1, 2])))
        # Standard structures.
        self.assertTrue(t.is_nested(()))
        self.assertTrue(t.is_nested((1,)))
        self.assertTrue(t.is_nested((1, 2)))
        self.assertTrue(t.is_nested([]))
        self.assertTrue(t.is_nested([1]))
        self.assertTrue(t.is_nested([1, 2]))
        self.assertTrue(t.is_nested(deque([])))
        self.assertTrue(t.is_nested(deque([1])))
        self.assertTrue(t.is_nested(deque([1, 2])))
        self.assertTrue(t.is_nested(Empty()))
        self.assertTrue(t.is_nested(Point(x=1, y=2)))
        self.assertTrue(t.is_nested({}))
        self.assertTrue(t.is_nested({"a": 1}))
        self.assertTrue(t.is_nested({"b": 2, "a": 1}))
        self.assertTrue(t.is_nested(OrderedDict()))
        self.assertTrue(t.is_nested(OrderedDict([("a", 1)])))
        self.assertTrue(t.is_nested(OrderedDict([("b", 2), ("a", 1)])))
        self.assertTrue(t.is_nested(defaultdict(default_value)))
        self.assertTrue(t.is_nested(defaultdict(default_value, [("a", 1)])))
        self.assertTrue(
            t.is_nested(defaultdict(default_value, [("b", 2), ("a", 1)]))
        )
        # Keras tracking wrappers.
        self.assertTrue(t.is_nested(TrackedList([])))
        self.assertTrue(t.is_nested(TrackedList([1])))
        self.assertTrue(t.is_nested(TrackedList([1, 2])))
        self.assertTrue(t.is_nested(TrackedSet([])))
        self.assertTrue(t.is_nested(TrackedSet([1])))
        self.assertTrue(t.is_nested(TrackedSet([1, 2])))
        self.assertTrue(t.is_nested(TrackedDict({})))
        self.assertTrue(t.is_nested(TrackedDict({"a": 1})))
        self.assertTrue(t.is_nested(TrackedDict({"b": 2, "a": 1})))
    @pytest.mark.skipif(backend.backend() != "tensorflow", reason="tf only")
    def test_is_nested_tf_wrappers(self, t):
        """TF trackable list/dict wrappers must also count as nested."""
        from tensorflow.python.trackable.data_structures import ListWrapper
        from tensorflow.python.trackable.data_structures import _DictWrapper
        self.assertTrue(t.is_nested(ListWrapper([])))
        self.assertTrue(t.is_nested(ListWrapper([1])))
        self.assertTrue(t.is_nested(ListWrapper([1, 2])))
        self.assertTrue(t.is_nested(_DictWrapper({})))
        self.assertTrue(t.is_nested(_DictWrapper({"a": 1})))
        self.assertTrue(t.is_nested(_DictWrapper({"b": 2, "a": 1})))
    def test_flatten(self, t):
        """`flatten` yields leaves sorted by key for dicts, insertion order for OrderedDict."""
        # Non-nested.
        self.assertEqualStrict(t.flatten(1), [1])
        # Standard structures.
        self.assertEqualStrict(t.flatten(()), [])
        self.assertEqualStrict(t.flatten((1,)), [1])
        self.assertEqualStrict(t.flatten((1, 2)), [1, 2])
        self.assertEqualStrict(t.flatten([]), [])
        self.assertEqualStrict(t.flatten([1]), [1])
        self.assertEqualStrict(t.flatten([1, 2]), [1, 2])
        self.assertEqualStrict(t.flatten(deque([])), [])
        self.assertEqualStrict(t.flatten(deque([1])), [1])
        self.assertEqualStrict(t.flatten(deque([1, 2])), [1, 2])
        self.assertEqualStrict(t.flatten(Empty()), [])
        self.assertEqualStrict(t.flatten(Point(y=2, x=1)), [1, 2])
        self.assertEqualStrict(t.flatten({}), [])
        self.assertEqualStrict(t.flatten({"a": 1}), [1])
        self.assertEqualStrict(t.flatten({"b": 2, "a": 1}), [1, 2])
        self.assertEqualStrict(
            t.flatten(OrderedDict()),
            [],
        )
        self.assertEqualStrict(
            t.flatten(OrderedDict([("a", 1)])),
            [1],
        )
        self.assertEqualStrict(
            t.flatten(OrderedDict([("b", 2), ("a", 1)])),
            [2, 1],
        )
        self.assertEqualStrict(
            t.flatten(defaultdict(default_value)),
            [],
        )
        self.assertEqualStrict(
            t.flatten(defaultdict(default_value, [("a", 1)])),
            [1],
        )
        self.assertEqualStrict(
            t.flatten(defaultdict(default_value, [("b", 2), ("a", 1)])),
            [1, 2],
        )
        # Keras tracking wrappers.
        self.assertEqualStrict(t.flatten(TrackedList([])), [])
        self.assertEqualStrict(t.flatten(TrackedList([1])), [1])
        self.assertEqualStrict(t.flatten(TrackedList([1, 2])), [1, 2])
        self.assertEqualStrict(t.flatten(TrackedSet([])), [])
        self.assertEqualStrict(t.flatten(TrackedSet([1])), [1])
        self.assertEqualStrict(sorted(t.flatten(TrackedSet([1, 2]))), [1, 2])
        self.assertEqualStrict(t.flatten(TrackedDict({})), [])
        self.assertEqualStrict(t.flatten(TrackedDict({"a": 1})), [1])
        self.assertEqualStrict(t.flatten(TrackedDict({"b": 2, "a": 1})), [1, 2])
        # Deeper nested structures.
        self.assertEqualStrict(
            t.flatten(
                (
                    {"b": [2, 3], "a": (1,)},
                    TrackedDict({"x": 4, "y": TrackedList([5, 6])}),
                    TrackedSet([7]),
                    Point(y=9, x=8),
                    np.array([10]),
                )
            ),
            [1, 2, 3, 4, 5, 6, 7, 8, 9, np.array([10])],
        )
    @pytest.mark.skipif(backend.backend() != "tensorflow", reason="tf only")
    def test_flatten_tf_wrappers(self, t):
        """TF dict wrappers flatten with sorted keys, list wrappers in order."""
        from tensorflow.python.trackable.data_structures import ListWrapper
        from tensorflow.python.trackable.data_structures import _DictWrapper
        self.assertEqualStrict(t.flatten(ListWrapper([])), [])
        self.assertEqualStrict(t.flatten(ListWrapper([1])), [1])
        self.assertEqualStrict(t.flatten(ListWrapper([1, 2])), [1, 2])
        self.assertEqualStrict(t.flatten(_DictWrapper({})), [])
        self.assertEqualStrict(t.flatten(_DictWrapper({"a": 1})), [1])
        self.assertEqualStrict(
            t.flatten(_DictWrapper({"b": 2, "a": 1})), [1, 2]
        )
    def test_flatten_with_path(self, t):
        """Paths are tuples of indices/keys; namedtuple fields map to indices."""
        # Non-nested.
        self.assertEqualStrict(
            t.flatten_with_path(1),
            [((), 1)],
        )
        # Standard structures.
        self.assertEqualStrict(
            t.flatten_with_path(()),
            [],
        )
        self.assertEqualStrict(
            t.flatten_with_path((1,)),
            [((0,), 1)],
        )
        self.assertEqualStrict(
            t.flatten_with_path((1, 2)),
            [((0,), 1), ((1,), 2)],
        )
        self.assertEqualStrict(
            t.flatten_with_path([]),
            [],
        )
        self.assertEqualStrict(
            t.flatten_with_path([1]),
            [((0,), 1)],
        )
        self.assertEqualStrict(
            t.flatten_with_path([1, 2]),
            [((0,), 1), ((1,), 2)],
        )
        self.assertEqualStrict(
            t.flatten_with_path(deque([])),
            [],
        )
        self.assertEqualStrict(
            t.flatten_with_path(deque([1])),
            [((0,), 1)],
        )
        self.assertEqualStrict(
            t.flatten_with_path(deque([1, 2])),
            [((0,), 1), ((1,), 2)],
        )
        self.assertEqualStrict(
            t.flatten_with_path(Empty()),
            [],
        )
        self.assertEqualStrict(
            t.flatten_with_path(Point(y=2, x=1)),
            [((0,), 1), ((1,), 2)],
        )
        self.assertEqualStrict(
            t.flatten_with_path({}),
            [],
        )
        self.assertEqualStrict(
            t.flatten_with_path({"a": 1}),
            [(("a",), 1)],
        )
        self.assertEqualStrict(
            t.flatten_with_path({"b": 2, "a": 1}),
            [(("a",), 1), (("b",), 2)],
        )
        self.assertEqualStrict(
            t.flatten_with_path(OrderedDict()),
            [],
        )
        self.assertEqualStrict(
            t.flatten_with_path(OrderedDict([("a", 1)])),
            [(("a",), 1)],
        )
        self.assertEqualStrict(
            t.flatten_with_path(OrderedDict([("b", 2), ("a", 1)])),
            [(("b",), 2), (("a",), 1)],
        )
        self.assertEqualStrict(
            t.flatten_with_path(defaultdict(default_value)),
            [],
        )
        self.assertEqualStrict(
            t.flatten_with_path(defaultdict(default_value, [("a", 1)])),
            [(("a",), 1)],
        )
        self.assertEqualStrict(
            t.flatten_with_path(
                defaultdict(default_value, [("b", 2), ("a", 1)])
            ),
            [(("a",), 1), (("b",), 2)],
        )
        # Keras tracking wrappers.
        self.assertEqualStrict(
            t.flatten_with_path(TrackedList([])),
            [],
        )
        self.assertEqualStrict(
            t.flatten_with_path(TrackedList([1])),
            [((0,), 1)],
        )
        self.assertEqualStrict(
            t.flatten_with_path(TrackedList([1, 2])),
            [((0,), 1), ((1,), 2)],
        )
        self.assertEqualStrict(
            t.flatten_with_path(TrackedSet([])),
            [],
        )
        self.assertEqualStrict(
            t.flatten_with_path(TrackedSet([1])),
            [((0,), 1)],
        )
        # TrackedSet iteration order is unspecified: accept either order.
        flat = t.flatten_with_path(TrackedSet([1, 2]))
        if flat[0][1] == 1:
            self.assertEqualStrict(flat, [((0,), 1), ((1,), 2)])
        else:
            self.assertEqualStrict(flat, [((0,), 2), ((1,), 1)])
        self.assertEqualStrict(
            t.flatten_with_path(TrackedDict({})),
            [],
        )
        self.assertEqualStrict(
            t.flatten_with_path(TrackedDict({"a": 1})),
            [(("a",), 1)],
        )
        self.assertEqualStrict(
            t.flatten_with_path(TrackedDict({"b": 2, "a": 1})),
            [(("a",), 1), (("b",), 2)],
        )
        # Deeper nested structures.
        self.assertEqualStrict(
            t.flatten_with_path(
                (
                    {"b": [2, 3], "a": (1,)},
                    TrackedDict({"x": 4, "y": TrackedList([5, 6])}),
                    TrackedSet([7]),
                    Point(y=9, x=8),
                    np.array([10]),
                )
            ),
            [
                ((0, "a", 0), 1),
                ((0, "b", 0), 2),
                ((0, "b", 1), 3),
                ((1, "x"), 4),
                ((1, "y", 0), 5),
                ((1, "y", 1), 6),
                ((2, 0), 7),
                ((3, 0), 8),
                ((3, 1), 9),
                ((4,), np.array([10])),
            ],
        )
    @pytest.mark.skipif(backend.backend() != "tensorflow", reason="tf only")
    def test_flatten_with_path_tf_wrappers(self, t):
        """Path behavior of TF wrappers mirrors plain lists/dicts."""
        from tensorflow.python.trackable.data_structures import ListWrapper
        from tensorflow.python.trackable.data_structures import _DictWrapper
        self.assertEqualStrict(
            t.flatten_with_path(ListWrapper([])),
            [],
        )
        self.assertEqualStrict(
            t.flatten_with_path(ListWrapper([1])),
            [((0,), 1)],
        )
        self.assertEqualStrict(
            t.flatten_with_path(ListWrapper([1, 2])),
            [((0,), 1), ((1,), 2)],
        )
        self.assertEqualStrict(
            t.flatten_with_path(_DictWrapper({})),
            [],
        )
        self.assertEqualStrict(
            t.flatten_with_path(_DictWrapper({"a": 1})),
            [(("a",), 1)],
        )
        self.assertEqualStrict(
            t.flatten_with_path(_DictWrapper({"b": 2, "a": 1})),
            [(("a",), 1), (("b",), 2)],
        )
    def test_pack_sequence_as(self, t):
        """`pack_sequence_as` is the inverse of `flatten` for every container kind."""
        # Non-nested.
        self.assertEqualStrict(t.pack_sequence_as(10, [1]), 1)
        # Standard structures.
        self.assertEqualStrict(t.pack_sequence_as((), []), ())
        self.assertEqualStrict(t.pack_sequence_as((10,), [1]), (1,))
        self.assertEqualStrict(t.pack_sequence_as((10, 20), [1, 2]), (1, 2))
        self.assertEqualStrict(t.pack_sequence_as([], []), [])
        self.assertEqualStrict(t.pack_sequence_as([10], [1]), [1])
        self.assertEqualStrict(t.pack_sequence_as([10, 20], [1, 2]), [1, 2])
        self.assertEqualStrict(t.pack_sequence_as(deque([]), []), deque([]))
        self.assertEqualStrict(t.pack_sequence_as(deque([10]), [1]), deque([1]))
        self.assertEqualStrict(
            t.pack_sequence_as(deque([10, 20]), [1, 2]), deque([1, 2])
        )
        self.assertEqualStrict(t.pack_sequence_as(Empty(), []), Empty())
        self.assertEqualStrict(
            t.pack_sequence_as(Point(y=20, x=10), [1, 2]), Point(x=1, y=2)
        )
        self.assertEqualStrict(t.pack_sequence_as({}, []), {})
        self.assertEqualStrict(t.pack_sequence_as({"a": 10}, [1]), {"a": 1})
        self.assertEqualStrict(
            t.pack_sequence_as({"b": 20, "a": 10}, [1, 2]), {"a": 1, "b": 2}
        )
        self.assertEqualStrict(
            t.pack_sequence_as(OrderedDict(), []), OrderedDict()
        )
        self.assertEqualStrict(
            t.pack_sequence_as(OrderedDict([("a", 10)]), [1]),
            OrderedDict([("a", 1)]),
        )
        self.assertEqualStrict(
            t.pack_sequence_as(OrderedDict([("b", 20), ("a", 10)]), [2, 1]),
            OrderedDict([("b", 2), ("a", 1)]),
        )
        self.assertEqualStrict(
            t.pack_sequence_as(defaultdict(default_value), []),
            defaultdict(default_value),
        )
        self.assertEqualStrict(
            t.pack_sequence_as(defaultdict(default_value, [("a", 10)]), [1]),
            defaultdict(default_value, [("a", 1)]),
        )
        self.assertEqualStrict(
            t.pack_sequence_as(
                defaultdict(default_value, [("b", 20), ("a", 10)]), [1, 2]
            ),
            defaultdict(default_value, [("a", 1), ("b", 2)]),
        )
        # Keras tracking wrappers.
        self.assertEqualStrict(
            t.pack_sequence_as(TrackedList([]), []), TrackedList([])
        )
        self.assertEqualStrict(
            t.pack_sequence_as(TrackedList([10]), [1]), TrackedList([1])
        )
        self.assertEqualStrict(
            t.pack_sequence_as(TrackedList([10, 20]), [1, 2]),
            TrackedList([1, 2]),
        )
        self.assertEqualStrict(
            t.pack_sequence_as(TrackedSet([]), []), TrackedSet([])
        )
        self.assertEqualStrict(
            t.pack_sequence_as(TrackedSet([10]), [1]), TrackedSet([1])
        )
        self.assertEqualStrict(
            t.pack_sequence_as(TrackedSet([10, 20]), [1, 2]), TrackedSet([1, 2])
        )
        self.assertEqualStrict(
            t.pack_sequence_as(TrackedDict({}), []), TrackedDict({})
        )
        self.assertEqualStrict(
            t.pack_sequence_as(TrackedDict({"a": 10}), [1]),
            TrackedDict({"a": 1}),
        )
        self.assertEqualStrict(
            t.pack_sequence_as(TrackedDict({"b": 20, "a": 10}), [1, 2]),
            TrackedDict({"a": 1, "b": 2}),
        )
        # Deeper nested structures.
        self.assertEqualStrict(
            t.pack_sequence_as(
                (
                    {"b": [20, 30], "a": (10,)},
                    TrackedDict({"x": 40, "y": TrackedList([50, 60])}),
                    TrackedSet([70]),
                    Point(y=90, x=80),
                    100,
                ),
                [1, 2, 3, 4, 5, 6, 7, 8, 9, np.array([10])],
            ),
            (
                {"b": [2, 3], "a": (1,)},
                TrackedDict({"x": 4, "y": TrackedList([5, 6])}),
                TrackedSet([7]),
                Point(x=8, y=9),
                np.array([10]),
            ),
        )
        # Error cases.
        # NOTE(review): the bracketed parts of the regexes below are
        # character classes, not alternations (e.g. `[expected:|holds]`
        # matches any single one of those characters); they match more
        # loosely than the `(a|b)` form presumably intended.
        with self.assertRaisesRegex(TypeError, "[Ii]terable"):
            t.pack_sequence_as([10, 20], 1)
        with self.assertRaisesRegex(ValueError, "leaves.*[expected:|holds] 1"):
            t.pack_sequence_as(10, [])
        with self.assertRaisesRegex(ValueError, "leaves.*[expected:|holds] 1"):
            t.pack_sequence_as(10, [1, 2])
        with self.assertRaisesRegex(ValueError, "[Too few leaves|holds 2]"):
            t.pack_sequence_as([10, 20], [1])
        with self.assertRaisesRegex(ValueError, "[Too many leaves|holds 3]"):
            t.pack_sequence_as([10, 20], [1, 2, 3])
    @pytest.mark.skipif(backend.backend() != "tensorflow", reason="tf only")
    def test_pack_sequence_as_tf_wrappers(self, t):
        """Packing rebuilds TF wrapper types, not plain lists/dicts."""
        from tensorflow.python.trackable.data_structures import ListWrapper
        from tensorflow.python.trackable.data_structures import _DictWrapper
        self.assertEqualStrict(
            t.pack_sequence_as(ListWrapper([]), []), ListWrapper([])
        )
        self.assertEqualStrict(
            t.pack_sequence_as(ListWrapper([10]), [1]), ListWrapper([1])
        )
        self.assertEqualStrict(
            t.pack_sequence_as(ListWrapper([10, 20]), [1, 2]),
            ListWrapper([1, 2]),
        )
        self.assertEqualStrict(
            t.pack_sequence_as(_DictWrapper({}), []), _DictWrapper({})
        )
        self.assertEqualStrict(
            t.pack_sequence_as(_DictWrapper({"a": 10}), [1]),
            _DictWrapper({"a": 1}),
        )
        self.assertEqualStrict(
            t.pack_sequence_as(_DictWrapper({"b": 20, "a": 10}), [1, 2]),
            _DictWrapper({"b": 2, "a": 1}),
        )
    def test_map_structure_with_one_structure(self, t):
        """Single-structure `map_structure` preserves container types/order."""
        def f1(x):
            # Non-int leaves (e.g. np arrays) map to None.
            return x + 10 if isinstance(x, int) else None
        # Non-nested.
        self.assertEqualStrict(t.map_structure(f1, 1), 11)
        # Standard structures.
        self.assertEqualStrict(t.map_structure(f1, ()), ())
        self.assertEqualStrict(t.map_structure(f1, (1,)), (11,))
        self.assertEqualStrict(t.map_structure(f1, (1, 2)), (11, 12))
        self.assertEqualStrict(t.map_structure(f1, []), [])
        self.assertEqualStrict(t.map_structure(f1, [1]), [11])
        self.assertEqualStrict(t.map_structure(f1, [1, 2]), [11, 12])
        self.assertEqualStrict(t.map_structure(f1, deque([])), deque([]))
        self.assertEqualStrict(t.map_structure(f1, deque([1])), deque([11]))
        self.assertEqualStrict(
            t.map_structure(f1, deque([1, 2])), deque([11, 12])
        )
        self.assertEqualStrict(t.map_structure(f1, Empty()), Empty())
        self.assertEqualStrict(
            t.map_structure(f1, Point(y=2, x=1)), Point(x=11, y=12)
        )
        self.assertEqualStrict(
            t.map_structure(f1, {}),
            {},
        )
        self.assertEqualStrict(
            t.map_structure(f1, {"a": 1}),
            {"a": 11},
        )
        self.assertEqualStrict(
            t.map_structure(f1, {"b": 2, "a": 1}),
            {"a": 11, "b": 12},
        )
        self.assertEqualStrict(
            t.map_structure(f1, OrderedDict()),
            OrderedDict(),
        )
        self.assertEqualStrict(
            t.map_structure(f1, OrderedDict([("a", 1)])),
            OrderedDict([("a", 11)]),
        )
        self.assertEqualStrict(
            t.map_structure(f1, OrderedDict([("b", 2), ("a", 1)])),
            OrderedDict([("b", 12), ("a", 11)]),
        )
        self.assertEqualStrict(
            t.map_structure(f1, defaultdict(default_value)),
            defaultdict(default_value),
        )
        self.assertEqualStrict(
            t.map_structure(f1, defaultdict(default_value, [("a", 1)])),
            defaultdict(default_value, [("a", 11)]),
        )
        self.assertEqualStrict(
            t.map_structure(
                f1, defaultdict(default_value, [("b", 2), ("a", 1)])
            ),
            defaultdict(default_value, [("a", 11), ("b", 12)]),
        )
        # Keras tracking wrappers.
        self.assertEqualStrict(
            t.map_structure(f1, TrackedList([])), TrackedList([])
        )
        self.assertEqualStrict(
            t.map_structure(f1, TrackedList([1])), TrackedList([11])
        )
        self.assertEqualStrict(
            t.map_structure(f1, TrackedList([1, 2])), TrackedList([11, 12])
        )
        self.assertEqualStrict(
            t.map_structure(f1, TrackedSet([])), TrackedSet([])
        )
        self.assertEqualStrict(
            t.map_structure(f1, TrackedSet([1])), TrackedSet([11])
        )
        self.assertEqualStrict(
            t.map_structure(f1, TrackedSet([1, 2])), TrackedSet([11, 12])
        )
        self.assertEqualStrict(
            t.map_structure(f1, TrackedDict()),
            TrackedDict(),
        )
        self.assertEqualStrict(
            t.map_structure(f1, TrackedDict({"a": 1})),
            TrackedDict({"a": 11}),
        )
        self.assertEqualStrict(
            t.map_structure(f1, TrackedDict({"b": 2, "a": 1})),
            TrackedDict({"a": 11, "b": 12}),
        )
        # Deeper nested structures.
        self.assertEqualStrict(
            t.map_structure(
                f1,
                (
                    {"b": [2, 3], "a": (1,)},
                    TrackedDict({"x": 4, "y": TrackedList([5, 6])}),
                    TrackedSet([7]),
                    Point(y=9, x=8),
                    np.array([10]),
                ),
            ),
            (
                {"b": [12, 13], "a": (11,)},
                TrackedDict({"x": 14, "y": TrackedList([15, 16])}),
                TrackedSet([17]),
                Point(y=19, x=18),
                None,
            ),
        )
        # Error cases.
        with self.assertRaisesRegex(TypeError, "callable"):
            t.map_structure("bad", [1, 2])
        with self.assertRaisesRegex(ValueError, "at least one structure"):
            t.map_structure(f1)
    @pytest.mark.skipif(backend.backend() != "tensorflow", reason="tf only")
    def test_map_structure_with_one_structure_tf_wrappers(self, t):
        """Mapping over TF wrappers returns wrappers of the same type."""
        from tensorflow.python.trackable.data_structures import ListWrapper
        from tensorflow.python.trackable.data_structures import _DictWrapper
        def f1(x):
            return x + 10
        self.assertEqualStrict(
            t.map_structure(f1, ListWrapper([])), ListWrapper([])
        )
        self.assertEqualStrict(
            t.map_structure(f1, ListWrapper([1])), ListWrapper([11])
        )
        self.assertEqualStrict(
            t.map_structure(f1, ListWrapper([1, 2])), ListWrapper([11, 12])
        )
        self.assertEqualStrict(
            t.map_structure(f1, _DictWrapper()),
            _DictWrapper(),
        )
        self.assertEqualStrict(
            t.map_structure(f1, _DictWrapper({"a": 1})),
            _DictWrapper({"a": 11}),
        )
        self.assertEqualStrict(
            t.map_structure(f1, _DictWrapper({"b": 2, "a": 1})),
            _DictWrapper({"a": 11, "b": 12}),
        )
def test_map_structure_with_multiple_structures(self, t):
def f2(x, y):
return x + y if isinstance(x, int) and isinstance(y, int) else None
# Non-nested.
self.assertEqualStrict(t.map_structure(f2, 1, 10), 11)
# Standard structures.
self.assertEqualStrict(t.map_structure(f2, ()), ())
self.assertEqualStrict(t.map_structure(f2, (1,), (10,)), (11,))
self.assertEqualStrict(t.map_structure(f2, (1, 2), (10, 20)), (11, 22))
self.assertEqualStrict(t.map_structure(f2, []), [])
self.assertEqualStrict(t.map_structure(f2, [1], [10]), [11])
self.assertEqualStrict(t.map_structure(f2, [1, 2], [10, 20]), [11, 22])
self.assertEqualStrict(t.map_structure(f2, deque([])), deque([]))
self.assertEqualStrict(
t.map_structure(f2, deque([1]), deque([10])), deque([11])
)
self.assertEqualStrict(
t.map_structure(f2, deque([1, 2]), deque([10, 20])), deque([11, 22])
)
self.assertEqualStrict(t.map_structure(f2, Empty()), Empty())
self.assertEqualStrict(
t.map_structure(f2, Point(y=2, x=1), Point(x=10, y=20)),
Point(x=11, y=22),
)
self.assertEqualStrict(t.map_structure(f2, {}), {})
self.assertEqualStrict(
t.map_structure(f2, {"a": 1}, {"a": 10}), {"a": 11}
)
self.assertEqualStrict(
t.map_structure(f2, {"b": 2, "a": 1}, {"a": 10, "b": 20}),
{"a": 11, "b": 22},
)
self.assertEqualStrict(
t.map_structure(f2, OrderedDict()),
OrderedDict(),
)
self.assertEqualStrict(
t.map_structure(
f2, OrderedDict([("a", 1)]), OrderedDict([("a", 10)])
),
OrderedDict([("a", 11)]),
)
self.assertEqualStrict(
t.map_structure(
f2,
OrderedDict([("b", 2), ("a", 1)]),
OrderedDict([("b", 20), ("a", 10)]),
),
OrderedDict([("b", 22), ("a", 11)]),
)
self.assertEqualStrict(
t.map_structure(
f2, defaultdict(default_value), defaultdict(default_value)
),
defaultdict(default_value),
)
self.assertEqualStrict(
t.map_structure(
f2,
defaultdict(default_value, [("a", 1)]),
defaultdict(default_value, [("a", 10)]),
),
defaultdict(default_value, [("a", 11)]),
)
self.assertEqualStrict(
t.map_structure(
f2,
defaultdict(default_value, [("b", 2), ("a", 1)]),
defaultdict(default_value, [("a", 10), ("b", 20)]),
),
defaultdict(default_value, [("a", 11), ("b", 22)]),
)
# Keras tracking wrappers.
self.assertEqualStrict(
t.map_structure(
f2,
TrackedList([]),
TrackedList([]),
),
TrackedList([]),
)
self.assertEqualStrict(
t.map_structure(
f2,
TrackedList([1]),
TrackedList([10]),
),
TrackedList([11]),
)
self.assertEqualStrict(
t.map_structure(
f2,
TrackedList([1, 2]),
TrackedList([10, 20]),
),
TrackedList([11, 22]),
)
# Known limitation of the dm-tree implementation:
# Registered classes are not handled when mapping multiple
# structures at once. TrackedSet is the only problematic one.
if not self.is_dmtree(t):
self.assertEqualStrict(
t.map_structure(
f2,
TrackedSet([]),
TrackedSet([]),
),
TrackedSet([]),
)
self.assertEqualStrict(
t.map_structure(
f2,
TrackedSet([1]),
TrackedSet([10]),
),
TrackedSet([11]),
)
self.assertEqualStrict(
t.map_structure(
f2,
TrackedSet([1, 2]),
TrackedSet([10, 20]),
),
TrackedSet([11, 22]),
)
self.assertEqualStrict(
t.map_structure(
f2,
TrackedDict({}),
TrackedDict({}),
),
TrackedDict({}),
)
self.assertEqualStrict(
t.map_structure(
f2,
TrackedDict({"a": 1}),
TrackedDict({"a": 10}),
),
TrackedDict({"a": 11}),
)
self.assertEqualStrict(
t.map_structure(
f2,
TrackedDict({"b": 2, "a": 1}),
TrackedDict({"a": 10, "b": 20}),
),
TrackedDict({"a": 11, "b": 22}),
)
# Deeper nested structures.
self.assertEqualStrict(
t.map_structure(
f2,
(
{"b": [2, 3], "a": (1,)},
TrackedDict({"x": 4, "y": TrackedList([5, 6])}),
TrackedSet([7]),
Point(y=9, x=8),
np.array([10]),
),
(
{"b": [20, 30], "a": (10,)},
TrackedDict({"x": 40, "y": TrackedList([50, 60])}),
TrackedSet([70]),
Point(y=90, x=80),
np.array([100]),
),
),
(
{"b": [22, 33], "a": (11,)},
TrackedDict({"x": 44, "y": TrackedList([55, 66])}),
# Known limitation of the dm-tree implementation:
# Registered classes are not handled when mapping multiple
# structures at once. TrackedSet is the only problematic one.
None if self.is_dmtree(t) else TrackedSet([77]),
Point(y=99, x=88),
None,
),
)
# Error cases.
# list, tuple, deque and namedtuple are not considered equivalent.
# Test all 6 combinations:
# tuple, list.
with self.assertRaisesRegex(ValueError, "tuple"):
t.map_structure(f2, (), [])
# tuple, deque.
with self.assertRaisesRegex(ValueError, "tuple"):
t.map_structure(f2, (), deque())
# tuple, namedtuple.
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/tree/__init__.py | keras/src/tree/__init__.py | from keras.src.tree.tree_api import assert_same_paths
from keras.src.tree.tree_api import assert_same_structure
from keras.src.tree.tree_api import flatten
from keras.src.tree.tree_api import flatten_with_path
from keras.src.tree.tree_api import is_nested
from keras.src.tree.tree_api import lists_to_tuples
from keras.src.tree.tree_api import map_shape_structure
from keras.src.tree.tree_api import map_structure
from keras.src.tree.tree_api import map_structure_up_to
from keras.src.tree.tree_api import pack_sequence_as
from keras.src.tree.tree_api import register_tree_node_class
from keras.src.tree.tree_api import traverse
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/ops/einops.py | keras/src/ops/einops.py | import re
from keras.src.api_export import keras_export
from keras.src.backend import KerasTensor
from keras.src.backend import any_symbolic_tensors
from keras.src.ops.core import shape
from keras.src.ops.numpy import prod
from keras.src.ops.numpy import reshape
from keras.src.ops.numpy import transpose
from keras.src.ops.operation import Operation
def _create_axes_map(axes, input_shape, axes_lengths):
    """Map every axis name appearing in `axes` to its dimension size.

    For a grouped axis spec such as "(h1 h)", sizes of the named inner
    axes are looked up in `axes_lengths`; the remaining unknown inner
    axis (if any) is inferred by dividing the actual dimension by the
    product of the known sizes. `axes_lengths` is updated in place with
    any inferred sizes.
    """
    axes_map = {}
    for axis, dim in zip(axes, input_shape):
        # Check for grouped axes pattern, e.g., "(h1 h)"
        grouped_axes = re.match(r"\(([\w\s]+)\)", axis)
        if grouped_axes:
            inner_axes = grouped_axes.group(1).split()
            known_axes = [a for a in inner_axes if a in axes_lengths]
            inferred_axes = [a for a in inner_axes if a not in axes_lengths]
            if inferred_axes:
                # Only the first unknown inner axis is inferred; more than
                # one unknown in a group is underdetermined.
                inferred_axis = inferred_axes[0]
                # NOTE(review): `prod` is `keras.ops.numpy.prod`, so
                # `known_product` may be a backend tensor rather than a
                # plain int — confirm this is intended for shape math.
                known_product = prod([axes_lengths[a] for a in known_axes])
                axes_lengths[inferred_axis] = dim // known_product
            axes_map.update({a: axes_lengths[a] for a in inner_axes})
        else:
            axes_map[axis] = dim
    return axes_map
def _create_grouped_axes(axes):
grouped_output_axes = []
for axis in axes:
grouped_axes = re.match(r"\(([\w\s]+)\)", axis)
if grouped_axes:
inner_axes = grouped_axes.group(1).split()
grouped_output_axes.append(inner_axes)
else:
grouped_output_axes.append([axis])
return grouped_output_axes
def _flatten_group(axes):
return [x for xs in axes for x in xs]
def _get_transpose_order(from_shape, to_shape):
flattened_from_shape = _flatten_group(_create_grouped_axes(from_shape))
return [flattened_from_shape.index(dim) for dim in to_shape]
def _compute_output_shape(axes_map, grouped_axes):
output_shape = []
for group in grouped_axes:
size = 1
for axis in group:
size *= axes_map[axis]
output_shape.append(size)
return tuple(output_shape)
def _compute_decomposed_shape(input_axes, axes_lengths, axes_map):
reshaped_input_axes = []
reshaped_sizes = []
for axis in input_axes:
if "(" in axis: # Decomposed axis
inner_axes = re.findall(r"\w+", axis)
sizes = [axes_lengths[a] for a in inner_axes]
reshaped_input_axes.extend(inner_axes)
reshaped_sizes.extend(sizes)
else:
reshaped_input_axes.append(axis)
reshaped_sizes.append(axes_map[axis])
return reshaped_sizes
class Rearrange(Operation):
    """Symbolic `Operation` wrapper for `rearrange`."""
    def call(self, tensor, pattern, **axes_lengths):
        return rearrange(tensor, pattern, **axes_lengths)
    def compute_output_spec(self, tensor, pattern, **axes_lengths):
        # Parse the "input -> output" pattern into per-side axis specs.
        input_pattern, output_pattern = re.split(r"\s*->\s*", pattern)
        input_axes = re.findall(r"\w+|\(.*?\)", input_pattern)
        output_axes = re.findall(r"\w+|\(.*?\)", output_pattern)
        input_shape = shape(tensor)
        # Resolve each axis name to a size, then derive the grouped
        # output shape without touching real data.
        axes_map = _create_axes_map(input_axes, input_shape, axes_lengths)
        grouped_output_axes = _create_grouped_axes(output_axes)
        output_shape = _compute_output_shape(axes_map, grouped_output_axes)
        return KerasTensor(shape=output_shape, dtype=tensor.dtype)
@keras_export("keras.ops.rearrange")
def rearrange(tensor, pattern, **axes_lengths):
    """Rearranges the axes of a Keras tensor according to a specified pattern,
    einops-style.
    Args:
        tensor: Input Keras tensor.
        pattern: String describing the rearrangement in einops notation.
        **axes_lengths: Keyword arguments specifying lengths of axes
            when axes decomposition is used.
    Returns:
        Tensor: A Keras tensor with rearranged axes.
    Follows the logic of:
    1. If decomposition is needed, reshape to match decomposed dimensions.
    2. Permute known and inferred axes to match the form of the output.
    3. Reshape to match the desired output shape.
    Example Usage:
    ```
    >>> import numpy as np
    >>> from keras.ops import rearrange
    >>> images = np.random.rand(32, 30, 40, 3) # BHWC format
    # Reordering to BCHW
    >>> rearrange(images, 'b h w c -> b c h w').shape
    TensorShape([32, 3, 30, 40])
    # "Merge" along first axis - concat images from a batch
    >>> rearrange(images, 'b h w c -> (b h) w c').shape
    TensorShape([960, 40, 3])
    # "Merge" along second axis - concat images horizontally
    >>> rearrange(images, 'b h w c -> h (b w) c').shape
    TensorShape([30, 1280, 3])
    # Flatten images into a CHW vector
    >>> rearrange(images, 'b h w c -> b (c h w)').shape
    TensorShape([32, 3600])
    # Decompose H and W axes into 4 smaller patches
    >>> rearrange(images, 'b (h1 h) (w1 w) c -> (b h1 w1) h w c', h1=2, w1=2).shape
    TensorShape([128, 15, 20, 3])
    # Space-to-depth decomposition of input axes
    >>> rearrange(images, 'b (h h1) (w w1) c -> b h w (c h1 w1)', h1=2, w1=2).shape
    TensorShape([32, 15, 20, 12])
    ```
    """ # noqa: E501
    if any_symbolic_tensors((tensor,)):
        return Rearrange().symbolic_call(tensor, pattern, **axes_lengths)
    # Split the input and output patterns
    input_pattern, output_pattern = re.split(r"\s*->\s*", pattern)
    input_axes = re.findall(r"\w+|\(.*?\)", input_pattern)
    output_axes = re.findall(r"\w+|\(.*?\)", output_pattern)
    input_shape = shape(tensor)
    # Create axes map, and flattened output group
    # NOTE: `_create_axes_map` also fills `axes_lengths` in place with
    # any axis sizes it infers from the input shape, so the decomposed
    # shape computed below can rely on it.
    axes_map = _create_axes_map(input_axes, input_shape, axes_lengths)
    grouped_output_axes = _create_grouped_axes(output_axes)
    flattened_output_axes = _flatten_group(grouped_output_axes)
    # 1. Axes decomposition
    decomposed_shapes = _compute_decomposed_shape(
        input_axes, axes_lengths, axes_map
    )
    # Skip the reshape when the tensor is already in decomposed form.
    if decomposed_shapes != tensor.shape:
        tensor = reshape(tensor, decomposed_shapes)
    # 2. Transpose to match target shape
    permute_order = _get_transpose_order(input_axes, flattened_output_axes)
    tensor = transpose(tensor, permute_order)
    # 3. Reshape to final target shape
    output_shape = _compute_output_shape(axes_map, grouped_output_axes)
    tensor = reshape(tensor, output_shape)
    return tensor
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/ops/image.py | keras/src/ops/image.py | from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.backend import KerasTensor
from keras.src.backend import any_symbolic_tensors
from keras.src.ops.operation import Operation
from keras.src.ops.operation_utils import compute_conv_output_shape
class RGBToGrayscale(Operation):
    """Symbolic `Operation` wrapper converting RGB images to grayscale."""
    def __init__(self, data_format=None, *, name=None):
        super().__init__(name=name)
        self.data_format = backend.standardize_data_format(data_format)
    def call(self, images):
        return backend.image.rgb_to_grayscale(
            images, data_format=self.data_format
        )
    def compute_output_spec(self, images):
        out_shape = list(images.shape)
        if len(out_shape) not in (3, 4):
            raise ValueError(
                "Invalid images rank: expected rank 3 (single image) "
                "or rank 4 (batch of images). "
                f"Received: images.shape={out_shape}"
            )
        # Grayscale output has a single channel; where that channel axis
        # sits depends on the data format.
        channel_axis = -1 if self.data_format == "channels_last" else -3
        out_shape[channel_axis] = 1
        return KerasTensor(shape=out_shape, dtype=images.dtype)
@keras_export("keras.ops.image.rgb_to_grayscale")
def rgb_to_grayscale(images, data_format=None):
    """Convert RGB images to grayscale.
    Accepts a single image (rank 3) or a batch of images (rank 4) and
    returns images with a single channel.
    Args:
        images: Input image or batch of images. Must be 3D or 4D.
        data_format: A string specifying the data format of the input
            tensor, either `"channels_last"` (shape
            `(batch, height, width, channels)`) or `"channels_first"`
            (shape `(batch, channels, height, width)`). Defaults to
            `keras.config.image_data_format` when unspecified.
    Returns:
        Grayscale image or batch of grayscale images.
    Examples:
    >>> import numpy as np
    >>> from keras import ops
    >>> ops.image.rgb_to_grayscale(np.random.random((2, 4, 4, 3))).shape
    (2, 4, 4, 1)
    >>> ops.image.rgb_to_grayscale(np.random.random((4, 4, 3))).shape
    (4, 4, 1)
    >>> ops.image.rgb_to_grayscale(
    ...     np.random.random((2, 3, 4, 4)), data_format="channels_first"
    ... ).shape
    (2, 1, 4, 4)
    """
    # Defer to the symbolic Operation when tracing; otherwise dispatch
    # directly to the backend implementation.
    if any_symbolic_tensors((images,)):
        op = RGBToGrayscale(data_format=data_format)
        return op.symbolic_call(images)
    return backend.image.rgb_to_grayscale(images, data_format=data_format)
class RGBToHSV(Operation):
    """Symbolic `Operation` wrapper converting RGB images to HSV."""
    def __init__(self, data_format=None, *, name=None):
        super().__init__(name=name)
        self.data_format = backend.standardize_data_format(data_format)
    def call(self, images):
        return backend.image.rgb_to_hsv(images, data_format=self.data_format)
    def compute_output_spec(self, images):
        rank = len(images.shape)
        if rank not in (3, 4):
            raise ValueError(
                "Invalid images rank: expected rank 3 (single image) "
                "or rank 4 (batch of images). "
                f"Received: images.shape={list(images.shape)}"
            )
        # The conversion is only defined for floating-point inputs.
        if not backend.is_float_dtype(images.dtype):
            raise ValueError(
                "Invalid images dtype: expected float dtype. "
                f"Received: images.dtype={images.dtype}"
            )
        # Color-space conversion keeps shape and dtype unchanged.
        return KerasTensor(shape=list(images.shape), dtype=images.dtype)
@keras_export("keras.ops.image.rgb_to_hsv")
def rgb_to_hsv(images, data_format=None):
    """Convert RGB images to HSV.
    `images` must be of float dtype, and the output is only well defined if the
    values in `images` are in `[0, 1]`.
    All HSV values are in `[0, 1]`. A hue of `0` corresponds to pure red, `1/3`
    is pure green, and `2/3` is pure blue.
    Args:
        images: Input image or batch of images. Must be 3D or 4D.
        data_format: A string specifying the data format of the input tensor.
            It can be either `"channels_last"` or `"channels_first"`.
            `"channels_last"` corresponds to inputs with shape
            `(batch, height, width, channels)`, while `"channels_first"`
            corresponds to inputs with shape `(batch, channels, height, width)`.
            If not specified, the value will default to
            `keras.config.image_data_format`.
    Returns:
        HSV image or batch of HSV images.
    Examples:
    >>> import numpy as np
    >>> from keras import ops
    >>> x = np.random.random((2, 4, 4, 3))
    >>> y = ops.image.rgb_to_hsv(x)
    >>> y.shape
    (2, 4, 4, 3)
    >>> x = np.random.random((4, 4, 3)) # Single RGB image
    >>> y = ops.image.rgb_to_hsv(x)
    >>> y.shape
    (4, 4, 3)
    >>> x = np.random.random((2, 3, 4, 4))
    >>> y = ops.image.rgb_to_hsv(x, data_format="channels_first")
    >>> y.shape
    (2, 3, 4, 4)
    """
    # Defer to the symbolic Operation when tracing; otherwise dispatch
    # directly to the backend implementation.
    if any_symbolic_tensors((images,)):
        return RGBToHSV(data_format=data_format).symbolic_call(images)
    return backend.image.rgb_to_hsv(images, data_format=data_format)
class HSVToRGB(Operation):
    """Symbolic `Operation` wrapper converting HSV images to RGB."""
    def __init__(self, data_format=None, *, name=None):
        super().__init__(name=name)
        self.data_format = backend.standardize_data_format(data_format)
    def call(self, images):
        return backend.image.hsv_to_rgb(images, data_format=self.data_format)
    def compute_output_spec(self, images):
        rank = len(images.shape)
        if rank not in (3, 4):
            raise ValueError(
                "Invalid images rank: expected rank 3 (single image) "
                "or rank 4 (batch of images). "
                f"Received: images.shape={list(images.shape)}"
            )
        # The conversion is only defined for floating-point inputs.
        if not backend.is_float_dtype(images.dtype):
            raise ValueError(
                "Invalid images dtype: expected float dtype. "
                f"Received: images.dtype={images.dtype}"
            )
        # Color-space conversion keeps shape and dtype unchanged.
        return KerasTensor(shape=list(images.shape), dtype=images.dtype)
@keras_export("keras.ops.image.hsv_to_rgb")
def hsv_to_rgb(images, data_format=None):
    """Convert HSV images to RGB.
    `images` must be of float dtype, and the output is only well defined if the
    values in `images` are in `[0, 1]`.
    Args:
        images: Input image or batch of images. Must be 3D or 4D.
        data_format: A string specifying the data format of the input tensor.
            It can be either `"channels_last"` or `"channels_first"`.
            `"channels_last"` corresponds to inputs with shape
            `(batch, height, width, channels)`, while `"channels_first"`
            corresponds to inputs with shape `(batch, channels, height, width)`.
            If not specified, the value will default to
            `keras.config.image_data_format`.
    Returns:
        RGB image or batch of RGB images.
    Examples:
    >>> import numpy as np
    >>> from keras import ops
    >>> x = np.random.random((2, 4, 4, 3))
    >>> y = ops.image.hsv_to_rgb(x)
    >>> y.shape
    (2, 4, 4, 3)
    >>> x = np.random.random((4, 4, 3)) # Single HSV image
    >>> y = ops.image.hsv_to_rgb(x)
    >>> y.shape
    (4, 4, 3)
    >>> x = np.random.random((2, 3, 4, 4))
    >>> y = ops.image.hsv_to_rgb(x, data_format="channels_first")
    >>> y.shape
    (2, 3, 4, 4)
    """
    # Defer to the symbolic Operation when tracing; otherwise dispatch
    # directly to the backend implementation.
    if any_symbolic_tensors((images,)):
        return HSVToRGB(data_format=data_format).symbolic_call(images)
    return backend.image.hsv_to_rgb(images, data_format=data_format)
class Resize(Operation):
    """Symbolic `Operation` wrapper for `resize`.

    Stores the resize configuration and computes the static output shape
    by replacing the spatial dimensions with the target `size`.
    """
    def __init__(
        self,
        size,
        interpolation="bilinear",
        antialias=False,
        crop_to_aspect_ratio=False,
        pad_to_aspect_ratio=False,
        fill_mode="constant",
        fill_value=0.0,
        data_format=None,
        *,
        name=None,
    ):
        super().__init__(name=name)
        self.size = tuple(size)
        self.interpolation = interpolation
        self.antialias = antialias
        self.crop_to_aspect_ratio = crop_to_aspect_ratio
        self.pad_to_aspect_ratio = pad_to_aspect_ratio
        self.fill_mode = fill_mode
        self.fill_value = fill_value
        self.data_format = backend.standardize_data_format(data_format)
    def call(self, images):
        return _resize(
            images,
            self.size,
            interpolation=self.interpolation,
            antialias=self.antialias,
            data_format=self.data_format,
            crop_to_aspect_ratio=self.crop_to_aspect_ratio,
            pad_to_aspect_ratio=self.pad_to_aspect_ratio,
            fill_mode=self.fill_mode,
            fill_value=self.fill_value,
        )
    def compute_output_spec(self, images):
        images_shape = list(images.shape)
        if len(images_shape) not in (3, 4):
            raise ValueError(
                "Invalid images rank: expected rank 3 (single image) "
                "or rank 4 (batch of images). Received input with shape: "
                f"images.shape={images.shape}"
            )
        # Height/width axis positions depend on the data format.
        if self.data_format == "channels_last":
            height_axis, width_axis = -3, -2
        else:
            height_axis, width_axis = -2, -1
        # Output spatial dims are exactly the requested size.
        images_shape[height_axis] = self.size[0]
        images_shape[width_axis] = self.size[1]
        return KerasTensor(shape=images_shape, dtype=images.dtype)
@keras_export("keras.ops.image.resize")
def resize(
    images,
    size,
    interpolation="bilinear",
    antialias=False,
    crop_to_aspect_ratio=False,
    pad_to_aspect_ratio=False,
    fill_mode="constant",
    fill_value=0.0,
    data_format=None,
):
    """Resize images to size using the specified interpolation method.
    Args:
        images: Input image or batch of images. Must be 3D or 4D.
        size: Size of output image in `(height, width)` format.
        interpolation: Interpolation method. Available methods are `"nearest"`,
            `"bilinear"`, and `"bicubic"`. Defaults to `"bilinear"`.
        antialias: Whether to use an antialiasing filter when downsampling an
            image. Defaults to `False`.
        crop_to_aspect_ratio: If `True`, resize the images without aspect
            ratio distortion. When the original aspect ratio differs
            from the target aspect ratio, the output image will be
            cropped so as to return the
            largest possible window in the image (of size `(height, width)`)
            that matches the target aspect ratio. By default
            (`crop_to_aspect_ratio=False`), aspect ratio may not be preserved.
        pad_to_aspect_ratio: If `True`, pad the images without aspect
            ratio distortion. When the original aspect ratio differs
            from the target aspect ratio, the output image will be
            evenly padded on the short side.
        fill_mode: When using `pad_to_aspect_ratio=True`, padded areas
            are filled according to the given mode. Only `"constant"` is
            supported at this time
            (fill with constant value, equal to `fill_value`).
        fill_value: Float. Padding value to use when `pad_to_aspect_ratio=True`.
        data_format: A string specifying the data format of the input tensor.
            It can be either `"channels_last"` or `"channels_first"`.
            `"channels_last"` corresponds to inputs with shape
            `(batch, height, width, channels)`, while `"channels_first"`
            corresponds to inputs with shape `(batch, channels, height, width)`.
            If not specified, the value will default to
            `keras.config.image_data_format`.
    Returns:
        Resized image or batch of images.
    Examples:
    >>> x = np.random.random((2, 4, 4, 3)) # batch of 2 RGB images
    >>> y = keras.ops.image.resize(x, (2, 2))
    >>> y.shape
    (2, 2, 2, 3)
    >>> x = np.random.random((4, 4, 3)) # single RGB image
    >>> y = keras.ops.image.resize(x, (2, 2))
    >>> y.shape
    (2, 2, 3)
    >>> x = np.random.random((2, 3, 4, 4)) # batch of 2 RGB images
    >>> y = keras.ops.image.resize(x, (2, 2),
    ...     data_format="channels_first")
    >>> y.shape
    (2, 3, 2, 2)
    """
    # Validate arguments eagerly so misuse fails before dispatching to
    # either the symbolic or the eager path.
    if len(size) != 2:
        raise ValueError(
            "Expected `size` to be a tuple of 2 integers. "
            f"Received: size={size}"
        )
    if len(images.shape) < 3 or len(images.shape) > 4:
        raise ValueError(
            "Invalid images rank: expected rank 3 (single image) "
            "or rank 4 (batch of images). Received input with shape: "
            f"images.shape={images.shape}"
        )
    # The two aspect-ratio modes are mutually exclusive.
    if pad_to_aspect_ratio and crop_to_aspect_ratio:
        raise ValueError(
            "Only one of `pad_to_aspect_ratio` & `crop_to_aspect_ratio` "
            "can be `True`."
        )
    if any_symbolic_tensors((images,)):
        return Resize(
            size,
            interpolation=interpolation,
            antialias=antialias,
            data_format=data_format,
            crop_to_aspect_ratio=crop_to_aspect_ratio,
            pad_to_aspect_ratio=pad_to_aspect_ratio,
            fill_mode=fill_mode,
            fill_value=fill_value,
        ).symbolic_call(images)
    return _resize(
        images,
        size,
        interpolation=interpolation,
        antialias=antialias,
        crop_to_aspect_ratio=crop_to_aspect_ratio,
        data_format=data_format,
        pad_to_aspect_ratio=pad_to_aspect_ratio,
        fill_mode=fill_mode,
        fill_value=fill_value,
    )
def _resize(
    images,
    size,
    interpolation="bilinear",
    antialias=False,
    crop_to_aspect_ratio=False,
    pad_to_aspect_ratio=False,
    fill_mode="constant",
    fill_value=0.0,
    data_format=None,
):
    """Backend resize plus dtype restoration.

    Backends may return a float result for integer inputs; in that case
    the result is rounded and saturate-cast back to the input dtype.
    """
    resized = backend.image.resize(
        images,
        size,
        interpolation=interpolation,
        antialias=antialias,
        crop_to_aspect_ratio=crop_to_aspect_ratio,
        data_format=data_format,
        pad_to_aspect_ratio=pad_to_aspect_ratio,
        fill_mode=fill_mode,
        fill_value=fill_value,
    )
    if resized.dtype == images.dtype:
        # Only `torch` backend will cast result to original dtype with
        # correct rounding and without dtype overflow
        return resized
    if backend.is_int_dtype(images.dtype):
        # Round before the cast so integer results are nearest-valued
        # rather than truncated.
        resized = ops.round(resized)
    return ops.saturate_cast(resized, images.dtype)
class AffineTransform(Operation):
    """Symbolic `Operation` wrapper for `affine_transform`."""
    def __init__(
        self,
        interpolation="bilinear",
        fill_mode="constant",
        fill_value=0,
        data_format=None,
        *,
        name=None,
    ):
        super().__init__(name=name)
        self.interpolation = interpolation
        self.fill_mode = fill_mode
        self.fill_value = fill_value
        self.data_format = backend.standardize_data_format(data_format)
    def call(self, images, transform):
        return backend.image.affine_transform(
            images,
            transform,
            interpolation=self.interpolation,
            fill_mode=self.fill_mode,
            fill_value=self.fill_value,
            data_format=self.data_format,
        )
    def compute_output_spec(self, images, transform):
        # Only ranks are validated here; the output shape equals the
        # input images' shape.
        if len(images.shape) not in (3, 4):
            raise ValueError(
                "Invalid images rank: expected rank 3 (single image) "
                "or rank 4 (batch of images). Received input with shape: "
                f"images.shape={images.shape}"
            )
        if len(transform.shape) not in (1, 2):
            raise ValueError(
                "Invalid transform rank: expected rank 1 (single transform) "
                "or rank 2 (batch of transforms). Received input with shape: "
                f"transform.shape={transform.shape}"
            )
        return KerasTensor(images.shape, dtype=images.dtype)
@keras_export("keras.ops.image.affine_transform")
def affine_transform(
    images,
    transform,
    interpolation="bilinear",
    fill_mode="constant",
    fill_value=0,
    data_format=None,
):
    """Applies the given transform(s) to the image(s).
    Args:
        images: Input image or batch of images. Must be 3D or 4D.
        transform: Projective transform matrix/matrices. A vector of length 8 or
            tensor of size N x 8. If one row of transform is
            `[a0, a1, a2, b0, b1, b2, c0, c1]`, then it maps the output point
            `(x, y)` to a transformed input point
            `(x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k)`,
            where `k = c0 x + c1 y + 1`. The transform is inverted compared to
            the transform mapping input points to output points. Note that
            gradients are not backpropagated into transformation parameters.
            Note that `c0` and `c1` are only effective when using TensorFlow
            backend and will be considered as `0` when using other backends.
        interpolation: Interpolation method. Available methods are `"nearest"`,
            and `"bilinear"`. Defaults to `"bilinear"`.
        fill_mode: Points outside the boundaries of the input are filled
            according to the given mode. Available methods are `"constant"`,
            `"nearest"`, `"wrap"` and `"reflect"`. Defaults to `"constant"`.
            - `"reflect"`: `(d c b a | a b c d | d c b a)`
                The input is extended by reflecting about the edge of the last
                pixel.
            - `"constant"`: `(k k k k | a b c d | k k k k)`
                The input is extended by filling all values beyond
                the edge with the same constant value k specified by
                `fill_value`.
            - `"wrap"`: `(a b c d | a b c d | a b c d)`
                The input is extended by wrapping around to the opposite edge.
            - `"nearest"`: `(a a a a | a b c d | d d d d)`
                The input is extended by the nearest pixel.
        fill_value: Value used for points outside the boundaries of the input if
            `fill_mode="constant"`. Defaults to `0`.
        data_format: A string specifying the data format of the input tensor.
            It can be either `"channels_last"` or `"channels_first"`.
            `"channels_last"` corresponds to inputs with shape
            `(batch, height, width, channels)`, while `"channels_first"`
            corresponds to inputs with shape `(batch, channels, height, width)`.
            If not specified, the value will default to
            `keras.config.image_data_format`.
    Returns:
        Applied affine transform image or batch of images.
    Examples:
    >>> x = np.random.random((2, 64, 80, 3)) # batch of 2 RGB images
    >>> transform = np.array(
    ...     [
    ...         [1.5, 0, -20, 0, 1.5, -16, 0, 0], # zoom
    ...         [1, 0, -20, 0, 1, -16, 0, 0], # translation
    ...     ]
    ... )
    >>> y = keras.ops.image.affine_transform(x, transform)
    >>> y.shape
    (2, 64, 80, 3)
    >>> x = np.random.random((64, 80, 3)) # single RGB image
    >>> transform = np.array([1.0, 0.5, -20, 0.5, 1.0, -16, 0, 0]) # shear
    >>> y = keras.ops.image.affine_transform(x, transform)
    >>> y.shape
    (64, 80, 3)
    >>> x = np.random.random((2, 3, 64, 80)) # batch of 2 RGB images
    >>> transform = np.array(
    ...     [
    ...         [1.5, 0, -20, 0, 1.5, -16, 0, 0], # zoom
    ...         [1, 0, -20, 0, 1, -16, 0, 0], # translation
    ...     ]
    ... )
    >>> y = keras.ops.image.affine_transform(x, transform,
    ...     data_format="channels_first")
    >>> y.shape
    (2, 3, 64, 80)
    """
    # Defer to the symbolic Operation when tracing; otherwise dispatch
    # directly to the backend implementation.
    if any_symbolic_tensors((images, transform)):
        return AffineTransform(
            interpolation=interpolation,
            fill_mode=fill_mode,
            fill_value=fill_value,
            data_format=data_format,
        ).symbolic_call(images, transform)
    return backend.image.affine_transform(
        images,
        transform,
        interpolation=interpolation,
        fill_mode=fill_mode,
        fill_value=fill_value,
        data_format=data_format,
    )
class ExtractPatches(Operation):
    """Symbolic `Operation` wrapper for `extract_patches`.

    Normalizes `size`/`strides` and computes the static output shape via
    the conv output-shape rule used by the eager implementation.
    """
    def __init__(
        self,
        size,
        strides=None,
        dilation_rate=1,
        padding="valid",
        data_format=None,
        *,
        name=None,
    ):
        super().__init__(name=name)
        if isinstance(size, int):
            size = (size, size)
        elif len(size) != 2:
            # Validate here as well, matching `_extract_patches` and
            # `ExtractPatches3D.__init__`, so the symbolic path fails
            # with the same error as the eager path instead of failing
            # later inside shape computation.
            raise TypeError(
                "Invalid `size` argument. Expected an "
                f"int or a tuple of length 2. Received: size={size}"
            )
        self.size = size
        if strides is None:
            # Default: non-overlapping patches (stride == patch size).
            strides = size
        self.strides = strides
        self.dilation_rate = dilation_rate
        self.padding = padding
        self.data_format = backend.standardize_data_format(data_format)
    def call(self, images):
        return _extract_patches(
            images=images,
            size=self.size,
            strides=self.strides,
            dilation_rate=self.dilation_rate,
            padding=self.padding,
            data_format=self.data_format,
        )
    def compute_output_spec(self, images):
        images_shape = list(images.shape)
        original_ndim = len(images_shape)
        if self.data_format == "channels_last":
            channels_in = images_shape[-1]
        else:
            channels_in = images_shape[-3]
        if original_ndim == 3:
            # Temporarily add a batch dimension for the conv shape helper.
            images_shape = [1] + images_shape
        # Each patch is flattened into `patch_h * patch_w * channels`
        # output channels, mirroring the identity-kernel conv used by
        # `_extract_patches`.
        filters = self.size[0] * self.size[1] * channels_in
        kernel_size = (self.size[0], self.size[1])
        out_shape = compute_conv_output_shape(
            images_shape,
            filters,
            kernel_size,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate,
        )
        if original_ndim == 3:
            out_shape = out_shape[1:]
        return KerasTensor(shape=out_shape, dtype=images.dtype)
@keras_export("keras.ops.image.extract_patches")
def extract_patches(
    images,
    size,
    strides=None,
    dilation_rate=1,
    padding="valid",
    data_format=None,
):
    """Extracts patches from the image(s).
    Args:
        images: Input image or batch of images. Must be 3D or 4D.
        size: Patch size int or tuple (patch_height, patch_width)
        strides: strides along height and width. If not specified, or
            if `None`, it defaults to the same value as `size`.
        dilation_rate: This is the input stride, specifying how far two
            consecutive patch samples are in the input. For value other than 1,
            strides must be 1. NOTE: `strides > 1` is not supported in
            conjunction with `dilation_rate > 1`
        padding: The type of padding algorithm to use: `"same"` or `"valid"`.
        data_format: A string specifying the data format of the input tensor.
            It can be either `"channels_last"` or `"channels_first"`.
            `"channels_last"` corresponds to inputs with shape
            `(batch, height, width, channels)`, while `"channels_first"`
            corresponds to inputs with shape `(batch, channels, height, width)`.
            If not specified, the value will default to
            `keras.config.image_data_format`.
    Returns:
        Extracted patches 3D (if not batched) or 4D (if batched)
    Examples:
    >>> image = np.random.random(
    ...     (2, 20, 20, 3)
    ... ).astype("float32") # batch of 2 RGB images
    >>> patches = keras.ops.image.extract_patches(image, (5, 5))
    >>> patches.shape
    (2, 4, 4, 75)
    >>> image = np.random.random((20, 20, 3)).astype("float32") # 1 RGB image
    >>> patches = keras.ops.image.extract_patches(image, (3, 3), (1, 1))
    >>> patches.shape
    (18, 18, 27)
    """
    # Defer to the symbolic Operation when tracing; otherwise dispatch
    # to the eager implementation.
    if any_symbolic_tensors((images,)):
        return ExtractPatches(
            size=size,
            strides=strides,
            dilation_rate=dilation_rate,
            padding=padding,
            data_format=data_format,
        ).symbolic_call(images)
    return _extract_patches(
        images, size, strides, dilation_rate, padding, data_format=data_format
    )
def _extract_patches(
    images,
    size,
    strides=None,
    dilation_rate=1,
    padding="valid",
    data_format=None,
):
    """Extract patches by convolving with an identity kernel.

    An identity (`eye`) matrix reshaped to
    `(patch_h, patch_w, channels_in, out_dim)` makes each output channel
    copy exactly one input value of the patch, so a plain convolution
    gathers the flattened patches.
    """
    if isinstance(size, int):
        patch_h = patch_w = size
    elif len(size) == 2:
        patch_h, patch_w = size[0], size[1]
    else:
        raise TypeError(
            "Invalid `size` argument. Expected an "
            f"int or a tuple of length 2. Received: size={size}"
        )
    # Raises for unrecognized data formats, so `channels_in` is always
    # bound below.
    data_format = backend.standardize_data_format(data_format)
    if data_format == "channels_last":
        channels_in = images.shape[-1]
    elif data_format == "channels_first":
        channels_in = images.shape[-3]
    if not strides:
        # Default: non-overlapping patches (stride == patch size).
        strides = size
    out_dim = patch_h * patch_w * channels_in
    kernel = backend.numpy.eye(out_dim, dtype=images.dtype)
    kernel = backend.numpy.reshape(
        kernel, (patch_h, patch_w, channels_in, out_dim)
    )
    _unbatched = False
    if len(images.shape) == 3:
        # Add a batch dimension for the conv; removed again below.
        _unbatched = True
        images = backend.numpy.expand_dims(images, axis=0)
    patches = backend.nn.conv(
        inputs=images,
        kernel=kernel,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilation_rate=dilation_rate,
    )
    if _unbatched:
        patches = backend.numpy.squeeze(patches, axis=0)
    return patches
class ExtractPatches3D(Operation):
    """Symbolic `Operation` wrapper for 3D (volumetric) patch extraction.

    Normalizes `size`/`strides` to 3-tuples and computes the static
    output shape via the conv output-shape rule.
    """
    def __init__(
        self,
        size,
        strides=None,
        dilation_rate=1,
        padding="valid",
        data_format=None,
        *,
        name=None,
    ):
        super().__init__(name=name)
        if isinstance(size, int):
            size = (size, size, size)
        elif len(size) != 3:
            raise TypeError(
                "Invalid `size` argument. Expected an "
                f"int or a tuple of length 3. Received: size={size}"
            )
        self.size = size
        if strides is not None:
            if isinstance(strides, int):
                strides = (strides, strides, strides)
            elif len(strides) != 3:
                raise ValueError(f"Invalid `strides` argument. Got: {strides}")
        else:
            # Default: non-overlapping patches (stride == patch size).
            strides = size
        self.strides = strides
        self.dilation_rate = dilation_rate
        self.padding = padding
        self.data_format = backend.standardize_data_format(data_format)
    def call(self, volumes):
        return _extract_patches_3d(
            volumes,
            self.size,
            self.strides,
            self.dilation_rate,
            self.padding,
            self.data_format,
        )
    def compute_output_spec(self, volumes):
        volumes_shape = list(volumes.shape)
        original_ndim = len(volumes_shape)
        strides = self.strides
        if self.data_format == "channels_last":
            channels_in = volumes_shape[-1]
        else:
            channels_in = volumes_shape[-4]
        if original_ndim == 4:
            # Temporarily add a batch dimension for the conv shape helper.
            volumes_shape = [1] + volumes_shape
        # Each 3D patch is flattened into d * h * w * channels outputs.
        filters = self.size[0] * self.size[1] * self.size[2] * channels_in
        kernel_size = (self.size[0], self.size[1], self.size[2])
        out_shape = compute_conv_output_shape(
            volumes_shape,
            filters,
            kernel_size,
            strides=strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate,
        )
        if original_ndim == 4:
            out_shape = out_shape[1:]
        return KerasTensor(shape=out_shape, dtype=volumes.dtype)
def _extract_patches_3d(
    volumes,
    size,
    strides=None,
    dilation_rate=1,
    padding="valid",
    data_format=None,
):
    """Eager implementation of `extract_patches_3d`.

    Uses a 3D convolution with an identity kernel so that every output
    channel copies exactly one (depth, row, col, channel) position of
    the sliding window.
    """
    # Normalize `size` into a (depth, height, width) triple.
    if isinstance(size, int):
        window_d = window_h = window_w = size
    elif len(size) == 3:
        window_d, window_h, window_w = size
    else:
        raise TypeError(
            "Invalid `size` argument. Expected an "
            f"int or a tuple of length 3. Received: size={size}"
        )
    # Strides default to the patch size (non-overlapping patches).
    if strides is None:
        strides = size
    if isinstance(strides, int):
        strides = (strides, strides, strides)
    if len(strides) != 3:
        raise ValueError(f"Invalid `strides` argument. Got: {strides}")
    data_format = backend.standardize_data_format(data_format)
    if data_format == "channels_last":
        channels_in = volumes.shape[-1]
    elif data_format == "channels_first":
        channels_in = volumes.shape[-4]
    num_outputs = window_d * window_h * window_w * channels_in
    identity = backend.numpy.eye(num_outputs, dtype=volumes.dtype)
    kernel = backend.numpy.reshape(
        identity, (window_d, window_h, window_w, channels_in, num_outputs)
    )
    # Rank-4 inputs are unbatched; add a batch axis for the conv call
    # and strip it again afterwards.
    needs_squeeze = len(volumes.shape) == 4
    if needs_squeeze:
        volumes = backend.numpy.expand_dims(volumes, axis=0)
    patches = backend.nn.conv(
        inputs=volumes,
        kernel=kernel,
        strides=strides,
        padding=padding,
        data_format=data_format,
        dilation_rate=dilation_rate,
    )
    if needs_squeeze:
        patches = backend.numpy.squeeze(patches, axis=0)
    return patches
@keras_export("keras.ops.image.extract_patches_3d")
def extract_patches_3d(
    volumes,
    size,
    strides=None,
    dilation_rate=1,
    padding="valid",
    data_format=None,
):
    """Extracts patches from the volume(s).

    Args:
        volumes: Input volume or batch of volumes. Must be 4D or 5D.
        size: Patch size int or tuple (patch_depth, patch_height, patch_width)
        strides: strides along depth, height, and width. If not specified, or
            if `None`, it defaults to the same value as `size`.
        dilation_rate: This is the input stride, specifying how far two
            consecutive patch samples are in the input. Note that using
            `dilation_rate > 1` is not supported in conjunction with
            `strides > 1` on the TensorFlow backend.
        padding: The type of padding algorithm to use: `"same"` or `"valid"`.
        data_format: A string specifying the data format of the input tensor.
            It can be either `"channels_last"` or `"channels_first"`.
            `"channels_last"` corresponds to inputs with shape
            `(batch, depth, height, width, channels)`, while `"channels_first"`
            corresponds to inputs with shape
            `(batch, channels, depth, height, width)`. If not specified,
            the value will default to `keras.config.image_data_format()`.

    Returns:
        Extracted patches 4D (if not batched) or 5D (if batched)

    Examples:

    >>> import numpy as np
    >>> import keras
    >>> # Batched case
    >>> volumes = np.random.random(
    ...     (2, 10, 10, 10, 3)
    ... ).astype("float32")  # batch of 2 volumes
    >>> patches = keras.ops.image.extract_patches_3d(volumes, (3, 3, 3))
    >>> patches.shape
    (2, 3, 3, 3, 81)

    >>> # Unbatched case
    >>> volume = np.random.random((10, 10, 10, 3)).astype("float32")  # 1 volume
    >>> patches = keras.ops.image.extract_patches_3d(volume, (3, 3, 3))
    >>> patches.shape
    (3, 3, 3, 81)
    """
    # Symbolic (KerasTensor) inputs go through the op so the output
    # shape can be inferred; concrete tensors run the eager path.
    if any_symbolic_tensors((volumes,)):
        return ExtractPatches3D(
            size=size,
            strides=strides,
            dilation_rate=dilation_rate,
            padding=padding,
            data_format=data_format,
        ).symbolic_call(volumes)
    return _extract_patches_3d(
        volumes, size, strides, dilation_rate, padding, data_format=data_format
    )
class MapCoordinates(Operation):
def __init__(self, order, fill_mode="constant", fill_value=0, *, name=None):
super().__init__(name=name)
self.order = order
self.fill_mode = fill_mode
self.fill_value = fill_value
def call(self, inputs, coordinates):
return backend.image.map_coordinates(
inputs,
coordinates,
order=self.order,
fill_mode=self.fill_mode,
fill_value=self.fill_value,
)
def compute_output_spec(self, inputs, coordinates):
if coordinates.shape[0] != len(inputs.shape):
raise ValueError(
"First dim of `coordinates` must be the same as the rank of "
"`inputs`. "
f"Received inputs with shape: {inputs.shape} and coordinate "
f"leading dim of {coordinates.shape[0]}"
)
if len(coordinates.shape) < 2:
raise ValueError(
"Invalid coordinates rank: expected at least rank 2."
f" Received input with shape: {coordinates.shape}"
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/ops/operation_test.py | keras/src/ops/operation_test.py | import numpy as np
from conftest import skip_if_backend
from keras.src import backend
from keras.src import testing
from keras.src.backend.common import keras_tensor
from keras.src.ops import numpy as knp
from keras.src.ops import operation
class OpWithMultipleInputs(operation.Operation):
    """Test op with two required inputs and one optional input."""

    def call(self, x, y, z=None):
        # `z` has to be put first due to the order of operations issue with
        # torch backend.
        return 3 * z + x + 2 * y

    def compute_output_spec(self, x, y, z=None):
        # Output mirrors the shape/dtype of the first input.
        return keras_tensor.KerasTensor(x.shape, x.dtype)
class OpWithMultipleOutputs(operation.Operation):
    """Test op that returns a tuple of two tensors."""

    def call(self, x):
        return (x, x + 1)

    def compute_output_spec(self, x):
        # Two outputs, each with the input's shape/dtype.
        return (
            keras_tensor.KerasTensor(x.shape, x.dtype),
            keras_tensor.KerasTensor(x.shape, x.dtype),
        )
class OpWithCustomConstructor(operation.Operation):
    """Test op with explicit constructor args and a keyword-only `name`."""

    def __init__(self, alpha, *, beta=1.0, name=None):
        super().__init__(name=name)
        self.alpha = alpha
        self.beta = beta

    def call(self, x):
        return self.alpha * x + self.beta

    def compute_output_spec(self, x):
        return keras_tensor.KerasTensor(x.shape, x.dtype)
class OpWithCustomConstructorNoName(operation.Operation):
    """Test op whose constructor has no `name` parameter."""

    def __init__(self, alpha, beta=1.0):
        super().__init__()
        self.alpha = alpha
        self.beta = beta

    def call(self, x):
        return self.alpha * x + self.beta

    def compute_output_spec(self, x):
        return keras_tensor.KerasTensor(x.shape, x.dtype)
class OpWithKwargsInConstructor(operation.Operation):
    """Test op whose constructor forwards `**kwargs` to the base class."""

    def __init__(self, alpha, beta=1.0, **kwargs):
        super().__init__(**kwargs)
        self.alpha = alpha
        self.beta = beta

    def call(self, x):
        return self.alpha * x + self.beta

    def compute_output_spec(self, x):
        return keras_tensor.KerasTensor(x.shape, x.dtype)
class OpWithArgsInConstructor(operation.Operation):
    """Test op whose constructor takes variadic positional args.

    Used to verify that automatic `get_config` serialization fails for
    constructors with `*args` (they cannot be captured by name).
    """

    def __init__(self, alpha, *args, name=None):
        super().__init__(name=name)
        self.alpha = alpha

    def call(self, x):
        # Fixed: the original returned `self.alpha * x + self.beta`, but
        # `self.beta` is never assigned in `__init__`, so calling the op
        # raised AttributeError.
        return self.alpha * x

    def compute_output_spec(self, x):
        return keras_tensor.KerasTensor(x.shape, x.dtype)
class OpWithCustomConstructorGetConfig(operation.Operation):
    """Test op with a custom constructor and an explicit `get_config`."""

    def __init__(self, alpha, *, name=None):
        super().__init__(name=name)
        self.alpha = alpha

    def call(self, x):
        return self.alpha * x

    def compute_output_spec(self, x):
        return keras_tensor.KerasTensor(x.shape, x.dtype)

    def get_config(self):
        # Extend the base config (which holds `name`) with `alpha`.
        return {**super().get_config(), "alpha": self.alpha}
class OpWithKwargsInConstructorGetConfig(operation.Operation):
    """Test op with `**kwargs` in the constructor and a custom `get_config`."""

    def __init__(self, alpha, **kwargs):
        super().__init__(**kwargs)
        self.alpha = alpha

    def call(self, x):
        return self.alpha * x

    def compute_output_spec(self, x):
        return keras_tensor.KerasTensor(x.shape, x.dtype)

    def get_config(self):
        # Extend the base config (which holds `name`) with `alpha`.
        return {**super().get_config(), "alpha": self.alpha}
class OperationTest(testing.TestCase):
    """Tests for `Operation` calling, serialization, and naming."""

    def test_symbolic_call(self):
        """Symbolic calls create nodes and record inputs/outputs."""
        x = keras_tensor.KerasTensor(shape=(2, 3), name="x")
        y = keras_tensor.KerasTensor(shape=(2, 3), name="y")
        z = keras_tensor.KerasTensor(shape=(2, 3), name="z")

        # Positional arguments
        op = OpWithMultipleInputs(name="test_op")
        self.assertEqual(op.name, "test_op")
        out = op(x, y, z)
        self.assertIsInstance(out, keras_tensor.KerasTensor)
        self.assertEqual(out.shape, (2, 3))
        self.assertEqual(len(op._inbound_nodes), 1)
        self.assertEqual(op.input, [x, y, z])
        self.assertEqual(op.output, out)

        # Keyword arguments
        op = OpWithMultipleInputs(name="test_op")
        out = op(x=x, y=y, z=z)
        self.assertIsInstance(out, keras_tensor.KerasTensor)
        self.assertEqual(out.shape, (2, 3))
        self.assertEqual(len(op._inbound_nodes), 1)
        self.assertEqual(op.input, [x, y, z])
        self.assertEqual(op.output, out)

        # Mix
        op = OpWithMultipleInputs(name="test_op")
        out = op(x, y=y, z=z)
        self.assertIsInstance(out, keras_tensor.KerasTensor)
        self.assertEqual(out.shape, (2, 3))
        self.assertEqual(len(op._inbound_nodes), 1)
        self.assertEqual(op.input, [x, y, z])
        self.assertEqual(op.output, out)

        # Test op reuse
        prev_out = out
        out = op(x, y=y, z=z)
        self.assertIsInstance(out, keras_tensor.KerasTensor)
        self.assertEqual(out.shape, (2, 3))
        self.assertEqual(len(op._inbound_nodes), 2)
        # `op.output` refers to the first node's output.
        self.assertEqual(op.output, prev_out)

        # Test multiple outputs
        op = OpWithMultipleOutputs()
        out = op(x)
        self.assertIsInstance(out, tuple)
        self.assertEqual(len(out), 2)
        self.assertIsInstance(out[0], keras_tensor.KerasTensor)
        self.assertIsInstance(out[1], keras_tensor.KerasTensor)
        self.assertEqual(out[0].shape, (2, 3))
        self.assertEqual(out[1].shape, (2, 3))
        self.assertEqual(len(op._inbound_nodes), 1)
        self.assertEqual(op.output, list(out))

    def test_eager_call(self):
        """Eager calls with concrete tensors compute 3*z + x + 2*y."""
        x = knp.ones((2, 3))
        y = knp.ones((2, 3))
        z = knp.ones((2, 3))
        op = OpWithMultipleInputs(name="test_op")
        self.assertEqual(op.name, "test_op")

        # Positional arguments
        out = op(x, y, z)
        self.assertTrue(backend.is_tensor(out))
        self.assertAllClose(out, 6 * np.ones((2, 3)))

        # Keyword arguments
        out = op(x=x, y=y, z=z)
        self.assertTrue(backend.is_tensor(out))
        self.assertAllClose(out, 6 * np.ones((2, 3)))

        # Mixed arguments
        out = op(x, y=y, z=z)
        self.assertTrue(backend.is_tensor(out))
        self.assertAllClose(out, 6 * np.ones((2, 3)))

        # Test multiple outputs
        op = OpWithMultipleOutputs()
        out = op(x)
        self.assertEqual(len(out), 2)
        self.assertTrue(backend.is_tensor(out[0]))
        self.assertTrue(backend.is_tensor(out[1]))
        self.assertAllClose(out[0], np.ones((2, 3)))
        self.assertAllClose(out[1], np.ones((2, 3)) + 1)

    def test_serialization_with_default_init_and_get_config(self):
        """Default-constructor ops serialize only their name."""
        # Explicit name passed in constructor is serialized and deserialized.
        op = OpWithMultipleInputs(name="test_op")
        config = op.get_config()
        self.assertEqual(config, {"name": "test_op"})
        revived = OpWithMultipleInputs.from_config(config)
        self.assertEqual(revived.get_config(), config)
        self.assertEqual(revived.name, op.name)

        # Auto generated name is serialized and deserialized.
        op = OpWithMultipleInputs()
        config = op.get_config()
        self.assertEqual(config, {"name": op.name})
        revived = OpWithMultipleInputs.from_config(config)
        self.assertEqual(revived.get_config(), config)
        self.assertEqual(revived.name, op.name)

    def test_serialization_custom_constructor_with_name_auto_config(self):
        """Auto-config captures explicit constructor args plus `name`."""
        # Explicit name passed in constructor is serialized and deserialized.
        op = OpWithCustomConstructor(alpha=0.2, name="test_op")
        config = op.get_config()
        self.assertEqual(config, {"alpha": 0.2, "beta": 1.0, "name": "test_op"})
        revived = OpWithCustomConstructor.from_config(config)
        self.assertEqual(revived.get_config(), config)
        self.assertEqual(revived.name, op.name)

        # Auto generated name is serialized and deserialized.
        op = OpWithCustomConstructor(alpha=0.2, beta=0.0)
        config = op.get_config()
        self.assertEqual(config, {"alpha": 0.2, "beta": 0.0, "name": op.name})
        revived = OpWithCustomConstructor.from_config(config)
        self.assertEqual(revived.get_config(), config)
        self.assertEqual(revived.name, op.name)

    def test_serialization_custom_constructor_with_no_name_auto_config(self):
        """Without a `name` parameter, the auto name is not serialized."""
        # Auto generated name is not serialized.
        op = OpWithCustomConstructorNoName(alpha=0.2)
        config = op.get_config()
        self.assertEqual(config, {"alpha": 0.2, "beta": 1.0})
        revived = OpWithCustomConstructorNoName.from_config(config)
        self.assertEqual(revived.get_config(), config)

    def test_serialization_custom_constructor_with_kwargs_auto_config(self):
        """Auto-config works when the constructor forwards `**kwargs`."""
        # Explicit name passed in constructor is serialized and deserialized.
        op = OpWithKwargsInConstructor(alpha=0.2, name="test_op")
        config = op.get_config()
        self.assertEqual(config, {"alpha": 0.2, "beta": 1.0, "name": "test_op"})
        revived = OpWithKwargsInConstructor.from_config(config)
        self.assertEqual(revived.get_config(), config)
        self.assertEqual(revived.name, op.name)

        # Auto generated name is serialized and deserialized.
        op = OpWithKwargsInConstructor(alpha=0.2, beta=0.0)
        config = op.get_config()
        self.assertEqual(config, {"alpha": 0.2, "beta": 0.0, "name": op.name})
        revived = OpWithKwargsInConstructor.from_config(config)
        self.assertEqual(revived.get_config(), config)
        self.assertEqual(revived.name, op.name)

    def test_failing_serialization_non_serializable_auto_config(
        self,
    ):
        """Auto-config raises for constructor args it cannot serialize."""

        class NonSerializable:
            pass

        # Custom class cannot be automatically serialized.
        op = OpWithCustomConstructor(alpha=NonSerializable(), name="test_op")
        with self.assertRaises(NotImplementedError):
            _ = op.get_config()

    def test_failing_serialization_custom_constructor_with_args_auto_config(
        self,
    ):
        """Auto-config raises for constructors with variadic `*args`."""
        # Custom constructor with variadic args cannot be automatically
        # serialized.
        op = OpWithArgsInConstructor(0.2, "a", "b", "c", name="test_op")
        with self.assertRaises(NotImplementedError):
            _ = op.get_config()

    def test_serialization_custom_constructor_custom_get_config(self):
        """Custom `get_config` overrides round-trip through `from_config`."""
        # Explicit name passed in constructor is serialized and deserialized.
        op = OpWithCustomConstructorGetConfig(alpha=0.2, name="test_op")
        config = op.get_config()
        self.assertEqual(config, {"alpha": 0.2, "name": "test_op"})
        revived = OpWithCustomConstructorGetConfig.from_config(config)
        self.assertEqual(revived.get_config(), config)
        self.assertEqual(revived.name, op.name)

        # Auto generated name is serialized and deserialized.
        op = OpWithCustomConstructorGetConfig(alpha=0.2)
        config = op.get_config()
        self.assertEqual(config, {"alpha": 0.2, "name": op.name})
        revived = OpWithCustomConstructorGetConfig.from_config(config)
        self.assertEqual(revived.get_config(), config)
        self.assertEqual(revived.name, op.name)

    def test_serialization_custom_constructor_with_kwargs_custom_get_config(
        self,
    ):
        """Custom `get_config` with a `**kwargs` constructor round-trips."""
        # Explicit name passed in constructor is serialized and deserialized.
        op = OpWithKwargsInConstructorGetConfig(alpha=0.2, name="test_op")
        config = op.get_config()
        self.assertEqual(config, {"alpha": 0.2, "name": "test_op"})
        revived = OpWithKwargsInConstructorGetConfig.from_config(config)
        self.assertEqual(revived.get_config(), config)
        self.assertEqual(revived.name, op.name)

        # Auto generated name is serialized and deserialized.
        op = OpWithKwargsInConstructorGetConfig(alpha=0.2)
        config = op.get_config()
        self.assertEqual(config, {"alpha": 0.2, "name": op.name})
        revived = OpWithKwargsInConstructorGetConfig.from_config(config)
        self.assertEqual(revived.get_config(), config)
        self.assertEqual(revived.name, op.name)

    @skip_if_backend(
        "openvino", "Can not constant fold eltwise node by CPU plugin"
    )
    def test_input_conversion(self):
        """NumPy inputs are converted to backend tensors when called."""
        x = np.ones((2,))
        y = np.ones((2,))
        z = knp.ones((2,))  # mix
        if backend.backend() == "torch":
            z = z.cpu()
        op = OpWithMultipleInputs()
        out = op(x, y, z)
        self.assertTrue(backend.is_tensor(out))
        self.assertAllClose(out, 6 * np.ones((2,)))

    def test_valid_naming(self):
        """Op names must be strings and may not contain `/`."""
        OpWithMultipleOutputs(name="test_op")

        with self.assertRaisesRegex(
            ValueError, "must be a string and cannot contain character `/`."
        ):
            OpWithMultipleOutputs(name="test/op")
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/ops/einops_test.py | keras/src/ops/einops_test.py | from conftest import skip_if_backend
from keras.src import ops
from keras.src import testing
from keras.src.backend.common import keras_tensor
from keras.src.ops.einops import rearrange
class RearrangeTest(testing.TestCase):
    """Tests for `keras.src.ops.einops.rearrange`."""

    def test_basic_rearrangement_symbolic(self):
        x = keras_tensor.KerasTensor((2, 3, 4))
        y = rearrange(x, "b c h -> b h c")
        self.assertIsInstance(y, keras_tensor.KerasTensor)
        self.assertEqual(y.shape, (2, 4, 3))

    @skip_if_backend("openvino", "Test operation not supported by openvino")
    def test_basic_rearrangement(self):
        x = ops.random.uniform((2, 3, 4))
        y = rearrange(x, "b c h -> b h c")
        self.assertEqual(y.shape, (2, 4, 3))
        self.assertTrue(ops.all(ops.equal(y, ops.transpose(x, (0, 2, 1)))))

    @skip_if_backend("openvino", "Test operation not supported by openvino")
    def test_output_composition(self):
        x = ops.random.uniform((2, 4, 4, 3))
        y = rearrange(x, "b h w c -> (b h) w c")
        target_shape = (8, 4, 3)
        self.assertEqual(y.shape, target_shape)
        self.assertTrue(ops.all(ops.equal(y, ops.reshape(x, (8, 4, 3)))))

    def test_basic_decomposition_and_rearrangement_symbolic(self):
        x = keras_tensor.KerasTensor((6, 8))
        y = rearrange(x, "(h w) c -> h w c", h=2, w=3)
        self.assertIsInstance(y, keras_tensor.KerasTensor)
        self.assertEqual(y.shape, (2, 3, 8))

    def test_basic_decomposition_and_rearrangement(self):
        x = ops.random.uniform((6, 8))
        y = rearrange(x, "(h w) c -> h w c", h=2, w=3)
        self.assertEqual(y.shape, (2, 3, 8))

    @skip_if_backend("openvino", "Test operation not supported by openvino")
    def test_unchanged_shape(self):
        x = ops.ones([2, 3, 4])
        y = rearrange(x, "b h c -> b h c")
        self.assertTrue(ops.all(ops.equal(y, x)))
        # Fixed: the original used `assertTrue(x.shape, y.shape)`, which
        # always passes because the second argument is just the failure
        # message. Compare the shapes for equality instead.
        self.assertEqual(x.shape, y.shape)

    def test_unchanged_shape_symbolic(self):
        x = keras_tensor.KerasTensor((2, 3, 4))
        y = rearrange(x, "b h c -> b h c")
        # Fixed: `assertTrue(x.shape, y.shape)` never compared the shapes;
        # use an equality assertion.
        self.assertEqual(x.shape, y.shape)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/ops/numpy_test.py | keras/src/ops/numpy_test.py | import functools
import itertools
import math
import warnings
import numpy as np
import pytest
from absl.testing import parameterized
import keras
from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.backend.common import dtypes
from keras.src.backend.common import is_int_dtype
from keras.src.backend.common import standardize_dtype
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.ops import numpy as knp
from keras.src.testing.test_utils import named_product
class NumPyTestRot90(testing.TestCase):
    """Tests comparing `knp.rot90` against NumPy's `np.rot90`."""

    def test_basic_rotation(self):
        array = np.array([[1, 2, 3], [4, 5, 6]])
        rotated = knp.rot90(array)
        expected = np.rot90(array)
        self.assertAllClose(rotated, expected)

    @parameterized.named_parameters(
        ("k_0", 0, [[1, 2], [3, 4]]),
        ("k_1", 1, [[2, 4], [1, 3]]),
        ("k_2", 2, [[4, 3], [2, 1]]),
        ("k_neg1", -1, [[3, 1], [4, 2]]),
        ("k_5", 5, [[2, 4], [1, 3]]),  # k=5 ≡ k=1 (mod 4)
        ("k_6", 6, [[4, 3], [2, 1]]),  # k=6 ≡ k=2 (mod 4)
    )
    def test_k_parameter_variations(self, k, expected):
        """`k` rotates in 90° increments, including negatives and wrap-around."""
        array = np.array([[1, 2], [3, 4]])
        rotated = knp.rot90(array, k=k)
        expected = np.array(expected)
        self.assertAllClose(rotated, expected)

    @parameterized.named_parameters(
        ("axes_0_1", (0, 1)), ("axes_1_2", (1, 2)), ("axes_0_2", (0, 2))
    )
    def test_3d_operations(self, axes):
        """Rotation in every axis pair of a 3D array matches NumPy."""
        array_3d = np.arange(12).reshape(3, 2, 2)
        rotated = knp.rot90(array_3d, axes=axes)
        expected = np.rot90(array_3d, axes=axes)
        self.assertAllClose(rotated, expected)

    @parameterized.named_parameters(
        ("single_image", np.random.random((4, 4, 3))),
        ("batch_images", np.random.random((2, 4, 4, 3))),
    )
    def test_image_processing(self, array):
        """HWC / BHWC image-shaped inputs rotate in the spatial axes."""
        np.random.seed(0)
        rotated = knp.rot90(array, axes=(0, 1))
        expected = np.rot90(array, axes=(0, 1))
        self.assertAllClose(rotated, expected)

    @parameterized.named_parameters(
        ("single_row", [[1, 2, 3]]),
        ("single_column", [[1], [2], [3]]),
        ("negative_values", [[-1, 0], [1, -2]]),
    )
    def test_edge_conditions(self, array):
        """Degenerate 2D shapes and negative values match NumPy."""
        numpy_array = np.array(array)
        rotated = knp.rot90(numpy_array)
        expected = np.rot90(numpy_array)
        self.assertAllClose(rotated, expected)

    @parameterized.named_parameters(
        ("1D_array", np.array([1, 2, 3]), None),
        ("duplicate_axes", np.array([[1, 2], [3, 4]]), (0, 0)),
    )
    def test_error_conditions(self, array, axes):
        """rot90 rejects sub-2D inputs and duplicate rotation axes."""
        if axes is None:
            with self.assertRaises(ValueError):
                knp.rot90(array)
        else:
            with self.assertRaises(ValueError):
                knp.rot90(array, axes=axes)
class NumpyTwoInputOpsDynamicShapeTest(testing.TestCase):
def test_add(self):
x = KerasTensor((None, 3))
y = KerasTensor((2, None))
self.assertEqual(knp.add(x, y).shape, (2, 3))
def test_heaviside(self):
x = KerasTensor((None, 3))
y = KerasTensor((None, 3))
self.assertEqual(knp.heaviside(x, y).shape, (None, 3))
def test_hypot(self):
x = KerasTensor((None, 3))
y = KerasTensor((None, 3))
self.assertEqual(knp.hypot(x, y).shape, (None, 3))
def test_subtract(self):
x = KerasTensor((None, 3))
y = KerasTensor((2, None))
self.assertEqual(knp.subtract(x, y).shape, (2, 3))
def test_multiply(self):
x = KerasTensor((None, 3))
y = KerasTensor((2, None))
self.assertEqual(knp.multiply(x, y).shape, (2, 3))
def test_matmul(self):
x = KerasTensor((None, 3, 4))
y = KerasTensor((3, None, 4, 5))
self.assertEqual(knp.matmul(x, y).shape, (3, None, 3, 5))
def test_power(self):
x = KerasTensor((None, 3))
y = KerasTensor((2, None))
self.assertEqual(knp.power(x, y).shape, (2, 3))
def test_divide(self):
x = KerasTensor((None, 3))
y = KerasTensor((2, None))
self.assertEqual(knp.divide(x, y).shape, (2, 3))
def test_divide_no_nan(self):
x = KerasTensor((None, 3))
y = KerasTensor((2, None))
self.assertEqual(knp.divide_no_nan(x, y).shape, (2, 3))
def test_true_divide(self):
x = KerasTensor((None, 3))
y = KerasTensor((2, None))
self.assertEqual(knp.true_divide(x, y).shape, (2, 3))
def test_append(self):
x = KerasTensor((None, 3))
y = KerasTensor((2, None))
self.assertEqual(knp.append(x, y).shape, (None,))
def test_arctan2(self):
x = KerasTensor((None, 3))
y = KerasTensor((2, None))
self.assertEqual(knp.arctan2(x, y).shape, (2, 3))
def test_bitwise_and(self):
x = KerasTensor((None, 3))
y = KerasTensor((None, 3))
self.assertEqual(knp.bitwise_and(x, y).shape, (None, 3))
def test_bitwise_or(self):
x = KerasTensor((None, 3))
y = KerasTensor((None, 3))
self.assertEqual(knp.bitwise_or(x, y).shape, (None, 3))
def test_bitwise_xor(self):
x = KerasTensor((None, 3))
y = KerasTensor((None, 3))
self.assertEqual(knp.bitwise_xor(x, y).shape, (None, 3))
def test_bitwise_left_shift(self):
x = KerasTensor((None, 3))
y = KerasTensor((None, 3))
self.assertEqual(knp.bitwise_left_shift(x, y).shape, (None, 3))
# left_shift is same as bitwise_left_shift
def test_bitwise_right_shift(self):
x = KerasTensor((None, 3))
y = KerasTensor((None, 3))
self.assertEqual(knp.bitwise_right_shift(x, y).shape, (None, 3))
# right_shift is same as bitwise_right_shift
def test_cross(self):
x1 = KerasTensor((2, 3, 3))
x2 = KerasTensor((1, 3, 2))
y = KerasTensor((None, 1, 2))
self.assertEqual(knp.cross(x1, y).shape, (2, 3, 3))
self.assertEqual(knp.cross(x2, y).shape, (None, 3))
def test_einsum(self):
x = KerasTensor((None, 3))
y = KerasTensor((3, 4))
self.assertEqual(knp.einsum("ij,jk->ik", x, y).shape, (None, 4))
self.assertEqual(knp.einsum("ij,jk->ikj", x, y).shape, (None, 4, 3))
self.assertEqual(knp.einsum("ii", x).shape, ())
self.assertEqual(knp.einsum(",ij", 5, x).shape, (None, 3))
x = KerasTensor((None, 3, 4))
y = KerasTensor((None, 4, 5))
z = KerasTensor((1, 1, 1, 9))
self.assertEqual(knp.einsum("ijk,jkl->li", x, y).shape, (5, None))
self.assertEqual(knp.einsum("ijk,jkl->lij", x, y).shape, (5, None, 3))
self.assertEqual(
knp.einsum("...,...j->...j", x, y).shape, (None, 3, 4, 5)
)
self.assertEqual(
knp.einsum("i...,...j->i...j", x, y).shape, (None, 3, 4, 5)
)
self.assertEqual(knp.einsum("i...,...j", x, y).shape, (3, 4, None, 5))
self.assertEqual(
knp.einsum("i...,...j,...k", x, y, z).shape, (1, 3, 4, None, 5, 9)
)
self.assertEqual(
knp.einsum("mij,ijk,...", x, y, z).shape, (1, 1, 1, 9, 5, None)
)
with self.assertRaises(ValueError):
x = KerasTensor((None, 3))
y = KerasTensor((3, 4))
knp.einsum("ijk,jk->ik", x, y)
def test_full_like(self):
x = KerasTensor((None, 3))
self.assertEqual(knp.full_like(x, KerasTensor((1, 3))).shape, (None, 3))
x = KerasTensor((None, 3, 3))
self.assertEqual(knp.full_like(x, 2).shape, (None, 3, 3))
def test_gcd(self):
x = KerasTensor((None, 3))
y = KerasTensor((2, None))
self.assertEqual(knp.gcd(x, y).shape, (2, 3))
def test_greater(self):
x = KerasTensor((None, 3))
y = KerasTensor((2, None))
self.assertEqual(knp.greater(x, y).shape, (2, 3))
def test_greater_equal(self):
x = KerasTensor((None, 3))
y = KerasTensor((2, None))
self.assertEqual(knp.greater_equal(x, y).shape, (2, 3))
def test_isclose(self):
x = KerasTensor((None, 3))
y = KerasTensor((2, None))
self.assertEqual(knp.isclose(x, y).shape, (2, 3))
def test_isin(self):
x = KerasTensor((None, 3))
y = KerasTensor((2, None))
self.assertEqual(knp.isin(x, y).shape, (None, 3))
def test_kron(self):
x = KerasTensor((None, 3))
y = KerasTensor((2, None))
self.assertEqual(knp.kron(x, y).shape, (None, None))
def test_lcm(self):
x = KerasTensor((None, 3))
y = KerasTensor((2, None))
self.assertEqual(knp.lcm(x, y).shape, (2, 3))
def test_ldexp(self):
x = KerasTensor((None, 3))
y = KerasTensor((1, 3))
self.assertEqual(knp.ldexp(x, y).shape, (None, 3))
def test_less(self):
x = KerasTensor((None, 3))
y = KerasTensor((2, None))
self.assertEqual(knp.less(x, y).shape, (2, 3))
def test_less_equal(self):
x = KerasTensor((None, 3))
y = KerasTensor((2, None))
self.assertEqual(knp.less_equal(x, y).shape, (2, 3))
def test_linspace(self):
start = KerasTensor((None, 3, 4))
stop = KerasTensor((2, 3, 4))
self.assertEqual(
knp.linspace(start, stop, 10, axis=1).shape, (2, 10, 3, 4)
)
start = KerasTensor((None, 3))
stop = 2
self.assertEqual(
knp.linspace(start, stop, 10, axis=1).shape, (None, 10, 3)
)
def test_logical_and(self):
x = KerasTensor((None, 3))
y = KerasTensor((2, None))
self.assertEqual(knp.logical_and(x, y).shape, (2, 3))
def test_logical_or(self):
x = KerasTensor((None, 3))
y = KerasTensor((2, None))
self.assertEqual(knp.logical_or(x, y).shape, (2, 3))
def test_logspace(self):
start = KerasTensor((None, 3, 4))
stop = KerasTensor((2, 3, 4))
self.assertEqual(
knp.logspace(start, stop, 10, axis=1).shape, (2, 10, 3, 4)
)
start = KerasTensor((None, 3))
stop = 2
self.assertEqual(
knp.logspace(start, stop, 10, axis=1).shape, (None, 10, 3)
)
def test_maximum(self):
x = KerasTensor((None, 3))
y = KerasTensor((2, None))
self.assertEqual(knp.maximum(x, y).shape, (2, 3))
def test_minimum(self):
x = KerasTensor((None, 3))
y = KerasTensor((2, None))
self.assertEqual(knp.minimum(x, y).shape, (2, 3))
def test_mod(self):
x = KerasTensor((None, 3))
y = KerasTensor((2, None))
self.assertEqual(knp.mod(x, y).shape, (2, 3))
def test_nextafter(self):
x = KerasTensor((None, 3))
y = KerasTensor((1, 3))
self.assertEqual(knp.nextafter(x, y).shape, (None, 3))
def test_not_equal(self):
x = KerasTensor((None, 3))
y = KerasTensor((2, None))
self.assertEqual(knp.not_equal(x, y).shape, (2, 3))
def test_outer(self):
x = KerasTensor((None, 3))
y = KerasTensor((2, None))
self.assertEqual(knp.outer(x, y).shape, (None, None))
def test_quantile(self):
x = KerasTensor((None, 3))
# q as scalar
q = KerasTensor(())
self.assertEqual(knp.quantile(x, q).shape, ())
# q as 1D tensor
q = KerasTensor((2,))
self.assertEqual(knp.quantile(x, q).shape, (2,))
self.assertEqual(knp.quantile(x, q, axis=1).shape, (2, None))
self.assertEqual(
knp.quantile(x, q, axis=1, keepdims=True).shape,
(2, None, 1),
)
def test_searchsorted(self):
a = KerasTensor((None,))
v = KerasTensor((2, 3))
output = knp.searchsorted(a, v)
self.assertEqual(output.shape, v.shape)
self.assertEqual(output.dtype, "int64")
def test_take(self):
x = KerasTensor((None, 3))
self.assertEqual(knp.take(x, 1).shape, ())
self.assertEqual(knp.take(x, [1, 2]).shape, (2,))
self.assertEqual(
knp.take(x, [[1, 2], [1, 2]], axis=1).shape, (None, 2, 2)
)
x = KerasTensor((None, 3, 3))
self.assertEqual(knp.take(x, 1, axis=1).shape, (None, 3))
self.assertEqual(knp.take(x, [1, 2]).shape, (2,))
self.assertEqual(
knp.take(x, [[1, 2], [1, 2]], axis=1).shape, (None, 2, 2, 3)
)
# test with negative axis
self.assertEqual(knp.take(x, 1, axis=-2).shape, (None, 3))
# test with multi-dimensional indices
x = KerasTensor((None, 3, None, 5))
indices = KerasTensor((6, 7))
self.assertEqual(knp.take(x, indices, axis=2).shape, (None, 3, 6, 7, 5))
def test_take_along_axis(self):
x = KerasTensor((None, 3))
indices = KerasTensor((1, 3))
self.assertEqual(knp.take_along_axis(x, indices, axis=0).shape, (1, 3))
self.assertEqual(
knp.take_along_axis(x, indices, axis=1).shape, (None, 3)
)
x = KerasTensor((None, 3, 3))
indices = KerasTensor((1, 3, None))
self.assertEqual(
knp.take_along_axis(x, indices, axis=1).shape, (None, 3, 3)
)
def test_tensordot(self):
x = KerasTensor((None, 3, 4))
y = KerasTensor((3, 4))
self.assertEqual(knp.tensordot(x, y, axes=1).shape, (None, 3, 4))
self.assertEqual(knp.tensordot(x, y, axes=[[0, 1], [1, 0]]).shape, (4,))
def test_vdot(self):
x = KerasTensor((None, 3))
y = KerasTensor((None, 3))
self.assertEqual(knp.vdot(x, y).shape, ())
x = KerasTensor((None, 3, 3))
y = KerasTensor((None, 3, 3))
self.assertEqual(knp.vdot(x, y).shape, ())
def test_inner(self):
x = KerasTensor((None,))
y = KerasTensor((3,))
self.assertEqual(knp.inner(x, y).shape, ())
def test_where(self):
condition = KerasTensor((2, None, 1))
x = KerasTensor((None, 1))
y = KerasTensor((None, 3))
self.assertEqual(knp.where(condition, x, y).shape, (2, None, 3))
self.assertEqual(knp.where(condition).shape, (2, None, 1))
def test_floor_divide(self):
x = KerasTensor((None, 3))
y = KerasTensor((2, None))
self.assertEqual(knp.floor_divide(x, y).shape, (2, 3))
def test_xor(self):
x = KerasTensor((None, 3))
y = KerasTensor((2, None))
self.assertEqual(knp.logical_xor(x, y).shape, (2, 3))
def test_shape_equal_basic_equality(self):
x = KerasTensor((3, 4)).shape
y = KerasTensor((3, 4)).shape
self.assertTrue(knp.shape_equal(x, y))
y = KerasTensor((3, 5)).shape
self.assertFalse(knp.shape_equal(x, y))
def test_shape_equal_allow_none(self):
x = KerasTensor((3, 4, None)).shape
y = KerasTensor((3, 4, 5)).shape
self.assertTrue(knp.shape_equal(x, y, allow_none=True))
self.assertFalse(knp.shape_equal(x, y, allow_none=False))
def test_shape_equal_different_shape_lengths(self):
x = KerasTensor((3, 4)).shape
y = KerasTensor((3, 4, 5)).shape
self.assertFalse(knp.shape_equal(x, y))
def test_shape_equal_ignore_axes(self):
x = KerasTensor((3, 4, 5)).shape
y = KerasTensor((3, 6, 5)).shape
self.assertTrue(knp.shape_equal(x, y, axis=1))
y = KerasTensor((3, 6, 7)).shape
self.assertTrue(knp.shape_equal(x, y, axis=(1, 2)))
self.assertFalse(knp.shape_equal(x, y, axis=1))
def test_shape_equal_only_none(self):
x = KerasTensor((None, None)).shape
y = KerasTensor((5, 6)).shape
self.assertTrue(knp.shape_equal(x, y, allow_none=True))
def test_shape_equal_axis_as_list(self):
x = KerasTensor((3, 4, 5)).shape
y = KerasTensor((3, 6, 5)).shape
self.assertTrue(knp.shape_equal(x, y, axis=[1]))
def test_shape_non_equal_with_negative_axis(self):
x = KerasTensor((3, 4, 5)).shape
y = KerasTensor((3, 4, 6)).shape
self.assertFalse(knp.shape_equal(x, y, axis=-2))
def test_shape_equal_with_negative_axis(self):
x = KerasTensor((3, 4, 5)).shape
y = KerasTensor((3, 4, 5)).shape
self.assertTrue(knp.shape_equal(x, y, axis=-1))
def test_shape_equal_zeros(self):
x = KerasTensor((0, 4)).shape
y = KerasTensor((0, 4)).shape
self.assertTrue(knp.shape_equal(x, y))
y = KerasTensor((0, 5)).shape
self.assertFalse(knp.shape_equal(x, y))
def test_broadcast_shapes_conversion_to_list(self):
shape1 = KerasTensor((1, 2)).shape
shape2 = KerasTensor((3, 1)).shape
expected_output = [3, 2]
self.assertEqual(knp.broadcast_shapes(shape1, shape2), expected_output)
def test_broadcast_shapes_shape1_longer_than_shape2(self):
shape1 = KerasTensor((5, 3, 2)).shape
shape2 = KerasTensor((1, 3)).shape
with self.assertRaisesRegex(ValueError, "Cannot broadcast shape"):
knp.broadcast_shapes(shape1, shape2)
def test_broadcast_shapes_shape2_longer_than_shape1(self):
shape1 = KerasTensor((5, 3)).shape
shape2 = KerasTensor((2, 5, 3)).shape
expected_output = [2, 5, 3]
self.assertEqual(knp.broadcast_shapes(shape1, shape2), expected_output)
def test_broadcast_shapes_broadcasting_shape1_is_1(self):
shape1 = KerasTensor((1, 3)).shape
shape2 = KerasTensor((5, 1)).shape
expected_output = [5, 3]
self.assertEqual(knp.broadcast_shapes(shape1, shape2), expected_output)
def test_broadcast_shapes_broadcasting_shape1_is_none(self):
shape1 = KerasTensor((None, 3)).shape
shape2 = KerasTensor((5, 1)).shape
expected_output = [5, 3]
self.assertEqual(knp.broadcast_shapes(shape1, shape2), expected_output)
shape1 = KerasTensor((None, 3)).shape
shape2 = KerasTensor((5, 3)).shape
expected_output = [5, 3]
self.assertEqual(knp.broadcast_shapes(shape1, shape2), expected_output)
def test_broadcast_shapes_broadcasting_shape2_conditions(self):
shape1 = KerasTensor((5, 3, 2)).shape
shape2 = KerasTensor((1, 3, 2)).shape
expected_output = [5, 3, 2]
self.assertEqual(knp.broadcast_shapes(shape1, shape2), expected_output)
shape1 = KerasTensor((5, 3, 2)).shape
shape2 = KerasTensor((1, None, 2)).shape
expected_output = [5, 3, 2]
self.assertEqual(knp.broadcast_shapes(shape1, shape2), expected_output)
class NumpyTwoInputOpsStaticShapeTest(testing.TestCase):
def test_add(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.add(x, y).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.add(x, y)
def test_heaviside(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.heaviside(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
y = KerasTensor((3,))
self.assertEqual(knp.heaviside(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
y = KerasTensor((1, 3))
self.assertEqual(knp.heaviside(x, y).shape, (2, 3))
def test_hypot(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.hypot(x, y).shape, (2, 3))
def test_subtract(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.subtract(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.subtract(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.subtract(x, y)
def test_multiply(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.multiply(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.multiply(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.multiply(x, y)
def test_matmul(self):
x = KerasTensor((2, 3))
y = KerasTensor((3, 2))
self.assertEqual(knp.matmul(x, y).shape, (2, 2))
with self.assertRaises(ValueError):
x = KerasTensor((3, 4))
y = KerasTensor((2, 3, 4))
knp.matmul(x, y)
@pytest.mark.skipif(testing.tensorflow_uses_gpu(), reason="Segfault")
def test_matmul_sparse(self):
x = KerasTensor((2, 3), sparse=True)
y = KerasTensor((3, 2))
result = knp.matmul(x, y)
self.assertEqual(result.shape, (2, 2))
x = KerasTensor((2, 3))
y = KerasTensor((3, 2), sparse=True)
result = knp.matmul(x, y)
self.assertEqual(result.shape, (2, 2))
x = KerasTensor((2, 3), sparse=True)
y = KerasTensor((3, 2), sparse=True)
result = knp.matmul(x, y)
self.assertEqual(result.shape, (2, 2))
self.assertTrue(result.sparse)
def test_power(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.power(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.power(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.power(x, y)
def test_divide(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.divide(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.divide(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.divide(x, y)
def test_divide_no_nan(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.divide_no_nan(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.divide_no_nan(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.divide_no_nan(x, y)
def test_true_divide(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.true_divide(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.true_divide(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.true_divide(x, y)
def test_append(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.append(x, y).shape, (12,))
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.append(x, y, axis=0).shape, (4, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.append(x, y, axis=2)
def test_arctan2(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.arctan2(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.arctan2(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.arctan2(x, y)
def test_bitwise_and(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.bitwise_and(x, y).shape, (2, 3))
def test_bitwise_or(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.bitwise_or(x, y).shape, (2, 3))
def test_bitwise_xor(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.bitwise_xor(x, y).shape, (2, 3))
def test_bitwise_left_shift(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.bitwise_left_shift(x, y).shape, (2, 3))
# left_shift is same as bitwise_left_shift
def test_bitwise_right_shift(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.bitwise_right_shift(x, y).shape, (2, 3))
# right_shift is same as bitwise_right_shift
def test_cross(self):
x1 = KerasTensor((2, 3, 3))
x2 = KerasTensor((1, 3, 2))
y1 = KerasTensor((2, 3, 3))
y2 = KerasTensor((2, 3, 2))
self.assertEqual(knp.cross(x1, y1).shape, (2, 3, 3))
self.assertEqual(knp.cross(x2, y2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.cross(x, y)
with self.assertRaises(ValueError):
x = KerasTensor((4, 3, 3))
y = KerasTensor((2, 3, 3))
knp.cross(x, y)
def test_einsum(self):
x = KerasTensor((2, 3))
y = KerasTensor((3, 4))
self.assertEqual(knp.einsum("ij,jk->ik", x, y).shape, (2, 4))
self.assertEqual(knp.einsum("ij,jk->ikj", x, y).shape, (2, 4, 3))
self.assertEqual(knp.einsum("ii", x).shape, ())
self.assertEqual(knp.einsum(",ij", 5, x).shape, (2, 3))
x = KerasTensor((2, 3, 4))
y = KerasTensor((3, 4, 5))
z = KerasTensor((1, 1, 1, 9))
self.assertEqual(knp.einsum("ijk,jkl->li", x, y).shape, (5, 2))
self.assertEqual(knp.einsum("ijk,jkl->lij", x, y).shape, (5, 2, 3))
self.assertEqual(knp.einsum("...,...j->...j", x, y).shape, (2, 3, 4, 5))
self.assertEqual(
knp.einsum("i...,...j->i...j", x, y).shape, (2, 3, 4, 5)
)
self.assertEqual(knp.einsum("i...,...j", x, y).shape, (3, 4, 2, 5))
self.assertEqual(knp.einsum("i...,...j", x, y).shape, (3, 4, 2, 5))
self.assertEqual(
knp.einsum("i...,...j,...k", x, y, z).shape, (1, 3, 4, 2, 5, 9)
)
self.assertEqual(
knp.einsum("mij,ijk,...", x, y, z).shape, (1, 1, 1, 9, 5, 2)
)
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((3, 4))
knp.einsum("ijk,jk->ik", x, y)
def test_full_like(self):
x = KerasTensor((2, 3))
self.assertEqual(knp.full_like(x, 2).shape, (2, 3))
def test_gcd(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.gcd(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.gcd(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.gcd(x, y)
def test_greater(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.greater(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.greater(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.greater(x, y)
def test_greater_equal(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.greater_equal(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.greater_equal(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.greater_equal(x, y)
def test_isclose(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.isclose(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.isclose(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.isclose(x, y)
def test_isin(self):
x = KerasTensor((2, 3))
y = KerasTensor((3, 3))
self.assertEqual(knp.isin(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.isin(x, 2).shape, (2, 3))
def test_kron(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.kron(x, y).shape, (4, 9))
def test_lcm(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.lcm(x, y).shape, (2, 3))
def test_ldexp(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.ldexp(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
y = KerasTensor((1, 3))
self.assertEqual(knp.ldexp(x, y).shape, (2, 3))
def test_less(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.less(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.less(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.less(x, y)
def test_less_equal(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.less_equal(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.less_equal(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.less_equal(x, y)
def test_linspace(self):
start = KerasTensor((2, 3, 4))
stop = KerasTensor((2, 3, 4))
self.assertEqual(knp.linspace(start, stop, 10).shape, (10, 2, 3, 4))
with self.assertRaises(ValueError):
start = KerasTensor((2, 3))
stop = KerasTensor((2, 3, 4))
knp.linspace(start, stop)
def test_logical_and(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.logical_and(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.logical_and(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.logical_and(x, y)
def test_logical_or(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.logical_or(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.logical_or(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.logical_or(x, y)
def test_logspace(self):
start = KerasTensor((2, 3, 4))
stop = KerasTensor((2, 3, 4))
self.assertEqual(knp.logspace(start, stop, 10).shape, (10, 2, 3, 4))
with self.assertRaises(ValueError):
start = KerasTensor((2, 3))
stop = KerasTensor((2, 3, 4))
knp.logspace(start, stop)
def test_maximum(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.maximum(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.maximum(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.maximum(x, y)
def test_minimum(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.minimum(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.minimum(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.minimum(x, y)
def test_mod(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.mod(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
self.assertEqual(knp.mod(x, 2).shape, (2, 3))
with self.assertRaises(ValueError):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3, 4))
knp.mod(x, y)
def test_nextafter(self):
x = KerasTensor((2, 3))
y = KerasTensor((2, 3))
self.assertEqual(knp.nextafter(x, y).shape, (2, 3))
x = KerasTensor((2, 3))
y = KerasTensor((1, 3))
self.assertEqual(knp.nextafter(x, y).shape, (2, 3))
def test_not_equal(self):
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/ops/operation_utils.py | keras/src/ops/operation_utils.py | import math
import numpy as np
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.backend.common.backend_utils import canonicalize_axis
from keras.src.backend.common.backend_utils import to_tuple_or_list
def broadcast_shapes(shape1, shape2):
"""Broadcast input shapes to a unified shape.
Convert to list for mutability.
Args:
shape1: A tuple or list of integers.
shape2: A tuple or list of integers.
Returns:
output_shape (list of integers or `None`): The broadcasted shape.
Example:
>>> broadcast_shapes((5, 3), (1, 3))
[5, 3]
"""
shape1 = list(shape1)
shape2 = list(shape2)
origin_shape1 = shape1
origin_shape2 = shape2
if len(shape1) > len(shape2):
shape2 = [1] * (len(shape1) - len(shape2)) + shape2
if len(shape1) < len(shape2):
shape1 = [1] * (len(shape2) - len(shape1)) + shape1
output_shape = list(shape1)
for i in range(len(shape1)):
if shape1[i] == 1:
output_shape[i] = shape2[i]
elif shape1[i] is None:
output_shape[i] = None if shape2[i] == 1 else shape2[i]
else:
if shape2[i] == 1 or shape2[i] is None or shape2[i] == shape1[i]:
output_shape[i] = shape1[i]
else:
raise ValueError(
"Cannot broadcast shape, the failure dim has value "
f"{shape1[i]}, which cannot be broadcasted to {shape2[i]}. "
f"Input shapes are: {origin_shape1} and {origin_shape2}."
)
return output_shape
def compute_expand_dims_output_shape(input_shape, axis):
"""Compute the output shape for the `expand_dims` operation.
Args:
input_shape: Input shape.
axis: int or sequence of ints for the axis to expand.
Returns:
Tuple of ints: The output shape after the `expand_dims` operation.
"""
input_shape = list(input_shape)
if axis is None:
axis = len(input_shape)
axis = to_tuple_or_list(axis)
out_ndim = len(axis) + len(input_shape)
axis = [canonicalize_axis(a, out_ndim) for a in axis]
shape_iter = iter(input_shape)
new_shape = [
1 if ax in axis else next(shape_iter) for ax in range(out_ndim)
]
return tuple(new_shape)
def compute_pooling_output_shape(
input_shape,
pool_size,
strides,
padding="valid",
data_format="channels_last",
):
"""Computes the output shape of pooling operations.
Args:
input_shape: Input shape. Must be a tuple of integers.
pool_size: Size of the pooling operation. Must be a tuple of integers.
strides: Stride of the pooling operation. Must be a tuple of integers.
Defaults to `pool_size`.
padding: Padding method. Available methods are `"valid"` or `"same"`.
Defaults to `"valid"`.
data_format: String, either `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs. `"channels_last"`
corresponds to inputs with shape `(batch, height, width, channels)`
while `"channels_first"` corresponds to inputs with shape
`(batch, channels, height, weight)`. Defaults to `"channels_last"`.
Returns:
Tuple of ints: The output shape of the pooling operation.
Examples:
# Basic usage with square pooling on a single image
>>> compute_pooling_output_shape((1, 4, 4, 1), (2, 2))
(1, 2, 2, 1)
# Strided pooling on a single image with strides different from pool_size
>>> compute_pooling_output_shape((1, 4, 4, 1), (2, 2), strides=(1, 1))
(1, 3, 3, 1)
# Pooling on a batch of images
>>> compute_pooling_output_shape((32, 4, 4, 3), (2, 2))
(32, 2, 2, 3)
"""
strides = pool_size if strides is None else strides
input_shape_origin = list(input_shape)
input_shape = np.array(input_shape)
if data_format == "channels_last":
spatial_shape = input_shape[1:-1]
else:
spatial_shape = input_shape[2:]
none_dims = []
for i in range(len(spatial_shape)):
if spatial_shape[i] is None:
# Set `None` shape to a manual value so that we can run numpy
# computation on `spatial_shape`.
spatial_shape[i] = -1
none_dims.append(i)
pool_size = np.array(pool_size)
if padding == "valid":
output_spatial_shape = (
np.floor((spatial_shape - pool_size) / strides) + 1
)
for i in range(len(output_spatial_shape)):
if i not in none_dims and output_spatial_shape[i] < 0:
raise ValueError(
"Computed output size would be negative. Received: "
f"`inputs.shape={input_shape}` and `pool_size={pool_size}`."
)
elif padding == "same":
output_spatial_shape = np.floor((spatial_shape - 1) / strides) + 1
else:
raise ValueError(
"Argument `padding` must be either 'valid' or 'same'. Received: "
f"padding={padding}"
)
output_spatial_shape = [int(i) for i in output_spatial_shape]
for i in none_dims:
output_spatial_shape[i] = None
output_spatial_shape = tuple(output_spatial_shape)
if data_format == "channels_last":
output_shape = (
(input_shape_origin[0],)
+ output_spatial_shape
+ (input_shape_origin[-1],)
)
else:
output_shape = (
input_shape_origin[0],
input_shape_origin[1],
) + output_spatial_shape
return output_shape
def compute_conv_output_shape(
input_shape,
filters,
kernel_size,
strides=1,
padding="valid",
data_format="channels_last",
dilation_rate=1,
):
"""Compute the output shape of conv ops."""
if data_format == "channels_last":
spatial_shape = input_shape[1:-1]
kernel_shape = kernel_size + (input_shape[-1], filters)
else:
spatial_shape = input_shape[2:]
kernel_shape = kernel_size + (input_shape[1], filters)
if len(kernel_shape) != len(input_shape):
raise ValueError(
"Kernel shape must have the same length as input, but received "
f"kernel of shape {kernel_shape} and "
f"input of shape {input_shape}."
)
if isinstance(dilation_rate, int):
dilation_rate = (dilation_rate,) * len(spatial_shape)
if isinstance(strides, int):
strides = (strides,) * len(spatial_shape)
if len(dilation_rate) != len(spatial_shape):
raise ValueError(
"Dilation must be None, scalar or tuple/list of length of "
"inputs' spatial shape, but received "
f"`dilation_rate={dilation_rate}` and "
f"input of shape {input_shape}."
)
none_dims = []
spatial_shape = np.array(spatial_shape)
for i in range(len(spatial_shape)):
if spatial_shape[i] is None:
# Set `None` shape to a manual value so that we can run numpy
# computation on `spatial_shape`.
spatial_shape[i] = -1
none_dims.append(i)
kernel_spatial_shape = np.array(kernel_shape[:-2])
dilation_rate = np.array(dilation_rate)
if padding == "valid":
output_spatial_shape = (
np.floor(
(spatial_shape - dilation_rate * (kernel_spatial_shape - 1) - 1)
/ strides
)
+ 1
)
for i in range(len(output_spatial_shape)):
if i not in none_dims and output_spatial_shape[i] < 0:
raise ValueError(
"Computed output size would be negative. Received "
f"`inputs shape={input_shape}`, "
f"`kernel shape={kernel_shape}`, "
f"`dilation_rate={dilation_rate}`."
)
elif padding == "same" or padding == "causal":
output_spatial_shape = np.floor((spatial_shape - 1) / strides) + 1
else:
raise ValueError(
"`padding` must be either `'valid'` or `'same'`. Received "
f"{padding}."
)
output_spatial_shape = [int(i) for i in output_spatial_shape]
for i in none_dims:
output_spatial_shape[i] = None
output_spatial_shape = tuple(output_spatial_shape)
if data_format == "channels_last":
output_shape = (
(input_shape[0],) + output_spatial_shape + (kernel_shape[-1],)
)
else:
output_shape = (input_shape[0], kernel_shape[-1]) + output_spatial_shape
return output_shape
def compute_matmul_output_shape(shape1, shape2):
"""Compute the output shape of a `matmul` operation.
Args:
shape1: Shape of the left operand.
shape2: Shape of the right operand.
Returns:
Tuple of ints: The output shape for the `matmul` operation.
"""
if len(shape1) == 1:
shape1 = (1, shape1[0])
if len(shape2) == 1:
shape2 = (shape2[0], 1)
if (
shape1[-1] is not None
and shape2[-2] is not None
and shape1[-1] != shape2[-2]
):
raise ValueError(
"Inner dimensions (`x1.shape[-1]` and `x2.shape[-2]`) must be "
f"equal, but received `x1.shape={shape1}` and "
f"`x2.shape={shape2}`."
)
leading_shape = broadcast_shapes(shape1[:-2], shape2[:-2])
last_2_dims_shape = [shape1[-2], shape2[-1]]
output_shape = leading_shape + last_2_dims_shape
if len(shape1) == 1:
del output_shape[-2]
if len(shape2) == 1:
del output_shape[-1]
return tuple(output_shape)
def compute_reshape_output_shape(input_shape, newshape, newshape_arg_name):
"""Converts `-1` in `newshape` to either an actual dimension or `None`.
This utility does not special case the 0th dimension (batch size).
"""
unknown_dim_count = newshape.count(-1)
if unknown_dim_count > 1:
raise ValueError(
"There must be at most one unknown dimension (-1) in "
f"{newshape_arg_name}. Received: {newshape_arg_name}={newshape}."
)
# If there is a None in input_shape, we can't infer what the -1 is
if None in input_shape:
return tuple(dim if dim != -1 else None for dim in newshape)
input_size = math.prod(input_shape)
# If the `newshape` is fully defined, return it
if unknown_dim_count == 0:
if input_size != math.prod(newshape):
raise ValueError(
"The total size of the tensor must be unchanged. Received: "
f"input_shape={input_shape}, {newshape_arg_name}={newshape}"
)
return newshape
# We have one -1 in `newshape`, compute the actual value
known_output_size = 1
unknown_dim_index = None
for index, dim in enumerate(newshape):
if dim == -1:
unknown_dim_index = index
else:
known_output_size *= dim
if known_output_size == 0 or input_size % known_output_size != 0:
raise ValueError(
"The total size of the tensor must be unchanged, however, the "
"input size cannot by divided by the specified dimensions in "
f"{newshape_arg_name}. Received: input_shape={input_shape}, "
f"{newshape_arg_name}={newshape}"
)
output_shape = list(newshape)
output_shape[unknown_dim_index] = input_size // known_output_size
return tuple(output_shape)
def compute_transpose_output_shape(input_shape, axes):
"""Compute the output shape for the `transpose` operation.
Args:
input_shape: Input shape.
axes: Permutation of the dimensions for the `transpose` operation.
Returns:
Tuple of ints: The output shape after the `transpose` operation.
"""
input_shape = list(input_shape)
if axes is None:
return tuple(input_shape[::-1])
if len(axes) != len(input_shape):
raise ValueError(
"axis must be a list of the same length as the input shape, "
f"expected {len(input_shape)}, but received {len(axes)}."
)
return tuple(input_shape[ax] for ax in axes)
def compute_take_along_axis_output_shape(input_shape, indices_shape, axis):
input_shape = list(input_shape)
indices_shape = list(indices_shape)
if axis is None:
input_shape = (
[None] if None in input_shape else [int(np.prod(input_shape))]
)
if len(input_shape) != len(indices_shape):
raise ValueError(
"`x` and `indices` must have the same number of dimensions, "
f"but receive shape {input_shape} and {indices_shape}."
)
input_shape[axis] = indices_shape[axis]
output_shape = broadcast_shapes(input_shape, indices_shape)
return output_shape
def reduce_shape(shape, axis=None, keepdims=False):
shape = list(shape)
if axis is None:
if keepdims:
return tuple([1 for _ in shape])
else:
return tuple([])
elif isinstance(axis, int):
axis = (axis,)
axis = tuple(canonicalize_axis(a, len(shape)) for a in axis)
if keepdims:
for ax in axis:
shape[ax] = 1
return tuple(shape)
else:
for ax in sorted(axis, reverse=True):
del shape[ax]
return tuple(shape)
@keras_export("keras.utils.get_source_inputs")
def get_source_inputs(tensor):
"""Returns the list of input tensors necessary to compute `tensor`.
Output will always be a list of tensors
(potentially with 1 element).
Args:
tensor: The tensor to start from.
Returns:
List of input tensors.
"""
if not hasattr(tensor, "_keras_history"):
return tensor
operation, node_index, _ = tensor._keras_history
if not operation or not operation._inbound_nodes:
return [tensor]
else:
node = operation._inbound_nodes[node_index]
if node.is_input:
# Reached input node, stop recursion.
return tree.flatten(node.output_tensors)
else:
source_tensors = []
for tensor in node.input_tensors:
previous_sources = get_source_inputs(tensor)
# Avoid input redundancy.
for x in previous_sources:
if all(x is not t for t in source_tensors):
source_tensors.append(x)
return source_tensors
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/ops/node.py | keras/src/ops/node.py | import collections
from keras.src import tree
from keras.src.backend import KerasTensor
from keras.src.ops.symbolic_arguments import SymbolicArguments
class Node:
"""A `Node` describes an operation `__call__()` event.
A Keras Function is a DAG with `Node` instances as nodes, and
`KerasTensor` instances as edges. Nodes aren't `Operation` instances,
because a single operation could be called multiple times, which would
result in graph cycles.
A `__call__()` event involves input tensors (and other input arguments),
the operation that was called, and the resulting output tensors.
A `Node` will include all this information.
Since a single `Operation` could be called multiple times,
the `Node` instances are stored on operations as a list.
Each time an operation is called, a node is added to `op._inbound_nodes`.
Each time the output of an operation is used by another operation,
a node is added to `op._outbound_nodes`.
Every `KerasTensor` instance has a `KerasHistory` object attached,
which tracks the `Node` that records the `__call__()` event that created
the tensor. By recursively walking through `Node` instances
via the `KerasHistory` metadata of `KerasTensor` instances, once can
retrieve the entire DAG of a Keras Function.
Args:
operation: The Operation that was called in the `op.__call__()`
event that this node represents.
call_args: The positional arguments the operation was called with.
call_kwargs: The keyword arguments the operation was called with.
outputs: The output tensors of the `op.__call__()` call.
"""
def __init__(
self, operation, call_args=None, call_kwargs=None, outputs=None
):
self.operation = operation
self.arguments = SymbolicArguments(*call_args, **call_kwargs)
self.outputs = [] if outputs is None else tree.flatten(outputs)
for x in self.outputs:
if not isinstance(x, KerasTensor):
raise ValueError(
"All operation outputs must be tensors. "
f"Operation {operation} returned a non-tensor. "
f"Non-tensor received: {x}"
)
zero_history = any(
not x.record_history for x in self.arguments.keras_tensors
)
# If inputs don't have metadata yet, add it.
if not zero_history:
for tensor in self.arguments.keras_tensors:
if not hasattr(tensor, "_keras_history"):
tensor._keras_history = KerasHistory(
operation=None, node_index=0, tensor_index=0
)
# Wire up Node to Operations.
self.operation._inbound_nodes.append(self)
for kt in self.arguments.keras_tensors:
inbound_op = kt._keras_history.operation
if inbound_op is not None: # It's a graph entry point.
inbound_op._outbound_nodes.append(self)
# Set metadata on outputs.
if not zero_history:
node_index = len(self.operation._inbound_nodes) - 1
for i, tensor in enumerate(self.outputs):
tensor._keras_history = KerasHistory(
operation=operation, node_index=node_index, tensor_index=i
)
# Whether this is a root node.
self.is_input = not self.arguments.keras_tensors
def __repr__(self):
return f"<Node operation={self.operation.name}, id={id(self)}>"
@property
def input_tensors(self):
return self.arguments.keras_tensors
@property
def output_tensors(self):
return self.outputs
@property
def parent_nodes(self):
"""The parent `Node`s.
Returns:
all the `Node`s whose output this node immediately depends on.
"""
node_deps = []
for kt in self.arguments.keras_tensors:
op = kt._keras_history.operation
node_index = kt._keras_history.node_index
if op is not None: # `None` for `Input` tensors.
node_deps.append(op._inbound_nodes[node_index])
return node_deps
class KerasHistory(
collections.namedtuple(
"KerasHistory", ["operation", "node_index", "tensor_index"]
)
):
"""Tracks the Operation call that created a Tensor.
During construction of Keras Functions, this metadata is added to
each Tensor produced as the output of an Operation.
This allows Keras to track how each Tensor was produced, and
this information is later retraced by the `Function` class to
reconstruct the Operations graph.
Attributes:
operation: The Operation instance that produced the Tensor.
node_index: The specific call to the Operation that produced this Tensor.
Operations can be called multiple times in order to share weights. A new
node is created every time an Operation is called. The corresponding
node that represents the call event that produced the Tensor can be
found at `op._inbound_nodes[node_index]`.
tensor_index: The output index for this Tensor.
Always zero if the Operation that produced this Tensor
only has one output. Nested structures of
Tensors are deterministically assigned an index via `nest.flatten`.
"""
# Added to maintain memory and performance characteristics of `namedtuple`
# while subclassing.
__slots__ = ()
def is_keras_tensor(obj):
return hasattr(obj, "_keras_history")
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/ops/math_test.py | keras/src/ops/math_test.py | import math
import jax.numpy as jnp
import numpy as np
import pytest
import scipy.signal
from absl.testing import parameterized
from keras.src import backend
from keras.src import testing
from keras.src.backend.common import dtypes
from keras.src.backend.common import standardize_dtype
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.ops import math as kmath
def _stft(
x, sequence_length, sequence_stride, fft_length, window="hann", center=True
):
# pure numpy version of stft that matches librosa's implementation
x = np.array(x)
ori_dtype = x.dtype
if center:
pad_width = [(0, 0) for _ in range(len(x.shape))]
pad_width[-1] = (fft_length // 2, fft_length // 2)
x = np.pad(x, pad_width, mode="reflect")
l_pad = (fft_length - sequence_length) // 2
r_pad = fft_length - sequence_length - l_pad
if window is not None:
if isinstance(window, str):
window = scipy.signal.get_window(window, sequence_length)
win = np.array(window, dtype=x.dtype)
win = np.pad(win, [[l_pad, r_pad]])
else:
win = np.ones((sequence_length + l_pad + r_pad), dtype=x.dtype)
x = scipy.signal.stft(
x,
fs=1.0,
window=win,
nperseg=(sequence_length + l_pad + r_pad),
noverlap=(sequence_length + l_pad + r_pad - sequence_stride),
nfft=fft_length,
boundary=None,
padded=False,
)[-1]
# scale and swap to (..., num_sequences, fft_bins)
x = x / np.sqrt(1.0 / win.sum() ** 2)
x = np.swapaxes(x, -2, -1)
return np.real(x).astype(ori_dtype), np.imag(x).astype(ori_dtype)
def _istft(
    x,
    sequence_length,
    sequence_stride,
    fft_length,
    length=None,
    window="hann",
    center=True,
):
    # pure numpy version of istft that matches librosa's implementation
    #
    # `x` is a (real, imag) pair of arrays shaped (..., num_sequences,
    # fft_bins); the result is the reconstructed real signal. The exact
    # sequence of numpy ops below is float-sensitive, so it is kept as-is.
    complex_input = x[0] + 1j * x[1]
    # Inverse real FFT of each frame, cast back to the input float dtype.
    x = np.fft.irfft(
        complex_input, n=fft_length, axis=-1, norm="backward"
    ).astype(x[0].dtype)
    expected_output_len = fft_length + sequence_stride * (x.shape[-2] - 1)
    if window is not None:
        if isinstance(window, str):
            win = np.array(
                scipy.signal.get_window(window, sequence_length), dtype=x.dtype
            )
        else:
            win = np.array(window, dtype=x.dtype)
        # Center the synthesis window inside an fft_length-sized frame.
        l_pad = (fft_length - sequence_length) // 2
        r_pad = fft_length - sequence_length - l_pad
        win = np.pad(win, [[l_pad, r_pad]])

        # square and sum
        # Build the squared-window overlap-add normalization term, tiled to
        # the padded frame length, and divide the window by it.
        _sequence_length = sequence_length + l_pad + r_pad
        denom = np.square(win)
        overlaps = -(-_sequence_length // sequence_stride)  # ceil division
        denom = np.pad(
            denom, [(0, overlaps * sequence_stride - _sequence_length)]
        )
        denom = np.reshape(denom, [overlaps, sequence_stride])
        denom = np.sum(denom, 0, keepdims=True)
        denom = np.tile(denom, [overlaps, 1])
        denom = np.reshape(denom, [overlaps * sequence_stride])
        win = np.divide(win, denom[:_sequence_length])
        x = np.multiply(x, win)

    # overlap_sequences
    def _overlap_sequences(x, sequence_stride):
        # Overlap-add frames (..., num_sequences, sequence_length) into a
        # single time axis via reshape/pad/sum tricks (no python loop).
        *batch_shape, num_sequences, sequence_length = x.shape
        flat_batchsize = math.prod(batch_shape)
        x = np.reshape(x, (flat_batchsize, num_sequences, sequence_length))
        output_size = sequence_stride * (num_sequences - 1) + sequence_length
        nstep_per_segment = 1 + (sequence_length - 1) // sequence_stride
        # Pad each frame up to a whole number of stride-sized steps.
        padded_segment_len = nstep_per_segment * sequence_stride
        x = np.pad(
            x, ((0, 0), (0, 0), (0, padded_segment_len - sequence_length))
        )
        x = np.reshape(
            x,
            (flat_batchsize, num_sequences, nstep_per_segment, sequence_stride),
        )
        # Transpose + pad so that summing along the sequence axis realizes
        # the per-frame shifts of the overlap-add.
        x = x.transpose((0, 2, 1, 3))
        x = np.pad(x, ((0, 0), (0, 0), (0, num_sequences), (0, 0)))
        shrinked = x.shape[2] - 1
        x = np.reshape(x, (flat_batchsize, -1))
        x = x[:, : (nstep_per_segment * shrinked * sequence_stride)]
        x = np.reshape(
            x, (flat_batchsize, nstep_per_segment, shrinked * sequence_stride)
        )
        x = np.sum(x, axis=1)[:, :output_size]
        return np.reshape(x, tuple(batch_shape) + (-1,))

    x = _overlap_sequences(x, sequence_stride)

    # Trim the centering padding and clip to the requested output length.
    start = 0 if center is False else fft_length // 2
    if length is not None:
        end = start + length
    elif center:
        end = -(fft_length // 2)
    else:
        end = expected_output_len
    return x[..., start:end]
def _sum_reduce(left, right):
return left + right
def _max_reduce(left, right):
return np.max(np.stack([left, right]), axis=0)
class MathOpsDynamicShapeTest(testing.TestCase):
    """Static shape inference for math ops on `KerasTensor`s with dynamic
    (`None`) dimensions.

    These tests never execute the ops; they only check the symbolic output
    shapes computed at trace time.
    """

    @parameterized.parameters([(kmath.segment_sum,), (kmath.segment_max,)])
    def test_segment_reduce(self, segment_reduce_op):
        # 1D case
        data = KerasTensor((None, 4), dtype="float32")
        segment_ids = KerasTensor((10,), dtype="int32")
        outputs = segment_reduce_op(data, segment_ids)
        self.assertEqual(outputs.shape, (None, 4))

        # With an explicit `num_segments`, the leading output dim is static.
        data = KerasTensor((None, 4), dtype="float32")
        segment_ids = KerasTensor((10,), dtype="int32")
        outputs = segment_reduce_op(data, segment_ids, num_segments=5)
        self.assertEqual(outputs.shape, (5, 4))

        data = KerasTensor((10,), dtype="float32")
        segment_ids = KerasTensor(
            (10,),
            dtype="int32",
        )
        outputs = segment_reduce_op(data, segment_ids)
        self.assertEqual(outputs.shape, (None,))

    def test_top_k(self):
        x = KerasTensor((None, 2, 3))
        values, indices = kmath.top_k(x, k=1)
        self.assertEqual(values.shape, (None, 2, 1))
        self.assertEqual(indices.shape, (None, 2, 1))

    def test_in_top_k(self):
        targets = KerasTensor((None,))
        predictions = KerasTensor((None, 10))
        self.assertEqual(
            kmath.in_top_k(targets, predictions, k=1).shape, (None,)
        )

    def test_logsumexp(self):
        x = KerasTensor((None, 2, 3), dtype="float32")
        self.assertEqual(kmath.logsumexp(x).shape, ())
        self.assertEqual(kmath.logsumexp(x, axis=1).shape, (None, 3))
        self.assertEqual(kmath.logsumexp(x, axis=(1, 2)).shape, (None,))
        self.assertEqual(kmath.logsumexp(x, keepdims=True).shape, (1, 1, 1))

    def test_extract_sequences(self):
        # Defined dimension
        x = KerasTensor((None, 32), dtype="float32")
        sequence_length = 3
        sequence_stride = 2
        outputs = kmath.extract_sequences(x, sequence_length, sequence_stride)
        num_sequences = 1 + (x.shape[-1] - sequence_length) // sequence_stride
        self.assertEqual(outputs.shape, (None, num_sequences, sequence_length))

        # Undefined dimension
        x = KerasTensor((None, None), dtype="float32")
        sequence_length = 3
        sequence_stride = 2
        outputs = kmath.extract_sequences(x, sequence_length, sequence_stride)
        self.assertEqual(outputs.shape, (None, None, sequence_length))

    def test_fft(self):
        real = KerasTensor((None, 4, 3), dtype="float32")
        imag = KerasTensor((None, 4, 3), dtype="float32")
        real_output, imag_output = kmath.fft((real, imag))
        # Use numpy on a concrete batch as the shape reference.
        ref = np.fft.fft(np.ones((2, 4, 3)))
        ref_shape = (None,) + ref.shape[1:]
        self.assertEqual(real_output.shape, ref_shape)
        self.assertEqual(imag_output.shape, ref_shape)

    def test_fft2(self):
        real = KerasTensor((None, 4, 3), dtype="float32")
        imag = KerasTensor((None, 4, 3), dtype="float32")
        real_output, imag_output = kmath.fft2((real, imag))
        ref = np.fft.fft2(np.ones((2, 4, 3)))
        ref_shape = (None,) + ref.shape[1:]
        self.assertEqual(real_output.shape, ref_shape)
        self.assertEqual(imag_output.shape, ref_shape)

    def test_ifft2(self):
        real = KerasTensor((None, 4, 3), dtype="float32")
        imag = KerasTensor((None, 4, 3), dtype="float32")
        real_output, imag_output = kmath.ifft2((real, imag))
        ref = np.fft.ifft2(np.ones((2, 4, 3)))
        ref_shape = (None,) + ref.shape[1:]
        self.assertEqual(real_output.shape, ref_shape)
        self.assertEqual(imag_output.shape, ref_shape)

    @parameterized.parameters([(None,), (1,), (5,)])
    def test_rfft(self, fft_length):
        x = KerasTensor((None, 4, 3), dtype="float32")
        real_output, imag_output = kmath.rfft(x, fft_length=fft_length)
        ref = np.fft.rfft(np.ones((2, 4, 3)), n=fft_length)
        ref_shape = (None,) + ref.shape[1:]
        self.assertEqual(real_output.shape, ref_shape)
        self.assertEqual(imag_output.shape, ref_shape)

    @parameterized.parameters([(None,), (1,), (5,)])
    def test_irfft(self, fft_length):
        real = KerasTensor((None, 4, 3), dtype="float32")
        imag = KerasTensor((None, 4, 3), dtype="float32")
        output = kmath.irfft((real, imag), fft_length=fft_length)
        ref = np.fft.irfft(np.ones((2, 4, 3)), n=fft_length)
        ref_shape = (None,) + ref.shape[1:]
        self.assertEqual(output.shape, ref_shape)

    def test_stft(self):
        x = KerasTensor((None, 32), dtype="float32")
        sequence_length = 10
        sequence_stride = 3
        fft_length = 15
        real_output, imag_output = kmath.stft(
            x, sequence_length, sequence_stride, fft_length
        )
        # Shape reference comes from the pure-numpy `_stft` helper.
        real_ref, imag_ref = _stft(
            np.ones((2, 32)), sequence_length, sequence_stride, fft_length
        )
        real_ref_shape = (None,) + real_ref.shape[1:]
        imag_ref_shape = (None,) + imag_ref.shape[1:]
        self.assertEqual(real_output.shape, real_ref_shape)
        self.assertEqual(imag_output.shape, imag_ref_shape)

    def test_istft(self):
        sequence_length = 10
        sequence_stride = 3
        fft_length = 15
        real = KerasTensor((None, 32), dtype="float32")
        imag = KerasTensor((None, 32), dtype="float32")
        output = kmath.istft(
            (real, imag), sequence_length, sequence_stride, fft_length
        )
        # Shape reference comes from the pure-numpy `_istft` helper.
        ref = _istft(
            (np.ones((5, 32)), np.ones((5, 32))),
            sequence_length,
            sequence_stride,
            fft_length,
        )
        ref_shape = (None,) + ref.shape[1:]
        self.assertEqual(output.shape, ref_shape)

    def test_rsqrt(self):
        x = KerasTensor([None, 3])
        self.assertEqual(kmath.rsqrt(x).shape, (None, 3))

    def test_logdet(self):
        x = KerasTensor((None, 3, 3))
        out = kmath.logdet(x)
        self.assertEqual(out.shape, (None,))
class MathOpsStaticShapeTest(testing.TestCase):
    """Static shape inference for math ops on fully-defined `KerasTensor`s.

    These tests never execute the ops; they only check the symbolic output
    shapes computed at trace time.
    """

    @parameterized.parameters([(kmath.segment_sum,), (kmath.segment_max,)])
    @pytest.mark.skipif(
        backend.backend() == "jax",
        reason="JAX does not support `num_segments=None`.",
    )
    def test_segment_reduce(self, segment_reduce_op):
        # 1D case
        # Without `num_segments` the leading output dim is data-dependent,
        # hence `None` even for static inputs.
        data = KerasTensor((10, 4), dtype="float32")
        segment_ids = KerasTensor((10,), dtype="int32")
        outputs = segment_reduce_op(data, segment_ids)
        self.assertEqual(outputs.shape, (None, 4))

        data = KerasTensor((10,), dtype="float32")
        segment_ids = KerasTensor((10,), dtype="int32")
        outputs = segment_reduce_op(data, segment_ids)
        self.assertEqual(outputs.shape, (None,))

    @parameterized.parameters([(kmath.segment_sum,), (kmath.segment_max,)])
    def test_segment_reduce_explicit_num_segments(self, segment_reduce_op):
        # 1D case
        data = KerasTensor((10, 4), dtype="float32")
        segment_ids = KerasTensor((10,), dtype="int32")
        outputs = segment_reduce_op(data, segment_ids, num_segments=5)
        self.assertEqual(outputs.shape, (5, 4))

        data = KerasTensor((6,), dtype="float32")
        segment_ids = KerasTensor(
            (6,),
            dtype="int32",
        )
        outputs = segment_reduce_op(data, segment_ids, num_segments=5)
        self.assertEqual(outputs.shape, (5,))

    def test_topk(self):
        x = KerasTensor((1, 2, 3))
        values, indices = kmath.top_k(x, k=1)
        self.assertEqual(values.shape, (1, 2, 1))
        self.assertEqual(indices.shape, (1, 2, 1))

    def test_in_top_k(self):
        targets = KerasTensor((5,))
        predictions = KerasTensor((5, 10))
        self.assertEqual(kmath.in_top_k(targets, predictions, k=1).shape, (5,))

    def test_logsumexp(self):
        x = KerasTensor((1, 2, 3), dtype="float32")
        result = kmath.logsumexp(x)
        self.assertEqual(result.shape, ())

    def test_extract_sequences(self):
        x = KerasTensor((10, 16), dtype="float32")
        sequence_length = 3
        sequence_stride = 2
        outputs = kmath.extract_sequences(x, sequence_length, sequence_stride)
        num_sequences = 1 + (x.shape[-1] - sequence_length) // sequence_stride
        self.assertEqual(outputs.shape, (10, num_sequences, sequence_length))

    def test_fft(self):
        real = KerasTensor((2, 4, 3), dtype="float32")
        imag = KerasTensor((2, 4, 3), dtype="float32")
        real_output, imag_output = kmath.fft((real, imag))
        # numpy output on a same-shape input is the shape reference.
        ref = np.fft.fft(np.ones((2, 4, 3)))
        self.assertEqual(real_output.shape, ref.shape)
        self.assertEqual(imag_output.shape, ref.shape)

    def test_fft2(self):
        real = KerasTensor((2, 4, 3), dtype="float32")
        imag = KerasTensor((2, 4, 3), dtype="float32")
        real_output, imag_output = kmath.fft2((real, imag))
        ref = np.fft.fft2(np.ones((2, 4, 3)))
        self.assertEqual(real_output.shape, ref.shape)
        self.assertEqual(imag_output.shape, ref.shape)

    def test_ifft2(self):
        real = KerasTensor((2, 4, 3), dtype="float32")
        imag = KerasTensor((2, 4, 3), dtype="float32")
        real_output, imag_output = kmath.ifft2((real, imag))
        ref = np.fft.ifft2(np.ones((2, 4, 3)))
        self.assertEqual(real_output.shape, ref.shape)
        self.assertEqual(imag_output.shape, ref.shape)

    def test_rfft(self):
        x = KerasTensor((2, 4, 3), dtype="float32")
        real_output, imag_output = kmath.rfft(x)
        ref = np.fft.rfft(np.ones((2, 4, 3)))
        self.assertEqual(real_output.shape, ref.shape)
        self.assertEqual(imag_output.shape, ref.shape)

    def test_irfft(self):
        real = KerasTensor((2, 4, 3), dtype="float32")
        imag = KerasTensor((2, 4, 3), dtype="float32")
        output = kmath.irfft((real, imag))
        ref = np.fft.irfft(np.ones((2, 4, 3)))
        self.assertEqual(output.shape, ref.shape)

    def test_rsqrt(self):
        x = KerasTensor([4, 3], dtype="float32")
        self.assertEqual(kmath.rsqrt(x).shape, (4, 3))

    def test_stft(self):
        x = KerasTensor((2, 32), dtype="float32")
        sequence_length = 10
        sequence_stride = 3
        fft_length = 15
        real_output, imag_output = kmath.stft(
            x, sequence_length, sequence_stride, fft_length
        )
        # Shape reference comes from the pure-numpy `_stft` helper.
        real_ref, imag_ref = _stft(
            np.ones((2, 32)), sequence_length, sequence_stride, fft_length
        )
        self.assertEqual(real_output.shape, real_ref.shape)
        self.assertEqual(imag_output.shape, imag_ref.shape)

    def test_istft(self):
        # sequence_stride must <= x[0].shape[-1]
        # sequence_stride must >= fft_length / num_sequences
        sequence_length = 10
        sequence_stride = 3
        fft_length = 15
        num_sequences = fft_length // sequence_stride + 1
        real = KerasTensor((num_sequences, 32), dtype="float32")
        imag = KerasTensor((num_sequences, 32), dtype="float32")
        output = kmath.istft(
            (real, imag), sequence_length, sequence_stride, fft_length
        )
        # Shape reference comes from the pure-numpy `_istft` helper.
        ref = _istft(
            (np.ones((num_sequences, 32)), np.ones((num_sequences, 32))),
            sequence_length,
            sequence_stride,
            fft_length,
        )
        self.assertEqual(output.shape, ref.shape)

    def test_logdet(self):
        x = KerasTensor((3, 3))
        out = kmath.logdet(x)
        self.assertEqual(out.shape, ())

        # Batched input: logdet is computed per trailing 3x3 matrix.
        x = KerasTensor((2, 4, 3, 3))
        out = kmath.logdet(x)
        self.assertEqual(out.shape, (2, 4))
class MathOpsCorrectnessTest(testing.TestCase):
    def run_segment_reduce_test(
        self,
        segment_reduce_op,
        element_wise_reduce_method,
        num_indices,
        indices_high,
        data_dims=tuple(),
        num_segments=None,
        add_neg1_to_indices=False,
        sorted_indices=False,
    ):
        """Check `segment_reduce_op` against a per-row python reference.

        Args:
            segment_reduce_op: op under test (`segment_sum` / `segment_max`).
            element_wise_reduce_method: binary reduction folded pairwise to
                build the expected output.
            num_indices: number of data rows / segment ids.
            indices_high: ids cover `[0, indices_high)`; each id appears at
                least once.
            data_dims: trailing dims of each data element (empty for 1D).
            num_segments: optional static segment count passed to the op.
            add_neg1_to_indices: if True, set the first id to -1 (such rows
                are dropped).
            sorted_indices: if True, sort the ids and pass `sorted=True`.
        """
        if num_segments is not None and indices_high >= num_segments:
            raise ValueError("Indices high cannot be more than num segments")
        indices_dims = (num_indices,)
        full_data_dims = indices_dims + data_dims
        data = np.random.rand(*full_data_dims).astype(np.float32)
        # Guarantee every id in [0, indices_high) occurs once, then fill the
        # remaining slots randomly from the same range.
        segment_ids = np.concatenate(
            [
                np.arange(indices_high),
                np.random.randint(
                    low=0,
                    high=indices_high,
                    size=(indices_dims[0] - indices_high),
                ),
            ]
        ).astype(np.int32)
        if sorted_indices:
            segment_ids = np.sort(segment_ids, axis=-1)
        if add_neg1_to_indices:
            segment_ids[0] = -1
        outputs = segment_reduce_op(
            data, segment_ids, num_segments, sorted=sorted_indices
        )
        if num_segments is None:
            num_segments = np.max(segment_ids).item() + 1
        expected_shape = (num_segments,) + data_dims
        # segment_max needs an identity fill for empty segments; the
        # tensorflow backend uses float32 lowest rather than -inf.
        if segment_reduce_op == kmath.segment_max:
            if backend.backend() == "tensorflow":
                empty_fill_value = -np.finfo(np.float32).max
            else:
                empty_fill_value = -np.inf
            expected = np.full(expected_shape, empty_fill_value)
        else:
            expected = np.zeros(expected_shape)

        # Reference: fold each data row into its segment slot, skipping rows
        # whose id is -1.
        for idx in range(num_indices):
            segment_id = segment_ids[idx]
            if segment_id == -1:
                continue
            expected[segment_id] = element_wise_reduce_method(
                expected[segment_id], data[idx]
            )
        self.assertAllClose(outputs, expected)
    @parameterized.product(
        (
            dict(
                segment_reduce_op=kmath.segment_sum,
                element_wise_reduce_method=_sum_reduce,
            ),
            dict(
                segment_reduce_op=kmath.segment_max,
                element_wise_reduce_method=_max_reduce,
            ),
        ),
        sorted_indices=(True, False),
    )
    @pytest.mark.skipif(
        backend.backend() == "jax",
        reason="JAX does not support `num_segments=None`.",
    )
    def test_segment_reduce(
        self,
        segment_reduce_op,
        element_wise_reduce_method,
        sorted_indices,
    ):
        """Correctness of segment ops when `num_segments` is inferred."""
        # Test 1D case.
        self.run_segment_reduce_test(
            segment_reduce_op,
            element_wise_reduce_method,
            num_indices=9,
            indices_high=3,
            sorted_indices=sorted_indices,
        )

        # Test ND data case.
        self.run_segment_reduce_test(
            segment_reduce_op,
            element_wise_reduce_method,
            num_indices=9,
            indices_high=3,
            data_dims=(
                3,
                3,
            ),
            sorted_indices=sorted_indices,
        )
    @parameterized.product(
        (
            dict(
                segment_reduce_op=kmath.segment_sum,
                element_wise_reduce_method=_sum_reduce,
            ),
            dict(
                segment_reduce_op=kmath.segment_max,
                element_wise_reduce_method=_max_reduce,
            ),
        ),
        (
            dict(
                contains_neg1_in_indices=True,
                sorted_indices=False,
            ),
            dict(
                contains_neg1_in_indices=False,
                sorted_indices=False,
            ),
            dict(
                contains_neg1_in_indices=False,
                sorted_indices=True,
            ),
        ),
    )
    def test_segment_reduce_explicit_num_segments(
        self,
        segment_reduce_op,
        element_wise_reduce_method,
        contains_neg1_in_indices,
        sorted_indices,
    ):
        """Correctness of segment ops with a static `num_segments`, including
        dropped (-1) ids."""
        if backend.backend() == "tensorflow" and sorted_indices:
            pytest.skip(
                "Num segments and sorted_indices=True doesn't work for "
                "tensorflow."
            )
        # Test 1D case.
        self.run_segment_reduce_test(
            segment_reduce_op,
            element_wise_reduce_method,
            num_indices=9,
            indices_high=3,
            num_segments=4,
            add_neg1_to_indices=contains_neg1_in_indices,
            sorted_indices=sorted_indices,
        )

        # Test ND data case.
        self.run_segment_reduce_test(
            segment_reduce_op,
            element_wise_reduce_method,
            num_indices=9,
            indices_high=3,
            data_dims=(
                3,
                3,
            ),
            num_segments=4,
            add_neg1_to_indices=contains_neg1_in_indices,
            sorted_indices=sorted_indices,
        )
def test_top_k(self):
x = np.array([0, 4, 2, 1, 3, -1], dtype=np.float32)
values, indices = kmath.top_k(x, k=2)
self.assertAllClose(values, [4, 3])
self.assertAllClose(indices, [1, 4])
x = np.array([0, 4, 2, 1, 3, -1], dtype=np.float32)
values, indices = kmath.top_k(x, k=2, sorted=False)
# Any order ok when `sorted=False`.
self.assertEqual(set(backend.convert_to_numpy(values)), set([4, 3]))
self.assertEqual(set(backend.convert_to_numpy(indices)), set([1, 4]))
x = np.random.rand(5, 5)
outputs = kmath.top_k(x, k=2)
expected_values = np.zeros((5, 2))
expected_indices = np.zeros((5, 2), dtype=np.int32)
for i in range(x.shape[0]):
top_k_indices = np.argsort(x[i])[-2:][::-1]
expected_values[i] = x[i, top_k_indices]
expected_indices[i] = top_k_indices
self.assertAllClose(outputs[0], expected_values)
self.assertAllClose(outputs[1], expected_indices)
    def test_in_top_k(self):
        """in_top_k marks whether each target index is among the k largest
        predictions, handling ties and NaNs."""
        targets = np.array([1, 0, 2])
        predictions = np.array(
            [
                [0.1, 0.9, 0.8, 0.8],
                [0.05, 0.95, 0, 1],
                [0.1, 0.8, 0.3, 1],
            ]
        )
        self.assertAllEqual(
            kmath.in_top_k(targets, predictions, k=1), [True, False, False]
        )
        self.assertAllEqual(
            kmath.in_top_k(targets, predictions, k=2), [True, False, False]
        )
        self.assertAllEqual(
            kmath.in_top_k(targets, predictions, k=3), [True, True, True]
        )

        # Test tie cases.
        # Tied values all count as occupying the same rank.
        targets = np.array([1, 0, 2])
        predictions = np.array(
            [
                [0.1, 0.9, 0.8, 0.8],
                [0.95, 0.95, 0, 0.95],
                [0.1, 0.8, 0.8, 0.95],
            ]
        )
        self.assertAllEqual(
            kmath.in_top_k(targets, predictions, k=1), [True, True, False]
        )
        self.assertAllEqual(
            kmath.in_top_k(targets, predictions, k=2), [True, True, True]
        )
        self.assertAllEqual(
            kmath.in_top_k(targets, predictions, k=3), [True, True, True]
        )

        # Test `nan` in predictions
        # https://github.com/keras-team/keras/issues/19995
        targets = np.array([1, 0])
        predictions = np.array([[0.1, np.nan, 0.5], [0.3, 0.2, 0.5]])
        self.assertAllEqual(
            kmath.in_top_k(targets, predictions, k=2), [False, True]
        )
def test_logsumexp(self):
x = np.random.rand(5, 5)
outputs = kmath.logsumexp(x)
expected = np.log(np.sum(np.exp(x)))
self.assertAllClose(outputs, expected)
outputs = kmath.logsumexp(x, axis=1)
expected = np.log(np.sum(np.exp(x), axis=1))
self.assertAllClose(outputs, expected)
def test_extract_sequences(self):
# Test 1D case.
x = np.random.random((10,))
sequence_length = 3
sequence_stride = 2
output = kmath.extract_sequences(x, sequence_length, sequence_stride)
num_sequences = 1 + (x.shape[-1] - sequence_length) // sequence_stride
expected = np.zeros(shape=(num_sequences, sequence_length))
pos = 0
for i in range(num_sequences):
expected[i] = x[pos : pos + sequence_length]
pos += sequence_stride
self.assertAllClose(output, expected, tpu_atol=1e-2, tpu_rtol=1e-2)
# Test N-D case.
x = np.random.random((4, 8))
sequence_length = 3
sequence_stride = 2
output = kmath.extract_sequences(x, sequence_length, sequence_stride)
num_sequences = 1 + (x.shape[-1] - sequence_length) // sequence_stride
expected = np.zeros(shape=(4, num_sequences, sequence_length))
pos = 0
for i in range(num_sequences):
expected[:, i] = x[:, pos : pos + sequence_length]
pos += sequence_stride
self.assertAllClose(output, expected, tpu_atol=1e-2, tpu_rtol=1e-2)
def test_fft(self):
real = np.random.random((2, 4, 3))
imag = np.random.random((2, 4, 3))
complex_arr = real + 1j * imag
real_output, imag_output = kmath.fft((real, imag))
ref = np.fft.fft(complex_arr)
real_ref = np.real(ref)
imag_ref = np.imag(ref)
self.assertAllClose(real_ref, real_output)
self.assertAllClose(imag_ref, imag_output)
def test_fft2(self):
real = np.random.random((2, 4, 3))
imag = np.random.random((2, 4, 3))
complex_arr = real + 1j * imag
real_output, imag_output = kmath.fft2((real, imag))
ref = np.fft.fft2(complex_arr)
real_ref = np.real(ref)
imag_ref = np.imag(ref)
self.assertAllClose(real_ref, real_output)
self.assertAllClose(imag_ref, imag_output)
def test_ifft2(self):
real = np.random.random((2, 4, 3)).astype(np.float32)
imag = np.random.random((2, 4, 3)).astype(np.float32)
complex_arr = real + 1j * imag
real_output, imag_output = kmath.ifft2((real, imag))
ref = np.fft.ifft2(complex_arr)
real_ref = np.real(ref)
imag_ref = np.imag(ref)
self.assertAllClose(real_ref, real_output)
self.assertAllClose(imag_ref, imag_output)
@parameterized.parameters([(None,), (3,), (15,)])
def test_rfft(self, n):
# Test 1D.
x = np.random.random((10,))
real_output, imag_output = kmath.rfft(x, fft_length=n)
ref = np.fft.rfft(x, n=n)
real_ref = np.real(ref)
imag_ref = np.imag(ref)
self.assertAllClose(real_ref, real_output, atol=1e-5, rtol=1e-5)
self.assertAllClose(imag_ref, imag_output, atol=1e-5, rtol=1e-5)
# Test N-D case.
x = np.random.random((2, 3, 10))
real_output, imag_output = kmath.rfft(x, fft_length=n)
ref = np.fft.rfft(x, n=n)
real_ref = np.real(ref)
imag_ref = np.imag(ref)
self.assertAllClose(real_ref, real_output, atol=1e-5, rtol=1e-5)
self.assertAllClose(imag_ref, imag_output, atol=1e-5, rtol=1e-5)
@parameterized.parameters([(None,), (3,), (15,)])
def test_irfft(self, n):
# Test 1D.
real = np.random.random((10,))
imag = np.random.random((10,))
complex_arr = real + 1j * imag
output = kmath.irfft((real, imag), fft_length=n)
ref = np.fft.irfft(complex_arr, n=n)
self.assertAllClose(output, ref, atol=1e-5, rtol=1e-5)
# Test N-D case.
real = np.random.random((2, 3, 10))
imag = np.random.random((2, 3, 10))
complex_arr = real + 1j * imag
output = kmath.irfft((real, imag), fft_length=n)
ref = np.fft.irfft(complex_arr, n=n)
self.assertAllClose(output, ref, atol=1e-5, rtol=1e-5)
@parameterized.parameters(
[
(32, 8, 32, "hann", True),
(8, 8, 16, "hann", True),
(4, 4, 7, "hann", True),
(32, 8, 32, "hamming", True),
(32, 8, 32, "hann", False),
(32, 8, 32, np.ones((32,)), True),
(32, 8, 32, None, True),
]
)
def test_stft(
self, sequence_length, sequence_stride, fft_length, window, center
):
# Test 1D case.
x = np.random.random((32,))
real_output, imag_output = kmath.stft(
x, sequence_length, sequence_stride, fft_length, window, center
)
real_ref, imag_ref = _stft(
x, sequence_length, sequence_stride, fft_length, window, center
)
self.assertAllClose(real_ref, real_output, atol=1e-5, rtol=1e-5)
self.assertAllClose(imag_ref, imag_output, atol=1e-5, rtol=1e-5)
# Test N-D case.
x = np.random.random((2, 3, 32))
real_output, imag_output = kmath.stft(
x, sequence_length, sequence_stride, fft_length, window, center
)
real_ref, imag_ref = _stft(
x, sequence_length, sequence_stride, fft_length, window, center
)
self.assertAllClose(real_ref, real_output, atol=1e-5, rtol=1e-5)
self.assertAllClose(imag_ref, imag_output, atol=1e-5, rtol=1e-5)
@parameterized.parameters(
[
(32, 8, 32, "hann", True),
(8, 8, 16, "hann", True),
(4, 4, 7, "hann", True),
(32, 8, 32, "hamming", True),
(8, 4, 8, "hann", False),
(32, 8, 32, np.ones((32,)), True),
(32, 8, 32, None, True),
]
)
def test_istft(
self, sequence_length, sequence_stride, fft_length, window, center
):
# sequence_stride must <= x[0].shape[-1]
# sequence_stride must >= fft_length / num_sequences
# Test 1D case.
x = np.random.random((256,))
real_x, imag_x = _stft(
x, sequence_length, sequence_stride, fft_length, window, center
)
output = kmath.istft(
(real_x, imag_x),
sequence_length,
sequence_stride,
fft_length,
window=window,
center=center,
)
ref = _istft(
(real_x, imag_x),
sequence_length,
sequence_stride,
fft_length,
window=window,
center=center,
)
if backend.backend() in ("numpy", "jax", "torch"):
# these backends have different implementation for the boundary of
# the output, so we need to truncate 5% before assertAllClose
truncated_len = int(output.shape[-1] * 0.05)
output = output[..., truncated_len:-truncated_len]
ref = ref[..., truncated_len:-truncated_len]
# Nans are handled differently in different backends, so zero them out.
output = np.nan_to_num(backend.convert_to_numpy(output), nan=0.0)
ref = np.nan_to_num(ref, nan=0.0)
self.assertAllClose(output, ref, atol=1e-5, rtol=1e-5)
# Test N-D case.
x = np.random.random((2, 3, 256))
real_x, imag_x = _stft(
x, sequence_length, sequence_stride, fft_length, window, center
)
output = kmath.istft(
(real_x, imag_x),
sequence_length,
sequence_stride,
fft_length,
window=window,
center=center,
)
ref = _istft(
(real_x, imag_x),
sequence_length,
sequence_stride,
fft_length,
window=window,
center=center,
)
if backend.backend() in ("numpy", "jax", "torch"):
# these backends have different implementation for the boundary of
# the output, so we need to truncate 5% before assertAllClose
truncated_len = int(output.shape[-1] * 0.05)
output = output[..., truncated_len:-truncated_len]
ref = ref[..., truncated_len:-truncated_len]
# Nans are handled differently in different backends, so zero them out.
output = np.nan_to_num(backend.convert_to_numpy(output), nan=0.0)
ref = np.nan_to_num(ref, nan=0.0)
self.assertAllClose(output, ref, atol=1e-5, rtol=1e-5)
def test_rsqrt(self):
x = np.array([[1, 4, 9], [16, 25, 36]], dtype="float32")
self.assertAllClose(kmath.rsqrt(x), 1 / np.sqrt(x))
self.assertAllClose(kmath.Rsqrt()(x), 1 / np.sqrt(x))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/ops/linalg_test.py | keras/src/ops/linalg_test.py | import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.ops import linalg
from keras.src.testing.test_utils import named_product
class LinalgOpsDynamicShapeTest(testing.TestCase):
    """Static shape inference for linalg ops on `KerasTensor`s with dynamic
    (`None`) dimensions, including rejection of non-square or
    unknown-last-dims inputs.
    """

    def test_cholesky(self):
        x = KerasTensor([None, 20, 20])
        out = linalg.cholesky(x)
        self.assertEqual(out.shape, (None, 20, 20))

        # Unknown inner dim: squareness can't be verified -> error.
        x = KerasTensor([None, None, 20])
        with self.assertRaises(ValueError):
            linalg.cholesky(x)

        # Non-square trailing dims -> error.
        x = KerasTensor([None, 20, 15])
        with self.assertRaises(ValueError):
            linalg.cholesky(x)

    def test_cholesky_inverse(self):
        x = KerasTensor([None, 20, 20])
        out = linalg.cholesky_inverse(x)
        self.assertEqual(out.shape, (None, 20, 20))

        x = KerasTensor([None, None, 20])
        with self.assertRaises(ValueError):
            linalg.cholesky_inverse(x)

        x = KerasTensor([None, 20, 15])
        with self.assertRaises(ValueError):
            linalg.cholesky_inverse(x)

    def test_det(self):
        x = KerasTensor([None, 20, 20])
        out = linalg.det(x)
        self.assertEqual(out.shape, (None,))

        x = KerasTensor([None, None, 20])
        with self.assertRaises(ValueError):
            linalg.det(x)

        x = KerasTensor([None, 20, 15])
        with self.assertRaises(ValueError):
            linalg.det(x)

    def test_eig(self):
        x = KerasTensor([None, 20, 20])
        w, v = linalg.eig(x)
        self.assertEqual(w.shape, (None, 20))
        self.assertEqual(v.shape, (None, 20, 20))

    def test_eigh(self):
        x = KerasTensor([None, 20, 20])
        w, v = linalg.eigh(x)
        self.assertEqual(w.shape, (None, 20))
        self.assertEqual(v.shape, (None, 20, 20))

    def test_inv(self):
        x = KerasTensor([None, 20, 20])
        out = linalg.inv(x)
        self.assertEqual(out.shape, (None, 20, 20))

        x = KerasTensor([None, None, 20])
        with self.assertRaises(ValueError):
            linalg.inv(x)

        x = KerasTensor([None, 20, 15])
        with self.assertRaises(ValueError):
            linalg.inv(x)

    def test_lu_factor(self):
        if testing.jax_uses_gpu():
            self.skipTest("Skipping test with JAX + GPU due to temporary error")

        # Pivot vector has length min(rows, cols).
        x = KerasTensor([None, 4, 3])
        lu, p = linalg.lu_factor(x)
        self.assertEqual(lu.shape, (None, 4, 3))
        self.assertEqual(p.shape, (None, 3))

        x = KerasTensor([None, 2, 3])
        lu, p = linalg.lu_factor(x)
        self.assertEqual(lu.shape, (None, 2, 3))
        self.assertEqual(p.shape, (None, 2))

    def test_norm(self):
        x = KerasTensor((None, 3))
        self.assertEqual(linalg.norm(x).shape, ())

        x = KerasTensor((None, 3, 3))
        self.assertEqual(linalg.norm(x, axis=1).shape, (None, 3))
        self.assertEqual(
            linalg.norm(x, axis=1, keepdims=True).shape, (None, 1, 3)
        )

    def test_qr(self):
        x = KerasTensor((None, 4, 3), dtype="float32")
        q, r = linalg.qr(x, mode="reduced")
        # Shape reference comes from numpy on a concrete batch.
        qref, rref = np.linalg.qr(np.ones((2, 4, 3)), mode="reduced")
        qref_shape = (None,) + qref.shape[1:]
        rref_shape = (None,) + rref.shape[1:]
        self.assertEqual(q.shape, qref_shape)
        self.assertEqual(r.shape, rref_shape)

        q, r = linalg.qr(x, mode="complete")
        qref, rref = np.linalg.qr(np.ones((2, 4, 3)), mode="complete")
        qref_shape = (None,) + qref.shape[1:]
        rref_shape = (None,) + rref.shape[1:]
        self.assertEqual(q.shape, qref_shape)
        self.assertEqual(r.shape, rref_shape)

    def test_qr_invalid_mode(self):
        # backend agnostic error message
        x = np.array([[1, 2], [3, 4]])
        invalid_mode = "invalid_mode"
        with self.assertRaisesRegex(
            ValueError, "Expected one of {'reduced', 'complete'}."
        ):
            linalg.qr(x, mode=invalid_mode)

    def test_solve(self):
        a = KerasTensor([None, 20, 20])
        b = KerasTensor([None, 20, 5])
        out = linalg.solve(a, b)
        self.assertEqual(out.shape, (None, 20, 5))

        # Vector right-hand side.
        a = KerasTensor([None, 20, 20])
        b = KerasTensor([None, 20])
        out = linalg.solve(a, b)
        self.assertEqual(out.shape, (None, 20))

        a = KerasTensor([None, None, 20])
        b = KerasTensor([None, 20, 5])
        with self.assertRaises(ValueError):
            linalg.solve(a, b)

        a = KerasTensor([None, 20, 15])
        b = KerasTensor([None, 20, 5])
        with self.assertRaises(ValueError):
            linalg.solve(a, b)

        a = KerasTensor([None, 20, 20])
        b = KerasTensor([None, None, 5])
        with self.assertRaises(ValueError):
            linalg.solve(a, b)

    def test_solve_triangular(self):
        if testing.jax_uses_gpu():
            self.skipTest("Skipping test with JAX + GPU due to temporary error")

        a = KerasTensor([None, 20, 20])
        b = KerasTensor([None, 20, 5])
        out = linalg.solve_triangular(a, b)
        self.assertEqual(out.shape, (None, 20, 5))

        a = KerasTensor([None, 20, 20])
        b = KerasTensor([None, 20])
        out = linalg.solve_triangular(a, b)
        self.assertEqual(out.shape, (None, 20))

        a = KerasTensor([None, 20, 20])
        b = KerasTensor([None, 20, 5])
        out = linalg.solve_triangular(a, b, lower=True)
        self.assertEqual(out.shape, (None, 20, 5))

        a = KerasTensor([None, 20, 20])
        b = KerasTensor([None, 20])
        out = linalg.solve_triangular(a, b, lower=True)
        self.assertEqual(out.shape, (None, 20))

        a = KerasTensor([None, 20, 15])
        b = KerasTensor([None, 20, 5])
        with self.assertRaises(ValueError):
            linalg.solve_triangular(a, b)

        a = KerasTensor([None, 20, 20])
        b = KerasTensor([None, None, 5])
        with self.assertRaises(ValueError):
            linalg.solve_triangular(a, b)

    def test_svd(self):
        x = KerasTensor((None, 3, 2))
        u, s, v = linalg.svd(x)
        self.assertEqual(u.shape, (None, 3, 3))
        self.assertEqual(s.shape, (None, 2))
        self.assertEqual(v.shape, (None, 2, 2))

        u, s, v = linalg.svd(x, full_matrices=False)
        self.assertEqual(u.shape, (None, 3, 2))
        self.assertEqual(s.shape, (None, 2))
        self.assertEqual(v.shape, (None, 2, 2))

        # compute_uv=False returns only the singular values.
        s = linalg.svd(x, compute_uv=False)
        self.assertEqual(s.shape, (None, 2))
class LinalgOpsStaticShapeTest(testing.TestCase):
    """Static shape inference for linalg ops on fully-defined
    `KerasTensor`s, including rejection of non-square inputs.
    """

    def test_cholesky(self):
        x = KerasTensor([4, 3, 3])
        out = linalg.cholesky(x)
        self.assertEqual(out.shape, (4, 3, 3))

        # Non-square trailing dims -> error.
        x = KerasTensor([10, 20, 15])
        with self.assertRaises(ValueError):
            linalg.cholesky(x)

    def test_cholesky_inverse(self):
        x = KerasTensor([4, 3, 3])
        out = linalg.cholesky_inverse(x)
        self.assertEqual(out.shape, (4, 3, 3))

        x = KerasTensor([10, 20, 15])
        with self.assertRaises(ValueError):
            linalg.cholesky_inverse(x)

    def test_det(self):
        x = KerasTensor([4, 3, 3])
        out = linalg.det(x)
        self.assertEqual(out.shape, (4,))

        x = KerasTensor([10, 20, 15])
        with self.assertRaises(ValueError):
            linalg.det(x)

    def test_eig(self):
        x = KerasTensor([4, 3, 3])
        w, v = linalg.eig(x)
        self.assertEqual(w.shape, (4, 3))
        self.assertEqual(v.shape, (4, 3, 3))

        x = KerasTensor([10, 20, 15])
        with self.assertRaises(ValueError):
            linalg.eig(x)

    def test_eigh(self):
        x = KerasTensor([4, 3, 3])
        w, v = linalg.eigh(x)
        self.assertEqual(w.shape, (4, 3))
        self.assertEqual(v.shape, (4, 3, 3))

        x = KerasTensor([10, 20, 15])
        with self.assertRaises(ValueError):
            linalg.eigh(x)

    def test_inv(self):
        x = KerasTensor([4, 3, 3])
        out = linalg.inv(x)
        self.assertEqual(out.shape, (4, 3, 3))

        x = KerasTensor([10, 20, 15])
        with self.assertRaises(ValueError):
            linalg.inv(x)

    def test_lu_factor(self):
        if testing.jax_uses_gpu():
            self.skipTest("Skipping test with JAX + GPU due to temporary error")

        # Pivot vector has length min(rows, cols).
        x = KerasTensor([10, 4, 3])
        lu, p = linalg.lu_factor(x)
        self.assertEqual(lu.shape, (10, 4, 3))
        self.assertEqual(p.shape, (10, 3))

        x = KerasTensor([10, 2, 3])
        lu, p = linalg.lu_factor(x)
        self.assertEqual(lu.shape, (10, 2, 3))
        self.assertEqual(p.shape, (10, 2))

    def test_norm(self):
        x = KerasTensor((10, 3))
        self.assertEqual(linalg.norm(x).shape, ())

        x = KerasTensor((10, 3, 3))
        self.assertEqual(linalg.norm(x, axis=1).shape, (10, 3))
        self.assertEqual(
            linalg.norm(x, axis=1, keepdims=True).shape, (10, 1, 3)
        )

    def test_qr(self):
        x = KerasTensor((4, 3), dtype="float32")
        q, r = linalg.qr(x, mode="reduced")
        # Shape reference comes from numpy on a same-shape input.
        qref, rref = np.linalg.qr(np.ones((4, 3)), mode="reduced")
        self.assertEqual(q.shape, qref.shape)
        self.assertEqual(r.shape, rref.shape)

        q, r = linalg.qr(x, mode="complete")
        qref, rref = np.linalg.qr(np.ones((4, 3)), mode="complete")
        self.assertEqual(q.shape, qref.shape)
        self.assertEqual(r.shape, rref.shape)

        with self.assertRaises(ValueError):
            linalg.qr(x, mode="invalid")

    def test_solve(self):
        a = KerasTensor([4, 3, 3])
        b = KerasTensor([4, 3, 5])
        out = linalg.solve(a, b)
        self.assertEqual(out.shape, (4, 3, 5))

        # Vector right-hand side.
        a = KerasTensor([4, 3, 3])
        b = KerasTensor([4, 3])
        out = linalg.solve(a, b)
        self.assertEqual(out.shape, (4, 3))

        a = KerasTensor([10, 20, 15])
        b = KerasTensor([10, 20, 5])
        with self.assertRaises(ValueError):
            linalg.solve(a, b)

        # Scalar right-hand side -> error.
        a = KerasTensor([20, 20])
        b = KerasTensor([])
        with self.assertRaises(ValueError):
            linalg.solve(a, b)

    def test_solve_triangular(self):
        if testing.jax_uses_gpu():
            self.skipTest("Skipping test with JAX + GPU due to temporary error")

        a = KerasTensor([4, 3, 3])
        b = KerasTensor([4, 3, 5])
        out = linalg.solve_triangular(a, b)
        self.assertEqual(out.shape, (4, 3, 5))

        a = KerasTensor([4, 3, 3])
        b = KerasTensor([4, 3])
        out = linalg.solve_triangular(a, b)
        self.assertEqual(out.shape, (4, 3))

        a = KerasTensor([10, 20, 15])
        b = KerasTensor([10, 20, 5])
        with self.assertRaises(ValueError):
            linalg.solve_triangular(a, b)

    def test_svd(self):
        x = KerasTensor((10, 3, 2))
        u, s, v = linalg.svd(x)
        self.assertEqual(u.shape, (10, 3, 3))
        self.assertEqual(s.shape, (10, 2))
        self.assertEqual(v.shape, (10, 2, 2))

        u, s, v = linalg.svd(x, full_matrices=False)
        self.assertEqual(u.shape, (10, 3, 2))
        self.assertEqual(s.shape, (10, 2))
        self.assertEqual(v.shape, (10, 2, 2))

        # compute_uv=False returns only the singular values.
        s = linalg.svd(x, compute_uv=False)
        self.assertEqual(s.shape, (10, 2))
class LinalgOpsCorrectnessTest(testing.TestCase):
    """Numerical correctness tests for linalg ops.

    Results are compared against NumPy references (or reconstructed inputs)
    with tolerances loosened where the underlying decompositions are
    numerically unstable or where TPUs use lower precision.
    """

    def test_cholesky(self):
        # A generic random matrix is (almost surely) not PSD and must fail.
        x_non_psd = np.random.rand(4, 3, 3).astype("float32")
        with self.assertRaises(ValueError):
            linalg.cholesky(x_non_psd)
        # Build a PSD batch as x @ x^T plus a small diagonal jitter.
        x = np.random.rand(4, 3, 3).astype("float32")
        x_psd = np.matmul(x, x.transpose((0, 2, 1))) + 1e-5 * np.eye(
            3, dtype="float32"
        )
        l_out = linalg.cholesky(x_psd, upper=False)
        l_expected = np.linalg.cholesky(x_psd)
        self.assertAllClose(l_out, l_expected, atol=1e-4)
        # Upper factor is the transpose of the lower factor.
        u_out = linalg.cholesky(x_psd, upper=True)
        u_expected = l_expected.transpose((0, 2, 1))
        self.assertAllClose(u_out, u_expected, atol=1e-4)

    @parameterized.named_parameters(
        {"testcase_name": "lower", "upper": False},
        {"testcase_name": "upper", "upper": True},
    )
    def test_cholesky_inverse(self, upper):
        # Well-known PSD matrix with an exactly-representable factorization.
        A = np.array(
            [
                [4.0, 12.0, -16.0],
                [12.0, 37.0, -43.0],
                [-16.0, -43.0, 98.0],
            ],
            dtype="float32",
        )
        if upper:
            factor = np.linalg.cholesky(A, upper=True)
        else:
            factor = np.linalg.cholesky(A)
        # Hard-coded reference value of A^-1.
        expected_inverse = np.array(
            [
                [49.36111, -13.555555, 2.111111],
                [-13.555555, 3.777778, -0.555556],
                [2.111111, -0.555556, 0.111111],
            ],
            dtype="float32",
        )
        output_inverse = linalg.cholesky_inverse(factor, upper=upper)
        self.assertAllClose(
            output_inverse,
            expected_inverse,
            atol=1e-5,
            tpu_atol=1e-2,
            tpu_rtol=1e-2,
        )

    def test_det(self):
        x = np.random.rand(4, 3, 3)
        out = linalg.det(x)
        self.assertAllClose(out, np.linalg.det(x), atol=1e-5)
        # Non-square input must be rejected.
        with self.assertRaises(ValueError):
            x = np.random.rand(4, 3, 4)
            linalg.det(x)

    def test_eig(self):
        if testing.uses_tpu():
            self.skipTest("Skipping test with JAX + TPU as it's not supported")
        # Symmetrize so eigenvalues/vectors are real.
        x = np.random.rand(2, 3, 3)
        x = x @ x.transpose((0, 2, 1))
        w, v = map(ops.convert_to_numpy, linalg.eig(x))
        # Check the eigendecomposition by reconstructing x = V diag(w) V^T.
        x_reconstructed = (v * w[..., None, :]) @ v.transpose((0, 2, 1))
        self.assertAllClose(x_reconstructed, x, atol=1e-4)

    def test_eigh(self):
        x = np.random.rand(2, 3, 3)
        x = x @ x.transpose((0, 2, 1))
        w, v = map(ops.convert_to_numpy, linalg.eigh(x))
        x_reconstructed = (v * w[..., None, :]) @ v.transpose((0, 2, 1))
        self.assertAllClose(x_reconstructed, x, atol=1e-4)

    def test_inv(self):
        x = np.random.rand(4, 3, 3)
        x_inv = ops.convert_to_numpy(linalg.inv(x))
        # x @ x^-1 should recover the identity for each batch element.
        x_reconstructed = x @ x_inv
        # high tolerance due to numerical instability
        self.assertAllClose(
            x_reconstructed, np.repeat(np.eye(3)[None], 4, 0), atol=1e-3
        )

    def test_lu_factor(self):
        if testing.jax_uses_gpu():
            self.skipTest("Skipping test with JAX + GPU due to temporary error")

        def _pivot_matrix(pivots, n):
            # Expand LAPACK-style sequential row swaps into a permutation
            # matrix.
            p_matrix = np.eye(n)
            for i, p in enumerate(pivots):
                identity = np.eye(n, n)
                q = identity[i, :].copy()
                identity[i, :] = identity[p, :]
                identity[p, :] = q
                p_matrix = np.dot(p_matrix, identity)
            return p_matrix

        def _reconstruct(lu, pivots, m, n):
            # Recover A = P @ L @ U from the packed LU factorization.
            lower = np.tril(lu[:, : min(m, n)], -1) + np.eye(m, min(m, n))
            upper = np.triu(lu[: min(m, n)])
            # pivots are defined differently in tensorflow
            # compared to the other backends
            if backend.backend() == "tensorflow":
                p_matrix = np.eye(m)[pivots]
            else:
                p_matrix = _pivot_matrix(pivots, m)
            out = p_matrix @ lower @ upper
            return out

        # Square case.
        m, n = 4, 4
        x = np.random.rand(m, n)
        lu, pivots = map(ops.convert_to_numpy, linalg.lu_factor(x))
        x_reconstructed = _reconstruct(lu, pivots, m, n)
        self.assertAllClose(x_reconstructed, x, atol=1e-5)
        # Rectangular case: TensorFlow only supports square matrices.
        m, n = 4, 3
        x = np.random.rand(m, n)
        if backend.backend() == "tensorflow":
            with self.assertRaises(ValueError):
                linalg.lu_factor(x)
        else:
            lu, pivots = map(ops.convert_to_numpy, linalg.lu_factor(x))
            x_reconstructed = _reconstruct(lu, pivots, m, n)
            self.assertAllClose(x_reconstructed, x, atol=1e-5)
        # batched case
        m, n = 3, 4
        x = np.random.rand(2, m, n)
        if backend.backend() == "tensorflow":
            with self.assertRaises(ValueError):
                linalg.lu_factor(x)
        else:
            lu, pivots = map(ops.convert_to_numpy, linalg.lu_factor(x))
            for i in range(2):
                self.assertAllClose(
                    _reconstruct(lu[i], pivots[i], m, n), x[i], atol=1e-5
                )

    @parameterized.named_parameters(
        named_product(
            ndim=[1, 2],
            ord=[None, "fro", "nuc", -np.inf, -2, -1, 0, 1, 2, np.inf, 3],
            axis=[None, 1, -1, (0, 1)],
            keepdims=[False, True],
        )
    )
    def test_norm(self, ndim, ord, axis, keepdims):
        if ndim == 1:
            x = np.random.random((5,)).astype("float32")
        else:
            x = np.random.random((5, 6)).astype("float32")
        # A vector norm is computed for 1-D input or an integer axis.
        vector_norm = (ndim == 1) or isinstance(axis, int)
        axis_out_of_bounds = ndim == 1 and (
            axis == 1 or isinstance(axis, tuple)
        )
        expected_error = None
        # The conditions under which torch raises IndexError (vs
        # RuntimeError) for an out-of-bounds axis are complex.
        if (
            axis_out_of_bounds
            and (not isinstance(axis, tuple) or ord is None)
            and ord not in ("fro", "nuc")
        ):
            expected_error = IndexError
        elif (
            axis_out_of_bounds
            or (vector_norm and isinstance(axis, tuple)) # inv. axis for vector
            or (vector_norm and ord in ("fro", "nuc")) # invalid ord for vector
            or (not vector_norm and ord in (0, 3)) # invalid ord for matrix
        ):
            expected_error = RuntimeError
        if expected_error is not None:
            # Non-torch backends always throw a ValueError
            expected_error = (
                expected_error if backend.backend() == "torch" else ValueError
            )
            with self.assertRaises(expected_error):
                linalg.norm(x, ord=ord, axis=axis, keepdims=keepdims)
            return
        output = linalg.norm(x, ord=ord, axis=axis, keepdims=keepdims)
        expected_result = np.linalg.norm(
            x, ord=ord, axis=axis, keepdims=keepdims
        )
        self.assertAllClose(output, expected_result, atol=1e-5)

    def test_qr(self):
        x = np.random.random((4, 5))
        q, r = linalg.qr(x, mode="reduced")
        qref, rref = np.linalg.qr(x, mode="reduced")
        self.assertAllClose(qref, q)
        self.assertAllClose(rref, r)
        q, r = linalg.qr(x, mode="complete")
        qref, rref = np.linalg.qr(x, mode="complete")
        self.assertAllClose(qref, q)
        self.assertAllClose(rref, r)

    def test_solve(self):
        x1 = np.array([[1, 2], [4, 5]], dtype="float32")
        x2 = np.array([[2, 4], [8, 10]], dtype="float32")
        output = linalg.solve(x1, x2)
        # x2 = 2 * x1, so the solution is 2 * I.
        expected_result = np.array([[2, 0], [0, 2]], dtype="float32")
        self.assertAllClose(output, expected_result)

    def test_solve_triangular(self):
        if testing.jax_uses_gpu():
            self.skipTest("Skipping test with JAX + GPU due to temporary error")
        # 2d-case
        x1 = np.array([[1, 2], [0, 5]], dtype="float32")
        x2 = np.array([2, 10], dtype="float32")
        output = linalg.solve_triangular(x1, x2, lower=True)
        expected_result = np.array([2, 2], dtype="float32")
        self.assertAllClose(output, expected_result)
        output = linalg.solve_triangular(x1, x2, lower=False)
        expected_result = np.array([-2, 2], dtype="float32")
        self.assertAllClose(output, expected_result)
        # batched case
        x1 = np.array([[[1, 2], [0, 5]], [[1, 2], [0, 5]]], dtype="float32")
        x2 = np.array([[2, 10], [2, 10]], dtype="float32")
        output = linalg.solve_triangular(x1, x2, lower=True)
        expected_result = np.array([[2, 2], [2, 2]], dtype="float32")
        self.assertAllClose(output, expected_result)

    def test_svd(self):
        x = np.random.rand(4, 30, 20).astype("float32")
        u, s, vh = linalg.svd(x)
        # Reconstruct x from the truncated factors: U_k diag(s) Vh_k.
        x_reconstructed = (u[..., :, : s.shape[-1]] * s[..., None, :]) @ vh[
            ..., : s.shape[-1], :
        ]
        # High tolerance due to numerical instability
        self.assertAllClose(
            x_reconstructed, x, atol=1e-3, tpu_atol=1e-2, tpu_rtol=1e-2
        )
        # Test `compute_uv=False`
        s_no_uv = linalg.svd(x, compute_uv=False)
        self.assertAllClose(
            s_no_uv, s, atol=1e-5, rtol=1e-5, tpu_atol=1e-2, tpu_rtol=1e-2
        )

    @parameterized.named_parameters(
        ("b_rank_1", 1, None),
        ("b_rank_2", 2, None),
        ("rcond", 1, 1e-3),
    )
    def test_lstsq(self, b_rank, rcond):
        a = np.random.random((5, 7)).astype("float32")
        a_symb = backend.KerasTensor((5, 7))
        if b_rank == 1:
            b = np.random.random((5,)).astype("float32")
            b_symb = backend.KerasTensor((5,))
        else:
            b = np.random.random((5, 4)).astype("float32")
            b_symb = backend.KerasTensor((5, 4))
        out = linalg.lstsq(a, b, rcond=rcond)
        ref_out = np.linalg.lstsq(a, b, rcond=rcond)[0]
        self.assertAllClose(
            out, ref_out, atol=1e-5, tpu_atol=1e-4, tpu_rtol=1e-4
        )
        # Symbolic call should infer the same output shape.
        out_symb = linalg.lstsq(a_symb, b_symb)
        self.assertEqual(out_symb.shape, out.shape)
class QrOpTest(testing.TestCase):
    """Tests for the `Qr` op class plus functional `jvp` tests.

    Fix: `test_jvp_symbolic_has_aux_false` previously contained a stray
    string literal (`\"\"\"Symbolic JVP test – has_aux=True.\"\"\"`) in the middle
    of its body — two test methods had been accidentally merged, so the
    `has_aux=True` symbolic case ran under the `has_aux=False` test name
    with no isolation. The method is now split into two separate tests.
    """

    def test_qr_init_mode_reduced(self):
        qr_op = linalg.Qr(mode="reduced")
        self.assertIsNotNone(qr_op)

    def test_qr_init_mode_complete(self):
        qr_op = linalg.Qr(mode="complete")
        self.assertIsNotNone(qr_op)

    def test_qr_init_invalid_mode(self):
        invalid_mode = "invalid_mode"
        expected_error = (
            r"`mode` argument value not supported. "
            r"Expected one of \{'reduced', 'complete'\}. "
            f"Received: mode={invalid_mode}"
        )
        with self.assertRaisesRegex(ValueError, expected_error):
            linalg.Qr(mode=invalid_mode)

    def test_compute_output_spec_low_rank(self):
        qr_op = linalg.Qr(mode="reduced")
        low_rank_input = np.random.rand(3)
        with self.assertRaisesRegex(
            ValueError, r"Input should have rank >= 2. Received: .*"
        ):
            qr_op.compute_output_spec(low_rank_input)

    def test_compute_output_spec_undefined_dimensions(self):
        qr_op = linalg.Qr(mode="reduced")
        undefined_dim_input = KerasTensor(shape=(None, 4), dtype="float32")
        with self.assertRaisesRegex(
            ValueError,
            r"Input should have its last 2 dimensions "
            r"fully-defined. Received: .*",
        ):
            qr_op.compute_output_spec(undefined_dim_input)

    def test_qr_call_mode_reduced(self):
        qr_op = linalg.Qr(mode="reduced")
        test_input = np.random.rand(10, 10)
        q, r = qr_op.call(test_input)
        self.assertEqual(q.shape, (10, 10))
        self.assertEqual(r.shape, (10, 10))

    def test_qr_call_mode_complete(self):
        qr_op = linalg.Qr(mode="complete")
        test_input = np.random.rand(10, 10)
        q, r = qr_op.call(test_input)
        self.assertEqual(q.shape, (10, 10))
        self.assertEqual(r.shape, (10, 10))

    def test_jvp(self):
        if backend.backend() in ["openvino", "numpy"]:
            pytest.skip("Backend does not support jvp operation")
        # d/dx sin(0.1) * 0.2: primal sin(0.1)≈0.0998, tangent
        # cos(0.1)*0.2≈0.1990.
        a1, a2 = ops.convert_to_tensor(0.1), ops.convert_to_tensor(0.2)
        primals, tangents = linalg.jvp(backend.numpy.sin, (a1,), (a2,))
        self.assertAllClose(primals, 0.0998, atol=1e-4)
        self.assertAllClose(tangents, 0.1990, atol=1e-4)

        # With has_aux=True, the auxiliary output (x**2 = 0.01) is returned
        # untouched by differentiation.
        def f(x):
            return backend.numpy.sin(x), x**2

        primals_out, tangents_out, aux = linalg.jvp(
            f, (a1,), (a2,), has_aux=True
        )
        self.assertAllClose(primals_out, 0.0998, atol=1e-4)
        self.assertAllClose(tangents_out, 0.1990, atol=1e-4)
        self.assertAllClose(aux, 0.01, atol=1e-4)

    def test_jvp_symbolic_has_aux_false(self):
        """Symbolic JVP test - has_aux=False."""
        primals = KerasTensor((None, 7))
        tangents = KerasTensor((None, 7))

        def fun(x):
            # simple non-linear transformation
            return ops.sin(x) + ops.cos(x)

        primals_out, tangents_out = linalg.jvp(fun, (primals,), (tangents,))
        # output shapes must match input shapes
        self.assertEqual(primals_out.shape, primals.shape)
        self.assertEqual(tangents_out.shape, tangents.shape)

    def test_jvp_symbolic_has_aux_true(self):
        """Symbolic JVP test - has_aux=True."""
        primals = KerasTensor((None, 7))
        tangents = KerasTensor((None, 7))

        def fun(x):
            y = ops.exp(x)
            aux = ops.mean(y, axis=-1, keepdims=True)  # auxiliary output
            return y, aux

        primals_out, tangents_out, aux = linalg.jvp(
            fun, (primals,), (tangents,), has_aux=True
        )
        # main output shapes
        self.assertEqual(primals_out.shape, primals.shape)
        self.assertEqual(tangents_out.shape, tangents.shape)
        # auxiliary shape: (batch, 1)
        self.assertEqual(aux.shape, (None, 1))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/ops/ops_test.py | keras/src/ops/ops_test.py | import inspect
from absl.testing import parameterized
try:
from keras.api import ops as api_ops_root
except ImportError:
from keras import ops as api_ops_root
from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.ops.operation import Operation
from keras.src.testing.test_utils import named_product
from keras.src.utils.naming import to_snake_case
# Submodules of `keras.ops` whose op classes/functions are cross-checked.
OPS_MODULES = ("core", "image", "linalg", "math", "nn", "numpy")
# Canonical `self` parameter, used to synthesize a signature for op classes
# that do not override `Operation.__init__`.
SELF_PARAMETER = inspect.Parameter(
    "self", inspect.Parameter.POSITIONAL_OR_KEYWORD
)
# Expected shape of the `name` parameter: keyword-only, defaulting to `None`.
NAME_PARAMETER = inspect.Parameter(
    "name", inspect.Parameter.KEYWORD_ONLY, default=None
)
# Parameters with these names are known to always be static (non-tensors).
STATIC_PARAMETER_NAMES = frozenset(
    {"axis", "axes", "dtype", "shape", "newshape", "sparse", "ragged"}
)
def op_functions_and_classes(ops_module):
    """Enumerate pairs of op function and op classes in a module.

    Will return for instance `(ExpandDims, expand_dims)`, `(Sum, sum)`, ...

    Args:
        ops_module: the module to explore.

    Returns:
        iterable returning tuples with function and class pairs.
    """
    # Class names whose function name doesn't follow plain snake-casing.
    irregular_names = {
        "batch_norm": "batch_normalization",
        "rms_norm": "rms_normalization",
        "search_sorted": "searchsorted",
    }
    for symbol_name in dir(ops_module):
        symbol = getattr(ops_module, symbol_name)
        # Skip anything that is not a class extending `Operation`.
        if not isinstance(symbol, type) or Operation not in symbol.__mro__:
            continue
        # Infer the op function name from the class name, applying the
        # known exceptions.
        snake_name = to_snake_case(symbol_name)
        function_name = irregular_names.get(snake_name, snake_name)
        # Some classes are abstract super classes for multiple operations
        # and have no matching function; skip those.
        function = getattr(ops_module, function_name, None)
        if function is None:
            continue
        yield function, symbol
class OperationTest(testing.TestCase):
    """Meta-tests enforcing consistency across all Keras op definitions.

    Verifies, for every op-class/op-function pair in each ops module, that
    exports, signatures, parameter ordering and backend implementations
    all agree with the project conventions.
    """

    @parameterized.named_parameters(named_product(module_name=OPS_MODULES))
    def test_class_function_consistency(self, module_name):
        ops_module = getattr(ops, module_name)
        if module_name in ("core", "math"):
            # `core` and `math` are not exported as their own module.
            api_ops_module = None
        else:
            api_ops_module = getattr(api_ops_root, module_name)
        for op_function, op_class in op_functions_and_classes(ops_module):
            name = op_function.__name__
            # ==== Check exports ====
            # - op should be exported as e.g. `keras.ops.numpy.sum`
            # - op should also be exported as e.g. `keras.ops.sum`
            if module_name != "image":
                # `image` ops are not exported at the top-level.
                self.assertIsNotNone(
                    getattr(api_ops_root, name, None),
                    f"Not exported as `keras.ops.{name}`",
                )
            if api_ops_module is not None:
                # `core` and `math` are not exported as their own module.
                self.assertIsNotNone(
                    getattr(api_ops_module, name, None),
                    f"Not exported as `keras.ops.{module_name}.{name}`",
                )
            # ==== Check handling of name in __init__ ====
            # - op class `__init__` should have a `name` parameter at the end,
            #   which should be keyword only and with a default value of `None`
            # - op class `__init__` should call `super().__init__(name=name)`
            if op_class.__init__ is Operation.__init__:
                # `name` is not keyword only in `Operation`, use this instead.
                class_init_signature = inspect.Signature(
                    [SELF_PARAMETER, NAME_PARAMETER]
                )
            else:
                class_init_signature = inspect.signature(op_class.__init__)
            # Check call to super.
            self.assertContainsSubsequence(
                inspect.getsource(op_class.__init__),
                "super().__init__(name=name)",
                f"`{op_class.__name__}.__init__` is not calling "
                "`super().__init__(name=name)`",
            )
            static_parameters = list(class_init_signature.parameters.values())
            # Remove `self`.
            static_parameters = static_parameters[1:]
            name_index = -1
            if static_parameters[-1].kind == inspect.Parameter.VAR_KEYWORD:
                # When there is a `**kwargs`, `name` appears before.
                name_index = -2
            # Verify `name` parameter is as expected.
            self.assertEqual(
                static_parameters[name_index],
                NAME_PARAMETER,
                f"The last parameter of `{op_class.__name__}.__init__` "
                "should be `name`, should be a keyword only, and should "
                "have a default value of `None`",
            )
            # Remove `name`, it's not part of the op signature.
            static_parameters.pop(name_index)
            # ==== Check static parameters ====
            # Static parameters are declared in the class' `__init__`.
            # Dynamic parameters are declared in the class' `call` method.
            # - they should all appear in the op signature with the same name
            # - they should have the same default value
            # - they should appear in the same order and usually with the
            #   dynamic parameters first, and the static parameters last.
            dynamic_parameters = list(
                inspect.signature(op_class.call).parameters.values()
            )[1:]  # Remove self
            op_signature = inspect.signature(op_function)
            for p in dynamic_parameters + static_parameters:
                # Check the same name appears in the op signature
                self.assertIn(
                    p.name,
                    op_signature.parameters,
                    f"Op function `{name}` is missing a parameter that is in "
                    f"op class `{op_class.__name__}`",
                )
                # Check default values are the same
                self.assertEqual(
                    p.default,
                    op_signature.parameters[p.name].default,
                    f"Default mismatch for parameter `{p.name}` between op "
                    f"function `{name}` and op class `{op_class.__name__}`",
                )
            dynamic_parameter_names = [p.name for p in dynamic_parameters]
            static_parameter_names = [p.name for p in static_parameters]
            # Check for obvious mistakes in parameters that were made dynamic
            # but should be static.
            for p in dynamic_parameters:
                self.assertNotIn(
                    p.name,
                    STATIC_PARAMETER_NAMES,
                    f"`{p.name}` should not be a dynamic parameter in op class "
                    f"`{op_class.__name__}` based on its name.",
                )
                self.assertNotIsInstance(
                    p.default,
                    (bool, str),
                    f"`{p.name}` should not be a dynamic parameter in op class "
                    f"`{op_class.__name__}` based on default `{p.default}`.",
                )
            # Check order of parameters.
            if name in (
                "fori_loop",
                "vectorized_map",
                "while_loop",
                "batch_normalization",
                "dot_product_attention",
                "average",
                "einsum",
                "full",
                "pad",
            ):
                # Loose case:
                # order of parameters is preserved but they are interspersed.
                op_dynamic_parameter_names = [
                    name
                    for name in op_signature.parameters.keys()
                    if name in dynamic_parameter_names
                ]
                self.assertEqual(
                    op_dynamic_parameter_names,
                    dynamic_parameter_names,
                    "Inconsistent dynamic parameter order for op "
                    f"function `{name}` and op class `{op_class.__name__}`",
                )
                op_static_parameter_names = [
                    name
                    for name in op_signature.parameters.keys()
                    if name in static_parameter_names
                ]
                self.assertEqual(
                    op_static_parameter_names,
                    static_parameter_names,
                    "Inconsistent static parameter order for op "
                    f"function `{name}` and op class `{op_class.__name__}`",
                )
            else:
                # Strict case:
                # dynamic parameters first and static parameters at the end.
                self.assertEqual(
                    list(op_signature.parameters.keys()),
                    dynamic_parameter_names + static_parameter_names,
                    "Inconsistent static parameter position for op "
                    f"function `{name}` and op class `{op_class.__name__}`",
                )
            # ==== Check compute_output_spec is implement ====
            # - op class should override Operation's `compute_output_spec`
            self.assertTrue(
                hasattr(op_class, "compute_output_spec")
                and op_class.compute_output_spec
                is not Operation.compute_output_spec,
                f"Op class `{op_class.__name__}` should override "
                "`compute_output_spec`",
            )

    @parameterized.named_parameters(named_product(module_name=OPS_MODULES))
    def test_backend_consistency(self, module_name):
        ops_module = getattr(ops, module_name)
        backend_ops_module = getattr(backend, module_name)
        for op_function, _ in op_functions_and_classes(ops_module):
            name = op_function.__name__
            if hasattr(ops_module, f"_{name}"):
                # For an op function `foo`, if there is a function named `_foo`,
                # that means we have a backend independent implementation.
                continue
            if name in ("view_as_complex", "view_as_real", "get_item"):
                # These ops have an inlined backend independent implementation.
                continue
            # ==== Check backend implementation ====
            # - op should have an implementation in every backend
            # - op implementation should have the same signature (same
            #   parameters, same order, same defaults)
            backend_op_function = getattr(backend_ops_module, name, None)
            if backend.backend() == "openvino" and backend_op_function is None:
                # Openvino is still missing a number of ops.
                continue
            self.assertIsNotNone(backend_op_function, f"Missing op `{name}`")
            if name == "multi_hot":
                # multi_hot has code to massage the input parameters before
                # calling the backend implementation, so the signature is
                # different on purpose.
                continue
            # Signature should match in every way.
            self.assertEqual(
                inspect.signature(backend_op_function),
                inspect.signature(op_function),
                f"Signature mismatch for `{name}`",
            )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/ops/function.py | keras/src/ops/function.py | import collections
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.backend import KerasTensor
from keras.src.backend.config import backend
from keras.src.backend.config import is_nnx_enabled
from keras.src.ops.operation import Operation
@keras_export("keras.Function")
class Function(Operation):
    """Class that encapsulates a computation graph of Keras operations.

    You can use a `Function` to capture the computation graph linking
    some input tensors to some output tensors, and reapply the same
    computation on new inputs.

    A `Function` is similar to a Functional Model, with the difference
    that it is stateless (it does not track state variables)
    and does not implement the `Layer` API.

    Example:

    ```python
    input_1 = keras.KerasTensor(shape=(None, 2, 3))
    input_2 = keras.KerasTensor(shape=(None, 2, 3))
    x = input_1 + input_2
    output = keras.ops.sigmoid(x)
    fn = keras.Function(inputs=[input_1, input_2], outputs=output)

    input_1_val = np.random.random((4, 2, 3))
    input_2_val = np.random.random((4, 2, 3))
    output_val = fn([input_1_val, input_2_val])
    ```

    Args:
        inputs: `KerasTensor` instance or nested structured of
            `KerasTensor` instances.
        outputs: `KerasTensor` instance or nested structured of
            `KerasTensor` instances. They should be computable
            given only the values of `inputs`.
        name: String. The name of the function.
    """

    def __init__(self, inputs, outputs, name=None):
        super().__init__(name=name)
        if backend() == "tensorflow":
            # Temporary work around for
            # https://github.com/keras-team/keras/issues/931
            # This stop tensorflow from wrapping tf.function output in a
            # _DictWrapper object.
            _self_setattr_tracking = getattr(
                self, "_self_setattr_tracking", True
            )
            self._self_setattr_tracking = False
        # Keep both the original nested structure (for packing/unpacking)
        # and flat lists (for graph traversal).
        self._inputs_struct = tree.map_structure(lambda x: x, inputs)
        self._outputs_struct = tree.map_structure(lambda x: x, outputs)
        self._inputs = tree.flatten(inputs)
        self._outputs = tree.flatten(outputs)
        if not self._inputs:
            raise ValueError(
                "`inputs` argument cannot be empty. Received:\n"
                f"inputs={inputs}\n"
                f"outputs={outputs}"
            )
        if not self._outputs:
            raise ValueError(
                "`outputs` argument cannot be empty. Received:\n"
                f"inputs={inputs}\n"
                f"outputs={outputs}"
            )
        if backend() == "tensorflow":
            self._self_setattr_tracking = _self_setattr_tracking

        # Build and validate the graph topology linking inputs to outputs.
        (nodes, nodes_by_depth, operations, operations_by_depth) = map_graph(
            self._inputs, self._outputs
        )
        self._nodes = nodes
        self._nodes_by_depth = nodes_by_depth
        self._operations = operations
        self._operations_by_depth = operations_by_depth

        # An input whose producing operation has no outbound nodes cannot
        # participate in the computation of `outputs`.
        for input in self._inputs:
            if (
                input._keras_history.operation
                and not input._keras_history.operation._outbound_nodes
            ):
                raise ValueError("`inputs` not connected to `outputs`")
        # Special handling for NNX to ensure consistent operation instance usage
        if is_nnx_enabled():
            self._setup_nnx_op_mapping()

    @property
    def operations(self):
        # Return a copy so callers cannot mutate the internal list.
        return self._operations[:]

    @property
    def inputs(self):
        """Flat list of the symbolic inputs of the Function."""
        return self._inputs

    @property
    def outputs(self):
        """Flat list of the symbolic outputs of the Function."""
        return self._outputs

    def _setup_nnx_op_mapping(self):
        """Setup operation mapping for NNX"""
        # Create a mapping from operation id to operation instance
        self._nnx_op_mapping = {}
        # Assign the list of operations to a single attribute for NNX traversal
        # NOTE(review): presumably NNX discovers module state by traversing
        # attributes — confirm against the NNX integration.
        self.nnx_operations = self._operations[:]
        for operation in self._operations:
            # Map the operation id to this operation instance
            self._nnx_op_mapping[id(operation)] = operation

    def _get_operation_for_node(self, node):
        """Get the operation for a node, using NNX mapping if enabled."""
        operation = node.operation
        if hasattr(self, "_nnx_op_mapping") and id(operation) in getattr(
            self, "_nnx_op_mapping", {}
        ):
            return self._nnx_op_mapping[id(operation)]
        return operation

    def compute_output_spec(self, inputs):
        """Compute symbolic output specs for the given symbolic `inputs`."""
        self._assert_input_compatibility(inputs)
        # Check if input shapes are identical to ref input shapes,
        # if so take a shortcut.
        shortcut = True
        for x, x_ref in zip(tree.flatten(inputs), self._inputs):
            if x.shape != x_ref.shape:
                shortcut = False
                break
        if shortcut:
            return tree.map_structure(
                lambda x: KerasTensor(shape=x.shape, dtype=x.dtype),
                self._outputs_struct,
            )
        # No luck; take the long road through the graph.
        # Original Keras used a cache to avoid recomputing all this
        # when known input shapes where seen again. Perhaps a good
        # idea to bring that back.
        return self._run_through_graph(
            inputs, operation_fn=lambda op: op.compute_output_spec
        )

    def compute_output_shape(self, input_shape):
        # Wrap `input_shape` into the structure of KerasTensor to utilize
        # `compute_output_spec`.
        input_shape_struct = tree.map_shape_structure(
            lambda x: KerasTensor(shape=x), input_shape
        )
        # Ensure that dtype and sparse settings are the same as self._inputs,
        # because we only care about the shape in this function.
        for x, x_ref in zip(tree.flatten(input_shape_struct), self._inputs):
            x._dtype = x_ref.dtype
            x._sparse = x_ref.sparse
        output_spec = self.compute_output_spec(input_shape_struct)
        return tree.map_structure(lambda x: x.shape, output_spec)

    def call(self, inputs):
        """Computes output tensors for new inputs."""
        self._assert_input_compatibility(inputs)
        return self._run_through_graph(inputs, operation_fn=lambda op: op)

    def _run_through_graph(self, inputs, operation_fn, call_fn=None):
        """Execute the graph.

        At each node we compute outputs via
        `operation_fn(node.operation)(*args, **kwargs)`.
        """
        inputs = tree.flatten(inputs)

        # Dictionary mapping reference tensors to computed tensors.
        tensor_dict = {}
        for x, y in zip(self.inputs, inputs):
            tensor_dict[id(x)] = y

        # Execute nodes from inputs (highest depth) to outputs (depth 0).
        nodes_by_depth = self._nodes_by_depth
        depth_keys = list(nodes_by_depth.keys())
        depth_keys.sort(reverse=True)

        for depth in depth_keys:
            nodes = nodes_by_depth[depth]
            for node in nodes:
                if not node.operation or node.is_input:
                    continue  # Input tensors already exist.
                if any(id(x) not in tensor_dict for x in node.input_tensors):
                    continue  # Node is not computable, try skipping.
                args, kwargs = node.arguments.fill_in(tensor_dict)
                if call_fn is not None:
                    # Use call_fn if provided (e.g., for symbolic execution)
                    op = operation_fn(node.operation)
                    outputs = call_fn(op, *args, **kwargs)
                else:
                    # Use NNX operation mapping
                    operation = self._get_operation_for_node(node)
                    op = operation_fn(operation)
                    outputs = op(*args, **kwargs)

                # Update tensor_dict.
                for x, y in zip(node.outputs, tree.flatten(outputs)):
                    tensor_dict[id(x)] = y

        output_tensors = []
        for x in self.outputs:
            output_tensors.append(tensor_dict[id(x)])

        # Repack results into the original output structure.
        return tree.pack_sequence_as(self._outputs_struct, output_tensors)

    def _assert_input_compatibility(self, inputs):
        # Validate both the nested structure and per-tensor shapes of
        # `inputs` against the reference inputs captured at construction.
        try:
            tree.assert_same_structure(inputs, self._inputs_struct)
        except ValueError:
            raise ValueError(
                "Function was called with an invalid input structure. "
                f"Expected input structure: {self._inputs_struct}\n"
                f"Received input structure: {inputs}"
            )
        for x, x_ref in zip(tree.flatten(inputs), self._inputs):
            if len(x.shape) != len(x_ref.shape):
                raise ValueError(
                    f"{self.__class__.__name__} was passed "
                    f"incompatible inputs. For input '{x_ref.name}', "
                    f"expected shape {x_ref.shape}, but received "
                    f"instead a tensor with shape {x.shape}."
                )
            for dim, ref_dim in zip(x.shape, x_ref.shape):
                # `None` reference dims accept any size.
                if ref_dim is not None and dim is not None:
                    if dim != ref_dim:
                        raise ValueError(
                            f"{self.__class__.__name__} was passed "
                            f"incompatible inputs. For input '{x_ref.name}', "
                            f"expected shape {x_ref.shape}, but received "
                            f"instead a tensor with shape {x.shape}."
                        )
def make_node_key(op, node_index):
    """Build a unique string key for an (operation, inbound-node-index) pair.

    Args:
        op: the operation instance; its `id()` makes the key unique.
        node_index: index of the node in the operation's inbound nodes.

    Returns:
        A string of the form `"<id(op)>_ib-<node_index>"`.
    """
    return "{}_ib-{}".format(id(op), node_index)
def map_graph(inputs, outputs):
    """Validates a graph's topology and gather its operations and nodes.

    Args:
        inputs: List of input tensors.
        outputs: List of outputs tensors.

    Returns:
        A tuple `(nodes, nodes_by_depth, operations, operations_by_depth)`.
        - nodes: set of Node instances
        - nodes_by_depth: dict mapping ints (depth) to lists of node instances.
        - operations: list of Operation instances.
        - operations_by_depth: dict mapping ints (depth) to lists of Operation
            instances.

    Raises:
        ValueError: if the graph is disconnected (an output depends on a
            tensor not computable from `inputs`) or operation names clash.
    """
    # "depth" is number of operations between output Node and the Node.
    # Nodes are ordered from inputs -> outputs.
    nodes_in_decreasing_depth, operation_indices = _build_map(inputs, outputs)
    network_nodes = {
        make_node_key(node.operation, node.operation._inbound_nodes.index(node))
        for node in nodes_in_decreasing_depth
    }

    nodes_depths = {}  # dict {node: depth value}
    operations_depths = {}  # dict {operation: depth value}

    # Walk outputs -> inputs, propagating depths: a node's depth is the max
    # over its dependents' depths + 1.
    for node in reversed(nodes_in_decreasing_depth):
        # If the depth is not set, the node has no outbound nodes (depth 0).
        depth = nodes_depths.setdefault(node, 0)

        # Update the depth of the corresponding operation
        previous_depth = operations_depths.get(node.operation, 0)
        # If we've seen this operation before at a higher depth,
        # we should use that depth instead of the node depth.
        # This is necessary for shared operations that have inputs at different
        # depth levels in the graph.
        depth = max(depth, previous_depth)
        operations_depths[node.operation] = depth
        nodes_depths[node] = depth

        # Update the depth of inbound nodes.
        # The "depth" of a node is the max of the depths
        # of all nodes it is connected to + 1.
        for node_dep in node.parent_nodes:
            previous_depth = nodes_depths.get(node_dep, 0)
            nodes_depths[node_dep] = max(depth + 1, previous_depth)

    # Handle inputs that are not connected to outputs.
    # We do not error out here because the inputs may be used to compute losses
    # and metrics.
    for input_t in inputs:
        input_operation = input_t._keras_history[0]
        if input_operation and input_operation not in operations_depths:
            operations_depths[input_operation] = 0
            operation_indices[input_operation] = -1
            nodes_depths[input_operation._inbound_nodes[0]] = 0
            network_nodes.add(make_node_key(input_operation, 0))

    # Build a dict {depth: list of nodes with this depth}
    nodes_by_depth = collections.defaultdict(list)
    for node, depth in nodes_depths.items():
        nodes_by_depth[depth].append(node)

    # Build a dict {depth: list of operations with this depth}
    operations_by_depth = collections.defaultdict(list)
    for operation, depth in operations_depths.items():
        operations_by_depth[depth].append(operation)

    # Get sorted list of operation depths.
    depth_keys = list(operations_by_depth.keys())
    depth_keys.sort(reverse=True)

    # Set self.operations ordered by depth.
    operations = []
    for depth in depth_keys:
        operations_for_depth = operations_by_depth[depth]
        # Network.operations needs to have a deterministic order:
        # here we order them by traversal order.
        operations_for_depth.sort(key=lambda x: operation_indices[x])
        operations.extend(operations_for_depth)

    # Get sorted list of node depths.
    depth_keys = list(nodes_by_depth.keys())
    depth_keys.sort(reverse=True)

    # Check that all tensors required are computable.
    # computable_tensors: all tensors in the graph
    # that can be computed from the inputs provided.
    computable_tensors = set()
    for x in inputs:
        computable_tensors.add(x)

    operations_with_complete_input = []  # To provide a better error msg.
    for depth in depth_keys:
        for node in nodes_by_depth[depth]:
            for x in tree.flatten(node.input_tensors):
                if x not in computable_tensors:
                    operation = node.operation
                    raise ValueError(
                        "Graph disconnected: cannot find parent for "
                        f"tensor {x} at operation '{operation}'. "
                        "The following previous operations were accessed "
                        f"without issue: {operations_with_complete_input}"
                    )
                operations_with_complete_input.append(node.operation.name)
            for x in tree.flatten(node.outputs):
                computable_tensors.add(x)

    # Ensure name unicity, which will be crucial for serialization
    # (since serialized nodes refer to operations by their name).
    all_names = [operation.name for operation in operations]
    for name in all_names:
        if all_names.count(name) != 1:
            raise ValueError(
                f'The name "{name}" is used {all_names.count(name)} '
                "times in the model. All operation names should be unique."
            )
    return network_nodes, nodes_by_depth, operations, operations_by_depth
def _build_map(inputs, outputs):
    """Topologically sort nodes in order from inputs to outputs.

    Runs a depth-first search over the `_keras_history` connectivity
    metadata of every tensor in `outputs`.

    Args:
        inputs: The graph's input tensors; traversal stops at them.
        outputs: The output tensors whose `_keras_history` metadata should
            be walked. This may be an arbitrary nested structure.

    Returns:
        A tuple `(ordered_nodes, operation_to_first_traversal_index)`.
        `ordered_nodes` lists the nodes appearing in the Keras history,
        topologically sorted from the original inputs to `outputs`. (If
        outputs have different sets of ancestors, the inputs to one output
        may appear after a different output.)
        `operation_to_first_traversal_index` maps each operation to the
        traversal index in the DFS where it was seen. If an operation is
        shared by several nodes, only the index of the *first* encounter
        is stored.
    """
    visited = set()  # Nodes whose entire subtree has been processed.
    on_stack = set()  # Nodes currently on the DFS stack (cycle check).
    ordered_nodes = []  # Nodes accumulated from inputs -> outputs.
    first_seen_index = {}  # operation -> first traversal index.
    for output_tensor in tree.flatten(outputs):
        _build_map_helper(
            inputs,
            output_tensor,
            visited,
            on_stack,
            ordered_nodes,
            first_seen_index,
        )
    return ordered_nodes, first_seen_index
def _build_map_helper(
    inputs,
    tensor,
    finished_nodes,
    nodes_in_progress,
    nodes_in_decreasing_depth,
    operation_indices,
):
    """Recursive helper for `_build_map`."""
    operation, node_index, _ = tensor._keras_history
    if not operation:
        return
    node = operation._inbound_nodes[node_index]
    # Shared subgraphs are only walked once.
    if node in finished_nodes:
        return
    # A node still on the DFS stack means the graph contains a cycle.
    if node in nodes_in_progress:
        raise ValueError(
            f"Tensor {tensor} from operation '{operation.name}' is part of a "
            "cycle."
        )
    # Record the first traversal index, used later to sort operations
    # deterministically.
    if operation not in operation_indices:
        operation_indices[operation] = len(operation_indices)
    nodes_in_progress.add(node)
    # Recurse into the tensors feeding this node, unless the node is an
    # input node or the tensor itself was supplied as a graph input.
    if not node.is_input and tensor not in tree.flatten(inputs):
        for parent_tensor in node.input_tensors:
            _build_map_helper(
                inputs,
                parent_tensor,
                finished_nodes,
                nodes_in_progress,
                nodes_in_decreasing_depth,
                operation_indices,
            )
    finished_nodes.add(node)
    nodes_in_progress.remove(node)
    nodes_in_decreasing_depth.append(node)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/ops/nn_test.py | keras/src/ops/nn_test.py | import math
from itertools import combinations
import numpy as np
import pytest
from absl.testing import parameterized
import keras
from keras.src import backend
from keras.src import layers
from keras.src import losses
from keras.src import models
from keras.src import ops
from keras.src import testing
from keras.src.backend.common import dtypes
from keras.src.backend.common import standardize_dtype
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.layers.convolutional.conv_test import np_conv1d
from keras.src.layers.convolutional.conv_test import np_conv2d
from keras.src.layers.convolutional.conv_test import np_conv3d
from keras.src.layers.convolutional.conv_transpose_test import (
np_conv1d_transpose,
)
from keras.src.layers.convolutional.conv_transpose_test import (
np_conv2d_transpose,
)
from keras.src.layers.convolutional.depthwise_conv_test import (
np_depthwise_conv2d,
)
from keras.src.layers.pooling.average_pooling_test import np_avgpool1d
from keras.src.layers.pooling.average_pooling_test import np_avgpool2d
from keras.src.layers.pooling.max_pooling_test import np_maxpool1d
from keras.src.layers.pooling.max_pooling_test import np_maxpool2d
from keras.src.ops import nn as knn
from keras.src.ops import numpy as knp
from keras.src.testing.test_utils import named_product
def _dot_product_attention(
query, key, value, bias=None, mask=None, scale=None, is_causal=False
):
# A pure and simplified numpy version of `dot_product_attention`
# Ref: jax.nn.dot_product_attention
# https://github.com/jax-ml/jax/blob/jax-v0.4.32/jax/_src/nn/functions.py#L828
# Not support `query_seq_lengths` and `key_value_seq_lengths` args
def _apply_masks(logits, mask, is_causal):
def _get_large_negative(dtype):
dtype = backend.standardize_dtype(dtype)
if dtype == "float16":
val = 65500.0
else:
val = 3.38953e38
return np.asarray(val * -0.7, dtype=dtype)
def _get_causal_mask(query_length, key_length):
mask = np.tril(np.ones((query_length, key_length), dtype=np.bool_))
return mask[None, None, :, :]
if mask is None and not is_causal:
return logits
combined_mask = np.ones_like(logits, dtype=np.bool_)
if mask is not None:
combined_mask = np.logical_and(combined_mask, mask)
if is_causal:
T, S = logits.shape[2], logits.shape[3]
mask = _get_causal_mask(T, S)
combined_mask = np.logical_and(combined_mask, mask)
padded_logits = np.where(
combined_mask, logits, _get_large_negative(logits.dtype)
)
return padded_logits
def softmax(x, axis=None):
exp_x = np.exp(x - np.max(x, axis=axis, keepdims=True))
return exp_x / np.sum(exp_x, axis=axis, keepdims=True)
_, _, _, H = key.shape
scale = (1.0 / np.sqrt(H)) if scale is None else scale
logits = np.einsum("BTNH,BSNH->BNTS", query, key)
logits *= np.array(scale, dtype=logits.dtype)
if bias is not None:
logits = (logits + bias).astype(logits.dtype)
padded_logits = _apply_masks(logits, mask, is_causal)
padded_logits = padded_logits.astype(np.float32)
probs = softmax(padded_logits, axis=-1).astype(key.dtype)
return np.einsum("BNTS,BSNH->BTNH", probs, value)
class NNOpsDynamicShapeTest(testing.TestCase):
def test_relu(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.relu(x).shape, (None, 2, 3))
def test_relu6(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.relu6(x).shape, (None, 2, 3))
def test_sigmoid(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.sigmoid(x).shape, (None, 2, 3))
def test_sparse_sigmoid(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.sparse_sigmoid(x).shape, (None, 2, 3))
def test_softplus(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.softplus(x).shape, (None, 2, 3))
def test_softsign(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.softsign(x).shape, (None, 2, 3))
def test_silu(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.silu(x).shape, (None, 2, 3))
def test_log_sigmoid(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.log_sigmoid(x).shape, (None, 2, 3))
def test_leaky_relu(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.leaky_relu(x).shape, (None, 2, 3))
def test_hard_sigmoid(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.hard_sigmoid(x).shape, (None, 2, 3))
def test_hard_silu(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.hard_silu(x).shape, (None, 2, 3))
def test_elu(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.elu(x).shape, (None, 2, 3))
def test_selu(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.selu(x).shape, (None, 2, 3))
def test_gelu(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.gelu(x).shape, (None, 2, 3))
def test_celu(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.celu(x).shape, (None, 2, 3))
def test_glu(self):
x = KerasTensor([None, 2, 4])
self.assertEqual(knn.glu(x).shape, (None, 2, 2))
def test_tanh_shrink(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.tanh_shrink(x).shape, (None, 2, 3))
def test_hard_tanh(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.hard_tanh(x).shape, (None, 2, 3))
def test_hard_shrink(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.hard_shrink(x).shape, (None, 2, 3))
    def test_threshld(self):
        # NOTE(review): method name has a typo ("threshld" -> "threshold").
        # It still runs under test discovery since it starts with "test_",
        # but consider renaming for greppability.
        x = KerasTensor([None, 2, 3])
        self.assertEqual(knn.threshold(x, 0, 0).shape, (None, 2, 3))
def test_squareplus(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.squareplus(x).shape, (None, 2, 3))
def test_soft_shrink(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.soft_shrink(x).shape, (None, 2, 3))
def test_sparse_plus(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.sparse_plus(x).shape, (None, 2, 3))
def test_softmax(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.softmax(x).shape, (None, 2, 3))
self.assertEqual(knn.softmax(x, axis=1).shape, (None, 2, 3))
self.assertEqual(knn.softmax(x, axis=-1).shape, (None, 2, 3))
def test_softmax_in_graph(self):
class SoftmaxLayer(keras.Layer):
def call(self, x):
return ops.softmax(x, axis=-1)
class Model(keras.Model):
def __init__(self):
x = keras.Input(shape=(None,))
y = SoftmaxLayer()(x)
super().__init__(inputs=x, outputs=y)
# Make sure Keras is able to compile the model graph
model = Model()
x = ops.array([[1.0, 2.0, 3.0, 4.0]])
model.predict(x)
def test_log_softmax(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.log_softmax(x).shape, (None, 2, 3))
self.assertEqual(knn.log_softmax(x, axis=1).shape, (None, 2, 3))
self.assertEqual(knn.log_softmax(x, axis=-1).shape, (None, 2, 3))
def test_sparsemax(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.sparsemax(x).shape, (None, 2, 3))
def test_max_pool(self):
data_format = backend.config.image_data_format()
if data_format == "channels_last":
input_shape = (None, 8, 3)
else:
input_shape = (None, 3, 8)
x = KerasTensor(input_shape)
self.assertEqual(
knn.max_pool(x, 2, 1).shape,
(None, 7, 3) if data_format == "channels_last" else (None, 3, 7),
)
self.assertEqual(
knn.max_pool(x, 2, 2, padding="same").shape,
(None, 4, 3) if data_format == "channels_last" else (None, 3, 4),
)
if data_format == "channels_last":
input_shape = (None, 8, None, 3)
else:
input_shape = (None, 3, 8, None)
x = KerasTensor(input_shape)
(
self.assertEqual(knn.max_pool(x, 2, 1).shape, (None, 7, None, 3))
if data_format == "channels_last"
else (None, 3, 7, None)
)
self.assertEqual(
knn.max_pool(x, 2, 2, padding="same").shape,
(
(None, 4, None, 3)
if data_format == "channels_last"
else (None, 3, 4, None)
),
)
self.assertEqual(
knn.max_pool(x, (2, 2), (2, 2), padding="same").shape,
(
(None, 4, None, 3)
if data_format == "channels_last"
else (None, 3, 4, None)
),
)
def test_average_pool(self):
data_format = backend.config.image_data_format()
if data_format == "channels_last":
input_shape = (None, 8, 3)
else:
input_shape = (None, 3, 8)
x = KerasTensor(input_shape)
self.assertEqual(
knn.average_pool(x, 2, 1).shape,
(None, 7, 3) if data_format == "channels_last" else (None, 3, 7),
)
self.assertEqual(
knn.average_pool(x, 2, 2, padding="same").shape,
(None, 4, 3) if data_format == "channels_last" else (None, 3, 4),
)
if data_format == "channels_last":
input_shape = (None, 8, None, 3)
else:
input_shape = (None, 3, 8, None)
x = KerasTensor(input_shape)
self.assertEqual(
knn.average_pool(x, 2, 1).shape,
(
(None, 7, None, 3)
if data_format == "channels_last"
else (None, 3, 7, None)
),
)
self.assertEqual(
knn.average_pool(x, 2, 2, padding="same").shape,
(
(None, 4, None, 3)
if data_format == "channels_last"
else (None, 3, 4, None)
),
)
self.assertEqual(
knn.average_pool(x, (2, 2), (2, 2), padding="same").shape,
(
(None, 4, None, 3)
if data_format == "channels_last"
else (None, 3, 4, None)
),
)
def test_multi_hot(self):
x = KerasTensor([None, 3, 1])
self.assertEqual(knn.multi_hot(x, 5).shape, (None, 1, 5))
self.assertEqual(knn.multi_hot(x, 5, 1).shape, (None, 3, 1))
self.assertEqual(knn.multi_hot(x, 5, 2).shape, (None, 5, 1))
self.assertSparse(knn.multi_hot(x, 5, sparse=True))
@parameterized.named_parameters(
named_product(dtype=["float32", "int32", "bool"], sparse=[False, True])
)
def test_multi_hot_dtype(self, dtype, sparse):
if sparse and not backend.SUPPORTS_SPARSE_TENSORS:
pytest.skip("Backend does not support sparse tensors")
x = np.arange(5)
out = knn.multi_hot(x, 5, axis=0, dtype=dtype, sparse=sparse)
self.assertEqual(backend.standardize_dtype(out.dtype), dtype)
self.assertSparse(out, sparse)
def test_conv(self):
data_format = backend.config.image_data_format()
# Test 1D conv.
if data_format == "channels_last":
input_shape = (None, 20, 3)
else:
input_shape = (None, 3, 20)
inputs_1d = KerasTensor(input_shape)
kernel = KerasTensor([4, 3, 2])
for padding in ["valid", "VALID"]:
self.assertEqual(
knn.conv(inputs_1d, kernel, 1, padding=padding).shape,
(
(None, 17, 2)
if data_format == "channels_last"
else (None, 2, 17)
),
)
for padding in ["same", "SAME"]:
self.assertEqual(
knn.conv(inputs_1d, kernel, 1, padding=padding).shape,
(
(None, 20, 2)
if data_format == "channels_last"
else (None, 2, 20)
),
)
self.assertEqual(
knn.conv(inputs_1d, kernel, (2,), dilation_rate=2).shape,
(None, 7, 2) if data_format == "channels_last" else (None, 2, 7),
)
# Test 2D conv.
if data_format == "channels_last":
input_shape = (None, 10, None, 3)
else:
input_shape = (None, 3, 10, None)
inputs_2d = KerasTensor(input_shape)
kernel = KerasTensor([2, 2, 3, 2])
for padding in ["valid", "VALID"]:
self.assertEqual(
knn.conv(inputs_2d, kernel, 1, padding=padding).shape,
(
(None, 9, None, 2)
if data_format == "channels_last"
else (None, 2, 9, None)
),
)
for padding in ["same", "SAME"]:
self.assertEqual(
knn.conv(inputs_2d, kernel, 1, padding=padding).shape,
(
(None, 10, None, 2)
if data_format == "channels_last"
else (None, 2, 10, None)
),
)
self.assertEqual(
knn.conv(inputs_2d, kernel, (2, 1), dilation_rate=(2, 1)).shape,
(
(None, 4, None, 2)
if data_format == "channels_last"
else (None, 2, 4, None)
),
)
# Test 2D conv - H, W specified
if data_format == "channels_last":
input_shape = (None, 10, 10, 3)
else:
input_shape = (None, 3, 10, 10)
inputs_2d = KerasTensor(input_shape)
kernel = KerasTensor([2, 2, 3, 2])
for padding in ["valid", "VALID"]:
self.assertEqual(
knn.conv(inputs_2d, kernel, 1, padding=padding).shape,
(
(None, 9, 9, 2)
if data_format == "channels_last"
else (None, 2, 9, 9)
),
)
for padding in ["same", "SAME"]:
self.assertEqual(
knn.conv(inputs_2d, kernel, 1, padding=padding).shape,
(
(None, 10, 10, 2)
if data_format == "channels_last"
else (None, 2, 10, 10)
),
)
self.assertEqual(
knn.conv(inputs_2d, kernel, (2, 1), dilation_rate=(2, 1)).shape,
(
(None, 4, 9, 2)
if data_format == "channels_last"
else (None, 2, 4, 9)
),
)
# Test 3D conv.
if data_format == "channels_last":
input_shape = (None, 8, None, 8, 3)
else:
input_shape = (None, 3, 8, None, 8)
inputs_3d = KerasTensor(input_shape)
kernel = KerasTensor([3, 3, 3, 3, 2])
for padding in ["valid", "VALID"]:
self.assertEqual(
knn.conv(inputs_3d, kernel, 1, padding=padding).shape,
(
(None, 6, None, 6, 2)
if data_format == "channels_last"
else (None, 2, 6, None, 6)
),
)
for padding in ["same", "SAME"]:
self.assertEqual(
knn.conv(inputs_3d, kernel, (2, 1, 2), padding=padding).shape,
(
(None, 4, None, 4, 2)
if data_format == "channels_last"
else (None, 2, 4, None, 4)
),
)
self.assertEqual(
knn.conv(
inputs_3d, kernel, 1, padding="valid", dilation_rate=(1, 2, 2)
).shape,
(
(None, 6, None, 4, 2)
if data_format == "channels_last"
else (None, 2, 6, None, 4)
),
)
def test_depthwise_conv(self):
data_format = backend.config.image_data_format()
# Test 1D depthwise conv.
if data_format == "channels_last":
input_shape = (None, 20, 3)
else:
input_shape = (None, 3, 20)
inputs_1d = KerasTensor(input_shape)
kernel = KerasTensor([4, 3, 1])
for padding in ["valid", "VALID"]:
self.assertEqual(
knn.depthwise_conv(inputs_1d, kernel, 1, padding=padding).shape,
(
(None, 17, 3)
if data_format == "channels_last"
else (None, 3, 17)
),
)
for padding in ["same", "SAME"]:
self.assertEqual(
knn.depthwise_conv(
inputs_1d, kernel, (1,), padding=padding
).shape,
(
(None, 20, 3)
if data_format == "channels_last"
else (None, 3, 20)
),
)
self.assertEqual(
knn.depthwise_conv(inputs_1d, kernel, 2, dilation_rate=2).shape,
(None, 7, 3) if data_format == "channels_last" else (None, 3, 7),
)
# Test 2D depthwise conv.
if data_format == "channels_last":
input_shape = (None, 10, 10, 3)
else:
input_shape = (None, 3, 10, 10)
inputs_2d = KerasTensor(input_shape)
kernel = KerasTensor([2, 2, 3, 1])
for padding in ["valid", "VALID"]:
self.assertEqual(
knn.depthwise_conv(inputs_2d, kernel, 1, padding=padding).shape,
(
(None, 9, 9, 3)
if data_format == "channels_last"
else (None, 3, 9, 9)
),
)
for padding in ["same", "SAME"]:
self.assertEqual(
knn.depthwise_conv(
inputs_2d, kernel, (1, 2), padding=padding
).shape,
(
(None, 10, 5, 3)
if data_format == "channels_last"
else (None, 3, 10, 5)
),
)
self.assertEqual(
knn.depthwise_conv(inputs_2d, kernel, 2, dilation_rate=2).shape,
(
(None, 4, 4, 3)
if data_format == "channels_last"
else (None, 3, 4, 4)
),
)
self.assertEqual(
knn.depthwise_conv(
inputs_2d, kernel, 2, dilation_rate=(2, 1)
).shape,
(
(None, 4, 5, 3)
if data_format == "channels_last"
else (None, 3, 4, 5)
),
)
def test_separable_conv(self):
data_format = backend.config.image_data_format()
# Test 1D separable conv.
if data_format == "channels_last":
input_shape = (None, 20, 3)
else:
input_shape = (None, 3, 20)
inputs_1d = KerasTensor(input_shape)
kernel = KerasTensor([4, 3, 2])
pointwise_kernel = KerasTensor([1, 6, 5])
self.assertEqual(
knn.separable_conv(
inputs_1d, kernel, pointwise_kernel, 1, padding="valid"
).shape,
(None, 17, 5) if data_format == "channels_last" else (None, 5, 17),
)
self.assertEqual(
knn.separable_conv(
inputs_1d, kernel, pointwise_kernel, 1, padding="same"
).shape,
(None, 20, 5) if data_format == "channels_last" else (None, 5, 20),
)
self.assertEqual(
knn.separable_conv(
inputs_1d, kernel, pointwise_kernel, 2, dilation_rate=2
).shape,
(None, 7, 5) if data_format == "channels_last" else (None, 5, 7),
)
# Test 2D separable conv.
if data_format == "channels_last":
input_shape = (None, 10, 10, 3)
else:
input_shape = (None, 3, 10, 10)
inputs_2d = KerasTensor(input_shape)
kernel = KerasTensor([2, 2, 3, 2])
pointwise_kernel = KerasTensor([1, 1, 6, 5])
self.assertEqual(
knn.separable_conv(
inputs_2d, kernel, pointwise_kernel, 1, padding="valid"
).shape,
(
(None, 9, 9, 5)
if data_format == "channels_last"
else (None, 5, 9, 9)
),
)
self.assertEqual(
knn.separable_conv(
inputs_2d, kernel, pointwise_kernel, (1, 2), padding="same"
).shape,
(
(None, 10, 5, 5)
if data_format == "channels_last"
else (None, 5, 10, 5)
),
)
self.assertEqual(
knn.separable_conv(
inputs_2d, kernel, pointwise_kernel, 2, dilation_rate=(2, 1)
).shape,
(
(None, 4, 5, 5)
if data_format == "channels_last"
else (None, 5, 4, 5)
),
)
def test_conv_transpose(self):
data_format = backend.config.image_data_format()
if data_format == "channels_last":
input_shape = (None, 4, 3)
else:
input_shape = (None, 3, 4)
inputs_1d = KerasTensor(input_shape)
kernel = KerasTensor([2, 5, 3])
self.assertEqual(
knn.conv_transpose(inputs_1d, kernel, 2).shape,
(None, 8, 5) if data_format == "channels_last" else (None, 5, 8),
)
self.assertEqual(
knn.conv_transpose(inputs_1d, kernel, 2, padding="same").shape,
(None, 8, 5) if data_format == "channels_last" else (None, 5, 8),
)
self.assertEqual(
knn.conv_transpose(
inputs_1d, kernel, 5, padding="valid", output_padding=4
).shape,
(None, 21, 5) if data_format == "channels_last" else (None, 5, 21),
)
if data_format == "channels_last":
input_shape = (None, 4, 4, 3)
else:
input_shape = (None, 3, 4, 4)
inputs_2d = KerasTensor(input_shape)
kernel = KerasTensor([2, 2, 5, 3])
self.assertEqual(
knn.conv_transpose(inputs_2d, kernel, 2).shape,
(
(None, 8, 8, 5)
if data_format == "channels_last"
else (None, 5, 8, 8)
),
)
self.assertEqual(
knn.conv_transpose(inputs_2d, kernel, (2, 2), padding="same").shape,
(
(None, 8, 8, 5)
if data_format == "channels_last"
else (None, 5, 8, 8)
),
)
self.assertEqual(
knn.conv_transpose(
inputs_2d, kernel, (5, 5), padding="valid", output_padding=4
).shape,
(
(None, 21, 21, 5)
if data_format == "channels_last"
else (None, 5, 21, 21)
),
)
def test_one_hot(self):
x = KerasTensor([None, 3, 1])
self.assertEqual(knn.one_hot(x, 5).shape, (None, 3, 1, 5))
self.assertEqual(knn.one_hot(x, 5, 1).shape, (None, 5, 3, 1))
self.assertEqual(knn.one_hot(x, 5, 2).shape, (None, 3, 5, 1))
self.assertSparse(knn.one_hot(x, 5, sparse=True))
@parameterized.named_parameters(
named_product(dtype=["float32", "int32", "bool"], sparse=[False, True])
)
def test_one_hot_dtype(self, dtype, sparse):
if sparse and not backend.SUPPORTS_SPARSE_TENSORS:
pytest.skip("Backend does not support sparse tensors")
x = np.arange(5)
out = knn.one_hot(x, 5, axis=0, dtype=dtype, sparse=sparse)
self.assertEqual(backend.standardize_dtype(out.dtype), dtype)
self.assertSparse(out, sparse)
def test_moments(self):
x = KerasTensor([None, 3, 4])
self.assertEqual(knn.moments(x, axes=[0])[0].shape, (3, 4))
self.assertEqual(knn.moments(x, axes=[0, 1])[0].shape, (4,))
self.assertEqual(
knn.moments(x, axes=[0, 1], keepdims=True)[0].shape, (1, 1, 4)
)
self.assertEqual(knn.moments(x, axes=[1])[0].shape, (None, 4))
self.assertEqual(knn.moments(x, axes=[1, 2])[0].shape, (None,))
self.assertEqual(
knn.moments(x, axes=[1, 2], keepdims=True)[0].shape, (None, 1, 1)
)
def test_batch_normalization(self):
x = KerasTensor([None, 3, 4])
mean = KerasTensor([4])
variance = KerasTensor([4])
self.assertEqual(
knn.batch_normalization(x, mean, variance, axis=-1).shape,
(None, 3, 4),
)
x = KerasTensor([None, 3, 4, 5])
self.assertEqual(
knn.batch_normalization(x, mean, variance, axis=2).shape,
(None, 3, 4, 5),
)
mean = KerasTensor([3])
variance = KerasTensor([3])
self.assertEqual(
knn.batch_normalization(x, mean, variance, axis=1).shape,
(None, 3, 4, 5),
)
# Test wrong offset shape
self.assertRaisesRegex(
ValueError,
"`offset` must be a vector of length",
knn.batch_normalization,
KerasTensor([None, 3, 4, 5]),
KerasTensor([5]),
KerasTensor([5]),
axis=-1,
offset=KerasTensor([3]),
scale=KerasTensor([5]),
)
# Test wrong scale shape
self.assertRaisesRegex(
ValueError,
"`scale` must be a vector of length",
knn.batch_normalization,
KerasTensor([None, 3, 4, 5]),
KerasTensor([5]),
KerasTensor([5]),
axis=-1,
offset=KerasTensor([5]),
scale=KerasTensor([3]),
)
def test_ctc_decode(self):
# Test strategy="greedy"
inputs = KerasTensor([None, 2, 3])
sequence_lengths = KerasTensor([None])
decoded, scores = knn.ctc_decode(inputs, sequence_lengths)
self.assertEqual(decoded.shape, (1, None, 2))
self.assertEqual(scores.shape, (None, 1))
# Test strategy="beam_search"
inputs = KerasTensor([None, 2, 3])
sequence_lengths = KerasTensor([None])
decoded, scores = knn.ctc_decode(
inputs, sequence_lengths, strategy="beam_search", top_paths=2
)
self.assertEqual(decoded.shape, (2, None, 2))
self.assertEqual(scores.shape, (None, 2))
def test_normalize(self):
x = KerasTensor([None, 2, 3])
self.assertEqual(knn.normalize(x).shape, (None, 2, 3))
def test_psnr(self):
x1 = KerasTensor([None, 2, 3])
x2 = KerasTensor([None, 5, 6])
out = knn.psnr(x1, x2, max_val=224)
self.assertEqual(out.shape, ())
def test_dot_product_attention(self):
query = KerasTensor([None, None, 8, 16])
key = KerasTensor([None, None, 6, 16])
value = KerasTensor([None, None, 6, 16])
out = knn.dot_product_attention(query, key, value)
self.assertEqual(out.shape, query.shape)
def test_rms_normalization(self):
x = KerasTensor([None, 8, 16])
scale = KerasTensor([None, 8, 16])
out = knn.rms_normalization(x, scale)
self.assertEqual(out.shape, x.shape)
def test_layer_normalization(self):
x = KerasTensor([None, 8, 16])
gamma = KerasTensor([None, 16])
beta = KerasTensor([None, 16])
out = knn.layer_normalization(x, gamma, beta)
self.assertEqual(out.shape, x.shape)
class NNOpsStaticShapeTest(testing.TestCase):
def test_relu(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.relu(x).shape, (1, 2, 3))
def test_relu6(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.relu6(x).shape, (1, 2, 3))
def test_sigmoid(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.sigmoid(x).shape, (1, 2, 3))
def test_sparse_sigmoid(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.sparse_sigmoid(x).shape, (1, 2, 3))
def test_softplus(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.softplus(x).shape, (1, 2, 3))
def test_softsign(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.softsign(x).shape, (1, 2, 3))
def test_silu(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.silu(x).shape, (1, 2, 3))
def test_log_sigmoid(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.log_sigmoid(x).shape, (1, 2, 3))
def test_leaky_relu(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.leaky_relu(x).shape, (1, 2, 3))
def test_hard_sigmoid(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.hard_sigmoid(x).shape, (1, 2, 3))
def test_hard_silu(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.hard_silu(x).shape, (1, 2, 3))
def test_elu(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.elu(x).shape, (1, 2, 3))
def test_selu(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.selu(x).shape, (1, 2, 3))
def test_gelu(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.gelu(x).shape, (1, 2, 3))
def test_celu(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.celu(x).shape, (1, 2, 3))
def test_glu(self):
x = KerasTensor([1, 2, 4])
self.assertEqual(knn.glu(x).shape, (1, 2, 2))
def test_tanh_shrink(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.tanh_shrink(x).shape, (1, 2, 3))
def test_hard_tanh(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.hard_tanh(x).shape, (1, 2, 3))
def test_hard_shrink(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.hard_shrink(x).shape, (1, 2, 3))
def test_threshold(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.threshold(x, 0, 0).shape, (1, 2, 3))
def test_squareplus(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.squareplus(x).shape, (1, 2, 3))
def test_soft_shrink(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.soft_shrink(x).shape, (1, 2, 3))
def test_sparse_plus(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.sparse_plus(x).shape, (1, 2, 3))
def test_softmax(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.softmax(x).shape, (1, 2, 3))
self.assertEqual(knn.softmax(x, axis=1).shape, (1, 2, 3))
self.assertEqual(knn.softmax(x, axis=-1).shape, (1, 2, 3))
def test_log_softmax(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.log_softmax(x).shape, (1, 2, 3))
self.assertEqual(knn.log_softmax(x, axis=1).shape, (1, 2, 3))
self.assertEqual(knn.log_softmax(x, axis=-1).shape, (1, 2, 3))
def test_sparsemax(self):
x = KerasTensor([1, 2, 3])
self.assertEqual(knn.sparsemax(x).shape, (1, 2, 3))
def test_max_pool(self):
data_format = backend.config.image_data_format()
if data_format == "channels_last":
input_shape = (1, 8, 3)
else:
input_shape = (1, 3, 8)
x = KerasTensor(input_shape)
self.assertEqual(
knn.max_pool(x, 2, 1).shape,
(1, 7, 3) if data_format == "channels_last" else (1, 3, 7),
)
self.assertEqual(
knn.max_pool(x, 2, 2, padding="same").shape,
(1, 4, 3) if data_format == "channels_last" else (1, 3, 4),
)
if data_format == "channels_last":
input_shape = (1, 8, 8, 3)
else:
input_shape = (1, 3, 8, 8)
x = KerasTensor(input_shape)
self.assertEqual(
knn.max_pool(x, 2, 1).shape,
(1, 7, 7, 3) if data_format == "channels_last" else (1, 3, 7, 7),
)
self.assertEqual(
knn.max_pool(x, 2, 2, padding="same").shape,
(1, 4, 4, 3) if data_format == "channels_last" else (1, 3, 4, 4),
)
self.assertEqual(
knn.max_pool(x, (2, 2), (2, 2), padding="same").shape,
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/ops/symbolic_arguments.py | keras/src/ops/symbolic_arguments.py | from keras.src import tree
from keras.src.backend import KerasTensor
class SymbolicArguments:
    """Holds the symbolic `(args, kwargs)` of an operation call.

    Records every `KerasTensor` appearing in the arguments and provides
    helpers to map the argument structure to concrete values.
    """

    def __init__(self, *args, **kwargs):
        self.args = tree.map_structure(lambda x: x, args)
        self.kwargs = tree.map_structure(lambda x: x, kwargs)
        self._flat_arguments = tree.flatten((self.args, self.kwargs))
        # Fast-path marker: a single positional `KerasTensor` with no
        # kwargs lets `fill_in` skip the expensive `tree` traversal.
        self._single_positional_tensor = None
        if not self.kwargs and len(self.args) == 1:
            only_arg = self.args[0]
            if isinstance(only_arg, KerasTensor):
                self._single_positional_tensor = only_arg
        self.keras_tensors = [
            arg for arg in self._flat_arguments if isinstance(arg, KerasTensor)
        ]

    def convert(self, conversion_fn):
        """Applies `conversion_fn` to every leaf of `(args, kwargs)`."""
        converted_args = tree.map_structure(conversion_fn, self.args)
        converted_kwargs = tree.map_structure(conversion_fn, self.kwargs)
        return converted_args, converted_kwargs

    def fill_in(self, tensor_dict):
        """Maps KerasTensors to computed values using `tensor_dict`.

        `tensor_dict` maps `KerasTensor` instances (keyed by `id`) to
        their current values.
        """
        single = self._single_positional_tensor
        if single is not None:
            # Performance optimization for the most common case.
            # Approx. 70x faster.
            return (tensor_dict[id(single)],), {}

        def swap(x):
            if isinstance(x, KerasTensor):
                return tensor_dict.get(id(x), None)
            return x

        return self.convert(swap)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/ops/core.py | keras/src/ops/core.py | import ml_dtypes
import numpy as np
from keras.src import backend
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.backend import KerasTensor
from keras.src.backend import any_symbolic_tensors
from keras.src.backend.common.backend_utils import slice_along_axis
from keras.src.ops.operation import Operation
from keras.src.saving import serialization_lib
from keras.src.utils import traceback_utils
class Map(Operation):
    """Symbolic operation wrapping `backend.core.map`."""

    def call(self, f, xs):
        return backend.core.map(f, xs)

    def compute_output_spec(self, f, xs):
        # Trace `f` on a single slice of `xs`, then prepend the mapped
        # (leading) axis to every output spec.
        first_slice = tree.map_structure(lambda t: t[0], xs)
        batch_size = tree.flatten(xs)[0].shape[0]
        sliced_spec = backend.compute_output_spec(f, first_slice)

        def with_batch_axis(t):
            return KerasTensor(
                shape=(batch_size,) + t.shape,
                dtype=t.dtype,
                sparse=t.sparse,
                ragged=t.ragged,
            )

        return tree.map_structure(with_batch_axis, sliced_spec)
@keras_export("keras.ops.map")
def map(f, xs):
"""Map a function over leading array axes.
Like Python’s builtin map, except inputs and outputs are in the form of
stacked arrays. Consider using the `vectorized_map()` transform instead,
unless you need to apply a function element by element for reduced memory
usage or heterogeneous computation with other control flow primitives.
When `xs` is an array type, the semantics of `map()` are given by this
Python implementation:
```python
def map(f, xs):
return np.stack([f(x) for x in xs])
```
Args:
f: Callable defines the function to apply element-wise over the first
axis or axes of `xs`.
xs: Values over which to map along the leading axis.
Returns:
Mapped values.
Examples:
>>> f = lambda x: x**2
>>> xs = keras.ops.arange(10)
>>> ys = keras.ops.map(f, xs)
>>> ys
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
>>> f = lambda x: {"y1": x**2, "y2": x * 10} # Can have nested outputs
>>> ys = keras.ops.map(f, xs)
>>> ys["y1"]
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
>>> ys["y2"]
[0, 10, 20, 30, 40, 50, 60, 70, 80, 90]
"""
if any_symbolic_tensors((xs,)):
return Map().symbolic_call(f, xs)
return backend.core.map(f, xs)
class Scan(Operation):
    # Symbolic operation wrapping `backend.core.scan`: applies `f` over a
    # leading axis while threading a carry value through iterations.
    def __init__(self, length=None, reverse=False, unroll=1, *, name=None):
        super().__init__(name=name)
        self.length = length  # Iteration count; required when `xs` is None.
        self.reverse = reverse  # Whether to scan the leading axis backwards.
        self.unroll = unroll  # Unroll factor (honored by JAX/TF backends).

    def call(self, f, init, xs=None):
        # Delegate execution to the backend implementation.
        return backend.core.scan(
            f,
            init,
            xs,
            length=self.length,
            reverse=self.reverse,
            unroll=self.unroll,
        )

    def compute_output_spec(self, f, init, xs=None):
        # Infer the stacked output spec by tracing `f` on a single slice.
        if xs is None:
            # No scanned inputs: `self.length` must be set; `f` gets None.
            n = int(self.length)
            x = None
        else:
            n = (
                int(self.length)
                if self.length is not None
                else tree.flatten(xs)[0].shape[0]
            )
            # NOTE(review): `xs[0]` takes the first element of the
            # structure, unlike `Map.compute_output_spec` which slices
            # every leaf via `tree.map_structure(lambda t: t[0], xs)` —
            # confirm whether nested `xs` structures are expected here.
            x = xs[0]
        carry, y = backend.compute_output_spec(f, init, x)
        # Stack the per-iteration output along a new leading axis of
        # length `n`.
        y = KerasTensor(shape=(n,) + y.shape, dtype=y.dtype, sparse=y.sparse)
        return carry, y
@keras_export("keras.ops.scan")
def scan(f, init, xs=None, length=None, reverse=False, unroll=1):
    """Scan a function over leading array axes while carrying along state.

    When the type of `xs` is an array type or `None`, and the type of `ys` is
    an array type, the semantics of `scan()` are given roughly by this Python
    implementation:

    ```python
    def scan(f, init, xs, length=None):
        if xs is None:
            xs = [None] * length
        carry = init
        ys = []
        for x in xs:
            carry, y = f(carry, x)
            ys.append(y)
        return carry, np.stack(ys)
    ```

    The loop-carried value `carry` (`init`) must hold a fixed shape and dtype
    across all iterations.

    In TensorFlow, `y` must match `carry` in shape and dtype. This is not
    required in other backends.

    Args:
        f: Callable defines the logic for each loop iteration. This accepts
            two arguments where the first is a value of the loop carry and
            the second is a slice of `xs` along its leading axis.
            This callable returns a pair where the first represents a new
            value for the loop carry and the second represents a slice of
            the output.
        init: The initial loop carry value. This can be a scalar, tensor, or
            any nested structure. It must match the structure of the first
            element returned by `f`.
        xs: Optional value to scan along its leading axis. This can be a
            tensor or any nested structure. If `xs` is not provided, you
            must specify `length` to define the number of loop iterations.
            Defaults to `None`.
        length: Optional integer specifying the number of loop iterations.
            If `length` is not provided, it defaults to the sizes of leading
            axis of the arrays in `xs`. Defaults to `None`.
        reverse: Optional boolean specifying whether to run the scan
            iteration forward or in reverse, equivalent to reversing the
            leading axes of the arrays in both `xs` and in `ys`.
        unroll: Optional positive integer or boolean specifying how many scan
            iterations to unroll within a single iteration of a loop. If an
            integer is provided, it determines how many unrolled loop
            iterations to run within a single rolled iteration of the loop.
            If a boolean is provided, it will determine if the loop is
            completely unrolled (`unroll=True`) or left completely rolled
            (`unroll=False`). Note that unrolling is only supported by JAX
            and TensorFlow backends.

    Returns:
        A pair where the first element represents the final loop carry value
        and the second element represents the stacked outputs of `f` when
        scanned over the leading axis of the inputs.

    Examples:

    >>> sum_fn = lambda c, x: (c + x, c + x)
    >>> init = keras.ops.array(0)
    >>> xs = keras.ops.array([1, 2, 3, 4, 5])
    >>> carry, result = keras.ops.scan(sum_fn, init, xs)
    >>> carry
    15
    >>> result
    [1, 3, 6, 10, 15]
    """
    if any_symbolic_tensors((init, xs)):
        return Scan(
            length=length, reverse=reverse, unroll=unroll
        ).symbolic_call(f, init, xs)
    return backend.core.scan(
        f, init, xs, length, reverse=reverse, unroll=unroll
    )
class AssociativeScan(Operation):
    """Symbolic wrapper for `backend.core.associative_scan`."""

    def __init__(self, reverse=False, axis=0, *, name=None):
        super().__init__(name=name)
        self.reverse = reverse
        self.axis = axis

    def call(self, f, elems):
        return backend.core.associative_scan(
            f, elems, reverse=self.reverse, axis=self.axis
        )

    def compute_output_spec(self, f, elems):
        elems_flat = tree.flatten(elems)
        # All inputs must agree on the size of the scanned axis.
        lens = [elem.shape[self.axis] for elem in elems_flat]
        if len(set(lens)) != 1:
            raise ValueError(
                "Array inputs to associative_scan must have the same "
                "first dimension. (saw: {})".format(
                    [elem.shape for elem in elems_flat]
                )
            )
        # Trace `f` on length-1 slices to discover output dtype/sparseness.
        x = tree.pack_sequence_as(
            elems,
            [slice_along_axis(x, 0, 1, axis=self.axis) for x in elems_flat],
        )
        y_spec = backend.compute_output_spec(f, x, x)

        def _restore_shape(x):
            # Outputs keep the full (unsliced) input shape.
            return KerasTensor(
                shape=elems_flat[0].shape, dtype=x.dtype, sparse=x.sparse
            )

        y_spec = tree.map_structure(_restore_shape, y_spec)
        return y_spec
@keras_export("keras.ops.associative_scan")
def associative_scan(f, elems, reverse=False, axis=0):
    """Performs a scan with an associative binary operation, in parallel.

    This operation is similar to `scan`, with the key difference that
    `associative_scan` is a parallel implementation with
    potentially significant performance benefits, especially when jit
    compiled. The catch is that it can only be used when `f` is a binary
    associative operation (i.e. it must verify
    `f(a, f(b, c)) == f(f(a, b), c)`).

    For an introduction to associative scans, refer to this paper:
    Blelloch, Guy E. 1990.
    [Prefix Sums and Their Applications](
        https://www.cs.cmu.edu/~guyb/papers/Ble93.pdf).

    Args:
        f: A Python callable implementing an associative binary operation
            with signature `r = f(a, b)`. Function `f` must be associative,
            i.e., it must satisfy the equation
            `f(a, f(b, c)) == f(f(a, b), c)`.
            The inputs and result are (possibly nested Python tree structures
            of) array(s) matching `elems`. Each array has a dimension in
            place of the `axis` dimension. `f` should be applied elementwise
            over the `axis` dimension.
            The result `r` has the same shape (and structure) as the
            two inputs `a` and `b`.
        elems: A (possibly nested Python tree structure of) array(s), each
            with an `axis` dimension of size `num_elems`.
        reverse: A boolean stating if the scan should be reversed with
            respect to the `axis` dimension.
        axis: an integer identifying the axis over which the scan should
            occur.

    Returns:
        A (possibly nested Python tree structure of) array(s) of the same
        shape and structure as `elems`, in which the `k`'th element of `axis`
        is the result of recursively applying `f` to combine the first `k`
        elements of `elems` along `axis`. For example, given
        `elems = [a, b, c, ...]`, the result would be
        `[a, f(a, b), f(f(a, b), c), ...]`.

    Examples:

    >>> sum_fn = lambda x, y: x + y
    >>> xs = keras.ops.arange(5)
    >>> ys = keras.ops.associative_scan(sum_fn, xs, axis=0)
    >>> ys
    [0, 1, 3, 6, 10]

    >>> sum_fn = lambda x, y: [x[0] + y[0], x[1] + y[1], x[2] + y[2]]
    >>> xs = [keras.ops.array([[1, 2]]) for _ in range(3)]
    >>> ys = keras.ops.associative_scan(sum_fn, xs, axis=0)
    >>> ys
    [[1, 3], [1, 3], [1, 3]]
    """
    if any_symbolic_tensors((elems,)):
        return AssociativeScan(reverse=reverse, axis=axis).symbolic_call(
            f, elems
        )
    return backend.core.associative_scan(f, elems, reverse=reverse, axis=axis)
class Scatter(Operation):
    """Symbolic op for `keras.ops.scatter` with a statically known shape."""

    def __init__(self, shape, *, name=None):
        super().__init__(name=name)
        self.shape = shape

    def call(self, indices, values):
        return backend.core.scatter(indices, values, self.shape)

    def compute_output_spec(self, indices, values):
        # The output shape is fixed at construction time; only the dtype
        # follows the values tensor.
        out_dtype = values.dtype
        return KerasTensor(self.shape, dtype=out_dtype)
@keras_export("keras.ops.scatter")
def scatter(indices, values, shape):
    """Build a zeros tensor of shape `shape` with `values` placed at
    `indices`.

    Conceptually this performs `zeros[indices] = updates` and returns the
    result, i.e. it is equivalent to:

    ```python
    zeros = keras.ops.zeros(shape)
    output = keras.ops.scatter_update(zeros, indices, values)
    ```

    Args:
        indices: A tensor or list/tuple specifying
            indices for the values in `values`.
        values: A tensor, the values to be set at `indices`.
        shape: Shape of the output tensor.

    Example:

    >>> indices = [[0, 1], [1, 1]]
    >>> values = np.array([1., 1.])
    >>> keras.ops.scatter(indices, values, shape=(2, 2))
    array([[0., 1.],
           [0., 1.]])
    """
    if not any_symbolic_tensors((indices, values)):
        # Eager path: dispatch straight to the backend implementation.
        return backend.core.scatter(indices, values, shape)
    op = Scatter(shape=shape)
    return op.symbolic_call(indices, values)
class ScatterUpdate(Operation):
    """Symbolic op for `keras.ops.scatter_update`."""

    def call(self, inputs, indices, updates):
        return backend.core.scatter_update(inputs, indices, updates)

    def compute_output_spec(self, inputs, indices, updates):
        # Sparse updates never change the shape or dtype of `inputs`.
        return KerasTensor(inputs.shape, dtype=inputs.dtype)
@keras_export("keras.ops.scatter_update")
def scatter_update(inputs, indices, updates):
    """Update inputs via updates at scattered (sparse) indices.

    At a high level, this operation does `inputs[indices] = updates`.
    Assume `inputs` is a tensor of shape `(D0, D1, ..., Dn)`, there are 2
    main usages of `scatter_update`.

    1. `indices` is a 2D tensor of shape `(num_updates, n)`, where
        `num_updates` is the number of updates to perform, and `updates` is
        a 1D tensor of shape `(num_updates,)`. For example, if `inputs` is
        `zeros((4, 4, 4))`, and we want to update `inputs[1, 2, 3]` and
        `inputs[0, 1, 3]` as 1, then we can use:

    ```python
    inputs = np.zeros((4, 4, 4))
    indices = [[1, 2, 3], [0, 1, 3]]
    updates = np.array([1., 1.])
    inputs = keras.ops.scatter_update(inputs, indices, updates)
    ```

    2. `indices` is a 2D tensor of shape `(num_updates, k)`, where
        `num_updates` is the number of updates to perform, and `k`
        (`k < n`) is the size of each index in `indices`. `updates` is a
        `n - k`-D tensor of shape `(num_updates, inputs.shape[k:])`. For
        example, if `inputs = np.zeros((4, 4, 4))`, and we want to update
        `inputs[1, 2, :]` and `inputs[2, 3, :]` as `[1, 1, 1, 1]`, then
        `indices` would have shape `(num_updates, 2)` (`k = 2`), and
        `updates` would have shape `(num_updates, 4)`
        (`inputs.shape[2:] = (4,)`). See the code below:

    ```python
    inputs = np.zeros((4, 4, 4))
    indices = [[1, 2], [2, 3]]
    updates = np.array([[1., 1., 1., 1.], [1., 1., 1., 1.]])
    inputs = keras.ops.scatter_update(inputs, indices, updates)
    ```

    Args:
        inputs: A tensor, the tensor to be updated.
        indices: A tensor or list/tuple of shape `(N, inputs.ndim)`,
            specifying indices to update. `N` is the number of indices to
            update, must be equal to the first dimension of `updates`.
        updates: A tensor, the new values to be put to `inputs` at
            `indices`.

    Returns:
        A tensor, has the same shape and dtype as `inputs`.
    """
    if any_symbolic_tensors((inputs, indices, updates)):
        return ScatterUpdate().symbolic_call(inputs, indices, updates)
    return backend.core.scatter_update(inputs, indices, updates)
class Slice(Operation):
    """Symbolic op for `keras.ops.slice` with a fixed target shape."""

    def __init__(self, shape, *, name=None):
        super().__init__(name=name)
        self.shape = shape

    def call(self, inputs, start_indices):
        return backend.core.slice(inputs, start_indices, self.shape)

    def compute_output_spec(self, inputs, start_indices):
        has_wildcard = any(s == -1 for s in self.shape)
        if has_wildcard and isinstance(start_indices, KerasTensor):
            # A -1 entry requires static start indices to resolve the size.
            raise ValueError(
                "When using -1 in `shape`, `start_indices` should not be a "
                "KerasTensor. "
            )
        # A -1 entry means "take everything from the start index to the end
        # of that dimension".
        final_shape = tuple(
            s if s != -1 else inputs.shape[i] - start_indices[i]
            for i, s in enumerate(self.shape)
        )
        return KerasTensor(final_shape, dtype=inputs.dtype)
@keras_export("keras.ops.slice")
def slice(inputs, start_indices, shape):
    """Return a slice of an input tensor.

    At a high level, this operation is an explicit replacement for array
    slicing e.g. `inputs[start_indices: start_indices + shape]`.
    Unlike slicing via brackets, this operation will accept tensor start
    indices on all backends, which is useful when indices are dynamically
    computed via other tensor operations.

    ```python
    inputs = np.zeros((5, 5))
    start_indices = np.array([3, 3])
    shape = np.array([2, 2])
    inputs = keras.ops.slice(inputs, start_indices, shape)
    ```

    Args:
        inputs: A tensor, the tensor to be updated.
        start_indices: A list/tuple of shape `(inputs.ndim,)`, specifying
            the starting indices for updating.
        shape: The full shape of the returned slice.

    Returns:
        A tensor, has the same shape and dtype as `inputs`.
    """
    if not any_symbolic_tensors((inputs, start_indices)):
        # Eager path: dispatch straight to the backend implementation.
        return backend.core.slice(inputs, start_indices, shape)
    op = Slice(shape=shape)
    return op.symbolic_call(inputs, start_indices)
class SliceUpdate(Operation):
    """Symbolic op for `keras.ops.slice_update`."""

    def call(self, inputs, start_indices, updates):
        return backend.core.slice_update(inputs, start_indices, updates)

    def compute_output_spec(self, inputs, start_indices, updates):
        # Writing a slice into `inputs` preserves its shape and dtype.
        return KerasTensor(inputs.shape, dtype=inputs.dtype)
@keras_export("keras.ops.slice_update")
def slice_update(inputs, start_indices, updates):
    """Update an input by slicing in a tensor of updated values.

    At a high level, this operation does
    `inputs[start_indices: start_indices + updates.shape] = updates`.
    Assume inputs is a tensor of shape `(D0, D1, ..., Dn)`,
    `start_indices` must be a list/tuple of n integers, specifying the
    starting indices. `updates` must have the same rank as `inputs`, and the
    size of each dim must not exceed `Di - start_indices[i]`. For example,
    if we have 2D inputs `inputs = np.zeros((5, 5))`, and we want to update
    the intersection of last 2 rows and last 2 columns as 1, i.e.,
    `inputs[3:, 3:] = np.ones((2, 2))`, then we can use the code below:

    ```python
    inputs = np.zeros((5, 5))
    start_indices = [3, 3]
    updates = np.ones((2, 2))
    inputs = keras.ops.slice_update(inputs, start_indices, updates)
    ```

    Args:
        inputs: A tensor, the tensor to be updated.
        start_indices: A list/tuple of shape `(inputs.ndim,)`, specifying
            the starting indices for updating.
        updates: A tensor, the new values to be put to `inputs` at
            `start_indices`. `updates` must have the same rank as `inputs`.

    Returns:
        A tensor, has the same shape and dtype as `inputs`.
    """
    if any_symbolic_tensors((inputs, start_indices, updates)):
        return SliceUpdate().symbolic_call(inputs, start_indices, updates)
    return backend.core.slice_update(inputs, start_indices, updates)
class Switch(Operation):
    """Symbolic op for `keras.ops.switch`."""

    def call(self, index, branches, *operands):
        return backend.core.switch(index, branches, *operands)

    def compute_output_spec(self, index, branches, *operands):
        # All branches must produce compatible outputs, so tracing the
        # first one is sufficient for the output spec.
        first_branch = branches[0]
        return backend.compute_output_spec(first_branch, *operands)
@keras_export("keras.ops.switch")
def switch(index, branches, *operands):
    """Apply exactly one of the `branches` given by `index`.

    If `index` is out of bounds, it is clamped to within bounds.

    The semantics of `switch` are given roughly by this Python
    implementation:

    ```python
    def switch(index, branches, *operands):
        index = clamp(0, index, len(branches) - 1)
        return branches[index](*operands)
    ```

    Args:
        index: An integer scalar indicating which branch function to apply.
        branches: A sequence of functions to be applied based on `index`.
        operands: Inputs to whichever branch is applied.

    Returns:
        The outputs of `branch(*operands)` for the branch that was selected
        based on `index`.

    Examples:

    >>> add_fn = lambda x, y: x + y
    >>> subtract_fn = lambda x, y: x - y
    >>> x = keras.ops.array(2.0)
    >>> y = keras.ops.array(0.5)
    >>> branches = [add_fn, subtract_fn]
    >>> keras.ops.switch(0, branches, x, y)
    2.5

    >>> keras.ops.switch(1, branches, x, y)
    1.5
    """
    if not any_symbolic_tensors(operands):
        # Eager path: dispatch straight to the backend implementation.
        return backend.core.switch(index, branches, *operands)
    return Switch().symbolic_call(index, branches, *operands)
class WhileLoop(Operation):
    """Symbolic op for `keras.ops.while_loop`."""

    def __init__(self, cond, body, maximum_iterations=None, *, name=None):
        super().__init__(name=name)
        self.cond = cond
        self.body = body
        self.maximum_iterations = maximum_iterations

    def call(self, loop_vars):
        return backend.core.while_loop(
            self.cond,
            self.body,
            loop_vars,
            maximum_iterations=self.maximum_iterations,
        )

    def compute_output_spec(self, loop_vars):
        # The loop returns state with the same structure, shape and dtype
        # as the state it was given.
        def _spec(v):
            return KerasTensor(v.shape, dtype=v.dtype)

        return tree.map_structure(_spec, loop_vars)
@keras_export("keras.ops.while_loop")
def while_loop(
    cond,
    body,
    loop_vars,
    maximum_iterations=None,
):
    """While loop implementation.

    Args:
        cond: A callable that represents the termination condition of the
            loop. Must accept a `loop_vars` like structure as an argument.
            If `loop_vars` is a tuple or list, each element of `loop_vars`
            will be passed positionally to the callable.
        body: A callable that represents the loop body. Must accept a
            `loop_vars` like structure as an argument, and return update
            value with the same structure. If `loop_vars` is a tuple or
            list, each element of `loop_vars` will be passed positionally
            to the callable.
        loop_vars: An arbitrary nested structure of tensor state to persist
            across loop iterations.
        maximum_iterations: Optional maximum number of iterations of the
            while loop to run. If provided, the `cond` output is AND-ed
            with an additional condition ensuring the number of iterations
            executed is no greater than `maximum_iterations`.

    Returns:
        The final loop state, with the same structure, shapes and dtypes as
        `loop_vars`.

    Examples:

    >>> i = 0
    >>> cond = lambda i: i < 10
    >>> body = lambda i: i + 1
    >>> keras.ops.while_loop(cond, body, i)
    10

    >>> x, y = 0, 1
    >>> cond = lambda x, y: x < 10
    >>> body = lambda x, y: (x + 1, y + 1)
    >>> keras.ops.while_loop(cond, body, (x, y))
    10, 11
    """
    if any_symbolic_tensors((loop_vars,)):
        return WhileLoop(
            cond, body, maximum_iterations=maximum_iterations
        ).symbolic_call(loop_vars)
    return backend.core.while_loop(
        cond,
        body,
        loop_vars,
        maximum_iterations=maximum_iterations,
    )
class StopGradient(Operation):
    """Symbolic op for `keras.ops.stop_gradient`."""

    def call(self, variable):
        return backend.core.stop_gradient(variable)

    def compute_output_spec(self, variable):
        # Identity in terms of shape and dtype; only gradient flow changes.
        return KerasTensor(variable.shape, dtype=variable.dtype)
@keras_export("keras.ops.stop_gradient")
def stop_gradient(variable):
    """Stops gradient computation.

    Args:
        variable: A tensor variable for which the gradient
            computation is to be disabled.

    Returns:
        The variable with gradient computation disabled.

    Examples:

    >>> var = keras.backend.convert_to_tensor(
    ...     [1., 2., 3.],
    ...     dtype="float32"
    ... )
    >>> var = keras.ops.stop_gradient(var)
    """
    if not any_symbolic_tensors((variable,)):
        # Eager path: dispatch straight to the backend implementation.
        return backend.core.stop_gradient(variable)
    return StopGradient().symbolic_call(variable)
class ForiLoop(Operation):
    """Symbolic op for `keras.ops.fori_loop`."""

    def __init__(self, lower, upper, body_fun, *, name=None):
        super().__init__(name=name)
        self.lower = lower
        self.upper = upper
        self.body_fun = body_fun

    def call(self, init_val):
        return backend.core.fori_loop(
            self.lower, self.upper, self.body_fun, init_val
        )

    def compute_output_spec(self, init_val):
        # The loop state keeps its shape and dtype across iterations.
        return KerasTensor(init_val.shape, dtype=init_val.dtype)
@keras_export("keras.ops.fori_loop")
def fori_loop(lower, upper, body_fun, init_val):
    """For loop implementation.

    Args:
        lower: The initial value of the loop variable.
        upper: The upper bound of the loop variable.
        body_fun: A callable that represents the loop body. Must take two
            arguments: the loop variable and the loop state. The loop state
            should be updated and returned by this function.
        init_val: The initial value of the loop state.

    Returns:
        The final state after the loop.

    Example:

    >>> lower = 0
    >>> upper = 10
    >>> body_fun = lambda i, s: s + i
    >>> init_val = 0
    >>> keras.ops.fori_loop(lower, upper, body_fun, init_val)
    45
    """
    if any_symbolic_tensors((lower, upper, init_val)):
        return ForiLoop(lower, upper, body_fun).symbolic_call(init_val)
    return backend.core.fori_loop(lower, upper, body_fun, init_val)
class Unstack(Operation):
    """Symbolic op for `keras.ops.unstack`."""

    def __init__(self, num=None, axis=0, *, name=None):
        super().__init__(name=name)
        self.num = num
        self.axis = axis

    def call(self, x):
        return backend.core.unstack(x, self.num, self.axis)

    def compute_output_spec(self, x):
        axis = self.axis
        if axis < 0:
            # Normalize a negative axis to its positive position.
            axis = len(x.shape) + axis
        # Each output tensor drops the unstacked dimension.
        output_shapes = x.shape[:axis] + x.shape[axis + 1 :]
        num = self.num
        if num is None:
            # Infer the number of outputs from the static shape, if known.
            num = x.shape[axis]
        if num is None:
            raise ValueError(
                "Cannot infer argument `num` from shape "
                f"{x.shape}. Either provide a tensor with a "
                "concrete shape in the `axis` dimension or "
                "explicitly pass the `num` argument."
            )
        output = [
            KerasTensor(shape=output_shapes, dtype=x.dtype) for _ in range(num)
        ]
        return output
@keras_export("keras.ops.unstack")
def unstack(x, num=None, axis=0):
    """Unpacks the given dimension of a rank-R tensor into rank-(R-1)
    tensors.

    Args:
        x: The input tensor.
        num: The length of the dimension axis. Automatically inferred
            if `None`.
        axis: The axis along which to unpack.

    Returns:
        A list of tensors unpacked along the given axis.

    Example:

    >>> x = keras.ops.array([[1, 2], [3, 4]])
    >>> keras.ops.unstack(x, axis=0)
    [array([1, 2]), array([3, 4])]
    """
    if not any_symbolic_tensors((x,)):
        # Eager path: dispatch straight to the backend implementation.
        return backend.core.unstack(x, num=num, axis=axis)
    return Unstack(num, axis).symbolic_call(x)
@keras_export("keras.ops.shape")
def shape(x):
    """Gets the shape of the tensor input.

    Note: On the TensorFlow backend, when `x` is a `tf.Tensor` with dynamic
    shape, dimensions which are dynamic in the context of a compiled
    function will have a `tf.Tensor` value instead of a static integer
    value.

    Args:
        x: A tensor. This function will try to access the `shape` attribute
            of the input tensor.

    Returns:
        A tuple of integers or None values, indicating the shape of the
        input tensor.

    Example:

    >>> x = keras.ops.zeros((8, 12))
    >>> keras.ops.shape(x)
    (8, 12)
    """
    if not any_symbolic_tensors((x,)):
        return backend.core.shape(x)
    # Symbolic tensors already carry their (possibly partial) static shape.
    return x.shape
@keras_export("keras.ops.dtype")
def dtype(x):
    """Return the dtype of the tensor input as a standardized string.

    Note that due to the standardization, the dtype will not compare equal
    to the backend-specific version of the dtype.

    Args:
        x: A tensor. This function will try to access the `dtype` attribute
            of the input tensor.

    Returns:
        A string indicating the dtype of the input tensor, e.g. `"float32"`.

    Example:

    >>> x = keras.ops.zeros((8, 12))
    >>> keras.ops.dtype(x)
    'float32'
    """
    raw_dtype = x.dtype
    return backend.standardize_dtype(raw_dtype)
class Cast(Operation):
    """Symbolic op for `keras.ops.cast` to a fixed target dtype."""

    def __init__(self, dtype, *, name=None):
        super().__init__(name=name)
        # Standardize eagerly so the stored dtype is backend-agnostic.
        self.dtype = backend.standardize_dtype(dtype)

    def call(self, x):
        return backend.core.cast(x, self.dtype)

    def compute_output_spec(self, x):
        # Casting changes only the dtype, never the shape.
        return backend.KerasTensor(shape=x.shape, dtype=self.dtype)
@keras_export("keras.ops.cast")
def cast(x, dtype):
    """Cast a tensor to the desired dtype.

    Args:
        x: A tensor or variable.
        dtype: The target type.

    Returns:
        A tensor of the specified `dtype`.

    Example:

    >>> x = keras.ops.arange(4)
    >>> x = keras.ops.cast(x, dtype="float16")
    """
    if not any_symbolic_tensors((x,)):
        # Eager path: dispatch straight to the backend implementation.
        return backend.core.cast(x, dtype)
    return Cast(dtype=dtype)(x)
class SaturateCast(Operation):
    """Symbolic op for `keras.ops.saturate_cast` to a fixed target dtype."""

    def __init__(self, dtype, *, name=None):
        super().__init__(name=name)
        # Standardize eagerly so the stored dtype is backend-agnostic.
        self.dtype = backend.standardize_dtype(dtype)

    def call(self, x):
        return _saturate_cast(x, self.dtype)

    def compute_output_spec(self, x):
        # Saturating cast changes only the dtype, never the shape.
        return backend.KerasTensor(shape=x.shape, dtype=self.dtype)
@keras_export("keras.ops.saturate_cast")
def saturate_cast(x, dtype):
    """Performs a safe saturating cast to the desired dtype.

    Saturating cast prevents data type overflow when casting to `dtype` with
    smaller values range. E.g.
    `ops.cast(ops.cast([-1, 256], "float32"), "uint8")` returns `[255, 0]`,
    but `ops.saturate_cast(ops.cast([-1, 256], "float32"), "uint8")` returns
    `[0, 255]`.

    Args:
        x: A tensor or variable.
        dtype: The target type.

    Returns:
        A safely casted tensor of the specified `dtype`.

    Example:

    Image resizing with bicubic interpolation may produce values outside
    original range.
    >>> image2x2 = np.array([0, 1, 254, 255], dtype="uint8").reshape(1, 2, 2, 1)
    >>> image4x4 = tf.image.resize(image2x2, (4, 4), method="bicubic")
    >>> print(image4x4.numpy().squeeze())
    >>> # [[-22.500004 -22.204624 -21.618908 -21.32353 ]
    >>> #  [ 52.526054  52.82143   53.407146  53.70253 ]
    >>> #  [201.29752  201.59288  202.17859  202.47395 ]
    >>> #  [276.32355  276.61893  277.20465  277.50006 ]]

    Casting this resized image back to `uint8` will cause overflow.
    >>> image4x4_casted = ops.cast(image4x4, "uint8")
    >>> print(image4x4_casted.numpy().squeeze())
    >>> # [[234 234 235 235]
    >>> #  [ 52  52  53  53]
    >>> #  [201 201 202 202]
    >>> #  [ 20  20  21  21]]

    Saturate casting to `uint8` will clip values to `uint8` range before
    casting and will not cause overflow.
    >>> image4x4_saturate_casted = ops.saturate_cast(image4x4, "uint8")
    >>> print(image4x4_saturate_casted.numpy().squeeze())
    >>> # [[  0   0   0   0]
    >>> #  [ 52  52  53  53]
    >>> #  [201 201 202 202]
    >>> #  [255 255 255 255]]
    """
    if any_symbolic_tensors((x,)):
        return SaturateCast(dtype=dtype)(x)
    return _saturate_cast(x, dtype)
def _saturate_cast(x, dtype, backend_module=None):
    """Clip `x` into the representable range of `dtype`, then cast.

    Args:
        x: Input tensor.
        dtype: Target dtype.
        backend_module: Module providing `numpy.clip` and `cast`; defaults
            to the current Keras backend.

    Returns:
        `x` clipped to the valid range of `dtype` and cast to `dtype`.
    """
    backend_module = backend_module or backend

    def get_dtype_min_max(dtype):
        # Representable range of a dtype; bool is treated as {0, 1}.
        if "bool" == dtype:
            dtype_min = 0
            dtype_max = 1
        elif "int" in dtype:
            dtype_min = ml_dtypes.iinfo(dtype).min
            dtype_max = ml_dtypes.iinfo(dtype).max
        else:
            dtype_min = ml_dtypes.finfo(dtype).min
            dtype_max = ml_dtypes.finfo(dtype).max
        return dtype_min, dtype_max

    dtype = backend.standardize_dtype(dtype)
    in_dtype = backend.standardize_dtype(x.dtype)
    in_min, in_max = get_dtype_min_max(in_dtype)
    out_min, out_max = get_dtype_min_max(dtype)

    # The output min/max may not actually be representable in the
    # in_dtype (e.g. casting float32 to uint32). This can lead to undefined
    # behavior when trying to cast a value outside the valid range of the
    # target type. We work around this by nudging the min/max to fall within
    # the valid output range. The catch is that we may actually saturate
    # to a value less than the true saturation limit, but this is the best we
    # can do in order to avoid UB without backend op.
    min_limit = np.maximum(in_min, out_min).astype(in_dtype)
    if min_limit < out_min:
        # The cast rounded below the valid range: step one ULP toward zero.
        min_limit = np.nextafter(min_limit, 0, dtype=in_dtype)
    max_limit = np.minimum(in_max, out_max).astype(in_dtype)
    if max_limit > out_max:
        # The cast rounded above the valid range: step one ULP toward zero.
        max_limit = np.nextafter(max_limit, 0, dtype=in_dtype)

    # Unconditionally apply `clip` to fix `inf` behavior.
    x = backend_module.numpy.clip(x, min_limit, max_limit)
    return backend_module.cast(x, dtype)
class ConvertToTensor(Operation):
def __init__(self, dtype=None, sparse=None, ragged=None, *, name=None):
super().__init__(name=name)
self.dtype = None if dtype is None else backend.standardize_dtype(dtype)
self.sparse = sparse
self.ragged = ragged
def call(self, x):
return backend.core.convert_to_tensor(
x, dtype=self.dtype, sparse=self.sparse, ragged=self.ragged
)
def compute_output_spec(self, x):
dtype = (
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/ops/function_test.py | keras/src/ops/function_test.py | import json
import numpy as np
from keras.src import testing
from keras.src.backend.common import keras_tensor
from keras.src.layers import Dense
from keras.src.layers import Input
from keras.src.models import Model
from keras.src.models import Sequential
from keras.src.ops import function
from keras.src.ops import numpy as knp
class FunctionTest(testing.TestCase):
    """Tests for `keras.src.ops.function.Function` construction and calls."""

    def test_define_and_call(self):
        """A list-I/O Function works for eager, symbolic and nested calls."""
        x1 = keras_tensor.KerasTensor((2, 3))
        x2 = keras_tensor.KerasTensor((2, 3))
        x = knp.add(x1, x2)
        y1 = x * 3
        y2 = x**2

        fn = function.Function(
            inputs=[x1, x2], outputs=[y1, y2], name="test_function"
        )
        self.assertEqual(fn.name, "test_function")

        # Eager call
        y_val = fn([np.ones((2, 3)), np.ones((2, 3))])
        self.assertIsInstance(y_val, list)
        self.assertAllClose(y_val[0], np.ones((2, 3)) * 6)
        self.assertAllClose(y_val[1], np.ones((2, 3)) * 4)

        # Symbolic call
        x1_alt = keras_tensor.KerasTensor((2, 3))
        x2_alt = keras_tensor.KerasTensor((2, 3))
        y_val = fn([x1_alt, x2_alt])
        self.assertIsInstance(y_val[0], keras_tensor.KerasTensor)
        self.assertEqual(y_val[0].shape, (2, 3))
        self.assertIsInstance(y_val[1], keras_tensor.KerasTensor)
        self.assertEqual(y_val[1].shape, (2, 3))

        # Recursion: build a new Function from the symbolic outputs above.
        fn = function.Function(inputs=[x1_alt, x2_alt], outputs=y_val)
        y_val = fn([np.ones((2, 3)), np.ones((2, 3))])
        self.assertIsInstance(y_val, list)
        self.assertAllClose(y_val[0], np.ones((2, 3)) * 6)
        self.assertAllClose(y_val[1], np.ones((2, 3)) * 4)

    def test_dynamic_shape_inference(self):
        """Concrete shapes propagate through a Function with a None dim."""
        x = keras_tensor.KerasTensor((None, 3))
        y = x**2
        fn = function.Function(x, y)

        # Test with compute_output_spec
        out = fn.compute_output_spec(keras_tensor.KerasTensor((4, 3)))
        self.assertIsInstance(out, keras_tensor.KerasTensor)
        self.assertEqual(out.shape, (4, 3))

        # Test with compute_output_shape
        out = fn.compute_output_shape((None, 3))
        self.assertIsInstance(out, tuple)
        self.assertEqual(out, (None, 3))

        # Test with call
        out = fn(keras_tensor.KerasTensor((4, 3)))
        self.assertIsInstance(out, keras_tensor.KerasTensor)
        self.assertEqual(out.shape, (4, 3))

    def test_dict_io(self):
        """A Function with dict inputs/outputs preserves the dict keys."""
        x1 = keras_tensor.KerasTensor((2, 3))
        x2 = keras_tensor.KerasTensor((2, 3))
        x = knp.add(x1, x2)
        y1 = x * 3
        y2 = x**2

        fn = function.Function(
            inputs={"x1": x1, "x2": x2}, outputs={"y1": y1, "y2": y2}
        )

        # Eager call
        y_val = fn({"x1": np.ones((2, 3)), "x2": np.ones((2, 3))})
        self.assertIsInstance(y_val, dict)
        self.assertAllClose(y_val["y1"], np.ones((2, 3)) * 6)
        self.assertAllClose(y_val["y2"], np.ones((2, 3)) * 4)

        # Symbolic call
        x1_alt = keras_tensor.KerasTensor((2, 3))
        x2_alt = keras_tensor.KerasTensor((2, 3))
        y_val = fn({"x1": x1_alt, "x2": x2_alt})
        self.assertIsInstance(y_val["y1"], keras_tensor.KerasTensor)
        self.assertEqual(y_val["y1"].shape, (2, 3))
        self.assertIsInstance(y_val["y2"], keras_tensor.KerasTensor)
        self.assertEqual(y_val["y2"].shape, (2, 3))

    def test_invalid_inputs_error(self):
        """Calling with wrong structure, rank or shape raises ValueError."""
        x1 = keras_tensor.KerasTensor((2, 3))
        x2 = keras_tensor.KerasTensor((2, 3))
        x = knp.add(x1, x2)
        y1 = x * 3
        y2 = x**2

        fn = function.Function(
            inputs=[x1, x2], outputs=[y1, y2], name="test_function"
        )
        self.assertEqual(fn.name, "test_function")

        # Bad structure
        with self.assertRaisesRegex(ValueError, "invalid input structure"):
            _ = fn(np.ones((2, 3)))

        # Bad rank
        with self.assertRaisesRegex(ValueError, "incompatible inputs"):
            _ = fn([np.ones((2, 3, 3)), np.ones((2, 3))])

        # Bad shape
        with self.assertRaisesRegex(ValueError, "incompatible inputs"):
            _ = fn([np.ones((4, 3)), np.ones((2, 3))])

    def test_graph_disconnected_error(self):
        # TODO
        pass

    def test_serialization(self):
        """A Model round-trips through get_config/from_config unchanged."""
        inputs = Input(shape=(10,))
        outputs = Dense(1)(inputs)
        model = Model(inputs=inputs, outputs=outputs)
        config = model.get_config()
        new_model = Model.from_config(config)
        self.assertEqual(
            json.dumps(model.get_config()), json.dumps(new_model.get_config())
        )

    def test_function_with_empty_outputs(self):
        """An empty `outputs` argument is rejected."""
        x = keras_tensor.KerasTensor((None, 3))
        with self.assertRaisesRegex(
            ValueError, "`outputs` argument cannot be empty"
        ):
            _ = function.Function(inputs=x, outputs=[])

    def test_function_with_empty_inputs(self):
        """An empty `inputs` argument is rejected."""
        x = keras_tensor.KerasTensor((None, 3))
        with self.assertRaisesRegex(
            ValueError, "`inputs` argument cannot be empty"
        ):
            _ = function.Function(inputs=[], outputs=x)

    def test_function_with_unconnected_inputs(self):
        """Inputs that do not reach the outputs are rejected."""
        model_1 = Sequential(
            [
                Input(shape=(6,)),
                Dense(3, activation="sigmoid"),
            ]
        )
        model_2 = Sequential(
            [
                Input(shape=(3,)),
                Dense(2, activation="sigmoid"),
            ],
        )
        with self.assertRaisesRegex(
            ValueError, "`inputs` not connected to `outputs`"
        ):
            _ = Model(Input(shape=(6,)), model_2(model_1(Input(shape=(6,)))))
        with self.assertRaisesRegex(
            ValueError, "`inputs` not connected to `outputs`"
        ):
            _ = Model(model_1(Input(shape=(6,))), model_2(Input(shape=(3,))))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/ops/operation_utils_test.py | keras/src/ops/operation_utils_test.py | from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.layers.core import input_layer
from keras.src.ops import operation_utils
class OperationUtilsTest(testing.TestCase):
    def test_get_source_inputs(self):
        """Trace a chain of ops back to its original KerasTensor inputs."""
        x1 = backend.KerasTensor(shape=(2,))
        x2 = backend.KerasTensor(shape=(2,))
        x = x1 + x2
        x += 2
        x = ops.square(x)
        self.assertEqual(operation_utils.get_source_inputs(x), [x1, x2])
    def test_get_source_inputs_return_input_tensor(self):
        """An Input tensor is returned as its own (identical) source."""
        inputs = input_layer.Input(shape=(10,))
        self.assertIs(operation_utils.get_source_inputs(inputs)[0], inputs)
    def test_compute_expand_dims_output_shape(self):
        """expand_dims shape helper handles single and multiple axes."""
        input_shape = (2, 3, 4)
        axis = -1
        output_shape = operation_utils.compute_expand_dims_output_shape(
            input_shape, axis
        )
        expected_output_shape = (2, 3, 4, 1)
        self.assertEqual(output_shape, expected_output_shape)

        # A tuple of axes inserts a size-1 dim at each requested position.
        input_shape = (2, 3, 4)
        axis = (1, -1)
        output_shape = operation_utils.compute_expand_dims_output_shape(
            input_shape, axis
        )
        expected_output_shape = (2, 1, 3, 4, 1)
        self.assertEqual(output_shape, expected_output_shape)
    def test_compute_pooling_output_shape(self):
        """Default pooling shape: 4x4 input, 2x2 pool, stride 2 -> 2x2."""
        input_shape = (1, 4, 4, 1)
        pool_size = (2, 2)
        strides = (2, 2)
        output_shape = operation_utils.compute_pooling_output_shape(
            input_shape, pool_size, strides
        )
        expected_output_shape = (1, 2, 2, 1)
        self.assertEqual(output_shape, expected_output_shape)
    def test_compute_pooling_output_shape_with_none(self):
        """A None batch dimension is preserved through pooling."""
        input_shape = (None, 4, 4, 1)
        pool_size = (2, 2)
        strides = (2, 2)
        output_shape = operation_utils.compute_pooling_output_shape(
            input_shape, pool_size, strides
        )
        expected_output_shape = (None, 2, 2, 1)
        self.assertEqual(output_shape, expected_output_shape)
    def test_compute_pooling_output_shape_valid_padding(self):
        """Explicit 'valid' padding matches the default behavior."""
        input_shape = (1, 4, 4, 1)
        pool_size = (2, 2)
        strides = (2, 2)
        output_shape = operation_utils.compute_pooling_output_shape(
            input_shape, pool_size, strides, padding="valid"
        )
        self.assertEqual(output_shape, (1, 2, 2, 1))
def test_compute_pooling_output_shape_channels_last(self):
input_shape = (1, 4, 4, 3)
pool_size = (2, 2)
strides = (2, 2)
output_shape = operation_utils.compute_pooling_output_shape(
input_shape,
pool_size,
strides,
padding="valid",
data_format="channels_last",
)
self.assertEqual(output_shape, (1, 2, 2, 3))
def test_compute_pooling_output_shape_same_padding_stride1(self):
input_shape = (1, 4, 4, 3)
pool_size = (2, 2)
strides = (1, 1)
output_shape = operation_utils.compute_pooling_output_shape(
input_shape,
pool_size,
strides,
padding="same",
data_format="channels_last",
)
self.assertEqual(output_shape, (1, 4, 4, 3))
def test_compute_conv_output_shape(self):
input_shape = (1, 4, 4, 1)
filters = 1
kernel_size = (3, 3)
strides = (1, 1)
output_shape = operation_utils.compute_conv_output_shape(
input_shape, filters, kernel_size, strides
)
expected_output_shape = (1, 2, 2, 1)
self.assertEqual(output_shape, expected_output_shape)
def test_compute_conv_output_shape_with_none(self):
input_shape = (None, 4, 4, 1)
kernel_size = (3, 3)
filters = 1
strides = (1, 1)
output_shape = operation_utils.compute_conv_output_shape(
input_shape, filters, kernel_size, strides
)
expected_output_shape = (None, 2, 2, 1)
self.assertEqual(output_shape, expected_output_shape)
def test_compute_conv_output_shape_valid_padding(self):
input_shape = (1, 4, 4, 1)
kernel_size = (3, 3)
filters = 1
strides = (2, 2)
output_shape = operation_utils.compute_conv_output_shape(
input_shape, filters, kernel_size, strides, padding="valid"
)
self.assertEqual(output_shape, (1, 1, 1, 1))
def test_compute_conv_output_shape_channels_last(self):
input_shape = (1, 4, 4, 3)
kernel_size = (3, 3)
filters = 3
strides = (2, 2)
output_shape = operation_utils.compute_conv_output_shape(
input_shape,
filters,
kernel_size,
strides,
padding="valid",
data_format="channels_last",
)
self.assertEqual(output_shape, (1, 1, 1, 3))
def test_compute_conv_output_shape_same_padding_stride1(self):
input_shape = (1, 4, 4, 3)
kernel_size = (3, 3)
filters = 3
strides = (1, 1)
output_shape = operation_utils.compute_conv_output_shape(
input_shape,
filters,
kernel_size,
strides,
padding="same",
data_format="channels_last",
)
self.assertEqual(output_shape, (1, 4, 4, 3))
def test_compute_reshape_output_shape(self):
input_shape = (1, 4, 4, 1)
target_shape = (16, 1)
output_shape = operation_utils.compute_reshape_output_shape(
input_shape, newshape=target_shape, newshape_arg_name="New shape"
)
self.assertEqual(output_shape, target_shape)
def test_reduce_shape_no_axes_no_keepdims(self):
input_shape = (1, 4, 4, 1)
output_shape = operation_utils.reduce_shape(input_shape)
expected_output_shape = ()
self.assertEqual(output_shape, expected_output_shape)
def test_reduce_shape_no_axes_with_keepdims(self):
input_shape = (1, 4, 4, 1)
output_shape = operation_utils.reduce_shape(input_shape, keepdims=True)
expected_output_shape = (1, 1, 1, 1)
self.assertEqual(output_shape, expected_output_shape)
def test_reduce_shape_single_axis_no_keepdims(self):
input_shape = (1, 4, 4, 1)
axes = [1]
output_shape = operation_utils.reduce_shape(input_shape, axes)
expected_output_shape = (1, 4, 1)
self.assertEqual(output_shape, expected_output_shape)
def test_reduce_shape_single_axis_with_keepdims(self):
input_shape = (1, 4, 4, 1)
axes = [1]
output_shape = operation_utils.reduce_shape(
input_shape, axes, keepdims=True
)
expected_output_shape = (1, 1, 4, 1)
self.assertEqual(output_shape, expected_output_shape)
def test_reduce_shape_multiple_axes_no_keepdims(self):
input_shape = (1, 4, 4, 1)
axes = [1, 2]
output_shape = operation_utils.reduce_shape(input_shape, axes)
expected_output_shape = (1, 1)
self.assertEqual(output_shape, expected_output_shape)
def test_reduce_shape_out_of_order_axes_no_keepdims(self):
input_shape = (1, 4, 4, 1)
axes = [2, 1]
output_shape = operation_utils.reduce_shape(input_shape, axes)
expected_output_shape = (1, 1)
self.assertEqual(output_shape, expected_output_shape)
def test_reduce_shape_negative_axes_no_keepdims(self):
input_shape = (1, 4, 4, 1)
axes = [-2, -3]
output_shape = operation_utils.reduce_shape(input_shape, axes)
expected_output_shape = (1, 1)
self.assertEqual(output_shape, expected_output_shape)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/ops/nn.py | keras/src/ops/nn.py | """Commonly-used neural network operations not included in NumPy."""
import warnings
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.backend import KerasTensor
from keras.src.backend import any_symbolic_tensors
from keras.src.backend import config
from keras.src.backend import standardize_data_format
from keras.src.backend.common.backend_utils import (
compute_conv_transpose_output_shape,
)
from keras.src.ops import operation_utils
from keras.src.ops.operation import Operation
from keras.src.ops.operation_utils import reduce_shape
from keras.src.utils.python_utils import is_continuous_axis
class Relu(Operation):
def call(self, x):
return backend.nn.relu(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_export(["keras.ops.relu", "keras.ops.nn.relu"])
def relu(x):
"""Rectified linear unit activation function.
It is defined as `f(x) = max(0, x)`.
Args:
x: Input tensor.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x1 = keras.ops.convert_to_tensor([-1.0, 0.0, 1.0, 0.2])
>>> keras.ops.relu(x1)
array([0.0, 0.0, 1.0, 0.2], dtype=float32)
"""
if any_symbolic_tensors((x,)):
return Relu().symbolic_call(x)
return backend.nn.relu(x)
class Relu6(Operation):
def call(self, x):
return backend.nn.relu6(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_export(["keras.ops.relu6", "keras.ops.nn.relu6"])
def relu6(x):
"""Rectified linear unit activation function with upper bound of 6.
It is defined as `f(x) = np.clip(x, 0, 6)`.
Args:
x: Input tensor.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = keras.ops.convert_to_tensor([-3.0, -2.0, 0.1, 0.2, 6.0, 8.0])
>>> keras.ops.relu6(x)
array([0.0, 0.0, 0.1, 0.2, 6.0, 6.0], dtype=float32)
"""
if any_symbolic_tensors((x,)):
return Relu6().symbolic_call(x)
return backend.nn.relu6(x)
class Sigmoid(Operation):
def call(self, x):
return backend.nn.sigmoid(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_export(["keras.ops.sigmoid", "keras.ops.nn.sigmoid"])
def sigmoid(x):
"""Sigmoid activation function.
It is defined as `f(x) = 1 / (1 + exp(-x))`.
Args:
x: Input tensor.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = keras.ops.convert_to_tensor([-6.0, 1.0, 0.0, 1.0, 6.0])
>>> keras.ops.sigmoid(x)
array([0.00247262, 0.7310586, 0.5, 0.7310586, 0.9975274], dtype=float32)
"""
if any_symbolic_tensors((x,)):
return Sigmoid().symbolic_call(x)
return backend.nn.sigmoid(x)
class SparseSigmoid(Operation):
def call(self, x):
return backend.nn.sparse_sigmoid(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_export(["keras.ops.sparse_sigmoid", "keras.ops.nn.sparse_sigmoid"])
def sparse_sigmoid(x):
"""Sparse sigmoid activation function.
It is defined as
`f(x) = 0` for `x <= -1`,
`f(x) = 0.5 * (x + 1)` for `-1 < x < 1`,
`f(x) = 1` for `x >= 1`.
Args:
x: Input tensor.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = keras.ops.convert_to_tensor([-6.0, 1.0, 0.0, 1.0, 6.0])
>>> keras.ops.sparse_sigmoid(x)
array([0. , 1. , 0.5, 1. , 1. ], dtype=float32)
"""
if any_symbolic_tensors((x,)):
return SparseSigmoid().symbolic_call(x)
return backend.nn.sparse_sigmoid(x)
class Softplus(Operation):
def call(self, x):
return backend.nn.softplus(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_export(["keras.ops.softplus", "keras.ops.nn.softplus"])
def softplus(x):
"""Softplus activation function.
It is defined as `f(x) = log(exp(x) + 1)`, where `log` is the natural
logarithm and `exp` is the exponential function.
Args:
x: Input tensor.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = keras.ops.convert_to_tensor([-0.555, 0.0, 0.555])
>>> keras.ops.softplus(x)
array([0.45366603, 0.6931472, 1.008666], dtype=float32)
"""
if any_symbolic_tensors((x,)):
return Softplus().symbolic_call(x)
return backend.nn.softplus(x)
class Softsign(Operation):
def call(self, x):
return backend.nn.softsign(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_export(["keras.ops.softsign", "keras.ops.nn.softsign"])
def softsign(x):
"""Softsign activation function.
It is defined as `f(x) = x / (abs(x) + 1)`.
Args:
x: Input tensor.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = keras.ops.convert_to_tensor([-0.100, -10.0, 1.0, 0.0, 100.0])
>>> keras.ops.softsign(x)
Array([-0.09090909, -0.90909094, 0.5, 0.0, 0.990099], dtype=float32)
"""
if any_symbolic_tensors((x,)):
return Softsign().symbolic_call(x)
return backend.nn.softsign(x)
class SoftShrink(Operation):
def __init__(self, threshold=0.5, *, name=None):
super().__init__(name=name)
self.threshold = threshold
def call(self, x):
return backend.nn.soft_shrink(x, self.threshold)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_export(["keras.ops.soft_shrink", "keras.ops.nn.soft_shrink"])
def soft_shrink(x, threshold=0.5):
"""Soft Shrink activation function.
It is defined as
`f(x) = x - threshold` if `x > threshold`,
`f(x) = x + threshold` if `x < -threshold`,
`f(x) = 0` otherwise.
Args:
x: Input tensor.
threshold: Threshold value. Defaults to 0.5.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = np.array([-1.0, 0.0, 1.0])
>>> x_soft_shrink = keras.ops.soft_shrink(x)
>>> print(x_soft_shrink)
array([-0.5 0. 0.5], shape=(3,), dtype=float64)
"""
if any_symbolic_tensors((x,)):
return SoftShrink(threshold).symbolic_call(x)
return backend.nn.soft_shrink(x, threshold)
class SparsePlus(Operation):
def call(self, x):
return backend.nn.sparse_plus(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_export(["keras.ops.sparse_plus", "keras.ops.nn.sparse_plus"])
def sparse_plus(x):
"""SparsePlus activation function.
It is defined as
`f(x) = 0` for `x <= -1`.
`f(x) = (1/4) * (x + 1)^2` for `-1 < x < 1`.
`f(x) = x` for `x >= 1`.
Args:
x: Input tensor.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = np.array([-1.0, 0.0, 1.0])
>>> x_sparse_plus = keras.ops.sparse_plus(x)
>>> print(x_sparse_plus)
Array([0. 0.25 1. ], shape=(3,), dtype=float32)
"""
if any_symbolic_tensors((x,)):
return SparsePlus().symbolic_call(x)
return backend.nn.sparse_plus(x)
class Silu(Operation):
def call(self, x):
return backend.nn.silu(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_export(
[
"keras.ops.silu",
"keras.ops.nn.silu",
"keras.ops.swish",
"keras.ops.nn.swish",
]
)
def silu(x):
"""Sigmoid Linear Unit (SiLU) activation function, also known as Swish.
The SiLU activation function is computed by the sigmoid function multiplied
by its input. It is defined as `f(x) = x * sigmoid(x)`.
Args:
x: Input tensor.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = keras.ops.convert_to_tensor([-6.0, 1.0, 0.0, 1.0, 6.0])
>>> keras.ops.sigmoid(x)
array([0.00247262, 0.7310586, 0.5, 0.7310586, 0.9975274], dtype=float32)
>>> keras.ops.silu(x)
array([-0.0148357, 0.7310586, 0.0, 0.7310586, 5.9851646], dtype=float32)
"""
if any_symbolic_tensors((x,)):
return Silu().symbolic_call(x)
return backend.nn.silu(x)
class Squareplus(Operation):
def __init__(self, b=4, *, name=None):
super().__init__(name=name)
self.b = b
def call(self, x):
return backend.nn.squareplus(x, self.b)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_export(["keras.ops.squareplus", "keras.ops.nn.squareplus"])
def squareplus(x, b=4):
"""Squareplus activation function.
The Squareplus activation function is defined as:
`f(x) = (x + sqrt(x^2 + b)) / 2`
Args:
x: Input tensor.
b: Smoothness parameter. Defaults to 4.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = np.array([-1.0, 0.0, 1.0])
>>> x_squareplus = keras.ops.squareplus(x)
>>> print(x_squareplus)
array([0.6180, 1.0000, 1.6180], dtype=float32)
"""
if any_symbolic_tensors((x,)):
return Squareplus(b).symbolic_call(x)
return backend.nn.squareplus(x, b)
class LogSigmoid(Operation):
def call(self, x):
return backend.nn.log_sigmoid(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_export(
[
"keras.ops.log_sigmoid",
"keras.ops.nn.log_sigmoid",
]
)
def log_sigmoid(x):
"""Logarithm of the sigmoid activation function.
It is defined as `f(x) = log(1 / (1 + exp(-x)))`.
Args:
x: Input tensor.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = keras.ops.convert_to_tensor([-0.541391, 0.0, 0.50, 5.0])
>>> keras.ops.log_sigmoid(x)
array([-1.0000418, -0.6931472, -0.474077, -0.00671535], dtype=float32)
"""
if any_symbolic_tensors((x,)):
return LogSigmoid().symbolic_call(x)
return backend.nn.log_sigmoid(x)
class LeakyRelu(Operation):
def __init__(self, negative_slope=0.2, *, name=None):
super().__init__(name=name)
self.negative_slope = negative_slope
def call(self, x):
return backend.nn.leaky_relu(x, self.negative_slope)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_export(["keras.ops.leaky_relu", "keras.ops.nn.leaky_relu"])
def leaky_relu(x, negative_slope=0.2):
"""Leaky version of a Rectified Linear Unit activation function.
It allows a small gradient when the unit is not active, it is defined as:
`f(x) = alpha * x for x < 0` or `f(x) = x for x >= 0`.
Args:
x: Input tensor.
negative_slope: Slope of the activation function at x < 0.
Defaults to `0.2`.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = np.array([-1., 0., 1.])
>>> x_leaky_relu = keras.ops.leaky_relu(x)
>>> print(x_leaky_relu)
array([-0.2, 0. , 1. ], shape=(3,), dtype=float64)
"""
if any_symbolic_tensors((x,)):
return LeakyRelu(negative_slope).symbolic_call(x)
return backend.nn.leaky_relu(x, negative_slope=negative_slope)
class HardSigmoid(Operation):
def call(self, x):
return backend.nn.hard_sigmoid(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_export(
[
"keras.ops.hard_sigmoid",
"keras.ops.nn.hard_sigmoid",
]
)
def hard_sigmoid(x):
"""Hard sigmoid activation function.
It is defined as:
`0 if x < -2.5`, `1 if x > 2.5`, `(0.2 * x) + 0.5 if -2.5 <= x <= 2.5`.
Args:
x: Input tensor.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = np.array([-1., 0., 1.])
>>> x_hard_sigmoid = keras.ops.hard_sigmoid(x)
>>> print(x_hard_sigmoid)
array([0.3, 0.5, 0.7], shape=(3,), dtype=float64)
"""
if any_symbolic_tensors((x,)):
return HardSigmoid().symbolic_call(x)
return backend.nn.hard_sigmoid(x)
class HardSilu(Operation):
def call(self, x):
return backend.nn.hard_silu(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_export(
[
"keras.ops.hard_silu",
"keras.ops.nn.hard_silu",
"keras.ops.hard_swish",
"keras.ops.nn.hard_swish",
]
)
def hard_silu(x):
"""Hard SiLU activation function, also known as Hard Swish.
It is defined as:
- `0` if `if x < -3`
- `x` if `x > 3`
- `x * (x + 3) / 6` if `-3 <= x <= 3`
It's a faster, piecewise linear approximation of the silu activation.
Args:
x: Input tensor.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = keras.ops.convert_to_tensor([-3.0, -1.0, 0.0, 1.0, 3.0])
>>> keras.ops.hard_silu(x)
array([-0.0, -0.3333333, 0.0, 0.6666667, 3.0], shape=(5,), dtype=float32)
"""
if any_symbolic_tensors((x,)):
return HardSilu().symbolic_call(x)
return backend.nn.hard_silu(x)
class Elu(Operation):
def __init__(self, alpha=1.0, *, name=None):
super().__init__(name=name)
self.alpha = alpha
def call(self, x):
return backend.nn.elu(x, alpha=self.alpha)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_export(["keras.ops.elu", "keras.ops.nn.elu"])
def elu(x, alpha=1.0):
"""Exponential Linear Unit activation function.
It is defined as:
`f(x) = alpha * (exp(x) - 1.) for x < 0`, `f(x) = x for x >= 0`.
Args:
x: Input tensor.
alpha: A scalar, slope of positive section. Defaults to `1.0`.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = np.array([-1., 0., 1.])
>>> x_elu = keras.ops.elu(x)
>>> print(x_elu)
array([-0.63212055, 0., 1.], shape=(3,), dtype=float64)
"""
if any_symbolic_tensors((x,)):
return Elu(alpha).symbolic_call(x)
return backend.nn.elu(x, alpha=alpha)
class Selu(Operation):
def call(self, x):
return backend.nn.selu(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_export(["keras.ops.selu", "keras.ops.nn.selu"])
def selu(x):
"""Scaled Exponential Linear Unit (SELU) activation function.
It is defined as:
`f(x) = scale * alpha * (exp(x) - 1.) for x < 0`,
`f(x) = scale * x for x >= 0`.
Args:
x: Input tensor.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = np.array([-1., 0., 1.])
>>> x_selu = keras.ops.selu(x)
>>> print(x_selu)
array([-1.11133055, 0., 1.05070098], shape=(3,), dtype=float64)
"""
if any_symbolic_tensors((x,)):
return Selu().symbolic_call(x)
return backend.nn.selu(x)
class Gelu(Operation):
def __init__(self, approximate=True, *, name=None):
super().__init__(name=name)
self.approximate = approximate
def call(self, x):
return backend.nn.gelu(x, self.approximate)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_export(["keras.ops.gelu", "keras.ops.nn.gelu"])
def gelu(x, approximate=True):
"""Gaussian Error Linear Unit (GELU) activation function.
If `approximate` is `True`, it is defined as:
`f(x) = 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3)))`
Or if `approximate` is `False`, it is defined as:
`f(x) = x * P(X <= x) = 0.5 * x * (1 + erf(x / sqrt(2)))`,
where `P(X) ~ N(0, 1)`.
Args:
x: Input tensor.
approximate: Approximate version of GELU activation. Defaults to `True`.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = np.array([-1., 0., 1.])
>>> x_gelu = keras.ops.gelu(x)
>>> print(x_gelu)
array([-0.15865525, 0., 0.84134475], shape=(3,), dtype=float64)
"""
if any_symbolic_tensors((x,)):
return Gelu(approximate).symbolic_call(x)
return backend.nn.gelu(x, approximate)
class Celu(Operation):
def __init__(self, alpha=1.0, *, name=None):
super().__init__(name=name)
self.alpha = alpha
def call(self, x):
return backend.nn.celu(x, self.alpha)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_export(["keras.ops.celu", "keras.ops.nn.celu"])
def celu(x, alpha=1.0):
"""Continuously-differentiable exponential linear unit.
It is defined as:
`f(x) = alpha * (exp(x / alpha) - 1) for x < 0`, `f(x) = x for x >= 0`.
Args:
x: Input tensor.
alpha: the α value for the CELU formulation. Defaults to `1.0`.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = np.array([-1., 0., 1.])
>>> x_celu = keras.ops.celu(x)
>>> print(x_celu)
array([-0.63212056, 0. , 1. ], shape=(3,), dtype=float64)
"""
if any_symbolic_tensors((x,)):
return Celu(alpha).symbolic_call(x)
return backend.nn.celu(x, alpha)
class Glu(Operation):
def __init__(self, axis=-1, *, name=None):
super().__init__(name=name)
self.axis = axis
def call(self, x):
return backend.nn.glu(x, axis=self.axis)
def compute_output_spec(self, x):
output_shape = list(x.shape)
if output_shape[self.axis] is not None:
if output_shape[self.axis] % 2 != 0:
raise ValueError(
"axis size must be divisible by 2. "
f"Received: x.shape={x.shape} with axis={self.axis}"
)
output_shape[self.axis] = output_shape[self.axis] // 2
return KerasTensor(output_shape, dtype=x.dtype)
@keras_export(["keras.ops.glu", "keras.ops.nn.glu"])
def glu(x, axis=-1):
"""Gated Linear Unit (GLU) activation function.
It is defined as:
`f(x) = a * sigmoid(b)`
where `x` is split into `a` and `b` along the given axis.
Args:
x: Input tensor.
axis: The axis along which to split the input tensor. Defaults to `-1`.
Returns:
A tensor with the same shape as half of the input.
Example:
>>> x = np.array([-1., 0., 1. , 1.])
>>> x_glu = keras.ops.glu(x)
>>> print(x_glu)
array([-0.73105858, 0. ], shape=(2,), dtype=float64)
"""
if any_symbolic_tensors((x,)):
return Glu(axis).symbolic_call(x)
return backend.nn.glu(x, axis=axis)
class TanhShrink(Operation):
def call(self, x):
return backend.nn.tanh_shrink(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_export(["keras.ops.tanh_shrink", "keras.ops.nn.tanh_shrink"])
def tanh_shrink(x):
"""Applies the tanh shrink function element-wise.
It is defined as:
`f(x) = x - tanh(x)`.
Args:
x: Input tensor.
Returns:
Output tensor of the same shape as `x`, where each element is
transformed according to the tanh shrink operation.
Example:
>>> x = np.array([ -1., 0., 1.])
>>> x_tanh_shrink = keras.ops.tanh_shrink(x)
>>> print(x_tanh_shrink)
array([-0.23840584 0. 0.23840584], shape=(3,), dtype=float64)
"""
if any_symbolic_tensors((x,)):
return TanhShrink().symbolic_call(x)
return backend.nn.tanh_shrink(x)
class HardTanh(Operation):
def call(self, x):
return backend.nn.hard_tanh(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_export(["keras.ops.hard_tanh", "keras.ops.nn.hard_tanh"])
def hard_tanh(x):
"""Applies the HardTanh function element-wise.
It is defined as:
`f(x) = -1 for x < -1`, `f(x) = x for -1 <= x <= 1`, `f(x) = 1 for x > 1`.
Args:
x: Input tensor.
Returns:
Output tensor of same shape as `x`
where values are clamped between -1 and 1.
Example:
>>> x = np.array([-2., -1., 0., 1., 2.])
>>> x_hard_tanh = keras.ops.hard_tanh(x)
>>> print(x_hard_tanh)
array([-1. -1. 0. 1. 1.], shape=(5,), dtype=float64)
"""
if any_symbolic_tensors((x,)):
return HardTanh().symbolic_call(x)
return backend.nn.hard_tanh(x)
class HardShrink(Operation):
def __init__(self, threshold=0.5, *, name=None):
super().__init__(name=name)
self.threshold = threshold
def call(self, x):
return backend.nn.hard_shrink(x, self.threshold)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_export(["keras.ops.hard_shrink", "keras.ops.nn.hard_shrink"])
def hard_shrink(x, threshold=0.5):
"""Hard Shrink activation function.
The Hard Shrink function is a thresholding operation defined as:
`f(x) = x` if `|x| > threshold`,
`f(x) = 0` otherwise.
Args:
x: Input tensor.
threshold: Threshold value. Defaults to 0.5.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = np.array([-0.5, 0., 1.])
>>> x_hard_shrink = keras.ops.hard_shrink(x)
>>> print(x_hard_shrink)
array([0. 0. 1.], shape=(3,), dtype=float64)
"""
if any_symbolic_tensors((x,)):
return HardShrink(threshold).symbolic_call(x)
return backend.nn.hard_shrink(x, threshold)
class Threshold(Operation):
def __init__(self, threshold, default_value, *, name=None):
super().__init__(name=name)
self.threshold = threshold
self.default_value = default_value
def call(self, x):
return backend.nn.threshold(x, self.threshold, self.default_value)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_export(["keras.ops.threshold", "keras.ops.nn.threshold"])
def threshold(x, threshold, default_value):
"""Threshold activation function.
The function thresholds the input `x` as follows:
`f(x) = x` if `x > threshold`,
`f(x) = default_value` otherwise.
Args:
x: Input tensor.
threshold: The value that decides when to retain or replace x.
default_value: Value to assign when `x <= threshold`.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = np.array([-1.0, 0.0, 1.0, 2.0])
>>> x_threshold = keras.ops.threshold(x, 1, 0)
>>> print(x_threshold)
array([0., 0., 0., 2.], shape=(4,), dtype=float64)
"""
if any_symbolic_tensors((x,)):
return Threshold(threshold, default_value).symbolic_call(x)
return backend.nn.threshold(x, threshold, default_value)
class Softmax(Operation):
def __init__(self, axis=-1, *, name=None):
super().__init__(name=name)
self.axis = axis
def call(self, x):
return backend.nn.softmax(x, axis=self.axis)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_export(["keras.ops.softmax", "keras.ops.nn.softmax"])
def softmax(x, axis=-1):
"""Softmax activation function.
The elements of the output vector lie within the range `(0, 1)`, and their
total sum is exactly 1 (excluding the floating point rounding error).
Each vector is processed independently. The `axis` argument specifies the
axis along which the function is applied within the input.
It is defined as:
`f(x) = exp(x) / sum(exp(x))`
Args:
x: Input tensor.
axis: Integer, axis along which the softmax is applied.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = np.array([-1., 0., 1.])
>>> x_softmax = keras.ops.softmax(x)
>>> print(x_softmax)
array([0.09003057, 0.24472847, 0.66524096], shape=(3,), dtype=float64)
"""
# Don't use `backend.shape` since TensorFlow returns
# symbolic tensors for unknown shape which can trigger
# an error in TensorFlow graph execution.
if isinstance(axis, int) and x.shape[axis] == 1:
warnings.warn(
f"You are using a softmax over axis {axis} "
f"of a tensor of shape {x.shape}. This axis "
"has size 1. The softmax operation will always return "
"the value 1, which is likely not what you intended. "
"Did you mean to use a sigmoid instead?"
)
if any_symbolic_tensors((x,)):
return Softmax(axis).symbolic_call(x)
if isinstance(axis, tuple):
axis_to_keep = [v for v in range(len(x.shape)) if v not in axis]
x_transposed = backend.numpy.transpose(x, axes=(*axis_to_keep, *axis))
x_reshaped = backend.numpy.reshape(
x_transposed, (*[x.shape[v] for v in axis_to_keep], -1)
)
x = backend.nn.softmax(x_reshaped, axis=-1)
x = backend.numpy.reshape(x, x_transposed.shape)
x = backend.numpy.transpose(
x, axes=list(backend.numpy.argsort([*axis_to_keep, *axis]))
)
return x
else:
return backend.nn.softmax(x, axis=axis)
class LogSoftmax(Operation):
def __init__(self, axis=-1, *, name=None):
super().__init__(name=name)
self.axis = axis
def call(self, x):
return backend.nn.log_softmax(x, axis=self.axis)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_export(
[
"keras.ops.log_softmax",
"keras.ops.nn.log_softmax",
]
)
def log_softmax(x, axis=-1):
"""Log-softmax activation function.
It is defined as:
`f(x) = x - max(x) - log(sum(exp(x - max(x))))`
Args:
x: Input tensor.
axis: Integer, axis along which the log-softmax is applied.
Defaults to `-1`.
Returns:
A tensor with the same shape as `x`.
Example:
>>> x = np.array([-1., 0., 1.])
>>> x_log_softmax = keras.ops.log_softmax(x)
>>> print(x_log_softmax)
array([-2.40760596, -1.40760596, -0.40760596], shape=(3,), dtype=float64)
"""
if any_symbolic_tensors((x,)):
return LogSoftmax(axis).symbolic_call(x)
if isinstance(axis, tuple):
axis_to_keep = [v for v in range(len(x.shape)) if v not in axis]
x_transposed = backend.numpy.transpose(x, axes=(*axis_to_keep, *axis))
x_reshaped = backend.numpy.reshape(
x_transposed, (*[x.shape[v] for v in axis_to_keep], -1)
)
x = backend.nn.log_softmax(x_reshaped, axis=-1)
x = backend.numpy.reshape(x, x_transposed.shape)
x = backend.numpy.transpose(
x, axes=list(backend.numpy.argsort([*axis_to_keep, *axis]))
)
return x
else:
return backend.nn.log_softmax(x, axis=axis)
class Sparsemax(Operation):
def __init__(self, axis=-1, *, name=None):
super().__init__(name=name)
self.axis = axis
def call(self, x):
return backend.nn.sparsemax(x, axis=self.axis)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=x.dtype)
@keras_export(["keras.ops.sparsemax", "keras.ops.nn.sparsemax"])
def sparsemax(x, axis=-1):
"""Sparsemax activation function.
For each batch `i`, and class `j`,
sparsemax activation function is defined as:
`sparsemax(x)[i, j] = max(x[i, j] - τ(x[i, :]), 0).`
Args:
x: Input tensor.
axis: `int`, axis along which the sparsemax operation is applied.
Returns:
A tensor, output of sparsemax transformation. Has the same type and
shape as `x`.
Example:
>>> x = np.array([-1., 0., 1.])
>>> x_sparsemax = keras.ops.sparsemax(x)
>>> print(x_sparsemax)
array([0., 0., 1.], shape=(3,), dtype=float64)
"""
if any_symbolic_tensors((x,)):
return Sparsemax(axis).symbolic_call(x)
return backend.nn.sparsemax(x, axis=axis)
class MaxPool(Operation):
def __init__(
self,
pool_size,
strides=None,
padding="valid",
data_format=None,
*,
name=None,
):
super().__init__(name=name)
self.pool_size = pool_size
self.strides = strides
self.padding = padding.lower()
self.data_format = data_format
def call(self, inputs):
return backend.nn.max_pool(
inputs,
self.pool_size,
self.strides,
self.padding,
self.data_format,
)
def compute_output_spec(self, inputs):
output_shape = operation_utils.compute_pooling_output_shape(
inputs.shape,
self.pool_size,
self.strides,
self.padding,
self.data_format,
)
return KerasTensor(output_shape, dtype=inputs.dtype)
@keras_export(["keras.ops.max_pool", "keras.ops.nn.max_pool"])
def max_pool(
inputs,
pool_size,
strides=None,
padding="valid",
data_format=None,
):
"""Max pooling operation.
Args:
inputs: Tensor of rank N+2. `inputs` has shape
`(batch_size,) + inputs_spatial_shape + (num_channels,)` if
`data_format="channels_last"`, or
`(batch_size, num_channels) + inputs_spatial_shape` if
`data_format="channels_first"`. Pooling happens over the spatial
dimensions only.
pool_size: int or tuple/list of integers of size
`len(inputs_spatial_shape)`, specifying the size of the pooling
window for each spatial dimension of the input tensor. If
`pool_size` is int, then every spatial dimension shares the same
`pool_size`.
strides: int or tuple/list of integers of size
`len(inputs_spatial_shape)`. The stride of the sliding window for
each spatial dimension of the input tensor. If `strides` is int,
then every spatial dimension shares the same `strides`.
padding: string, either `"valid"` or `"same"`. `"valid"` means no
padding is applied, and `"same"` results in padding evenly to the
left/right or up/down of the input such that output has the
same height/width dimension as the input when `strides=1`.
data_format: A string, either `"channels_last"` or `"channels_first"`.
`data_format` determines the ordering of the dimensions in the
inputs. If `data_format="channels_last"`, `inputs` is of shape
`(batch_size, ..., channels)` while if
`data_format="channels_first"`, `inputs` is of shape
`(batch_size, channels, ...)`.
Returns:
A tensor of rank N+2, the result of the max pooling operation.
"""
data_format = standardize_data_format(data_format)
padding = padding.lower()
if any_symbolic_tensors((inputs,)):
return MaxPool(
pool_size,
strides,
padding,
data_format,
).symbolic_call(inputs)
return backend.nn.max_pool(inputs, pool_size, strides, padding, data_format)
class AdaptiveMaxPool(Operation):
"""Adaptive max pooling operation."""
def __init__(self, output_size, data_format=None, *, name=None):
super().__init__(name=name)
self.output_size = output_size
self.data_format = data_format
def call(self, inputs):
return backend.nn.adaptive_max_pool(
inputs, output_size=self.output_size, data_format=self.data_format
)
def compute_output_spec(self, inputs):
if self.data_format == "channels_last":
spatial_dims = self.output_size
output_shape = (
inputs.shape[: -len(self.output_size)]
+ spatial_dims
+ (inputs.shape[-1],)
)
else:
spatial_dims = self.output_size
output_shape = (inputs.shape[0], inputs.shape[1]) + spatial_dims
return backend.KerasTensor(output_shape, dtype=inputs.dtype)
@keras_export(["keras.ops.adaptive_max_pool", "keras.ops.nn.adaptive_max_pool"])
def adaptive_max_pool(
inputs,
output_size,
data_format=None,
):
"""Adaptive max pooling operation.
Applies an adaptive max pooling operation that automatically computes the
kernel size and stride to pool the input to the specified `output_size`.
This operation is useful when you want a fixed output size regardless of
input size, commonly used in models like ResNet for global feature
extraction.
Args:
inputs: Tensor of rank 4. Input tensor of shape:
- If `data_format="channels_last"`:
`(batch_size, height, width, channels)`.
- If `data_format="channels_first"`:
`(batch_size, channels, height, width)`.
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/ops/node_test.py | keras/src/ops/node_test.py | import numpy as np
from keras.src import Layer
from keras.src import testing
from keras.src.backend import KerasTensor
from keras.src.ops.node import Node
class DummyLayer(Layer):
    """Minimal `Layer` subclass used as the operation in the `Node` tests."""
    pass
class NodeTest(testing.TestCase):
    """Unit tests for `Node`, the graph-edge record between operations."""
    # Testing a simple node and layer combination **a**
    def test_simple_case(self):
        """An input node (no call args) reports `is_input` and its outputs."""
        shape = (2, 3, 4)
        a = KerasTensor(shape=shape)
        a_layer = DummyLayer()
        # Empty call_args/call_kwargs mark this node as an input node.
        node = Node(a_layer, outputs=a, call_args=(), call_kwargs={})
        self.assertEqual(node.is_input, True)
        self.assertEqual(node.output_tensors[0], a)
        self.assertEqual(node.output_tensors[0].shape, shape)
    # Testing a simple node connection with args and kwargs **a** --> **b**
    def test_single_wired_layers(self):
        """Wiring a --> b registers inbound/outbound nodes on both layers."""
        shape = (2, 3, 4)
        a = KerasTensor(shape=shape)
        a_layer = DummyLayer()
        node1 = Node(a_layer, outputs=a, call_args=(), call_kwargs={})
        b = KerasTensor(shape=shape)
        x = KerasTensor(shape=shape)
        kwargs = {"x": x}
        args = (a,)
        b_layer = DummyLayer()
        # Passing `a` as a call arg makes node1 a parent of node2; Node
        # construction wires the layer graph as a side effect.
        node2 = Node(b_layer, outputs=b, call_args=args, call_kwargs=kwargs)
        self.assertEqual(node1.is_input, True)
        self.assertEqual(node2.is_input, False)
        self.assertEqual(node1.operation, a_layer)
        self.assertEqual(node2.operation, b_layer)
        self.assertEqual(node1.output_tensors[0], a)
        self.assertEqual(node1.output_tensors[0].shape, shape)
        self.assertEqual(a_layer._inbound_nodes[0], node1)
        self.assertEqual(a_layer._outbound_nodes[0], node2)
        self.assertEqual(b_layer._inbound_nodes[0], node2)
        self.assertEqual(node2.parent_nodes[0], node1)
        # Input tensors include positional and keyword tensor arguments.
        self.assertEqual(node2.input_tensors, [a, x])
        self.assertEqual(node2.arguments.kwargs, kwargs)
        self.assertEqual(node2.arguments.args, args)
    # Testing when output tensor is not Keras Tensor
    def test_output_tensor_error(self):
        """Non-`KerasTensor` outputs are rejected with a `ValueError`."""
        a = np.random.rand(2, 3, 4)
        a_layer = DummyLayer()
        with self.assertRaisesRegex(
            ValueError, "operation outputs must be tensors."
        ):
            Node(a_layer, outputs=a, call_args=(), call_kwargs={})
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/ops/linalg.py | keras/src/ops/linalg.py | from keras.src import backend
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.backend import KerasTensor
from keras.src.backend import any_symbolic_tensors
from keras.src.ops.operation import Operation
from keras.src.ops.operation_utils import reduce_shape
class Cholesky(Operation):
    """Symbolic op wrapping `backend.linalg.cholesky`."""

    def __init__(self, upper=False, *, name=None):
        super().__init__(name=name)
        self.upper = upper

    def call(self, x):
        return _cholesky(x, self.upper)

    def compute_output_spec(self, x):
        # The Cholesky factor has the same shape and dtype as the input.
        _assert_2d(x)
        _assert_square(x)
        return KerasTensor(x.shape, x.dtype)


@keras_export(["keras.ops.cholesky", "keras.ops.linalg.cholesky"])
def cholesky(x, upper=False):
    """Computes the Cholesky decomposition of a positive semi-definite matrix.

    Args:
        x: Input tensor of shape `(..., M, M)`.
        upper (bool): If True, returns the upper-triangular Cholesky factor.
            If False (default), returns the lower-triangular Cholesky factor.

    Returns:
        A tensor of shape `(..., M, M)` representing the Cholesky factor of `x`.

    Raises:
        ValueError: If the backend decomposition fails.
    """
    if any_symbolic_tensors((x,)):
        return Cholesky(upper=upper).symbolic_call(x)
    return _cholesky(x, upper=upper)


def _cholesky(x, upper=False):
    # Eager path: validate the input, then defer to the backend.
    x = backend.convert_to_tensor(x)
    _assert_2d(x)
    _assert_square(x)
    try:
        return backend.linalg.cholesky(x, upper=upper)
    except Exception as e:
        # Chain the backend error so the original cause stays visible.
        raise ValueError(f"Cholesky decomposition failed: {e}") from e
class CholeskyInverse(Operation):
    """Symbolic op wrapping `backend.linalg.cholesky_inverse`."""

    def __init__(self, upper=False, *, name=None):
        super().__init__(name=name)
        self.upper = upper

    def call(self, x):
        return _cholesky_inverse(x, self.upper)

    def compute_output_spec(self, x):
        # The inverse has the same shape and dtype as the input matrix.
        _assert_2d(x)
        _assert_square(x)
        return KerasTensor(x.shape, x.dtype)


@keras_export(
    ["keras.ops.cholesky_inverse", "keras.ops.linalg.cholesky_inverse"]
)
def cholesky_inverse(x, upper=False):
    """Computes the inverse of a symmetric positive-definite matrix.

    Args:
        x: Input tensor of shape `(..., M, M)`.
        upper (bool): Determines whether to use the upper- or lower-triangular
            factor for the internal computation. Defaults to False.

    Returns:
        A tensor of shape `(..., M, M)` representing the inverse of `x`.

    Raises:
        ValueError: If `x` is not a symmetric positive-definite matrix.
    """
    if any_symbolic_tensors((x,)):
        return CholeskyInverse(upper=upper).symbolic_call(x)
    return _cholesky_inverse(x, upper=upper)


def _cholesky_inverse(x, upper=False):
    # Eager path: validate the input, then defer to the backend.
    x = backend.convert_to_tensor(x)
    _assert_2d(x)
    _assert_square(x)
    try:
        return backend.linalg.cholesky_inverse(x, upper=upper)
    except Exception as e:
        # Chain the backend error so the original cause stays visible.
        raise ValueError(f"Cholesky inverse failed: {e}") from e
class Det(Operation):
    """Symbolic op for the matrix determinant."""

    def call(self, x):
        return _det(x)

    def compute_output_spec(self, x):
        # The determinant collapses the trailing (M, M) matrix dimensions.
        _assert_2d(x)
        _assert_square(x)
        batch_shape = x.shape[:-2]
        return KerasTensor(batch_shape, x.dtype)


@keras_export(["keras.ops.det", "keras.ops.linalg.det"])
def det(x):
    """Computes the determinant of a square tensor.

    Args:
        x: Input tensor of shape `(..., M, M)`.

    Returns:
        A tensor of shape `(...,)` representing the determinant of `x`.
    """
    if not any_symbolic_tensors((x,)):
        return _det(x)
    return Det().symbolic_call(x)


def _det(x):
    # Eager implementation: validate, then delegate to the backend.
    tensor = backend.convert_to_tensor(x)
    _assert_2d(tensor)
    _assert_square(tensor)
    return backend.linalg.det(tensor)
class Eig(Operation):
    """Symbolic op for the eigendecomposition of a general square matrix."""

    def call(self, x):
        return _eig(x)

    def compute_output_spec(self, x):
        _assert_square(x)
        _assert_2d(x)
        # Eigenvalues drop the last dim; eigenvectors keep the full shape.
        values_spec = KerasTensor(x.shape[:-1], x.dtype)
        vectors_spec = KerasTensor(x.shape, x.dtype)
        return (values_spec, vectors_spec)


@keras_export(["keras.ops.eig", "keras.ops.linalg.eig"])
def eig(x):
    """Computes the eigenvalues and eigenvectors of a square matrix.

    Args:
        x: Input tensor of shape `(..., M, M)`.

    Returns:
        A tuple of two tensors: a tensor of shape `(..., M)` containing
        eigenvalues and a tensor of shape `(..., M, M)` containing
        eigenvectors.
    """
    if not any_symbolic_tensors((x,)):
        return _eig(x)
    return Eig().symbolic_call(x)


def _eig(x):
    # Eager implementation: validate, then delegate to the backend.
    tensor = backend.convert_to_tensor(x)
    _assert_square(tensor)
    _assert_2d(tensor)
    return backend.linalg.eig(tensor)
class Eigh(Operation):
    # Symbolic op wrapping `backend.linalg.eigh`.
    def call(self, x):
        return _eigh(x)
    def compute_output_spec(self, x):
        # Eigenvalues drop the last dim; eigenvectors keep the input shape.
        _assert_square(x)
        _assert_2d(x)
        return (
            KerasTensor(x.shape[:-1], x.dtype),
            KerasTensor(x.shape, x.dtype),
        )
@keras_export(["keras.ops.eigh", "keras.ops.linalg.eigh"])
def eigh(x):
    """Computes the eigendecomposition of a complex Hermitian
    (or real symmetric) matrix.
    Args:
        x: Input tensor of shape `(..., M, M)`.
    Returns:
        A tuple of two tensors: a tensor of shape `(..., M)` containing
        eigenvalues and a tensor of shape `(..., M, M)` containing
        eigenvectors.
    """
    if any_symbolic_tensors((x,)):
        return Eigh().symbolic_call(x)
    return _eigh(x)
def _eigh(x):
    # Eager path: validate, then delegate to the backend.
    x = backend.convert_to_tensor(x)
    _assert_square(x)
    _assert_2d(x)
    return backend.linalg.eigh(x)
class Inv(Operation):
    """Symbolic op for the matrix inverse."""

    def call(self, x):
        return _inv(x)

    def compute_output_spec(self, x):
        # The inverse of an (M, M) matrix is again (M, M).
        _assert_2d(x)
        _assert_square(x)
        return KerasTensor(x.shape, x.dtype)


@keras_export(["keras.ops.inv", "keras.ops.linalg.inv"])
def inv(x):
    """Computes the inverse of a square tensor.

    Args:
        x: Input tensor of shape `(..., M, M)`.

    Returns:
        A tensor of shape `(..., M, M)` representing the inverse of `x`.
    """
    if not any_symbolic_tensors((x,)):
        return _inv(x)
    return Inv().symbolic_call(x)


def _inv(x):
    # Eager implementation: validate, then delegate to the backend.
    tensor = backend.convert_to_tensor(x)
    _assert_2d(tensor)
    _assert_square(tensor)
    return backend.linalg.inv(tensor)
class LuFactor(Operation):
    """Symbolic op wrapping `backend.linalg.lu_factor`."""

    def call(self, x):
        return _lu_factor(x)

    def compute_output_spec(self, x):
        _assert_2d(x)
        batch_shape = x.shape[:-2]
        m, n = x.shape[-2:]
        # Pivots carry one entry per elimination step: min(M, N).
        k = min(m, n)
        return (
            KerasTensor(batch_shape + (m, n), x.dtype),
            KerasTensor(batch_shape + (k,), x.dtype),
        )


@keras_export(["keras.ops.lu_factor", "keras.ops.linalg.lu_factor"])
def lu_factor(x):
    """Computes the lower-upper decomposition of a square matrix.

    Args:
        x: A tensor of shape `(..., M, M)`.

    Returns:
        A tuple of two tensors: a tensor of shape `(..., M, M)` containing the
        lower and upper triangular matrices and a tensor of shape `(..., M)`
        containing the pivots.
    """
    if any_symbolic_tensors((x,)):
        return LuFactor().symbolic_call(x)
    return _lu_factor(x)


def _lu_factor(x):
    x = backend.convert_to_tensor(x)
    _assert_2d(x)
    if backend.backend() == "tensorflow":
        # TensorFlow's LU kernel only handles square matrices; fail early
        # with an actionable message instead of an opaque backend error.
        try:
            _assert_square(x)
        except ValueError as e:
            # Chain the cause so the original shape error stays visible.
            raise ValueError(
                f"LU decomposition failed: {e}. LU decomposition is only "
                "supported for square matrices in Tensorflow."
            ) from e
    return backend.linalg.lu_factor(x)
class Norm(Operation):
    """Symbolic op wrapping `backend.linalg.norm`.

    Validates the `ord`/`axis` combination and infers the reduced output
    shape and a floating-point output dtype.
    """
    def __init__(self, ord=None, axis=None, keepdims=False, *, name=None):
        super().__init__(name=name)
        if isinstance(ord, str):
            # Only the Frobenius and nuclear matrix norms have string names.
            if ord not in ("fro", "nuc"):
                raise ValueError(
                    "Invalid `ord` argument. "
                    "Expected one of {'fro', 'nuc'} when using string. "
                    f"Received: ord={ord}"
                )
        if isinstance(axis, int):
            # Normalize a single int axis to a list.
            axis = [axis]
        self.ord = ord
        self.axis = axis
        self.keepdims = keepdims
    def compute_output_spec(self, x):
        output_dtype = backend.standardize_dtype(x.dtype)
        # Norms of integer/bool inputs are computed in floating point.
        if "int" in output_dtype or output_dtype == "bool":
            output_dtype = backend.floatx()
        if self.axis is None:
            # Reduce over all dimensions by default.
            axis = tuple(range(len(x.shape)))
        else:
            axis = self.axis
        num_axes = len(axis)
        # One reduced axis -> vector norm; two -> matrix norm. Validate
        # `ord` against the selected flavor.
        if num_axes == 1 and isinstance(self.ord, str):
            raise ValueError(
                "Invalid `ord` argument for vector norm. "
                f"Received: ord={self.ord}"
            )
        elif num_axes == 2 and self.ord not in (
            None,
            "fro",
            "nuc",
            float("inf"),
            float("-inf"),
            1,
            -1,
            2,
            -2,
        ):
            raise ValueError(
                "Invalid `ord` argument for matrix norm. "
                f"Received: ord={self.ord}"
            )
        return KerasTensor(
            reduce_shape(x.shape, axis=self.axis, keepdims=self.keepdims),
            dtype=output_dtype,
        )
    def call(self, x):
        x = backend.convert_to_tensor(x)
        return backend.linalg.norm(
            x, ord=self.ord, axis=self.axis, keepdims=self.keepdims
        )
@keras_export(["keras.ops.norm", "keras.ops.linalg.norm"])
def norm(x, ord=None, axis=None, keepdims=False):
    """Matrix or vector norm.

    This function is able to return one of eight different matrix norms, or
    one of an infinite number of vector norms (described below), depending on
    the value of the `ord` parameter.

    Args:
        x: Input tensor.
        ord: Order of the norm (see table under Notes). The default is `None`.
        axis: If `axis` is an integer, it specifies the axis of `x` along
            which to compute the vector norms. If `axis` is a 2-tuple, it
            specifies the axes that hold 2-D matrices, and the matrix norms of
            these matrices are computed.
        keepdims: If this is set to `True`, the axes which are reduced are
            left in the result as dimensions with size one.

    Note:
        For values of `ord < 1`, the result is, strictly speaking, not a
        mathematical 'norm', but it may still be useful for various numerical
        purposes. The following norms can be calculated:
        - For matrices:
            - `ord=None`: Frobenius norm
            - `ord="fro"`: Frobenius norm
            - `ord="nuc"`: nuclear norm
            - `ord=np.inf`: `max(sum(abs(x), axis=1))`
            - `ord=-np.inf`: `min(sum(abs(x), axis=1))`
            - `ord=0`: not supported
            - `ord=1`: `max(sum(abs(x), axis=0))`
            - `ord=-1`: `min(sum(abs(x), axis=0))`
            - `ord=2`: 2-norm (largest sing. value)
            - `ord=-2`: smallest singular value
            - other: not supported
        - For vectors:
            - `ord=None`: 2-norm
            - `ord="fro"`: not supported
            - `ord="nuc"`: not supported
            - `ord=np.inf`: `max(abs(x))`
            - `ord=-np.inf`: `min(abs(x))`
            - `ord=0`: `sum(x != 0)`
            - `ord=1`: as below
            - `ord=-1`: as below
            - `ord=2`: as below
            - `ord=-2`: as below
            - other: `sum(abs(x)**ord)**(1./ord)`

    Returns:
        Norm of the matrix or vector(s).

    Example:

    >>> x = keras.ops.reshape(keras.ops.arange(9, dtype="float32") - 4, (3, 3))
    >>> keras.ops.linalg.norm(x)
    7.7459664
    """
    # Symbolic inputs route through the Norm op (which also validates the
    # `ord`/`axis` combination); eager inputs go straight to the backend.
    if any_symbolic_tensors((x,)):
        return Norm(ord=ord, axis=axis, keepdims=keepdims).symbolic_call(x)
    x = backend.convert_to_tensor(x)
    return backend.linalg.norm(x, ord=ord, axis=axis, keepdims=keepdims)
class Qr(Operation):
    """Symbolic op wrapping `backend.linalg.qr`.

    Args:
        mode: Either `"reduced"` or `"complete"`, selecting the economy or
            full-size factorization.
    """
    def __init__(self, mode="reduced", *, name=None):
        super().__init__(name=name)
        if mode not in {"reduced", "complete"}:
            raise ValueError(
                "`mode` argument value not supported. "
                "Expected one of {'reduced', 'complete'}. "
                f"Received: mode={mode}"
            )
        self.mode = mode
    def compute_output_spec(self, x):
        if len(x.shape) < 2:
            raise ValueError(
                "Input should have rank >= 2. Received: "
                f"input.shape = {x.shape}"
            )
        m = x.shape[-2]
        n = x.shape[-1]
        # Static trailing dims are required to size the factors below.
        if m is None or n is None:
            raise ValueError(
                "Input should have its last 2 dimensions "
                "fully-defined. Received: "
                f"input.shape = {x.shape}"
            )
        k = min(m, n)
        base = tuple(x.shape[:-2])
        if self.mode == "reduced":
            # Economy factorization: q is (M, K), r is (K, N).
            return (
                KerasTensor(shape=base + (m, k), dtype=x.dtype),
                KerasTensor(shape=base + (k, n), dtype=x.dtype),
            )
        # 'complete' mode.
        return (
            KerasTensor(shape=base + (m, m), dtype=x.dtype),
            KerasTensor(shape=base + (m, n), dtype=x.dtype),
        )
    def call(self, x):
        x = backend.convert_to_tensor(x)
        return backend.linalg.qr(x, mode=self.mode)
@keras_export(["keras.ops.qr", "keras.ops.linalg.qr"])
def qr(x, mode="reduced"):
    """Computes the QR decomposition of a tensor.

    Args:
        x: Input tensor of shape `(..., M, N)`.
        mode: A string specifying the mode of the QR decomposition.
            - 'reduced': Returns the reduced QR decomposition. (default)
            - 'complete': Returns the complete QR decomposition.

    Returns:
        A tuple `(q, r)` where `q` has shape `(..., M, K)` and is the
        orthogonal factor, and `r` has shape `(..., K, N)` and is the upper
        triangular factor, with `K = min(M, N)`.

    Example:

    >>> x = keras.ops.convert_to_tensor([[1., 2.], [3., 4.], [5., 6.]])
    >>> q, r = qr(x)
    >>> print(q)
    array([[-0.16903079 0.897085]
    [-0.5070925 0.2760267 ]
    [-0.8451542 -0.34503305]], shape=(3, 2), dtype=float32)
    """
    if not any_symbolic_tensors((x,)):
        tensor = backend.convert_to_tensor(x)
        return backend.linalg.qr(tensor, mode=mode)
    return Qr(mode=mode).symbolic_call(x)
class Solve(Operation):
    """Symbolic op for solving the linear system `a x = b`."""

    def call(self, a, b):
        return _solve(a, b)

    def compute_output_spec(self, a, b):
        # `a` must be a square matrix; `b` may be a vector or a matrix with
        # compatible leading dimensions. The solution mirrors `b`.
        _assert_2d(a)
        _assert_square(a)
        _assert_1d(b)
        _assert_a_b_compat(a, b)
        return KerasTensor(b.shape, b.dtype)


@keras_export(["keras.ops.solve", "keras.ops.linalg.solve"])
def solve(a, b):
    """Solves a linear system of equations given by `a x = b`.

    Args:
        a: A tensor of shape `(..., M, M)` representing the coefficients
            matrix.
        b: A tensor of shape `(..., M)` or `(..., M, N)` representing the
            right-hand side or "dependent variable" matrix.

    Returns:
        A tensor of shape `(..., M)` or `(..., M, N)` representing the
        solution of the linear system. Returned shape is identical to `b`.
    """
    if not any_symbolic_tensors((a, b)):
        return _solve(a, b)
    return Solve().symbolic_call(a, b)


def _solve(a, b):
    # Eager implementation: validate the operands, then delegate.
    lhs = backend.convert_to_tensor(a)
    rhs = backend.convert_to_tensor(b)
    _assert_2d(lhs)
    _assert_square(lhs)
    _assert_1d(rhs)
    _assert_a_b_compat(lhs, rhs)
    return backend.linalg.solve(lhs, rhs)
class SolveTriangular(Operation):
    # Symbolic op for solving `a x = b` with a triangular matrix `a`.
    def __init__(self, lower=False, *, name=None):
        super().__init__(name=name)
        # Whether `a` is lower- (True) or upper- (False) triangular.
        self.lower = lower
    def call(self, a, b):
        return _solve_triangular(a, b, self.lower)
    def compute_output_spec(self, a, b):
        # The solution has the same shape and dtype as `b`.
        _assert_2d(a)
        _assert_square(a)
        _assert_1d(b)
        _assert_a_b_compat(a, b)
        return KerasTensor(b.shape, b.dtype)
@keras_export(
    ["keras.ops.solve_triangular", "keras.ops.linalg.solve_triangular"]
)
def solve_triangular(a, b, lower=False):
    """Solves a linear system of equations given by `a x = b`.
    Args:
        a: A tensor of shape `(..., M, M)` representing the coefficients
            matrix.
        b: A tensor of shape `(..., M)` or `(..., M, N)` representing the
            right-hand side or "dependent variable" matrix.
        lower: A boolean. Whether `a` is lower-triangular (`True`) or
            upper-triangular (`False`). Defaults to `False`.
    Returns:
        A tensor of shape `(..., M)` or `(..., M, N)` representing the solution
        of the linear system. Returned shape is identical to `b`.
    """
    if any_symbolic_tensors((a, b)):
        return SolveTriangular(lower).symbolic_call(a, b)
    return _solve_triangular(a, b, lower)
def _solve_triangular(a, b, lower=False):
    # Eager path: validate the operands, then delegate to the backend.
    a = backend.convert_to_tensor(a)
    b = backend.convert_to_tensor(b)
    _assert_2d(a)
    _assert_square(a)
    _assert_1d(b)
    _assert_a_b_compat(a, b)
    return backend.linalg.solve_triangular(a, b, lower)
class SVD(Operation):
    # Symbolic op wrapping `backend.linalg.svd`.
    def __init__(self, full_matrices=True, compute_uv=True, *, name=None):
        super().__init__(name=name)
        self.full_matrices = full_matrices
        self.compute_uv = compute_uv
    def call(self, x):
        return _svd(x, self.full_matrices, self.compute_uv)
    def compute_output_spec(self, x):
        _assert_2d(x)
        rows, columns = x.shape[-2:]
        batches = x.shape[:-2]
        # One singular value per min(M, N).
        s_shape = batches + (min(rows, columns),)
        if self.full_matrices:
            # Full U is (M, M) and full V is (N, N).
            u_shape = batches + (rows, rows)
            v_shape = batches + (columns, columns)
        else:
            # Thin factors keep only the leading min(M, N) columns/rows.
            u_shape = batches + (rows, min(rows, columns))
            v_shape = batches + (min(rows, columns), columns)
        if self.compute_uv:
            return (
                KerasTensor(u_shape, x.dtype),
                KerasTensor(s_shape, x.dtype),
                KerasTensor(v_shape, x.dtype),
            )
        # Only the singular values are requested.
        return KerasTensor(s_shape, x.dtype)
@keras_export(["keras.ops.svd", "keras.ops.linalg.svd"])
def svd(x, full_matrices=True, compute_uv=True):
    """Computes the singular value decomposition of a matrix.
    Args:
        x: Input tensor of shape `(..., M, N)`.
        full_matrices: If `True` (default), compute full-sized `u` and `v`.
            If `False`, compute the thin factors instead.
        compute_uv: If `True` (default), return `(u, s, v)`. If `False`,
            return only the singular values.
    Returns:
        If `compute_uv` is `True`, a tuple of three tensors: a tensor
        containing the left singular vectors, a tensor containing the
        singular values and a tensor containing the right singular vectors.
        Otherwise, a single tensor containing the singular values.
    """
    if any_symbolic_tensors((x,)):
        return SVD(full_matrices, compute_uv).symbolic_call(x)
    return _svd(x, full_matrices, compute_uv)
def _svd(x, full_matrices=True, compute_uv=True):
    # Eager path: validate, then delegate to the backend.
    x = backend.convert_to_tensor(x)
    _assert_2d(x)
    return backend.linalg.svd(x, full_matrices, compute_uv)
class Lstsq(Operation):
    # Symbolic op wrapping `backend.linalg.lstsq`.
    def __init__(self, rcond=None, *, name=None):
        super().__init__(name=name)
        # Cut-off ratio for small singular values of `a`.
        self.rcond = rcond
    def call(self, a, b):
        return backend.linalg.lstsq(a, b, rcond=self.rcond)
    def compute_output_spec(self, a, b):
        if len(a.shape) != 2:
            raise ValueError(
                f"Expected a to have rank 2. Received: a.shape={a.shape}"
            )
        if len(b.shape) not in (1, 2):
            raise ValueError(
                f"Expected b to have rank 1 or 2. Received: b.shape={b.shape}"
            )
        m, n = a.shape
        # `a` and `b` must agree on the number of equations (rows).
        if b.shape[0] != m:
            raise ValueError(
                "Expected b.shape[0] to be equal to "
                "a.shape[0]. Received: "
                f"a.shape={a.shape}, b.shape={b.shape}"
            )
        # The solution has one row per column of `a`, keeping `b`'s trailing
        # dimension when `b` is a matrix.
        if len(b.shape) == 2:
            k = b.shape[1]
            x = KerasTensor((n, k), dtype=a.dtype)
        else:
            x = KerasTensor((n,), dtype=a.dtype)
        return x
@keras_export(["keras.ops.lstsq", "keras.ops.linalg.lstsq"])
def lstsq(a, b, rcond=None):
    """Return the least-squares solution to a linear matrix equation.

    Computes the vector x that approximately solves the equation
    `a @ x = b`. The equation may be under-, well-, or over-determined
    (i.e., the number of linearly independent rows of a can be less than,
    equal to, or greater than its number of linearly independent columns).
    If a is square and of full rank, then `x` (but for round-off error)
    is the exact solution of the equation. Else, `x` minimizes the
    L2 norm of `b - a * x`.

    If there are multiple minimizing solutions,
    the one with the smallest L2 norm is returned.

    Args:
        a: "Coefficient" matrix of shape `(M, N)`.
        b: Ordinate or "dependent variable" values,
            of shape `(M,)` or `(M, K)`.
            If `b` is two-dimensional, the least-squares solution
            is calculated for each of the K columns of `b`.
        rcond: Cut-off ratio for small singular values of `a`.
            For the purposes of rank determination,
            singular values are treated as zero if they are
            smaller than rcond times the largest
            singular value of `a`.

    Returns:
        Tensor with shape `(N,)` or `(N, K)` containing
        the least-squares solutions.

    **NOTE:** The output differs from `numpy.linalg.lstsq`.
    NumPy returns a tuple with four elements, the first of which
    being the least-squares solutions and the others
    being essentially never used.
    Keras only returns the first value. This is done both
    to ensure consistency across backends (which cannot be achieved
    for the other values) and to simplify the API.
    """
    # Symbolic inputs defer to the Lstsq op; eager inputs go straight to
    # the backend.
    if any_symbolic_tensors((a, b)):
        return Lstsq(rcond=rcond).symbolic_call(a, b)
    return backend.linalg.lstsq(a, b, rcond=rcond)
def _assert_1d(*arrays):
    """Raise `ValueError` if any argument is a scalar (rank 0)."""
    for array in arrays:
        if array.ndim >= 1:
            continue
        raise ValueError(
            f"Expected input to have rank >= 1. Received scalar input {array}."
        )
def _assert_2d(*arrays):
    """Raise `ValueError` if any argument has rank < 2."""
    for array in arrays:
        if array.ndim >= 2:
            continue
        raise ValueError(
            "Expected input to have rank >= 2. "
            f"Received input with shape {array.shape}."
        )
def _assert_square(*arrays):
    """Raise `ValueError` if any argument's trailing two dims differ."""
    for array in arrays:
        rows, cols = array.shape[-2:]
        if rows == cols:
            continue
        raise ValueError(
            "Expected a square matrix. "
            f"Received non-square input with shape {array.shape}"
        )
def _assert_a_b_compat(a, b):
    """Raise `ValueError` when `a` and `b` cannot be combined in a solve.

    Equal ranks compare the row dimensions; when `b` has exactly one more
    dim than `a`, the last dimensions are compared instead.
    """
    rank_a, rank_b = a.ndim, b.ndim
    if rank_a == rank_b:
        if a.shape[-2] != b.shape[-2]:
            raise ValueError(
                "Incompatible shapes between `a` and `b`. "
                "Expected `a.shape[-2] == b.shape[-2]`. "
                f"Received: a.shape={a.shape}, b.shape={b.shape}"
            )
    elif rank_a == rank_b - 1:
        if a.shape[-1] != b.shape[-1]:
            raise ValueError(
                "Incompatible shapes between `a` and `b`. "
                "Expected `a.shape[-1] == b.shape[-1]`. "
                f"Received: a.shape={a.shape}, b.shape={b.shape}"
            )
class JVP(Operation):
    """Symbolic op for a forward-mode Jacobian-vector product.

    Args:
        has_aux: Whether `fun` returns `(output, aux)` instead of `output`.
    """
    def __init__(self, has_aux=False, *, name=None):
        super().__init__(name=name)
        self.has_aux = has_aux
    def call(self, fun, primals, tangents):
        """Computes the JVP of `fun` at `primals` along `tangents`.

        Args:
            fun: A callable that takes tensors (or nested structures) as input
                and returns a tensor (or nested structure) as output.
            primals: Input tensors (or nested structures) at which the
                Jacobian of `fun` is evaluated.
            tangents: Tensors (or nested structures) representing the
                direction vectors for the JVP. Must have the same structure as
                `primals`.

        Returns:
            If `has_aux` is False:
                A tuple (primals_out, tangents_out) where:
                - primals_out: Output of `fun(*primals)`
                - tangents_out: JVP of `fun` at `primals` along `tangents`
            If `has_aux` is True:
                A tuple (primals_out, tangents_out, aux) where:
                - aux: Auxiliary data returned by `fun`
        """
        return backend.linalg.jvp(fun, primals, tangents, has_aux=self.has_aux)
    def compute_output_spec(self, fun, primals, tangents):
        # Infer primal output spec
        if self.has_aux:
            primals_out_spec, aux_spec = backend.compute_output_spec(
                fun, *primals
            )
        else:
            primals_out_spec = backend.compute_output_spec(fun, *primals)
        # Tangents output should match primals output in structure and shape
        tangents_out_spec = tree.map_structure(
            lambda x: KerasTensor(x.shape, x.dtype), primals_out_spec
        )
        if self.has_aux:
            return primals_out_spec, tangents_out_spec, aux_spec
        return primals_out_spec, tangents_out_spec
@keras_export(["keras.ops.jvp", "keras.ops.linalg.jvp"])
def jvp(fun, primals, tangents, has_aux=False):
    """Computes a (forward-mode) Jacobian-vector product of `fun`.

    Args:
        fun: Function to be differentiated. Its arguments should be arrays,
            scalars, or standard Python containers of arrays or scalars. It
            should return an array, scalar, or standard Python container of
            arrays or scalars.
        primals: The primal values at which the Jacobian of `fun` should be
            evaluated. Should be either a tuple or a list of arguments,
            and its length should be equal to the number of positional
            parameters of `fun`.
        tangents: The tangent vector for which the Jacobian-vector product
            should be evaluated. Should be either a tuple or a list of
            tangents, with the same tree structure and array shapes as
            `primals`.
        has_aux: Optional, bool. Indicates whether `fun` returns a pair where
            the first element is considered the output of the mathematical
            function to be differentiated and the second element is
            auxiliary data. Default is False.

    Returns:
        If `has_aux` is False, returns a (`primals_out`, `tangents_out`) pair,
        where `primals_out` is `fun(*primals)`, and `tangents_out` is the
        Jacobian-vector product of `fun` evaluated at `primals` with
        `tangents`. The `tangents_out` value has the same Python tree
        structure and shapes as `primals_out`.
        If `has_aux` is True, returns a (`primals_out`, `tangents_out`, `aux`)
        tuple where `aux` is the auxiliary data returned by `fun`.

    Example:

    >>> from keras import ops
    >>> a1, a2 = ops.convert_to_tensor(0.1), ops.convert_to_tensor(0.2)
    >>> primals, tangents = ops.jvp(ops.sin, (a1,), (a2,))
    >>> primals
    0.09983342
    >>> tangents
    0.19900084
    """
    # Symbolic inputs defer to the JVP op; eager inputs go straight to the
    # backend implementation.
    if any_symbolic_tensors((primals, tangents)):
        return JVP(has_aux=has_aux).symbolic_call(fun, primals, tangents)
    return backend.linalg.jvp(fun, primals, tangents, has_aux=has_aux)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/ops/numpy.py | keras/src/ops/numpy.py | import builtins
import re
import numpy as np
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.backend import KerasTensor
from keras.src.backend import any_symbolic_tensors
from keras.src.backend.common import dtypes
from keras.src.backend.common.backend_utils import canonicalize_axis
from keras.src.backend.common.backend_utils import to_tuple_or_list
from keras.src.ops import operation_utils
from keras.src.ops.operation import Operation
from keras.src.ops.operation_utils import broadcast_shapes
from keras.src.ops.operation_utils import reduce_shape
class Rot90(Operation):
    """Symbolic op wrapping `backend.numpy.rot90`.

    Args:
        k: Number of 90-degree counterclockwise rotations.
        axes: Tuple of the two axes that define the plane of rotation.
    """

    def __init__(self, k=1, axes=(0, 1), *, name=None):
        super().__init__(name=name)
        self.k = k
        self.axes = axes

    def call(self, array):
        return backend.numpy.rot90(array, k=self.k, axes=self.axes)

    def compute_output_spec(self, array):
        array_shape = list(array.shape)
        if len(array_shape) < 2:
            raise ValueError(
                "Input array must have at least 2 dimensions. "
                f"Received: array.shape={array_shape}"
            )
        if len(self.axes) != 2 or self.axes[0] == self.axes[1]:
            raise ValueError(
                f"Invalid axes: {self.axes}. "
                "Axes must be a tuple of two different dimensions."
            )
        # An odd number of quarter-turns swaps the two rotated dimensions;
        # an even number (k=0, k=2, ...) leaves the shape unchanged.
        if self.k % 2 == 1:
            axis1, axis2 = self.axes
            array_shape[axis1], array_shape[axis2] = (
                array_shape[axis2],
                array_shape[axis1],
            )
        return KerasTensor(shape=array_shape, dtype=array.dtype)
@keras_export(["keras.ops.rot90", "keras.ops.numpy.rot90"])
def rot90(array, k=1, axes=(0, 1)):
    """Rotate an array by 90 degrees in the plane specified by axes.

    This function rotates an array counterclockwise
    by 90 degrees `k` times in the plane specified by `axes`.
    Supports arrays of two or more dimensions.

    Args:
        array: Input array to rotate.
        k: Number of times the array is rotated by 90 degrees.
        axes: A tuple of two integers specifying the
            plane of rotation (defaults to `(0, 1)`).

    Returns:
        Rotated array.

    Examples:

    >>> import numpy as np
    >>> from keras import ops
    >>> m = np.array([[1, 2], [3, 4]])
    >>> rotated = ops.rot90(m)
    >>> rotated
    array([[2, 4],
    [1, 3]])

    >>> m = np.arange(8).reshape((2, 2, 2))
    >>> rotated = ops.rot90(m, k=1, axes=(1, 2))
    >>> rotated
    array([[[1, 3],
    [0, 2]],
    [[5, 7],
    [4, 6]]])
    """
    # Symbolic inputs defer to the Rot90 op; eager inputs go straight to
    # the backend.
    if any_symbolic_tensors((array,)):
        return Rot90(k=k, axes=axes).symbolic_call(array)
    return backend.numpy.rot90(array, k=k, axes=axes)
def shape_equal(shape1, shape2, axis=None, allow_none=True):
    """Return whether two shapes match.

    Args:
        shape1: A list or tuple of integers for first shape to be compared.
        shape2: A list or tuple of integers for second shape to be compared.
        axis: An integer, list, or tuple of integers (optional):
            Axes to ignore during comparison. Defaults to `None`.
        allow_none (bool, optional): If `True`, allows `None` in a shape
            to match any value in the corresponding position of the other
            shape. Defaults to `True`.

    Returns:
        bool: `True` if shapes are considered equal based on the criteria,
        `False` otherwise.

    Examples:

    >>> shape_equal((32, 64, 128), (32, 64, 128))
    True
    >>> shape_equal((32, 64, 128), (32, 64, 127))
    False
    >>> shape_equal((32, 64, None), (32, 64, 128), allow_none=True)
    True
    >>> shape_equal((32, 64, None), (32, 64, 128), allow_none=False)
    False
    >>> shape_equal((32, 64, 128), (32, 63, 128), axis=1)
    True
    >>> shape_equal((32, 64, 128), (32, 63, 127), axis=(1, 2))
    True
    >>> shape_equal((32, 64, 128), (32, 63, 127), axis=[1,2])
    True
    >>> shape_equal((32, 64), (32, 64, 128))
    False
    """
    if len(shape1) != len(shape2):
        return False
    dims1, dims2 = list(shape1), list(shape2)
    if axis is not None:
        ignored = (axis,) if isinstance(axis, int) else axis
        for ax in ignored:
            # Mask ignored positions with a sentinel so they always match.
            dims1[ax] = -1
            dims2[ax] = -1
    if allow_none:
        for i in range(len(dims1)):
            # A `None` wildcard adopts the other shape's value (or stays
            # `None` when both are unknown, which still compares equal).
            if dims1[i] is None:
                dims1[i] = dims2[i]
            elif dims2[i] is None:
                dims2[i] = dims1[i]
    return dims1 == dims2
class Absolute(Operation):
    """Symbolic op for element-wise absolute value."""

    def call(self, x):
        return backend.numpy.absolute(x)

    def compute_output_spec(self, x):
        # Shape, dtype and sparseness all pass through unchanged.
        is_sparse = getattr(x, "sparse", False)
        return KerasTensor(x.shape, dtype=x.dtype, sparse=is_sparse)


@keras_export(["keras.ops.absolute", "keras.ops.numpy.absolute"])
def absolute(x):
    """Compute the absolute value element-wise.

    `keras.ops.abs` is a shorthand for this function.

    Args:
        x: Input tensor.

    Returns:
        An array containing the absolute value of each element in `x`.

    Example:

    >>> x = keras.ops.convert_to_tensor([-1.2, 1.2])
    >>> keras.ops.absolute(x)
    array([1.2, 1.2], dtype=float32)
    """
    if not any_symbolic_tensors((x,)):
        return backend.numpy.absolute(x)
    return Absolute().symbolic_call(x)


class Abs(Absolute):
    """Alias op for `Absolute`."""

    pass


@keras_export(["keras.ops.abs", "keras.ops.numpy.abs"])
def abs(x):
    """Shorthand for `keras.ops.absolute`."""
    return absolute(x)
class Add(Operation):
    # Symbolic op for element-wise (broadcasting) addition.
    def call(self, x1, x2):
        return backend.numpy.add(x1, x2)
    def compute_output_spec(self, x1, x2):
        # Either operand may be a plain Python scalar, hence the getattr
        # fallbacks for shape/dtype/sparse below.
        x1_shape = getattr(x1, "shape", [])
        x2_shape = getattr(x2, "shape", [])
        output_shape = broadcast_shapes(x1_shape, x2_shape)
        output_dtype = dtypes.result_type(
            getattr(x1, "dtype", type(x1)),
            getattr(x2, "dtype", type(x2)),
        )
        # The sum is sparse only if both operands are sparse.
        x1_sparse = getattr(x1, "sparse", False)
        x2_sparse = getattr(x2, "sparse", False)
        output_sparse = x1_sparse and x2_sparse
        return KerasTensor(
            output_shape, dtype=output_dtype, sparse=output_sparse
        )
@keras_export(["keras.ops.add", "keras.ops.numpy.add"])
def add(x1, x2):
    """Add arguments element-wise.

    Args:
        x1: First input tensor.
        x2: Second input tensor.

    Returns:
        The tensor containing the element-wise sum of `x1` and `x2`.

    Examples:

    >>> x1 = keras.ops.convert_to_tensor([1, 4])
    >>> x2 = keras.ops.convert_to_tensor([5, 6])
    >>> keras.ops.add(x1, x2)
    array([6, 10], dtype=int32)

    `keras.ops.add` also broadcasts shapes:

    >>> x1 = keras.ops.convert_to_tensor(
    ...     [[5, 4],
    ...      [5, 6]]
    ... )
    >>> x2 = keras.ops.convert_to_tensor([5, 6])
    >>> keras.ops.add(x1, x2)
    array([[10 10]
    [10 12]], shape=(2, 2), dtype=int32)
    """
    if any_symbolic_tensors((x1, x2)):
        return Add().symbolic_call(x1, x2)
    return backend.numpy.add(x1, x2)
class All(Operation):
    """Symbolic op for a logical AND reduction."""

    def __init__(self, axis=None, keepdims=False, *, name=None):
        super().__init__(name=name)
        # Normalize a single int axis to a list.
        self.axis = [axis] if isinstance(axis, int) else axis
        self.keepdims = keepdims

    def call(self, x):
        return backend.numpy.all(x, axis=self.axis, keepdims=self.keepdims)

    def compute_output_spec(self, x):
        out_shape = reduce_shape(
            x.shape, axis=self.axis, keepdims=self.keepdims
        )
        # Logical reductions always produce booleans.
        return KerasTensor(out_shape, dtype="bool")


@keras_export(["keras.ops.all", "keras.ops.numpy.all"])
def all(x, axis=None, keepdims=False):
    """Test whether all array elements along a given axis evaluate to `True`.

    Args:
        x: Input tensor.
        axis: An integer or tuple of integers that represent the axis along
            which a logical AND reduction is performed. The default
            (`axis=None`) is to perform a logical AND over all the dimensions
            of the input array. `axis` may be negative, in which case it
            counts for the last to the first axis.
        keepdims: If `True`, axes which are reduced are left in the result as
            dimensions with size one. With this option, the result will
            broadcast correctly against the input array. Defaults to `False`.

    Returns:
        The tensor containing the logical AND reduction over the `axis`.

    Examples:

    >>> x = keras.ops.convert_to_tensor([True, False])
    >>> keras.ops.all(x)
    array(False, shape=(), dtype=bool)

    >>> x = keras.ops.convert_to_tensor([[True, False], [True, True]])
    >>> keras.ops.all(x, axis=0)
    array([ True False], shape=(2,), dtype=bool)

    `keepdims=True` outputs a tensor with dimensions reduced to one.

    >>> x = keras.ops.convert_to_tensor([[True, False], [True, True]])
    >>> keras.ops.all(x, keepdims=True)
    array([[False]], shape=(1, 1), dtype=bool)
    """
    if not any_symbolic_tensors((x,)):
        return backend.numpy.all(x, axis=axis, keepdims=keepdims)
    return All(axis=axis, keepdims=keepdims).symbolic_call(x)
class Angle(Operation):
    # Symbolic op for the element-wise phase angle.
    def call(self, x):
        return backend.numpy.angle(x)
    def compute_output_spec(self, x):
        # Non-tensor inputs (plain scalars) fall back to the global floatx.
        dtype = backend.standardize_dtype(getattr(x, "dtype", backend.floatx()))
        if dtype == "int64":
            # presumably avoids float64 promotion of int64 under Keras dtype
            # rules — TODO confirm against `dtypes.result_type`.
            dtype = backend.floatx()
        else:
            dtype = dtypes.result_type(dtype, float)
        # NOTE(review): for complex inputs this appears to yield a complex
        # dtype while the angle itself is real-valued — verify against the
        # eager backends.
        return KerasTensor(x.shape, dtype=dtype)
@keras_export(["keras.ops.angle", "keras.ops.numpy.angle"])
def angle(x):
    """Element-wise angle of a complex tensor.

    Arguments:
        x: Input tensor. Can be real or complex.

    Returns:
        Output tensor of same shape as x. containing the angle of each element
        (in radians).

    Example:

    >>> x = keras.ops.convert_to_tensor([[1 + 3j, 2 - 5j], [4 - 3j, 3 + 2j]])
    >>> keras.ops.angle(x)
    array([[ 1.2490457, -1.19029 ],
    [-0.6435011, 0.5880026]], dtype=float32)
    """
    if any_symbolic_tensors((x,)):
        return Angle().symbolic_call(x)
    return backend.numpy.angle(x)
class Any(Operation):
    """Symbolic wrapper around `backend.numpy.any`."""

    def __init__(self, axis=None, keepdims=False, *, name=None):
        super().__init__(name=name)
        # Normalize a scalar axis to a single-element list.
        self.axis = [axis] if isinstance(axis, int) else axis
        self.keepdims = keepdims

    def call(self, x):
        return backend.numpy.any(
            x, axis=self.axis, keepdims=self.keepdims
        )

    def compute_output_spec(self, x):
        # A logical-OR reduction always produces booleans.
        out_shape = reduce_shape(
            x.shape, axis=self.axis, keepdims=self.keepdims
        )
        return KerasTensor(out_shape, dtype="bool")
@keras_export(["keras.ops.any", "keras.ops.numpy.any"])
def any(x, axis=None, keepdims=False):
    """Test whether any array element along a given axis evaluates to `True`.

    Args:
        x: Input tensor.
        axis: An integer or tuple of integers that represent the axis along
            which a logical OR reduction is performed. The default
            (`axis=None`) is to perform a logical OR over all the dimensions
            of the input array. `axis` may be negative, in which case it
            counts from the last to the first axis.
        keepdims: If `True`, axes which are reduced are left in the result as
            dimensions with size one. With this option, the result will
            broadcast correctly against the input array. Defaults to `False`.

    Returns:
        The tensor containing the logical OR reduction over the `axis`.

    Examples:
        >>> x = keras.ops.convert_to_tensor([True, False])
        >>> keras.ops.any(x)
        array(True, shape=(), dtype=bool)

        >>> x = keras.ops.convert_to_tensor([[True, False], [True, True]])
        >>> keras.ops.any(x, axis=0)
        array([ True True], shape=(2,), dtype=bool)

        `keepdims=True` outputs a tensor with dimensions reduced to one.

        >>> x = keras.ops.convert_to_tensor([[True, False], [True, True]])
        >>> keras.ops.any(x, keepdims=True)
        array([[ True]], shape=(1, 1), dtype=bool)
    """
    # Build a graph node when any input is symbolic; otherwise run eagerly.
    if any_symbolic_tensors((x,)):
        return Any(axis=axis, keepdims=keepdims).symbolic_call(x)
    return backend.numpy.any(x, axis=axis, keepdims=keepdims)
class Amax(Operation):
    """Symbolic wrapper around `backend.numpy.amax`."""

    def __init__(self, axis=None, keepdims=False, *, name=None):
        super().__init__(name=name)
        # Normalize a scalar axis to a single-element list.
        self.axis = [axis] if isinstance(axis, int) else axis
        self.keepdims = keepdims

    def call(self, x):
        return backend.numpy.amax(
            x, axis=self.axis, keepdims=self.keepdims
        )

    def compute_output_spec(self, x):
        # amax preserves the input dtype; only the shape is reduced.
        out_shape = reduce_shape(
            x.shape, axis=self.axis, keepdims=self.keepdims
        )
        return KerasTensor(out_shape, dtype=x.dtype)
@keras_export(["keras.ops.amax", "keras.ops.numpy.amax"])
def amax(x, axis=None, keepdims=False):
    """Returns the maximum of an array or maximum value along an axis.

    Args:
        x: Input tensor.
        axis: Axis along which to compute the maximum.
            By default (`axis=None`), find the maximum value in all the
            dimensions of the input array.
        keepdims: If `True`, axes which are reduced are left in the result as
            dimensions that are broadcast to the size of the original
            input tensor. Defaults to `False`.

    Returns:
        An array with the maximum value. If `axis=None`, the result is a scalar
        value representing the maximum element in the entire array. If `axis`
        is given, the result is an array with the maximum values along
        the specified axis.

    Examples:
        >>> x = keras.ops.convert_to_tensor([[1, 3, 5], [2, 3, 6]])
        >>> keras.ops.amax(x)
        array(6, dtype=int32)

        >>> x = keras.ops.convert_to_tensor([[1, 6, 8], [1, 5, 2]])
        >>> keras.ops.amax(x, axis=0)
        array([1, 6, 8], dtype=int32)

        >>> x = keras.ops.convert_to_tensor([[1, 6, 8], [1, 5, 2]])
        >>> keras.ops.amax(x, axis=1, keepdims=True)
        array([[8], [5]], dtype=int32)
    """
    # Build a graph node when any input is symbolic; otherwise run eagerly.
    if any_symbolic_tensors((x,)):
        return Amax(axis=axis, keepdims=keepdims).symbolic_call(x)
    return backend.numpy.amax(x, axis=axis, keepdims=keepdims)
class Amin(Operation):
    """Symbolic wrapper around `backend.numpy.amin`."""

    def __init__(self, axis=None, keepdims=False, *, name=None):
        super().__init__(name=name)
        # Normalize a scalar axis to a single-element list.
        self.axis = [axis] if isinstance(axis, int) else axis
        self.keepdims = keepdims

    def call(self, x):
        return backend.numpy.amin(
            x, axis=self.axis, keepdims=self.keepdims
        )

    def compute_output_spec(self, x):
        # amin preserves the input dtype; only the shape is reduced.
        out_shape = reduce_shape(
            x.shape, axis=self.axis, keepdims=self.keepdims
        )
        return KerasTensor(out_shape, dtype=x.dtype)
@keras_export(["keras.ops.amin", "keras.ops.numpy.amin"])
def amin(x, axis=None, keepdims=False):
    """Returns the minimum of an array or minimum value along an axis.

    Args:
        x: Input tensor.
        axis: Axis along which to compute the minimum.
            By default (`axis=None`), find the minimum value in all the
            dimensions of the input array.
        keepdims: If `True`, axes which are reduced are left in the result as
            dimensions that are broadcast to the size of the original
            input tensor. Defaults to `False`.

    Returns:
        An array with the minimum value. If `axis=None`, the result is a scalar
        value representing the minimum element in the entire array. If `axis`
        is given, the result is an array with the minimum values along
        the specified axis.

    Examples:
        >>> x = keras.ops.convert_to_tensor([1, 3, 5, 2, 3, 6])
        >>> keras.ops.amin(x)
        array(1, dtype=int32)

        >>> x = keras.ops.convert_to_tensor([[1, 6, 8], [7, 5, 3]])
        >>> keras.ops.amin(x, axis=0)
        array([1, 5, 3], dtype=int32)

        >>> x = keras.ops.convert_to_tensor([[1, 6, 8], [7, 5, 3]])
        >>> keras.ops.amin(x, axis=1, keepdims=True)
        array([[1], [3]], dtype=int32)
    """
    # Build a graph node when any input is symbolic; otherwise run eagerly.
    if any_symbolic_tensors((x,)):
        return Amin(axis=axis, keepdims=keepdims).symbolic_call(x)
    return backend.numpy.amin(x, axis=axis, keepdims=keepdims)
class Append(Operation):
    def __init__(self, axis=None, *, name=None):
        super().__init__(name=name)
        # `axis=None` means both inputs are flattened before concatenation.
        self.axis = axis

    def call(self, x1, x2):
        return backend.numpy.append(x1, x2, axis=self.axis)

    def compute_output_spec(self, x1, x2):
        """Infer the output spec: promoted dtype, concatenated shape."""
        x1_shape = x1.shape
        x2_shape = x2.shape
        # Promote dtypes; non-tensor scalars contribute their Python type.
        dtype = dtypes.result_type(
            getattr(x1, "dtype", type(x1)),
            getattr(x2, "dtype", type(x2)),
        )
        if self.axis is None:
            # Flattened concatenation: the total element count, unless a
            # dynamic dimension makes the length unknown.
            if None in x1_shape or None in x2_shape:
                output_shape = [None]
            else:
                output_shape = [int(np.prod(x1_shape) + np.prod(x2_shape))]
            return KerasTensor(output_shape, dtype=dtype)
        # All dimensions except `axis` must match for axis-wise append.
        if not shape_equal(x1_shape, x2_shape, [self.axis]):
            raise ValueError(
                "`append` requires inputs to have the same shape except the "
                f"`axis={self.axis}`, but received shape {x1_shape} and "
                f"{x2_shape}."
            )
        output_shape = list(x1_shape)
        output_shape[self.axis] = x1_shape[self.axis] + x2_shape[self.axis]
        return KerasTensor(output_shape, dtype=dtype)
@keras_export(["keras.ops.append", "keras.ops.numpy.append"])
def append(
    x1,
    x2,
    axis=None,
):
    """Append tensor `x2` to the end of tensor `x1`.

    Args:
        x1: First input tensor.
        x2: Second input tensor.
        axis: Axis along which tensor `x2` is appended to tensor `x1`.
            If `None`, both tensors are flattened before use.

    Returns:
        A tensor with the values of `x2` appended to `x1`.

    Examples:
        >>> x1 = keras.ops.convert_to_tensor([1, 2, 3])
        >>> x2 = keras.ops.convert_to_tensor([[4, 5, 6], [7, 8, 9]])
        >>> keras.ops.append(x1, x2)
        array([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=int32)

        When `axis` is specified, `x1` and `x2` must have compatible shapes.

        >>> x1 = keras.ops.convert_to_tensor([[1, 2, 3], [4, 5, 6]])
        >>> x2 = keras.ops.convert_to_tensor([[7, 8, 9]])
        >>> keras.ops.append(x1, x2, axis=0)
        array([[1, 2, 3],
            [4, 5, 6],
            [7, 8, 9]], dtype=int32)
        >>> x3 = keras.ops.convert_to_tensor([7, 8, 9])
        >>> keras.ops.append(x1, x3, axis=0)
        Traceback (most recent call last):
            ...
        TypeError: Cannot concatenate arrays with different numbers of
        dimensions: got (2, 3), (3,).
    """
    # Build a graph node when any input is symbolic; otherwise run eagerly.
    if any_symbolic_tensors((x1, x2)):
        return Append(axis=axis).symbolic_call(x1, x2)
    return backend.numpy.append(x1, x2, axis=axis)
class Arange(Operation):
    def __init__(self, dtype=None, *, name=None):
        super().__init__(name=name)
        # Standardize once so `call` and `compute_output_spec` agree.
        self.dtype = None if dtype is None else backend.standardize_dtype(dtype)

    def call(self, start, stop=None, step=None):
        return backend.numpy.arange(start, stop, step=step, dtype=self.dtype)

    def compute_output_spec(self, start, stop=None, step=None):
        # Mirror numpy's 1-arg form: arange(stop) == arange(0, stop).
        if stop is None:
            start, stop = 0, start
        if step is None:
            step = 1
        # Output length follows numpy's rule: ceil((stop - start) / step).
        output_shape = [int(np.ceil((stop - start) / step))]
        dtype = self.dtype
        if dtype is None:
            # Infer the result dtype from the (normalized) arguments.
            # NOTE(review): after normalization above, `stop` and `step` are
            # never None, so both guards below always append.
            dtypes_to_resolve = [getattr(start, "dtype", type(start))]
            if stop is not None:
                dtypes_to_resolve.append(getattr(stop, "dtype", type(stop)))
            if step is not None:
                dtypes_to_resolve.append(getattr(step, "dtype", type(step)))
            dtype = dtypes.result_type(*dtypes_to_resolve)
        return KerasTensor(output_shape, dtype=dtype)
@keras_export(["keras.ops.arange", "keras.ops.numpy.arange"])
def arange(start, stop=None, step=None, dtype=None):
    """Return evenly spaced values within a given interval.

    `arange` can be called with a varying number of positional arguments:

    * `arange(stop)`: Values are generated within the half-open interval
        `[0, stop)` (in other words, the interval including start but
        excluding stop).
    * `arange(start, stop)`: Values are generated within the half-open
        interval `[start, stop)`.
    * `arange(start, stop, step)`: Values are generated within the half-open
        interval `[start, stop)`, with spacing between values given by step.

    Args:
        start: Integer or real, representing the start of the interval. The
            interval includes this value.
        stop: Integer or real, representing the end of the interval. The
            interval does not include this value, except in some cases where
            `step` is not an integer and floating point round-off affects the
            length of `out`. Defaults to `None`.
        step: Integer or real, representing the spacing between values. For
            any output `out`, this is the distance between two adjacent
            values, `out[i+1] - out[i]`. The default step size is 1. If `step`
            is specified as a positional argument, `start` must also be given.
        dtype: The type of the output array. If `dtype` is not given, infer
            the data type from the other input arguments.

    Returns:
        Tensor of evenly spaced values.
        For floating point arguments, the length of the result is
        `ceil((stop - start)/step)`. Because of floating point overflow, this
        rule may result in the last element of out being greater than stop.

    Examples:
        >>> keras.ops.arange(3)
        array([0, 1, 2], dtype=int32)

        >>> keras.ops.arange(3.0)
        array([0., 1., 2.], dtype=float32)

        >>> keras.ops.arange(3, 7)
        array([3, 4, 5, 6], dtype=int32)

        >>> keras.ops.arange(3, 7, 2)
        array([3, 5], dtype=int32)
    """
    # Build a graph node when any input is symbolic; otherwise run eagerly.
    if any_symbolic_tensors((start, stop, step)):
        return Arange(dtype=dtype).symbolic_call(start, stop, step=step)
    return backend.numpy.arange(start, stop, step=step, dtype=dtype)
class Arccos(Operation):
    """Symbolic wrapper around `backend.numpy.arccos`."""

    def call(self, x):
        return backend.numpy.arccos(x)

    def compute_output_spec(self, x):
        # Output is always floating point: int64 maps to the default float
        # dtype, everything else is promoted with Python `float`.
        in_dtype = backend.standardize_dtype(
            getattr(x, "dtype", backend.floatx())
        )
        if in_dtype == "int64":
            out_dtype = backend.floatx()
        else:
            out_dtype = dtypes.result_type(in_dtype, float)
        return KerasTensor(x.shape, dtype=out_dtype)
@keras_export(["keras.ops.arccos", "keras.ops.numpy.arccos"])
def arccos(x):
    """Trigonometric inverse cosine, element-wise.

    The inverse of `cos` so that, if `y = cos(x)`, then `x = arccos(y)`.

    Args:
        x: Input tensor.

    Returns:
        Tensor of the angle of the ray intersecting the unit circle at the
        given x-coordinate in radians `[0, pi]`.

    Example:
        >>> x = keras.ops.convert_to_tensor([1, -1])
        >>> keras.ops.arccos(x)
        array([0.0, 3.1415927], dtype=float32)
    """
    # Build a graph node when any input is symbolic; otherwise run eagerly.
    if any_symbolic_tensors((x,)):
        return Arccos().symbolic_call(x)
    return backend.numpy.arccos(x)
class Arccosh(Operation):
    """Symbolic wrapper around `backend.numpy.arccosh`."""

    def call(self, x):
        return backend.numpy.arccosh(x)

    def compute_output_spec(self, x):
        # Output is always floating point: int64 maps to the default float
        # dtype, everything else is promoted with Python `float`.
        in_dtype = backend.standardize_dtype(
            getattr(x, "dtype", backend.floatx())
        )
        if in_dtype == "int64":
            out_dtype = backend.floatx()
        else:
            out_dtype = dtypes.result_type(in_dtype, float)
        return KerasTensor(x.shape, dtype=out_dtype)
@keras_export(["keras.ops.arccosh", "keras.ops.numpy.arccosh"])
def arccosh(x):
    """Inverse hyperbolic cosine, element-wise.

    Arguments:
        x: Input tensor.

    Returns:
        Output tensor of same shape as `x`.

    Example:
        >>> x = keras.ops.convert_to_tensor([10, 100])
        >>> keras.ops.arccosh(x)
        array([2.993223, 5.298292], dtype=float32)
    """
    # Build a graph node when any input is symbolic; otherwise run eagerly.
    if any_symbolic_tensors((x,)):
        return Arccosh().symbolic_call(x)
    return backend.numpy.arccosh(x)
class Arcsin(Operation):
    """Symbolic wrapper around `backend.numpy.arcsin`."""

    def call(self, x):
        return backend.numpy.arcsin(x)

    def compute_output_spec(self, x):
        # Output is always floating point: int64 maps to the default float
        # dtype, everything else is promoted with Python `float`.
        in_dtype = backend.standardize_dtype(
            getattr(x, "dtype", backend.floatx())
        )
        if in_dtype == "int64":
            out_dtype = backend.floatx()
        else:
            out_dtype = dtypes.result_type(in_dtype, float)
        # arcsin(0) == 0, so sparsity is preserved.
        return KerasTensor(
            x.shape, dtype=out_dtype, sparse=getattr(x, "sparse", False)
        )
@keras_export(["keras.ops.arcsin", "keras.ops.numpy.arcsin"])
def arcsin(x):
    """Inverse sine, element-wise.

    Args:
        x: Input tensor.

    Returns:
        Tensor of the inverse sine of each element in `x`, in radians and in
        the closed interval `[-pi/2, pi/2]`.

    Example:
        >>> x = keras.ops.convert_to_tensor([1, -1, 0])
        >>> keras.ops.arcsin(x)
        array([ 1.5707964, -1.5707964, 0.], dtype=float32)
    """
    # Build a graph node when any input is symbolic; otherwise run eagerly.
    if any_symbolic_tensors((x,)):
        return Arcsin().symbolic_call(x)
    return backend.numpy.arcsin(x)
class Arcsinh(Operation):
    """Symbolic wrapper around `backend.numpy.arcsinh`."""

    def call(self, x):
        return backend.numpy.arcsinh(x)

    def compute_output_spec(self, x):
        # Output is always floating point: int64 maps to the default float
        # dtype, everything else is promoted with Python `float`.
        in_dtype = backend.standardize_dtype(
            getattr(x, "dtype", backend.floatx())
        )
        if in_dtype == "int64":
            out_dtype = backend.floatx()
        else:
            out_dtype = dtypes.result_type(in_dtype, float)
        # arcsinh(0) == 0, so sparsity is preserved.
        return KerasTensor(
            x.shape, dtype=out_dtype, sparse=getattr(x, "sparse", False)
        )
@keras_export(["keras.ops.arcsinh", "keras.ops.numpy.arcsinh"])
def arcsinh(x):
    """Inverse hyperbolic sine, element-wise.

    Arguments:
        x: Input tensor.

    Returns:
        Output tensor of same shape as `x`.

    Example:
        >>> x = keras.ops.convert_to_tensor([1, -1, 0])
        >>> keras.ops.arcsinh(x)
        array([0.88137364, -0.88137364, 0.0], dtype=float32)
    """
    # Build a graph node when any input is symbolic; otherwise run eagerly.
    if any_symbolic_tensors((x,)):
        return Arcsinh().symbolic_call(x)
    return backend.numpy.arcsinh(x)
class Arctan(Operation):
    """Symbolic wrapper around `backend.numpy.arctan`."""

    def call(self, x):
        return backend.numpy.arctan(x)

    def compute_output_spec(self, x):
        # Output is always floating point: int64 maps to the default float
        # dtype, everything else is promoted with Python `float`.
        in_dtype = backend.standardize_dtype(
            getattr(x, "dtype", backend.floatx())
        )
        if in_dtype == "int64":
            out_dtype = backend.floatx()
        else:
            out_dtype = dtypes.result_type(in_dtype, float)
        # arctan(0) == 0, so sparsity is preserved.
        return KerasTensor(
            x.shape, dtype=out_dtype, sparse=getattr(x, "sparse", False)
        )
@keras_export(["keras.ops.arctan", "keras.ops.numpy.arctan"])
def arctan(x):
    """Trigonometric inverse tangent, element-wise.

    Args:
        x: Input tensor.

    Returns:
        Tensor of the inverse tangent of each element in `x`, in the interval
        `[-pi/2, pi/2]`.

    Example:
        >>> x = keras.ops.convert_to_tensor([0, 1])
        >>> keras.ops.arctan(x)
        array([0., 0.7853982], dtype=float32)
    """
    # Build a graph node when any input is symbolic; otherwise run eagerly.
    if any_symbolic_tensors((x,)):
        return Arctan().symbolic_call(x)
    return backend.numpy.arctan(x)
class Arctan2(Operation):
    """Symbolic wrapper around `backend.numpy.arctan2`."""

    def call(self, x1, x2):
        return backend.numpy.arctan2(x1, x2)

    def compute_output_spec(self, x1, x2):
        # Scalars have no `shape`; treat them as rank 0 for broadcasting.
        out_shape = broadcast_shapes(
            getattr(x1, "shape", []), getattr(x2, "shape", [])
        )
        # The result is always floating point.
        dtype = dtypes.result_type(
            backend.standardize_dtype(getattr(x1, "dtype", backend.floatx())),
            backend.standardize_dtype(getattr(x2, "dtype", backend.floatx())),
            float,
        )
        return KerasTensor(out_shape, dtype=dtype)
@keras_export(["keras.ops.arctan2", "keras.ops.numpy.arctan2"])
def arctan2(x1, x2):
    """Element-wise arc tangent of `x1/x2` choosing the quadrant correctly.

    The quadrant (i.e., branch) is chosen so that `arctan2(x1, x2)` is the
    signed angle in radians between the ray ending at the origin and passing
    through the point `(1, 0)`, and the ray ending at the origin and passing
    through the point `(x2, x1)`. (Note the role reversal: the "y-coordinate"
    is the first function parameter, the "x-coordinate" is the second.) By
    IEEE convention, this function is defined for `x2 = +/-0` and for either
    or both of `x1` and `x2` `= +/-inf`.

    Args:
        x1: First input tensor.
        x2: Second input tensor.

    Returns:
        Tensor of angles in radians, in the range `[-pi, pi]`.

    Examples:
        Consider four points in different quadrants:

        >>> x = keras.ops.convert_to_tensor([-1, +1, +1, -1])
        >>> y = keras.ops.convert_to_tensor([-1, -1, +1, +1])
        >>> keras.ops.arctan2(y, x) * 180 / numpy.pi
        array([-135., -45., 45., 135.], dtype=float32)

        Note the order of the parameters. `arctan2` is defined also when x2=0
        and at several other points, obtaining values in the range `[-pi, pi]`:

        >>> keras.ops.arctan2(
        ...     keras.ops.array([1., -1.]),
        ...     keras.ops.array([0., 0.]),
        ... )
        array([ 1.5707964, -1.5707964], dtype=float32)
        >>> keras.ops.arctan2(
        ...     keras.ops.array([0., 0., numpy.inf]),
        ...     keras.ops.array([+0., -0., numpy.inf]),
        ... )
        array([0., 3.1415925, 0.7853982], dtype=float32)
    """
    # Build a graph node when any input is symbolic; otherwise run eagerly.
    if any_symbolic_tensors((x1, x2)):
        return Arctan2().symbolic_call(x1, x2)
    return backend.numpy.arctan2(x1, x2)
class Arctanh(Operation):
    """Symbolic wrapper around `backend.numpy.arctanh`."""

    def call(self, x):
        return backend.numpy.arctanh(x)

    def compute_output_spec(self, x):
        # Output is always floating point: int64 maps to the default float
        # dtype, everything else is promoted with Python `float`.
        in_dtype = backend.standardize_dtype(
            getattr(x, "dtype", backend.floatx())
        )
        if in_dtype == "int64":
            out_dtype = backend.floatx()
        else:
            out_dtype = dtypes.result_type(in_dtype, float)
        # arctanh(0) == 0, so sparsity is preserved.
        return KerasTensor(
            x.shape, dtype=out_dtype, sparse=getattr(x, "sparse", False)
        )
@keras_export(["keras.ops.arctanh", "keras.ops.numpy.arctanh"])
def arctanh(x):
    """Inverse hyperbolic tangent, element-wise.

    Arguments:
        x: Input tensor.

    Returns:
        Output tensor of same shape as `x`.

    Example:
        >>> x = keras.ops.convert_to_tensor([0, -0.5])
        >>> keras.ops.arctanh(x)
        array([ 0. , -0.54930615], dtype=float32)
    """
    # Build a graph node when any input is symbolic; otherwise run eagerly.
    if any_symbolic_tensors((x,)):
        return Arctanh().symbolic_call(x)
    return backend.numpy.arctanh(x)
class Argmax(Operation):
    """Symbolic wrapper around `backend.numpy.argmax`."""

    def __init__(self, axis=None, keepdims=False, *, name=None):
        super().__init__(name=name)
        self.axis = axis
        self.keepdims = keepdims

    def call(self, x):
        return backend.numpy.argmax(x, axis=self.axis, keepdims=self.keepdims)

    def compute_output_spec(self, x):
        # Indices are always int32. With keepdims the shape is unchanged;
        # with axis=None the whole tensor reduces to a scalar.
        if self.keepdims:
            out_shape = x.shape
        elif self.axis is None:
            out_shape = []
        else:
            out_shape = reduce_shape(x.shape, axis=[self.axis])
        return KerasTensor(out_shape, dtype="int32")
@keras_export(["keras.ops.argmax", "keras.ops.numpy.argmax"])
def argmax(x, axis=None, keepdims=False):
    """Returns the indices of the maximum values along an axis.

    Args:
        x: Input tensor.
        axis: By default, the index is into the flattened tensor, otherwise
            along the specified axis.
        keepdims: If this is set to `True`, the axes which are reduced are
            left in the result as dimensions with size one. Defaults to
            `False`.

    Returns:
        Tensor of indices. It has the same shape as `x`, with the dimension
        along `axis` removed (kept with size one when `keepdims=True`).

    Example:
        >>> x = keras.ops.arange(6).reshape(2, 3) + 10
        >>> x
        array([[10, 11, 12],
            [13, 14, 15]], dtype=int32)
        >>> keras.ops.argmax(x)
        array(5, dtype=int32)
        >>> keras.ops.argmax(x, axis=0)
        array([1, 1, 1], dtype=int32)
        >>> keras.ops.argmax(x, axis=1)
        array([2, 2], dtype=int32)
    """
    # Build a graph node when any input is symbolic; otherwise run eagerly.
    if any_symbolic_tensors((x,)):
        return Argmax(axis=axis, keepdims=keepdims).symbolic_call(x)
    return backend.numpy.argmax(x, axis=axis, keepdims=keepdims)
class Argmin(Operation):
    """Symbolic wrapper around `backend.numpy.argmin`."""

    def __init__(self, axis=None, keepdims=False, *, name=None):
        super().__init__(name=name)
        self.axis = axis
        self.keepdims = keepdims

    def call(self, x):
        return backend.numpy.argmin(x, axis=self.axis, keepdims=self.keepdims)

    def compute_output_spec(self, x):
        # Indices are always int32. With keepdims the shape is unchanged;
        # with axis=None the whole tensor reduces to a scalar.
        if self.keepdims:
            out_shape = x.shape
        elif self.axis is None:
            out_shape = []
        else:
            out_shape = reduce_shape(x.shape, axis=[self.axis])
        return KerasTensor(out_shape, dtype="int32")
@keras_export(["keras.ops.argmin", "keras.ops.numpy.argmin"])
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/ops/__init__.py | keras/src/ops/__init__.py | # from keras.src.ops.numpy import Matmul, matmul
# from keras.src.ops.numpy import Add, add
# from keras.src.ops.numpy import Multiply, multiply
from keras.src.backend import cast
from keras.src.backend import cond
from keras.src.backend import is_tensor
from keras.src.backend import name_scope
from keras.src.backend import random
from keras.src.ops import image
from keras.src.ops import operation_utils
from keras.src.ops.core import * # noqa: F403
from keras.src.ops.linalg import * # noqa: F403
from keras.src.ops.math import * # noqa: F403
from keras.src.ops.nn import * # noqa: F403
from keras.src.ops.numpy import * # noqa: F403
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/ops/math.py | keras/src/ops/math.py | """Commonly used math operations not included in NumPy."""
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.backend import KerasTensor
from keras.src.backend import any_symbolic_tensors
from keras.src.ops.operation import Operation
from keras.src.ops.operation_utils import reduce_shape
def _segment_reduce_validation(data, segment_ids):
data_shape = data.shape
segment_ids_shape = segment_ids.shape
if len(segment_ids_shape) > 1:
raise ValueError(
"Argument `segment_ids` should be an 1-D vector, got shape: "
f"{len(segment_ids_shape)}. Consider either flatten input with "
"segment_ids.reshape((-1)) and "
"data.reshape((-1, ) + data.shape[len(segment_ids.shape):]) or "
"vectorize with vmap."
)
if (
segment_ids_shape[0] is not None
and data_shape[0] is not None
and segment_ids_shape[0] != data_shape[0]
):
raise ValueError(
"Argument `segment_ids` and `data` should have same leading "
f"dimension. Got {segment_ids_shape} v.s. "
f"{data_shape}."
)
class SegmentReduction(Operation):
    """Base class for segment-reduction operations (sum, max, ...)."""

    def __init__(self, num_segments=None, sorted=False, *, name=None):
        super().__init__(name=name)
        self.num_segments = num_segments
        self.sorted = sorted

    def compute_output_spec(self, data, _):
        # The leading dimension becomes `num_segments`; the trailing
        # dimensions and dtype of `data` are preserved.
        trailing = tuple(data.shape[1:])
        return KerasTensor(
            shape=(self.num_segments,) + trailing, dtype=data.dtype
        )
class SegmentSum(SegmentReduction):
    """Segment-sum reduction op."""

    def call(self, data, segment_ids):
        # Validate shapes eagerly so errors surface with a clear message.
        _segment_reduce_validation(data, segment_ids)
        kwargs = {"num_segments": self.num_segments, "sorted": self.sorted}
        return backend.math.segment_sum(data, segment_ids, **kwargs)
@keras_export("keras.ops.segment_sum")
def segment_sum(data, segment_ids, num_segments=None, sorted=False):
    """Computes the sum of segments in a tensor.

    Args:
        data: Input tensor.
        segment_ids: A 1-D tensor containing segment indices for each
            element along the leading dimension of `data`. Its length must
            match `data.shape[0]`.
        num_segments: An integer representing the total number of
            segments. If not specified, it is inferred from the maximum
            value in `segment_ids`.
        sorted: A boolean indicating whether `segment_ids` is sorted.
            Defaults to `False`.

    Returns:
        A tensor containing the sum of segments, where each element
        represents the sum of the corresponding segment in `data`.

    Example:
        >>> data = keras.ops.convert_to_tensor([1, 2, 10, 20, 100, 200])
        >>> segment_ids = keras.ops.convert_to_tensor([0, 0, 1, 1, 2, 2])
        >>> num_segments = 3
        >>> keras.ops.segment_sum(data, segment_ids, num_segments)
        array([3, 30, 300], dtype=int32)
    """
    _segment_reduce_validation(data, segment_ids)
    # Build a graph node when `data` is symbolic; otherwise run eagerly.
    if any_symbolic_tensors((data,)):
        return SegmentSum(num_segments, sorted).symbolic_call(data, segment_ids)
    return backend.math.segment_sum(
        data, segment_ids, num_segments=num_segments, sorted=sorted
    )
class SegmentMax(SegmentReduction):
    """Segment-max reduction op."""

    def call(self, data, segment_ids):
        # Validate shapes eagerly so errors surface with a clear message.
        _segment_reduce_validation(data, segment_ids)
        kwargs = {"num_segments": self.num_segments, "sorted": self.sorted}
        return backend.math.segment_max(data, segment_ids, **kwargs)
@keras_export("keras.ops.segment_max")
def segment_max(data, segment_ids, num_segments=None, sorted=False):
    """Computes the max of segments in a tensor.

    Args:
        data: Input tensor.
        segment_ids: A 1-D tensor containing segment indices for each
            element along the leading dimension of `data`. Its length must
            match `data.shape[0]`.
        num_segments: An integer representing the total number of
            segments. If not specified, it is inferred from the maximum
            value in `segment_ids`.
        sorted: A boolean indicating whether `segment_ids` is sorted.
            Defaults to `False`.

    Returns:
        A tensor containing the max of segments, where each element
        represents the max of the corresponding segment in `data`.

    Example:
        >>> data = keras.ops.convert_to_tensor([1, 2, 10, 20, 100, 200])
        >>> segment_ids = keras.ops.convert_to_tensor([0, 0, 1, 1, 2, 2])
        >>> num_segments = 3
        >>> keras.ops.segment_max(data, segment_ids, num_segments)
        array([2, 20, 200], dtype=int32)
    """
    _segment_reduce_validation(data, segment_ids)
    # Build a graph node when `data` is symbolic; otherwise run eagerly.
    if any_symbolic_tensors((data,)):
        return SegmentMax(num_segments, sorted).symbolic_call(data, segment_ids)
    return backend.math.segment_max(
        data, segment_ids, num_segments=num_segments, sorted=sorted
    )
class TopK(Operation):
    """Symbolic wrapper around `backend.math.top_k`."""

    def __init__(self, k, sorted=True, *, name=None):
        super().__init__(name=name)
        self.k = k
        self.sorted = sorted

    def compute_output_spec(self, x):
        # Only the last axis changes: it shrinks to `k` elements.
        out_shape = list(x.shape)
        out_shape[-1] = self.k
        values = KerasTensor(shape=out_shape, dtype=x.dtype)
        indices = KerasTensor(shape=out_shape, dtype="int32")
        # Return a tuple (values, indices).
        return (values, indices)

    def call(self, x):
        return backend.math.top_k(x, self.k, self.sorted)
@keras_export("keras.ops.top_k")
def top_k(x, k, sorted=True):
    """Finds the top-k values and their indices in a tensor.

    Args:
        x: Input tensor.
        k: An integer representing the number of top elements to retrieve.
        sorted: A boolean indicating whether to sort the output in
            descending order. Defaults to `True`.

    Returns:
        A tuple containing two tensors. The first tensor contains the
        top-k values, and the second tensor contains the indices of the
        top-k values in the input tensor.

    Example:
        >>> x = keras.ops.convert_to_tensor([5, 2, 7, 1, 9, 3])
        >>> values, indices = top_k(x, k=3)
        >>> print(values)
        array([9 7 5], shape=(3,), dtype=int32)
        >>> print(indices)
        array([4 2 0], shape=(3,), dtype=int32)
    """
    # Build a graph node when the input is symbolic; otherwise run eagerly.
    if any_symbolic_tensors((x,)):
        return TopK(k, sorted).symbolic_call(x)
    return backend.math.top_k(x, k, sorted)
class InTopK(Operation):
    """Symbolic wrapper around `backend.math.in_top_k`."""

    def __init__(self, k, *, name=None):
        super().__init__(name=name)
        self.k = k

    def compute_output_spec(self, targets, predictions):
        # One boolean per target, regardless of `k`.
        return KerasTensor(shape=targets.shape, dtype="bool")

    def call(self, targets, predictions):
        return backend.math.in_top_k(targets, predictions, self.k)
@keras_export("keras.ops.in_top_k")
def in_top_k(targets, predictions, k):
    """Checks if the targets are in the top-k predictions.

    Args:
        targets: A tensor of true labels.
        predictions: A tensor of predicted labels.
        k: An integer representing the number of predictions to consider.

    Returns:
        A boolean tensor of the same shape as `targets`, where each element
        indicates whether the corresponding target is in the top-k
        predictions.

    Example:
        >>> targets = keras.ops.convert_to_tensor([2, 5, 3])
        >>> predictions = keras.ops.convert_to_tensor(
        ... [[0.1, 0.4, 0.6, 0.9, 0.5],
        ...  [0.1, 0.7, 0.9, 0.8, 0.3],
        ...  [0.1, 0.6, 0.9, 0.9, 0.5]])
        >>> in_top_k(targets, predictions, k=3)
        array([ True False True], shape=(3,), dtype=bool)
    """
    # Build a graph node when any input is symbolic; otherwise run eagerly.
    if any_symbolic_tensors((targets, predictions)):
        return InTopK(k).symbolic_call(targets, predictions)
    return backend.math.in_top_k(targets, predictions, k)
class Logsumexp(Operation):
    """Symbolic wrapper around `backend.math.logsumexp`."""

    def __init__(self, axis=None, keepdims=False, *, name=None):
        super().__init__(name=name)
        self.axis = axis
        self.keepdims = keepdims

    def compute_output_spec(self, x):
        # Shape follows the standard reduction rule; dtype falls back to
        # the KerasTensor default (no explicit dtype is passed).
        return KerasTensor(
            shape=reduce_shape(x.shape, self.axis, self.keepdims)
        )

    def call(self, x):
        return backend.math.logsumexp(x, axis=self.axis, keepdims=self.keepdims)
@keras_export("keras.ops.logsumexp")
def logsumexp(x, axis=None, keepdims=False):
    """Computes the logarithm of sum of exponentials of elements in a tensor.

    Args:
        x: Input tensor.
        axis: An integer or a tuple of integers specifying the axis/axes
            along which to compute the sum. If `None`, the sum is computed
            over all elements. Defaults to `None`.
        keepdims: A boolean indicating whether to keep the dimensions of
            the input tensor when computing the sum. Defaults to `False`.

    Returns:
        A tensor containing the logarithm of the sum of exponentials of
        elements in `x`.

    Example:
        >>> x = keras.ops.convert_to_tensor([1., 2., 3.])
        >>> logsumexp(x)
        3.407606
    """
    # Build a graph node when the input is symbolic; otherwise run eagerly.
    if any_symbolic_tensors((x,)):
        return Logsumexp(axis, keepdims).symbolic_call(x)
    return backend.math.logsumexp(x, axis=axis, keepdims=keepdims)
class ExtractSequences(Operation):
    """Symbolic op that windows the last axis into overlapping sequences."""

    def __init__(self, sequence_length, sequence_stride, *, name=None):
        super().__init__(name=name)
        # Window size and hop size for the sliding window on the last axis.
        self.sequence_length = sequence_length
        self.sequence_stride = sequence_stride

    def compute_output_spec(self, x):
        if len(x.shape) < 1:
            raise ValueError(
                f"Input should have rank >= 1. "
                f"Received: input.shape = {x.shape}"
            )
        # Number of windows that fit: 1 + (N - length) // stride, unknown
        # when the last dimension is dynamic.
        if x.shape[-1] is not None:
            num_sequences = (
                1 + (x.shape[-1] - self.sequence_length) // self.sequence_stride
            )
        else:
            num_sequences = None
        # The last axis is replaced by (num_sequences, sequence_length).
        new_shape = x.shape[:-1] + (num_sequences, self.sequence_length)
        return KerasTensor(shape=new_shape, dtype=x.dtype)

    def call(self, x):
        return backend.math.extract_sequences(
            x,
            sequence_length=self.sequence_length,
            sequence_stride=self.sequence_stride,
        )
@keras_export("keras.ops.extract_sequences")
def extract_sequences(x, sequence_length, sequence_stride):
    """Expands the dimension of last axis into sequences of `sequence_length`.

    Slides a window of size `sequence_length` over the last axis of the input
    with a stride of `sequence_stride`, replacing the last axis with
    `[num_sequences, sequence_length]` sequences.

    If the dimension along the last axis is N, the number of sequences can be
    computed by:

    `num_sequences = 1 + (N - sequence_length) // sequence_stride`

    Args:
        x: Input tensor.
        sequence_length: An integer representing the sequences length.
        sequence_stride: An integer representing the sequences hop size.

    Returns:
        A tensor of sequences with shape [..., num_sequences,
        sequence_length].

    Example:
        >>> x = keras.ops.convert_to_tensor([1, 2, 3, 4, 5, 6])
        >>> extract_sequences(x, 3, 2)
        array([[1, 2, 3],
           [3, 4, 5]])
    """
    # Build a graph node when the input is symbolic; otherwise run eagerly.
    if any_symbolic_tensors((x,)):
        return ExtractSequences(sequence_length, sequence_stride).symbolic_call(
            x
        )
    return backend.math.extract_sequences(x, sequence_length, sequence_stride)
class FFT(Operation):
    """Symbolic op for a 1D FFT over a (real, imag) pair of tensors."""

    def compute_output_spec(self, x):
        # Input must be a (real, imag) 2-tuple of same-shape tensors.
        if not isinstance(x, (tuple, list)) or len(x) != 2:
            raise ValueError(
                "Input `x` should be a tuple of two tensors - real and "
                f"imaginary. Received: x={x}"
            )
        real, imag = x
        # Both real and imaginary parts should have the same shape.
        if real.shape != imag.shape:
            raise ValueError(
                "Input `x` should be a tuple of two tensors - real and "
                "imaginary. Both the real and imaginary parts should have the "
                f"same shape. Received: x[0].shape = {real.shape}, "
                f"x[1].shape = {imag.shape}"
            )
        # We are calculating 1D FFT. Hence, rank >= 1.
        if len(real.shape) < 1:
            raise ValueError(
                f"Input should have rank >= 1. "
                f"Received: input.shape = {real.shape}"
            )
        # The axis along which we are calculating FFT should be fully-defined.
        m = real.shape[-1]
        if m is None:
            raise ValueError(
                f"Input should have its last dimension fully-defined. "
                f"Received: input.shape = {real.shape}"
            )
        # FFT is shape- and dtype-preserving for both components.
        return (
            KerasTensor(shape=real.shape, dtype=real.dtype),
            KerasTensor(shape=imag.shape, dtype=imag.dtype),
        )

    def call(self, x):
        return backend.math.fft(x)
@keras_export("keras.ops.fft")
def fft(x):
    """Computes the Fast Fourier Transform along last axis of input.

    Args:
        x: Tuple of the real and imaginary parts of the input tensor. Both
            tensors in the tuple should be of floating type.

    Returns:
        A tuple containing two tensors - the real and imaginary parts of the
        output tensor.

    Example:
        >>> x = (
        ...     keras.ops.convert_to_tensor([1., 2.]),
        ...     keras.ops.convert_to_tensor([0., 1.]),
        ... )
        >>> fft(x)
        (array([ 3., -1.], dtype=float32), array([ 1., -1.], dtype=float32))
    """
    # `x` is a (real, imag) tuple, so it is passed to any_symbolic_tensors
    # directly as the iterable of candidates.
    if any_symbolic_tensors(x):
        return FFT().symbolic_call(x)
    return backend.math.fft(x)
class FFT2(Operation):
    """Symbolic op for a 2D FFT over a (real, imag) pair of tensors."""

    def compute_output_spec(self, x):
        # The FFT is taken over the last two axes.
        axes = (-2, -1)
        # Input must be a (real, imag) 2-tuple of same-shape tensors.
        if not isinstance(x, (tuple, list)) or len(x) != 2:
            raise ValueError(
                "Input `x` should be a tuple of two tensors - real and "
                f"imaginary. Received: x={x}"
            )
        real, imag = x
        # Both real and imaginary parts should have the same shape.
        if real.shape != imag.shape:
            raise ValueError(
                "Input `x` should be a tuple of two tensors - real and "
                "imaginary. Both the real and imaginary parts should have the "
                f"same shape. Received: x[0].shape = {real.shape}, "
                f"x[1].shape = {imag.shape}"
            )
        # We are calculating 2D FFT. Hence, rank >= 2.
        if len(real.shape) < 2:
            raise ValueError(
                f"Input should have rank >= 2. "
                f"Received: input.shape = {real.shape}"
            )
        # The axes along which we are calculating FFT should be fully-defined.
        m = real.shape[axes[0]]
        n = real.shape[axes[1]]
        if m is None or n is None:
            raise ValueError(
                f"Input should have its {axes} axes fully-defined. "
                f"Received: input.shape = {real.shape}"
            )
        # FFT2 is shape- and dtype-preserving for both components.
        return (
            KerasTensor(shape=real.shape, dtype=real.dtype),
            KerasTensor(shape=imag.shape, dtype=imag.dtype),
        )

    def call(self, x):
        return backend.math.fft2(x)
@keras_export("keras.ops.fft2")
def fft2(x):
    """2D Fast Fourier Transform over the last two axes of the input.

    Args:
        x: A `(real, imag)` tuple of floating-point tensors representing the
            complex input.

    Returns:
        A `(real, imag)` tuple of tensors holding the 2D FFT of `x`.

    Example:

    >>> x = (
    ...     keras.ops.convert_to_tensor([[1., 2.], [2., 1.]]),
    ...     keras.ops.convert_to_tensor([[0., 1.], [1., 0.]]),
    ... )
    >>> fft2(x)
    (array([[ 6.,  0.],
        [ 0., -2.]], dtype=float32), array([[ 2.,  0.],
        [ 0., -2.]], dtype=float32))
    """
    # Concrete tensors go straight to the backend; symbolic inputs record
    # a graph node via the FFT2 op.
    if not any_symbolic_tensors(x):
        return backend.math.fft2(x)
    return FFT2().symbolic_call(x)
class IFFT2(Operation):
    """Symbolic op for the 2D inverse FFT over the last two axes."""

    def compute_output_spec(self, x):
        axes = (-2, -1)
        # The input must be a pair of tensors: the real and imaginary parts.
        if not isinstance(x, (tuple, list)) or len(x) != 2:
            raise ValueError(
                "Input `x` should be a tuple of two tensors - real and "
                f"imaginary. Received: x={x}"
            )
        real, imag = x
        # Both parts must agree on shape.
        if real.shape != imag.shape:
            raise ValueError(
                "Input `x` should be a tuple of two tensors - real and "
                "imaginary. Both the real and imaginary parts should have the "
                f"same shape. Received: x[0].shape = {real.shape}, "
                f"x[1].shape = {imag.shape}"
            )
        # A 2D transform needs at least two axes.
        if len(real.shape) < 2:
            raise ValueError(
                f"Input should have rank >= 2. "
                f"Received: input.shape = {real.shape}"
            )
        # Both transformed axes must be statically known.
        if real.shape[axes[0]] is None or real.shape[axes[1]] is None:
            raise ValueError(
                f"Input should have its {axes} axes fully-defined. "
                f"Received: input.shape = {real.shape}"
            )
        # The output is a same-shaped (real, imag) pair.
        return tuple(
            KerasTensor(shape=part.shape, dtype=part.dtype) for part in x
        )

    def call(self, x):
        return backend.math.ifft2(x)
@keras_export("keras.ops.ifft2")
def ifft2(x):
    """Computes the 2D Inverse Fast Fourier Transform along the last two axes of
        input.

    Args:
        x: Tuple of the real and imaginary parts of the input tensor. Both
            tensors in the tuple should be of floating type.

    Returns:
        A tuple containing two tensors - the real and imaginary parts of the
        output.

    Example:

    >>> x = (
    ...     keras.ops.convert_to_tensor([[1., 2.], [2., 1.]]),
    ...     keras.ops.convert_to_tensor([[0., 1.], [1., 0.]]),
    ... )
    >>> ifft2(x)
    (array([[ 1.5,  0. ],
        [ 0. , -0.5]], dtype=float32), array([[ 0.5,  0. ],
        [ 0. , -0.5]], dtype=float32))
    """
    if any_symbolic_tensors(x):
        return IFFT2().symbolic_call(x)
    return backend.math.ifft2(x)
class RFFT(Operation):
    """Symbolic op for the real-valued FFT along the last axis."""

    def __init__(self, fft_length=None, *, name=None):
        super().__init__(name=name)
        self.fft_length = fft_length

    def compute_output_spec(self, x):
        # A 1D transform needs at least one axis.
        if len(x.shape) < 1:
            raise ValueError(
                f"Input should have rank >= 1. "
                f"Received: input.shape = {x.shape}"
            )
        # Only the Hermitian-unique half of the spectrum is produced, so the
        # last dimension becomes fft_length // 2 + 1 (or stays unknown).
        if self.fft_length is not None:
            last_dim = self.fft_length // 2 + 1
        elif x.shape[-1] is not None:
            last_dim = x.shape[-1] // 2 + 1
        else:
            last_dim = None
        new_shape = x.shape[:-1] + (last_dim,)
        return (
            KerasTensor(shape=new_shape, dtype=x.dtype),
            KerasTensor(shape=new_shape, dtype=x.dtype),
        )

    def call(self, x):
        return backend.math.rfft(x, fft_length=self.fft_length)
@keras_export("keras.ops.rfft")
def rfft(x, fft_length=None):
    """Real-valued Fast Fourier Transform along the last axis of the input.

    Computes the 1D Discrete Fourier Transform of a real-valued signal over
    its inner-most dimension. Because the spectrum of a real signal is
    Hermitian-symmetric, only the `fft_length / 2 + 1` unique components are
    returned: the zero-frequency term followed by the `fft_length / 2`
    positive-frequency terms.

    Along the transformed axis, the input is cropped when `fft_length` is
    smaller than the axis length, and zero-padded when it is larger.

    Args:
        x: Input tensor.
        fft_length: An integer representing the number of the fft length. If not
            specified, it is inferred from the length of the last axis of `x`.
            Defaults to `None`.

    Returns:
        A tuple containing two tensors - the real and imaginary parts of the
        output.

    Examples:

    >>> x = keras.ops.convert_to_tensor([0.0, 1.0, 2.0, 3.0, 4.0])
    >>> rfft(x)
    (array([10.0, -2.5, -2.5]), array([0.0, 3.4409548, 0.81229924]))

    >>> rfft(x, 3)
    (array([3.0, -1.5]), array([0.0, 0.8660254]))
    """
    # Concrete tensors go straight to the backend; symbolic inputs record
    # a graph node via the RFFT op.
    if not any_symbolic_tensors((x,)):
        return backend.math.rfft(x, fft_length)
    return RFFT(fft_length=fft_length).symbolic_call(x)
class IRFFT(Operation):
    """Symbolic op for the inverse real-valued FFT along the last axis."""

    def __init__(self, fft_length=None, *, name=None):
        super().__init__(name=name)
        self.fft_length = fft_length

    def compute_output_spec(self, x):
        # The input must be a pair of tensors: the real and imaginary parts.
        if not isinstance(x, (tuple, list)) or len(x) != 2:
            raise ValueError(
                "Input `x` should be a tuple of two tensors - real and "
                f"imaginary. Received: x={x}"
            )
        real, imag = x
        # Both parts must agree on shape.
        if real.shape != imag.shape:
            raise ValueError(
                "Input `x` should be a tuple of two tensors - real and "
                "imaginary. Both the real and imaginary parts should have the "
                f"same shape. Received: x[0].shape = {real.shape}, "
                f"x[1].shape = {imag.shape}"
            )
        # A 1D transform needs at least one axis.
        if len(real.shape) < 1:
            raise ValueError(
                f"Input should have rank >= 1. "
                f"Received: input.shape = {real.shape}"
            )
        # The output length is fft_length when given; otherwise an even
        # original length of 2 * (inner - 1) is assumed.
        if self.fft_length is not None:
            last_dim = self.fft_length
        elif real.shape[-1] is not None:
            last_dim = 2 * (real.shape[-1] - 1)
        else:
            last_dim = None
        return KerasTensor(
            shape=real.shape[:-1] + (last_dim,), dtype=real.dtype
        )

    def call(self, x):
        return backend.math.irfft(x, fft_length=self.fft_length)
@keras_export("keras.ops.irfft")
def irfft(x, fft_length=None):
    """Inverse real-valued Fast Fourier transform along the last axis.

    Computes the inverse 1D Discrete Fourier Transform of a real-valued
    signal over the inner-most dimension of the input.

    The inner-most dimension of the input is assumed to hold the result of
    RFFT: the `fft_length / 2 + 1` unique components of the DFT of a
    real-valued signal. When `fft_length` is not provided it is inferred as
    `2 * (inner - 1)`; if the original FFT length was odd, pass it explicitly
    since it cannot be inferred.

    Along the transformed axis, the input is cropped when `fft_length / 2 + 1`
    is smaller than the axis length, and zero-padded when it is larger.

    Args:
        x: Tuple of the real and imaginary parts of the input tensor. Both
            tensors in the tuple should be of floating type.
        fft_length: An integer representing the number of the fft length. If not
            specified, it is inferred from the length of the last axis of `x`.
            Defaults to `None`.

    Returns:
        A tensor containing the inverse real-valued Fast Fourier Transform
        along the last axis of `x`.

    Examples:

    >>> real = keras.ops.convert_to_tensor([0.0, 1.0, 2.0, 3.0, 4.0])
    >>> imag = keras.ops.convert_to_tensor([0.0, 1.0, 2.0, 3.0, 4.0])
    >>> irfft((real, imag))
    array([0.66666667, -0.9106836, 0.24401694])

    >>> irfft(rfft(real, 5), 5)
    array([0.0, 1.0, 2.0, 3.0, 4.0])
    """
    # Concrete tensors go straight to the backend; symbolic inputs record
    # a graph node via the IRFFT op.
    if not any_symbolic_tensors(x):
        return backend.math.irfft(x, fft_length)
    return IRFFT(fft_length=fft_length).symbolic_call(x)
class STFT(Operation):
    """Symbolic op for the Short-Time Fourier Transform."""

    def __init__(
        self,
        sequence_length,
        sequence_stride,
        fft_length,
        window="hann",
        center=True,
        *,
        name=None,
    ):
        super().__init__(name=name)
        self.sequence_length = sequence_length
        self.sequence_stride = sequence_stride
        self.fft_length = fft_length
        self.window = window
        self.center = center

    def compute_output_spec(self, x):
        last_dim = x.shape[-1]
        if last_dim is None:
            num_sequences = None
        else:
            # A centered STFT pads fft_length // 2 samples on each side.
            # NOTE: identity check against False mirrors the original
            # behavior for non-bool `center` values.
            if self.center is False:
                padded = 0
            else:
                padded = (self.fft_length // 2) * 2
            num_sequences = (
                1
                + (last_dim + padded - self.fft_length)
                // self.sequence_stride
            )
        # One spectrum of fft_length // 2 + 1 bins per frame, as a
        # (real, imag) pair.
        new_shape = x.shape[:-1] + (num_sequences, self.fft_length // 2 + 1)
        return (
            KerasTensor(shape=new_shape, dtype=x.dtype),
            KerasTensor(shape=new_shape, dtype=x.dtype),
        )

    def call(self, x):
        return backend.math.stft(
            x,
            sequence_length=self.sequence_length,
            sequence_stride=self.sequence_stride,
            fft_length=self.fft_length,
            window=self.window,
            center=self.center,
        )
@keras_export("keras.ops.stft")
def stft(
    x, sequence_length, sequence_stride, fft_length, window="hann", center=True
):
    """Short-Time Fourier Transform along the last axis of the input.

    The STFT computes the Fourier transform of short overlapping windows of
    the input, giving the frequency content of the signal as it changes over
    time.

    Args:
        x: Input tensor.
        sequence_length: An integer representing the sequence length.
        sequence_stride: An integer representing the sequence hop size.
        fft_length: An integer representing the size of the FFT to apply. If not
            specified, uses the smallest power of 2 enclosing `sequence_length`.
        window: A string, a tensor of the window or `None`. If `window` is a
            string, available values are `"hann"` and `"hamming"`. If `window`
            is a tensor, it will be used directly as the window and its length
            must be `sequence_length`. If `window` is `None`, no windowing is
            used. Defaults to `"hann"`.
        center: Whether to pad `x` on both sides so that the t-th sequence is
            centered at time `t * sequence_stride`. Otherwise, the t-th sequence
            begins at time `t * sequence_stride`. Defaults to `True`.

    Returns:
        A tuple containing two tensors - the real and imaginary parts of the
        STFT output.

    Example:

    >>> x = keras.ops.convert_to_tensor([0.0, 1.0, 2.0, 3.0, 4.0])
    >>> stft(x, 3, 2, 3)
    (array([[0.75, -0.375],
       [3.75, -1.875],
       [5.25, -2.625]]), array([[0.0, 0.64951905],
       [0.0, 0.64951905],
       [0.0, -0.64951905]]))
    """
    # The symbolic op and the backend call take identical parameters.
    params = dict(
        sequence_length=sequence_length,
        sequence_stride=sequence_stride,
        fft_length=fft_length,
        window=window,
        center=center,
    )
    if any_symbolic_tensors((x,)):
        return STFT(**params).symbolic_call(x)
    return backend.math.stft(x, **params)
class ISTFT(Operation):
    """Symbolic op for the inverse Short-Time Fourier Transform."""

    def __init__(
        self,
        sequence_length,
        sequence_stride,
        fft_length,
        length=None,
        window="hann",
        center=True,
        *,
        name=None,
    ):
        super().__init__(name=name)
        self.sequence_length = sequence_length
        self.sequence_stride = sequence_stride
        self.fft_length = fft_length
        self.length = length
        self.window = window
        self.center = center

    def compute_output_spec(self, x):
        # The input must be a pair of tensors: the real and imaginary parts.
        if not isinstance(x, (tuple, list)) or len(x) != 2:
            raise ValueError(
                "Input `x` should be a tuple of two tensors - real and "
                f"imaginary. Received: x={x}"
            )
        real, imag = x
        # Both parts must agree on shape.
        if real.shape != imag.shape:
            raise ValueError(
                "Input `x` should be a tuple of two tensors - real and "
                "imaginary. Both the real and imaginary parts should have the "
                f"same shape. Received: x[0].shape = {real.shape}, "
                f"x[1].shape = {imag.shape}"
            )
        # Frames axis plus bins axis are required at minimum.
        if len(real.shape) < 2:
            raise ValueError(
                f"Input should have rank >= 2. "
                f"Received: input.shape = {real.shape}"
            )
        num_frames = real.shape[-2]
        if num_frames is None:
            # Unknown frame count -> unknown output length.
            output_size = None
        elif self.length is not None:
            # An explicit `length` overrides the overlap-add arithmetic.
            output_size = self.length
        else:
            # Overlap-add reconstruction length.
            output_size = (
                num_frames - 1
            ) * self.sequence_stride + self.fft_length
            if self.center:
                # Remove the padding that a centered STFT added.
                output_size -= (self.fft_length // 2) * 2
        return KerasTensor(
            shape=real.shape[:-2] + (output_size,), dtype=real.dtype
        )

    def call(self, x):
        return backend.math.istft(
            x,
            sequence_length=self.sequence_length,
            sequence_stride=self.sequence_stride,
            fft_length=self.fft_length,
            length=self.length,
            window=self.window,
            center=self.center,
        )
@keras_export("keras.ops.istft")
def istft(
    x,
    sequence_length,
    sequence_stride,
    fft_length,
    length=None,
    window="hann",
    center=True,
):
    """Inverse Short-Time Fourier Transform along the last axis of the input.

    To reconstruct an original waveform, the parameters should be the same in
    `stft`.

    Args:
        x: Tuple of the real and imaginary parts of the input tensor. Both
            tensors in the tuple should be of floating type.
        sequence_length: An integer representing the sequence length.
        sequence_stride: An integer representing the sequence hop size.
        fft_length: An integer representing the size of the FFT that produced
            `stft`. Should be of type `int32`.
        length: An integer representing the output is clipped to exactly length.
            If not specified, no padding or clipping take place. Defaults to
            `None`.
        window: A string, a tensor of the window or `None`. If `window` is a
            string, available values are `"hann"` and `"hamming"`. If `window`
            is a tensor, it will be used directly as the window and its length
            must be `sequence_length`. If `window` is `None`, no windowing is
            used. Defaults to `"hann"`.
        center: Whether `x` was padded on both sides so that the t-th sequence
            is centered at time `t * sequence_stride`. Defaults to `True`.

    Returns:
        A tensor containing the inverse Short-Time Fourier Transform along the
        last axis of `x`.

    Example:

    >>> x = keras.ops.convert_to_tensor([0.0, 1.0, 2.0, 3.0, 4.0])
    >>> istft(stft(x, 1, 1, 1), 1, 1, 1)
    array([0.0, 1.0, 2.0, 3.0, 4.0])
    """
    if any_symbolic_tensors(x):
        return ISTFT(
            sequence_length=sequence_length,
            sequence_stride=sequence_stride,
            fft_length=fft_length,
            # Bug fix: `length` was previously not forwarded, so the
            # symbolic output shape ignored an explicit `length` while the
            # eager path honored it.
            length=length,
            window=window,
            center=center,
        ).symbolic_call(x)
    return backend.math.istft(
        x,
        sequence_length=sequence_length,
        sequence_stride=sequence_stride,
        fft_length=fft_length,
        length=length,
        window=window,
        center=center,
    )
class Rsqrt(Operation):
    """Symbolic op computing `1 / sqrt(x)` element-wise."""

    def compute_output_spec(self, x):
        # Element-wise: shape and dtype are preserved.
        return KerasTensor(x.shape, dtype=x.dtype)

    def call(self, x):
        return backend.math.rsqrt(backend.convert_to_tensor(x))
@keras_export("keras.ops.rsqrt")
def rsqrt(x):
    """Computes the reciprocal of the square root of `x`, element-wise.

    Args:
        x: input tensor

    Returns:
        A tensor with the same dtype as `x`.

    Example:

    >>> x = keras.ops.convert_to_tensor([1.0, 10.0, 100.0])
    >>> keras.ops.rsqrt(x)
    array([1.0, 0.31622776, 0.1], dtype=float32)
    """
    # Concrete inputs dispatch to the backend after conversion; symbolic
    # inputs record a graph node via the Rsqrt op.
    if not any_symbolic_tensors((x,)):
        return backend.math.rsqrt(backend.convert_to_tensor(x))
    return Rsqrt().symbolic_call(x)
class Erf(Operation):
    """Symbolic op for the Gauss error function, element-wise."""

    def call(self, x):
        return backend.math.erf(x)

    def compute_output_spec(self, x):
        # Element-wise: shape and dtype are preserved.
        return KerasTensor(shape=x.shape, dtype=x.dtype)
@keras_export("keras.ops.erf")
def erf(x):
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/ops/operation.py | keras/src/ops/operation.py | import inspect
import textwrap
from keras.src import backend
from keras.src import dtype_policies
from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.backend.common.keras_tensor import any_symbolic_tensors
from keras.src.backend.config import is_nnx_enabled
from keras.src.ops.node import Node
from keras.src.saving.keras_saveable import KerasSaveable
from keras.src.utils import python_utils
from keras.src.utils import traceback_utils
from keras.src.utils.naming import auto_name
@keras_export("keras.Operation")
class Operation(KerasSaveable):
    """Base class for Keras ops and layers.

    An `Operation` bridges eager and symbolic execution: calling it with
    symbolic (`KerasTensor`) inputs records a `Node` in the operations graph
    via `symbolic_call`, while concrete inputs dispatch to `call()` (or
    `quantized_call()` / rematerialized variants when configured).
    """

    def __init__(self, name=None):
        if name is None:
            name = auto_name(self.__class__.__name__)
        if not isinstance(name, str) or "/" in name:
            raise ValueError(
                "Argument `name` must be a string and "
                f"cannot contain character `/`. "
                f"Received: name={name} (of type {type(name)})"
            )
        self.name = name
        self._inbound_nodes = []
        self._outbound_nodes = []

    @traceback_utils.filter_traceback
    def __call__(self, *args, **kwargs):
        # Dispatch precedence: symbolic > rematerialized > quantized > call.
        if traceback_utils.is_traceback_filtering_enabled():
            # Wrap self.call to provide helpful info in case of exception
            if any_symbolic_tensors(args, kwargs):
                call_fn = self.symbolic_call
            else:
                if getattr(self, "_remat_mode", None) is not None:
                    if getattr(self, "quantization_mode", None) is not None:
                        call_fn = self.rematerialized_call(
                            self.quantized_call,
                            *args,
                            **kwargs,
                        )
                    else:
                        call_fn = self.rematerialized_call(
                            self.call, *args, **kwargs
                        )
                else:
                    if getattr(self, "quantization_mode", None) is not None:
                        call_fn = self.quantized_call
                    else:
                        call_fn = self.call
            call_fn = traceback_utils.inject_argument_info_in_traceback(
                call_fn,
                object_name=(f"{self.__class__.__name__}.call()"),
            )
            return call_fn(*args, **kwargs)

        # Plain flow.
        if any_symbolic_tensors(args, kwargs):
            return self.symbolic_call(*args, **kwargs)
        elif getattr(self, "_remat_mode", None) is not None:
            if getattr(self, "quantization_mode", None) is not None:
                return self.rematerialized_call(
                    self.quantized_call, *args, **kwargs
                )(*args, **kwargs)
            else:
                return self.rematerialized_call(self.call, *args, **kwargs)(
                    *args, **kwargs
                )
        else:
            if getattr(self, "quantization_mode", None) is not None:
                return self.quantized_call(*args, **kwargs)
            else:
                return self.call(*args, **kwargs)

    def symbolic_call(self, *args, **kwargs):
        # Perform shape/dtype inference.
        outputs = self.compute_output_spec(*args, **kwargs)
        # Record a new node in the operations graph.
        # The Node wires itself to inbound and outbound ops. The
        # Node constructor updates this op's self._inbound_nodes,
        # sets _keras_history on the outputs, and adds itself to the
        # `_outbound_nodes` of the ops that produced the inputs to this
        # call.
        Node(
            operation=self, call_args=args, call_kwargs=kwargs, outputs=outputs
        )
        return outputs

    def call(self, *args, **kwargs):
        raise NotImplementedError

    def quantized_call(self, *args, **kwargs):
        raise NotImplementedError

    def compute_output_spec(self, *args, **kwargs):
        try:
            return backend.compute_output_spec(self.call, *args, **kwargs)
        except Exception as e:
            new_e = e.__class__(
                "Could not automatically infer the output shape / dtype of "
                f"'{self.name}' (of type {self.__class__.__name__}). "
                f"Either the `{self.__class__.__name__}.call()` method "
                f"is incorrect, or you need to implement the "
                f"`{self.__class__.__name__}.compute_output_spec() / "
                "compute_output_shape()` method. "
                f"Error encountered:\n\n{e}"
            )
            raise new_e.with_traceback(e.__traceback__) from None

    def __new__(cls, *args, **kwargs):
        """We override __new__ to save serializable constructor arguments.

        These arguments are used to auto-generate an object serialization
        config, which enables user-created subclasses to be serializable
        out of the box in most cases without forcing the user
        to manually implement `get_config()`.
        """
        instance = super(Operation, cls).__new__(cls)
        if backend.backend() == "jax" and is_nnx_enabled():
            from flax import nnx

            try:
                vars(instance)["_pytree__state"] = nnx.pytreelib.PytreeState()
            except AttributeError:
                vars(instance)["_object__state"] = nnx.object.ObjectState()
        # Generate a config to be returned by default by `get_config()`.
        auto_config = True
        signature = inspect.signature(cls.__init__)
        argspec = inspect.getfullargspec(cls.__init__)
        try:
            bound_parameters = signature.bind(None, *args, **kwargs)
        except TypeError:
            # Raised by signature.bind when the supplied args and kwargs
            # do not match the signature.
            auto_config = False
        if auto_config and any(
            [
                param.kind == inspect.Parameter.POSITIONAL_ONLY
                for name, param in signature.parameters.items()
                if name != argspec.args[0]
            ]
        ):
            # cls.__init__ takes positional only arguments, which
            # cannot be restored via cls(**config)
            auto_config = False
            # Create variable to show appropriate warning in get_config.
            instance._auto_config_error_args = True
        if auto_config:
            # Include default values in the config.
            bound_parameters.apply_defaults()
            # Extract all arguments as a dictionary.
            kwargs = bound_parameters.arguments
            # Expand variable kwargs argument.
            kwargs |= kwargs.pop(argspec.varkw, {})
            # Remove first positional argument, self.
            kwargs.pop(argspec.args[0])
            # Remove argument "name", as it is provided by get_config.
            kwargs.pop("name", None)
            if argspec.varargs is not None:
                # Varargs cannot be meaningfully converted to a dictionary.
                varargs = kwargs.pop(argspec.varargs)
                if len(varargs) > 0:
                    auto_config = False
                    # Store variable to show appropriate warning in
                    # get_config.
                    instance._auto_config_error_args = True
        # For safety, we only rely on auto-configs for a small set of
        # serializable types.
        supported_types = (str, int, float, bool, type(None))
        try:
            flat_arg_values = tree.flatten(kwargs)
            for value in flat_arg_values:
                if not isinstance(value, supported_types):
                    auto_config = False
                    break
        except TypeError:
            auto_config = False
        try:
            instance._lock = False
            if auto_config:
                from keras.src.saving import serialization_lib

                instance._auto_config = serialization_lib.SerializableDict(
                    **kwargs
                )
            else:
                instance._auto_config = None
            instance._lock = True
        except RecursionError:
            # Setting an instance attribute in __new__ has the potential
            # to trigger an infinite recursion if a subclass overrides
            # setattr in an unsafe way.
            pass
        return instance

    @python_utils.default
    def get_config(self):
        """Returns the config of the object.

        An object config is a Python dictionary (serializable)
        containing the information needed to re-instantiate it.
        """
        config = {
            "name": self.name,
        }
        if not python_utils.is_default(self.get_config):
            # In this case the subclass implements get_config()
            return config

        # In this case the subclass doesn't implement get_config():
        # Let's see if we can autogenerate it.
        if getattr(self, "_auto_config", None) is not None:
            config.update(self._auto_config.config)
            init_params = inspect.signature(self.__init__).parameters
            init_has_name = "name" in init_params
            init_has_kwargs = (
                "kwargs" in init_params
                and init_params["kwargs"].kind == inspect.Parameter.VAR_KEYWORD
            )
            if not init_has_name and not init_has_kwargs:
                # We can't pass `name` back to `__init__`, remove it.
                config.pop("name", None)
            return config
        else:
            example_str = """
            class CustomLayer(keras.layers.Layer):
                def __init__(self, arg1, arg2, **kwargs):
                    super().__init__(**kwargs)
                    self.arg1 = arg1
                    self.arg2 = arg2

                def get_config(self):
                    config = super().get_config()
                    config.update({
                        "arg1": self.arg1,
                        "arg2": self.arg2,
                    })
                    return config
            """
            if getattr(self, "_auto_config_error_args", False):
                # Typo fix: "serializatble" -> "serializable" in the
                # user-facing error message.
                raise NotImplementedError(
                    textwrap.dedent(
                        f"""
                    Object {self.__class__.__name__} was created by passing
                    positional only or variadic positional arguments (e.g.,
                    `*args`) to `__init__()`, which is not supported by the
                    automatic config generation. Please remove all positional
                    only and variadic arguments from `__init__()`
                    or override `get_config()` and `from_config()` to make
                    the object serializable.

                    Example:

                    {example_str}"""
                    )
                )
            else:
                raise NotImplementedError(
                    textwrap.dedent(
                        f"""
                    Object {self.__class__.__name__} was created by passing
                    non-serializable argument values in `__init__()`,
                    and therefore the object must override `get_config()` in
                    order to be serializable. Please implement `get_config()`.

                    Example:

                    {example_str}"""
                    )
                )

    @classmethod
    def from_config(cls, config):
        """Creates an operation from its config.

        This method is the reverse of `get_config`, capable of instantiating
        the same operation from the config dictionary.

        Note: If you override this method, you might receive a serialized
        dtype config, which is a `dict`. You can deserialize it as follows:

        ```python
        if "dtype" in config and isinstance(config["dtype"], dict):
            policy = dtype_policies.deserialize(config["dtype"])
        ```

        Args:
            config: A Python dictionary, typically the output of `get_config`.

        Returns:
            An operation instance.
        """
        # Explicitly deserialize dtype config if needed. This enables users to
        # directly interact with the instance of `DTypePolicy`.
        if "dtype" in config and isinstance(config["dtype"], dict):
            config = config.copy()
            policy = dtype_policies.deserialize(config["dtype"])
            if (
                not isinstance(policy, dtype_policies.DTypePolicyMap)
                and policy.quantization_mode is None
            ):
                # For backward compatibility, we use a str (`name`) for
                # `DTypePolicy`
                policy = policy.name
            config["dtype"] = policy
        try:
            return cls(**config)
        except Exception as e:
            raise TypeError(
                f"Error when deserializing class '{cls.__name__}' using "
                f"config={config}.\n\nException encountered: {e}"
            )

    def __repr__(self):
        return f"<Operation name={self.name}>"

    @property
    def input(self):
        """Retrieves the input tensor(s) of a symbolic operation.

        Only returns the tensor(s) corresponding to the *first time*
        the operation was called.

        Returns:
            Input tensor or list of input tensors.
        """
        return self._get_node_attribute_at_index(0, "input_tensors", "input")

    @property
    def output(self):
        """Retrieves the output tensor(s) of a layer.

        Only returns the tensor(s) corresponding to the *first time*
        the operation was called.

        Returns:
            Output tensor or list of output tensors.
        """
        return self._get_node_attribute_at_index(0, "output_tensors", "output")

    def _get_node_attribute_at_index(self, node_index, attr, attr_name):
        """Private utility to retrieve an attribute (e.g. inputs) from a node.

        This is used to implement the properties:

        - output
        - input

        Args:
            node_index: Integer index of the node from which
                to retrieve the attribute.
            attr: Exact node attribute name.
            attr_name: Human-readable attribute name, for error messages.

        Returns:
            The operation's attribute `attr` at the node of index `node_index`.
        """
        if not self._inbound_nodes:
            raise AttributeError(
                f"The layer {self.name} has never been called "
                f"and thus has no defined {attr_name}."
            )
        if not len(self._inbound_nodes) > node_index:
            raise ValueError(
                f"Asked to get {attr_name} at node "
                f"{node_index}, but the operation has only "
                f"{len(self._inbound_nodes)} inbound nodes."
            )
        values = getattr(self._inbound_nodes[node_index], attr)
        if isinstance(values, list) and len(values) == 1:
            return values[0]
        else:
            return values

    def _obj_type(self):
        return "Operation"

    # Hooks for backend layer classes
    def _post_build(self):
        """Can be overridden for per backend post build actions."""
        pass

    def _setattr_hook(self, name, value):
        """Can be overridden for per backend post build actions."""
        return name, value

    def _post_track_variable(self, variable):
        """Can be overridden for per backend post track actions."""
        pass

    def _post_untrack_variable(self, variable):
        """Can be overridden for per backend post untrack actions."""
        pass
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/ops/image_test.py | keras/src/ops/image_test.py | import math
import jax
import numpy as np
import pytest
import scipy.ndimage
import tensorflow as tf
from absl.testing import parameterized
from keras.src import backend
from keras.src import testing
from keras.src.backend.common import dtypes
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.ops import image as kimage
from keras.src.ops import numpy as knp
from keras.src.ops import random as krandom
from keras.src.testing.test_utils import named_product
class ImageOpsDynamicShapeTest(testing.TestCase):
    def setUp(self):
        # Force `channels_last` for the duration of each test; the user's
        # original data format is saved here and restored in `tearDown`.
        self.data_format = backend.image_data_format()
        backend.set_image_data_format("channels_last")
        return super().setUp()
    def tearDown(self):
        # Restore the image data format saved in `setUp`.
        backend.set_image_data_format(self.data_format)
        return super().tearDown()
    def test_rgb_to_grayscale(self):
        # Symbolic shape inference: the RGB channel axis collapses to 1.
        # Test channels_last
        x = KerasTensor([None, 20, 20, 3])
        out = kimage.rgb_to_grayscale(x)
        self.assertEqual(out.shape, (None, 20, 20, 1))

        # Test channels_first
        backend.set_image_data_format("channels_first")
        x = KerasTensor([None, 3, 20, 20])
        out = kimage.rgb_to_grayscale(x)
        self.assertEqual(out.shape, (None, 1, 20, 20))
    def test_rgb_to_hsv(self):
        # Symbolic shape inference: RGB -> HSV keeps the shape unchanged.
        # Test channels_last
        x = KerasTensor([None, 20, 20, 3])
        out = kimage.rgb_to_hsv(x)
        self.assertEqual(out.shape, (None, 20, 20, 3))

        # Test channels_first
        backend.set_image_data_format("channels_first")
        x = KerasTensor([None, 3, 20, 20])
        out = kimage.rgb_to_hsv(x)
        self.assertEqual(out.shape, (None, 3, 20, 20))
    def test_hsv_to_rgb(self):
        # Symbolic shape inference: HSV -> RGB keeps the shape unchanged.
        # Test channels_last
        x = KerasTensor([None, 20, 20, 3])
        out = kimage.hsv_to_rgb(x)
        self.assertEqual(out.shape, (None, 20, 20, 3))

        # Test channels_first
        backend.set_image_data_format("channels_first")
        x = KerasTensor([None, 3, 20, 20])
        out = kimage.hsv_to_rgb(x)
        self.assertEqual(out.shape, (None, 3, 20, 20))
    def test_resize(self):
        # Symbolic shape inference: spatial dims become the target `size`,
        # even when they were dynamic (`None`) on input.
        # Test channels_last
        x = KerasTensor([None, 20, 20, 3])
        out = kimage.resize(x, size=(15, 15))
        self.assertEqual(out.shape, (None, 15, 15, 3))

        # Unbatched input with dynamic spatial dims: output is fully static.
        x = KerasTensor([None, None, 3])
        out = kimage.resize(x, size=(15, 15))
        self.assertEqual(out.shape, (15, 15, 3))

        # Test channels_first
        backend.set_image_data_format("channels_first")
        x = KerasTensor([None, 3, 20, 20])
        out = kimage.resize(x, size=(15, 15))
        self.assertEqual(out.shape, (None, 3, 15, 15))

        x = KerasTensor([3, None, None])
        out = kimage.resize(x, size=(15, 15))
        self.assertEqual(out.shape, (3, 15, 15))
    def test_affine_transform(self):
        # Symbolic shape inference: an affine transform (8-parameter rows)
        # preserves the image shape.
        # Test channels_last
        x = KerasTensor([None, 20, 20, 3])
        transform = KerasTensor([None, 8])
        out = kimage.affine_transform(x, transform)
        self.assertEqual(out.shape, (None, 20, 20, 3))

        # Test channels_first
        backend.set_image_data_format("channels_first")
        x = KerasTensor([None, 3, 20, 20])
        transform = KerasTensor([None, 8])
        out = kimage.affine_transform(x, transform)
        self.assertEqual(out.shape, (None, 3, 20, 20))
    def test_extract_patches(self):
        # Symbolic shape inference: 5x5x3 patches flatten to 75 channels;
        # the patch-grid dims follow from the stride.
        # Test channels_last
        x = KerasTensor([None, 20, 20, 3])
        p_h, p_w = 5, 5
        out = kimage.extract_patches(x, (p_h, p_w))
        self.assertEqual(out.shape, (None, 4, 4, 75))
        out = kimage.extract_patches(x, 5)
        self.assertEqual(out.shape, (None, 4, 4, 75))
        out = kimage.extract_patches(x, 5, strides=1)
        self.assertEqual(out.shape, (None, 16, 16, 75))
        out = kimage.extract_patches(x, 5, strides=(2, 3))
        self.assertEqual(out.shape, (None, 8, 6, 75))

        # Test channels_first
        backend.set_image_data_format("channels_first")
        x = KerasTensor([None, 3, 20, 20])
        p_h, p_w = 5, 5
        out = kimage.extract_patches(x, (p_h, p_w))
        self.assertEqual(out.shape, (None, 75, 4, 4))
        out = kimage.extract_patches(x, 5)
        self.assertEqual(out.shape, (None, 75, 4, 4))
        out = kimage.extract_patches(x, 5, strides=1)
        self.assertEqual(out.shape, (None, 75, 16, 16))
        out = kimage.extract_patches(x, 5, strides=(2, 3))
        self.assertEqual(out.shape, (None, 75, 8, 6))
    def test_extract_patches_3d(self):
        # Symbolic shape inference: 5x5x5x3 volumetric patches flatten to
        # 375 channels.
        # Test channels_last
        x = KerasTensor([None, 20, 20, 20, 3])
        p_d, p_h, p_w = 5, 5, 5
        out = kimage.extract_patches_3d(x, (p_d, p_h, p_w))
        self.assertEqual(out.shape, (None, 4, 4, 4, 375))
        out = kimage.extract_patches_3d(x, 5)
        self.assertEqual(out.shape, (None, 4, 4, 4, 375))

        # Test channels_first
        backend.set_image_data_format("channels_first")
        x = KerasTensor([None, 3, 20, 20, 20])
        p_d, p_h, p_w = 5, 5, 5
        out = kimage.extract_patches_3d(x, (p_d, p_h, p_w))
        self.assertEqual(out.shape, (None, 375, 4, 4, 4))
        out = kimage.extract_patches_3d(x, 5)
        self.assertEqual(out.shape, (None, 375, 4, 4, 4))
    def test_map_coordinates(self):
        # Symbolic shape inference: the output takes the coordinate grid's
        # shape minus its leading (per-input-dim) axis.
        input = KerasTensor([20, 20, None])
        coordinates = KerasTensor([3, 15, 15, None])
        out = kimage.map_coordinates(input, coordinates, 0)
        self.assertEqual(out.shape, coordinates.shape[1:])
def test_pad_images(self):
# Test channels_last
x = KerasTensor([None, 15, 25, 3])
out = kimage.pad_images(x, 2, 3, target_height=20, target_width=30)
self.assertEqual(out.shape, (None, 20, 30, 3))
x = KerasTensor([None, None, 3])
out = kimage.pad_images(x, 2, 3, target_height=20, target_width=30)
self.assertEqual(out.shape, (20, 30, 3))
# Test unknown shape
x = KerasTensor([None, None, 3])
out = kimage.pad_images(x, 2, 3, 2, 3)
self.assertEqual(out.shape, (None, None, 3))
# Test channels_first
backend.set_image_data_format("channels_first")
x = KerasTensor([None, 3, 15, 25])
out = kimage.pad_images(x, 2, 3, target_height=20, target_width=30)
self.assertEqual(out.shape, (None, 3, 20, 30))
x = KerasTensor([3, None, None])
out = kimage.pad_images(x, 2, 3, target_height=20, target_width=30)
self.assertEqual(out.shape, (3, 20, 30))
def test_crop_images(self):
# Test channels_last
x = KerasTensor([None, 15, 25, 3])
out = kimage.crop_images(x, 2, 3, target_height=10, target_width=20)
self.assertEqual(out.shape, (None, 10, 20, 3))
x = KerasTensor([None, None, 3])
out = kimage.crop_images(x, 2, 3, target_height=10, target_width=20)
self.assertEqual(out.shape, (10, 20, 3))
# Test channels_first
backend.set_image_data_format("channels_first")
x = KerasTensor([None, 3, 15, 25])
out = kimage.crop_images(x, 2, 3, target_height=10, target_width=20)
self.assertEqual(out.shape, (None, 3, 10, 20))
x = KerasTensor([3, None, None])
out = kimage.crop_images(x, 2, 3, target_height=10, target_width=20)
self.assertEqual(out.shape, (3, 10, 20))
def test_perspective_transform(self):
# Test channels_last
x = KerasTensor([None, 20, 20, 3])
start_points = KerasTensor([None, 4, 2])
end_points = KerasTensor([None, 4, 2])
out = kimage.perspective_transform(x, start_points, end_points)
self.assertEqual(out.shape, (None, 20, 20, 3))
# Test channels_first
backend.set_image_data_format("channels_first")
x = KerasTensor([None, 3, 20, 20])
start_points = KerasTensor([None, 4, 2])
end_points = KerasTensor([None, 4, 2])
out = kimage.perspective_transform(x, start_points, end_points)
self.assertEqual(out.shape, (None, 3, 20, 20))
def test_gaussian_blur(self):
# Test channels_last
x = KerasTensor([None, 20, 20, 3])
out = kimage.gaussian_blur(x)
self.assertEqual(out.shape, (None, 20, 20, 3))
# Test channels_first
backend.set_image_data_format("channels_first")
x = KerasTensor([None, 3, 20, 20])
out = kimage.gaussian_blur(x)
self.assertEqual(out.shape, (None, 3, 20, 20))
def test_elastic_transform(self):
# Test channels_last
x = KerasTensor([None, 20, 20, 3])
out = kimage.elastic_transform(x)
self.assertEqual(out.shape, (None, 20, 20, 3))
# Test channels_first
backend.set_image_data_format("channels_first")
x = KerasTensor([None, 3, 20, 20])
out = kimage.elastic_transform(x)
self.assertEqual(out.shape, (None, 3, 20, 20))
def test_scale_and_translate(self):
images = KerasTensor([None, 20, 20, 3])
output_shape = (None, 25, 25, 3)
scale = KerasTensor([2])
translation = KerasTensor([2])
out = kimage.scale_and_translate(
images,
output_shape=output_shape,
scale=scale,
translation=translation,
spatial_dims=(1, 2),
method="linear",
)
self.assertEqual(out.shape, output_shape)
class ImageOpsStaticShapeTest(testing.TestCase):
    """Static-shape checks for `keras.ops.image`.

    Each test asserts fully-known output shapes, first with the default
    `channels_last` layout and then after switching the global image data
    format to `channels_first`. `setUp`/`tearDown` save and restore the
    global data format so tests don't leak state into each other.

    Fix: the four `test_map_coordinates_*` TensorFlow-specific tests now
    check the backend and skip BEFORE constructing any `tf` tensors —
    previously `tf.ones`/`tf.convert_to_tensor` ran first, doing work (and
    potentially touching a lazily-imported `tf` module that may not be
    installed) on backends the test was about to skip anyway.
    """

    def setUp(self):
        # Defaults to channels_last
        self.data_format = backend.image_data_format()
        backend.set_image_data_format("channels_last")
        return super().setUp()

    def tearDown(self):
        backend.set_image_data_format(self.data_format)
        return super().tearDown()

    def test_rgb_to_grayscale(self):
        # Test channels_last
        x = KerasTensor([20, 20, 3])
        out = kimage.rgb_to_grayscale(x)
        self.assertEqual(out.shape, (20, 20, 1))
        # Test channels_first
        backend.set_image_data_format("channels_first")
        x = KerasTensor([3, 20, 20])
        out = kimage.rgb_to_grayscale(x)
        self.assertEqual(out.shape, (1, 20, 20))

    def test_rgb_to_hsv(self):
        # Test channels_last
        x = KerasTensor([20, 20, 3])
        out = kimage.rgb_to_hsv(x)
        self.assertEqual(out.shape, (20, 20, 3))
        # Test channels_first
        backend.set_image_data_format("channels_first")
        x = KerasTensor([3, 20, 20])
        out = kimage.rgb_to_hsv(x)
        self.assertEqual(out.shape, (3, 20, 20))

    def test_hsv_to_rgb(self):
        # Test channels_last
        x = KerasTensor([20, 20, 3])
        out = kimage.hsv_to_rgb(x)
        self.assertEqual(out.shape, (20, 20, 3))
        # Test channels_first
        backend.set_image_data_format("channels_first")
        x = KerasTensor([3, 20, 20])
        out = kimage.hsv_to_rgb(x)
        self.assertEqual(out.shape, (3, 20, 20))

    def test_resize(self):
        # Test channels_last
        x = KerasTensor([20, 20, 3])
        out = kimage.resize(x, size=(15, 15))
        self.assertEqual(out.shape, (15, 15, 3))
        # Test channels_first
        backend.set_image_data_format("channels_first")
        x = KerasTensor([3, 20, 20])
        out = kimage.resize(x, size=(15, 15))
        self.assertEqual(out.shape, (3, 15, 15))

    def test_affine_transform(self):
        # Test channels_last
        x = KerasTensor([20, 20, 3])
        transform = KerasTensor([8])
        out = kimage.affine_transform(x, transform)
        self.assertEqual(out.shape, (20, 20, 3))
        # Test channels_first
        backend.set_image_data_format("channels_first")
        x = KerasTensor([3, 20, 20])
        transform = KerasTensor([8])
        out = kimage.affine_transform(x, transform)
        self.assertEqual(out.shape, (3, 20, 20))

    def test_extract_patches(self):
        # Test channels_last
        x = KerasTensor([20, 20, 3])
        p_h, p_w = 5, 5
        out = kimage.extract_patches(x, (p_h, p_w))
        self.assertEqual(out.shape, (4, 4, 75))
        out = kimage.extract_patches(x, 5)
        self.assertEqual(out.shape, (4, 4, 75))
        # Test channels_first
        backend.set_image_data_format("channels_first")
        x = KerasTensor([3, 20, 20])
        p_h, p_w = 5, 5
        out = kimage.extract_patches(x, (p_h, p_w))
        self.assertEqual(out.shape, (75, 4, 4))
        out = kimage.extract_patches(x, 5)
        self.assertEqual(out.shape, (75, 4, 4))

    def test_extract_patches_3d(self):
        # Test channels_last
        x = KerasTensor([20, 20, 20, 3])
        p_d, p_h, p_w = 5, 5, 5
        out = kimage.extract_patches_3d(x, (p_d, p_h, p_w))
        self.assertEqual(out.shape, (4, 4, 4, 375))
        out = kimage.extract_patches_3d(x, 5)
        self.assertEqual(out.shape, (4, 4, 4, 375))
        # Test channels_first
        backend.set_image_data_format("channels_first")
        x = KerasTensor([3, 20, 20, 20])
        p_d, p_h, p_w = 5, 5, 5
        out = kimage.extract_patches_3d(x, (p_d, p_h, p_w))
        self.assertEqual(out.shape, (375, 4, 4, 4))
        out = kimage.extract_patches_3d(x, 5)
        self.assertEqual(out.shape, (375, 4, 4, 4))

    def test_map_coordinates(self):
        input = KerasTensor([20, 20, 3])
        coordinates = KerasTensor([3, 15, 15, 3])
        out = kimage.map_coordinates(input, coordinates, 0)
        self.assertEqual(out.shape, coordinates.shape[1:])

    def test_map_coordinates_uint8(self):
        # Skip before building any TensorFlow inputs: on non-TF backends
        # the lazily-imported `tf` module may not even be installed.
        if backend.backend() != "tensorflow":
            pytest.skip("Skipping test because the backend is not TensorFlow.")
        image_uint8 = tf.ones((1, 1, 3), dtype=tf.uint8)
        coordinates = tf.convert_to_tensor([-1.0, 0.0, 0.0])[..., None, None]
        out = kimage.map_coordinates(
            image_uint8, coordinates, order=1, fill_mode="constant"
        )
        assert out.shape == coordinates.shape[1:]

    def test_map_coordinates_float32(self):
        # Skip before building any TensorFlow inputs.
        if backend.backend() != "tensorflow":
            pytest.skip("Skipping test because the backend is not TensorFlow.")
        image_float32 = tf.ones((1, 1, 3), dtype=tf.float32)
        coordinates = tf.convert_to_tensor([-1.0, 0.0, 0.0])[..., None, None]
        out = kimage.map_coordinates(
            image_float32, coordinates, order=1, fill_mode="constant"
        )
        assert out.shape == coordinates.shape[1:]

    def test_map_coordinates_nearest(self):
        # Skip before building any TensorFlow inputs.
        if backend.backend() != "tensorflow":
            pytest.skip("Skipping test because the backend is not TensorFlow.")
        image_uint8 = tf.ones((1, 1, 3), dtype=tf.uint8)
        coordinates = tf.convert_to_tensor([-1.0, 0.0, 0.0])[..., None, None]
        out = kimage.map_coordinates(
            image_uint8, coordinates, order=1, fill_mode="nearest"
        )
        assert out.shape == coordinates.shape[1:]

    def test_map_coordinates_manual_cast(self):
        # Skip before building any TensorFlow inputs.
        if backend.backend() != "tensorflow":
            pytest.skip("Skipping test because the backend is not TensorFlow.")
        image_uint8 = tf.ones((1, 1, 3), dtype=tf.uint8)
        coordinates = tf.convert_to_tensor([-1.0, 0.0, 0.0])[..., None, None]
        image_uint8_casted = tf.cast(image_uint8, dtype=tf.float32)
        out = tf.cast(
            kimage.map_coordinates(
                image_uint8_casted, coordinates, order=1, fill_mode="constant"
            ),
            dtype=tf.uint8,
        )
        assert out.shape == coordinates.shape[1:]

    def test_pad_images(self):
        # Test channels_last
        x = KerasTensor([15, 25, 3])
        out = kimage.pad_images(x, 2, 3, target_height=20, target_width=30)
        self.assertEqual(out.shape, (20, 30, 3))
        x_batch = KerasTensor([2, 15, 25, 3])
        out_batch = kimage.pad_images(
            x_batch, 2, 3, target_height=20, target_width=30
        )
        self.assertEqual(out_batch.shape, (2, 20, 30, 3))
        # Test channels_first
        backend.set_image_data_format("channels_first")
        x = KerasTensor([3, 15, 25])
        out = kimage.pad_images(x, 2, 3, target_height=20, target_width=30)
        self.assertEqual(out.shape, (3, 20, 30))
        x_batch = KerasTensor([2, 3, 15, 25])
        out_batch = kimage.pad_images(
            x_batch, 2, 3, target_height=20, target_width=30
        )
        self.assertEqual(out_batch.shape, (2, 3, 20, 30))

    def test_crop_images(self):
        # Test channels_last
        x = KerasTensor([15, 25, 3])
        out = kimage.crop_images(x, 2, 3, target_height=10, target_width=20)
        self.assertEqual(out.shape, (10, 20, 3))
        x_batch = KerasTensor([2, 15, 25, 3])
        out_batch = kimage.crop_images(
            x_batch, 2, 3, target_height=10, target_width=20
        )
        self.assertEqual(out_batch.shape, (2, 10, 20, 3))
        # Test channels_first
        backend.set_image_data_format("channels_first")
        x = KerasTensor([3, 15, 25])
        out = kimage.crop_images(x, 2, 3, target_height=10, target_width=20)
        self.assertEqual(out.shape, (3, 10, 20))
        # Test channels_first and batched
        x_batch = KerasTensor([2, 3, 15, 25])
        out_batch = kimage.crop_images(
            x_batch, 2, 3, target_height=10, target_width=20
        )
        self.assertEqual(out_batch.shape, (2, 3, 10, 20))

    def test_perspective_transform(self):
        # Test channels_last
        x = KerasTensor([20, 20, 3])
        start_points = KerasTensor([4, 2])
        end_points = KerasTensor([4, 2])
        out = kimage.perspective_transform(x, start_points, end_points)
        self.assertEqual(out.shape, (20, 20, 3))
        # Test channels_first
        backend.set_image_data_format("channels_first")
        x = KerasTensor([3, 20, 20])
        start_points = KerasTensor([4, 2])
        end_points = KerasTensor([4, 2])
        out = kimage.perspective_transform(x, start_points, end_points)
        self.assertEqual(out.shape, (3, 20, 20))

    def test_gaussian_blur(self):
        # Test channels_last
        x = KerasTensor([20, 20, 3])
        kernel_size = KerasTensor(
            [
                2,
            ]
        )
        sigma = KerasTensor(
            [
                2,
            ]
        )
        out = kimage.gaussian_blur(x, kernel_size, sigma)
        self.assertEqual(out.shape, (20, 20, 3))
        # Test channels_first
        backend.set_image_data_format("channels_first")
        x = KerasTensor([3, 20, 20])
        kernel_size = KerasTensor(
            [
                2,
            ]
        )
        sigma = KerasTensor(
            [
                2,
            ]
        )
        out = kimage.gaussian_blur(x, kernel_size, sigma)
        self.assertEqual(out.shape, (3, 20, 20))

    def test_elastic_transform(self):
        # Test channels_last
        x = KerasTensor([20, 20, 3])
        out = kimage.elastic_transform(x)
        self.assertEqual(out.shape, (20, 20, 3))
        # Test channels_first
        backend.set_image_data_format("channels_first")
        x = KerasTensor([3, 20, 20])
        out = kimage.elastic_transform(x)
        self.assertEqual(out.shape, (3, 20, 20))

    def test_scale_and_translate(self):
        images = KerasTensor([20, 20, 3])
        output_shape = (25, 25, 3)
        scale = KerasTensor([2])
        translation = KerasTensor([2])
        out = kimage.scale_and_translate(
            images,
            output_shape=output_shape,
            scale=scale,
            translation=translation,
            spatial_dims=(0, 1),
            method="linear",
        )
        self.assertEqual(out.shape, output_shape)
# Interpolation name -> spline order forwarded (via the helpers below) to
# `scipy.ndimage.map_coordinates`.
AFFINE_TRANSFORM_INTERPOLATIONS = {  # map to order
    "nearest": 0,
    "bilinear": 1,
}
def _compute_affine_transform_coordinates(image, transform):
    """Turn flat 8-parameter affine transform(s) into sampling coordinates.

    The (optionally batched) 8-vector is rearranged into (row, col) index
    order, promoted to a 3x3 homogeneous matrix, applied to a dense index
    grid, and its translation column is added as a separate offset.
    Returns coordinates of shape (3, H, W, C) for an unbatched image, or
    (B, 3, H, W, C) for a batch.
    """
    image = image.copy()
    transform = transform.copy()
    unbatched = len(image.shape) == 3
    if unbatched:
        image = np.expand_dims(image, axis=0)
        transform = np.expand_dims(transform, axis=0)
    batch_size = image.shape[0]
    # Dense grid of (h, w, c) indices, one 3-vector per output element.
    axes = [np.arange(size) for size in image.shape[1:]]
    grid = np.meshgrid(*axes, indexing="ij")
    indices = np.stack(grid, axis=-1)
    indices = np.tile(indices, (batch_size, 1, 1, 1, 1))
    # Swap entries so the matrix acts on (row, col) index order.
    transform[:, [0, 4]] = transform[:, [4, 0]]
    transform[:, [2, 5]] = transform[:, [5, 2]]
    # Promote the 8-vector to a full 3x3 homogeneous matrix.
    transform = np.pad(transform, pad_width=[[0, 0], [0, 1]], constant_values=1)
    transform = np.reshape(transform, (batch_size, 3, 3))
    # Split off the translation column and zero it inside the matrix.
    offset = np.pad(transform[:, 0:2, 2], pad_width=[[0, 0], [0, 1]])
    transform[:, 0:2, 2] = 0
    # Apply the linear part, then add back the translation offset.
    coordinates = np.einsum("Bhwij, Bjk -> Bhwik", indices, transform)
    coordinates = np.moveaxis(coordinates, source=-1, destination=1)
    coordinates += np.reshape(offset, (*offset.shape, 1, 1, 1))
    if unbatched:
        coordinates = np.squeeze(coordinates, axis=0)
    return coordinates
def _fixed_map_coordinates(
    input, coordinates, order, fill_mode="constant", fill_value=0.0
):
    """Reference `map_coordinates` with corrected boundary handling.

    SciPy's implementation of map_coordinates handles boundaries
    incorrectly unless mode='reflect'; for order=1 this only affects
    interpolation outside the bounds of the original array
    (https://github.com/scipy/scipy/issues/2640). The workaround: pad the
    array far enough that every requested coordinate lands inside it,
    shift the coordinates accordingly, then sample the padded array.
    """
    # Per-axis (before, after) padding so all coordinates fall in bounds.
    pad_amounts = []
    for axis_coords, axis_size in zip(coordinates, input.shape):
        before = max(-np.floor(axis_coords.min()).astype(int) + 1, 0)
        after = max(np.ceil(axis_coords.max()).astype(int) + 1 - axis_size, 0)
        pad_amounts.append((before, after))
    shifted_coords = [c + p[0] for c, p in zip(coordinates, pad_amounts)]
    # Translate Keras fill-mode names to their np.pad equivalents.
    np_pad_mode = {
        "nearest": "edge",
        "mirror": "reflect",
        "reflect": "symmetric",
    }.get(fill_mode, fill_mode)
    if fill_mode == "constant":
        padded = np.pad(
            input, pad_amounts, mode=np_pad_mode, constant_values=fill_value
        )
    else:
        padded = np.pad(input, pad_amounts, mode=np_pad_mode)
    return scipy.ndimage.map_coordinates(
        padded, shifted_coords, order=order, mode=fill_mode, cval=fill_value
    )
def _perspective_transform_numpy(
    images,
    start_points,
    end_points,
    interpolation="bilinear",
    fill_value=0,
    data_format=None,
):
    """Reference NumPy perspective warp.

    Derives a homography from `start_points`/`end_points`, maps every
    output pixel back to source coordinates, and resamples each channel
    with `_fixed_map_coordinates` (constant fill outside the image).
    Accepts single images or batches in either data format.
    """
    data_format = backend.standardize_data_format(data_format)
    need_squeeze = len(images.shape) == 3
    if need_squeeze:
        images = np.expand_dims(images, axis=0)
    if len(start_points.shape) == 2:
        start_points = np.expand_dims(start_points, axis=0)
    if len(end_points.shape) == 2:
        end_points = np.expand_dims(end_points, axis=0)
    # Internally always work in channels_last.
    if data_format == "channels_first":
        images = np.transpose(images, (0, 2, 3, 1))
    batch_size, height, width, channels = images.shape
    transforms = _compute_homography_matrix(start_points, end_points)
    if len(transforms.shape) == 1:
        transforms = np.expand_dims(transforms, axis=0)
    # Broadcast a single transform over the whole batch.
    if transforms.shape[0] == 1 and batch_size > 1:
        transforms = np.tile(transforms, (batch_size, 1))
    grid_x, grid_y = np.meshgrid(
        np.arange(width, dtype=np.float32),
        np.arange(height, dtype=np.float32),
        indexing="xy",
    )
    order = AFFINE_TRANSFORM_INTERPOLATIONS[interpolation]
    output = np.empty((batch_size, height, width, channels))
    for batch_index in range(batch_size):
        a0, a1, a2, a3, a4, a5, a6, a7 = transforms[batch_index]
        # Projective mapping from output pixel to source location.
        denom = a6 * grid_x + a7 * grid_y + 1.0
        src_x = (a0 * grid_x + a1 * grid_y + a2) / denom
        src_y = (a3 * grid_x + a4 * grid_y + a5) / denom
        coords = np.stack([src_y.ravel(), src_x.ravel()], axis=0)
        resampled = [
            _fixed_map_coordinates(
                images[batch_index, :, :, channel],
                coords,
                order=order,
                fill_mode="constant",
                fill_value=fill_value,
            ).reshape(height, width)
            for channel in range(channels)
        ]
        output[batch_index] = np.stack(resampled, axis=-1)
    if data_format == "channels_first":
        output = np.transpose(output, (0, 3, 1, 2))
    if need_squeeze:
        output = np.squeeze(output, axis=0)
    return output
def gaussian_blur_np(
    images,
    kernel_size,
    sigma,
    data_format=None,
):
    """Reference NumPy Gaussian blur with zero padding.

    Applies the same normalized Gaussian kernel to every channel of a
    single image (rank 3) or batch (rank 4). `data_format` is compared
    literally against "channels_first"; anything else (including None) is
    treated as channels_last.
    """

    def _gaussian_1d(size, sig, dtype):
        # Centered sample offsets, e.g. size=3 -> [-1, 0, 1].
        offsets = np.arange(size, dtype=dtype) - (size - 1) / 2
        weights = np.exp(-0.5 * (offsets / sig) ** 2)
        return weights / np.sum(weights)

    def _gaussian_kernel(size, sig, channels, dtype):
        # NOTE: outer(y, x) yields a kernel of shape (size[1], size[0]);
        # this mirrors the op being tested, which uses square kernels.
        kernel2d = np.outer(
            _gaussian_1d(size[1], sig[1], dtype),
            _gaussian_1d(size[0], sig[0], dtype),
        )
        kernel = np.tile(kernel2d[:, :, np.newaxis], (1, 1, channels))
        return kernel.astype(dtype)

    images = np.asarray(images)
    input_dtype = images.dtype
    kernel_size = np.asarray(kernel_size)
    if len(images.shape) not in (3, 4):
        raise ValueError(
            "Invalid images rank: expected rank 3 (single image) "
            "or rank 4 (batch of images). Received input with shape: "
            f"images.shape={images.shape}"
        )
    need_squeeze = len(images.shape) == 3
    if need_squeeze:
        images = np.expand_dims(images, axis=0)
    if data_format == "channels_first":
        images = np.transpose(images, (0, 2, 3, 1))
    num_channels = images.shape[-1]
    kernel = _gaussian_kernel(kernel_size, sigma, num_channels, input_dtype)
    batch_size, height, width, _ = images.shape
    pad_h = kernel_size[0] // 2
    pad_w = kernel_size[1] // 2
    padded_images = np.pad(
        images,
        ((0, 0), (pad_h, pad_h), (pad_w, pad_w), (0, 0)),
        mode="constant",
    )
    blurred_images = np.zeros_like(images)
    kernel4d = kernel.reshape(
        (1, kernel.shape[0], kernel.shape[1], num_channels)
    )
    # Direct (non-FFT) convolution: one weighted window sum per pixel.
    for b in range(batch_size):
        sample = padded_images[b : b + 1, :, :, :]
        for i in range(height):
            for j in range(width):
                window = sample[
                    :, i : i + kernel_size[0], j : j + kernel_size[1], :
                ]
                blurred_images[b, i, j, :] = np.sum(
                    window * kernel4d, axis=(1, 2)
                )
    if data_format == "channels_first":
        blurred_images = np.transpose(blurred_images, (0, 3, 1, 2))
    if need_squeeze:
        blurred_images = np.squeeze(blurred_images, axis=0)
    return blurred_images
def elastic_transform_np(
    images,
    alpha=20.0,
    sigma=5.0,
    interpolation="bilinear",
    fill_mode="reflect",
    fill_value=0.0,
    seed=None,
    data_format=None,
):
    """Reference NumPy elastic deformation.

    Draws Gaussian-noise displacement fields `dx`/`dy`, smooths them with
    `gaussian_blur_np`, scales the result by `alpha`, and resamples each
    channel with `_fixed_map_coordinates`.

    NOTE(review): `np.random.default_rng([seed, 0])` needs an integer
    seed — passing `seed=None` would raise; presumably callers always
    supply one. Verify against call sites.
    """
    data_format = backend.standardize_data_format(data_format)
    images = np.asarray(images)
    input_dtype = images.dtype
    alpha = np.asarray(alpha, dtype=input_dtype)
    sigma = np.asarray(sigma, dtype=input_dtype)
    # `| 1` forces an odd kernel spanning roughly six sigmas.
    kernel_size = (int(6 * sigma) | 1, int(6 * sigma) | 1)
    need_squeeze = False
    if len(images.shape) == 3:
        images = np.expand_dims(images, axis=0)
        need_squeeze = True
    if data_format == "channels_last":
        batch_size, height, width, channels = images.shape
        channel_axis = -1
    else:
        batch_size, channels, height, width = images.shape
        channel_axis = 1
    # Seed sequence [seed, 0] keeps dx/dy reproducible for a given seed.
    rng = np.random.default_rng([seed, 0])
    dx = (
        rng.normal(size=(batch_size, height, width), loc=0.0, scale=1.0).astype(
            input_dtype
        )
        * sigma
    )
    dy = (
        rng.normal(size=(batch_size, height, width), loc=0.0, scale=1.0).astype(
            input_dtype
        )
        * sigma
    )
    # Smooth the raw displacement fields (added as a singleton channel).
    dx = gaussian_blur_np(
        np.expand_dims(dx, axis=channel_axis),
        kernel_size=kernel_size,
        sigma=(sigma, sigma),
        data_format=data_format,
    )
    dy = gaussian_blur_np(
        np.expand_dims(dy, axis=channel_axis),
        kernel_size=kernel_size,
        sigma=(sigma, sigma),
        data_format=data_format,
    )
    # NOTE(review): axis-less squeeze drops *all* unit dims, including
    # the batch axis when batch_size == 1 — confirm intended for the
    # inputs used here.
    dx = np.squeeze(dx)
    dy = np.squeeze(dy)
    x, y = np.meshgrid(np.arange(width), np.arange(height))
    x, y = x[None, :, :], y[None, :, :]
    distorted_x = x + alpha * dx
    distorted_y = y + alpha * dy
    transformed_images = np.zeros_like(images)
    # Resample every (batch, channel) plane at the distorted coordinates.
    if data_format == "channels_last":
        for i in range(channels):
            transformed_images[..., i] = np.stack(
                [
                    _fixed_map_coordinates(
                        images[b, ..., i],
                        [distorted_y[b], distorted_x[b]],
                        order=AFFINE_TRANSFORM_INTERPOLATIONS[interpolation],
                        fill_mode=fill_mode,
                        fill_value=fill_value,
                    )
                    for b in range(batch_size)
                ]
            )
    else:
        for i in range(channels):
            transformed_images[:, i, :, :] = np.stack(
                [
                    _fixed_map_coordinates(
                        images[b, i, ...],
                        [distorted_y[b], distorted_x[b]],
                        order=AFFINE_TRANSFORM_INTERPOLATIONS[interpolation],
                        fill_mode=fill_mode,
                        fill_value=fill_value,
                    )
                    for b in range(batch_size)
                ]
            )
    if need_squeeze:
        transformed_images = np.squeeze(transformed_images, axis=0)
    transformed_images = transformed_images.astype(input_dtype)
    return transformed_images
def _compute_homography_matrix(start_points, end_points):
start_x1, start_y1 = start_points[:, 0, 0], start_points[:, 0, 1]
start_x2, start_y2 = start_points[:, 1, 0], start_points[:, 1, 1]
start_x3, start_y3 = start_points[:, 2, 0], start_points[:, 2, 1]
start_x4, start_y4 = start_points[:, 3, 0], start_points[:, 3, 1]
end_x1, end_y1 = end_points[:, 0, 0], end_points[:, 0, 1]
end_x2, end_y2 = end_points[:, 1, 0], end_points[:, 1, 1]
end_x3, end_y3 = end_points[:, 2, 0], end_points[:, 2, 1]
end_x4, end_y4 = end_points[:, 3, 0], end_points[:, 3, 1]
coefficient_matrix = np.stack(
[
np.stack(
[
end_x1,
end_y1,
np.ones_like(end_x1),
np.zeros_like(end_x1),
np.zeros_like(end_x1),
np.zeros_like(end_x1),
-start_x1 * end_x1,
-start_x1 * end_y1,
],
axis=-1,
),
np.stack(
[
np.zeros_like(end_x1),
np.zeros_like(end_x1),
np.zeros_like(end_x1),
end_x1,
end_y1,
np.ones_like(end_x1),
-start_y1 * end_x1,
-start_y1 * end_y1,
],
axis=-1,
),
np.stack(
[
end_x2,
end_y2,
np.ones_like(end_x2),
np.zeros_like(end_x2),
np.zeros_like(end_x2),
np.zeros_like(end_x2),
-start_x2 * end_x2,
-start_x2 * end_y2,
],
axis=-1,
),
np.stack(
[
np.zeros_like(end_x2),
np.zeros_like(end_x2),
np.zeros_like(end_x2),
end_x2,
end_y2,
np.ones_like(end_x2),
-start_y2 * end_x2,
-start_y2 * end_y2,
],
axis=-1,
),
np.stack(
[
end_x3,
end_y3,
np.ones_like(end_x3),
np.zeros_like(end_x3),
np.zeros_like(end_x3),
np.zeros_like(end_x3),
-start_x3 * end_x3,
-start_x3 * end_y3,
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/ops/symbolic_arguments_test.py | keras/src/ops/symbolic_arguments_test.py | from keras.src import testing
from keras.src import tree
from keras.src.backend import KerasTensor
from keras.src.ops.symbolic_arguments import SymbolicArguments
class SymbolicArgumentsTest(testing.TestCase):
    """Tests `SymbolicArguments` tracking of KerasTensors in args/kwargs,
    plus its `convert` and `fill_in` helpers."""

    # Testing multiple args and empty kwargs
    def test_args(self):
        shape = (2, 3, 4)
        a = KerasTensor(shape=shape)
        b = KerasTensor(shape=shape)
        args = SymbolicArguments(
            (
                a,
                b,
            ),
            {},
        )
        self.assertEqual(args.keras_tensors, [a, b])
        self.assertEqual(args._flat_arguments, [a, b])
        self.assertEqual(args._single_positional_tensor, None)

    # Testing single arg and single position tensor
    def test_args_single_arg(self):
        shape = (2, 3, 4)
        a = KerasTensor(shape=shape)
        # NOTE: `(a)` is just `a`, not a 1-tuple — this deliberately
        # exercises the single-positional-tensor fast path.
        args = SymbolicArguments((a))
        self.assertEqual(args.keras_tensors, [a])
        self.assertEqual(args._flat_arguments, [a])
        self.assertEqual(len(args.kwargs), 0)
        self.assertEqual(isinstance(args.args[0], KerasTensor), True)
        self.assertEqual(args._single_positional_tensor, a)

    # Testing kwargs
    def test_kwargs(self):
        shape = (2, 3, 4)
        a = KerasTensor(shape=shape)
        b = KerasTensor(shape=shape)
        c = KerasTensor(shape=shape)
        # The kwargs key here is the *int* 1 (test_fill_in uses "1").
        args = SymbolicArguments(
            (
                a,
                b,
            ),
            {1: c},
        )
        self.assertEqual(args.keras_tensors, [a, b, c])
        self.assertEqual(args._flat_arguments, [a, b, c])
        self.assertEqual(args._single_positional_tensor, None)

    # Testing conversion function with args and kwargs
    def test_conversion_fn(self):
        shape = (2, 3, 4)
        a = KerasTensor(shape=shape)
        b = KerasTensor(shape=shape)
        c = KerasTensor(shape=shape)
        sym_args = SymbolicArguments(
            (
                a,
                b,
            ),
            {1: c},
        )
        (value, _) = sym_args.convert(lambda x: x**2)
        args1 = value[0][0]
        self.assertIsInstance(args1, KerasTensor)
        # The converted tensor must match a tree-mapped square of `a`
        # in both shape and dtype.
        mapped_value = tree.map_structure(lambda x: x**2, a)
        self.assertEqual(mapped_value.shape, args1.shape)
        self.assertEqual(mapped_value.dtype, args1.dtype)

    # Testing fill in function with single args only
    def test_fill_in_single_arg(self):
        shape = (2, 3, 4)
        a = KerasTensor(shape=shape)
        # Map the tensor (keyed by id) to a concrete stand-in value.
        tensor_dict = {id(a): 3}
        sym_args = SymbolicArguments((a))
        # Call the method to be tested
        result, _ = sym_args.fill_in(tensor_dict)
        self.assertEqual(result, (3,))

    # Testing fill in function with multiple args
    def test_fill_in_multiple_arg(self):
        shape = (2, 3, 4)
        a = KerasTensor(shape=shape)
        b = KerasTensor(shape=shape)
        # Only `b` has a concrete value; `a` fills in as None.
        tensor_dict = {id(b): 2}
        sym_args = SymbolicArguments((a, b))
        # Call the method to be tested
        result, _ = sym_args.fill_in(tensor_dict)
        self.assertEqual(result, ((None, 2),))

    # Testing fill in function for args and kwargs
    def test_fill_in(self):
        shape1 = (2, 3, 4)
        shape2 = (3, 2, 4)
        a = KerasTensor(shape=shape1)
        b = KerasTensor(shape=shape2)
        c = KerasTensor(shape=shape2)
        dictionary = {id(a): 3, id(c): 2}
        # NOTE: the kwargs key here is the *string* "1" (cf. test_kwargs).
        sym_args = SymbolicArguments(
            (
                a,
                b,
            ),
            {"1": c},
        )
        (values, _) = sym_args.fill_in(dictionary)
        self.assertEqual(values, ((3, None), {"1": 2}))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/ops/core_test.py | keras/src/ops/core_test.py | import operator
from unittest.mock import Mock
import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import losses
from keras.src import models
from keras.src import ops
from keras.src import optimizers
from keras.src import testing
from keras.src import tree
from keras.src.backend.common import dtypes
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.layers.core import input_layer
from keras.src.ops import core
from keras.src.saving import object_registration
from keras.src.testing.test_utils import named_product
class CoreOpsDynamicShapeTest(testing.TestCase):
    """Symbolic-shape checks: `keras.ops.core` ops must propagate
    unknown (`None`) dimensions through their output shapes."""

    def test_associative_scan(self):
        xs = (KerasTensor((5, None)), KerasTensor((5, None)))
        ys = core.associative_scan(
            f=lambda x, y: (x[0] + y[0], x[1] + y[1]), elems=xs, axis=0
        )
        self.assertEqual(ys[0].shape, (5, None))

        # sum two tuples of unknown (but same) length at axis
        def _fn(x, y):
            return tuple([x[i] + y[i] for i in range(len(x))])

        ys = core.associative_scan(f=_fn, elems=xs, axis=1)
        self.assertEqual(ys[0].shape, (5, None))

    def test_cast(self):
        x = KerasTensor((3, 5, None), dtype="float32")
        self.assertEqual(core.cast(x, "float16").shape, (3, 5, None))

    def test_convert_to_tensor(self):
        x = KerasTensor((2, None))
        self.assertEqual(core.convert_to_tensor(x).shape, (2, None))

    def test_fori_loop(self):
        def body_fun(i, x):
            return x + i

        initial_value = KerasTensor((3, 5, None))
        self.assertEqual(
            core.fori_loop(0, 10, body_fun, initial_value).shape, (3, 5, None)
        )

    def test_map(self):
        def f(x):
            return x**2

        xs = KerasTensor((None, 5))
        self.assertEqual(core.map(f, xs).shape, (None, 5))

        # Test nested output
        def f2(x):
            return {"a": x**2, "b": x * 10}

        xs = KerasTensor((None, 5))
        ys = core.map(f2, xs)
        self.assertEqual(ys["a"].shape, (None, 5))
        self.assertEqual(ys["b"].shape, (None, 5))

        # Test nested input
        def f3(x):
            return x[0] + x[1]

        xs = (KerasTensor((None, 5)), KerasTensor((None, 5)))
        self.assertEqual(core.map(f3, xs).shape, (None, 5))

    def test_saturate_cast(self):
        x = KerasTensor((3, 5, None), dtype="float32")
        self.assertEqual(core.saturate_cast(x, "float16").shape, (3, 5, None))

    def test_scan(self):
        def f(carry, xs):
            xs = xs + carry
            return carry, carry

        init = KerasTensor((None,))
        xs = KerasTensor((6, None))
        carry, result = core.scan(f, init, xs)
        self.assertEqual(carry.shape, (None,))
        self.assertEqual(result.shape, (6, None))

        # With xs=None, `length` sets the size of the stacked outputs.
        def f2(carry, _):
            return carry, carry

        carry, result = core.scan(f2, init, xs=None, length=3)
        self.assertEqual(carry.shape, (None,))
        self.assertEqual(result.shape, (3, None))

    # Scatter doesn't support dynamic shape.
    def test_scatter_update(self):
        inputs = KerasTensor((4, None))
        indices = KerasTensor((5, 2))
        updates = KerasTensor((5,))
        self.assertEqual(
            core.scatter_update(inputs, indices, updates).shape, (4, None)
        )

    # Slice doesn't support dynamic shape.
    def test_slice_update(self):
        inputs = KerasTensor((4, None))
        start_indices = KerasTensor((2,))
        updates = KerasTensor((2, 2))
        self.assertEqual(
            core.slice_update(inputs, start_indices, updates).shape, (4, None)
        )

    def test_stop_gradient(self):
        variable = KerasTensor(shape=(3, None), dtype="float32")
        self.assertEqual(core.stop_gradient(variable).shape, (3, None))

    def test_switch(self):
        def fn(x, y):
            return x[:, 0], y[0, :]

        index = KerasTensor(())
        x = KerasTensor((None, 2))
        y = KerasTensor((5, None))
        result = core.switch(index, [fn], x, y)
        self.assertEqual(result[0].shape, (None,))
        self.assertEqual(result[1].shape, (None,))

    def test_vectorized_map(self):
        def f(x):
            return x**2

        xs = KerasTensor((None, 5))
        self.assertEqual(core.vectorized_map(f, xs).shape, (None, 5))

        # Test nested output
        def f2(x):
            return {"a": x**2, "b": x * 10}

        xs = KerasTensor((None, 5))
        ys = core.vectorized_map(f2, xs)
        self.assertEqual(ys["a"].shape, (None, 5))
        self.assertEqual(ys["b"].shape, (None, 5))

        # Test nested input
        def f3(x):
            return x[0] + x[1]

        xs = (KerasTensor((None, 5)), KerasTensor((None, 5)))
        self.assertEqual(core.vectorized_map(f3, xs).shape, (None, 5))

    def test_while_loop(self):
        def cond(args):
            return tree.flatten(args)[0] < 10

        def body(args):
            return tree.map_structure(lambda x: x + 1, args)

        loop_vars = KerasTensor((None,))
        self.assertEqual(core.while_loop(cond, body, loop_vars).shape, (None,))

    def test_unstack(self):
        # Unstacking along a None axis needs an explicit `num`.
        x = KerasTensor((2, None, None))
        axis, num = 1, 3
        out = core.unstack(x, num=num, axis=axis)
        self.assertEqual(len(out), 3)
        for o in out:
            self.assertEqual(o.shape, (2, None))
class CoreOpsStaticShapeTest(testing.TestCase):
def test_associative_scan(self):
xs = (KerasTensor((5, 10)), KerasTensor((5, 10)))
ys = core.associative_scan(
f=lambda x, y: (x[0] + y[0], x[1] + y[1]), elems=xs, axis=0
)
self.assertEqual(ys[0].shape, (5, 10))
# sum two tuples of unknown (but same) length at axis
def _fn(x, y):
return tuple([x[i] + y[i] for i in range(len(x))])
ys = core.associative_scan(f=_fn, elems=xs, axis=1)
self.assertEqual(ys[0].shape, (5, 10))
def test_cast(self):
x = KerasTensor((3, 5, 7), dtype="float32")
self.assertEqual(core.cast(x, "float16").shape, (3, 5, 7))
def test_cond(self):
pred = KerasTensor((), dtype="bool")
self.assertEqual(
ops.cond(
pred, lambda: ops.ones((1, 3)), lambda: ops.zeros((1, 3))
).shape,
(1, 3),
)
def test_convert_to_tensor(self):
x = KerasTensor((2, 3))
out = core.convert_to_tensor(x)
self.assertEqual(out.shape, x.shape)
self.assertFalse(out.sparse)
out = core.convert_to_tensor(x, sparse=True)
self.assertFalse(out.sparse)
x = KerasTensor((2, 3), sparse=True)
out = core.convert_to_tensor(x)
self.assertTrue(out.sparse)
out = core.convert_to_tensor(x, sparse=True)
self.assertTrue(out.sparse)
out = core.convert_to_tensor(x, sparse=False)
self.assertFalse(out.sparse)
def test_fori_loop(self):
def body_fun(i, x):
return x + i
initial_value = KerasTensor((3, 5, 7))
result = core.fori_loop(0, 10, body_fun, initial_value)
self.assertEqual(result.shape, (3, 5, 7))
def test_map(self):
def f(x):
return x**2
xs = KerasTensor((6, 5))
ys = core.map(f, xs)
self.assertEqual(ys.shape, (6, 5))
# Test nested output
def f2(x):
return {"a": x**2, "b": x * 10}
xs = KerasTensor((6, 5))
ys = core.map(f2, xs)
self.assertEqual(ys["a"].shape, (6, 5))
self.assertEqual(ys["b"].shape, (6, 5))
# Test nested input
def f3(x):
return x[0] + x[1]
xs = (KerasTensor((6, 5)), KerasTensor((6, 5)))
self.assertEqual(core.map(f3, xs).shape, (6, 5))
def test_saturate_cast(self):
x = KerasTensor((3, 5, 7), dtype="float32")
self.assertEqual(core.saturate_cast(x, "float16").shape, (3, 5, 7))
def test_scan(self):
def f(carry, xs):
xs = xs + carry
return carry, carry
init = KerasTensor(())
xs = KerasTensor((6,))
carry, result = core.scan(f, init, xs)
self.assertEqual(carry.shape, ())
self.assertEqual(result.shape, (6,))
def f2(carry, _):
return carry, carry
carry, result = core.scan(f2, init, xs=None, length=3)
self.assertEqual(carry.shape, ())
self.assertEqual(result.shape, (3,))
def test_scatter(self):
indices = KerasTensor((5, 2))
values = KerasTensor((5,))
shape = (4, 4)
self.assertEqual(core.scatter(indices, values, shape).shape, (4, 4))
def test_scatter_update(self):
inputs = KerasTensor((4, 4))
indices = KerasTensor((5, 2))
updates = KerasTensor((5,))
self.assertEqual(
core.scatter_update(inputs, indices, updates).shape, (4, 4)
)
inputs = KerasTensor((4, 4, 4))
indices = KerasTensor((5, 2))
updates = KerasTensor((5, 4))
self.assertEqual(
core.scatter_update(inputs, indices, updates).shape, (4, 4, 4)
)
def test_slice(self):
inputs = KerasTensor(shape=(3, 3), dtype="float32")
start_indices = KerasTensor(shape=(2,), dtype="int32")
shape = (2, 2)
self.assertEqual(core.slice(inputs, start_indices, shape).shape, (2, 2))
def test_slice_negative_one_shape(self):
inputs = KerasTensor(shape=(3, 3), dtype="float32")
start_indices = (1, 1)
shape = (-1, -1)
self.assertEqual(core.slice(inputs, start_indices, shape).shape, (2, 2))
    def test_slice_negative_one_shape_raises(self):
        """`shape=-1` cannot be resolved when start indices are symbolic."""
        inputs = KerasTensor(shape=(3, 3), dtype="float32")
        start_indices = KerasTensor(shape=(2,), dtype="int32")
        shape = (-1, -1)
        # A -1 extent needs concrete starts to be inferred; symbolic
        # start_indices make that impossible, so slice() must raise.
        with self.assertRaises(ValueError):
            core.slice(inputs, start_indices, shape)
    def test_slice_update(self):
        """Symbolic `slice_update` returns the inputs' shape unchanged."""
        inputs = KerasTensor((4, 4))
        start_indices = KerasTensor((2,))
        updates = KerasTensor((2, 2))
        self.assertEqual(
            core.slice_update(inputs, start_indices, updates).shape, (4, 4)
        )
        # Same contract for higher-rank inputs/updates.
        inputs = KerasTensor((4, 4, 4))
        start_indices = KerasTensor((3,))
        updates = KerasTensor((2, 2, 2))
        self.assertEqual(
            core.slice_update(inputs, start_indices, updates).shape, (4, 4, 4)
        )
def test_stop_gradient(self):
variable = KerasTensor(shape=(3, 3), dtype="float32")
self.assertEqual(core.stop_gradient(variable).shape, (3, 3))
    def test_switch(self):
        """Symbolic `switch` propagates shapes through the branch outputs."""
        def fn(x, y):
            return x[:, 0], y[0, :]
        index = KerasTensor(())
        x = KerasTensor((5, 2))
        y = KerasTensor((5, 2))
        self.assertEqual(core.switch(index, [fn], x, y)[0].shape, (5,))
        self.assertEqual(core.switch(index, [fn], x, y)[1].shape, (2,))
    def test_vectorized_map(self):
        """Symbolic `vectorized_map` keeps per-element output specs."""
        def f(x):
            return x**2
        xs = KerasTensor((6, 5))
        ys = core.vectorized_map(f, xs)
        self.assertEqual(ys.shape, (6, 5))
        # Test nested output
        def f2(x):
            return {"a": x**2, "b": x * 10}
        xs = KerasTensor((6, 5))
        ys = core.vectorized_map(f2, xs)
        self.assertEqual(ys["a"].shape, (6, 5))
        self.assertEqual(ys["b"].shape, (6, 5))
        # Test nested input
        def f3(x):
            return x[0] + x[1]
        xs = (KerasTensor((6, 5)), KerasTensor((6, 5)))
        self.assertEqual(core.vectorized_map(f3, xs).shape, (6, 5))
    def test_while_loop(self):
        """Symbolic `while_loop` preserves the loop-variable spec."""
        def cond(args):
            return tree.flatten(args)[0] < 10
        def body(args):
            return tree.map_structure(lambda x: x + 1, args)
        loop_vars = KerasTensor((10,))
        self.assertEqual(core.while_loop(cond, body, loop_vars).shape, (10,))
def test_unstack(self):
x = KerasTensor((2, 3, 4))
axis = 1
out = core.unstack(x, axis=axis)
self.assertEqual(len(out), 3)
for o in out:
self.assertEqual(o.shape, (2, 4))
class CoreOpsCorrectnessTest(testing.TestCase):
    def test_associative_scan(self):
        """`associative_scan` over axes, structures and the Mamba recurrence."""
        # Test prefix sum
        arr = np.arange(5)
        result = core.associative_scan(f=operator.add, elems=arr)
        self.assertAllEqual(result, [0, 1, 3, 6, 10])
        # Test reverse
        result = core.associative_scan(f=operator.add, elems=arr, reverse=True)
        self.assertAllEqual(result, [10, 10, 9, 7, 4])
        # Test multiple dimensions, across different axes
        batched_arr = np.stack([arr, arr + 1, arr + 2])
        result = core.associative_scan(
            f=operator.add, elems=batched_arr, axis=1
        )
        self.assertAllEqual(result[2], [2, 5, 9, 14, 20])
        result = core.associative_scan(
            f=operator.add, elems=batched_arr, axis=0
        )
        self.assertAllEqual(result[:, 0], [0, 1, 3])
        # Test structured input
        elems = {
            "a": np.array([[0, 1, 2], [3, 4, 5]]),
            "b": np.array([[6, 7, 8], [9, 10, 11]]),
        }
        # NOTE(review): key "a" is built from y["b"] — looks like a typo
        # for y["a"], but only "b" is asserted below; confirm intent.
        def _dict_add(x, y):
            return {"a": x["a"] + y["b"], "b": x["b"] + y["b"]}
        ax0 = core.associative_scan(f=_dict_add, elems=elems, axis=0)
        self.assertAllEqual(
            ax0["b"],
            [[6, 7, 8], [15, 17, 19]],
        )
        # Test parallel scan op used in mamba
        b, l, d, n = 1, 2, 3, 4
        DB = np.random.rand(b, l, d, n)
        DA = np.random.rand(b, l, d, n)
        H_seq = np.zeros((b, d, n))
        # Sequential reference: H_t = DA_t * H_{t-1} + DB_t.
        for i in range(l):
            H_seq = DA[:, i] * H_seq + DB[:, i]
        def scan_op(ci, cj):
            a = cj[0] * ci[0]
            b = cj[0] * ci[1] + cj[1]
            return (a, b)
        inputs = (DA.transpose(1, 0, 2, 3), DB.transpose(1, 0, 2, 3))
        H_par = core.associative_scan(f=scan_op, elems=inputs)[-1][-1]
        self.assertAllClose(H_seq, H_par)
        # Test Operation call.
        xs = np.arange(5, dtype="float32")
        self.assertAllClose(
            core.AssociativeScan()(operator.add, xs), ops.cumsum(xs)
        )
    def test_cast(self):
        """`cast` changes dtype on eager and symbolic tensors alike."""
        x = ops.ones((2,), dtype="float32")
        y = ops.cast(x, "float16")
        self.assertIn("float16", str(y.dtype))
        x = ops.KerasTensor((2,), dtype="float32")
        y = ops.cast(x, "float16")
        self.assertEqual("float16", y.dtype)
        self.assertEqual(x.shape, y.shape)
        # Symbolic casts must be tracked for functional-model graphs.
        self.assertTrue(hasattr(y, "_keras_history"))
        # Test Operation call.
        x = ops.ones((2,), dtype="float32")
        self.assertDType(core.Cast("float16")(x), "float16")
    @parameterized.named_parameters(
        ("float8_e4m3fn", "float8_e4m3fn"), ("float8_e5m2", "float8_e5m2")
    )
    def test_cast_float8(self, float8_dtype):
        """Casting to a float8 dtype and back works eagerly and symbolically."""
        # Cast to float8 and cast back
        x = ops.ones((2,), dtype="float32")
        y = ops.cast(x, float8_dtype)
        self.assertIn(float8_dtype, str(y.dtype))
        x = ops.cast(y, "float32")
        self.assertIn("float32", str(x.dtype))
        x = ops.KerasTensor((2,), dtype="float32")
        y = ops.cast(x, float8_dtype)
        self.assertEqual(float8_dtype, y.dtype)
        self.assertEqual(x.shape, y.shape)
        self.assertTrue(hasattr(y, "_keras_history"))
        x = ops.cast(y, "float32")
        self.assertEqual("float32", x.dtype)
        self.assertEqual(x.shape, y.shape)
        self.assertTrue(hasattr(x, "_keras_history"))
    def test_cond(self):
        """`cond` dispatches on bools, tensors, and rejects spec mismatches."""
        t = ops.cond(True, lambda: 0, lambda: 1)
        self.assertEqual(t, 0)
        f = ops.cond(False, lambda: 0, lambda: 1)
        self.assertEqual(f, 1)
        # Branches may return None.
        f = ops.cond(False, lambda: None, lambda: None)
        self.assertEqual(f, None)
        out = ops.cond(
            ops.convert_to_tensor(True),
            lambda: ops.ones((1, 3)),
            lambda: ops.zeros((1, 3)),
        )
        self.assertAllClose(out, ops.ones((1, 3)))
        out = ops.cond(
            ops.convert_to_tensor(False),
            lambda: ops.ones((3,)),
            lambda: ops.zeros((3,)),
        )
        self.assertAllClose(out, ops.zeros((3,)))
        # Symbolic predicate with mismatched branch shapes must fail.
        with self.assertRaises(ValueError):
            ops.cond(
                KerasTensor((), dtype="bool"),
                lambda: ops.ones((3,)),
                lambda: ops.zeros((4,)),
            )
    def test_convert_to_tensor(self):
        """convert_to_tensor round-trips numpy data and handles edge inputs."""
        x = np.ones((2,))
        x = ops.convert_to_tensor(x)
        x = ops.convert_to_numpy(x)
        self.assertAllEqual(x, (1, 1))
        self.assertIsInstance(x, np.ndarray)
        # Empty lists should give an empty array.
        x = ops.convert_to_tensor([])
        np_x = ops.convert_to_numpy(x)
        self.assertTrue(ops.is_tensor(x))
        self.assertAllEqual(x, [])
        self.assertIsInstance(np_x, np.ndarray)
        # Partially converted.
        x = ops.convert_to_tensor((1, ops.array(2), 3))
        self.assertAllEqual(x, (1, 2, 3))
    @pytest.mark.skipif(
        not backend.SUPPORTS_SPARSE_TENSORS,
        reason=f"{backend.backend()} backend doesn't support sparse tensors.",
    )
    def test_convert_to_tensor_sparse(self):
        """Sparse inputs stay sparse unless `sparse=False` densifies them."""
        if backend.backend() == "tensorflow":
            import tensorflow as tf
            x = tf.SparseTensor([[0, 0], [1, 2]], [1.0, 2.0], (2, 3))
        elif backend.backend() == "jax":
            import jax.experimental.sparse as jax_sparse
            x = jax_sparse.BCOO(([1.0, 2.0], [[0, 0], [1, 2]]), shape=(2, 3))
        else:
            self.fail(f"Sparse is unsupported with backend {backend.backend()}")
        # Default conversion preserves sparseness.
        x_default = ops.convert_to_tensor(x)
        self.assertSparse(x_default)
        self.assertAllClose(x, x_default)
        x_sparse = ops.convert_to_tensor(x, sparse=True)
        self.assertSparse(x_sparse)
        self.assertAllClose(x, x_sparse)
        # sparse=False forces a dense result.
        x_dense = ops.convert_to_tensor(x, sparse=False)
        self.assertSparse(x_dense, False)
        self.assertAllClose(x, x_dense)
        x_numpy = ops.convert_to_numpy(x)
        self.assertIsInstance(x_numpy, np.ndarray)
        self.assertAllClose(x_numpy, x_dense)
    @pytest.mark.skipif(
        not backend.SUPPORTS_RAGGED_TENSORS,
        reason=f"{backend.backend()} backend doesn't support ragged tensors.",
    )
    def test_convert_to_tensor_ragged(self):
        """Ragged inputs stay ragged unless `ragged=False` densifies them."""
        import tensorflow as tf
        x = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
        x_default = ops.convert_to_tensor(x)
        self.assertIsInstance(x_default, tf.RaggedTensor)
        self.assertAllClose(x, x_default)
        x_ragged = ops.convert_to_tensor(x, ragged=True)
        self.assertIsInstance(x_ragged, tf.RaggedTensor)
        self.assertAllClose(x, x_ragged)
        # ragged=False forces a dense (padded) result.
        x_dense = ops.convert_to_tensor(x, ragged=False)
        self.assertNotIsInstance(x_dense, tf.RaggedTensor)
        self.assertAllClose(x, x_dense)
        x_numpy = ops.convert_to_numpy(x)
        self.assertIsInstance(x_numpy, np.ndarray)
        self.assertAllClose(x_numpy, x_dense)
    @pytest.mark.skipif(
        backend.backend() not in ("tensorflow", "jax", "torch"),
        reason=(
            f"{backend.backend()} backend doesn't support `custom_gradient`."
        ),
    )
    def test_custom_gradient(self):
        """`custom_gradient` overrides the backward pass on each backend."""
        # function to test custom_gradient on
        @ops.custom_gradient
        def log1pexp(x):
            e = ops.exp(x)
            def grad(*args, upstream=None):
                if upstream is None:
                    (upstream,) = args
                return ops.multiply(upstream, 1.0 - 1.0 / ops.add(1, e))
            return ops.log(1 + e), grad
        # Naive form: its gradient becomes NaN at large x (asserted in the
        # JAX branch below).
        def log1pexp_nan(x):
            return ops.log(1 + ops.exp(x))
        x = ops.convert_to_tensor(100.0)
        if backend.backend() == "tensorflow":
            import tensorflow as tf
            with tf.GradientTape() as tape1:
                tape1.watch(x)
                y = log1pexp(x)
            with tf.GradientTape() as tape2:
                tape2.watch(x)
                z = log1pexp_nan(x)
            dy_dx = tape1.gradient(y, x)
            # NOTE(review): dz_dx is computed but never asserted here,
            # unlike the JAX branch — confirm whether an isnan check was
            # intended.
            dz_dx = tape2.gradient(z, x)
            self.assertEqual(ops.convert_to_numpy(dy_dx), 1.0)
        elif backend.backend() == "jax":
            import jax
            dy_dx = jax.grad(log1pexp)(x)
            dz_dx = jax.grad(log1pexp_nan)(x)
            self.assertEqual(ops.convert_to_numpy(dy_dx), 1.0)
            self.assertTrue(ops.isnan(dz_dx))
        elif backend.backend() == "torch":
            import torch
            x = torch.tensor(100.0, requires_grad=True)
            z = log1pexp(x)
            z.sum().backward()
            self.assertEqual(ops.convert_to_numpy(x.grad), 1.0)
    def test_dynamic_slice(self):
        """`slice` works with a loop-carried (dynamic) start index."""
        def cond(index, inputs, sum):
            return index < 10
        def body(index, inputs, sum):
            # Accumulate one element per iteration via a dynamic slice.
            sum = sum + core.slice(inputs, [index], [1])
            index = index + 1
            return index, inputs, sum
        index, inputs, sum = 0, np.arange(10), np.array([0])
        index, inputs, sum = core.while_loop(cond, body, (index, inputs, sum))
        self.assertEqual(sum.shape, (1,))
        self.assertAllClose(sum, [45])
def test_fori_loop(self):
def body_fun(i, x):
return x + i
initial_value = np.array(0)
result = core.fori_loop(0, 10, body_fun, initial_value)
self.assertAllClose(result, 45)
# Test Operation call.
self.assertAllClose(core.ForiLoop(0, 10, body_fun)(initial_value), 45)
    def test_getitem(self):
        """Tensor `__getitem__` matches numpy indexing for every index form."""
        np_tensor = np.arange(24).reshape(2, 3, 4)
        tensor = ops.convert_to_tensor(np_tensor)
        # Integer and tuple-of-integers indexing.
        t = tensor[1]
        n = np_tensor[1]
        self.assertEqual(t.shape, n.shape)
        self.assertAllClose(t, n)
        t = tensor[1, 2, 3]
        n = np_tensor[1, 2, 3]
        self.assertEqual(t.shape, n.shape)
        self.assertAllClose(t, n)
        # Slices, None (newaxis) and Ellipsis in various combinations.
        t = tensor[1:2]
        n = np_tensor[1:2]
        self.assertEqual(t.shape, n.shape)
        self.assertAllClose(t, n)
        t = tensor[1:2, 2:3, 3:4]
        n = np_tensor[1:2, 2:3, 3:4]
        self.assertEqual(t.shape, n.shape)
        self.assertAllClose(t, n)
        t = tensor[1:2, None]
        n = np_tensor[1:2, None]
        self.assertEqual(t.shape, n.shape)
        self.assertAllClose(t, n)
        t = tensor[1:2, 2:3, ...]
        n = np_tensor[1:2, 2:3, ...]
        self.assertEqual(t.shape, n.shape)
        self.assertAllClose(t, n)
        t = tensor[1:2, ..., 3:4]
        n = np_tensor[1:2, ..., 3:4]
        self.assertEqual(t.shape, n.shape)
        self.assertAllClose(t, n)
        t = tensor[None, ..., 3:4, None]
        n = np_tensor[None, ..., 3:4, None]
        self.assertEqual(t.shape, n.shape)
        self.assertAllClose(t, n)
        t = tensor[1:2:None]
        n = np_tensor[1:2:None]
        self.assertEqual(t.shape, n.shape)
        self.assertAllClose(t, n)
        t = tensor[:, 2]
        n = np_tensor[:, 2]
        self.assertEqual(t.shape, n.shape)
        self.assertAllClose(t, n)
        t = tensor[None]
        n = np_tensor[None]
        self.assertEqual(t.shape, n.shape)
        self.assertAllClose(t, n)
        t = tensor[None, None]
        n = np_tensor[None, None]
        self.assertEqual(t.shape, n.shape)
        self.assertAllClose(t, n)
        t = tensor[...]
        n = np_tensor[...]
        self.assertEqual(t.shape, n.shape)
        self.assertAllClose(t, n)
        t = tensor[..., 1]
        n = np_tensor[..., 1]
        self.assertEqual(t.shape, n.shape)
        self.assertAllClose(t, n)
        t = tensor[..., 1, 2]
        n = np_tensor[..., 1, 2]
        self.assertEqual(t.shape, n.shape)
        self.assertAllClose(t, n)
        t = tensor[..., -1, 2]
        n = np_tensor[..., -1, 2]
        self.assertEqual(t.shape, n.shape)
        self.assertAllClose(t, n)
        t = tensor[..., -1:-2, 2]
        n = np_tensor[..., -1:-2, 2]
        self.assertEqual(t.shape, n.shape)
        self.assertAllClose(t, n)
        t = tensor[..., None, None]
        n = np_tensor[..., None, None]
        self.assertEqual(t.shape, n.shape)
        self.assertAllClose(t, n)
        t = tensor[None, ..., None]
        n = np_tensor[None, ..., None]
        self.assertEqual(t.shape, n.shape)
        self.assertAllClose(t, n)
        t = tensor[1, 2, None, ..., None]
        n = np_tensor[1, 2, None, ..., None]
        self.assertEqual(t.shape, n.shape)
        self.assertAllClose(t, n)
        t = tensor[None, ..., 1, 2]
        n = np_tensor[None, ..., 1, 2]
        self.assertEqual(t.shape, n.shape)
        self.assertAllClose(t, n)
        t = tensor[1, None, 2]
        n = np_tensor[1, None, 2]
        self.assertEqual(t.shape, n.shape)
        self.assertAllClose(t, n)
        # Scalar tensor indices (converted to numpy on the numpy side).
        index_tensor = ops.convert_to_tensor(np.array(1, dtype=np.int32))
        t = tensor[index_tensor]
        n = np_tensor[ops.convert_to_numpy(index_tensor)]
        self.assertEqual(t.shape, n.shape)
        self.assertAllClose(t, n)
        index_tensor = ops.convert_to_tensor(np.array(1, dtype=np.int32))
        t = tensor[index_tensor, 2, None]
        n = np_tensor[ops.convert_to_numpy(index_tensor), 2, None]
        self.assertEqual(t.shape, n.shape)
        self.assertAllClose(t, n)
        index_tensor = ops.convert_to_tensor(np.array(-2, dtype=np.int32))
        t = tensor[index_tensor, 1]
        n = np_tensor[ops.convert_to_numpy(index_tensor), 1]
        self.assertEqual(t.shape, n.shape)
        self.assertAllClose(t, n)
        index_tensor = ops.convert_to_tensor(np.array(-1, dtype=np.int32))
        t = tensor[-2, index_tensor]
        n = np_tensor[-2, ops.convert_to_numpy(index_tensor)]
        self.assertEqual(t.shape, n.shape)
        self.assertAllClose(t, n)
        # Negative indexing
        t = tensor[-1]
        n = np_tensor[-1]
        self.assertEqual(t.shape, n.shape)
        self.assertAllClose(t, n)
        t = tensor[1, -1, -2]
        n = np_tensor[1, -1, -2]
        self.assertEqual(t.shape, n.shape)
        self.assertAllClose(t, n)
        # Slicing with step
        t = tensor[::2]
        n = np_tensor[::2]
        self.assertEqual(t.shape, n.shape)
        self.assertAllClose(t, n)
        # Mixed slices and integers
        t = tensor[1, :, 1:4]
        n = np_tensor[1, :, 1:4]
        self.assertEqual(t.shape, n.shape)
        self.assertAllClose(t, n)
        t = tensor[:, 1:2, 3]
        n = np_tensor[:, 1:2, 3]
        self.assertEqual(t.shape, n.shape)
        self.assertAllClose(t, n)
def test_is_tensor(self):
np_x = np.array([[1, 2, 3], [3, 2, 1]])
x = backend.convert_to_tensor(np_x)
if backend.backend() != "numpy":
self.assertFalse(ops.is_tensor(np_x))
self.assertTrue(ops.is_tensor(x))
self.assertFalse(ops.is_tensor([1, 2, 3]))
    def test_map(self):
        """`map` applies a function over the leading axis, incl. structures."""
        def f(x):
            return x**2
        xs = np.arange(10)
        self.assertAllClose(ops.map(f, xs), xs**2)
        # Test nested output
        def f2(x):
            return {"a": x**2, "b": x * 10}
        xs = np.random.rand(2, 3, 4).astype("float32")
        outputs = ops.map(f2, xs)
        self.assertAllClose(outputs["a"], xs**2)
        self.assertAllClose(outputs["b"], xs * 10)
        # Test with nested structures
        def dict_input_fn(inputs):
            x = inputs["x"][:, 0]
            y = inputs["y"] + 1
            return {"x": x, "y": y}
        def list_input_fn(inputs):
            return [x**2 for x in inputs]
        xs = {
            "x": ops.convert_to_tensor(
                np.random.rand(4, 100, 3), dtype="float32"
            ),
            "y": ops.convert_to_tensor(
                np.random.randint(0, 10, size=(4, 1)), dtype="int32"
            ),
        }
        xs1 = [
            ops.convert_to_tensor(np.random.rand(4, 100, 3), dtype="float32"),
            ops.convert_to_tensor(
                np.random.randint(0, 10, size=(4, 1)), dtype="int32"
            ),
        ]
        ys = ops.map(dict_input_fn, xs)
        self.assertEqual(ys["x"].shape, (4, 100))
        # NOTE(review): .all() reduces each side to a single bool before
        # comparison, so these only check truthiness, not elementwise
        # equality — assertAllClose would be a stronger check.
        self.assertEqual(
            ops.convert_to_numpy(ys["y"]).all(),
            ops.convert_to_numpy(xs["y"] + 1).all(),
        )
        ys = ops.map(list_input_fn, xs1)
        for x, y in zip(xs1, ys):
            self.assertEqual(
                (ops.convert_to_numpy(y)).all(),
                (ops.convert_to_numpy(x) ** 2).all(),
            )
        # Test Operation call.
        xs = np.arange(10)
        self.assertAllClose(ops.Map()(f, xs), xs**2)
    def test_saturate_cast(self):
        """`saturate_cast` clamps values into the target dtype's range."""
        x = ops.ones((2,), dtype="float32")
        y = ops.saturate_cast(x, "float16")
        self.assertIn("float16", str(y.dtype))
        x = ops.KerasTensor((2,), dtype="float32")
        y = ops.saturate_cast(x, "float16")
        self.assertEqual("float16", y.dtype)
        self.assertEqual(x.shape, y.shape)
        self.assertTrue(hasattr(y, "_keras_history"))
        # Test Operation call.
        x = np.array([-256, 1.0, 257.0], dtype="float32")
        y = core.SaturateCast("uint8")(x)
        self.assertDType(y, "uint8")
        # Check that the values are the same
        self.assertAllClose(y, np.clip(x, 0, 255).astype("uint8"))
    def test_scan(self):
        """`scan` correctness: reverse, unroll, xs=None, nested structures."""
        # Test cumsum
        def cumsum(carry, xs):
            carry = carry + xs
            return carry, carry
        init = np.array(0, dtype="float32")
        xs = np.array([1, 2, 3, 4, 10, 20], dtype="float32")
        carry, result = core.scan(cumsum, init, xs)
        self.assertAllClose(carry, 40.0)
        self.assertAllClose(result, ops.cumsum(xs))
        # Test reverse=True
        carry, result = core.scan(cumsum, init, xs, reverse=True)
        self.assertAllClose(carry, 40.0)
        self.assertAllClose(result, [40, 39, 37, 34, 30, 20])
        # Test unroll
        for unroll in (True, False, 2):
            carry, result = core.scan(cumsum, init, xs, unroll=unroll)
            self.assertAllClose(carry, 40.0)
            self.assertAllClose(result, ops.cumsum(xs))
        # Test xs is None
        def fibonaccis(carry, _):
            return (carry[1], carry[0] + carry[1]), None
        init = (np.array(0, dtype="float32"), np.array(1, dtype="float32"))
        carry, _ = core.scan(fibonaccis, init, length=6)
        self.assertAllClose(carry, [8, 13])
        # Test nested init
        if backend.backend() != "tensorflow":
            # tensorflow doesn't support arbitrary shape/dtype of the output of
            # `f`. It must be the same as `init`.
            def multiply_two(carry, _):
                value1 = carry["value1"]
                value2 = carry["value2"]
                return (
                    {"value1": value1 * 2, "value2": value2 * 2},
                    value1 * 2 + value2 * 2,
                )
            init = {"value1": 2.0, "value2": 3.0}
            carry, result = core.scan(multiply_two, init, length=3)
            self.assertAllClose(carry["value1"], 16)
            self.assertAllClose(carry["value2"], 24)
            self.assertAllClose(result, [10, 20, 40])
        # Test nested xs
        def reduce_add(carry, xs):
            value1 = xs["value1"]
            value2 = xs["value2"]
            return carry, value1 + value2
        init = np.array(0, dtype="float32")
        xs = {
            "value1": np.array([1, 2, 3], dtype="float32"),
            "value2": np.array([10, 20, 30], dtype="float32"),
        }
        _, result = core.scan(reduce_add, init, xs)
        self.assertAllClose(result, [11, 22, 33])
        # Test Operation call.
        init = np.array(0, dtype="float32")
        xs = np.array([1, 2, 3, 4, 10, 20], dtype="float32")
        carry, result = core.Scan()(cumsum, init, xs)
        self.assertAllClose(carry, 40.0)
        self.assertAllClose(result, ops.cumsum(xs))
def test_scatter(self):
# Test 1D
indices = np.array([[1], [3], [4], [7]])
values = np.array([9, 10, 11, 12])
self.assertAllClose(
core.scatter(indices, values, (8,)),
[0, 9, 0, 10, 11, 0, 0, 12],
)
# Test 2D
indices = np.array([[0, 1], [2, 0]])
values = np.array([5, 10])
self.assertAllClose(
core.scatter(indices, values, (3, 2)), [[0, 5], [0, 0], [10, 0]]
)
# Test 3D
indices = np.array([[1], [3]])
values = np.array(
[
[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/quantizers/quantizers.py | keras/src/quantizers/quantizers.py | import ml_dtypes
import numpy as np
from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.backend import KerasTensor
from keras.src.backend import any_symbolic_tensors
from keras.src.backend.common.backend_utils import canonicalize_axis
from keras.src.backend.common.backend_utils import standardize_axis_for_numpy
from keras.src.ops.operation import Operation
from keras.src.quantizers.gptq_config import GPTQConfig
"""Int8-related classes and methods"""
@keras_export(["keras.Quantizer", "keras.quantizers.Quantizer"])
class Quantizer:
def __init__(self, output_dtype="int8"):
self.output_dtype = output_dtype
def __call__(self, x):
"""Compute a quantized output from an input tensor."""
return x
@classmethod
def from_config(cls, config):
"""Creates a quantizer from its config.
This method is the reverse of `get_config`,
capable of instantiating the same quantizer from the config
dictionary.
This method is used by Keras `model_to_estimator`, saving and
loading models to HDF5 formats, Keras model cloning, some visualization
utilities, and exporting models to and from JSON.
Args:
config: A Python dictionary, typically the output of get_config.
Returns:
A quantizer instance.
"""
return cls(**config)
def get_config(self):
"""Returns the config of the quantizer.
A quantizer config is a Python dictionary (serializable)
containing all configuration parameters of the quantizer.
The same quantizer can be reinstantiated later
(without any saved state) from this configuration.
This method is optional if you are just training and executing models,
exporting to and from SavedModels, or using weight checkpoints.
This method is required for Keras `model_to_estimator`, saving and
loading models to HDF5 formats, Keras model cloning, some visualization
utilities, and exporting models to and from JSON.
Returns:
Python dictionary.
"""
raise NotImplementedError(f"{self} does not implement get_config()")
@keras_export("keras.quantizers.abs_max_quantize")
def abs_max_quantize(
inputs,
axis,
value_range=(-127, 127),
dtype="int8",
epsilon=backend.epsilon(),
to_numpy=False,
):
"""
Quantizes the input tensor using the absolute maximum quantization scheme.
Args:
inputs: Input tensor to quantize.
axis: Axis along which to compute the quantization range.
value_range: Tuple of the minimum and maximum values of the quantization
range.
dtype: Data type of the quantized output.
epsilon: Small value to avoid division by zero.
to_numpy: Whether to perform the quantization in numpy. This performs
the computation on the host CPU and can be useful for saving memory
on the device. If False, the computation is performed on the device.
Returns:
A tuple of the quantized tensor and the scale.
"""
if to_numpy:
# Save memory on the device using numpy
original_dtype = backend.standardize_dtype(inputs.dtype)
inputs = ops.convert_to_numpy(inputs)
axis = standardize_axis_for_numpy(axis)
scale = np.divide(
value_range[1],
np.add(np.max(np.abs(inputs), axis=axis, keepdims=True), epsilon),
)
outputs = np.multiply(inputs, scale)
outputs = np.clip(np.round(outputs), value_range[0], value_range[1])
outputs = outputs.astype(dtype)
return ops.convert_to_tensor(outputs), ops.convert_to_tensor(
scale, dtype=original_dtype
)
inputs = ops.convert_to_tensor(inputs)
scale = ops.divide(
value_range[1],
ops.add(ops.max(ops.abs(inputs), axis=axis, keepdims=True), epsilon),
)
scale = ops.cast(scale, backend.standardize_dtype(inputs.dtype))
outputs = ops.multiply(inputs, scale)
outputs = ops.clip(ops.round(outputs), value_range[0], value_range[1])
outputs = ops.cast(outputs, dtype)
return outputs, scale
@keras_export("keras.quantizers.AbsMaxQuantizer")
class AbsMaxQuantizer(Quantizer):
def __init__(
self,
axis=None, # Deprecated, provide axis in __call__ instead.
value_range=(-127, 127),
epsilon=backend.epsilon(),
output_dtype="int8",
):
Quantizer.__init__(self, output_dtype=output_dtype)
if axis is not None:
if isinstance(axis, int):
axis = (axis,)
self.axis = tuple(axis)
else:
self.axis = None
self.value_range = value_range
self.epsilon = epsilon
if output_dtype == "int8":
if value_range[0] < -128 or value_range[1] > 127:
raise ValueError(
f"Quantizer with output_dtype='int8' requires value_range "
f"to be within the interval [-128, 127]. Received: "
f"value_range={value_range}"
)
def __call__(self, x, axis=None, to_numpy=False):
"""
Quantizes the input tensor.
Args:
x: Input tensor to quantize.
axis: Axis along which to compute the quantization range. If None,
uses the axis specified in the constructor. If None and no axis
was specified in the constructor, defaults to -1.
to_numpy: Whether to perform the quantization in numpy. This
performs the computation on the host CPU and can be useful for
saving memory on the device. If False, the computation is
performed on the device.
Returns:
A tuple of the quantized tensor and the scale.
"""
if axis is None:
axis = self.axis
if axis is None:
# Default to -1 if no axis is specified
axis = -1
quantized_x, scale = abs_max_quantize(
x,
axis,
self.value_range,
self.output_dtype,
self.epsilon,
to_numpy,
)
return quantized_x, scale
def get_config(self):
config = {
"value_range": self.value_range,
"epsilon": self.epsilon,
"output_dtype": self.output_dtype,
}
if self.axis is not None:
config["axis"] = self.axis
return config
def adjust_and_nudge(min_range, max_range, num_bits, narrow_range):
    """Adjusts and nudges the quantization range for better accuracy."""
    # Work in at least float32 to limit precision loss in the range math.
    compute_dtype = backend.result_type(min_range.dtype, "float32")
    lo = ops.cast(min_range, compute_dtype)
    hi = ops.cast(max_range, compute_dtype)
    quant_min = 1 if narrow_range else 0
    quant_max = (1 << num_bits) - 1
    levels = quant_max - quant_min
    span = ops.subtract(hi, lo)
    # Step size of one quantization level; its reciprocal is computed
    # directly (rather than as 1/scale) to avoid compounding rounding error.
    scale = ops.divide(span, levels)
    inv_scale = ops.divide(levels, span)
    # Zero point implied by the minimum, clamped into [quant_min, quant_max]
    # and rounded onto the integer grid.
    zero_point = ops.clip(
        quant_min - ops.divide(lo, scale), quant_min, quant_max
    )
    nudged_zero_point = ops.round(zero_point)
    # Range limits consistent with the nudged (integer) zero point.
    nudged_min = ops.multiply(
        ops.subtract(quant_min, nudged_zero_point), scale
    )
    nudged_max = ops.multiply(
        ops.subtract(quant_max, nudged_zero_point), scale
    )
    return nudged_min, nudged_max, scale, inv_scale
class FakeQuantWithMinMaxVars(Operation):
    """Operation wrapper around `fake_quant_with_min_max_vars`."""
    def __init__(self, num_bits=8, narrow_range=False, axis=None):
        super().__init__()
        # Quantization bit width (e.g. 8).
        self.num_bits = num_bits
        # If True, quantize into [1, 2^num_bits - 1] instead of [0, ...].
        self.narrow_range = narrow_range
        # Channel axis for per-channel quantization; None = per-tensor.
        self.axis = axis
    def call(self, inputs, min_vals, max_vals):
        # Delegate to the functional form with this op's stored parameters.
        return fake_quant_with_min_max_vars(
            inputs,
            min_vals,
            max_vals,
            num_bits=self.num_bits,
            narrow_range=self.narrow_range,
            axis=self.axis,
        )
    def compute_output_spec(self, inputs, min_vals, max_vals):
        # Fake quantization dequantizes back to floats, so the output spec
        # matches the input exactly.
        return KerasTensor(inputs.shape, dtype=inputs.dtype)
@keras_export("keras.quantizers.fake_quant_with_min_max_vars")
def fake_quant_with_min_max_vars(
inputs,
min_vals,
max_vals,
num_bits=8,
narrow_range=False,
axis=None,
):
"""Perform per-tensor or per-channel fake quantization.
`[min_vals, max_vals]` define the clamping range for the `inputs`.
The `inputs` are quantized into the quantization range:
- `[0, 2^num_bits - 1]` when `narrow_range=False`
- `[1, 2^num_bits - 1]` when `narrow_range=True`
After quantization, the values are dequantized and output as floats within
the `[min_vals, max_vals]` interval.
This operation supports gradient computation, allowing `min_vals` and
`max_vals` to be trained.
Args:
inputs: Input Keras tensor of float dtype.
min_vals: A global minimum scalar or a per-channel minimum tensor.
max_vals: A global maximum scalar or a per-channel maximum tensor.
num_bits: Quantization bit width (e.g., `8` for int8). Defaults to `8`.
narrow_range: Whether to use narrow quantization range. Defaults to
`False`.
axis: Axis along which to perform per-channel quantization. If `None`,
per-tensor quantization is performed. Defaults to `None`.
Returns:
Tensor: A Keras tensor with fake quantization applied.
"""
if any_symbolic_tensors((inputs,)):
return FakeQuantWithMinMaxVars().symbolic_call(
inputs, min_vals, max_vals
)
inputs = ops.convert_to_tensor(inputs)
min_vals = ops.convert_to_tensor(min_vals)
max_vals = ops.convert_to_tensor(max_vals)
num_bits = int(num_bits)
if axis is not None:
axis = canonicalize_axis(axis, inputs.ndim)
# Shortcut for TensorFlow backend by using `tf.quantization.fake_quant_*`
# apis. This is necessary to be recognizable for the TFLite converter.
if backend.backend() == "tensorflow":
import tensorflow as tf
# `tf.quantization.fake_quant_*` only supports float32.
dtype = backend.standardize_dtype(inputs.dtype)
if axis is None:
outputs = tf.quantization.fake_quant_with_min_max_vars(
ops.cast(inputs, "float32"),
ops.cast(ops.reshape(min_vals, ()), "float32"),
ops.cast(ops.reshape(max_vals, ()), "float32"),
num_bits=num_bits,
narrow_range=narrow_range,
)
return ops.cast(outputs, dtype=dtype)
else:
# `tf.quantization.fake_quant_with_min_max_vars_per_channel` only
# supports the last channel for the per-channel quantization. We
# use `ops.swapaxes` for the pre- and post-processing.
last_axis = inputs.ndim - 1
inputs = ops.swapaxes(inputs, axis, last_axis)
outputs = tf.quantization.fake_quant_with_min_max_vars_per_channel(
ops.cast(inputs, "float32"),
ops.cast(min_vals, "float32"),
ops.cast(max_vals, "float32"),
num_bits=num_bits,
narrow_range=narrow_range,
)
outputs = ops.cast(outputs, dtype=dtype)
return ops.swapaxes(outputs, last_axis, axis)
@ops.custom_gradient
def _fake_quant_with_min_max_vars_per_channel(x, min_val, max_val):
dtype = backend.standardize_dtype(x.dtype)
# Calculate quantization parameters for all channels at once
nudged_min, nudged_max, scale, inv_scale = adjust_and_nudge(
min_val, max_val, num_bits, narrow_range
)
quant_zero = ops.floor(
ops.add(ops.multiply(-nudged_min, inv_scale), 0.5)
)
x_clamped = ops.clip(
ops.cast(x, nudged_min.dtype), nudged_min, nudged_max
)
x_clamped_shifted = ops.subtract(x_clamped, nudged_min)
result = ops.multiply(
ops.floor(
ops.add(
ops.subtract(
ops.multiply(x_clamped_shifted, inv_scale), quant_zero
),
0.5,
)
),
scale,
)
result = ops.cast(result, dtype=dtype)
# Create gradient mask for all channels
masks = ops.logical_and(
ops.greater_equal(x, nudged_min), ops.less_equal(x, nudged_max)
)
def grad(*args, upstream=None):
if upstream is None:
(upstream,) = args
# Gradient for x
dx = ops.where(masks, upstream, 0.0)
axes = [i for i in range(len(dx.shape)) if i != axis]
# Gradient for min_val
# When x is clipped to min, the gradient flows to min_val
min_mask = ops.less_equal(x, nudged_min)
grad_min = ops.where(min_mask, upstream, 0.0)
if axis is not None:
grad_min = ops.sum(grad_min, axis=axes)
else:
grad_min = ops.sum(grad_min)
grad_min = ops.reshape(grad_min, ops.shape(min_val))
# Gradient for max_val
# When x is clipped to max, the gradient flows to max_val
max_mask = ops.greater_equal(x, nudged_max)
grad_max = ops.where(max_mask, upstream, 0.0)
if axis is not None:
grad_max = ops.sum(grad_max, axis=axes)
else:
grad_max = ops.sum(grad_max)
grad_max = ops.reshape(grad_max, ops.shape(max_val))
return dx, grad_min, grad_max
return result, grad
return _fake_quant_with_min_max_vars_per_channel(inputs, min_vals, max_vals)
"""Float8-related methods"""
@keras_export("keras.quantizers.compute_float8_scale")
def compute_float8_scale(amax, scale, dtype_max, margin=0):
# The algorithm for computing the new scale is sourced from
# https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/jax.html#transformer_engine.jax.update_fp8_metas
# wherein the `original_scale` corresponds to the reciprocal of the
# `scale` passed in this function.
scale = ops.reciprocal(scale)
sf = ops.divide(ops.divide(dtype_max, amax), 2**margin)
sf = ops.where(amax > 0.0, sf, scale)
sf = ops.where(ops.isfinite(amax), sf, scale)
return ops.reciprocal(sf)
@keras_export("keras.quantizers.compute_float8_amax_history")
def compute_float8_amax_history(x, amax_history):
amax_update = ops.cast(ops.max(ops.abs(x)), amax_history.dtype)
new_amax_history = ops.scatter_update(
ops.roll(amax_history, shift=-1),
[[0]],
ops.reshape(amax_update, [1]),
)
return new_amax_history
@keras_export("keras.quantizers.quantize_and_dequantize")
def quantize_and_dequantize(inputs, scale, quantized_dtype, compute_dtype):
# Quantize
quantized_dtype_max = ops.cast(
float(ml_dtypes.finfo(quantized_dtype).max), compute_dtype
)
x = ops.divide(inputs, ops.cast(scale, compute_dtype))
x = ops.clip(x, -quantized_dtype_max, quantized_dtype_max)
x = ops.cast(x, quantized_dtype)
# Dequantize
x = ops.multiply(ops.cast(x, compute_dtype), ops.cast(scale, compute_dtype))
return x
@keras_export("keras.quantizers.pack_int4")
def pack_int4(arr, axis=0, dtype="int8"):
    """Pack an int4 tensor into an int8 tensor with packed nibbles.

    The input values must already be int8 in the signed range `[-8, 7]` and
    represent the desired int4 values. Packing is performed along the
    specified axis (default is 0).

    For every two consecutive rows, the **low nibble** of the output byte
    stores the value from the first row, and the **high nibble** stores
    the value from the second row.

    Args:
        arr: An `int8` or `uint8` tensor containing int4 values in the range
            `[-8, 7]`.
        axis: The axis along which to pack the tensor. Defaults to 0.
        dtype: The data type of the input and packed tensor. Can be
            `"int8"` or `"uint8"`. Defaults to `"int8"`.

    Returns:
        tuple: A tuple `(packed, packed_shape, orig_rows)` where `packed` is
        the packed int8 tensor with int4 values stored in nibbles,
        `packed_shape` is the shape of the packed tensor, and `orig_rows`
        is the original (unpacked) row count prior to any padding that may
        have been inserted when an odd number of rows is supplied.

    Example:

    ```python
    >>> import numpy as np
    >>> from keras.quantizers import pack_int4, unpack_int4

    # Example with axis=0
    # Original array has shape (3, 2)
    >>> original_array = np.array([[-3, 7], [2, -8], [1, 0]], dtype=np.int8)

    # Pack the array along axis 0. Since the length of axis 0 (3) is
    # odd, it will be padded to a length of 4. The packed array will
    # have a shape of (ceil(3/2), 2) = (2, 2).
    >>> packed, packed_shape, orig_len = pack_int4(original_array, axis=0)
    >>> print("Packed array:\n", packed)
    Packed array:
    [[  45 -121]
     [   1    0]]

    # Now, unpack the array back to its original form
    >>> unpacked = unpack_int4(packed, orig_len, axis=0)
    >>> print("Unpacked array:\n", unpacked)
    Unpacked array:
    [[-3  7]
     [ 2 -8]
     [ 1  0]]
    >>> np.allclose(original_array, unpacked)
    True

    # Example with axis=1
    # Original array has shape (2, 3)
    >>> original_array = np.array([[-3, 7, 2], [-8, 1, 0]], dtype=np.int8)

    # Pack along axis 1. Length of axis 1 (3) is padded to 4.
    # The new shape is (2, ceil(3/2)) = (2, 2).
    >>> packed, packed_shape, orig_len = pack_int4(original_array, axis=1)
    >>> print("Packed array:\n", packed)
    Packed array:
    [[125   2]
     [ 24   0]]

    # Unpack the array
    >>> unpacked = unpack_int4(packed, orig_len, axis=1)
    >>> print("Unpacked array:\n", unpacked)
    Unpacked array:
    [[-3  7  2]
     [-8  1  0]]
    >>> np.allclose(original_array, unpacked)
    True
    ```
    """
    # Packed storage is byte-based, so only 8-bit integer dtypes are valid.
    if dtype not in ("int8", "uint8"):
        raise ValueError(
            f"Expected dtype to be 'int8' or 'uint8', but got '{dtype}'."
        )
    if backend.standardize_dtype(arr.dtype) != dtype:
        raise TypeError(
            f"Expected {dtype} tensor for packing, got "
            f"{backend.standardize_dtype(arr.dtype)}."
        )
    # Some backends expose shape rank via `.rank`; otherwise use len(shape).
    rank = getattr(arr.shape, "rank", None) or len(arr.shape)
    if axis < 0:
        axis += rank
    # 1. Bring `axis` to the front so packing always pairs leading rows.
    perm = [axis] + [i for i in range(rank) if i != axis]
    inv_perm = [perm.index(i) for i in range(rank)]
    transposed = ops.transpose(arr, perm)
    # 2. Pad to even length.
    rows = ops.shape(transposed)[0]
    needs_pad = ops.equal(ops.mod(rows, 2), 1)
    # Always append one zero row so the tensor shape is static for JAX. If no
    # padding is actually needed, we'll slice it away later.
    zero_row = transposed[:1, ...] * 0  # same dtype/shape (1, ...)
    padded_full = ops.concatenate([transposed, zero_row], axis=0)
    # Number of valid rows after (possible) padding:
    # rows + (1 if needs_pad else 0)
    rows_packed = rows + ops.cast(needs_pad, "int32")
    # Slice to keep only the valid rows. This keeps the shape rank static
    # while allowing the row count to be dynamic.
    padded = padded_full[:rows_packed, ...]
    # 3-4. Group in pairs and pack: even rows go into the low nibble, odd
    # rows into the high nibble of each output byte.
    low = padded[::2, ...]
    high = padded[1::2, ...]
    mask = ops.array(0x0F, dtype=dtype)  # 0b00001111: keeps the low 4 bits
    low_u = ops.bitwise_and(low, mask)
    high_u = ops.bitwise_and(high, mask)
    packed = ops.bitwise_or(low_u, ops.left_shift(high_u, 4))
    packed = ops.cast(packed, dtype)
    # 5-6. Restore shape.
    packed = ops.transpose(packed, inv_perm)  # back to original order
    orig_len = rows  # number of slices before padding
    return packed, ops.shape(packed), orig_len
@keras_export("keras.quantizers.unpack_int4")
def unpack_int4(packed, orig_len, axis=0, dtype="int8"):
    """Unpack a packed int4 back to an int8 tensor in the range [-8, 7].

    This function reverses the packing performed by `pack_int4`, restoring
    the original int8 tensor (values in the range [-8, 7]) from a packed
    int8 tensor where each element contains two int4 values (one in the
    lower nibble, one in the upper nibble).

    The function restores the original axis order and removes any
    padding that was added during packing.

    Args:
        packed: An int8 tensor containing packed int4 values along the
            specified axis. Each int8 value encodes two int4 values.
        orig_len: The original (unpadded) length of the axis that was
            packed. This is used to remove any padding that may have
            been added during packing to ensure an even number of rows.
        axis: The axis along which the tensor was packed. Defaults to 0.
        dtype: The data type of the input and unpacked tensor. Can be
            `"int8"` or `"uint8"`. Defaults to `"int8"`.

    Returns:
        unpacked: An int8 tensor with the same shape as the original
            (unpacked) tensor, with values in the range [-8, 7].

    Example:

    ```python
    >>> import numpy as np
    >>> from keras.quantizers import pack_int4, unpack_int4

    # Example with axis=0
    # Original array has shape (3, 2)
    >>> original_array = np.array([[-3, 7], [2, -8], [1, 0]], dtype=np.int8)

    # Pack the array along axis 0. Since the length of axis 0 (3) is
    # odd, it will be padded to a length of 4. The packed array will
    # have a shape of (ceil(3/2), 2) = (2, 2).
    >>> packed, packed_shape, orig_len = pack_int4(original_array, axis=0)
    >>> print("Packed array:\n", packed)
    Packed array:
    [[  45 -121]
     [   1    0]]

    # Now, unpack the array back to its original form
    >>> unpacked = unpack_int4(packed, orig_len, axis=0)
    >>> print("Unpacked array:\n", unpacked)
    Unpacked array:
    [[-3  7]
     [ 2 -8]
     [ 1  0]]
    >>> np.allclose(original_array, unpacked)
    True

    # Example with axis=1
    # Original array has shape (2, 3)
    >>> original_array = np.array([[-3, 7, 2], [-8, 1, 0]], dtype=np.int8)

    # Pack along axis 1. Length of axis 1 (3) is padded to 4.
    # The new shape is (2, ceil(3/2)) = (2, 2).
    >>> packed, packed_shape, orig_len = pack_int4(original_array, axis=1)
    >>> print("Packed array:\n", packed)
    Packed array:
    [[125   2]
     [ 24   0]]

    # Unpack the array
    >>> unpacked = unpack_int4(packed, orig_len, axis=1)
    >>> print("Unpacked array:\n", unpacked)
    Unpacked array:
    [[-3  7  2]
     [-8  1  0]]
    >>> np.allclose(original_array, unpacked)
    True
    ```
    """
    if dtype not in ("int8", "uint8"):
        raise ValueError(
            f"Expected dtype to be 'int8' or 'uint8', but got '{dtype}'."
        )
    if backend.standardize_dtype(packed.dtype) not in ("int8", "uint8"):
        raise TypeError(
            f"Expected int8 or uint8 tensor for unpacking, got {packed.dtype}"
        )

    def to_signed(x):
        """Converts unpacked nibbles [0, 15] to signed int4 [-8, 7]."""
        dtype_x = backend.standardize_dtype(x.dtype)
        eight = ops.cast(8, dtype_x)
        sixteen = ops.cast(16, dtype_x)
        # Two's-complement interpretation of a 4-bit value.
        return ops.where(x < eight, x, x - sixteen)

    # Some backends expose shape rank via `.rank`; otherwise use len(shape).
    rank = getattr(packed.shape, "rank", None) or len(packed.shape)
    if axis < 0:
        axis += rank
    # Fast path for the most common case in Dense layers
    if axis == 0 and rank == 2:
        # The result of the bitwise op is a wider dtype (e.g., int32).
        mask = ops.array(0x0F, dtype=packed.dtype)
        low_unpacked = ops.bitwise_and(packed, mask)
        high_unpacked = ops.bitwise_and(ops.right_shift(packed, 4), mask)
        if dtype == "int8":
            low_unpacked = to_signed(low_unpacked)
            high_unpacked = to_signed(high_unpacked)
        low_final = ops.cast(low_unpacked, dtype)
        high_final = ops.cast(high_unpacked, dtype)
        # Interleave and reshape
        stacked = ops.stack([low_final, high_final], axis=1)
        unpacked = ops.reshape(stacked, (-1,) + tuple(ops.shape(packed)[1:]))
        # Remove padding and return
        return unpacked[:orig_len, ...]
    # General case: move the packed axis to the front first.
    perm = [axis] + [i for i in range(rank) if i != axis]
    inv_perm = [perm.index(i) for i in range(rank)]
    transposed = ops.transpose(packed, perm)
    # 1. Split nibbles.
    mask = ops.array(0x0F, dtype=packed.dtype)
    low = ops.bitwise_and(transposed, mask)
    high = ops.bitwise_and(ops.right_shift(transposed, 4), mask)
    # 2. Conditionally convert to signed.
    if dtype == "int8":
        low = to_signed(low)
        high = to_signed(high)
    low = ops.cast(low, dtype)
    high = ops.cast(high, dtype)
    # 3. Interleave and reshape.
    stacked = ops.stack([low, high], axis=1)
    unpacked = ops.reshape(stacked, (-1,) + tuple(ops.shape(transposed)[1:]))
    # 4. Remove padding and restore original layout.
    unpacked = unpacked[:orig_len, ...]
    unpacked = ops.transpose(unpacked, inv_perm)
    return unpacked
class GPTQQuantizer(Quantizer):
    """A class that handles the quantization of weights using GPTQ method.

    This class provides methods to find quantization parameters (scale and
    zero) for a given tensor and can be used to quantize weights in a GPTQ
    context.

    Args:
        config: A `GPTQConfig` carrying the quantization settings that are
            copied onto this quantizer (`weight_bits`, `per_channel`,
            `symmetric`, `group_size`). Defaults to
            `GPTQConfig(tokenizer=None, dataset=None)`.
        compute_dtype: Floating-point dtype used when computing the
            quantization parameters. Defaults to `"float32"`.
    """

    def __init__(
        self,
        config=GPTQConfig(tokenizer=None, dataset=None),
        compute_dtype="float32",
    ):
        # NOTE(review): the default `config` is a single shared instance
        # created at class-definition time — assumes GPTQConfig is treated
        # as read-only here; confirm.
        Quantizer.__init__(self)
        self.weight_bits = config.weight_bits
        self.per_channel = config.per_channel
        self.symmetric = config.symmetric
        self.group_size = config.group_size
        self.compute_dtype = compute_dtype
        # These are now determined later by `find_params`
        self.scale = None
        self.zero = None
        self.maxq = None

    def find_params(self, input_tensor, weight=True):
        """Finds quantization parameters (scale and zero) for a given tensor."""
        self.scale, self.zero, self.maxq = compute_quantization_parameters(
            input_tensor,
            bits=self.weight_bits,
            symmetric=self.symmetric,
            per_channel=self.per_channel,
            group_size=self.group_size,
            weight=weight,
            compute_dtype=self.compute_dtype,
        )
        return self.scale, self.zero, self.maxq

    def get_config(self):
        """Returns the serializable configuration of this quantizer."""
        config = super().get_config()
        config.update(
            {
                "weight_bits": self.weight_bits,
                "per_channel": self.per_channel,
                "symmetric": self.symmetric,
                "group_size": self.group_size,
            }
        )
        return config

    @classmethod
    def from_config(cls, config):
        """Rebuilds a `GPTQQuantizer` from a `get_config()` dictionary."""
        # NOTE(review): `compute_dtype` is not serialized by `get_config`,
        # so deserialization always falls back to the "float32" default.
        gptq = GPTQConfig(
            tokenizer=None,
            dataset=None,
            weight_bits=config["weight_bits"],
            per_channel=config["per_channel"],
            symmetric=config["symmetric"],
            group_size=config["group_size"],
        )
        return cls(gptq)
def compute_quantization_parameters(
    x,
    *,
    bits,
    symmetric=False,
    per_channel=False,
    group_size=-1,
    weight=False,
    compute_dtype="float32",
):
    """
    Computes the scale and zero-point for quantization.

    This function calculates the scale and zero-point required for quantizing
    a given tensor `x` based on the specified parameters. It supports grouped,
    per-channel, per-tensor, symmetric, and asymmetric quantization - along
    with any combinations of these.

    Args:
        x: KerasTensor. The input tensor to quantize.
        bits: int. The number of bits to quantize to (e.g., 4).
        symmetric: bool. Whether to use symmetric quantization.
        per_channel: bool. Whether to quantize per channel.
        group_size: int. The group size for quantization.
        weight: bool. Whether the input tensor is a weight tensor.
        compute_dtype: str. Floating-point dtype used for the computed
            parameters. Defaults to `"float32"`.

    Returns:
        scale: KerasTensor. The scale tensor for quantization.
        zero: KerasTensor. The zero tensor for quantization (cast to uint8).
        maxq: scalar. The maximum quantization value (2**bits - 1).
    """
    if x is None:
        raise ValueError(f"Input tensor {x} cannot be None.")
    # For weights, we typically expect at least a 2D tensor.
    if weight and len(x.shape) < 2:
        raise ValueError(
            f"Input weight tensor {x} must have a rank of at "
            f"least 2, but got rank {len(x.shape)}."
        )
    if ops.size(x) == 0:
        raise ValueError("Input tensor 'x' cannot be empty.")
    original_shape = x.shape
    # NOTE(review): when per_channel=True and weight=False, neither branch
    # below assigns `input_reshaped`, which would raise UnboundLocalError.
    # Confirm callers never hit that combination.
    if per_channel:
        if weight:
            if group_size != -1:
                # Grouped: each row of the reshaped tensor is one group.
                input_reshaped = ops.reshape(x, [-1, group_size])
            else:
                # Per output channel: one row per channel.
                input_reshaped = ops.reshape(x, [original_shape[0], -1])
    else:  # per-tensor
        input_reshaped = ops.reshape(x, [1, -1])
    # Find min/max values
    min_values = ops.min(input_reshaped, axis=1)
    max_values = ops.max(input_reshaped, axis=1)
    # Apply symmetric quantization logic if enabled
    if symmetric:
        max_values = ops.maximum(ops.abs(min_values), max_values)
        min_values = ops.where(
            ops.less(min_values, 0), ops.negative(max_values), min_values
        )
    # Ensure range is not zero to avoid division errors
    zero_range = ops.equal(min_values, max_values)
    min_values = ops.where(zero_range, ops.subtract(min_values, 1), min_values)
    max_values = ops.where(zero_range, ops.add(max_values, 1), max_values)
    # Largest quantized level: 2**bits - 1.
    maxq = ops.cast(ops.subtract(ops.power(2, bits), 1), compute_dtype)
    # Calculate scale and zero-point
    scale = ops.divide(ops.subtract(max_values, min_values), maxq)
    if symmetric:
        # Symmetric: zero-point sits at the middle of the quantized range.
        zero = ops.full_like(scale, ops.divide(ops.add(maxq, 1), 2))
    else:
        zero = ops.round(ops.divide(ops.negative(min_values), scale))
    # Ensure scale is non-zero
    scale = ops.where(ops.less_equal(scale, 0), 1e-8, scale)
    if weight:
        # Per-channel, non-grouped case: simple reshape is correct.
        if per_channel and group_size == -1:
            scale = ops.reshape(scale, [-1, 1])
            zero = ops.reshape(zero, [-1, 1])
        elif not per_channel:
            # Per-tensor: broadcast the single scale/zero to every row.
            num_rows = original_shape[0]
            scale = ops.tile(ops.reshape(scale, (1, 1)), (num_rows, 1))
            zero = ops.tile(ops.reshape(zero, (1, 1)), (num_rows, 1))
    if per_channel:
        scale = ops.reshape(scale, [-1, 1])
        zero = ops.reshape(zero, [-1, 1])
    zero = ops.cast(zero, "uint8")
    return scale, zero, maxq
def quantize_with_zero_point(input_tensor, scale, zero, maxq):
    """Quantize a float tensor into discrete levels [0, maxq] using
    per-tensor/per-channel/grouped scaling.

    Returns `q` (same dtype as inputs/scales; float is fine) where values are
    in [0, maxq].

    Args:
        input_tensor: KerasTensor. The input tensor to quantize.
        scale: KerasTensor. The scale tensor for quantization.
        zero: KerasTensor. The zero tensor for quantization.
        maxq: KerasTensor. The maximum quantization value.

    Returns:
        KerasTensor. The quantized tensor.
    """
    # Replace zero scales with a tiny epsilon to guard against divide-by-zero.
    epsilon = ops.cast(1e-8, dtype=scale.dtype)
    denominator = ops.where(ops.equal(scale, 0), epsilon, scale)
    # q = clip(round(x / scale + zero), 0, maxq)
    shifted = ops.add(
        ops.divide(input_tensor, denominator), ops.cast(zero, scale.dtype)
    )
    return ops.clip(ops.round(shifted), 0, maxq)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/quantizers/quantization_config_test.py | keras/src/quantizers/quantization_config_test.py | import os
from keras.src import layers
from keras.src import models
from keras.src import saving
from keras.src import testing
from keras.src.quantizers.quantization_config import Int4QuantizationConfig
from keras.src.quantizers.quantization_config import Int8QuantizationConfig
from keras.src.quantizers.quantization_config import QuantizationConfig
from keras.src.quantizers.quantization_config import validate_and_resolve_config
from keras.src.quantizers.quantizers import AbsMaxQuantizer
class QuantizationConfigTest(testing.TestCase):
    """Unit tests for the quantization config classes and their resolution.

    Covers the abstract base class, the int8/int4 configs (validation,
    serialization, output-dtype checks), `validate_and_resolve_config`,
    and end-to-end save/load of a quantized model.
    """

    def test_base_quantization_config(self):
        # The abstract base must not expose a concrete `mode`.
        config = QuantizationConfig()
        with self.assertRaises(NotImplementedError):
            _ = config.mode

    def test_int8_quantization_config_valid(self):
        config = Int8QuantizationConfig()
        self.assertEqual(config.mode, "int8")
        self.assertIsNone(config.weight_quantizer)
        # Valid weight quantizer
        q = AbsMaxQuantizer(axis=0, value_range=(-127, 127))
        config = Int8QuantizationConfig(weight_quantizer=q)
        self.assertEqual(config.weight_quantizer, q)

    def test_int8_quantization_config_invalid(self):
        # Invalid value_range
        with self.assertRaisesRegex(ValueError, "value_range"):
            AbsMaxQuantizer(axis=0, value_range=(-256, 256))

    def test_int4_quantization_config_valid(self):
        config = Int4QuantizationConfig()
        self.assertEqual(config.mode, "int4")
        self.assertIsNone(config.weight_quantizer)
        # Valid weight quantizer
        q = AbsMaxQuantizer(axis=0, value_range=(-8, 7))
        config = Int4QuantizationConfig(weight_quantizer=q)
        self.assertEqual(config.weight_quantizer, q)

    def test_int4_quantization_config_invalid(self):
        # Invalid value_range (int8 range passed to an int4 config)
        q = AbsMaxQuantizer(axis=0, value_range=(-127, 127))
        with self.assertRaisesRegex(ValueError, "value_range"):
            Int4QuantizationConfig(weight_quantizer=q)

    def test_quantization_config_serialization(self):
        # Round-trip through get_config/from_config must preserve the
        # quantizer types and their axes.
        config = Int8QuantizationConfig(
            weight_quantizer=AbsMaxQuantizer(axis=0),
            activation_quantizer=AbsMaxQuantizer(axis=-1),
        )
        serialized = config.get_config()
        deserialized = Int8QuantizationConfig.from_config(serialized)
        self.assertIsInstance(deserialized, Int8QuantizationConfig)
        self.assertIsInstance(deserialized.weight_quantizer, AbsMaxQuantizer)
        self.assertIsInstance(
            deserialized.activation_quantizer, AbsMaxQuantizer
        )
        self.assertEqual(deserialized.weight_quantizer.axis, (0,))
        self.assertEqual(deserialized.activation_quantizer.axis, (-1,))

    def test_validate_and_resolve_config(self):
        # 1. String mode
        config = validate_and_resolve_config("int8", None)
        self.assertIsInstance(config, Int8QuantizationConfig)
        self.assertEqual(config.mode, "int8")
        config = validate_and_resolve_config("int4", None)
        self.assertIsInstance(config, Int4QuantizationConfig)
        self.assertEqual(config.mode, "int4")
        # 2. Config object
        config_in = Int8QuantizationConfig()
        config_out = validate_and_resolve_config(None, config_in)
        self.assertIs(config_out, config_in)
        # 3. Mode + Config (matching)
        config_in = Int8QuantizationConfig()
        config_out = validate_and_resolve_config("int8", config_in)
        self.assertIs(config_out, config_in)
        # 4. Mode + Config (mismatch)
        config_in = Int8QuantizationConfig()
        with self.assertRaisesRegex(ValueError, "Contradictory arguments"):
            validate_and_resolve_config("int4", config_in)
        # 5. Invalid mode
        with self.assertRaisesRegex(ValueError, "Invalid quantization mode"):
            validate_and_resolve_config("invalid_mode", None)
        # 6. GPTQ without config
        with self.assertRaisesRegex(ValueError, "must pass a `GPTQConfig`"):
            validate_and_resolve_config("gptq", None)
        # 7. Contradictory config
        with self.assertRaisesRegex(ValueError, "Contradictory arguments"):
            validate_and_resolve_config("gptq", Int8QuantizationConfig())

        # 8. GPTQ with invalid config type (but correct mode)
        class FakeGPTQConfig(QuantizationConfig):
            @property
            def mode(self):
                return "gptq"

        with self.assertRaisesRegex(ValueError, "requires a valid `config`"):
            validate_and_resolve_config("gptq", FakeGPTQConfig())

    def test_int8_quantization_config_output_dtype_mismatch(self):
        # Invalid output_dtype
        q = AbsMaxQuantizer(
            axis=0, value_range=(-127, 127), output_dtype="int16"
        )
        with self.assertRaisesRegex(ValueError, "output_dtype='int8'"):
            Int8QuantizationConfig(weight_quantizer=q)

    def test_int4_quantization_config_output_dtype_mismatch(self):
        # Invalid output_dtype
        q = AbsMaxQuantizer(axis=0, value_range=(-8, 7), output_dtype="int16")
        with self.assertRaisesRegex(ValueError, "output_dtype='int8'"):
            Int4QuantizationConfig(weight_quantizer=q)

    def test_model_save_and_load(self):
        """
        Test custom quantizer serialization for model save and load.
        """
        # Setup
        weight_range = (-100, 100)
        custom_quantizer = AbsMaxQuantizer(axis=0, value_range=weight_range)
        config = Int8QuantizationConfig(
            weight_quantizer=custom_quantizer,
            activation_quantizer=None,
        )
        layer = layers.Dense(10)
        layer.build((None, 5))
        layer.quantize("int8", config=config)
        model = models.Sequential([layer])
        model.build((None, 5))
        # Save to temp file
        filepath = os.path.join(self.get_temp_dir(), "quantized_model.keras")
        model.save(filepath)
        # Load back
        loaded_model = saving.load_model(filepath)
        # Verify the config, quantizer type/axis/range, and quantized flag
        # all survive the round trip.
        loaded_layer = loaded_model.layers[0]
        self.assertIsInstance(
            loaded_layer.quantization_config, Int8QuantizationConfig
        )
        quantizer = loaded_layer.quantization_config.weight_quantizer
        self.assertIsInstance(quantizer, AbsMaxQuantizer)
        self.assertEqual(quantizer.axis, (0,))
        self.assertAllEqual(quantizer.value_range, weight_range)
        self.assertIsNone(loaded_layer.quantization_config.activation_quantizer)
        self.assertTrue(loaded_layer._is_quantized)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/quantizers/gptq_core_test.py | keras/src/quantizers/gptq_core_test.py | import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import testing
from keras.src.quantizers.gptq_config import GPTQConfig
from keras.src.quantizers.gptq_core import get_dataloader
from keras.src.quantizers.gptq_core import gptq_quantize
VOCAB_SIZE = 100
class MockTokenizer:
    """Minimal tokenizer stand-in exposing the interface the tests need."""

    def tokenize(self, text):
        # Join the input (string or iterable of strings) and map each
        # character to a pseudo token id within [0, VOCAB_SIZE).
        joined = "".join(text)
        return [ord(char) % VOCAB_SIZE for char in joined]

    def __call__(self, text):
        # Calling the tokenizer is equivalent to `tokenize`.
        return self.tokenize(text)
class EmptyBlock(layers.Layer):
    """A block whose only sublayer is LayerNormalization, so it exposes
    nothing quantizable."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.ln = layers.LayerNormalization()

    def call(self, inputs):
        normalized = self.ln(inputs)
        return normalized
class TransformerBlock(layers.Layer):
    """A toy transformer block containing a single quantizable Dense layer."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.dense = layers.Dense(128)

    def call(self, inputs):
        projected = self.dense(inputs)
        return projected
def _get_model_with_backbone(
    has_transformer_layers=True, embedding_name="embedding"
):
    """Creates a KerasHub-style model with a backbone.

    Args:
        has_transformer_layers: Whether the backbone contains a
            `TransformerBlock`. `False` produces a backbone with no
            quantizable blocks.
        embedding_name: Attribute name under which the embedding layer is
            stored on the backbone; a non-standard name simulates a backbone
            whose embedding cannot be discovered.

    Returns:
        A built `Model` instance that has already been called once on
        dummy input.
    """

    class Backbone(layers.Layer):
        def __init__(self, vocab_size, embedding_dim=128, **kwargs):
            super().__init__(**kwargs)
            # Use direct assignment
            setattr(
                self,
                embedding_name,
                layers.Embedding(vocab_size, embedding_dim),
            )
            # Keep track of layers in a list for the call method
            self.transformer_layers = []
            if has_transformer_layers:
                self.transformer_layers.append(TransformerBlock())

        def call(self, inputs):
            x = getattr(self, embedding_name)(inputs)
            for layer in self.transformer_layers:
                x = layer(x)
            return x

    class Model(models.Model):
        def __init__(self, vocab_size, **kwargs):
            super().__init__(**kwargs)
            # Pass configuration directly
            self.backbone = Backbone(vocab_size=vocab_size)
            self.classifier = layers.Dense(1, activation="sigmoid")

        def call(self, inputs):
            x = self.backbone(inputs)
            x = layers.GlobalAveragePooling1D()(x)
            return self.classifier(x)

    model = Model(vocab_size=VOCAB_SIZE)
    rng = np.random.default_rng(seed=42)
    # NOTE(review): the dummy input is float-valued but feeds an Embedding
    # layer — presumably relies on implicit casting to integer ids; confirm.
    dummy_input = rng.normal(loc=0, scale=1, size=(2, 64)).astype(np.float32)
    _ = model(dummy_input)
    return model
def build_all_tokens_strings(dataset, tokenizer, eos_id=None):
    """Tokenize every sample and concatenate into one flat int32 stream.

    When `eos_id` is given, it is inserted between consecutive samples
    (but not after the last one).
    """
    chunks = []
    last_index = len(dataset) - 1
    for index, sample in enumerate(dataset):
        token_ids = np.asarray(
            tokenizer.tokenize(sample), dtype=np.int32
        ).reshape(-1)
        chunks.append(token_ids)
        if eos_id is not None and index != last_index:
            chunks.append(np.array([eos_id], dtype=np.int32))
    return np.concatenate(chunks, axis=0).astype(np.int32, copy=False)
def sliding_windows(x, L):
    """Return every length-`L` contiguous window of `x` as a zero-copy view."""
    return np.lib.stride_tricks.sliding_window_view(x, window_shape=L)
@pytest.mark.requires_trainable_backend
class TestGPTQCore(testing.TestCase):
    """Tests for the GPTQ calibration dataloader and quantization driver.

    Fix vs. previous revision: `test_shape_and_dtype_pretokenized`
    instantiated `MockTokenizer()` twice; the redundant second assignment
    is removed.
    """

    @parameterized.named_parameters(
        [("strided", "strided"), ("linspace", "linspace"), ("random", "random")]
    )
    def test_shape_and_dtype_strings(self, strategy):
        """Test the shape and dtype of the output for string inputs."""
        tok = MockTokenizer()
        dataset = ["a b c d e f g", "h i j k"]
        seq_len, n = 5, 7
        out = get_dataloader(
            tok, seq_len, dataset, num_samples=n, strategy=strategy, seed=123
        )
        self.assertEqual(out.shape, (n, 1, seq_len))
        self.assertEqual(out.dtype, np.int32)

    @parameterized.named_parameters(
        [("strided", "strided"), ("linspace", "linspace"), ("random", "random")]
    )
    def test_shape_and_dtype_pretokenized(self, strategy):
        """Test the shape and dtype of the output for pre-tokenized inputs."""
        tok = MockTokenizer()
        # Pre-tokenized inputs; mixed shapes (1, L) and (L,)
        seqs = [
            np.array([[1, 2, 3, 4]], dtype=np.int64),
            np.array([5, 6], dtype=np.int64),
        ]
        seq_len, n = 3, 4
        out = get_dataloader(
            tok, seq_len, seqs, num_samples=n, strategy=strategy, seed=7
        )
        self.assertEqual(out.shape, (n, 1, seq_len))
        self.assertEqual(out.dtype, np.int32)

    def test_strided_is_deterministic_for_same_args(self):
        # Same arguments (including seed) must yield identical samples.
        tok = MockTokenizer()
        dataset = ["a b c d e", "f g h i j k"]
        out1 = get_dataloader(
            tok, 4, dataset, num_samples=6, strategy="strided", seed=99
        )
        out2 = get_dataloader(
            tok, 4, dataset, num_samples=6, strategy="strided", seed=99
        )
        self.assertTrue(ops.all(ops.equal(out1, out2)))

    def test_random_reproducibility_by_seed(self):
        # Equal seeds reproduce; a different seed must change the samples.
        tok = MockTokenizer()
        dataset = ["a b c d e", "f g h i j k"]
        a = get_dataloader(
            tok, 4, dataset, num_samples=6, strategy="random", seed=123
        )
        b = get_dataloader(
            tok, 4, dataset, num_samples=6, strategy="random", seed=123
        )
        c = get_dataloader(
            tok, 4, dataset, num_samples=6, strategy="random", seed=124
        )
        self.assertTrue(ops.all(ops.equal(a, b)))
        self.assertFalse(ops.all(ops.equal(a, c)))

    def test_linspace_windows_match_expected(self):
        # Recompute the expected linspace starts by hand and compare windows.
        tok = MockTokenizer()
        dataset = ["aa bb cc dd", "ee ff gg"]
        seq_len, n = 3, 5
        eos_id = None
        all_tokens = build_all_tokens_strings(dataset, tok, eos_id=eos_id)
        max_start = all_tokens.size - seq_len
        expected_starts = np.linspace(0, max_start, n, dtype=np.int64)
        expected = sliding_windows(all_tokens, seq_len)[expected_starts]
        got = get_dataloader(
            tok, seq_len, dataset, num_samples=n, strategy="linspace"
        )
        self.assertTrue(
            ops.all(ops.equal(got[:, 0, :], expected.astype(np.int32)))
        )

    def test_strided_override_respected(self):
        """Tests that strided windows are disjoint and cover the input."""
        tok = MockTokenizer()
        # 20 tokens total
        # with seq_len=4 and stride=4, we expect disjoint chunks
        # in order (modulo offset)
        dataset = [" ".join([f"t{i}" for i in range(20)])]
        seq_len, n, stride = 4, 5, 4
        out = get_dataloader(
            tok,
            seq_len,
            dataset,
            num_samples=n,
            strategy="strided",
            stride=stride,
            seed=0,
        )
        # Validate that each sample is a contiguous run
        # of length seq_len from the flattened stream
        flat = build_all_tokens_strings(dataset, tok)
        for s in out[:, 0, :]:
            # Each window should appear as a slice in the flat stream
            # (This is a soft check; exact start positions depend on offset.)
            joined = " ".join(map(str, s.tolist()))
            self.assertIn(joined, " ".join(map(str, flat.tolist())))

    def test_eos_insertion_is_present_in_some_window_with_linspace(self):
        tok = MockTokenizer()
        dataset = ["aa aa", "bb bb"]  # len = 5 + 1(EOS) + 5 = 11
        eos = 9999
        seq_len = 3
        n = 3
        out = get_dataloader(
            tok,
            seq_len,
            dataset,
            num_samples=n,
            strategy="linspace",
            eos_id=eos,
        )
        # linspace starts -> [0, 4, 8]; the middle window [4:7]
        # includes EOS at 5
        windows = out[:, 0, :]
        self.assertTrue(
            np.any(np.any(windows == eos, axis=1)),
            "Expected EOS to appear in at least one sampled window with "
            "linspace.",
        )

    def test_get_dataloader_error_scenarios(self):
        """Tests error cases for get_dataloader."""
        with pytest.raises(ValueError, match="Provided dataset is empty"):
            get_dataloader(
                tokenizer=MockTokenizer(),
                sequence_length=10,
                dataset=[],
                num_samples=10,
            )
        with self.assertRaisesRegex(
            TypeError,
            "The `dataset` argument must be an iterable.*Got type: str.*"
            "Please pass the loaded dataset directly.",
        ):
            get_dataloader(
                tokenizer=MockTokenizer(),
                sequence_length=10,
                dataset="wikitext2",
                num_samples=10,
            )

    def test_apply_gptq_on_multi_block_model(self):
        """Tests quantization on a model with multiple blocks."""
        model = models.Sequential(
            [
                layers.Embedding(VOCAB_SIZE, 128),
                TransformerBlock(),
                TransformerBlock(),
            ]
        )
        model.build(input_shape=(None, 10))
        layer_structure = {
            "pre_block_layers": [model.layers[0]],
            "sequential_blocks": [model.layers[1], model.layers[2]],
        }
        config = GPTQConfig(
            dataset=["test data"],
            tokenizer=MockTokenizer(),
            group_size=32,
            quantization_layer_structure=layer_structure,
        )
        model.quantize("gptq", config=config)

    @parameterized.named_parameters(
        (
            "no_embedding_layer",
            models.Sequential([layers.Dense(10)]),
            "For 'gptq' mode, a valid quantization structure must be provided",
        ),
        (
            "no_transformer_blocks",
            models.Sequential(
                [layers.Embedding(VOCAB_SIZE, 10), layers.Dense(10)]
            ),
            "For 'gptq' mode, a valid quantization structure must be provided",
        ),
        (
            "backbone_no_layers",
            _get_model_with_backbone(has_transformer_layers=False),
            "For 'gptq' mode, a valid quantization structure must be provided",
        ),
        (
            "backbone_no_embedding",
            _get_model_with_backbone(embedding_name="wrong_name"),
            "For 'gptq' mode, a valid quantization structure must be provided",
        ),
    )
    def test_apply_gptq_with_unsupported_architectures(
        self, model, error_message
    ):
        """Tests that quantize fails correctly for various unsupported
        model architectures."""
        if not model.built:
            model.build(input_shape=(None, 10))
        config = GPTQConfig(dataset=["test"], tokenizer=MockTokenizer())
        with self.assertRaisesRegex(ValueError, error_message):
            # We pass None as structure to trigger the error
            gptq_quantize(config, quantization_layer_structure=None)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/quantizers/quantizers_test.py | keras/src/quantizers/quantizers_test.py | import numpy as np
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import ops
from keras.src import quantizers
from keras.src import random
from keras.src import testing
from keras.src.quantizers.quantizers import compute_quantization_parameters
from keras.src.quantizers.quantizers import dequantize_with_sz_map
from keras.src.quantizers.quantizers import dequantize_with_zero_point
from keras.src.quantizers.quantizers import quantize_with_sz_map
from keras.src.quantizers.quantizers import quantize_with_zero_point
from keras.src.testing.test_utils import named_product
class QuantizersTest(testing.TestCase):
def test_get_method(self):
quantizer = quantizers.get("abs_max_quantizer")
self.assertTrue(quantizer, quantizers.AbsMaxQuantizer)
quantizer = quantizers.get(None)
self.assertEqual(quantizer, None)
with self.assertRaises(ValueError):
quantizers.get("typo")
def test_abs_max_quantizer(self):
values = random.uniform([3, 4, 5], minval=-1, maxval=1, dtype="float32")
quantizer = quantizers.AbsMaxQuantizer()
# Test quantizing
quantized_values, scale = quantizer(values, axis=-1)
self.assertDType(quantized_values, "int8")
self.assertDType(scale, "float32")
self.assertEqual(tuple(quantized_values.shape), (3, 4, 5))
self.assertEqual(tuple(scale.shape), (3, 4, 1))
self.assertLessEqual(ops.max(quantized_values), 127)
self.assertGreaterEqual(ops.min(quantized_values), -127)
# Test dequantizing
dequantized_values = ops.divide(quantized_values, scale)
rmse = ops.sqrt(
ops.mean(ops.square(ops.subtract(values, dequantized_values)))
)
self.assertLess(rmse, 1e-1) # loose assertion
# Test serialization
self.run_class_serialization_test(quantizer)
# Test bfloat16 & float16 dtype
values = random.uniform(
[3, 4, 5], minval=-1, maxval=1, dtype="bfloat16"
)
quantized_values, scale = quantizer(values, axis=-1)
self.assertDType(quantized_values, "int8")
self.assertDType(scale, "bfloat16")
values = random.uniform([3, 4, 5], minval=-1, maxval=1, dtype="float16")
quantized_values, scale = quantizer(values, axis=-1)
self.assertDType(quantized_values, "int8")
self.assertDType(scale, "float16")
def test_abs_max_quantizer_to_numpy(self):
values = random.uniform([3, 4, 5], minval=-1, maxval=1, dtype="float32")
quantized_values, scale = quantizers.abs_max_quantize(
values, axis=-1, to_numpy=True
)
ref_quantized_values, ref_scale = quantizers.abs_max_quantize(
values, axis=-1
)
self.assertAllClose(quantized_values, ref_quantized_values)
self.assertAllClose(scale, ref_scale)
def test_compute_float8_scale(self):
amax = 3.0
scale = 4.0
dtype_max = 448.0 # float8_e4m3fn
# The algorithm for computing the new scale is sourced from
# https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/jax.html#transformer_engine.jax.update_fp8_metas
expected_scale = 1.0 / (dtype_max / amax) / (2**0)
computed_scale = quantizers.compute_float8_scale(amax, scale, dtype_max)
self.assertAllClose(computed_scale, expected_scale)
def test_compute_float8_amax_history(self):
    """The new amax lands in slot 0; older entries are shifted left."""
    values = random.uniform([3, 4, 5], minval=-1, maxval=1)
    history = random.uniform([123])
    current_amax = ops.max(ops.abs(values))
    updated_history = quantizers.compute_float8_amax_history(values, history)
    self.assertAllClose(updated_history[0], current_amax)
    # Remaining slots hold the previous history rolled one step left.
    self.assertAllClose(updated_history[1:], ops.roll(history, -1)[1:])
def test_quantize_and_dequantize(self):
    """Round-tripping through FP8 keeps values within a loose tolerance."""
    scale = 1.0 / 100.0
    values = random.uniform([3, 4, 5], minval=-1, maxval=1)
    # (float8 dtype, tolerance): e5m2 has fewer mantissa bits, so the
    # expected quantization error is larger.
    for float8_dtype, tolerance in (
        ("float8_e4m3fn", 1e-1),
        ("float8_e5m2", 5e-1),
    ):
        qdq_values = quantizers.quantize_and_dequantize(
            values, scale, float8_dtype, "float32"
        )
        # Loose assertion due to the expected quantization error.
        self.assertAllClose(qdq_values, values, atol=tolerance)
# Shape/axis parameter grid for the int4 pack/unpack round-trip test below.
SHAPE_AXIS_SCENARIOS = [
    # 1. 2D Tensors
    # Covers the unpack fast path (rank=2, axis=0) for both parities
    {"testcase_name": "2d_axis0_odd", "shape": (5, 8), "axis": 0},
    {"testcase_name": "2d_axis0_even", "shape": (4, 8), "axis": 0},
    # Covers the general path and a negative axis for 2D tensors
    {"testcase_name": "2d_axis1_odd", "shape": (8, 7), "axis": 1},
    {"testcase_name": "2d_axis_neg1_even", "shape": (8, 6), "axis": -1},
    # 2. Higher-Rank Tensors
    # Covers a middle axis for a complex shape with both parities
    {"testcase_name": "4d_axis1_odd", "shape": (2, 5, 4, 6), "axis": 1},
    {"testcase_name": "4d_axis2_even", "shape": (2, 4, 8, 6), "axis": 2},
    # Covers the last axis of a complex shape with a negative index
    {
        "testcase_name": "4d_axis_neg1_odd",
        "shape": (2, 4, 6, 7),
        "axis": -1,
    },
]
# Container dtypes for packed int4 data, with their valid value ranges.
DTYPE_PARAMS = [
    {"testcase_name": "int8", "dtype": "int8", "minval": -8, "maxval": 8},
    {"testcase_name": "uint8", "dtype": "uint8", "minval": 0, "maxval": 16},
]
@parameterized.named_parameters(
    named_product(SHAPE_AXIS_SCENARIOS, DTYPE_PARAMS)
)
def test_pack_unpack_int4(self, shape, axis, dtype, minval, maxval):
    """`pack_int4` followed by `unpack_int4` must round-trip losslessly."""
    # Random int4-range values stored in the container `dtype`.
    original = ops.cast(
        ops.floor(random.uniform(shape, minval=minval, maxval=maxval)),
        dtype,
    )
    packed, packed_shape, orig_len = quantizers.pack_int4(
        original, axis=axis, dtype=dtype
    )
    restored = quantizers.unpack_int4(
        packed, orig_len, axis=axis, dtype=dtype
    )
    # The container dtype is preserved on both sides of the round-trip.
    self.assertDType(packed, dtype)
    self.assertDType(restored, dtype)
    # The round-trip must be lossless.
    self.assertAllClose(restored, original)
    # Two int4 values are packed per byte along `axis` (rounded up).
    expected_packed_shape = list(shape)
    expected_packed_shape[axis] = (expected_packed_shape[axis] + 1) // 2
    self.assertEqual(
        list(ops.convert_to_numpy(packed_shape)), expected_packed_shape
    )
@parameterized.named_parameters(
    ("per_tensor", None),
    ("per_channel", -1),
)
def test_fake_quant_with_min_max_vars_symbolic(self, axis):
    """Symbolic inputs stay symbolic and keep their static shape."""
    symbolic_input = backend.KerasTensor((2, 3, 4))
    output = quantizers.fake_quant_with_min_max_vars(
        symbolic_input, -3.0, 3.0, axis=axis
    )
    self.assertIsInstance(output, backend.KerasTensor)
    self.assertEqual(output.shape, (2, 3, 4))
@parameterized.named_parameters(
[
{
"testcase_name": "wide_8bits_input_mins_0.0_input_maxs_255.0",
"narrow_range": False,
"input_mins": [0.0],
"input_maxs": [255.0],
"num_bits": 8,
"expected_nudged_input_mins": [0.0],
"expected_nudged_input_maxs": [255.0],
"expected_steps": [1.0],
"axis": None,
},
{
"testcase_name": (
"wide_8bits_scalar_input_mins_0.0_input_maxs_255.0"
),
"narrow_range": False,
"input_mins": 0.0,
"input_maxs": 255.0,
"num_bits": 8,
"expected_nudged_input_mins": [0.0],
"expected_nudged_input_maxs": [255.0],
"expected_steps": [1.0],
"axis": None,
},
{
"testcase_name": "wide_8bits_input_mins_0.5_input_maxs_128.0",
"narrow_range": False,
"input_mins": [0.5],
"input_maxs": [128.0],
"num_bits": 8,
"expected_nudged_input_mins": [0.0],
"expected_nudged_input_maxs": [127.5],
"expected_steps": [0.5],
"axis": None,
},
{
"testcase_name": "wide_8bits_input_mins_-128.0_input_maxs_-0.5",
"narrow_range": False,
"input_mins": [-128.0],
"input_maxs": [-0.5],
"num_bits": 8,
"expected_nudged_input_mins": [-127.5],
"expected_nudged_input_maxs": [0.0],
"expected_steps": [0.5],
"axis": None,
},
{
"testcase_name": "wide_8bits_input_mins_-0.1_input_maxs_127.4",
"narrow_range": False,
"input_mins": [-0.1],
"input_maxs": [127.4],
"num_bits": 8,
"expected_nudged_input_mins": [0.0],
"expected_nudged_input_maxs": [127.5],
"expected_steps": [0.5],
"axis": None,
},
{
"testcase_name": "narrow_8bits_input_mins_0.0_input_maxs_254.0",
"narrow_range": True,
"input_mins": [0.0],
"input_maxs": [254.0],
"num_bits": 8,
"expected_nudged_input_mins": [0.0],
"expected_nudged_input_maxs": [254.0],
"expected_steps": [1.0],
"axis": None,
},
{
"testcase_name": "narrow_8bits_input_mins_0.1_input_maxs_127.1",
"narrow_range": True,
"input_mins": [0.1],
"input_maxs": [127.1],
"num_bits": 8,
"expected_nudged_input_mins": [0.0],
"expected_nudged_input_maxs": [127.0],
"expected_steps": [0.5],
"axis": None,
},
{
"testcase_name": (
"narrow_8bits_input_mins_-127.1_input_maxs_-0.1"
),
"narrow_range": True,
"input_mins": [-127.1],
"input_maxs": [-0.1],
"num_bits": 8,
"expected_nudged_input_mins": [-127.0],
"expected_nudged_input_maxs": [0.0],
"expected_steps": [0.5],
"axis": None,
},
{
"testcase_name": (
"narrow_8bits_input_mins_-0.1_input_maxs_126.9"
),
"narrow_range": True,
"input_mins": [-0.1],
"input_maxs": [126.9],
"num_bits": 8,
"expected_nudged_input_mins": [0.0],
"expected_nudged_input_maxs": [127.0],
"expected_steps": [0.5],
"axis": None,
},
{
"testcase_name": "wide_7bits_input_mins_0.0_input_maxs_127.0",
"narrow_range": False,
"input_mins": [0.0],
"input_maxs": [127.0],
"num_bits": 7,
"expected_nudged_input_mins": [0.0],
"expected_nudged_input_maxs": [127.0],
"expected_steps": [1.0],
"axis": None,
},
{
"testcase_name": "wide_7bits_input_mins_0.5_input_maxs_64.0",
"narrow_range": False,
"input_mins": [0.5],
"input_maxs": [64.0],
"num_bits": 7,
"expected_nudged_input_mins": [0.0],
"expected_nudged_input_maxs": [63.5],
"expected_steps": [0.5],
"axis": None,
},
{
"testcase_name": "wide_7bits_input_mins_-64.0_input_maxs_-0.5",
"narrow_range": False,
"input_mins": [-64.0],
"input_maxs": [-0.5],
"num_bits": 7,
"expected_nudged_input_mins": [-63.5],
"expected_nudged_input_maxs": [0.0],
"expected_steps": [0.5],
"axis": None,
},
{
"testcase_name": "wide_7bits_input_mins_-0.1_input_maxs_63.4",
"narrow_range": False,
"input_mins": [-0.1],
"input_maxs": [63.4],
"num_bits": 7,
"expected_nudged_input_mins": [0.0],
"expected_nudged_input_maxs": [63.5],
"expected_steps": [0.5],
"axis": None,
},
{
"testcase_name": "narrow_7bits_input_mins_0.0_input_maxs_126.0",
"narrow_range": True,
"input_mins": [0.0],
"input_maxs": [126.0],
"num_bits": 7,
"expected_nudged_input_mins": [0.0],
"expected_nudged_input_maxs": [126.0],
"expected_steps": [1.0],
"axis": None,
},
{
"testcase_name": "narrow_7bits_input_mins_0.1_input_maxs_63.1",
"narrow_range": True,
"input_mins": [0.1],
"input_maxs": [63.1],
"num_bits": 7,
"expected_nudged_input_mins": [0.0],
"expected_nudged_input_maxs": [63.0],
"expected_steps": [0.5],
"axis": None,
},
{
"testcase_name": (
"narrow_7bits_input_mins_-63.1_input_maxs_-0.1"
),
"narrow_range": True,
"input_mins": [-63.1],
"input_maxs": [-0.1],
"num_bits": 7,
"expected_nudged_input_mins": [-63.0],
"expected_nudged_input_maxs": [0.0],
"expected_steps": [0.5],
"axis": None,
},
{
"testcase_name": "narrow_7bits_input_mins_-0.1_input_maxs_62.9",
"narrow_range": True,
"input_mins": [-0.1],
"input_maxs": [62.9],
"num_bits": 7,
"expected_nudged_input_mins": [0.0],
"expected_nudged_input_maxs": [63.0],
"expected_steps": [0.5],
"axis": None,
},
{
"testcase_name": "wide_8bits_multi_channel",
"narrow_range": False,
"input_mins": [0.0, 0.5, -128.0, -0.1],
"input_maxs": [255.0, 128.0, -0.5, 127.4],
"num_bits": 8,
"expected_nudged_input_mins": [0.0, 0.0, -127.5, 0.0],
"expected_nudged_input_maxs": [255.0, 127.5, 0.0, 127.5],
"expected_steps": [1.0, 0.5, 0.5, 0.5],
"axis": 1,
},
{
"testcase_name": "narrow_8bits_multi_channel",
"narrow_range": True,
"input_mins": [0.0, 0.1, -127.1, -0.1],
"input_maxs": [254.0, 127.1, -0.1, 126.9],
"num_bits": 8,
"expected_nudged_input_mins": [0.0, 0.0, -127.0, 0.0],
"expected_nudged_input_maxs": [254.0, 127.0, 0.0, 127.0],
"expected_steps": [1.0, 0.5, 0.5, 0.5],
"axis": 1,
},
{
"testcase_name": "wide_7bits_multi_channel",
"narrow_range": False,
"input_mins": [0.0, 0.5, -64.0, -0.1],
"input_maxs": [127.0, 64.0, -0.5, 63.4],
"num_bits": 7,
"expected_nudged_input_mins": [0.0, 0.0, -63.5, 0.0],
"expected_nudged_input_maxs": [127.0, 63.5, 0.0, 63.5],
"expected_steps": [1.0, 0.5, 0.5, 0.5],
"axis": 1,
},
{
"testcase_name": "narrow_7bits_multi_channel",
"narrow_range": True,
"input_mins": [0.0, 0.1, -63.1, -0.1],
"input_maxs": [126.0, 63.1, -0.1, 62.9],
"num_bits": 7,
"expected_nudged_input_mins": [0.0, 0.0, -63.0, 0.0],
"expected_nudged_input_maxs": [126.0, 63.0, 0.0, 63.0],
"expected_steps": [1.0, 0.5, 0.5, 0.5],
"axis": 1,
},
]
)
@pytest.mark.skipif(
backend.backend() not in ("tensorflow", "jax", "torch"),
reason=f"{backend.backend()} doesn't support `custom_gradient`.",
)
def test_fake_quant_with_min_max_vars(
    self,
    input_mins,
    input_maxs,
    num_bits,
    narrow_range,
    axis,
    expected_nudged_input_mins,
    expected_nudged_input_maxs,
    expected_steps,
):
    """Checks outputs and input gradients of `fake_quant_with_min_max_vars`.

    For each channel, probe values around the nudged min/max boundaries and
    verify that (a) outputs snap to the expected quantization grid, and
    (b) the gradient w.r.t. the input passes through inside the clipping
    range and is zeroed outside it.
    """
    num_channels = len(expected_nudged_input_mins)
    inputs_list = []
    expected_list = []
    initial_gradients_list = []
    expected_backprops_wrt_input_list = []
    for i in range(num_channels):
        expected_nudged_input_min = expected_nudged_input_mins[i]
        expected_nudged_input_max = expected_nudged_input_maxs[i]
        expected_step = expected_steps[i]
        # 11 probe points just inside/outside the representable range and
        # around the first quantization step.
        inputs_list.append(
            [
                expected_nudged_input_min - expected_step,
                expected_nudged_input_min - 0.01,
                expected_nudged_input_min,
                expected_nudged_input_min + 0.01,
                expected_nudged_input_min + expected_step - 0.01,
                expected_nudged_input_min + expected_step,
                expected_nudged_input_min + expected_step + 0.01,
                expected_nudged_input_max - 0.01,
                expected_nudged_input_max,
                expected_nudged_input_max + 0.01,
                expected_nudged_input_max + expected_step,
            ]
        )
        # Where each probe point should land after fake-quantization.
        expected_list.append(
            [
                expected_nudged_input_min,
                expected_nudged_input_min,
                expected_nudged_input_min,
                expected_nudged_input_min,
                expected_nudged_input_min + expected_step,
                expected_nudged_input_min + expected_step,
                expected_nudged_input_min + expected_step,
                expected_nudged_input_max,
                expected_nudged_input_max,
                expected_nudged_input_max,
                expected_nudged_input_max,
            ]
        )
        # Upstream gradients 1..11, used to scale the backprop check.
        initial_gradients_list.append(
            list(range(1, len(inputs_list[-1]) + 1))
        )
        # Gradient is zero for probes clipped outside [min, max] and the
        # upstream gradient (3..9) for the in-range probes.
        expected_backprops_wrt_input_list.append(
            [0.0, 0.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 0.0, 0.0]
        )
    # Transpose so probes are rows and channels are the last axis,
    # matching the per-channel `axis=1` parameterizations.
    inputs = ops.transpose(ops.array(inputs_list, dtype="float32"))
    expected = ops.transpose(ops.array(expected_list, dtype="float32"))
    expected_backprops_wrt_input = ops.transpose(
        ops.array(expected_backprops_wrt_input_list, dtype="float32")
    )
    input_min = ops.array(input_mins, dtype="float32")
    input_max = ops.array(input_maxs, dtype="float32")
    initial_gradients = ops.transpose(
        ops.array(initial_gradients_list, dtype="float32")
    )
    # Test gradients. Each backend needs its own autodiff driver.
    if backend.backend() == "tensorflow":
        import tensorflow as tf

        @tf.function(jit_compile=True)
        def test_op(
            inputs, input_mins, input_maxs, num_bits, narrow_range, axis
        ):
            with tf.GradientTape() as tape:
                tape.watch(inputs)
                result = quantizers.fake_quant_with_min_max_vars(
                    inputs,
                    input_mins,
                    input_maxs,
                    num_bits,
                    narrow_range,
                    axis,
                )
            return initial_gradients * tape.gradient(result, inputs)

    if backend.backend() == "torch":
        import torch

        def test_op(
            inputs, input_mins, input_maxs, num_bits, narrow_range, axis
        ):
            # Create tensor and enable gradient tracking
            inputs = torch.tensor(
                inputs, dtype=torch.float32, requires_grad=True
            )
            # Apply the quantization operation
            result = quantizers.fake_quant_with_min_max_vars(
                inputs, input_mins, input_maxs, num_bits, narrow_range, axis
            )
            # Compute gradients
            result.backward(torch.ones_like(result))
            return initial_gradients * inputs.grad

    if backend.backend() == "jax":
        import jax

        def test_op(
            inputs, input_mins, input_maxs, num_bits, narrow_range, axis
        ):
            # Define the function to compute gradients for
            def quantize_fn(x):
                return ops.sum(
                    quantizers.fake_quant_with_min_max_vars(
                        x,
                        input_mins,
                        input_maxs,
                        num_bits,
                        narrow_range,
                        axis,
                    )
                )

            input_gradients = jax.grad(quantize_fn)(inputs)
            return ops.multiply(initial_gradients, input_gradients)

    gradients = test_op(
        inputs, input_min, input_max, num_bits, narrow_range, axis
    )
    if backend.backend() != "jax" or not testing.jax_uses_gpu():
        # JAX GPU produces less precise numbers, causing the CI to fail.
        # For example, 127.5 / 255.0 results in 0.49999997 instead of 0.5.
        self.assertAllClose(gradients, expected_backprops_wrt_input)
    # Test outputs.
    outputs = quantizers.fake_quant_with_min_max_vars(
        inputs,
        input_min,
        input_max,
        num_bits=num_bits,
        narrow_range=narrow_range,
        axis=axis,
    )
    self.assertAllClose(outputs, expected)
    # Test bfloat16 & float16 dtype: the output preserves the input dtype
    # and still lands on the expected grid.
    outputs = quantizers.fake_quant_with_min_max_vars(
        ops.cast(inputs, "bfloat16"),
        input_min,
        input_max,
        num_bits=num_bits,
        narrow_range=narrow_range,
        axis=axis,
    )
    self.assertDType(outputs, "bfloat16")
    self.assertAllClose(outputs, expected)
    outputs = quantizers.fake_quant_with_min_max_vars(
        ops.cast(inputs, "float16"),
        input_min,
        input_max,
        num_bits=num_bits,
        narrow_range=narrow_range,
        axis=axis,
    )
    self.assertDType(outputs, "float16")
    self.assertAllClose(outputs, expected)
class GPTQQuantizerTest(testing.TestCase):
    """Tests for the GPTQ quantize/dequantize-with-zero-point primitives."""

    @parameterized.named_parameters(
        ("bits_2_sym_False", 2, False),
        ("bits_4_sym_False", 4, False),
        ("bits_8_sym_False", 8, False),
        ("bits_2_sym_True", 2, True),
        ("bits_4_sym_True", 4, True),
        ("bits_8_sym_True", 8, True),
    )
    def test_quantize_dequantize_roundtrip_error_bound_per_tensor(
        self, bits, symmetric
    ):
        """
        For finite inputs and positive scales, the reconstruction error
        |x_hat - clip(x)| is bounded by 0.5 * scale elementwise.
        """
        rng = np.random.default_rng(0)
        x = ops.array(rng.standard_normal((64, 32)), "float32")
        scale = ops.array(0.05)  # per-tensor scale
        # maxq = 2**bits - 1, the largest quantized level.
        maxq = ops.array(ops.subtract(ops.power(2, bits), 1), "float32")
        # Symmetric: mid-level zero-point; asymmetric: arbitrary (3.0).
        zero = ops.array(maxq / 2.0 if symmetric else 3.0, "float32")
        quantized = quantize_with_zero_point(x, scale, zero, maxq)
        dequantized = dequantize_with_zero_point(quantized, scale, zero)
        # Representable dequantization range:
        # [scale*(0 - zero), scale*(maxq - zero)]
        lo = ops.multiply(scale, ops.subtract(ops.array(0.0), zero))
        hi = ops.multiply(scale, ops.subtract(maxq, zero))
        x_clipped = ops.clip(x, lo, hi)
        err = ops.abs(dequantized - x_clipped)
        # Rounding error is at most half a step (small float tolerance).
        self.assertTrue(
            ops.all(err <= (ops.add(ops.multiply(0.5, scale), 1e-7)))
        )

    def test_quantize_clipping_behavior_extremes(self):
        """
        Very negative q == 0 ; very positive q == maxq.
        """
        maxq = ops.array(15.0)
        scale = ops.array(0.1)
        zero = ops.array(7.0)
        x = ops.array([[-1e6, 1e6]], "float32")
        quantized = quantize_with_zero_point(x, scale, zero, maxq)
        self.assertEqual(quantized.shape, (1, 2))
        # Out-of-range inputs must saturate at the ends of [0, maxq].
        self.assertEqual(quantized[0, 0], 0.0)
        self.assertEqual(quantized[0, 1], maxq)

    def test_zero_scale_guard_no_nans_for_finite_inputs(self):
        """
        If scale == 0, quantize should not produce NaNs (uses epsilon
        replacement).
        """
        x = ops.array([[0.0, 1.0, -2.0]])
        scale = ops.array(0.0)  # triggers epsilon path
        zero = ops.array(5.0)
        maxq = ops.array(15.0)
        q = quantize_with_zero_point(x, scale, zero, maxq)
        self.assertFalse(ops.any(ops.isnan(q)))
        # Dequantize should also be finite
        x_hat = dequantize_with_zero_point(q, scale, zero)
        self.assertTrue(ops.all(ops.isfinite(x_hat)))

    @parameterized.parameters(4, 8)
    def test_idempotent_quantize_when_input_is_already_levels(self, bits):
        """
        If input is already exactly on representable dequantized grid,
        quantize→dequantize should return the same values (within float eps).
        """
        scale = ops.array(0.125)
        maxq = ops.array(ops.subtract(ops.power(2, bits), 1), "float32")
        zero = ops.array(ops.divide(maxq, 2.0))
        # Build dequantized grid points: x = scale * (k - zero), k in [0..maxq]
        ks = ops.arange(0, ops.add(maxq, 1))
        x_vals = ops.multiply(scale, ops.subtract(ks, zero))
        x = ops.reshape(x_vals, (1, -1))
        q = quantize_with_zero_point(x, scale, zero, maxq)
        x_hat = dequantize_with_zero_point(q, scale, zero)
        self.assertAllClose(x_hat, x, rtol=0, atol=1e-6)
class ComputeScaleZeroTest(testing.TestCase):
def test_error_when_x_is_none(self):
    """`None` input must be rejected with a clear error message."""
    with self.assertRaisesRegex(ValueError, "cannot be None"):
        compute_quantization_parameters(None, bits=4)
def test_error_when_x_is_empty(self):
    """An empty tensor must be rejected with a clear error message."""
    empty_tensor = ops.array([], "float32")
    with self.assertRaisesRegex(ValueError, "cannot be empty"):
        compute_quantization_parameters(empty_tensor, bits=4)
def test_error_when_weight_rank_too_low(self):
    """Weights must be at least rank-2; rank-1 input is rejected."""
    rank1_tensor = ops.array([1.0, 2.0], "float32")
    with self.assertRaisesRegex(ValueError, "rank of at least 2"):
        compute_quantization_parameters(rank1_tensor, bits=4, weight=True)
@parameterized.named_parameters(
    ("bits2_asym", 2, False),
    ("bits4_asym", 4, False),
    ("bits8_asym", 8, False),
    ("bits2_sym", 2, True),
    ("bits4_sym", 4, True),
    ("bits8_sym", 8, True),
)
def test_per_tensor_shapes_and_basic_invariants(self, bits, symmetric):
    """Test per-tensor shapes and basic invariants.

    Checks output shapes, scale positivity, and the zero-point formula
    for both symmetric and asymmetric per-tensor quantization.
    """
    x = ops.array(
        np.random.default_rng(0).standard_normal((7, 5), dtype="float32")
    )
    scale, zero, maxq = compute_quantization_parameters(
        x, bits=bits, symmetric=symmetric, per_channel=False, weight=False
    )
    # Shapes (per-tensor): (1,) for scale/zero
    self.assertEqual(scale.shape, (1,))
    self.assertEqual(zero.shape, (1,))
    # Scale must be strictly positive
    self.assertTrue(ops.all(scale > 0.0))
    if symmetric:
        # zero should be (maxq + 1)/2 for symmetric
        expected_zero = ops.divide(ops.add(maxq, 1.0), 2.0)
        self.assertAllClose(zero, expected_zero)
    else:
        # Asymmetric: zero ~ round(-min/scale) on the flattened input
        flat = ops.reshape(x, (1, -1))
        min_val = ops.min(flat, axis=1)
        expected_zero = ops.round(ops.divide(ops.negative(min_val), scale))
        self.assertAllClose(zero, expected_zero)
def test_per_tensor_symmetric_on_constant_input_uses_safe_range(self):
    """A constant (all-zero) input must still yield a positive scale."""
    constant_input = ops.array(np.full((3, 4), 0.0, dtype=np.float32))
    scale, zero, maxq = compute_quantization_parameters(
        constant_input,
        bits=4,
        symmetric=True,
        per_channel=False,
        weight=False,
    )
    # Symmetric quantization pins the zero-point at the mid-level,
    # even for degenerate (constant) inputs.
    self.assertAllClose(zero, ops.array((float(maxq) + 1.0) / 2.0))
    self.assertTrue(ops.all(ops.greater(scale, 0.0)))
def test_weight_per_tensor_tiles_rows(self):
    """Per-tensor weight parameters are tiled to one entry per row."""
    weights = ops.array(
        np.random.default_rng(1).standard_normal((8, 16)), "float32"
    )
    scale, zero, _ = compute_quantization_parameters(
        weights, bits=4, symmetric=False, per_channel=False, weight=True
    )
    # When weight=True and per_channel=False, shapes are (rows, 1).
    self.assertEqual(scale.shape, (8, 1))
    self.assertEqual(zero.shape, (8, 1))
    # A single tensor-wide value is tiled across rows, so every entry
    # must be identical.
    self.assertTrue(ops.all(scale == scale[0, 0]))
    self.assertTrue(ops.all(zero == zero[0, 0]))
def test_weight_per_channel_ungrouped_shapes(self):
    """Ungrouped per-channel quantization yields one scale per row."""
    weights = ops.array(
        np.random.default_rng(2).standard_normal((6, 10)), "float32"
    )
    scale, zero, _ = compute_quantization_parameters(
        weights,
        bits=4,
        symmetric=False,
        per_channel=True,
        group_size=-1,
        weight=True,
    )
    # Per-channel (ungrouped): one (scale, zero) per output row -> (rows, 1).
    self.assertEqual(scale.shape, (6, 1))
    self.assertEqual(zero.shape, (6, 1))
    self.assertTrue(ops.all(ops.greater(scale, 0.0)))
    # Channels are quantized independently, so values should differ
    # across rows (not a single tiled value).
    self.assertFalse(ops.all(scale == scale[0, 0]))
    self.assertFalse(ops.all(zero == zero[0, 0]))
def test_weight_per_channel_grouped_shapes_and_count(self):
    """Grouped quantization yields one (scale, zero) pair per group."""
    rows, cols, group_size = 8, 16, 4
    weights = ops.array(
        np.random.default_rng(3).standard_normal((rows, cols)), "float32"
    )
    scale, zero, _ = compute_quantization_parameters(
        weights,
        bits=4,
        symmetric=False,
        per_channel=True,
        group_size=group_size,
        weight=True,
    )
    # The grouped path reshapes the weights to [-1, group_size], so the
    # number of groups is rows*cols / group_size.
    expected_groups = (rows * cols) // group_size
    self.assertEqual(scale.shape, (expected_groups, 1))
    self.assertEqual(zero.shape, (expected_groups, 1))
    self.assertTrue(ops.all(ops.greater(scale, 0.0)))
@parameterized.named_parameters(
("sym_true", True),
("sym_false", False),
)
def test_dtype_and_finiteness(self, symmetric):
x = ops.array(
np.random.default_rng(4).standard_normal((5, 7)).astype("float32")
)
scale, zero, maxq = compute_quantization_parameters(
x,
bits=8,
symmetric=symmetric,
per_channel=True,
group_size=-1,
weight=True,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/quantizers/gptq_config.py | keras/src/quantizers/gptq_config.py | from keras.src.api_export import keras_export
from keras.src.quantizers.quantization_config import QuantizationConfig
@keras_export("keras.quantizers.GPTQConfig")
class GPTQConfig(QuantizationConfig):
"""Configuration class for the GPTQ (Gradient-based Post-Training
Quantization) algorithm.
GPTQ is a post-training quantization method that quantizes neural network
weights to lower precision (e.g., 4-bit) while minimizing the impact on
model accuracy. It works by analyzing the Hessian matrix of the loss
function with respect to the weights and applying optimal quantization
that preserves the most important weight values.
**When to use GPTQ:**
- You want to reduce model size and memory usage
- You need faster inference on hardware that supports low-precision
operations
- You want to maintain model accuracy as much as possible
- You have a pre-trained model that you want to quantize without
retraining
**How it works:**
1. Uses calibration data to compute the Hessian matrix for each layer
2. Applies iterative quantization with error correction
3. Reorders weights based on activation importance (optional)
4. Quantizes weights while minimizing quantization error
**Example usage:**
```python
from keras.quantizers import GPTQConfig
from keras import Model
# Create configuration for 4-bit quantization
config = GPTQConfig(
dataset=calibration_data, # Your calibration dataset
tokenizer=your_tokenizer, # Tokenizer for text data
weight_bits=4, # Quantize to 4 bits
num_samples=128, # Number of calibration samples
sequence_length=512, # Sequence length for each sample
hessian_damping=0.01, # Hessian stabilization factor
group_size=128, # Weight grouping for quantization
symmetric=False, # Use asymmetric quantization
activation_order=True # Reorder weights by importance
)
# Apply quantization to your model
model = Model(...) # Your pre-trained model
model.quantize("gptq", config=config)
# The model now has quantized weights and can be used for inference
```
**Benefits:**
- **Memory reduction**: 4-bit quantization reduces memory by ~8x compared
to float32
- **Faster inference**: Lower precision operations are faster on supported
hardware
- **Accuracy preservation**: Minimizes accuracy loss through optimal
quantization
- **No retraining required**: Works with pre-trained models
**Advanced usage examples:**
**Per-channel quantization (recommended for most cases):**
```python
config = GPTQConfig(
dataset=calibration_data,
tokenizer=tokenizer,
weight_bits=4,
group_size=-1, # -1 enables per-channel quantization
symmetric=False
)
```
**Grouped quantization (for specific hardware requirements):**
```python
config = GPTQConfig(
dataset=calibration_data,
tokenizer=tokenizer,
weight_bits=4,
group_size=64, # 64 weights share the same scale factor
symmetric=True # Use symmetric quantization
)
```
**High-accuracy quantization with activation ordering:**
```python
config = GPTQConfig(
dataset=calibration_data,
tokenizer=tokenizer,
weight_bits=4,
activation_order=True, # Reorder weights by importance
hessian_damping=0.005, # Lower damping for more precise
# quantization
num_samples=256 # More samples for better accuracy
)
```
**References:**
- Original GPTQ paper: "GPTQ: Accurate Post-Training Quantization
for Generative Pre-trained Transformers"
- Implementation based on: https://github.com/IST-DASLab/gptq
- Suitable for: Transformer models, large language models, and other
deep neural networks
**Note:** The quality of quantization depends heavily on the calibration
dataset. Use representative data that covers the expected input
distribution for best results.
Args:
dataset: The calibration dataset. It can be an iterable that yields
strings or pre-tokenized numerical tensors (e.g., a list of
strings, a generator, or a NumPy array). This data is used to
analyze the model's activations.
tokenizer: A `keras_nlp.Tokenizer` instance (or a similar callable)
that is used to process the `dataset` if it contains strings.
weight_bits: (int, optional) The number of bits to quantize weights to.
Defaults to 4.
num_samples: (int, optional) The number of calibration data samples to
use from the dataset. Defaults to 128.
sequence_length: (int, optional) The sequence length to use for each
calibration sample. Defaults to 512.
hessian_damping: (float, optional) The % of Hessian damping to use for
stabilization during inverse calculation. Defaults to 0.01.
group_size: (int, optional) The size of weight groups to quantize
together. A `group_size` of -1 indicates per-channel quantization.
Defaults to 128.
symmetric: (bool, optional) If `True`, uses symmetric quantization.
If `False`, uses asymmetric quantization. Defaults to `False`.
activation_order: (bool, optional) If `True`, reorders weight columns
based on activation magnitude, which can improve quantization
accuracy. Defaults to `False`.
quantization_layer_structure: (dict, optional) A dictionary defining the
model's quantization structure. It should contain:
- "pre_block_layers": list of layers to run before the first block.
- "sequential_blocks": list of blocks to be quantized sequentially.
If not provided, the model must implement
`get_quantization_layer_structure`.
"""
def __init__(
self,
dataset,
tokenizer,
*,
weight_bits: int = 4,
num_samples: int = 128,
per_channel: bool = True,
sequence_length: int = 512,
hessian_damping: float = 0.01,
group_size: int = 128,
symmetric: bool = False,
activation_order: bool = False,
quantization_layer_structure: dict = None,
):
super().__init__()
if weight_bits not in [2, 3, 4, 8]:
raise ValueError(
f"Unsupported weight_bits {weight_bits}. "
"Supported values are 2, 3, 4, and 8."
)
if num_samples <= 0:
raise ValueError("num_samples must be a positive integer.")
if sequence_length <= 0:
raise ValueError("sequence_length must be a positive integer.")
if hessian_damping < 0 or hessian_damping > 1:
raise ValueError("hessian_damping must be between 0 and 1.")
if group_size < -1 or group_size == 0:
raise ValueError(
"Invalid group_size. Supported values are -1 (whole-tensor) "
"or a positive integer, "
f"but got {group_size}."
)
self.dataset = dataset
self.tokenizer = tokenizer
self.num_samples = num_samples
self.per_channel = per_channel
self.sequence_length = sequence_length
self.hessian_damping = hessian_damping
self.weight_bits = weight_bits
self.group_size = group_size
self.symmetric = symmetric
self.activation_order = activation_order
self.quantization_layer_structure = quantization_layer_structure
def get_config(self):
return {
# Dataset and Tokenizer are only required for a one-time
# calibration and are not saved in the config.
"dataset": None,
"tokenizer": None,
"weight_bits": self.weight_bits,
"num_samples": self.num_samples,
"per_channel": self.per_channel,
"sequence_length": self.sequence_length,
"hessian_damping": self.hessian_damping,
"group_size": self.group_size,
"symmetric": self.symmetric,
"activation_order": self.activation_order,
"quantization_layer_structure": self.quantization_layer_structure,
}
@classmethod
def from_config(cls, config):
return cls(**config)
@property
def mode(self):
return "gptq"
def dtype_policy_string(self):
"""Returns the dtype policy string for this configuration.
Returns:
A string representing the dtype policy, e.g. "gptq_4bit".
"""
return f"gptq/{self.weight_bits}/{self.group_size}"
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/quantizers/quantization_config.py | keras/src/quantizers/quantization_config.py | from keras.src.api_export import keras_export
from keras.src.dtype_policies import QUANTIZATION_MODES
from keras.src.saving import serialization_lib
@keras_export("keras.quantizers.QuantizationConfig")
class QuantizationConfig:
"""Base class for quantization configs.
Subclasses must implement the `mode` property and the `get_config` and
`from_config` class methods.
Args:
weight_quantizer: Quantizer for weights.
activation_quantizer: Quantizer for activations.
"""
def __init__(self, weight_quantizer=None, activation_quantizer=None):
self.weight_quantizer = weight_quantizer
self.activation_quantizer = activation_quantizer
@property
def mode(self):
raise NotImplementedError(
"Subclasses must implement this property. Do not instantiate "
"QuantizationConfig directly."
)
def get_config(self):
return {
"weight_quantizer": serialization_lib.serialize_keras_object(
self.weight_quantizer
),
"activation_quantizer": serialization_lib.serialize_keras_object(
self.activation_quantizer
),
}
@classmethod
def from_config(cls, config):
weight_quantizer = serialization_lib.deserialize_keras_object(
config.get("weight_quantizer")
)
activation_quantizer = serialization_lib.deserialize_keras_object(
config.get("activation_quantizer")
)
return cls(
weight_quantizer=weight_quantizer,
activation_quantizer=activation_quantizer,
)
@staticmethod
def weight_quantizer_or_default(config, default):
if config is not None and config.weight_quantizer is not None:
return config.weight_quantizer
return default
@staticmethod
def activation_quantizer_or_default(config, default):
if config is not None:
return config.activation_quantizer
return default
@keras_export("keras.quantizers.Int8QuantizationConfig")
class Int8QuantizationConfig(QuantizationConfig):
"""Int8 quantization config.
Args:
weight_quantizer: Quantizer for weights.
activation_quantizer: Quantizer for activations. If "default", uses
AbsMaxQuantizer with axis=-1.
"""
def __init__(self, weight_quantizer=None, activation_quantizer="default"):
from keras.src.quantizers.quantizers import AbsMaxQuantizer
if activation_quantizer == "default":
activation_quantizer = AbsMaxQuantizer()
super().__init__(weight_quantizer, activation_quantizer)
if self.weight_quantizer is not None:
if self.weight_quantizer.output_dtype != "int8":
raise ValueError(
"Int8QuantizationConfig requires a weight_quantizer "
"with output_dtype='int8'. Received: "
f"output_dtype={self.weight_quantizer.output_dtype}"
)
@property
def mode(self):
return "int8"
@keras_export("keras.quantizers.Int4QuantizationConfig")
class Int4QuantizationConfig(QuantizationConfig):
    """Int4 quantization config.

    Args:
        weight_quantizer: Quantizer for weights. When provided, it must use
            the signed 4-bit value range `(-8, 7)` stored as int8.
        activation_quantizer: Quantizer for activations. If "default", uses
            AbsMaxQuantizer with axis=-1.
    """

    def __init__(self, weight_quantizer=None, activation_quantizer="default"):
        # Imported lazily to avoid a circular import at module load time.
        from keras.src.quantizers.quantizers import AbsMaxQuantizer

        if activation_quantizer == "default":
            activation_quantizer = AbsMaxQuantizer()
        super().__init__(weight_quantizer, activation_quantizer)
        quantizer = self.weight_quantizer
        if quantizer is None:
            return
        # Values must fit the signed 4-bit range but be stored as int8.
        if quantizer.value_range != (-8, 7):
            raise ValueError(
                "Int4QuantizationConfig requires a weight_quantizer "
                "with value_range=(-8, 7). Received: "
                f"value_range={quantizer.value_range}"
            )
        if quantizer.output_dtype != "int8":
            raise ValueError(
                "Int4QuantizationConfig requires a weight_quantizer "
                "with output_dtype='int8'. Received: "
                f"output_dtype={quantizer.output_dtype}"
            )

    @property
    def mode(self):
        """Identifier used by the quantization framework."""
        return "int4"
@keras_export("keras.quantizers.Float8QuantizationConfig")
class Float8QuantizationConfig(QuantizationConfig):
    """FP8 quantization config.

    FP8 mixed-precision training does not support user defined quantizers.
    This config is only used to indicate that FP8 mixed-precision training
    should be used.
    """

    def __init__(self):
        # No quantizers are configurable in float8 mode.
        super().__init__(None, None)

    @property
    def mode(self):
        """Identifier used by the quantization framework."""
        return "float8"

    def get_config(self):
        # Stateless: nothing to serialize.
        return {}

    @classmethod
    def from_config(cls, config):
        # `config` is ignored because the object carries no state.
        return cls()
def validate_and_resolve_config(mode, config):
    """Validate and resolve quantization config.

    This function validates the quantization config and resolves the mode.
    If mode is not provided, it is inferred from the config. If config is
    not provided, a default config is inferred from the mode.

    Args:
        mode: Quantization mode.
        config: Quantization config.

    Returns:
        The resolved `QuantizationConfig` instance.

    Raises:
        ValueError: On unknown modes, invalid configs, or contradictory
            `mode`/`config` combinations.
    """
    # Backwards compatibility: a bare string in `config` is a mode shortcut.
    if isinstance(config, str):
        mode, config = config, None
    _validate_mode(mode)

    if config is None:
        default_config_classes = {
            "int8": Int8QuantizationConfig,
            "int4": Int4QuantizationConfig,
            "float8": Float8QuantizationConfig,
        }
        if mode in default_config_classes:
            config = default_config_classes[mode]()
        elif mode == "gptq":
            # GPTQ has mandatory settings (tokenizer, dataset) and so has
            # no usable default config.
            raise ValueError(
                "For GPTQ, you must pass a `GPTQConfig` object in the "
                "`config` argument."
            )
        elif mode is not None:
            raise ValueError(
                f"Invalid quantization mode. Received: mode={mode}"
            )
        else:
            raise ValueError(
                "You must provide either `mode` or `config` to `quantize`."
            )
    elif not isinstance(config, QuantizationConfig):
        raise ValueError(
            "Argument `config` must be an instance of "
            "`QuantizationConfig`. "
            f"Received: config={config} (of type {type(config)})"
        )
    elif mode is not None and config.mode != mode:
        # Prevent contradictions between an explicit mode and the config.
        raise ValueError(
            f"Contradictory arguments: mode='{mode}' but "
            f"config.mode='{config.mode}'"
        )

    # From here on the config is the single source of truth for the mode.
    mode = config.mode
    _validate_mode(mode)

    if mode == "gptq":
        from keras.src.quantizers.gptq_config import GPTQConfig

        if not isinstance(config, GPTQConfig):
            raise ValueError(
                "Mode 'gptq' requires a valid `config` argument of type "
                f"`GPTQConfig`. Received: {type(config)}"
            )
    return config
def _validate_mode(mode):
"""Validates quantization mode."""
if mode is not None and mode not in QUANTIZATION_MODES:
raise ValueError(
"Invalid quantization mode. "
f"Expected one of {QUANTIZATION_MODES}. Received: mode={mode}"
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/quantizers/utils.py | keras/src/quantizers/utils.py | import re
def should_quantize_layer(layer, filters):
    """Decide whether `layer` passes the quantization `filters`.

    Args:
        layer: The layer to check.
        filters: A regex string, a list/tuple of regex strings, or a
            callable taking the layer. If None, every layer passes.

    Returns:
        True if the layer should be quantized, False otherwise (a callable
        filter's return value is passed through unchanged).
    """
    if filters is None:
        return True
    name = layer.name
    if isinstance(filters, str):
        return re.search(filters, name) is not None
    if isinstance(filters, (list, tuple)):
        for pattern in filters:
            if re.search(pattern, name):
                return True
        return False
    if callable(filters):
        return filters(layer)
    # Unrecognized filter specs apply no filtering.
    return True
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/quantizers/gptq.py | keras/src/quantizers/gptq.py | import types
from functools import partial
from keras.src import ops
from keras.src import quantizers
from keras.src.layers import Dense
from keras.src.layers import EinsumDense
from keras.src.ops import linalg
from keras.src.quantizers.gptq_config import GPTQConfig
from keras.src.quantizers.quantizers import GPTQQuantizer
from keras.src.quantizers.quantizers import compute_quantization_parameters
from keras.src.quantizers.quantizers import dequantize_with_zero_point
from keras.src.quantizers.quantizers import quantize_with_zero_point
def _stable_permutation(metric):
    """Return a stable permutation that sorts `metric` in descending order.

    Ties are broken deterministically by original index."""
    num = ops.shape(metric)[0]
    indices = ops.arange(0, num, dtype="int32")
    # Tie-break term (i / n) * 1e-12: far too small to flip a genuine
    # strict ordering, but enough to make equal entries sort by index.
    tie_break = ops.multiply(
        ops.divide(ops.cast(indices, "float32"), ops.cast(num, "float32")),
        1e-12,
    )
    # argsort of the negated values yields the descending order.
    return ops.argsort(ops.negative(ops.add(metric, tie_break)))
def gptq_quantize_matrix(
    weights_transpose,
    inv_hessian,
    *,
    blocksize=128,
    group_size=-1,
    activation_order=False,
    order_metric=None,
    compute_scale_zero=compute_quantization_parameters,
):
    """
    Implements the GPTQ error correction updates.
    For a single column update (column j):
        e = (w_j - q_j) / invH[j, j]
        W[:, j+1:] -= e * invH[j, j+1:]
    where:
    - w_j is the original column,
    - q_j is the quantized column,
    - invH is the inverse Hessian,
    - e is the propagated error term.
    Across entire blocks:
        W[:, future] -= E_block * invH[block, future]
    where:
    - E_block is the quantization error accumulated for the current block,
    - invH[block, future] denotes the cross-block slice of the inverse Hessian,
    - W[:, future] are the columns yet to be quantized.
    Args:
        weights_transpose: Transposed weight matrix [out_features, in_features]
            to quantize.
        inv_hessian: Inverse Hessian matrix [in_features, in_features] for
            error propagation.
        blocksize: Size of the blocks to process (default: 128).
        group_size: Size of the groups for parameter reuse
            (default: -1, no grouping).
        activation_order: Whether to apply activation-order permutation
            (default: False).
        order_metric: Metric for ordering features
            (default: None, uses 1 / diag(invH)).
        compute_scale_zero: Function to compute scale and zero for
            quantization.
    Returns:
        quantized_weights: int32. Quantized weight matrix
            [out_features, in_features].
        scale: float32. Scale parameters for quantization
            [out_features, num_groups].
        zero: Zero-point parameters for quantization [out_features, num_groups].
        g_idx: float32 (integer-valued group indices). Group index for each
            feature [in_features].
    """
    in_features = ops.shape(weights_transpose)[1]
    if activation_order:
        # Use 1 / diag(inverse_hessian) as importance proxy by default.
        if order_metric is None:
            order_metric = ops.reciprocal(
                ops.add(ops.diagonal(inv_hessian), 1e-12)
            )
        else:
            # sanitize provided metric: replace NaN/Inf with zeros
            order_metric = ops.cast(order_metric, "float32")
            order_metric = ops.where(
                ops.isfinite(order_metric),
                order_metric,
                ops.zeros_like(order_metric),
            )
        # Sort in descending order by importance
        perm = _stable_permutation(order_metric)
        inv_perm = ops.argsort(perm)
        # Permute both the columns of W and the rows/columns of invH so
        # the most important features are quantized first.
        weights_transpose = ops.take(weights_transpose, perm, axis=1)
        inv_hessian = ops.take(
            ops.take(inv_hessian, perm, axis=0), perm, axis=1
        )
    else:
        perm = inv_perm = None
    # weights_buffer: [out_features, in_features]
    weights_buffer = weights_transpose
    # Buffer for the final quantized matrix: [out_features, in_features]
    quantized_weights_buffer = ops.zeros_like(weights_transpose, dtype="int32")
    scale_chunks = []
    zero_chunks = []
    # Compute effective group size
    effective_group = in_features if group_size == -1 else group_size
    # Process features in blocks
    for block_start in range(0, in_features, blocksize):
        block_end = min(block_start + blocksize, in_features)
        block_size = block_end - block_start
        # Block views
        # block_weights: [out_features, block_size]
        block_weights = weights_buffer[:, block_start:block_end]
        # block_error: [out_features, block_size]
        block_error = ops.zeros_like(block_weights)
        # block_inv_hessian: [block_size, block_size]
        block_inv_hessian = inv_hessian[
            block_start:block_end, block_start:block_end
        ]
        # Per-group cached params for reuse within the group
        cached_scale = None
        cached_zero = None
        cached_maxq = None
        cached_group_start = -1
        for block_idx in range(block_size):
            # Current global column index, represents the original column
            # in the weight matrix
            global_idx = block_start + block_idx
            # weight_column: [out_features,]
            weight_column = block_weights[:, block_idx]
            # Group-wise parameter reuse (compute once per group)
            if not effective_group == in_features:  # group_size != -1
                # Determine the group start index for the current column
                group_start = (global_idx // effective_group) * effective_group
                if group_start != cached_group_start:
                    # New group encountered, compute & cache params
                    # for this group
                    group_end = min(group_start + effective_group, in_features)
                    group_slice = weights_buffer[:, group_start:group_end]
                    cached_scale, cached_zero, cached_maxq = compute_scale_zero(
                        group_slice
                    )
                    # Store params once per group (in the order encountered).
                    scale_chunks.append(cached_scale)
                    zero_chunks.append(cached_zero)
                    cached_group_start = group_start
                scale, zero, maxq = cached_scale, cached_zero, cached_maxq
            else:
                # Single global group covering all columns.
                if cached_scale is None:
                    cached_scale, cached_zero, cached_maxq = compute_scale_zero(
                        weights_buffer
                    )
                    scale_chunks.append(cached_scale)
                    zero_chunks.append(cached_zero)
                    cached_group_start = 0
                scale, zero, maxq = cached_scale, cached_zero, cached_maxq
            # Quantize column and store it.
            # quantized_column: [out_features, 1]
            quantized_column = quantize_with_zero_point(
                ops.expand_dims(weight_column, 1), scale, zero, maxq
            )
            # Store quantized column in the buffer.
            quantized_weights_buffer = ops.slice_update(
                quantized_weights_buffer,
                (0, global_idx),
                ops.cast(quantized_column, "int32"),
            )
            # Dequantize column to compute error.
            # dequantized_col: [out_features,]
            dequantized_col = dequantize_with_zero_point(
                quantized_column, scale, zero
            )[:, 0]
            # Error feedback for remaining columns within the block
            # block_inv_hessian_diag: scalar
            current_block_influence = block_inv_hessian[block_idx, block_idx]
            # We divide by current_block_influence to get the
            # correct scaling of the error term.
            err = ops.divide(
                ops.subtract(weight_column, dequantized_col),
                current_block_influence,
            )
            # Record error for propagation to future blocks
            block_error = ops.slice_update(
                block_error, (0, block_idx), ops.expand_dims(err, 1)
            )
            # Update remaining columns in the current block
            # (those before the current column have already been quantized)
            # Propagate error to remaining columns in the block.
            if block_idx < block_size - 1:
                # update: [out_features, block_size - block_idx - 1]
                update = ops.matmul(
                    ops.expand_dims(err, 1),
                    ops.expand_dims(
                        block_inv_hessian[block_idx, block_idx + 1 :], 0
                    ),
                )
                # tail is a view of the remaining columns in the block
                # to be updated
                # tail: [out_features, block_size - block_idx - 1]
                tail = block_weights[:, block_idx + 1 :]
                block_weights = ops.slice_update(
                    block_weights,
                    (0, block_idx + 1),
                    ops.subtract(tail, update),
                )
        # Propagate block errors to future features (beyond the block)
        if block_end < in_features:
            # Total update for all future columns, based on the
            # accumulated error in this block. This is calculated
            # as the matrix product of the block_error and the
            # relevant slice of the inverse Hessian.
            # total_update: [out_features, in_features - block_end]
            total_update = ops.matmul(
                block_error, inv_hessian[block_start:block_end, block_end:]
            )
            # Update the remaining weights in the buffer. This is done
            # by subtracting the total_update from the remaining columns.
            weights_buffer = ops.concatenate(
                [
                    weights_buffer[:, :block_end],
                    ops.subtract(weights_buffer[:, block_end:], total_update),
                ],
                axis=1,
            )
    # Build group indices for each (possibly permuted) column
    # base_group = effective_group (int)
    base_group = effective_group
    # g_idx in permuted domain
    g_idx = ops.arange(0, in_features, dtype="int32")
    g_idx = ops.divide(g_idx, base_group)
    g_idx = ops.cast(g_idx, "float32")
    # NOTE(review): `ops.divide` is true division, so `g_idx` holds the
    # fractional values i / group rather than i // group; integer group
    # ids are recovered by truncation downstream (int(i / g) == i // g for
    # i >= 0) — confirm against the dequantization path.
    # Map group indices and quantized weights back to original column order
    if activation_order:
        g_idx = ops.take(g_idx, inv_perm, axis=0)
        quantized_weights_buffer = ops.take(
            quantized_weights_buffer, inv_perm, axis=1
        )
    # Concatenate recorded group params
    if len(scale_chunks) == 0:
        # Edge case: no groups recorded (empty input); fall back to whole matrix
        s, z, _ = compute_scale_zero(weights_transpose)
        scale = s
        zero = z
    else:
        scale = ops.concatenate(scale_chunks, axis=1)
        zero = ops.concatenate(zero_chunks, axis=1)
    return quantized_weights_buffer, scale, zero, g_idx
class GPTQ:
    """Per-layer GPTQ state.

    Holds a running Hessian estimate built from calibration activations
    for one `Dense`/`EinsumDense` layer, and applies the GPTQ
    quantize-and-correct step to that layer's weights.
    """

    def __init__(self, layer, config=GPTQConfig(tokenizer=None, dataset=None)):
        # NOTE(review): the default `config` is a single GPTQConfig
        # instance created once at import time and shared by every GPTQ
        # object that relies on the default; this is fine only if it is
        # treated as read-only — confirm.
        self.original_layer = layer
        # Number of calibration samples folded into the Hessian so far.
        self.num_samples = 0
        self.config = config
        self.quantizer = GPTQQuantizer(
            config, compute_dtype=layer.variable_dtype
        )
        # Explicitly handle each supported layer type
        if isinstance(layer, Dense) or (
            isinstance(layer, EinsumDense) and layer.kernel.ndim == 2
        ):
            # For a standard Dense layer, the dimensions are straightforward.
            self.kernel_shape = layer.kernel.shape
            # rows: [input_features]
            self.rows = self.kernel_shape[0]
            # columns: [output_features]
            self.columns = self.kernel_shape[1]
            self.layer = layer
        # Handle 3D EinsumDense layers (typically from attention blocks).
        elif isinstance(layer, EinsumDense) and layer.kernel.ndim == 3:
            # For EinsumDense, we determine the effective 2D dimensions.
            self.kernel_shape = layer.kernel.shape
            shape = list(self.kernel_shape)
            # Heuristic: the largest axis is assumed to be the model dim.
            d_model_dim_index = shape.index(max(shape))
            if d_model_dim_index == 0:  # QKV projection case
                in_features, heads, head_dim = shape
                self.rows, self.columns = (
                    in_features,
                    ops.multiply(heads, head_dim),
                )
            elif d_model_dim_index in [1, 2]:  # Attention Output case
                heads, head_dim, out_features = shape
                self.rows, self.columns = (
                    ops.multiply(heads, head_dim),
                    out_features,
                )
            # Create a temporary object that holds a reshaped
            # 2D version of the kernel.
            self.layer = types.SimpleNamespace(
                kernel=ops.reshape(layer.kernel, (self.rows, self.columns)),
            )
        else:
            # Raise an error if the layer is not supported.
            raise TypeError(f"Unsupported layer type for GPTQ: {type(layer)}")
        # Running Hessian estimate over input features: [rows, rows].
        self.hessian = ops.zeros((self.rows, self.rows), dtype="float32")

    def update_hessian_with_batch(self, input_batch):
        """
        Updates the running average of the Hessian matrix with a new batch.
        This method computes the Hessian matrix for a given batch of input
        activations and updates the accumulated Hessian (`self.hessian`) using a
        numerically stable running average. This allows the Hessian to be
        computed over a large dataset without loading all samples into memory
        at once.
        The input tensor is first reshaped into a 2D matrix [num_samples,
        num_features] before the Hessian is calculated.
        Args:
            input_batch: A 2D or higher-dimensional tensor of input activations
                from a calibration batch.
        Raises:
            ValueError: If the feature dimension of the input tensor
                `input_batch` does not match the dimensions of the
                pre-initialized Hessian matrix `self.hessian`.
        """
        if input_batch is None:
            raise ValueError("Input tensor cannot be None.")
        if len(input_batch.shape) < 2:
            raise ValueError(
                "Input tensor must have rank >= 2 "
                f"(got rank {len(input_batch.shape)})."
            )
        if ops.size(input_batch) == 0:
            raise ValueError("Input tensor cannot be empty.")
        if len(input_batch.shape) > 2:
            # [batch, features]
            input_batch = ops.reshape(input_batch, (-1, input_batch.shape[-1]))
        x = ops.cast(input_batch, "float32")
        num_new_samples = ops.shape(x)[0]
        num_prev_samples = self.num_samples
        total_samples = ops.add(num_prev_samples, num_new_samples)
        if ops.shape(self.hessian)[0] != ops.shape(x)[-1]:
            raise ValueError(
                f"Hessian dimensions ({ops.shape(self.hessian)[0]}) do not "
                f"match input features ({ops.shape(x)[-1]})."
            )
        # gram_matrix: [features, features]
        gram_matrix = ops.matmul(ops.transpose(x), x)
        # Ensures numerical stability and symmetry in case of large floating
        # point activations.
        gram_matrix = ops.divide(
            ops.add(gram_matrix, ops.transpose(gram_matrix)), 2.0
        )
        # Decay previous mean and add current per-sample contribution
        # (factor 2/N)
        if self.num_samples > 0:
            self.hessian = ops.multiply(
                self.hessian, ops.divide(num_prev_samples, total_samples)
            )
        self.hessian = ops.add(
            self.hessian,
            ops.multiply(ops.divide(2.0, total_samples), gram_matrix),
        )
        # NOTE(review): operator precedence binds this as
        # `(self.num_samples + ops.shape(x)[0]) or 0`, so the `or 0` only
        # fires when the sum is falsy; this duplicates `total_samples`
        # computed above — consider reusing it.
        self.num_samples = self.num_samples + ops.shape(x)[0] or 0

    def quantize_and_correct_layer(
        self,
        blocksize=128,
    ):
        """
        Performs GPTQ quantization and correction on the layer's weights.
        This method implements the core logic of the "Optimal Brain Quant"
        (OBQ) method, as applied by GPTQ, to quantize the weights of a single
        layer. It iteratively quantizes blocks of weights and corrects for the
        quantization error by updating the remaining weights.
        The algorithm follows these main steps:
        1.  Initialization: It optionally reorders the weight columns based
            on activation magnitudes (`activation_order=True`) to protect more
            salient
            weights.
        2.  Hessian Modification: The Hessian matrix, pre-computed from
            calibration data, is dampened to ensure its invertibility and
            stability.
        3.  Iterative Quantization: The function iterates through the
            weight columns in blocks (`blocksize`). In each iteration, it:
            a.  Quantizes one column.
            b.  Calculates the quantization error.
            c.  Updates the remaining weights in the *current* block by
                distributing the error, using the inverse Hessian.
        4.  Block-wise Correction: After a block is quantized, the total
            error from that block is propagated to the *next* block of weights
            to be processed.
        5.  Finalization: The quantized weights are reordered back if
            `activation_order` was used, and the layer's weights are updated.
        This implementation is based on the official GPTQ paper and repository.
        For more details, see:
        - Paper: https://arxiv.org/abs/2210.17323
        - Original Code: https://github.com/IST-DASLab/gptq
        Args:
            blocksize: (int, optional) The size of the weight block to process
            at a time. Defaults to 128.
        """
        weights_matrix = ops.transpose(self.layer.kernel)
        # Dampen the Hessian for Stability
        hessian_diagonal = ops.diagonal(self.hessian)
        # Features that never activated get a unit diagonal entry so the
        # matrix stays invertible.
        dead_diagonal = ops.equal(hessian_diagonal, 0.0)
        hessian_diagonal = ops.where(dead_diagonal, 1.0, hessian_diagonal)
        hessian_matrix = ops.add(
            self.hessian,
            ops.diag(
                ops.where(dead_diagonal, 1.0, ops.zeros_like(hessian_diagonal))
            ),
        )
        # Add dampening factor to the Hessian diagonal
        damping_factor = ops.multiply(
            self.config.hessian_damping, ops.mean(hessian_diagonal)
        )
        hessian_diagonal = ops.add(hessian_diagonal, damping_factor)
        # Rebuild the matrix with the dampened diagonal in place.
        hessian_matrix = ops.add(
            ops.subtract(
                hessian_matrix, ops.diag(ops.diagonal(hessian_matrix))
            ),
            ops.diag(hessian_diagonal),
        )
        # Compute the inverse Hessian, which is used for error correction
        inverse_hessian = linalg.inv(hessian_matrix)
        quantized, scale, zero, g_idx = gptq_quantize_matrix(
            weights_matrix,
            inv_hessian=inverse_hessian,
            blocksize=blocksize,
            group_size=self.config.group_size,
            activation_order=self.config.activation_order,
            order_metric=ops.diagonal(hessian_matrix),
            compute_scale_zero=partial(self.quantizer.find_params, weight=True),
        )
        quantized = ops.cast(
            quantized, self.original_layer.quantized_kernel.dtype
        )
        if self.config.weight_bits == 4:
            # For 4-bit weights, we need to pack them into bytes
            quantized, _, _ = quantizers.pack_int4(
                quantized, axis=0, dtype="uint8"
            )
        # Drop the float kernel; from here on the layer serves from the
        # quantized variables assigned below.
        del self.original_layer._kernel
        self.original_layer.quantized_kernel.assign(quantized)
        self.original_layer.kernel_scale.assign(scale)
        self.original_layer.kernel_zero.assign(zero)
        self.original_layer.g_idx.assign(g_idx)
        self.original_layer.is_gptq_calibrated = True

    def free(self):
        # Release the (potentially large) Hessian and the kernel view so
        # they can be garbage-collected once calibration is done.
        del self.hessian
        del self.layer
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/quantizers/__init__.py | keras/src/quantizers/__init__.py | import inspect
from keras.src.api_export import keras_export
from keras.src.quantizers.quantization_config import Float8QuantizationConfig
from keras.src.quantizers.quantization_config import Int4QuantizationConfig
from keras.src.quantizers.quantization_config import Int8QuantizationConfig
from keras.src.quantizers.quantization_config import QuantizationConfig
from keras.src.quantizers.quantizers import AbsMaxQuantizer
from keras.src.quantizers.quantizers import Quantizer
from keras.src.quantizers.quantizers import abs_max_quantize
from keras.src.quantizers.quantizers import compute_float8_amax_history
from keras.src.quantizers.quantizers import compute_float8_scale
from keras.src.quantizers.quantizers import fake_quant_with_min_max_vars
from keras.src.quantizers.quantizers import pack_int4
from keras.src.quantizers.quantizers import quantize_and_dequantize
from keras.src.quantizers.quantizers import unpack_int4
from keras.src.saving import serialization_lib
from keras.src.utils.naming import to_snake_case
# Registry of quantizer-related classes resolvable by name via `get()` /
# `deserialize()`.
ALL_OBJECTS = {
    Quantizer,
    AbsMaxQuantizer,
    QuantizationConfig,
    Int8QuantizationConfig,
    Int4QuantizationConfig,
    Float8QuantizationConfig,
}
# Lookup table keyed by both the `PascalCase` class name and its
# `snake_case` form.
ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
ALL_OBJECTS_DICT.update(
    {to_snake_case(cls.__name__): cls for cls in ALL_OBJECTS}
)
@keras_export("keras.quantizers.serialize")
def serialize(initializer):
    """Return the serialized config of the given quantizer object."""
    return serialization_lib.serialize_keras_object(initializer)
@keras_export("keras.quantizers.deserialize")
def deserialize(config, custom_objects=None):
    """Return a Keras quantizer object via its config.

    Args:
        config: A serialized quantizer config (as produced by `serialize`).
        custom_objects: Optional dict mapping names to custom classes to
            consider during deserialization.
    """
    return serialization_lib.deserialize_keras_object(
        config,
        module_objects=ALL_OBJECTS_DICT,
        custom_objects=custom_objects,
    )
@keras_export("keras.quantizers.get")
def get(identifier, **kwargs):
    """Retrieve a Keras quantizer object via an identifier.

    Args:
        identifier: One of:
            - `None`: returns `None`.
            - A dict: deserialized via `deserialize()`.
            - A string: looked up in the registry of known classes (both
              `PascalCase` and `snake_case` names).
            - A class: instantiated with `**kwargs`.
            - Any other callable (e.g. an instance): returned as-is.
        **kwargs: Keyword arguments forwarded to the constructor when
            `identifier` resolves to a class.

    Returns:
        A quantizer object, or `None` if `identifier` is `None`.

    Raises:
        ValueError: If the identifier cannot be interpreted.
    """
    if identifier is None:
        return None
    if isinstance(identifier, dict):
        obj = deserialize(identifier)
    elif isinstance(identifier, str):
        obj = ALL_OBJECTS_DICT.get(identifier, None)
    else:
        obj = identifier
    if callable(obj):
        if inspect.isclass(obj):
            # Unpack `kwargs` so they bind to the constructor's named
            # parameters. (Passing the dict positionally would mis-assign
            # it to the constructor's first parameter.)
            obj = obj(**kwargs)
        return obj
    else:
        raise ValueError(
            f"Could not interpret quantizer identifier: {identifier}"
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/quantizers/gptq_core.py | keras/src/quantizers/gptq_core.py | import math
from contextlib import contextmanager
import numpy as np
from absl import logging
from keras.src import ops
from keras.src import utils as keras_utils
from keras.src.dtype_policies.dtype_policy import GPTQDTypePolicy
from keras.src.dtype_policies.dtype_policy_map import DTypePolicyMap
from keras.src.layers import Dense
from keras.src.layers import EinsumDense
from keras.src.quantizers.gptq import GPTQ
from keras.src.quantizers.gptq_config import GPTQConfig
from keras.src.quantizers.utils import should_quantize_layer
@contextmanager
def stream_hessians(layers_map, gptq_objects):
    """
    Temporarily monkey-patch each target layer's `call` method so
    that input activations are streamed into the GPTQ instance
    running Hessian estimate at capture time.
    On `__enter__`: For every (name, layer) in `layers_map`, replaces
    `layer.call` with a wrapper that:
    1) extracts the layer input from `*args`/`**kwargs`,
    2) reshapes it to 2D `[-1, rows]` where
        `rows = gptq_objects[name].rows`,
    3) calls `gptq_objects[name].update_hessian_with_batch(x2d)`
    4) delegates to the original `layer.call` and returns its
        output.
    On `__exit__`: All original `layer.call` methods are restored even if an
    exception occurs.
    * Space complexity: O(d**2) per layer (for the Hessian).
    * No weights are modified; only GPTQ statistics are updated.
    Args:
        layers_map: Dict[str, Layer]. Mapping from logical layer names to
        the Keras layers that should be patched during calibration. Keys must
        match `gptq_objects`.
        gptq_objects: Dict[str, GPTQ]. Mapping from names to GPTQ instances.
    Yields:
        None: The patched state is active only within the `with` block. After
        exit, all layers are unpatched and safe to use normally.
    Example:
    ```python
    >>> with stream_hessians(layers_map, gptq_objects):
    ...     for sample in calibration_inputs:
    ...         if len(sample.shape) == 2:
    ...             sample = ops.expand_dims(sample, 0)
    ...         _ = block(sample)   # hooks update Hessians on-the-fly
    >>> # <- original layer.call methods restored here
    ```
    """
    original_calls = {}

    # Factory function: binds `name` and the original `call` as arguments
    # so each hook closes over its own layer (avoids the classic
    # late-binding-in-a-loop pitfall).
    def create_hook(name, original_call_func):
        def hook(*args, **kwargs):
            # Layer inputs arrive either positionally or as `inputs=`.
            inp = args[0] if args else kwargs["inputs"]
            # Explicitly reshape the input tensor to be 2D, with the
            # second dimension matching the number of input features
            # expected by the layer's kernel.
            # This correctly handles inputs of any dimensionality
            # (e.g., 3D or 4D).
            num_features = gptq_objects[name].rows
            input_2d = ops.reshape(inp, (-1, num_features))
            gptq_objects[name].update_hessian_with_batch(input_2d)
            return original_call_func(*args, **kwargs)

        return hook

    try:
        for name, layer in layers_map.items():
            original_calls[name] = layer.call
            layer.call = create_hook(name, layer.call)
        yield
    finally:
        # Always restore the original `call` methods, even on error.
        for name, layer in layers_map.items():
            layer.call = original_calls[name]
def get_dataloader(
    tokenizer,
    sequence_length,
    dataset,
    num_samples=128,
    *,
    strategy="strided",
    seed=42,
    stride=None,
    eos_id=None,
):
    """
    Prepares and chunks the calibration dataloader, repeating short datasets.
    All processing happens on the CPU.

    Args:
        tokenizer: The tokenizer to use for text splitting.
        sequence_length: The length of each input sequence.
        dataset: The dataset to sample from.
        num_samples: The number of samples to generate.
        strategy: The sampling strategy to use. Possible values are
            1. "strided": Samples are taken at regular intervals.
            2. "linspace": Samples are taken at evenly spaced intervals.
            3. "random": Samples are taken at random positions.
        seed: The random seed for reproducibility. Used by
            strategy="random" and, for the start offset, by
            strategy="strided".
        stride: The stride length for "strided" sampling.
        eos_id: The end-of-sequence token ID.

    Returns:
        np.ndarray of shape (num_samples, 1, sequence_length), dtype int32.

    Raises:
        TypeError: If `dataset` is not a non-string iterable.
        ValueError: If the dataset is empty, or too short to form a single
            sample of length `sequence_length`.
    """
    if not hasattr(dataset, "__iter__") or isinstance(dataset, (str, bytes)):
        raise TypeError(
            "The `dataset` argument must be an iterable (e.g., a list of "
            "strings, a generator, or a NumPy array). Got type: "
            f"{type(dataset).__name__}. Please pass the loaded dataset "
            "directly."
        )
    dataset_list = list(dataset)
    if not dataset_list:
        raise ValueError("Provided dataset is empty.")
    pieces = []
    if isinstance(dataset_list[0], str):
        for i, s in enumerate(dataset_list):
            toks = np.asarray(tokenizer.tokenize(s)).reshape(-1)
            pieces.append(toks)
            # avoid windows that span document boundaries
            if eos_id is not None and i < len(dataset_list) - 1:
                pieces.append(np.array([eos_id], dtype=np.int32))
    else:
        for s in dataset_list:
            toks = ops.convert_to_numpy(s).reshape(-1)
            pieces.append(toks.astype(np.int32, copy=False))
    all_tokens = (
        pieces[0].astype(np.int32, copy=False)
        if len(pieces) == 1
        else np.concatenate(pieces, axis=0).astype(np.int32, copy=False)
    )
    # Repeat short corpora so the requested number of samples can be drawn.
    required_tokens = num_samples * sequence_length
    if all_tokens.size < required_tokens:
        repeats = math.ceil(required_tokens / max(1, all_tokens.size))
        all_tokens = np.tile(all_tokens, repeats)
    max_start = all_tokens.size - sequence_length
    if max_start < 0:
        raise ValueError(
            f"Not enough tokens to form one sample of length {sequence_length} "
            f"(have {all_tokens.size})."
        )
    # Choose deterministic, well-spread starts by default
    if strategy == "random":
        rng = np.random.default_rng(seed)
        starts = rng.integers(
            0, max_start + 1, size=num_samples, dtype=np.int64
        )
    elif strategy == "linspace":
        # even coverage with no RNG
        starts = np.linspace(0, max_start, num_samples, dtype=np.int64)
    elif strategy == "strided":
        # stride chosen to cover the space roughly uniformly
        if stride is None:
            stride = max(1, (max_start + 1) // num_samples)
        # Offset derived deterministically from the seed. A fixed
        # multiplicative hash (Knuth's constant) is used instead of the
        # builtin `hash()`, whose string hashing is salted per process
        # (PYTHONHASHSEED) and therefore not reproducible across runs.
        offset = (
            (seed * 2654435761) % (max_start + 1) if max_start > 0 else 0
        )
        starts = (offset + np.arange(num_samples, dtype=np.int64) * stride) % (
            max_start + 1
        )
    else:
        raise ValueError(f"Unknown strategy: {strategy}")
    # Gather contiguous windows
    # sliding_window_view avoids building a big index matrix
    windows = np.lib.stride_tricks.sliding_window_view(
        all_tokens, sequence_length
    )
    samples = windows[starts]  # (num_samples, sequence_length)
    return samples.astype(np.int32)[:, None, :]
def find_layers_in_block(block):
    """
    Finds all Dense and EinsumDense layers in a transformer block.

    Args:
        block: A Keras layer representing a transformer block.

    Returns:
        A dict mapping layer paths to the corresponding Dense or EinsumDense
        layers.
    """
    # A leaf layer is one whose flattened hierarchy contains only itself;
    # composite layers are recursed into by `_flatten_layers()`.
    return {
        sub_layer.path: sub_layer
        for sub_layer in block._flatten_layers()
        if len(list(sub_layer._flatten_layers())) == 1
        and isinstance(sub_layer, (Dense, EinsumDense))
    }
def apply_gptq_layerwise(dataloader, config, structure, filters=None):
    """Applies GPTQ quantization layer-by-layer to a Keras model.
    This function uses the provided `structure` to identify pre-quantization
    layers and sequential blocks.
    The core logic operates as follows:
    1.  It processes the model sequentially, one block at a time. For each
        block, it uses temporary hooks to capture the input activations of
        each target layer during a forward pass with the calibration data.
    2.  These captured activations are used to compute the Hessian matrix for
        each layer's weights.
    3.  The GPTQ algorithm is then applied to each layer to find the optimal
        quantized weights that minimize the error introduced.
    4.  The output activations from the current block are then used as the
        input for the next block, ensuring that quantization errors are
        accounted for throughout the model.
    Args:
        dataloader: An iterable providing calibration data.
        config: A GPTQConfiguration object.
        structure: A dictionary with keys "pre_block_layers" and
            "sequential_blocks".
        filters: Optional filters to exclude layers from quantization.
    Raises:
        ValueError: If the function cannot automatically find an embedding
        layer or any transformer-like blocks to quantize within the model.
    """
    num_samples = config.num_samples
    logging.info("Starting model quantization...")
    pre_layers = structure.get("pre_block_layers", [])
    transformer_blocks = structure.get("sequential_blocks", [])
    if not transformer_blocks:
        raise ValueError(
            "No sequential blocks found in the provided structure to quantize."
        )
    # Initial inputs are the outputs of the pre-block layers
    inputs = []
    for batch in dataloader:
        batch = ops.convert_to_tensor(batch, dtype="int32")
        for layer in pre_layers:
            batch = layer(batch)
        inputs.append(batch)
    # Cap at however many calibration samples were actually produced.
    num_samples = min(num_samples, len(inputs))
    progbar = keras_utils.Progbar(target=len(transformer_blocks))
    for block_idx, block in enumerate(transformer_blocks):
        logging.info(f"Quantizing Block {block_idx}")
        sub_layers_map = find_layers_in_block(block)
        # Filter out layers that are not quantized with GPTQ
        final_sub_layers_map = {}
        for name, layer in sub_layers_map.items():
            if not should_quantize_layer(layer, filters):
                continue
            final_sub_layers_map[name] = layer
        sub_layers_map = final_sub_layers_map
        if not sub_layers_map:
            logging.info(
                f" No quantizable layers found in block {block_idx}. Skipping."
            )
        else:
            logging.info(f"Found layers: {list(sub_layers_map.keys())}")
            gptq_objects = {
                name: GPTQ(layer, config)
                for name, layer in sub_layers_map.items()
            }
            # Forward passes under the hooks accumulate per-layer Hessians.
            with stream_hessians(sub_layers_map, gptq_objects):
                for sample_idx in range(num_samples):
                    current_input = inputs[sample_idx]
                    if len(current_input.shape) == 2:
                        current_input = ops.expand_dims(current_input, axis=0)
                    _ = block(current_input)
            for name, gptq_object in gptq_objects.items():
                logging.info(f"Quantizing {name}...")
                gptq_object.quantize_and_correct_layer()
                gptq_object.free()
            del gptq_objects
        if block_idx < len(transformer_blocks) - 1:
            # Re-run the (now quantized) block so the next block calibrates
            # on error-aware activations.
            logging.info(f"Generating inputs for block {block_idx + 1}...")
            next_block_inputs = []
            for sample_idx in range(num_samples):
                current_input = inputs[sample_idx]
                if len(current_input.shape) == 2:
                    current_input = ops.expand_dims(current_input, axis=0)
                # NOTE(review): `[0]` assumes the block returns a tuple/list
                # whose first element is the hidden states — confirm for
                # blocks that return a bare tensor.
                output = block(current_input)[0]
                next_block_inputs.append(output)
            inputs = next_block_inputs
        progbar.update(current=block_idx + 1)
    logging.info("Quantization process complete.")
def gptq_quantize(config, quantization_layer_structure, filters=None):
    """Run GPTQ quantization over the model described by the structure.

    Args:
        config: The GPTQ configuration.
        quantization_layer_structure: A dictionary describing the model's
            layer structure for quantization.
        filters: Optional filters to exclude layers from quantization.

    Raises:
        ValueError: If the config lacks a dataset/tokenizer, or if no
            quantization structure is provided.
    """
    # Both a calibration dataset and a tokenizer are mandatory inputs.
    missing_calibration_inputs = (
        config.dataset is None or config.tokenizer is None
    )
    if missing_calibration_inputs:
        raise ValueError(
            "GPTQ quantization requires a dataset and a tokenizer. "
            "Please provide them in the `GPTQConfig`."
        )
    if quantization_layer_structure is None:
        raise ValueError(
            "For 'gptq' mode, a valid quantization structure must be provided "
            "either via `config.quantization_layer_structure` or by overriding "
            "`model.get_quantization_layer_structure(mode)`. The structure "
            "should be a dictionary with keys 'pre_block_layers' and "
            "'sequential_blocks'."
        )
    # Materialize every requested sample from the generator/source in a
    # single call; the result is a NumPy array that can be sliced/reused.
    requested_samples = config.num_samples
    all_samples = get_dataloader(
        config.tokenizer,
        config.sequence_length,
        config.dataset,
        num_samples=requested_samples,
    )
    # Keep exactly `num_samples` entries for calibration.
    calibration_samples = all_samples[:requested_samples]
    apply_gptq_layerwise(
        calibration_samples,
        config,
        quantization_layer_structure,
        filters=filters,
    )
def get_group_size_for_layer(layer, config):
    """Determine the group size for GPTQ quantization.

    The group size can be specified either through the `config` argument
    or through the `dtype_policy` if it is of type `GPTQDTypePolicy`.
    The config argument is usually available when quantizing the layer
    via the `quantize` method. If the layer was deserialized from a
    saved model, the group size should be specified in the `dtype_policy`.

    Args:
        layer: The layer whose GPTQ group size should be resolved. Its
            `dtype_policy` is consulted when no config is given.
        config: An optional configuration object that may contain the
            `group_size` attribute.

    Returns:
        int. The determined group size for GPTQ quantization.

    Raises:
        ValueError: If the group size is not specified in either the
            `config` or the `dtype_policy`.
    """
    if config and isinstance(config, GPTQConfig):
        return config.group_size
    elif isinstance(layer.dtype_policy, GPTQDTypePolicy):
        return layer.dtype_policy.group_size
    elif isinstance(layer.dtype_policy, DTypePolicyMap):
        policy = layer.dtype_policy[layer.path]
        if not isinstance(policy, GPTQDTypePolicy):
            # This should never happen based on how we set the
            # quantization mode, but we check just in case.
            # Note: a space is required at the end of the first fragment;
            # adjacent string literals are concatenated with no separator.
            raise ValueError(
                "Expected a `dtype_policy` of type `GPTQDTypePolicy`. "
                f"Got: {type(policy)}"
            )
        return policy.group_size
    else:
        raise ValueError(
            "For GPTQ quantization, the group_size must be specified "
            "either through a `dtype_policy` of type "
            "`GPTQDTypePolicy` or the `config` argument."
        )
def get_weight_bits_for_layer(layer, config):
    """Determine the number of weight bits for GPTQ quantization.

    The number of weight bits can be specified either through the `config`
    argument or through the `dtype_policy` if it is of type
    `GPTQDTypePolicy`.
    The config argument is usually available when quantizing the layer
    via the `quantize` method. If the layer was deserialized from a
    saved model, the weight bits should be specified in the `dtype_policy`.

    Args:
        layer: The layer whose GPTQ weight bit-width should be resolved.
            Its `dtype_policy` is consulted when no config is given.
        config: An optional configuration object that may contain the
            `weight_bits` attribute.

    Returns:
        int. The determined number of weight bits for GPTQ quantization.

    Raises:
        ValueError: If the weight bits is not specified in either the
            `config` or the `dtype_policy`.
    """
    if config and isinstance(config, GPTQConfig):
        return config.weight_bits
    elif isinstance(layer.dtype_policy, GPTQDTypePolicy):
        return layer.dtype_policy.weight_bits
    elif isinstance(layer.dtype_policy, DTypePolicyMap):
        policy = layer.dtype_policy[layer.path]
        if not isinstance(policy, GPTQDTypePolicy):
            # This should never happen based on how we set the
            # quantization mode, but we check just in case.
            # Note: a space is required at the end of the first fragment;
            # adjacent string literals are concatenated with no separator.
            raise ValueError(
                "Expected a `dtype_policy` of type `GPTQDTypePolicy`. "
                f"Got: {type(policy)}"
            )
        return policy.weight_bits
    else:
        raise ValueError(
            "For GPTQ quantization, the weight_bits must be specified "
            "either through a `dtype_policy` of type "
            "`GPTQDTypePolicy` or the `config` argument."
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/quantizers/utils_test.py | keras/src/quantizers/utils_test.py | from absl.testing import parameterized
from keras.src import layers
from keras.src import testing
from keras.src.quantizers import utils
class UtilsTest(testing.TestCase):
    """Tests for `utils.should_quantize_layer` filter semantics."""

    @parameterized.named_parameters(
        # Each tuple is (name, filters, layer_name, expected).
        # Per these cases: `None` filters yield True; a string is treated
        # as a regex against the layer name; a list matches if any entry
        # matches; a callable is used as a predicate on the layer.
        ("none_filter", None, "dense", True),
        ("regex_match", "dense", "dense_1", True),
        ("regex_no_match", "conv", "dense_1", False),
        ("list_match", ["dense", "conv"], "dense_1", True),
        ("list_no_match", ["conv", "pool"], "dense_1", False),
        ("callable_match", lambda l: "dense" in l.name, "dense_1", True),
        ("callable_no_match", lambda l: "conv" in l.name, "dense_1", False),
    )
    def test_should_quantize_layer(self, filters, layer_name, expected):
        # A bare Layer is enough: only the layer's name is inspected here.
        layer = layers.Layer(name=layer_name)
        self.assertEqual(utils.should_quantize_layer(layer, filters), expected)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/quantizers/gptq_config_test.py | keras/src/quantizers/gptq_config_test.py | from keras.src import testing
from keras.src.quantizers.gptq_config import GPTQConfig
class TestGPTQConfig(testing.TestCase):
    """Tests for `GPTQConfig` argument validation and (de)serialization."""

    def test_invalid_weight_bits(self):
        # Bit-widths outside the supported set must be rejected.
        for bits in (1, 5):
            with self.assertRaisesRegex(ValueError, "Unsupported weight_bits"):
                GPTQConfig(dataset=None, tokenizer=None, weight_bits=bits)

    def test_invalid_num_samples(self):
        # Zero and negative sample counts are both invalid.
        for count in (0, -1):
            with self.assertRaisesRegex(
                ValueError, "num_samples must be a positive"
            ):
                GPTQConfig(dataset=None, tokenizer=None, num_samples=count)

    def test_invalid_sequence_length(self):
        # Zero and negative sequence lengths are both invalid.
        for length in (0, -10):
            with self.assertRaisesRegex(
                ValueError, "sequence_length must be a positive"
            ):
                GPTQConfig(dataset=None, tokenizer=None, sequence_length=length)

    def test_invalid_hessian_damping(self):
        # Damping outside [0, 1] must be rejected.
        for damping in (-0.1, 1.1):
            with self.assertRaisesRegex(
                ValueError, "hessian_damping must be between"
            ):
                GPTQConfig(dataset=None, tokenizer=None, hessian_damping=damping)

    def test_invalid_group_size(self):
        # Zero and sub--1 group sizes must be rejected.
        for size in (0, -2):
            with self.assertRaisesRegex(ValueError, "Invalid group_size"):
                GPTQConfig(dataset=None, tokenizer=None, group_size=size)

    def test_dtype_policy_string(self):
        # The policy string encodes mode, weight bits, and group size.
        config = GPTQConfig(
            dataset=None, tokenizer=None, weight_bits=4, group_size=64
        )
        self.assertEqual(config.dtype_policy_string(), "gptq/4/64")

    def test_gptq_config_serialization(self):
        # A get_config/from_config round trip must preserve all state.
        original = GPTQConfig(
            dataset=None, tokenizer=None, weight_bits=4, group_size=64
        )
        restored = GPTQConfig.from_config(original.get_config())
        self.assertDictEqual(original.__dict__, restored.__dict__)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/quantizers/gptq_test.py | keras/src/quantizers/gptq_test.py | from collections.abc import Callable
import numpy as np
import pytest
from absl.testing import parameterized
import keras
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import ops
from keras.src import testing
from keras.src.quantizers.gptq import GPTQ
from keras.src.quantizers.gptq import _stable_permutation
from keras.src.quantizers.gptq import gptq_quantize_matrix
from keras.src.quantizers.gptq_config import GPTQConfig
from keras.src.quantizers.quantization_config import QuantizationConfig
from keras.src.quantizers.quantizers import dequantize_with_sz_map
from keras.src.quantizers.quantizers import dequantize_with_zero_point
from keras.src.quantizers.quantizers import quantize_with_zero_point
from keras.src.testing.test_utils import named_product
# Shared hyperparameters for the toy calibration/evaluation setup below.
VOCAB_SIZE = 1000  # token id space used by the char tokenizer (ord(c) % VOCAB_SIZE)
SEQ_LEN = 128  # tokens per calibration/eval sample
NUM_SAMPLES = 16  # number of calibration samples drawn
W_BITS = 4  # weight bit-width used when quantizing in the tests
NUM_CLASSES = 32  # output classes of the toy sequence classifier
# Long-form calibration corpus; `_string_dataset` slices pseudo-random
# fixed-length windows out of it to build string calibration samples.
CALIBRATION_TEXT = r"""
GPTQ (Generative Pre-trained Transformer Quantization) is an advanced
post-training quantization (PTQ) algorithm designed to compress large
language models with minimal accuracy degradation. It addresses the
challenge of reducing model size from high-precision formats like
FP16 to low-bit integers (e.g., INT4, INT3) without the need for
expensive retraining. The algorithm operates on a layer-by-layer basis,
treating the quantization of each weight matrix $W$ as a
reconstruction problem. Its objective is to find a quantized weight
matrix $\hat{W}$ that minimizes the mean squared error of the layer's
output, formulated as $\arg\min_{\hat{W}} \|WX - \hat{W}X\|_F^2$,
where $X$ is a set of calibration inputs. GPTQ's primary innovation
is its greedy, error-compensating quantization process, based on the
Optimal Brain Quantizer (OBQ) framework. It quantizes weights one by
one (or in small groups). After quantizing a single weight $w_q$ to
its discrete value $\hat{w}_q$, it introduces a quantization error of
$\delta = w_q - \hat{w}_q$. This error is then immediately compensated
for by updating all remaining, unquantized weights in the layer.
The update step is guided by second-order information, specifically
the inverse of the Hessian matrix ($\mathbf{H}^{-1}$) of the layer's
reconstruction loss. This inverse Hessian provides a measure of weight
saliency and inter-dependencies. The update applied to the remaining
weights is calculated based on $\delta$ and the corresponding entries
in $\mathbf{H}^{-1}$, effectively propagating the error to less
sensitive weights. This sequential compensation minimizes the
cumulative error across the entire layer, allowing GPTQ to maintain
high model fidelity, as measured by perplexity, even at aggressive
bit-rates.
"""
def _get_test_layer(layer_type, kernel_shape):
    """Build a pre-built test layer of the requested kind.

    Returns a built `Dense` or `EinsumDense` layer sized from
    `kernel_shape`, or a bare `layers.Layer` for any other `layer_type`
    (used to exercise the unsupported-layer error path).
    """
    if layer_type == "Dense":
        built = layers.Dense(units=kernel_shape[1])
        built.build(input_shape=(None, kernel_shape[0]))
        return built
    if layer_type == "EinsumDense":
        built = layers.EinsumDense(
            equation="...h,hio->...io",
            output_shape=(kernel_shape[1], kernel_shape[2]),
        )
        built.build(input_shape=(None, kernel_shape[0]))
        return built
    return layers.Layer()
@pytest.mark.requires_trainable_backend
class GPTQTest(testing.TestCase):
    """Unit tests for the `GPTQ` per-layer quantizer and its Hessian
    accumulation, error handling, and quantization routines."""

    def test_initialization_with_dense_layer(self):
        # Dense kernel is (in=64, out=128); the Hessian is (in, in).
        mock_layer = _get_test_layer("Dense", kernel_shape=(64, 128))
        gptq_instance = GPTQ(mock_layer)
        self.assertEqual(gptq_instance.rows, 64)
        self.assertEqual(gptq_instance.columns, 128)
        self.assertEqual(gptq_instance.hessian.shape, (64, 64))

    def test_initialization_with_einsumdense_3d(self):
        # 3D EinsumDense kernel (64, 4, 32): columns collapse to 4 * 32.
        mock_layer = _get_test_layer("EinsumDense", kernel_shape=(64, 4, 32))
        gptq_instance = GPTQ(mock_layer)
        self.assertEqual(gptq_instance.rows, 64)
        self.assertEqual(gptq_instance.columns, 4 * 32)
        self.assertEqual(gptq_instance.hessian.shape, (64, 64))

    def test_update_hessian(self):
        dense = _get_test_layer("Dense", kernel_shape=(16, 32))
        dense_gptq = GPTQ(dense)
        rng = np.random.default_rng(seed=42)
        batch1 = rng.standard_normal(size=(8, 16)).astype("float32")
        dense_gptq.update_hessian_with_batch(batch1)
        self.assertEqual(dense_gptq.num_samples, 8)
        H1 = dense_gptq.hessian
        batch2 = rng.standard_normal(size=(4, 16)).astype("float32")
        dense_gptq.update_hessian_with_batch(batch2)
        # The sample count accumulates and the Hessian estimate changes.
        self.assertEqual(dense_gptq.num_samples, 12)
        H2 = dense_gptq.hessian
        self.assertNotAllClose(H1, H2)

    def test_gptq_on_single_layer(self):
        rng = np.random.default_rng(seed=42)
        dense = _get_test_layer("Dense", kernel_shape=(16, 32))
        config = GPTQConfig(
            dataset=None,
            tokenizer=None,
            weight_bits=4,
            symmetric=False,
            group_size=-1,
        )
        dense.quantize("gptq", config=config)
        dense_gptq = GPTQ(
            dense,
            config,
        )
        calibration_data = rng.standard_normal(size=(128, 16)).astype("float32")
        dense_gptq.update_hessian_with_batch(calibration_data)
        dense_gptq.quantize_and_correct_layer()
        # The quantized kernel is stored as uint8.
        self.assertEqual(backend.standardize_dtype(dense.kernel.dtype), "uint8")
        # free() releases the internal Hessian and layer references.
        dense_gptq.free()
        self.assertIsNone(getattr(dense_gptq, "hessian", None))
        self.assertIsNone(getattr(dense_gptq, "layer", None))

    def test_unsupported_layer_error(self):
        unsupported_layer = _get_test_layer("Unsupported", kernel_shape=None)
        with self.assertRaisesRegex(TypeError, "Unsupported layer type"):
            GPTQ(unsupported_layer)

    def test_update_hessian_invalid_input(self):
        # None, empty, and feature-mismatched batches are all rejected.
        rng = np.random.default_rng(seed=42)
        dense = _get_test_layer("Dense", kernel_shape=(16, 32))
        gptq_instance = GPTQ(dense)
        with self.assertRaisesRegex(ValueError, "cannot be None"):
            gptq_instance.update_hessian_with_batch(None)
        with self.assertRaisesRegex(ValueError, "cannot be empty"):
            gptq_instance.update_hessian_with_batch(np.empty((0, 16)))
        with self.assertRaisesRegex(ValueError, "match input features"):
            bad_input = rng.standard_normal(size=(8, 99))
            gptq_instance.update_hessian_with_batch(bad_input)

    def test_streaming_equals_big_batch(self):
        """Tests that streaming updates match big batch updates."""
        # dummy inputs
        x = ops.array(np.random.randn(100, 7), "float32")
        # One-shot hessian update
        layer_1 = layers.Dense(5, use_bias=False)
        layer_1.build(input_shape=(None, 7))
        g1 = GPTQ(layer_1)
        g1.update_hessian_with_batch(x)
        # Streamed hessian update
        layer_2 = layers.Dense(5, use_bias=False)
        layer_2.build(input_shape=(None, 7))
        g2 = GPTQ(layer_2)
        g2.update_hessian_with_batch(x[:50])
        g2.update_hessian_with_batch(x[50:])
        # Both the one-shot and streamed hessian updates should match
        self.assertAllClose(g1.hessian, g2.hessian, rtol=1e-6, atol=1e-6)

    def test_hessian_matches_closed_form(self):
        """Tests that the Hessian matches the closed-form solution."""
        x = ops.array(np.random.randn(128, 7), "float32")
        layer = layers.Dense(5, use_bias=False)
        layer.build((None, 7))
        g = GPTQ(layer)
        g.update_hessian_with_batch(x)
        # Closed form: H = (2 / N) * X^T X.
        expected = ops.multiply(
            ops.divide(2.0, x.shape[0]), ops.matmul(ops.transpose(x), x)
        )
        self.assertAllClose(g.hessian, expected, rtol=1e-6, atol=1e-6)

    def test_higher_rank_inputs_are_reshaped(self):
        """Tests that higher-rank inputs are reshaped correctly."""
        # x: [batch, time, feat]
        x = ops.array(np.random.randn(10, 4, 7), "float32")
        x_flat = ops.reshape(x, (-1, ops.shape(x)[-1]))
        layer1 = layers.Dense(5, use_bias=False)
        layer1.build((None, 7))
        g1 = GPTQ(layer1)
        g1.update_hessian_with_batch(x)
        layer2 = layers.Dense(5, use_bias=False)
        layer2.build((None, 7))
        g2 = GPTQ(layer2)
        g2.update_hessian_with_batch(x_flat)
        # 3D input must produce the same Hessian as its 2D flattening.
        self.assertAllClose(g1.hessian, g2.hessian, rtol=1e-6, atol=1e-6)

    def test_raises_on_feature_mismatch(self):
        x = ops.array(np.random.randn(8, 7), "float32")
        layer = layers.Dense(5, use_bias=False)
        layer.build((None, 6))  # wrong in_features
        g = GPTQ(layer)
        with self.assertRaisesRegex(ValueError, "do not match input features"):
            g.update_hessian_with_batch(x)
        with self.assertRaisesRegex(ValueError, "cannot be None"):
            g.update_hessian_with_batch(None)
        with self.assertRaisesRegex(ValueError, "cannot be empty"):
            g.update_hessian_with_batch(
                ops.array(np.empty((0, 7), dtype="float32"))
            )

    def test_num_samples_accumulates_correctly(self):
        """Tests that the number of samples is accumulated correctly when
        streaming updates are used."""
        x = ops.array(np.random.randn(64, 7), "float32")
        layer = layers.Dense(5, use_bias=False)
        layer.build((None, 7))
        g = GPTQ(layer)
        # Three unevenly-sized chunks must sum to the full batch size.
        g.update_hessian_with_batch(x[:5])
        g.update_hessian_with_batch(x[5:30])
        g.update_hessian_with_batch(x[30:])
        self.assertEqual(g.num_samples, 64)

    def test_numeric_stability_large_values(self):
        """Tests numeric stability of hessian update with large input values."""
        x = ops.multiply(ops.array(np.random.randn(32, 7), "float32"), 1e6)
        layer = layers.Dense(5, use_bias=False)
        layer.build((None, 7))
        g = GPTQ(layer)
        g.update_hessian_with_batch(x)
        # Should be finite and symmetric
        self.assertTrue(ops.all(ops.isfinite(g.hessian)))
        self.assertTrue(ops.all(ops.equal(g.hessian, ops.transpose(g.hessian))))

    def test_einsumdense_2d_kernel_hessian_shape(self):
        x = layers.Input((7,))
        y = layers.EinsumDense("ab,bc->ac", output_shape=(5,))(x)
        model = keras.Model(x, y)
        einsum_dense_layer = next(
            l for l in model.layers if isinstance(l, layers.EinsumDense)
        )
        g = GPTQ(einsum_dense_layer)
        # should infer rows==7
        self.assertEqual(ops.shape(g.hessian), (7, 7))

    def test_einsumdense_3d_kernel_streaming_equals_big_batch(self):
        """Tests that streaming updates to the Hessian are equivalent to a big
        batch update."""
        # Construct a tiny attention-like einsum with 3D kernel
        x = layers.Input((7,))
        qkv = layers.EinsumDense("bf,fhk->bhk", output_shape=(2, 3))(
            x
        )  # heads=2, head_dim=3
        model = keras.Model(x, qkv)
        einsum_dense_layer = next(
            l for l in model.layers if isinstance(l, layers.EinsumDense)
        )
        x = ops.array(np.random.randn(50, 7), "float32")
        g1 = GPTQ(einsum_dense_layer)
        g1.update_hessian_with_batch(x)
        g2 = GPTQ(einsum_dense_layer)
        g2.update_hessian_with_batch(x[:20])
        g2.update_hessian_with_batch(x[20:])
        self.assertAllClose(g1.hessian, g2.hessian, rtol=1e-6, atol=1e-6)

    def test_identity_inv_hessian_matches_direct_quantization(self):
        """Tests that the matrix quantization without error correction
        matches the direct implementation."""
        in_features, out_features = 16, 8
        weights = ops.reshape(
            ops.linspace(
                -0.9, 1.1, in_features * out_features, dtype="float32"
            ),
            (in_features, out_features),
        )
        weights_transpose = ops.transpose(weights)
        # inverse_hessian = identity; no cross-feature correction
        # (since all off-diagonal elements are zero), which means
        # there is no interaction between different features
        inverse_hessian = ops.eye(in_features, dtype="float32")
        quantized_weights, scale_map, zero_map, g_idx = gptq_quantize_matrix(
            weights_transpose,
            inverse_hessian,
            blocksize=128,
            group_size=1,  # per-column quantization
            activation_order=False,
            compute_scale_zero=_compute_scale_zero,
        )
        dequantized_weights = dequantize_with_sz_map(
            quantized_weights, scale_map, zero_map, g_idx
        )
        # Compare function output with columnwise direct application
        # of quantization.
        out = ops.zeros_like(weights_transpose)
        for j in range(ops.shape(weights_transpose)[1]):
            column = weights_transpose[:, j : j + 1]
            scale, zero, maxq = _compute_scale_zero(column)
            quantized_col = quantize_with_zero_point(column, scale, zero, maxq)
            dequantized = dequantize_with_zero_point(quantized_col, scale, zero)
            out = ops.slice_update(
                out, (0, j), ops.expand_dims(dequantized[:, 0], 1)
            )
        self.assertAllClose(dequantized_weights, out, atol=1e-6)

    def test_activation_order_produces_equivalent_weights(self):
        """
        Tests that quantizing with `activation_order=True` yields the same
        final weights as `activation_order=False`, because the internal
        permutation should be undone.
        """
        # Set up shared inputs and a non-trivial permutation.
        in_features, out_features = 8, 6
        initial_weights = ops.array(
            np.random.randn(in_features, out_features), "float32"
        )
        # Generate a Hessian that creates a non-trivial permutation.
        hessian_diag = ops.random.shuffle(
            ops.linspace(10.0, 1.0, in_features, dtype="float32")
        )
        hessian_matrix = ops.diag(hessian_diag)
        # Sanity check: ensure the permutation is not the identity.
        perm = _stable_permutation(hessian_diag)
        self.assertFalse(ops.all(ops.equal(perm, ops.arange(in_features))))

        def create_and_quantize(use_activation_order):
            # Build, quantize, and return a fresh layer sharing the same
            # initial weights and injected Hessian.
            layer = layers.Dense(out_features, use_bias=False)
            layer.build((None, in_features))
            layer.set_weights([ops.copy(initial_weights)])
            config = GPTQConfig(
                dataset=None,
                tokenizer=None,
                group_size=-1,
                activation_order=use_activation_order,
            )
            layer.quantize("gptq", config=config)
            quantizer = GPTQ(layer, config)
            quantizer.hessian = hessian_matrix
            quantizer.quantize_and_correct_layer()
            return layer

        # Quantize two layers, one with and one without activation ordering.
        ordered_layer = create_and_quantize(use_activation_order=True)
        unordered_layer = create_and_quantize(use_activation_order=False)
        self.assertAllClose(
            ordered_layer.get_weights()[0],
            unordered_layer.get_weights()[0],
            msg="Weights should be identical as the permutation is undone.",
        )
def _compute_scale_zero(x, **_):
    """Per-column asymmetric int4 quantization parameters.

    Computes scale = (max - min) / maxq and zero = round(-min / scale)
    per column, returning (scale, zero, maxq).
    """
    maxq = 15.0
    col_min = ops.min(x, axis=0, keepdims=True)
    col_max = ops.max(x, axis=0, keepdims=True)
    # Small epsilons guard against division by zero for constant columns.
    span = ops.subtract(col_max, col_min)
    scale = ops.divide(span, ops.add(maxq, 1e-8))
    zero = ops.round(
        ops.divide(ops.negative(col_min), ops.add(scale, 1e-8))
    )
    return scale, zero, maxq
def _get_sequence_classifier():
    """Transformer-based sequence classifier.

    tokens -> Embedding -> Transformer -> GAP -> Dense(num_classes).
    """
    embed_dim = 32
    num_heads = 4
    ff_dim = 32

    class SimpleTransformerBlock(layers.Layer):
        """Minimal transformer block: multi-head attention and a two-layer
        feed-forward net, each followed by a residual add and a
        LayerNormalization (post-norm arrangement)."""

        def __init__(self, embed_dim, num_heads, ff_dim, **kwargs):
            super().__init__(**kwargs)
            self.att = layers.MultiHeadAttention(
                num_heads=num_heads, key_dim=embed_dim // num_heads
            )
            self.ffn = models.Sequential(
                [
                    layers.Dense(ff_dim, activation="relu"),
                    layers.Dense(embed_dim),
                ]
            )
            self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
            self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)

        def call(self, inputs):
            # Self-attention, then residual + norm; FFN, then residual + norm.
            attention_output = self.att(inputs, inputs)
            out1 = self.layernorm1(inputs + attention_output)
            ffn_output = self.ffn(out1)
            return self.layernorm2(out1 + ffn_output)

    inputs = layers.Input(shape=(SEQ_LEN,), dtype="int32")
    x = layers.Embedding(VOCAB_SIZE, embed_dim)(inputs)
    x = SimpleTransformerBlock(embed_dim, num_heads, ff_dim)(x)
    x = layers.GlobalAveragePooling1D()(x)
    outputs = layers.Dense(NUM_CLASSES)(x)
    return models.Model(inputs, outputs)
def _get_simple_model():
    """Return a minimal Sequential model with a single Dense layer."""
    dense = layers.Dense(10, input_shape=(5,))
    return models.Sequential([dense])
def _mean_kl(p, q):
    """Mean KL divergence D_KL(P || Q) over the last axis.

    Both distributions are clipped away from zero for numerical stability
    before taking logs.
    """
    eps = 1e-8
    p_safe = ops.clip(p, eps, 1.0)
    q_safe = ops.clip(q, eps, 1.0)
    # D_KL(P || Q) = sum(P * (log P - log Q)) per sample, then averaged.
    log_ratio = ops.subtract(ops.log(p_safe), ops.log(q_safe))
    per_sample = ops.sum(ops.multiply(p_safe, log_ratio), axis=-1)
    return ops.mean(per_sample)
def _top1_match_rate(a_logits, b_logits):
    """Fraction of rows whose argmax class agrees between both logit sets.

    Formula: T = 1/N * sum(1{argmax(a_i) == argmax(b_i)})
    """
    preds_a = ops.argmax(a_logits, axis=-1)
    preds_b = ops.argmax(b_logits, axis=-1)
    return ops.mean(ops.equal(preds_a, preds_b))
# Calibration dataset factories keyed by test-case name. Each value is a
# zero-arg callable returning a fresh generator of calibration samples.
DATASETS = {
    "string_dataset": lambda: _string_dataset(
        CALIBRATION_TEXT, NUM_SAMPLES, SEQ_LEN
    ),
    "token_dataset": lambda: _token_dataset(NUM_SAMPLES, SEQ_LEN),
}

# `GPTQConfig` keyword overrides exercised against each dataset in the
# parameterized end-to-end tests. Keys name the test case; values are merged
# over the shared base config.
CONFIGS = {
    "default": {},
    "per_channel": {"group_size": -1, "per_channel": True},
    "act_order": {"activation_order": True},
    "symmetric": {"symmetric": True},
    "group_wise": {"group_size": 8},
    "group_wise_act_order": {"group_size": 8, "activation_order": True},
    "symmetric_act_order": {"symmetric": True, "activation_order": True},
    "symmetric_per_channel": {"symmetric": True, "per_channel": True},
    "group_wise_symmetric_8bit": {
        "group_size": 8,
        "symmetric": True,
        "weight_bits": 8,
    },
}
def _pad_or_trim_1d(ids, length):
    """Force a 1D int64 tensor to exactly `length` entries.

    Shorter inputs are right-padded with zeros; longer ones are truncated.
    """
    flat = ops.ravel(ops.array(ids, "int64"))
    current = len(flat)
    if current >= length:
        return flat[:length]
    padding = ops.zeros(length - current, dtype=flat.dtype)
    return ops.concatenate([flat, padding])
def _char_tokenizer(vocab_size=VOCAB_SIZE, seq_len=SEQ_LEN):
    """Build a toy tokenizer: strings become char-ID sequences and int
    arrays pass through unchanged; output shape is always (1, seq_len)."""

    def _tok(sample):
        if isinstance(sample, str):
            # Map each character to an id within the vocabulary.
            codes = np.fromiter(
                (ord(ch) % vocab_size for ch in sample), dtype=np.int64
            )
            token_ids = ops.convert_to_tensor(codes)
        else:
            token_ids = np.asarray(sample, dtype=np.int64)
        token_ids = _pad_or_trim_1d(token_ids, seq_len)
        return token_ids[None, :]

    # Expose a `.tokenize` attribute mirroring real tokenizer objects.
    _tok.tokenize = _tok
    return _tok
def _string_dataset(
    long_text, num_samples=NUM_SAMPLES, sequence_length=SEQ_LEN
):
    """Yield `num_samples` pseudo-random substrings of `long_text`, each at
    most `sequence_length` characters long (fixed seed for determinism)."""
    rng = np.random.default_rng(seed=0)
    limit = max(1, len(long_text) - sequence_length)
    for _ in range(num_samples):
        # Only draw from the RNG when more than one start offset exists,
        # matching the original sampling behavior exactly.
        offset = 0 if limit <= 1 else rng.integers(0, limit)
        yield long_text[offset : offset + sequence_length]
def _token_dataset(
    num_samples=NUM_SAMPLES, sequence_length=SEQ_LEN, vocab_size=VOCAB_SIZE
):
    """Yield `num_samples` random token-id batches of shape
    (1, sequence_length), drawn uniformly from [0, vocab_size) with a fixed
    seed for determinism."""
    rng = np.random.default_rng(seed=0)
    for _ in range(num_samples):
        batch = rng.integers(
            low=0, high=vocab_size, size=(1, sequence_length), dtype=np.int64
        )
        yield batch
@pytest.mark.requires_trainable_backend
@pytest.mark.skipif(
    backend.backend() == "torch",
    reason="torch gives low accuracy on CI, but works well locally",
)
class TestModelQuantization(testing.TestCase):
    """End-to-end GPTQ quantization tests on a tiny transformer classifier,
    plus error-path and layer-filtering coverage for `model.quantize`."""

    @parameterized.named_parameters(
        # Cartesian product of every calibration dataset with every GPTQ
        # config override.
        named_product(
            [
                {"testcase_name": dataset_id, "dataset": dataset}
                for dataset_id, dataset in DATASETS.items()
            ],
            [
                {"testcase_name": config_id, "config": config}
                for config_id, config in CONFIGS.items()
            ],
        )
    )
    def test_quantize_gptq_combinations(self, dataset, config):
        """Tests GPTQ quantization on a tiny transformer classifier.

        Validates classification performance of the quantized model
        with respect to the full-precision baseline.
        """
        rng = np.random.default_rng(seed=321)
        keras.utils.set_random_seed(123)
        # Build the calibration set.
        calibration_set = list(
            dataset() if isinstance(dataset, Callable) else dataset
        )
        self.assertNotEmpty(calibration_set)
        # Build classifier and tokenizer
        model = _get_sequence_classifier()
        tokenizer = _char_tokenizer(vocab_size=VOCAB_SIZE, seq_len=SEQ_LEN)
        # Build an eval batch drawn from the SAME distribution as calibration
        batch_size = min(8, len(calibration_set))
        eval_samples = [
            calibration_set[rng.integers(0, len(calibration_set))]
            for _ in range(batch_size)
        ]
        x_eval = ops.concatenate([tokenizer(s) for s in eval_samples], axis=0)
        # Baseline logits
        y_ref = model.predict(x_eval)
        embedding_layer = model.layers[1]
        transformer_block = model.layers[2]
        layer_structure = {
            "pre_block_layers": [embedding_layer],
            "sequential_blocks": [transformer_block],
        }
        base_cfg = dict(
            dataset=calibration_set,
            tokenizer=tokenizer,
            weight_bits=W_BITS,
            num_samples=NUM_SAMPLES,
            sequence_length=SEQ_LEN,
            group_size=32,
            symmetric=False,
            activation_order=False,
            quantization_layer_structure=layer_structure,
        )
        # Per-case overrides take precedence over the shared defaults.
        gptq_cfg = GPTQConfig(**{**base_cfg, **config})
        # Quantize
        model.quantize("gptq", config=gptq_cfg)
        # Post-quant logits
        y_q = model.predict(x_eval)
        top1_match = _top1_match_rate(y_ref, y_q)
        p_ref, p_q = ops.softmax(y_ref), ops.softmax(y_q)
        kl = _mean_kl(p_ref, p_q)
        # The quantized model must largely agree with the fp baseline.
        self.assertGreaterEqual(
            top1_match, 0.5, f"Top-1 agreement too low: {top1_match:.3f}"
        )
        self.assertLessEqual(kl, 0.30, f"KL divergence too high: {kl:.3f}")

    @parameterized.named_parameters(
        {
            "testcase_name": "gptq_with_invalid_config_type",
            "mode": "gptq",
            "config": {"weight_bits": 4},
            "expected_exception": ValueError,
            "error_msg": "Argument `config` must be an instance of "
            "`QuantizationConfig`",
        },
        {
            "testcase_name": "gptq_with_none_config",
            "mode": "gptq",
            "config": None,
            "expected_exception": ValueError,
            "error_msg": "For GPTQ, you must pass a `GPTQConfig` object "
            "in the `config` argument.",
        },
        {
            "testcase_name": "gptq_with_base_quantization_config",
            "mode": "gptq",
            "config": QuantizationConfig(),
            "expected_exception": NotImplementedError,
            "error_msg": "Do not instantiate QuantizationConfig directly.",
        },
        {
            "testcase_name": "gptq_missing_structure",
            "mode": "gptq",
            "config": GPTQConfig(dataset=["a"], tokenizer=lambda x: x),
            "expected_exception": ValueError,
            "error_msg": "For 'gptq' mode, a valid quantization structure",
        },
    )
    def test_quantize_scenarios(
        self, mode, config, expected_exception, error_msg
    ):
        # Each misconfiguration must surface the documented exception.
        model = _get_simple_model()
        with self.assertRaisesRegex(expected_exception, error_msg):
            model.quantize(mode, config=config)

    def test_gptq_filtering(self):
        """Tests that filters argument works for GPTQ."""
        model = _get_sequence_classifier()
        tokenizer = _char_tokenizer(vocab_size=VOCAB_SIZE, seq_len=SEQ_LEN)
        # Structure
        embedding_layer = model.layers[1]
        transformer_block = model.layers[2]
        layer_structure = {
            "pre_block_layers": [embedding_layer],
            "sequential_blocks": [transformer_block],
        }
        config = GPTQConfig(
            dataset=[np.zeros((1, SEQ_LEN), dtype="int32")],
            tokenizer=tokenizer,
            quantization_layer_structure=layer_structure,
            weight_bits=4,
            group_size=32,
        )
        target_layer = transformer_block.ffn.layers[0]

        def filter_fn(layer):
            # Exclude exactly the first FFN dense layer from quantization.
            return layer.name != target_layer.name

        model.quantize("gptq", config=config, filters=filter_fn)
        # Check that target_layer is NOT quantized.
        self.assertIsNone(getattr(target_layer, "quantization_mode", None))
        self.assertFalse(hasattr(target_layer, "quantized_kernel"))
        # Check that other dense layers ARE quantized.
        other_dense = transformer_block.ffn.layers[1]
        self.assertEqual(
            getattr(other_dense, "quantization_mode", None), "gptq"
        )
        self.assertTrue(hasattr(other_dense, "quantized_kernel"))

    def test_gptq_multi_filtering(self):
        """Tests that list of regex filters works for GPTQ."""
        model = _get_sequence_classifier()
        tokenizer = _char_tokenizer(vocab_size=VOCAB_SIZE, seq_len=SEQ_LEN)
        embedding_layer = model.layers[1]
        transformer_block = model.layers[2]
        layer_structure = {
            "pre_block_layers": [embedding_layer],
            "sequential_blocks": [transformer_block],
        }
        config = GPTQConfig(
            dataset=[np.zeros((1, SEQ_LEN), dtype="int32")],
            tokenizer=tokenizer,
            quantization_layer_structure=layer_structure,
            weight_bits=4,
            group_size=32,
        )
        layer0 = transformer_block.ffn.layers[0]
        layer1 = transformer_block.ffn.layers[1]
        # We want to quantize only layer0.
        filters = [f"^{layer0.name}$"]
        model.quantize("gptq", config=config, filters=filters)
        # Check that layer0 is quantized.
        self.assertEqual(getattr(layer0, "quantization_mode", None), "gptq")
        self.assertTrue(hasattr(layer0, "quantized_kernel"))
        # Check that layer1 is not quantized.
        self.assertIsNone(getattr(layer1, "quantization_mode", None))
        self.assertFalse(hasattr(layer1, "quantized_kernel"))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/__init__.py | keras/api/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras import _tf_keras as _tf_keras
from keras import activations as activations
from keras import applications as applications
from keras import backend as backend
from keras import callbacks as callbacks
from keras import config as config
from keras import constraints as constraints
from keras import datasets as datasets
from keras import distillation as distillation
from keras import distribution as distribution
from keras import dtype_policies as dtype_policies
from keras import export as export
from keras import initializers as initializers
from keras import layers as layers
from keras import legacy as legacy
from keras import losses as losses
from keras import metrics as metrics
from keras import mixed_precision as mixed_precision
from keras import models as models
from keras import ops as ops
from keras import optimizers as optimizers
from keras import preprocessing as preprocessing
from keras import quantizers as quantizers
from keras import random as random
from keras import regularizers as regularizers
from keras import saving as saving
from keras import tree as tree
from keras import utils as utils
from keras import visualization as visualization
from keras import wrappers as wrappers
from keras.src.backend import Variable as Variable
from keras.src.backend import device as device
from keras.src.backend import name_scope as name_scope
from keras.src.backend.common.keras_tensor import KerasTensor as KerasTensor
from keras.src.backend.common.remat import RematScope as RematScope
from keras.src.backend.common.remat import remat as remat
from keras.src.backend.common.stateless_scope import (
StatelessScope as StatelessScope,
)
from keras.src.backend.common.symbolic_scope import (
SymbolicScope as SymbolicScope,
)
from keras.src.dtype_policies.dtype_policy import DTypePolicy as DTypePolicy
from keras.src.dtype_policies.dtype_policy import (
FloatDTypePolicy as FloatDTypePolicy,
)
from keras.src.initializers.initializer import Initializer as Initializer
from keras.src.layers.core.input_layer import Input as Input
from keras.src.layers.input_spec import InputSpec as InputSpec
from keras.src.layers.layer import Layer as Layer
from keras.src.losses.loss import Loss as Loss
from keras.src.metrics.metric import Metric as Metric
from keras.src.models.model import Model as Model
from keras.src.models.sequential import Sequential as Sequential
from keras.src.ops.function import Function as Function
from keras.src.ops.operation import Operation as Operation
from keras.src.optimizers.optimizer import Optimizer as Optimizer
from keras.src.quantizers.quantizers import Quantizer as Quantizer
from keras.src.regularizers.regularizers import Regularizer as Regularizer
from keras.src.version import __version__ as __version__
from keras.src.version import version as version
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/initializers/__init__.py | keras/api/initializers/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.initializers import deserialize as deserialize
from keras.src.initializers import get as get
from keras.src.initializers import serialize as serialize
from keras.src.initializers.constant_initializers import STFT as STFT
from keras.src.initializers.constant_initializers import STFT as STFTInitializer
from keras.src.initializers.constant_initializers import STFT as stft
from keras.src.initializers.constant_initializers import Constant as Constant
from keras.src.initializers.constant_initializers import Constant as constant
from keras.src.initializers.constant_initializers import Identity as Identity
from keras.src.initializers.constant_initializers import (
Identity as IdentityInitializer,
)
from keras.src.initializers.constant_initializers import Identity as identity
from keras.src.initializers.constant_initializers import Ones as Ones
from keras.src.initializers.constant_initializers import Ones as ones
from keras.src.initializers.constant_initializers import Zeros as Zeros
from keras.src.initializers.constant_initializers import Zeros as zeros
from keras.src.initializers.initializer import Initializer as Initializer
from keras.src.initializers.random_initializers import (
GlorotNormal as GlorotNormal,
)
from keras.src.initializers.random_initializers import (
GlorotNormal as glorot_normal,
)
from keras.src.initializers.random_initializers import (
GlorotUniform as GlorotUniform,
)
from keras.src.initializers.random_initializers import (
GlorotUniform as glorot_uniform,
)
from keras.src.initializers.random_initializers import HeNormal as HeNormal
from keras.src.initializers.random_initializers import HeNormal as he_normal
from keras.src.initializers.random_initializers import HeUniform as HeUniform
from keras.src.initializers.random_initializers import HeUniform as he_uniform
from keras.src.initializers.random_initializers import (
LecunNormal as LecunNormal,
)
from keras.src.initializers.random_initializers import (
LecunNormal as lecun_normal,
)
from keras.src.initializers.random_initializers import (
LecunUniform as LecunUniform,
)
from keras.src.initializers.random_initializers import (
LecunUniform as lecun_uniform,
)
from keras.src.initializers.random_initializers import Orthogonal as Orthogonal
from keras.src.initializers.random_initializers import (
Orthogonal as OrthogonalInitializer,
)
from keras.src.initializers.random_initializers import Orthogonal as orthogonal
from keras.src.initializers.random_initializers import (
RandomNormal as RandomNormal,
)
from keras.src.initializers.random_initializers import (
RandomNormal as random_normal,
)
from keras.src.initializers.random_initializers import (
RandomUniform as RandomUniform,
)
from keras.src.initializers.random_initializers import (
RandomUniform as random_uniform,
)
from keras.src.initializers.random_initializers import (
TruncatedNormal as TruncatedNormal,
)
from keras.src.initializers.random_initializers import (
TruncatedNormal as truncated_normal,
)
from keras.src.initializers.random_initializers import (
VarianceScaling as VarianceScaling,
)
from keras.src.initializers.random_initializers import (
VarianceScaling as variance_scaling,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/distillation/__init__.py | keras/api/distillation/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.distillation.distillation_loss import (
DistillationLoss as DistillationLoss,
)
from keras.src.distillation.distillation_loss import (
FeatureDistillation as FeatureDistillation,
)
from keras.src.distillation.distillation_loss import (
LogitsDistillation as LogitsDistillation,
)
from keras.src.distillation.distiller import Distiller as Distiller
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/regularizers/__init__.py | keras/api/regularizers/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.regularizers import deserialize as deserialize
from keras.src.regularizers import get as get
from keras.src.regularizers import serialize as serialize
from keras.src.regularizers.regularizers import L1 as L1
from keras.src.regularizers.regularizers import L1 as l1
from keras.src.regularizers.regularizers import L1L2 as L1L2
from keras.src.regularizers.regularizers import L1L2 as l1_l2
from keras.src.regularizers.regularizers import L2 as L2
from keras.src.regularizers.regularizers import L2 as l2
from keras.src.regularizers.regularizers import (
OrthogonalRegularizer as OrthogonalRegularizer,
)
from keras.src.regularizers.regularizers import (
OrthogonalRegularizer as orthogonal_regularizer,
)
from keras.src.regularizers.regularizers import Regularizer as Regularizer
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/saving/__init__.py | keras/api/saving/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.saving.file_editor import KerasFileEditor as KerasFileEditor
from keras.src.saving.object_registration import (
CustomObjectScope as CustomObjectScope,
)
from keras.src.saving.object_registration import (
CustomObjectScope as custom_object_scope,
)
from keras.src.saving.object_registration import (
get_custom_objects as get_custom_objects,
)
from keras.src.saving.object_registration import (
get_registered_name as get_registered_name,
)
from keras.src.saving.object_registration import (
get_registered_object as get_registered_object,
)
from keras.src.saving.object_registration import (
register_keras_serializable as register_keras_serializable,
)
from keras.src.saving.saving_api import load_model as load_model
from keras.src.saving.saving_api import load_weights as load_weights
from keras.src.saving.saving_api import save_model as save_model
from keras.src.saving.saving_api import save_weights as save_weights
from keras.src.saving.serialization_lib import (
deserialize_keras_object as deserialize_keras_object,
)
from keras.src.saving.serialization_lib import (
serialize_keras_object as serialize_keras_object,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/optimizers/__init__.py | keras/api/optimizers/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.optimizers import legacy as legacy
from keras.optimizers import schedules as schedules
from keras.src.optimizers import deserialize as deserialize
from keras.src.optimizers import get as get
from keras.src.optimizers import serialize as serialize
from keras.src.optimizers.adadelta import Adadelta as Adadelta
from keras.src.optimizers.adafactor import Adafactor as Adafactor
from keras.src.optimizers.adagrad import Adagrad as Adagrad
from keras.src.optimizers.adam import Adam as Adam
from keras.src.optimizers.adamax import Adamax as Adamax
from keras.src.optimizers.adamw import AdamW as AdamW
from keras.src.optimizers.ftrl import Ftrl as Ftrl
from keras.src.optimizers.lamb import Lamb as Lamb
from keras.src.optimizers.lion import Lion as Lion
from keras.src.optimizers.loss_scale_optimizer import (
LossScaleOptimizer as LossScaleOptimizer,
)
from keras.src.optimizers.muon import Muon as Muon
from keras.src.optimizers.nadam import Nadam as Nadam
from keras.src.optimizers.optimizer import Optimizer as Optimizer
from keras.src.optimizers.rmsprop import RMSprop as RMSprop
from keras.src.optimizers.sgd import SGD as SGD
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/optimizers/schedules/__init__.py | keras/api/optimizers/schedules/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.optimizers.schedules.learning_rate_schedule import (
CosineDecay as CosineDecay,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
CosineDecayRestarts as CosineDecayRestarts,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
ExponentialDecay as ExponentialDecay,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
InverseTimeDecay as InverseTimeDecay,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
LearningRateSchedule as LearningRateSchedule,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
PiecewiseConstantDecay as PiecewiseConstantDecay,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
PolynomialDecay as PolynomialDecay,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
deserialize as deserialize,
)
from keras.src.optimizers.schedules.learning_rate_schedule import (
serialize as serialize,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/optimizers/legacy/__init__.py | keras/api/optimizers/legacy/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.optimizers import LegacyOptimizerWarning as Adagrad
from keras.src.optimizers import LegacyOptimizerWarning as Adam
from keras.src.optimizers import LegacyOptimizerWarning as Ftrl
from keras.src.optimizers import LegacyOptimizerWarning as Optimizer
from keras.src.optimizers import LegacyOptimizerWarning as RMSprop
from keras.src.optimizers import LegacyOptimizerWarning as SGD
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/preprocessing/__init__.py | keras/api/preprocessing/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.preprocessing import image as image
from keras.preprocessing import sequence as sequence
from keras.src.utils.image_dataset_utils import (
image_dataset_from_directory as image_dataset_from_directory,
)
from keras.src.utils.text_dataset_utils import (
text_dataset_from_directory as text_dataset_from_directory,
)
from keras.src.utils.timeseries_dataset_utils import (
timeseries_dataset_from_array as timeseries_dataset_from_array,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/preprocessing/image/__init__.py | keras/api/preprocessing/image/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.utils.image_utils import array_to_img as array_to_img
from keras.src.utils.image_utils import img_to_array as img_to_array
from keras.src.utils.image_utils import load_img as load_img
from keras.src.utils.image_utils import save_img as save_img
from keras.src.utils.image_utils import smart_resize as smart_resize
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/preprocessing/sequence/__init__.py | keras/api/preprocessing/sequence/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.utils.sequence_utils import pad_sequences as pad_sequences
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/dtype_policies/__init__.py | keras/api/dtype_policies/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.dtype_policies import deserialize as deserialize
from keras.src.dtype_policies import get as get
from keras.src.dtype_policies import serialize as serialize
from keras.src.dtype_policies.dtype_policy import DTypePolicy as DTypePolicy
from keras.src.dtype_policies.dtype_policy import (
FloatDTypePolicy as FloatDTypePolicy,
)
from keras.src.dtype_policies.dtype_policy import (
GPTQDTypePolicy as GPTQDTypePolicy,
)
from keras.src.dtype_policies.dtype_policy import (
QuantizedDTypePolicy as QuantizedDTypePolicy,
)
from keras.src.dtype_policies.dtype_policy import (
QuantizedFloat8DTypePolicy as QuantizedFloat8DTypePolicy,
)
from keras.src.dtype_policies.dtype_policy_map import (
DTypePolicyMap as DTypePolicyMap,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/losses/__init__.py | keras/api/losses/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.losses import deserialize as deserialize
from keras.src.losses import get as get
from keras.src.losses import serialize as serialize
from keras.src.losses.loss import Loss as Loss
from keras.src.losses.losses import CTC as CTC
from keras.src.losses.losses import BinaryCrossentropy as BinaryCrossentropy
from keras.src.losses.losses import (
BinaryFocalCrossentropy as BinaryFocalCrossentropy,
)
from keras.src.losses.losses import (
CategoricalCrossentropy as CategoricalCrossentropy,
)
from keras.src.losses.losses import (
CategoricalFocalCrossentropy as CategoricalFocalCrossentropy,
)
from keras.src.losses.losses import (
CategoricalGeneralizedCrossEntropy as CategoricalGeneralizedCrossEntropy,
)
from keras.src.losses.losses import CategoricalHinge as CategoricalHinge
from keras.src.losses.losses import Circle as Circle
from keras.src.losses.losses import CosineSimilarity as CosineSimilarity
from keras.src.losses.losses import Dice as Dice
from keras.src.losses.losses import Hinge as Hinge
from keras.src.losses.losses import Huber as Huber
from keras.src.losses.losses import KLDivergence as KLDivergence
from keras.src.losses.losses import LogCosh as LogCosh
from keras.src.losses.losses import MeanAbsoluteError as MeanAbsoluteError
from keras.src.losses.losses import (
MeanAbsolutePercentageError as MeanAbsolutePercentageError,
)
from keras.src.losses.losses import MeanSquaredError as MeanSquaredError
from keras.src.losses.losses import (
MeanSquaredLogarithmicError as MeanSquaredLogarithmicError,
)
from keras.src.losses.losses import Poisson as Poisson
from keras.src.losses.losses import (
SparseCategoricalCrossentropy as SparseCategoricalCrossentropy,
)
from keras.src.losses.losses import SquaredHinge as SquaredHinge
from keras.src.losses.losses import Tversky as Tversky
from keras.src.losses.losses import binary_crossentropy as binary_crossentropy
from keras.src.losses.losses import (
binary_focal_crossentropy as binary_focal_crossentropy,
)
from keras.src.losses.losses import (
categorical_crossentropy as categorical_crossentropy,
)
from keras.src.losses.losses import (
categorical_focal_crossentropy as categorical_focal_crossentropy,
)
from keras.src.losses.losses import (
categorical_generalized_cross_entropy as categorical_generalized_cross_entropy,
)
from keras.src.losses.losses import categorical_hinge as categorical_hinge
from keras.src.losses.losses import circle as circle
from keras.src.losses.losses import cosine_similarity as cosine_similarity
from keras.src.losses.losses import ctc as ctc
from keras.src.losses.losses import dice as dice
from keras.src.losses.losses import hinge as hinge
from keras.src.losses.losses import huber as huber
from keras.src.losses.losses import kl_divergence as kl_divergence
from keras.src.losses.losses import log_cosh as log_cosh
from keras.src.losses.losses import mean_absolute_error as mean_absolute_error
from keras.src.losses.losses import (
mean_absolute_percentage_error as mean_absolute_percentage_error,
)
from keras.src.losses.losses import mean_squared_error as mean_squared_error
from keras.src.losses.losses import (
mean_squared_logarithmic_error as mean_squared_logarithmic_error,
)
from keras.src.losses.losses import poisson as poisson
from keras.src.losses.losses import (
sparse_categorical_crossentropy as sparse_categorical_crossentropy,
)
from keras.src.losses.losses import squared_hinge as squared_hinge
from keras.src.losses.losses import tversky as tversky
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/visualization/__init__.py | keras/api/visualization/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.visualization.draw_bounding_boxes import (
draw_bounding_boxes as draw_bounding_boxes,
)
from keras.src.visualization.draw_segmentation_masks import (
draw_segmentation_masks as draw_segmentation_masks,
)
from keras.src.visualization.plot_bounding_box_gallery import (
plot_bounding_box_gallery as plot_bounding_box_gallery,
)
from keras.src.visualization.plot_image_gallery import (
plot_image_gallery as plot_image_gallery,
)
from keras.src.visualization.plot_segmentation_mask_gallery import (
plot_segmentation_mask_gallery as plot_segmentation_mask_gallery,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/datasets/__init__.py | keras/api/datasets/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.datasets import boston_housing as boston_housing
from keras.datasets import california_housing as california_housing
from keras.datasets import cifar10 as cifar10
from keras.datasets import cifar100 as cifar100
from keras.datasets import fashion_mnist as fashion_mnist
from keras.datasets import imdb as imdb
from keras.datasets import mnist as mnist
from keras.datasets import reuters as reuters
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/datasets/reuters/__init__.py | keras/api/datasets/reuters/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.reuters import get_label_names as get_label_names
from keras.src.datasets.reuters import get_word_index as get_word_index
from keras.src.datasets.reuters import load_data as load_data
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/datasets/cifar10/__init__.py | keras/api/datasets/cifar10/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.cifar10 import load_data as load_data
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/datasets/fashion_mnist/__init__.py | keras/api/datasets/fashion_mnist/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.fashion_mnist import load_data as load_data
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/datasets/cifar100/__init__.py | keras/api/datasets/cifar100/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.cifar100 import load_data as load_data
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/datasets/boston_housing/__init__.py | keras/api/datasets/boston_housing/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.boston_housing import load_data as load_data
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/datasets/california_housing/__init__.py | keras/api/datasets/california_housing/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.california_housing import load_data as load_data
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/datasets/imdb/__init__.py | keras/api/datasets/imdb/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.imdb import get_word_index as get_word_index
from keras.src.datasets.imdb import load_data as load_data
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/datasets/mnist/__init__.py | keras/api/datasets/mnist/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.datasets.mnist import load_data as load_data
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/metrics/__init__.py | keras/api/metrics/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.losses.losses import binary_crossentropy as binary_crossentropy
from keras.src.losses.losses import (
binary_focal_crossentropy as binary_focal_crossentropy,
)
from keras.src.losses.losses import (
categorical_crossentropy as categorical_crossentropy,
)
from keras.src.losses.losses import (
categorical_focal_crossentropy as categorical_focal_crossentropy,
)
from keras.src.losses.losses import categorical_hinge as categorical_hinge
from keras.src.losses.losses import hinge as hinge
from keras.src.losses.losses import huber as huber
from keras.src.losses.losses import kl_divergence as kl_divergence
from keras.src.losses.losses import log_cosh as log_cosh
from keras.src.losses.losses import mean_absolute_error as mean_absolute_error
from keras.src.losses.losses import (
mean_absolute_percentage_error as mean_absolute_percentage_error,
)
from keras.src.losses.losses import mean_squared_error as mean_squared_error
from keras.src.losses.losses import (
mean_squared_logarithmic_error as mean_squared_logarithmic_error,
)
from keras.src.losses.losses import poisson as poisson
from keras.src.losses.losses import (
sparse_categorical_crossentropy as sparse_categorical_crossentropy,
)
from keras.src.losses.losses import squared_hinge as squared_hinge
from keras.src.metrics import deserialize as deserialize
from keras.src.metrics import get as get
from keras.src.metrics import serialize as serialize
from keras.src.metrics.accuracy_metrics import Accuracy as Accuracy
from keras.src.metrics.accuracy_metrics import BinaryAccuracy as BinaryAccuracy
from keras.src.metrics.accuracy_metrics import (
CategoricalAccuracy as CategoricalAccuracy,
)
from keras.src.metrics.accuracy_metrics import (
SparseCategoricalAccuracy as SparseCategoricalAccuracy,
)
from keras.src.metrics.accuracy_metrics import (
SparseTopKCategoricalAccuracy as SparseTopKCategoricalAccuracy,
)
from keras.src.metrics.accuracy_metrics import (
TopKCategoricalAccuracy as TopKCategoricalAccuracy,
)
from keras.src.metrics.accuracy_metrics import (
binary_accuracy as binary_accuracy,
)
from keras.src.metrics.accuracy_metrics import (
categorical_accuracy as categorical_accuracy,
)
from keras.src.metrics.accuracy_metrics import (
sparse_categorical_accuracy as sparse_categorical_accuracy,
)
from keras.src.metrics.accuracy_metrics import (
sparse_top_k_categorical_accuracy as sparse_top_k_categorical_accuracy,
)
from keras.src.metrics.accuracy_metrics import (
top_k_categorical_accuracy as top_k_categorical_accuracy,
)
from keras.src.metrics.confusion_metrics import AUC as AUC
from keras.src.metrics.confusion_metrics import FalseNegatives as FalseNegatives
from keras.src.metrics.confusion_metrics import FalsePositives as FalsePositives
from keras.src.metrics.confusion_metrics import Precision as Precision
from keras.src.metrics.confusion_metrics import (
PrecisionAtRecall as PrecisionAtRecall,
)
from keras.src.metrics.confusion_metrics import Recall as Recall
from keras.src.metrics.confusion_metrics import (
RecallAtPrecision as RecallAtPrecision,
)
from keras.src.metrics.confusion_metrics import (
SensitivityAtSpecificity as SensitivityAtSpecificity,
)
from keras.src.metrics.confusion_metrics import (
SpecificityAtSensitivity as SpecificityAtSensitivity,
)
from keras.src.metrics.confusion_metrics import TrueNegatives as TrueNegatives
from keras.src.metrics.confusion_metrics import TruePositives as TruePositives
from keras.src.metrics.correlation_metrics import (
ConcordanceCorrelation as ConcordanceCorrelation,
)
from keras.src.metrics.correlation_metrics import (
PearsonCorrelation as PearsonCorrelation,
)
from keras.src.metrics.correlation_metrics import (
concordance_correlation as concordance_correlation,
)
from keras.src.metrics.correlation_metrics import (
pearson_correlation as pearson_correlation,
)
from keras.src.metrics.f_score_metrics import F1Score as F1Score
from keras.src.metrics.f_score_metrics import FBetaScore as FBetaScore
from keras.src.metrics.hinge_metrics import CategoricalHinge as CategoricalHinge
from keras.src.metrics.hinge_metrics import Hinge as Hinge
from keras.src.metrics.hinge_metrics import SquaredHinge as SquaredHinge
from keras.src.metrics.iou_metrics import BinaryIoU as BinaryIoU
from keras.src.metrics.iou_metrics import IoU as IoU
from keras.src.metrics.iou_metrics import MeanIoU as MeanIoU
from keras.src.metrics.iou_metrics import OneHotIoU as OneHotIoU
from keras.src.metrics.iou_metrics import OneHotMeanIoU as OneHotMeanIoU
from keras.src.metrics.metric import Metric as Metric
from keras.src.metrics.probabilistic_metrics import (
BinaryCrossentropy as BinaryCrossentropy,
)
from keras.src.metrics.probabilistic_metrics import (
CategoricalCrossentropy as CategoricalCrossentropy,
)
from keras.src.metrics.probabilistic_metrics import KLDivergence as KLDivergence
from keras.src.metrics.probabilistic_metrics import Poisson as Poisson
from keras.src.metrics.probabilistic_metrics import (
SparseCategoricalCrossentropy as SparseCategoricalCrossentropy,
)
from keras.src.metrics.reduction_metrics import Mean as Mean
from keras.src.metrics.reduction_metrics import (
MeanMetricWrapper as MeanMetricWrapper,
)
from keras.src.metrics.reduction_metrics import Sum as Sum
from keras.src.metrics.regression_metrics import (
CosineSimilarity as CosineSimilarity,
)
from keras.src.metrics.regression_metrics import LogCoshError as LogCoshError
from keras.src.metrics.regression_metrics import (
MeanAbsoluteError as MeanAbsoluteError,
)
from keras.src.metrics.regression_metrics import (
MeanAbsolutePercentageError as MeanAbsolutePercentageError,
)
from keras.src.metrics.regression_metrics import (
MeanSquaredError as MeanSquaredError,
)
from keras.src.metrics.regression_metrics import (
MeanSquaredLogarithmicError as MeanSquaredLogarithmicError,
)
from keras.src.metrics.regression_metrics import R2Score as R2Score
from keras.src.metrics.regression_metrics import (
RootMeanSquaredError as RootMeanSquaredError,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/models/__init__.py | keras/api/models/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.models.cloning import clone_model as clone_model
from keras.src.models.model import Model as Model
from keras.src.models.model import model_from_json as model_from_json
from keras.src.models.sequential import Sequential as Sequential
from keras.src.saving.saving_api import load_model as load_model
from keras.src.saving.saving_api import save_model as save_model
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/applications/__init__.py | keras/api/applications/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.applications import convnext as convnext
from keras.applications import densenet as densenet
from keras.applications import efficientnet as efficientnet
from keras.applications import efficientnet_v2 as efficientnet_v2
from keras.applications import imagenet_utils as imagenet_utils
from keras.applications import inception_resnet_v2 as inception_resnet_v2
from keras.applications import inception_v3 as inception_v3
from keras.applications import mobilenet as mobilenet
from keras.applications import mobilenet_v2 as mobilenet_v2
from keras.applications import mobilenet_v3 as mobilenet_v3
from keras.applications import nasnet as nasnet
from keras.applications import resnet as resnet
from keras.applications import resnet50 as resnet50
from keras.applications import resnet_v2 as resnet_v2
from keras.applications import vgg16 as vgg16
from keras.applications import vgg19 as vgg19
from keras.applications import xception as xception
from keras.src.applications.convnext import ConvNeXtBase as ConvNeXtBase
from keras.src.applications.convnext import ConvNeXtLarge as ConvNeXtLarge
from keras.src.applications.convnext import ConvNeXtSmall as ConvNeXtSmall
from keras.src.applications.convnext import ConvNeXtTiny as ConvNeXtTiny
from keras.src.applications.convnext import ConvNeXtXLarge as ConvNeXtXLarge
from keras.src.applications.densenet import DenseNet121 as DenseNet121
from keras.src.applications.densenet import DenseNet169 as DenseNet169
from keras.src.applications.densenet import DenseNet201 as DenseNet201
from keras.src.applications.efficientnet import EfficientNetB0 as EfficientNetB0
from keras.src.applications.efficientnet import EfficientNetB1 as EfficientNetB1
from keras.src.applications.efficientnet import EfficientNetB2 as EfficientNetB2
from keras.src.applications.efficientnet import EfficientNetB3 as EfficientNetB3
from keras.src.applications.efficientnet import EfficientNetB4 as EfficientNetB4
from keras.src.applications.efficientnet import EfficientNetB5 as EfficientNetB5
from keras.src.applications.efficientnet import EfficientNetB6 as EfficientNetB6
from keras.src.applications.efficientnet import EfficientNetB7 as EfficientNetB7
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2B0 as EfficientNetV2B0,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2B1 as EfficientNetV2B1,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2B2 as EfficientNetV2B2,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2B3 as EfficientNetV2B3,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2L as EfficientNetV2L,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2M as EfficientNetV2M,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2S as EfficientNetV2S,
)
from keras.src.applications.inception_resnet_v2 import (
InceptionResNetV2 as InceptionResNetV2,
)
from keras.src.applications.inception_v3 import InceptionV3 as InceptionV3
from keras.src.applications.mobilenet import MobileNet as MobileNet
from keras.src.applications.mobilenet_v2 import MobileNetV2 as MobileNetV2
from keras.src.applications.mobilenet_v3 import (
MobileNetV3Large as MobileNetV3Large,
)
from keras.src.applications.mobilenet_v3 import (
MobileNetV3Small as MobileNetV3Small,
)
from keras.src.applications.nasnet import NASNetLarge as NASNetLarge
from keras.src.applications.nasnet import NASNetMobile as NASNetMobile
from keras.src.applications.resnet import ResNet50 as ResNet50
from keras.src.applications.resnet import ResNet101 as ResNet101
from keras.src.applications.resnet import ResNet152 as ResNet152
from keras.src.applications.resnet_v2 import ResNet50V2 as ResNet50V2
from keras.src.applications.resnet_v2 import ResNet101V2 as ResNet101V2
from keras.src.applications.resnet_v2 import ResNet152V2 as ResNet152V2
from keras.src.applications.vgg16 import VGG16 as VGG16
from keras.src.applications.vgg19 import VGG19 as VGG19
from keras.src.applications.xception import Xception as Xception
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/applications/efficientnet_v2/__init__.py | keras/api/applications/efficientnet_v2/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2B0 as EfficientNetV2B0,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2B1 as EfficientNetV2B1,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2B2 as EfficientNetV2B2,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2B3 as EfficientNetV2B3,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2L as EfficientNetV2L,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2M as EfficientNetV2M,
)
from keras.src.applications.efficientnet_v2 import (
EfficientNetV2S as EfficientNetV2S,
)
from keras.src.applications.efficientnet_v2 import (
decode_predictions as decode_predictions,
)
from keras.src.applications.efficientnet_v2 import (
preprocess_input as preprocess_input,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/applications/inception_resnet_v2/__init__.py | keras/api/applications/inception_resnet_v2/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.inception_resnet_v2 import (
InceptionResNetV2 as InceptionResNetV2,
)
from keras.src.applications.inception_resnet_v2 import (
decode_predictions as decode_predictions,
)
from keras.src.applications.inception_resnet_v2 import (
preprocess_input as preprocess_input,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/applications/nasnet/__init__.py | keras/api/applications/nasnet/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.nasnet import NASNetLarge as NASNetLarge
from keras.src.applications.nasnet import NASNetMobile as NASNetMobile
from keras.src.applications.nasnet import (
decode_predictions as decode_predictions,
)
from keras.src.applications.nasnet import preprocess_input as preprocess_input
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/applications/vgg16/__init__.py | keras/api/applications/vgg16/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.vgg16 import VGG16 as VGG16
from keras.src.applications.vgg16 import (
decode_predictions as decode_predictions,
)
from keras.src.applications.vgg16 import preprocess_input as preprocess_input
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/applications/convnext/__init__.py | keras/api/applications/convnext/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.convnext import ConvNeXtBase as ConvNeXtBase
from keras.src.applications.convnext import ConvNeXtLarge as ConvNeXtLarge
from keras.src.applications.convnext import ConvNeXtSmall as ConvNeXtSmall
from keras.src.applications.convnext import ConvNeXtTiny as ConvNeXtTiny
from keras.src.applications.convnext import ConvNeXtXLarge as ConvNeXtXLarge
from keras.src.applications.convnext import (
decode_predictions as decode_predictions,
)
from keras.src.applications.convnext import preprocess_input as preprocess_input
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/applications/resnet_v2/__init__.py | keras/api/applications/resnet_v2/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.resnet_v2 import ResNet50V2 as ResNet50V2
from keras.src.applications.resnet_v2 import ResNet101V2 as ResNet101V2
from keras.src.applications.resnet_v2 import ResNet152V2 as ResNet152V2
from keras.src.applications.resnet_v2 import (
decode_predictions as decode_predictions,
)
from keras.src.applications.resnet_v2 import (
preprocess_input as preprocess_input,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/applications/xception/__init__.py | keras/api/applications/xception/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.xception import Xception as Xception
from keras.src.applications.xception import (
decode_predictions as decode_predictions,
)
from keras.src.applications.xception import preprocess_input as preprocess_input
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/applications/mobilenet_v3/__init__.py | keras/api/applications/mobilenet_v3/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.mobilenet_v3 import (
decode_predictions as decode_predictions,
)
from keras.src.applications.mobilenet_v3 import (
preprocess_input as preprocess_input,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/applications/mobilenet/__init__.py | keras/api/applications/mobilenet/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.mobilenet import MobileNet as MobileNet
from keras.src.applications.mobilenet import (
decode_predictions as decode_predictions,
)
from keras.src.applications.mobilenet import (
preprocess_input as preprocess_input,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/applications/resnet/__init__.py | keras/api/applications/resnet/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.resnet import ResNet50 as ResNet50
from keras.src.applications.resnet import ResNet101 as ResNet101
from keras.src.applications.resnet import ResNet152 as ResNet152
from keras.src.applications.resnet import (
decode_predictions as decode_predictions,
)
from keras.src.applications.resnet import preprocess_input as preprocess_input
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/applications/imagenet_utils/__init__.py | keras/api/applications/imagenet_utils/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.imagenet_utils import (
decode_predictions as decode_predictions,
)
from keras.src.applications.imagenet_utils import (
preprocess_input as preprocess_input,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/applications/mobilenet_v2/__init__.py | keras/api/applications/mobilenet_v2/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.mobilenet_v2 import MobileNetV2 as MobileNetV2
from keras.src.applications.mobilenet_v2 import (
decode_predictions as decode_predictions,
)
from keras.src.applications.mobilenet_v2 import (
preprocess_input as preprocess_input,
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/applications/resnet50/__init__.py | keras/api/applications/resnet50/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.resnet import ResNet50 as ResNet50
from keras.src.applications.resnet import (
decode_predictions as decode_predictions,
)
from keras.src.applications.resnet import preprocess_input as preprocess_input
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/api/applications/densenet/__init__.py | keras/api/applications/densenet/__init__.py | """DO NOT EDIT.
This file was autogenerated. Do not edit it by hand,
since your modifications would be overwritten.
"""
from keras.src.applications.densenet import DenseNet121 as DenseNet121
from keras.src.applications.densenet import DenseNet169 as DenseNet169
from keras.src.applications.densenet import DenseNet201 as DenseNet201
from keras.src.applications.densenet import (
decode_predictions as decode_predictions,
)
from keras.src.applications.densenet import preprocess_input as preprocess_input
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.