repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/tensorflow/numpy.py | keras/src/backend/tensorflow/numpy.py | import builtins
import collections
import functools
import math
import string
import warnings
import numpy as np
import tensorflow as tf
from tensorflow.python.ops.linalg.sparse import sparse_csr_matrix_ops
from tensorflow.python.ops.math_ops import is_nan
from keras.src import tree
from keras.src.backend import config
from keras.src.backend import standardize_dtype
from keras.src.backend.common import dtypes
from keras.src.backend.common.backend_utils import canonicalize_axis
from keras.src.backend.common.backend_utils import to_tuple_or_list
from keras.src.backend.common.backend_utils import vectorize_impl
from keras.src.backend.tensorflow import sparse
from keras.src.backend.tensorflow.core import cast
from keras.src.backend.tensorflow.core import convert_to_tensor
from keras.src.backend.tensorflow.core import shape as shape_op
def rot90(array, k=1, axes=(0, 1)):
    """Rotate an array by 90 degrees in the specified plane.
    Args:
        array: Input tensor
        k: Number of 90-degree rotations (default=1)
        axes: Tuple of two axes that define the plane of rotation.
            Defaults to (0, 1).
    Returns:
        Rotated tensor with correct shape transformation
    """
    array = convert_to_tensor(array)
    if array.shape.rank < 2:
        raise ValueError(
            f"Input array must have at least 2 dimensions. "
            f"Received: array.ndim={array.shape.rank}"
        )
    if len(axes) != 2 or axes[0] == axes[1]:
        raise ValueError(
            f"Invalid axes: {axes}. Axes must be a tuple of "
            "two different dimensions."
        )
    # Four rotations are the identity, so only k mod 4 matters.
    k = k % 4
    if k == 0:
        return array
    # Canonicalize negative axis indices.
    axes = tuple(
        axis if axis >= 0 else array.shape.rank + axis for axis in axes
    )
    # Move the two rotation axes to the end so the rotation happens in the
    # trailing 2D plane of a (collapsed) batched tensor.
    perm = [i for i in range(array.shape.rank) if i not in axes]
    perm.extend(axes)
    array = tf.transpose(array, perm)
    shape = tf.shape(array)
    non_rot_shape = shape[:-2]
    h, w = shape[-2], shape[-1]
    # Collapse all non-rotated axes into a single leading batch dimension.
    array = tf.reshape(array, tf.concat([[-1], [h, w]], axis=0))
    # One 90-degree rotation = reverse columns, then transpose the plane.
    array = tf.reverse(array, axis=[2])
    array = tf.transpose(array, [0, 2, 1])
    # An odd rotation count swaps the plane's height and width.
    if k % 2 == 1:
        final_h, final_w = w, h
    else:
        final_h, final_w = h, w
    if k > 1:
        array = tf.reshape(array, tf.concat([[-1], [final_h, final_w]], axis=0))
        for _ in range(k - 1):
            array = tf.reverse(array, axis=[2])
            array = tf.transpose(array, [0, 2, 1])
    # Restore the batch dimensions around the rotated plane.
    final_shape = tf.concat([non_rot_shape, [final_h, final_w]], axis=0)
    array = tf.reshape(array, final_shape)
    # Undo the initial transpose to restore the original axis order.
    inv_perm = [0] * len(perm)
    for i, p in enumerate(perm):
        inv_perm[p] = i
    array = tf.transpose(array, inv_perm)
    return array
@sparse.elementwise_binary_union(tf.sparse.add)
def add(x1, x2):
    """Elementwise sum `x1 + x2` with numpy-style dtype promotion.

    When `x2` looks like a bias vector matching a channel dimension of
    `x1`, the op is lowered to `tf.nn.bias_add` so it can be fused with
    `MatMul`/`Conv*` kernels.
    """
    # Python scalars are left as-is so `result_type` can treat them as
    # weak types during promotion.
    if not isinstance(x1, (int, float)):
        x1 = convert_to_tensor(x1)
    if not isinstance(x2, (int, float)):
        x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(
        getattr(x1, "dtype", type(x1)),
        getattr(x2, "dtype", type(x2)),
    )
    x1 = convert_to_tensor(x1, dtype)
    x2 = convert_to_tensor(x2, dtype)
    # Special case of `tf.add`: `tf.nn.bias_add`
    # `BiasAdd` can be fused with `MatMul` and `Conv*` kernels
    # Expecting `x1` to be `inputs` and `x2` to be `bias` (no swapping)
    x2_squeeze_shape = [d for d in x2.shape.as_list() if d is None or d > 1]
    if (
        # `x2` looks like bias (can be squeezed to vector)
        1 == len(x2_squeeze_shape)
        # `x1` looks like input tensor (rank >= 2)
        and len(x1.shape) > 1
        # `x2` non-squeezable dimension defined
        and x2_squeeze_shape[0] is not None
        # `x2` non-squeezable dimension match `x1` channel dimension
        and x2_squeeze_shape[0]
        in {x1.shape.as_list()[1], x1.shape.as_list()[-1]}
    ):
        # Bias matching the last axis means channels-last layout.
        if x1.shape[-1] == x2_squeeze_shape[0]:
            data_format = "NHWC"
        else:
            data_format = "NCHW"
        if len(x2.shape) > 1:
            x2 = tf.squeeze(x2)
        return tf.nn.bias_add(x1, x2, data_format=data_format)
    return tf.add(x1, x2)
def bartlett(x):
    """Return a Bartlett (triangular) window of length `x`."""
    x = convert_to_tensor(x, dtype=config.floatx())
    # Degenerate lengths: empty window and single-point window.
    if x == 0:
        return tf.constant([])
    if x == 1:
        return tf.ones([1])
    indices = tf.range(x)
    span = x - 1
    rising = 2.0 * indices / span
    falling = 2.0 - rising
    return tf.where(indices <= span / 2, rising, falling)
def hamming(x):
    """Return a symmetric (non-periodic) Hamming window of length `x`."""
    length = convert_to_tensor(x, dtype=tf.int32)
    return tf.signal.hamming_window(length, periodic=False)
def hanning(x):
    """Return a symmetric (non-periodic) Hann window of length `x`."""
    length = convert_to_tensor(x, dtype=tf.int32)
    return tf.signal.hann_window(length, periodic=False)
def heaviside(x1, x2):
    """Heaviside step function: 0 where x1 < 0, 1 where x1 > 0, x2 at 0."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(x1.dtype, x2.dtype)
    # Integer inputs are promoted to floats, matching numpy's behavior.
    if dtype in ["int8", "int16", "int32", "uint8", "uint16", "uint32"]:
        dtype = config.floatx()
    elif dtype in ["int64"]:
        dtype = "float64"
    x1 = tf.cast(x1, dtype)
    x2 = tf.cast(x2, dtype)
    non_negative = tf.where(x1 > 0, tf.ones_like(x1), x2)
    return tf.where(x1 < 0, tf.zeros_like(x1), non_negative)
def kaiser(x, beta):
    """Return a Kaiser window of length `x` with shape parameter `beta`."""
    length = convert_to_tensor(x, dtype=tf.int32)
    return tf.signal.kaiser_window(length, beta=beta)
def bincount(x, weights=None, minlength=0, sparse=False):
    """Count occurrences of each value in a tensor of non-negative ints.

    Args:
        x: Input tensor (cast to int32 when not already int32/int64).
        weights: Optional tensor of the same shape as `x`; when given,
            each bin sums the weights instead of counting, and the output
            dtype follows numpy-style promotion of `x` and `weights`.
        minlength: Minimum number of bins in the output.
        sparse: Whether to return a `tf.SparseTensor`.

    NOTE(review): the `sparse` parameter shadows the module-level `sparse`
    import; only `tf.sparse` is used below, so this is harmless here.
    """
    x = convert_to_tensor(x)
    dtypes_to_resolve = [x.dtype]
    if standardize_dtype(x.dtype) not in ["int32", "int64"]:
        x = tf.cast(x, tf.int32)
    if weights is not None:
        weights = convert_to_tensor(weights)
        dtypes_to_resolve.append(weights.dtype)
        dtype = dtypes.result_type(*dtypes_to_resolve)
        # The bincount kernels only accept these four weight dtypes; cast
        # anything else to the nearest supported type.
        if standardize_dtype(weights.dtype) not in [
            "int32",
            "int64",
            "float32",
            "float64",
        ]:
            if "int" in standardize_dtype(weights.dtype):
                weights = tf.cast(weights, tf.int32)
            else:
                weights = tf.cast(weights, tf.float32)
    else:
        # Unweighted counts are plain int32.
        dtype = "int32"
    if sparse or isinstance(x, tf.SparseTensor):
        output = tf.sparse.bincount(
            x,
            weights=weights,
            minlength=minlength,
            axis=-1,
        )
        # Fall back to the dynamic shape when the static one is unknown.
        actual_length = output.shape[-1]
        if actual_length is None:
            actual_length = tf.shape(output)[-1]
        output = cast(output, dtype)
        if x.shape.rank == 1:
            output_shape = (actual_length,)
        else:
            batch_size = output.shape[0]
            if batch_size is None:
                batch_size = tf.shape(output)[0]
            output_shape = (batch_size, actual_length)
        # Rebuild the SparseTensor so the resolved dense_shape is attached.
        return tf.SparseTensor(
            indices=output.indices,
            values=output.values,
            dense_shape=output_shape,
        )
    return tf.cast(
        tf.math.bincount(x, weights=weights, minlength=minlength, axis=-1),
        dtype,
    )
@functools.lru_cache(512)
def _normalize_einsum_subscripts(subscripts):
# string.ascii_letters
mapping = {}
normalized_subscripts = ""
for c in subscripts:
if c in string.ascii_letters:
if c not in mapping:
mapping[c] = string.ascii_letters[len(mapping)]
normalized_subscripts += mapping[c]
else:
normalized_subscripts += c
return normalized_subscripts
def einsum(subscripts, *operands, **kwargs):
    """`np.einsum` for the TF backend.

    Subscripts are canonicalized first so equivalent equations share one
    code path. A fixed set of common contractions is lowered to
    `tf.matmul` (plus transposes/reshapes) to use hardware-accelerated
    kernels; everything else falls back to `tf.einsum`.
    """
    operands = tree.map_structure(convert_to_tensor, operands)
    subscripts = _normalize_einsum_subscripts(subscripts)

    def is_valid_for_custom_ops(subscripts, *operands):
        # Check that `subscripts` is supported and the shape of operands is not
        # `None`.
        if subscripts in [
            "a,b->ab",
            "ab,b->a",
            "ab,bc->ac",
            "ab,cb->ac",
            "abc,cd->abd",
            "abc,dc->abd",
            "abcd,abde->abce",
            "abcd,abed->abce",
            "abcd,acbe->adbe",
            "abcd,adbe->acbe",
            "abcd,aecd->acbe",
            "abcd,aecd->aceb",
        ]:
            # These subscripts don't require the shape information
            return True
        elif subscripts == "abc,cde->abde":
            _, b1, c1 = operands[0].shape
            c2, d2, e2 = operands[1].shape
            b, c, d, e = b1, c1 or c2, d2, e2
            if None in (b, c, d, e):
                return False
            return True
        elif subscripts == "abc,dce->abde":
            _, b1, c1 = operands[0].shape
            d2, c2, e2 = operands[1].shape
            b, c, d, e = b1, c1 or c2, d2, e2
            if None in (b, c, d, e):
                return False
            return True
        elif subscripts == "abc,dec->abde":
            _, b1, c1 = operands[0].shape
            d2, e2, c2 = operands[1].shape
            b, c, d, e = b1, c1 or c2, d2, e2
            if None in (b, c, d, e):
                return False
            return True
        elif subscripts == "abcd,cde->abe":
            _, b1, c1, d1 = operands[0].shape
            c2, d2, e2 = operands[1].shape
            b, c, d, e = b1, c1 or c2, d1 or d2, e2
            if None in (b, c, d, e):
                return False
            return True
        elif subscripts == "abcd,ced->abe":
            _, b1, c1, d1 = operands[0].shape
            c2, e2, d2 = operands[1].shape
            b, c, d, e = b1, c1 or c2, d1 or d2, e2
            if None in (b, c, d, e):
                return False
            return True
        elif subscripts == "abcd,ecd->abe":
            _, b1, c1, d1 = operands[0].shape
            e2, c2, d2 = operands[1].shape
            b, c, d, e = b1, c1 or c2, d1 or d2, e2
            if None in (b, c, d, e):
                return False
            return True
        elif subscripts == "abcde,aebf->adbcf":
            _, b1, c1, d1, e1 = operands[0].shape
            _, e2, b2, f2 = operands[1].shape
            b, c, d, e, f = b1 or b2, c1, d1, e1 or e2, f2
            if None in (b, c, d, e, f):
                return False
            return True
        elif subscripts == "abcde,afce->acdbf":
            _, b1, c1, d1, e1 = operands[0].shape
            _, f2, c2, e2 = operands[1].shape
            b, c, d, e, f = b1, c1 or c2, d1, e1 or e2, f2
            if None in (b, c, d, e, f):
                return False
            return True
        else:
            # No match in subscripts
            return False

    def use_custom_ops(subscripts, *operands, output_type):
        # Replace tf.einsum with custom ops to utilize hardware-accelerated
        # matmul
        x, y = operands[0], operands[1]
        if subscripts == "a,b->ab":
            # Outer product as a column-vector x row-vector matmul.
            x = tf.expand_dims(x, axis=-1)
            y = tf.expand_dims(y, axis=0)
            return tf.matmul(x, y, output_type=output_type)
        elif subscripts == "ab,b->a":
            # Matrix-vector product via a trailing singleton dimension.
            y = tf.expand_dims(y, axis=-1)
            result = tf.matmul(x, y, output_type=output_type)
            return tf.squeeze(result, axis=-1)
        elif subscripts == "ab,bc->ac":
            return tf.matmul(x, y, output_type=output_type)
        elif subscripts == "ab,cb->ac":
            y = tf.transpose(y, [1, 0])
            return tf.matmul(x, y, output_type=output_type)
        elif subscripts == "abc,cd->abd":
            return tf.matmul(x, y, output_type=output_type)
        elif subscripts == "abc,cde->abde":
            _, b1, c1 = x.shape
            c2, d2, e2 = y.shape
            b, c, d, e = b1, c1 or c2, d2, e2
            # Flatten the de axes of y, matmul, then restore them.
            y = tf.reshape(y, [c, -1])
            result = tf.matmul(x, y, output_type=output_type)
            return tf.reshape(result, [-1, b, d, e])
        elif subscripts == "abc,dc->abd":
            y = tf.transpose(y, [1, 0])
            return tf.matmul(x, y, output_type=output_type)
        elif subscripts == "abc,dce->abde":
            _, b1, c1 = x.shape
            d2, c2, e2 = y.shape
            b, c, d, e = b1, c1 or c2, d2, e2
            y = tf.transpose(y, [1, 0, 2])  # cde
            y = tf.reshape(y, [c, -1])
            result = tf.matmul(x, y, output_type=output_type)
            return tf.reshape(result, [-1, b, d, e])
        elif subscripts == "abc,dec->abde":
            _, b1, c1 = x.shape
            d2, e2, c2 = y.shape
            b, c, d, e = b1, c1 or c2, d2, e2
            y = tf.transpose(y, [2, 0, 1])  # cde
            y = tf.reshape(y, [c, -1])
            result = tf.matmul(x, y, output_type=output_type)
            return tf.reshape(result, [-1, b, d, e])
        elif subscripts == "abcd,abde->abce":
            return tf.matmul(x, y, output_type=output_type)
        elif subscripts == "abcd,abed->abce":
            y = tf.transpose(y, [0, 1, 3, 2])
            return tf.matmul(x, y, output_type=output_type)
        elif subscripts == "abcd,acbe->adbe":
            x = tf.transpose(x, [0, 1, 3, 2])
            y = tf.transpose(y, [0, 2, 1, 3])
            result = tf.matmul(x, y, output_type=output_type)
            return tf.transpose(result, [0, 2, 1, 3])
        elif subscripts == "abcd,adbe->acbe":
            y = tf.transpose(y, [0, 2, 1, 3])  # abde
            result = tf.matmul(x, y, output_type=output_type)  # abce
            return tf.transpose(result, [0, 2, 1, 3])
        elif subscripts == "abcd,aecd->acbe":
            x = tf.transpose(x, [0, 2, 1, 3])  # acbd
            y = tf.transpose(y, [0, 2, 3, 1])  # acde
            return tf.matmul(x, y, output_type=output_type)
        elif subscripts == "abcd,aecd->aceb":
            x = tf.transpose(x, [0, 2, 1, 3])
            y = tf.transpose(y, [0, 2, 3, 1])
            result = tf.matmul(x, y, output_type=output_type)  # acbe
            return tf.transpose(result, [0, 1, 3, 2])
        elif subscripts == "abcd,cde->abe":
            _, b1, c1, d1 = x.shape
            c2, d2, e2 = y.shape
            b, c, d, e = b1, c1 or c2, d1 or d2, e2
            # Contract the combined cd axis in one matmul.
            x = tf.reshape(x, [-1, b, c * d])
            y = tf.reshape(y, [-1, e])
            return tf.matmul(x, y, output_type=output_type)
        elif subscripts == "abcd,ced->abe":
            _, b1, c1, d1 = x.shape
            c2, e2, d2 = y.shape
            b, c, d, e = b1, c1 or c2, d1 or d2, e2
            x = tf.reshape(x, [-1, b, c * d])
            y = tf.transpose(y, [0, 2, 1])
            y = tf.reshape(y, [-1, e])
            return tf.matmul(x, y, output_type=output_type)
        elif subscripts == "abcd,ecd->abe":
            _, b1, c1, d1 = x.shape
            e2, c2, d2 = y.shape
            b, c, d, e = b1, c1 or c2, d1 or d2, e2
            x = tf.reshape(x, [-1, b, c * d])
            y = tf.transpose(y, [1, 2, 0])
            y = tf.reshape(y, [-1, e])
            return tf.matmul(x, y, output_type=output_type)
        elif subscripts == "abcde,aebf->adbcf":
            _, b1, c1, d1, e1 = x.shape
            _, e2, b2, f2 = y.shape
            b, c, d, e, f = b1 or b2, c1, d1, e1 or e2, f2
            x = tf.reshape(x, [-1, b, c * d, e])  # ab(cd)e
            y = tf.transpose(y, [0, 2, 1, 3])  # abef
            result = tf.matmul(x, y, output_type=output_type)  # ab(cd)f
            result = tf.reshape(result, [-1, b, c, d, f])  # abcdf
            return tf.transpose(result, [0, 3, 1, 2, 4])
        elif subscripts == "abcde,afce->acdbf":
            _, b1, c1, d1, e1 = x.shape
            _, f2, c2, e2 = y.shape
            b, c, d, e, f = b1, c1 or c2, d1, e1 or e2, f2
            x = tf.transpose(x, [0, 2, 3, 1, 4])  # acdbe
            x = tf.reshape(x, [-1, c, d * b, e])  # ac(db)e
            y = tf.transpose(y, [0, 2, 3, 1])  # acef
            result = tf.matmul(x, y, output_type=output_type)  # ac(db)f
            return tf.reshape(result, [-1, c, d, b, f])
        else:
            raise NotImplementedError

    dtypes_to_resolve = list(set(standardize_dtype(x.dtype) for x in operands))
    # When operands are of int8, we cast the result to int32 to align with
    # the behavior of jax.
    if len(dtypes_to_resolve) == 1 and dtypes_to_resolve[0] == "int8":
        compute_dtype = "int8"
        result_dtype = "int32"
        output_type = "int32"
    else:
        result_dtype = dtypes.result_type(*dtypes_to_resolve)
        compute_dtype = result_dtype
        output_type = None

    # TODO: Remove the condition once `tf.einsum` supports int8xint8->int32
    if is_valid_for_custom_ops(subscripts, *operands) and not kwargs:
        # TODO: tf.matmul doesn't support integer dtype if not specifying
        # output_type="int32"
        if "int" in compute_dtype and output_type is None:
            compute_dtype = config.floatx()
        operands = tree.map_structure(
            lambda x: tf.cast(x, compute_dtype), operands
        )
        result = use_custom_ops(subscripts, *operands, output_type=output_type)
    else:
        # TODO: tf.einsum doesn't support integer dtype with gpu
        if "int" in compute_dtype:
            compute_dtype = config.floatx()
        operands = tree.map_structure(
            lambda x: tf.cast(x, compute_dtype), operands
        )
        result = tf.einsum(subscripts, *operands, **kwargs)
    return tf.cast(result, result_dtype)
@sparse.elementwise_binary_union(sparse.sparse_subtract)
def subtract(x1, x2):
    """Elementwise difference `x1 - x2` with numpy-style dtype promotion."""
    # Python scalars are kept as-is so `result_type` treats them as weak
    # types during promotion.
    if not isinstance(x1, (int, float)):
        x1 = convert_to_tensor(x1)
    if not isinstance(x2, (int, float)):
        x2 = convert_to_tensor(x2)
    common_dtype = dtypes.result_type(
        getattr(x1, "dtype", type(x1)),
        getattr(x2, "dtype", type(x2)),
    )
    return tf.subtract(
        convert_to_tensor(x1, common_dtype),
        convert_to_tensor(x2, common_dtype),
    )
def matmul(x1, x2):
    """Matrix product of `x1` and `x2`.

    Supports dense, sparse (`tf.SparseTensor`) and mixed operands, 1-D
    vector operands via `tf.tensordot`, batched inputs, and an
    int8 x int8 -> int32 fast path for hardware-accelerated matmul.
    """
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    x1_shape = x1.shape
    x2_shape = x2.shape
    x1_sparse = isinstance(x1, tf.SparseTensor)
    x2_sparse = isinstance(x2, tf.SparseTensor)
    # When both x1 and x2 are of int8 and dense tensor, specifying `output_type`
    # as int32 to enable hardware-accelerated matmul
    x1_dtype = standardize_dtype(x1.dtype)
    x2_dtype = standardize_dtype(x2.dtype)
    if (
        x1_dtype == "int8"
        and x2_dtype == "int8"
        and not x1_sparse
        and not x2_sparse
        and x1_shape.rank != 1  # TODO: support tf.tensordot
        and x2_shape.rank != 1  # TODO: support tf.tensordot
    ):
        compute_dtype = "int8"
        result_dtype = "int32"
        output_type = result_dtype
    else:
        # TODO: Typically, GPU and XLA only support float types
        compute_dtype = dtypes.result_type(x1.dtype, x2.dtype, float)
        result_dtype = dtypes.result_type(x1.dtype, x2.dtype)
        output_type = None
    x1 = tf.cast(x1, compute_dtype)
    x2 = tf.cast(x2, compute_dtype)

    def with_combined_batch_dimensions(a, b, output_shape, fn_3d):
        # Collapse the leading batch dimensions into one so a rank-3
        # kernel can be applied, then restore `output_shape`.
        a_sparse = isinstance(a, tf.SparseTensor)
        b_sparse = isinstance(b, tf.SparseTensor)
        batch_shape = b.shape[:-2] if b_sparse else a.shape[:-2]
        batch_size = math.prod(batch_shape)
        a3d_shape = [batch_size] + a.shape[-2:]
        a_3d = (
            tf.sparse.reshape(a, a3d_shape)
            if a_sparse
            else tf.reshape(a, a3d_shape)
        )
        b3d_shape = [batch_size] + b.shape[-2:]
        b_3d = (
            tf.sparse.reshape(b, b3d_shape)
            if b_sparse
            else tf.reshape(b, b3d_shape)
        )
        result_3d = fn_3d(a_3d, b_3d)
        return (
            tf.sparse.reshape(result_3d, output_shape)
            if isinstance(result_3d, tf.SparseTensor)
            else tf.reshape(result_3d, output_shape)
        )

    def sparse_sparse_matmul(a, b):
        # Sparse x sparse product via the CSR SparseMatrix kernels.
        dtype = a.values.dtype
        # Convert SparseTensors to CSR SparseMatrix.
        a_csr = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
            a.indices, a.values, a.dense_shape
        )
        b_csr = sparse_csr_matrix_ops.sparse_tensor_to_csr_sparse_matrix(
            b.indices, b.values, b.dense_shape
        )
        # Compute the CSR SparseMatrix matrix multiplication.
        result_csr = sparse_csr_matrix_ops.sparse_matrix_sparse_mat_mul(
            a_csr, b_csr, dtype
        )
        # Convert the CSR SparseMatrix to a SparseTensor.
        res = sparse_csr_matrix_ops.csr_sparse_matrix_to_sparse_tensor(
            result_csr, dtype
        )
        return tf.SparseTensor(res.indices, res.values, res.dense_shape)

    def embedding_lookup_sparse_dense_matmul(a, b):
        # We need at least one id per rows for embedding_lookup_sparse,
        # otherwise there will be missing rows in the output.
        a, _ = tf.sparse.fill_empty_rows(a, 0)
        # We need to split x1 into separate ids and weights tensors. The ids
        # should be the column indices of x1 and the values of the weights
        # can continue to be the actual x1. The column arrangement of ids
        # and weights does not matter as we sum over columns. See details in
        # the documentation for sparse_ops.sparse_tensor_dense_matmul.
        ids = tf.SparseTensor(
            indices=a.indices,
            values=a.indices[:, 1],
            dense_shape=a.dense_shape,
        )
        return tf.nn.embedding_lookup_sparse(b, ids, a, combiner="sum")

    # Either a or b is sparse
    def sparse_dense_matmul_3d(a, b):
        # Per-batch sparse-dense matmul over the leading dimension.
        return tf.map_fn(
            lambda x: tf.sparse.sparse_dense_matmul(x[0], x[1]),
            elems=(a, b),
            fn_output_signature=a.dtype,
        )

    if x1_sparse or x2_sparse:
        from keras.src.ops.operation_utils import compute_matmul_output_shape

        output_shape = compute_matmul_output_shape(x1_shape, x2_shape)
        if x1_sparse and x2_sparse:
            if x1_shape.rank <= 3:
                output = sparse_sparse_matmul(x1, x2)
            else:
                output = with_combined_batch_dimensions(
                    x1, x2, output_shape, sparse_sparse_matmul
                )
        else:
            # Sparse * dense or dense * sparse
            sparse_rank = x1_shape.rank if x1_sparse else x2_shape.rank
            # Special case: embedding_lookup_sparse for sparse * dense, rank 2
            if x1_sparse and sparse_rank == 2:
                output = embedding_lookup_sparse_dense_matmul(x1, x2)
            elif sparse_rank == 2:
                output = tf.sparse.sparse_dense_matmul(x1, x2)
            elif sparse_rank == 3:
                output = sparse_dense_matmul_3d(x1, x2)
            else:
                output = with_combined_batch_dimensions(
                    x1, x2, output_shape, sparse_dense_matmul_3d
                )
        output = tf.cast(output, result_dtype)
        output.set_shape(output_shape)
        return output
    else:
        if x1_shape.rank == 2 and x2_shape.rank == 2:
            output = tf.matmul(x1, x2, output_type=output_type)
        elif x2_shape.rank == 1:
            # Matrix (or batch) times vector.
            output = tf.tensordot(x1, x2, axes=1)
        elif x1_shape.rank == 1:
            # Vector times matrix: contract against the second-to-last axis.
            output = tf.tensordot(x1, x2, axes=[[0], [-2]])
        else:
            output = tf.matmul(x1, x2, output_type=output_type)
        return tf.cast(output, result_dtype)
@sparse.elementwise_binary_intersection
def multiply(x1, x2):
    """Elementwise product `x1 * x2` with numpy-style dtype promotion."""
    # Python scalars are kept as-is so `result_type` treats them as weak
    # types during promotion.
    if not isinstance(x1, (int, float)):
        x1 = convert_to_tensor(x1)
    if not isinstance(x2, (int, float)):
        x2 = convert_to_tensor(x2)
    common_dtype = dtypes.result_type(
        getattr(x1, "dtype", type(x1)),
        getattr(x2, "dtype", type(x2)),
    )
    return tf.multiply(
        convert_to_tensor(x1, common_dtype),
        convert_to_tensor(x2, common_dtype),
    )
def mean(x, axis=None, keepdims=False):
    """Arithmetic mean over `axis`.

    `tf.IndexedSlices` inputs are handled specially so the result stays
    sparse whenever the reduction allows it; dense inputs are computed in
    at-least-float32 precision to avoid low-precision overflow.
    """
    if isinstance(x, tf.IndexedSlices):
        if axis is None:
            # Reduce against all axes, result is a single value and dense.
            # The denominator has to account for `dense_shape`.
            sum = tf.reduce_sum(x.values, keepdims=keepdims)
            return sum / tf.cast(tf.reduce_prod(x.dense_shape), dtype=sum.dtype)
        axis = to_tuple_or_list(axis)
        if not axis:
            # Empty axis tuple, this is a no-op
            return x
        dense_shape = tf.convert_to_tensor(x.dense_shape)
        rank = tf.shape(dense_shape)[0]
        # Normalize axis: convert negative values and sort
        axis = [canonicalize_axis(a, rank) for a in axis]
        axis.sort()
        if axis == [0]:
            # Reduce against `axis=0` only, result is dense.
            # The denominator has to account for `dense_shape[0]`.
            sum = tf.reduce_sum(x.values, axis=0, keepdims=keepdims)
            return sum / tf.cast(dense_shape[0], dtype=sum.dtype)
        elif axis[0] == 0:
            # Reduce against axis 0 and other axes, result is dense.
            # We do `axis=0` separately first. The denominator has to account
            # for `dense_shape[0]`.
            # We use `keepdims=True` in `reduce_sum`` so that we can leave the
            # 0 in axis and do `reduce_mean` with `keepdims` to apply it for all
            # axes.
            sum = tf.reduce_sum(x.values, axis=0, keepdims=True)
            axis_0_mean = sum / tf.cast(dense_shape[0], dtype=sum.dtype)
            return tf.reduce_mean(axis_0_mean, axis=axis, keepdims=keepdims)
        elif keepdims:
            # With `keepdims=True`, result is an `IndexedSlices` with the same
            # indices since axis 0 is not touched. The only thing to do is to
            # correct `dense_shape` to account for dimensions that became 1.
            new_values = tf.reduce_mean(x.values, axis=axis, keepdims=True)
            new_dense_shape = tf.concat(
                [dense_shape[0:1], new_values.shape[1:]], axis=0
            )
            return tf.IndexedSlices(new_values, x.indices, new_dense_shape)
        elif rank == len(axis) + 1:
            # `keepdims=False` and reducing against all axes except 0, result is
            # a 1D tensor, which cannot be `IndexedSlices`. We have to scatter
            # the computed means to construct the correct dense tensor.
            return tf.scatter_nd(
                tf.expand_dims(x.indices, axis=1),
                tf.reduce_mean(x.values, axis=axis),
                [dense_shape[0]],
            )
        else:
            # `keepdims=False`, not reducing against axis 0 and there is at
            # least one other axis we are not reducing against. We simply need
            # to fix `dense_shape` to remove dimensions that were reduced.
            gather_indices = [i for i in range(rank) if i not in axis]
            return tf.IndexedSlices(
                tf.reduce_mean(x.values, axis=axis),
                x.indices,
                tf.gather(x.dense_shape, gather_indices, axis=0),
            )
    x = convert_to_tensor(x)
    ori_dtype = standardize_dtype(x.dtype)
    compute_dtype = dtypes.result_type(x.dtype, "float32")
    # `tf.reduce_mean` does not handle low precision (e.g., float16) overflow
    # correctly, so we compute with float32 and cast back to the original type.
    if "int" in ori_dtype or ori_dtype == "bool":
        result_dtype = compute_dtype
    else:
        result_dtype = ori_dtype
    output = tf.reduce_mean(
        tf.cast(x, compute_dtype), axis=axis, keepdims=keepdims
    )
    return tf.cast(output, result_dtype)
def max(x, axis=None, keepdims=False, initial=None):
    """Maximum of tensor elements over `axis` (shadows the builtin `max`
    to mirror the numpy API).

    Args:
        x: Input tensor.
        axis: Axis or axes to reduce; `None` reduces over all axes.
        keepdims: Whether reduced axes are kept with size 1.
        initial: Optional lower bound applied to the result.
    """
    x = convert_to_tensor(x)
    # The TensorFlow numpy API implementation doesn't support `initial` so we
    # handle it manually here.
    if initial is not None:
        if standardize_dtype(x.dtype) == "bool":
            # For booleans, max is logical OR; the `initial` comparison is
            # done in int32 and the result cast back to bool.
            x = tf.reduce_any(x, axis=axis, keepdims=keepdims)
            x = tf.math.maximum(tf.cast(x, "int32"), tf.cast(initial, "int32"))
            return tf.cast(x, "bool")
        else:
            x = tf.reduce_max(x, axis=axis, keepdims=keepdims)
            return tf.math.maximum(x, initial)
    # TensorFlow returns -inf by default for an empty list, but for consistency
    # with other backends and the numpy API we want to throw in this case.
    if tf.executing_eagerly():
        size_x = size(x)
        tf.assert_greater(
            size_x,
            tf.constant(0, dtype=size_x.dtype),
            message="Cannot compute the max of an empty tensor.",
        )
    if standardize_dtype(x.dtype) == "bool":
        # Max of booleans is logical OR.
        return tf.reduce_any(x, axis=axis, keepdims=keepdims)
    else:
        return tf.reduce_max(x, axis=axis, keepdims=keepdims)
def ones(shape, dtype=None):
    """All-ones tensor of `shape`; dtype defaults to the Keras floatx."""
    return tf.ones(shape, dtype=dtype or config.floatx())
def zeros(shape, dtype=None):
    """All-zeros tensor of `shape`; dtype defaults to the Keras floatx."""
    return tf.zeros(shape, dtype=dtype or config.floatx())
@sparse.elementwise_unary
def absolute(x):
    """Elementwise absolute value."""
    x = convert_to_tensor(x)
    kind = standardize_dtype(x.dtype)
    # Unsigned ints and booleans can never be negative: pass through.
    if "uint" in kind or kind == "bool":
        return x
    return tf.abs(x)
def abs(x):
    """Alias of `absolute`; intentionally shadows the builtin `abs` to
    mirror the numpy API."""
    return absolute(x)
def all(x, axis=None, keepdims=False):
    """Logical AND reduction over `axis` (shadows the builtin `all`)."""
    return tf.reduce_all(tf.cast(x, "bool"), axis=axis, keepdims=keepdims)
def angle(x):
    """Elementwise angle (phase) of a tensor."""
    x = convert_to_tensor(x)
    # int64 maps to floatx (not float64) to match the Keras dtype policy.
    target = (
        config.floatx()
        if standardize_dtype(x.dtype) == "int64"
        else dtypes.result_type(x.dtype, float)
    )
    return tf.math.angle(tf.cast(x, target))
def any(x, axis=None, keepdims=False):
    """Logical OR reduction over `axis` (shadows the builtin `any`)."""
    return tf.reduce_any(tf.cast(x, "bool"), axis=axis, keepdims=keepdims)
def amax(x, axis=None, keepdims=False):
    """Alias of `max` without the `initial` argument (numpy's `amax`)."""
    return max(x, axis=axis, keepdims=keepdims)
def amin(x, axis=None, keepdims=False):
    """Alias of `min` without the `initial` argument (numpy's `amin`)."""
    return min(x, axis=axis, keepdims=keepdims)
def append(x1, x2, axis=None):
    """Concatenate `x2` onto `x1`; both are flattened when `axis` is None."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    common_dtype = dtypes.result_type(x1.dtype, x2.dtype)
    x1 = tf.cast(x1, common_dtype)
    x2 = tf.cast(x2, common_dtype)
    if axis is not None:
        return tf.concat([x1, x2], axis=axis)
    return tf.concat([tf.reshape(x1, [-1]), tf.reshape(x2, [-1])], axis=0)
def arange(start, stop=None, step=None, dtype=None):
    """Evenly spaced values in `[start, stop)` with spacing `step`.

    When `dtype` is not given, it is inferred numpy-style from the
    provided arguments.
    """
    if dtype is None:
        dtypes_to_resolve = [getattr(start, "dtype", type(start))]
        if stop is not None:
            dtypes_to_resolve.append(getattr(stop, "dtype", type(stop)))
        if step is not None:
            dtypes_to_resolve.append(getattr(step, "dtype", type(step)))
        dtype = dtypes.result_type(*dtypes_to_resolve)
    dtype = standardize_dtype(dtype)
    if step is None:
        step = 1
    try:
        out = tf.range(start, stop, delta=step, dtype=dtype)
    except tf.errors.NotFoundError:
        # Some dtypes may not work in eager mode on CPU or GPU.
        # Fall back to a float32 range and cast to the requested dtype.
        out = tf.range(start, stop, delta=step, dtype="float32")
        out = tf.cast(out, dtype)
    return out
@sparse.densifying_unary(0.5 * np.pi)
def arccos(x):
    """Elementwise inverse cosine."""
    x = convert_to_tensor(x)
    # int64 maps to floatx (not float64) to match the Keras dtype policy.
    target = (
        config.floatx()
        if standardize_dtype(x.dtype) == "int64"
        else dtypes.result_type(x.dtype, float)
    )
    return tf.math.acos(tf.cast(x, target))
@sparse.densifying_unary(np.nan)
def arccosh(x):
    """Elementwise inverse hyperbolic cosine."""
    x = convert_to_tensor(x)
    # int64 maps to floatx (not float64) to match the Keras dtype policy.
    target = (
        config.floatx()
        if standardize_dtype(x.dtype) == "int64"
        else dtypes.result_type(x.dtype, float)
    )
    return tf.math.acosh(tf.cast(x, target))
@sparse.elementwise_unary
def arcsin(x):
    """Elementwise inverse sine."""
    x = convert_to_tensor(x)
    # int64 maps to floatx (not float64) to match the Keras dtype policy.
    target = (
        config.floatx()
        if standardize_dtype(x.dtype) == "int64"
        else dtypes.result_type(x.dtype, float)
    )
    return tf.math.asin(tf.cast(x, target))
@sparse.elementwise_unary
def arcsinh(x):
    """Elementwise inverse hyperbolic sine."""
    x = convert_to_tensor(x)
    # int64 maps to floatx (not float64) to match the Keras dtype policy.
    target = (
        config.floatx()
        if standardize_dtype(x.dtype) == "int64"
        else dtypes.result_type(x.dtype, float)
    )
    return tf.math.asinh(tf.cast(x, target))
@sparse.elementwise_unary
def arctan(x):
    """Elementwise inverse tangent."""
    x = convert_to_tensor(x)
    # int64 maps to floatx (not float64) to match the Keras dtype policy.
    target = (
        config.floatx()
        if standardize_dtype(x.dtype) == "int64"
        else dtypes.result_type(x.dtype, float)
    )
    return tf.math.atan(tf.cast(x, target))
def arctan2(x1, x2):
    """Elementwise four-quadrant arctangent of `x1 / x2`."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    target = dtypes.result_type(x1.dtype, x2.dtype, float)
    return tf.math.atan2(tf.cast(x1, target), tf.cast(x2, target))
@sparse.elementwise_unary
def arctanh(x):
    """Elementwise inverse hyperbolic tangent."""
    x = convert_to_tensor(x)
    # int64 maps to floatx (not float64) to match the Keras dtype policy.
    target = (
        config.floatx()
        if standardize_dtype(x.dtype) == "int64"
        else dtypes.result_type(x.dtype, float)
    )
    return tf.math.atanh(tf.cast(x, target))
def _keepdims(x, y, axis):
    """Reshape reduction result `y` so the axes of `x` reduced over
    `axis` are kept as size-1 dimensions (i.e. emulate `keepdims=True`)."""
    if axis is None:
        # Full reduction: every axis of `x` collapses to 1.
        shape = [1 for _ in range(len(x.shape))]
    else:
        shape = list(shape_op(x))
        # NOTE(review): the loop variable shadows the `axis` parameter;
        # harmless here since the parameter is not used afterwards.
        for axis in tree.flatten(axis):
            shape[axis] = 1
    y = tf.reshape(y, shape)
    return y
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/tensorflow/trackable.py | keras/src/backend/tensorflow/trackable.py | import tensorflow as tf
from keras.src.utils import tracking
class KerasAutoTrackable(tf.__internal__.tracking.AutoTrackable):
    """Manages dependencies on other objects with Keras tracking.

    Similar to TF AutoTrackable, but disabling tracking is based
    on tracking within Keras.

    This serves as an interface between Keras tracking and TF tracking.
    """

    def __setattr__(self, name, value):
        """Support self.foo = trackable syntax."""
        try:
            if getattr(self, name) is value:
                # Short circuit for `self.$x = self.$x`.
                return
        except AttributeError:
            pass
        # `_self_setattr_tracking` presumably comes from TF's tracking
        # machinery (toggled to disable automatic dependency tracking) —
        # TODO confirm; it defaults to enabled when absent.
        if getattr(self, "_self_setattr_tracking", True):
            value = sticky_attribute_assignment(
                trackable=self, value=value, name=name
            )
        super().__setattr__(name, value)
def sticky_attribute_assignment(trackable, name, value):
    """Adds dependencies, called from __setattr__.

    Args:
        trackable: The object to add dependencies to (generally the one having
            an attribute assigned).
        name: The attribute name being assigned.
        value: The value being assigned. Not necessarily a trackable object.

    Returns:
        The value which should be stored in the attribute.
    """
    # Record the names of Keras-tracked container attributes on the owner.
    # NOTE(review): `_tracked` is assumed to be a list maintained by the
    # Keras tracking layer — confirm against its definition.
    if isinstance(
        value, (tracking.TrackedList, tracking.TrackedDict, tracking.TrackedSet)
    ) and hasattr(trackable, "_tracked"):
        trackable._tracked.append(name)
    # Keras-level switch: when tracking is disabled, store without creating
    # a TF checkpoint dependency.
    if not tracking.is_tracking_enabled():
        return value
    if isinstance(value, tf.__internal__.tracking.Trackable):
        trackable._track_trackable(  # pylint: disable=protected-access
            value,
            name=name,
            # Allow the user to switch the Trackable which is tracked by this
            # name, since assigning a new variable to an attribute has
            # historically been fine (e.g. Adam did this).
            overwrite=True,
        )
    return value
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/tensorflow/trainer.py | keras/src/backend/tensorflow/trainer.py | import contextlib
import functools
import warnings
import numpy as np
import tensorflow as tf
from tensorflow.python.eager import context as tf_context
from keras.src import callbacks as callbacks_module
from keras.src import metrics as metrics_module
from keras.src import optimizers as optimizers_module
from keras.src import tree
from keras.src.backend import config
from keras.src.losses import loss as loss_module
from keras.src.trainers import trainer as base_trainer
from keras.src.trainers.data_adapters import array_slicing
from keras.src.trainers.data_adapters import data_adapter_utils
from keras.src.trainers.epoch_iterator import EpochIterator
from keras.src.utils import traceback_utils
class TensorFlowTrainer(base_trainer.Trainer):
def __init__(self):
    super().__init__()
    # Compiled step functions; built lazily by fit/evaluate/predict.
    self.train_function = None
    self.test_function = None
    self.predict_function = None
    # Specifies how many steps of the step_per_execution loop to unroll.
    # Increasing this value can reduce kernel launch overhead,
    # but will increase memory usage and compilation time.
    self.unrolled_steps_per_execution = 1
    # Model must be created under scope of DistStrat it will be trained
    # with.
    if tf.distribute.has_strategy():
        self._distribute_strategy = tf.distribute.get_strategy()
    else:
        self._distribute_strategy = None
@property
def distribute_strategy(self):
    """The strategy captured at construction, else the current default."""
    return self._distribute_strategy or tf.distribute.get_strategy()
@property
def distribute_reduction_method(self):
    # Falls back to "auto" when no explicit reduction method was set.
    return self._distribute_reduction_method or "auto"

@distribute_reduction_method.setter
def distribute_reduction_method(self, value):
    self._distribute_reduction_method = value
def train_step(self, data):
    """One optimization step on a batch: forward pass, loss, gradient
    application, and metric updates. Returns the metric results."""
    x, y, sample_weight = data_adapter_utils.unpack_x_y_sample_weight(data)

    # Forward pass
    with tf.GradientTape() as tape:
        if self._call_has_training_arg:
            y_pred = self(x, training=True)
        else:
            y_pred = self(x)
        loss = self._compute_loss(
            x=x,
            y=y,
            y_pred=y_pred,
            sample_weight=sample_weight,
            training=True,
        )
        # Weight the tracked loss by the batch size (taken from the first
        # non-None input structure element).
        self._loss_tracker.update_state(
            loss_module.unscale_loss_for_distribution(loss),
            sample_weight=tf.shape(
                next(i for i in tree.flatten(x) if i is not None)
            )[0],
        )
        if self.optimizer is not None:
            loss = self.optimizer.scale_loss(loss)

    # Compute gradients
    if self.trainable_weights:
        trainable_weights = self.trainable_weights
        gradients = tape.gradient(loss, trainable_weights)

        # Update weights
        self.optimizer.apply_gradients(zip(gradients, trainable_weights))
    else:
        warnings.warn("The model does not have any trainable weights.")
    return self.compute_metrics(x, y, y_pred, sample_weight=sample_weight)
def test_step(self, data):
    """One evaluation step on a batch: forward pass (inference mode),
    loss tracking, and metric updates. Returns the metric results."""
    x, y, sample_weight = data_adapter_utils.unpack_x_y_sample_weight(data)
    if self._call_has_training_arg:
        y_pred = self(x, training=False)
    else:
        y_pred = self(x)
    loss = self._compute_loss(
        x=x, y=y, y_pred=y_pred, sample_weight=sample_weight, training=False
    )
    # Weight the tracked loss by the batch size (taken from the first
    # non-None input structure element).
    self._loss_tracker.update_state(
        loss_module.unscale_loss_for_distribution(loss),
        sample_weight=tf.shape(
            next(i for i in tree.flatten(x) if i is not None)
        )[0],
    )
    return self.compute_metrics(x, y, y_pred, sample_weight=sample_weight)
def predict_step(self, data):
    """Run a single inference step on one batch and return predictions."""
    x, _, _ = data_adapter_utils.unpack_x_y_sample_weight(data)
    # Only pass `training` when the model's `call` accepts it.
    call_kwargs = {"training": False} if self._call_has_training_arg else {}
    return self(x, **call_kwargs)
def _autoconvert_optionals(self, step_func):
    """Wrap `step_func` so nested `tf.experimental.Optional`s become `None`."""

    def _optional_to_none(value):
        return None if isinstance(value, tf.experimental.Optional) else value

    @functools.wraps(step_func)
    def wrapper(data):
        return step_func(tree.map_structure(_optional_to_none, data))

    return wrapper
def _make_function(self, step_function):
    """Build the driver callable that runs `step_function` once per call.

    The returned `function(iterator)` executes up to
    `self.steps_per_execution` steps per invocation. For tf.data /
    distributed iterators the multi-step loop is expressed as a
    `tf.while_loop` over `tf.experimental.Optional` values so it can be
    traced; for plain Python iterables a simple Python loop is used.
    """

    @tf.autograph.experimental.do_not_convert
    def one_step_on_data(data):
        """Runs a single training step on a batch of data."""
        outputs = self.distribute_strategy.run(step_function, args=(data,))
        outputs = reduce_per_replica(
            outputs,
            self.distribute_strategy,
            reduction="auto",
        )
        return outputs

    if not self.run_eagerly:
        one_step_on_data = tf.function(
            one_step_on_data,
            reduce_retracing=True,
            jit_compile=self.jit_compile,
        )
    one_step_on_data = self._autoconvert_optionals(one_step_on_data)

    @tf.autograph.experimental.do_not_convert
    def multi_step_on_iterator(iterator):
        # Fast path: no while-loop machinery needed for a single step.
        if self.steps_per_execution == 1:
            return tf.experimental.Optional.from_value(
                one_step_on_data(iterator.get_next())
            )

        # the spec is set lazily during the tracing of `tf.while_loop`
        empty_outputs = tf.experimental.Optional.empty(None)

        def cond(execution_step, optional_outputs, next_optional_inputs):
            # Keep looping while steps remain AND the iterator has data.
            return tf.logical_and(
                tf.less(execution_step, self.steps_per_execution),
                next_optional_inputs.has_value(),
            )

        def inner_body(
            execution_step, optional_outputs, next_optional_inputs
        ):
            def has_next():
                next_optional_outputs = tf.experimental.Optional.from_value(
                    one_step_on_data(next_optional_inputs.get_value())
                )
                # NOTE(review): writes the private `_element_spec` so the
                # lazily-created empty Optional matches the real output
                # spec — relies on TF internals; confirm on TF upgrades.
                empty_outputs._element_spec = (
                    next_optional_outputs.element_spec
                )
                return next_optional_outputs

            def no_has_next():
                optional_outputs._element_spec = empty_outputs._element_spec
                return optional_outputs

            next_optional_outputs = tf.cond(
                tf.logical_and(
                    tf.less(execution_step, self.steps_per_execution),
                    next_optional_inputs.has_value(),
                ),
                has_next,
                no_has_next,
            )

            return (
                execution_step + 1,
                next_optional_outputs,
                # We don't want to iterate if we have reached
                # `steps_per_execution` steps
                tf.cond(
                    tf.less(execution_step + 1, self.steps_per_execution),
                    lambda: iterator.get_next_as_optional(),
                    lambda: next_optional_inputs,
                ),
            )

        def body(execution_step, optional_outputs, next_optional_inputs):
            # Unroll a few inner steps per while-loop iteration to cut
            # loop overhead.
            for _ in range(
                min(
                    self.unrolled_steps_per_execution,
                    self.steps_per_execution,
                )
            ):
                execution_step, optional_outputs, next_optional_inputs = (
                    inner_body(
                        execution_step,
                        optional_outputs,
                        next_optional_inputs,
                    )
                )
            return (execution_step, optional_outputs, next_optional_inputs)

        execution_step = tf.constant(0)
        next_optional_inputs = iterator.get_next_as_optional()

        # Run the while loop
        _, final_optional_outputs, _ = tf.while_loop(
            cond,
            body,
            loop_vars=[execution_step, empty_outputs, next_optional_inputs],
        )
        final_optional_outputs._element_spec = empty_outputs.element_spec
        return final_optional_outputs

    if not self.run_eagerly:
        multi_step_on_iterator = tf.function(
            multi_step_on_iterator, reduce_retracing=True
        )

    def function(iterator):
        if isinstance(
            iterator, (tf.data.Iterator, tf.distribute.DistributedIterator)
        ):
            opt_outputs = multi_step_on_iterator(iterator)
            if not opt_outputs.has_value():
                # Empty Optional means the iterator was exhausted.
                raise StopIteration
            return opt_outputs.get_value()
        else:
            # Plain Python iterable: run steps eagerly, return last logs.
            for step, data in zip(
                range(self.steps_per_execution), iterator
            ):
                outputs = one_step_on_data(data)
            return outputs

    return function
def make_train_function(self, force=False):
    """Create `self.train_function` if needed and return it.

    Args:
        force: If `True`, rebuild the function even if one is cached.

    Returns:
        The callable used to run one training execution step.
    """
    if self.train_function is not None and not force:
        return self.train_function
    self.train_function = self._make_function(self.train_step)
    # Fix: return the freshly built function so both branches are
    # consistent (previously this path implicitly returned None).
    return self.train_function
def make_test_function(self, force=False):
if self.test_function is not None and not force:
return self.test_function
self.test_function = self._make_function(self.test_step)
def make_predict_function(self, force=False):
    """Create `self.predict_function` if needed and return it.

    The built function consumes a *list* of batches (to support
    `steps_per_execution > 1`) and concatenates per-replica outputs.

    Args:
        force: If `True`, rebuild the function even if one is cached.

    Returns:
        The callable used to run one prediction execution step.
    """
    if self.predict_function is not None and not force:
        return self.predict_function

    @tf.autograph.experimental.do_not_convert
    def one_step_on_data(data):
        """Runs a predict test step on a batch of data."""
        return self.predict_step(data)

    if not self.run_eagerly and self.jit_compile:
        one_step_on_data = tf.function(
            one_step_on_data, reduce_retracing=True, jit_compile=True
        )
    one_step_on_data = self._autoconvert_optionals(one_step_on_data)

    @tf.autograph.experimental.do_not_convert
    def one_step_on_data_distributed(data):
        # `data` is a single-element list of batches.
        data = data[0]
        outputs = self.distribute_strategy.run(
            one_step_on_data, args=(data,)
        )
        # "concat" stitches per-replica predictions along axis 0.
        outputs = reduce_per_replica(
            outputs,
            self.distribute_strategy,
            reduction="concat",
        )
        return outputs

    @tf.autograph.experimental.do_not_convert
    def multi_step_on_data(data):
        # Run the first batch, then append each subsequent batch's outputs.
        outputs = one_step_on_data_distributed(data[:1])
        for single_step_data in data[1:]:
            step_outputs = one_step_on_data_distributed([single_step_data])
            outputs = tree.map_structure(
                lambda t1, t2: concat([t1, t2]), outputs, step_outputs
            )
        return outputs

    if self.steps_per_execution > 1:
        predict_function = multi_step_on_data
    else:
        predict_function = one_step_on_data_distributed

    if not self.run_eagerly:
        predict_function = tf.function(
            predict_function, reduce_retracing=True
        )

    self.predict_function = predict_function
    # Fix: return the freshly built function so both branches are
    # consistent (previously this path implicitly returned None).
    return predict_function
@traceback_utils.filter_traceback
def fit(
    self,
    x=None,
    y=None,
    batch_size=None,
    epochs=1,
    verbose="auto",
    callbacks=None,
    validation_split=0.0,
    validation_data=None,
    shuffle=True,
    class_weight=None,
    sample_weight=None,
    initial_epoch=0,
    steps_per_epoch=None,
    validation_steps=None,
    validation_batch_size=None,
    validation_freq=1,
):
    """TensorFlow-backend training loop (see base `Trainer.fit` docs).

    Builds a distributed epoch iterator, drives the compiled
    `train_function`, runs validation at `validation_freq`, and invokes
    callbacks around every batch and epoch.
    """
    self._assert_compile_called("fit")
    # Possibly cap epochs for debugging runs.
    max_epochs = config.max_epochs()
    if max_epochs and max_epochs < epochs:
        warnings.warn("Limiting epochs to %d" % max_epochs)
        epochs = max_epochs
    # TODO: respect compiled trainable state
    self._eval_epoch_iterator = None
    if validation_split and validation_data is None:
        # Create the validation data using the training data. Only supported
        # for TF/numpy/jax arrays.
        (
            (x, y, sample_weight),
            validation_data,
        ) = array_slicing.train_validation_split(
            (x, y, sample_weight), validation_split=validation_split
        )

    if validation_data is not None:
        (
            val_x,
            val_y,
            val_sample_weight,
        ) = data_adapter_utils.unpack_x_y_sample_weight(validation_data)

    # Create an iterator that yields batches for one epoch.
    epoch_iterator = TFEpochIterator(
        x=x,
        y=y,
        sample_weight=sample_weight,
        batch_size=batch_size,
        steps_per_epoch=steps_per_epoch,
        shuffle=shuffle,
        class_weight=class_weight,
        distribute_strategy=self.distribute_strategy,
        steps_per_execution=self.steps_per_execution,
    )
    # Build the model symbolically (under the strategy scope) before
    # training starts, then rewind the iterator consumed while peeking.
    self._maybe_symbolic_build(iterator=epoch_iterator)
    epoch_iterator.reset()

    # Container that configures and calls callbacks.
    if not isinstance(callbacks, callbacks_module.CallbackList):
        callbacks = callbacks_module.CallbackList(
            callbacks,
            add_history=True,
            add_progbar=verbose != 0,
            verbose=verbose,
            epochs=epochs,
            steps=epoch_iterator.num_batches,
            model=self,
        )

    self.stop_training = False
    self.make_train_function()
    callbacks.on_train_begin()
    training_logs = None
    logs = {}
    initial_epoch = self._initial_epoch or initial_epoch
    for epoch in range(initial_epoch, epochs):
        self.reset_metrics()
        callbacks.on_epoch_begin(epoch)
        with epoch_iterator.catch_stop_iteration():
            for begin_step, end_step, iterator in epoch_iterator:
                callbacks.on_train_batch_begin(begin_step)
                logs = self.train_function(iterator)
                callbacks.on_train_batch_end(end_step, logs)
                if self.stop_training:
                    break

        # Override with model metrics instead of last step logs if needed.
        epoch_logs = dict(self._get_metrics_result_or_logs(logs))

        # Run validation.
        if validation_data is not None and self._should_eval(
            epoch, validation_freq
        ):
            # Create EpochIterator for evaluation and cache it.
            if getattr(self, "_eval_epoch_iterator", None) is None:
                self._eval_epoch_iterator = TFEpochIterator(
                    x=val_x,
                    y=val_y,
                    sample_weight=val_sample_weight,
                    batch_size=validation_batch_size or batch_size,
                    distribute_strategy=self.distribute_strategy,
                    steps_per_execution=self.steps_per_execution,
                    steps_per_epoch=validation_steps,
                    shuffle=False,
                )
            val_logs = self.evaluate(
                x=val_x,
                y=val_y,
                sample_weight=val_sample_weight,
                batch_size=validation_batch_size or batch_size,
                steps=validation_steps,
                callbacks=callbacks,
                return_dict=True,
                _use_cached_eval_dataset=True,
            )
            # Prefix validation metrics with "val_" in the epoch logs.
            val_logs = {
                f"val_{name}": val for name, val in val_logs.items()
            }
            epoch_logs.update(val_logs)

        callbacks.on_epoch_end(epoch, epoch_logs)
        training_logs = epoch_logs
        if self.stop_training:
            break

    if (
        isinstance(self.optimizer, optimizers_module.Optimizer)
        and epochs > 0
    ):
        # E.g. swap in averaged weights for optimizers that keep EMAs.
        self.optimizer.finalize_variable_values(self.trainable_weights)

    # If _eval_epoch_iterator exists, delete it after all epochs are done.
    if getattr(self, "_eval_epoch_iterator", None) is not None:
        del self._eval_epoch_iterator
    callbacks.on_train_end(logs=training_logs)
    return self.history
@traceback_utils.filter_traceback
def evaluate(
    self,
    x=None,
    y=None,
    batch_size=None,
    verbose="auto",
    sample_weight=None,
    steps=None,
    callbacks=None,
    return_dict=False,
    **kwargs,
):
    """TensorFlow-backend evaluation loop (see base `Trainer.evaluate`).

    Accepts the private `_use_cached_eval_dataset` kwarg so `fit` can
    reuse the validation iterator it built.
    """
    self._assert_compile_called("evaluate")
    # TODO: respect compiled trainable state
    use_cached_eval_dataset = kwargs.pop("_use_cached_eval_dataset", False)
    if kwargs:
        raise ValueError(f"Arguments not recognized: {kwargs}")

    if use_cached_eval_dataset:
        # Reuse the iterator `fit` cached for validation.
        epoch_iterator = self._eval_epoch_iterator
    else:
        # Create an iterator that yields batches of input/target data.
        epoch_iterator = TFEpochIterator(
            x=x,
            y=y,
            sample_weight=sample_weight,
            batch_size=batch_size,
            steps_per_epoch=steps,
            shuffle=False,
            distribute_strategy=self.distribute_strategy,
            steps_per_execution=self.steps_per_execution,
        )

    self._maybe_symbolic_build(iterator=epoch_iterator)
    epoch_iterator.reset()

    # Container that configures and calls callbacks.
    if not isinstance(callbacks, callbacks_module.CallbackList):
        callbacks = callbacks_module.CallbackList(
            callbacks,
            add_progbar=verbose != 0,
            verbose=verbose,
            epochs=1,
            steps=epoch_iterator.num_batches,
            model=self,
        )

    self.make_test_function()
    self.stop_evaluating = False
    callbacks.on_test_begin()
    logs = {}
    self.reset_metrics()
    with epoch_iterator.catch_stop_iteration():
        for begin_step, end_step, iterator in epoch_iterator:
            callbacks.on_test_batch_begin(begin_step)
            logs = self.test_function(iterator)
            callbacks.on_test_batch_end(end_step, logs)
            if self.stop_evaluating:
                break
    # Prefer full metric results over the last step's logs.
    logs = self._get_metrics_result_or_logs(logs)
    callbacks.on_test_end(logs)
    if return_dict:
        return logs
    return self._flatten_metrics_in_order(logs)
@traceback_utils.filter_traceback
def predict(
    self, x, batch_size=None, verbose="auto", steps=None, callbacks=None
):
    """TensorFlow-backend prediction loop (see base `Trainer.predict`).

    Collects per-batch outputs and concatenates them along axis 0 at
    the end, handling ragged batch shapes.
    """
    # Create an iterator that yields batches of input data.
    epoch_iterator = TFEpochIterator(
        x=x,
        batch_size=batch_size,
        steps_per_epoch=steps,
        shuffle=False,
        distribute_strategy=self.distribute_strategy,
        steps_per_execution=self.steps_per_execution,
    )

    # Container that configures and calls callbacks.
    if not isinstance(callbacks, callbacks_module.CallbackList):
        callbacks = callbacks_module.CallbackList(
            callbacks,
            add_progbar=verbose != 0,
            verbose=verbose,
            epochs=1,
            steps=epoch_iterator.num_batches,
            model=self,
        )

    def append_to_outputs(batch_outputs, outputs):
        # Accumulate each batch's (possibly nested) outputs into lists,
        # creating the list structure on the first batch.
        if outputs is None:
            outputs = tree.map_structure(
                lambda batch_output: [batch_output],
                batch_outputs,
            )
        else:
            tree.map_structure_up_to(
                batch_outputs,
                lambda output, batch_output: output.append(batch_output),
                outputs,
                batch_outputs,
            )
        return outputs

    def get_data(iterator):
        """Returns data for the next execution."""
        data = []
        for _ in range(self.steps_per_execution):
            try:
                single_step_data = next(iterator)
            except (StopIteration, tf.errors.OutOfRangeError) as e:
                if hasattr(data, "__len__") and len(data) > 0:
                    # Suppress the error when still have remaining data.
                    return data
                else:
                    # Re-raise the error for
                    # EpochIterator.catch_stop_iteration() to catch when
                    # no data left.
                    raise e
            data.append(single_step_data)
        return data

    self.make_predict_function()
    self.stop_predicting = False
    callbacks.on_predict_begin()
    outputs = None
    with epoch_iterator.catch_stop_iteration():
        for begin_step, end_step, iterator in epoch_iterator:
            callbacks.on_predict_batch_begin(begin_step)
            data = get_data(iterator)
            batch_outputs = self.predict_function(data)
            outputs = append_to_outputs(batch_outputs, outputs)
            callbacks.on_predict_batch_end(
                end_step, {"outputs": batch_outputs}
            )
            if self.stop_predicting:
                break
    callbacks.on_predict_end()
    # Concatenate the accumulated per-batch outputs along axis 0; the
    # helper tolerates a ragged final batch.
    outputs = tree.map_structure_up_to(
        batch_outputs, potentially_ragged_concat, outputs
    )
    return tree.map_structure(convert_to_np_if_not_ragged, outputs)
def train_on_batch(
    self,
    x,
    y=None,
    sample_weight=None,
    class_weight=None,
    return_dict=False,
):
    """Run one training step on a single batch (see base class docs)."""
    self._assert_compile_called("train_on_batch")
    if class_weight is not None:
        if sample_weight is not None:
            raise ValueError(
                "Arguments `sample_weight` and `class_weight` "
                "cannot be specified at the same time. "
                f"Received: sample_weight={sample_weight}, "
                f"class_weight={class_weight}"
            )
        # Convert per-class weights into per-sample weights.
        sample_weight = data_adapter_utils.class_weight_to_sample_weights(
            y, class_weight
        )

    # Maybe build model
    self._maybe_symbolic_build(data_batch=(x, y, sample_weight))
    self.make_train_function()

    def data():
        # Single-batch generator consumed by the train function.
        yield (x, y, sample_weight)

    logs = self.train_function(data())
    logs = tree.map_structure(lambda x: np.array(x), logs)
    if return_dict:
        return logs
    return self._flatten_metrics_in_order(logs)
def test_on_batch(
    self,
    x,
    y=None,
    sample_weight=None,
    return_dict=False,
):
    """Run one evaluation step on a single batch (see base class docs)."""
    self._assert_compile_called("test_on_batch")

    def data():
        # Single-batch generator consumed by the test function.
        yield (x, y, sample_weight)

    # Maybe build model
    self._maybe_symbolic_build(data_batch=(x, y, sample_weight))
    self.make_test_function()

    logs = self.test_function(data())
    logs = tree.map_structure(lambda x: np.array(x), logs)
    if return_dict:
        return logs
    return self._flatten_metrics_in_order(logs)
def predict_on_batch(self, x):
    """Return model predictions for a single batch `x`."""
    self.make_predict_function()
    # The predict function expects a list of single-step data tuples.
    outputs = self.predict_function([(x,)])
    return tree.map_structure(convert_to_np_if_not_ragged, outputs)
# Backwards compatibility shims.
@property
def compiled_metrics(self):
    """Deprecated shim mimicking the legacy `compiled_metrics` object."""

    class DeprecatedCompiledMetric:
        # `_` takes the shim instance's place as `self`; the model's
        # `self` is captured from the enclosing closure.
        def update_state(_, y, y_pred, sample_weight=None):
            return self._compiled_metrics_update_state(
                y, y_pred, sample_weight=sample_weight
            )

    return DeprecatedCompiledMetric()
def _compiled_metrics_update_state(self, y, y_pred, sample_weight=None):
    """Update all metrics, warning that this legacy entry point is deprecated."""
    warnings.warn(
        "`model.compiled_metrics()` is deprecated. "
        "Instead, use e.g.:\n"
        "```\n"
        "for metric in self.metrics:\n"
        "    metric.update_state(y, y_pred)\n"
        "```\n",
        stacklevel=2,
    )
    for metric in self.metrics:
        if isinstance(metric, metrics_module.Mean):
            # Mean-style trackers take a single value stream, not (y, y_pred).
            metric.update_state(y_pred, sample_weight=sample_weight)
        else:
            metric.update_state(y, y_pred, sample_weight=sample_weight)
def compiled_loss(
    self, y, y_pred, sample_weight=None, regularization_losses=None
):
    """Deprecated alias delegating to `compute_loss` (with `x=None`)."""
    warnings.warn(
        "`model.compiled_loss()` is deprecated. Instead, use "
        "`model.compute_loss(x, y, y_pred, sample_weight, training)`.",
    )
    loss_value = self.compute_loss(
        x=None, y=y, y_pred=y_pred, sample_weight=sample_weight
    )
    return loss_value
def loss(self, y, y_pred, sample_weight=None):
    """Deprecated alias delegating to `compute_loss` (with `x=None`)."""
    warnings.warn(
        "`model.loss()` is deprecated. Instead, use "
        "`model.compute_loss(x, y, y_pred, sample_weight, training)`.",
    )
    loss_value = self.compute_loss(
        x=None, y=y, y_pred=y_pred, sample_weight=sample_weight
    )
    return loss_value
def _maybe_symbolic_build(self, iterator=None, data_batch=None):
    """Symbolically build the model under the strategy scope, if one is set.

    Peeks one batch from `iterator` (or uses `data_batch` directly) to
    determine input structure for building.
    """
    # Only symbolic build when distribute strategy is created in tf trainer
    if self._distribute_strategy is None:
        # When no distribution strategy is set, defer building
        # to when the train/test/predict function gets traced.
        # This maximizes backwards compatibility.
        return
    # Unlike jax/torch iterator, tf iterator returns an iterator instead
    # of data batch in `iterator`.
    if iterator is not None:
        for _, _, it in iterator:
            maybe_distributed_data_batch = next(it)
            has_distributed_values = tree.map_structure(
                lambda x: isinstance(x, tf.distribute.DistributedValues),
                maybe_distributed_data_batch,
            )
            if all(tree.flatten(has_distributed_values)):
                # Collapse per-replica values into one batch for building.
                data_batch = self.distribute_strategy.reduce(
                    "MEAN",
                    maybe_distributed_data_batch,
                    axis=None,
                )
            else:
                data_batch = maybe_distributed_data_batch
            # Only the first batch is needed to infer the structure.
            break
    with self.distribute_strategy.scope():
        self._symbolic_build(data_batch=data_batch)
def _aggregate_additional_loss(self, loss):
    """Aggregate auxiliary losses, then scale them for distribution."""
    loss = super()._aggregate_additional_loss(loss)
    # Scale so the per-replica contributions combine correctly (see
    # `loss_module.scale_loss_for_distribution`).
    return loss_module.scale_loss_for_distribution(loss)
class TFEpochIterator(EpochIterator):
    """`EpochIterator` that distributes its tf.data pipeline across replicas."""

    def __init__(self, distribute_strategy=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._distribute_strategy = distribute_strategy
        dataset = self.data_adapter.get_tf_dataset()
        # Distribute the dataset unless the caller already did so.
        if not isinstance(dataset, tf.distribute.DistributedDataset):
            dataset = self._distribute_strategy.experimental_distribute_dataset(
                dataset
            )
        self._distributed_dataset = dataset

    def _get_iterator(self):
        # The distributed dataset is itself iterable; the base class
        # obtains fresh iterators from it.
        return self._distributed_dataset

    def tf_sync(self):
        # Block until all pending async TF ops have finished, so errors
        # surface inside `catch_stop_iteration`.
        tf_context.async_wait()

    def __next__(self):
        return next(self._epoch_iterator)

    @contextlib.contextmanager
    def catch_stop_iteration(self):
        """Catches errors when an iterator runs out of data."""
        with super().catch_stop_iteration():
            try:
                yield
                self.tf_sync()
            except tf.errors.OutOfRangeError:
                # Normalize TF's end-of-data error to plain StopIteration.
                raise StopIteration
def reduce_per_replica(values, strategy, reduction):
    """Attempt to reduce the structure `values` to single values.

    Given `values` (a `tf.Tensor` or a `PerReplica` structure),
    which represents the values across all the replicas, `reduce_per_replica`
    attempts to "reduce" those values and returns the corresponding structure
    that represents only single values.

    Currently, `reduce_per_replica` is only used for reducing the metric results
    from `tf.distribute.Strategy.run()`. Depending on the underlying
    `Strategy` implementation, `values` may be a `PerReplica` object,
    which can be thought of as a collection of values across the replicas,
    or a `tf.Tensor`, if the strategy has already conducted the reduction
    for the downstream library.

    There are five possible outcomes of reduction:

    1) if the `values` is a structure of simple `tf.Tensor`s, meaning that
       reduction is not actually needed, `reduce_per_replica` returns the
       structure as-is.
    2) else, if `reduction="auto"`, then the best reduction strategy is
       chosen based on the current environment. This should only be used
       for training cases (`fit()`).
    3) else, if `reduction="first"`, then `reduce_per_replica`
       returns the values of the first replica. This is used in the case of
       training and evaluation, where `values` is expected to hold the same
       value across the replicas as a result of `Strategy`'s synchronization
       across the replicas.
       `reduce_per_replica` does not synchronize the values.
    4) else, if `reduction="sum"`, then `reduce_per_replica` returns the sum
       of values for all replicas. This may be used in the custom training loop
       case, where each replica contain different values which are not
       synchronized.
    5) else, if `reduction="concat"`, then `reduce_per_replica`
       returns the concatenation of the values across the replicas, along the
       axis of dimension 0. This is used in the inference case (`predict()`).

    Args:
        values: Structure of `PerReplica` objects or `tf.Tensor`s.
            `tf.Tensor`s are returned as-is.
        strategy: `tf.distribute.Strategy` object.
        reduction: One of `"auto"`, `"first"`, `"concat"`, `"mean"`, or `"sum"`.
            `"auto"` will select `"first"` when used under a TPUStrategy, or
            `"mean"` otherwise.

    Returns:
        Structure of `Tensor`s, representing the result of reduction.
    """
    if reduction == "auto":
        # TPU replicas are kept in sync, so the first replica's value is
        # representative; elsewhere, average across replicas.
        if isinstance(strategy, tf.distribute.TPUStrategy):
            reduction = "first"
        else:
            reduction = "mean"

    def _reduce(v):
        """Reduce a single `PerReplica` object."""
        # Multi-worker collective strategies are handled first: they need
        # cross-worker communication even for plain tensors.
        if _collective_all_reduce_multi_worker(strategy):
            if reduction == "concat":
                return _multi_worker_concat(v, strategy)
            elif reduction == "sum":
                return strategy.reduce("SUM", v)
            elif reduction == "mean":
                return strategy.reduce("MEAN", v, axis=0)

        if not _is_per_replica_instance(v):
            # Already a single tensor: nothing to reduce (outcome 1).
            return v
        elif reduction == "first":
            return strategy.experimental_local_results(v)[0]
        elif reduction == "concat":
            if _is_tpu_multi_host(strategy):
                return _tpu_multi_host_concat(v, strategy)
            else:
                return concat(strategy.experimental_local_results(v))
        elif reduction == "sum":
            return tf.reduce_sum(strategy.experimental_local_results(v))
        elif reduction == "mean":
            return tf.reduce_mean(
                strategy.experimental_local_results(v), axis=0
            )
        else:
            raise ValueError(
                "`reduction` must be one of "
                '"first", "concat", "mean", "sum", or "auto". '
                f"Received: reduction={reduction}."
            )

    return tree.map_structure(_reduce, values)
def _multi_worker_concat(v, strategy):
"""Order PerReplica objects for CollectiveAllReduceStrategy and concat."""
replicas = strategy.gather(v, axis=0)
# v might not have the same shape on different replicas
if _is_per_replica_instance(v):
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/tensorflow/name_scope_test.py | keras/src/backend/tensorflow/name_scope_test.py | import tensorflow as tf
from keras.src.backend.tensorflow.core import name_scope
from keras.src.testing import TestCase
class TFNameScopeTest(TestCase):
    def test_stacking(self):
        """Nested `name_scope`s prefix TF variable names hierarchically."""
        self.assertEqual(tf.Variable(0, name="x").name, "x:0")
        with name_scope("outer") as outer:
            self.assertEqual(outer.name, "outer")
            self.assertEqual(tf.Variable(0, name="x").name, "outer/x:0")
            with name_scope("middle") as middle:
                self.assertEqual(middle.name, "middle")
                self.assertEqual(
                    tf.Variable(0, name="x").name, "outer/middle/x:0"
                )
                with name_scope("inner") as inner:
                    self.assertEqual(inner.name, "inner")
                    self.assertEqual(
                        tf.Variable(0, name="x").name, "outer/middle/inner/x:0"
                    )
                # Leaving a scope restores the enclosing prefix.
                self.assertEqual(
                    tf.Variable(0, name="x").name, "outer/middle/x:0"
                )
            self.assertEqual(tf.Variable(0, name="x").name, "outer/x:0")
        self.assertEqual(tf.Variable(0, name="x").name, "x:0")

    def test_deduplicate(self):
        """Adjacent identical scopes from the same `caller` collapse to one."""
        self.assertEqual(tf.Variable(0, name="x").name, "x:0")
        with name_scope("name", caller=1):
            with name_scope("name", caller=1):
                # Same name + same caller: deduplicated to a single level.
                self.assertEqual(tf.Variable(0, name="x").name, "name/x:0")
        self.assertEqual(tf.Variable(0, name="x").name, "x:0")
        with name_scope("name"):
            with name_scope("name"):
                # Without `caller`, nesting is preserved.
                self.assertEqual(tf.Variable(0, name="x").name, "name/name/x:0")
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/tensorflow/random.py | keras/src/backend/tensorflow/random.py | import tensorflow as tf
from keras.src.backend.common import standardize_dtype
from keras.src.backend.config import floatx
from keras.src.random.seed_generator import SeedGenerator
from keras.src.random.seed_generator import draw_seed
from keras.src.random.seed_generator import make_default_seed
def _cast_seed(seed):
    """Cast a seed tensor into the int32 range expected by stateless RNG ops.

    `SeedGenerator` stores seeds in int64 variables (a TF device placement
    constraint), but `tf.random.stateless_*` ops require int32 seeds to run
    under XLA. `floormod` maps any wider value into int32 range.
    Ref: https://www.tensorflow.org/api_docs/python/tf/random
    """
    if standardize_dtype(seed.dtype) != "int32":
        seed = tf.cast(tf.math.floormod(seed, tf.int32.max - 1), dtype="int32")
    return seed
def normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
    """Sample i.i.d. values from a normal distribution N(mean, stddev**2)."""
    if dtype is None:
        dtype = floatx()
    rng_seed = _cast_seed(draw_seed(seed))
    return tf.random.stateless_normal(
        shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=rng_seed
    )
def uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
    """Sample i.i.d. values uniformly from `[minval, maxval)`."""
    if dtype is None:
        dtype = floatx()
    rng_seed = _cast_seed(draw_seed(seed))
    low = tf.cast(minval, dtype)
    high = tf.cast(maxval, dtype)
    return tf.random.stateless_uniform(
        shape=shape, minval=low, maxval=high, dtype=dtype, seed=rng_seed
    )
def categorical(logits, num_samples, dtype="int64", seed=None):
    """Draw `num_samples` class indices per row of unnormalized `logits`."""
    rng_seed = _cast_seed(draw_seed(seed))
    samples = tf.random.stateless_categorical(logits, num_samples, seed=rng_seed)
    return tf.cast(samples, dtype)
def randint(shape, minval, maxval, dtype="int32", seed=None):
    """Sample uniform random integers in `[minval, maxval)`."""
    # `stateless_uniform` only supports int32/int64 integer sampling, so
    # sample in int64 for other integer dtypes and cast afterwards.
    sampling_dtype = (
        dtype if standardize_dtype(dtype) in ("int32", "int64") else "int64"
    )
    rng_seed = _cast_seed(draw_seed(seed))
    samples = tf.random.stateless_uniform(
        shape=shape,
        minval=minval,
        maxval=maxval,
        dtype=sampling_dtype,
        seed=rng_seed,
    )
    return tf.cast(samples, dtype)
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
    """Sample from a normal distribution truncated to two standard deviations."""
    if dtype is None:
        dtype = floatx()
    rng_seed = _cast_seed(draw_seed(seed))
    return tf.random.stateless_truncated_normal(
        shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=rng_seed
    )
def _get_concrete_noise_shape(inputs, noise_shape):
    """Resolve `noise_shape`, replacing `None` entries with input dimensions."""
    input_shape = tf.shape(inputs)
    if noise_shape is None:
        return input_shape
    return [
        input_shape[axis] if dim is None else dim
        for axis, dim in enumerate(noise_shape)
    ]
def dropout(inputs, rate, noise_shape=None, seed=None):
    """Randomly zero out elements of `inputs` with probability `rate`."""
    rng_seed = _cast_seed(draw_seed(seed))
    concrete_shape = _get_concrete_noise_shape(inputs, noise_shape)
    return tf.nn.experimental.stateless_dropout(
        inputs, rate=rate, noise_shape=concrete_shape, seed=rng_seed
    )
def shuffle(x, axis=0, seed=None):
    """Randomly permute `x` along `axis`."""
    rng_seed = _cast_seed(draw_seed(seed))
    # Argsorting i.i.d. uniform noise yields a uniformly random permutation.
    axis_length = tf.shape(x)[axis]
    permutation = tf.argsort(
        tf.random.stateless_uniform(shape=[axis_length], seed=rng_seed)
    )
    return tf.gather(x, permutation, axis=axis)
def gamma(shape, alpha, dtype=None, seed=None):
    """Sample from a Gamma distribution with concentration `alpha`."""
    if dtype is None:
        dtype = floatx()
    rng_seed = _cast_seed(draw_seed(seed))
    # TODO: `tf.random.stateless_gamma` doesn't support bfloat16
    sampling_dtype = dtype
    if standardize_dtype(dtype) == "bfloat16":
        sampling_dtype = "float32"
    samples = tf.random.stateless_gamma(
        shape,
        alpha=alpha,
        dtype=sampling_dtype,
        seed=rng_seed,
    )
    return tf.cast(samples, dtype)
def binomial(shape, counts, probabilities, dtype=None, seed=None):
    """Sample from a Binomial distribution with `counts` trials."""
    if dtype is None:
        dtype = floatx()
    rng_seed = _cast_seed(draw_seed(seed))
    # TODO: `tf.random.stateless_binomial` doesn't support bfloat16
    sampling_dtype = dtype
    if standardize_dtype(dtype) == "bfloat16":
        sampling_dtype = "float32"
    samples = tf.random.stateless_binomial(
        shape=shape,
        seed=rng_seed,
        counts=counts,
        probs=probabilities,
        output_dtype=sampling_dtype,
    )
    return tf.cast(samples, dtype)
def beta(shape, alpha, beta, dtype=None, seed=None):
    """Sample from a Beta(alpha, beta) distribution via two Gamma draws."""
    dtype = dtype or floatx()
    # since tensorflow doesn't offer a beta distribution function
    # so we'll use the formula U(a,b) = (X(a) / (X(a) + Y(b)),
    # where U(a,b) is a beta-distributed random variable with
    # parameters a and b, and X(a) and Y(b) are gamma-distributed
    # random variables with parameters a and b respectively.

    # Additionally, we'll use two different seeds for our two
    # gamma random variables to prevent any unintended
    # dependencies and correlations between the generated values
    # due to the usage of same seed.
    seed_1 = _cast_seed(draw_seed(seed))
    # The choice of 12 is totally arbitrary, as we're
    # incrementing the first drawn seed by a CONSTANT to
    # ensure deterministic results.
    seed_2 = seed_1 + 12

    # TODO: `tf.random.stateless_gamma` doesn't support bfloat16
    intermediate_dtype = dtype
    if standardize_dtype(dtype) == "bfloat16":
        intermediate_dtype = "float32"
    alpha = tf.convert_to_tensor(alpha, dtype=intermediate_dtype)
    beta = tf.convert_to_tensor(beta, dtype=intermediate_dtype)

    # tensorflow's tf.random.stateless_gamma has a bit of unconventional
    # implementation of the stateless_gamma function where it checks the
    # broadcastability of alpha's shape with ONLY the RIGHTMOST dimension of
    # the specified output shape instead of considering the whole.
    # Consequently, it then results in errors for perfectly broadcastable shapes
    # such as for output shape of (2, 3) and alpha shape of (1, 3)
    # So to resolve this, we explicitly broadcast alpha and beta to shape before
    # passing them to the stateless_gamma function.
    alpha = tf.broadcast_to(alpha, shape)
    beta = tf.broadcast_to(beta, shape)

    gamma_a = tf.cast(
        tf.random.stateless_gamma(
            shape=shape, seed=seed_1, alpha=alpha, dtype=intermediate_dtype
        ),
        dtype,
    )
    gamma_b = tf.cast(
        tf.random.stateless_gamma(
            shape=shape, seed=seed_2, alpha=beta, dtype=intermediate_dtype
        ),
        dtype,
    )
    # Beta sample = X / (X + Y) for independent Gamma draws X, Y.
    sample = gamma_a / (gamma_a + gamma_b)
    return sample
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/tensorflow/saved_model_test.py | keras/src/backend/tensorflow/saved_model_test.py | """Tests for SavedModel functionality under tf implementation."""
import os
import numpy as np
import pytest
import tensorflow as tf
from absl.testing import parameterized
from keras.src import backend
from keras.src import layers
from keras.src import metrics
from keras.src import models
from keras.src import ops
from keras.src import optimizers
from keras.src import testing
from keras.src.saving import object_registration
from keras.src.testing.test_utils import named_product
@object_registration.register_keras_serializable(package="my_package")
class CustomModelX(models.Model):
    """Minimal subclassed model used to exercise SavedModel round-trips."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.dense1 = layers.Dense(1)
        self.dense2 = layers.Dense(1)

    def call(self, inputs):
        out = self.dense1(inputs)
        return self.dense2(out)

    def one(self):
        # Extra non-Keras method; presumably here to check that custom
        # attributes survive the save/load cycle — confirm against tests.
        return 1
@object_registration.register_keras_serializable(package="my_package")
class CustomSignatureModel(models.Model):
    """Model exposing `tf.function` methods for SavedModel signature tests."""

    def __init__(self):
        # Modernized: zero-argument `super()` is equivalent to the legacy
        # `super(CustomSignatureModel, self)` form used elsewhere in the file.
        super().__init__()
        self.v = tf.Variable(1.0)

    @tf.function
    def __call__(self, x):
        return x * self.v

    @tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
    def mutate(self, new_v):
        # In-place assignment so the change is reflected in the saved variable.
        self.v.assign(new_v)
@pytest.mark.skipif(
backend.backend() != "tensorflow",
reason="The SavedModel test can only run with TF backend.",
)
class SavedModelTest(testing.TestCase):
def test_sequential(self):
    """Round-trip a fitted Sequential model through `tf.saved_model`."""
    model = models.Sequential([layers.Dense(1)])
    model.compile(loss="mse", optimizer="adam")
    X_train = np.random.rand(100, 3)
    y_train = np.random.rand(100, 1)
    model.fit(X_train, y_train)
    path = os.path.join(self.get_temp_dir(), "my_keras_model")
    tf.saved_model.save(model, path)
    restored_model = tf.saved_model.load(path)
    # The restored serving signature must reproduce the live model's outputs.
    self.assertAllClose(
        model(X_train),
        restored_model.signatures["serving_default"](
            tf.convert_to_tensor(X_train, dtype=tf.float32)
        )["output_0"],
        rtol=1e-4,
        atol=1e-4,
    )
def test_functional(self):
    """Round-trip a fitted functional model through `tf.saved_model`."""
    inputs = layers.Input(shape=(3,))
    x = layers.Dense(1, name="first_dense")(inputs)
    outputs = layers.Dense(1, name="second_dense")(x)
    model = models.Model(inputs, outputs)
    model.compile(
        optimizer="adam",
        loss="mse",
    )
    X_train = np.random.rand(100, 3)
    y_train = np.random.rand(100, 1)
    model.fit(X_train, y_train)
    path = os.path.join(self.get_temp_dir(), "my_keras_model")
    tf.saved_model.save(model, path)
    restored_model = tf.saved_model.load(path)
    # The restored serving signature must reproduce the live model's outputs.
    self.assertAllClose(
        model(X_train),
        restored_model.signatures["serving_default"](
            tf.convert_to_tensor(X_train, dtype=tf.float32)
        )["output_0"],
        rtol=1e-4,
        atol=1e-4,
    )
def test_subclassed(self):
    """Round-trip a fitted subclassed model (with metrics) through SavedModel."""
    model = CustomModelX()
    model.compile(
        optimizer="adam",
        loss="mse",
        metrics=[metrics.Hinge(), "mse"],
    )
    X_train = np.random.rand(100, 3)
    y_train = np.random.rand(100, 1)
    model.fit(X_train, y_train)
    path = os.path.join(self.get_temp_dir(), "my_keras_model")
    tf.saved_model.save(model, path)
    restored_model = tf.saved_model.load(path)
    # The restored serving signature must reproduce the live model's outputs.
    self.assertAllClose(
        model(X_train),
        restored_model.signatures["serving_default"](
            tf.convert_to_tensor(X_train, dtype=tf.float32)
        )["output_0"],
        rtol=1e-4,
        atol=1e-4,
    )
def test_custom_model_and_layer(self):
    """SavedModel preserves a custom layer inside a `tf.function` call."""

    @object_registration.register_keras_serializable(package="my_package")
    class CustomLayer(layers.Layer):
        # NOTE(review): overrides `__call__` directly (not `call`),
        # bypassing Keras call bookkeeping — presumably deliberate for
        # this test; confirm.
        def __call__(self, inputs):
            return inputs

    @object_registration.register_keras_serializable(package="my_package")
    class Model(models.Model):
        def __init__(self):
            super().__init__()
            self.layer = CustomLayer()

        @tf.function(input_signature=[tf.TensorSpec([None, 1])])
        def call(self, inputs):
            return self.layer(inputs)

    model = Model()
    inp = np.array([[1.0]])
    result = model(inp)
    path = os.path.join(self.get_temp_dir(), "my_keras_model")
    tf.saved_model.save(model, path)
    restored_model = tf.saved_model.load(path)
    # The restored `call` concrete function must match the live output.
    self.assertAllClose(
        result,
        restored_model.call(inp),
        rtol=1e-4,
        atol=1e-4,
    )
@parameterized.named_parameters(
    named_product(struct_type=["tuple", "array", "dict"])
)
def test_model_with_input_structure(self, struct_type):
    """Save/load models whose call input is a tuple, list, or dict."""

    class TupleModel(models.Model):
        def call(self, inputs):
            x, y = inputs
            return x + ops.mean(y, axis=1)

    class ArrayModel(models.Model):
        def call(self, inputs):
            x = inputs[0]
            y = inputs[1]
            return x + ops.mean(y, axis=1)

    class DictModel(models.Model):
        def call(self, inputs):
            x = inputs["x"]
            y = inputs["y"]
            return x + ops.mean(y, axis=1)

    input_x = tf.constant([1.0])
    input_y = tf.constant([[1.0, 0.0, 2.0]])
    # Dispatch table replaces the if/elif chain: each case pairs the model
    # class with the matching input structure.
    cases = {
        "tuple": (TupleModel, (input_x, input_y)),
        "array": (ArrayModel, [input_x, input_y]),
        "dict": (DictModel, {"x": input_x, "y": input_y}),
    }
    model_cls, inputs = cases[struct_type]
    model = model_cls()
    result = model(inputs)
    path = os.path.join(self.get_temp_dir(), "my_keras_model")
    tf.saved_model.save(model, path)
    restored_model = tf.saved_model.load(path)
    outputs = restored_model.signatures["serving_default"](
        inputs=input_x, inputs_1=input_y
    )
    self.assertAllClose(result, outputs["output_0"], rtol=1e-4, atol=1e-4)
def test_multi_input_model(self):
    """Save/load a two-input functional model with a shared output layer.

    The restored SavedModel's `serving_default` signature must reproduce
    both outputs of the in-memory model.
    """
    input_1 = layers.Input(shape=(3,))
    input_2 = layers.Input(shape=(5,))
    y1 = layers.Dense(1)(input_1)
    y2 = layers.Dense(1)(input_2)
    # Shared layer applied to both branches.
    layer_2 = layers.Dense(1, activation="relu")
    output_1 = layer_2(y1)
    output_2 = layer_2(y2)
    # Build the model once (the original test constructed the identical
    # model twice; the first construction was dead code).
    model = models.Model([input_1, input_2], [output_1, output_2])
    input_arr_1 = np.random.random((1, 3)).astype("float32")
    input_arr_2 = np.random.random((1, 5)).astype("float32")
    path = os.path.join(self.get_temp_dir(), "my_keras_model")
    outputs_1 = model(
        inputs=[
            tf.convert_to_tensor(input_arr_1, dtype=tf.float32),
            tf.convert_to_tensor(input_arr_2, dtype=tf.float32),
        ],
    )
    tf.saved_model.save(model, path)
    restored_model = tf.saved_model.load(path)
    outputs_2 = restored_model.signatures["serving_default"](
        inputs=tf.convert_to_tensor(input_arr_1, dtype=tf.float32),
        inputs_1=tf.convert_to_tensor(input_arr_2, dtype=tf.float32),
    )
    self.assertAllClose(
        outputs_1[0], outputs_2["output_0"], rtol=1e-4, atol=1e-4
    )
    self.assertAllClose(
        outputs_1[1], outputs_2["output_1"], rtol=1e-4, atol=1e-4
    )
def test_multi_input_custom_model_and_layer(self):
    """Save/load a custom model whose layer takes multiple positional
    tensor inputs and registers a loss via `add_loss`."""

    @object_registration.register_keras_serializable(package="my_package")
    class CustomLayer(layers.Layer):
        def build(self, *input_shape):
            pass

        def call(self, *input_list):
            # Attach a loss derived from the second-to-last input so loss
            # tracking is exercised through save/restore.
            self.add_loss(input_list[-2] * 2)
            return sum(input_list)

    @object_registration.register_keras_serializable(package="my_package")
    class CustomModel(models.Model):
        def build(self, *input_shape):
            self.layer = CustomLayer()
            self.layer.build(*input_shape)

        @tf.function
        def call(self, *inputs):
            inputs = list(inputs)
            return self.layer(*inputs)

    model = CustomModel()
    # Three [1, 1] float32 tensors holding 1.0, 2.0 and 3.0.
    inp = [
        tf.constant(i, shape=[1, 1], dtype=tf.float32) for i in range(1, 4)
    ]
    expected = model(*inp)
    path = os.path.join(self.get_temp_dir(), "my_keras_model")
    tf.saved_model.save(model, path)
    restored_model = tf.saved_model.load(path)
    output = restored_model.call(*inp)
    self.assertAllClose(expected, output, rtol=1e-4, atol=1e-4)
def test_list_trackable_children_tracking(self):
    """Sublayers stored in a plain Python list must be tracked as children
    and survive SavedModel save/load."""

    @object_registration.register_keras_serializable(package="my_package")
    class CustomLayerList(layers.Layer):
        def __init__(self):
            super().__init__()
            # Sublayers held in a list; Keras must track them.
            self.sublayers = [
                layers.Dense(2),
                layers.Dense(2),
            ]

        def call(self, inputs):
            x = inputs
            for sublayer in self.sublayers:
                x = sublayer(x)
            return x

    inputs = layers.Input(shape=(1,))
    outputs = CustomLayerList()(inputs)
    model = models.Model(inputs, outputs)
    inp = np.array([[1.0]])
    expected = model(inp)
    path = os.path.join(self.get_temp_dir(), "my_keras_model")
    tf.saved_model.save(model, path)
    restored_model = tf.saved_model.load(path)
    self.assertAllClose(
        expected,
        restored_model.signatures["serving_default"](
            tf.convert_to_tensor(inp, dtype=tf.float32)
        )["output_0"],
        rtol=1e-4,
        atol=1e-4,
    )
def test_dict_trackable_children_tracking(self):
    """Sublayers stored in a plain Python dict must be tracked as children
    and survive SavedModel save/load."""

    @object_registration.register_keras_serializable(package="my_package")
    class CustomLayerDict(layers.Layer):
        def __init__(self):
            super().__init__()
            # Sublayers held in a dict; Keras must track the values.
            self.sublayers = {
                "first_layer": layers.Dense(2),
                "second_layer": layers.Dense(2),
            }

        def call(self, inputs):
            x = inputs
            # The keys are not needed here; iterate the values directly
            # (the original unpacked and ignored the key).
            for sublayer in self.sublayers.values():
                x = sublayer(x)
            return x

    inputs = layers.Input(shape=(1,))
    outputs = CustomLayerDict()(inputs)
    model = models.Model(inputs, outputs)
    inp = np.array([[1.0]])
    expected = model(inp)
    path = os.path.join(self.get_temp_dir(), "my_keras_model")
    tf.saved_model.save(model, path)
    restored_model = tf.saved_model.load(path)
    self.assertAllClose(
        expected,
        restored_model.signatures["serving_default"](
            tf.convert_to_tensor(inp, dtype=tf.float32)
        )["output_0"],
        rtol=1e-4,
        atol=1e-4,
    )
def test_fixed_signature_string_dtype(self):
    """A tf.function with a fixed string input signature restores as a
    directly callable method."""

    @object_registration.register_keras_serializable(package="my_package")
    class Adder(models.Model):
        @tf.function(
            input_signature=[tf.TensorSpec(shape=[], dtype=tf.string)]
        )
        def concat(self, x):
            return x + x

    model = Adder()
    save_dir = os.path.join(self.get_temp_dir(), "my_keras_model")
    tf.saved_model.save(model, save_dir)
    reloaded = tf.saved_model.load(save_dir)
    self.assertEqual(model.concat("hello"), reloaded.concat("hello"))
def test_non_fixed_signature_string_dtype(self):
    """A tf.function without an input signature is not traced for string
    inputs at save time; the restored method fails unless an explicit
    concrete function is exported as a signature."""

    @object_registration.register_keras_serializable(package="my_package")
    class Adder(models.Model):
        @tf.function
        def concat(self, x):
            return x + x

    model = Adder()
    # Without a traced concrete function, calling the restored `concat`
    # with a string input raises.
    no_fn_path = os.path.join(self.get_temp_dir(), "my_keras_model_no_fn")
    tf.saved_model.save(model, no_fn_path)
    restored_model = tf.saved_model.load(no_fn_path)
    with self.assertRaisesRegex(ValueError, "zero restored functions"):
        _ = restored_model.concat("hello")
    # Exporting an explicit concrete function makes the call succeed.
    path = os.path.join(self.get_temp_dir(), "my_keras_model")
    tf.saved_model.save(
        model,
        path,
        signatures=model.concat.get_concrete_function(
            tf.TensorSpec(shape=[], dtype=tf.string, name="string_input")
        ),
    )
    restored_model = tf.saved_model.load(path)
    self.assertEqual(model.concat("hello"), restored_model.concat("hello"))
def test_fine_tuning(self):
    """Fine-tune a restored SavedModel: gradients flow through the restored
    variables and SGD drives the loss toward zero."""
    model = CustomSignatureModel()
    model_no_signatures_path = os.path.join(
        self.get_temp_dir(), "model_no_signatures"
    )
    # Trace the model once so a concrete function exists to save.
    _ = model(tf.constant(0.0))
    tf.saved_model.save(model, model_no_signatures_path)
    restored_model = tf.saved_model.load(model_no_signatures_path)
    self.assertLen(list(restored_model.signatures.keys()), 0)
    self.assertEqual(restored_model(tf.constant(3.0)).numpy(), 3)
    # `mutate` updates the restored variable; the output scales with it.
    restored_model.mutate(tf.constant(2.0))
    self.assertEqual(restored_model(tf.constant(3.0)).numpy(), 6)
    optimizer = optimizers.SGD(0.05)

    def train_step():
        # Minimize (10 - model(2))^2 over the variables the tape watched.
        with tf.GradientTape() as tape:
            loss = (10.0 - restored_model(tf.constant(2.0))) ** 2
        variables = tape.watched_variables()
        grads = tape.gradient(loss, variables)
        optimizer.apply_gradients(zip(grads, variables))
        return loss

    for _ in range(10):
        # "v" approaches 5, "loss" approaches 0
        loss = train_step()
    self.assertAllClose(loss, 0.0, rtol=1e-2, atol=1e-2)
    self.assertAllClose(restored_model.v.numpy(), 5.0, rtol=1e-2, atol=1e-2)
def test_signatures_path(self):
    """Saving with a single explicit concrete function yields exactly one
    'serving_default' signature on the restored model."""
    model = CustomSignatureModel()
    export_dir = os.path.join(
        self.get_temp_dir(), "model_with_signature"
    )
    concrete_fn = model.__call__.get_concrete_function(
        tf.TensorSpec(None, tf.float32)
    )
    tf.saved_model.save(model, export_dir, signatures=concrete_fn)
    reloaded = tf.saved_model.load(export_dir)
    self.assertEqual(list(reloaded.signatures.keys()), ["serving_default"])
def test_multiple_signatures_dict_path(self):
    """Saving with a dict of signatures preserves every entry by key."""
    model = CustomSignatureModel()
    export_dir = os.path.join(
        self.get_temp_dir(), "model_with_multiple_signatures"
    )
    scalar_call = model.__call__.get_concrete_function(
        tf.TensorSpec(None, tf.float32)
    )
    vector_call = model.__call__.get_concrete_function(
        tf.TensorSpec([None], tf.float32)
    )
    signatures = {
        "serving_default": scalar_call,
        "array_input": vector_call,
    }
    tf.saved_model.save(model, export_dir, signatures=signatures)
    reloaded = tf.saved_model.load(export_dir)
    self.assertEqual(
        list(reloaded.signatures.keys()),
        ["serving_default", "array_input"],
    )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/tensorflow/__init__.py | keras/src/backend/tensorflow/__init__.py | from keras.src.backend.tensorflow import core
from keras.src.backend.tensorflow import distribution_lib
from keras.src.backend.tensorflow import image
from keras.src.backend.tensorflow import linalg
from keras.src.backend.tensorflow import math
from keras.src.backend.tensorflow import nn
from keras.src.backend.tensorflow import numpy
from keras.src.backend.tensorflow import random
from keras.src.backend.tensorflow import tensorboard
from keras.src.backend.tensorflow.core import IS_THREAD_SAFE
from keras.src.backend.tensorflow.core import SUPPORTS_RAGGED_TENSORS
from keras.src.backend.tensorflow.core import SUPPORTS_SPARSE_TENSORS
from keras.src.backend.tensorflow.core import Variable
from keras.src.backend.tensorflow.core import cast
from keras.src.backend.tensorflow.core import compute_output_spec
from keras.src.backend.tensorflow.core import cond
from keras.src.backend.tensorflow.core import convert_to_numpy
from keras.src.backend.tensorflow.core import convert_to_tensor
from keras.src.backend.tensorflow.core import device_scope
from keras.src.backend.tensorflow.core import is_tensor
from keras.src.backend.tensorflow.core import name_scope
from keras.src.backend.tensorflow.core import random_seed_dtype
from keras.src.backend.tensorflow.core import scatter
from keras.src.backend.tensorflow.core import shape
from keras.src.backend.tensorflow.core import stop_gradient
from keras.src.backend.tensorflow.core import vectorized_map
from keras.src.backend.tensorflow.rnn import cudnn_ok
from keras.src.backend.tensorflow.rnn import gru
from keras.src.backend.tensorflow.rnn import lstm
from keras.src.backend.tensorflow.rnn import rnn
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/tensorflow/sparse.py | keras/src/backend/tensorflow/sparse.py | import functools
import tensorflow as tf
# Convenience aliases: `tf.ones`/`tf.zeros`/`tf.ones_like`/`tf.zeros_like`
# pre-bound to the dtypes this module uses repeatedly (bool masks and int8
# indicator values).
ones_bool = functools.partial(tf.ones, dtype=tf.bool)
ones_int8 = functools.partial(tf.ones, dtype=tf.int8)
zeros_int8 = functools.partial(tf.zeros, dtype=tf.int8)
ones_like_int8 = functools.partial(tf.ones_like, dtype=tf.int8)
zeros_like_int8 = functools.partial(tf.zeros_like, dtype=tf.int8)
def sparse_to_dense(x, default_value=None):
    """Densify a `tf.SparseTensor`, preserving its static shape.

    Rank-0 sparse tensors are handled separately to work around a GPU bug
    in `tf.sparse.to_dense` for scalars.
    """
    static_shape = x.shape
    if static_shape.rank == 0:
        # Workaround for bug on GPU when sparse tensor represents a scalar.
        if x.values.shape[0] == 0:
            return tf.constant(default_value, dtype=x.dtype)
        return tf.reshape(x.values, ())
    dense = tf.sparse.to_dense(x, default_value=default_value)
    dense.set_shape(static_shape)
    return dense
def sparse_with_values(x, values):
    """Return a `tf.SparseTensor` with `x`'s indices but new `values`,
    keeping the static shape of `x`."""
    static_shape = x.shape
    result = tf.SparseTensor(x.indices, values, x.dense_shape)
    result.set_shape(static_shape)
    return result
def broadcast_scalar_to_sparse_shape(scalar, sparse):
    """Broadcast `scalar` to a dense tensor with `sparse`'s shape."""
    dense = tf.broadcast_to(scalar, sparse.dense_shape)
    dense.set_shape(sparse.shape)
    return dense
def sparse_subtract(x1, x2):
    """Subtraction supporting `tf.SparseTensor` operands.

    Either `x1` or `x2` or both can be `tf.SparseTensor`s.

    Args:
        x1: tensor to subtract from.
        x2: tensor to subtract.

    Returns:
        `x1 - x2`, which is a `tf.SparseTensor` if and only if both `x1`
        and `x2` are `tf.SparseTensor`s.
    """
    # Negate x2 in whichever representation it has, then add.
    if isinstance(x2, tf.SparseTensor):
        negated = tf.sparse.map_values(tf.negative, x2)
    else:
        negated = tf.negative(x2)
    return tf.sparse.add(x1, negated)
def sparse_union_indices_and_values(x1, x2_indices, x2_values=None):
    """Compute the indices for the union of the indices of the provided
    `tf.SparseTensor` and another set of indices, and return the modified
    values for these indices.

    Args:
        x1: a `tf.SparseTensor`.
        x2_indices: another set of indices in the `tf.SparseTensor` format.
        x2_values: optional values for the `x2_indices` indices.

    Returns: A tuple containing:
        - the indices for the union
        - `x1` values for the union indices (some zeros were added)
        - `x2` values for the union indices (some zeros were added) or
          `None` if `x2_values` was `None`.
    """
    # Add zeros at the x2 indices to x1 to create the union.
    zeros2 = tf.SparseTensor(
        x2_indices,
        tf.zeros((tf.shape(x2_indices)[0],), x1.values.dtype),
        x1.dense_shape,
    )
    x1_for_union = tf.sparse.add(x1, zeros2)
    if x2_values is not None:
        # Add zeros at the x1 indices to x2 to create the union.
        x2 = tf.SparseTensor(x2_indices, x2_values, x1.dense_shape)
        zeros1 = tf.sparse.map_values(tf.zeros_like, x1)
        x2_for_union = tf.sparse.add(x2, zeros1)
        return x1_for_union.indices, x1_for_union.values, x2_for_union.values
    else:
        return x1_for_union.indices, x1_for_union.values, None
def indexed_slices_union_indices_and_values(x1, x2_indices, x2_values=None):
    """Compute the indices for the union of two `tf.IndexedSlices` and modify
    the values for these indices.

    Args:
        x1: the first `tf.IndexedSlices`.
        x2_indices: the indices for the second `tf.IndexedSlices`.
        x2_values: (optional) the values for the second `tf.IndexedSlices`.

    Returns: A tuple containing:
        - the indices for the union
        - `x1` values for the union indices (some zeros were added)
        - `x2` values for the union indices (some zeros were added) or
          `None` if `x2_values` was `None`.
    """
    # Compute the union of the indices by doing a logical or between the
    # one-hot encoded indices for x1 and x2.
    dim_0 = x1.dense_shape[0]
    x1_indices_expanded = tf.expand_dims(x1.indices, axis=1)
    x2_indices_expanded = tf.expand_dims(x2_indices, axis=1)
    x1_indices_count = tf.shape(x1_indices_expanded)[0]
    x2_indices_count = tf.shape(x2_indices_expanded)[0]
    x1_indices_one_hot = tf.scatter_nd(
        x1_indices_expanded,
        ones_bool((x1_indices_count,)),
        (dim_0,),
    )
    x2_indices_one_hot = tf.scatter_nd(
        x2_indices_expanded,
        ones_bool((x2_indices_count,)),
        (dim_0,),
    )
    union_indices = tf.squeeze(
        tf.where(tf.math.logical_or(x1_indices_one_hot, x2_indices_one_hot)),
        axis=-1,
    )
    union_indices_count = tf.shape(union_indices)[0]

    # Re-gather the values with extra zeros added at indices that are part of
    # the union but were not in x1 or x2.
    def values_for_union(indices_expanded, indices_count, values):
        # Map each original index to its 1-based position; unset positions
        # stay 0 and therefore point at the zero row prepended below.
        indices_indices = tf.scatter_nd(
            indices_expanded,
            tf.range(1, indices_count + 1),
            (dim_0,),
        )
        to_union_indices = tf.gather(indices_indices, union_indices)
        values_with_leading_zeros = tf.concat(
            [tf.zeros_like(values[0:1]), values], axis=0
        )
        return tf.gather(values_with_leading_zeros, to_union_indices)

    # Only recompute values if some indices were added.
    x1_values_for_union_indices = tf.cond(
        tf.equal(x1_indices_count, union_indices_count),
        lambda: x1.values,
        lambda: values_for_union(
            x1_indices_expanded, x1_indices_count, x1.values
        ),
    )
    if x2_values is not None:
        x2_values_for_union_indices = tf.cond(
            tf.equal(x2_indices_count, union_indices_count),
            lambda: x2_values,
            lambda: values_for_union(
                x2_indices_expanded, x2_indices_count, x2_values
            ),
        )
    else:
        x2_values_for_union_indices = None
    return (
        union_indices,
        x1_values_for_union_indices,
        x2_values_for_union_indices,
    )
def sparse_intersection_indices_and_values(x1, x2):
    """Compute the indices for the intersection of two `tf.SparseTensor`s and
    modify the values for these indices.

    Args:
        x1: the first `tf.SparseTensor`.
        x2: the second `tf.SparseTensor`.

    Returns: A tuple containing:
        - the indices for the intersection
        - `x1` values for the intersection indices (some values were removed)
        - `x2` values for the intersection indices (some values were removed)
    """
    # Compute the intersection of indices in the form of a sparse
    # tensor containing ones as values.
    ones1 = tf.sparse.map_values(ones_like_int8, x1)
    ones2 = tf.sparse.map_values(ones_like_int8, x2)
    # tf.sets.intersection ignores the last dimension, so we
    # need to add a dummy extra dimension and then remove it.
    intersection_extra_dim = tf.sets.intersection(
        tf.sparse.expand_dims(ones1, axis=-1),
        tf.sparse.expand_dims(ones2, axis=-1),
    )

    def empty_intersection():
        # No common indices: empty index/value tensors of the right dtypes.
        return (
            tf.zeros((0, x1.shape.rank), dtype=tf.int64),
            tf.zeros((0,), dtype=x1.values.dtype),
            tf.zeros((0,), dtype=x2.values.dtype),
        )

    def non_empty_intersection():
        intersection = tf.sparse.reshape(intersection_extra_dim, x1.dense_shape)
        # Compute the masks to remove indices in x1 and x2 that are not
        # in the intersection, then trim x1 and x2.
        zeros1 = tf.sparse.map_values(zeros_like_int8, x1)
        zeros2 = tf.sparse.map_values(zeros_like_int8, x2)
        mask1 = tf.sparse.add(zeros1, intersection)
        mask2 = tf.sparse.add(zeros2, intersection)
        return (
            intersection.indices,
            tf.sparse.retain(x1, tf.cast(mask1.values, tf.bool)).values,
            tf.sparse.retain(x2, tf.cast(mask2.values, tf.bool)).values,
        )

    return tf.cond(
        tf.equal(tf.size(intersection_extra_dim), 0),
        empty_intersection,
        non_empty_intersection,
    )
def indexed_slices_intersection_indices_and_values(x1, x2):
    """Compute the indices for the intersection of two `tf.IndexedSlices` and
    modify the values for these indices.

    Args:
        x1: the first `tf.IndexedSlices`.
        x2: the second `tf.IndexedSlices`.

    Returns: A tuple containing:
        - the indices for the intersection
        - `x1` values for the intersection indices (some values were removed)
        - `x2` values for the intersection indices (some values were removed)
    """
    # Compute the intersection of the indices by doing a logical
    # and between the one hot encoded indices for x1 and x2.
    dim_0 = x1.dense_shape[0]
    x1_indices_expanded = tf.expand_dims(x1.indices, axis=1)
    x2_indices_expanded = tf.expand_dims(x2.indices, axis=1)
    # NOTE(review): static (Python-time) counts here, unlike the union
    # variant which uses `tf.shape` — presumably the indices always have a
    # static first dimension at this call site; confirm.
    x1_indices_count = x1_indices_expanded.shape[0]
    x2_indices_count = x2_indices_expanded.shape[0]
    x1_indices_one_hot = tf.scatter_nd(
        x1_indices_expanded,
        ones_bool((x1_indices_count,)),
        (dim_0,),
    )
    x2_indices_one_hot = tf.scatter_nd(
        x2_indices_expanded,
        ones_bool((x2_indices_count,)),
        (dim_0,),
    )
    intersection_indices = tf.squeeze(
        tf.where(tf.math.logical_and(x1_indices_one_hot, x2_indices_one_hot)),
        axis=-1,
    )
    intersection_indices_count = tf.shape(intersection_indices)[0]

    def empty_intersection():
        # No common indices: empty values with the trailing dims preserved.
        return (
            intersection_indices,
            tf.zeros((0,) + x1.values.shape[1:], x1.dtype),
            tf.zeros((0,) + x2.values.shape[1:], x2.dtype),
        )

    def non_empty_intersection():
        # Re-gather sub parts of the values that are part of the intersection.
        def values_for_intersection(indices_expanded, indices_count, values):
            indices_indices = tf.scatter_nd(
                indices_expanded,
                tf.range(indices_count),
                (dim_0,),
            )
            to_intersection_indices = tf.gather(
                indices_indices, intersection_indices
            )
            return tf.gather(values, to_intersection_indices)

        # Only recompute values if some indices were removed.
        x1_values_for_intersection = tf.cond(
            tf.equal(x1_indices_count, intersection_indices_count),
            lambda: x1.values,
            lambda: values_for_intersection(
                x1_indices_expanded, x1_indices_count, x1.values
            ),
        )
        x2_values_for_intersection = tf.cond(
            tf.equal(x2_indices_count, intersection_indices_count),
            lambda: x2.values,
            lambda: values_for_intersection(
                x2_indices_expanded, x2_indices_count, x2.values
            ),
        )
        return (
            intersection_indices,
            x1_values_for_intersection,
            x2_values_for_intersection,
        )

    return tf.cond(
        tf.equal(intersection_indices_count, 0),
        empty_intersection,
        non_empty_intersection,
    )
def densifying_unary(default_value):
    """Decorator adding `tf.SparseTensor` and `tf.IndexedSlices` support to
    a non-zero-preserving element-wise unary operator.

    There are requirements on the operator for this decorator to work
    correctly:

    - The operator must be element-wise.
    - The operator must be unary (one input tensor, one output tensor).
    - The operator must return a tensor of the same shape.

    Additional arguments to the function (besides the input tensor) are
    supported. The returned result is a dense tensor that contains
    `default_value` outside of the indices of the input tensor.

    Args:
        default_value: The value to use outside of indices. It must be the
            value that the operator returns for zero values.

    Returns:
        Wrapped function supporting `tf.SparseTensor` and `tf.IndexedSlices`.
    """

    def wrap_densifying_unary(func):
        @functools.wraps(func)
        def sparse_wrapper(x, *args, **kwargs):
            if isinstance(x, tf.SparseTensor):
                # Apply func to the stored values, then densify with the
                # operator's value-at-zero as the fill.
                mapped = sparse_with_values(x, func(x.values, *args, **kwargs))
                fill = tf.cast(default_value, mapped.values.dtype)
                return sparse_to_dense(mapped, fill)
            if isinstance(x, tf.IndexedSlices):
                # Fill a dense tensor with the value-at-zero, then scatter
                # the mapped slice values into their rows.
                mapped_values = func(x.values, *args, **kwargs)
                dense = tf.fill(
                    x.dense_shape,
                    tf.cast(default_value, mapped_values.dtype),
                )
                return tf.tensor_scatter_nd_update(
                    dense, tf.expand_dims(x.indices, 1), mapped_values
                )
            return func(x, *args, **kwargs)

        return sparse_wrapper

    return wrap_densifying_unary
def elementwise_unary(func):
    """Decorator adding `tf.SparseTensor` and `tf.IndexedSlices` support to
    a zero-preserving element-wise unary operator.

    There are requirements on the operator for this decorator to work
    correctly:

    - The operator must be element-wise.
    - The operator must be unary (one input tensor, one output tensor).
    - The operator must return a tensor of the same shape, and if it is a
      `tf.SparseTensor` or `tf.IndexedSlices`, the indices of the result
      must be the same. Therefore:

      - Reduction operations are not supported (e.g. `mean`).
      - Operations for which the result may be dense (e.g. `reciprocal`),
        or for which the sparse indices depend on the inputs, are not
        supported (e.g. `clip`). This implies that `func(0)` must be 0.

    Additional arguments to the function (besides the input tensor) are
    supported as long as they cannot change the indices of the result. For
    instance, `round` is supported, but `clip` is not, as
    `clip(x, 1.0, 2.0)` would always return a dense tensor.

    Note that if an input sparse tensor contains zero values, the indices
    and the zero values are preserved.

    Args:
        func: The function to wrap.

    Returns:
        Wrapped function supporting `tf.SparseTensor` and `tf.IndexedSlices`.
    """

    @functools.wraps(func)
    def sparse_wrapper(x, *args, **kwargs):
        # Zero-preserving op: apply func to the stored values only and keep
        # the index structure untouched.
        if isinstance(x, tf.SparseTensor):
            return sparse_with_values(x, func(x.values, *args, **kwargs))
        if isinstance(x, tf.IndexedSlices):
            return tf.IndexedSlices(
                func(x.values, *args, **kwargs), x.indices, x.dense_shape
            )
        return func(x, *args, **kwargs)

    return sparse_wrapper
def elementwise_binary_union(sparse_op, densify_mixed=False):
    """Decorator to add support for `tf.SparseTensor` and `tf.IndexedSlices`
    to an element-wise binary operator such that the indices present in the
    result are the union of the indices in the two operands.

    The primary use case for this is the `add` and `subtract` operators.

    There are requirements on the operator for this decorator to work
    correctly:

    - The operator must be element-wise.
    - The operator must be binary (two input tensors and one output tensor).
    - Both inputs must be of the same shape or one input must be a scalar.
    - The output must be of the same shape as the (non scalar) inputs.
    - The indices of the output must be the union of the indices of the
      inputs. This implies that func(0, 0) must be 0. As a result, if one
      operand is dense or a scalar, then the result will be dense.

    Additional arguments to the function (besides the input tensors) are not
    supported.

    Note that if the result of the operation is zero at some indices,
    including because the operands were zero at these indices, the zeros and
    indices are preserved.

    Args:
        sparse_op: implementation of the operation for `tf.SparseTensor`.
            Must work if both operands are `tf.SparseTensor`s, and can
            optionally work if one operand is a `tf.SparseTensor` and the
            other one is a dense tensor, see `densify_mixed`.
        densify_mixed: if `True`, `sparse_op` does not support a mix of
            `tf.SparseTensor` and dense tensor, and the `tf.SparseTensor`
            operand is densified before calling `func`.

    Returns:
        Wrapped function supporting `tf.SparseTensor` and `tf.IndexedSlices`.
    """

    def wrap_elementwise_binary_union(func):
        @functools.wraps(func)
        def sparse_wrapper(x1, x2):
            if isinstance(x1, tf.SparseTensor):
                if isinstance(x2, tf.SparseTensor):
                    # x1 is a SparseTensor and x2 is a SparseTensor.
                    # Identical index tensors (by identity): operate on the
                    # values directly and skip the sparse union op.
                    if x1.indices is x2.indices:
                        return sparse_with_values(
                            x1, func(x1.values, x2.values)
                        )
                    else:
                        output = sparse_op(x1, x2)
                        output.set_shape(x1.shape)
                        return output
                else:
                    # x1 is a SparseTensor.
                    if densify_mixed:
                        # Densified x1 falls through to the final dense call.
                        x1 = sparse_to_dense(x1)
                    else:
                        if not hasattr(x2, "shape") or len(x2.shape) == 0:
                            # x2 is a scalar, broadcast.
                            x2 = broadcast_scalar_to_sparse_shape(x2, x1)
                        return sparse_op(x1, x2)
            elif isinstance(x2, tf.SparseTensor):
                # x2 is a SparseTensor.
                if densify_mixed:
                    # Densified x2 falls through to the final dense call.
                    x2 = sparse_to_dense(x2)
                else:
                    if not hasattr(x1, "shape") or len(x1.shape) == 0:
                        # x1 is a scalar, broadcast.
                        x1 = broadcast_scalar_to_sparse_shape(x1, x2)
                    return sparse_op(x1, x2)
            elif isinstance(x1, tf.IndexedSlices):
                if isinstance(x2, tf.IndexedSlices):
                    # x1 is an IndexedSlices and x2 is an IndexedSlices.
                    if x1.indices is x2.indices:
                        return tf.IndexedSlices(
                            func(x1.values, x2.values),
                            x1.indices,
                            x1.dense_shape,
                        )
                    else:
                        # Compute the union of indices.
                        (
                            union_indices,
                            x1_values_for_union,
                            x2_values_for_union,
                        ) = indexed_slices_union_indices_and_values(
                            x1, x2.indices, x2.values
                        )
                        # Now, it is an element-wise operation on the union.
                        return tf.IndexedSlices(
                            func(
                                x1_values_for_union,
                                x2_values_for_union,
                            ),
                            union_indices,
                            x1.dense_shape,
                        )
                else:
                    # x1 is an IndexedSlices, densify.
                    x1 = tf.convert_to_tensor(x1)
            elif isinstance(x2, tf.IndexedSlices):
                # x2 is an IndexedSlices, densify.
                x2 = tf.convert_to_tensor(x2)
            # Both operands are dense (possibly after densification above).
            return func(x1, x2)

        return sparse_wrapper

    return wrap_elementwise_binary_union
def elementwise_binary_intersection(func):
    """Decorator to add support for `tf.SparseTensor` and `tf.IndexedSlices`
    to an element-wise binary operator such that the indices present in the
    result are the intersection of the indices in the two operands.

    The primary use case for this is the `multiply` operator.

    There are requirements on the operator for this decorator to work
    correctly:

    - The operator must be element-wise.
    - The operator must be binary (two input tensors and one output tensor).
    - Both inputs must be of the same shape or one input must be a scalar.
    - The output must be of the same shape as the (non scalar) inputs.
    - The indices of the output must be the intersection of the indices of
      the inputs. This implies that func(0, x) and func(x, 0) must be 0 for
      any x. As a result, if one operand is dense or a scalar, then the
      indices are the ones from the other operand.

    Additional arguments to the function (besides the input tensors) are not
    supported.

    Note that if the operands contain zero values at some common indices,
    the indices and the zero values are preserved.

    Args:
        func: The function to wrap.

    Returns:
        Wrapped function supporting `tf.SparseTensor` and `tf.IndexedSlices`.
    """

    @functools.wraps(func)
    def sparse_wrapper(x1, x2):
        if isinstance(x1, tf.SparseTensor):
            if isinstance(x2, tf.SparseTensor):
                # x1 is a SparseTensor and x2 is a SparseTensor.
                # Identical index tensors (by identity): operate on the
                # values directly, skipping the intersection computation.
                if x1.indices is x2.indices:
                    return sparse_with_values(x1, func(x1.values, x2.values))
                else:
                    # Compute the intersection of indices.
                    (
                        intersection_indices,
                        x1_values_for_intersection,
                        x2_values_for_intersection,
                    ) = sparse_intersection_indices_and_values(x1, x2)
                    # Now, it is an element-wise operation on the
                    # intersection.
                    output = tf.SparseTensor(
                        intersection_indices,
                        func(
                            x1_values_for_intersection,
                            x2_values_for_intersection,
                        ),
                        x1.dense_shape,
                    )
                    output.set_shape(x1.shape)
                    return output
            else:
                # x1 is a SparseTensor.
                if not hasattr(x2, "shape") or len(x2.shape) == 0:
                    # x2 is a scalar, apply func element-wise.
                    return sparse_with_values(x1, func(x1.values, x2))
                else:
                    # x2 is dense, gather values from x1 indices.
                    return sparse_with_values(
                        x1, func(x1.values, tf.gather_nd(x2, x1.indices))
                    )
        elif isinstance(x2, tf.SparseTensor):
            # x2 is a SparseTensor.
            if not hasattr(x1, "shape") or len(x1.shape) == 0:
                # x1 is a scalar, apply func element-wise.
                return sparse_with_values(x2, func(x1, x2.values))
            else:
                # x1 is dense, gather values from x2 indices.
                return sparse_with_values(
                    x2, func(tf.gather_nd(x1, x2.indices), x2.values)
                )
        elif isinstance(x1, tf.IndexedSlices):
            if isinstance(x2, tf.IndexedSlices):
                # x1 is an IndexedSlices and x2 is an IndexedSlices.
                if x1.indices is x2.indices:
                    return tf.IndexedSlices(
                        func(x1.values, x2.values), x1.indices, x1.dense_shape
                    )
                else:
                    # Compute the intersection of indices.
                    (
                        intersection_indices,
                        x1_values_for_intersection,
                        x2_values_for_intersection,
                    ) = indexed_slices_intersection_indices_and_values(x1, x2)
                    # Now, it is an element-wise operation on the
                    # intersection.
                    return tf.IndexedSlices(
                        func(
                            x1_values_for_intersection,
                            x2_values_for_intersection,
                        ),
                        intersection_indices,
                        x1.dense_shape,
                    )
            else:
                # x1 is an IndexedSlices.
                if not hasattr(x2, "shape") or len(x2.shape) == 0:
                    # x2 is a scalar, apply func element-wise.
                    return tf.IndexedSlices(
                        func(x1.values, x2), x1.indices, x1.dense_shape
                    )
                else:
                    # x2 is dense, gather values from x1 indices.
                    return tf.IndexedSlices(
                        func(x1.values, tf.gather(x2, x1.indices)),
                        x1.indices,
                        x1.dense_shape,
                    )
        elif isinstance(x2, tf.IndexedSlices):
            # x2 is an IndexedSlices.
            if not hasattr(x1, "shape") or len(x1.shape) == 0:
                # x1 is a scalar, apply func element-wise.
                return tf.IndexedSlices(
                    func(x1, x2.values), x2.indices, x2.dense_shape
                )
            else:
                # x1 is dense, gather values from x2 indices.
                return tf.IndexedSlices(
                    func(tf.gather(x1, x2.indices), x2.values),
                    x2.indices,
                    x2.dense_shape,
                )
        # Default case, no SparseTensor and no IndexedSlices.
        return func(x1, x2)

    return sparse_wrapper
def elementwise_division(func):
"""Decorator to add support for `tf.SparseTensor` and `tf.IndexedSlices` to
element-wise binary division and related operators.
This decorator is designed for operations related to the division of two
operands (e.g. `divide`). It accepts `tf.SparseTensor` and
`tf.IndexedSlices` for both the dividend and the divisor, but handles them
differently based on whether they are the dividend or the divisor.
- If the divisor is a `tf.SparseTensor` or `tf.IndexedSlices`, it is
densified and the result is dense because the result contains Inf or Nan
outside of the indices of the dividend.
- If the dividend is a `tf.SparseTensor` or `tf.IndexedSlices` and the
divisor is dense, it finds occurrences of zeros and NaNs in the divisor.
The result may therefore have more indices than there were in the dividend
to return correct values where the divisor was zero or NaN.
- If the dividend is a `tf.SparseTensor` or `tf.IndexedSlices` and the
divisor is a scalar, it does the division element-wise. Note that the
result is incorrectly sparse if the scalar divisor is zero.
Args:
func: The function to wrap.
Returns:
Wrapped function that supports `tf.SparseTensor` and `tf.IndexedSlices`.
"""
@functools.wraps(func)
def sparse_wrapper(x1, x2):
if isinstance(x1, tf.SparseTensor):
if isinstance(x2, tf.SparseTensor):
# x1 is a SparseTensor and x2 is a SparseTensor.
# Divisor is sparse, meaning we're doing divisions by zero
# outside of x2.indices, so the result is dense. Densify both.
x1 = sparse_to_dense(x1)
x2 = sparse_to_dense(x2)
else:
# x1 is a SparseTensor.
if not hasattr(x2, "shape") or len(x2.shape) == 0:
# x2 is a scalar, apply func element-wise.
return sparse_with_values(x1, func(x1.values, x2))
else:
# x2 is dense.
x2_zeros_and_nans = tf.equal(x2, 0)
if not tf.as_dtype(x2.dtype).is_integer:
x2_zeros_and_nans = tf.math.logical_or(
x2_zeros_and_nans, tf.math.is_nan(x2)
)
def func_for_x1_indices():
# Gather values from x1 indices.
return sparse_with_values(
x1, func(x1.values, tf.gather_nd(x2, x1.indices))
)
def func_for_union_indices():
# Compute the union of indices to keep zeros and NaNs.
x2_zeros_and_nan_indices = tf.where(x2_zeros_and_nans)
(
union_indices,
x1_values_for_union,
_,
) = sparse_union_indices_and_values(
x1, x2_zeros_and_nan_indices
)
output = tf.SparseTensor(
union_indices,
func(
x1_values_for_union,
tf.gather_nd(x2, union_indices),
),
x1.dense_shape,
)
output.set_shape(x1.shape)
return output
return tf.cond(
tf.reduce_any(x2_zeros_and_nans),
func_for_union_indices,
func_for_x1_indices,
)
elif isinstance(x2, tf.SparseTensor):
# x2 is a SparseTensor.
# Divisor is sparse, densify to do the divisions by zero correctly.
x2 = sparse_to_dense(x2)
elif isinstance(x1, tf.IndexedSlices):
if isinstance(x2, tf.IndexedSlices):
# x1 is an IndexedSlices and x2 is an IndexedSlices.
# Divisor is slices, meaning we're doing divisions by zero
# outside of x2.indices, so the result is dense. Densify both.
x1 = tf.convert_to_tensor(x1)
x2 = tf.convert_to_tensor(x2)
else:
# x1 is a IndexedSlices.
if not hasattr(x2, "shape") or len(x2.shape) == 0:
# x2 is a scalar, apply func element-wise.
return tf.IndexedSlices(
func(x1.values, x2), x1.indices, x1.dense_shape
)
else:
# x2 is dense.
x2_zeros_and_nans = tf.equal(x2, 0)
if not tf.as_dtype(x2.dtype).is_integer:
x2_zeros_and_nans = tf.math.logical_or(
x2_zeros_and_nans, tf.math.is_nan(x2)
)
x2_zeros_and_nans = tf.reduce_any(
x2_zeros_and_nans, axis=tuple(range(1, x2.shape.rank))
)
def func_for_x1_indices():
# Gather values from x1 indices.
return tf.IndexedSlices(
func(x1.values, tf.gather(x2, x1.indices)),
x1.indices,
x1.dense_shape,
)
def func_for_union_indices():
x2_zeros_and_nan_indices = tf.squeeze(
tf.where(x2_zeros_and_nans), axis=-1
)
# Compute the union of indices to keep zeros and NaNs.
(
union_indices,
x1_values_for_union,
_,
) = indexed_slices_union_indices_and_values(
x1, x2_zeros_and_nan_indices
)
return tf.IndexedSlices(
func(
x1_values_for_union,
tf.gather(x2, union_indices),
),
union_indices,
x1.dense_shape,
)
return tf.cond(
tf.reduce_any(x2_zeros_and_nans),
func_for_union_indices,
func_for_x1_indices,
)
elif isinstance(x2, tf.IndexedSlices):
# x2 is a IndexedSlices.
# Divisor is slices, densify to do the divisions by zero correctly.
x2 = tf.convert_to_tensor(x2)
# Default case, no SparseTensor and no IndexedSlices.
return func(x1, x2)
return sparse_wrapper
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/tensorflow/optimizer_distribute_test.py | keras/src/backend/tensorflow/optimizer_distribute_test.py | # flake8: noqa
import numpy as np
import pytest
import tensorflow as tf
from absl.testing import parameterized
from tensorflow.python.eager import context
from keras.src import backend
from keras.src import testing
from keras.src.optimizers.sgd import SGD
@pytest.mark.skipif(
backend.backend() != "tensorflow",
reason="The distribute test can only run with TF backend.",
)
class OptimizerDistributeTest(testing.TestCase):
def setUp(self):
super().setUp()
# Need at least 2 devices for distribution related tests.
cpus = tf.config.list_physical_devices("CPU")
context._reset_context()
tf.config.set_logical_device_configuration(
cpus[0],
[
tf.config.LogicalDeviceConfiguration(),
tf.config.LogicalDeviceConfiguration(),
],
)
self.strategy = tf.distribute.MirroredStrategy(["CPU:0", "CPU:1"])
def test_config(self):
with self.strategy.scope():
optimizer = SGD(
learning_rate=0.5,
momentum=0.06,
nesterov=True,
weight_decay=0.004,
)
self.run_class_serialization_test(optimizer)
@parameterized.parameters([("keras_sgd",), ("tf_keras_sgd",)])
def test_single_step(self, optimizer_type):
if optimizer_type == "tf_keras_sgd":
try:
import tf_keras
optimizer_fn = tf_keras.optimizers.SGD
except (ImportError, AttributeError):
self.skipTest("tf_keras not installed")
else:
optimizer_fn = SGD
with self.strategy.scope():
optimizer = optimizer_fn(
learning_rate=0.5,
momentum=0.06,
)
# use tf variable to work both in k2 & k3.
vars = tf.Variable([1.0, 2.0, 3.0, 4.0])
def update():
grads = tf.constant([1.0, 6.0, 7.0, 2.0])
optimizer.apply_gradients(zip([grads], [vars]))
self.strategy.run(update)
self.assertAllClose(
vars, [0.0, -4.0, -4.0, 2.0], rtol=1e-4, atol=1e-4
)
def test_weight_decay(self):
with self.strategy.scope():
grads, var1, var2, var3 = (
tf.zeros(()),
backend.Variable(2.0),
backend.Variable(3.0, name="exclude"),
backend.Variable(4.0),
)
optimizer_1 = SGD(learning_rate=1.0, weight_decay=0.004)
self.strategy.run(
lambda: optimizer_1.apply_gradients(zip([grads], [var1]))
)
optimizer_2 = SGD(learning_rate=1.0, weight_decay=0.004)
def opt2_run():
optimizer_2.exclude_from_weight_decay(var_names=["exclude"])
optimizer_2.apply_gradients(zip([grads, grads], [var1, var2]))
self.strategy.run(opt2_run)
optimizer_3 = SGD(learning_rate=1.0, weight_decay=0.004)
def opt3_run():
optimizer_3.exclude_from_weight_decay(var_list=[var3])
optimizer_3.apply_gradients(zip([grads, grads], [var1, var3]))
self.strategy.run(opt3_run)
self.assertAlmostEqual(var1.numpy(), 1.9760959)
self.assertAlmostEqual(var2.numpy(), 3.0)
self.assertAlmostEqual(var3.numpy(), 4.0)
def test_correctness_with_golden(self):
with self.strategy.scope():
optimizer = SGD(nesterov=True)
x = backend.Variable(np.ones([10]))
def update_grads():
grads = backend.convert_to_tensor(np.arange(0.1, 1.1, 0.1))
optimizer.apply_gradients(zip([grads], [x]))
def update_first_grads():
first_grads = backend.convert_to_tensor(np.full((10,), 0.01))
optimizer.apply_gradients(zip([first_grads], [x]))
# fmt: off
golden = np.array(
[
[0.9980, 0.9960, 0.9940, 0.9920, 0.9900, 0.9880, 0.9860, 0.9840, 0.9820, 0.9800],
[0.9978, 0.9958, 0.9938, 0.9918, 0.9898, 0.9878, 0.9858, 0.9838, 0.9818, 0.9798],
[0.9976, 0.9956, 0.9936, 0.9916, 0.9896, 0.9876, 0.9856, 0.9836, 0.9816, 0.9796],
[0.9974, 0.9954, 0.9934, 0.9914, 0.9894, 0.9874, 0.9854, 0.9834, 0.9814, 0.9794],
[0.9972, 0.9952, 0.9932, 0.9912, 0.9892, 0.9872, 0.9852, 0.9832, 0.9812, 0.9792],
]
)
# fmt: on
self.strategy.run(update_grads)
for i in range(5):
self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
self.strategy.run(update_first_grads)
def test_clip_norm(self):
with self.strategy.scope():
optimizer = SGD(clipnorm=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])
def test_clip_value(self):
with self.strategy.scope():
optimizer = SGD(clipvalue=1)
grad = [np.array([100.0, 100.0])]
clipped_grad = optimizer._clip_gradients(grad)
self.assertAllClose(clipped_grad[0], [1.0, 1.0])
def test_stateless_not_supported(self):
optimizer = SGD(learning_rate=0.5)
grads = [np.array([1.0, 6.0, 7.0, 2.0])]
vars = [backend.Variable([1.0, 2.0, 3.0, 4.0])]
optimizer.build(vars)
with self.assertRaisesRegex(ValueError, "not supported"):
optimizer.stateless_apply(optimizer.variables, grads, vars)
def test_ema(self):
with self.strategy.scope():
v = backend.Variable([[3.0, 4.0], [5.0, 6.0]])
grads = backend.convert_to_tensor([[1.0, 1.0], [1.0, 1.0]])
optimizer = SGD(
learning_rate=1.0,
use_ema=True,
ema_momentum=0.9,
ema_overwrite_frequency=3,
)
self.strategy.run(lambda: optimizer.apply_gradients([(grads, v)]))
self.assertAllClose(v, [[2.0, 3.0], [4.0, 5.0]])
self.assertAllClose(
optimizer._model_variables_moving_average[0],
[[2.0, 3.0], [4.0, 5.0]], # initialized after first step
)
self.strategy.run(lambda: optimizer.apply_gradients([(grads, v)]))
self.assertAllClose(v, [[1.0, 2.0], [3.0, 4.0]])
self.assertAllClose(
optimizer._model_variables_moving_average[0],
[[1.9, 2.9], [3.9, 4.9]],
)
self.strategy.run(lambda: optimizer.apply_gradients([(grads, v)]))
# Variables were overwritten with EMA
self.assertAllClose(v, [[1.71, 2.71], [3.71, 4.71]])
self.assertAllClose(
optimizer._model_variables_moving_average[0],
[[1.71, 2.71], [3.71, 4.71]],
)
def test_gradient_accumulation(self):
with self.strategy.scope():
v = backend.Variable([[1.0, 2.0], [3.0, 4.0]])
grads = backend.convert_to_tensor([[1.0, 1.0], [2.0, 2.0]])
optimizer = SGD(learning_rate=1.0, gradient_accumulation_steps=3)
self.assertEqual(optimizer.gradient_accumulation_steps, 3)
self.strategy.run(lambda: optimizer.apply_gradients([(grads, v)]))
self.assertAllClose(v, [[1.0, 2.0], [3.0, 4.0]])
self.assertAllClose(
optimizer._accumulated_gradients[0], [[1.0, 1.0], [2.0, 2.0]]
)
self.assertAllClose(optimizer._iterations, 1)
self.assertAllClose(optimizer.iterations, 0)
self.strategy.run(lambda: optimizer.apply_gradients([(grads, v)]))
self.assertAllClose(v, [[1.0, 2.0], [3.0, 4.0]])
self.assertAllClose(
optimizer._accumulated_gradients[0], [[2.0, 2.0], [4.0, 4.0]]
)
self.assertAllClose(optimizer._iterations, 2)
self.assertAllClose(optimizer.iterations, 0)
self.strategy.run(lambda: optimizer.apply_gradients([(grads, v)]))
self.assertAllClose(v, [[-1.0, 0.0], [-1.0, 0.0]])
self.assertAllClose(
optimizer._accumulated_gradients[0], [[0.0, 0.0], [0.0, 0.0]]
)
self.assertAllClose(optimizer._iterations, 3)
self.assertAllClose(optimizer.iterations, 1)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/tensorflow/math.py | keras/src/backend/tensorflow/math.py | import tensorflow as tf
from keras.src.backend import config
from keras.src.backend import standardize_dtype
from keras.src.backend.common import dtypes
from keras.src.backend.tensorflow.core import cast
from keras.src.backend.tensorflow.core import convert_to_tensor
def segment_sum(data, segment_ids, num_segments=None, sorted=False):
if sorted:
if num_segments is not None:
raise ValueError(
"Argument `num_segments` cannot be set when sorted is True "
"when using the tensorflow backend."
f"Received: num_segments={num_segments}, sorted={sorted}."
)
return tf.math.segment_sum(data, segment_ids)
else:
if num_segments is None:
unique_segment_ids, _ = tf.unique(segment_ids)
num_segments = tf.shape(unique_segment_ids)[0]
return tf.math.unsorted_segment_sum(data, segment_ids, num_segments)
def segment_max(data, segment_ids, num_segments=None, sorted=False):
if sorted:
if num_segments is not None:
raise ValueError(
"Argument `num_segments` cannot be set when sorted is True "
"when using the tensorflow backend."
f"Received: num_segments={num_segments}, sorted={sorted}."
)
return tf.math.segment_max(data, segment_ids)
else:
if num_segments is None:
unique_segment_ids, _ = tf.unique(segment_ids)
num_segments = tf.shape(unique_segment_ids)[0]
return tf.math.unsorted_segment_max(data, segment_ids, num_segments)
def top_k(x, k, sorted=True):
return tf.math.top_k(x, k, sorted=sorted)
def in_top_k(targets, predictions, k):
return tf.math.in_top_k(targets, predictions, k)
def logsumexp(x, axis=None, keepdims=False):
return tf.math.reduce_logsumexp(x, axis=axis, keepdims=keepdims)
def qr(x, mode="reduced"):
if mode not in {"reduced", "complete"}:
raise ValueError(
"`mode` argument value not supported. "
"Expected one of {'reduced', 'complete'}. "
f"Received: mode={mode}"
)
if mode == "reduced":
return tf.linalg.qr(x)
return tf.linalg.qr(x, full_matrices=True)
def extract_sequences(x, sequence_length, sequence_stride):
return tf.signal.frame(
x,
frame_length=sequence_length,
frame_step=sequence_stride,
axis=-1,
pad_end=False,
)
def _get_complex_tensor_from_tuple(x):
if not isinstance(x, (tuple, list)) or len(x) != 2:
raise ValueError(
"Input `x` should be a tuple of two tensors - real and imaginary."
f"Received: x={x}"
)
# `convert_to_tensor` does not support passing complex tensors. We separate
# the input out into real and imaginary and convert them separately.
real, imag = x
real = convert_to_tensor(real)
imag = convert_to_tensor(imag)
# Check shapes.
if real.shape != imag.shape:
raise ValueError(
"Input `x` should be a tuple of two tensors - real and imaginary."
"Both the real and imaginary parts should have the same shape. "
f"Received: x[0].shape = {real.shape}, x[1].shape = {imag.shape}"
)
# Ensure dtype is float.
if not real.dtype.is_floating or not imag.dtype.is_floating:
raise ValueError(
"At least one tensor in input `x` is not of type float."
f"Received: x={x}."
)
complex_input = tf.dtypes.complex(real, imag)
return complex_input
def fft(x):
complex_input = _get_complex_tensor_from_tuple(x)
complex_output = tf.signal.fft(complex_input)
return tf.math.real(complex_output), tf.math.imag(complex_output)
def fft2(x):
complex_input = _get_complex_tensor_from_tuple(x)
complex_output = tf.signal.fft2d(complex_input)
return tf.math.real(complex_output), tf.math.imag(complex_output)
def ifft2(x):
real, imag = x
h = cast(tf.shape(real)[-2], real.dtype)
w = cast(tf.shape(real)[-1], real.dtype)
real_conj, imag_conj = real, -imag
fft_real, fft_imag = fft2((real_conj, imag_conj))
return fft_real / (h * w), -fft_imag / (h * w)
def rfft(x, fft_length=None):
if fft_length is not None:
fft_length = [fft_length]
complex_output = tf.signal.rfft(x, fft_length=fft_length)
return tf.math.real(complex_output), tf.math.imag(complex_output)
def irfft(x, fft_length=None):
complex_input = _get_complex_tensor_from_tuple(x)
if fft_length is not None:
fft_length = [fft_length]
return tf.signal.irfft(complex_input, fft_length)
def stft(
x, sequence_length, sequence_stride, fft_length, window="hann", center=True
):
if standardize_dtype(x.dtype) not in {"float32", "float64"}:
raise TypeError(
"Invalid input type. Expected `float32` or `float64`. "
f"Received: input type={x.dtype}"
)
if fft_length < sequence_length:
raise ValueError(
"`fft_length` must equal or larger than `sequence_length`. "
f"Received: sequence_length={sequence_length}, "
f"fft_length={fft_length}"
)
if isinstance(window, str):
if window not in {"hann", "hamming"}:
raise ValueError(
"If a string is passed to `window`, it must be one of "
f'`"hann"`, `"hamming"`. Received: window={window}'
)
x = convert_to_tensor(x)
if center:
pad_width = [(0, 0) for _ in range(len(x.shape))]
pad_width[-1] = (fft_length // 2, fft_length // 2)
x = tf.pad(x, pad_width, mode="reflect")
l_pad = (fft_length - sequence_length) // 2
r_pad = fft_length - sequence_length - l_pad
if window is not None:
if isinstance(window, str):
if window == "hann":
win_array = tf.signal.hann_window(
sequence_length, periodic=True, dtype=x.dtype
)
else:
win_array = tf.signal.hamming_window(
sequence_length, periodic=True, dtype=x.dtype
)
else:
win_array = convert_to_tensor(window, dtype=x.dtype)
if len(win_array.shape) != 1 or win_array.shape[-1] != sequence_length:
raise ValueError(
"The shape of `window` must be equal to [sequence_length]."
f"Received: window shape={win_array.shape}"
)
win_array = tf.pad(win_array, [[l_pad, r_pad]])
def win(frame_step, dtype):
return win_array
else:
win = None
result = tf.signal.stft(
x,
frame_length=(sequence_length + l_pad + r_pad),
frame_step=sequence_stride,
fft_length=fft_length,
window_fn=win,
)
return tf.math.real(result), tf.math.imag(result)
def istft(
x,
sequence_length,
sequence_stride,
fft_length,
length=None,
window="hann",
center=True,
):
complex_input = _get_complex_tensor_from_tuple(x)
dtype = tf.math.real(complex_input).dtype
expected_output_len = fft_length + sequence_stride * (
tf.shape(complex_input)[-2] - 1
)
l_pad = (fft_length - sequence_length) // 2
r_pad = fft_length - sequence_length - l_pad
if window is not None:
if isinstance(window, str):
if window == "hann":
win_array = tf.signal.hann_window(
sequence_length, periodic=True, dtype=dtype
)
else:
win_array = tf.signal.hamming_window(
sequence_length, periodic=True, dtype=dtype
)
else:
win_array = convert_to_tensor(window, dtype=dtype)
if len(win_array.shape) != 1 or win_array.shape[-1] != sequence_length:
raise ValueError(
"The shape of `window` must be equal to [sequence_length]."
f"Received: window shape={win_array.shape}"
)
win_array = tf.pad(win_array, [[l_pad, r_pad]])
win = tf.signal.inverse_stft_window_fn(
sequence_stride, lambda frame_step, dtype: win_array
)
else:
win = None
x = tf.signal.inverse_stft(
complex_input,
frame_length=(sequence_length + l_pad + r_pad),
frame_step=sequence_stride,
fft_length=fft_length,
window_fn=win,
)
start = 0 if center is False else fft_length // 2
if length is not None:
end = start + length
elif center is True:
end = -(fft_length // 2)
else:
end = expected_output_len
return x[..., start:end]
def rsqrt(x):
return tf.math.rsqrt(x)
def erf(x):
return tf.math.erf(x)
def erfinv(x):
return tf.math.erfinv(x)
def solve(a, b):
a = convert_to_tensor(a)
b = convert_to_tensor(b)
return tf.linalg.solve(a, b)
def norm(x, ord=None, axis=None, keepdims=False):
from keras.src.backend.tensorflow.numpy import moveaxis
x = convert_to_tensor(x)
x_shape = x.shape
ndim = x_shape.rank
if axis is None:
axis = tuple(range(ndim))
elif isinstance(axis, int):
axis = (axis,)
axis = axis[0] if len(axis) == 1 else axis
num_axes = 1 if isinstance(axis, int) else len(axis)
if num_axes == 1 and ord is None:
ord = "euclidean"
elif num_axes == 2 and ord is None:
ord = "fro"
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = cast(x, dtype)
# Fast path to utilize `tf.linalg.norm`
if (num_axes == 1 and ord in ("euclidean", 1, 2, float("inf"))) or (
num_axes == 2 and ord in ("euclidean", "fro", 1, 2, float("inf"))
):
return tf.linalg.norm(x, ord=ord, axis=axis, keepdims=keepdims)
# Ref: jax.numpy.linalg.norm
if num_axes == 1 and ord not in ("fro", "nuc"):
if ord == float("-inf"):
return tf.math.reduce_min(
tf.math.abs(x), axis=axis, keepdims=keepdims
)
elif ord == 0:
return tf.math.reduce_sum(
tf.cast(tf.not_equal(x, 0), dtype=x.dtype),
axis=axis,
keepdims=keepdims,
)
else:
ord = convert_to_tensor(ord, dtype=x.dtype)
out = tf.math.reduce_sum(
tf.pow(tf.math.abs(x), ord), axis=axis, keepdims=keepdims
)
return tf.pow(out, 1.0 / ord)
elif num_axes == 2 and ord in ("nuc", float("-inf"), -2, -1):
row_axis, col_axis = axis[0], axis[1]
row_axis = row_axis + ndim if row_axis < 0 else row_axis
col_axis = col_axis + ndim if col_axis < 0 else col_axis
if ord == float("-inf"):
if not keepdims and row_axis > col_axis:
row_axis -= 1
x = tf.math.reduce_min(
tf.reduce_sum(tf.math.abs(x), axis=col_axis, keepdims=keepdims),
axis=row_axis,
keepdims=keepdims,
)
elif ord == -1:
if not keepdims and col_axis > row_axis:
col_axis -= 1
x = tf.math.reduce_min(
tf.reduce_sum(tf.math.abs(x), axis=row_axis, keepdims=keepdims),
axis=col_axis,
keepdims=keepdims,
)
else:
x = moveaxis(x, axis, (-2, -1))
if ord == -2:
x = tf.math.reduce_min(
tf.linalg.svd(x, compute_uv=False), axis=-1
)
else:
x = tf.math.reduce_sum(
tf.linalg.svd(x, compute_uv=False), axis=-1
)
if keepdims:
x = tf.expand_dims(x, axis[0])
x = tf.expand_dims(x, axis[1])
return x
if num_axes == 1:
raise ValueError(
f"Invalid `ord` argument for vector norm. Received: ord={ord}"
)
elif num_axes == 2:
raise ValueError(
f"Invalid `ord` argument for matrix norm. Received: ord={ord}"
)
else:
raise ValueError(f"Invalid axis values. Received: axis={axis}")
def logdet(x):
x = convert_to_tensor(x)
return tf.linalg.logdet(x)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/common/stateless_scope_test.py | keras/src/backend/common/stateless_scope_test.py | import numpy as np
from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.backend.common.stateless_scope import StatelessScope
class TestStatelessScope(testing.TestCase):
def test_basic_flow(self):
var1 = backend.Variable(np.zeros((2,)))
var2 = backend.Variable(np.zeros((2,)))
var_out = backend.Variable(np.zeros((2,)))
value1 = ops.ones(shape=(2,))
value2 = ops.ones(shape=(2,))
with StatelessScope(
state_mapping=[(var1, value1), (var2, value2)]
) as scope:
out = var1 + var2
var_out.assign(out)
var_out_value = var_out + 0.0
# Inside scope: new value is used.
self.assertAllClose(var_out_value, 2 * np.ones((2,)))
# Out of scope: old value is used.
var_out_value = var_out + 0.0
self.assertAllClose(var_out_value, np.zeros((2,)))
# Updates are tracked.
var_out_value = scope.get_current_value(var_out)
self.assertAllClose(var_out_value, 2 * np.ones((2,)))
# Updates can be reapplied.
var_out.assign(scope.get_current_value(var_out))
self.assertAllClose(var_out_value, 2 * np.ones((2,)))
def test_invalid_key_in_state_mapping(self):
# var1 = backend.Variable(np.zeros((2,)))
invalid_key = "not_a_keras_variable"
value1 = ops.ones(shape=(2,))
with self.assertRaisesRegex(
ValueError, "all keys in argument `mapping` must be Variable"
):
StatelessScope(state_mapping=[(invalid_key, value1)])
def test_invalid_value_shape_in_state_mapping(self):
var1 = backend.Variable(np.zeros((2,)))
invalid_value = ops.ones(shape=(3,)) # Incorrect shape
with self.assertRaisesRegex(
ValueError, "all values in argument `mapping` must be tensors with"
):
StatelessScope(state_mapping=[(var1, invalid_value)])
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/common/keras_tensor.py | keras/src/backend/common/keras_tensor.py | from keras.src import tree
from keras.src.api_export import keras_export
from keras.src.utils.naming import auto_name
@keras_export("keras.KerasTensor")
class KerasTensor:
"""Symbolic tensor -- encapsulates a shape and a dtype.
You can use `KerasTensor` instances to build computation
graphs of Keras operations, such as `keras.Function`
objects or Functional `keras.models.Model` objects.
Example:
>>> x = keras.KerasTensor(shape=(3, 4), dtype="float32")
>>> x.shape
(3, 4)
>>> x.dtype
float32
Calling a Keras operation (including a layer or a model)
on a `KerasTensor` instance will return another `KerasTensor`
instance with the appropriate shape and dtype. This is
called a "symbolic call" (since there is no actual data
involved). The computation of the correct output shape and
dtype is called "static shape inference".
"""
def __init__(
self,
shape,
dtype="float32",
sparse=False,
ragged=False,
record_history=True,
name=None,
**kwargs,
):
from keras.src import backend
ragged_rank = kwargs.pop("ragged_rank", None)
row_splits_dtype = kwargs.pop("row_splits_dtype", None)
if kwargs:
raise TypeError(
f"Unexpected keyword arguments: {', '.join(kwargs.keys())}"
)
self._shape = backend.standardize_shape(shape)
self._dtype = backend.standardize_dtype(dtype)
self._sparse = bool(sparse)
self._ragged = bool(ragged)
if self._sparse and self._ragged:
raise ValueError(
"KerasTensor cannot have `sparse=True` and `ragged=True` at "
"the same time."
)
self._ragged_rank = (
int(ragged_rank) if ragged_rank is not None else None
)
self._row_splits_dtype = (
backend.standardize_dtype(row_splits_dtype)
if row_splits_dtype is not None
else None
)
self.name = name or auto_name(self.__class__.__name__)
self.record_history = record_history
@property
def shape(self):
return self._shape
@shape.setter
def shape(self, value):
raise AttributeError(
"The `shape` attribute of KerasTensor is immutable. One should "
"create a new instance of KerasTensor for this."
)
@property
def dtype(self):
return self._dtype
@dtype.setter
def dtype(self, value):
raise AttributeError(
"The `dtype` attribute of KerasTensor is immutable. One should "
"create a new instance of KerasTensor for this."
)
@property
def sparse(self):
return self._sparse
@sparse.setter
def sparse(self, value):
raise AttributeError(
"The `sparse` attribute of KerasTensor is immutable. One should "
"create a new instance of KerasTensor for this."
)
@property
def ragged_rank(self):
return self._ragged_rank
@ragged_rank.setter
def ragged_rank(self, value):
raise AttributeError(
"The `ragged_rank` attribute of KerasTensor is immutable. One "
"should create a new instance of KerasTensor for this."
)
@property
def row_splits_dtype(self):
return self._row_splits_dtype
@row_splits_dtype.setter
def row_splits_dtype(self, value):
raise AttributeError(
"The `row_splits_dtype` attribute of KerasTensor is immutable. One "
"should create a new instance of KerasTensor for this."
)
@property
def ragged(self):
return self._ragged
@ragged.setter
def ragged(self, value):
raise AttributeError(
"The `ragged` attribute of KerasTensor is immutable. One should "
"create a new instance of KerasTensor for this."
)
@property
def ndim(self):
return len(self.shape)
def reshape(self, newshape):
from keras.src import ops
return ops.Reshape(newshape)(self)
def squeeze(self, axis=None):
from keras.src import ops
return ops.Squeeze(axis)(self)
def __int__(self):
raise ValueError(
"A KerasTensor is symbolic: it's a placeholder for a shape "
"an a dtype. It doesn't have any actual numerical value. "
"You cannot convert it to an int."
)
def __float__(self):
raise ValueError(
"A KerasTensor is symbolic: it's a placeholder for a shape "
"an a dtype. It doesn't have any actual numerical value. "
"You cannot convert it to a float."
)
def __array__(self):
raise ValueError(
"A KerasTensor is symbolic: it's a placeholder for a shape "
"an a dtype. It doesn't have any actual numerical value. "
"You cannot convert it to a NumPy array."
)
def __jax_array__(self):
raise ValueError(
"A KerasTensor cannot be used as input to a JAX function. "
"A KerasTensor is a symbolic placeholder for a shape and dtype, "
"used when constructing Keras Functional models "
"or Keras Functions. You can only use it as input to a Keras layer "
"or a Keras operation (from the namespaces `keras.layers` "
"and `keras.ops`). "
"You are likely doing something like:\n\n"
"```\n"
"x = Input(...)\n"
"...\n"
"jax_fn(x) # Invalid.\n"
"```\n\n"
"What you should do instead is wrap `jax_fn` in a layer:\n\n"
"```\n"
"class MyLayer(Layer):\n"
" def call(self, x):\n"
" return jax_fn(x)\n\n"
"x = MyLayer()(x)\n"
"```\n"
)
def __tf_tensor__(self, dtype=None, name=None):
raise ValueError(
"A KerasTensor cannot be used as input to a TensorFlow function. "
"A KerasTensor is a symbolic placeholder for a shape and dtype, "
"used when constructing Keras Functional models "
"or Keras Functions. You can only use it as input to a Keras layer "
"or a Keras operation (from the namespaces `keras.layers` "
"and `keras.ops`). "
"You are likely doing something like:\n\n"
"```\n"
"x = Input(...)\n"
"...\n"
"tf_fn(x) # Invalid.\n"
"```\n\n"
"What you should do instead is wrap `tf_fn` in a layer:\n\n"
"```\n"
"class MyLayer(Layer):\n"
" def call(self, x):\n"
" return tf_fn(x)\n\n"
"x = MyLayer()(x)\n"
"```\n"
)
def __repr__(self):
return (
f"<KerasTensor shape={self.shape}, dtype={self.dtype}, "
f"sparse={self.sparse}, ragged={self.ragged}, name={self.name}>"
)
def __iter__(self):
raise NotImplementedError(
"Iterating over a symbolic KerasTensor is not supported."
)
def __bool__(self):
raise TypeError("A symbolic KerasTensor cannot be used as a boolean.")
def __add__(self, other):
from keras.src import ops
return ops.Add().symbolic_call(self, other)
def __radd__(self, other):
from keras.src import ops
return ops.Add().symbolic_call(other, self)
def __sub__(self, other):
from keras.src import ops
return ops.Subtract().symbolic_call(self, other)
def __rsub__(self, other):
from keras.src import ops
return ops.Subtract().symbolic_call(other, self)
def __mul__(self, other):
from keras.src import ops
return ops.Multiply().symbolic_call(self, other)
def __rmul__(self, other):
from keras.src import ops
return ops.Multiply().symbolic_call(other, self)
def __matmul__(self, other):
from keras.src import ops
return ops.Matmul().symbolic_call(self, other)
def __rmatmul__(self, other):
from keras.src import ops
return ops.Matmul().symbolic_call(other, self)
def __div__(self, other):
from keras.src import ops
return ops.Divide().symbolic_call(self, other)
def __rdiv__(self, other):
from keras.src import ops
return ops.Divide().symbolic_call(other, self)
def __truediv__(self, other):
from keras.src import ops
return ops.TrueDivide().symbolic_call(self, other)
def __rtruediv__(self, other):
from keras.src import ops
return ops.TrueDivide().symbolic_call(other, self)
def __neg__(self):
from keras.src import ops
return ops.Negative().symbolic_call(self)
def __abs__(self):
from keras.src import ops
return ops.Absolute().symbolic_call(self)
def __pow__(self, other):
from keras.src import ops
return ops.Power().symbolic_call(self, other)
def __rpow__(self, other):
from keras.src import ops
return ops.Power().symbolic_call(other, self)
def __floordiv__(self, other):
from keras.src import ops
return ops.FloorDivide().symbolic_call(self, other)
def __rfloordiv__(self, other):
from keras.src import ops
return ops.FloorDivide().symbolic_call(other, self)
def __mod__(self, other):
from keras.src import ops
return ops.Mod().symbolic_call(self, other)
def __rmod__(self, other):
from keras.src import ops
return ops.Mod().symbolic_call(other, self)
def __lt__(self, other):
from keras.src import ops
return ops.Less().symbolic_call(self, other)
def __le__(self, other):
from keras.src import ops
return ops.LessEqual().symbolic_call(self, other)
def __gt__(self, other):
from keras.src import ops
return ops.Greater().symbolic_call(self, other)
def __ge__(self, other):
from keras.src import ops
return ops.GreaterEqual().symbolic_call(self, other)
def __ne__(self, other):
from keras.src import ops
return ops.NotEqual().symbolic_call(self, other)
def __and__(self, other):
from keras.src import ops
return ops.LogicalAnd().symbolic_call(self, other)
def __rand__(self, other):
from keras.src import ops
return ops.LogicalAnd().symbolic_call(other, self)
def __or__(self, other):
from keras.src import ops
return ops.LogicalOr().symbolic_call(self, other)
def __ror__(self, other):
from keras.src import ops
return ops.LogicalOr().symbolic_call(other, self)
def __invert__(self):
from keras.src import ops
return ops.LogicalNot().symbolic_call(self)
def __xor__(self, other):
from keras.src import ops
return ops.LogicalXor().symbolic_call(self, other)
def __rxor__(self, other):
from keras.src import ops
return ops.LogicalXor().symbolic_call(other, self)
def __getitem__(self, key):
from keras.src import ops
return ops.GetItem().symbolic_call(self, key)
def __round__(self, ndigits=None):
from keras.src import ops
decimals = ndigits or 0
return ops.Round(decimals=decimals).symbolic_call(self)
def any_symbolic_tensors(args=None, kwargs=None):
args = args or ()
kwargs = kwargs or {}
for x in tree.flatten((args, kwargs)):
if isinstance(x, KerasTensor):
return True
return False
@keras_export(["keras.utils.is_keras_tensor", "keras.backend.is_keras_tensor"])
def is_keras_tensor(x):
"""Returns whether `x` is a Keras tensor.
A "Keras tensor" is a *symbolic tensor*, such as a tensor
that was created via `Input()`. A "symbolic tensor"
can be understood as a placeholder -- it does not
contain any actual numerical data, only a shape and dtype.
It can be used for building Functional models, but it
cannot be used in actual computations.
"""
return isinstance(x, KerasTensor)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/common/symbolic_scope_test.py | keras/src/backend/common/symbolic_scope_test.py | import numpy as np
from keras.src import ops
from keras.src import testing
from keras.src.backend.common.symbolic_scope import SymbolicScope
from keras.src.backend.common.symbolic_scope import in_symbolic_scope
class TestSymbolicScope(testing.TestCase):
    """Tests that `in_symbolic_scope()` reflects the active `SymbolicScope`."""

    def test_basic_flow(self):
        # Define a function that behaves differently according to
        # `in_symbolic_scope`.
        def compute_loss(y, y_pred):
            if in_symbolic_scope():
                return ops.zeros_like(y)
            return ops.add(y, y_pred)

        y = ops.ones(shape=(2,))
        y_pred = ops.ones(shape=(2,))

        # Inside the scope: the symbolic branch is taken.
        with SymbolicScope():
            loss = compute_loss(y, y_pred)
        self.assertAllClose(loss, np.zeros((2,)))

        # Outside the scope: the eager branch is taken.
        loss = compute_loss(y, y_pred)
        self.assertAllClose(loss, 2 * np.ones((2,)))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/common/masking.py | keras/src/backend/common/masking.py | from keras.src.backend.common.tensor_attributes import get_tensor_attr
from keras.src.backend.common.tensor_attributes import set_tensor_attr
def set_keras_mask(x, mask):
    """Sets the Keras mask attribute for the given tensor in-place.

    Args:
        x: Input tensor.
        mask: The mask tensor to be set. If `None`, the `_keras_mask` attribute
            will be cleared.
    """
    # Delegates to the generic tensor-attribute store.
    set_tensor_attr(x, "_keras_mask", mask)
def get_keras_mask(x):
    """Gets the Keras mask attribute from the given tensor.

    Args:
        x: Input tensor.

    Returns:
        The mask tensor associated with the input tensor, or `None` if no mask
        has been set.
    """
    # Delegates to the generic tensor-attribute store.
    return get_tensor_attr(x, "_keras_mask")
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/common/backend_utils.py | keras/src/backend/common/backend_utils.py | import functools
import math
import operator
import re
import warnings
def _convert_conv_transpose_padding_args_from_keras_to_jax(
kernel_size, stride, dilation_rate, padding, output_padding
):
"""Convert the padding arguments from Keras to the ones used by JAX.
JAX starts with an shape of size `(input-1) * stride - kernel_size + 2`,
then adds `left_pad` on the left, and `right_pad` on the right.
In Keras, the `padding` argument determines a base shape, to which
`output_padding` is added on the right. If `output_padding` is None, it will
be given a default value.
"""
assert padding.lower() in {"valid", "same"}
kernel_size = (kernel_size - 1) * dilation_rate + 1
if padding.lower() == "valid":
# If output_padding is None, we fill it so that the shape of the output
# is `(input-1)*s + max(kernel_size, stride)`
output_padding = (
max(kernel_size, stride) - kernel_size
if output_padding is None
else output_padding
)
left_pad = kernel_size - 1
right_pad = kernel_size - 1 + output_padding
else:
if output_padding is None:
# When output_padding is None, we want the shape of the output to
# be `input * s`, therefore a total padding of
# `stride + kernel_size - 2`
pad_len = stride + kernel_size - 2
else:
# When output_padding is filled, we want the shape of the output to
# be `(input-1)*stride + kernel_size%2 + output_padding`
pad_len = kernel_size + kernel_size % 2 - 2 + output_padding
left_pad = min(pad_len // 2 + pad_len % 2, kernel_size - 1)
right_pad = pad_len - left_pad
return left_pad, right_pad
def _convert_conv_transpose_padding_args_from_keras_to_torch(
kernel_size, stride, dilation_rate, padding, output_padding
):
"""Convert the padding arguments from Keras to the ones used by Torch.
Torch starts with an output shape of `(input-1) * stride + kernel_size`,
then removes `torch_padding` from both sides, and adds
`torch_output_padding` on the right.
Because in Torch the output_padding can only be added to the right,
consistency with Tensorflow is not always possible. In particular this is
the case when both the Torch padding and output_padding values are
strictly positive.
"""
assert padding.lower() in {"valid", "same"}
original_kernel_size = kernel_size
kernel_size = (kernel_size - 1) * dilation_rate + 1
if padding.lower() == "valid":
# If output_padding is None, we fill it so that the shape of the output
# is `(i-1)*s + max(k, s)`
output_padding = (
max(kernel_size, stride) - kernel_size
if output_padding is None
else output_padding
)
torch_padding = 0
torch_output_padding = output_padding
else:
# When output_padding is None, we want the shape of the output to be
# `input * s`, otherwise we use the value provided.
output_padding = (
stride - kernel_size % 2
if output_padding is None
else output_padding
)
torch_padding = max(
-((kernel_size % 2 - kernel_size + output_padding) // 2), 0
)
torch_output_padding = (
2 * torch_padding + kernel_size % 2 - kernel_size + output_padding
)
if torch_padding > 0 and torch_output_padding > 0:
warnings.warn(
f"You might experience inconsistencies across backends when "
f"calling conv transpose with kernel_size={original_kernel_size}, "
f"stride={stride}, dilation_rate={dilation_rate}, "
f"padding={padding}, output_padding={output_padding}."
)
if torch_output_padding >= stride:
warnings.warn(
f"Torch backend requires output_padding < stride. "
f"Clamping output_padding {torch_output_padding} -> {stride - 1} "
f"for stride {stride}.",
UserWarning,
)
torch_output_padding = stride - 1
return torch_padding, torch_output_padding
def compute_conv_transpose_padding_args_for_jax(
    input_shape,
    kernel_shape,
    strides,
    padding,
    output_padding,
    dilation_rate,
):
    """Build the per-spatial-dimension `(left, right)` padding list that
    JAX expects for conv transpose, from Keras-style arguments.

    Scalar `strides` / `dilation_rate` / `output_padding` are broadcast
    over all spatial dimensions.
    """
    num_spatial_dims = len(input_shape) - 2
    spatial_kernel = kernel_shape[:-2]

    jax_padding = []
    for dim in range(num_spatial_dims):
        out_pad_dim = (
            output_padding
            if output_padding is None or isinstance(output_padding, int)
            else output_padding[dim]
        )
        stride_dim = strides if isinstance(strides, int) else strides[dim]
        dilation_dim = (
            dilation_rate
            if isinstance(dilation_rate, int)
            else dilation_rate[dim]
        )
        jax_padding.append(
            _convert_conv_transpose_padding_args_from_keras_to_jax(
                kernel_size=spatial_kernel[dim],
                stride=stride_dim,
                dilation_rate=dilation_dim,
                padding=padding,
                output_padding=out_pad_dim,
            )
        )
    return jax_padding
def compute_conv_transpose_padding_args_for_torch(
    input_shape,
    kernel_shape,
    strides,
    padding,
    output_padding,
    dilation_rate,
):
    """Build per-spatial-dimension Torch `padding` / `output_padding`
    lists for conv transpose from Keras-style arguments.

    Scalar `strides` / `dilation_rate` / `output_padding` are broadcast
    over all spatial dimensions.

    Returns:
        Tuple `(torch_paddings, torch_output_paddings)`, one entry per
        spatial dimension. Every `output_padding` entry already satisfies
        Torch's `output_padding < stride` constraint: the per-dimension
        converter clamps it (and warns) when needed.
    """
    num_spatial_dims = len(input_shape) - 2
    kernel_spatial_shape = kernel_shape[:-2]

    torch_paddings = []
    torch_output_paddings = []
    for i in range(num_spatial_dims):
        output_padding_i = (
            output_padding
            if output_padding is None or isinstance(output_padding, int)
            else output_padding[i]
        )
        strides_i = strides if isinstance(strides, int) else strides[i]
        dilation_rate_i = (
            dilation_rate
            if isinstance(dilation_rate, int)
            else dilation_rate[i]
        )
        (
            torch_padding,
            torch_output_padding,
        ) = _convert_conv_transpose_padding_args_from_keras_to_torch(
            kernel_size=kernel_spatial_shape[i],
            stride=strides_i,
            dilation_rate=dilation_rate_i,
            padding=padding,
            output_padding=output_padding_i,
        )
        torch_paddings.append(torch_padding)
        torch_output_paddings.append(torch_output_padding)

    # NOTE: no extra clamping here. The converter above already clamps
    # each output_padding to `stride - 1` (warning when it does), so the
    # former re-clamp to `max(0, stride - 1)` was a no-op and has been
    # removed.
    return torch_paddings, torch_output_paddings
def _get_output_shape_given_tf_padding(
input_size, kernel_size, strides, padding, output_padding, dilation_rate
):
if input_size is None:
return None
assert padding.lower() in {"valid", "same"}
kernel_size = (kernel_size - 1) * dilation_rate + 1
if padding.lower() == "valid":
output_padding = (
max(kernel_size, strides) - kernel_size
if output_padding is None
else output_padding
)
return (input_size - 1) * strides + kernel_size + output_padding
else:
if output_padding is None:
return input_size * strides
else:
return (input_size - 1) * strides + kernel_size % 2 + output_padding
def compute_conv_transpose_output_shape(
    input_shape,
    kernel_size,
    filters,
    strides,
    padding,
    output_padding=None,
    data_format="channels_last",
    dilation_rate=1,
):
    """Compute the full output shape of a conv transpose op.

    `strides`, `dilation_rate` and `output_padding` may be scalars; they
    are broadcast over the spatial dimensions. `kernel_size` is expected
    to be a sequence of per-dimension sizes.
    """
    num_spatial_dims = len(input_shape) - 2

    # Broadcast scalar arguments across all spatial dimensions.
    if isinstance(output_padding, int):
        output_padding = (output_padding,) * len(kernel_size)
    if isinstance(strides, int):
        strides = (strides,) * num_spatial_dims
    if isinstance(dilation_rate, int):
        dilation_rate = (dilation_rate,) * num_spatial_dims

    # Spatial dims sit between batch and channels (channels_last) or
    # after batch + channels (channels_first).
    if data_format == "channels_last":
        spatial_input = input_shape[1:-1]
    else:
        spatial_input = input_shape[2:]

    spatial_output = [
        _get_output_shape_given_tf_padding(
            input_size=spatial_input[i],
            kernel_size=kernel_size[i],
            strides=strides[i],
            padding=padding,
            output_padding=(
                None if output_padding is None else output_padding[i]
            ),
            dilation_rate=dilation_rate[i],
        )
        for i in range(num_spatial_dims)
    ]

    if data_format == "channels_last":
        return [input_shape[0]] + spatial_output + [filters]
    return [input_shape[0], filters] + spatial_output
def canonicalize_axis(axis, num_dims):
    """Canonicalize an axis in [-num_dims, num_dims) to [0, num_dims)."""
    axis = operator.index(axis)  # reject non-integer axes early
    if axis < -num_dims or axis >= num_dims:
        raise ValueError(
            f"axis {axis} is out of bounds for an array with dimension "
            f"{num_dims}."
        )
    return axis + num_dims if axis < 0 else axis
def standardize_axis_for_numpy(axis):
    """Standardize an axis to a tuple if it is a list in the numpy backend."""
    if isinstance(axis, list):
        return tuple(axis)
    return axis
def to_tuple_or_list(value):
    """Convert the non-`None` value to either a tuple or a list.

    `None` passes through unchanged; an `int` becomes a 1-tuple; tuples
    and lists are returned as-is. Anything else raises `ValueError`.
    """
    if value is None or isinstance(value, (tuple, list)):
        return value
    if isinstance(value, int):
        return (value,)
    raise ValueError(
        "`value` must be an integer, tuple or list. "
        f"Received: value={value}"
    )
### Code for ops.vectorize() used for TF and torch backends.
# See http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html
_DIMENSION_NAME = r"\w+"
_CORE_DIMENSION_LIST = "(?:{0:}(?:,{0:})*)?".format(_DIMENSION_NAME)
_ARGUMENT = rf"\({_CORE_DIMENSION_LIST}\)"
_ARGUMENT_LIST = "{0:}(?:,{0:})*".format(_ARGUMENT)
_SIGNATURE = "^{0:}->{0:}$".format(_ARGUMENT_LIST)
def _vectorize_parse_gufunc_signature(
signature,
):
if not re.match(_SIGNATURE, signature):
raise ValueError(f"not a valid gufunc signature: {signature}")
args, retvals = (
[
tuple(re.findall(_DIMENSION_NAME, arg))
for arg in re.findall(_ARGUMENT, arg_list)
]
for arg_list in signature.split("->")
)
return args, retvals
def _vectorize_update_dim_sizes(dim_sizes, shape, core_dims, is_input=True):
num_core_dims = len(core_dims)
if is_input:
if len(shape) < num_core_dims:
raise ValueError(
f"input with shape {shape} does not "
"have enough dimensions for all core "
f"dimensions {core_dims}"
)
else:
if len(shape) != num_core_dims:
raise ValueError(
f"output shape {shape} does not "
f"match core dimensions {core_dims}"
)
core_shape = shape[-num_core_dims:] if core_dims else ()
for dim, size in zip(core_dims, core_shape):
if dim not in dim_sizes:
dim_sizes[dim] = size
elif size != dim_sizes[dim]:
raise ValueError(
f"inconsistent size for core dimension {dim}: "
f"{size} vs {dim_sizes[dim]}"
)
def _vectorize_parse_input_dimensions(
    args,
    input_core_dims,
):
    """Validate input core dims and compute the broadcasted loop shape.

    Returns:
        Tuple `(broadcast_shape, dim_sizes)`: the broadcast of all
        non-core (loop) shapes, and a mapping of core-dimension names to
        their sizes.
    """
    from keras.src import ops

    if len(args) != len(input_core_dims):
        raise TypeError(
            "wrong number of positional arguments: "
            f"expected {len(input_core_dims)}, got {len(args)}"
        )
    dim_sizes = {}
    loop_shapes = []
    for arg, core_dims in zip(args, input_core_dims):
        _vectorize_update_dim_sizes(
            dim_sizes, arg.shape, core_dims, is_input=True
        )
        num_loop_dims = arg.ndim - len(core_dims)
        loop_shapes.append(arg.shape[:num_loop_dims])

    broadcast_shape = loop_shapes[0]
    for loop_shape in loop_shapes:
        broadcast_shape = ops.broadcast_shapes(broadcast_shape, loop_shape)
    return broadcast_shape, dim_sizes
def _vectorize_check_output_dims(
    func,
    dim_sizes,
    expected_output_core_dims,
):
    """Wrap `func` so its outputs are validated against the core dims."""
    from keras.src import ops

    def wrapped(*args):
        out = func(*args)
        if isinstance(out, (list, tuple)):
            out_shapes = [ops.shape(x) for x in out]
        else:
            out_shapes = [out.shape]

        if expected_output_core_dims is None:
            # No signature: every output is treated as having no core dims.
            output_core_dims = [()] * len(out_shapes)
        else:
            output_core_dims = expected_output_core_dims
            if len(output_core_dims) > 1 and not isinstance(out, tuple):
                raise TypeError(
                    "output must be a tuple when multiple outputs "
                    f"are expected, got: {out}"
                )
            if len(out_shapes) != len(output_core_dims):
                raise TypeError(
                    "wrong number of output arguments: "
                    f"expected {len(output_core_dims)}, got {len(out_shapes)}"
                )

        # Validate against a copy so the caller's dim_sizes is untouched.
        sizes = dict(dim_sizes)
        for shape, core_dims in zip(out_shapes, output_core_dims):
            _vectorize_update_dim_sizes(sizes, shape, core_dims, is_input=False)
        return out

    return wrapped
def _vectorize_apply_excluded(func, excluded, args, kwargs):
if not excluded:
return func, args, kwargs
dynamic_args = [arg for i, arg in enumerate(args) if i not in excluded]
dynamic_kwargs = {
key: val for key, val in kwargs.items() if key not in excluded
}
static_args = [
(i, args[i])
for i in sorted(e for e in excluded if isinstance(e, int))
if i < len(args)
]
static_kwargs = {key: val for key, val in kwargs.items() if key in excluded}
def new_func(*args, **kwargs):
args = list(args)
for i, arg in static_args:
args.insert(i, arg)
return func(*args, **kwargs, **static_kwargs)
return new_func, dynamic_args, dynamic_kwargs
def vectorize_impl(pyfunc, vmap_fn, *, excluded=None, signature=None):
    """Implementation adapted from JAX and NumPy.

    Vectorizes `pyfunc` over its leading (loop) dimensions using the
    backend-specific `vmap_fn`.

    Args:
        pyfunc: Function to vectorize.
        vmap_fn: Backend `vmap`-style mapping function; called as
            `vmap_fn(func, in_axes)`.
        excluded: Optional collection of positional indices and keyword
            names passed through to `pyfunc` unvectorized.
        signature: Optional NumPy gufunc signature, e.g.
            `"(m,n),(n)->(m)"`.

    Returns:
        The vectorized wrapper around `pyfunc`.
    """
    from keras.src import ops

    # Bug fix: this used to read `excluded = None or set()`, which always
    # discarded the caller-provided `excluded` argument.
    excluded = set() if excluded is None else set(excluded)

    @functools.wraps(pyfunc)
    def wrapped(*args, **kwargs):
        excluded_func, args, kwargs = _vectorize_apply_excluded(
            pyfunc, excluded, args, kwargs
        )

        if signature is not None:
            input_core_dims, output_core_dims = (
                _vectorize_parse_gufunc_signature(signature)
            )
        else:
            input_core_dims = [()] * len(args)
            output_core_dims = None

        none_args = {i for i, arg in enumerate(args) if arg is None}
        # Bug fix: `any(none_args)` is False for `{0}` because 0 is falsy;
        # test the set's truthiness instead.
        if none_args:
            if any(input_core_dims[i] != () for i in none_args):
                raise ValueError(
                    f"Cannot pass None at locations {none_args} "
                    f"with signature={signature}"
                )
            # Treat `None` arguments as static (excluded) values.
            excluded_func, args, _ = _vectorize_apply_excluded(
                excluded_func, none_args, args, {}
            )
            input_core_dims = [
                dim
                for i, dim in enumerate(input_core_dims)
                if i not in none_args
            ]

        args = tuple(map(ops.convert_to_tensor, args))

        broadcast_shape, dim_sizes = _vectorize_parse_input_dimensions(
            args, input_core_dims
        )
        checked_func = _vectorize_check_output_dims(
            excluded_func, dim_sizes, output_core_dims
        )
        squeezed_args = []
        rev_filled_shapes = []
        for arg, core_dims in zip(args, input_core_dims):
            # Loop (non-core) shape, left-padded with 1s to the broadcast
            # rank, then reversed so trailing axes align across args.
            noncore_shape = arg.shape[: arg.ndim - len(core_dims)]

            pad_ndim = len(broadcast_shape) - len(noncore_shape)
            filled_shape = pad_ndim * (1,) + noncore_shape
            rev_filled_shapes.append(filled_shape[::-1])

            # Squeeze out size-1 loop dims; they are broadcast, not mapped.
            squeeze_indices = tuple(
                i for i, size in enumerate(noncore_shape) if size == 1
            )
            squeezed_arg = ops.squeeze(arg, axis=squeeze_indices)
            squeezed_args.append(squeezed_arg)

        vectorized_func = checked_func
        dims_to_expand = []
        for negdim, axis_sizes in enumerate(zip(*rev_filled_shapes)):
            in_axes = tuple(None if size == 1 else 0 for size in axis_sizes)
            if all(axis is None for axis in in_axes):
                # Fully-broadcast axis: restore it on the result instead
                # of mapping over it.
                dims_to_expand.append(len(broadcast_shape) - 1 - negdim)
            else:
                # One vmap level per mapped axis.
                vectorized_func = vmap_fn(vectorized_func, in_axes)
        result = vectorized_func(*squeezed_args)

        if not dims_to_expand:
            return result
        elif isinstance(result, tuple):
            return tuple(
                ops.expand_dims(r, axis=dims_to_expand) for r in result
            )
        else:
            return ops.expand_dims(result, axis=dims_to_expand)

    return wrapped
def slice_along_axis(x, start=0, stop=None, step=1, axis=0):
    """Slice a Tensor along the given axis.

    Applies `slice(start, stop, step)` at `axis`, leaving all other axes
    untouched. (Same utility as defined in `tfp.math.scan_associative`.)
    """
    target = slice(start, stop, step)
    if axis >= 0:
        # Full slices for every leading axis, then the target slice.
        index = tuple([slice(None)] * axis + [target])
    else:
        # Count from the end: Ellipsis, the target slice, then full
        # slices for the remaining trailing axes.
        index = tuple([Ellipsis, target] + [slice(None)] * (-1 - axis))
    return x[index]
def compute_adaptive_pooling_window_sizes(input_dim, output_dim):
    """Compute small and big window sizes for adaptive pooling.

    Returns:
        Tuple `(small, small + 1)` with `small = ceil(input_dim / output_dim)`.
    """
    small_window = math.ceil(input_dim / output_dim)
    return small_window, small_window + 1
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/common/keras_tensor_test.py | keras/src/backend/common/keras_tensor_test.py | from unittest.mock import Mock
from unittest.mock import patch
import numpy as np
import tensorflow as tf
from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.backend.common import keras_tensor
class KerasTensorTest(testing.TestCase):
    """Tests for `KerasTensor`: attribute immutability, numpy-style methods,
    invalid eager usages, and operator overloads dispatching to symbolic
    ops (checked via mocked `symbolic_call`s)."""

    def test_attributes(self):
        x = keras_tensor.KerasTensor(shape=(3,), dtype="float32", sparse=True)
        self.assertEqual(x.dtype, "float32")
        self.assertEqual(x.shape, (3,))
        self.assertEqual(x.sparse, True)

        # Raise error if trying to set attributes
        with self.assertRaisesRegex(
            AttributeError, "The `shape` attribute of KerasTensor is immutable."
        ):
            x.shape = [3, 2]
        with self.assertRaisesRegex(
            AttributeError, "The `dtype` attribute of KerasTensor is immutable."
        ):
            x.dtype = "int32"

    def test_attributes_sparse(self):
        x = keras_tensor.KerasTensor(shape=(3,), dtype="float32", sparse=True)
        self.assertEqual(x.sparse, True)

        # Raise error if trying to set attributes
        with self.assertRaisesRegex(
            AttributeError,
            "The `sparse` attribute of KerasTensor is immutable.",
        ):
            x.sparse = False

    def test_attributes_ragged(self):
        x = keras_tensor.KerasTensor(shape=(3,), dtype="float32", ragged=True)
        self.assertEqual(x.ragged, True)

        # Raise error if trying to set attributes
        with self.assertRaisesRegex(
            AttributeError,
            "The `ragged` attribute of KerasTensor is immutable.",
        ):
            x.ragged = False

    def test_init_sparse_ragged_raises(self):
        with self.assertRaisesRegex(
            ValueError, "cannot have `sparse=True` and `ragged=True`"
        ):
            keras_tensor.KerasTensor(shape=(3,), sparse=True, ragged=True)

    def test_numpy_methods(self):
        x = keras_tensor.KerasTensor(shape=(3, 2), dtype="float32")

        # reshape
        x = x.reshape((6,))
        self.assertEqual(x.shape, (6,))

        # expand_dims, squeeze
        x = ops.expand_dims(x, -1)
        self.assertEqual(x.shape, (6, 1))
        x = x.squeeze()
        self.assertEqual(x.shape, (6,))
        x = ops.expand_dims(x, axis=0)
        self.assertEqual(x.shape, (1, 6))
        x = x.squeeze(axis=0)
        self.assertEqual(x.shape, (6,))

    def test_invalid_usage(self):
        x = keras_tensor.KerasTensor(shape=(3,), dtype="float32")
        with self.assertRaisesRegex(
            ValueError, "doesn't have any actual numerical value"
        ):
            np.array(x)

        if backend.backend() == "jax":
            from jax import numpy as jnp

            with self.assertRaisesRegex(
                ValueError, "cannot be used as input to a JAX function"
            ):
                jnp.array(x)

        with self.assertRaisesRegex(
            ValueError, "cannot be used as input to a TensorFlow function"
        ):
            tf.convert_to_tensor(x)

    def test_bool(self):
        tensor = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
        with self.assertRaisesRegex(TypeError, "cannot be used as a boolean."):
            bool(tensor)

    def test_representation(self):
        x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
        self.assertIn("<KerasTensor shape=(3, 4)", repr(x))

    def test_iterating(self):
        x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
        with self.assertRaises(NotImplementedError):
            iter(x)

    def test_any_symbolic_tensors(self):
        x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
        y = np.array([1, 2, 3])
        self.assertTrue(keras_tensor.any_symbolic_tensors(args=[x, y]))
        self.assertFalse(keras_tensor.any_symbolic_tensors(args=[y]))

    def test_is_keras_tensor(self):
        x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
        self.assertTrue(keras_tensor.is_keras_tensor(x))
        y = np.array([1, 2, 3])
        self.assertFalse(keras_tensor.is_keras_tensor(y))

    @patch("keras.src.ops.Absolute.symbolic_call")
    def test_abs_method(self, mock_symbolic_call):
        mock_tensor = Mock()
        mock_symbolic_call.return_value = mock_tensor
        x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
        abs_x = abs(x)  # this will internally call x.__abs__()
        mock_symbolic_call.assert_called_once_with(x)
        self.assertEqual(abs_x, mock_tensor)

    @patch("keras.src.ops.Negative.symbolic_call")
    def test_neg_method(self, mock_method):
        self._test_unary_op_method(mock_method, lambda x: -x)

    @patch("keras.src.ops.Subtract.symbolic_call")
    def test_sub_method(self, mock_method):
        y = Mock()
        self._test_binary_op_method(mock_method, y, lambda x, y: x - y)

    @patch("keras.src.ops.Multiply.symbolic_call")
    def test_mul_method(self, mock_method):
        y = Mock()
        self._test_binary_op_method(mock_method, y, lambda x, y: x * y)

    @patch("keras.src.ops.Matmul.symbolic_call")
    def test_matmul_method(self, mock_method):
        y = Mock()
        self._test_binary_op_method(mock_method, y, lambda x, y: x @ y)

    @patch("keras.src.ops.Power.symbolic_call")
    def test_pow_method(self, mock_method):
        y = Mock()
        self._test_binary_op_method(mock_method, y, lambda x, y: x**y)

    @patch("keras.src.ops.Mod.symbolic_call")
    def test_mod_method(self, mock_method):
        y = Mock()
        self._test_binary_op_method(mock_method, y, lambda x, y: x % y)

    @patch("keras.src.ops.Less.symbolic_call")
    def test_lt_method(self, mock_method):
        y = Mock()
        self._test_binary_op_method(mock_method, y, lambda x, y: x < y)

    @patch("keras.src.ops.LogicalAnd.symbolic_call")
    def test_and_method(self, mock_method):
        y = Mock()
        self._test_binary_op_method(mock_method, y, lambda x, y: x & y)

    @patch("keras.src.ops.LogicalOr.symbolic_call")
    def test_or_method(self, mock_method):
        y = Mock()
        self._test_binary_op_method(mock_method, y, lambda x, y: x | y)

    @patch("keras.src.ops.GetItem.symbolic_call")
    def test_getitem_method(self, mock_method):
        y = Mock()
        self._test_binary_op_method(mock_method, y, lambda x, y: x[y])

    # Shared drivers for the mocked operator tests above.
    def _test_unary_op_method(self, mock_method, operator):
        mock_tensor = Mock()
        mock_method.return_value = mock_tensor
        x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
        result = operator(x)
        mock_method.assert_called_once_with(x)
        self.assertEqual(result, mock_tensor)

    def _test_binary_op_method(self, mock_method, other, operator):
        mock_tensor = Mock()
        mock_method.return_value = mock_tensor
        x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
        result = operator(x, other)
        mock_method.assert_called_once_with(x, other)
        self.assertEqual(result, mock_tensor)

    @patch("keras.src.ops.Add.symbolic_call")
    def test_radd_method(self, mock_symbolic_call):
        """Test __radd__ method"""
        mock_tensor = Mock()
        mock_symbolic_call.return_value = mock_tensor
        x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
        y = Mock()
        result = y + x
        mock_symbolic_call.assert_called_once_with(y, x)
        self.assertEqual(result, mock_tensor)

    @patch("keras.src.ops.Subtract.symbolic_call")
    def test_rsub_method(self, mock_symbolic_call):
        """Test __rsub__ method"""
        mock_tensor = Mock()
        mock_symbolic_call.return_value = mock_tensor
        x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
        y = Mock()
        result = y - x
        mock_symbolic_call.assert_called_once_with(y, x)
        self.assertEqual(result, mock_tensor)

    @patch("keras.src.ops.Multiply.symbolic_call")
    def test_rmul_method(self, mock_symbolic_call):
        """Test __rmul__ method"""
        mock_tensor = Mock()
        mock_symbolic_call.return_value = mock_tensor
        x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
        y = Mock()
        result = y * x
        mock_symbolic_call.assert_called_once_with(y, x)
        self.assertEqual(result, mock_tensor)

    @patch("keras.src.ops.Matmul.symbolic_call")
    def test_rmatmul_method(self, mock_symbolic_call):
        """Test __rmatmul__ method"""
        mock_tensor = Mock()
        mock_symbolic_call.return_value = mock_tensor
        x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
        y = Mock()
        result = y @ x
        mock_symbolic_call.assert_called_once_with(y, x)
        self.assertEqual(result, mock_tensor)

    @patch("keras.src.ops.Power.symbolic_call")
    def test_rpow_method(self, mock_symbolic_call):
        """Test __rpow__ method"""
        mock_tensor = Mock()
        mock_symbolic_call.return_value = mock_tensor
        x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
        y = Mock()
        result = y**x
        mock_symbolic_call.assert_called_once_with(y, x)
        self.assertEqual(result, mock_tensor)

    @patch("keras.src.ops.FloorDivide.symbolic_call")
    def test_floordiv_method(self, mock_symbolic_call):
        """Test __floordiv__ method"""
        mock_tensor = Mock()
        mock_symbolic_call.return_value = mock_tensor
        x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
        y = Mock()
        result = x // y
        mock_symbolic_call.assert_called_once_with(x, y)
        self.assertEqual(result, mock_tensor)

    @patch("keras.src.ops.FloorDivide.symbolic_call")
    def test_rfloordiv_method(self, mock_symbolic_call):
        """Test __rfloordiv__ method"""
        mock_tensor = Mock()
        mock_symbolic_call.return_value = mock_tensor
        x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
        y = Mock()
        result = y // x
        mock_symbolic_call.assert_called_once_with(y, x)
        self.assertEqual(result, mock_tensor)

    @patch("keras.src.ops.Mod.symbolic_call")
    def test_rmod_method(self, mock_symbolic_call):
        """Test __rmod__ method"""
        mock_tensor = Mock()
        mock_symbolic_call.return_value = mock_tensor
        x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
        y = Mock()
        result = y % x
        mock_symbolic_call.assert_called_once_with(y, x)
        self.assertEqual(result, mock_tensor)

    @patch("keras.src.ops.LessEqual.symbolic_call")
    def test_le_method(self, mock_symbolic_call):
        """Test __le__ method"""
        mock_tensor = Mock()
        mock_symbolic_call.return_value = mock_tensor
        x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
        y = Mock()
        result = x <= y
        mock_symbolic_call.assert_called_once_with(x, y)
        self.assertEqual(result, mock_tensor)

    @patch("keras.src.ops.Greater.symbolic_call")
    def test_gt_method(self, mock_symbolic_call):
        """Test __gt__ method"""
        mock_tensor = Mock()
        mock_symbolic_call.return_value = mock_tensor
        x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
        y = Mock()
        result = x > y
        mock_symbolic_call.assert_called_once_with(x, y)
        self.assertEqual(result, mock_tensor)

    @patch("keras.src.ops.GreaterEqual.symbolic_call")
    def test_ge_method(self, mock_symbolic_call):
        """Test __ge__ method"""
        mock_tensor = Mock()
        mock_symbolic_call.return_value = mock_tensor
        x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
        y = Mock()
        result = x >= y
        mock_symbolic_call.assert_called_once_with(x, y)
        self.assertEqual(result, mock_tensor)

    @patch("keras.src.ops.NotEqual.symbolic_call")
    def test_ne_method(self, mock_symbolic_call):
        """Test __ne__ method"""
        mock_tensor = Mock()
        mock_symbolic_call.return_value = mock_tensor
        x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
        y = Mock()
        result = x != y
        mock_symbolic_call.assert_called_once_with(x, y)
        self.assertEqual(result, mock_tensor)

    @patch("keras.src.ops.LogicalAnd.symbolic_call")
    def test_rand_method(self, mock_symbolic_call):
        """Test __rand__ method"""
        mock_tensor = Mock()
        mock_symbolic_call.return_value = mock_tensor
        x = keras_tensor.KerasTensor(shape=(3, 4), dtype="bool")
        y = Mock()
        result = y & x
        mock_symbolic_call.assert_called_once_with(y, x)
        self.assertEqual(result, mock_tensor)

    @patch("keras.src.ops.LogicalOr.symbolic_call")
    def test_ror_method(self, mock_symbolic_call):
        """Test __ror__ method"""
        mock_tensor = Mock()
        mock_symbolic_call.return_value = mock_tensor
        x = keras_tensor.KerasTensor(shape=(3, 4), dtype="bool")
        y = Mock()
        result = y | x
        mock_symbolic_call.assert_called_once_with(y, x)
        self.assertEqual(result, mock_tensor)

    @patch("keras.src.ops.LogicalNot.symbolic_call")
    def test_invert_method(self, mock_symbolic_call):
        """Test __invert__ method"""
        mock_tensor = Mock()
        mock_symbolic_call.return_value = mock_tensor
        x = keras_tensor.KerasTensor(shape=(3, 4), dtype="bool")
        result = ~x
        mock_symbolic_call.assert_called_once_with(x)
        self.assertEqual(result, mock_tensor)

    @patch("keras.src.ops.LogicalXor.symbolic_call")
    def test_xor_method(self, mock_symbolic_call):
        """Test __xor__ method"""
        mock_tensor = Mock()
        mock_symbolic_call.return_value = mock_tensor
        x = keras_tensor.KerasTensor(shape=(3, 4), dtype="bool")
        y = Mock()
        result = x ^ y
        mock_symbolic_call.assert_called_once_with(x, y)
        self.assertEqual(result, mock_tensor)

    @patch("keras.src.ops.LogicalXor.symbolic_call")
    def test_rxor_method(self, mock_symbolic_call):
        """Test __rxor__ method"""
        mock_tensor = Mock()
        mock_symbolic_call.return_value = mock_tensor
        x = keras_tensor.KerasTensor(shape=(3, 4), dtype="bool")
        y = Mock()
        result = y ^ x
        mock_symbolic_call.assert_called_once_with(y, x)
        self.assertEqual(result, mock_tensor)

    @patch("keras.src.ops.TrueDivide.symbolic_call")
    def test_truediv_method(self, mock_symbolic_call):
        """Test __truediv__ method"""
        mock_tensor = Mock()
        mock_symbolic_call.return_value = mock_tensor
        x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
        y = Mock()
        result = x / y
        mock_symbolic_call.assert_called_once_with(x, y)
        self.assertEqual(result, mock_tensor)

    @patch("keras.src.ops.TrueDivide.symbolic_call")
    def test_rtruediv_method(self, mock_symbolic_call):
        """Test __rtruediv__ method"""
        mock_tensor = Mock()
        mock_symbolic_call.return_value = mock_tensor
        x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
        y = Mock()
        result = y / x
        mock_symbolic_call.assert_called_once_with(y, x)
        self.assertEqual(result, mock_tensor)

    @patch("keras.src.ops.Divide.symbolic_call")
    def test_div_method(self, mock_symbolic_call):
        """Test __div__ method"""
        mock_tensor = Mock()
        mock_symbolic_call.return_value = mock_tensor
        x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
        y = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
        # to ensure compatibility across Python versions
        result = x.__div__(y)
        mock_symbolic_call.assert_called_once_with(x, y)
        self.assertEqual(result, mock_tensor)

    @patch("keras.src.ops.Divide.symbolic_call")
    def test_rdiv_method(self, mock_symbolic_call):
        """Test __rdiv__ method"""
        mock_tensor = Mock()
        mock_symbolic_call.return_value = mock_tensor
        x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
        y = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
        # to ensure compatibility across Python versions
        result = x.__rdiv__(y)
        mock_symbolic_call.assert_called_once_with(y, x)
        self.assertEqual(result, mock_tensor)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/common/thread_safe_test.py | keras/src/backend/common/thread_safe_test.py | import concurrent
import numpy as np
from keras.src import backend
from keras.src import ops
from keras.src import testing
class TestThreadSafe(testing.TestCase):
    """Stress test: concurrent op execution must not raise on thread-safe
    backends."""

    def test_is_thread_safe(self):
        # Only meaningful for backends that declare thread safety.
        if backend.IS_THREAD_SAFE:
            executor = concurrent.futures.ThreadPoolExecutor()

            def sum(x, axis):
                return ops.sum(x, axis=axis)

            futures = []

            for i in range(10000):
                futures.clear()
                # Interleave reductions over tensors of different ranks.
                x = ops.convert_to_tensor(np.random.rand(100, 100))
                futures.append(executor.submit(sum, x, 1))
                x = ops.convert_to_tensor(np.random.rand(100))
                futures.append(executor.submit(sum, x, 0))
                concurrent.futures.wait(
                    futures, return_when=concurrent.futures.ALL_COMPLETED
                )
                # Re-raise any exception captured by the worker threads.
                [future.result() for future in futures]
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/common/masking_test.py | keras/src/backend/common/masking_test.py | from keras.src import backend
from keras.src import ops
from keras.src import testing
from keras.src.backend.common.masking import get_keras_mask
from keras.src.backend.common.masking import set_keras_mask
class MaskingTest(testing.TestCase):
    """Tests for `set_keras_mask` / `get_keras_mask` on eager and tracer
    tensors."""

    def test_mask_on_eager_tensor(self):
        x = ops.zeros((2, 3))
        self.assertIsNone(get_keras_mask(x))

        # Clearing a mask that was never set is a no-op.
        set_keras_mask(x, None)
        self.assertIsNone(get_keras_mask(x))

        mask = ops.ones((2, 3))
        set_keras_mask(x, mask)
        self.assertIs(get_keras_mask(x), mask)

        set_keras_mask(x, None)
        self.assertIsNone(get_keras_mask(x))
        set_keras_mask(x, None)
        self.assertIsNone(get_keras_mask(x))

    def test_mask_on_tracer_tensor(self):
        # Same sequence, but inside a symbolic trace.
        def fn(x):
            self.assertIsNone(get_keras_mask(x))

            set_keras_mask(x, None)
            self.assertIsNone(get_keras_mask(x))

            mask = ops.ones((2, 3))
            set_keras_mask(x, mask)
            self.assertIs(get_keras_mask(x), mask)

            set_keras_mask(x, None)
            self.assertIsNone(get_keras_mask(x))
            set_keras_mask(x, None)  # key is now deleted, should be a no-op
            self.assertIsNone(get_keras_mask(x))

        backend.compute_output_spec(fn, backend.KerasTensor((2, 3)))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/common/compute_output_spec_test.py | keras/src/backend/common/compute_output_spec_test.py | import pytest
from keras.src import backend
from keras.src import testing
def example_fn(x):
    """Toy computation used to exercise symbolic shape inference.

    Shifts and rescales `x`, then stacks it with itself along a new
    trailing axis, so an input of shape (a, b) yields (a, b, 2).
    """
    x = (x + 2) * backend.numpy.ones_like(x)
    x = backend.numpy.stack([x, x], axis=-1)
    return x
class ComputeOutputSpecTest(testing.TestCase):
    """Tests for `backend.compute_output_spec` shape/dtype inference."""

    def test_basics(self):
        # Fully static shape.
        out = backend.compute_output_spec(
            example_fn, backend.KerasTensor((2, 3))
        )
        self.assertIsInstance(out, backend.KerasTensor)
        self.assertEqual(out.shape, (2, 3, 2))

        # Dynamic batch dimension must be propagated as None.
        out = backend.compute_output_spec(
            example_fn, backend.KerasTensor((None, 3))
        )
        self.assertIsInstance(out, backend.KerasTensor)
        self.assertEqual(out.shape, (None, 3, 2))

        # Dynamic inner dimension.
        out = backend.compute_output_spec(
            example_fn, backend.KerasTensor((2, None))
        )
        self.assertIsInstance(out, backend.KerasTensor)
        self.assertEqual(out.shape, (2, None, 2))

    @pytest.mark.skipif(
        backend.backend() != "torch", reason="Only applicable for torch"
    )
    def test_torch_meta_device_incompatible_ops(self):
        # Some ops fail on torch's "meta" device; compute_output_spec must
        # fall back gracefully. `canary` proves the meta path was attempted.
        class Container:
            def __init__(self):
                self.canary = False

            def example_meta_fn(self, x):
                y = backend.numpy.ones(x.shape)
                if str(y.device) == "meta":
                    self.canary = True
                    raise ValueError("Erroring out on meta device")
                x = (x + 2) * y
                x = backend.numpy.stack([x, x], axis=-1)
                return x

        instance = Container()
        out = backend.compute_output_spec(
            instance.example_meta_fn, backend.KerasTensor((2, 3))
        )
        self.assertIsInstance(out, backend.KerasTensor)
        self.assertTrue(instance.canary)
        self.assertEqual(out.shape, (2, 3, 2))

        instance = Container()
        out = backend.compute_output_spec(
            instance.example_meta_fn, backend.KerasTensor((2, None))
        )
        self.assertIsInstance(out, backend.KerasTensor)
        self.assertTrue(instance.canary)
        self.assertEqual(out.shape, (2, None, 2))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/common/backend_utils_test.py | keras/src/backend/common/backend_utils_test.py | from keras.src.backend.common.backend_utils import (
_convert_conv_transpose_padding_args_from_keras_to_jax,
)
from keras.src.backend.common.backend_utils import (
_convert_conv_transpose_padding_args_from_keras_to_torch,
)
from keras.src.backend.common.backend_utils import (
_get_output_shape_given_tf_padding,
)
from keras.src.backend.common.backend_utils import (
compute_conv_transpose_padding_args_for_jax,
)
from keras.src.backend.common.backend_utils import (
compute_conv_transpose_padding_args_for_torch,
)
from keras.src.testing import test_case
class ConvertConvTransposePaddingArgsJAXTest(test_case.TestCase):
    """Tests Keras->JAX conversion of conv-transpose padding arguments."""

    def test_valid_padding_without_output_padding(self):
        """Test conversion with 'valid' padding and no output padding"""
        (
            left_pad,
            right_pad,
        ) = _convert_conv_transpose_padding_args_from_keras_to_jax(
            kernel_size=3,
            stride=2,
            dilation_rate=1,
            padding="valid",
            output_padding=None,
        )
        self.assertEqual(left_pad, 2)
        self.assertEqual(right_pad, 2)

    def test_same_padding_without_output_padding(self):
        """Test conversion with 'same' padding and no output padding."""
        (
            left_pad,
            right_pad,
        ) = _convert_conv_transpose_padding_args_from_keras_to_jax(
            kernel_size=3,
            stride=2,
            dilation_rate=1,
            padding="same",
            output_padding=None,
        )
        self.assertEqual(left_pad, 2)
        self.assertEqual(right_pad, 1)
class ConvertConvTransposePaddingArgsTorchTest(test_case.TestCase):
    """Tests Keras->Torch conversion of conv-transpose padding arguments."""

    def test_valid_padding_without_output_padding(self):
        """Test conversion with 'valid' padding and no output padding"""
        (
            torch_padding,
            torch_output_padding,
        ) = _convert_conv_transpose_padding_args_from_keras_to_torch(
            kernel_size=3,
            stride=2,
            dilation_rate=1,
            padding="valid",
            output_padding=None,
        )
        self.assertEqual(torch_padding, 0)
        self.assertEqual(torch_output_padding, 0)

    def test_same_padding_without_output_padding(self):
        """Test conversion with 'same' padding and no output padding"""
        (
            torch_padding,
            torch_output_padding,
        ) = _convert_conv_transpose_padding_args_from_keras_to_torch(
            kernel_size=3,
            stride=2,
            dilation_rate=1,
            padding="same",
            output_padding=None,
        )
        self.assertEqual(torch_padding, 1)
        self.assertEqual(torch_output_padding, 1)
class ComputeConvTransposePaddingArgsForJAXTest(test_case.TestCase):
    """Tests per-spatial-dim JAX padding computation for conv transpose."""

    def test_valid_padding_without_output_padding(self):
        """Test computation with 'valid' padding and no output padding"""
        jax_padding = compute_conv_transpose_padding_args_for_jax(
            input_shape=(1, 5, 5, 3),
            kernel_shape=(3, 3, 3, 3),
            strides=2,
            padding="valid",
            output_padding=None,
            dilation_rate=1,
        )
        # One (left, right) pair per spatial dimension.
        self.assertEqual(jax_padding, [(2, 2), (2, 2)])

    def test_same_padding_without_output_padding(self):
        """Test computation with 'same' padding and no output padding"""
        jax_padding = compute_conv_transpose_padding_args_for_jax(
            input_shape=(1, 5, 5, 3),
            kernel_shape=(3, 3, 3, 3),
            strides=2,
            padding="same",
            output_padding=None,
            dilation_rate=1,
        )
        self.assertEqual(jax_padding, [(2, 1), (2, 1)])
class ComputeConvTransposePaddingArgsForTorchTest(test_case.TestCase):
    """Tests per-spatial-dim Torch padding computation for conv transpose."""

    def test_valid_padding_without_output_padding(self):
        """Test computation with 'valid' padding and no output padding"""
        (
            torch_paddings,
            torch_output_paddings,
        ) = compute_conv_transpose_padding_args_for_torch(
            input_shape=(1, 5, 5, 3),
            kernel_shape=(3, 3, 3, 3),
            strides=2,
            padding="valid",
            output_padding=None,
            dilation_rate=1,
        )
        self.assertEqual(torch_paddings, [0, 0])
        self.assertEqual(torch_output_paddings, [0, 0])

    def test_same_padding_without_output_padding(self):
        """Test computation with 'same' padding and no output padding"""
        (
            torch_paddings,
            torch_output_paddings,
        ) = compute_conv_transpose_padding_args_for_torch(
            input_shape=(1, 5, 5, 3),
            kernel_shape=(3, 3, 3, 3),
            strides=2,
            padding="same",
            output_padding=None,
            dilation_rate=1,
        )
        self.assertEqual(torch_paddings, [1, 1])
        self.assertEqual(torch_output_paddings, [1, 1])

    def test_valid_padding_with_none_output_padding(self):
        """Test conversion with 'valid' padding and no output padding"""
        (
            torch_padding,
            torch_output_padding,
        ) = _convert_conv_transpose_padding_args_from_keras_to_torch(
            kernel_size=3,
            stride=2,
            dilation_rate=1,
            padding="valid",
            output_padding=None,
        )
        self.assertEqual(torch_padding, 0)
        self.assertEqual(torch_output_padding, 0)

    def test_valid_padding_with_output_padding(self):
        """Test conversion with 'valid' padding and output padding for Torch."""
        (
            torch_padding,
            torch_output_padding,
        ) = _convert_conv_transpose_padding_args_from_keras_to_torch(
            kernel_size=3,
            stride=2,
            dilation_rate=1,
            padding="valid",
            output_padding=1,
        )
        self.assertEqual(torch_padding, 0)
        self.assertEqual(torch_output_padding, 1)

    def test_output_padding_clamped_for_torch_constraint(self):
        """Test that output_padding is clamped
        when >= stride (Torch constraint).
        """
        (
            torch_paddings,
            torch_output_paddings,
        ) = compute_conv_transpose_padding_args_for_torch(
            input_shape=(1, 8, 8, 8, 16),  # any shape
            kernel_shape=(2, 2, 2, 16, 32),  # Keras kernel shape
            strides=1,
            padding="same",
            output_padding=1,  # Keras wants this
            dilation_rate=1,
        )
        # Torch expects output_padding < stride (1)
        # so output_padding should be clamped to 0
        self.assertEqual(torch_output_paddings, [0, 0, 0])
class GetOutputShapeGivenTFPaddingTest(test_case.TestCase):
    """Tests output-size computation under TF-style padding.

    NOTE(review): the last two tests exercise the Keras->Torch conversion
    helper rather than `_get_output_shape_given_tf_padding`; they look
    misplaced in this class — consider moving them next to the other
    Torch-conversion tests.
    """

    def test_valid_padding_without_output_padding(self):
        """Test computation with 'valid' padding and no output padding."""
        output_shape = _get_output_shape_given_tf_padding(
            input_size=5,
            kernel_size=3,
            strides=2,
            padding="valid",
            output_padding=None,
            dilation_rate=1,
        )
        self.assertEqual(output_shape, 11)

    def test_same_padding_without_output_padding(self):
        """Test computation with 'same' padding and no output padding."""
        output_shape = _get_output_shape_given_tf_padding(
            input_size=5,
            kernel_size=3,
            strides=2,
            padding="same",
            output_padding=None,
            dilation_rate=1,
        )
        self.assertEqual(output_shape, 10)

    def test_valid_padding_with_output_padding(self):
        """Test computation with 'valid' padding and output padding."""
        output_shape = _get_output_shape_given_tf_padding(
            input_size=5,
            kernel_size=3,
            strides=2,
            padding="valid",
            output_padding=1,
            dilation_rate=1,
        )
        self.assertEqual(output_shape, 12)

    def test_warning_for_inconsistencies(self):
        """Test that a warning is raised for potential inconsistencies"""
        with self.assertWarns(Warning):
            _convert_conv_transpose_padding_args_from_keras_to_torch(
                kernel_size=3,
                stride=2,
                dilation_rate=1,
                padding="same",
                output_padding=1,
            )

    def test_same_padding_without_output_padding_for_torch_(self):
        """Test conversion with 'same' padding and no output padding."""
        (
            torch_padding,
            torch_output_padding,
        ) = _convert_conv_transpose_padding_args_from_keras_to_torch(
            kernel_size=3,
            stride=2,
            dilation_rate=1,
            padding="same",
            output_padding=None,
        )
        self.assertEqual(torch_padding, max(-((3 % 2 - 3) // 2), 0))
        self.assertEqual(torch_output_padding, 1)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/common/remat_test.py | keras/src/backend/common/remat_test.py | import numpy as np
from keras.src import backend
from keras.src import layers
from keras.src import models
from keras.src import testing
from keras.src.backend.common import global_state
from keras.src.backend.common.remat import RematScope
from keras.src.backend.common.remat import get_current_remat_mode
from keras.src.layers import activations
class TestRematScope(testing.TestCase):
    """Tests activation, nesting and stack management of `RematScope`."""

    def setUp(self):
        """Reset global state before each test."""
        global_state.clear_session()

    def test_remat_scope_activation(self):
        self.assertIsNone(
            get_current_remat_mode()
        )  # Initially, no mode is active
        with RematScope(mode="full"):
            self.assertEqual(
                get_current_remat_mode().mode, "full"
            )  # Mode is set to "full"
        self.assertIsNone(
            get_current_remat_mode()
        )  # Mode is restored to None after scope ends

    def test_remat_scope_nested(self):
        """Test nested scopes with different rematerialization modes."""
        with RematScope(mode="full"):
            self.assertEqual(
                get_current_remat_mode().mode, "full"
            )  # Outer scope is "full"
            with RematScope(mode="activations"):
                self.assertEqual(
                    get_current_remat_mode().mode, "activations"
                )  # Inner scope is "activations"
            self.assertEqual(
                get_current_remat_mode().mode, "full"
            )  # Back to outer scope
        self.assertIsNone(
            get_current_remat_mode()
        )  # Mode is restored to None after all scopes

    def test_remat_scope_stack_management(self):
        """Test that the remat_scope_stack is managed correctly."""
        self.assertIsNone(
            global_state.get_global_attribute("remat_scope_stack")
        )  # No stack initially
        with RematScope(mode="full"):
            remat_stack = global_state.get_global_attribute("remat_scope_stack")
            self.assertIsNotNone(remat_stack)  # Stack is initialized
            self.assertEqual(len(remat_stack), 1)  # Stack contains one entry
            with RematScope(mode="activations"):
                remat_stack = global_state.get_global_attribute(
                    "remat_scope_stack"
                )
                self.assertEqual(
                    len(remat_stack), 2
                )  # Stack contains two entries
            remat_stack = global_state.get_global_attribute("remat_scope_stack")
            self.assertEqual(len(remat_stack), 1)  # Back to one entry
        self.assertEqual(
            global_state.get_global_attribute("remat_scope_stack"), []
        )  # Stack is cleared

    def test_invalid_mode(self):
        """Test that invalid rematerialization modes raise an error."""
        with self.assertRaises(ValueError):
            RematScope(mode="invalid")  # Invalid mode should raise ValueError
class RematTest(testing.TestCase):
    """End-to-end tests for `backend.core.remat` rematerialization."""

    def test_remat_basic_call(self):
        if backend.backend() in ("openvino", "numpy"):
            self.skipTest(
                "remat is not supported in openvino and numpy backends."
            )
        # Generate dummy data
        data_size = 10**5
        x_train = np.random.normal(size=(data_size, 4))
        y_train = np.random.normal(size=(data_size, 1))
        epochs = 5
        batch_size = 512

        # test applying remat: output must match the un-remat'ed call.
        output_with_remat = backend.core.remat(activations.ReLU())(x_train)
        output_without_remat = activations.ReLU()(x_train)
        self.assertAllClose(output_with_remat, output_without_remat)

        # test remat in a model
        intermediate_function = backend.core.remat(activations.ReLU())
        inputs = layers.Input(shape=(4,))
        x = layers.Dense(4)(inputs)
        x = layers.Lambda(intermediate_function)(x)
        outputs = layers.Dense(1)(x)
        model = models.Model(inputs=inputs, outputs=outputs)
        model.predict(x_train)
        model.compile(optimizer="sgd", loss="mse")

        # Train model — exercises remat through the backward pass too.
        model.fit(
            x_train,
            y_train,
            epochs=epochs,
            batch_size=batch_size,
            verbose=0,
        )

    def test_remat_with_kwargs(self):
        if backend.backend() in ("openvino", "numpy"):
            self.skipTest(
                "remat is not supported in openvino and numpy backends."
            )

        # Define a function that uses keyword arguments
        def fn_with_kwargs(x, scale=1.0, offset=0.0):
            return x * scale + offset

        x = np.array([1.0, 2.0, 3.0], dtype=np.float32)

        # Test with keyword arguments
        remat_fn = backend.core.remat(fn_with_kwargs)
        result_with_kwargs = remat_fn(x, scale=2.0, offset=1.0)
        expected = fn_with_kwargs(x, scale=2.0, offset=1.0)
        self.assertAllClose(result_with_kwargs, expected)

        # Test with default keyword arguments
        result_with_defaults = remat_fn(x)
        expected_defaults = fn_with_kwargs(x)
        self.assertAllClose(result_with_defaults, expected_defaults)

        # Test with partial keyword arguments
        result_partial = remat_fn(x, scale=3.0)
        expected_partial = fn_with_kwargs(x, scale=3.0)
        self.assertAllClose(result_partial, expected_partial)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/common/symbolic_scope.py | keras/src/backend/common/symbolic_scope.py | from keras.src.api_export import keras_export
from keras.src.backend.common import global_state
@keras_export("keras.SymbolicScope")
class SymbolicScope:
    """Scope to indicate the symbolic stage.

    Context manager that records itself in global state on entry and
    restores whatever scope (possibly None) was active on exit, so
    nested scopes unwind correctly.
    """

    def __enter__(self):
        # Remember the enclosing scope so nesting restores it on exit.
        self.original_scope = get_symbolic_scope()
        global_state.set_global_attribute("symbolic_scope", self)
        return self

    def __exit__(self, *args, **kwargs):
        global_state.set_global_attribute("symbolic_scope", self.original_scope)
def in_symbolic_scope():
    """Return True if a `SymbolicScope` is currently active."""
    return global_state.get_global_attribute("symbolic_scope") is not None
def get_symbolic_scope():
    """Return the innermost active `SymbolicScope`, or None."""
    return global_state.get_global_attribute("symbolic_scope")
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/common/variables_test.py | keras/src/backend/common/variables_test.py | import itertools
import numpy as np
import pytest
from absl.testing import parameterized
from conftest import skip_if_backend
from keras.src import backend
from keras.src import initializers
from keras.src import ops
from keras.src.backend.common import dtypes
from keras.src.backend.common.variables import AutocastScope
from keras.src.backend.common.variables import shape_equal
from keras.src.backend.common.variables import standardize_dtype
from keras.src.backend.common.variables import standardize_shape
from keras.src.testing import test_case
from keras.src.testing.test_utils import named_product
class VariableInitializationTest(test_case.TestCase):
    """Tests for Variable.__init__()"""

    def test_deferred_initialization(self):
        """Tests deferred initialization of variables."""
        with backend.StatelessScope():
            v = backend.Variable(
                initializer=initializers.RandomNormal(), shape=(2, 2)
            )
            self.assertEqual(v._value, None)
            # Variables can nevertheless be accessed
            _ = v + 1
            self.assertEqual(v._value.shape, (2, 2))
        # Eager initialization (non-callable initializer) is disallowed
        # inside a stateless scope.
        with self.assertRaisesRegex(ValueError, "while in a stateless scope"):
            with backend.StatelessScope():
                v = backend.Variable(initializer=0)

    def test_variable_initialization_with_numpy_array(self):
        """Test variable init with numpy array initializer."""
        v = backend.Variable(
            initializer=np.ones((2, 2), dtype=np.int32), trainable=False
        )
        self.assertAllClose(v.value, np.ones((2, 2)))
        self.assertEqual(v.dtype, "int32")

    def test_variable_initialization_with_native_array(self):
        """Test variable init with native array initializer."""
        v = backend.Variable(
            initializer=ops.ones((2, 2), dtype="int32"), trainable=False
        )
        self.assertAllClose(v.value, np.ones((2, 2)))
        self.assertEqual(v.dtype, "int32")

    def test_variable_initialization_with_python_array(self):
        """Test variable init with python array initializer."""
        v = backend.Variable(initializer=[[1, 1], [1, 1]], trainable=False)
        self.assertAllClose(v.value, np.ones((2, 2)))
        self.assertEqual(v.dtype, "int32")
        v = backend.Variable(
            initializer=[[1.0, 1.0], [1.0, 1.0]], trainable=False
        )
        self.assertAllClose(v.value, np.ones((2, 2)))
        self.assertEqual(v.dtype, "float32")

    def test_variable_initialization_with_lambda_expression(self):
        # Test Python number
        v = backend.Variable(
            initializer=lambda *a, **kw: 1.0,
            shape=(),
            dtype="float32",
        )
        self.assertAllClose(v.value, 1.0)
        self.assertEqual(v.dtype, "float32")

        # Test Python array
        v = backend.Variable(
            initializer=lambda *a, **kw: [1.0],
            shape=(1,),
            dtype="float32",
        )
        self.assertAllClose(v.value, np.ones((1,)))
        self.assertEqual(v.dtype, "float32")

        # Test numpy array
        v = backend.Variable(
            initializer=lambda *a, **kw: np.ones((1,)),
            shape=(1,),
            dtype="float32",
        )
        self.assertAllClose(v.value, np.ones((1,)))
        self.assertEqual(v.dtype, "float32")

        # Test backend array
        v = backend.Variable(
            initializer=lambda *a, **kw: ops.ones((1,)),
            shape=(1,),
            dtype="float32",
        )
        self.assertAllClose(v.value, np.ones((1,)))
        self.assertEqual(v.dtype, "float32")

    def test_variable_initialization_with_strings(self):
        """Test variable init with non-callable initializer."""
        v = backend.Variable(initializer="ones", shape=(2, 2))
        self.assertAllClose(v.value, np.ones((2, 2)))

    def test_variable_initialization_with_non_trainable(self):
        """Test variable initialization with non-trainable flag."""
        v = backend.Variable(initializer=np.ones((2, 2)), trainable=False)
        self.assertFalse(v.trainable)

    def test_variable_initialization_without_shape(self):
        """Test variable init without a shape."""
        with self.assertRaisesRegex(
            ValueError,
            "When creating a Variable from an initializer, the `shape` ",
        ):
            backend.Variable(initializer=initializers.RandomNormal())

    def test_deferred_initialize_already_initialized(self):
        """Test deferred init on an already initialized variable."""
        v = backend.Variable(initializer=np.ones((2, 2)))
        with self.assertRaisesRegex(
            ValueError, f"Variable {v.path} is already initialized."
        ):
            v._deferred_initialize()

    def test_variable_initialize(self):
        """Test initializing a variable."""
        v = backend.Variable(initializer=np.array([1.0, 2.0, 3.0]))
        init_value = np.array([4.0, 5.0, 6.0])
        v._initialize(value=init_value)
        self.assertAllClose(v.value, init_value)

    def test_variable_without_shape_from_callable_initializer(self):
        """Test that Variable raises error
        if shape is not provided for callable initializer."""
        with self.assertRaisesRegex(
            ValueError, "When creating a Variable from an initializer"
        ):
            backend.Variable(initializer=lambda: np.ones((2, 2)))
class VariablePropertiesTest(test_case.TestCase):
    """Tests for Variable._deferred_initialize Variable._maybe_autocast"""

    @skip_if_backend(
        "openvino", "Can not constant fold eltwise node by CPU plugin"
    )
    def test_deferred_assignment(self):
        """Tests deferred assignment to variables."""
        with backend.StatelessScope() as scope:
            v = backend.Variable(
                initializer=initializers.RandomNormal(), shape=(2, 2)
            )
            self.assertEqual(v._value, None)
            v.assign(np.zeros((2, 2)))
            v.assign_add(2 * np.ones((2, 2)))
            v.assign_sub(np.ones((2, 2)))
        # 0 + 2 - 1 == 1 elementwise.
        out = scope.get_current_value(v)
        self.assertAllClose(out, np.ones((2, 2)))

    def test_trainable_setter(self):
        """Tests the trainable setter."""
        v = backend.Variable(
            initializer=initializers.RandomNormal(),
            shape=(2, 2),
        )
        self.assertTrue(v.trainable)
        v.trainable = False
        self.assertFalse(v.trainable)

        if backend.backend() == "torch":
            # On torch, `trainable` must stay in sync with requires_grad.
            v.trainable = True
            self.assertTrue(v._value.requires_grad)
            v.trainable = False
            self.assertFalse(v._value.requires_grad)

    def test_autocasting_float(self):
        # Tests autocasting of float variables
        v = backend.Variable(
            initializer=initializers.RandomNormal(),
            shape=(2, 2),
            dtype="float32",
        )
        self.assertEqual(v.dtype, "float32")
        self.assertEqual(backend.standardize_dtype(v.value.dtype), "float32")
        with AutocastScope("float16"):
            self.assertEqual(
                backend.standardize_dtype(v.value.dtype), "float16"
            )
        self.assertEqual(backend.standardize_dtype(v.value.dtype), "float32")

    def test_autocasting_float_assign(self):
        # Tests assigning value to variable within an autocast scope
        v = backend.Variable(
            initializer=initializers.RandomNormal(),
            shape=(2, 2),
            dtype="float32",
        )
        self.assertEqual(v.dtype, "float32")
        self.assertEqual(backend.standardize_dtype(v.value.dtype), "float32")

        # Assign float16 value within float16 scope
        with AutocastScope("float16"):
            self.assertEqual(
                backend.standardize_dtype(v.value.dtype), "float16"
            )
            v.assign(ops.ones((2, 2), "float16"))
        # Stored value is still float32 outside the scope.
        self.assertEqual(backend.standardize_dtype(v.value.dtype), "float32")

        # Assign float32 value within float16 scope
        with AutocastScope("float16"):
            self.assertEqual(
                backend.standardize_dtype(v.value.dtype), "float16"
            )
            v.assign(ops.zeros((2, 2), "float32"))
        self.assertEqual(backend.standardize_dtype(v.value.dtype), "float32")

    def test_autocasting_int(self):
        # Test non-float variables are not affected
        v = backend.Variable(
            initializer=initializers.Ones(),
            shape=(2, 2),
            dtype="int32",
            trainable=False,
        )
        self.assertEqual(v.dtype, "int32")
        self.assertEqual(backend.standardize_dtype(v.value.dtype), "int32")
        with AutocastScope("float16"):
            self.assertEqual(backend.standardize_dtype(v.value.dtype), "int32")

    def test_autocasting_float_with_autocast_off(self):
        # Test autocast argument
        v = backend.Variable(
            initializer=initializers.RandomNormal(),
            shape=(2, 2),
            dtype="float32",
            autocast=False,
        )
        self.assertEqual(v.dtype, "float32")
        self.assertEqual(backend.standardize_dtype(v.value.dtype), "float32")
        with AutocastScope("float16"):
            self.assertEqual(
                backend.standardize_dtype(v.value.dtype),
                "float32",  # ignore AutocastScope
            )
        self.assertEqual(backend.standardize_dtype(v.value.dtype), "float32")

    # Fixed typo: "complex28" -> "complex128", so complex128 is actually
    # excluded from the parameterization as the filter intends.
    @parameterized.parameters(
        *(
            (
                dtype
                for dtype in dtypes.ALLOWED_DTYPES
                if dtype not in ["string", "complex64", "complex128"]
            )
        )
    )
    def test_standardize_dtype(self, dtype):
        """Tests standardize_dtype for all ALLOWED_DTYPES except string."""
        if backend.backend() == "torch" and dtype in (
            "uint16",
            "uint32",
            "uint64",
            "complex64",
            "complex128",
        ):
            self.skipTest(f"torch backend does not support dtype {dtype}")
        if backend.backend() == "jax":
            if dtype in ("complex128",):
                self.skipTest(f"jax backend does not support dtype {dtype}")
            import jax

            if not jax.config.x64_enabled and "64" in dtype:
                self.skipTest(
                    f"jax backend does not support {dtype} without x64 enabled"
                )
        if backend.backend() == "openvino" and dtype in (
            "complex64",
            "complex128",
        ):
            self.skipTest(f"openvino backend does not support dtype {dtype}")

        x = backend.convert_to_tensor(np.zeros(()), dtype)
        actual = standardize_dtype(x.dtype)
        self.assertEqual(actual, dtype)

    def test_standardize_dtype_with_torch_dtype(self):
        """Tests dtype standardization with PyTorch dtypes."""
        import torch

        x = torch.randn(4, 4)
        backend.standardize_dtype(x.dtype)

    def test_name_validation(self):
        """Tests validation of variable names."""
        with self.assertRaisesRegex(
            ValueError, "Argument `name` must be a string"
        ):
            backend.Variable(
                initializer=initializers.RandomNormal(), name=12345
            )

        with self.assertRaisesRegex(ValueError, "cannot contain character `/`"):
            backend.Variable(
                initializer=initializers.RandomNormal(), name="invalid/name"
            )

    def test_standardize_shape_with_none(self):
        with self.assertRaisesRegex(
            ValueError, "Undefined shapes are not supported."
        ):
            standardize_shape(None)

    def test_standardize_shape_with_non_iterable(self):
        with self.assertRaisesRegex(
            ValueError, "Cannot convert '42' to a shape."
        ):
            standardize_shape(42)

    def test_standardize_shape_with_valid_input(self):
        shape = (3, 4, 5)
        standardized_shape = standardize_shape(shape)
        self.assertEqual(standardized_shape, (3, 4, 5))

    def test_standardize_shape_with_valid_input_with_none(self):
        shape = (3, None, 5)
        standardized_shape = standardize_shape(shape)
        self.assertEqual(standardized_shape, (3, None, 5))

    def test_standardize_shape_with_valid_not_tuple_input(self):
        shape = [3, 4, 5]
        standardized_shape = standardize_shape(shape)
        self.assertEqual(standardized_shape, (3, 4, 5))

    def test_standardize_shape_with_numpy(self):
        shape = [3, np.int32(4), np.int64(5)]
        standardized_shape = standardize_shape(shape)
        self.assertEqual(standardized_shape, (3, 4, 5))
        # Numpy integer dims must be converted to plain Python ints.
        for d in standardized_shape:
            self.assertIsInstance(d, int)

    def test_standardize_shape_with_string(self):
        shape_with_string = (3, 4, "5")
        with self.assertRaisesRegex(
            ValueError,
            "Cannot convert .* to a shape. Found invalid dimension '5'.",
        ):
            standardize_shape(shape_with_string)

    def test_standardize_shape_with_float(self):
        shape_with_float = (3, 4, 5.0)
        with self.assertRaisesRegex(
            ValueError,
            "Cannot convert .* to a shape. Found invalid dimension '5.0'.",
        ):
            standardize_shape(shape_with_float)

    def test_standardize_shape_with_object(self):
        shape_with_object = (3, 4, object())
        with self.assertRaisesRegex(
            ValueError,
            "Cannot convert .* to a shape. Found invalid dimension .*object",
        ):
            standardize_shape(shape_with_object)

    def test_standardize_shape_with_negative_dimension(self):
        with self.assertRaisesRegex(
            ValueError,
            "Cannot convert .* to a shape. Negative dimensions",
        ):
            standardize_shape((3, 4, -5))

    def test_shape_equal_length_mismatch(self):
        """Test mismatch in lengths of shapes."""
        self.assertFalse(shape_equal((3, 2), (3, 2, 4)))
        self.assertFalse(shape_equal((), (3,)))
        self.assertFalse(shape_equal((3, 2, 4, 5), (3, 2, 4)))

    def test_autocast_scope_with_non_float_dtype(self):
        """Tests autocast scope with non-float dtype."""
        with self.assertRaisesRegex(
            ValueError,
            "`AutocastScope` can only be used with a floating-point",
        ):
            _ = AutocastScope("int32")

    def test_variable_path_creation(self):
        """Test path creation for a variable."""
        v = backend.Variable(initializer=np.ones((2, 2)), name="test_var")
        self.assertEqual(v.path, "test_var")
        with backend.name_scope("test_scope"):
            v = backend.Variable(initializer=np.ones((2, 2)), name="test_var")
        self.assertEqual(v.path, "test_scope/test_var")

    def test_overwrite_with_gradient_setter(self):
        v = backend.Variable(
            initializer=initializers.RandomNormal(),
            shape=(2, 2),
        )
        self.assertFalse(v.overwrite_with_gradient)
        v.overwrite_with_gradient = True
        self.assertTrue(v.overwrite_with_gradient)
        with self.assertRaisesRegex(TypeError, "must be a boolean."):
            v.overwrite_with_gradient = "true"
class VariableNumpyValueAndAssignmentTest(test_case.TestCase):
    """tests for Variable.numpy(), Variable.value() and Variable.assign()"""

    def test_variable_numpy(self):
        """Test retrieving the value of a variable as a numpy array."""
        v = backend.Variable(initializer=np.array([1.0, 2.0, 3.0]))
        self.assertIsInstance(v.numpy(), np.ndarray)
        self.assertAllClose(v.numpy(), np.array([1.0, 2.0, 3.0]))

    @pytest.mark.skipif(
        backend.backend() != "tensorflow",
        reason="Tests for MirroredVariable under tf backend",
    )
    def test_variable_numpy_scalar(self):
        from keras.src.utils.module_utils import tensorflow as tf

        strategy = tf.distribute.MirroredStrategy(["cpu:0", "cpu:1"])
        with strategy.scope():
            v = backend.Variable(initializer=0.0)
        np_value = backend.convert_to_numpy(v)
        self.assertIsInstance(np_value, np.ndarray)
        self.assertAllClose(np_value, 0.0)

    def test_variable_value(self):
        """Test retrieving the value of a variable."""
        v = backend.Variable(initializer=np.array([1.0, 2.0, 3.0]))
        self.assertAllClose(v.value, np.array([1.0, 2.0, 3.0]))

    def test_variable_assign(self):
        """Test assigning a new value to a variable."""
        v = backend.Variable(initializer=np.array([1.0, 2.0, 3.0]))
        v.assign(np.array([4.0, 5.0, 6.0]))
        self.assertAllClose(v.value, np.array([4.0, 5.0, 6.0]))

    def test_variable_assign_return(self):
        """Test assigning a new value and returning."""
        v = backend.Variable(initializer=np.array([1.0, 2.0, 3.0]))
        r = v.assign(np.array([4.0, 5.0, 6.0]))
        self.assertAllClose(r, np.array([4.0, 5.0, 6.0]))

    def test_variable_assign_add(self):
        """Test the assign_add method on a variable."""
        v = backend.Variable(initializer=np.array([1.0, 2.0, 3.0]))
        v.assign_add(np.array([1.0, 1.0, 1.0]))
        self.assertAllClose(v.value, np.array([2.0, 3.0, 4.0]))

    def test_variable_assign_add_return(self):
        """Test assign_add a new value and returning."""
        v = backend.Variable(initializer=np.array([1.0, 2.0, 3.0]))
        r = v.assign_add(np.array([1.0, 1.0, 1.0]))
        self.assertAllClose(r, np.array([2.0, 3.0, 4.0]))

    def test_variable_assign_sub(self):
        """Test the assign_sub method on a variable."""
        v = backend.Variable(initializer=np.array([2.0, 3.0, 4.0]))
        v.assign_sub(np.array([1.0, 1.0, 1.0]))
        self.assertAllClose(v.value, np.array([1.0, 2.0, 3.0]))

    def test_variable_assign_sub_return(self):
        """Test assign_sub a new value and returning."""
        v = backend.Variable(initializer=np.array([2.0, 3.0, 4.0]))
        r = v.assign_sub(np.array([1.0, 1.0, 1.0]))
        self.assertAllClose(r, np.array([1.0, 2.0, 3.0]))

    def test_deferred_initialize_within_stateless_scope(self):
        """Test deferred init within a stateless scope."""
        with backend.StatelessScope():
            v = backend.Variable(
                initializer=initializers.RandomNormal(), shape=(2, 2)
            )
            with self.assertRaisesRegex(
                ValueError,
                "You are attempting to initialize a variable "
                "while in a stateless scope. This is disallowed.",
            ):
                v._deferred_initialize()
class VariableDtypeShapeNdimRepr(test_case.TestCase):
    """tests for dtype, shape, ndim, __repr__"""

    def test_variable_dtype(self):
        """Test retrieving the dtype of a variable."""
        v = backend.Variable(
            initializer=np.array([1.0, 2.0, 3.0], dtype=np.float32)
        )
        self.assertEqual(v.dtype, "float32")

    def test_variable_shape(self):
        """Test retrieving the shape of a variable."""
        v = backend.Variable(initializer=np.array([[1.0, 2.0], [3.0, 4.0]]))
        self.assertEqual(v.shape, (2, 2))

    def test_variable_ndim(self):
        """Test retrieving the number of dimensions of a variable."""
        v = backend.Variable(initializer=np.array([[1.0, 2.0], [3.0, 4.0]]))
        self.assertEqual(v.ndim, 2)

    def test_variable_repr(self):
        """Test the string representation of a variable."""
        v = backend.Variable(
            initializer=np.array([1.0, 2.0, 3.0], dtype=np.float32),
            name="test_var",
        )
        expected_repr = (
            "<Variable path=test_var, shape=(3,), dtype=float32, "
            "value=[1. 2. 3.]>"
        )
        self.assertEqual(repr(v), expected_repr)

        # Test with `backend.StatelessScope()` — value is uninitialized,
        # so the repr omits it.
        with backend.StatelessScope():
            v = backend.Variable(
                initializer="zeros", shape=(3,), name="test_var"
            )
            expected_repr = (
                "<Variable path=test_var, shape=(3,), dtype=float32>"
            )
            self.assertEqual(repr(v), expected_repr)

    def test_variable_getitem(self):
        """Test getting an item from a variable."""
        v = backend.Variable(initializer=np.array([1.0, 2.0, 3.0]))
        self.assertEqual(v[0], 1)

    def test_variable_initialize(self):
        """Test initializing a variable."""
        v = backend.Variable(initializer=np.array([1.0, 2.0, 3.0]))
        init_value = np.array([4.0, 5.0, 6.0])
        v._initialize(value=init_value)
        self.assertAllClose(v.value, init_value)

    def test_variable_convert_to_tensor(self):
        """Test converting a variable to a tensor."""
        v = backend.Variable(initializer=np.array([1.0, 2.0, 3.0]))
        self.assertAllClose(
            v._convert_to_tensor(v.value), np.array([1.0, 2.0, 3.0])
        )

    def test_variable_convert_to_tensor_with_dtype(self):
        """Test converting a variable to a tensor with a dtype."""
        v = backend.Variable(initializer=np.array([1.0, 2.0, 3.0]))
        self.assertAllClose(
            v._convert_to_tensor(v.value, dtype="float32"),
            np.array([1.0, 2.0, 3.0]),
        )

    def test_variable_array(self):
        """Test converting a variable to an array."""
        v = backend.Variable(initializer=np.array([1.0, 2.0, 3.0]))
        self.assertAllClose(v.__array__(), np.array([1.0, 2.0, 3.0]))
class VariableOpsCorrectnessTest(test_case.TestCase):
"""Tests for operations on Variable."""
def test_int(self):
v = backend.Variable(initializer=np.array(-1.1))
self.assertAllClose(int(v), np.array(-1))
def test_float(self):
v = backend.Variable(initializer=np.array(-1.1))
self.assertAllClose(float(v), np.array(-1.1))
def test__neg__(self):
"""Test negating a variable."""
v = backend.Variable(initializer=np.array([-1.0, 2.0]), trainable=False)
self.assertAllClose(v.__neg__(), np.array([1.0, -2.0]))
def test__abs__(self):
"""Test absolute value on a variable."""
v = backend.Variable(initializer=np.array([-1.0, 2.0]), trainable=False)
self.assertAllClose(v.__abs__(), np.array([1.0, 2.0]))
def test__invert__(self):
"""Test bitwise not on a variable."""
v = backend.Variable(
initializer=np.array([True, False]), trainable=False, dtype="bool"
)
self.assertAllClose(v.__invert__(), np.array([False, True]))
def test__eq__(self):
"""Test equality comparison on a variable."""
v = backend.Variable(initializer=np.array([1.0, 2.0]), trainable=False)
self.assertAllClose(
v.__eq__(np.array([1.0, 2.0])), np.array([True, True])
)
def test__ne__(self):
"""Test inequality comparison on a variable."""
v = backend.Variable(initializer=np.array([1.0, 2.0]), trainable=False)
self.assertAllClose(
v.__ne__(np.array([1.0, 2.0])), np.array([False, False])
)
def test__lt__(self):
"""Test less than comparison on a variable."""
v = backend.Variable(initializer=np.array([1.0, 2.0]), trainable=False)
self.assertAllClose(
v.__lt__(np.array([1.0, 2.0])), np.array([False, False])
)
def test__le__(self):
"""Test less than or equal to comparison on a variable."""
v = backend.Variable(initializer=np.array([1.0, 2.0]), trainable=False)
self.assertAllClose(
v.__le__(np.array([1.0, 2.0])), np.array([True, True])
)
def test__gt__(self):
"""Test greater than comparison on a variable."""
v = backend.Variable(initializer=np.array([1.0, 2.0]), trainable=False)
self.assertAllClose(
v.__gt__(np.array([1.0, 2.0])), np.array([False, False])
)
def test__ge__(self):
"""Test greater than or equal to comparison on a variable."""
v = backend.Variable(initializer=np.array([1.0, 2.0]), trainable=False)
self.assertAllClose(
v.__ge__(np.array([1.0, 2.0])), np.array([True, True])
)
def test__add__(self):
"""Test addition operation on a variable."""
v1 = backend.Variable(initializer=np.array([1.0, 2.0, 3.0]))
v2 = backend.Variable(initializer=np.array([4.0, 5.0, 6.0]))
self.assertAllClose(v1.__add__(v2), np.array([5.0, 7.0, 9.0]))
def test__radd__(self):
"""Test reverse addition operation on a variable."""
v1 = backend.Variable(initializer=np.array([1.0, 2.0, 3.0]))
v2 = backend.Variable(initializer=np.array([4.0, 5.0, 6.0]))
self.assertAllClose(v1.__radd__(v2), np.array([5.0, 7.0, 9.0]))
def test__sub__(self):
"""Test subtraction operation on a variable."""
v1 = backend.Variable(initializer=np.array([1.0, 2.0, 3.0]))
v2 = backend.Variable(initializer=np.array([4.0, 5.0, 6.0]))
self.assertAllClose(v1.__sub__(v2), np.array([-3.0, -3.0, -3.0]))
def test__rsub__(self):
"""Test reverse subtraction operation on a variable."""
v1 = backend.Variable(initializer=np.array([4.0, 5.0, 6.0]))
v2 = backend.Variable(initializer=np.array([1.0, 2.0, 3.0]))
self.assertAllClose(v1.__rsub__(v2), np.array([-3.0, -3.0, -3.0]))
def test__mul__(self):
"""Test multiplication operation on a variable."""
v1 = backend.Variable(initializer=np.array([1.0, 2.0, 3.0]))
v2 = backend.Variable(initializer=np.array([4.0, 5.0, 6.0]))
self.assertAllClose(v1.__mul__(v2), np.array([4.0, 10.0, 18.0]))
def test__rmul__(self):
"""Test reverse multiplication operation on a variable."""
v1 = backend.Variable(initializer=np.array([1.0, 2.0, 3.0]))
v2 = backend.Variable(initializer=np.array([4.0, 5.0, 6.0]))
self.assertAllClose(v1.__rmul__(v2), np.array([4.0, 10.0, 18.0]))
def test__truediv__(self):
"""Test true division operation on a variable."""
v1 = backend.Variable(initializer=np.array([1.0, 2.0, 3.0]))
v2 = backend.Variable(initializer=np.array([4.0, 5.0, 6.0]))
self.assertAllClose(v1.__truediv__(v2), np.array([0.25, 0.4, 0.5]))
def test__rtruediv__(self):
"""Test reverse true division operation on a variable."""
v1 = backend.Variable(initializer=np.array([4.0, 5.0, 6.0]))
v2 = backend.Variable(initializer=np.array([1.0, 2.0, 3.0]))
self.assertAllClose(v1.__rtruediv__(v2), np.array([0.25, 0.4, 0.5]))
@skip_if_backend(
"openvino", "`floor_divide` is not supported with openvino backend"
)
def test__floordiv__(self):
"""Test floordiv operation on a variable."""
v1 = backend.Variable(initializer=np.array([1.0, 2.0, 3.0]))
v2 = backend.Variable(initializer=np.array([-4.0, 5.0, 6.0]))
self.assertAllClose(v1.__floordiv__(v2), np.array([-1.0, 0.0, 0.0]))
@skip_if_backend(
"openvino", "`floor_divide` is not supported with openvino backend"
)
def test__rfloordiv__(self):
"""Test reverse floordiv operation on a variable."""
v1 = backend.Variable(initializer=np.array([-4.0, 5.0, 6.0]))
v2 = backend.Variable(initializer=np.array([1.0, 2.0, 3.0]))
self.assertAllClose(v1.__rfloordiv__(v2), np.array([-1.0, 0.0, 0.0]))
def test__mod__(self):
"""Test mod operation on a variable."""
v1 = backend.Variable(initializer=np.array([1.0, 2.0, 3.0]))
v2 = backend.Variable(initializer=np.array([-4.0, 5.0, 6.0]))
self.assertAllClose(v1.__mod__(v2), np.array([-3.0, 2.0, 3.0]))
def test__rmod__(self):
"""Test reverse mod operation on a variable."""
v1 = backend.Variable(initializer=np.array([1.0, 2.0, 3.0]))
v2 = backend.Variable(initializer=np.array([1.0, 2.0, 3.0]))
self.assertAllClose(v1.__rmod__(v2), np.array([0.0, 0.0, 0.0]))
def test__pow__(self):
"""Test pow operation on a variable."""
v1 = backend.Variable(initializer=np.array([1.0, 2.0, 3.0]))
v2 = backend.Variable(initializer=np.array([-4.0, 5.0, 6.0]))
self.assertAllClose(v1.__pow__(v2), np.array([1.0, 32.0, 729.0]))
def test__rpow__(self):
"""Test reverse power operation on a variable."""
v1 = backend.Variable(initializer=np.array([1.0, 2.0, 3.0]))
v2 = backend.Variable(initializer=np.array([1.0, 2.0, 3.0]))
self.assertAllClose(v1.__rpow__(v2), np.array([1.0, 4.0, 27.0]))
def test__matmul__(self):
"""Test matmul operation on a variable."""
v1 = backend.Variable(initializer=np.array([[1.0, 2.0], [3.0, 4.0]]))
v2 = backend.Variable(initializer=np.array([[5.0, 6.0], [7.0, 8.0]]))
self.assertAllClose(
v1.__matmul__(v2), np.array([[19.0, 22.0], [43.0, 50.0]])
)
def test__rmatmul__(self):
"""Test reverse matmul operation on a variable."""
v1 = backend.Variable(initializer=np.array([[1.0, 2.0], [3.0, 4.0]]))
v2 = backend.Variable(initializer=np.array([[5.0, 6.0], [7.0, 8.0]]))
self.assertAllClose(
v1.__rmatmul__(v2), np.array([[23.0, 34.0], [31.0, 46.0]])
)
def test__and__(self):
"""Test bitwise and operation on a variable."""
v1 = backend.Variable(
initializer=np.array([True, False]), dtype="bool", trainable=False
)
v2 = backend.Variable(
initializer=np.array([True, True]), dtype="bool", trainable=False
)
self.assertAllClose(v1.__and__(v2), np.array([True, False]))
def test__rand__(self):
"""Test reverse bitwise and operation on a variable."""
v1 = backend.Variable(
initializer=np.array([True, False]), dtype="bool", trainable=False
)
v2 = backend.Variable(
initializer=np.array([True, True]), dtype="bool", trainable=False
)
self.assertAllClose(v1.__rand__(v2), np.array([True, False]))
def test__or__(self):
"""Test bitwise or operation on a variable."""
v1 = backend.Variable(
initializer=np.array([True, False]), dtype="bool", trainable=False
)
v2 = backend.Variable(
initializer=np.array([True, True]), dtype="bool", trainable=False
)
self.assertAllClose(v1.__or__(v2), np.array([True, True]))
def test__ror__(self):
"""Test reverse bitwise or operation on a variable."""
v1 = backend.Variable(
initializer=np.array([True, False]), dtype="bool", trainable=False
)
v2 = backend.Variable(
initializer=np.array([True, True]), dtype="bool", trainable=False
)
self.assertAllClose(v1.__ror__(v2), np.array([True, True]))
def test__xor__(self):
"""Test bitwise xor operation on a variable."""
v1 = backend.Variable(
initializer=np.array([True, False]), dtype="bool", trainable=False
)
v2 = backend.Variable(
initializer=np.array([True, True]), dtype="bool", trainable=False
)
self.assertAllClose(v1.__xor__(v2), np.array([False, True]))
def test__rxor__(self):
"""Test reverse bitwise xor operation on a variable."""
v1 = backend.Variable(
initializer=np.array([True, False]), dtype="bool", trainable=False
)
v2 = backend.Variable(
initializer=np.array([True, True]), dtype="bool", trainable=False
)
self.assertAllClose(v1.__rxor__(v2), np.array([False, True]))
def test__pos__(self):
"""Test unary plus on a variable."""
v = backend.Variable(initializer=np.array([-1.0, 2.0]), trainable=False)
self.assertAllClose(v.__pos__(), np.array([-1.0, 2.0]))
def test_variable_pow(self):
"""Test pow operation on a variable."""
v1 = backend.Variable(initializer=np.array([1.0, 2.0, 3.0]))
v2 = backend.Variable(initializer=np.array([4.0, 5.0, 6.0]))
result = v1**v2
self.assertAllClose(result, np.array([1.0, 32.0, 729.0]))
def test_variable_rpow(self):
"""Test reverse power operation on a variable."""
v1 = backend.Variable(initializer=np.array([1.0, 2.0, 3.0]))
v2 = backend.Variable(initializer=np.array([4.0, 5.0, 6.0]))
result = v2**v1
self.assertAllClose(result, np.array([4.0, 25.0, 216.0]))
@skip_if_backend(
"openvino", "`round` is not supported with openvino backend"
)
def test_round(self):
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/common/tensor_attributes.py | keras/src/backend/common/tensor_attributes.py | import weakref
from keras.src.backend.common import global_state
def _clear_tensor_attr(tensor_id, attr):
    """Drop the side-table entry for a garbage-collected tensor.

    Registered as a `weakref.finalize` callback by `set_tensor_attr`, so it
    runs after the tensor is gone and is keyed by its (former) `id()`.
    """
    registry = global_state.get_global_attribute(f"{attr}_dict")
    if registry is None:
        return
    # `pop` with a default is a no-op when the id was never recorded (or was
    # already removed by an explicit `set_tensor_attr(..., None)`).
    registry.pop(tensor_id, None)
def set_tensor_attr(tensor, attr, value):
    """Attach `value` to `tensor` under the name `attr`.

    Prefers a plain attribute on the tensor itself. Some tensor types reject
    new attributes (raising `AttributeError`); for those, the value is kept
    in a per-attribute global side table keyed by `id(tensor)`, together with
    a finalizer that clears the entry once the tensor is garbage-collected.
    Passing `value=None` removes any previously stored value.
    """
    try:
        setattr(tensor, attr, value)
        return
    except AttributeError:
        pass
    registry = global_state.get_global_attribute(f"{attr}_dict")
    if registry is None:
        if value is None:
            # Nothing stored and nothing to store.
            return
        registry = {}
        global_state.set_global_attribute(f"{attr}_dict", registry)
    key = id(tensor)
    if value is None:
        # Removal request: drop the entry if one exists.
        if key in registry:
            del registry[key]
    else:
        registry[key] = value
        # Clean up automatically when the tensor dies, so the id-keyed table
        # cannot grow without bound or serve a stale entry for a recycled id.
        weakref.finalize(tensor, _clear_tensor_attr, key, attr)
def get_tensor_attr(tensor, attr):
    """Return the value stored under `attr` for `tensor`, or `None`.

    Mirror of `set_tensor_attr`: reads a real attribute when one exists,
    otherwise falls back to the global side table keyed by `id(tensor)`.
    """
    if hasattr(tensor, attr):
        return getattr(tensor, attr, None)
    registry = global_state.get_global_attribute(f"{attr}_dict")
    return registry.get(id(tensor)) if registry is not None else None
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/common/global_state.py | keras/src/backend/common/global_state.py | import gc
import threading
from keras.src import backend
from keras.src.api_export import keras_export
GLOBAL_STATE_TRACKER = threading.local()
GLOBAL_SETTINGS_TRACKER = threading.local()
def set_global_attribute(name, value):
    # Attributes live on a `threading.local`, so each thread maintains an
    # independent copy of the Keras global state.
    setattr(GLOBAL_STATE_TRACKER, name, value)
def get_global_attribute(name, default=None, set_to_default=False):
    """Read a thread-local global attribute.

    Falls back to `default` when the attribute is unset (or was set to
    `None`). With `set_to_default=True`, the fallback is written back so
    that later reads observe the same object.
    """
    value = getattr(GLOBAL_STATE_TRACKER, name, None)
    if value is not None or default is None:
        return value
    if set_to_default:
        set_global_attribute(name, default)
    return default
@keras_export(["keras.utils.clear_session", "keras.backend.clear_session"])
def clear_session(free_memory=True):
    """Resets all state generated by Keras.
    Keras manages a global state, which it uses to implement the Functional
    model-building API and to uniquify autogenerated layer names.
    If you are creating many models in a loop, this global state will consume
    an increasing amount of memory over time, and you may want to clear it.
    Calling `clear_session()` releases the global state: this helps avoid
    clutter from old models and layers, especially when memory is limited.
    Args:
        free_memory: Whether to call Python garbage collection.
            It's usually a good practice to call it to make sure
            memory used by deleted objects is immediately freed.
            However, it may take a few seconds to execute, so
            when using `clear_session()` in a short loop,
            you may want to skip it.
    Example 1: calling `clear_session()` when creating models in a loop
    ```python
    for _ in range(100):
        # Without `clear_session()`, each iteration of this loop will
        # slightly increase the size of the global state managed by Keras
        model = keras.Sequential([
            keras.layers.Dense(10) for _ in range(10)])
    for _ in range(100):
        # With `clear_session()` called at the beginning,
        # Keras starts with a blank state at each iteration
        # and memory consumption is constant over time.
        keras.backend.clear_session()
        model = keras.Sequential([
            keras.layers.Dense(10) for _ in range(10)])
    ```
    Example 2: resetting the layer name generation counter
    >>> layers = [keras.layers.Dense(10) for _ in range(10)]
    >>> new_layer = keras.layers.Dense(10)
    >>> print(new_layer.name)
    dense_10
    >>> keras.backend.clear_session()
    >>> new_layer = keras.layers.Dense(10)
    >>> print(new_layer.name)
    dense
    """
    global GLOBAL_STATE_TRACKER
    global GLOBAL_SETTINGS_TRACKER
    # Rebinding fresh `threading.local` objects discards every attribute
    # previously stored on the old trackers via the module-level names.
    GLOBAL_STATE_TRACKER = threading.local()
    GLOBAL_SETTINGS_TRACKER = threading.local()
    # Backend-specific cleanup below uses lazy imports so that importing this
    # module never pulls in TF/torch for users of other backends.
    if backend.backend() == "tensorflow":
        from keras.src.utils.module_utils import tensorflow as tf
        tf.compat.v1.reset_default_graph()
        if tf.executing_eagerly():
            # Clear pending nodes in eager executors, kernel caches and
            # step_containers.
            from tensorflow.python.eager import context
            context.context().clear_kernel_cache()
    elif backend.backend() == "torch":
        import torch._dynamo as dynamo
        # Resets torchdynamo's cache so that cached guards, compiled fns, etc
        # do not persist between clear_session() calls.
        dynamo.reset()
    if free_memory:
        # Manually trigger garbage collection.
        gc.collect()
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/common/dtypes_test.py | keras/src/backend/common/dtypes_test.py | from unittest.mock import patch
import pytest
from absl.testing import parameterized
from keras.src import backend
from keras.src import ops
from keras.src.backend.common import dtypes
from keras.src.testing import test_case
from keras.src.testing.test_utils import named_product
class DtypesTest(test_case.TestCase):
    """Test the dtype to verify that the behavior matches JAX."""
    # Dtypes exercised by the promotion tests. 64-bit, string and complex128
    # dtypes are excluded up front, and float8 dtypes have no implicit
    # promotion rules at all. `None` is kept to exercise the default-dtype
    # path. Further backend-specific exclusions are applied below.
    ALL_DTYPES = [
        x
        for x in dtypes.ALLOWED_DTYPES
        if x
        not in (
            "string",
            "complex128",
            "float64",
            "uint64",
            "int64",
        )
        + dtypes.FLOAT8_TYPES  # Remove float8 dtypes for the following tests
    ] + [None]
    if backend.backend() == "torch":
        ALL_DTYPES = [x for x in ALL_DTYPES if x not in ("uint16", "uint32")]
    elif backend.backend() == "tensorflow":
        # TODO(hongyu): Re-enable uint32 tests once we determine how to handle
        # dtypes.result_type(uint32, int*) -> int64 promotion.
        # Since TF variables require int64 to be placed on the GPU, we
        # exclusively enable the int64 dtype for TF. However, JAX does not
        # natively support int64, which prevents us from comparing the dtypes.
        ALL_DTYPES = [x for x in ALL_DTYPES if x not in ("uint32",)]
    elif backend.backend() == "openvino":
        ALL_DTYPES = [x for x in ALL_DTYPES if x not in ("complex64",)]
    @parameterized.named_parameters(
        named_product(dtype1=ALL_DTYPES, dtype2=[bool, int, float])
    )
    def test_result_type_with_python_scalar_types(self, dtype1, dtype2):
        # JAX is the reference implementation for the promotion lattice.
        import jax.numpy as jnp
        out = backend.result_type(dtype1, dtype2)
        expected = jnp.result_type(dtype1, dtype2).name
        self.assertEqual(out, expected)
    @parameterized.named_parameters(
        named_product(dtype1=ALL_DTYPES, dtype2=ALL_DTYPES)
    )
    def test_result_type_with_tensor(self, dtype1, dtype2):
        import jax.numpy as jnp
        x1 = ops.ones((1,), dtype=dtype1)
        x2 = ops.ones((1,), dtype=dtype2)
        x1_jax = jnp.ones((1,), dtype=dtype1)
        x2_jax = jnp.ones((1,), dtype=dtype2)
        out = backend.result_type(x1.dtype, x2.dtype)
        expected = jnp.result_type(x1_jax, x2_jax).name
        self.assertEqual(out, expected)
    @parameterized.named_parameters(
        named_product(
            dtype=[
                "int8",
                "int16",
                "int32",
                "int64",
                "uint8",
                "uint16",
                "uint32",
            ]
        )
    )
    @pytest.mark.skipif(
        backend.backend() != "tensorflow", reason="TensorFlow only"
    )
    def test_result_type_with_int64(self, dtype):
        # https://github.com/keras-team/keras/issues/21677
        x1 = ops.ones((1,), dtype="int64")
        x2 = ops.ones((1,), dtype=dtype)
        out = backend.result_type(x1.dtype, x2.dtype)
        self.assertEqual(out, "int64")
    @parameterized.named_parameters(
        named_product(
            dtype=[
                "float16",
                "bfloat16",
                "float32",
                "float64",
                "int8",
                "int16",
                "int32",
                "int64",
                "uint8",
                "uint16",
            ]
        )
    )
    @pytest.mark.skipif(
        backend.backend() != "tensorflow", reason="TensorFlow only"
    )
    def test_result_type_with_float64(self, dtype):
        # Float types have a similar issue as int64 in TF.:
        # https://github.com/keras-team/keras/issues/21677
        x1 = ops.ones((1,), dtype="float64")
        x2 = ops.ones((1,), dtype=dtype)
        out = backend.result_type(x1.dtype, x2.dtype)
        self.assertEqual(out, "float64")
    def test_result_type_with_none(self):
        import jax.numpy as jnp
        self.assertEqual(backend.result_type(None), jnp.result_type(None).name)
    def test_result_type_empty_list(self):
        self.assertEqual(backend.result_type(), "float32")
    def test_respect_weak_type_for_bool(self):
        self.assertEqual(dtypes._respect_weak_type("bool", True), "bool")
    def test_respect_weak_type_for_int(self):
        self.assertEqual(dtypes._respect_weak_type("int32", True), "int")
    def test_respect_weak_type_for_float(self):
        self.assertEqual(dtypes._respect_weak_type("float32", True), "float")
    def test_resolve_weak_type_for_bfloat16(self):
        self.assertEqual(dtypes._resolve_weak_type("bfloat16"), "float32")
    def test_resolve_weak_type_for_bfloat16_with_precision(self):
        self.assertEqual(
            dtypes._resolve_weak_type("bfloat16", precision="64"), "float64"
        )
    def test_respect_weak_type_for_complex64(self):
        self.assertAllEqual(
            dtypes._respect_weak_type("complex64", True), "complex"
        )
    def test_respect_weak_type_for_complex128(self):
        self.assertAllEqual(
            dtypes._respect_weak_type("complex128", True), "complex"
        )
    def test_invalid_dtype_for_keras_promotion(self):
        with self.assertRaisesRegex(
            ValueError, "is not a valid dtype for Keras type promotion."
        ):
            dtypes._least_upper_bound("invalid_dtype")
    def test_resolve_weak_type_for_invalid_dtype(self):
        with self.assertRaisesRegex(
            ValueError, "Invalid value for argument `dtype`. Expected one of"
        ):
            dtypes._resolve_weak_type("invalid_dtype")
    def test_resolve_weak_type_for_invalid_precision(self):
        with self.assertRaisesRegex(
            ValueError,
            "Invalid value for argument `precision`. Expected one of",
        ):
            dtypes._resolve_weak_type("int32", precision="invalid_precision")
    def test_cycle_detection_in_make_lattice_upper_bounds(self):
        # NOTE(review): the monkeypatched lattice function is restored only
        # after the assertion block; if anything inside raises unexpectedly,
        # the patch leaks into other tests. Consider try/finally or
        # unittest.mock.patch.object instead.
        original_lattice_function = dtypes._type_promotion_lattice
        def mock_lattice():
            lattice = original_lattice_function()
            lattice["int32"].append("float32")
            lattice["float32"].append("int32")
            return lattice
        dtypes._type_promotion_lattice = mock_lattice
        with self.assertRaisesRegex(
            ValueError, "cycle detected in type promotion lattice for node"
        ):
            dtypes._make_lattice_upper_bounds()
        dtypes._type_promotion_lattice = original_lattice_function
    def test_respect_weak_type_for_invalid_dtype(self):
        with self.assertRaisesRegex(
            ValueError, "Invalid value for argument `dtype`. Expected one of"
        ):
            dtypes._respect_weak_type("invalid_dtype", True)
    def test_invalid_dtype_in_least_upper_bound(self):
        invalid_dtype = "non_existent_dtype"
        with self.assertRaisesRegex(
            ValueError, "is not a valid dtype for Keras type promotion"
        ):
            dtypes._least_upper_bound(invalid_dtype)
    def test_empty_lub_in_least_upper_bound(self):
        dtype1 = "float32"
        dtype2 = "int32"
        # `patch.dict` restores the table automatically on exit.
        with patch.dict(
            dtypes.LATTICE_UPPER_BOUNDS,
            {"float32": set(), "int32": set()},
            clear=True,
        ):
            with self.assertRaisesRegex(
                ValueError, "no available implicit dtype promotion path"
            ):
                dtypes._least_upper_bound(dtype1, dtype2)
    def test_valid_dtype_leading_to_single_lub_element(self):
        self.assertEqual(
            dtypes._least_upper_bound("float32", "int32"), "float32"
        )
    def test_valid_dtype_leading_to_keyerror_and_valueerror(self):
        # NOTE(review): duplicates test_invalid_dtype_in_least_upper_bound.
        invalid_dtype = "non_existent_dtype"
        with self.assertRaisesRegex(
            ValueError, "is not a valid dtype for Keras type promotion"
        ):
            dtypes._least_upper_bound(invalid_dtype)
    def test_resolve_weak_type_bool(self):
        self.assertEqual(dtypes._resolve_weak_type("bool"), "bool")
    def test_resolve_weak_type_int(self):
        self.assertEqual(
            dtypes._resolve_weak_type("int32", precision="32"), "int32"
        )
        self.assertEqual(
            dtypes._resolve_weak_type("int64", precision="64"), "int64"
        )
    def test_resolve_weak_type_uint(self):
        self.assertEqual(
            dtypes._resolve_weak_type("uint32", precision="32"), "uint32"
        )
        self.assertEqual(
            dtypes._resolve_weak_type("uint64", precision="64"), "uint64"
        )
    def test_resolve_weak_type_float(self):
        self.assertEqual(
            dtypes._resolve_weak_type("float32", precision="32"), "float32"
        )
        self.assertEqual(
            dtypes._resolve_weak_type("float64", precision="64"), "float64"
        )
    def test_least_upper_bound_ensure_order_independence(self):
        # Test to ensure _least_upper_bound is order-independent.
        result1 = dtypes._least_upper_bound("float32", "int32")
        result2 = dtypes._least_upper_bound("int32", "float32")
        self.assertEqual(result1, result2)
    def test_least_upper_bound_single_element(self):
        # NOTE(review): this mutates the module-level LATTICE_UPPER_BOUNDS
        # without cleanup, leaking "test_dtype" into every later test.
        # Prefer patch.dict as done in test_empty_lub_in_least_upper_bound.
        dtypes.LATTICE_UPPER_BOUNDS["test_dtype"] = {"test_dtype"}
        self.assertEqual(dtypes._least_upper_bound("test_dtype"), "test_dtype")
    def test_least_upper_bound_no_element(self):
        # NOTE(review): same global-state leak as the test above.
        dtypes.LATTICE_UPPER_BOUNDS["test_dtype"] = set()
        with self.assertRaisesRegex(
            ValueError, "no available implicit dtype promotion path"
        ):
            dtypes._least_upper_bound("test_dtype")
    def test_least_upper_bound_with_no_common_upper_bound(self):
        with patch.dict(
            dtypes.LATTICE_UPPER_BOUNDS,
            {"test_dtype1": set(), "test_dtype2": set()},
            clear=True,
        ):
            with self.assertRaisesRegex(
                ValueError, "no available implicit dtype promotion path"
            ):
                dtypes._least_upper_bound("test_dtype1", "test_dtype2")
    def test_invalid_float8_dtype(self):
        with self.assertRaisesRegex(
            ValueError, "There is no implicit conversions from float8 dtypes"
        ):
            dtypes.result_type("float8_e4m3fn", "bfloat16")
        with self.assertRaisesRegex(
            ValueError, "There is no implicit conversions from float8 dtypes"
        ):
            dtypes.result_type("float8_e5m2", "bfloat16")
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/common/name_scope.py | keras/src/backend/common/name_scope.py | from keras.src.backend.common import global_state
class name_scope:
    """Creates a sub-namespace for variable paths.

    Args:
        name: Name of the current scope (string).
        caller: Optional ID of a caller object (e.g. class instance).
        deduplicate: If `True`, if `caller` was passed,
            and the previous caller matches the current caller,
            and the previous name matches the current name,
            do not reenter a new namespace.
        override_parent: Can be used to provide an absolute path
            which would override any previously opened name scopes.
    """

    def __init__(
        self, name, caller=None, deduplicate=True, override_parent=None
    ):
        if not isinstance(name, str) or "/" in name:
            raise ValueError(
                "Argument `name` must be a string and "
                "cannot contain character `/`. "
                f"Received: name={name}"
            )
        self.name = name
        self.caller = caller
        self.deduplicate = deduplicate
        self.override_parent = override_parent
        # A caller that already knows its parent path pins this scope to an
        # absolute location, unless an explicit override was provided.
        if self.override_parent is None and deduplicate:
            caller_parent = getattr(caller, "_parent_path", None)
            if caller_parent is not None:
                self.override_parent = caller_parent
        self._pop_on_exit = False

    def __enter__(self):
        stack = global_state.get_global_attribute(
            "name_scope_stack", default=[], set_to_default=True
        )
        if self.deduplicate and stack:
            top = stack[-1]
            same_caller = (
                self.caller is not None and self.caller is top.caller
            )
            if same_caller and self.name == top.name:
                # Re-entering the scope we are already in: don't nest.
                return self
        stack.append(self)
        self._pop_on_exit = True
        return self

    def __exit__(self, *args, **kwargs):
        if not self._pop_on_exit:
            return
        # The stack may have been cleared (or never created) by the time we
        # exit, e.g. after clear_session(); guard before popping.
        stack = global_state.get_global_attribute("name_scope_stack")
        if stack:
            stack.pop()
def current_path():
    """Return the `/`-joined path of the currently open name scopes."""
    stack = global_state.get_global_attribute("name_scope_stack")
    if stack is None:
        return ""
    segments = []
    for scope in stack:
        if scope.override_parent is not None:
            # An absolute parent path restarts the accumulated path.
            segments = [
                part for part in scope.override_parent.split("/") if part
            ]
        segments.append(scope.name)
    return "/".join(segments)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/common/name_scope_test.py | keras/src/backend/common/name_scope_test.py | import threading
from keras.src import testing
from keras.src.backend.common import global_state
from keras.src.backend.common.name_scope import current_path
from keras.src.backend.common.name_scope import name_scope
class NameScopeTest(testing.TestCase):
    """Unit tests for `name_scope` and `current_path`."""
    def test_stacking(self):
        # Nested scopes accumulate into a `/`-joined path and unwind in
        # LIFO order.
        self.assertEqual(current_path(), "")
        with name_scope("outer") as outer:
            self.assertEqual(outer.name, "outer")
            self.assertEqual(current_path(), "outer")
            with name_scope("middle") as middle:
                self.assertEqual(middle.name, "middle")
                self.assertEqual(current_path(), "outer/middle")
                with name_scope("inner") as inner:
                    self.assertEqual(inner.name, "inner")
                    self.assertEqual(current_path(), "outer/middle/inner")
                self.assertEqual(current_path(), "outer/middle")
            self.assertEqual(current_path(), "outer")
        self.assertEqual(current_path(), "")
    def test_deduplication(self):
        # Deduplication only kicks in when the same caller re-enters the
        # same name; without a caller, identical names nest normally.
        self.assertEqual(current_path(), "")
        with name_scope("name", caller=1):
            with name_scope("name", caller=1):
                self.assertEqual(current_path(), "name")
        self.assertEqual(current_path(), "")
        with name_scope("name"):
            with name_scope("name"):
                self.assertEqual(current_path(), "name/name")
    def test_errors(self):
        with self.assertRaisesRegex(ValueError, "must be a string"):
            name_scope("foo/bar")
        with self.assertRaisesRegex(ValueError, "must be a string"):
            name_scope(4)
    def test_override_parent(self):
        # An absolute `override_parent` replaces any previously opened
        # scopes in the computed path.
        self.assertEqual(current_path(), "")
        with name_scope("outer"):
            self.assertEqual(current_path(), "outer")
            with name_scope("middle", override_parent="/absolute/path"):
                self.assertEqual(current_path(), "absolute/path/middle")
                with name_scope("inner"):
                    self.assertEqual(
                        current_path(), "absolute/path/middle/inner"
                    )
            self.assertEqual(current_path(), "outer")
    def test_exit_with_none_stack(self):
        """Test that __exit__ handles None name_scope_stack gracefully."""
        # Create a name_scope instance
        scope = name_scope("test")
        # Enter the scope normally
        scope.__enter__()
        # Simulate the scenario where global state is cleared
        # (e.g., in a different thread)
        global_state.set_global_attribute("name_scope_stack", None)
        # Exit should not raise an AttributeError
        scope.__exit__()
        # Clean up: reset the stack
        global_state.set_global_attribute("name_scope_stack", [])
    def test_exit_with_empty_stack(self):
        """Test that __exit__ handles empty name_scope_stack gracefully."""
        # Create a name_scope instance
        scope = name_scope("test")
        # Enter the scope normally
        scope.__enter__()
        # Simulate the scenario where the stack is cleared
        name_scope_stack = global_state.get_global_attribute("name_scope_stack")
        name_scope_stack.clear()
        # Exit should not raise an IndexError
        scope.__exit__()
        # Verify stack is still empty
        name_scope_stack = global_state.get_global_attribute(
            "name_scope_stack", default=[]
        )
        self.assertEqual(len(name_scope_stack), 0)
    def test_multithreaded_name_scope(self):
        """Test name_scope in multithreaded environment."""
        results = []
        def thread_function(thread_id):
            # Each thread should have its own name_scope_stack
            with name_scope(f"thread_{thread_id}"):
                path = current_path()
                results.append(path)
                # Verify we get the expected path
                self.assertEqual(path, f"thread_{thread_id}")
        # Create and start multiple threads
        threads = []
        for i in range(5):
            thread = threading.Thread(target=thread_function, args=(i,))
            threads.append(thread)
            thread.start()
        # Wait for all threads to complete
        for thread in threads:
            thread.join()
        # Verify all threads executed successfully
        self.assertEqual(len(results), 5)
    def test_exit_without_pop_on_exit(self):
        """Test that __exit__ respects _pop_on_exit flag."""
        # Create a name_scope but don't enter it
        scope = name_scope("test")
        # _pop_on_exit should be False
        self.assertFalse(scope._pop_on_exit)
        # Set up a stack manually
        global_state.set_global_attribute("name_scope_stack", [scope])
        scope.__exit__()
        # Verify the stack still contains the scope
        name_scope_stack = global_state.get_global_attribute("name_scope_stack")
        self.assertEqual(len(name_scope_stack), 1)
        # Clean up
        global_state.set_global_attribute("name_scope_stack", [])
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/common/variables.py | keras/src/backend/common/variables.py | import numpy as np
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.backend import config
from keras.src.backend.common import dtypes
from keras.src.backend.common import global_state
from keras.src.backend.common.name_scope import current_path
from keras.src.backend.common.stateless_scope import get_stateless_scope
from keras.src.backend.common.stateless_scope import in_stateless_scope
from keras.src.utils.module_utils import tensorflow as tf
from keras.src.utils.naming import auto_name
class Variable:
    """Represents a backend-agnostic variable in Keras.

    A `Variable` acts as a container for state. It holds a tensor value and can
    be updated. With the JAX backend, variables are used to implement
    "functionalization", the pattern of lifting stateful operations out of
    a piece of computation to turn it into a stateless function.

    Args:
        initializer: Initial value or callable for initialization.
            If a callable is used, it should take the arguments
            `shape` and `dtype`.
        shape: Optional. Tuple for the variable's shape.
            Required if `initializer` is a callable.
        dtype: Optional. Data type of the variable. Defaults to the global float
            dtype type (`"float32"` if never configured).
        trainable: Optional. Boolean indicating if variable is trainable.
            Defaults to `True`.
        autocast: Optional. Boolean indicating whether the variable supports
            autocasting. If `True`, the layer may first convert the variable
            to the compute data type when accessed. Defaults to `True`.
        aggregation: Optional string, one of `None`, `"none"`, `"mean"`,
            `"sum"` or `"only_first_replica"` specifying how a distributed
            variable will be aggregated. This serves as a semantic annotation,
            to be taken into account by downstream backends or users. Defaults
            to `"none"`.
        name: Optional. A unique name for the variable. Automatically generated
            if not set.

    Attributes:
        shape: The shape of the variable (tuple of integers).
        ndim: The number of dimensions of the variable (integer).
        dtype: The data type of the variable (string).
        trainable: Whether the variable is trainable (boolean).
        autocast: Whether the variable supports autocasting (boolean).
        aggregation: How a distributed variable will be aggregated (string).
        value: The current value of the variable (NumPy array or tensor).
        name: The name of the variable (string).
        path: The path of the variable within the Keras model or layer (string).
        kwargs: Additional backend-specific keyword arguments.

    Examples:

    **Initializing a `Variable` with a NumPy array:**

    ```python
    import numpy as np
    import keras
    initial_array = np.ones((3, 3))
    variable_from_array = keras.Variable(initializer=initial_array)
    ```

    **Using a Keras initializer to create a `Variable`:**

    ```python
    from keras.src.initializers import Ones
    variable_from_initializer = keras.Variable(
        initializer=Ones(), shape=(3, 3), dtype="float32"
    )
    ```

    **Updating the value of a `Variable`:**

    ```python
    new_value = np.zeros((3, 3), dtype="float32")
    variable_from_array.assign(new_value)
    ```

    **Marking a `Variable` as non-trainable:**

    ```python
    non_trainable_variable = keras.Variable(
        initializer=np.ones((3, 3), dtype="float32"), trainable=False
    )
    ```
    """

    def __init__(
        self,
        initializer,
        shape=None,
        dtype=None,
        trainable=True,
        autocast=True,
        aggregation="none",
        synchronization="auto",
        name=None,
        **kwargs,
    ):
        # Extra kwargs are accepted for backend-subclass compatibility but
        # are ignored here.
        del kwargs
        name = name or auto_name(self.__class__.__name__)
        if not isinstance(name, str) or "/" in name:
            raise ValueError(
                "Argument `name` must be a string and "
                "cannot contain character `/`. "
                f"Received: name={name}"
            )
        if aggregation not in (
            None,
            "none",
            "mean",
            "sum",
            "only_first_replica",
        ):
            raise ValueError(
                "Invalid value for argument `aggregation`. Expected "
                "one of `None`, `'none'`, `'mean'`, `'sum'`, "
                "`'only_first_replica'`. "
                f"Received: aggregation={aggregation}"
            )
        if aggregation is None:
            aggregation = "none"
        if synchronization not in (
            None,
            "none",
            "on_read",
            "on_write",
            "auto",
        ):
            raise ValueError(
                "Invalid value for argument `synchronization`. Expected "
                "one of `None`, `'none'`, `'on_read'`, `'on_write'`, "
                "`'auto'`. "
                f"Received: synchronization={synchronization}"
            )
        if synchronization is None:
            synchronization = "none"
        self._name = name
        # The path prefixes the name with the enclosing layer/model scope.
        parent_path = current_path()
        if parent_path:
            self._path = f"{parent_path}/{name}"
        else:
            self._path = name
        self._shape = None
        self._initializer = None
        self._regularizer = None
        self._constraint = None
        self._trainable = bool(trainable)
        self._autocast = bool(autocast)
        self._aggregation = aggregation
        self._synchronization = synchronization
        # `self._overwrite_with_gradient` is an internal property to determine
        # whether this variable should be overwritten by the computed gradient.
        # Ref: https://github.com/google/flax/blob/main/flax/linen/fp8_ops.py
        self._overwrite_with_gradient = False
        if isinstance(initializer, str):
            from keras.src import initializers

            initializer = initializers.get(initializer)
        if callable(initializer):
            if shape is None:
                raise ValueError(
                    "When creating a Variable from an initializer, "
                    "the `shape` argument should be specified. "
                    f"Received: initializer={initializer} "
                    f"and shape={shape}"
                )
        else:
            initializer = self._convert_to_tensor(initializer, dtype=dtype)
            # If dtype is None and `initializer` is an array, use its dtype.
            if dtype is None:
                dtype = initializer.dtype
        self._dtype = standardize_dtype(dtype)
        if in_stateless_scope():
            # Inside a stateless scope, only deferred (callable-initializer)
            # creation is allowed; the variable is registered and initialized
            # once the scope exits.
            if callable(initializer):
                self._value = None
                self._initializer = initializer
                self._shape = self._validate_shape(shape)
                register_uninitialized_variable(self)
            else:
                raise ValueError(
                    "You are attempting to create a variable "
                    "while in a stateless scope. This is disallowed. "
                    "Make sure that all variables are created "
                    "before you start using your layer/model objects.\n\n"
                    "In some cases, you might be seeing this error "
                    "because you need to "
                    "implement a `def build(self, input_shape)` method "
                    "on your layer/model, which will "
                    "create its variables.\n\n"
                    "In some other cases, you might be seeing this error "
                    "because you are instantiating a `Variable` and "
                    "assigning it to a layer without going through "
                    "self.add_variable()/self.add_weight(). Always prefer "
                    "using these methods "
                    "(with a `shape` and `initializer` argument)."
                )
        else:
            if callable(initializer):
                self._shape = self._validate_shape(shape)
                self._initialize_with_initializer(initializer)
            else:
                self._initialize(initializer)
                self._shape = self._validate_shape(self._value.shape)
        self._ndim = len(self._shape)

    def _deferred_initialize(self):
        # Runs the stored initializer for a variable that was created inside
        # a stateless scope, once back in eager scope.
        if self._value is not None:
            # If NNX is enabled, it's possible the variable was already
            # initialized by a concrete call. In this case, _deferred_initialize
            # returns early and does not raise an error.
            if config.is_nnx_enabled():
                return
            raise ValueError(f"Variable {self.path} is already initialized.")
        if in_stateless_scope():
            raise ValueError(
                "You are attempting to initialize a variable "
                "while in a stateless scope. This is disallowed. "
                "Make sure that all variables are initialized "
                "before you start using your layer/model objects."
            )
        self._initialize_with_initializer(self._initializer)
        self._initializer = None

    def _validate_shape(self, shape):
        # Standardizes `shape` and rejects undefined (`None`) dimensions,
        # since a variable's backing buffer must be fully defined.
        shape = standardize_shape(shape)
        if None in shape:
            raise ValueError(
                "Shapes used to initialize variables must be "
                "fully-defined (no `None` dimensions). Received: "
                f"shape={shape} for variable path='{self.path}'"
            )
        return shape

    def _maybe_autocast(self, value):
        # Casts `value` through the active autocast scope, if any, and only
        # if this variable opted into autocasting.
        autocast_scope = get_autocast_scope()
        if self._autocast and autocast_scope is not None:
            return autocast_scope.maybe_cast(value)
        return value

    def numpy(self):
        """Return the variable's current value as a NumPy array."""
        return np.array(self)

    @property
    def aggregation(self):
        """The strategy for aggregating this variable."""
        return self._aggregation

    @property
    def synchronization(self):
        """The strategy for synchronizing this variable."""
        return self._synchronization

    @property
    def value(self):
        """The current value of the variable (numpy array or backend tensor)."""
        if in_stateless_scope():
            # Prefer the scope-local value if one has been recorded.
            scope = get_stateless_scope()
            value = scope.get_current_value(self)
            if value is not None:
                return self._maybe_autocast(value)
        if self._value is None:
            # Uninitialized variable. Return a placeholder.
            # This is fine because it's only ever used
            # in during shape inference / graph tracing
            # (anything else would be a bug, to be fixed.)
            return self._maybe_autocast(
                self._initializer(self._shape, dtype=self._dtype)
            )
        return self._maybe_autocast(self._value)

    def assign(self, value):
        """Assign `value` to the variable and return the converted tensor."""
        value = self._convert_to_tensor(value, dtype=self._dtype)
        if not shape_equal(value.shape, self.shape):
            raise ValueError(
                "The shape of the target variable and "
                "the shape of the target value in "
                "`variable.assign(value)` must match. "
                f"variable.shape={self.shape}, "
                f"Received: value.shape={value.shape}. "
                f"Target variable: {self}"
            )
        if in_stateless_scope():
            # Record the update in the scope instead of mutating in place.
            scope = get_stateless_scope()
            scope.add_update((self, value))
        else:
            self._direct_assign(value)
        return value

    def assign_add(self, value):
        """Add `value` to the variable (scope-aware)."""
        return self.assign(self + value)

    def assign_sub(self, value):
        """Subtract `value` from the variable (scope-aware)."""
        return self.assign(self - value)

    @property
    def dtype(self):
        """The data type of the variable."""
        # Under an active autocast scope, float variables report the scope's
        # compute dtype rather than their storage dtype.
        autocast_scope = get_autocast_scope()
        if (
            self._autocast
            and autocast_scope is not None
            and is_float_dtype(self._dtype)
        ):
            dtype = autocast_scope.dtype
        else:
            dtype = self._dtype
        return backend.standardize_dtype(dtype)

    @property
    def shape(self):
        """The shape of the variable."""
        return self._shape

    @property
    def ndim(self):
        """The number of dimensions of the variable."""
        return self._ndim

    @property
    def trainable(self):
        """Whether the variable is trainable."""
        return self._trainable

    @trainable.setter
    def trainable(self, value):
        self._trainable = bool(value)

    @property
    def name(self):
        """The name of the variable."""
        return self._name

    @property
    def path(self):
        """The path of the variable within the Keras model or layer."""
        return self._path

    @property
    def overwrite_with_gradient(self):
        """Whether this variable should be overwritten by the gradient.

        This property is designed for a special case where we want to overwrite
        the variable directly with its computed gradient. For example, in float8
        training, new `scale` and `amax_history` are computed as gradients, and
        we want to overwrite them directly instead of following the typical
        procedure such as gradient descent with a learning rate, gradient
        clipping and weight decaying.
        """
        return self._overwrite_with_gradient

    @overwrite_with_gradient.setter
    def overwrite_with_gradient(self, value):
        if not isinstance(value, bool):
            raise TypeError(
                "`overwrite_with_gradient` must be a boolean. "
                f"Received: {value}"
            )
        self._overwrite_with_gradient = value

    @property
    def regularizer(self):
        """Optional `keras.regularizers.Regularizer` applied to this variable."""
        return self._regularizer

    @regularizer.setter
    def regularizer(self, value):
        from keras.src.regularizers import Regularizer

        if value is not None and not isinstance(value, Regularizer):
            raise ValueError(
                "Invalid value for attribute `regularizer`. Expected an "
                "instance of `keras.regularizers.Regularizer`, or `None`. "
                f"Received: regularizer={value}"
            )
        self._regularizer = value

    @property
    def constraint(self):
        """Optional `keras.constraints.Constraint` applied to this variable."""
        return self._constraint

    @constraint.setter
    def constraint(self, value):
        from keras.src.constraints import Constraint

        if value is not None and not isinstance(value, Constraint):
            raise ValueError(
                "Invalid value for attribute `constraint`. Expected an "
                "instance of `keras.constraints.Constraint`, or `None`. "
                f"Received: constraint={value}"
            )
        self._constraint = value

    def __repr__(self):
        value = None
        if hasattr(self, "_value") and self._value is not None:
            try:
                value = backend.core.convert_to_numpy(self._value)
            except:
                # In some cases the conversion to numpy can fail.
                pass
        value_str = f", value={value}" if value is not None else ""
        return (
            f"<Variable path={self.path}, shape={self.shape}, "
            f"dtype={self.dtype}{value_str}>"
        )

    def _initialize(self, value):
        # Backend subclasses create the concrete backing tensor here.
        raise NotImplementedError

    def _initialize_with_initializer(self, initializer):
        # Materializes a value from the initializer and hands it to the
        # backend-specific `_initialize`.
        value = self._convert_to_tensor(
            initializer(self._shape, dtype=self._dtype)
        )
        self._initialize(value)

    def _convert_to_tensor(self, value, dtype=None):
        # Backend subclasses implement conversion to a backend tensor.
        raise NotImplementedError

    def __getitem__(self, idx):
        return self.value.__getitem__(idx)

    def __int__(self):
        if self.ndim > 0:
            raise TypeError(
                "Only scalar arrays can be converted to Python scalars. "
                f"Got: shape={self.shape}"
            )
        return int(self.value)

    def __float__(self):
        if self.ndim > 0:
            raise TypeError(
                "Only scalar arrays can be converted to Python scalars. "
                f"Got: shape={self.shape}"
            )
        return float(self.value)

    def __array__(self, dtype=None):
        # We can't directly use self.value.__array__ here because of scalar.
        # Numpy require this method to return as array like object. In the case
        # of scalar, it will fail the type checking from numpy. We need to
        # return a 0d array via numpy.
        return np.asarray(self.value.__array__(dtype))

    def __bool__(self):
        raise TypeError("A Keras Variable cannot be used as a boolean.")

    def __neg__(self):
        return self.value.__neg__()

    def __pos__(self):
        return self.value

    def __abs__(self):
        return self.value.__abs__()

    def __invert__(self):
        return self.value.__invert__()

    # Comparison and arithmetic operators delegate to backend.numpy ops,
    # operating on the (possibly autocast) current value.
    def __eq__(self, other):
        return backend.numpy.equal(self.value, other)

    def __ne__(self, other):
        return backend.numpy.not_equal(self.value, other)

    def __lt__(self, other):
        return backend.numpy.less(self.value, other)

    def __le__(self, other):
        return backend.numpy.less_equal(self.value, other)

    def __gt__(self, other):
        return backend.numpy.greater(self.value, other)

    def __ge__(self, other):
        return backend.numpy.greater_equal(self.value, other)

    def __add__(self, other):
        return backend.numpy.add(self.value, other)

    def __radd__(self, other):
        return backend.numpy.add(other, self.value)

    def __sub__(self, other):
        return backend.numpy.subtract(self.value, other)

    def __rsub__(self, other):
        return backend.numpy.subtract(other, self.value)

    def __mul__(self, other):
        return backend.numpy.multiply(self.value, other)

    def __rmul__(self, other):
        return backend.numpy.multiply(other, self.value)

    def __truediv__(self, other):
        return backend.numpy.true_divide(self.value, other)

    def __rtruediv__(self, other):
        return backend.numpy.true_divide(other, self.value)

    def __floordiv__(self, other):
        return backend.numpy.floor_divide(self.value, other)

    def __rfloordiv__(self, other):
        return backend.numpy.floor_divide(other, self.value)

    def __mod__(self, other):
        return backend.numpy.mod(self.value, other)

    def __rmod__(self, other):
        return backend.numpy.mod(other, self.value)

    def __pow__(self, other):
        return backend.numpy.power(self.value, other)

    def __rpow__(self, other):
        return backend.numpy.power(other, self.value)

    def __matmul__(self, other):
        return backend.numpy.matmul(self.value, other)

    def __rmatmul__(self, other):
        return backend.numpy.matmul(other, self.value)

    def __and__(self, other):
        return backend.numpy.logical_and(self.value, other)

    def __rand__(self, other):
        return backend.numpy.logical_and(other, self.value)

    def __or__(self, other):
        return backend.numpy.logical_or(self.value, other)

    def __ror__(self, other):
        return backend.numpy.logical_or(other, self.value)

    def __xor__(self, other):
        return backend.numpy.logical_xor(self.value, other)

    def __rxor__(self, other):
        return backend.numpy.logical_xor(other, self.value)

    def __round__(self, ndigits=None):
        decimals = ndigits or 0
        return backend.numpy.round(self.value, decimals=decimals)
def register_uninitialized_variable(variable):
    """Track `variable` globally so it can be initialized later."""
    # Fetch (creating if absent) the global list of deferred variables.
    pending = global_state.get_global_attribute(
        "uninitialized_variables", [], set_to_default=True
    )
    pending.append(variable)
def initialize_all_variables():
    """Run deferred initialization for every tracked variable, then reset."""
    pending = global_state.get_global_attribute("uninitialized_variables")
    for variable in pending or ():
        variable._deferred_initialize()
    # Clear the registry so variables are not initialized twice.
    global_state.set_global_attribute("uninitialized_variables", [])
@keras_export(
    ["keras.utils.standardize_dtype", "keras.backend.standardize_dtype"]
)
def standardize_dtype(dtype):
    """Canonicalize `dtype` to one of the Keras dtype strings.

    `None` maps to the configured global float dtype. Python scalar types
    are mapped via `PYTHON_DTYPES_MAP`; framework dtype objects are reduced
    to a string via `.name`, `.__name__`, or (for torch/jax dtypes) the
    last component of their string form.

    Raises:
        ValueError: If the canonical name is not an allowed Keras dtype.
    """
    if dtype is None:
        return config.floatx()
    dtype = dtypes.PYTHON_DTYPES_MAP.get(dtype, dtype)
    if hasattr(dtype, "name"):
        name = dtype.name
    elif hasattr(dtype, "__name__"):
        name = dtype.__name__
    elif hasattr(dtype, "__str__") and (
        "torch" in str(dtype) or "jax.numpy" in str(dtype)
    ):
        name = str(dtype).split(".")[-1]
    else:
        name = dtype
    if name not in dtypes.ALLOWED_DTYPES:
        raise ValueError(f"Invalid dtype: {name}")
    return name
def standardize_shape(shape):
    """Canonicalize `shape` into a tuple of non-negative ints and/or `None`.

    Accepts any iterable of dimensions. Backend-specific shape objects are
    normalized first: `tf.TensorShape` is converted via `as_list()`, and JAX
    symbolic dimensions are replaced with `None`.

    Raises:
        ValueError: If `shape` is `None`, not iterable, or contains a
            dimension that is not a non-negative int (or `None`).
    """
    if not isinstance(shape, tuple):
        if shape is None:
            raise ValueError("Undefined shapes are not supported.")
        if not hasattr(shape, "__iter__"):
            raise ValueError(f"Cannot convert '{shape}' to a shape.")
        if config.backend() == "tensorflow":
            if isinstance(shape, tf.TensorShape):
                # `tf.TensorShape` may contain `Dimension` objects.
                # We need to convert the items in it to either int or `None`
                shape = shape.as_list()
        if config.backend() == "jax":
            # Replace `_DimExpr` (dimension expression) with None
            from jax import export as jax_export

            shape = tuple(
                None if jax_export.is_symbolic_dim(d) else d for d in shape
            )
    # Handle dimensions that are not ints and not None, verify they're >= 0.
    standardized_shape = []
    for d in shape:
        if d is None:
            standardized_shape.append(d)
            continue
        # Reject these even if they can be cast to int successfully.
        if isinstance(d, (str, float)):
            raise ValueError(
                f"Cannot convert '{shape}' to a shape. "
                f"Found invalid dimension '{d}' of type '{type(d)}'. "
            )
        try:
            # Cast numpy scalars, tf constant tensors, etc.
            d = int(d)
        except Exception as e:
            raise ValueError(
                f"Cannot convert '{shape}' to a shape. "
                f"Found invalid dimension '{d}' of type '{type(d)}'. "
            ) from e
        if d < 0:
            raise ValueError(
                f"Cannot convert '{shape}' to a shape. "
                "Negative dimensions are not allowed."
            )
        standardized_shape.append(d)
    # This also turns subclasses of `tuple` (e.g. `torch.Size`) to plain tuple.
    return tuple(standardized_shape)
def shape_equal(a_shape, b_shape):
    """Return `True` if the two shapes match, treating `None` as a wildcard.

    Shapes must have the same rank; a `None` entry on either side matches
    any dimension at the same position.
    """
    if len(a_shape) != len(b_shape):
        return False
    return all(
        d1 is None or d2 is None or d1 == d2
        for d1, d2 in zip(a_shape, b_shape)
    )
@keras_export("keras.backend.is_float_dtype")
def is_float_dtype(dtype):
dtype = standardize_dtype(dtype)
return dtype.startswith("float") or dtype.startswith("bfloat")
@keras_export("keras.backend.is_int_dtype")
def is_int_dtype(dtype):
dtype = standardize_dtype(dtype)
return dtype.startswith("int") or dtype.startswith("uint")
def get_autocast_scope():
    """Return the innermost active `AutocastScope`, or `None` if none."""
    return global_state.get_global_attribute("autocast_scope")
class AutocastScope:
    """Context manager under which float variables are autocast.

    While the scope is active, accessing a float `Variable` yields its value
    cast to `dtype`. The target `dtype` must itself be a float dtype, or
    `None` to disable casting.
    """

    def __init__(self, dtype):
        if dtype is not None:
            dtype = standardize_dtype(dtype)
            if not is_float_dtype(dtype):
                raise ValueError(
                    "`AutocastScope` can only be used with "
                    "a floating-point target dtype, such as 'float16'. "
                    f"Received: dtype={dtype}"
                )
        self.dtype = dtype
        self.original_scope = None

    def maybe_cast(self, value):
        """Cast `value` to the target dtype if it is a float tensor."""
        from keras.src import backend

        target = self.dtype
        if target is None or not is_float_dtype(value.dtype):
            return value
        return backend.cast(value, dtype=target)

    def __enter__(self):
        # Remember the enclosing scope so that nesting restores correctly.
        self.original_scope = get_autocast_scope()
        global_state.set_global_attribute("autocast_scope", self)

    def __exit__(self, *args, **kwargs):
        global_state.set_global_attribute("autocast_scope", self.original_scope)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/common/__init__.py | keras/src/backend/common/__init__.py | from keras.src.backend.common import backend_utils
from keras.src.backend.common.dtypes import result_type
from keras.src.backend.common.variables import AutocastScope
from keras.src.backend.common.variables import Variable as KerasVariable
from keras.src.backend.common.variables import get_autocast_scope
from keras.src.backend.common.variables import is_float_dtype
from keras.src.backend.common.variables import is_int_dtype
from keras.src.backend.common.variables import standardize_dtype
from keras.src.backend.common.variables import standardize_shape
from keras.src.random import random
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/common/dtypes.py | keras/src/backend/common/dtypes.py | import functools
from keras.src.api_export import keras_export
from keras.src.backend import config
from keras.src.backend.common.variables import standardize_dtype
BOOL_TYPES = ("bool",)
INT_TYPES = (
"uint8",
"uint16",
"uint32",
"uint64",
"int8",
"int16",
"int32",
"int64",
)
FLOAT_TYPES = ("bfloat16", "float16", "float32", "float64")
WEAK_TYPES = ("int", "float")
COMPLEX_TYPES = ("complex64", "complex128")
# We need to separate float8 from float because there are no implicit
# conversions from float8 dtypes to other dtypes.
# Ref: https://github.com/google/jax/issues/16705
FLOAT8_TYPES = ("float8_e4m3fn", "float8_e5m2")
# All supported dtypes in Keras
ALLOWED_DTYPES = (
"float16",
"float32",
"float64",
"uint8",
"uint16",
"uint32",
"uint64",
"int8",
"int16",
"int32",
"int64",
"bfloat16",
"bool",
"string",
"float8_e4m3fn",
"float8_e5m2",
"complex64",
"complex128",
)
PYTHON_DTYPES_MAP = {
bool: "bool",
int: "int64" if config.backend() == "tensorflow" else "int32",
float: "float32",
str: "string",
# special case for string value
"int": "int64" if config.backend() == "tensorflow" else "int32",
complex: "complex128" if config.backend() == "tensorflow" else "complex64",
}
# We adapted the type promotion lattice from JAX. Ref:
# https://github.com/google/jax/blob/main/jax/_src/dtypes.py


def _type_promotion_lattice():
    """
    Return the type promotion lattice in the form of a DAG.
    This DAG maps each type to its immediately higher type on the lattice.
    """
    (b1,) = BOOL_TYPES
    (u1, u2, u4, u8, i1, i2, i4, i8) = INT_TYPES
    bf, f2, f4, f8 = FLOAT_TYPES
    # `i_` and `f_` are the weak (Python scalar) int and float nodes.
    i_, f_ = WEAK_TYPES
    c64, c128 = COMPLEX_TYPES
    out = {
        b1: [i_],
        u1: [i2, u2],
        u2: [i4, u4],
        u4: [i8, u8],
        u8: [f_],
        i_: [u1, i1, c64],
        i1: [i2],
        i2: [i4],
        i4: [i8],
        i8: [f_],
        f_: [bf, f2],
        bf: [f4],
        f2: [f4],
        f4: [f8, c64],
        f8: [c128],
        c64: [c128],
        c128: [],
    }
    return out
def _make_lattice_upper_bounds():
    """For each lattice node, compute the set of all nodes at or above it."""
    lattice = _type_promotion_lattice()
    upper_bounds = {node: {node} for node in lattice}
    for node in lattice:
        # Expand the reachable set to a fixed point (transitive closure).
        while True:
            frontier = set().union(
                *(lattice[b] for b in upper_bounds[node])
            )
            if node in frontier:
                raise ValueError(
                    f"cycle detected in type promotion lattice for node {node}"
                )
            if frontier.issubset(upper_bounds[node]):
                break
            upper_bounds[node] |= frontier
    return upper_bounds
# Precomputed transitive closure of the promotion lattice, keyed by dtype.
LATTICE_UPPER_BOUNDS = _make_lattice_upper_bounds()
@functools.lru_cache(512)
def _least_upper_bound(*nodes):
    """Compute the least upper bound of a set of nodes.

    Args:
        nodes: sequence of entries from dtypes + weak_types

    Returns:
        The type representing the least upper bound of the input nodes on the
        promotion lattice.

    Raises:
        ValueError: If a node is not on the lattice, or if the inputs have no
            (unique) least upper bound.
    """
    # This function computes the least upper bound of a set of nodes N within a
    # partially ordered set defined by the lattice generated above.
    # Given a partially ordered set S, let the set of upper bounds of n ∈ S be
    #   UB(n) ≡ {m ∈ S | n ≤ m}
    # Further, for a set of nodes N ⊆ S, let the set of common upper bounds be
    # given by
    #   CUB(N) ≡ {a ∈ S | ∀ b ∈ N: a ∈ UB(b)}
    # Then the least upper bound of N is defined as
    #   LUB(N) ≡ {c ∈ CUB(N) | ∀ d ∈ CUB(N), c ≤ d}
    # The definition of an upper bound implies that
    #   c ≤ d if and only if d ∈ UB(c),
    # so the LUB can be expressed:
    #   LUB(N) = {c ∈ CUB(N) | ∀ d ∈ CUB(N): d ∈ UB(c)}
    # or, equivalently:
    #   LUB(N) = {c ∈ CUB(N) | CUB(N) ⊆ UB(c)}
    # By definition, LUB(N) has a cardinality of 1 for a partially ordered set.
    # Note a potential algorithmic shortcut: from the definition of CUB(N),
    # we have
    #   ∀ c ∈ N: CUB(N) ⊆ UB(c)
    # So if N ∩ CUB(N) is nonempty, if follows that LUB(N) = N ∩ CUB(N).
    N = set(nodes)
    UB = LATTICE_UPPER_BOUNDS
    try:
        bounds = [UB[n] for n in N]
    except KeyError:
        dtype = next(n for n in N if n not in UB)
        raise ValueError(
            f"{dtype=} is not a valid dtype for Keras type promotion."
        )
    CUB = set.intersection(*bounds)
    LUB = (CUB & N) or {c for c in CUB if CUB.issubset(UB[c])}
    if len(LUB) == 1:
        return LUB.pop()
    elif len(LUB) == 0:
        msg = (
            f"Input dtypes {tuple(str(n) for n in nodes)} have no available "
            "implicit dtype promotion path. Try explicitly casting inputs to "
            "the desired output type."
        )
        raise ValueError(msg)
    else:
        # If we get here, it means the lattice is ill-formed.
        raise ValueError(
            f"Internal Type Promotion error: {nodes} do not have a unique "
            f"least upper bound on the specified lattice; options are {LUB}. "
            "This is an unexpected error in Keras's internal logic; "
            "please report it to the maintainers."
        )
def _dtype_and_weaktype(value):
    """Return a (dtype, weak_type) tuple for the given input.

    Only the Python builtins `int` and `float` themselves (not instances)
    are considered weakly typed.
    """
    # Identity checks are required here: `value in [int, float]` would be
    # wrong because some framework dtypes compare equal to the Python
    # scalar types (e.g. tf.float32 == float is True).
    weak = value is int or value is float
    return standardize_dtype(value), weak
@functools.lru_cache(maxsize=None)
def _respect_weak_type(dtype, weak_type):
"""Return the weak dtype of `dtype` if `weak_type==True`."""
if weak_type:
if dtype == "bool":
return dtype
elif "float" in dtype:
return "float"
elif "int" in dtype:
return "int"
elif "complex" in dtype:
return "complex"
else:
raise ValueError(
"Invalid value for argument `dtype`. Expected one of "
f"{ALLOWED_DTYPES}. Received: dtype={dtype}"
)
return dtype
@functools.lru_cache(maxsize=None)
def _resolve_weak_type(dtype, precision="32"):
    """Resolve weak type by the precision of `backend.floatx()`."""
    valid_dtypes = set(ALLOWED_DTYPES).union(WEAK_TYPES)
    if dtype not in valid_dtypes:
        raise ValueError(
            "Invalid value for argument `dtype`. Expected one of "
            f"{valid_dtypes}. Received: dtype={dtype}"
        )
    if precision not in ("16", "32", "64"):
        raise ValueError(
            f"Invalid value for argument `precision`. Expected one of "
            f"('16', '32', '64'). Received: precision={precision}"
        )
    # "bfloat16" starts with "b" like "bool", so special-case it as float.
    indicator = "f" if dtype == "bfloat16" else dtype[:1]
    if indicator == "b":
        return "bool"
    if indicator == "i":
        return f"int{precision}"
    if indicator == "u":
        return f"uint{precision}"
    return f"float{precision}"
# Demotion map applied after promotion to keep results at 32 bits, matching
# JAX's default behavior; TF-specific exceptions are noted inline.
BIT64_TO_BIT32_DTYPE = {
    # Since TF variables require int64 to be placed on the GPU, we exclusively
    # enable the int64 dtype for TF.
    "int64": "int64" if config.backend() == "tensorflow" else "int32",
    "uint64": "uint32",
    "float64": "float64" if config.backend() == "tensorflow" else "float32",
    "complex128": "complex64",
}
def _lattice_result_type(*args):
    """Return the promoted dtype for `args` using the promotion lattice."""
    dtypes, weak_types = zip(*(_dtype_and_weaktype(arg) for arg in args))
    if len(dtypes) == 1:
        out_dtype = dtypes[0]
        out_weak_type = weak_types[0]
    elif len(set(dtypes)) == 1 and not all(weak_types):
        # Trivial promotion case. This allows extended dtypes through.
        out_dtype = dtypes[0]
        out_weak_type = False
    elif all(weak_types):
        # If all inputs are weakly typed, we compute the bound of the
        # strongly-typed counterparts and apply the weak type at the end. This
        # avoids returning the incorrect result with non-canonical weak types
        # (e.g. weak int16).
        out_dtype = _least_upper_bound(
            *{_respect_weak_type(d, False) for d in dtypes}
        )
        out_weak_type = True
    else:
        out_dtype = _least_upper_bound(
            *{_respect_weak_type(d, w) for d, w in zip(dtypes, weak_types)}
        )
        # Identity (`is`) is intentional here: weak lattice nodes are the
        # exact string objects unpacked from WEAK_TYPES in
        # `_type_promotion_lattice`, so a weak result is one of those objects.
        out_weak_type = any(out_dtype is t for t in WEAK_TYPES)
    out_weak_type = (out_dtype != "bool") and out_weak_type
    # Weak results are resolved at the precision of the configured floatx.
    precision = config.floatx()[-2:]
    if out_weak_type:
        out_dtype = _resolve_weak_type(out_dtype, precision=precision)
    # Force to be 32-bit dtype when encountering 64-bit dtype. This is to
    # be aligned with JAX's default behavior.
    out_dtype = BIT64_TO_BIT32_DTYPE.get(out_dtype, out_dtype)
    return out_dtype
@keras_export("keras.backend.result_type")
def result_type(*dtypes):
"""Returns the type from applying the Keras type promotion rules.
In general, each argument is first parsed by `backend.standardize_dtype`,
and the resulting dtype is determined by the least upper bound of the type
promotion lattice.
Note: This function attempts to match the result of `jnp.result_type`.
Args:
dtypes: Input dtypes.
Returns:
The result dtype.
Examples:
>>> x = keras.ops.ones((1,), dtype="bfloat16")
>>> keras.backend.result_type(x.dtype, int)
"bfloat16"
>>> x = keras.ops.ones((1,), dtype="int32")
>>> y = keras.ops.ones((1,), dtype="float32")
>>> keras.backend.result_type(x.dtype, y.dtype)
"float32"
>>> z= keras.ops.ones((1,), dtype='complex64')
>>> keras.backend.result_type(z.dtype, int)
"float64"
"""
if len(dtypes) == 0:
# If no dtypes provided, default to floatx, this matches
# `ops.convert_to_tensor([])`
return config.floatx()
for dtype in dtypes:
if dtype in FLOAT8_TYPES:
raise ValueError(
"There is no implicit conversions from float8 dtypes to others."
f" You must cast it internally. Received: {dtypes}"
)
return _lattice_result_type(
*(config.floatx() if arg is None else arg for arg in dtypes),
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/common/stateless_scope.py | keras/src/backend/common/stateless_scope.py | from keras.src.api_export import keras_export
from keras.src.backend.common import global_state
@keras_export("keras.StatelessScope")
class StatelessScope:
"""Scope to prevent any update to Keras Variables.
The values of variables to be used inside the scope
should be passed via the `state_mapping` argument, a
list of tuples `(k, v)` where `k` is a `Variable`
and `v` is the intended value for this variable
(a backend tensor).
Updated values can be collected on scope exit via
`value = scope.get_current_value(variable)`. No updates
will be applied in-place to any variables for the duration
of the scope.
Example:
```python
state_mapping = [(k, ops.ones(k.shape, k.dtype)) for k in model.weights]
with keras.StatelessScope(state_mapping) as scope:
outputs = model.some_function(inputs)
# All model variables remain unchanged. Their new values can be
# collected via:
for k in model.weights:
new_value = scope.get_current_value(k)
print(f"New value for {k}: {new_value})
```
"""
def __init__(
self,
state_mapping=None,
collect_losses=False,
initialize_variables=True,
):
from keras.src import backend
from keras.src.backend.common.variables import Variable
self.collect_losses = collect_losses
self.initialize_variables = initialize_variables
self.losses = []
self.state_mapping = {}
state_mapping = state_mapping or {}
for k, v in state_mapping:
if not isinstance(k, Variable):
raise ValueError(
"Invalid reference variable in StatelessScope: "
"all keys in argument `mapping` must be Variable "
f"instances. Received instead: {k}"
)
if isinstance(v, Variable):
v = backend.cast(v.value, dtype=k.dtype)
else:
v = backend.convert_to_tensor(v, dtype=k.dtype)
if k.shape != v.shape:
raise ValueError(
"Invalid variable value in StatelessScope: "
"all values in argument `mapping` must be tensors with "
"a shape that matches the corresponding variable shape. "
f"For variable {k}, received invalid value {v} with shape "
f"{v.shape}."
)
self.state_mapping[id(k)] = v
def __enter__(self):
self.original_scope = get_stateless_scope()
global_state.set_global_attribute("stateless_scope", self)
return self
def add_loss(self, loss):
self.losses.append(loss)
def add_update(self, update):
variable, value = update
self.state_mapping[id(variable)] = value
def get_current_value(self, variable):
return self.state_mapping.get(id(variable), None)
def __exit__(self, *args, **kwargs):
global_state.set_global_attribute(
"stateless_scope", self.original_scope
)
if self.original_scope is None and self.initialize_variables:
# We're back in eager scope;
# if any variables were created within the stateless
# scope, we initialize them here.
from keras.src.backend.common.variables import (
initialize_all_variables,
)
initialize_all_variables()
def in_stateless_scope():
    """Return `True` if a `StatelessScope` is currently active."""
    return global_state.get_global_attribute("stateless_scope") is not None
def get_stateless_scope():
    """Return the innermost active `StatelessScope`, or `None` if none."""
    return global_state.get_global_attribute("stateless_scope")
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/common/global_state_test.py | keras/src/backend/common/global_state_test.py | from keras.src.backend.common import global_state
from keras.src.testing import test_case
from keras.src.utils.naming import auto_name
class GlobalStateTest(test_case.TestCase):
    def test_clear_session(self):
        """`clear_session` resets the auto-naming counters."""
        # First use of a prefix gets the bare name; reuse gets a suffix.
        name0 = auto_name("somename")
        self.assertEqual(name0, "somename")
        name1 = auto_name("somename")
        self.assertEqual(name1, "somename_1")
        global_state.clear_session()
        # After clearing, the counter starts over from the bare name.
        name0 = auto_name("somename")
        self.assertEqual(name0, "somename")
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/common/remat.py | keras/src/backend/common/remat.py | from collections import namedtuple
from keras.src import backend
from keras.src.api_export import keras_export
from keras.src.backend.common import global_state
@keras_export("keras.RematScope")
class RematScope:
    """A context manager for enabling rematerialization in Keras.

    Rematerialization (gradient checkpointing) trades memory for computation
    by recomputing intermediate activations during the backward pass. This is
    particularly useful for training large models or large batch sizes within
    limited memory constraints.

    This should be used when initializing the layer (e.g., `layer(input)`).
    Rematerialization applies at execution time, not at creation time.

    Args:
        mode: Rematerialization mode to apply.
            Options:
            - `"full"`: Apply rematerialization globally to all supported
              operations.
            - `"activations"`: Apply rematerialization to activations on any
              layers that contain `keras.activations` (e.g., `Dense(...,
              activation=relu)`).
            - `"larger_than"`: Apply rematerialization to layers with output
              sizes larger than `output_size_threshold`.
            - `"list_of_layers"`: Apply rematerialization to a specific list
              of layer names.
            - `None`: Disable rematerialization.
        output_size_threshold: Output size threshold for the
            `"larger_than"` mode. Layers producing outputs larger than this
            threshold will be rematerialized. Default is `1024`.
        layer_names: List of layer names for the
            `"list_of_layers"` mode. Default is an empty list.

    Examples:

    Using "list_of_layers" mode:

    ```python
    from keras import RematScope
    input_tensor = tf.random.normal((1, 32, 32, 3))
    with RematScope(mode="list_of_layers", layer_names=["dense_1",
                                                        "conv2d_1"]):
        layer1 = keras.layers.Dense(128, name="dense_1")
        layer2 = keras.layers.Conv2D(64, (3, 3), name="conv2d_1")
        layer3 = keras.layers.Dense(64, name="dense_2")

        # Only layer1 and layer2 will apply rematerialization
        output1 = layer1(input_tensor)
        output2 = layer2(output1)
        output3 = layer3(output2)
    ```

    Using "larger_than" mode with a specific output size threshold:

    ```python
    with RematScope(mode="larger_than", output_size_threshold=2048):
        layer = keras.layers.Conv2D(64, (3, 3))
        output = layer(input_tensor)  # Conv2D outputs larger than 2048
    ```

    Nested scopes for fine-grained control:

    ```python
    with RematScope(mode="full"):
        # Create layers
        layer1 = keras.layers.Dense(128, activation='relu')
        output1 = layer1(input_tensor)  # layer1 is fully rematerialized
        with RematScope(mode="larger_than", output_size_threshold=512):
            layer2 = keras.layers.Conv2D(32, (3, 3))
            output2 = layer2(output1)  # layer2 is conditionally
            # rematerialized if output > 512
    ```
    """

    def __init__(
        self, mode="full", output_size_threshold=1024, layer_names=None
    ):
        if mode not in {
            "full",
            "activations",
            "larger_than",
            "list_of_layers",
            None,
        }:
            # Message previously rendered with a double space ("or  None.")
            # because two fragments each contributed a space; fixed here.
            raise ValueError(
                f"Invalid mode '{mode}'. Supported modes are: "
                "'full', 'activations', 'larger_than', 'list_of_layers', "
                "or None."
            )
        self.mode = mode
        self.output_size_threshold = output_size_threshold
        self.layer_names = layer_names or []
        # Only pop in __exit__ what __enter__ actually pushed.
        self._pop_on_exit = False

    def __enter__(self):
        """Push this scope onto the global remat scope stack."""
        remat_scope_stack = global_state.get_global_attribute(
            "remat_scope_stack", default=[], set_to_default=True
        )
        remat_scope_stack.append(self)
        self._pop_on_exit = True
        return self

    def __exit__(self, *args, **kwargs):
        """Pop this scope from the global remat scope stack."""
        if self._pop_on_exit:
            remat_scope_stack = global_state.get_global_attribute(
                "remat_scope_stack"
            )
            remat_scope_stack.pop()
# Immutable snapshot of the active rematerialization settings, as returned
# by `get_current_remat_mode()`.
RematMode = namedtuple(
    "RematMode", ["mode", "output_size_threshold", "layer_names"]
)
def get_current_remat_mode():
    """Return the innermost active rematerialization settings.

    Returns:
        A `RematMode` namedtuple `(mode, output_size_threshold,
        layer_names)` taken from the scope on top of the global stack, or
        `None` when no `RematScope` is active.
    """
    stack = global_state.get_global_attribute("remat_scope_stack")
    if not stack:
        return None
    active = stack[-1]
    return RematMode(
        active.mode,
        active.output_size_threshold,
        active.layer_names,
    )
@keras_export("keras.remat")
def remat(f):
    """Apply rematerialization to a function or layer for memory savings.

    Rematerialization is a memory optimization that trades computation for
    memory. Instead of storing intermediate results (e.g. activations) for
    backpropagation, they are recomputed during the backward pass. This
    reduces peak memory usage at the cost of extra computation, allowing
    larger models or larger batch sizes within the same memory budget.

    Args:
        f: A callable to which rematerialization is applied. This is
            typically a computationally expensive operation whose
            intermediate states can be recomputed instead of stored.

    Returns:
        A wrapped function that applies rematerialization. The returned
        function defines a custom gradient, ensuring that during the
        backward pass the forward computation is recomputed as needed.

    Example:

    ```python
    from keras import Model

    class CustomRematLayer(layers.Layer):
        def __init__(self, **kwargs):
            super().__init__(**kwargs)
            self.remat_function = remat(self.intermediate_function)

        def intermediate_function(self, x):
            for _ in range(2):
                x = x + x * 0.1  # Simple scaled transformation
            return x

        def call(self, inputs):
            return self.remat_function(inputs)

    # Define a simple model using the custom layer
    inputs = layers.Input(shape=(4,))
    x = layers.Dense(4, activation="relu")(inputs)
    x = CustomRematLayer()(x)  # Custom layer with rematerialization
    outputs = layers.Dense(1)(x)

    # Create and compile the model
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer="sgd", loss="mse")
    ```
    """
    # The actual checkpointing mechanism is backend-specific.
    wrapped = backend.core.remat(f)
    return wrapped
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/numpy/image.py | keras/src/backend/numpy/image.py | import ml_dtypes
import numpy as np
from keras.src import backend
from keras.src.backend.numpy.core import convert_to_tensor
from keras.src.random.seed_generator import draw_seed
from keras.src.utils.module_utils import scipy
# Interpolation methods accepted by `resize`.
RESIZE_INTERPOLATIONS = (
    "bilinear",
    "nearest",
    "lanczos3",
    "lanczos5",
    "bicubic",
)
# Interpolation name -> interpolation order passed to `map_coordinates`.
AFFINE_TRANSFORM_INTERPOLATIONS = {  # map to order
    "nearest": 0,
    "bilinear": 1,
}
# Valid `fill_mode` values for `affine_transform`.
AFFINE_TRANSFORM_FILL_MODES = {
    "constant",
    "nearest",
    "wrap",
    "mirror",
    "reflect",
}
# Valid `fill_mode` values for `map_coordinates`.
MAP_COORDINATES_FILL_MODES = {
    "constant",
    "nearest",
    "wrap",
    "mirror",
    "reflect",
}
# Method names for scale-and-translate style resampling.
SCALE_AND_TRANSLATE_METHODS = {
    "linear",
    "bilinear",
    "trilinear",
    "cubic",
    "bicubic",
    "tricubic",
    "lanczos3",
    "lanczos5",
}
def rgb_to_grayscale(images, data_format=None):
    """Convert RGB images to single-channel grayscale.

    Uses the same luma weights as `tf.image.rgb_to_grayscale`
    (0.2989, 0.5870, 0.1140). The result keeps the input dtype and has a
    single channel on the channel axis.
    """
    images = convert_to_tensor(images)
    data_format = backend.standardize_data_format(data_format)
    channels_axis = -1 if data_format == "channels_last" else -3
    if len(images.shape) not in (3, 4):
        raise ValueError(
            "Invalid images rank: expected rank 3 (single image) "
            "or rank 4 (batch of images). Received input with shape: "
            f"images.shape={images.shape}"
        )
    # Compute in a float dtype, then cast back to the input dtype.
    original_dtype = images.dtype
    images = images.astype(backend.result_type(images.dtype, float))
    # Ref: tf.image.rgb_to_grayscale
    luma_weights = np.array([0.2989, 0.5870, 0.1140], dtype=images.dtype)
    gray = np.tensordot(images, luma_weights, axes=(channels_axis, -1))
    gray = np.expand_dims(gray, axis=channels_axis)
    return gray.astype(original_dtype)
def rgb_to_hsv(images, data_format=None):
    """Convert float RGB images to HSV.

    Args:
        images: Rank 3 or rank 4 float tensor.
        data_format: `"channels_last"` or `"channels_first"`.

    Returns:
        Tensor of the same shape and dtype with H, S, V planes stacked on
        the channel axis.

    Raises:
        ValueError: On invalid rank or non-float dtype.
    """
    # Ref: dm_pix
    images = convert_to_tensor(images)
    dtype = backend.standardize_dtype(images.dtype)
    data_format = backend.standardize_data_format(data_format)
    channels_axis = -1 if data_format == "channels_last" else -3
    if len(images.shape) not in (3, 4):
        raise ValueError(
            "Invalid images rank: expected rank 3 (single image) "
            "or rank 4 (batch of images). Received input with shape: "
            f"images.shape={images.shape}"
        )
    if not backend.is_float_dtype(dtype):
        raise ValueError(
            "Invalid images dtype: expected float dtype. "
            f"Received: images.dtype={dtype}"
        )
    # Flush sub-epsilon magnitudes to zero so the hue/saturation division
    # below is not dominated by vanishingly small ranges.
    eps = ml_dtypes.finfo(dtype).eps
    images = np.where(np.abs(images) < eps, 0.0, images)
    red, green, blue = np.split(images, 3, channels_axis)
    red = np.squeeze(red, channels_axis)
    green = np.squeeze(green, channels_axis)
    blue = np.squeeze(blue, channels_axis)

    def rgb_planes_to_hsv_planes(r, g, b):
        # `safe_*` substitute 1.0 where the true value is 0 so the
        # divisions are defined; the outer `np.where` guards then pick 0
        # for exactly those positions.
        value = np.maximum(np.maximum(r, g), b)
        minimum = np.minimum(np.minimum(r, g), b)
        range_ = value - minimum

        safe_value = np.where(value > 0, value, 1.0)
        safe_range = np.where(range_ > 0, range_, 1.0)

        saturation = np.where(value > 0, range_ / safe_value, 0.0)
        norm = 1.0 / (6.0 * safe_range)

        hue = np.where(
            value == g,
            norm * (b - r) + 2.0 / 6.0,
            norm * (r - g) + 4.0 / 6.0,
        )
        hue = np.where(value == r, norm * (g - b), hue)
        # Gray pixels (zero range) get hue 0; negative hues wrap into [0, 1).
        hue = np.where(range_ > 0, hue, 0.0) + (hue < 0.0).astype(hue.dtype)
        return hue, saturation, value

    images = np.stack(
        rgb_planes_to_hsv_planes(red, green, blue), axis=channels_axis
    )
    return images.astype(dtype)
def hsv_to_rgb(images, data_format=None):
    """Convert float HSV images back to RGB.

    Args:
        images: Rank 3 or rank 4 float tensor with H, S, V planes on the
            channel axis.
        data_format: `"channels_last"` or `"channels_first"`.

    Returns:
        RGB tensor of the same shape and dtype.

    Raises:
        ValueError: On invalid rank or non-float dtype.
    """
    # Ref: dm_pix
    images = convert_to_tensor(images)
    dtype = images.dtype
    data_format = backend.standardize_data_format(data_format)
    channels_axis = -1 if data_format == "channels_last" else -3
    if len(images.shape) not in (3, 4):
        raise ValueError(
            "Invalid images rank: expected rank 3 (single image) "
            "or rank 4 (batch of images). Received input with shape: "
            f"images.shape={images.shape}"
        )
    if not backend.is_float_dtype(dtype):
        raise ValueError(
            "Invalid images dtype: expected float dtype. "
            f"Received: images.dtype={backend.standardize_dtype(dtype)}"
        )
    hue, saturation, value = [
        np.squeeze(plane, channels_axis)
        for plane in np.split(images, 3, channels_axis)
    ]

    def _planes_to_rgb(hue, saturation, value):
        # Piecewise-linear per-channel weights on the hue wheel in [0, 6).
        wheel = np.mod(hue, 1.0) * 6.0
        red_w = np.clip(np.abs(wheel - 3.0) - 1.0, 0.0, 1.0)
        green_w = np.clip(2.0 - np.abs(wheel - 2.0), 0.0, 1.0)
        blue_w = np.clip(2.0 - np.abs(wheel - 4.0), 0.0, 1.0)
        desaturated = 1.0 - saturation
        return (
            value * (desaturated + saturation * red_w),
            value * (desaturated + saturation * green_w),
            value * (desaturated + saturation * blue_w),
        )

    rgb = np.stack(
        _planes_to_rgb(hue, saturation, value), axis=channels_axis
    )
    return rgb.astype(dtype)
def _center_crop_to_aspect_ratio(images, target_height, target_width, data_format):
    """Center-crop `images` to the largest region matching the target
    aspect ratio. Batch/channel axes are untouched."""
    shape = images.shape
    if data_format == "channels_last":
        height, width = shape[-3], shape[-2]
    else:
        height, width = shape[-2], shape[-1]
    crop_height = int(float(width * target_height) / target_width)
    crop_height = max(min(height, crop_height), 1)
    crop_width = int(float(height * target_width) / target_height)
    crop_width = max(min(width, crop_width), 1)
    crop_box_hstart = int(float(height - crop_height) / 2)
    crop_box_wstart = int(float(width - crop_width) / 2)
    h_slice = slice(crop_box_hstart, crop_box_hstart + crop_height)
    w_slice = slice(crop_box_wstart, crop_box_wstart + crop_width)
    if data_format == "channels_last":
        spatial = (h_slice, w_slice, slice(None))
    else:
        spatial = (h_slice, w_slice)
    # Full slices for any leading batch (and channels_first channel) axes.
    leading = (slice(None),) * (len(shape) - len(spatial))
    return images[leading + spatial]


def _pad_to_aspect_ratio(
    images, target_height, target_width, fill_value, data_format
):
    """Symmetrically pad `images` with `fill_value` so the target aspect
    ratio fits. Only one spatial axis is padded: height when needed,
    otherwise width."""
    shape = images.shape
    if data_format == "channels_last":
        height, width = shape[-3], shape[-2]
    else:
        height, width = shape[-2], shape[-1]
    pad_height = max(height, int(float(width * target_height) / target_width))
    pad_width = max(width, int(float(height * target_width) / target_height))
    img_box_hstart = int(float(pad_height - height) / 2)
    img_box_wstart = int(float(pad_width - width) / 2)
    if img_box_hstart > 0:
        h_pad, w_pad = img_box_hstart, 0
    elif img_box_wstart > 0:
        h_pad, w_pad = 0, img_box_wstart
    else:
        return images
    if data_format == "channels_last":
        spatial = [(h_pad, h_pad), (w_pad, w_pad), (0, 0)]
    else:
        spatial = [(h_pad, h_pad), (w_pad, w_pad)]
    pad_widths = [(0, 0)] * (len(shape) - len(spatial)) + spatial
    # `np.pad` preserves the input dtype. The previous implementation
    # concatenated default-dtype `np.ones` in the channels_first branches,
    # which silently upcast the result to float64.
    return np.pad(
        images, pad_widths, mode="constant", constant_values=fill_value
    )


def resize(
    images,
    size,
    interpolation="bilinear",
    antialias=False,
    crop_to_aspect_ratio=False,
    pad_to_aspect_ratio=False,
    fill_mode="constant",
    fill_value=0.0,
    data_format=None,
):
    """Resize `images` to `size=(target_height, target_width)`.

    Args:
        images: Rank 3 (single image) or rank 4 (batch) tensor.
        size: Tuple `(height, width)` of the output.
        interpolation: One of `RESIZE_INTERPOLATIONS`.
        antialias: Whether to antialias when downscaling.
        crop_to_aspect_ratio: Center-crop to the target aspect ratio before
            resizing. Mutually exclusive with `pad_to_aspect_ratio`.
        pad_to_aspect_ratio: Symmetrically pad with `fill_value` to the
            target aspect ratio before resizing. Mutually exclusive with
            `crop_to_aspect_ratio`.
        fill_mode: Only `"constant"` is supported.
        fill_value: Padding value used with `pad_to_aspect_ratio`.
        data_format: `"channels_last"` or `"channels_first"`.

    Returns:
        The resized images.

    Raises:
        ValueError: On invalid `interpolation`, `fill_mode`, `size`,
            conflicting aspect-ratio flags, or unsupported image rank.
    """
    data_format = backend.standardize_data_format(data_format)
    if interpolation not in RESIZE_INTERPOLATIONS:
        raise ValueError(
            "Invalid value for argument `interpolation`. Expected of one "
            f"{RESIZE_INTERPOLATIONS}. Received: interpolation={interpolation}"
        )
    if fill_mode != "constant":
        raise ValueError(
            "Invalid value for argument `fill_mode`. Only `'constant'` "
            f"is supported. Received: fill_mode={fill_mode}"
        )
    if pad_to_aspect_ratio and crop_to_aspect_ratio:
        raise ValueError(
            "Only one of `pad_to_aspect_ratio` & `crop_to_aspect_ratio` "
            "can be `True`."
        )
    if not len(size) == 2:
        raise ValueError(
            "Argument `size` must be a tuple of two elements "
            f"(height, width). Received: size={size}"
        )
    size = tuple(size)
    target_height, target_width = size
    # Expand `size` to the full output shape, keeping batch/channel dims.
    if len(images.shape) == 4:
        if data_format == "channels_last":
            size = (images.shape[0],) + size + (images.shape[-1],)
        else:
            size = (images.shape[0], images.shape[1]) + size
    elif len(images.shape) == 3:
        if data_format == "channels_last":
            size = size + (images.shape[-1],)
        else:
            size = (images.shape[0],) + size
    else:
        raise ValueError(
            "Invalid images rank: expected rank 3 (single image) "
            "or rank 4 (batch of images). Received input with shape: "
            f"images.shape={images.shape}"
        )
    if crop_to_aspect_ratio:
        images = _center_crop_to_aspect_ratio(
            images, target_height, target_width, data_format
        )
    elif pad_to_aspect_ratio:
        images = _pad_to_aspect_ratio(
            images, target_height, target_width, fill_value, data_format
        )
    return _resize(images, size, method=interpolation, antialias=antialias)
def _compute_weight_mat(
    input_size, output_size, scale, translation, kernel, antialias
):
    """Build the `(input_size, output_size)` resampling weight matrix.

    Column `j` holds the normalized kernel weights that combine input
    samples into output sample `j` for a scale-and-translate resize.
    """
    dtype = np.result_type(scale, translation)
    inv_scale = 1.0 / scale
    # When antialiasing a downscale, widen the kernel by the inverse scale.
    kernel_scale = np.maximum(inv_scale, 1.0) if antialias else 1.0
    # Continuous source coordinate of each output pixel center.
    sample_f = (
        (np.arange(output_size, dtype=dtype) + 0.5) * inv_scale
        - translation * inv_scale
        - 0.5
    )
    # Distance from every input index to every output sample location.
    x = (
        np.abs(
            sample_f[np.newaxis, :]
            - np.arange(input_size, dtype=dtype)[:, np.newaxis]
        )
        / kernel_scale
    )
    weights = kernel(x)

    # Normalize each column; columns with (near-)zero total weight become 0.
    total_weight_sum = np.sum(weights, axis=0, keepdims=True)
    weights = np.where(
        np.abs(total_weight_sum) > 1000.0 * np.finfo(np.float32).eps,
        np.divide(
            weights, np.where(total_weight_sum != 0, total_weight_sum, 1)
        ),
        0,
    )
    # Zero out output pixels whose sample location falls outside the input.
    input_size_minus_0_5 = input_size - 0.5
    return np.where(
        np.logical_and(sample_f >= -0.5, sample_f <= input_size_minus_0_5)[
            np.newaxis, :
        ],
        weights,
        0,
    )
def _resize(image, shape, method, antialias):
    """Resize `image` to `shape` using the named interpolation method.

    `"nearest"` is dispatched to `_resize_nearest`; every other method is
    resolved through the `_kernels` table and applied with
    `_scale_and_translate` on the axes whose size changes.
    """
    if method == "nearest":
        return _resize_nearest(image, shape)
    kernel = _kernels.get(method, None)
    if kernel is None:
        raise ValueError("Unknown resize method")
    # Only axes whose size actually changes get resampled.
    spatial_dims = tuple(
        axis
        for axis in range(len(shape))
        if image.shape[axis] != shape[axis]
    )
    scale = []
    for axis in spatial_dims:
        in_size = image.shape[axis]
        scale.append(shape[axis] / in_size if in_size != 0 else 1.0)
    return _scale_and_translate(
        image,
        shape,
        spatial_dims,
        scale,
        [0.0] * len(spatial_dims),
        kernel,
        antialias,
    )
def _resize_nearest(x, output_shape):
input_shape = x.shape
spatial_dims = tuple(
i for i in range(len(input_shape)) if input_shape[i] != output_shape[i]
)
for d in spatial_dims:
m, n = input_shape[d], output_shape[d]
offsets = (np.arange(n, dtype=np.float32) + 0.5) * m / n
offsets = np.floor(offsets).astype(np.int32)
indices = [slice(None)] * len(input_shape)
indices[d] = offsets
x = x[tuple(indices)]
return x
def _fill_triangle_kernel(x):
return np.maximum(0, 1 - np.abs(x))
def _fill_keys_cubic_kernel(x):
out = ((1.5 * x - 2.5) * x) * x + 1.0
out = np.where(x >= 1.0, ((-0.5 * x + 2.5) * x - 4.0) * x + 2.0, out)
return np.where(x >= 2.0, 0.0, out)
def _fill_lanczos_kernel(radius, x):
y = radius * np.sin(np.pi * x) * np.sin(np.pi * x / radius)
out = np.where(
x > 1e-3, np.divide(y, np.where(x != 0, np.pi**2 * x**2, 1)), 1
)
return np.where(x > radius, 0.0, out)
# Interpolation kernels keyed by method name. The "bilinear"/"bicubic"
# aliases share kernels with "linear"/"cubic"; "nearest" is handled
# separately in `_resize`.
_kernels = {
    "linear": _fill_triangle_kernel,
    "bilinear": _fill_triangle_kernel,  # For `resize`.
    "cubic": _fill_keys_cubic_kernel,
    "bicubic": _fill_keys_cubic_kernel,  # For `resize`.
    "lanczos3": lambda x: _fill_lanczos_kernel(3.0, x),
    "lanczos5": lambda x: _fill_lanczos_kernel(5.0, x),
}
def _scale_and_translate(
    x, output_shape, spatial_dims, scale, translation, kernel, antialias
):
    """Resample `x` along `spatial_dims` using per-axis weight matrices.

    Each resized axis is contracted against the matrix produced by
    `_compute_weight_mat`. Integer inputs are computed in float32, then
    rounded and clipped back to the input's observed value range.
    """
    input_shape = x.shape
    if len(spatial_dims) == 0:
        return x
    if np.issubdtype(x.dtype, np.integer):
        output = x.astype(np.float32)
        use_rounding = True
    else:
        output = x.copy()
        use_rounding = False
    for i, d in enumerate(spatial_dims):
        d = d % x.ndim  # support negative axis indices
        m, n = input_shape[d], output_shape[d]
        w = _compute_weight_mat(
            m, n, scale[i], translation[i], kernel, antialias
        ).astype(output.dtype)
        # Contract axis `d` with the weights, then restore its position.
        output = np.tensordot(output, w, axes=(d, 0))
        output = np.moveaxis(output, -1, d)
    if use_rounding:
        output = np.clip(np.round(output), x.min(), x.max())
        output = output.astype(x.dtype)
    return output
def affine_transform(
    images,
    transform,
    interpolation="bilinear",
    fill_mode="constant",
    fill_value=0,
    data_format=None,
):
    """Apply affine transform(s) given as 8-coefficient vectors to images.

    Args:
        images: Rank 3 or rank 4 tensor.
        transform: Rank 1 (single) or rank 2 (batched) coefficient vector
            `(a0, a1, a2, b0, b1, b2, c0, c1)`.
        interpolation: `"nearest"` or `"bilinear"`.
        fill_mode: One of `AFFINE_TRANSFORM_FILL_MODES`.
        fill_value: Value used for `fill_mode="constant"`.
        data_format: `"channels_last"` or `"channels_first"`.

    Returns:
        Transformed images with the same shape and dtype as `images`.
    """
    data_format = backend.standardize_data_format(data_format)
    if interpolation not in AFFINE_TRANSFORM_INTERPOLATIONS.keys():
        raise ValueError(
            "Invalid value for argument `interpolation`. Expected of one "
            f"{set(AFFINE_TRANSFORM_INTERPOLATIONS.keys())}. Received: "
            f"interpolation={interpolation}"
        )
    if fill_mode not in AFFINE_TRANSFORM_FILL_MODES:
        raise ValueError(
            "Invalid value for argument `fill_mode`. Expected of one "
            f"{AFFINE_TRANSFORM_FILL_MODES}. Received: fill_mode={fill_mode}"
        )
    images = convert_to_tensor(images)
    transform = convert_to_tensor(transform)
    if len(images.shape) not in (3, 4):
        raise ValueError(
            "Invalid images rank: expected rank 3 (single image) "
            "or rank 4 (batch of images). Received input with shape: "
            f"images.shape={images.shape}"
        )
    if len(transform.shape) not in (1, 2):
        raise ValueError(
            "Invalid transform rank: expected rank 1 (single transform) "
            "or rank 2 (batch of transforms). Received input with shape: "
            f"transform.shape={transform.shape}"
        )
    # `scipy.ndimage.map_coordinates` lacks support for float16 and bfloat16.
    input_dtype = backend.standardize_dtype(images.dtype)
    compute_dtype = backend.result_type(input_dtype, "float32")
    images = images.astype(compute_dtype)
    transform = transform.astype(compute_dtype)
    # unbatched case
    need_squeeze = False
    if len(images.shape) == 3:
        images = np.expand_dims(images, axis=0)
        need_squeeze = True
    if len(transform.shape) == 1:
        transform = np.expand_dims(transform, axis=0)
    if data_format == "channels_first":
        images = np.transpose(images, (0, 2, 3, 1))
    batch_size = images.shape[0]
    # get indices: one (h, w, c) coordinate triple per output position.
    meshgrid = np.meshgrid(
        *[np.arange(size) for size in images.shape[1:]], indexing="ij"
    )
    indices = np.concatenate(
        [np.expand_dims(x, axis=-1) for x in meshgrid], axis=-1
    )
    indices = np.tile(indices, (batch_size, 1, 1, 1, 1))
    # swap the values
    # NOTE(review): this exchanges the x-row and y-row coefficient blocks —
    # presumably converting (x, y) ordering to (row, col); confirm against
    # the other backends' affine_transform.
    a0 = transform[:, 0].copy()
    a2 = transform[:, 2].copy()
    b1 = transform[:, 4].copy()
    b2 = transform[:, 5].copy()
    transform[:, 0] = b1
    transform[:, 2] = b2
    transform[:, 4] = a0
    transform[:, 5] = a2
    # deal with transform: append the implicit 9th coefficient (1) and
    # reshape into full 3x3 matrices.
    transform = np.pad(transform, pad_width=[[0, 0], [0, 1]], constant_values=1)
    transform = np.reshape(transform, (batch_size, 3, 3))
    # Apply the translation column separately, after the matrix product.
    offset = transform[:, 0:2, 2].copy()
    offset = np.pad(offset, pad_width=[[0, 0], [0, 1]])
    transform[:, 0:2, 2] = 0
    # transform the indices
    coordinates = np.einsum("Bhwij, Bjk -> Bhwik", indices, transform)
    coordinates = np.moveaxis(coordinates, source=-1, destination=1)
    coordinates += np.reshape(offset, (*offset.shape, 1, 1, 1))
    # apply affine transformation, one batch element at a time
    affined = np.stack(
        [
            map_coordinates(
                images[i],
                coordinates[i],
                order=AFFINE_TRANSFORM_INTERPOLATIONS[interpolation],
                fill_mode=fill_mode,
                fill_value=fill_value,
            )
            for i in range(batch_size)
        ],
        axis=0,
    )
    if data_format == "channels_first":
        affined = np.transpose(affined, (0, 3, 1, 2))
    if need_squeeze:
        affined = np.squeeze(affined, axis=0)
    return affined.astype(input_dtype)
def perspective_transform(
    images,
    start_points,
    end_points,
    interpolation="bilinear",
    fill_value=0,
    data_format=None,
):
    """Warp images with the homography defined by 4 corner correspondences.

    Args:
        images: Rank 3 or rank 4 tensor.
        start_points: `(4, 2)` or `(N, 4, 2)` source corner coordinates.
        end_points: Same shape as `start_points`; destination corners.
        interpolation: `"nearest"` or `"bilinear"`.
        fill_value: Value for samples that fall outside the input.
        data_format: `"channels_last"` or `"channels_first"`.

    Returns:
        Warped images with the same shape and dtype as `images`.
    """
    data_format = backend.standardize_data_format(data_format)
    start_points = convert_to_tensor(start_points)
    end_points = convert_to_tensor(end_points)
    if interpolation not in AFFINE_TRANSFORM_INTERPOLATIONS:
        raise ValueError(
            "Invalid value for argument `interpolation`. Expected of one "
            f"{AFFINE_TRANSFORM_INTERPOLATIONS}. Received: "
            f"interpolation={interpolation}"
        )
    if len(images.shape) not in (3, 4):
        raise ValueError(
            "Invalid images rank: expected rank 3 (single image) "
            "or rank 4 (batch of images). Received input with shape: "
            f"images.shape={images.shape}"
        )
    if start_points.ndim not in (2, 3) or start_points.shape[-2:] != (4, 2):
        raise ValueError(
            "Invalid start_points shape: expected (4,2) for a single image"
            f" or (N,4,2) for a batch. Received shape: {start_points.shape}"
        )
    if end_points.ndim not in (2, 3) or end_points.shape[-2:] != (4, 2):
        raise ValueError(
            "Invalid end_points shape: expected (4,2) for a single image"
            f" or (N,4,2) for a batch. Received shape: {end_points.shape}"
        )
    if start_points.shape != end_points.shape:
        raise ValueError(
            "start_points and end_points must have the same shape."
            f" Received start_points.shape={start_points.shape}, "
            f"end_points.shape={end_points.shape}"
        )
    input_dtype = images.dtype
    # float16 is upcast for the coordinate math and restored at the end.
    if input_dtype == "float16":
        images = images.astype("float32")
    need_squeeze = False
    if len(images.shape) == 3:
        images = np.expand_dims(images, axis=0)
        need_squeeze = True
    if len(start_points.shape) == 2:
        start_points = np.expand_dims(start_points, axis=0)
    if len(end_points.shape) == 2:
        end_points = np.expand_dims(end_points, axis=0)
    if data_format == "channels_first":
        images = np.transpose(images, (0, 2, 3, 1))
    batch_size, height, width, channels = images.shape
    transforms = compute_homography_matrix(start_points, end_points)
    if len(transforms.shape) == 1:
        transforms = np.expand_dims(transforms, axis=0)
    # Broadcast a single transform across the whole batch.
    if transforms.shape[0] == 1 and batch_size > 1:
        transforms = np.tile(transforms, (batch_size, 1))
    x, y = np.meshgrid(
        np.arange(width, dtype=np.float32),
        np.arange(height, dtype=np.float32),
        indexing="xy",
    )
    output = np.empty((batch_size, height, width, channels))
    for i in range(batch_size):
        # Projective mapping: divide by the homogeneous denominator.
        a0, a1, a2, a3, a4, a5, a6, a7 = transforms[i]
        denom = a6 * x + a7 * y + 1.0
        x_in = (a0 * x + a1 * y + a2) / denom
        y_in = (a3 * x + a4 * y + a5) / denom
        coords = np.stack([y_in.ravel(), x_in.ravel()], axis=0)
        mapped_channels = []
        for channel in range(channels):
            channel_img = images[i, :, :, channel]
            mapped_channel = map_coordinates(
                channel_img,
                coords,
                order=AFFINE_TRANSFORM_INTERPOLATIONS[interpolation],
                fill_mode="constant",
                fill_value=fill_value,
            )
            mapped_channels.append(mapped_channel.reshape(height, width))
        output[i] = np.stack(mapped_channels, axis=-1)
    if data_format == "channels_first":
        output = np.transpose(output, (0, 3, 1, 2))
    if need_squeeze:
        output = np.squeeze(output, axis=0)
    output = output.astype(input_dtype)
    return output
def compute_homography_matrix(start_points, end_points):
    """Solve for the 8 homography coefficients mapping `end_points` onto
    `start_points`.

    Both inputs have shape `(N, 4, 2)`. For each batch element an 8x8
    direct-linear system is assembled from the four correspondences and
    solved, yielding `(a0, a1, a2, a3, a4, a5, a6, a7)` per element,
    returned with shape `(N, 8)`.
    """
    start_points = convert_to_tensor(start_points)
    end_points = convert_to_tensor(end_points)

    dtype = backend.result_type(start_points.dtype, end_points.dtype, float)
    # `np.linalg.solve` lacks support for float16 and bfloat16.
    compute_dtype = backend.result_type(dtype, "float32")
    start_points = start_points.astype(dtype)
    end_points = end_points.astype(dtype)

    # Two rows per correspondence, in point order, x-row before y-row.
    rows = []
    for point in range(4):
        sx = start_points[:, point, 0]
        sy = start_points[:, point, 1]
        ex = end_points[:, point, 0]
        ey = end_points[:, point, 1]
        ones = np.ones_like(ex)
        zeros = np.zeros_like(ex)
        rows.append(
            np.stack(
                [ex, ey, ones, zeros, zeros, zeros, -sx * ex, -sx * ey],
                axis=-1,
            )
        )
        rows.append(
            np.stack(
                [zeros, zeros, zeros, ex, ey, ones, -sy * ex, -sy * ey],
                axis=-1,
            )
        )
    coefficient_matrix = np.stack(rows, axis=1)

    # Right-hand side is (x1, y1, x2, y2, x3, y3, x4, y4) per element,
    # i.e. the row-major flattening of `start_points`.
    target_vector = np.reshape(start_points, (-1, 8))[..., np.newaxis]

    coefficient_matrix = coefficient_matrix.astype(compute_dtype)
    target_vector = target_vector.astype(compute_dtype)
    homography_matrix = np.linalg.solve(coefficient_matrix, target_vector)
    homography_matrix = np.reshape(homography_matrix, [-1, 8])
    return homography_matrix.astype(dtype)
def map_coordinates(
inputs, coordinates, order, fill_mode="constant", fill_value=0.0
):
inputs = convert_to_tensor(inputs)
coordinates = convert_to_tensor(coordinates)
if coordinates.shape[0] != len(inputs.shape):
raise ValueError(
"First dim of `coordinates` must be the same as the rank of "
"`inputs`. "
f"Received inputs with shape: {inputs.shape} and coordinate "
f"leading dim of {coordinates.shape[0]}"
)
if len(coordinates.shape) < 2:
raise ValueError(
"Invalid coordinates rank: expected at least rank 2."
f" Received input with shape: {coordinates.shape}"
)
if fill_mode not in MAP_COORDINATES_FILL_MODES:
raise ValueError(
"Invalid value for argument `fill_mode`. Expected one of "
f"{set(MAP_COORDINATES_FILL_MODES.keys())}. Received: "
f"fill_mode={fill_mode}"
)
if order not in range(2):
raise ValueError(
"Invalid value for argument `order`. Expected one of "
f"{[0, 1]}. Received: order={order}"
)
# SciPy's implementation of map_coordinates handles boundaries incorrectly,
# unless mode='reflect'. For order=1, this only affects interpolation
# outside the bounds of the original array.
# https://github.com/scipy/scipy/issues/2640
padding = [
(
max(-np.floor(c.min()).astype(int) + 1, 0),
max(np.ceil(c.max()).astype(int) + 1 - size, 0),
)
for c, size in zip(coordinates, inputs.shape)
]
shifted_coords = [c + p[0] for p, c in zip(padding, coordinates)]
pad_mode = {
"nearest": "edge",
"mirror": "reflect",
"reflect": "symmetric",
}.get(fill_mode, fill_mode)
if fill_mode == "constant":
padded = np.pad(
inputs, padding, mode=pad_mode, constant_values=fill_value
)
else:
padded = np.pad(inputs, padding, mode=pad_mode)
# `scipy.ndimage.map_coordinates` lacks support for float16 and bfloat16.
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/numpy/layer.py | keras/src/backend/numpy/layer.py | class NumpyLayer:
pass
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/numpy/export.py | keras/src/backend/numpy/export.py | class NumpyExportArchive:
def track(self, resource):
    """Unsupported on the numpy backend; always raises."""
    raise NotImplementedError(
        "`track` is not implemented in the numpy backend."
    )
def add_endpoint(self, name, fn, input_signature=None, **kwargs):
    """Unsupported on the numpy backend; always raises."""
    raise NotImplementedError(
        "`add_endpoint` is not implemented in the numpy backend."
    )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/numpy/rnn.py | keras/src/backend/numpy/rnn.py | import numpy as np
from keras.src import tree
def rnn(
    step_function,
    inputs,
    initial_states,
    go_backwards=False,
    mask=None,
    constants=None,
    unroll=False,
    input_length=None,
    time_major=False,
    zero_output_for_mask=False,
    return_all_outputs=True,
):
    """Iterate `step_function` over the time dimension of `inputs`.

    Numpy-backend RNN loop: either a fully unrolled Python loop
    (`unroll=True`) or a sequential scan via `numpy_scan`. Masked steps
    carry forward the previous state, and the masked output is either the
    previous output or zeros (`zero_output_for_mask=True`). Returns
    `(last_output, outputs, new_states)`. `input_length` is accepted for
    API compatibility and unused here.
    """

    def swap_batch_timestep(input_t):
        # Swap the batch and timestep dim for the incoming tensor.
        axes = list(range(len(input_t.shape)))
        axes[0], axes[1] = 1, 0
        return np.transpose(input_t, axes)

    # Internally everything is processed time-major: (time, batch, ...).
    if not time_major:
        inputs = tree.map_structure(swap_batch_timestep, inputs)

    flattened_inputs = tree.flatten(inputs)
    time_steps = flattened_inputs[0].shape[0]

    if mask is not None:
        if mask.dtype != "bool":
            mask = mask.astype("bool")
        if len(mask.shape) == 2:
            mask = np.expand_dims(mask, axis=-1)
        if not time_major:
            mask = swap_batch_timestep(mask)

    if constants is None:
        constants = []

    def _expand_mask(mask_t, input_t, fixed_dim=1):
        # Broadcast a per-step mask up to the rank/shape of `input_t`.
        if tree.is_nested(mask_t):
            raise ValueError(
                f"mask_t is expected to be tensor, but got {mask_t}"
            )
        if tree.is_nested(input_t):
            raise ValueError(
                f"input_t is expected to be tensor, but got {input_t}"
            )
        rank_diff = len(input_t.shape) - len(mask_t.shape)
        for _ in range(rank_diff):
            mask_t = np.expand_dims(mask_t, -1)
        multiples = [1] * fixed_dim + list(input_t.shape[fixed_dim:])
        return np.tile(mask_t, multiples)

    if unroll:
        if not time_steps:
            raise ValueError("Unrolling requires a fixed number of timesteps.")
        states = tuple(initial_states)
        successive_states = []
        successive_outputs = []

        # Process the input tensors. The input tensor need to be split on the
        # time_step dim, and reverse if go_backwards is True. In the case of
        # nested input, the input is flattened and then transformed
        # individually. The result of this will be a tuple of lists, each of
        # the item in tuple is list of the tensor with shape (batch, feature)
        def _process_single_input_t(input_t):
            input_t = unstack(input_t)  # unstack for time_step dim
            if go_backwards:
                input_t.reverse()
            return input_t

        if tree.is_nested(inputs):
            processed_input = tree.map_structure(
                _process_single_input_t, inputs
            )
        else:
            processed_input = (_process_single_input_t(inputs),)

        def _get_input_tensor(time):
            inp = [t_[time] for t_ in processed_input]
            return tree.pack_sequence_as(inputs, inp)

        if mask is not None:
            mask_list = unstack(mask)
            if go_backwards:
                mask_list.reverse()

            for i in range(time_steps):
                inp = _get_input_tensor(i)
                mask_t = mask_list[i]
                output, new_states = step_function(
                    inp, tuple(states) + tuple(constants)
                )
                tiled_mask_t = _expand_mask(mask_t, output)
                # Masked positions keep the previous output (zeros at t=0).
                if not successive_outputs:
                    prev_output = np.zeros_like(output)
                else:
                    prev_output = successive_outputs[-1]
                output = np.where(tiled_mask_t, output, prev_output)
                # Masked positions also keep the previous state.
                flat_states = tree.flatten(states)
                flat_new_states = tree.flatten(new_states)
                tiled_mask_t = tuple(
                    _expand_mask(mask_t, s) for s in flat_states
                )
                flat_final_states = tuple(
                    np.where(m, s, ps)
                    for m, s, ps in zip(
                        tiled_mask_t, flat_new_states, flat_states
                    )
                )
                states = tree.pack_sequence_as(states, flat_final_states)
                if return_all_outputs:
                    successive_outputs.append(output)
                    successive_states.append(states)
                else:
                    successive_outputs = [output]
                    successive_states = [states]
            last_output = successive_outputs[-1]
            new_states = successive_states[-1]
            outputs = np.stack(successive_outputs)
        else:  # mask is None
            for i in range(time_steps):
                inp = _get_input_tensor(i)
                output, states = step_function(
                    inp, tuple(states) + tuple(constants)
                )
                if return_all_outputs:
                    successive_outputs.append(output)
                    successive_states.append(states)
                else:
                    successive_outputs = [output]
                    successive_states = [states]
            last_output = successive_outputs[-1]
            new_states = successive_states[-1]
            outputs = np.stack(successive_outputs)

    else:  # Unroll == False
        if mask is not None:

            def _step(states, current_input):
                current_input, current_mask = current_input
                # A step is "masked" only when every feature is masked out.
                is_masked = np.all(
                    np.logical_not(current_mask), axis=-1, keepdims=True
                )
                output_t, new_states = step_function(current_input, states)
                if zero_output_for_mask:
                    masked_outs = np.where(
                        is_masked, np.zeros_like(output_t), output_t
                    )
                else:
                    # Assume the first state is the previous output.
                    output_tm1 = states[0]
                    if tree.is_nested(output_tm1):
                        # Stacked RNN case: assume first state of last cell.
                        output_tm1 = states[-1][0]
                    masked_outs = np.where(is_masked, output_tm1, output_t)
                new_states = tree.map_structure(
                    lambda s, ns: np.where(is_masked, s, ns),
                    states,
                    new_states,
                )
                return (new_states, masked_outs)

            scan_xs = (inputs, mask)
        else:

            def _step(states, current_input):
                output_t, new_states = step_function(current_input, states)
                return new_states, output_t

            scan_xs = inputs

        new_states, outputs = numpy_scan(
            f=_step,
            init=initial_states,
            xs=scan_xs,
            reverse=go_backwards,
            mask=mask,
        )
        if go_backwards:
            outputs = np.flip(outputs, axis=0)
        last_output = outputs[-1]

    if not time_major:
        outputs = tree.map_structure(swap_batch_timestep, outputs)

    return last_output, outputs, new_states
def lstm(*args, **kwargs):
    """Fused LSTM is not available on the numpy backend."""
    raise NotImplementedError
def gru(*args, **kwargs):
    """Fused GRU is not available on the numpy backend."""
    raise NotImplementedError
def unstack(x, axis=0):
    """Split `x` into a list of slices taken along `axis`."""
    slices = []
    for index in range(x.shape[axis]):
        slices.append(x.take(index, axis))
    return slices
def numpy_scan(f, init, xs, reverse=False, mask=None):
    """Sequentially apply `f` over the leading (time) axis of `xs`.

    `f` maps `(states, x_t)` to `(states, output_t)`. When `mask` is
    given, `xs` must be an `(inputs, mask)` pair and each step receives
    the `(x_t, mask_t)` pair. With `reverse=True` the sequence is
    consumed back to front and the stacked outputs are flipped back
    into forward time order before returning.
    """
    states = init
    collected = []
    if mask is None:
        sequence = np.flip(xs, axis=0) if reverse else xs
        for step_input in sequence:
            states, step_output = f(states, step_input)
            collected.append(step_output)
    else:
        x, mask = xs
        if reverse:
            x = np.flip(x, axis=0)
            mask = np.flip(mask, axis=0)
        for step_input, step_mask in zip(x, mask):
            states, step_output = f(states, (step_input, step_mask))
            collected.append(step_output)
    outputs = np.array(collected)
    if reverse:
        outputs = np.flip(outputs, axis=0)
    return states, outputs
def cudnn_ok(*args, **kwargs):
    """cuDNN-accelerated RNN kernels are never available on numpy."""
    return False
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/numpy/core.py | keras/src/backend/numpy/core.py | import builtins
import contextlib
import functools
import warnings
import numpy as np
from keras.src import tree
from keras.src.backend.common import KerasVariable
from keras.src.backend.common import standardize_dtype
from keras.src.backend.common.backend_utils import slice_along_axis
from keras.src.backend.common.dtypes import result_type
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.common.symbolic_scope import SymbolicScope
# Capability flags for this backend: numpy has no sparse or ragged tensor
# representation, and plain numpy arrays are safe to share across threads.
SUPPORTS_SPARSE_TENSORS = False
SUPPORTS_RAGGED_TENSORS = False
IS_THREAD_SAFE = True
class Variable(KerasVariable):
    """Numpy-backend variable: stores its value as a plain numpy array."""

    def _initialize(self, value):
        # First assignment; the value is stored as given.
        self._value = value

    def _direct_assign(self, value):
        # Later assignments are coerced to the variable's declared dtype.
        self._value = np.array(value, dtype=self._dtype)

    def _convert_to_tensor(self, value, dtype=None):
        return convert_to_tensor(value, dtype=dtype)

    # Overload native accessor.
    def __array__(self):
        return self.value
def convert_to_tensor(x, dtype=None, sparse=None, ragged=None):
    """Convert `x` to a numpy array, optionally casting to `dtype`.

    Raises:
        ValueError: if `sparse=True` or `ragged=True`, which numpy
            cannot represent.
    """
    if sparse:
        raise ValueError("`sparse=True` is not supported with numpy backend")
    if ragged:
        raise ValueError("`ragged=True` is not supported with numpy backend")
    if dtype is not None:
        dtype = standardize_dtype(dtype)
    if isinstance(x, Variable):
        # Unwrap backend variables; cast only when a different explicit
        # dtype was requested.
        if dtype and dtype != x.dtype:
            return x.value.astype(dtype)
        return x.value
    if not is_tensor(x) and standardize_dtype(dtype) == "bfloat16":
        # Can't create bfloat16 arrays on the fly (e.g. from a h5 Dataset).
        # Instead we convert "as is" (to stored dtype) and cast.
        return np.asarray(x).astype(dtype)
    if dtype is None:
        # Infer the result dtype from all leaves of the (possibly nested) x.
        dtype = result_type(
            *[getattr(item, "dtype", type(item)) for item in tree.flatten(x)]
        )
    return np.array(x, dtype=dtype)
def convert_to_numpy(x):
    """Materialize `x` as a new (copied) numpy array."""
    copied = np.array(x)
    return copied
def is_tensor(x):
    """Return True when `x` is a numpy array or numpy scalar."""
    return isinstance(x, (np.generic, np.ndarray))
def shape(x):
    """Return the static shape tuple of `x`."""
    return x.shape
def cast(x, dtype):
    """Cast `x` to `dtype` by converting through `convert_to_tensor`."""
    return convert_to_tensor(x, dtype=dtype)
def cond(pred, true_fn, false_fn):
    """Evaluate `true_fn()` when `pred` is truthy, else `false_fn()`."""
    return true_fn() if pred else false_fn()
def vectorized_map(function, elements):
    """Map `function` over the leading axis of `elements`, stacking results.

    `elements` may be a single array or a list/tuple of arrays sharing
    the same batch (leading) dimension; in the latter case `function`
    receives the per-index slices as a list.
    """
    if isinstance(elements, (list, tuple)):
        batch_size = elements[0].shape[0]
        outputs = [
            function([element[i] for element in elements])
            for i in range(batch_size)
        ]
        return np.stack(outputs)
    return np.stack([function(element) for element in elements])
# Shape / dtype inference util
def compute_output_spec(fn, *args, **kwargs):
    """Infer the output spec of `fn` by running it on placeholder arrays.

    KerasTensor arguments are replaced by concrete `np.empty` arrays and
    `fn` is executed once. When some input dimensions are unknown
    (`None`), `fn` is run twice with two different fill values; any
    output dimension that differs between the runs is marked dynamic.
    """
    with StatelessScope(), SymbolicScope():

        def has_none_shape(x):
            # Detect dynamic (unknown) dimensions on symbolic tensors.
            if isinstance(x, KerasTensor):
                return None in x.shape
            return False

        none_in_shape = any(
            builtins.map(has_none_shape, tree.flatten((args, kwargs)))
        )

        def convert_keras_tensor_to_numpy(x, fill_value=None):
            # Replace unknown dims with `fill_value` so a concrete array
            # can be allocated.
            if isinstance(x, KerasTensor):
                shape = list(x.shape)
                if fill_value:
                    for i, e in enumerate(shape):
                        if e is None:
                            shape[i] = fill_value
                return np.empty(
                    shape=shape,
                    dtype=x.dtype,
                )
            return x

        args_1, kwargs_1 = tree.map_structure(
            lambda x: convert_keras_tensor_to_numpy(x, fill_value=83),
            (args, kwargs),
        )
        outputs_1 = fn(*args_1, **kwargs_1)
        outputs = outputs_1
        if none_in_shape:
            # Second pass with a different fill value: output dims that
            # change between passes must depend on the unknown dims.
            args_2, kwargs_2 = tree.map_structure(
                lambda x: convert_keras_tensor_to_numpy(x, fill_value=89),
                (args, kwargs),
            )
            outputs_2 = fn(*args_2, **kwargs_2)
            flat_out_1 = tree.flatten(outputs_1)
            flat_out_2 = tree.flatten(outputs_2)
            flat_out = []
            for x1, x2 in zip(flat_out_1, flat_out_2):
                shape = list(x1.shape)
                for i, e in enumerate(x2.shape):
                    if e != shape[i]:
                        shape[i] = None
                flat_out.append(KerasTensor(shape, standardize_dtype(x1.dtype)))
            outputs = tree.pack_sequence_as(outputs_1, flat_out)

        def convert_numpy_to_keras_tensor(x):
            if is_tensor(x):
                return KerasTensor(x.shape, standardize_dtype(x.dtype))
            return x

        output_spec = tree.map_structure(convert_numpy_to_keras_tensor, outputs)
    return output_spec
def map(f, xs):
    """Apply `f` to each leading-axis slice of `xs` (ref: `jax.lax.map`)."""

    def step(carry, x):
        # No carried state; the scan is used purely for its stacking.
        return (), f(x)

    _, ys = scan(step, (), xs)
    return ys
def scan(f, init, xs=None, length=None, reverse=False, unroll=1):
    """Scan `f` over the leading axis of `xs` while carrying state.

    `f` maps `(carry, x)` to `(carry, y)`. Returns the final carry and
    the stacked per-step `y`s. `unroll` is validated for API
    compatibility but has no effect in this backend.
    """
    # Ref: jax.lax.scan
    if not callable(f):
        raise TypeError(f"`f` should be a callable. Received: f={f}")
    if not isinstance(unroll, bool):
        if not isinstance(unroll, int) or unroll < 1:
            raise ValueError(
                "`unroll` must be an positive integer or boolean. "
                f"Received: unroll={unroll}"
            )
    if xs is None and length is None:
        raise ValueError("Got no `xs` to scan over and `length` not provided.")
    input_is_sequence = tree.is_nested(xs)
    output_is_sequence = tree.is_nested(init)

    def pack_input(x):
        return tree.pack_sequence_as(xs, x) if input_is_sequence else x[0]

    def pack_output(x):
        return tree.pack_sequence_as(init, x) if output_is_sequence else x[0]

    if xs is None:
        # Length-driven scan: each step receives `None` as input.
        xs_flat = []
        n = int(length)
    else:
        xs_flat = tree.flatten(xs)
        xs_flat = [convert_to_tensor(elem) for elem in xs_flat]
        n = int(length) if length is not None else shape(xs_flat[0])[0]
    init_flat = tree.flatten(init)
    init_flat = [convert_to_tensor(init) for init in init_flat]
    init = pack_output(init_flat)
    # Placeholder outputs used when `f` returns `None` for `y`.
    dummy_y = [np.zeros_like(init) for init in init_flat]
    carry = init
    ys = []
    maybe_reversed = reversed if reverse else lambda x: x
    for i in maybe_reversed(range(n)):
        xs_slice = [x[i] for x in xs_flat]
        packed_xs = pack_input(xs_slice) if len(xs_slice) > 0 else None
        carry, y = f(carry, packed_xs)
        ys.append(y if y is not None else dummy_y)
    # Stack step outputs along a new leading axis; reversing the list
    # restores forward time order when scanning in reverse.
    stacked_y = tree.map_structure(
        lambda *ys: np.stack(ys), *maybe_reversed(ys)
    )
    return carry, stacked_y
def associative_scan(f, elems, reverse=False, axis=0):
    """Prefix scan with an associative binary operation `f`.

    Implements the recursive odd/even (Blelloch-style) scheme used by
    `jax.lax.associative_scan`: combine adjacent pairs, scan the halved
    problem recursively, then interleave the partial results.
    """
    # Ref: jax.lax.associative_scan
    if not callable(f):
        raise TypeError(f"`f` should be a callable. Received: f={f}")
    elems_flat = tree.flatten(elems)
    elems_flat = [convert_to_tensor(elem) for elem in elems_flat]
    if reverse:
        elems_flat = [np.flip(elem, (axis,)) for elem in elems_flat]

    def _combine(a_flat, b_flat):
        # Apply `f` to flattened element lists, repacking to the caller's
        # structure for the call itself.
        a = tree.pack_sequence_as(elems, a_flat)
        b = tree.pack_sequence_as(elems, b_flat)
        c = f(a, b)
        c_flat = tree.flatten(c)
        return c_flat

    num_elems = int(elems_flat[0].shape[axis])
    if not all(int(elem.shape[axis]) == num_elems for elem in elems_flat[1:]):
        raise ValueError(
            "Array inputs to associative_scan must have the same "
            "first dimension. (saw: {})".format(
                [elem.shape for elem in elems_flat]
            )
        )

    def _interleave(a, b, axis):
        """Given two Tensors of static shape, interleave them along axis."""
        assert (
            a.shape[axis] == b.shape[axis] or a.shape[axis] == b.shape[axis] + 1
        )
        # we want to get a: [a1, a2], b: [b1, b2]
        # to a: [a1, 0, a2, 0], b: [0, b1, 0, b2]
        a_shape = list(a.shape)
        a_shape[axis] = a.shape[axis] * 2 - 1
        b_shape = list(b.shape)
        b_shape[axis] = b.shape[axis] * 2 - 1
        a_dil = np.zeros(a_shape)
        np.copyto(slice_along_axis(a_dil, 0, None, 2, axis), a)
        b_dil = np.zeros(b_shape)
        np.copyto(slice_along_axis(b_dil, 0, None, 2, axis), b)
        a_pad = [[0, 0] for _ in range(a.ndim)]
        a_pad[axis][-1] = 1 if a.shape[axis] == b.shape[axis] else 0
        b_pad = [[0, 0] for _ in range(b.ndim)]
        b_pad[axis] = [1, 0] if a.shape[axis] == b.shape[axis] else [1, 1]
        # Bitwise-or keeps boolean arrays boolean; `np.add` would upcast.
        op = np.bitwise_or if a.dtype == np.bool_ else np.add
        return op(
            np.pad(a_dil, a_pad),
            np.pad(b_dil, b_pad),
        )

    def _scan(elems):
        # Recursive core operating on a flat list of arrays.
        num_elems = elems[0].shape[axis]
        if num_elems < 2:
            return elems
        # Combine adjacent (even, odd) pairs into a half-size problem.
        reduced_elems = _combine(
            [
                slice_along_axis(elem, 0, -1, step=2, axis=axis)
                for elem in elems
            ],
            [
                slice_along_axis(elem, 1, None, step=2, axis=axis)
                for elem in elems
            ],
        )
        # Recursing yields the scan values at the odd output positions.
        odd_elems = _scan(reduced_elems)
        if num_elems % 2 == 0:
            even_elems = _combine(
                [slice_along_axis(e, 0, -1, axis=axis) for e in odd_elems],
                [
                    slice_along_axis(e, 2, None, step=2, axis=axis)
                    for e in elems
                ],
            )
        else:
            even_elems = _combine(
                odd_elems,
                [
                    slice_along_axis(e, 2, None, step=2, axis=axis)
                    for e in elems
                ],
            )
        # The first output of a scan is always the first input element.
        even_elems = [
            np.concatenate(
                [slice_along_axis(elem, 0, 1, axis=axis), result],
                axis=axis,
            )
            for (elem, result) in zip(elems, even_elems)
        ]
        return list(
            builtins.map(
                functools.partial(_interleave, axis=axis), even_elems, odd_elems
            )
        )

    scans = _scan(elems_flat)
    if reverse:
        scans = [np.flip(scanned, (axis,)) for scanned in scans]
    return tree.pack_sequence_as(elems, scans)
def scatter(indices, values, shape):
    """Build a zeros array of `shape` and add `values` at `indices`.

    Duplicate indices accumulate (scatter-add semantics, as in
    `tf.scatter_nd`).
    """
    indices = convert_to_tensor(indices)
    values = convert_to_tensor(values)
    out = np.zeros(shape, dtype=values.dtype)
    index_depth = indices.shape[-1]
    update_shape = shape[index_depth:]
    flat_indices = np.reshape(indices, [-1, index_depth])
    flat_values = np.reshape(values, [-1] + list(update_shape))
    for index, value in zip(flat_indices, flat_values):
        out[tuple(index)] += value
    return out
def scatter_update(inputs, indices, updates):
    """Write `updates` into `inputs` at the points in `indices` (in place)."""
    # (num_updates, rank) -> rank arrays of coordinates for fancy indexing.
    coords = np.transpose(np.array(indices))
    inputs[tuple(coords)] = updates
    return inputs
def slice(inputs, start_indices, shape):
    """Extract the rectangular block of size `shape` at `start_indices`."""
    assert len(start_indices) == len(shape)
    # Per-dimension index ranges, combined into an open mesh so fancy
    # indexing picks out the block in one shot.
    axis_ranges = [
        np.arange(begin, begin + extent)
        for begin, extent in zip(start_indices, shape)
    ]
    return inputs[np.ix_(*axis_ranges)]
def slice_update(inputs, start_indices, updates):
    """Overwrite the block at `start_indices` with `updates` (in place)."""
    axis_ranges = [
        np.arange(begin, begin + extent)
        for begin, extent in zip(start_indices, updates.shape)
    ]
    inputs[np.ix_(*axis_ranges)] = updates
    return inputs
def switch(index, branches, *operands):
    """Call `branches[index](*operands)`, clamping `index` into range."""
    clamped = np.clip(
        convert_to_tensor(index, "int32"), 0, len(branches) - 1
    )
    return branches[clamped](*operands)
def while_loop(
    cond,
    body,
    loop_vars,
    maximum_iterations=None,
):
    """Repeatedly apply `body` to `loop_vars` while `cond` holds.

    Args:
        cond: callable taking the unpacked loop vars, returning a bool.
        body: callable taking the unpacked loop vars, returning their
            updated values.
        loop_vars: initial loop variable(s); single value or tuple/list.
        maximum_iterations: optional hard cap on the iteration count.

    Returns:
        The final loop vars — a tuple if the input was a tuple/list,
        otherwise the single value.
    """
    current_iter = 0
    iteration_check = (
        lambda iter: maximum_iterations is None or iter < maximum_iterations
    )
    # Normalize to a tuple so cond/body can always be called unpacked.
    is_tuple = isinstance(loop_vars, (tuple, list))
    loop_vars = tuple(loop_vars) if is_tuple else (loop_vars,)
    loop_vars = tree.map_structure(convert_to_tensor, loop_vars)
    while cond(*loop_vars) and iteration_check(current_iter):
        loop_vars = body(*loop_vars)
        if not isinstance(loop_vars, (list, tuple)):
            loop_vars = (loop_vars,)
        loop_vars = tuple(loop_vars)
        current_iter += 1
    return loop_vars if is_tuple else loop_vars[0]
def fori_loop(lower, upper, body_fun, init_val):
    """Fold `body_fun(i, val)` over `i` in `[lower, upper)`."""
    return functools.reduce(
        lambda val, i: body_fun(i, val), range(lower, upper), init_val
    )
def stop_gradient(variable):
    """No-op: numpy computes no gradients, so the value passes through."""
    return variable
def unstack(x, num=None, axis=0):
    """Split `x` along `axis` into a list of arrays (`num` is unused)."""
    leading = np.moveaxis(x, axis, 0)
    return list(leading)
def random_seed_dtype():
    """Dtype used to store RNG seeds on the numpy backend."""
    return "uint32"
class custom_gradient:
    """Decorator for custom gradients.

    On the numpy backend this is a pass-through: only the forward pass
    runs, and the gradient function returned by `fun` is discarded.

    Args:
        fun: Forward pass function.
    """

    def __init__(self, fun):
        warnings.warn(
            "`custom_gradient` for the numpy backend acts as a pass-through to "
            "support the forward pass. No gradient computation or modification "
            "takes place."
        )
        self.fun = fun

    def __call__(self, *args, **kwargs):
        # `fun` returns `(outputs, grad_fn)`; the gradient part is dropped.
        outputs, _ = self.fun(*args, **kwargs)
        return outputs
@contextlib.contextmanager
def device_scope(device_name):
    """No-op scope: numpy has no notion of device placement."""
    yield
def remat(f):
    """Return `f` unchanged; rematerialization is unsupported on numpy."""
    warnings.warn(
        "Rematerialization memory optimization is not supported by the "
        "Numpy backend. Please switch to JAX, TensorFlow, or PyTorch to "
        "utilize this feature."
    )
    return f
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/numpy/nn.py | keras/src/backend/numpy/nn.py | import jax
import numpy as np
from jax import lax
from keras.src import backend
from keras.src.backend.common.backend_utils import (
compute_adaptive_pooling_window_sizes,
)
from keras.src.backend.common.backend_utils import (
compute_conv_transpose_padding_args_for_jax,
)
from keras.src.backend.numpy.core import cast
from keras.src.backend.numpy.core import convert_to_tensor
from keras.src.backend.numpy.core import is_tensor
from keras.src.utils.module_utils import scipy
def relu(x):
    """Rectified linear unit: `max(x, 0)` elementwise."""
    x = convert_to_tensor(x)
    zero = np.array(0.0, x.dtype)
    return np.maximum(x, zero)
def relu6(x):
    """ReLU capped at six: `min(max(x, 0), 6)` elementwise."""
    x = convert_to_tensor(x)
    # np.clip incorrectly promote bfloat16 to float32, so we replace it with
    # np.minimum and np.maximum here
    lower = np.array(0.0, x.dtype)
    upper = np.array(6.0, x.dtype)
    return np.minimum(np.maximum(x, lower), upper)
def sigmoid(x):
    """Logistic sigmoid: `1 / (1 + exp(-x))` elementwise."""
    x = convert_to_tensor(x)
    one = np.array(1.0, x.dtype)
    return one / (one + np.exp(-x))
def sparse_sigmoid(x):
    """Piecewise-linear sigmoid: 0 for x <= -1, 1 for x >= 1, (x+1)/2 between."""
    x = convert_to_tensor(x)
    zero = np.array(0.0, x.dtype)
    one = np.array(1.0, x.dtype)
    middle = np.array(0.5 * (x + 1), x.dtype)
    return np.where(x <= -1, zero, np.where(x >= 1, one, middle))
def tanh(x):
    """Hyperbolic tangent, elementwise."""
    return np.tanh(x)
def tanh_shrink(x):
    """Tanh-shrink activation: `x - tanh(x)` elementwise."""
    x = convert_to_tensor(x)
    shrunk = np.tanh(x)
    return x - shrunk
def softplus(x):
    """Numerically stable `log(1 + exp(x))` via `logaddexp`."""
    x = convert_to_tensor(x)
    zero = np.array(0.0, x.dtype)
    return np.logaddexp(x, zero)
def softsign(x):
    """Softsign activation: `x / (1 + |x|)` elementwise."""
    x = convert_to_tensor(x)
    one = np.array(1.0, x.dtype)
    return x / (one + np.abs(x))
def soft_shrink(x, threshold=0.5):
    """Soft shrinkage: shift values toward zero by `threshold`, zeroing
    everything within `[-threshold, threshold]`.
    """
    # Convert first so python scalars/lists work — consistent with the
    # other activations in this module (the previous code read `x.dtype`
    # before ensuring `x` is an array).
    x = convert_to_tensor(x)
    return np.where(
        x > threshold,
        np.array(x - threshold, dtype=x.dtype),
        np.where(
            x < -threshold,
            np.array(x + threshold, dtype=x.dtype),
            np.array(0.0, dtype=x.dtype),
        ),
    )
def sparse_plus(x):
    """Sparse-plus: 0 for x <= -1, ((x + 1)^2) / 4 for -1 < x < 1, x above."""
    # Convert first so python scalars/lists work — consistent with the
    # other activations in this module (the previous code read `x.dtype`
    # before ensuring `x` is an array).
    x = convert_to_tensor(x)
    return np.where(
        x <= -1,
        np.zeros_like(x, dtype=x.dtype),
        np.where(x < 1, np.array((1 / 4) * (x + 1) ** 2, dtype=x.dtype), x),
    )
def silu(x):
    """SiLU / swish activation: `x * sigmoid(x)`."""
    x = convert_to_tensor(x)
    gate = sigmoid(x)
    return x * gate
def squareplus(x, b=4):
    """Squareplus activation: `(x + sqrt(x**2 + b)) / 2`."""
    x = convert_to_tensor(x)
    b = convert_to_tensor(b, dtype=x.dtype)
    return (x + np.sqrt(x**2 + b)) / 2
def log_sigmoid(x):
    """`log(sigmoid(x))`, computed stably as `-softplus(-x)`."""
    x = convert_to_tensor(x)
    return -softplus(-x)
def leaky_relu(x, negative_slope=0.2):
    """ReLU with slope `negative_slope` on the negative side."""
    x = convert_to_tensor(x)
    slope = np.array(negative_slope, x.dtype)
    return np.maximum(x, slope * x)
def hard_sigmoid(x):
    """Piecewise-linear sigmoid approximation: `clip(x / 6 + 0.5, 0, 1)`."""
    # Convert first so python scalars/lists work — the previous code read
    # `x.dtype` before ensuring `x` is an array, unlike the sibling
    # activations in this module.
    x = convert_to_tensor(x)
    # python numbers will be promoted to float64 by np, so it's necessary to
    # first convert the python numbers to np scalars
    x = x / np.array(6.0, x.dtype) + np.array(0.5, x.dtype)
    return np.where(
        x <= 0.0,
        np.array(0.0, x.dtype),
        np.where(x >= 1.0, np.array(1.0, x.dtype), x),
    )
def hard_silu(x):
    """Hard SiLU: `x * hard_sigmoid(x)`."""
    gate = hard_sigmoid(x)
    return x * gate
def elu(x, alpha=1.0):
    """ELU: `x` where x >= 0, `alpha * (exp(x) - 1)` elsewhere."""
    x = convert_to_tensor(x)
    zero = np.array(0.0, x.dtype)
    negative_branch = np.array(alpha, x.dtype) * np.expm1(x)
    return np.where(x >= zero, x, negative_branch)
def selu(x):
    """Scaled ELU with the canonical self-normalizing constants."""
    alpha = 1.6732632423543772848170429916717
    scale = 1.0507009873554804934193349852946
    x = convert_to_tensor(x)
    return np.array(scale, x.dtype) * elu(x, alpha)
def gelu(x, approximate=True):
    """Gaussian error linear unit.

    With `approximate=True` uses the tanh approximation; otherwise the
    exact erf-based form. Intermediate casts keep everything in
    `x.dtype`, mirroring JAX's implementation.
    """
    x = convert_to_tensor(x)
    # followed by JAX's implementation
    if approximate:
        sqrt_2_over_pi = np.sqrt(2 / np.pi).astype(x.dtype)
        cdf = np.array(0.5, x.dtype) * (
            np.array(1.0, x.dtype)
            + np.tanh(
                sqrt_2_over_pi
                * (x + np.array(0.044715, x.dtype) * (x**3).astype(x.dtype))
            )
        )
        return x * cdf
    else:
        sqrt_2 = np.sqrt(2).astype(x.dtype)
        return (
            x
            * (scipy.special.erf(x / sqrt_2) + 1).astype(x.dtype)
            / np.array(2, x.dtype)
        )
def celu(x, alpha=1.0):
    """CELU: `max(x, 0) + alpha * expm1(min(x, 0) / alpha)`."""
    x = convert_to_tensor(x)
    alpha = np.array(alpha, x.dtype)
    zero = np.array(0.0, dtype=x.dtype)
    positive_part = np.maximum(x, zero)
    negative_part = alpha * np.expm1(np.minimum(x, zero) / alpha)
    return positive_part + negative_part
def glu(x, axis=-1):
    """Gated linear unit: split `x` in half along `axis` and gate the
    first half with the sigmoid of the second.

    Raises:
        ValueError: if the size of `axis` is odd.
    """
    x = convert_to_tensor(x)
    dtype = x.dtype
    if x.shape[axis] % 2 != 0:
        raise ValueError(
            "axis size must be divisible by 2. "
            f"Received: x.shape={x.shape} with axis={axis}"
        )
    values, gates = np.split(x, 2, axis)
    return (values * sigmoid(gates)).astype(dtype)
def hard_tanh(x):
    """Clip `x` elementwise to the interval `[-1, 1]`."""
    x = convert_to_tensor(x)
    low = np.asarray(-1.0, x.dtype)
    high = np.asarray(1.0, x.dtype)
    return np.array(np.clip(x, low, high), dtype=x.dtype)
def hard_shrink(x, threshold=0.5):
    """Zero out values whose magnitude is at most `threshold`."""
    x = convert_to_tensor(x)
    threshold = np.asarray(threshold, x.dtype)
    keep = np.abs(x) > threshold
    return np.array(
        np.where(keep, x, np.array(0.0, dtype=x.dtype)),
        dtype=x.dtype,
    )
def threshold(x, threshold, default_value):
    """Keep values strictly above `threshold`; replace the rest with
    `default_value`.
    """
    x = convert_to_tensor(x)
    fallback = np.array(default_value, dtype=x.dtype)
    return np.where(x > threshold, x, fallback)
def softmax(x, axis=-1):
    """Softmax along `axis`, max-shifted for numerical stability."""
    shifted = x - np.max(x, axis=axis, keepdims=True)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=axis, keepdims=True)
def log_softmax(x, axis=-1):
    """Log of the softmax along `axis`, via the stable log-sum-exp trick."""
    shifted = x - np.max(x, axis=axis, keepdims=True)
    lse = np.log(np.exp(shifted).sum(axis=axis, keepdims=True))
    return shifted - lse
def sparsemax(x, axis=-1):
    """Sparsemax along `axis` (Martins & Astudillo, 2016): projects the
    logits onto the probability simplex, producing exactly-zero entries.
    """
    # Sort logits along the specified axis in descending order
    logits = convert_to_tensor(x)
    logits_sorted = -1.0 * np.sort(-1.0 * logits, axis=axis)
    logits_cumsum = np.cumsum(logits_sorted, axis=axis)
    r = np.arange(1, logits.shape[axis] + 1)
    r_shape = [1] * logits.ndim
    r_shape[axis] = -1  # Broadcast to match the target axis
    r = r.reshape(r_shape)
    support = logits_sorted - (logits_cumsum - 1) / r > 0
    # Support size k (always >= 1: the top logit is always supported).
    k = np.sum(support, axis=axis, keepdims=True)
    # Threshold tau = (cumsum_{k} - 1) / k, where cumsum_{k} is the
    # cumulative sum at the k-th sorted position. The previous code used
    # np.sum over the masked cumsum, which over-counts whenever k > 1 and
    # broke normalization — e.g. sparsemax([1., 1.]) returned [0., 0.]
    # instead of [0.5, 0.5].
    cumsum_at_k = np.take_along_axis(logits_cumsum, k - 1, axis=axis)
    tau = (cumsum_at_k - 1) / k
    output = np.maximum(logits - tau, 0.0)
    return output
def _convert_to_spatial_operand(
x,
num_spatial_dims,
data_format="channels_last",
include_batch_and_channels=True,
):
# Helper function that converts an operand to a spatial operand.
x = (x,) * num_spatial_dims if isinstance(x, int) else x
if not include_batch_and_channels:
return x
if data_format == "channels_last":
x = (1,) + x + (1,)
else:
x = (1,) + (1,) + x
return x
def _pool(
inputs,
initial_value,
reduce_fn,
pool_size,
strides=None,
padding="valid",
):
"""Helper function to define pooling functions.
Args:
inputs: input data of shape `N+2`.
initial_value: the initial value for the reduction.
reduce_fn: a reduce function of the form `(T, T) -> T`.
pool_size: a sequence of `N` integers, representing the window size to
reduce over.
strides: a sequence of `N` integers, representing the inter-window
strides (default: `(1, ..., 1)`).
padding: either the string `same` or `valid`.
Returns:
The output of the reduction for each window slice.
"""
if padding not in ("same", "valid"):
raise ValueError(
f"Invalid padding '{padding}', must be 'same' or 'valid'."
)
padding = padding.upper()
return np.array(
lax.reduce_window(
inputs,
initial_value,
reduce_fn,
pool_size,
strides,
padding,
)
)
def max_pool(
    inputs,
    pool_size,
    strides=None,
    padding="valid",
    data_format=None,
):
    """Max pooling over the spatial dims; `strides` defaults to `pool_size`."""
    data_format = backend.standardize_data_format(data_format)
    spatial_rank = inputs.ndim - 2
    pool_size = _convert_to_spatial_operand(
        pool_size, spatial_rank, data_format
    )
    if strides is None:
        strides = pool_size
    strides = _convert_to_spatial_operand(
        strides, spatial_rank, data_format
    )
    # -inf is the identity for max, so padding never wins a window.
    return _pool(inputs, -np.inf, lax.max, pool_size, strides, padding)
def average_pool(
    inputs,
    pool_size,
    strides=None,
    padding="valid",
    data_format=None,
):
    """Average pooling over the spatial dims; `strides` defaults to
    `pool_size`. For "same" padding, each output divides only by the
    number of valid (un-padded) inputs in its window.
    """
    data_format = backend.standardize_data_format(data_format)
    num_spatial_dims = inputs.ndim - 2
    pool_size = _convert_to_spatial_operand(
        pool_size, num_spatial_dims, data_format
    )
    strides = pool_size if strides is None else strides
    strides = _convert_to_spatial_operand(
        strides, num_spatial_dims, data_format
    )
    pooled = _pool(inputs, 0.0, lax.add, pool_size, strides, padding)
    if padding == "valid":
        # Avoid the extra reduce_window.
        return pooled / np.prod(pool_size)
    else:
        # Count the number of valid entries at each input point, then use that
        # for computing average. Assumes that any two arrays of same shape will
        # be padded the same. Avoid broadcasting on axis where pooling is
        # skipped.
        shape = [
            (a if b != 1 else 1) for (a, b) in zip(inputs.shape, pool_size)
        ]
        window_counts = _pool(
            np.ones(shape, inputs.dtype),
            0.0,
            lax.add,
            pool_size,
            strides,
            padding,
        )
        return pooled / window_counts
def _compute_adaptive_pooling_gather_indices(
input_dim, output_size, big_window
):
window_starts = np.floor(
(np.arange(output_size) * input_dim) / output_size
).astype(np.int32)
window_ends = np.ceil(
(np.arange(1, output_size + 1) * input_dim) / output_size
).astype(np.int32)
window_sizes = window_ends - window_starts
is_big = window_sizes == big_window
small_window = big_window - 1
small_pool_len = input_dim - small_window + 1
small_indices = window_starts
big_indices = window_starts + small_pool_len
gather = np.where(is_big, big_indices, small_indices)
return gather.astype(np.int32)
def _strided_view_1d(x, window_size):
n, l, c = x.shape
out = l - window_size + 1
strides = x.strides
shape = (n, out, window_size, c)
new_strides = (strides[0], strides[1], strides[1], strides[2])
return np.lib.stride_tricks.as_strided(x, shape=shape, strides=new_strides)
def _adaptive_pool1d_impl(inputs, output_size, mode, data_format):
    """Adaptive 1D pooling core; operates in channels_last internally.

    Pools densely with both occurring window sizes (small and big), then
    gathers the correct pooled value per output slot. `mode` is
    "average" or "max".
    """
    if isinstance(output_size, int):
        output_size = (output_size,)
    if data_format == "channels_first":
        inputs = np.transpose(inputs, (0, 2, 1))
    n, l, c = inputs.shape
    out_l = output_size[0]
    small, big = compute_adaptive_pooling_window_sizes(l, out_l)
    gather = _compute_adaptive_pooling_gather_indices(l, out_l, big)
    sv_small = _strided_view_1d(inputs, small)
    small_pool = (
        np.mean(sv_small, axis=2)
        if mode == "average"
        else np.max(sv_small, axis=2)
    )
    sv_big = _strided_view_1d(inputs, big)
    big_pool = (
        np.mean(sv_big, axis=2) if mode == "average" else np.max(sv_big, axis=2)
    )
    # Concatenate [small pools, big pools] and pick per output position.
    combined = np.concatenate([small_pool, big_pool], axis=1)
    out = combined[:, gather, :]
    if data_format == "channels_first":
        out = np.transpose(out, (0, 2, 1))
    return out
def _adaptive_pool2d_impl(inputs, output_size, mode, data_format):
    """Adaptive 2D pooling core; operates in channels_last internally.

    Separable approach: pool adaptively along H first, then along W,
    reusing the 1D strided-window machinery for each axis.
    """
    if isinstance(output_size, int):
        output_size = (output_size, output_size)
    if data_format == "channels_first":
        inputs = np.transpose(inputs, (0, 2, 3, 1))
    n, h, w, c = inputs.shape
    out_h, out_w = output_size
    small_h, big_h = compute_adaptive_pooling_window_sizes(h, out_h)
    gather_h = _compute_adaptive_pooling_gather_indices(h, out_h, big_h)
    # Fold W into the batch so H becomes the 1D pooling axis.
    x_h = np.transpose(inputs, (0, 2, 1, 3)).reshape(n * w, h, c)
    sv_small_h = _strided_view_1d(x_h, small_h)
    small_pool_h = (
        np.mean(sv_small_h, axis=2)
        if mode == "average"
        else np.max(sv_small_h, axis=2)
    )
    sv_big_h = _strided_view_1d(x_h, big_h)
    big_pool_h = (
        np.mean(sv_big_h, axis=2)
        if mode == "average"
        else np.max(sv_big_h, axis=2)
    )
    combined_h = np.concatenate([small_pool_h, big_pool_h], axis=1)
    pooled_h = combined_h[:, gather_h, :]
    pooled_h = pooled_h.reshape(n, w, out_h, c)
    pooled_h = np.transpose(pooled_h, (0, 2, 1, 3))
    small_w, big_w = compute_adaptive_pooling_window_sizes(w, out_w)
    gather_w = _compute_adaptive_pooling_gather_indices(w, out_w, big_w)
    # Fold the pooled-H axis into the batch so W becomes the 1D axis.
    x_w = pooled_h.reshape(n * out_h, w, c)
    sv_small_w = _strided_view_1d(x_w, small_w)
    small_pool_w = (
        np.mean(sv_small_w, axis=2)
        if mode == "average"
        else np.max(sv_small_w, axis=2)
    )
    sv_big_w = _strided_view_1d(x_w, big_w)
    big_pool_w = (
        np.mean(sv_big_w, axis=2)
        if mode == "average"
        else np.max(sv_big_w, axis=2)
    )
    combined_w = np.concatenate([small_pool_w, big_pool_w], axis=1)
    out = combined_w[:, gather_w, :].reshape(n, out_h, out_w, c)
    if data_format == "channels_first":
        out = np.transpose(out, (0, 3, 1, 2))
    return out
def _adaptive_pool3d_impl(inputs, output_size, mode, data_format):
    """Adaptive 3D pooling core; operates in channels_last internally.

    Separable approach: pool adaptively along D, then H, then W, folding
    the other axes into the batch for each 1D pass.
    """
    if isinstance(output_size, int):
        output_size = (output_size, output_size, output_size)
    if data_format == "channels_first":
        inputs = np.transpose(inputs, (0, 2, 3, 4, 1))
    n, d, h, w, c = inputs.shape
    out_d, out_h, out_w = output_size
    small_d, big_d = compute_adaptive_pooling_window_sizes(d, out_d)
    gather_d = _compute_adaptive_pooling_gather_indices(d, out_d, big_d)
    # Fold H and W into the batch so D becomes the 1D pooling axis.
    x_d = np.transpose(inputs, (0, 2, 3, 1, 4)).reshape(n * h * w, d, c)
    sv_small_d = _strided_view_1d(x_d, small_d)
    small_pool_d = (
        np.mean(sv_small_d, axis=2)
        if mode == "average"
        else np.max(sv_small_d, axis=2)
    )
    sv_big_d = _strided_view_1d(x_d, big_d)
    big_pool_d = (
        np.mean(sv_big_d, axis=2)
        if mode == "average"
        else np.max(sv_big_d, axis=2)
    )
    combined_d = np.concatenate([small_pool_d, big_pool_d], axis=1)
    pooled_d = combined_d[:, gather_d, :].reshape(n, h, w, out_d, c)
    pooled_d = np.transpose(pooled_d, (0, 3, 1, 2, 4))
    small_h, big_h = compute_adaptive_pooling_window_sizes(h, out_h)
    gather_h = _compute_adaptive_pooling_gather_indices(h, out_h, big_h)
    # Fold pooled-D and W into the batch so H becomes the 1D axis.
    x_h = np.transpose(pooled_d, (0, 1, 3, 2, 4)).reshape(n * out_d * w, h, c)
    sv_small_h = _strided_view_1d(x_h, small_h)
    small_pool_h = (
        np.mean(sv_small_h, axis=2)
        if mode == "average"
        else np.max(sv_small_h, axis=2)
    )
    sv_big_h = _strided_view_1d(x_h, big_h)
    big_pool_h = (
        np.mean(sv_big_h, axis=2)
        if mode == "average"
        else np.max(sv_big_h, axis=2)
    )
    combined_h = np.concatenate([small_pool_h, big_pool_h], axis=1)
    pooled_h = combined_h[:, gather_h, :].reshape(n, out_d, w, out_h, c)
    pooled_h = np.transpose(pooled_h, (0, 1, 3, 2, 4))
    small_w, big_w = compute_adaptive_pooling_window_sizes(w, out_w)
    gather_w = _compute_adaptive_pooling_gather_indices(w, out_w, big_w)
    # Fold pooled-D and pooled-H into the batch so W becomes the 1D axis.
    x_w = pooled_h.reshape(n * out_d * out_h, w, c)
    sv_small_w = _strided_view_1d(x_w, small_w)
    small_pool_w = (
        np.mean(sv_small_w, axis=2)
        if mode == "average"
        else np.max(sv_small_w, axis=2)
    )
    sv_big_w = _strided_view_1d(x_w, big_w)
    big_pool_w = (
        np.mean(sv_big_w, axis=2)
        if mode == "average"
        else np.max(sv_big_w, axis=2)
    )
    combined_w = np.concatenate([small_pool_w, big_pool_w], axis=1)
    out = combined_w[:, gather_w, :].reshape(n, out_d, out_h, out_w, c)
    if data_format == "channels_first":
        out = np.transpose(out, (0, 4, 1, 2, 3))
    return out
def adaptive_average_pool(inputs, output_size, data_format=None):
    """Adaptive average pooling for 1D/2D/3D spatial inputs."""
    data_format = backend.standardize_data_format(data_format)
    spatial_rank = inputs.ndim - 2
    impl_by_rank = {
        1: _adaptive_pool1d_impl,
        2: _adaptive_pool2d_impl,
        3: _adaptive_pool3d_impl,
    }
    if spatial_rank not in impl_by_rank:
        raise ValueError("adaptive_average_pool supports only 1D/2D/3D")
    return impl_by_rank[spatial_rank](
        inputs, output_size, "average", data_format
    )
def adaptive_max_pool(inputs, output_size, data_format=None):
    """Adaptive max pooling for 1D/2D/3D spatial inputs."""
    data_format = backend.standardize_data_format(data_format)
    spatial_rank = inputs.ndim - 2
    impl_by_rank = {
        1: _adaptive_pool1d_impl,
        2: _adaptive_pool2d_impl,
        3: _adaptive_pool3d_impl,
    }
    if spatial_rank not in impl_by_rank:
        raise ValueError("adaptive_max_pool supports only 1D/2D/3D")
    return impl_by_rank[spatial_rank](inputs, output_size, "max", data_format)
def _convert_to_lax_conv_dimension_numbers(
num_spatial_dims,
data_format="channels_last",
transpose=False,
):
"""Create a `lax.ConvDimensionNumbers` for the given inputs."""
num_dims = num_spatial_dims + 2
if data_format == "channels_last":
spatial_dims = tuple(range(1, num_dims - 1))
inputs_dn = (0, num_dims - 1) + spatial_dims
else:
spatial_dims = tuple(range(2, num_dims))
inputs_dn = (0, 1) + spatial_dims
if transpose:
kernel_dn = (num_dims - 2, num_dims - 1) + tuple(range(num_dims - 2))
else:
kernel_dn = (num_dims - 1, num_dims - 2) + tuple(range(num_dims - 2))
return lax.ConvDimensionNumbers(
lhs_spec=inputs_dn, rhs_spec=kernel_dn, out_spec=inputs_dn
)
def conv(
    inputs,
    kernel,
    strides=1,
    padding="valid",
    data_format=None,
    dilation_rate=1,
):
    """N-D convolution via `jax.lax.conv_general_dilated`.

    Grouped convolution is inferred from the ratio of the input channel
    count to the kernel's in_channels.

    Raises:
        ValueError: if input channels are not divisible by the kernel's
            in_channels, or if the configuration yields an empty output.
    """
    data_format = backend.standardize_data_format(data_format)
    num_spatial_dims = inputs.ndim - 2
    dimension_numbers = _convert_to_lax_conv_dimension_numbers(
        num_spatial_dims,
        data_format,
        transpose=False,
    )
    strides = _convert_to_spatial_operand(
        strides,
        num_spatial_dims,
        data_format,
        include_batch_and_channels=False,
    )
    dilation_rate = _convert_to_spatial_operand(
        dilation_rate,
        num_spatial_dims,
        data_format,
        include_batch_and_channels=False,
    )
    if data_format == "channels_last":
        channels = inputs.shape[-1]
    else:
        channels = inputs.shape[1]
    kernel_in_channels = kernel.shape[-2]
    if channels % kernel_in_channels > 0:
        raise ValueError(
            "The number of input channels must be evenly divisible by "
            f"kernel's in_channels. Received input channels {channels} and "
            f"kernel in_channels {kernel_in_channels}. "
        )
    feature_group_count = channels // kernel_in_channels
    result = np.array(
        jax.lax.conv_general_dilated(
            inputs,
            # Unwrap non-numpy kernels (e.g. backend variables).
            kernel if is_tensor(kernel) else kernel.numpy(),
            strides,
            padding,
            rhs_dilation=dilation_rate,
            dimension_numbers=dimension_numbers,
            feature_group_count=feature_group_count,
        )
    )
    if result.size == 0:
        raise ValueError(
            "The convolution operation resulted in an empty output. "
            "This can happen if the input is too small for the given "
            "kernel size, strides, dilation rate, and padding mode. "
            "Please check the input shape and convolution parameters."
        )
    return result
def depthwise_conv(
    inputs,
    kernel,
    strides=1,
    padding="valid",
    data_format=None,
    dilation_rate=1,
):
    """Depthwise N-D convolution via grouped `conv_general_dilated`.

    The kernel is reshaped to `(spatial..., 1, channels * multiplier)`
    and `feature_group_count` is set to the channel count, so each input
    channel is convolved independently.
    """
    data_format = backend.standardize_data_format(data_format)
    num_spatial_dims = inputs.ndim - 2
    dimension_numbers = _convert_to_lax_conv_dimension_numbers(
        num_spatial_dims,
        data_format,
        transpose=False,
    )
    strides = _convert_to_spatial_operand(
        strides,
        num_spatial_dims,
        data_format,
        include_batch_and_channels=False,
    )
    dilation_rate = _convert_to_spatial_operand(
        dilation_rate,
        num_spatial_dims,
        data_format,
        include_batch_and_channels=False,
    )
    feature_group_count = (
        inputs.shape[-1] if data_format == "channels_last" else inputs.shape[1]
    )
    kernel = np.reshape(
        kernel if is_tensor(kernel) else kernel.numpy(),
        kernel.shape[:-2] + (1, feature_group_count * kernel.shape[-1]),
    )
    return np.array(
        jax.lax.conv_general_dilated(
            inputs,
            kernel,
            strides,
            padding,
            rhs_dilation=dilation_rate,
            dimension_numbers=dimension_numbers,
            feature_group_count=feature_group_count,
        )
    )
def separable_conv(
    inputs,
    depthwise_kernel,
    pointwise_kernel,
    strides=1,
    padding="valid",
    data_format=None,
    dilation_rate=1,
):
    """Depthwise-separable convolution: a depthwise conv followed by a
    pointwise (1x1) conv.
    """
    data_format = backend.standardize_data_format(data_format)
    depthwise_out = depthwise_conv(
        inputs,
        depthwise_kernel,
        strides,
        padding,
        data_format,
        dilation_rate,
    )
    # Strides and padding were already applied by the depthwise step, so
    # the pointwise step always runs with stride 1 / valid padding.
    return conv(
        depthwise_out,
        pointwise_kernel,
        strides=1,
        padding="valid",
        data_format=data_format,
        dilation_rate=dilation_rate,
    )
def conv_transpose(
    inputs,
    kernel,
    strides=1,
    padding="valid",
    output_padding=None,
    data_format=None,
    dilation_rate=1,
):
    """Transposed N-D convolution via `jax.lax.conv_transpose`.

    The explicit per-dimension padding is precomputed so Keras'
    `padding`/`output_padding` semantics map onto JAX's API.
    """
    data_format = backend.standardize_data_format(data_format)
    num_spatial_dims = inputs.ndim - 2
    padding_values = compute_conv_transpose_padding_args_for_jax(
        input_shape=inputs.shape,
        kernel_shape=kernel.shape,
        strides=strides,
        padding=padding,
        output_padding=output_padding,
        dilation_rate=dilation_rate,
    )
    dimension_numbers = _convert_to_lax_conv_dimension_numbers(
        num_spatial_dims,
        data_format,
        transpose=False,
    )
    strides = _convert_to_spatial_operand(
        strides,
        num_spatial_dims,
        data_format,
        include_batch_and_channels=False,
    )
    dilation_rate = _convert_to_spatial_operand(
        dilation_rate,
        num_spatial_dims,
        data_format,
        include_batch_and_channels=False,
    )
    return np.array(
        jax.lax.conv_transpose(
            inputs,
            # Unwrap non-numpy kernels (e.g. backend variables).
            kernel if is_tensor(kernel) else kernel.numpy(),
            strides,
            padding=padding_values,
            rhs_dilation=dilation_rate,
            dimension_numbers=dimension_numbers,
            transpose_kernel=True,
        )
    )
def one_hot(x, num_classes, axis=-1, dtype=None, sparse=False):
    """One-hot encode integer indices; negative indices yield all-zero rows."""
    if sparse:
        raise ValueError("Unsupported value `sparse=True` with numpy backend")
    dtype = "float32" if dtype is None else dtype
    x = convert_to_tensor(x)
    original_shape = x.shape
    flat = x.reshape(-1)
    if not num_classes:
        # Infer the class count from the largest index present.
        num_classes = np.max(flat) + 1
    n = flat.shape[0]
    encoded = np.zeros((n, num_classes), dtype=dtype)
    keep = flat >= 0
    encoded[np.arange(n)[keep], flat[keep]] = 1
    # Restore the input shape with the class dimension appended, then move
    # that dimension to the requested axis.
    encoded = np.reshape(encoded, original_shape + (num_classes,))
    return encoded if axis == -1 else np.moveaxis(encoded, -1, axis)
def multi_hot(x, num_classes, axis=-1, dtype=None, sparse=False):
    """Multi-hot encode: 1 wherever any index in the sample hits a class."""
    if sparse:
        raise ValueError("Unsupported value `sparse=True` with numpy backend")
    x = convert_to_tensor(x)
    # Batched inputs collapse over axis 1, unbatched over axis 0.
    collapse_axis = 0 if len(x.shape) <= 1 else 1
    encoded = one_hot(cast(x, "int32"), num_classes, axis=axis, dtype=dtype)
    return np.max(encoded, axis=collapse_axis)
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
    """Cross-entropy between a one-hot/probability target and predictions."""
    target = np.array(target)
    output = np.array(output)
    if target.shape != output.shape:
        raise ValueError(
            "Arguments `target` and `output` must have the same shape. "
            "Received: "
            f"target.shape={target.shape}, output.shape={output.shape}"
        )
    if len(target.shape) < 1:
        raise ValueError(
            "Arguments `target` and `output` must be at least rank 1. "
            "Received: "
            f"target.shape={target.shape}, output.shape={output.shape}"
        )
    if from_logits:
        log_prob = log_softmax(output, axis=axis)
    else:
        # Renormalize, then clip away exact 0/1 before taking the log.
        probs = output / np.sum(output, axis, keepdims=True)
        probs = np.clip(probs, backend.epsilon(), 1.0 - backend.epsilon())
        log_prob = np.log(probs)
    return -np.sum(target * log_prob, axis=axis)
def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
    """Cross-entropy where `target` holds integer class ids (not one-hot)."""
    target = np.array(target, dtype="int32")
    output = np.array(output)
    # Accept a trailing singleton class dim on target, e.g. (B, 1) vs (B,).
    if len(target.shape) == len(output.shape) and target.shape[-1] == 1:
        target = np.squeeze(target, axis=-1)
    if len(output.shape) < 1:
        raise ValueError(
            "Argument `output` must be at least rank 1. "
            "Received: "
            f"output.shape={output.shape}"
        )
    if target.shape != output.shape[:-1]:
        raise ValueError(
            "Arguments `target` and `output` must have the same shape "
            "up until the last dimension: "
            f"target.shape={target.shape}, output.shape={output.shape}"
        )
    if from_logits:
        log_prob = log_softmax(output, axis=axis)
    else:
        # Renormalize, then clip away exact 0/1 before taking the log.
        output = output / np.sum(output, axis, keepdims=True)
        output = np.clip(output, backend.epsilon(), 1.0 - backend.epsilon())
        log_prob = np.log(output)
    # Expand ids to one-hot so the reduction matches the dense variant.
    target = one_hot(target, output.shape[axis], axis=axis)
    return -np.sum(target * log_prob, axis=axis)
def binary_crossentropy(target, output, from_logits=False):
    """Elementwise binary cross-entropy (one value per element, no reduce)."""
    target = np.array(target)
    output = np.array(output)
    if target.shape != output.shape:
        raise ValueError(
            "Arguments `target` and `output` must have the same shape. "
            "Received: "
            f"target.shape={target.shape}, output.shape={output.shape}"
        )
    if from_logits:
        output = sigmoid(output)
    eps = backend.epsilon()
    output = np.clip(output, eps, 1.0 - eps)
    return -(
        target * np.log(output) + (1.0 - target) * np.log(1.0 - output)
    )
def moments(x, axes, keepdims=False, synchronized=False):
    """Mean and (biased) variance of `x` over `axes`.

    Args:
        x: Input tensor.
        axes: Axis or axes to reduce over.
        keepdims: Whether reduced axes are kept with size 1.
        synchronized: Unsupported on the NumPy backend; must be False.

    Returns:
        Tuple `(mean, variance)` in the dtype of `x`.

    Raises:
        NotImplementedError: If `synchronized=True`.
    """
    if synchronized:
        raise NotImplementedError(
            "Argument synchronized=True is not supported with NumPy."
        )
    axes = tuple(axes) if isinstance(axes, list) else axes
    # The dynamic range of float16 is too limited for statistics. As a
    # workaround, we simply perform the operations on float32 and convert back
    # to float16
    need_cast = False
    ori_dtype = backend.standardize_dtype(x.dtype)
    if ori_dtype == "float16":
        need_cast = True
        x = cast(x, "float32")
    mean = np.mean(x, axes, keepdims=True)
    # The variance is computed using $Var = E[|x|^2] - |E[x]|^2$, It is faster
    # but less numerically stable.
    variance = np.mean(np.square(x), axis=axes, keepdims=True) - np.square(mean)
    if not keepdims:
        mean = np.squeeze(mean, axes)
        variance = np.squeeze(variance, axes)
    if need_cast:
        # Clamp to the float16 range so casting the float32 statistics back
        # down to float16 cannot overflow to inf.
        mean = np.clip(mean, np.finfo(np.float16).min, np.finfo(np.float16).max)
        variance = np.clip(
            variance, np.finfo(np.float16).min, np.finfo(np.float16).max
        )
        mean = cast(mean, ori_dtype)
        variance = cast(variance, ori_dtype)
    return mean, variance
def batch_normalization(
    x, mean, variance, axis, offset=None, scale=None, epsilon=1e-3
):
    """Normalize `x` with precomputed per-channel statistics along `axis`."""
    # Broadcast the per-channel vectors against `x` by giving them the rank
    # of `x` with the channel count at `axis`.
    broadcast_shape = [1] * len(x.shape)
    broadcast_shape[axis] = mean.shape[0]
    mean = np.reshape(mean, broadcast_shape)
    variance = np.reshape(variance, broadcast_shape)
    # Fold scale into the reciprocal stddev so the result is one fused
    # multiply-add: x * inv + (offset - mean * inv).
    inv = 1.0 / np.sqrt(variance + epsilon)
    if scale is not None:
        inv = inv * np.reshape(scale, broadcast_shape)
    shift = -mean * inv
    if offset is not None:
        shift = shift + np.reshape(offset, broadcast_shape)
    return x * inv + shift
def ctc_loss(target, output, target_length, output_length, mask_index=0):
    """CTC loss via a forward (alpha) pass in log space.

    Args:
        target: `(batch, max_label_length)` int labels.
        output: `(batch, max_input_length, num_classes)` unnormalized logits.
        target_length: `(batch,)` valid label lengths.
        output_length: `(batch,)` valid frame counts.
        mask_index: Class index used as the CTC blank.

    Returns:
        `(batch,)` per-sequence negative log-likelihood.
    """
    # Ref: https://github.com/google-deepmind/optax
    # optax.ctc_loss_with_forward_probs
    target = convert_to_tensor(target, dtype="int32")
    output = convert_to_tensor(output)
    target_length = convert_to_tensor(target_length, "int32")
    output_length = convert_to_tensor(output_length, "int32")
    batch_size, max_input_length, num_classes = output.shape
    batch_size, max_label_length = target.shape
    # Stand-in for -inf that stays finite under addition.
    log_epsilon = -1e5
    # Ensure that the dtype promotion behavior matches that of `tf.nn.ctc_loss`
    dtype = backend.result_type(output.dtype, "float32")
    output = output.astype(dtype)

    def _lengths_to_paddings(lengths, max_length):
        # Boolean mask that is True on padded (invalid) positions.
        indices = np.arange(max_length).reshape(
            (1,) * lengths.ndim + (max_length,)
        )
        lengths = np.expand_dims(lengths, axis=-1)
        elem_valid = indices < lengths
        return np.logical_not(elem_valid)

    target_paddings = _lengths_to_paddings(target_length, max_label_length)
    output_paddings = _lengths_to_paddings(output_length, max_input_length)
    target_paddings = target_paddings.astype(output.dtype)
    output_paddings = output_paddings.astype(output.dtype)
    logprobs = log_softmax(output, axis=-1)
    label_lengths = max_label_length - np.sum(target_paddings, axis=1).astype(
        np.int32
    )
    # repeat[b, n] == 1.0 when label[b, n] == label[b, n+1].
    repeat = (target[:, :-1] == target[:, 1:]).astype(np.float32)
    repeat = np.pad(repeat, ((0, 0), (0, 1)))
    logprobs_phi = logprobs[:, :, mask_index : mask_index + 1]  # [B, T, 1]
    logprobs_phi = np.transpose(logprobs_phi, (1, 0, 2))  # [T, B, 1]
    _one_hot = one_hot(target, num_classes=num_classes)  # [B, N, K]
    logprobs_emit = np.einsum("btk,bnk->btn", logprobs, _one_hot)
    logprobs_emit = np.transpose(logprobs_emit, (1, 0, 2))  # [T, B, N]
    # [B, N]
    logalpha_phi_init = (
        np.ones((batch_size, max_label_length + 1), dtype=output.dtype)
        * log_epsilon
    )
    logalpha_phi_init[:, 0] = 0.0
    logalpha_emit_init = (
        np.ones((batch_size, max_label_length), dtype=output.dtype)
        * log_epsilon
    )

    def update_phi_score(phi, added_score):
        # Update `phi[:, 1:]`` with adding `added_score` in log space.
        return np.concatenate(
            [phi[:, :1], np.logaddexp(phi[:, 1:], added_score)], axis=-1
        )

    def loop_body(prev, x):
        # One time-step of the forward recursion over (phi, emit) scores.
        prev_phi, prev_emit = prev
        # emit-to-phi epsilon transition, except if the next label is repetition
        prev_phi_orig = prev_phi
        prev_phi = update_phi_score(prev_phi, prev_emit + log_epsilon * repeat)
        logprob_emit, logprob_phi, pad = x
        # phi-to-emit transition
        next_emit = np.logaddexp(
            prev_phi[:, :-1] + logprob_emit, prev_emit + logprob_emit
        )
        # self-loop transition
        next_phi = prev_phi + logprob_phi
        # emit-to-phi blank transition only when the next label is repetition
        next_phi = update_phi_score(
            next_phi, prev_emit + logprob_phi + log_epsilon * (1.0 - repeat)
        )
        # Padded frames carry the previous state through unchanged.
        pad = pad.reshape((batch_size, 1))
        next_emit = pad * prev_emit + (1.0 - pad) * next_emit
        next_phi = pad * prev_phi_orig + (1.0 - pad) * next_phi
        return (next_phi, next_emit), (next_phi, next_emit)

    def np_scan(f, init, xs):
        # Minimal NumPy re-implementation of jax.lax.scan over the time axis.
        carry = init
        ys = []
        for x in zip(*xs):
            carry, y = f(carry, x)
            ys.append(y)
        result = []
        for i in range(len(ys[0])):
            result.append(np.stack([y[i] for y in ys]))
        return carry, result

    xs = (logprobs_emit, logprobs_phi, output_paddings.transpose((1, 0)))
    _, (logalpha_phi, logalpha_emit) = np_scan(
        loop_body, (logalpha_phi_init, logalpha_emit_init), xs
    )
    # last row needs to be updated with the last epsilon transition
    logalpha_phi_last = update_phi_score(logalpha_phi[-1], logalpha_emit[-1])
    logalpha_phi[-1] = logalpha_phi_last
    # extract per_seq_loss
    # [B, N+1]
    _one_hot = one_hot(label_lengths, num_classes=max_label_length + 1)
    per_seq_loss = -np.einsum("bn,bn->b", logalpha_phi_last, _one_hot)
    return per_seq_loss
def _ctc_greedy_decode(
inputs,
sequence_lengths,
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/numpy/linalg.py | keras/src/backend/numpy/linalg.py | import numpy as np
import scipy.linalg as sl
from keras.src.backend import standardize_dtype
from keras.src.backend.common import dtypes
from keras.src.backend.numpy.core import convert_to_tensor
def cholesky(a, upper=False):
    """Cholesky factor of `a` (lower-triangular unless `upper=True`).

    NOTE(review): the `upper` keyword of `np.linalg.cholesky` was added in
    NumPy 2.0 — confirm the minimum supported NumPy version.
    """
    return np.linalg.cholesky(a, upper=upper)
def cholesky_inverse(a, upper=False):
    """Invert a matrix given its Cholesky factor `a`.

    Args:
        a: Triangular Cholesky factor, lower unless `upper=True`.
        upper: Whether `a` is the upper-triangular factor.

    Returns:
        The inverse of the original positive-definite matrix, computed as
        `inv(a).T @ inv(a)` (or the transposed product for `upper=True`).
    """
    identity = np.eye(a.shape[-1], dtype=a.dtype)
    # Triangular solve against the identity gives inv(a).
    inv_chol = solve_triangular(a, identity, lower=not upper)
    # Transpose only the matrix axes: `.T` would reverse *every* axis and
    # silently corrupt batched (..., n, n) factors; swapaxes is identical
    # to `.T` for the 2-D case.
    # NOTE(review): real-valued factors assumed — complex input would need a
    # conjugate transpose here.
    inv_chol_t = np.swapaxes(inv_chol, -2, -1)
    if upper:
        return np.matmul(inv_chol, inv_chol_t)
    return np.matmul(inv_chol_t, inv_chol)
def det(a):
    """Determinant of the innermost 2-D matrices of `a`."""
    return np.linalg.det(a)


def eig(a):
    """Eigenvalues and right eigenvectors of a general square matrix."""
    return np.linalg.eig(a)


def eigh(a):
    """Eigendecomposition for symmetric/Hermitian matrices."""
    return np.linalg.eigh(a)


def inv(a):
    """Multiplicative inverse of the innermost 2-D matrices of `a`."""
    return np.linalg.inv(a)
def lu_factor(a):
    """LU factorization with partial pivoting, batched over leading dims.

    Returns `(lu, piv)` as produced by `scipy.linalg.lu_factor`, applied
    matrix-by-matrix when `a` has rank > 2.
    """
    if a.ndim == 2:
        return sl.lu_factor(a)
    rows, cols = a.shape[-2:]
    # The pivot vector has min(m, n) entries; spell that out in the gufunc
    # signature so np.vectorize can stack the batched results.
    pivot_spec = "(m)" if rows <= cols else "(n)"
    batched = np.vectorize(
        sl.lu_factor,
        signature=f"(m,n) -> (m,n), {pivot_spec}",
    )
    return batched(a)
def norm(x, ord=None, axis=None, keepdims=False):
    """Matrix/vector norm; int and bool inputs are promoted to float."""
    x = convert_to_tensor(x)
    dtype = standardize_dtype(x.dtype)
    if "int" in dtype or dtype == "bool":
        dtype = dtypes.result_type(x.dtype, "float32")
    result = np.linalg.norm(x, ord=ord, axis=axis, keepdims=keepdims)
    return result.astype(dtype)
def qr(x, mode="reduced"):
    """QR decomposition of `x`.

    Args:
        x: Input matrix (or stack of matrices).
        mode: Either `"reduced"` or `"complete"`.

    Raises:
        ValueError: For any other `mode`.
    """
    if mode not in {"reduced", "complete"}:
        raise ValueError(
            "`mode` argument value not supported. "
            "Expected one of {'reduced', 'complete'}. "
            f"Received: mode={mode}"
        )
    return np.linalg.qr(x, mode=mode)


def solve(a, b):
    """Solve the linear system `a @ x = b` for square `a`."""
    return np.linalg.solve(a, b)
def solve_triangular(a, b, lower=False):
    """Solve `a @ x = b` for triangular `a`, with gufunc-style batching."""
    if a.ndim == 2:
        return sl.solve_triangular(a, b, lower=lower)

    def _solve_one(a_mat, b_mat):
        return sl.solve_triangular(a_mat, b_mat, lower=lower)

    batched_solve = np.vectorize(_solve_one, signature="(n,n),(n,m)->(n,m)")
    # A rank-(a.ndim - 1) right-hand side is a batch of vectors: promote it
    # to single-column matrices, solve, then drop the column axis again.
    if b.ndim == a.ndim - 1:
        solution = batched_solve(a, np.expand_dims(b, axis=-1))
        return solution.squeeze(axis=-1)
    return batched_solve(a, b)
def svd(x, full_matrices=True, compute_uv=True):
    """Singular value decomposition of `x` (NumPy semantics)."""
    return np.linalg.svd(x, full_matrices=full_matrices, compute_uv=compute_uv)


def lstsq(a, b, rcond=None):
    """Least-squares solution of `a @ x = b`; returns the solution only."""
    a = convert_to_tensor(a)
    b = convert_to_tensor(b)
    return np.linalg.lstsq(a, b, rcond=rcond)[0]


def jvp(fun, primals, tangents, has_aux=False):
    """Jacobian-vector products are not available without autodiff."""
    raise NotImplementedError("JVP is not supported by the Numpy backend.")
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/numpy/numpy.py | keras/src/backend/numpy/numpy.py | import numpy as np
from keras.src import tree
from keras.src.backend import config
from keras.src.backend import standardize_dtype
from keras.src.backend.common import dtypes
from keras.src.backend.common.backend_utils import standardize_axis_for_numpy
from keras.src.backend.numpy.core import convert_to_tensor
def rot90(array, k=1, axes=(0, 1)):
    """Rotate `array` by `k * 90` degrees in the plane spanned by `axes`."""
    ndim = array.ndim
    if ndim < 2:
        raise ValueError(
            "Input array must have at least 2 dimensions. "
            f"Received: array.ndim={ndim}"
        )
    axes_are_valid = len(axes) == 2 and axes[0] != axes[1]
    if not axes_are_valid:
        raise ValueError(
            f"Invalid axes: {axes}. Axes must be a tuple "
            "of two different dimensions."
        )
    return np.rot90(array, k=k, axes=axes)
def add(x1, x2):
    """Elementwise sum with Keras dtype promotion for python scalars."""
    # Python scalars stay weakly typed until the common dtype is resolved.
    if not isinstance(x1, (int, float)):
        x1 = convert_to_tensor(x1)
    if not isinstance(x2, (int, float)):
        x2 = convert_to_tensor(x2)
    common_dtype = dtypes.result_type(
        getattr(x1, "dtype", type(x1)),
        getattr(x2, "dtype", type(x2)),
    )
    return np.add(
        convert_to_tensor(x1, common_dtype),
        convert_to_tensor(x2, common_dtype),
    )
def einsum(subscripts, *operands, **kwargs):
    """Einstein summation over `operands` with Keras dtype promotion."""
    operands = tree.map_structure(convert_to_tensor, operands)
    dtypes_to_resolve = list(set(standardize_dtype(x.dtype) for x in operands))
    # When operands are of int8, we cast the result to int32 to align with
    # the behavior of jax.
    if len(dtypes_to_resolve) == 1 and dtypes_to_resolve[0] == "int8":
        compute_dtype = "int32"  # prevent overflow
        result_dtype = "int32"
    else:
        result_dtype = dtypes.result_type(*dtypes_to_resolve)
        compute_dtype = result_dtype
    # TODO: np.einsum doesn't support bfloat16
    if compute_dtype == "bfloat16":
        compute_dtype = "float32"
    operands = tree.map_structure(lambda x: x.astype(compute_dtype), operands)
    return np.einsum(subscripts, *operands, **kwargs).astype(result_dtype)
def subtract(x1, x2):
    """Elementwise difference with Keras dtype promotion for scalars."""
    # Python scalars stay weakly typed until the common dtype is resolved.
    if not isinstance(x1, (int, float)):
        x1 = convert_to_tensor(x1)
    if not isinstance(x2, (int, float)):
        x2 = convert_to_tensor(x2)
    common_dtype = dtypes.result_type(
        getattr(x1, "dtype", type(x1)),
        getattr(x2, "dtype", type(x2)),
    )
    return np.subtract(
        convert_to_tensor(x1, common_dtype),
        convert_to_tensor(x2, common_dtype),
    )
def matmul(x1, x2):
    """Matrix product; int8 @ int8 accumulates into int32 (jax parity)."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    # When both x1 and x2 are of int8, we cast the outputs to int32 to align
    # with jax
    if (
        standardize_dtype(x1.dtype) == "int8"
        and standardize_dtype(x2.dtype) == "int8"
    ):
        dtype = "int32"
    else:
        dtype = dtypes.result_type(x1.dtype, x2.dtype)
    return np.matmul(x1.astype(dtype), x2.astype(dtype)).astype(dtype)
def multiply(x1, x2):
    """Elementwise product with Keras dtype promotion for scalars."""
    # Python scalars stay weakly typed until the common dtype is resolved.
    if not isinstance(x1, (int, float)):
        x1 = convert_to_tensor(x1)
    if not isinstance(x2, (int, float)):
        x2 = convert_to_tensor(x2)
    common_dtype = dtypes.result_type(
        getattr(x1, "dtype", type(x1)),
        getattr(x2, "dtype", type(x2)),
    )
    return np.multiply(
        convert_to_tensor(x1, common_dtype),
        convert_to_tensor(x2, common_dtype),
    )
def mean(x, axis=None, keepdims=False):
    """Mean over `axis`; int/bool inputs yield a floating result."""
    axis = standardize_axis_for_numpy(axis)
    x = convert_to_tensor(x)
    in_dtype = standardize_dtype(x.dtype)
    if "int" in in_dtype or in_dtype == "bool":
        out_dtype = dtypes.result_type(x.dtype, "float32")
    else:
        out_dtype = in_dtype
    return np.mean(x, axis=axis, keepdims=keepdims).astype(out_dtype)
def max(x, axis=None, keepdims=False, initial=None):
    """Maximum over `axis` (intentionally shadows builtins.max here)."""
    axis = standardize_axis_for_numpy(axis)
    return np.max(x, axis=axis, keepdims=keepdims, initial=initial)


def ones(shape, dtype=None):
    """All-ones tensor; dtype defaults to the configured floatx."""
    dtype = dtype or config.floatx()
    return np.ones(shape, dtype=dtype)


def zeros(shape, dtype=None):
    """All-zeros tensor; dtype defaults to the configured floatx."""
    dtype = dtype or config.floatx()
    return np.zeros(shape, dtype=dtype)


def absolute(x):
    """Elementwise absolute value."""
    return np.absolute(x)


def abs(x):
    """Alias of `absolute` (intentionally shadows builtins.abs here)."""
    return absolute(x)


def all(x, axis=None, keepdims=False):
    """Logical AND reduction over `axis`."""
    axis = standardize_axis_for_numpy(axis)
    return np.all(x, axis=axis, keepdims=keepdims)


def angle(x):
    """Phase angle of `x`, computed in floating point."""
    x = convert_to_tensor(x)
    # int64 maps to the configured floatx to mirror JAX's promotion.
    if standardize_dtype(x.dtype) == "int64":
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = x.astype(dtype)
    return np.angle(x)


def any(x, axis=None, keepdims=False):
    """Logical OR reduction over `axis`."""
    axis = standardize_axis_for_numpy(axis)
    return np.any(x, axis=axis, keepdims=keepdims)


def amax(x, axis=None, keepdims=False):
    """Maximum over `axis` (NumPy `amax` spelling)."""
    axis = standardize_axis_for_numpy(axis)
    return np.amax(x, axis=axis, keepdims=keepdims)


def amin(x, axis=None, keepdims=False):
    """Minimum over `axis` (NumPy `amin` spelling)."""
    axis = standardize_axis_for_numpy(axis)
    return np.amin(x, axis=axis, keepdims=keepdims)


def append(x1, x2, axis=None):
    """Append `x2` to `x1` along `axis` after promoting to a common dtype."""
    axis = standardize_axis_for_numpy(axis)
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(x1.dtype, x2.dtype)
    x1 = x1.astype(dtype)
    x2 = x2.astype(dtype)
    return np.append(x1, x2, axis=axis)
def arange(start, stop=None, step=None, dtype=None):
    """Evenly spaced values in `[start, stop)` with Keras dtype inference."""
    if dtype is None:
        dtypes_to_resolve = [getattr(start, "dtype", type(start))]
        if stop is not None:
            dtypes_to_resolve.append(getattr(stop, "dtype", type(stop)))
        if step is not None:
            dtypes_to_resolve.append(getattr(step, "dtype", type(step)))
        dtype = dtypes.result_type(*dtypes_to_resolve)
    if stop is None:
        # Single-argument form: arange(n) == arange(0, n).
        start, stop = 0, start
    if step is None:
        step = 1
    return np.arange(start, stop, step=step, dtype=dtype)


def arccos(x):
    """Elementwise inverse cosine, computed in floating point."""
    x = convert_to_tensor(x)
    # int64 maps to the configured floatx to mirror JAX's promotion.
    if standardize_dtype(x.dtype) == "int64":
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = x.astype(dtype)
    return np.arccos(x)


def arccosh(x):
    """Elementwise inverse hyperbolic cosine, computed in floating point."""
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = x.astype(dtype)
    return np.arccosh(x)


def arcsin(x):
    """Elementwise inverse sine, computed in floating point."""
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = x.astype(dtype)
    return np.arcsin(x)


def arcsinh(x):
    """Elementwise inverse hyperbolic sine, computed in floating point."""
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = x.astype(dtype)
    return np.arcsinh(x)


def arctan(x):
    """Elementwise inverse tangent, computed in floating point."""
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = x.astype(dtype)
    return np.arctan(x)


def arctan2(x1, x2):
    """Elementwise four-quadrant arctangent of `x1 / x2`."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(x1.dtype, x2.dtype, float)
    x1 = x1.astype(dtype)
    x2 = x2.astype(dtype)
    return np.arctan2(x1, x2)


def arctanh(x):
    """Elementwise inverse hyperbolic tangent, computed in floating point."""
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = x.astype(dtype)
    return np.arctanh(x)
def argmax(x, axis=None, keepdims=False):
    """Index of the maximum along `axis`, as int32."""
    x = convert_to_tensor(x)
    axis = standardize_axis_for_numpy(axis)
    dtype = standardize_dtype(x.dtype)
    if "float" not in dtype or x.ndim == 0:
        return np.argmax(x, axis=axis, keepdims=keepdims).astype("int32")
    dtype = dtypes.result_type(dtype, "float32")
    x = x.astype(dtype)
    # Nudge -0.0 just below +0.0 so ties between signed zeros resolve the
    # same way as on the other backends.
    is_negative_zero = (x == 0.0) & np.signbit(x)
    x = np.where(is_negative_zero, -np.finfo(x.dtype).tiny, x)
    return np.argmax(x, axis=axis, keepdims=keepdims).astype("int32")


def argmin(x, axis=None, keepdims=False):
    """Index of the minimum along `axis`, as int32."""
    x = convert_to_tensor(x)
    axis = standardize_axis_for_numpy(axis)
    dtype = standardize_dtype(x.dtype)
    if "float" not in dtype or x.ndim == 0:
        return np.argmin(x, axis=axis, keepdims=keepdims).astype("int32")
    dtype = dtypes.result_type(dtype, "float32")
    x = x.astype(dtype)
    # Same signed-zero disambiguation as in `argmax`.
    is_negative_zero = (x == 0.0) & np.signbit(x)
    x = np.where(is_negative_zero, -np.finfo(x.dtype).tiny, x)
    return np.argmin(x, axis=axis, keepdims=keepdims).astype("int32")


def argsort(x, axis=-1):
    """Indices that would sort `x` along `axis`, as int32."""
    axis = standardize_axis_for_numpy(axis)
    return np.argsort(x, axis=axis).astype("int32")
def array(x, dtype=None):
    """Create a backend tensor from `x`."""
    return convert_to_tensor(x, dtype=dtype)


def view(x, dtype=None):
    """Reinterpret the bytes of `x` as `dtype` without copying."""
    x = convert_to_tensor(x)
    return x.view(dtype=dtype)


def average(x, axis=None, weights=None):
    """(Weighted) average over `axis`, computed in floating point."""
    axis = standardize_axis_for_numpy(axis)
    x = convert_to_tensor(x)
    dtypes_to_resolve = [x.dtype, float]
    if weights is not None:
        weights = convert_to_tensor(weights)
        dtypes_to_resolve.append(weights.dtype)
    dtype = dtypes.result_type(*dtypes_to_resolve)
    x = x.astype(dtype)
    if weights is not None:
        weights = weights.astype(dtype)
    return np.average(x, weights=weights, axis=axis)
def bartlett(x):
    """Bartlett window of length `x`, in the configured floatx."""
    x = convert_to_tensor(x)
    return np.bartlett(x).astype(config.floatx())


def hamming(x):
    """Hamming window of length `x`, in the configured floatx."""
    x = convert_to_tensor(x)
    return np.hamming(x).astype(config.floatx())


def hanning(x):
    """Hanning window of length `x`, in the configured floatx."""
    x = convert_to_tensor(x)
    return np.hanning(x).astype(config.floatx())


def heaviside(x1, x2):
    """Heaviside step of `x1` with `x2` as the value at zero."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(x1.dtype, x2.dtype)
    # Small ints promote to floatx, int64 to float64 (jax-style promotion).
    if dtype in ["int8", "int16", "int32", "uint8", "uint16", "uint32"]:
        dtype = config.floatx()
    elif dtype in ["int64"]:
        dtype = "float64"
    return np.heaviside(x1, x2).astype(dtype)


def kaiser(x, beta):
    """Kaiser window of length `x` with shape parameter `beta`."""
    x = convert_to_tensor(x)
    return np.kaiser(x, beta).astype(config.floatx())
def bincount(x, weights=None, minlength=0, sparse=False):
    """Count occurrences of each value in an int tensor (rowwise for 2-D).

    Raises:
        ValueError: If `sparse=True` (unsupported on this backend).
    """
    if sparse:
        raise ValueError("Unsupported value `sparse=True` with numpy backend")
    x = convert_to_tensor(x)
    dtypes_to_resolve = [x.dtype]
    if weights is not None:
        weights = convert_to_tensor(weights)
        dtypes_to_resolve.append(weights.dtype)
        dtype = dtypes.result_type(*dtypes_to_resolve)
    else:
        dtype = "int32"
    if len(x.shape) == 2:
        # np.bincount is 1-D only, so map it over the rows.
        if weights is None:

            def bincount_fn(arr):
                return np.bincount(arr, minlength=minlength)

            bincounts = list(map(bincount_fn, x))
        else:

            def bincount_fn(arr_w):
                return np.bincount(
                    arr_w[0], weights=arr_w[1], minlength=minlength
                )

            bincounts = list(map(bincount_fn, zip(x, weights)))

        return np.stack(bincounts).astype(dtype)
    return np.bincount(x, weights, minlength).astype(dtype)


def bitwise_and(x, y):
    """Elementwise bitwise AND after promoting to a common dtype."""
    x = convert_to_tensor(x)
    y = convert_to_tensor(y)
    dtype = dtypes.result_type(x.dtype, y.dtype)
    x = x.astype(dtype)
    y = y.astype(dtype)
    return np.bitwise_and(x, y)


def bitwise_invert(x):
    """Elementwise bitwise NOT."""
    x = convert_to_tensor(x)
    return np.bitwise_not(x)


def bitwise_not(x):
    """Alias of `bitwise_invert`."""
    return bitwise_invert(x)


def bitwise_or(x, y):
    """Elementwise bitwise OR after promoting to a common dtype."""
    x = convert_to_tensor(x)
    y = convert_to_tensor(y)
    dtype = dtypes.result_type(x.dtype, y.dtype)
    x = x.astype(dtype)
    y = y.astype(dtype)
    return np.bitwise_or(x, y)


def bitwise_xor(x, y):
    """Elementwise bitwise XOR after promoting to a common dtype."""
    x = convert_to_tensor(x)
    y = convert_to_tensor(y)
    dtype = dtypes.result_type(x.dtype, y.dtype)
    x = x.astype(dtype)
    y = y.astype(dtype)
    return np.bitwise_xor(x, y)


def bitwise_left_shift(x, y):
    """Shift the bits of `x` left by `y` positions."""
    x = convert_to_tensor(x)
    # A bare python int shift amount keeps x's dtype untouched.
    if not isinstance(y, int):
        y = convert_to_tensor(y)
        dtype = dtypes.result_type(x.dtype, y.dtype)
        x = x.astype(dtype)
        y = y.astype(dtype)
    return np.left_shift(x, y)


def left_shift(x, y):
    """Alias of `bitwise_left_shift`."""
    return bitwise_left_shift(x, y)


def bitwise_right_shift(x, y):
    """Shift the bits of `x` right by `y` positions."""
    x = convert_to_tensor(x)
    # A bare python int shift amount keeps x's dtype untouched.
    if not isinstance(y, int):
        y = convert_to_tensor(y)
        dtype = dtypes.result_type(x.dtype, y.dtype)
        x = x.astype(dtype)
        y = y.astype(dtype)
    return np.right_shift(x, y)


def right_shift(x, y):
    """Alias of `bitwise_right_shift`."""
    return bitwise_right_shift(x, y)
def blackman(x):
    """Blackman window of length `x`, in the configured floatx."""
    x = convert_to_tensor(x)
    return np.blackman(x).astype(config.floatx())


def broadcast_to(x, shape):
    """Broadcast `x` to `shape` (read-only view semantics of NumPy)."""
    return np.broadcast_to(x, shape)


def cbrt(x):
    """Elementwise cube root, always in floating point."""
    x = convert_to_tensor(x)
    dtype = standardize_dtype(x.dtype)
    # Small ints/bool promote to floatx, int64 to float64 (jax-style).
    if dtype in ["bool", "int8", "int16", "int32", "uint8", "uint16", "uint32"]:
        dtype = config.floatx()
    elif dtype == "int64":
        dtype = "float64"
    return np.cbrt(x).astype(dtype)


def ceil(x):
    """Elementwise ceiling, computed in floating point."""
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = x.astype(dtype)
    return np.ceil(x)


def clip(x, x_min, x_max):
    """Clamp `x` into `[x_min, x_max]`; bool inputs come back as int32."""
    x = convert_to_tensor(x)
    dtype = standardize_dtype(x.dtype)
    if dtype == "bool":
        dtype = "int32"
    return np.clip(x, x_min, x_max).astype(dtype)
def concatenate(xs, axis=0):
    """Concatenate tensors along `axis`, promoting to a common dtype."""
    axis = standardize_axis_for_numpy(axis)
    dtype_set = set([getattr(x, "dtype", type(x)) for x in xs])
    if len(dtype_set) > 1:
        dtype = dtypes.result_type(*dtype_set)
        xs = tree.map_structure(
            lambda x: convert_to_tensor(x).astype(dtype), xs
        )
    return np.concatenate(xs, axis=axis)


def conjugate(x):
    """Elementwise complex conjugate."""
    return np.conjugate(x)


def conj(x):
    """Alias of `conjugate`."""
    return conjugate(x)


def copy(x):
    """Return a copy of `x`."""
    return np.copy(x)


def cos(x):
    """Elementwise cosine, computed in floating point."""
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = x.astype(dtype)
    return np.cos(x)


def cosh(x):
    """Elementwise hyperbolic cosine, computed in floating point."""
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        dtype = config.floatx()
    else:
        dtype = dtypes.result_type(x.dtype, float)
    x = x.astype(dtype)
    return np.cosh(x)


def count_nonzero(x, axis=None):
    """Number of nonzero entries along `axis`, as int32."""
    axis = standardize_axis_for_numpy(axis)
    # np.count_nonzero will return python int when axis=None, so we need
    # to convert_to_tensor
    return convert_to_tensor(np.count_nonzero(x, axis=axis)).astype("int32")
def cross(x1, x2, axisa=-1, axisb=-1, axisc=-1, axis=None):
    """Cross product of 3-vectors (or 2-vectors) along the given axes."""
    axis = standardize_axis_for_numpy(axis)
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(x1.dtype, x2.dtype)
    x1 = x1.astype(dtype)
    x2 = x2.astype(dtype)
    return np.cross(
        x1,
        x2,
        axisa=axisa,
        axisb=axisb,
        axisc=axisc,
        axis=axis,
    )


def cumprod(x, axis=None, dtype=None):
    """Cumulative product along `axis`; bool accumulates as int32."""
    axis = standardize_axis_for_numpy(axis)
    dtype = dtypes.result_type(dtype or x.dtype)
    if dtype == "bool":
        dtype = "int32"
    return np.cumprod(x, axis=axis, dtype=dtype)


def cumsum(x, axis=None, dtype=None):
    """Cumulative sum along `axis`; bool accumulates as int32."""
    axis = standardize_axis_for_numpy(axis)
    dtype = dtypes.result_type(dtype or x.dtype)
    if dtype == "bool":
        dtype = "int32"
    return np.cumsum(x, axis=axis, dtype=dtype)
def deg2rad(x):
    """Convert degrees to radians, choosing the float dtype jax would."""
    x = convert_to_tensor(x)
    if x.dtype in ["int64", "float64"]:
        dtype = "float64"
    elif x.dtype in ["bfloat16", "float16"]:
        dtype = x.dtype
    else:
        dtype = config.floatx()
    return np.deg2rad(x).astype(dtype)


def diag(x, k=0):
    """Extract a diagonal (2-D input) or build a diagonal matrix (1-D)."""
    return np.diag(x, k=k)


def diagflat(x, k=0):
    """2-D array with the flattened input on the k-th diagonal."""
    return np.diagflat(x, k=k)


def diagonal(x, offset=0, axis1=0, axis2=1):
    """Return the specified diagonals of `x`."""
    axis1 = standardize_axis_for_numpy(axis1)
    axis2 = standardize_axis_for_numpy(axis2)
    return np.diagonal(x, offset=offset, axis1=axis1, axis2=axis2)


def diff(a, n=1, axis=-1):
    """n-th discrete difference along `axis`."""
    return np.diff(a, n=n, axis=axis)


def digitize(x, bins):
    """Bin index for each value of `x`, as int32."""
    return np.digitize(x, bins).astype(np.int32)


def dot(x1, x2):
    """Dot product after promoting both operands to a common dtype."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(x1.dtype, x2.dtype)
    x1 = x1.astype(dtype)
    x2 = x2.astype(dtype)
    return np.dot(x1, x2)


def empty(shape, dtype=None):
    """Uninitialized tensor; dtype defaults to the configured floatx."""
    dtype = dtype or config.floatx()
    return np.empty(shape, dtype=dtype)


def empty_like(x, dtype=None):
    """Uninitialized tensor with the shape of `x`."""
    return np.empty_like(x, dtype=dtype)


def equal(x1, x2):
    """Elementwise equality."""
    return np.equal(x1, x2)
def exp(x):
    """Elementwise exponential; int/bool inputs promote to floatx."""
    x = convert_to_tensor(x)
    ori_dtype = standardize_dtype(x.dtype)
    if "int" in ori_dtype or ori_dtype == "bool":
        x = x.astype(config.floatx())
    return np.exp(x)


def exp2(x):
    """Elementwise base-2 exponential; int/bool inputs promote to floatx."""
    x = convert_to_tensor(x)
    ori_dtype = standardize_dtype(x.dtype)
    if "int" in ori_dtype or ori_dtype == "bool":
        x = x.astype(config.floatx())
    return np.exp2(x)


def expand_dims(x, axis):
    """Insert a size-1 axis at `axis`."""
    axis = standardize_axis_for_numpy(axis)
    return np.expand_dims(x, axis)


def expm1(x):
    """Elementwise `exp(x) - 1`; int/bool inputs promote to floatx."""
    x = convert_to_tensor(x)
    ori_dtype = standardize_dtype(x.dtype)
    if "int" in ori_dtype or ori_dtype == "bool":
        x = x.astype(config.floatx())
    return np.expm1(x)


def flip(x, axis=None):
    """Reverse the order of elements along `axis`."""
    axis = standardize_axis_for_numpy(axis)
    return np.flip(x, axis=axis)


def floor(x):
    """Elementwise floor, computed in floating point."""
    x = convert_to_tensor(x)
    dtype = (
        config.floatx()
        if standardize_dtype(x.dtype) == "int64"
        else dtypes.result_type(x.dtype, float)
    )
    x = x.astype(dtype)
    return np.floor(x)
def full(shape, fill_value, dtype=None):
    """Constant tensor of `fill_value`; dtype defaults to floatx."""
    dtype = dtype or config.floatx()
    return np.full(shape, fill_value, dtype=dtype)


def full_like(x, fill_value, dtype=None):
    """Constant tensor of `fill_value` with the shape of `x`."""
    return np.full_like(x, fill_value, dtype=dtype)


def gcd(x1, x2):
    """Elementwise greatest common divisor."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(x1.dtype, x2.dtype)
    return np.gcd(x1, x2).astype(dtype)


def greater(x1, x2):
    """Elementwise `x1 > x2`."""
    return np.greater(x1, x2)


def greater_equal(x1, x2):
    """Elementwise `x1 >= x2`."""
    return np.greater_equal(x1, x2)


def hstack(xs):
    """Stack tensors horizontally, promoting to a common dtype."""
    dtype_set = set([getattr(x, "dtype", type(x)) for x in xs])
    if len(dtype_set) > 1:
        dtype = dtypes.result_type(*dtype_set)
        xs = tree.map_structure(
            lambda x: convert_to_tensor(x).astype(dtype), xs
        )
    return np.hstack(xs)


def hypot(x1, x2):
    """Elementwise `sqrt(x1**2 + x2**2)` in floating point."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(x1.dtype, x2.dtype)
    # Small ints promote to floatx, int64 to float64 (jax-style promotion).
    if dtype in ["int8", "int16", "int32", "uint8", "uint16", "uint32"]:
        dtype = config.floatx()
    elif dtype in ["int64"]:
        dtype = "float64"
    return np.hypot(x1, x2).astype(dtype)


def identity(n, dtype=None):
    """n x n identity matrix; dtype defaults to floatx."""
    dtype = dtype or config.floatx()
    return np.identity(n, dtype=dtype)


def imag(x):
    """Imaginary part of `x`."""
    return np.imag(x)


def isclose(x1, x2, rtol=1e-5, atol=1e-8, equal_nan=False):
    """Elementwise approximate equality."""
    return np.isclose(x1, x2, rtol, atol, equal_nan)


def isfinite(x):
    """Elementwise finiteness test."""
    return np.isfinite(x)


def isin(x1, x2, assume_unique=False, invert=False):
    """Test membership of `x1`'s elements in `x2`."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    return np.isin(x1, x2, assume_unique=assume_unique, invert=invert)


def isinf(x):
    """Elementwise infinity test."""
    return np.isinf(x)


def isnan(x):
    """Elementwise NaN test."""
    return np.isnan(x)


def isneginf(x):
    """Elementwise negative-infinity test."""
    x = convert_to_tensor(x)
    return np.isneginf(x)


def isposinf(x):
    """Elementwise positive-infinity test."""
    x = convert_to_tensor(x)
    return np.isposinf(x)


def isreal(x):
    """Elementwise real-valuedness test."""
    x = convert_to_tensor(x)
    return np.isreal(x)


def kron(x1, x2):
    """Kronecker product after promoting to a common dtype."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(x1.dtype, x2.dtype)
    return np.kron(x1, x2).astype(dtype)


def lcm(x1, x2):
    """Elementwise least common multiple."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(x1.dtype, x2.dtype)
    return np.lcm(x1, x2).astype(dtype)


def ldexp(x1, x2):
    """Elementwise `x1 * 2**x2`; `x2` must be integer-typed."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(x1.dtype, x2.dtype, float)
    if standardize_dtype(x2.dtype) not in dtypes.INT_TYPES:
        raise TypeError(
            f"ldexp exponent must be an integer type. "
            f"Received: x2 dtype={x2.dtype}"
        )
    return np.ldexp(x1, x2).astype(dtype)


def less(x1, x2):
    """Elementwise `x1 < x2`."""
    return np.less(x1, x2)


def less_equal(x1, x2):
    """Elementwise `x1 <= x2`."""
    return np.less_equal(x1, x2)
def linspace(
    start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0
):
    """`num` evenly spaced samples from `start` to `stop`."""
    axis = standardize_axis_for_numpy(axis)
    if dtype is None:
        dtypes_to_resolve = [
            getattr(start, "dtype", type(start)),
            getattr(stop, "dtype", type(stop)),
            float,
        ]
        dtype = dtypes.result_type(*dtypes_to_resolve)
    return np.linspace(
        start,
        stop,
        num=num,
        endpoint=endpoint,
        retstep=retstep,
        dtype=dtype,
        axis=axis,
    )


def log(x):
    """Elementwise natural logarithm, computed in floating point."""
    x = convert_to_tensor(x)
    dtype = (
        config.floatx()
        if standardize_dtype(x.dtype) == "int64"
        else dtypes.result_type(x.dtype, float)
    )
    return np.log(x, dtype=dtype)


def log10(x):
    """Elementwise base-10 logarithm, computed in floating point."""
    x = convert_to_tensor(x)
    dtype = (
        config.floatx()
        if standardize_dtype(x.dtype) == "int64"
        else dtypes.result_type(x.dtype, float)
    )
    return np.log10(x, dtype=dtype)


def log1p(x):
    """Elementwise `log(1 + x)`, computed in floating point."""
    x = convert_to_tensor(x)
    dtype = (
        config.floatx()
        if standardize_dtype(x.dtype) == "int64"
        else dtypes.result_type(x.dtype, float)
    )
    return np.log1p(x, dtype=dtype)


def log2(x):
    """Elementwise base-2 logarithm, computed in floating point."""
    x = convert_to_tensor(x)
    dtype = (
        config.floatx()
        if standardize_dtype(x.dtype) == "int64"
        else dtypes.result_type(x.dtype, float)
    )
    return np.log2(x, dtype=dtype)


def logaddexp(x1, x2):
    """`log(exp(x1) + exp(x2))` computed without overflow."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(x1.dtype, x2.dtype, float)
    x1 = x1.astype(dtype)
    x2 = x2.astype(dtype)
    return np.logaddexp(x1, x2)


def logaddexp2(x1, x2):
    """`log2(2**x1 + 2**x2)` computed without overflow."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(x1.dtype, x2.dtype, float)
    return np.logaddexp2(x1, x2).astype(dtype)


def logical_and(x1, x2):
    """Elementwise logical AND."""
    return np.logical_and(x1, x2)


def logical_not(x):
    """Elementwise logical NOT."""
    return np.logical_not(x)


def logical_or(x1, x2):
    """Elementwise logical OR."""
    return np.logical_or(x1, x2)
def logspace(start, stop, num=50, endpoint=True, base=10, dtype=None, axis=0):
    """`num` samples spaced evenly on a log scale between the exponents."""
    if dtype is None:
        dtypes_to_resolve = [
            getattr(start, "dtype", type(start)),
            getattr(stop, "dtype", type(stop)),
            float,
        ]
        dtype = dtypes.result_type(*dtypes_to_resolve)
    return np.logspace(
        start,
        stop,
        num=num,
        endpoint=endpoint,
        base=base,
        dtype=dtype,
        axis=axis,
    )


def maximum(x1, x2):
    """Elementwise maximum with Keras dtype promotion for scalars."""
    # Python scalars stay weakly typed until the common dtype is resolved.
    if not isinstance(x1, (int, float)):
        x1 = convert_to_tensor(x1)
    if not isinstance(x2, (int, float)):
        x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(
        getattr(x1, "dtype", type(x1)),
        getattr(x2, "dtype", type(x2)),
    )
    x1 = convert_to_tensor(x1, dtype)
    x2 = convert_to_tensor(x2, dtype)
    return np.maximum(x1, x2)
def median(x, axis=None, keepdims=False):
    """Median along `axis`; the result is floating per Keras promotion.

    Fix: convert `x` to a tensor first, as every sibling op in this module
    does — previously a plain python list/scalar input raised
    `AttributeError` on `x.dtype`.
    """
    x = convert_to_tensor(x)
    dtype = dtypes.result_type(x.dtype, float)
    return np.median(x, axis=axis, keepdims=keepdims).astype(dtype)
def meshgrid(*x, indexing="xy"):
    """Coordinate matrices from coordinate vectors."""
    return np.meshgrid(*x, indexing=indexing)


def min(x, axis=None, keepdims=False, initial=None):
    """Minimum over `axis` (intentionally shadows builtins.min here)."""
    axis = standardize_axis_for_numpy(axis)
    return np.min(x, axis=axis, keepdims=keepdims, initial=initial)


def minimum(x1, x2):
    """Elementwise minimum with Keras dtype promotion for scalars."""
    # Python scalars stay weakly typed until the common dtype is resolved.
    if not isinstance(x1, (int, float)):
        x1 = convert_to_tensor(x1)
    if not isinstance(x2, (int, float)):
        x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(
        getattr(x1, "dtype", type(x1)),
        getattr(x2, "dtype", type(x2)),
    )
    x1 = convert_to_tensor(x1, dtype)
    x2 = convert_to_tensor(x2, dtype)
    return np.minimum(x1, x2)


def mod(x1, x2):
    """Elementwise remainder; bool operands are computed as int32."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    dtype = dtypes.result_type(x1.dtype, x2.dtype)
    if dtype == "bool":
        dtype = "int32"
    x1 = x1.astype(dtype)
    x2 = x2.astype(dtype)
    return np.mod(x1, x2)
def moveaxis(x, source, destination):
return np.moveaxis(x, source=source, destination=destination)
def nan_to_num(x, nan=0.0, posinf=None, neginf=None):
return np.nan_to_num(x, nan=nan, posinf=posinf, neginf=neginf)
def ndim(x):
return np.ndim(x)
def nonzero(x):
return tuple(indices.astype("int32") for indices in np.nonzero(x))
def not_equal(x1, x2):
return np.not_equal(x1, x2)
def zeros_like(x, dtype=None):
return np.zeros_like(x, dtype=dtype)
def ones_like(x, dtype=None):
return np.ones_like(x, dtype=dtype)
def outer(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
dtype = dtypes.result_type(x1.dtype, x2.dtype)
x1 = x1.astype(dtype)
x2 = x2.astype(dtype)
return np.outer(x1, x2)
def pad(x, pad_width, mode="constant", constant_values=None):
kwargs = {}
if constant_values is not None:
if mode != "constant":
raise ValueError(
"Argument `constant_values` can only be "
"provided when `mode == 'constant'`. "
f"Received: mode={mode}"
)
kwargs["constant_values"] = constant_values
return np.pad(x, pad_width, mode=mode, **kwargs)
def prod(x, axis=None, keepdims=False, dtype=None):
axis = standardize_axis_for_numpy(axis)
x = convert_to_tensor(x)
if dtype is None:
dtype = dtypes.result_type(x.dtype)
if dtype in ("bool", "int8", "int16"):
dtype = "int32"
elif dtype in ("uint8", "uint16"):
dtype = "uint32"
return np.prod(x, axis=axis, keepdims=keepdims, dtype=dtype)
def quantile(x, q, axis=None, method="linear", keepdims=False):
axis = standardize_axis_for_numpy(axis)
x = convert_to_tensor(x)
ori_dtype = standardize_dtype(x.dtype)
# np.quantile doesn't support bool
if ori_dtype == "bool":
x = x.astype(config.floatx())
if ori_dtype == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
return np.quantile(
x, q, axis=axis, method=method, keepdims=keepdims
).astype(dtype)
def ravel(x):
return np.ravel(x)
def unravel_index(indices, shape):
dtype = dtypes.result_type(indices.dtype)
return tuple(
indices.astype(dtype) for indices in np.unravel_index(indices, shape)
)
def real(x):
return np.real(x)
def reciprocal(x):
return np.reciprocal(x)
def repeat(x, repeats, axis=None):
return np.repeat(x, repeats, axis=axis)
def reshape(x, newshape):
return np.reshape(x, newshape)
def roll(x, shift, axis=None):
return np.roll(x, shift, axis=axis)
def searchsorted(sorted_sequence, values, side="left"):
if ndim(sorted_sequence) != 1:
raise ValueError(
"`searchsorted` only supports 1-D sorted sequences. "
"You can use `keras.ops.vectorized_map` "
"to extend it to N-D sequences. Received: "
f"sorted_sequence.shape={sorted_sequence.shape}"
)
out_type = (
"int32"
if sorted_sequence.shape[0] <= np.iinfo(np.int32).max
else "int64"
)
return np.searchsorted(sorted_sequence, values, side=side).astype(out_type)
def sign(x):
return np.sign(x)
def signbit(x):
return np.signbit(x)
def sin(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = x.astype(dtype)
return np.sin(x)
def sinh(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = x.astype(dtype)
return np.sinh(x)
def size(x):
return np.size(x)
def sort(x, axis=-1):
axis = standardize_axis_for_numpy(axis)
return np.sort(x, axis=axis)
def split(x, indices_or_sections, axis=0):
axis = standardize_axis_for_numpy(axis)
return np.split(x, indices_or_sections, axis=axis)
def array_split(x, indices_or_sections, axis=0):
axis = standardize_axis_for_numpy(axis)
return np.array_split(x, indices_or_sections, axis=axis)
def stack(x, axis=0):
axis = standardize_axis_for_numpy(axis)
dtype_set = set([getattr(a, "dtype", type(a)) for a in x])
if len(dtype_set) > 1:
dtype = dtypes.result_type(*dtype_set)
x = tree.map_structure(lambda a: convert_to_tensor(a).astype(dtype), x)
return np.stack(x, axis=axis)
def std(x, axis=None, keepdims=False):
axis = standardize_axis_for_numpy(axis)
x = convert_to_tensor(x)
ori_dtype = standardize_dtype(x.dtype)
if "int" in ori_dtype or ori_dtype == "bool":
x = x.astype(config.floatx())
return np.std(x, axis=axis, keepdims=keepdims)
def swapaxes(x, axis1, axis2):
return np.swapaxes(x, axis1=axis1, axis2=axis2)
def take(x, indices, axis=None):
axis = standardize_axis_for_numpy(axis)
return np.take(x, indices, axis=axis)
def take_along_axis(x, indices, axis=None):
axis = standardize_axis_for_numpy(axis)
return np.take_along_axis(x, indices, axis=axis)
def tan(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = x.astype(dtype)
return np.tan(x)
def tanh(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = x.astype(dtype)
return np.tanh(x)
def tensordot(x1, x2, axes=2):
axes = tuple(axes) if isinstance(axes, list) else axes
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
dtype = dtypes.result_type(x1.dtype, x2.dtype)
x1 = x1.astype(dtype)
x2 = x2.astype(dtype)
return np.tensordot(x1, x2, axes=axes)
def round(x, decimals=0):
return np.round(x, decimals=decimals)
def tile(x, repeats):
return np.tile(x, repeats)
def trace(x, offset=0, axis1=0, axis2=1):
axis1 = standardize_axis_for_numpy(axis1)
axis2 = standardize_axis_for_numpy(axis2)
x = convert_to_tensor(x)
dtype = standardize_dtype(x.dtype)
if dtype in ("bool", "int8", "int16"):
dtype = "int32"
elif dtype in ("uint8", "uint16"):
dtype = "uint32"
return np.trace(x, offset=offset, axis1=axis1, axis2=axis2, dtype=dtype)
def tri(N, M=None, k=0, dtype=None):
dtype = dtype or config.floatx()
return np.tri(N, M=M, k=k, dtype=dtype)
def tril(x, k=0):
return np.tril(x, k=k)
def triu(x, k=0):
return np.triu(x, k=k)
def trunc(x):
x = convert_to_tensor(x)
dtype = standardize_dtype(x.dtype)
if "int" in dtype or "bool" == dtype:
return x
return np.trunc(x)
def vdot(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
dtype = dtypes.result_type(x1.dtype, x2.dtype)
x1 = x1.astype(dtype)
x2 = x2.astype(dtype)
return np.vdot(x1, x2)
def inner(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
dtype = dtypes.result_type(x1.dtype, x2.dtype)
x1 = x1.astype(dtype)
x2 = x2.astype(dtype)
return np.inner(x1, x2)
def vstack(xs):
dtype_set = set([getattr(x, "dtype", type(x)) for x in xs])
if len(dtype_set) > 1:
dtype = dtypes.result_type(*dtype_set)
xs = tree.map_structure(
lambda x: convert_to_tensor(x).astype(dtype), xs
)
return np.vstack(xs)
def vectorize(pyfunc, *, excluded=None, signature=None):
return np.vectorize(pyfunc, excluded=excluded, signature=signature)
def where(condition, x1=None, x2=None):
if x1 is not None and x2 is not None:
if not isinstance(x1, (int, float)):
x1 = convert_to_tensor(x1)
if not isinstance(x2, (int, float)):
x2 = convert_to_tensor(x2)
dtype = dtypes.result_type(
getattr(x1, "dtype", type(x1)),
getattr(x2, "dtype", type(x2)),
)
x1 = convert_to_tensor(x1, dtype)
x2 = convert_to_tensor(x2, dtype)
return np.where(condition, x1, x2)
else:
return np.where(condition)
def divide(x1, x2):
if not isinstance(x1, (int, float)):
x1 = convert_to_tensor(x1)
if not isinstance(x2, (int, float)):
x2 = convert_to_tensor(x2)
dtype = dtypes.result_type(
getattr(x1, "dtype", type(x1)),
getattr(x2, "dtype", type(x2)),
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/numpy/trainer.py | keras/src/backend/numpy/trainer.py | import numpy as np
from keras.src import backend
from keras.src import callbacks as callbacks_module
from keras.src import tree
from keras.src.backend.common import standardize_dtype
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.numpy.core import is_tensor
from keras.src.trainers import trainer as base_trainer
from keras.src.trainers.data_adapters import data_adapter_utils
from keras.src.trainers.epoch_iterator import EpochIterator
from keras.src.utils import traceback_utils
class NumpyTrainer(base_trainer.Trainer):
def __init__(self):
super().__init__()
self.test_function = None
self.predict_function = None
def test_step(self, data):
(
x,
y,
sample_weight,
) = data_adapter_utils.unpack_x_y_sample_weight(data)
if self._call_has_training_arg:
y_pred = self(x, training=False)
else:
y_pred = self(x)
loss = self._compute_loss(
x=x, y=y, y_pred=y_pred, sample_weight=sample_weight, training=False
)
self._loss_tracker.update_state(
loss, sample_weight=tree.flatten(x)[0].shape[0]
)
return self.compute_metrics(x, y, y_pred, sample_weight=sample_weight)
def predict_step(self, data):
x, _, _ = data_adapter_utils.unpack_x_y_sample_weight(data)
if self._call_has_training_arg:
y_pred = self(x, training=False)
else:
y_pred = self(x)
return y_pred
def make_test_function(self, force=False):
if self.test_function is not None and not force:
return self.test_function
def one_test_step(data):
data = data[0]
return self.test_step(data)
def multi_test_steps(data):
for single_step_data in data:
logs = one_test_step([single_step_data])
return logs
if self.steps_per_execution > 1:
test_step = multi_test_steps
else:
test_step = one_test_step
self.test_function = test_step
def make_predict_function(self, force=False):
if self.predict_function is not None and not force:
return self.predict_function
def one_predict_step(data):
data = data[0]
return self.predict_step(data)
def multi_predict_steps(data):
outputs = one_predict_step(data[:1])
for single_step_data in data[1:]:
step_outputs = one_predict_step([single_step_data])
outputs = tree.map_structure(
lambda t1, t2: np.concatenate([t1, t2]),
outputs,
step_outputs,
)
return outputs
if self.steps_per_execution > 1:
predict_step = multi_predict_steps
else:
predict_step = one_predict_step
self.predict_function = predict_step
def _symbolic_build(self, data_batch):
model_unbuilt = not all(layer.built for layer in self._flatten_layers())
compile_metrics_unbuilt = (
self._compile_metrics is not None
and not self._compile_metrics.built
)
compile_loss_unbuilt = (
self._compile_loss is not None and not self._compile_loss.built
)
if model_unbuilt or compile_metrics_unbuilt or compile_loss_unbuilt:
# Create symbolic tensors matching an input batch.
def to_symbolic_input(v):
if is_tensor(v):
return KerasTensor(v.shape, standardize_dtype(v.dtype))
return v
data_batch = tree.map_structure(to_symbolic_input, data_batch)
(
x,
y,
sample_weight,
) = data_adapter_utils.unpack_x_y_sample_weight(data_batch)
# Build all model state with `backend.compute_output_spec`.
try:
y_pred = backend.compute_output_spec(self, x)
except:
raise RuntimeError(
"Unable to automatically build the model. "
"Please build it yourself before calling "
"fit/evaluate/predict. "
"A model is 'built' when its variables have "
"been created and its `self.built` attribute "
"is True. Usually, calling the model on a batch "
"of data is the right way to build it."
)
if compile_metrics_unbuilt:
# Build all metric state with `backend.compute_output_spec`.
backend.compute_output_spec(
self.compute_metrics,
x,
y,
y_pred,
sample_weight=sample_weight,
)
if compile_loss_unbuilt:
# Build `CompileLoss` state with `backend.compute_output_spec`.
backend.compute_output_spec(
self._compute_loss,
x,
y,
y_pred,
sample_weight=sample_weight,
)
self._post_build()
def fit(
self,
x=None,
y=None,
batch_size=None,
epochs=1,
verbose="auto",
callbacks=None,
validation_split=0.0,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None,
validation_batch_size=None,
validation_freq=1,
):
raise NotImplementedError("fit not implemented for NumPy backend.")
@traceback_utils.filter_traceback
def predict(
self, x, batch_size=None, verbose="auto", steps=None, callbacks=None
):
# Create an iterator that yields batches of input data.
epoch_iterator = EpochIterator(
x=x,
batch_size=batch_size,
steps_per_epoch=steps,
shuffle=False,
steps_per_execution=self.steps_per_execution,
)
# Container that configures and calls callbacks.
if not isinstance(callbacks, callbacks_module.CallbackList):
callbacks = callbacks_module.CallbackList(
callbacks,
add_progbar=verbose != 0,
verbose=verbose,
epochs=1,
steps=epoch_iterator.num_batches,
model=self,
)
def append_to_outputs(batch_outputs, outputs):
if outputs is None:
outputs = tree.map_structure(
lambda batch_output: [batch_output],
batch_outputs,
)
else:
tree.map_structure_up_to(
batch_outputs,
lambda output, batch_output: output.append(batch_output),
outputs,
batch_outputs,
)
return outputs
self.make_predict_function()
self.stop_predicting = False
callbacks.on_predict_begin()
outputs = None
for begin_step, end_step, data in epoch_iterator:
callbacks.on_predict_batch_begin(begin_step)
batch_outputs = self.predict_function(data)
outputs = append_to_outputs(batch_outputs, outputs)
callbacks.on_predict_batch_end(end_step, {"outputs": batch_outputs})
if self.stop_predicting:
break
callbacks.on_predict_end()
return tree.map_structure_up_to(batch_outputs, np.concatenate, outputs)
@traceback_utils.filter_traceback
def evaluate(
self,
x=None,
y=None,
batch_size=None,
verbose="auto",
sample_weight=None,
steps=None,
callbacks=None,
return_dict=False,
**kwargs,
):
# TODO: respect compiled trainable state
use_cached_eval_dataset = kwargs.pop("_use_cached_eval_dataset", False)
if kwargs:
raise ValueError(f"Arguments not recognized: {kwargs}")
if use_cached_eval_dataset:
epoch_iterator = self._eval_epoch_iterator
else:
# Create an iterator that yields batches of input/target data.
epoch_iterator = EpochIterator(
x=x,
y=y,
sample_weight=sample_weight,
batch_size=batch_size,
steps_per_epoch=steps,
shuffle=False,
steps_per_execution=self.steps_per_execution,
)
if not all(layer.built for layer in self._flatten_layers()):
# Build the model on one batch of data.
for _, _, data in epoch_iterator:
data_batch = data[0]
self._symbolic_build(data_batch)
break
# Container that configures and calls callbacks.
if not isinstance(callbacks, callbacks_module.CallbackList):
callbacks = callbacks_module.CallbackList(
callbacks,
add_progbar=verbose != 0,
verbose=verbose,
epochs=1,
steps=epoch_iterator.num_batches,
model=self,
)
self.make_test_function()
self.stop_evaluating = False
callbacks.on_test_begin()
logs = {}
self.reset_metrics()
for begin_step, end_step, data in epoch_iterator:
callbacks.on_test_batch_begin(begin_step)
logs = self.test_function(data)
callbacks.on_test_batch_end(end_step, logs)
if self.stop_evaluating:
break
logs = self._get_metrics_result_or_logs(logs)
callbacks.on_test_end(logs)
if return_dict:
return logs
return self._flatten_metrics_in_order(logs)
def train_on_batch(
self,
x,
y=None,
sample_weight=None,
class_weight=None,
return_dict=False,
):
raise NotImplementedError(
"train_on_batch not implemented for NumPy backend."
)
def test_on_batch(
self,
x,
y=None,
sample_weight=None,
return_dict=False,
):
self._assert_compile_called("test_on_batch")
data = (x, y, sample_weight)
# Maybe build model
self._symbolic_build(data)
self.make_test_function()
logs = self.test_function([data])
logs = tree.map_structure(lambda x: np.array(x), logs)
if return_dict:
return logs
return self._flatten_metrics_in_order(logs)
def predict_on_batch(self, x):
self.make_predict_function()
batch_outputs = self.predict_function([(x,)])
batch_outputs = tree.map_structure(
backend.convert_to_numpy, batch_outputs
)
return batch_outputs
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/numpy/random.py | keras/src/backend/numpy/random.py | import numpy as np
from keras.src.backend.config import floatx
from keras.src.backend.numpy.nn import softmax
from keras.src.random.seed_generator import SeedGenerator
from keras.src.random.seed_generator import draw_seed
from keras.src.random.seed_generator import make_default_seed
def normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
dtype = dtype or floatx()
seed = draw_seed(seed)
rng = np.random.default_rng(seed)
return rng.normal(size=shape, loc=mean, scale=stddev).astype(dtype)
def uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
dtype = dtype or floatx()
seed = draw_seed(seed)
rng = np.random.default_rng(seed)
return rng.uniform(size=shape, low=minval, high=maxval).astype(dtype)
def categorical(logits, num_samples, dtype="int64", seed=None):
seed = draw_seed(seed)
rng = np.random.default_rng(seed)
output = []
for logits_instance in logits:
probabilities = softmax(logits_instance)
classes = np.arange(logits_instance.shape[-1])
samples = rng.choice(classes, size=num_samples, p=probabilities)
output.append(samples)
return np.array(output).astype(dtype)
def randint(shape, minval, maxval, dtype="int32", seed=None):
seed = draw_seed(seed)
rng = np.random.default_rng(seed)
output = rng.integers(low=minval, high=maxval, size=shape, dtype=dtype)
return output
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
dtype = dtype or floatx()
seed = draw_seed(seed)
rng = np.random.default_rng(seed)
lower_bound = mean - 2 * stddev
upper_bound = mean + 2 * stddev
flat_shape = np.prod(shape)
random_numbers = np.empty(0)
# loop until we have enough valid numbers to fill our desired shape
while random_numbers.shape[0] < flat_shape:
# Generate a batch of random numbers from a normal distribution
batch = rng.normal(loc=mean, scale=stddev, size=flat_shape)
# Filter the numbers to keep only those within the specified bounds
valid = batch[(batch >= lower_bound) & (batch <= upper_bound)]
# Append the valid numbers to the result array
random_numbers = np.append(random_numbers, valid)
# Truncate the result array to the desired size and reshape it
return random_numbers[:flat_shape].astype(dtype).reshape(shape)
def dropout(inputs, rate, noise_shape=None, seed=None):
dtype = inputs.dtype
seed = draw_seed(seed)
keep_prob = 1.0 - rate
# If noise_shape is not provided, use the shape of inputs
if noise_shape is None:
noise_shape = inputs.shape
else:
# If noise_shape is provided, replace None with corresponding
# input shape
noise_shape = [
n if n is not None else inputs.shape[i]
for i, n in enumerate(noise_shape)
]
rng = np.random.default_rng(seed)
mask = rng.uniform(size=noise_shape) < keep_prob
mask = np.broadcast_to(mask, inputs.shape)
return np.where(
mask, (inputs / keep_prob).astype(dtype), np.zeros_like(inputs)
)
def shuffle(x, axis=0, seed=None):
seed = draw_seed(seed)
rng = np.random.default_rng(seed)
return rng.permuted(x, axis=axis)
def gamma(shape, alpha, dtype=None, seed=None):
dtype = dtype or floatx()
seed = draw_seed(seed)
rng = np.random.default_rng(seed)
return rng.gamma(alpha, scale=1.0, size=shape).astype(dtype)
def binomial(shape, counts, probabilities, dtype=None, seed=None):
dtype = dtype or floatx()
seed = draw_seed(seed)
rng = np.random.default_rng(seed)
sample = rng.binomial(n=counts, p=probabilities, size=shape).astype(dtype)
return sample
def beta(shape, alpha, beta, dtype=None, seed=None):
dtype = dtype or floatx()
seed = draw_seed(seed)
rng = np.random.default_rng(seed)
sample = rng.beta(a=alpha, b=beta, size=shape).astype(dtype)
return sample
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/numpy/__init__.py | keras/src/backend/numpy/__init__.py | from keras.src.backend.common.name_scope import name_scope
from keras.src.backend.numpy import core
from keras.src.backend.numpy import image
from keras.src.backend.numpy import linalg
from keras.src.backend.numpy import math
from keras.src.backend.numpy import nn
from keras.src.backend.numpy import numpy
from keras.src.backend.numpy import random
from keras.src.backend.numpy.core import IS_THREAD_SAFE
from keras.src.backend.numpy.core import SUPPORTS_RAGGED_TENSORS
from keras.src.backend.numpy.core import SUPPORTS_SPARSE_TENSORS
from keras.src.backend.numpy.core import Variable
from keras.src.backend.numpy.core import cast
from keras.src.backend.numpy.core import compute_output_spec
from keras.src.backend.numpy.core import cond
from keras.src.backend.numpy.core import convert_to_numpy
from keras.src.backend.numpy.core import convert_to_tensor
from keras.src.backend.numpy.core import device_scope
from keras.src.backend.numpy.core import is_tensor
from keras.src.backend.numpy.core import random_seed_dtype
from keras.src.backend.numpy.core import shape
from keras.src.backend.numpy.core import vectorized_map
from keras.src.backend.numpy.rnn import cudnn_ok
from keras.src.backend.numpy.rnn import gru
from keras.src.backend.numpy.rnn import lstm
from keras.src.backend.numpy.rnn import rnn
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/numpy/math.py | keras/src/backend/numpy/math.py | import numpy as np
from keras.src.backend import standardize_dtype
from keras.src.backend.common import dtypes
from keras.src.backend.jax.math import fft as jax_fft
from keras.src.backend.jax.math import fft2 as jax_fft2
from keras.src.backend.numpy.core import convert_to_tensor
from keras.src.utils.module_utils import scipy
def _segment_reduction_fn(
data, segment_ids, reduction_method, num_segments, sorted
):
if num_segments is None:
num_segments = np.amax(segment_ids) + 1
valid_indices = segment_ids >= 0 # Ignore segment_ids that are -1
valid_data = data[valid_indices]
valid_segment_ids = segment_ids[valid_indices]
data_shape = list(valid_data.shape)
data_shape[0] = (
num_segments # Replace first dimension (which corresponds to segments)
)
if reduction_method == np.maximum:
result = np.ones(data_shape, dtype=valid_data.dtype) * -np.inf
else:
result = np.zeros(data_shape, dtype=valid_data.dtype)
if sorted:
reduction_method.at(result, valid_segment_ids, valid_data)
else:
sort_indices = np.argsort(valid_segment_ids)
sorted_segment_ids = valid_segment_ids[sort_indices]
sorted_data = valid_data[sort_indices]
reduction_method.at(result, sorted_segment_ids, sorted_data)
return result
def segment_sum(data, segment_ids, num_segments=None, sorted=False):
return _segment_reduction_fn(
data, segment_ids, np.add, num_segments, sorted
)
def segment_max(data, segment_ids, num_segments=None, sorted=False):
return _segment_reduction_fn(
data, segment_ids, np.maximum, num_segments, sorted
)
def top_k(x, k, sorted=True):
if sorted:
# Take the k largest values.
sorted_indices = np.argsort(x, axis=-1)[..., ::-1]
sorted_values = np.take_along_axis(x, sorted_indices, axis=-1)
top_k_values = sorted_values[..., :k]
top_k_indices = sorted_indices[..., :k]
else:
# Partition the array such that all values larger than the k-th
# largest value are to the right of it.
top_k_indices = np.argpartition(x, -k, axis=-1)[..., -k:]
top_k_values = np.take_along_axis(x, top_k_indices, axis=-1)
return top_k_values, top_k_indices
def in_top_k(targets, predictions, k):
targets = targets[:, None]
topk_values = top_k(predictions, k)[0]
targets_values = np.take_along_axis(predictions, targets, axis=-1)
mask = targets_values >= topk_values
return np.any(mask, axis=-1)
def logsumexp(x, axis=None, keepdims=False):
return scipy.special.logsumexp(x, axis=axis, keepdims=keepdims)
def qr(x, mode="reduced"):
if mode not in {"reduced", "complete"}:
raise ValueError(
"`mode` argument value not supported. "
"Expected one of {'reduced', 'complete'}. "
f"Received: mode={mode}"
)
return np.linalg.qr(x, mode=mode)
def extract_sequences(x, sequence_length, sequence_stride):
*batch_shape, _ = x.shape
batch_shape = list(batch_shape)
shape = x.shape[:-1] + (
(x.shape[-1] - (sequence_length - sequence_stride)) // sequence_stride,
sequence_length,
)
strides = x.strides[:-1] + (
sequence_stride * x.strides[-1],
x.strides[-1],
)
x = np.lib.stride_tricks.as_strided(x, shape=shape, strides=strides)
return np.reshape(x, (*batch_shape, *x.shape[-2:]))
def _get_complex_tensor_from_tuple(x):
if not isinstance(x, (tuple, list)) or len(x) != 2:
raise ValueError(
"Input `x` should be a tuple of two tensors - real and imaginary."
f"Received: x={x}"
)
# `convert_to_tensor` does not support passing complex tensors. We separate
# the input out into real and imaginary and convert them separately.
real, imag = x
# Check shapes.
if real.shape != imag.shape:
raise ValueError(
"Input `x` should be a tuple of two tensors - real and imaginary."
"Both the real and imaginary parts should have the same shape. "
f"Received: x[0].shape = {real.shape}, x[1].shape = {imag.shape}"
)
# Ensure dtype is float.
if not np.issubdtype(real.dtype, np.floating) or not np.issubdtype(
imag.dtype, np.floating
):
raise ValueError(
"At least one tensor in input `x` is not of type float."
f"Received: x={x}."
)
complex_input = real + 1j * imag
return complex_input
def fft(x):
real, imag = jax_fft(x)
return np.array(real), np.array(imag)
def fft2(x):
real, imag = jax_fft2(x)
return np.array(real), np.array(imag)
def ifft2(x):
complex_input = _get_complex_tensor_from_tuple(x)
complex_output = np.fft.ifft2(complex_input)
return np.real(complex_output), np.imag(complex_output)
def rfft(x, fft_length=None):
complex_output = np.fft.rfft(x, n=fft_length, axis=-1, norm="backward")
# numpy always outputs complex128, so we need to recast the dtype
return (
np.real(complex_output).astype(x.dtype),
np.imag(complex_output).astype(x.dtype),
)
def irfft(x, fft_length=None):
complex_input = _get_complex_tensor_from_tuple(x)
# numpy always outputs float64, so we need to recast the dtype
return np.fft.irfft(
complex_input, n=fft_length, axis=-1, norm="backward"
).astype(x[0].dtype)
def stft(
x, sequence_length, sequence_stride, fft_length, window="hann", center=True
):
if standardize_dtype(x.dtype) not in {"float32", "float64"}:
raise TypeError(
"Invalid input type. Expected `float32` or `float64`. "
f"Received: input type={x.dtype}"
)
if fft_length < sequence_length:
raise ValueError(
"`fft_length` must equal or larger than `sequence_length`. "
f"Received: sequence_length={sequence_length}, "
f"fft_length={fft_length}"
)
if isinstance(window, str):
if window not in {"hann", "hamming"}:
raise ValueError(
"If a string is passed to `window`, it must be one of "
f'`"hann"`, `"hamming"`. Received: window={window}'
)
x = convert_to_tensor(x)
ori_dtype = x.dtype
if center:
pad_width = [(0, 0) for _ in range(len(x.shape))]
pad_width[-1] = (fft_length // 2, fft_length // 2)
x = np.pad(x, pad_width, mode="reflect")
l_pad = (fft_length - sequence_length) // 2
r_pad = fft_length - sequence_length - l_pad
if window is not None:
if isinstance(window, str):
win = convert_to_tensor(
scipy.signal.get_window(window, sequence_length), dtype=x.dtype
)
else:
win = convert_to_tensor(window, dtype=x.dtype)
if len(win.shape) != 1 or win.shape[-1] != sequence_length:
raise ValueError(
"The shape of `window` must be equal to [sequence_length]."
f"Received: window shape={win.shape}"
)
win = np.pad(win, [[l_pad, r_pad]])
else:
win = np.ones((sequence_length + l_pad + r_pad), dtype=x.dtype)
x = scipy.signal.stft(
x,
fs=1.0,
window=win,
nperseg=(sequence_length + l_pad + r_pad),
noverlap=(sequence_length + l_pad + r_pad - sequence_stride),
nfft=fft_length,
boundary=None,
padded=False,
)[-1]
# scale and swap to (..., num_sequences, fft_bins)
x = x / np.sqrt(1.0 / win.sum() ** 2)
x = np.swapaxes(x, -2, -1)
return np.real(x).astype(ori_dtype), np.imag(x).astype(ori_dtype)
def istft(
x,
sequence_length,
sequence_stride,
fft_length,
length=None,
window="hann",
center=True,
):
x = _get_complex_tensor_from_tuple(x)
dtype = np.real(x).dtype
expected_output_len = fft_length + sequence_stride * (x.shape[-2] - 1)
l_pad = (fft_length - sequence_length) // 2
r_pad = fft_length - sequence_length - l_pad
if window is not None:
if isinstance(window, str):
win = convert_to_tensor(
scipy.signal.get_window(window, sequence_length), dtype=dtype
)
else:
win = convert_to_tensor(window, dtype=dtype)
if len(win.shape) != 1 or win.shape[-1] != sequence_length:
raise ValueError(
"The shape of `window` must be equal to [sequence_length]."
f"Received: window shape={win.shape}"
)
win = np.pad(win, [[l_pad, r_pad]])
else:
win = np.ones((sequence_length + l_pad + r_pad), dtype=dtype)
x = scipy.signal.istft(
x,
fs=1.0,
window=win,
nperseg=(sequence_length + l_pad + r_pad),
noverlap=(sequence_length + l_pad + r_pad - sequence_stride),
nfft=fft_length,
boundary=False,
time_axis=-2,
freq_axis=-1,
)[-1]
# scale
x = x / win.sum() if window is not None else x / sequence_stride
start = 0 if center is False else fft_length // 2
if length is not None:
end = start + length
elif center is True:
end = -(fft_length // 2)
else:
end = expected_output_len
return x[..., start:end]
def rsqrt(x):
return 1.0 / np.sqrt(x)
def erf(x):
return np.array(scipy.special.erf(x))
def erfinv(x):
return np.array(scipy.special.erfinv(x))
def solve(a, b):
a = convert_to_tensor(a)
b = convert_to_tensor(b)
return np.linalg.solve(a, b)
def norm(x, ord=None, axis=None, keepdims=False):
x = convert_to_tensor(x)
dtype = standardize_dtype(x.dtype)
if "int" in dtype or dtype == "bool":
dtype = dtypes.result_type(x.dtype, "float32")
return np.linalg.norm(x, ord=ord, axis=axis, keepdims=keepdims).astype(
dtype
)
def logdet(x):
from keras.src.backend.numpy.numpy import slogdet
# In NumPy slogdet is more stable than `np.log(np.linalg.det(x))`. See
# https://numpy.org/doc/stable/reference/generated/numpy.linalg.slogdet.html
return slogdet(x)[1]
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/openvino/image.py | keras/src/backend/openvino/image.py | def rgb_to_grayscale(images, data_format=None):
raise NotImplementedError(
"`rgb_to_grayscale` is not supported with openvino backend"
)
def resize(
image,
size,
interpolation="bilinear",
antialias=False,
crop_to_aspect_ratio=False,
pad_to_aspect_ratio=False,
fill_mode="constant",
fill_value=0.0,
data_format="channels_last",
):
raise NotImplementedError("`resize` is not supported with openvino backend")
def affine_transform(
images,
transform,
interpolation="bilinear",
fill_mode="constant",
fill_value=0,
data_format=None,
):
raise NotImplementedError(
"`affine_transform` is not supported with openvino backend"
)
def perspective_transform(
images,
start_points,
end_points,
interpolation="bilinear",
fill_value=0,
data_format=None,
):
raise NotImplementedError(
"`perspective_transform` is not supported with openvino backend"
)
def map_coordinates(
inputs, coordinates, order, fill_mode="constant", fill_value=0
):
raise NotImplementedError(
"`map_coordinates` is not supported with openvino backend"
)
def gaussian_blur(
images, kernel_size=(3, 3), sigma=(1.0, 1.0), data_format=None
):
raise NotImplementedError(
"`gaussian_blur` is not supported with openvino backend"
)
def elastic_transform(
images,
alpha=20.0,
sigma=5.0,
interpolation="bilinear",
fill_mode="reflect",
fill_value=0.0,
seed=None,
data_format=None,
):
raise NotImplementedError(
"`elastic_transform` is not supported with openvino backend"
)
def scale_and_translate(
images,
output_shape,
scale,
translation,
spatial_dims,
method,
antialias=True,
):
raise NotImplementedError(
"`scale_and_translate` is not supported with openvino backend"
)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/openvino/layer.py | keras/src/backend/openvino/layer.py | class OpenvinoLayer:
pass
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/openvino/export.py | keras/src/backend/openvino/export.py | class OpenvinoExportArchive:
def track(self, resource):
    """Unsupported: export archives cannot track resources on OpenVINO."""
    raise NotImplementedError(
        "`track` is not implemented in the openvino backend."
    )
def add_endpoint(self, name, fn, input_signature=None, **kwargs):
    """Unsupported: export endpoints are not available on OpenVINO."""
    raise NotImplementedError(
        "`add_endpoint` is not implemented in the openvino backend."
    )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/openvino/rnn.py | keras/src/backend/openvino/rnn.py | def rnn(
step_function,
inputs,
initial_states,
go_backwards=False,
mask=None,
constants=None,
unroll=False,
input_length=None,
time_major=False,
zero_output_for_mask=False,
return_all_outputs=True,
):
raise NotImplementedError("`rnn` is not supported with openvino backend")
def lstm(*args, **kwargs):
    """Unsupported stub: no fused LSTM kernel on the OpenVINO backend."""
    raise NotImplementedError("`lstm` is not supported with openvino backend")
def gru(*args, **kwargs):
    """Unsupported stub: no fused GRU kernel on the OpenVINO backend."""
    raise NotImplementedError("`gru` is not supported with openvino backend")
def unstack(x, axis=0):
    """Unsupported stub: tensor unstacking is unavailable on OpenVINO."""
    raise NotImplementedError(
        "`unstack` is not supported with openvino backend"
    )
def numpy_scan(f, init, xs, reverse=False, mask=None):
    """Unsupported stub: scan fallback is unavailable on OpenVINO."""
    raise NotImplementedError(
        "`numpy_scan` is not supported with openvino backend"
    )
def cudnn_ok(*args, **kwargs):
    """cuDNN-backed RNN kernels are never available on OpenVINO."""
    return False
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/openvino/core.py | keras/src/backend/openvino/core.py | import builtins
import contextlib
import warnings
import numpy as np
import openvino as ov
import openvino.opset14 as ov_opset
from openvino import Model
from openvino import Tensor
from openvino import Type
from openvino import compile_model
from keras.src import tree
from keras.src.backend.common import KerasVariable
from keras.src.backend.common import dtypes
from keras.src.backend.common import standardize_dtype
from keras.src.backend.common.dtypes import result_type
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.stateless_scope import StatelessScope
# Capability flags consumed by the generic Keras backend machinery.
SUPPORTS_SPARSE_TENSORS = False
SUPPORTS_RAGGED_TENSORS = False
IS_THREAD_SAFE = True
# Keras dtype string -> OpenVINO element type.
OPENVINO_DTYPES = {
    "float16": ov.Type.f16,
    "float32": ov.Type.f32,
    "float64": ov.Type.f64,
    "uint8": ov.Type.u8,
    "uint16": ov.Type.u16,
    "uint32": ov.Type.u32,
    "uint64": ov.Type.u64,
    "int8": ov.Type.i8,
    "int16": ov.Type.i16,
    "int32": ov.Type.i32,
    "int64": ov.Type.i64,
    "bfloat16": ov.Type.bf16,
    "bool": ov.Type.boolean,
    "float8_e4m3fn": ov.Type.f8e4m3,
    "float8_e5m2": ov.Type.f8e5m2,
    "string": ov.Type.string,
}
# Largest representable value per OpenVINO element type. The bf16 bound
# is hard-coded because NumPy has no native bfloat16 finfo.
DTYPES_MAX = {
    ov.Type.bf16: 3.38953139e38,
    ov.Type.f16: np.finfo(np.float16).max,
    ov.Type.f32: np.finfo(np.float32).max,
    ov.Type.f64: np.finfo(np.float64).max,
    ov.Type.u8: np.iinfo(np.uint8).max,
    ov.Type.u16: np.iinfo(np.uint16).max,
    ov.Type.u32: np.iinfo(np.uint32).max,
    ov.Type.u64: np.iinfo(np.uint64).max,
    ov.Type.i8: np.iinfo(np.int8).max,
    ov.Type.i16: np.iinfo(np.int16).max,
    ov.Type.i32: np.iinfo(np.int32).max,
    ov.Type.i64: np.iinfo(np.int64).max,
    ov.Type.boolean: 1,
}
# Smallest representable value per OpenVINO element type (mirror of the
# table above; bf16 again hard-coded).
DTYPES_MIN = {
    ov.Type.bf16: -3.38953139e38,
    ov.Type.f16: np.finfo(np.float16).min,
    ov.Type.f32: np.finfo(np.float32).min,
    ov.Type.f64: np.finfo(np.float64).min,
    ov.Type.u8: np.iinfo(np.uint8).min,
    ov.Type.u16: np.iinfo(np.uint16).min,
    ov.Type.u32: np.iinfo(np.uint32).min,
    ov.Type.u64: np.iinfo(np.uint64).min,
    ov.Type.i8: np.iinfo(np.int8).min,
    ov.Type.i16: np.iinfo(np.int16).min,
    ov.Type.i32: np.iinfo(np.int32).min,
    ov.Type.i64: np.iinfo(np.int64).min,
    ov.Type.boolean: 0,
}
def align_operand_types(x1, x2, op_name):
    """Promote `x1` and `x2` to a common element type.

    The common type follows Keras dtype-promotion rules
    (`dtypes.result_type`) applied to the Keras names of the two
    OpenVINO element types.

    Args:
        x1: `ov.Output` left operand.
        x2: `ov.Output` right operand.
        op_name: operation label used in the error message.

    Returns:
        Tuple `(x1, x2)` where each operand is converted to the common
        type if its element type differs.

    Raises:
        ValueError: if either operand has a dynamic element type.
    """
    x1_type = x1.element_type
    x2_type = x2.element_type
    if x1_type.is_dynamic() or x2_type.is_dynamic():
        raise ValueError(
            f"'{op_name}' operation is not supported for dynamic operand type "
            "with openvino backend"
        )
    result_keras_type = dtypes.result_type(
        ov_to_keras_type(x1_type), ov_to_keras_type(x2_type)
    )
    result_type = OPENVINO_DTYPES[result_keras_type]
    # Bug fix: the original rebound x1_type/x2_type to Keras dtype
    # *strings* and compared them against an `ov.Type`, which was always
    # unequal — inserting a redundant Convert node for every operand.
    # Compare the actual element types instead.
    if x1_type != result_type:
        x1 = ov_opset.convert(x1, result_type).output(0)
    if x2_type != result_type:
        x2 = ov_opset.convert(x2, result_type).output(0)
    return x1, x2
# Create an ov.Output (symbolic OpenVINO tensor)
# from the various supported input types of `x`.
def get_ov_output(x, ov_type=None):
    """Return an `ov.Output` representing `x` in the OpenVINO graph.

    Args:
        x: Python scalar, numpy array/scalar, list/tuple,
            `KerasVariable`, `OpenVINOKerasTensor`, or `ov.Tensor`.
        ov_type: optional OpenVINO element type honored for Python
            floats/ints and for lists/tuples; the other branches infer
            the type themselves.

    Raises:
        ValueError: if `x` has a type that cannot be converted.
    """
    if isinstance(x, float):
        if ov_type is None:
            ov_type = Type.f32
        x = ov_opset.constant(x, ov_type).output(0)
    # NOTE(review): bool is a subclass of int, so Python bools take this
    # branch and become i32 constants — confirm that boolean element
    # types are never required here.
    elif isinstance(x, int):
        if ov_type is None:
            ov_type = Type.i32
        x = ov_opset.constant(x, ov_type).output(0)
    elif isinstance(x, np.ndarray):
        # numpy has no native bfloat16; map the extension dtype by name.
        if x.dtype == np.dtype("bfloat16"):
            x = ov_opset.constant(x, OPENVINO_DTYPES["bfloat16"]).output(0)
        else:
            x = ov_opset.constant(x).output(0)
    elif isinstance(x, (list, tuple)):
        if isinstance(x, tuple):
            x = list(x)
        if ov_type is None:
            x = ov_opset.constant(x).output(0)
        else:
            x = ov_opset.constant(x, ov_type).output(0)
    elif np.isscalar(x):
        x = ov_opset.constant(x).output(0)
    elif isinstance(x, KerasVariable):
        # Variables may hold either a symbolic value or a concrete one.
        if isinstance(x.value, OpenVINOKerasTensor):
            return x.value.output
        x = ov_opset.constant(x.value.data).output(0)
    elif isinstance(x, OpenVINOKerasTensor):
        x = x.output
    elif isinstance(x, Tensor):
        x = ov_opset.constant(x.data).output(0)
    else:
        raise ValueError(
            "unsupported type of `x` to create ov.Output: {}".format(type(x))
        )
    return x
# Wrapper around the OpenVINO symbolic tensor ov.Output
# that provides an interface similar to KerasTensor,
# exposing `dtype` and `shape` members.
class OpenVINOKerasTensor:
    def __init__(self, x, data=None):
        """Wrap a symbolic `ov.Output` with KerasTensor-like metadata.

        Args:
            x: the `ov.Output` to wrap.
            data: optional concrete value backing this tensor, stored
                as-is on `self.data`.
        """
        x_shape = x.get_partial_shape()
        if x_shape.rank.is_dynamic:
            x_keras_shape = None
        else:
            x_keras_shape = [
                None if dim.is_dynamic else dim.get_length()
                for dim in list(x_shape)
            ]
        x_type = x.get_element_type()
        x_keras_type = ov_to_keras_type(x_type)
        self.output = x
        # Bug fix: the original did `tuple(x_keras_shape)` unconditionally,
        # which raises TypeError when the rank is dynamic (shape is None).
        self.shape = None if x_keras_shape is None else tuple(x_keras_shape)
        self.dtype = x_keras_type
        self.ndim = None
        self.data = data
        if x_shape.rank.is_static:
            self.ndim = x_shape.rank.get_length()
def __add__(self, other):
    """Elementwise `self + other` with Keras dtype promotion."""
    lhs = self.output
    rhs = get_ov_output(other)
    lhs, rhs = align_operand_types(
        lhs, rhs, "OpenVINOKerasTensor::__add__"
    )
    return OpenVINOKerasTensor(ov_opset.add(lhs, rhs).output(0))
def __radd__(self, other):
    """Elementwise `other + self` (addition is symmetric)."""
    lhs = self.output
    rhs = get_ov_output(other)
    lhs, rhs = align_operand_types(
        lhs, rhs, "OpenVINOKerasTensor::__radd__"
    )
    return OpenVINOKerasTensor(ov_opset.add(lhs, rhs).output(0))
def __sub__(self, other):
    """Elementwise `self - other`; boolean operands map to XOR."""
    lhs = self.output
    rhs = get_ov_output(other)
    lhs, rhs = align_operand_types(
        lhs, rhs, "OpenVINOKerasTensor::__sub__"
    )
    if lhs.get_element_type() == Type.boolean:
        return OpenVINOKerasTensor(
            ov_opset.logical_xor(lhs, rhs).output(0)
        )
    return OpenVINOKerasTensor(ov_opset.subtract(lhs, rhs).output(0))
def __rsub__(self, other):
    """Elementwise `other - self` (no boolean special case here)."""
    lhs = self.output
    rhs = get_ov_output(other)
    lhs, rhs = align_operand_types(
        lhs, rhs, "OpenVINOKerasTensor::__rsub__"
    )
    return OpenVINOKerasTensor(ov_opset.subtract(rhs, lhs).output(0))
def __mul__(self, other):
    """Elementwise `self * other`; boolean operands map to AND."""
    lhs = self.output
    rhs = get_ov_output(other)
    lhs, rhs = align_operand_types(
        lhs, rhs, "OpenVINOKerasTensor::__mul__"
    )
    if lhs.get_element_type() == Type.boolean:
        return OpenVINOKerasTensor(
            ov_opset.logical_and(lhs, rhs).output(0)
        )
    return OpenVINOKerasTensor(ov_opset.multiply(lhs, rhs).output(0))
def __rmul__(self, other):
    """Elementwise `other * self`; boolean operands map to AND."""
    lhs = self.output
    rhs = get_ov_output(other)
    lhs, rhs = align_operand_types(
        lhs, rhs, "OpenVINOKerasTensor::__rmul__"
    )
    if lhs.get_element_type() == Type.boolean:
        return OpenVINOKerasTensor(
            ov_opset.logical_and(lhs, rhs).output(0)
        )
    return OpenVINOKerasTensor(ov_opset.multiply(lhs, rhs).output(0))
def __truediv__(self, other):
    """Elementwise `self / other` with dtype promotion."""
    lhs = self.output
    rhs = get_ov_output(other)
    lhs, rhs = align_operand_types(
        lhs, rhs, "OpenVINOKerasTensor::__truediv__"
    )
    return OpenVINOKerasTensor(ov_opset.divide(lhs, rhs).output(0))
def __rtruediv__(self, other):
    """Elementwise `other / self` with dtype promotion."""
    lhs = self.output
    rhs = get_ov_output(other)
    lhs, rhs = align_operand_types(
        lhs, rhs, "OpenVINOKerasTensor::__rtruediv__"
    )
    return OpenVINOKerasTensor(ov_opset.divide(rhs, lhs).output(0))
def __floordiv__(self, other):
    """Elementwise `self // other`.

    NOTE(review): this lowers to ov Divide; for floating operands that
    is true division, not floored division — confirm against the opset
    Divide semantics.
    """
    lhs = self.output
    rhs = get_ov_output(other)
    lhs, rhs = align_operand_types(
        lhs, rhs, "OpenVINOKerasTensor::__floordiv__"
    )
    return OpenVINOKerasTensor(ov_opset.divide(lhs, rhs).output(0))
def __rfloordiv__(self, other):
    """Elementwise `other // self` (same Divide lowering as above)."""
    lhs = self.output
    rhs = get_ov_output(other)
    lhs, rhs = align_operand_types(
        lhs, rhs, "OpenVINOKerasTensor::__rfloordiv__"
    )
    return OpenVINOKerasTensor(ov_opset.divide(rhs, lhs).output(0))
def __neg__(self):
    """Elementwise negation."""
    return OpenVINOKerasTensor(ov_opset.negative(self.output).output(0))
def __abs__(self):
    """Elementwise absolute value."""
    return OpenVINOKerasTensor(ov_opset.absolute(self.output).output(0))
def __invert__(self):
    """Logical NOT (expects a boolean tensor)."""
    return OpenVINOKerasTensor(ov_opset.logical_not(self.output).output(0))
def __pow__(self, other):
    """Elementwise `self ** other` with dtype promotion."""
    lhs = self.output
    rhs = get_ov_output(other)
    lhs, rhs = align_operand_types(
        lhs, rhs, "OpenVINOKerasTensor::__pow__"
    )
    return OpenVINOKerasTensor(ov_opset.power(lhs, rhs).output(0))
def __rpow__(self, other):
    """Elementwise `other ** self` with dtype promotion."""
    rhs = get_ov_output(other)
    lhs = self.output
    lhs, rhs = align_operand_types(
        lhs, rhs, "OpenVINOKerasTensor::__rpow__"
    )
    return OpenVINOKerasTensor(ov_opset.power(rhs, lhs).output(0))
def __lt__(self, other):
    """Elementwise `self < other`; returns a boolean tensor."""
    lhs = self.output
    rhs = get_ov_output(other)
    lhs, rhs = align_operand_types(
        lhs, rhs, "OpenVINOKerasTensor::__lt__"
    )
    return OpenVINOKerasTensor(ov_opset.less(lhs, rhs).output(0))
def __gt__(self, other):
    """Elementwise `self > other`; returns a boolean tensor."""
    lhs = self.output
    rhs = get_ov_output(other)
    lhs, rhs = align_operand_types(
        lhs, rhs, "OpenVINOKerasTensor::__gt__"
    )
    return OpenVINOKerasTensor(ov_opset.greater(lhs, rhs).output(0))
def __le__(self, other):
    """Elementwise `self <= other`; returns a boolean tensor."""
    lhs = self.output
    rhs = get_ov_output(other)
    lhs, rhs = align_operand_types(
        lhs, rhs, "OpenVINOKerasTensor::__le__"
    )
    return OpenVINOKerasTensor(ov_opset.less_equal(lhs, rhs).output(0))
def __ge__(self, other):
    """Elementwise `self >= other`; returns a boolean tensor."""
    lhs = self.output
    rhs = get_ov_output(other)
    lhs, rhs = align_operand_types(
        lhs, rhs, "OpenVINOKerasTensor::__ge__"
    )
    return OpenVINOKerasTensor(ov_opset.greater_equal(lhs, rhs).output(0))
def __eq__(self, other):
    """Elementwise equality; returns a tensor, not a Python bool.

    NOTE(review): defining __eq__ without __hash__ makes instances
    unhashable (Python sets __hash__ to None) — confirm instances are
    never used as dict keys or set members.
    """
    lhs = self.output
    rhs = get_ov_output(other)
    lhs, rhs = align_operand_types(
        lhs, rhs, "OpenVINOKerasTensor::__eq__"
    )
    return OpenVINOKerasTensor(ov_opset.equal(lhs, rhs).output(0))
def __ne__(self, other):
    """Elementwise inequality; returns a boolean tensor."""
    lhs = self.output
    rhs = get_ov_output(other)
    lhs, rhs = align_operand_types(
        lhs, rhs, "OpenVINOKerasTensor::__ne__"
    )
    return OpenVINOKerasTensor(ov_opset.not_equal(lhs, rhs).output(0))
def __getitem__(self, indices):
        """NumPy-style indexing lowered to OpenVINO graph operations.

        Supports integer indices (including negative and 0-d integer
        tensors), non-negative-step slices, `None` (new axis), and
        `Ellipsis`. Boolean and multi-dimensional index tensors are
        rejected.
        """
        data = self.output
        rank = len(data.get_partial_shape())
        axes, gather_indices_nodes = [], []
        slice_axes, slice_starts, slice_ends, slice_steps = [], [], [], []
        unsqueeze_axes = []
        if not isinstance(indices, tuple):
            indices = (indices,)
        # Expand Ellipsis into full slices so every dim has an entry.
        if any(i is Ellipsis for i in indices):
            ellipsis_pos = indices.index(Ellipsis)
            num_specified = sum(
                i is not Ellipsis and i is not None for i in indices
            )
            num_missing = rank - num_specified
            indices = (
                indices[:ellipsis_pos]
                + (builtins.slice(None),) * num_missing
                + indices[ellipsis_pos + 1 :]
            )
        # `None` entries add axes; positions before `dim` shift its
        # mapping onto the data's real axes.
        def count_unsqueeze_before(dim):
            return sum(1 for i in range(dim) if indices[i] is None)
        partial_shape = ov_opset.shape_of(data, Type.i32)
        zero_const = ov_opset.constant(0, Type.i32)
        for dim, index in enumerate(indices):
            if isinstance(index, bool):
                raise ValueError(
                    "OpenVINO backend does not support boolean indexing"
                )
            elif isinstance(index, (int, np.integer, np.ndarray)):
                if isinstance(index, (np.ndarray, np.integer)):
                    if isinstance(index, np.ndarray) and len(index.shape) != 0:
                        raise ValueError(
                            "OpenVINO backend does not support "
                            "multi-dimensional indexing"
                        )
                    index = int(index)
                actual_dim = dim - count_unsqueeze_before(dim)
                if not (0 <= actual_dim < rank):
                    raise IndexError(
                        f"Index {index} is out of bounds for "
                        f"axis {dim} with rank {rank}"
                    )
                length = ov_opset.gather(
                    partial_shape,
                    ov_opset.constant([actual_dim], Type.i32),
                    zero_const,
                )
                # Negative indices are resolved symbolically against the
                # (possibly dynamic) dimension length.
                if index >= 0:
                    idx_value = ov_opset.constant([index], Type.i32)
                else:
                    idx_value = ov_opset.add(
                        ov_opset.constant([index], Type.i32), length
                    )
                axes.append(dim)
                gather_indices_nodes.append(idx_value.output(0))
            elif isinstance(index, builtins.slice):
                if index == builtins.slice(None):
                    continue
                if index.step is not None and index.step < 0:
                    raise ValueError("OpenVINO doesn't support negative steps")
                slice_axes.append(dim)
                slice_starts.append(0 if index.start is None else index.start)
                slice_ends.append(
                    2**31 - 1 if index.stop is None else index.stop
                )
                slice_steps.append(1 if index.step is None else index.step)
            elif index is None:
                unsqueeze_axes.append(dim)
            elif isinstance(index, OpenVINOKerasTensor):
                index = get_ov_output(index)
                index_type = index.get_element_type()
                index_shape = index.get_partial_shape()
                if index_type == Type.boolean or not index_type.is_integral():
                    raise ValueError(
                        "OpenVINO backend does not "
                        f"support {index_type} indexing"
                    )
                axes.append(dim)
                if len(index_shape) > 1:
                    raise ValueError(
                        "OpenVINO backend does not "
                        "support multi-dimensional indexing"
                    )
                if len(index_shape) == 0:
                    index = ov_opset.unsqueeze(index, zero_const).output(0)
                if index_type != Type.i32:
                    index = ov_opset.convert(index, Type.i32).output(0)
                # Resolve possibly-negative tensor indices at runtime.
                shape_tensor = ov_opset.shape_of(data, Type.i32)
                axis_i32 = ov_opset.constant([dim], dtype=Type.i32)
                dim_size = ov_opset.gather(shape_tensor, axis_i32, zero_const)
                is_negative = ov_opset.less(index, zero_const)
                adjusted_index = ov_opset.add(index, dim_size)
                index = ov_opset.select(
                    is_negative, adjusted_index, index
                ).output(0)
                gather_indices_nodes.append(index)
            else:
                raise ValueError(
                    f"Unsupported index type {type(index)} "
                    "in OpenVINOKerasTensor.__getitem__"
                )
        # Apply all collected slices in a single Slice op.
        if slice_axes:
            step = ov_opset.constant(slice_steps, Type.i32).output(0)
            start = ov_opset.constant(slice_starts, Type.i32).output(0)
            stop = ov_opset.constant(slice_ends, Type.i32).output(0)
            adjusted_slice_axes = [
                ax - sum(1 for unsq in unsqueeze_axes if unsq <= ax)
                for ax in slice_axes
            ]
            axes_const = ov_opset.constant(
                adjusted_slice_axes, Type.i32
            ).output(0)
            data = ov_opset.slice(data, start, stop, step, axes_const).output(0)
        # Apply integer/tensor indexing via Gather (single axis) or
        # transpose + GatherND (multiple axes).
        if axes:
            gather_indices_const = (
                gather_indices_nodes[0]
                if len(gather_indices_nodes) == 1
                else ov_opset.concat(gather_indices_nodes, axis=0).output(0)
            )
            adjusted_axes = [
                ax - sum(1 for unsq in unsqueeze_axes if unsq <= ax)
                for ax in axes
            ]
            if len(axes) == 1:
                data = ov_opset.gather(
                    data, gather_indices_const, adjusted_axes[0]
                ).output(0)
                data = ov_opset.squeeze(data, adjusted_axes[0]).output(0)
            else:
                rank = len(data.get_partial_shape())
                remaining_axes = [
                    i for i in range(rank) if i not in adjusted_axes
                ]
                perm = ov_opset.constant(
                    adjusted_axes + remaining_axes, Type.i32
                )
                data = ov_opset.transpose(data, perm).output(0)
                data = ov_opset.gather_nd(data, gather_indices_const).output(0)
        # Finally insert the new axes requested via `None`.
        if unsqueeze_axes:
            adjusted_unsqueeze = []
            for ax in unsqueeze_axes:
                ax -= sum(1 for s in axes if s < ax)
                ax -= sum(1 for s in slice_axes if s < ax)
                adjusted_unsqueeze.append(ax)
            unsqueeze_const = ov_opset.constant(
                adjusted_unsqueeze, Type.i32
            ).output(0)
            data = ov_opset.unsqueeze(data, unsqueeze_const).output(0)
        return OpenVINOKerasTensor(data)
def __len__(self):
    """Static length of the first dimension."""
    pshape = self.output.get_partial_shape()
    assert pshape.rank.is_static and pshape.rank.get_length() > 0, (
        "rank must be static and greater than zero"
    )
    assert pshape[0].is_static, "the first dimension must be static"
    return pshape[0].get_length()
def __mod__(self, other):
    """Elementwise `self % other` with dtype promotion."""
    lhs = self.output
    rhs = get_ov_output(other)
    lhs, rhs = align_operand_types(
        lhs, rhs, "OpenVINOKerasTensor::__mod__"
    )
    return OpenVINOKerasTensor(ov_opset.mod(lhs, rhs).output(0))
def __array__(self, dtype=None):
    """Materialize the symbolic tensor as a NumPy array, if possible."""
    try:
        target = self if dtype is None else cast(self, dtype=dtype)
        return convert_to_numpy(target)
    except Exception as e:
        raise RuntimeError(
            "An OpenVINOKerasTensor is symbolic: it's a placeholder "
            "for a shape and a dtype.\n"
            "It doesn't have any actual numerical value.\n"
            "You cannot convert it to a NumPy array."
        ) from e
def numpy(self):
    """Eagerly evaluate to a NumPy array via `__array__`."""
    return self.__array__()
def ov_to_keras_type(ov_type):
    """Map an OpenVINO element type back to its Keras dtype string."""
    for keras_name, candidate in OPENVINO_DTYPES.items():
        if candidate == ov_type:
            return keras_name
    raise ValueError(
        f"Requested OpenVINO type has no keras analogue '{ov_type.to_string()}'"
    )
@contextlib.contextmanager
def device_scope(device_name):
    """No-op scope: the OpenVINO backend ignores device placement."""
    yield None
def get_device():
    """This backend always compiles and runs on the CPU device."""
    return "CPU"
class Variable(KerasVariable):
    """Keras variable backed by an OpenVINO constant node."""

    def _initialize(self, value):
        # Symbolic values are stored as-is; everything else is wrapped
        # in a Constant of the variable's declared dtype.
        if isinstance(value, OpenVINOKerasTensor):
            self._value = value
            return
        raw = value.data if isinstance(value, Tensor) else value
        const = ov_opset.constant(raw, dtype=OPENVINO_DTYPES[self._dtype])
        self._value = OpenVINOKerasTensor(const.output(0))

    def _direct_assign(self, value):
        self._value = value

    def _convert_to_tensor(self, value, dtype=None):
        return convert_to_tensor(value, dtype=dtype)

    def _np_data(self):
        # Concrete ndarray behind the current value (constant-node data
        # for symbolic values, `.data` otherwise).
        if isinstance(self.value, OpenVINOKerasTensor):
            return self.value.output.get_node().data
        return self.value.data

    def __array__(self):
        return self._np_data()

    def __getitem__(self, idx):
        # Non-symbolic values are indexed directly (not via `.data`),
        # matching the historical behavior.
        if isinstance(self.value, OpenVINOKerasTensor):
            return self.value.output.get_node().data[idx]
        return self.value[idx]

    def _scalar_data(self):
        # Shared guard for Python-scalar conversion.
        arr = self._np_data()
        if arr.ndim > 0:
            raise TypeError(
                "Only scalar arrays can be converted to Python scalars. "
                f"Got: shape={arr.shape}"
            )
        return arr

    def __int__(self):
        return int(self._scalar_data())

    def __float__(self):
        return float(self._scalar_data())
def _is_scalar(elem):
return not isinstance(elem, (list, tuple, set, dict))
def convert_to_tensor(x, dtype=None, sparse=None, ragged=None):
    """Convert `x` to an `OpenVINOKerasTensor`, optionally casting.

    Sparse and ragged tensors are rejected. For Python lists/tuples and
    scalars the concrete value is also stored on the tensor's `data`
    attribute.

    Raises:
        ValueError: if `sparse=True` or `ragged=True`.
        TypeError: if `x` cannot be converted.
    """
    if sparse:
        raise ValueError("`sparse=True` is not supported with openvino backend")
    if ragged:
        raise ValueError("`ragged=True` is not supported with openvino backend")
    if dtype is not None:
        dtype = standardize_dtype(dtype)
    if isinstance(x, OpenVINOKerasTensor):
        # Already symbolic: only insert a cast if the dtype differs.
        if dtype and dtype != standardize_dtype(x.dtype):
            x = cast(x, dtype)
        return x
    elif isinstance(x, np.ndarray):
        if dtype is not None:
            ov_type = OPENVINO_DTYPES[dtype]
        else:
            ov_type = OPENVINO_DTYPES[standardize_dtype(x.dtype)]
        return OpenVINOKerasTensor(ov_opset.constant(x, ov_type).output(0))
    elif isinstance(x, (list, tuple)):
        # Infer the promoted dtype over all leaves of the nested structure.
        if dtype is None:
            dtype = result_type(
                *[
                    getattr(item, "dtype", type(item))
                    for item in tree.flatten(x)
                ]
            )
        x = np.array(x, dtype=dtype)
        ov_type = OPENVINO_DTYPES[dtype]
        return OpenVINOKerasTensor(ov_opset.constant(x, ov_type).output(0), x)
    elif isinstance(x, (float, int, bool)):
        if dtype is None:
            dtype = standardize_dtype(type(x))
        ov_type = OPENVINO_DTYPES[dtype]
        return OpenVINOKerasTensor(ov_opset.constant(x, ov_type).output(0), x)
    elif isinstance(x, ov.Output):
        return OpenVINOKerasTensor(x)
    if isinstance(x, Variable):
        # Unwrap the variable's value and re-enter the cast logic.
        x = x.value
        if dtype and dtype != x.dtype:
            x = cast(x, dtype)
        return x
    # Fallback: anything numpy can coerce (e.g. objects with __array__).
    original_type = type(x)
    try:
        if dtype is None:
            dtype = getattr(x, "dtype", original_type)
            ov_type = OPENVINO_DTYPES[standardize_dtype(dtype)]
        else:
            ov_type = OPENVINO_DTYPES[dtype]
        x = np.array(x)
        return OpenVINOKerasTensor(ov_opset.constant(x, ov_type).output(0))
    except Exception as e:
        raise TypeError(
            f"Cannot convert object of type {original_type} "
            f"to OpenVINOKerasTensor: {e}"
        )
def convert_to_numpy(x):
    """Evaluate `x` to a NumPy value.

    Plain Python/NumPy inputs are converted directly; a symbolic
    `OpenVINOKerasTensor` is evaluated by compiling a parameterless
    model whose single result is the tensor.

    Raises:
        RuntimeError: if compiling/evaluating the symbolic tensor fails
            (e.g. the graph still depends on parameters).
    """
    if isinstance(x, np.ndarray):
        return x
    elif isinstance(x, (int, float)):
        return np.array(x)
    elif isinstance(x, (list, tuple)):
        # Convert elementwise, then stack into a single array.
        x_new = []
        for elem in x:
            x_new.append(convert_to_numpy(elem))
        return np.array(x_new)
    elif np.isscalar(x):
        return x
    elif isinstance(x, ov.Tensor):
        return x.data
    elif x is None:
        return x
    elif isinstance(x, KerasVariable):
        if isinstance(x.value, OpenVINOKerasTensor):
            x = x.value
        else:
            return x.value.data
    assert isinstance(x, OpenVINOKerasTensor), (
        "unsupported type {} for `convert_to_numpy` in openvino backend".format(
            type(x)
        )
    )
    try:
        # Evaluate the symbolic graph: wrap the output in a model with
        # no parameters and run it once on the backend device.
        ov_result = x.output
        ov_model = Model(results=[ov_result], parameters=[])
        ov_compiled_model = compile_model(ov_model, get_device())
        result = ov_compiled_model({})[0]
    except Exception as inner_exception:
        raise RuntimeError(
            "`convert_to_numpy` failed to convert the tensor."
        ) from inner_exception
    return result
def is_tensor(x):
    """True for backend tensors: symbolic wrappers or `ov.Tensor`s."""
    return isinstance(x, (OpenVINOKerasTensor, ov.Tensor))
def shape(x):
    """Shape of `x` as a plain Python tuple."""
    x_shape = x.shape
    return tuple(x_shape)
def cast(x, dtype):
    """Convert `x` to the given Keras dtype via an ov Convert node."""
    target = OPENVINO_DTYPES[standardize_dtype(dtype)]
    node = ov_opset.convert(get_ov_output(x), target)
    return OpenVINOKerasTensor(node.output(0))
def cond(pred, true_fn, false_fn):
    """Unsupported stub: symbolic conditionals are unavailable here."""
    raise NotImplementedError("`cond` is not supported with openvino backend")
def vectorized_map(function, elements):
    """Unsupported stub: no vectorized mapping on the OpenVINO backend."""
    raise NotImplementedError(
        "`vectorized_map` is not supported with openvino backend"
    )
# Shape / dtype inference util
def compute_output_spec(fn, *args, **kwargs):
    """Infer output KerasTensors by tracing `fn` with OpenVINO parameters.

    Every `KerasTensor` in `args`/`kwargs` is replaced by an OpenVINO
    Parameter node (unknown dims become -1), `fn` is traced once, and
    the resulting symbolic tensors are mapped back to `KerasTensor`s.
    """
    with StatelessScope():
        def convert_keras_tensor_to_openvino(x):
            if isinstance(x, KerasTensor):
                x_shape = list(x.shape)
                # -1 marks a dynamic dimension for the Parameter node.
                x_shape = [-1 if dim is None else dim for dim in x_shape]
                x_type = OPENVINO_DTYPES[x.dtype]
                param = ov_opset.parameter(shape=x_shape, dtype=x_type)
                return OpenVINOKerasTensor(param.output(0))
            return x
        args_1, kwargs_1 = tree.map_structure(
            lambda x: convert_keras_tensor_to_openvino(x),
            (args, kwargs),
        )
        outputs_1 = fn(*args_1, **kwargs_1)
        outputs = outputs_1
        def convert_openvino_to_keras_tensor(x):
            if is_tensor(x):
                x_type = x.dtype
                x_shape = x.shape
                return KerasTensor(x_shape, x_type)
            elif isinstance(x, OpenVINOKerasTensor):
                x_type = x.dtype
                x_shape = x.shape
                return KerasTensor(x_shape, x_type)
            return x
        output_spec = tree.map_structure(
            convert_openvino_to_keras_tensor, outputs
        )
    return output_spec
def scan(f, init, xs=None, length=None, reverse=False, unroll=1):
    """Unsupported stub: no scan primitive on the OpenVINO backend."""
    raise NotImplementedError("`scan` is not supported with openvino backend")
def scatter(indices, values, shape):
    """Unsupported stub: scatter is unavailable on the OpenVINO backend."""
    raise NotImplementedError(
        "`scatter` is not supported with openvino backend"
    )
def scatter_update(inputs, indices, updates):
    """Unsupported stub: scatter_update is unavailable on OpenVINO."""
    raise NotImplementedError(
        "`scatter_update` is not supported with openvino backend"
    )
def slice(inputs, start_indices, shape):
    """Extract a sub-tensor of size `shape` starting at `start_indices`.

    A `shape` entry of `None` (or a negative value) leaves that axis
    untouched; other axes are sliced as
    `[start_indices[i], start_indices[i] + shape[i])`.

    NOTE(review): when no axis is sliced the raw `ov.Output` is returned
    instead of an `OpenVINOKerasTensor` — confirm callers tolerate the
    mixed return type.
    """
    inputs = get_ov_output(inputs)
    if isinstance(start_indices, (list, np.ndarray)):
        start_indices = tuple(start_indices)
    if isinstance(shape, (list, np.ndarray)):
        shape = tuple(shape)
    assert isinstance(start_indices, tuple), (
        "`slice` is not supported by openvino backend"
        " for `start_indices` of type {}".format(type(start_indices))
    )
    assert isinstance(shape, tuple), (
        "`slice` is not supported by openvino backend"
        " for `shape` of type {}".format(type(shape))
    )
    axes = []
    start = []
    stop = []
    # Normalize a start/stop value to a 1-D i32 ov.Output.
    def prepare_slice_index(val):
        val_type = val.get_element_type()
        if not val_type.is_integral():
            raise ValueError(
                "`slice` is not supported by OpenVINO backend "
                "for `start_indices` or `shape` with non-integer types"
            )
        if val_type != Type.i32:
            val = ov_opset.convert(val, Type.i32).output(0)
        if len(val.get_partial_shape()) == 0:
            val = ov_opset.unsqueeze(
                val, ov_opset.constant(0, Type.i32)
            ).output(0)
        return val
    for idx, length in enumerate(shape):
        if length is not None and length >= 0:
            axes.append(idx)
            start_val = prepare_slice_index(get_ov_output(start_indices[idx]))
            stop_val = prepare_slice_index(
                get_ov_output(start_indices[idx] + length)
            )
            start.append(start_val)
            stop.append(stop_val)
    if len(axes) == 0:
        return inputs
    step = [1] * len(start)
    step = ov_opset.constant(step, Type.i32).output(0)
    start = ov_opset.concat(start, axis=0).output(0)
    stop = ov_opset.concat(stop, axis=0).output(0)
    axes = ov_opset.constant(axes, Type.i32).output(0)
    result = ov_opset.slice(inputs, start, stop, step, axes).output(0)
    # Apply reshape to ensure output matches expected shape
    # Convert None (dynamic) dimensions to -1 for OpenVINO compatibility
    if all(dim is None or (isinstance(dim, int) and dim >= 0) for dim in shape):
        reshape_pattern = [(-1 if dim is None else dim) for dim in shape]
        target_shape = ov_opset.constant(reshape_pattern, Type.i32).output(0)
        result = ov_opset.reshape(result, target_shape, False).output(0)
    return OpenVINOKerasTensor(result)
def slice_update(inputs, start_indices, updates):
inputs = get_ov_output(inputs)
updates_tensor = get_ov_output(updates)
if isinstance(start_indices, (list, np.ndarray)):
start_indices = tuple(start_indices)
if not isinstance(start_indices, tuple):
raise ValueError(
"`slice_update` is not supported by openvino backend"
" for `start_indices` of type {}".format(type(start_indices))
)
zero_scalar = ov_opset.constant(0, Type.i32)
one_scalar = ov_opset.constant(1, Type.i32)
zero_tensor = ov_opset.constant([0], Type.i32)
one_tensor = ov_opset.constant([1], Type.i32)
processed_start_indices = []
for idx in start_indices:
val = get_ov_output(idx)
if not val.get_element_type().is_integral():
raise ValueError("`slice_update` requires integral start_indices")
if val.get_element_type() != Type.i32:
val = ov_opset.convert(val, Type.i32).output(0)
if val.get_partial_shape().rank.get_length() == 0:
val = ov_opset.unsqueeze(val, zero_scalar).output(0)
processed_start_indices.append(val)
updates_shape = ov_opset.shape_of(updates_tensor, Type.i32).output(0)
rank = updates_tensor.get_partial_shape().rank.get_length()
if rank == 0:
# Handle scalar update
start_tensor = ov_opset.concat(processed_start_indices, axis=0).output(
0
)
# For scatter_nd_update,
# indices should be of shape [num_updates, rank_of_inputs]
# and updates should be of shape [num_updates]. Here num_updates is 1.
absolute_indices = ov_opset.unsqueeze(start_tensor, zero_scalar).output(
0
)
updates_flat = ov_opset.unsqueeze(updates_tensor, zero_scalar).output(0)
result = ov_opset.scatter_nd_update(
inputs, absolute_indices, updates_flat
).output(0)
return OpenVINOKerasTensor(result)
# Compute the total number of elements in the updates tensor.
# Example:
# if updates.shape = [2, 3], total_elements = 6.
total_elements = ov_opset.reduce_prod(
updates_shape, zero_tensor, keep_dims=False
).output(0)
# Generate a flat range [0, 1, ..., total_elements-1].
# This will be used to enumerate all positions in the updates tensor.
flat_indices = ov_opset.range(
zero_scalar, total_elements, one_scalar, output_type=Type.i32
).output(0)
dim_sizes = []
strides = []
# For each dimension, compute its size and the stride.
# (number of elements to skip to move to the next index in this dimension).
# Example:
# for shape [2, 3], strides = [3, 1].
for dim in range(rank):
dim_size = ov_opset.gather(
updates_shape, ov_opset.constant([dim], Type.i32), zero_scalar
).output(0)
dim_size_scalar = ov_opset.squeeze(dim_size, zero_tensor).output(0)
dim_sizes.append(dim_size_scalar)
# Strides to convert a flat index into a multi-dimensional index.
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/openvino/nn.py | keras/src/backend/openvino/nn.py | import openvino.opset14 as ov_opset
from openvino import Type
from keras.src import backend
from keras.src.backend.openvino.core import OPENVINO_DTYPES
from keras.src.backend.openvino.core import OpenVINOKerasTensor
from keras.src.backend.openvino.core import get_ov_output
def relu(x):
    """Rectified linear unit: elementwise max(x, 0)."""
    node = ov_opset.relu(get_ov_output(x))
    return OpenVINOKerasTensor(node.output(0))
def relu6(x):
    """ReLU capped at 6: clamp(x, 0, 6)."""
    node = ov_opset.clamp(get_ov_output(x), 0.0, 6.0)
    return OpenVINOKerasTensor(node.output(0))
def celu(x, alpha=1.0):
    """CELU: max(x, 0) + min(alpha * (exp(x / alpha) - 1), 0)."""
    x = get_ov_output(x)
    et = x.get_element_type()
    zero = get_ov_output(0.0, et)
    alpha_c = get_ov_output(alpha, et)
    one = get_ov_output(1.0, et)
    exp_term = ov_opset.exp(ov_opset.divide(x, alpha_c)).output(0)
    neg_branch = ov_opset.multiply(
        alpha_c, ov_opset.subtract(exp_term, one)
    )
    result = ov_opset.add(
        ov_opset.maximum(x, zero).output(0),
        ov_opset.minimum(neg_branch, zero).output(0),
    )
    return OpenVINOKerasTensor(result.output(0))
def sigmoid(x):
    """Logistic sigmoid."""
    node = ov_opset.sigmoid(get_ov_output(x))
    return OpenVINOKerasTensor(node.output(0))
def tanh(x):
    """Hyperbolic tangent."""
    node = ov_opset.tanh(get_ov_output(x))
    return OpenVINOKerasTensor(node.output(0))
def tanh_shrink(x):
    """Tanh-shrink: x - tanh(x)."""
    x = get_ov_output(x)
    node = ov_opset.subtract(x, ov_opset.tanh(x))
    return OpenVINOKerasTensor(node.output(0))
def hard_tanh(x):
    """Hard tanh: clamp(x, -1, 1)."""
    node = ov_opset.clamp(get_ov_output(x), -1.0, 1.0)
    return OpenVINOKerasTensor(node.output(0))
def soft_shrink(x, threshold=0.5):
    """Soft-shrink: sign(x) * max(|x| - threshold, 0)."""
    x = get_ov_output(x)
    et = x.get_element_type()
    shifted = ov_opset.subtract(
        ov_opset.abs(x), get_ov_output(threshold, et)
    )
    magnitude = ov_opset.maximum(shifted, get_ov_output(0.0, et))
    out = ov_opset.multiply(ov_opset.sign(x), magnitude)
    return OpenVINOKerasTensor(out.output(0))
def hard_shrink(x, threshold=0.5):
    """Hard-shrink: keep x where |x| > threshold, else 0."""
    x = get_ov_output(x)
    et = x.get_element_type()
    keep = ov_opset.greater(ov_opset.abs(x), get_ov_output(threshold, et))
    out = ov_opset.select(keep, x, get_ov_output(0.0, et))
    return OpenVINOKerasTensor(out.output(0))
def softplus(x):
    """Softplus: log(1 + exp(x))."""
    node = ov_opset.softplus(get_ov_output(x))
    return OpenVINOKerasTensor(node.output(0))
def softsign(x):
    """Softsign: x / (1 + |x|)."""
    node = ov_opset.softsign(get_ov_output(x))
    return OpenVINOKerasTensor(node.output(0))
def silu(x):
    """SiLU: x * sigmoid(x), i.e. swish with beta=1."""
    x = get_ov_output(x)
    one = get_ov_output(1.0, x.get_element_type())
    return OpenVINOKerasTensor(ov_opset.swish(x, beta=one).output(0))
def log_sigmoid(x):
    """log(sigmoid(x)), computed as -softplus(-x)."""
    x = get_ov_output(x)
    out = ov_opset.negative(ov_opset.softplus(ov_opset.negative(x)))
    return OpenVINOKerasTensor(out.output(0))
def leaky_relu(x, negative_slope=0.2):
    """Leaky ReLU expressed as PReLU with a fixed negative slope."""
    x = get_ov_output(x)
    slope = ov_opset.constant(negative_slope, x.get_element_type()).output(0)
    return OpenVINOKerasTensor(ov_opset.prelu(x, slope).output(0))
def sparse_sigmoid(x):
    """Sparse sigmoid: 0.5 * (clamp(x, -1, 1) + 1)."""
    x = get_ov_output(x)
    et = x.get_element_type()
    one = get_ov_output(1.0, et)
    clipped = ov_opset.minimum(
        ov_opset.maximum(x, get_ov_output(-1.0, et)), one
    )
    out = ov_opset.multiply(
        get_ov_output(0.5, et), ov_opset.add(clipped, one)
    )
    return OpenVINOKerasTensor(out.output(0))
def hard_sigmoid(x):
    """Hard sigmoid with alpha=1/6 and beta=0.5."""
    x = get_ov_output(x)
    et = x.get_element_type()
    node = ov_opset.hard_sigmoid(
        x, get_ov_output(1.0 / 6.0, et), get_ov_output(0.5, et)
    )
    return OpenVINOKerasTensor(node.output(0))
def hard_silu(x):
    """Hard SiLU: x * hard_sigmoid(x)."""
    gate = get_ov_output(hard_sigmoid(x))
    x = get_ov_output(x)
    return OpenVINOKerasTensor(ov_opset.multiply(x, gate).output(0))
def elu(x, alpha=1.0):
    """Exponential linear unit with the given alpha."""
    node = ov_opset.elu(get_ov_output(x), alpha)
    return OpenVINOKerasTensor(node.output(0))
def selu(x):
    """Scaled ELU with the canonical self-normalizing constants."""
    x = get_ov_output(x)
    et = x.get_element_type()
    alpha = get_ov_output(1.6732632423543772848170429916717, et)
    scale = get_ov_output(1.0507009873554804934193349852946, et)
    return OpenVINOKerasTensor(ov_opset.selu(x, alpha, scale).output(0))
def gelu(x, approximate=True):
    """GELU; tanh approximation when `approximate`, exact erf otherwise."""
    mode = "tanh" if approximate else "erf"
    return OpenVINOKerasTensor(ov_opset.gelu(get_ov_output(x), mode).output(0))
def softmax(x, axis=-1):
    """Softmax over `axis`; `axis=None` applies it over all elements."""
    x = get_ov_output(x)
    if axis is not None:
        return OpenVINOKerasTensor(ov_opset.softmax(x, axis).output(0))
    # Flatten, softmax over the single axis, then restore the shape.
    orig_shape = ov_opset.shape_of(x)
    flat_pattern = ov_opset.constant([-1], Type.i32).output(0)
    flat = ov_opset.reshape(x, flat_pattern, False).output(0)
    sm = ov_opset.softmax(flat, 0).output(0)
    return OpenVINOKerasTensor(
        ov_opset.reshape(sm, orig_shape, False).output(0)
    )
def log_softmax(x, axis=-1):
    """Log-softmax along `axis`; `axis=None` flattens, then restores shape."""
    node = get_ov_output(x)
    if axis is not None:
        return OpenVINOKerasTensor(ov_opset.log_softmax(node, axis).output(0))
    original_shape = ov_opset.shape_of(node)
    minus_one = ov_opset.constant([-1], Type.i32).output(0)
    flat = ov_opset.reshape(node, minus_one, False).output(0)
    flat_result = ov_opset.log_softmax(flat, 0).output(0)
    restored = ov_opset.reshape(flat_result, original_shape, False).output(0)
    return OpenVINOKerasTensor(restored)
def squareplus(x, b=4):
    """Squareplus: (x + sqrt(x^2 + b)) / 2, a smooth ReLU approximation."""
    node = get_ov_output(x)
    dtype = node.get_element_type()
    b_const = get_ov_output(b, dtype)
    two = get_ov_output(2.0, dtype)
    sqrt_term = ov_opset.sqrt(
        ov_opset.add(ov_opset.multiply(node, node), b_const)
    )
    result = ov_opset.divide(ov_opset.add(node, sqrt_term), two)
    return OpenVINOKerasTensor(result.output(0))
def sparse_plus(x):
    """Sparse-plus: 0 for x <= -1, ((x + 1)^2) / 4 for -1 < x < 1, else x."""
    node = get_ov_output(x)
    dtype = node.get_element_type()
    one = get_ov_output(1.0, dtype)
    minus_one = get_ov_output(-1.0, dtype)
    zero = get_ov_output(0.0, dtype)
    quarter = get_ov_output(0.25, dtype)
    shifted = ov_opset.add(node, one)
    parabola = ov_opset.multiply(quarter, ov_opset.multiply(shifted, shifted))
    below_range = ov_opset.less_equal(node, minus_one)
    inside_range = ov_opset.less(node, one)
    # Inner select picks parabola vs identity; outer zeroes the left tail.
    middle_or_linear = ov_opset.select(inside_range, parabola, node)
    result = ov_opset.select(below_range, zero, middle_or_linear)
    return OpenVINOKerasTensor(result.output(0))
def threshold(x, threshold, default_value):
    """Keep x where x > threshold, otherwise substitute default_value."""
    node = get_ov_output(x)
    dtype = node.get_element_type()
    limit = get_ov_output(threshold, dtype)
    fallback = get_ov_output(default_value, dtype)
    mask = ov_opset.greater(node, limit)
    return OpenVINOKerasTensor(ov_opset.select(mask, node, fallback).output(0))
def max_pool(
    inputs,
    pool_size,
    strides=None,
    padding="valid",
    data_format=None,
):
    """Max pooling, delegated to `_pool` with the OpenVINO `max_pool` op."""
    spatial_rank = (
        get_ov_output(inputs).get_partial_shape().rank.get_length() - 2
    )
    return _pool(
        inputs,
        pool_size,
        ov_opset.max_pool,
        strides,
        padding,
        data_format,
        # OpenVINO's max_pool requires explicit dilations.
        dilations=[1] * spatial_rank,
    )
def average_pool(
    inputs,
    pool_size,
    strides=None,
    padding="valid",
    data_format=None,
):
    """Average pooling, delegated to `_pool` with the OpenVINO `avg_pool` op.

    Padded positions are excluded from the average (`exclude_pad=True`).
    """
    return _pool(
        inputs,
        pool_size,
        ov_opset.avg_pool,
        strides,
        padding,
        data_format,
        exclude_pad=True,
    )
def adaptive_average_pool(inputs, output_size, data_format=None):
    """Adaptive average pooling - OpenVINO backend not yet supported."""
    message = "Adaptive pooling not implemented for OpenVINO."
    raise NotImplementedError(message)
def adaptive_max_pool(inputs, output_size, data_format=None):
    """Adaptive max pooling - OpenVINO backend not yet supported."""
    message = "Adaptive pooling not implemented for OpenVINO."
    raise NotImplementedError(message)
def _pool(
    inputs,
    pool_size,
    pooling_func,
    strides=None,
    padding="valid",
    data_format=None,
    **kwargs,
):
    """Shared driver for max/average pooling.

    Converts the input to channels-first layout, applies `pooling_func`
    (an `ov_opset` pooling op) with normalized window/stride/padding
    arguments, and converts the result back to the caller's data format.

    Args:
        inputs: Input tensor (anything accepted by `get_ov_output`).
        pool_size: Int or per-spatial-dim sequence of window sizes.
        pooling_func: `ov_opset.max_pool` or `ov_opset.avg_pool`.
        strides: Int or sequence; defaults to `pool_size` when None.
        padding: `"valid"`, `"same"`, `"same_lower"`, or explicit
            (begin, end) pairs — see `_adjust_padding`.
        data_format: `"channels_last"`/`"channels_first"`; standardized
            when None.
        **kwargs: Extra op-specific arguments (e.g. `dilations`,
            `exclude_pad`) forwarded verbatim to `pooling_func`.

    Returns:
        `OpenVINOKerasTensor` holding the pooled result.
    """
    data_format = backend.standardize_data_format(data_format)
    inputs = get_ov_output(inputs)
    # Rank minus batch and channel dims gives the spatial rank.
    num_spatial_dims = inputs.get_partial_shape().rank.get_length() - 2
    if isinstance(pool_size, int):
        pool_size = [pool_size] * num_spatial_dims
    if strides is None:
        # Keras semantics: strides default to the pool window size.
        strides = pool_size
    strides = _adjust_strides_dilation(strides, num_spatial_dims)
    pad_mode, pads_begin, pads_end = _adjust_padding(padding)
    # OpenVINO pooling ops expect NCHW-style (channels-first) input.
    inputs = _adjust_input(inputs, num_spatial_dims, data_format)
    pool_kwargs = {
        "kernel_shape": pool_size,
        "strides": strides,
        "auto_pad": pad_mode,
        "pads_begin": pads_begin,
        "pads_end": pads_end,
        **kwargs,
    }
    pooled = pooling_func(inputs, **pool_kwargs).output(0)
    # Restore the caller's layout (no-op for channels_first).
    adjusted_pooled = _adjust_outputs(pooled, num_spatial_dims, data_format)
    return OpenVINOKerasTensor(adjusted_pooled)
def _adjust_strides_dilation(
x,
num_spatial_dims,
):
# Helper function that converts an operand to a spatial operand.
x = (x,) * num_spatial_dims if isinstance(x, int) else x
# OpenVINO expects input in NCHW layout
# x = [1, 1] + list(x)
x = list(x)
return x
def _adjust_padding(
padding,
):
padding = padding.lower() if isinstance(padding, str) else padding
if padding == "same":
return "SAME_UPPER", [], []
elif padding == "same_lower":
return "SAME_LOWER", [], []
elif padding == "valid":
return "VALID", [], []
pads_begin = []
pads_end = []
for padding_pair in padding:
pads_begin.append(padding_pair[0])
pads_end.append(padding_pair[1])
return "EXPLICIT", pads_begin, pads_end
def _adjust_input(inputs, num_spatial_dims, data_format):
    """Transpose channels_last input to the NCHW-style layout OpenVINO
    convolution/pooling ops expect; channels_first passes through."""
    if data_format == "channels_first":
        return inputs
    # N, C(last), then the spatial dims: e.g. [0, 3, 1, 2] for 2-D.
    perm = [0, num_spatial_dims + 1] + list(range(1, num_spatial_dims + 1))
    perm_const = ov_opset.constant(perm, Type.i32)
    return ov_opset.transpose(inputs, perm_const).output(0)
def _adjust_kernel(kernel, num_spatial_dims):
    """Transpose a Keras kernel (spatial..., C_in, C_out) to OpenVINO's
    (C_out, C_in, spatial...) layout."""
    # e.g. [3, 2, 0, 1] for 2-D: C_out, C_in, then the spatial dims.
    perm = [num_spatial_dims + 1, num_spatial_dims] + list(
        range(num_spatial_dims)
    )
    perm_const = ov_opset.constant(perm, Type.i32)
    return ov_opset.transpose(kernel, perm_const).output(0)
def _adjust_depthwise_kernel(kernel, num_spatial_dims):
    """Transpose a depthwise kernel (spatial..., C_in, ch_mult) to
    (C_in, ch_mult, spatial...) for OpenVINO group convolution."""
    # e.g. [2, 3, 0, 1] for 2-D: C_in, ch_mult, then the spatial dims.
    perm = [num_spatial_dims, num_spatial_dims + 1] + list(
        range(num_spatial_dims)
    )
    perm_const = ov_opset.constant(perm, Type.i32)
    return ov_opset.transpose(kernel, perm_const).output(0)
def _adjust_outputs(outputs, num_spatial_dims, data_format):
    """Transpose an NCHW-style result back to NHWC for channels_last
    callers; channels_first passes through untouched."""
    if data_format == "channels_first":
        return outputs
    # N, spatial dims, then C: e.g. [0, 2, 3, 1] for 2-D.
    perm = [0] + list(range(2, num_spatial_dims + 2)) + [1]
    perm_const = ov_opset.constant(perm, Type.i32)
    return ov_opset.transpose(outputs, perm_const).output(0)
def conv(
    inputs,
    kernel,
    strides=1,
    padding="valid",
    data_format=None,
    dilation_rate=1,
):
    """General N-D convolution (regular or grouped).

    When the input channel count exceeds the kernel's input channels,
    the op is treated as a grouped convolution and the kernel is
    reshaped to OpenVINO's (groups, C_out/groups, C_in/groups, spatial...)
    layout at graph-build time.

    Args:
        inputs: Input tensor, rank = 2 + num_spatial_dims.
        kernel: Kernel in Keras layout (spatial..., C_in, C_out).
        strides: Int or per-spatial-dim sequence.
        padding: `"valid"`, `"same"`, `"same_lower"`, or explicit pairs.
        data_format: `"channels_last"`/`"channels_first"`.
        dilation_rate: Int or per-spatial-dim sequence.

    Returns:
        `OpenVINOKerasTensor` in the caller's data format.
    """
    inputs = get_ov_output(inputs)
    kernel = get_ov_output(kernel)
    data_format = backend.standardize_data_format(data_format)
    num_spatial_dims = inputs.get_partial_shape().rank.get_length() - 2
    # Channel dim is last for channels_last, second otherwise.
    if data_format == "channels_last":
        inputs_in_channels = inputs.get_partial_shape()[
            2 + num_spatial_dims - 1
        ]
    else:
        inputs_in_channels = inputs.get_partial_shape()[1]
    kernel_in_channels = kernel.get_partial_shape()[-2]
    strides = _adjust_strides_dilation(strides, num_spatial_dims)
    dilation_rate = _adjust_strides_dilation(dilation_rate, num_spatial_dims)
    pad_mode, pads_begin, pads_end = _adjust_padding(padding)
    # Convert input to NCHW and kernel to (C_out, C_in, spatial...).
    inputs = _adjust_input(inputs, num_spatial_dims, data_format)
    kernel = _adjust_kernel(kernel, num_spatial_dims)
    num_groups = (
        inputs_in_channels.get_length() // kernel_in_channels.get_length()
    )
    if num_groups == 1:
        conv = ov_opset.convolution(
            inputs,
            kernel,
            strides,
            pads_begin,
            pads_end,
            dilation_rate,
            pad_mode,
        )
    else:
        # Grouped convolution: recompute the group count symbolically from
        # the (possibly dynamic) channel dims and reshape the filter to
        # (groups, C_out/groups, C_in/groups, spatial...).
        input_shape = ov_opset.shape_of(inputs).output(0)
        filter_shape = ov_opset.shape_of(kernel).output(0)
        zero_const = ov_opset.constant([0], Type.i32).output(0)
        one_const = ov_opset.constant([1], Type.i32).output(0)
        two_const = ov_opset.constant([2], Type.i32).output(0)
        input_cin = ov_opset.slice(
            input_shape, one_const, two_const, one_const
        ).output(0)
        filter_cin = ov_opset.slice(
            filter_shape, one_const, two_const, one_const
        ).output(0)
        num_groups = ov_opset.divide(input_cin, filter_cin).output(0)
        # reshape the filter based on the number of groups information
        # INT_MAX slice end = "to the end of the shape vector".
        int_max_const = ov_opset.constant([2**31 - 1], Type.i32).output(0)
        filter_cout = ov_opset.slice(
            filter_shape, zero_const, one_const, one_const
        ).output(0)
        filter_new_cout = ov_opset.divide(filter_cout, num_groups).output(0)
        shape_cin_xy = ov_opset.slice(
            filter_shape, one_const, int_max_const, one_const
        ).output(0)
        filter_new_shape = ov_opset.concat(
            [num_groups, filter_new_cout, shape_cin_xy], 0
        ).output(0)
        new_filter = ov_opset.reshape(kernel, filter_new_shape, False).output(0)
        conv = ov_opset.group_convolution(
            inputs,
            new_filter,
            strides,
            pads_begin,
            pads_end,
            dilation_rate,
            pad_mode,
        )
    # Restore the caller's layout (no-op for channels_first).
    conv = _adjust_outputs(conv.output(0), num_spatial_dims, data_format)
    return OpenVINOKerasTensor(conv)
def depthwise_conv(
    inputs,
    kernel,
    strides=1,
    padding="valid",
    data_format=None,
    dilation_rate=1,
):
    """Depthwise N-D convolution via OpenVINO group convolution.

    Only `channels_last` input is supported. The depthwise kernel
    (spatial..., C_in, ch_mult) is transposed to (C_in, ch_mult,
    spatial...) and then unsqueezed at dim 2 so each input channel
    forms its own convolution group.

    Returns:
        `OpenVINOKerasTensor` in channels_last layout.
    """
    inputs = get_ov_output(inputs)
    kernel = get_ov_output(kernel)
    data_format = backend.standardize_data_format(data_format)
    num_spatial_dims = inputs.get_partial_shape().rank.get_length() - 2
    assert data_format == "channels_last", (
        "`depthwise_conv` is supported only for channels_last data_format"
    )
    strides = _adjust_strides_dilation(strides, num_spatial_dims)
    dilation_rate = _adjust_strides_dilation(dilation_rate, num_spatial_dims)
    pad_mode, pads_begin, pads_end = _adjust_padding(padding)
    inputs = _adjust_input(inputs, num_spatial_dims, data_format)
    kernel = _adjust_depthwise_kernel(kernel, num_spatial_dims)
    # Insert a singleton "channels per group" dim: the kernel becomes
    # (groups=C_in, ch_mult, 1, spatial...), as group_convolution expects.
    unsqueeze_dim = ov_opset.constant([2], Type.i32)
    kernel = ov_opset.unsqueeze(kernel, unsqueeze_dim)
    group_conv = ov_opset.group_convolution(
        inputs, kernel, strides, pads_begin, pads_end, dilation_rate, pad_mode
    )
    group_conv = _adjust_outputs(
        group_conv.output(0), num_spatial_dims, data_format
    )
    return OpenVINOKerasTensor(group_conv)
def separable_conv(
    inputs,
    depthwise_kernel,
    pointwise_kernel,
    strides=1,
    padding="valid",
    data_format=None,
    dilation_rate=1,
):
    """Not implemented for the OpenVINO backend."""
    message = "`separable_conv` is not supported with openvino backend"
    raise NotImplementedError(message)
def conv_transpose(
    inputs,
    kernel,
    strides=1,
    padding="valid",
    output_padding=None,
    data_format=None,
    dilation_rate=1,
):
    """Not implemented for the OpenVINO backend."""
    message = "`conv_transpose` is not supported with openvino backend"
    raise NotImplementedError(message)
def one_hot(x, num_classes, axis=-1, dtype=None, sparse=False):
    """One-hot encode integer indices along `axis`.

    `sparse=True` is rejected; `dtype` defaults to the global floatx.
    """
    if sparse:
        raise ValueError("`sparse=True` is not supported with openvino backend")
    indices = get_ov_output(x)
    target_dtype = backend.floatx() if dtype is None else dtype
    ov_type = OPENVINO_DTYPES[target_dtype]
    encoded = ov_opset.one_hot(
        indices,
        depth=num_classes,
        axis=axis,
        on_value=get_ov_output(1, ov_type),
        off_value=get_ov_output(0, ov_type),
    ).output(0)
    return OpenVINOKerasTensor(encoded)
def multi_hot(x, num_classes, axis=-1, dtype=None, sparse=False):
    """Multi-hot encode: one-hot then collapse along axis 1 (0 for 1-D).

    Boolean dtypes collapse with logical OR, others with max.
    """
    collapse_axis = 1 if len(x.shape) > 1 else 0
    if backend.standardize_dtype(dtype) == "bool":
        encoded = one_hot(x, num_classes, axis=axis, dtype=dtype, sparse=sparse)
        reduced = ov_opset.reduce_logical_or(encoded, collapse_axis)
    else:
        encoded = one_hot(x, num_classes, axis=axis, dtype=dtype)
        reduced = ov_opset.reduce_max(encoded, collapse_axis)
    return OpenVINOKerasTensor(reduced.output(0))
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
    """Not implemented for the OpenVINO backend."""
    message = "`categorical_crossentropy` is not supported with openvino backend"
    raise NotImplementedError(message)
def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
    """Not implemented for the OpenVINO backend."""
    message = (
        "`sparse_categorical_crossentropy` is not supported "
        "with openvino backend"
    )
    raise NotImplementedError(message)
def binary_crossentropy(target, output, from_logits=False):
    """Not implemented for the OpenVINO backend."""
    message = "`binary_crossentropy` is not supported with openvino backend"
    raise NotImplementedError(message)
def moments(x, axes, keepdims=False, synchronized=False):
    """Return (mean, variance) over `axes`.

    Uses Var = E[x^2] - E[x]^2, which is a single pass and faster than
    the two-pass formula but less numerically stable.
    """
    node = get_ov_output(x)
    axes_const = ov_opset.constant(axes, Type.i32).output(0)
    mean_node = ov_opset.reduce_mean(node, axes_const, keepdims).output(0)
    two = ov_opset.constant(2, node.get_element_type()).output(0)
    second_moment = ov_opset.reduce_mean(
        ov_opset.power(node, two).output(0), axes_const, keepdims
    )
    mean_squared = ov_opset.power(mean_node, two).output(0)
    variance_node = ov_opset.subtract(second_moment, mean_squared).output(0)
    return OpenVINOKerasTensor(mean_node), OpenVINOKerasTensor(variance_node)
def batch_normalization(
    x, mean, variance, axis, offset=None, scale=None, epsilon=1e-3
):
    """Apply batch normalization to `x` along `axis`.

    Missing `offset`/`scale` default to broadcast zeros/ones matching the
    mean's shape. OpenVINO's `batch_norm_inference` normalizes over dim 1,
    so the channel axis is transposed into position 1 and back afterwards.

    Args:
        x: Input tensor.
        mean, variance: Per-channel statistics (1-D along `axis`).
        axis: Channel axis in `x` (may be negative).
        offset: Optional beta; defaults to zeros.
        scale: Optional gamma; defaults to ones.
        epsilon: Small constant added to variance for stability.

    Returns:
        `OpenVINOKerasTensor` with the normalized result.
    """
    x = get_ov_output(x)
    mean = get_ov_output(mean)
    variance = get_ov_output(variance)
    if offset is not None:
        offset = get_ov_output(offset)
    else:
        # No beta given: broadcast zeros to the mean's shape.
        mean_shape = ov_opset.shape_of(mean)
        mean_type = mean.get_element_type()
        zero_const = ov_opset.constant([0], mean_type)
        offset = ov_opset.broadcast(zero_const, mean_shape)
    if scale is not None:
        scale = get_ov_output(scale)
    else:
        # No gamma given: broadcast ones to the mean's shape.
        mean_shape = ov_opset.shape_of(mean)
        mean_type = mean.get_element_type()
        one_const = ov_opset.constant([1], mean_type)
        scale = ov_opset.broadcast(one_const, mean_shape)
    # adjust x input to have the second dimension representing the channel axis
    x_rank = x.get_partial_shape().rank.get_length()
    if axis < 0:
        axis += x_rank
    if axis != 1:
        # Permutation swaps dims 1 and `axis`; it is its own inverse, so
        # the same vector is used to transpose back below.
        perm_vector = list(range(0, x_rank))
        perm_vector[1] = axis
        perm_vector[axis] = 1
        perm_vector = ov_opset.constant(perm_vector, Type.i32).output(0)
        x = ov_opset.transpose(x, perm_vector).output(0)
    batch_norm = ov_opset.batch_norm_inference(
        x, scale, offset, mean, variance, epsilon
    ).output(0)
    if axis != 1:
        perm_vector = list(range(0, x_rank))
        perm_vector[1] = axis
        perm_vector[axis] = 1
        perm_vector = ov_opset.constant(perm_vector, Type.i32).output(0)
        batch_norm = ov_opset.transpose(batch_norm, perm_vector).output(0)
    return OpenVINOKerasTensor(batch_norm)
def ctc_loss(target, output, target_length, output_length, mask_index=0):
    """CTC loss; `mask_index` is used as the blank label index.

    The result is cast to the global floatx dtype.
    """
    labels = get_ov_output(target)
    logits = get_ov_output(output)
    label_len = get_ov_output(target_length)
    logit_len = get_ov_output(output_length)
    loss = ov_opset.ctc_loss(
        logits, logit_len, labels, label_len, blank_index=mask_index
    )
    loss = ov_opset.convert(loss, OPENVINO_DTYPES[backend.floatx()])
    return OpenVINOKerasTensor(loss.output(0))
def ctc_decode(
    inputs,
    sequence_lengths,
    strategy="greedy",
    beam_width=100,
    top_paths=1,
    merge_repeated=True,
    mask_index=0,
):
    """Not implemented for the OpenVINO backend."""
    message = "`ctc_decode` is not supported with openvino backend"
    raise NotImplementedError(message)
def psnr(x1, x2, max_val):
    """Peak signal-to-noise ratio: 20*log10(max_val) - 10*log10(mse)."""
    from keras.src.backend.openvino.numpy import log10

    a = get_ov_output(x1)
    b = get_ov_output(x2)
    peak = get_ov_output(max_val, a.get_element_type())
    delta = ov_opset.subtract(a, b)
    squared = ov_opset.multiply(delta, delta)
    # Mean over every axis: full reduction to a scalar MSE.
    all_axes = list(range(0, a.get_partial_shape().rank.get_length()))
    mse = ov_opset.reduce_mean(squared, all_axes).output(0)
    log_peak = get_ov_output(log10(OpenVINOKerasTensor(peak)))
    log_err = get_ov_output(log10(OpenVINOKerasTensor(mse)))
    twenty = ov_opset.constant(20, log_peak.get_element_type())
    ten = ov_opset.constant(10, log_err.get_element_type())
    result = ov_opset.subtract(
        ov_opset.multiply(twenty, log_peak),
        ov_opset.multiply(ten, log_err),
    ).output(0)
    return OpenVINOKerasTensor(result)
def dot_product_attention(
    query,
    key,
    value,
    bias=None,
    mask=None,
    scale=None,
    is_causal=False,
    flash_attention=None,
    attn_logits_soft_cap=None,
):
    """Scaled dot-product attention via OpenVINO's SDPA op.

    `bias`, `flash_attention`, and `attn_logits_soft_cap` are not
    supported and raise NotImplementedError. The fixed [0, 2, 1, 3]
    transposes assume rank-4 inputs laid out as
    (batch, seq, heads, head_dim) — TODO confirm against callers —
    swapping seq and heads for the op and back for the result.

    Returns:
        `OpenVINOKerasTensor` with the attention output in the input
        layout.
    """
    if bias is not None:
        raise NotImplementedError(
            "`dot_product_attention` with `bias` is not supported "
            "with openvino backend"
        )
    if flash_attention:
        raise NotImplementedError(
            "`dot_product_attention` with `flash_attention` is not supported "
            "with openvino backend"
        )
    if attn_logits_soft_cap is not None:
        raise NotImplementedError(
            "`dot_product_attention` with `attn_logits_soft_cap` is not "
            "supported with openvino backend"
        )
    query = get_ov_output(query)
    key = get_ov_output(key)
    value = get_ov_output(value)
    # Align query/key dtypes (falling back to floatx), then match value.
    if query.get_element_type() != key.get_element_type():
        ov_type = OPENVINO_DTYPES[backend.floatx()]
        query = ov_opset.convert(query, ov_type).output(0)
        key = ov_opset.convert(key, ov_type).output(0)
    if value.get_element_type() != query.get_element_type():
        value = ov_opset.convert(value, query.get_element_type()).output(0)
    axes_const = ov_opset.constant([0, 2, 1, 3], Type.i32).output(0)
    query = ov_opset.transpose(query, axes_const)
    key = ov_opset.transpose(key, axes_const)
    value = ov_opset.transpose(value, axes_const)
    mask = get_ov_output(mask) if mask is not None else None
    scale = (
        get_ov_output(scale, query.get_element_type())
        if scale is not None
        else None
    )
    dpa = ov_opset.scaled_dot_product_attention(
        query, key, value, attention_mask=mask, scale=scale, causal=is_causal
    )
    # The [0, 2, 1, 3] permutation is its own inverse.
    dpa = ov_opset.transpose(dpa, axes_const)
    return OpenVINOKerasTensor(dpa.output(0))
def unfold(input, kernel_size, dilation=1, padding=0, stride=1):
    """Not implemented for the OpenVINO backend."""
    message = "`unfold` is not supported with openvino backend"
    raise NotImplementedError(message)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/openvino/linalg.py | keras/src/backend/openvino/linalg.py | def cholesky(a, upper=False):
raise NotImplementedError(
"`cholesky` is not supported with openvino backend."
)
def cholesky_inverse(a, upper=False):
    """Not implemented for the OpenVINO backend."""
    message = "`cholesky_inverse` is not supported with openvino backend."
    raise NotImplementedError(message)
def det(a):
    """Not implemented for the OpenVINO backend."""
    message = "`det` is not supported with openvino backend"
    raise NotImplementedError(message)
def eig(a):
    """Not implemented for the OpenVINO backend."""
    message = "`eig` is not supported with openvino backend"
    raise NotImplementedError(message)
def eigh(a):
    """Not implemented for the OpenVINO backend."""
    message = "`eigh` is not supported with openvino backend"
    raise NotImplementedError(message)
def inv(a):
    """Not implemented for the OpenVINO backend."""
    message = "`inv` is not supported with openvino backend"
    raise NotImplementedError(message)
def lu_factor(a):
    """Not implemented for the OpenVINO backend."""
    message = "`lu_factor` is not supported with openvino backend"
    raise NotImplementedError(message)
def norm(x, ord=None, axis=None, keepdims=False):
    """Not implemented for the OpenVINO backend."""
    message = "`norm` is not supported with openvino backend"
    raise NotImplementedError(message)
def qr(x, mode="reduced"):
    """Not implemented for the OpenVINO backend."""
    message = "`qr` is not supported with openvino backend"
    raise NotImplementedError(message)
def solve(a, b):
    """Not implemented for the OpenVINO backend."""
    message = "`solve` is not supported with openvino backend"
    raise NotImplementedError(message)
def solve_triangular(a, b, lower=False):
    """Not implemented for the OpenVINO backend."""
    message = "`solve_triangular` is not supported with openvino backend"
    raise NotImplementedError(message)
def svd(x, full_matrices=True, compute_uv=True):
    """Not implemented for the OpenVINO backend."""
    message = "`svd` is not supported with openvino backend"
    raise NotImplementedError(message)
def lstsq(a, b, rcond=None):
    """Not implemented for the OpenVINO backend."""
    message = "`lstsq` is not supported with openvino backend"
    raise NotImplementedError(message)
def jvp(fun, primals, tangents, has_aux=False):
    """Not implemented for the OpenVINO backend."""
    message = "`jvp` is not supported with openvino backend"
    raise NotImplementedError(message)
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/openvino/numpy.py | keras/src/backend/openvino/numpy.py | import numpy as np
import openvino.opset14 as ov_opset
from openvino import Type
from keras.src.backend import config
from keras.src.backend.common import dtypes
from keras.src.backend.common.variables import standardize_dtype
from keras.src.backend.openvino.core import DTYPES_MAX
from keras.src.backend.openvino.core import DTYPES_MIN
from keras.src.backend.openvino.core import OPENVINO_DTYPES
from keras.src.backend.openvino.core import OpenVINOKerasTensor
from keras.src.backend.openvino.core import (
align_operand_types as _align_operand_types,
)
from keras.src.backend.openvino.core import convert_to_tensor
from keras.src.backend.openvino.core import get_ov_output
from keras.src.backend.openvino.core import ov_to_keras_type
def add(x1, x2):
    """Elementwise addition with operand type alignment."""
    # Prefer the element type of a real graph tensor when one is present.
    element_type = None
    for operand in (x1, x2):
        if isinstance(operand, OpenVINOKerasTensor):
            element_type = operand.output.get_element_type()
    lhs = get_ov_output(x1, element_type)
    rhs = get_ov_output(x2, element_type)
    lhs, rhs = _align_operand_types(lhs, rhs, "add()")
    return OpenVINOKerasTensor(ov_opset.add(lhs, rhs).output(0))
def einsum(subscripts, *operands, **kwargs):
    """Einstein summation over the given operands."""
    nodes = [get_ov_output(operand) for operand in operands]
    return OpenVINOKerasTensor(ov_opset.einsum(nodes, subscripts).output(0))
def subtract(x1, x2):
    """Elementwise subtraction; boolean operands use logical XOR."""
    element_type = None
    for operand in (x1, x2):
        if isinstance(operand, OpenVINOKerasTensor):
            element_type = operand.output.get_element_type()
    lhs = get_ov_output(x1, element_type)
    rhs = get_ov_output(x2, element_type)
    lhs, rhs = _align_operand_types(lhs, rhs, "subtract()")
    if lhs.get_element_type() == Type.boolean:
        # Boolean subtraction has no OV op; XOR matches the semantics.
        return OpenVINOKerasTensor(ov_opset.logical_xor(lhs, rhs).output(0))
    return OpenVINOKerasTensor(ov_opset.subtract(lhs, rhs).output(0))
def matmul(x1, x2):
    """Matrix product (no transposition of either operand)."""
    element_type = None
    for operand in (x1, x2):
        if isinstance(operand, OpenVINOKerasTensor):
            element_type = operand.output.get_element_type()
    lhs = get_ov_output(x1, element_type)
    rhs = get_ov_output(x2, element_type)
    lhs, rhs = _align_operand_types(lhs, rhs, "matmul()")
    product = ov_opset.matmul(lhs, rhs, False, False).output(0)
    return OpenVINOKerasTensor(product)
def multiply(x1, x2):
    """Elementwise multiplication with operand type alignment."""
    element_type = None
    for operand in (x1, x2):
        if isinstance(operand, OpenVINOKerasTensor):
            element_type = operand.output.get_element_type()
    lhs = get_ov_output(x1, element_type)
    rhs = get_ov_output(x2, element_type)
    lhs, rhs = _align_operand_types(lhs, rhs, "multiply()")
    return OpenVINOKerasTensor(ov_opset.multiply(lhs, rhs).output(0))
def mean(x, axis=None, keepdims=False):
    """Arithmetic mean over `axis`.

    Integer inputs are promoted to floatx before averaging. When `axis`
    is None the input is flattened and fully reduced; with
    `keepdims=True` the result is then reshaped back to rank-many
    singleton dimensions.

    Args:
        x: Input tensor.
        axis: None, int, or tuple of ints. An empty tuple means no
            reduction.
        keepdims: Keep reduced dimensions with size 1.

    Returns:
        `OpenVINOKerasTensor` with the mean.
    """
    x_ov = get_ov_output(x)
    x_type = x_ov.get_element_type()
    was_axis_none = axis is None
    x_resolved, axis_resolved = _resolve_axis(x_ov, axis)
    if axis_resolved is None:
        # Empty axis spec: nothing to reduce.
        return OpenVINOKerasTensor(x_ov)
    if x_type.is_integral():
        ov_type = OPENVINO_DTYPES[config.floatx()]
        x_resolved = ov_opset.convert(x_resolved, ov_type).output(0)
    result = ov_opset.reduce_mean(x_resolved, axis_resolved, keepdims).output(0)
    if keepdims and was_axis_none:
        # `_resolve_axis` flattened the input, so restore an all-singleton
        # shape of the original rank. Read the rank from the converted node
        # `x_ov` (not the raw `x` argument, which may be a plain array or
        # list without `get_partial_shape`).
        rank = x_ov.get_partial_shape().rank.get_length()
        result_shape = [1] * rank
        result = ov_opset.reshape(
            result,
            ov_opset.constant(result_shape, Type.i32).output(0),
            False,
        ).output(0)
    return OpenVINOKerasTensor(result)
def max(x, axis=None, keepdims=False, initial=None):
    """Reduce-max over `axis`; see `_compute_extrema` for details."""
    return _compute_extrema(
        x, operation="max", axis=axis, keepdims=keepdims, initial=initial
    )
def _compute_extrema(x, operation, axis=None, keepdims=False, initial=None):
    """Shared implementation of `min`/`max` reductions.

    Boolean inputs are computed in int32 and converted back at the end.
    `initial` is folded in elementwise after the reduction. With
    `axis=None` and `keepdims=True` the result is reshaped to an
    all-singleton shape of the original rank.

    Args:
        x: Input tensor.
        operation: `"min"` or `"max"`.
        axis: None, int, or tuple of ints (empty tuple: no reduction).
        keepdims: Keep reduced dimensions with size 1.
        initial: Optional lower/upper bound merged into the result.

    Raises:
        ValueError: If `operation` is not `"min"` or `"max"`.
    """
    if operation == "min":
        reduction_op = ov_opset.reduce_min
        elementwise_op = ov_opset.minimum
    elif operation == "max":
        reduction_op = ov_opset.reduce_max
        elementwise_op = ov_opset.maximum
    else:
        raise ValueError(
            f"Operation must be 'min' or 'max', received {operation}"
        )
    x = get_ov_output(x)
    x_type = x.get_element_type()
    # Keep the pre-flattening node so the original rank can be recovered.
    x_for_rank = x
    is_bool = x_type == Type.boolean
    if is_bool:
        # OV min/max reductions don't operate on booleans; use int32.
        x = ov_opset.convert(x, Type.i32).output(0)
        x_type = Type.i32
    if isinstance(axis, tuple) and len(axis) == 0:
        # Empty axis tuple: nothing to reduce.
        return OpenVINOKerasTensor(x)
    was_axis_none = axis is None
    x, axis = _resolve_axis(x, axis)
    result = reduction_op(x, axis, keepdims).output(0)
    if initial is not None:
        initial_tensor = ov_opset.constant(initial, x_type).output(0)
        result = elementwise_op(result, initial_tensor).output(0)
    if keepdims and was_axis_none:
        # Build an all-ones shape of the original (possibly dynamic) rank:
        # shape_of(shape_of(x)) yields [rank], broadcast 1 over it.
        orig_shape = ov_opset.shape_of(x_for_rank, Type.i32).output(0)
        orig_rank_shape = ov_opset.shape_of(orig_shape, Type.i32).output(0)
        one = ov_opset.constant(1, Type.i32).output(0)
        result_shape = ov_opset.broadcast(one, orig_rank_shape).output(0)
        result = ov_opset.reshape(result, result_shape, False).output(0)
    if is_bool:
        result = ov_opset.convert(result, Type.boolean).output(0)
    return OpenVINOKerasTensor(result)
def ones(shape, dtype=None):
    """Tensor of ones with the given shape; dtype defaults to floatx."""
    dtype = standardize_dtype(dtype) or config.floatx()
    fill = ov_opset.constant(1, OPENVINO_DTYPES[dtype]).output(0)
    if isinstance(shape, int):
        shape = [shape]
    elif isinstance(shape, tuple):
        shape = list(shape)
    target_shape = ov_opset.constant(shape, dtype=Type.i32).output(0)
    return OpenVINOKerasTensor(ov_opset.broadcast(fill, target_shape).output(0))
def zeros(shape, dtype=None):
    """Tensor of zeros with the given shape; dtype defaults to floatx."""
    dtype = standardize_dtype(dtype) or config.floatx()
    fill = ov_opset.constant(0, dtype=OPENVINO_DTYPES[dtype]).output(0)
    if isinstance(shape, int):
        shape = [shape]
    elif isinstance(shape, tuple):
        shape = list(shape)
    target_shape = ov_opset.constant(shape, dtype=Type.i32).output(0)
    return OpenVINOKerasTensor(ov_opset.broadcast(fill, target_shape).output(0))
def absolute(x):
    """Elementwise |x|; boolean tensors are returned unchanged."""
    node = get_ov_output(x)
    if node.get_element_type() == Type.boolean:
        return OpenVINOKerasTensor(node)
    return OpenVINOKerasTensor(ov_opset.absolute(node).output(0))
def abs(x):
    """Alias of `absolute` (matching NumPy, where `abs` is `absolute`).

    Delegating keeps boolean handling consistent: `absolute` passes
    boolean tensors through, whereas calling `ov_opset.absolute` on them
    directly would not.
    """
    return absolute(x)
def all(x, axis=None, keepdims=False):
    """Logical AND reduction over `axis` (flattens when axis is None)."""
    node = get_ov_output(x)
    node, reduce_axes = _resolve_axis(node, axis)
    if reduce_axes is None:
        # Empty axis spec: nothing to reduce.
        return OpenVINOKerasTensor(node)
    as_bool = ov_opset.convert(node, Type.boolean).output(0)
    reduced = ov_opset.reduce_logical_and(as_bool, reduce_axes, keepdims)
    return OpenVINOKerasTensor(reduced.output(0))
def angle(x):
    """Not implemented for the OpenVINO backend."""
    message = "`angle` is not supported with openvino backend"
    raise NotImplementedError(message)
def any(x, axis=None, keepdims=False):
    """Logical OR reduction over `axis` (flattens when axis is None)."""
    node = get_ov_output(x)
    node, reduce_axes = _resolve_axis(node, axis)
    if reduce_axes is None:
        # Empty axis spec: nothing to reduce.
        return OpenVINOKerasTensor(node)
    as_bool = ov_opset.convert(node, Type.boolean).output(0)
    reduced = ov_opset.reduce_logical_or(as_bool, reduce_axes, keepdims)
    return OpenVINOKerasTensor(reduced.output(0))
def amax(x, axis=None, keepdims=False):
    """Reduce-max over `axis`; boolean inputs reduce with logical OR."""
    node = get_ov_output(x)
    is_bool = node.get_element_type() == Type.boolean
    node, reduce_axes = _resolve_axis(node, axis)
    if reduce_axes is None:
        return OpenVINOKerasTensor(node)
    if is_bool:
        reduced = ov_opset.reduce_logical_or(node, reduce_axes, keepdims)
    else:
        reduced = ov_opset.reduce_max(node, reduce_axes, keepdims)
    return OpenVINOKerasTensor(reduced.output(0))
def amin(x, axis=None, keepdims=False):
    """Reduce-min over `axis`; boolean inputs reduce with logical AND."""
    node = get_ov_output(x)
    is_bool = node.get_element_type() == Type.boolean
    node, reduce_axes = _resolve_axis(node, axis)
    if reduce_axes is None:
        return OpenVINOKerasTensor(node)
    if is_bool:
        reduced = ov_opset.reduce_logical_and(node, reduce_axes, keepdims)
    else:
        reduced = ov_opset.reduce_min(node, reduce_axes, keepdims)
    return OpenVINOKerasTensor(reduced.output(0))
def _resolve_axis(x, axis):
    """Normalize an axis spec into an i32 constant node.

    Returns `(x, None)` for an empty tuple/list (meaning no reduction);
    for `axis=None` flattens `x` and reduces over axis 0.
    """
    if axis == () or axis == []:
        return x, None
    if axis is None:
        minus_one = ov_opset.constant([-1], Type.i32).output(0)
        x = ov_opset.reshape(x, minus_one, False).output(0)
        axis = 0
    elif isinstance(axis, tuple):
        axis = list(axis)
    return x, ov_opset.constant(axis, Type.i32).output(0)
def _upcast_type_if_needed(x):
    """Promote bool and sub-32-bit integer tensors to 32-bit width."""
    x_type = x.get_element_type()
    if x_type in (Type.boolean, Type.i8, Type.i16):
        return ov_opset.convert(x, Type.i32).output(0)
    if x_type in (Type.u8, Type.u16):
        return ov_opset.convert(x, Type.u32).output(0)
    return x
def append(x1, x2, axis=None):
    """Concatenate x2 after x1 along `axis` (both flattened when None)."""
    lhs, rhs = get_ov_output(x1), get_ov_output(x2)
    lhs, rhs = _align_operand_types(lhs, rhs, "append()")
    if axis is None:
        minus_one = ov_opset.constant([-1], Type.i32).output(0)
        lhs = ov_opset.reshape(lhs, minus_one, False).output(0)
        rhs = ov_opset.reshape(rhs, minus_one, False).output(0)
        axis = 0
    return OpenVINOKerasTensor(ov_opset.concat([lhs, rhs], axis).output(0))
def arange(start, stop=None, step=None, dtype=None):
    """Evenly spaced values in [start, stop) with the given step.

    With one argument, `arange(stop)` counts from 0. When `dtype` is
    None, the result type is inferred from the operands (at least int32).
    """
    if stop is None:
        # Single-argument form: arange(stop).
        start, stop = get_ov_output(0), get_ov_output(start)
    else:
        start, stop = get_ov_output(start), get_ov_output(stop)
    step = get_ov_output(1) if step is None else get_ov_output(step)
    if dtype is not None:
        ov_type = OPENVINO_DTYPES[standardize_dtype(dtype)]
    else:
        inferred = dtypes.result_type(
            ov_to_keras_type(start.get_element_type()),
            ov_to_keras_type(stop.get_element_type()),
            ov_to_keras_type(step.get_element_type()),
            "int32",
        )
        ov_type = OPENVINO_DTYPES[inferred]
    start_node = ov_opset.convert(start, ov_type)
    stop_node = ov_opset.convert(stop, ov_type)
    step_node = ov_opset.convert(step, ov_type)
    return OpenVINOKerasTensor(
        ov_opset.range(start_node, stop_node, step_node, ov_type).output(0)
    )
def arccos(x):
    """Elementwise inverse cosine; integer inputs are cast to floatx."""
    node = get_ov_output(x)
    if node.get_element_type().is_integral():
        node = ov_opset.convert(node, OPENVINO_DTYPES[config.floatx()])
    return OpenVINOKerasTensor(ov_opset.acos(node).output(0))
def arccosh(x):
    """Elementwise inverse hyperbolic cosine; ints are cast to floatx."""
    node = get_ov_output(x)
    if node.get_element_type().is_integral():
        node = ov_opset.convert(node, OPENVINO_DTYPES[config.floatx()])
    return OpenVINOKerasTensor(ov_opset.acosh(node).output(0))
def arcsin(x):
    """Elementwise inverse sine; integer inputs are cast to floatx."""
    node = get_ov_output(x)
    if node.get_element_type().is_integral():
        node = ov_opset.convert(node, OPENVINO_DTYPES[config.floatx()])
    return OpenVINOKerasTensor(ov_opset.asin(node).output(0))
def arcsinh(x):
    """Elementwise inverse hyperbolic sine; ints are cast to floatx."""
    node = get_ov_output(x)
    if node.get_element_type().is_integral():
        node = ov_opset.convert(node, OPENVINO_DTYPES[config.floatx()])
    return OpenVINOKerasTensor(ov_opset.asinh(node).output(0))
def arctan(x):
    """Elementwise inverse tangent; integer inputs are cast to floatx."""
    node = get_ov_output(x)
    if node.get_element_type().is_integral():
        node = ov_opset.convert(node, OPENVINO_DTYPES[config.floatx()])
    return OpenVINOKerasTensor(ov_opset.atan(node).output(0))
def arctan2(x1, x2):
    """Elementwise arctan(x1 / x2) with quadrant correction.

    Emulates `atan2` with `atan` plus nested selects: the raw
    `atan(x1/x2)` is correct for x2 > 0, shifted by +/- pi for x2 < 0,
    and replaced by +/- pi/2 (or 0) on the x2 == 0 axis. Operands are
    promoted to a common floating result type first.
    """
    x1 = get_ov_output(x1)
    x2 = get_ov_output(x2)
    x1_type = ov_to_keras_type(x1.get_element_type())
    x2_type = ov_to_keras_type(x2.get_element_type())
    result_type = dtypes.result_type(x1_type, x2_type, float)
    result_type = OPENVINO_DTYPES[result_type]
    x1 = ov_opset.convert(x1, result_type)
    x2 = ov_opset.convert(x2, result_type)
    # Raw angle; division by zero handled by the selects below.
    x = ov_opset.divide(x1, x2)
    y = ov_opset.atan(x)
    ov_type = x1.get_element_type()
    pi = ov_opset.constant(float(np.pi), ov_type)
    half_pi = ov_opset.constant(float(np.pi / 2), ov_type)
    neg_half_pi = ov_opset.constant(-float(np.pi / 2), ov_type)
    zero_const = ov_opset.constant(0.0, ov_type)
    cond_x2_gt0 = ov_opset.greater(x2, zero_const).output(0)
    cond_x2_lt0 = ov_opset.less(x2, zero_const).output(0)
    cond_x1_ge0 = ov_opset.greater_equal(x1, zero_const).output(0)
    cond_x1_gt0 = ov_opset.greater(x1, zero_const).output(0)
    cond_x1_eq0 = ov_opset.equal(x1, zero_const).output(0)
    # x2 < 0: shift into the correct quadrant (+pi above axis, -pi below).
    out_x2_lt0 = ov_opset.select(
        cond_x1_ge0,
        ov_opset.add(y, pi),
        ov_opset.subtract(y, pi),
    )
    # x2 == 0: +pi/2, -pi/2, or 0 depending on the sign of x1.
    out_x1_zero = ov_opset.select(cond_x1_eq0, zero_const, neg_half_pi)
    out_x2_zero = ov_opset.select(cond_x1_gt0, half_pi, out_x1_zero)
    out_not_pos = ov_opset.select(cond_x2_lt0, out_x2_lt0, out_x2_zero)
    # x2 > 0: atan(x1/x2) is already correct.
    final_out = ov_opset.select(cond_x2_gt0, y, out_not_pos)
    return OpenVINOKerasTensor(final_out.output(0))
def arctanh(x):
    """Elementwise inverse hyperbolic tangent; ints are cast to floatx."""
    node = get_ov_output(x)
    if node.get_element_type().is_integral():
        node = ov_opset.convert(node, OPENVINO_DTYPES[config.floatx()])
    return OpenVINOKerasTensor(ov_opset.atanh(node).output(0))
def argmax(x, axis=None, keepdims=False):
    """Index of the maximum value along `axis` (int32 result).

    Implemented via top-k with k=1. Rank-0 inputs return index [0].
    With `axis=None` the input is flattened (keeping the original rank
    as trailing singleton dims so `keepdims` works) and reduced over
    axis 0.
    """
    x = get_ov_output(x)
    x_shape = x.get_partial_shape()
    rank = x_shape.rank.get_length()
    if rank == 0:
        # A scalar has a single element at index 0.
        return OpenVINOKerasTensor(ov_opset.constant([0], Type.i32).output(0))
    if axis is None:
        # Flatten into dim 0, padding with singleton dims to keep rank.
        flatten_shape = ov_opset.constant(
            [-1] + [1] * (rank - 1), Type.i32
        ).output(0)
        x = ov_opset.reshape(x, flatten_shape, False).output(0)
        axis = 0
        k = ov_opset.constant(1, Type.i32).output(0)
    else:
        if axis < 0:
            axis = rank + axis
        k = ov_opset.constant(1, Type.i32).output(0)
    # stable=True: ties resolve to the first occurrence, as in NumPy.
    topk_outputs = ov_opset.topk(
        x,
        k=k,
        axis=axis,
        mode="max",
        sort="value",
        stable=True,
        index_element_type=Type.i32,
    )
    # Output 1 of topk holds the indices.
    topk_indices = topk_outputs.output(1)
    if not keepdims:
        topk_indices = ov_opset.squeeze(topk_indices, [axis]).output(0)
    return OpenVINOKerasTensor(topk_indices)
def argmin(x, axis=None, keepdims=False):
    """Index of the minimum value along `axis` (int32 result).

    Mirrors `argmax` but runs top-k in "min" mode. Rank-0 inputs return
    index [0]; `axis=None` flattens while preserving rank via trailing
    singleton dims.
    """
    x = get_ov_output(x)
    x_shape = x.get_partial_shape()
    rank = x_shape.rank.get_length()
    if rank == 0:
        # A scalar has a single element at index 0.
        return OpenVINOKerasTensor(ov_opset.constant([0], Type.i32).output(0))
    if axis is None:
        # Flatten into dim 0, padding with singleton dims to keep rank.
        flatten_shape = ov_opset.constant(
            [-1] + [1] * (rank - 1), Type.i32
        ).output(0)
        x = ov_opset.reshape(x, flatten_shape, False).output(0)
        axis = 0
        k = ov_opset.constant(1, Type.i32).output(0)
    else:
        if axis < 0:
            axis = rank + axis
        k = ov_opset.constant(1, Type.i32).output(0)
    # stable=True: ties resolve to the first occurrence, as in NumPy.
    topk_outputs = ov_opset.topk(
        x,
        k=k,
        axis=axis,
        mode="min",
        sort="value",
        stable=True,
        index_element_type=Type.i32,
    )
    # Output 1 of topk holds the indices.
    topk_indices = topk_outputs.output(1)
    if not keepdims:
        topk_indices = ov_opset.squeeze(topk_indices, [axis]).output(0)
    return OpenVINOKerasTensor(topk_indices)
def argsort(x, axis=-1):
    """Indices that would sort `x` ascending along `axis`.

    Implemented as a full top-k in "min" mode with k equal to the size
    of the sorted axis (computed symbolically, so dynamic shapes work).
    Rank-0 inputs return [0]; `axis=None` flattens first.
    """
    x = get_ov_output(x)
    x_shape = x.get_partial_shape()
    rank = x_shape.rank.get_length()
    if rank == 0:
        # A scalar sorts to the single index 0.
        return OpenVINOKerasTensor(ov_opset.constant([0], Type.i32).output(0))
    if axis is None:
        flatten_shape = ov_opset.constant([-1], Type.i32).output(0)
        x = ov_opset.reshape(x, flatten_shape, False).output(0)
        # k = total element count of the flattened tensor.
        x_shape_tensor = ov_opset.shape_of(x, Type.i32).output(0)
        k = ov_opset.reduce_prod(
            x_shape_tensor, ov_opset.constant([0], Type.i32), keep_dims=False
        )
        axis = 0
    else:
        if axis < 0:
            axis = rank + axis
        # k = dynamic extent of the sorted axis.
        x_shape_tensor = ov_opset.shape_of(x, Type.i32).output(0)
        k = ov_opset.gather(
            x_shape_tensor,
            ov_opset.constant(axis, Type.i32).output(0),
            ov_opset.constant(0, Type.i32).output(0),
        ).output(0)
    # Output 1 of topk holds the sorted indices.
    sorted_indices = ov_opset.topk(
        x,
        k=k,
        axis=axis,
        mode="min",
        sort="value",
    ).output(1)
    return OpenVINOKerasTensor(sorted_indices)
def array(x, dtype=None):
    """Create a numpy array from `x`, optionally casting to `dtype`."""
    # np.array treats dtype=None exactly like omitting the argument,
    # so a single call covers both branches.
    return np.array(x, dtype=dtype)
def view(x, dtype=None):
    """Memory reinterpretation is not available in this backend."""
    msg = "`view` is not supported with openvino backend"
    raise NotImplementedError(msg)
def average(x, axis=None, weights=None):
    """Average of `x` along `axis` (flattened first when `axis` is None).

    When `weights` is given the result is reduce_mean(x * weights).
    NOTE(review): this equals numpy.average only when the weights sum to
    the reduced axis length - verify callers normalize their weights.
    """
    x = get_ov_output(x)
    if weights is not None:
        weights = get_ov_output(weights)
    if axis is None:
        # Flatten both operands so the reduction covers every element.
        flatten_shape = ov_opset.constant([-1], Type.i32).output(0)
        x = ov_opset.reshape(x, flatten_shape, False).output(0)
        if weights is not None:
            weights = ov_opset.reshape(weights, flatten_shape, False).output(0)
        axis = 0
    if weights is not None:
        x_type = x.get_element_type()
        weights_type = weights.get_element_type()
        # Integer/boolean operands would truncate the mean; promote to f32.
        if (weights_type.is_integral() or weights_type == Type.boolean) and (
            x_type.is_integral() or x_type == Type.boolean
        ):
            x = ov_opset.convert(x, Type.f32).output(0)
            weights = ov_opset.convert(weights, Type.f32).output(0)
        x, weights = _align_operand_types(x, weights, "multiply()")
        x = ov_opset.multiply(x, weights)
    if isinstance(axis, tuple):
        axis = list(axis)
    if axis == []:
        # Empty axis list: nothing to reduce over.
        return OpenVINOKerasTensor(x)
    axis_const = ov_opset.constant(axis, dtype=Type.i32).output(0)
    mean_ops = ov_opset.reduce_mean(x, axis_const, False)
    return OpenVINOKerasTensor(mean_ops.output(0))
def bartlett(x):
    """Return a Bartlett (triangular) window of length `x`.

    w(n) = 2n/(M-1) for n <= (M-1)/2, else 2 - 2n/(M-1), for M = x.
    Result dtype is the configured floatx.
    """
    x = get_ov_output(x)
    zero_const = ov_opset.constant(0, Type.i64)
    one_const = ov_opset.constant(1, Type.i64)
    two_const = ov_opset.constant(2, Type.i64)
    two_const_f64 = ov_opset.constant(2.0, Type.f64)
    if x.get_element_type() != Type.i64:
        x = ov_opset.convert(x, Type.i64)
    # Midpoint (M-1)/2 where the ramp direction flips.
    half = ov_opset.convert(
        ov_opset.divide(ov_opset.subtract(x, one_const), two_const), Type.f64
    )
    n = ov_opset.range(zero_const, x, one_const, Type.f64)
    condition = ov_opset.less_equal(n, half)
    first_half = ov_opset.divide(
        ov_opset.multiply(two_const_f64, n),
        ov_opset.convert(ov_opset.subtract(x, one_const), Type.f64),
    )
    second_half = ov_opset.subtract(two_const_f64, first_half)
    window = ov_opset.select(condition, first_half, second_half)
    window = ov_opset.convert(window, OPENVINO_DTYPES[config.floatx()]).output(
        0
    )
    return OpenVINOKerasTensor(window)
def hamming(x):
    """Return a Hamming window of length `x`.

    w(n) = 0.54 - 0.46 * cos(2*pi*n / (M-1)), computed in f64 and cast to
    the configured floatx.
    """
    m = get_ov_output(x)
    m_i64 = (
        m if m.get_element_type() == Type.i64 else ov_opset.convert(m, Type.i64)
    )
    start = ov_opset.constant(0, Type.i64)
    step = ov_opset.constant(1, Type.i64)
    n = ov_opset.range(start, m_i64, step, Type.f64)
    one_i64 = ov_opset.constant(1, Type.i64)
    denom_i64 = ov_opset.subtract(m_i64, one_i64)
    denom = ov_opset.convert(denom_i64, Type.f64)
    two_pi = ov_opset.constant(2.0 * np.pi, Type.f64)
    two_pi_over_m_minus_1 = ov_opset.divide(two_pi, denom)
    x = ov_opset.multiply(two_pi_over_m_minus_1, n)
    c = ov_opset.cos(x)
    # 0.54 - 0.46 * cos(...)
    a = ov_opset.constant(0.54, Type.f64)
    b = ov_opset.constant(0.46, Type.f64)
    hamming_window = ov_opset.subtract(a, ov_opset.multiply(b, c))
    hamming_window = ov_opset.convert(
        hamming_window, OPENVINO_DTYPES[config.floatx()]
    )
    return OpenVINOKerasTensor(hamming_window.output(0))
def heaviside(x1, x2):
    """Heaviside step function: 0 where x1 < 0, 1 where x1 > 0, x2 at 0."""
    x1 = get_ov_output(x1)
    x_type = x1.get_element_type()
    # x2 is converted to x1's element type so the selects are type-safe.
    x2 = get_ov_output(x2, x_type)
    zero_scalar = ov_opset.constant(0, x_type).output(0)
    one_scalar = ov_opset.constant(1, x_type).output(0)
    neg = ov_opset.less(x1, zero_scalar).output(0)
    pos = ov_opset.greater(x1, zero_scalar).output(0)
    eq = ov_opset.equal(x1, zero_scalar).output(0)
    # Apply the three cases with successive selects.
    x = ov_opset.select(neg, zero_scalar, x1).output(0)
    x = ov_opset.select(pos, one_scalar, x).output(0)
    x = ov_opset.select(eq, x2, x).output(0)
    return OpenVINOKerasTensor(x)
def kaiser(x, beta):
    """Kaiser window is not implemented for the OpenVINO backend."""
    msg = "`kaiser` is not supported with openvino backend"
    raise NotImplementedError(msg)
def bincount(x, weights=None, minlength=0, sparse=False):
    """Count occurrences of each value in an integer tensor.

    Implemented by one-hot encoding `x` to depth max(x)+1 (at least
    `minlength`) and summing the one-hot axis-wise, optionally scaled by
    per-element `weights`. `sparse=True` is rejected.
    """
    if x is None:
        raise ValueError("input x is None")
    if sparse:
        raise ValueError("Unsupported value `sparse=True`")
    x = get_ov_output(x)
    x_type = x.get_element_type()
    # Reduce over axis rank(x)-1: the original last axis, over which the
    # one-hot counts for each bin are accumulated.
    shape_x = ov_opset.shape_of(x, "i64").output(0)
    rank_x = ov_opset.shape_of(shape_x, "i64").output(0)
    rank_x = ov_opset.convert(rank_x, x_type).output(0)
    scalar_shape = ov_opset.constant([], x_type).output(0)
    rank_x = ov_opset.reshape(rank_x, scalar_shape, False).output(0)
    const_minus_one = ov_opset.constant(-1, x_type).output(0)
    rank_minus_one = ov_opset.add(rank_x, const_minus_one).output(0)
    minlength = get_ov_output(minlength)
    minlength = ov_opset.convert(minlength, x_type).output(0)
    const_one = ov_opset.constant(1, x_type).output(0)
    const_zero = ov_opset.constant(0, x_type).output(0)
    # Number of bins: max(x) + 1, but never fewer than minlength.
    max_element = ov_opset.reduce_max(x, const_zero, keep_dims=False).output(0)
    depth = ov_opset.add(max_element, const_one).output(0)
    depth = ov_opset.maximum(depth, minlength).output(0)
    depth_scalar = ov_opset.reduce_max(
        depth, const_zero, keep_dims=False
    ).output(0)
    one_hot = ov_opset.one_hot(
        x, depth_scalar, const_one, const_zero, axis=-1
    ).output(0)
    if weights is not None:
        weights = get_ov_output(weights)
        weights_type = weights.get_element_type()
        # Broadcast weights over the one-hot (bin) axis before summing.
        weights_new = ov_opset.reshape(weights, [-1, 1], False).output(0)
        one_hot = ov_opset.convert(one_hot, weights_type).output(0)
        final_one_hot = ov_opset.multiply(one_hot, weights_new).output(0)
        final_output = ov_opset.reduce_sum(
            final_one_hot, rank_minus_one, keep_dims=False
        ).output(0)
        return OpenVINOKerasTensor(final_output)
    else:
        final_output = ov_opset.reduce_sum(
            one_hot, rank_minus_one, keep_dims=False
        ).output(0)
        final_output = ov_opset.convert(final_output, Type.i32).output(0)
        return OpenVINOKerasTensor(final_output)
def blackman(x):
    """Return a Blackman window of length `x`.

    w(n) = 0.42 - 0.5*cos(2*pi*n/(M-1)) + 0.08*cos(4*pi*n/(M-1)),
    computed in f64 and cast to the configured floatx.
    """
    x = get_ov_output(x)
    zero_const = ov_opset.constant(0, Type.i64)
    one_const = ov_opset.constant(1, Type.i64)
    two_pi = ov_opset.constant(2.0 * np.pi, Type.f64)
    term_1 = ov_opset.constant(0.42, Type.f64)
    term_2 = ov_opset.constant(0.5, Type.f64)
    term_3 = ov_opset.constant(0.08, Type.f64)
    if x.get_element_type() != Type.i64:
        x = ov_opset.convert(x, Type.i64)
    n = ov_opset.range(zero_const, x, one_const, Type.f64)
    n_minus_1 = ov_opset.subtract(
        ov_opset.convert(x, Type.f64), ov_opset.constant(1.0, Type.f64)
    ).output(0)
    angle_2pi = ov_opset.divide(ov_opset.multiply(two_pi, n), n_minus_1)
    # 4*pi*n/(M-1) is just twice the 2*pi angle.
    angle_4pi = ov_opset.multiply(angle_2pi, ov_opset.constant(2.0, Type.f64))
    cos_2pi = ov_opset.cos(angle_2pi)
    cos_4pi = ov_opset.cos(angle_4pi)
    term_2_final = ov_opset.multiply(term_2, cos_2pi)
    term_3_final = ov_opset.multiply(term_3, cos_4pi)
    window = ov_opset.add(ov_opset.subtract(term_1, term_2_final), term_3_final)
    window = ov_opset.convert(window, OPENVINO_DTYPES[config.floatx()]).output(
        0
    )
    return OpenVINOKerasTensor(window)
def broadcast_to(x, shape):
    """Broadcast `x` to a static target `shape` (tuple or list only)."""
    assert isinstance(shape, (tuple, list)), (
        "`broadcast_to` is supported only for tuple and list `shape`"
    )
    input_node = get_ov_output(x)
    shape_const = ov_opset.constant(list(shape), Type.i32).output(0)
    broadcasted = ov_opset.broadcast(input_node, shape_const).output(0)
    return OpenVINOKerasTensor(broadcasted)
def cbrt(x):
    """Cube root is not implemented for the OpenVINO backend."""
    msg = "`cbrt` is not supported with openvino backend"
    raise NotImplementedError(msg)
def ceil(x):
    """Element-wise ceiling; integer inputs are first cast to floatx."""
    node = get_ov_output(x)
    if node.get_element_type().is_integral():
        node = ov_opset.convert(
            node, OPENVINO_DTYPES[config.floatx()]
        ).output(0)
    return OpenVINOKerasTensor(ov_opset.ceil(node).output(0))
def clip(x, x_min, x_max):
    """Clamp `x` to [x_min, x_max]; boolean inputs are promoted to i32."""
    node = get_ov_output(x)
    if node.get_element_type() == Type.boolean:
        node = ov_opset.convert(node, Type.i32).output(0)
    lower = get_ov_output(x_min, node.get_element_type())
    upper = get_ov_output(x_max, node.get_element_type())
    bounded_below = ov_opset.maximum(node, lower).output(0)
    fully_clipped = ov_opset.minimum(bounded_below, upper).output(0)
    return OpenVINOKerasTensor(fully_clipped)
def concatenate(xs, axis=0):
    """Join a list of tensors along `axis`."""
    assert isinstance(xs, list), "`concatenate` is supported only for `x` list"
    nodes = [get_ov_output(elem) for elem in xs]
    return OpenVINOKerasTensor(ov_opset.concat(nodes, axis).output(0))
def conjugate(x):
    """Complex conjugate is not implemented for the OpenVINO backend."""
    msg = "`conjugate` is not supported with openvino backend"
    raise NotImplementedError(msg)
def conj(x):
    """Alias target of `conjugate`; not implemented for this backend."""
    msg = "`conj` is not supported with openvino backend"
    raise NotImplementedError(msg)
def copy(x):
    # Return the input unchanged; no data is duplicated here.
    return x
def cos(x):
    """Element-wise cosine; integer inputs are promoted to floatx."""
    node = get_ov_output(x)
    if node.get_element_type().is_integral():
        node = ov_opset.convert(node, OPENVINO_DTYPES[config.floatx()])
    return OpenVINOKerasTensor(ov_opset.cos(node).output(0))
def cosh(x):
    """Element-wise hyperbolic cosine; integer inputs promoted to floatx."""
    node = get_ov_output(x)
    if node.get_element_type().is_integral():
        node = ov_opset.convert(node, OPENVINO_DTYPES[config.floatx()])
    return OpenVINOKerasTensor(ov_opset.cosh(node).output(0))
def count_nonzero(x, axis=None):
    """Count the non-zero elements of `x`, optionally along `axis`."""
    node = get_ov_output(x)
    zero = ov_opset.constant(0, dtype=Type.i32).output(0)
    zero = ov_opset.convert_like(zero, node)
    # Build an i32 0/1 mask of the non-zero positions.
    mask = ov_opset.not_equal(node, zero).output(0)
    mask = ov_opset.convert(mask, Type.i32).output(0)
    mask, axis = _resolve_axis(mask, axis)
    if not axis:
        # No axes to reduce over; the mask itself is the result.
        return OpenVINOKerasTensor(mask)
    reduced = ov_opset.reduce_sum(mask, axis, False).output(0)
    return OpenVINOKerasTensor(reduced)
def cross(x1, x2, axisa=-1, axisb=-1, axisc=-1, axis=None):
    """Vector cross product is not implemented for the OpenVINO backend."""
    msg = "`cross` is not supported with openvino backend"
    raise NotImplementedError(msg)
def cumprod(x, axis=None, dtype=None):
    """Cumulative product is not implemented for the OpenVINO backend."""
    msg = "`cumprod` is not supported with openvino backend"
    raise NotImplementedError(msg)
def cumsum(x, axis=None, dtype=None):
    """Cumulative sum along `axis` (input flattened when axis is None)."""
    node = get_ov_output(x)
    if dtype is not None:
        target = OPENVINO_DTYPES[standardize_dtype(dtype)]
        node = ov_opset.convert(node, target).output(0)
    node, axis = _resolve_axis(node, axis)
    if node.get_element_type() == Type.boolean:
        # Booleans cannot be summed directly; accumulate them as i32.
        node = ov_opset.convert(node, Type.i32).output(0)
    return OpenVINOKerasTensor(ov_opset.cumsum(node, axis).output(0))
def deg2rad(x):
    """Convert angles from degrees to radians."""
    node = get_ov_output(x)
    src_type = node.get_element_type()
    # Choose the result float type: i64 widens to f64, other integer
    # types go to the configured floatx, floats pass through unchanged.
    if src_type == Type.i64:
        result_type = Type.f64
    elif src_type.is_integral():
        result_type = OPENVINO_DTYPES[config.floatx()]
    else:
        result_type = src_type
    if src_type != result_type:
        node = ov_opset.convert(node, result_type)
    factor = ov_opset.constant(np.pi / 180.0, result_type).output(0)
    return OpenVINOKerasTensor(ov_opset.multiply(node, factor).output(0))
def diag(x, k=0):
    """numpy.diag equivalent: build a diagonal matrix from a 1-D input,
    or extract the k-th diagonal from a 2-D input.

    Both paths require static shapes because the scatter/gather index
    lists are materialized as host-side constants.
    """
    x = get_ov_output(x)
    x_shape = x.get_partial_shape()
    rank = x_shape.rank.get_length()
    if rank == 1:
        N_dim = x_shape[0]
        if not N_dim.is_static:
            raise ValueError(
                "diag requires input with static shape for 1D input."
            )
        N = N_dim.get_length()
        # Output is square with room for the offset diagonal.
        output_size = N + np.abs(k)
        out_shape = ov_opset.constant(
            [output_size, output_size], dtype=Type.i32
        ).output(0)
        zeros_const = ov_opset.constant(0, x.get_element_type()).output(0)
        diag_matrix = ov_opset.broadcast(zeros_const, out_shape)
        # Scatter x onto the k-th diagonal of the zero matrix.
        indices = []
        if k >= 0:
            for i in range(N):
                indices.append([i, i + k])
        else:
            for i in range(N):
                indices.append([i - k, i])
        indices = np.array(indices, dtype=np.int32)
        indices_const = ov_opset.constant(indices, dtype=Type.i32).output(0)
        updated = ov_opset.scatter_nd_update(diag_matrix, indices_const, x)
        return OpenVINOKerasTensor(updated.output(0))
    elif rank == 2:
        M_dim = x_shape[0]
        N_dim = x_shape[1]
        if not M_dim.is_static or not N_dim.is_static:
            raise ValueError(
                "diag requires input with static shape for 2D input."
            )
        M = M_dim.get_length()
        N = N_dim.get_length()
        # L = length of the k-th diagonal; clamp at 0 when off-matrix.
        if k >= 0:
            L = np.minimum(M, N - k) if (N - k) > 0 else 0
            indices = [[i, i + k] for i in range(L)]
        else:
            L = np.minimum(M + k, N) if (M + k) > 0 else 0
            indices = [[i - k, i] for i in range(L)]
        if L <= 0:
            # Diagonal lies entirely outside the matrix: empty result.
            keras_dtype = ov_to_keras_type(x.get_element_type())
            np_dtype = np.dtype(keras_dtype)
            empty_np = np.empty((0,), dtype=np_dtype)
            empty_const = ov_opset.constant(
                empty_np, x.get_element_type()
            ).output(0)
            return OpenVINOKerasTensor(empty_const)
        indices = np.array(indices, dtype=np.int32)
        indices_const = ov_opset.constant(indices, dtype=Type.i32).output(0)
        diag_vec = ov_opset.gather_nd(x, indices_const)
        return OpenVINOKerasTensor(diag_vec.output(0))
    else:
        raise ValueError("diag supports only 1D or 2D tensors")
def diagonal(x, offset=0, axis1=0, axis2=1):
    """Diagonal extraction over arbitrary axes is not implemented here."""
    msg = "`diagonal` is not supported with openvino backend"
    raise NotImplementedError(msg)
def diff(a, n=1, axis=-1):
    """n-th order discrete difference along `axis` (numpy.diff).

    Each iteration slices a[..., 1:] ("upper") and a[..., :-1] ("lower")
    along `axis` via strided_slice and subtracts them; booleans use
    not_equal (XOR) instead of subtraction, matching numpy.
    """
    if n == 0:
        # Zeroth difference is the input itself.
        return OpenVINOKerasTensor(get_ov_output(a))
    if n < 0:
        raise ValueError(f"order must be non-negative but got {repr(n)}")
    a = get_ov_output(a)
    a_type = a.get_element_type()
    # NOTE(review): `a` was just produced by get_ov_output, so the
    # np.ndarray branch looks unreachable - confirm get_ov_output never
    # returns a raw numpy array.
    if isinstance(a, np.ndarray):
        rank = a.ndim
    else:
        rank = a.get_partial_shape().rank.get_length()
    if axis < 0:
        axis = axis + rank
    result = a
    for _ in range(n):
        rank = result.get_partial_shape().rank.get_length()
        strides = ov_opset.constant(
            np.array([1] * rank, dtype=np.int64), Type.i64
        ).output(0)
        # "upper" slice: start at 1 along `axis`, run to the end.
        begin_upper_list = [0] * rank
        begin_upper_list[axis] = 1
        begin_upper = ov_opset.constant(
            np.array(begin_upper_list, dtype=np.int64), Type.i64
        ).output(0)
        end_upper = ov_opset.constant(
            np.array([0] * rank, dtype=np.int64), Type.i64
        ).output(0)
        # mask value 1 means "ignore the bound, take the full extent".
        begin_mask_upper = [1] * rank
        begin_mask_upper[axis] = 0
        end_mask_upper = [1] * rank
        upper = ov_opset.strided_slice(
            data=result,
            begin=begin_upper,
            end=end_upper,
            strides=strides,
            begin_mask=begin_mask_upper,
            end_mask=end_mask_upper,
            new_axis_mask=[],
            shrink_axis_mask=[],
            ellipsis_mask=[],
        ).output(0)
        # "lower" slice: start at 0, stop one element early along `axis`.
        begin_lower = ov_opset.constant(
            np.array([0] * rank, dtype=np.int64), Type.i64
        ).output(0)
        end_lower_list = [0] * rank
        end_lower_list[axis] = -1
        end_lower = ov_opset.constant(
            np.array(end_lower_list, dtype=np.int64), Type.i64
        ).output(0)
        begin_mask_lower = [1] * rank
        end_mask_lower = [1] * rank
        end_mask_lower[axis] = 0
        lower = ov_opset.strided_slice(
            data=result,
            begin=begin_lower,
            end=end_lower,
            strides=strides,
            begin_mask=begin_mask_lower,
            end_mask=end_mask_lower,
            new_axis_mask=[],
            shrink_axis_mask=[],
            ellipsis_mask=[],
        ).output(0)
        if a_type == Type.boolean:
            # Boolean diff is XOR, as in numpy.
            result = ov_opset.not_equal(upper, lower).output(0)
        else:
            result = ov_opset.subtract(upper, lower).output(0)
    return OpenVINOKerasTensor(result)
def digitize(x, bins):
x_node = get_ov_output(x)
if isinstance(bins, OpenVINOKerasTensor):
bins_node = get_ov_output(bins)
else:
bins_np = np.asarray(bins)
if bins_np.ndim != 1:
raise ValueError("`bins` must be 1-D array-like")
bins_node = ov_opset.constant(bins_np).output(0)
x_node, bins_node = _align_operand_types(x_node, bins_node, "digitize()")
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/openvino/trainer.py | keras/src/backend/openvino/trainer.py | import numpy as np
import openvino as ov
import openvino.opset14 as ov_opset
from keras.src import backend
from keras.src import callbacks as callbacks_module
from keras.src import tree
from keras.src.backend.openvino.core import OPENVINO_DTYPES
from keras.src.backend.openvino.core import OpenVINOKerasTensor
from keras.src.backend.openvino.core import get_device
from keras.src.trainers import trainer as base_trainer
from keras.src.trainers.data_adapters import data_adapter_utils
from keras.src.trainers.epoch_iterator import EpochIterator
from keras.src.utils import traceback_utils
class OpenVINOTrainer(base_trainer.Trainer):
    """Trainer for the OpenVINO backend.

    OpenVINO is an inference-only backend: `predict` traces the Keras
    model into an OpenVINO graph, compiles it, and caches the compiled
    model per device. Training/evaluation entry points raise
    `NotImplementedError`.
    """

    def __init__(self):
        super().__init__()
        self.test_function = None
        self.predict_function = None
        # Compiled-model cache plus the device it was compiled for.
        self.ov_compiled_model = None
        self.ov_device = None
        # Parameterized input/output structures captured during tracing.
        self.struct_params = None
        self.struct_outputs = None

    def _unpack_singleton(self, x):
        """Unwrap a one-element list/tuple; return anything else as-is."""
        if isinstance(x, (list, tuple)) and len(x) == 1:
            return x[0]
        return x

    def test_step(self, data):
        raise NotImplementedError(
            "`test_step` is not supported with openvino backend"
        )

    def predict_step(self, data):
        """Run one inference step and restore the model's output structure."""
        x, _, _ = data_adapter_utils.unpack_x_y_sample_weight(data)
        ov_compiled_model = self._get_compiled_model(x)
        flatten_x = tree.flatten(x)
        y_pred = ov_compiled_model(flatten_x)
        # recover structure of the model output
        y_pred = self._unpack_singleton(
            tree.pack_sequence_as(self.struct_outputs, y_pred.to_tuple())
        )
        return y_pred

    def make_test_function(self, force=False):
        """Build (or reuse) the per-batch test function."""
        if self.test_function is not None and not force:
            return self.test_function

        def one_test_step(data):
            data = data[0]
            return self.test_step(data)

        def multi_test_steps(data):
            # Run each packed step; the last step's logs win.
            for single_step_data in data:
                logs = one_test_step([single_step_data])
            return logs

        if self.steps_per_execution > 1:
            test_step = multi_test_steps
        else:
            test_step = one_test_step
        self.test_function = test_step

    def _parameterize_data(self, data):
        """Mirror `data`'s structure with OpenVINO graph parameters.

        Each array/scalar leaf becomes an `ov_opset.parameter` of the
        matching shape and dtype, wrapped in an OpenVINOKerasTensor, so
        the Keras model can be traced symbolically.
        """
        if isinstance(data, (list, tuple)):
            parametrize_data = []
            for elem in data:
                param_elem = self._parameterize_data(elem)
                parametrize_data.append(param_elem)
        elif isinstance(data, dict):
            parametrize_data = dict()
            for elem_name, elem in data.items():
                param_elem = self._parameterize_data(elem)
                parametrize_data[elem_name] = param_elem
        elif isinstance(data, np.ndarray) or np.isscalar(data):
            # NOTE(review): np.isscalar is also True for plain Python
            # ints/floats, which have no `.dtype`, making the int/float
            # branches below unreachable for them - confirm leaves are
            # always numpy-typed.
            ov_type = OPENVINO_DTYPES[str(data.dtype)]
            ov_shape = list(data.shape)
            param = ov_opset.parameter(shape=ov_shape, dtype=ov_type)
            parametrize_data = OpenVINOKerasTensor(param.output(0))
        elif isinstance(data, int):
            param = ov_opset.parameter(shape=[], dtype=ov.Type.i32)
            parametrize_data = OpenVINOKerasTensor(param.output(0))
        elif isinstance(data, float):
            param = ov_opset.parameter(shape=[], dtype=ov.Type.f32)
            parametrize_data = OpenVINOKerasTensor(param.output(0))
        else:
            # Fix: raising a plain string is itself a TypeError in
            # Python 3; raise a proper exception type instead.
            raise TypeError(
                "Unknown type of input data {}".format(type(data))
            )
        return parametrize_data

    def _get_compiled_model(self, data):
        """Return a compiled OpenVINO model, rebuilding on device change."""
        if (
            self.ov_compiled_model is not None
            and get_device() == self.ov_device
        ):
            return self.ov_compiled_model
        # remove the previous cached compiled model if exists
        del self.ov_compiled_model
        # prepare parameterized input
        self.struct_params = self._parameterize_data(data)
        # construct OpenVINO graph during calling Keras Model
        self.struct_outputs = self(self.struct_params)
        parameters = []
        for p in tree.flatten(self.struct_params):
            parameters.append(p.output.get_node())
        results = []
        for r in tree.flatten(self.struct_outputs):
            results.append(ov_opset.result(r.output))
        # prepare compiled model from scratch
        ov_model = ov.Model(results=results, parameters=parameters)
        self.ov_compiled_model = ov.compile_model(ov_model, get_device())
        self.ov_device = get_device()
        return self.ov_compiled_model

    def make_predict_function(self, force=False):
        """Build (or reuse) the per-batch predict function."""
        if self.predict_function is not None and not force:
            return self.predict_function

        def one_predict_step(data):
            data = data[0]
            return self.predict_step(data)

        def multi_predict_steps(data):
            # Run the packed steps one by one and concatenate outputs.
            outputs = one_predict_step(data[:1])
            for single_step_data in data[1:]:
                step_outputs = one_predict_step([single_step_data])
                outputs = tree.map_structure(
                    lambda t1, t2: np.concatenate([t1, t2]),
                    outputs,
                    step_outputs,
                )
            return outputs

        if self.steps_per_execution > 1:
            predict_step = multi_predict_steps
        else:
            predict_step = one_predict_step
        self.predict_function = predict_step

    def fit(
        self,
        x=None,
        y=None,
        batch_size=None,
        epochs=1,
        verbose="auto",
        callbacks=None,
        validation_split=0.0,
        validation_data=None,
        shuffle=True,
        class_weight=None,
        sample_weight=None,
        initial_epoch=0,
        steps_per_epoch=None,
        validation_steps=None,
        validation_batch_size=None,
        validation_freq=1,
    ):
        raise NotImplementedError(
            "`fit` is not supported with openvino backend"
        )

    @traceback_utils.filter_traceback
    def predict(
        self, x, batch_size=None, verbose="auto", steps=None, callbacks=None
    ):
        """Generate output predictions for the input samples."""
        # Create an iterator that yields batches of input data.
        epoch_iterator = EpochIterator(
            x=x,
            batch_size=batch_size,
            steps_per_epoch=steps,
            shuffle=False,
            steps_per_execution=self.steps_per_execution,
        )
        # Container that configures and calls callbacks.
        if not isinstance(callbacks, callbacks_module.CallbackList):
            callbacks = callbacks_module.CallbackList(
                callbacks,
                add_history=True,
                add_progbar=verbose != 0,
                verbose=verbose,
                epochs=1,
                steps=epoch_iterator.num_batches,
                model=self,
            )

        def append_to_outputs(batch_outputs, outputs):
            # Accumulate per-batch outputs into per-output lists.
            if outputs is None:
                outputs = tree.map_structure(
                    lambda batch_output: [batch_output],
                    batch_outputs,
                )
            else:
                tree.map_structure_up_to(
                    batch_outputs,
                    lambda output, batch_output: output.append(batch_output),
                    outputs,
                    batch_outputs,
                )
            return outputs

        self.make_predict_function()
        self.stop_predicting = False
        callbacks.on_predict_begin()
        outputs = None
        for begin_step, end_step, data in epoch_iterator.enumerate_epoch():
            callbacks.on_predict_batch_begin(begin_step)
            batch_outputs = self.predict_function(data)
            outputs = append_to_outputs(batch_outputs, outputs)
            callbacks.on_predict_batch_end(end_step, {"outputs": batch_outputs})
            if self.stop_predicting:
                break
        callbacks.on_predict_end()
        return tree.map_structure_up_to(batch_outputs, np.concatenate, outputs)

    @traceback_utils.filter_traceback
    def evaluate(
        self,
        x=None,
        y=None,
        batch_size=None,
        verbose="auto",
        sample_weight=None,
        steps=None,
        callbacks=None,
        return_dict=False,
        **kwargs,
    ):
        raise NotImplementedError(
            "`evaluate` is not supported with openvino backend"
        )

    def train_on_batch(
        self,
        x,
        y=None,
        sample_weight=None,
        class_weight=None,
        return_dict=False,
    ):
        raise NotImplementedError(
            "`train_on_batch` is not supported with openvino backend"
        )

    def test_on_batch(
        self,
        x,
        y=None,
        sample_weight=None,
        return_dict=False,
    ):
        raise NotImplementedError(
            "`test_on_batch` is not supported with openvino backend"
        )

    def predict_on_batch(self, x):
        """Run prediction on a single batch and convert outputs to numpy."""
        self.make_predict_function()
        batch_outputs = self.predict_function([(x,)])
        batch_outputs = tree.map_structure(
            backend.convert_to_numpy, batch_outputs
        )
        return batch_outputs
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/openvino/random.py | keras/src/backend/openvino/random.py | import numpy as np
import openvino.opset14 as ov_opset
from openvino import Type
from keras.src.backend.config import floatx
from keras.src.backend.openvino.core import OPENVINO_DTYPES
from keras.src.backend.openvino.core import OpenVINOKerasTensor
from keras.src.backend.openvino.core import convert_to_numpy
from keras.src.backend.openvino.core import get_ov_output
from keras.src.random.seed_generator import SeedGenerator
from keras.src.random.seed_generator import draw_seed
from keras.src.random.seed_generator import make_default_seed
def normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
    """Draw samples from N(mean, stddev) as a constant tensor."""
    target_dtype = dtype or floatx()
    drawn = draw_seed(seed)
    rng = np.random.default_rng(drawn.data)
    samples = rng.normal(size=shape, loc=mean, scale=stddev)
    samples = samples.astype(target_dtype)
    return OpenVINOKerasTensor(ov_opset.constant(samples).output(0))
def uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
    """Draw samples from U[minval, maxval) as a constant tensor."""
    target_dtype = dtype or floatx()
    drawn = draw_seed(seed)
    # draw_seed may hand back a symbolic tensor; pull its numpy value.
    if isinstance(drawn, OpenVINOKerasTensor):
        seed_data = convert_to_numpy(drawn)
    else:
        seed_data = drawn.data
    rng = np.random.default_rng(seed_data)
    samples = rng.uniform(minval, maxval, size=shape).astype(target_dtype)
    return OpenVINOKerasTensor(ov_opset.constant(samples).output(0))
def categorical(logits, num_samples, dtype="int64", seed=None):
    """Sample `num_samples` class indices per row from `logits`.

    Uses inverse-CDF sampling: softmax -> cumulative sum -> compare a
    uniform draw against the CDF and count the thresholds it exceeds.
    """
    dtype = dtype or "int64"
    ov_dtype = OPENVINO_DTYPES[dtype]
    logits = get_ov_output(logits)
    zero_const = ov_opset.constant(0, Type.i32).output(0)
    one_const = ov_opset.constant(1, Type.i32).output(0)
    neg_one_const = ov_opset.constant(-1, Type.i32).output(0)
    # Compute probabilities and cumulative sum
    probs = ov_opset.softmax(logits, axis=-1).output(0)
    cumsum_probs = ov_opset.cumsum(probs, neg_one_const).output(0)
    # Get shape and compute batch dimensions
    logits_shape = ov_opset.shape_of(logits, Type.i32).output(0)
    rank = ov_opset.shape_of(logits_shape, Type.i32).output(0)
    rank_scalar = ov_opset.squeeze(rank, zero_const).output(0)
    rank_minus_1 = ov_opset.subtract(rank_scalar, one_const).output(0)
    # Extract batch shape (all dimensions except last)
    batch_indices = ov_opset.range(
        zero_const, rank_minus_1, one_const, output_type=Type.i32
    ).output(0)
    batch_shape = ov_opset.gather(logits_shape, batch_indices, axis=0).output(0)
    # Create final shape [batch_dims..., num_samples]
    num_samples_const = ov_opset.constant([num_samples], Type.i32).output(0)
    final_shape = ov_opset.concat(
        [batch_shape, num_samples_const], axis=0
    ).output(0)
    # Split the drawn seed into the two values random_uniform expects.
    seed_tensor = draw_seed(seed)
    if isinstance(seed_tensor, OpenVINOKerasTensor):
        seed1, seed2 = convert_to_numpy(seed_tensor)
    else:
        seed1, seed2 = seed_tensor.data
    probs_dtype = probs.get_element_type()
    zero_float = ov_opset.constant(0.0, probs_dtype).output(0)
    one_float = ov_opset.constant(1.0, probs_dtype).output(0)
    rand = ov_opset.random_uniform(
        final_shape, zero_float, one_float, probs_dtype, seed1, seed2
    ).output(0)
    # Broadcast (samples) against (classes) for the CDF comparison.
    rand_unsqueezed = ov_opset.unsqueeze(rand, neg_one_const).output(0)
    cumsum_unsqueezed = ov_opset.unsqueeze(cumsum_probs, one_const).output(0)
    # Count how many cumulative probabilities each random number exceeds
    greater = ov_opset.greater(rand_unsqueezed, cumsum_unsqueezed).output(0)
    samples = ov_opset.reduce_sum(
        ov_opset.convert(greater, Type.i32).output(0), neg_one_const
    ).output(0)
    result = ov_opset.convert(samples, ov_dtype).output(0)
    return OpenVINOKerasTensor(result)
def randint(shape, minval, maxval, dtype="int32", seed=None):
    """Integer sampling is not implemented for the OpenVINO backend."""
    msg = "`randint` is not supported with openvino backend"
    raise NotImplementedError(msg)
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
    """Sample a normal distribution truncated to mean +/- 2*stddev.

    Out-of-band draws are rejected and redrawn until `shape` is filled.

    Args:
        shape: Output shape (tuple/list of ints, possibly empty).
        mean: Mean of the underlying normal distribution.
        stddev: Standard deviation of the underlying distribution.
        dtype: Result dtype; defaults to the configured floatx.
        seed: Optional seed accepted by `draw_seed`.
    """
    dtype = dtype or floatx()
    seed = draw_seed(seed)
    rng = np.random.default_rng(seed.data)
    lower_bound = mean - 2 * stddev
    upper_bound = mean + 2 * stddev
    # int() guards against np.prod(()) returning a float for a scalar
    # shape, which would break the slicing and rng size= argument below.
    flat_shape = int(np.prod(shape))
    random_numbers = np.empty(0)
    # loop until we have enough valid numbers to fill our desired shape
    while random_numbers.shape[0] < flat_shape:
        # Generate a batch of random numbers from a normal distribution
        batch = rng.normal(loc=mean, scale=stddev, size=flat_shape)
        # Filter the numbers to keep only those within the specified bounds
        valid = batch[(batch >= lower_bound) & (batch <= upper_bound)]
        # Append the valid numbers to the result array
        random_numbers = np.append(random_numbers, valid)
    # Truncate the result array to the desired size and reshape it
    np_array_res = random_numbers[:flat_shape].astype(dtype).reshape(shape)
    return OpenVINOKerasTensor(ov_opset.constant(np_array_res).output(0))
def dropout(inputs, rate, noise_shape=None, seed=None):
    """Dropout is not implemented for the inference-only OpenVINO backend."""
    msg = "`dropout` is not supported with openvino backend"
    raise NotImplementedError(msg)
def shuffle(x, axis=0, seed=None):
    """Random shuffling is not implemented for the OpenVINO backend."""
    msg = "`shuffle` is not supported with openvino backend"
    raise NotImplementedError(msg)
def gamma(shape, alpha, dtype=None, seed=None):
    """Gamma sampling is not implemented for the OpenVINO backend."""
    msg = "`gamma` is not supported with openvino backend"
    raise NotImplementedError(msg)
def binomial(shape, counts, probabilities, dtype=None, seed=None):
    """Binomial sampling is not implemented for the OpenVINO backend."""
    msg = "`binomial` is not supported with openvino backend"
    raise NotImplementedError(msg)
def beta(shape, alpha, beta, dtype=None, seed=None):
    """Beta sampling is not implemented for the OpenVINO backend."""
    msg = "`beta` is not supported with openvino backend"
    raise NotImplementedError(msg)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/openvino/__init__.py | keras/src/backend/openvino/__init__.py | from keras.src.backend.common.name_scope import name_scope
from keras.src.backend.openvino import core
from keras.src.backend.openvino import image
from keras.src.backend.openvino import linalg
from keras.src.backend.openvino import math
from keras.src.backend.openvino import nn
from keras.src.backend.openvino import numpy
from keras.src.backend.openvino import random
from keras.src.backend.openvino.core import IS_THREAD_SAFE
from keras.src.backend.openvino.core import SUPPORTS_RAGGED_TENSORS
from keras.src.backend.openvino.core import SUPPORTS_SPARSE_TENSORS
from keras.src.backend.openvino.core import Variable
from keras.src.backend.openvino.core import cast
from keras.src.backend.openvino.core import compute_output_spec
from keras.src.backend.openvino.core import cond
from keras.src.backend.openvino.core import convert_to_numpy
from keras.src.backend.openvino.core import convert_to_tensor
from keras.src.backend.openvino.core import device_scope
from keras.src.backend.openvino.core import is_tensor
from keras.src.backend.openvino.core import random_seed_dtype
from keras.src.backend.openvino.core import shape
from keras.src.backend.openvino.core import vectorized_map
from keras.src.backend.openvino.rnn import cudnn_ok
from keras.src.backend.openvino.rnn import gru
from keras.src.backend.openvino.rnn import lstm
from keras.src.backend.openvino.rnn import rnn
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/openvino/math.py | keras/src/backend/openvino/math.py | import openvino.opset14 as ov_opset
from openvino import Type
from keras.src.backend.openvino.core import OpenVINOKerasTensor
from keras.src.backend.openvino.core import get_ov_output
def segment_sum(data, segment_ids, num_segments=None, sorted=False):
    """Segment-wise sum is not implemented for the OpenVINO backend."""
    msg = "`segment_sum` is not supported with openvino backend"
    raise NotImplementedError(msg)
def segment_max(data, segment_ids, num_segments=None, sorted=False):
    """Segment-wise max is not implemented for the OpenVINO backend."""
    msg = "`segment_max` is not supported with openvino backend"
    raise NotImplementedError(msg)
def top_k(x, k, sorted=True):
    """Return the top-k values and their indices along the last axis."""
    node = get_ov_output(x)
    k_const = ov_opset.constant(k, dtype=Type.i32)
    # "none" skips ordering when the caller does not need sorted output.
    order = "value" if sorted else "none"
    result = ov_opset.topk(node, k_const, -1, "max", order)
    return (
        OpenVINOKerasTensor(result.output(0)),
        OpenVINOKerasTensor(result.output(1)),
    )
def in_top_k(targets, predictions, k):
    """Top-k membership test is not implemented for this backend."""
    msg = "`in_top_k` is not supported with openvino backend"
    raise NotImplementedError(msg)
def logsumexp(x, axis=None, keepdims=False):
    """Numerically stable log(sum(exp(x))) along `axis`.

    Uses the max-shift trick: subtract the per-axis maximum before
    exponentiating, then add it back after the log. Non-finite maxima
    are replaced with 0 so the shift never produces NaNs.
    """
    x = get_ov_output(x)
    if axis is None:
        # Flatten and reduce over the single remaining axis.
        flatten_shape = ov_opset.constant([-1], Type.i32).output(0)
        x = ov_opset.reshape(x, flatten_shape, False).output(0)
        axis = 0
    if isinstance(axis, tuple):
        axis = list(axis)
    axis = ov_opset.constant(axis, Type.i32).output(0)
    const_zero = ov_opset.constant(0, x.get_element_type()).output(0)
    # Use keepdims=True for reduce_max to ensure proper broadcasting
    reduce_max = ov_opset.reduce_max(x, axis, True).output(0)
    is_finite = ov_opset.is_finite(reduce_max).output(0)
    norm_max = ov_opset.select(is_finite, reduce_max, const_zero).output(0)
    norm_max_sub = ov_opset.subtract(x, norm_max).output(0)
    exp_norm_max = ov_opset.exp(norm_max_sub).output(0)
    sum_exp = ov_opset.reduce_sum(exp_norm_max, axis, keepdims).output(0)
    log_sum_exp = ov_opset.log(sum_exp).output(0)
    # Squeeze norm_max if needed to match dimensions
    if not keepdims:
        norm_max = ov_opset.squeeze(norm_max, axis).output(0)
    log_sum_exp = ov_opset.add(norm_max, log_sum_exp).output(0)
    return OpenVINOKerasTensor(log_sum_exp)
def qr(x, mode="reduced"):
    """QR decomposition is not implemented for the OpenVINO backend."""
    msg = "`qr` is not supported with openvino backend"
    raise NotImplementedError(msg)
def extract_sequences(x, sequence_length, sequence_stride):
    """Sequence framing is not implemented for the OpenVINO backend."""
    msg = "`extract_sequences` is not supported with openvino backend"
    raise NotImplementedError(msg)
def fft(x):
    """FFT is not implemented for the OpenVINO backend."""
    msg = "`fft` is not supported with openvino backend"
    raise NotImplementedError(msg)
def fft2(x):
    """2-D FFT is not implemented for the OpenVINO backend."""
    msg = "`fft2` is not supported with openvino backend"
    raise NotImplementedError(msg)
def rfft(x, fft_length=None):
    """Real-input FFT is not implemented for the OpenVINO backend."""
    msg = "`rfft` is not supported with openvino backend"
    raise NotImplementedError(msg)
def irfft(x, fft_length=None):
    """Inverse real FFT is not implemented for the OpenVINO backend."""
    msg = "`irfft` is not supported with openvino backend"
    raise NotImplementedError(msg)
def stft(
    x, sequence_length, sequence_stride, fft_length, window="hann", center=True
):
    """Short-time Fourier transform is not implemented for this backend."""
    msg = "`stft` is not supported with openvino backend"
    raise NotImplementedError(msg)
def istft(
    x,
    sequence_length,
    sequence_stride,
    fft_length,
    length=None,
    window="hann",
    center=True,
):
    """Inverse STFT is not implemented for the OpenVINO backend."""
    msg = "`istft` is not supported with openvino backend"
    raise NotImplementedError(msg)
def rsqrt(x):
    """Element-wise reciprocal square root: 1 / sqrt(x)."""
    node = get_ov_output(x)
    one = ov_opset.constant(1, node.get_element_type()).output(0)
    root = ov_opset.sqrt(node).output(0)
    return OpenVINOKerasTensor(ov_opset.divide(one, root).output(0))
def erf(x):
    """Element-wise Gauss error function."""
    node = get_ov_output(x)
    return OpenVINOKerasTensor(ov_opset.erf(node).output(0))
def erfinv(x):
    """Inverse error function is not implemented for this backend."""
    msg = "`erfinv` is not supported with openvino backend"
    raise NotImplementedError(msg)
def solve(a, b):
    """Linear system solving is not implemented for this backend."""
    msg = "`solve` is not supported with openvino backend"
    raise NotImplementedError(msg)
def norm(x, ord=None, axis=None, keepdims=False):
    """Vector/matrix norms are not implemented for this backend."""
    msg = "`norm` is not supported with openvino backend"
    raise NotImplementedError(msg)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/torch/image.py | keras/src/backend/torch/image.py | import functools
import itertools
import operator
import numpy as np
import torch
import torch._dynamo as dynamo
import torch.nn.functional as F
from keras.src import backend
from keras.src.backend.torch.core import cast
from keras.src.backend.torch.core import convert_to_tensor
from keras.src.backend.torch.core import get_device
from keras.src.backend.torch.core import to_torch_dtype
from keras.src.random.seed_generator import draw_seed
# Maps Keras interpolation names to the `mode` strings accepted by
# `torch.nn.functional.interpolate`. "nearest-exact" is used so nearest
# resizing matches the other backends' exact nearest-neighbor behavior.
RESIZE_INTERPOLATIONS = {
    "bilinear": "bilinear",
    "nearest": "nearest-exact",
    "bicubic": "bicubic",
}
# Interpolations Keras exposes elsewhere that torch's `interpolate`
# cannot perform; `resize` raises for these.
UNSUPPORTED_INTERPOLATIONS = (
    "lanczos3",
    "lanczos5",
)
# Maps interpolation name -> spline order passed to `map_coordinates`.
AFFINE_TRANSFORM_INTERPOLATIONS = {
    "nearest": 0,
    "bilinear": 1,
}
# Fill modes accepted by `affine_transform` (same keys as `_INDEX_FIXERS`).
AFFINE_TRANSFORM_FILL_MODES = {
    "constant",
    "nearest",
    "wrap",
    "mirror",
    "reflect",
}
# Method names for scale-and-translate style resizing.
# NOTE(review): not referenced in this chunk — presumably consumed by
# code elsewhere in the module; confirm before removing.
SCALE_AND_TRANSLATE_METHODS = {
    "linear",
    "bilinear",
    "trilinear",
    "cubic",
    "bicubic",
    "tricubic",
    "lanczos3",
    "lanczos5",
}
def rgb_to_grayscale(images, data_format=None):
    """Convert RGB images to single-channel grayscale.

    Accepts a rank-3 (single image) or rank-4 (batch) tensor whose channel
    dimension is 1 or 3. A 1-channel input is returned as a copy; a
    3-channel input is reduced with ITU-R BT.601 luma weights.
    """
    images = convert_to_tensor(images)
    data_format = backend.standardize_data_format(data_format)
    if images.ndim not in (3, 4):
        raise ValueError(
            "Invalid images rank: expected rank 3 (single image) "
            "or rank 4 (batch of images). Received input with shape: "
            f"images.shape={images.shape}"
        )
    axis = -1 if data_format == "channels_last" else -3
    if images.shape[axis] not in (1, 3):
        raise ValueError(
            "Invalid channel size: expected 3 (RGB) or 1 (Grayscale). "
            f"Received input with shape: images.shape={images.shape}"
        )
    if images.shape[axis] == 1:
        # Already grayscale; return a defensive copy.
        return images.clone()
    # Weights follow torchvision's _functional_tensor implementation.
    red, green, blue = images.unbind(dim=axis)
    gray = (0.2989 * red + 0.587 * green + 0.114 * blue).to(images.dtype)
    return gray.unsqueeze(dim=axis)
def rgb_to_hsv(images, data_format=None):
    """Convert float RGB images (values in [0, 1]) to HSV.

    Hue, saturation, and value are all returned in [0, 1]. Rank-3 (single
    image) and rank-4 (batch) inputs are supported; only float dtypes are
    accepted.
    """
    # Ref: dm_pix
    images = convert_to_tensor(images)
    dtype = images.dtype
    data_format = backend.standardize_data_format(data_format)
    channels_axis = -1 if data_format == "channels_last" else -3
    if len(images.shape) not in (3, 4):
        raise ValueError(
            "Invalid images rank: expected rank 3 (single image) "
            "or rank 4 (batch of images). Received input with shape: "
            f"images.shape={images.shape}"
        )
    if not backend.is_float_dtype(dtype):
        raise ValueError(
            "Invalid images dtype: expected float dtype. "
            f"Received: images.dtype={backend.standardize_dtype(dtype)}"
        )
    # Flush tiny magnitudes to exactly zero so the value>0 / range>0
    # guards below behave consistently near black.
    eps = torch.finfo(dtype).eps
    images = torch.where(torch.abs(images) < eps, 0.0, images)
    red, green, blue = torch.split(images, [1, 1, 1], channels_axis)
    red = torch.squeeze(red, channels_axis)
    green = torch.squeeze(green, channels_axis)
    blue = torch.squeeze(blue, channels_axis)
    def rgb_planes_to_hsv_planes(r, g, b):
        # Standard hexcone conversion. `safe_*` substitutes 1.0 where the
        # true denominator is zero so the divisions never produce inf/nan;
        # the matching torch.where then selects the defined value (0.0).
        value = torch.maximum(torch.maximum(r, g), b)
        minimum = torch.minimum(torch.minimum(r, g), b)
        range_ = value - minimum
        safe_value = torch.where(value > 0, value, 1.0)
        safe_range = torch.where(range_ > 0, range_, 1.0)
        saturation = torch.where(value > 0, range_ / safe_value, 0.0)
        norm = 1.0 / (6.0 * safe_range)
        hue = torch.where(
            value == g,
            norm * (b - r) + 2.0 / 6.0,
            norm * (r - g) + 4.0 / 6.0,
        )
        hue = torch.where(value == r, norm * (g - b), hue)
        # Gray pixels get hue 0; negative hues wrap into [0, 1).
        hue = torch.where(range_ > 0, hue, 0.0) + (hue < 0.0).to(hue.dtype)
        return hue, saturation, value
    images = torch.stack(
        rgb_planes_to_hsv_planes(red, green, blue), axis=channels_axis
    )
    return images
def hsv_to_rgb(images, data_format=None):
    """Convert float HSV images (all components in [0, 1]) back to RGB.

    Inverse of `rgb_to_hsv`. Rank-3 (single image) and rank-4 (batch)
    inputs are supported; only float dtypes are accepted.
    """
    # Ref: dm_pix
    images = convert_to_tensor(images)
    dtype = images.dtype
    data_format = backend.standardize_data_format(data_format)
    channels_axis = -1 if data_format == "channels_last" else -3
    if len(images.shape) not in (3, 4):
        raise ValueError(
            "Invalid images rank: expected rank 3 (single image) "
            "or rank 4 (batch of images). Received input with shape: "
            f"images.shape={images.shape}"
        )
    if not backend.is_float_dtype(dtype):
        raise ValueError(
            "Invalid images dtype: expected float dtype. "
            f"Received: images.dtype={backend.standardize_dtype(dtype)}"
        )
    hue, saturation, value = torch.split(images, [1, 1, 1], channels_axis)
    hue = torch.squeeze(hue, channels_axis)
    saturation = torch.squeeze(saturation, channels_axis)
    value = torch.squeeze(value, channels_axis)
    def hsv_planes_to_rgb_planes(hue, saturation, value):
        # Piecewise-linear hexcone reconstruction: dh in [0, 6) selects
        # the hue sector, and dr/dg/db are each channel's triangular
        # response within that sector.
        dh = torch.remainder(hue, 1.0) * 6.0
        dr = torch.clip(torch.abs(dh - 3.0) - 1.0, 0.0, 1.0)
        dg = torch.clip(2.0 - torch.abs(dh - 2.0), 0.0, 1.0)
        db = torch.clip(2.0 - torch.abs(dh - 4.0), 0.0, 1.0)
        one_minus_s = 1.0 - saturation
        red = value * (one_minus_s + saturation * dr)
        green = value * (one_minus_s + saturation * dg)
        blue = value * (one_minus_s + saturation * db)
        return red, green, blue
    images = torch.stack(
        hsv_planes_to_rgb_planes(hue, saturation, value), axis=channels_axis
    )
    return images
def _cast_squeeze_in(image, req_dtypes):
need_squeeze = False
# make image NCHW
if image.ndim < 4:
image = image.unsqueeze(dim=0)
need_squeeze = True
out_dtype = image.dtype
need_cast = False
if out_dtype not in req_dtypes:
need_cast = True
req_dtype = req_dtypes[0]
image = image.to(req_dtype)
return image, need_cast, need_squeeze, out_dtype
def _cast_squeeze_out(image, need_cast, need_squeeze, out_dtype):
if need_squeeze:
image = image.squeeze(dim=0)
if need_cast:
if out_dtype in (
torch.uint8,
torch.int8,
torch.int16,
torch.int32,
torch.int64,
):
# it is better to round before cast
image = torch.round(image)
image = image.to(out_dtype)
return image
def resize(
    images,
    size,
    interpolation="bilinear",
    antialias=False,
    crop_to_aspect_ratio=False,
    pad_to_aspect_ratio=False,
    fill_mode="constant",
    fill_value=0.0,
    data_format=None,
):
    """Resize images to `size`, optionally cropping/padding to aspect ratio.

    Args:
        images: Rank-3 (single image) or rank-4 (batch) tensor.
        size: `(height, width)` of the output.
        interpolation: `"bilinear"`, `"nearest"`, or `"bicubic"`. Lanczos
            is not supported by the torch backend.
        antialias: Anti-alias when downscaling (bilinear/bicubic only).
        crop_to_aspect_ratio: Center-crop to match `size`'s aspect ratio
            before resizing. Mutually exclusive with `pad_to_aspect_ratio`.
        pad_to_aspect_ratio: Center-pad with `fill_value` to match `size`'s
            aspect ratio before resizing.
        fill_mode: Only `"constant"` is supported.
        fill_value: Padding value when `pad_to_aspect_ratio=True`.
        data_format: `"channels_last"` or `"channels_first"`.

    Returns:
        The resized tensor with the same rank, layout, and dtype as the
        input.
    """
    data_format = backend.standardize_data_format(data_format)
    if interpolation in UNSUPPORTED_INTERPOLATIONS:
        raise ValueError(
            "Resizing with Lanczos interpolation is "
            "not supported by the PyTorch backend. "
            f"Received: interpolation={interpolation}."
        )
    if interpolation not in RESIZE_INTERPOLATIONS:
        raise ValueError(
            "Invalid value for argument `interpolation`. Expected of one "
            f"{RESIZE_INTERPOLATIONS}. Received: interpolation={interpolation}"
        )
    if fill_mode != "constant":
        raise ValueError(
            "Invalid value for argument `fill_mode`. Only `'constant'` "
            f"is supported. Received: fill_mode={fill_mode}"
        )
    if pad_to_aspect_ratio and crop_to_aspect_ratio:
        raise ValueError(
            "Only one of `pad_to_aspect_ratio` & `crop_to_aspect_ratio` "
            "can be `True`."
        )
    if not len(size) == 2:
        raise ValueError(
            "Argument `size` must be a tuple of two elements "
            f"(height, width). Received: size={size}"
        )
    size = tuple(size)
    images = convert_to_tensor(images)
    if images.ndim not in (3, 4):
        raise ValueError(
            "Invalid images rank: expected rank 3 (single image) "
            "or rank 4 (batch of images). Received input with shape: "
            f"images.shape={images.shape}"
        )
    # interpolate() requires float input; remember what to restore.
    images, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(
        images, [torch.float32, torch.float64]
    )
    if data_format == "channels_last":
        images = images.permute((0, 3, 1, 2))
    if crop_to_aspect_ratio:
        # Center-crop the largest window with the target aspect ratio.
        shape = images.shape
        height, width = shape[-2], shape[-1]
        target_height, target_width = size
        crop_height = int(float(width * target_height) / target_width)
        crop_height = max(min(height, crop_height), 1)
        crop_width = int(float(height * target_width) / target_height)
        crop_width = max(min(width, crop_width), 1)
        crop_box_hstart = int(float(height - crop_height) / 2)
        crop_box_wstart = int(float(width - crop_width) / 2)
        images = images[
            :,
            :,
            crop_box_hstart : crop_box_hstart + crop_height,
            crop_box_wstart : crop_box_wstart + crop_width,
        ]
    elif pad_to_aspect_ratio:
        # Center-pad one dimension so the aspect ratio matches the target.
        # At most one of img_box_hstart/img_box_wstart can be positive.
        shape = images.shape
        height, width = shape[-2], shape[-1]
        target_height, target_width = size
        pad_height = int(float(width * target_height) / target_width)
        pad_height = max(height, pad_height)
        pad_width = int(float(height * target_width) / target_height)
        pad_width = max(width, pad_width)
        img_box_hstart = int(float(pad_height - height) / 2)
        img_box_wstart = int(float(pad_width - width) / 2)
        batch_size = images.shape[0]
        channels = images.shape[1]
        if img_box_hstart > 0:
            padded_img = torch.cat(
                [
                    torch.ones(
                        (batch_size, channels, img_box_hstart, width),
                        dtype=images.dtype,
                        device=images.device,
                    )
                    * fill_value,
                    images,
                    torch.ones(
                        (batch_size, channels, img_box_hstart, width),
                        dtype=images.dtype,
                        device=images.device,
                    )
                    * fill_value,
                ],
                axis=2,
            )
        else:
            padded_img = images
        if img_box_wstart > 0:
            padded_img = torch.cat(
                [
                    torch.ones(
                        (batch_size, channels, height, img_box_wstart),
                        dtype=images.dtype,
                        device=images.device,
                    )
                    # Bug fix: the left pad previously omitted
                    # `* fill_value` and was therefore always 1.0,
                    # asymmetric with the right pad below.
                    * fill_value,
                    padded_img,
                    torch.ones(
                        (batch_size, channels, height, img_box_wstart),
                        dtype=images.dtype,
                        device=images.device,
                    )
                    * fill_value,
                ],
                axis=3,
            )
        images = padded_img
    # This implementation is based on
    # https://github.com/pytorch/vision/blob/main/torchvision/transforms/_functional_tensor.py
    if antialias and interpolation not in ("bilinear", "bicubic"):
        # We manually set it to False to avoid an error downstream in
        # interpolate(). This behaviour is documented: the parameter is
        # irrelevant for modes that are not bilinear or bicubic. We used to
        # raise an error here, but now we don't use True as the default.
        antialias = False
    # Define align_corners to avoid warnings
    align_corners = False if interpolation in ("bilinear", "bicubic") else None
    resized = F.interpolate(
        images,
        size=size,
        mode=RESIZE_INTERPOLATIONS[interpolation],
        align_corners=align_corners,
        antialias=antialias,
    )
    if interpolation == "bicubic" and out_dtype == torch.uint8:
        # Bicubic can overshoot; clamp before the uint8 round-trip.
        resized = resized.clamp(min=0, max=255)
    if data_format == "channels_last":
        resized = resized.permute((0, 2, 3, 1))
    resized = _cast_squeeze_out(
        resized,
        need_cast=need_cast,
        need_squeeze=need_squeeze,
        out_dtype=out_dtype,
    )
    return resized
def affine_transform(
    images,
    transform,
    interpolation="bilinear",
    fill_mode="constant",
    fill_value=0,
    data_format=None,
):
    """Apply (batched) projective transforms to images.

    Args:
        images: Rank-3 (single image) or rank-4 (batch) tensor.
        transform: Rank-1 (single) or rank-2 (batch) tensor of 8 projective
            transform coefficients, in the TF `ImageProjectiveTransform`
            layout `(a0, a1, a2, b0, b1, b2, c0, c1)`.
        interpolation: `"nearest"` or `"bilinear"`.
        fill_mode: One of `AFFINE_TRANSFORM_FILL_MODES`.
        fill_value: Value used outside the input for `"constant"` mode.
        data_format: `"channels_last"` or `"channels_first"`.

    Returns:
        The transformed images with the same rank and layout as the input.
    """
    data_format = backend.standardize_data_format(data_format)
    if interpolation not in AFFINE_TRANSFORM_INTERPOLATIONS.keys():
        raise ValueError(
            "Invalid value for argument `interpolation`. Expected of one "
            f"{set(AFFINE_TRANSFORM_INTERPOLATIONS.keys())}. Received: "
            f"interpolation={interpolation}"
        )
    if fill_mode not in AFFINE_TRANSFORM_FILL_MODES:
        raise ValueError(
            "Invalid value for argument `fill_mode`. Expected of one "
            f"{AFFINE_TRANSFORM_FILL_MODES}. Received: fill_mode={fill_mode}"
        )
    images = convert_to_tensor(images)
    transform = convert_to_tensor(transform)
    if images.ndim not in (3, 4):
        raise ValueError(
            "Invalid images rank: expected rank 3 (single image) "
            "or rank 4 (batch of images). Received input with shape: "
            f"images.shape={images.shape}"
        )
    if transform.ndim not in (1, 2):
        raise ValueError(
            "Invalid transform rank: expected rank 1 (single transform) "
            "or rank 2 (batch of transforms). Received input with shape: "
            f"transform.shape={transform.shape}"
        )
    # unbatched case
    need_squeeze = False
    if images.ndim == 3:
        images = images.unsqueeze(dim=0)
        need_squeeze = True
    if transform.ndim == 1:
        transform = transform.unsqueeze(dim=0)
    # Bug fix: `convert_to_tensor` may alias the caller's torch tensor and
    # the column swaps below are in-place writes, which previously mutated
    # the caller's `transform`. Work on a private copy instead.
    transform = transform.clone()
    if data_format == "channels_first":
        images = images.permute((0, 2, 3, 1))
    batch_size = images.shape[0]
    # get indices
    meshgrid = torch.meshgrid(
        *[
            torch.arange(size, dtype=transform.dtype, device=transform.device)
            for size in images.shape[1:]
        ],
        indexing="ij",
    )
    indices = torch.concatenate(
        [torch.unsqueeze(x, dim=-1) for x in meshgrid], dim=-1
    )
    indices = torch.tile(indices, (batch_size, 1, 1, 1, 1))
    # Swap x/y coefficient blocks so the matrix acts on (row, col, channel)
    # index order used by the meshgrid above.
    a0 = transform[:, 0].clone()
    a2 = transform[:, 2].clone()
    b1 = transform[:, 4].clone()
    b2 = transform[:, 5].clone()
    transform[:, 0] = b1
    transform[:, 2] = b2
    transform[:, 4] = a0
    transform[:, 5] = a2
    # Append the homogeneous row and split the translation out so it can
    # be added after the matrix product.
    transform = torch.nn.functional.pad(
        transform, pad=[0, 1, 0, 0], mode="constant", value=1
    )
    transform = torch.reshape(transform, (batch_size, 3, 3))
    offset = transform[:, 0:2, 2].clone()
    offset = torch.nn.functional.pad(offset, pad=[0, 1, 0, 0])
    transform[:, 0:2, 2] = 0
    # transform the indices
    coordinates = torch.einsum("Bhwij, Bjk -> Bhwik", indices, transform)
    coordinates = torch.moveaxis(coordinates, source=-1, destination=1)
    coordinates += torch.reshape(offset, shape=(*offset.shape, 1, 1, 1))
    # Note: torch.stack is faster than torch.vmap when the batch size is small.
    affined = torch.stack(
        [
            map_coordinates(
                images[i],
                coordinates[i],
                order=AFFINE_TRANSFORM_INTERPOLATIONS[interpolation],
                fill_mode=fill_mode,
                fill_value=fill_value,
            )
            for i in range(len(images))
        ],
    )
    if data_format == "channels_first":
        affined = affined.permute((0, 3, 1, 2))
    if need_squeeze:
        affined = affined.squeeze(dim=0)
    return affined
def perspective_transform(
    images,
    start_points,
    end_points,
    interpolation="bilinear",
    fill_value=0,
    data_format=None,
):
    """Warp images by the homography mapping `start_points` to `end_points`.

    `start_points`/`end_points` are `(4, 2)` corner lists (or `(N, 4, 2)`
    batches) of `(x, y)` pairs. Out-of-bounds samples are filled with
    `fill_value`.
    """
    data_format = backend.standardize_data_format(data_format)
    images = convert_to_tensor(images)
    dtype = backend.standardize_dtype(images.dtype)
    start_points = convert_to_tensor(start_points, dtype=dtype)
    end_points = convert_to_tensor(end_points, dtype=dtype)
    if interpolation not in AFFINE_TRANSFORM_INTERPOLATIONS.keys():
        raise ValueError(
            "Invalid value for argument `interpolation`. Expected of one "
            f"{set(AFFINE_TRANSFORM_INTERPOLATIONS.keys())}. Received: "
            f"interpolation={interpolation}"
        )
    if images.ndim not in (3, 4):
        raise ValueError(
            "Invalid images rank: expected rank 3 (single image) "
            "or rank 4 (batch of images). Received input with shape: "
            f"images.shape={images.shape}"
        )
    if start_points.shape[-2:] != (4, 2) or start_points.dim() not in (2, 3):
        raise ValueError(
            "Invalid start_points shape: expected (4,2) for a single image"
            f" or (N,4,2) for a batch. Received shape: {start_points.shape}"
        )
    if end_points.shape[-2:] != (4, 2) or end_points.dim() not in (2, 3):
        raise ValueError(
            "Invalid end_points shape: expected (4,2) for a single image"
            f" or (N,4,2) for a batch. Received shape: {end_points.shape}"
        )
    if start_points.shape != end_points.shape:
        raise ValueError(
            "start_points and end_points must have the same shape."
            f" Received start_points.shape={start_points.shape}, "
            f"end_points.shape={end_points.shape}"
        )
    # Promote everything to batched, channels-last form.
    need_squeeze = False
    if images.ndim == 3:
        images = images.unsqueeze(dim=0)
        need_squeeze = True
    if start_points.ndim == 2:
        start_points = start_points.unsqueeze(dim=0)
    if end_points.ndim == 2:
        end_points = end_points.unsqueeze(dim=0)
    if data_format == "channels_first":
        images = images.permute((0, 2, 3, 1))
    batch_size, height, width, channels = images.shape
    # One 8-vector of homography coefficients per batch element; broadcast
    # a single transform across the batch if necessary.
    transforms = compute_homography_matrix(start_points, end_points)
    if transforms.dim() == 1:
        transforms = transforms.unsqueeze(0)
    if transforms.shape[0] == 1 and batch_size > 1:
        transforms = transforms.repeat(batch_size, 1)
    grid_x, grid_y = torch.meshgrid(
        torch.arange(width, dtype=to_torch_dtype(dtype), device=images.device),
        torch.arange(height, dtype=to_torch_dtype(dtype), device=images.device),
        indexing="xy",
    )
    output = torch.empty(
        [batch_size, height, width, channels],
        dtype=to_torch_dtype(dtype),
        device=images.device,
    )
    for i in range(batch_size):
        # Inverse-map each output pixel to its source location via the
        # projective division, then sample per channel.
        a0, a1, a2, a3, a4, a5, a6, a7 = transforms[i]
        denom = a6 * grid_x + a7 * grid_y + 1.0
        x_in = (a0 * grid_x + a1 * grid_y + a2) / denom
        y_in = (a3 * grid_x + a4 * grid_y + a5) / denom
        coords = torch.stack([y_in.flatten(), x_in.flatten()], dim=0)
        mapped_channels = []
        for channel in range(channels):
            channel_img = images[i, :, :, channel]
            mapped_channel = map_coordinates(
                channel_img,
                coords,
                order=AFFINE_TRANSFORM_INTERPOLATIONS[interpolation],
                fill_mode="constant",
                fill_value=fill_value,
            )
            mapped_channels.append(mapped_channel.reshape(height, width))
        output[i] = torch.stack(mapped_channels, dim=-1)
    if data_format == "channels_first":
        output = output.permute((0, 3, 1, 2))
    if need_squeeze:
        output = output.squeeze(dim=0)
    return output
def compute_homography_matrix(start_points, end_points):
    """Solve for the homography mapping `end_points` to `start_points`.

    Builds the standard 8x8 direct-linear-transform system (two rows per
    point correspondence) and solves it with `torch.linalg.solve`.

    Args:
        start_points: `(N, 4, 2)` tensor of source `(x, y)` corners.
        end_points: `(N, 4, 2)` tensor of destination `(x, y)` corners.

    Returns:
        `(N, 8)` tensor of homography coefficients
        `(a0, a1, a2, a3, a4, a5, a6, a7)` (the ninth entry is fixed to 1).
    """
    start_points = convert_to_tensor(start_points)
    end_points = convert_to_tensor(end_points)
    dtype = backend.result_type(start_points.dtype, end_points.dtype, float)
    # `torch.linalg.solve` requires float32.
    compute_dtype = backend.result_type(dtype, "float32")
    start_points = cast(start_points, dtype)
    end_points = cast(end_points, dtype)
    # Unpack the four correspondences into scalars-per-batch.
    start_x1, start_y1 = start_points[:, 0, 0], start_points[:, 0, 1]
    start_x2, start_y2 = start_points[:, 1, 0], start_points[:, 1, 1]
    start_x3, start_y3 = start_points[:, 2, 0], start_points[:, 2, 1]
    start_x4, start_y4 = start_points[:, 3, 0], start_points[:, 3, 1]
    end_x1, end_y1 = end_points[:, 0, 0], end_points[:, 0, 1]
    end_x2, end_y2 = end_points[:, 1, 0], end_points[:, 1, 1]
    end_x3, end_y3 = end_points[:, 2, 0], end_points[:, 2, 1]
    end_x4, end_y4 = end_points[:, 3, 0], end_points[:, 3, 1]
    # Each point contributes an x-row and a y-row of the DLT system.
    coefficient_matrix = torch.stack(
        [
            # Point 1, x equation.
            torch.stack(
                [
                    end_x1,
                    end_y1,
                    torch.ones_like(end_x1),
                    torch.zeros_like(end_x1),
                    torch.zeros_like(end_x1),
                    torch.zeros_like(end_x1),
                    -start_x1 * end_x1,
                    -start_x1 * end_y1,
                ],
                dim=-1,
            ),
            # Point 1, y equation.
            torch.stack(
                [
                    torch.zeros_like(end_x1),
                    torch.zeros_like(end_x1),
                    torch.zeros_like(end_x1),
                    end_x1,
                    end_y1,
                    torch.ones_like(end_x1),
                    -start_y1 * end_x1,
                    -start_y1 * end_y1,
                ],
                dim=-1,
            ),
            # Point 2, x equation.
            torch.stack(
                [
                    end_x2,
                    end_y2,
                    torch.ones_like(end_x2),
                    torch.zeros_like(end_x2),
                    torch.zeros_like(end_x2),
                    torch.zeros_like(end_x2),
                    -start_x2 * end_x2,
                    -start_x2 * end_y2,
                ],
                dim=-1,
            ),
            # Point 2, y equation.
            torch.stack(
                [
                    torch.zeros_like(end_x2),
                    torch.zeros_like(end_x2),
                    torch.zeros_like(end_x2),
                    end_x2,
                    end_y2,
                    torch.ones_like(end_x2),
                    -start_y2 * end_x2,
                    -start_y2 * end_y2,
                ],
                dim=-1,
            ),
            # Point 3, x equation.
            torch.stack(
                [
                    end_x3,
                    end_y3,
                    torch.ones_like(end_x3),
                    torch.zeros_like(end_x3),
                    torch.zeros_like(end_x3),
                    torch.zeros_like(end_x3),
                    -start_x3 * end_x3,
                    -start_x3 * end_y3,
                ],
                dim=-1,
            ),
            # Point 3, y equation.
            torch.stack(
                [
                    torch.zeros_like(end_x3),
                    torch.zeros_like(end_x3),
                    torch.zeros_like(end_x3),
                    end_x3,
                    end_y3,
                    torch.ones_like(end_x3),
                    -start_y3 * end_x3,
                    -start_y3 * end_y3,
                ],
                dim=-1,
            ),
            # Point 4, x equation.
            torch.stack(
                [
                    end_x4,
                    end_y4,
                    torch.ones_like(end_x4),
                    torch.zeros_like(end_x4),
                    torch.zeros_like(end_x4),
                    torch.zeros_like(end_x4),
                    -start_x4 * end_x4,
                    -start_x4 * end_y4,
                ],
                dim=-1,
            ),
            # Point 4, y equation.
            torch.stack(
                [
                    torch.zeros_like(end_x4),
                    torch.zeros_like(end_x4),
                    torch.zeros_like(end_x4),
                    end_x4,
                    end_y4,
                    torch.ones_like(end_x4),
                    -start_y4 * end_x4,
                    -start_y4 * end_y4,
                ],
                dim=-1,
            ),
        ],
        dim=1,
    )
    target_vector = torch.stack(
        [
            start_x1,
            start_y1,
            start_x2,
            start_y2,
            start_x3,
            start_y3,
            start_x4,
            start_y4,
        ],
        dim=-1,
    ).unsqueeze(-1)
    coefficient_matrix = cast(coefficient_matrix, compute_dtype)
    target_vector = cast(target_vector, compute_dtype)
    homography_matrix = torch.linalg.solve(coefficient_matrix, target_vector)
    homography_matrix = homography_matrix.reshape(-1, 8)
    homography_matrix = cast(homography_matrix, dtype)
    return homography_matrix
def _mirror_index_fixer(index, size):
s = size - 1 # Half-wavelength of triangular wave
# Scaled, integer-valued version of the triangular wave |x - round(x)|
return torch.abs((index + s) % (2 * s) - s)
def _reflect_index_fixer(index, size):
return torch.floor_divide(
_mirror_index_fixer(2 * index + 1, 2 * size + 1) - 1, 2
)
# Maps `fill_mode` -> fn(index, size) returning an always-in-range index.
# For "constant" the clamp only makes the gather safe; the out-of-range
# samples are masked to `fill_value` separately in `map_coordinates`.
_INDEX_FIXERS = {
    # we need to take care of out-of-bound indices in torch
    "constant": lambda index, size: torch.clip(index, 0, size - 1),
    "nearest": lambda index, size: torch.clip(index, 0, size - 1),
    "wrap": lambda index, size: index % size,
    "mirror": _mirror_index_fixer,
    "reflect": _reflect_index_fixer,
}
def _is_integer(a):
if not torch.is_floating_point(a) and not torch.is_complex(a):
return True
return False
def _nearest_indices_and_weights(coordinate):
coordinate = (
coordinate if _is_integer(coordinate) else torch.round(coordinate)
)
index = coordinate.to(torch.int32)
return [(index, 1)]
def _linear_indices_and_weights(coordinate):
lower = torch.floor(coordinate)
upper_weight = coordinate - lower
lower_weight = 1 - upper_weight
index = lower.to(torch.int32)
return [(index, lower_weight), (index + 1, upper_weight)]
def map_coordinates(
    inputs, coordinates, order, fill_mode="constant", fill_value=0.0
):
    """Sample `inputs` at fractional `coordinates` (scipy-style).

    Args:
        inputs: N-dimensional tensor to sample from.
        coordinates: Sequence of length `inputs.ndim`; element `i` holds
            the (possibly fractional) indices along axis `i`.
        order: Interpolation order, 0 (nearest) or 1 (linear).
        fill_mode: How to treat out-of-range coordinates; a key of
            `_INDEX_FIXERS`.
        fill_value: Value used for out-of-range samples in "constant" mode.

    Returns:
        Tensor shaped like one coordinate array, with `inputs.dtype`.
    """
    input_arr = convert_to_tensor(inputs)
    coordinate_arrs = [convert_to_tensor(c) for c in coordinates]
    if len(coordinate_arrs) != len(input_arr.shape):
        raise ValueError(
            "First dim of `coordinates` must be the same as the rank of "
            "`inputs`. "
            f"Received inputs with shape: {input_arr.shape} and coordinate "
            f"leading dim of {len(coordinate_arrs)}"
        )
    if len(coordinate_arrs[0].shape) < 1:
        dim = len(coordinate_arrs)
        shape = (dim,) + coordinate_arrs[0].shape
        raise ValueError(
            "Invalid coordinates rank: expected at least rank 2."
            f" Received input with shape: {shape}"
        )
    # skip tensor creation as possible
    if isinstance(fill_value, (int, float)) and _is_integer(input_arr):
        fill_value = int(fill_value)
    if len(coordinates) != len(input_arr.shape):
        raise ValueError(
            "coordinates must be a sequence of length inputs.shape, but "
            f"{len(coordinates)} != {len(input_arr.shape)}"
        )
    index_fixer = _INDEX_FIXERS.get(fill_mode)
    if index_fixer is None:
        raise ValueError(
            "Invalid value for argument `fill_mode`. Expected one of "
            f"{set(_INDEX_FIXERS.keys())}. Received: fill_mode={fill_mode}"
        )
    if order == 0:
        interp_fun = _nearest_indices_and_weights
    elif order == 1:
        interp_fun = _linear_indices_and_weights
    else:
        raise NotImplementedError("map_coordinates currently requires order<=1")
    # In "constant" mode, out-of-range samples must be replaced with
    # `fill_value`; in every other mode the index fixer already maps them
    # to a valid in-range sample.
    if fill_mode == "constant":
        def is_valid(index, size):
            return (0 <= index) & (index < size)
    else:
        def is_valid(index, size):
            return True
    # Per axis: the candidate (fixed_index, validity_mask, weight) nodes.
    valid_1d_interpolations = []
    for coordinate, size in zip(coordinate_arrs, input_arr.shape):
        interp_nodes = interp_fun(coordinate)
        valid_interp = []
        for index, weight in interp_nodes:
            fixed_index = index_fixer(index, size)
            valid = is_valid(index, size)
            valid_interp.append((fixed_index, valid, weight))
        valid_1d_interpolations.append(valid_interp)
    # Accumulate the weighted contribution of every corner combination
    # (2^ndim corners for order 1, a single one for order 0).
    outputs = []
    for items in itertools.product(*valid_1d_interpolations):
        indices, validities, weights = zip(*items)
        if all(valid is True for valid in validities):
            # fast path
            contribution = input_arr[indices]
        else:
            all_valid = functools.reduce(operator.and_, validities)
            contribution = torch.where(
                all_valid, input_arr[indices], fill_value
            )
        outputs.append(functools.reduce(operator.mul, weights) * contribution)
    result = functools.reduce(operator.add, outputs)
    # Integer inputs round-trip through float math; round before casting
    # back so values are not truncated.
    if _is_integer(input_arr):
        result = result if _is_integer(result) else torch.round(result)
    return result.to(input_arr.dtype)
def gaussian_blur(
    images, kernel_size=(3, 3), sigma=(1.0, 1.0), data_format=None
):
    """Blur images with a per-channel (depthwise) 2D Gaussian kernel.

    Args:
        images: Rank-3 (single image) or rank-4 (batch) tensor.
        kernel_size: Two-element kernel size.
        sigma: Two-element Gaussian standard deviations.
        data_format: `"channels_last"` or `"channels_first"`.

    Returns:
        The blurred images, same rank and layout as the input.
    """
    def _create_gaussian_kernel(kernel_size, sigma, dtype):
        # Separable Gaussian: two normalized 1D kernels combined by outer
        # product. NOTE(review): `_get_gaussian_kernel2d` pairs size[0]
        # with sigma[0] along x but builds the outer product as (y, x),
        # while the reshape below and the single-int conv padding assume a
        # square kernel — confirm before passing a non-square
        # `kernel_size`.
        def _get_gaussian_kernel1d(size, sigma):
            x = (
                torch.arange(size, dtype=dtype, device=sigma.device)
                - (size - 1) / 2
            )
            kernel1d = torch.exp(-0.5 * (x / sigma) ** 2)
            return kernel1d / torch.sum(kernel1d)
        def _get_gaussian_kernel2d(size, sigma):
            kernel1d_x = _get_gaussian_kernel1d(size[0], sigma[0])
            kernel1d_y = _get_gaussian_kernel1d(size[1], sigma[1])
            return torch.outer(kernel1d_y, kernel1d_x)
        kernel = _get_gaussian_kernel2d(kernel_size, sigma)
        kernel = kernel.view(1, 1, kernel_size[0], kernel_size[1])
        return kernel
    # Bug fix: standardize `data_format` like every other function in this
    # module, so `data_format=None` resolves to the Keras global default
    # instead of silently being treated as "channels_first".
    data_format = backend.standardize_data_format(data_format)
    images = convert_to_tensor(images)
    kernel_size = convert_to_tensor(kernel_size)
    sigma = convert_to_tensor(sigma)
    dtype = images.dtype
    if len(images.shape) not in (3, 4):
        raise ValueError(
            "Invalid images rank: expected rank 3 (single image) "
            "or rank 4 (batch of images). Received input with shape: "
            f"images.shape={images.shape}"
        )
    need_squeeze = False
    if images.ndim == 3:
        images = images.unsqueeze(dim=0)
        need_squeeze = True
    if data_format == "channels_last":
        images = images.permute(0, 3, 1, 2)
    num_channels = images.shape[1]
    kernel = _create_gaussian_kernel(kernel_size, sigma, dtype)
    # `groups=num_channels` applies the same kernel independently per
    # channel (depthwise convolution).
    kernel = kernel.expand(num_channels, 1, kernel_size[0], kernel_size[1])
    blurred_images = torch.nn.functional.conv2d(
        images,
        kernel,
        stride=1,
        padding=int(kernel_size[0] // 2),
        groups=num_channels,
    )
    if data_format == "channels_last":
        blurred_images = blurred_images.permute(0, 2, 3, 1)
    if need_squeeze:
        blurred_images = blurred_images.squeeze(dim=0)
    return blurred_images
@dynamo.disable()
def _torch_seed_generator(seed):
    """Build a seeded `torch.Generator` for the current device.

    Wrapped in `dynamo.disable()` so torch.compile does not try to trace
    generator construction. Returns None on the "meta" device, where no
    RNG state can exist.
    """
    first_seed, second_seed = draw_seed(seed)
    device = get_device()
    if device == "meta":
        return None
    generator = torch.Generator(device=get_device())
    # Fold both seed words into a single integer seed.
    generator.manual_seed(int(first_seed + second_seed))
    return generator
def elastic_transform(
images,
alpha=20.0,
sigma=5.0,
interpolation="bilinear",
fill_mode="reflect",
fill_value=0.0,
seed=None,
data_format=None,
):
data_format = backend.standardize_data_format(data_format)
if interpolation not in AFFINE_TRANSFORM_INTERPOLATIONS.keys():
raise ValueError(
"Invalid value for argument `interpolation`. Expected of one "
f"{set(AFFINE_TRANSFORM_INTERPOLATIONS.keys())}. Received: "
f"interpolation={interpolation}"
)
if fill_mode not in AFFINE_TRANSFORM_FILL_MODES:
raise ValueError(
"Invalid value for argument `fill_mode`. Expected of one "
f"{AFFINE_TRANSFORM_FILL_MODES}. Received: fill_mode={fill_mode}"
)
if len(images.shape) not in (3, 4):
raise ValueError(
"Invalid images rank: expected rank 3 (single image) "
"or rank 4 (batch of images). Received input with shape: "
f"images.shape={images.shape}"
)
images = convert_to_tensor(images)
alpha = convert_to_tensor(alpha)
sigma = convert_to_tensor(sigma)
input_dtype = images.dtype
kernel_size = (int(6 * sigma) | 1, int(6 * sigma) | 1)
need_squeeze = False
if images.ndim == 3:
images = images.unsqueeze(dim=0)
need_squeeze = True
if data_format == "channels_last":
batch_size, height, width, channels = images.shape
channel_axis = -1
else:
batch_size, channels, height, width = images.shape
channel_axis = 1
generator = _torch_seed_generator(seed) if get_device() == "meta" else None
dx = (
torch.normal(
0.0,
1.0,
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/torch/layer.py | keras/src/backend/torch/layer.py | import torch
from keras.src.backend.common.stateless_scope import in_stateless_scope
from keras.src.ops.operation import Operation
class TorchLayer(torch.nn.Module):
    """Torch-backend mixin that makes Keras layers act as `nn.Module`s.

    Keras variables are mirrored into a `torch.nn.ParameterDict` keyed by
    variable path (`_torch_params`), so torch machinery such as
    `named_parameters` and optimizers can see them. The mirror is created
    lazily and kept in sync as variables are tracked/untracked.
    """

    @property
    def torch_params(self):
        # Lazily build the parameter mirror on first access.
        if not hasattr(self, "_torch_params"):
            self._track_variables()
        return self._torch_params

    def _post_build(self):
        # Do not track variables when in a stateless scope.
        # The variables are not initialized.
        if in_stateless_scope():
            return
        self._track_variables()

    def _track_variables(self):
        # set torch_params attribute will have module automatically track
        # parameters.
        self._torch_params = torch.nn.ParameterDict(
            {variable.path: variable.value for variable in self.variables}
        )

    def named_parameters(
        self,
        prefix="",
        recurse=True,
        remove_duplicate=True,
    ):
        # Ensure the mirror exists before delegating to torch's iterator.
        if not hasattr(self, "_torch_params"):
            self._track_variables()
        return torch.nn.Module.named_parameters(
            self, prefix, recurse, remove_duplicate
        )

    def forward(self, *args, **kwargs):
        # Route torch's forward() through the Keras Operation call path.
        return Operation.__call__(self, *args, **kwargs)

    def _setattr_hook(self, name, value):
        from keras.src.layers import Layer

        # Wrap plain torch modules assigned as attributes so Keras tracks
        # them; Keras layers and the internal mirror are left untouched.
        if (
            isinstance(value, torch.nn.Module)
            and not isinstance(value, Layer)
            and not name == "_torch_params"
        ):
            from keras.src.utils.torch_utils import TorchModuleWrapper

            if not isinstance(self, TorchModuleWrapper):
                value = TorchModuleWrapper(value)
        return name, value

    def _post_track_variable(self, variable):
        # Keep the torch mirror in sync when a variable is added.
        if hasattr(self, "_torch_params"):
            if variable.path not in self.torch_params:
                self.torch_params[variable.path] = variable.value

    def _post_untrack_variable(self, variable):
        # Keep the torch mirror in sync when a variable is removed.
        if hasattr(self, "_torch_params"):
            if variable.path in self.torch_params:
                self.torch_params.pop(variable.path)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/torch/export.py | keras/src/backend/torch/export.py | import copy
import warnings
import torch
from keras.src import tree
from keras.src.export.export_utils import convert_spec_to_tensor
from keras.src.utils.module_utils import tensorflow as tf
from keras.src.utils.module_utils import torch_xla
class TorchExportArchive:
    """Export-archive mixin for the torch backend.

    Exports a `torch.nn.Module` to StableHLO via torch_xla and wraps the
    result in a `tf.function` so it can be written out as a TF
    SavedModel-style endpoint. Unlike other backends, tracking and
    endpoint registration happen in one step
    (`track_and_add_endpoint`).
    """

    def _track_layer(self, layer):
        raise NotImplementedError(
            "`track` is not supported for `Layer`s and `Model`s in the torch "
            "backend. Use `track_and_add_endpoint` instead."
        )

    def add_endpoint(self, name, fn, input_signature, **kwargs):
        raise NotImplementedError(
            "`add_endpoint` is not supported for `Layer`s and `Model`s in the "
            "torch backend. Use `track_and_add_endpoint` instead."
        )

    def track_and_add_endpoint(self, name, resource, input_signature, **kwargs):
        """Trace `resource` with `input_signature` and return a wrapping
        `tf.function`; its variables are tracked for serialization."""
        # Disable false alarms related to lifting parameters.
        warnings.filterwarnings("ignore", message=".*created when tracing.*")
        warnings.filterwarnings(
            "ignore", message=".*Unable to find the path of the module.*"
        )
        if not isinstance(resource, torch.nn.Module):
            raise TypeError(
                "`resource` must be an instance of `torch.nn.Module`. "
                f"Received: resource={resource} (of type {type(resource)})"
            )
        # Concrete sample tensors for tracing; dynamic dims become 1.
        sample_inputs = tree.map_structure(
            lambda x: convert_spec_to_tensor(x, replace_none_number=1),
            input_signature,
        )
        sample_inputs = tuple(sample_inputs)
        # Ref: torch_xla.tf_saved_model_integration
        # TODO: Utilize `dynamic_shapes`
        exported = torch.export.export(
            resource, sample_inputs, dynamic_shapes=None, strict=False
        )
        options = torch_xla.stablehlo.StableHLOExportOptions(
            override_tracing_arguments=sample_inputs
        )
        stablehlo_model = torch_xla.stablehlo.exported_program_to_stablehlo(
            exported, options
        )
        state_dict_keys = list(stablehlo_model._bundle.state_dict.keys())
        # Remove unused variables.
        for k in state_dict_keys:
            if "lifted" not in k:
                stablehlo_model._bundle.state_dict.pop(k)
        # Re-materialize the remaining state as tf.Variables so the TF
        # saving machinery can serialize it.
        bundle = copy.deepcopy(stablehlo_model._bundle)
        bundle.state_dict = {
            k: tf.Variable(v, trainable=False, name=k)
            for k, v in bundle.state_dict.items()
        }
        bundle.additional_constants = [
            tf.Variable(v, trainable=False) for v in bundle.additional_constants
        ]
        # Track variables in `bundle` for `write_out`.
        self._tf_trackable.variables += (
            list(bundle.state_dict.values()) + bundle.additional_constants
        )

        # Ref: torch_xla.tf_saved_model_integration.save_stablehlo_graph_as_tf
        def make_tf_function(func, bundle):
            from tensorflow.compiler.tf2xla.python import xla as tfxla

            def _get_shape_with_dynamic(signature):
                # Replace dynamic dims with None for the TF signature.
                shape = copy.copy(signature.shape)
                for i in signature.dynamic_dims:
                    shape[i] = None
                return shape

            def _extract_call_parameters(args, meta, bundle):
                # Interleave user args with lifted parameters/constants in
                # the positional order the StableHLO function expects.
                call_args = []
                if meta.input_pytree_spec is not None:
                    args = tree.flatten(args)
                for loc in meta.input_locations:
                    if loc.type_ == torch_xla.stablehlo.VariableType.PARAMETER:
                        call_args.append(bundle.state_dict[loc.name])
                    elif loc.type_ == torch_xla.stablehlo.VariableType.CONSTANT:
                        call_args.append(
                            bundle.additional_constants[loc.position]
                        )
                    else:
                        call_args.append(args[loc.position])
                return call_args

            def inner(*args):
                Touts = [sig.dtype for sig in func.meta.output_signature]
                Souts = [
                    _get_shape_with_dynamic(sig)
                    for sig in func.meta.output_signature
                ]
                call_args = _extract_call_parameters(args, func.meta, bundle)
                results = tfxla.call_module(
                    tuple(call_args),
                    version=5,
                    Tout=Touts,  # dtype information
                    Sout=Souts,  # Shape information
                    function_list=[],
                    module=func.bytecode,
                )
                if len(Souts) == 1:
                    results = results[0]
                return results

            return inner

        decorated_fn = tf.function(
            make_tf_function(
                stablehlo_model._bundle.stablehlo_funcs[0], bundle
            ),
            input_signature=input_signature,
        )
        return decorated_fn
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/torch/rnn.py | keras/src/backend/torch/rnn.py | import numpy as np
import torch
from keras.src import tree
from keras.src.backend.torch.core import convert_to_tensor
from keras.src.backend.torch.core import get_device
def rnn(
    step_function,
    inputs,
    initial_states,
    go_backwards=False,
    mask=None,
    constants=None,
    unroll=False,
    input_length=None,
    time_major=False,
    zero_output_for_mask=False,
    return_all_outputs=True,
):
    """Iterate `step_function` over the time dimension of `inputs`.

    Generic (non-cuDNN) RNN loop for the torch backend. Inputs may be a
    nested structure of tensors; internally everything is transposed to
    time-major, stepped through either unrolled (Python loop over
    pre-split timesteps) or via an index-driven while loop, and
    transposed back at the end.

    Returns:
        Tuple `(last_output, outputs, new_states)` where `outputs` has
        all timesteps if `return_all_outputs` else only the last one.
    """
    input_length = input_length or inputs.shape[1]

    def swap_batch_timestep(input_t):
        # Swap the batch and timestep dim for the incoming tensor.
        axes = list(range(len(input_t.shape)))
        axes[0], axes[1] = 1, 0
        return torch.permute(input_t, axes)

    if not time_major:
        inputs = tree.map_structure(swap_batch_timestep, inputs)

    flattened_inputs = tree.flatten(inputs)
    time_steps = flattened_inputs[0].shape[0]
    time_steps_t = time_steps

    if mask is not None:
        if mask.dtype != torch.bool:
            mask = mask.type(torch.bool)
        if len(mask.shape) == 2:
            # Give the mask a trailing feature axis for broadcasting.
            mask = torch.unsqueeze(mask, -1)
        if not time_major:
            mask = swap_batch_timestep(mask)

    if constants is None:
        constants = []

    def _expand_mask(mask_t, input_t, fixed_dim=1):
        # Tile `mask_t` so it has the same shape as `input_t`, keeping the
        # first `fixed_dim` axes untouched.
        if tree.is_nested(mask_t):
            raise ValueError(
                f"mask_t is expected to be tensor,\
                but got {mask_t}"
            )
        if tree.is_nested(input_t):
            raise ValueError(
                f"input_t is expected to be tensor,\
                but got {input_t}"
            )
        rank_diff = len(input_t.shape) - len(mask_t.shape)
        for _ in range(rank_diff):
            mask_t = torch.unsqueeze(mask_t, -1)
        multiples = [1] * fixed_dim + list(input_t.shape[fixed_dim:])
        return torch.tile(mask_t, multiples)

    if unroll:
        if not time_steps:
            raise ValueError("Unrolling requires a fixed number of timesteps.")
        states = tuple(initial_states)
        successive_states = []
        successive_outputs = []

        # Process the input tensors. The input tensor need to be split on the
        # time_step dim, and reverse if go_backwards is True. In the case of
        # nested input, the input is flattened and then transformed
        # individually. The result of this will be a tuple of lists, each of
        # the item in tuple is list of the tensor with shape (batch, feature)
        def _process_single_input_t(input_t):
            input_t = torch.unbind(input_t)  # unstack for time_step dim
            if go_backwards:
                input_t = input_t[::-1]
            return input_t

        if tree.is_nested(inputs):
            processed_input = tree.map_structure(
                _process_single_input_t, inputs
            )
        else:
            processed_input = (_process_single_input_t(inputs),)

        def _get_input_tensor(time):
            inp = [t_[time] for t_ in processed_input]
            return tree.pack_sequence_as(inputs, inp)

        if mask is not None:
            mask_list = torch.unbind(mask)
            if go_backwards:
                mask_list = torch.flip(mask_list, dims=mask_list.shape)

            for i in range(time_steps):
                inp = _get_input_tensor(i)
                mask_t = mask_list[i]
                output, new_states = step_function(
                    inp, tuple(states) + tuple(constants)
                )
                # Masked timesteps keep the previous output (zeros at t=0).
                tiled_mask_t = _expand_mask(mask_t, output)
                if not successive_outputs:
                    prev_output = torch.zeros_like(output)
                else:
                    prev_output = successive_outputs[-1]
                output = torch.where(tiled_mask_t, output, prev_output)

                # Masked timesteps also keep the previous state.
                flat_states = tree.flatten(states)
                flat_new_states = tree.flatten(new_states)
                tiled_mask_t = tuple(
                    _expand_mask(mask_t, s) for s in flat_states
                )
                flat_final_states = tuple(
                    torch.where(m, s, ps)
                    for m, s, ps in zip(
                        tiled_mask_t, flat_new_states, flat_states
                    )
                )
                states = tree.pack_sequence_as(states, flat_final_states)
                if return_all_outputs:
                    successive_outputs.append(output)
                    successive_states.append(states)
                else:
                    successive_outputs = [output]
                    successive_states = [states]
            last_output = successive_outputs[-1]
            new_states = successive_states[-1]
            outputs = torch.stack(successive_outputs)

            if zero_output_for_mask:
                # Zero out fully-masked positions instead of carrying values.
                last_output = torch.where(
                    _expand_mask(mask_list[-1], last_output),
                    last_output,
                    torch.zeros_like(last_output),
                )
                outputs = torch.where(
                    _expand_mask(mask, outputs, fixed_dim=2),
                    outputs,
                    torch.zeros_like(outputs),
                )
        else:  # mask is None
            for i in range(time_steps):
                inp = _get_input_tensor(i)
                output, states = step_function(
                    inp, tuple(states) + tuple(constants)
                )
                if return_all_outputs:
                    successive_outputs.append(output)
                    successive_states.append(states)
                else:
                    successive_outputs = [output]
                    successive_states = [states]
            last_output = successive_outputs[-1]
            new_states = successive_states[-1]
            outputs = torch.stack(successive_outputs)

    else:  # Unroll == False
        states = tuple(initial_states)

        # Create input tensor array, if the inputs is nested tensors, then it
        # will be flattened first, and tensor array will be created one per
        # flattened tensor.
        input_ta = tuple(
            (
                list(torch.unbind(input_))
                if not go_backwards
                else list(torch.unbind(torch.flip(input_, [0])))
            )
            for input_ in flattened_inputs
        )

        # Get the time(0) input and compute the output for that.
        input_time_zero = tree.pack_sequence_as(
            inputs, [inp[0] for inp in flattened_inputs]
        )
        # output_time_zero is used to determine the cell output shape.
        output_time_zero, _ = step_function(
            input_time_zero, tuple(initial_states) + tuple(constants)
        )

        # Python-list stand-ins for TF TensorArrays, one per flat output.
        output_ta_size = time_steps_t if return_all_outputs else 1
        output_ta = []
        for out in tree.flatten(output_time_zero):
            out_list = list(out)
            if len(out) < output_ta_size:
                out_list.extend([[]] * (output_ta_size - len(out)))
            output_ta.append(out_list)

        time = torch.tensor(0, dtype=torch.int32)

        if input_length is None:
            max_iterations = time_steps_t
        else:
            if hasattr(input_length, "__len__"):
                input_length = convert_to_tensor(input_length)
                max_iterations = torch.max(input_length)
            else:
                max_iterations = input_length

        if mask is not None:
            if go_backwards:
                mask = torch.flip(mask, [0])
            mask_ta = list(torch.unbind(mask))

            def masking_fn(time):
                return mask_ta[time]

            def compute_masked_output(mask_t, flat_out, flat_mask):
                tiled_mask_t = tuple(
                    _expand_mask(mask_t, o, fixed_dim=len(mask_t.shape))
                    for o in flat_out
                )
                return tuple(
                    torch.where(m, o, fm)
                    for m, o, fm in zip(tiled_mask_t, flat_out, flat_mask)
                )

        elif isinstance(input_length, torch.Tensor):
            if go_backwards:
                max_len = torch.max(input_length, dim=0)
                if isinstance(max_len, torch.return_types.max):
                    max_len = max_len[0]
                rev_input_length = torch.subtract(max_len - 1, input_length)

                def masking_fn(time):
                    return torch.less(rev_input_length, time)

            else:

                def masking_fn(time):
                    return torch.greater(input_length, time)

            def compute_masked_output(mask_t, flat_out, flat_mask):
                return tuple(
                    torch.where(mask_t, o, zo)
                    for (o, zo) in zip(flat_out, flat_mask)
                )

        else:
            masking_fn = None

        if masking_fn is not None:
            # Mask for the T output will be base on the output of T - 1. In the
            # case T = 0, a zero filled tensor will be used.
            flat_zero_output = tuple(
                torch.zeros_like(o) for o in tree.flatten(output_time_zero)
            )

            def _step(time, output_ta_t, prev_output, *states):
                """RNN step function.

                Args:
                    time: Current timestep value.
                    output_ta_t: TensorArray.
                    prev_output: tuple of outputs from time - 1.
                    *states: List of states.

                Returns:
                    Tuple: `(time + 1, output_ta_t, output) + tuple(new_states)`
                """
                current_input = tuple(ta[time] for ta in input_ta)
                # maybe set shape.
                current_input = tree.pack_sequence_as(inputs, current_input)
                mask_t = masking_fn(time)
                output, new_states = step_function(
                    current_input, tuple(states) + tuple(constants)
                )
                # mask output
                flat_output = tree.flatten(output)
                flat_mask_output = (
                    flat_zero_output
                    if zero_output_for_mask
                    else tree.flatten(prev_output)
                )
                flat_new_output = compute_masked_output(
                    mask_t, flat_output, flat_mask_output
                )
                # mask states
                flat_state = tree.flatten(states)
                flat_new_state = tree.flatten(new_states)
                flat_final_state = compute_masked_output(
                    mask_t, flat_new_state, flat_state
                )
                new_states = tree.pack_sequence_as(
                    new_states, flat_final_state
                )
                ta_index_to_write = time if return_all_outputs else 0
                for ta, out in zip(output_ta_t, flat_new_output):
                    ta[ta_index_to_write] = out
                return (time + 1, output_ta_t, tuple(flat_new_output)) + tuple(
                    new_states
                )

            it = 0
            output_ta_t, new_states, prev_output = (
                output_ta,
                states,
                flat_zero_output,
            )
            while time < time_steps_t and it < max_iterations:
                final_outputs = _step(
                    time, output_ta_t, prev_output, *new_states
                )
                time, output_ta_t, prev_output = final_outputs[:3]
                new_states = final_outputs[3:]
                it += 1

        else:

            def _step(time, output_ta_t, *states):
                """RNN step function.

                Args:
                    time: Current timestep value.
                    output_ta_t: TensorArray.
                    *states: List of states.

                Returns:
                    Tuple: `(time + 1,output_ta_t) + tuple(new_states)`
                """
                current_input = tuple(ta[time] for ta in input_ta)
                current_input = tree.pack_sequence_as(inputs, current_input)
                output, new_states = step_function(
                    current_input, tuple(states) + tuple(constants)
                )
                flat_new_state = tree.flatten(new_states)
                flat_output = tree.flatten(output)
                ta_index_to_write = time if return_all_outputs else 0
                for ta, out in zip(output_ta_t, flat_output):
                    ta[ta_index_to_write] = out
                new_states = tree.pack_sequence_as(
                    initial_states, flat_new_state
                )
                return (time + 1, output_ta_t) + tuple(new_states)

            it = 0
            output_ta_t = output_ta
            new_states = states
            while time < time_steps_t and it < max_iterations:
                final_outputs = _step(time, output_ta_t, *new_states)
                time, output_ta_t = final_outputs[:2]
                new_states = final_outputs[2:]
                it += 1

        def _stack(tensor_list):
            # Stack only the entries that reached full rank (unwritten slots
            # remain the `[]` placeholders / lower-rank stubs).
            max_ndims = max([t.ndim for t in tensor_list])
            max_list = []
            for i, t in enumerate(tensor_list):
                if t.ndim == max_ndims:
                    max_list.append(t)
            return torch.stack(max_list)

        output_ta = final_outputs[1]
        outputs = tuple(_stack(o) for o in output_ta)
        last_output = tuple(o[-1] for o in outputs)
        outputs = tree.pack_sequence_as(output_time_zero, outputs)
        last_output = tree.pack_sequence_as(output_time_zero, last_output)

    if not time_major:
        outputs = tree.map_structure(swap_batch_timestep, outputs)

    return last_output, outputs, new_states
def _is_sequence_right_padded(mask):
"""Check the mask tensor and see if it right padded.
cuDNN uses the sequence length param to skip the tailing
timestep. If the data is left padded, or not a strict right padding (has
masked value in the middle of the sequence), then cuDNN won't work
properly in those cases.
Left padded data: [[False, False, True, True, True]].
Right padded data: [[True, True, True, False, False]].
Mixture of mask/unmasked data: [[True, False, True, False, False]].
Note that for the mixed data example above, the actually data RNN should see
are those 2 Trues (index 0 and 2), the index 1 False should be ignored and
not pollute the internal states.
Args:
mask: the Boolean tensor with shape [batch, timestep]
Returns:
boolean scalar tensor, whether the mask is strictly right padded.
"""
# Get max sequence length
max_seq_length = mask.shape[1]
# Count True values in each sequence
count_of_true = torch.sum(mask, dim=1)
# Create right padded mask
batch_size = mask.shape[0]
indices = torch.arange(max_seq_length, device=mask.device).repeat(
batch_size, 1
) # noqa: E501
right_padded_mask = indices < count_of_true.unsqueeze(1)
return torch.all(mask == right_padded_mask)
def _has_fully_masked_sequence(mask):
# Cudnn kernel will error out if the input sequence contains any
# fully masked data. We walk around this issue by rerouting the computation
# to standard kernel, until the issue on cudnn side has been fixed. For a
# fully masked sequence, it will contain all Falses. To make it easy to
# check, we inverse the boolean, check if any of the sequence has all True.
return torch.any(torch.all(~mask, dim=1))
def _assert_valid_mask(mask):
    """Raise `ValueError` unless `mask` is usable by the cuDNN kernel.

    A usable mask is strictly right-padded and contains no fully masked
    sequence.
    """
    is_usable = _is_sequence_right_padded(mask) & ~_has_fully_masked_sequence(
        mask
    )
    if bool(is_usable):
        return
    error_message = (
        "You are passing a RNN mask that does not correspond to "
        "right-padded sequences, while using cuDNN, which is not "
        "supported. With cuDNN, RNN masks can only be used for "
        "right-padding, e.g. `[[True, True, False, False]]` would "
        "be a valid mask, but any mask that isn't just contiguous "
        "`True`'s on the left and contiguous `False`'s on the right "
        "would be invalid. You can pass `use_cudnn=False` to your "
        "RNN layer to stop using cuDNN (this may be slower)."
    )
    raise ValueError(error_message)
def _compute_sequence_length_from_mask(mask, batch_first):
"""Calculate the sequence length tensor (1-D) based on the masking tensor.
The masking tensor is a 2D boolean tensor with shape [batch, timestep]. For
any timestep that should be masked, the corresponding field will be False.
Consider the following example:
a = [[True, True, False, False]
[True, True, True, False]]
It is a (2, 4) tensor, and the corresponding sequence length result should
be 1D tensor with value [2, 3]. Note that the masking tensor must be right
padded that could be checked by, e.g., `is_sequence_right_padded()`.
Args:
mask: Boolean tensor with shape [batch, timestep] or [timestep, batch]
if time_major=True.
time_major: Boolean, which indicates whether the mask is time major or
batch major.
Returns:
sequence_length: 1D int32 tensor.
"""
timestep_index = 0 if not batch_first else 1
return torch.sum(mask.int(), dim=timestep_index)
def prepare_lstm_weights(lstm, kernel, recurrent_kernel, bias, device):
    """Copies kernel and recurrent kernel weights in the Pytorch format.

    We split the kernel and recurrent kernel weights, create associated
    torch tensors adapted to be in line with the Cudnn optimization.
    After we have copied the weights, we ensure the parameters are on
    the same device and memory layout is optimized for Cudnn.
    """
    lstm = lstm.to(device)
    hidden_size = lstm.hidden_size

    # Split the (input, 4*hidden) kernels into per-gate blocks, then
    # re-concatenate and transpose to torch's (4*hidden, input) layout.
    # NOTE(review): the split/concat keeps the gate order [i, f, c, o]
    # unchanged (it is an identity transform), so this assumes Keras's
    # gate order already matches what torch/cuDNN expects — confirm.
    i_k, f_k, c_k, o_k = np.split(kernel, 4, axis=1)
    weight_ih_data = np.concatenate([i_k, f_k, c_k, o_k], axis=1).T
    i_r, f_r, c_r, o_r = np.split(recurrent_kernel, 4, axis=1)
    weight_hh_data = np.concatenate([i_r, f_r, c_r, o_r], axis=1).T

    if bias is not None:
        # Keras keeps a single combined bias; map it to torch's input
        # bias and zero out the hidden bias so the sum is unchanged.
        bias_ih_data = convert_to_tensor(bias, dtype="float32")
        bias_hh_data = torch.zeros_like(bias_ih_data)
    else:
        bias_ih_data = torch.zeros(4 * hidden_size, device=device)
        bias_hh_data = torch.zeros(4 * hidden_size, device=device)

    # Create PyTorch tensors for weights (contiguous for cuDNN).
    weight_ih = convert_to_tensor(weight_ih_data, dtype="float32").contiguous()
    weight_hh = convert_to_tensor(weight_hh_data, dtype="float32").contiguous()
    bias_ih = convert_to_tensor(bias_ih_data, dtype="float32").contiguous()
    bias_hh = convert_to_tensor(bias_hh_data, dtype="float32").contiguous()

    # Ensure the weights are all on the same device
    weight_ih = weight_ih.to(device)
    weight_hh = weight_hh.to(device)
    bias_ih = bias_ih.to(device)
    bias_hh = bias_hh.to(device)

    # Copy Keras weights into Torch's flat weights
    with torch.no_grad():
        lstm.weight_ih_l0.copy_(weight_ih)
        lstm.weight_hh_l0.copy_(weight_hh)
        lstm.bias_ih_l0.copy_(bias_ih)
        lstm.bias_hh_l0.copy_(bias_hh)

    # Optimize the memory layout for Cudnn.
    lstm.flatten_parameters()

    # After prepare_lstm_weights:
    # Force all LSTM parameters to be on the correct device
    for param in lstm.parameters():
        if param.device != device:
            param.data = param.data.to(device)
def _is_cuda_cudnn_available():
# We check if the cuda device and drivers are available
return torch.cuda.is_available() and torch.backends.cudnn.is_available()
def cudnn_ok(
    activation,
    recurrent_activation,
    unroll,
    use_bias=True,
):
    """Whether the LSTM configuration qualifies for the cuDNN fast path.

    The fast path requires tanh / sigmoid activations, no unrolling,
    a bias, and an available CUDA + cuDNN runtime.
    """
    from keras.src import activations
    from keras.src import ops

    if unroll or not use_bias:
        return False
    tanh_variants = (activations.tanh, torch.tanh, ops.tanh)
    sigmoid_variants = (activations.sigmoid, torch.sigmoid, ops.sigmoid)
    if activation not in tanh_variants:
        return False
    if recurrent_activation not in sigmoid_variants:
        return False
    return _is_cuda_cudnn_available()
def lstm(
    inputs,
    initial_state_h,
    initial_state_c,
    mask,
    kernel,
    recurrent_kernel,
    bias,
    activation,
    recurrent_activation,
    return_sequences=False,
    go_backwards=False,
    unroll=False,
    batch_first=True,
):
    """cuDNN-accelerated LSTM entry point for the torch backend.

    Raises `NotImplementedError` whenever the fast path cannot be used
    (unsupported configuration, or a runtime failure inside the cuDNN
    path), so the caller can fall back to the generic `rnn()` loop.
    The original failure is chained as `__cause__` for debuggability.
    """
    cudnn_supported = cudnn_ok(
        activation,
        recurrent_activation,
        unroll,
        use_bias=bias is not None,
    )
    if not cudnn_supported:
        # Signal the caller to fall back to the generic implementation.
        raise NotImplementedError(
            "LSTM configuration does not meet the cuDNN requirements "
            "(tanh/sigmoid activations, unroll=False, a bias, and an "
            "available CUDA/cuDNN runtime)."
        )

    # Get device from the current scope.
    device = get_device()

    from keras.src.backend.torch import Variable

    # Unwrap Keras variables to their underlying torch tensors.
    if isinstance(kernel, Variable):
        kernel = kernel.value
    if isinstance(recurrent_kernel, Variable):
        recurrent_kernel = recurrent_kernel.value
    if isinstance(bias, Variable):
        bias = bias.value

    # Convert to torch tensors
    inputs = convert_to_tensor(inputs, dtype="float32")
    initial_state_h = convert_to_tensor(initial_state_h, dtype="float32")
    initial_state_c = convert_to_tensor(initial_state_c, dtype="float32")
    if mask is not None:
        mask = convert_to_tensor(mask, dtype="bool")

    # Preprocess for go_backwards by flipping the sequence
    if go_backwards:
        seq_dim = 1 if batch_first else 0
        inputs = torch.flip(inputs, dims=[seq_dim])
        if mask is not None:
            mask = torch.flip(mask, dims=[seq_dim])

    # Move all tensors to the same device
    inputs = inputs.to(device)
    initial_state_h = initial_state_h.to(device)
    initial_state_c = initial_state_c.to(device)
    if mask is not None:
        mask = mask.to(device)

    try:
        return _cudnn_lstm(
            inputs,
            initial_state_h,
            initial_state_c,
            kernel,
            recurrent_kernel,
            bias,
            mask,
            batch_first,
            go_backwards,
            return_sequences,
            device,
        )
    except Exception as e:
        # Chain the underlying failure instead of discarding it; callers
        # only catch NotImplementedError to trigger the fallback path.
        raise NotImplementedError(
            "cuDNN LSTM execution failed; falling back to the generic "
            "RNN implementation."
        ) from e
def _cudnn_lstm(
    inputs,
    initial_state_h,
    initial_state_c,
    kernel,
    recurrent_kernel,
    bias,
    mask,
    batch_first,
    go_backwards,
    return_sequences,
    device,
):
    """Run a single-layer LSTM through `torch.nn.LSTM` (cuDNN fast path).

    Variable-length sequences (given via `mask`) are handled by packing,
    which requires a strictly right-padded mask. Returns
    `(last_output, outputs, [h_n, c_n])` with results moved to CPU.
    """
    if mask is not None:
        _assert_valid_mask(mask)
        sequence_lengths = _compute_sequence_length_from_mask(mask, batch_first)

    # Ensure inputs are in batch_first format for consistency
    if not batch_first:
        inputs = inputs.permute(1, 0, 2)
    seq_axis, batch_axis = (0, 1) if not batch_first else (1, 0)

    # If shape is [batch, hidden]; Make [1, batch, hidden]
    if initial_state_h.dim() == 2:
        initial_state_h = initial_state_h.unsqueeze(0)
        initial_state_c = initial_state_c.unsqueeze(0)
    # If shape is [batch, 1, hidden], permute to [1, batch, hidden]
    elif initial_state_h.dim() == 3 and initial_state_h.shape[1] == 1:
        initial_state_h = initial_state_h.permute(1, 0, 2)
        initial_state_c = initial_state_c.permute(1, 0, 2)

    input_size = kernel.shape[0]
    hidden_size = recurrent_kernel.shape[0]

    # Configure LSTM with the provided parameters
    lstm = torch.nn.LSTM(
        input_size=input_size,
        hidden_size=hidden_size,
        num_layers=1,
        batch_first=batch_first,
        bidirectional=False,
    )
    prepare_lstm_weights(lstm, kernel, recurrent_kernel, bias, device)

    if mask is not None:
        # pack_padded_sequence requires lengths sorted in descending
        # order, so sort the batch (and the initial states) accordingly.
        sorted_lengths, sorted_indices = torch.sort(
            sequence_lengths, descending=True
        )
        sorted_inputs = inputs[sorted_indices]
        sorted_initial_h = initial_state_h[:, sorted_indices]
        sorted_initial_c = initial_state_c[:, sorted_indices]

        # Create the packed sequence
        packed_inputs = torch.nn.utils.rnn.pack_padded_sequence(
            sorted_inputs, sorted_lengths.cpu(), batch_first
        )

        # Process with LSTM (which handles the packed sequence correctly)
        packed_outputs, (h_n, c_n) = lstm(
            packed_inputs, (sorted_initial_h, sorted_initial_c)
        )

        # Unpack back to padded tensor.
        # NOTE(review): outputs/h_n stay in the sorted batch order here —
        # sorted_indices is not inverted before returning; confirm callers
        # account for this.
        outputs, _ = torch.nn.utils.rnn.pad_packed_sequence(
            packed_outputs, batch_first
        )
    else:
        # Run LSTM without packing for fixed-length sequences
        outputs, (h_n, c_n) = lstm(inputs, (initial_state_h, initial_state_c))

    # Detach from the autograd graph and move results to host memory.
    outputs = outputs.detach().clone().cpu()
    h_n = h_n.detach().clone().cpu()
    c_n = c_n.detach().clone().cpu()

    # Reshape hidden states for return
    h_n = h_n.squeeze(batch_axis)
    c_n = c_n.squeeze(batch_axis)

    # Return appropriate outputs based on return_sequences flag.
    # With a mask, the per-sequence final hidden state (which respects the
    # individual sequence lengths) is the last output.
    if mask is not None:
        last_output = h_n
    else:
        last_output = outputs[:, -1] if batch_first else outputs[-1]
    if not return_sequences:
        outputs = (
            last_output.unsqueeze(1)
            if batch_first
            else last_output.unsqueeze(0)
        )
    # Undo the earlier input flip so outputs line up with original time order.
    if go_backwards and return_sequences:
        outputs = torch.flip(outputs, dims=[seq_axis])
    return last_output, outputs, [h_n, c_n]
def gru(*args, **kwargs):
    """The torch backend has no cuDNN fast path for GRU."""
    raise NotImplementedError
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/torch/core.py | keras/src/backend/torch/core.py | import builtins
import contextlib
import functools
import ml_dtypes
import numpy as np
import torch
from keras.src import tree
from keras.src.backend.common import KerasVariable
from keras.src.backend.common import global_state
from keras.src.backend.common import standardize_dtype
from keras.src.backend.common.backend_utils import slice_along_axis
from keras.src.backend.common.dtypes import result_type
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.backend.common.stateless_scope import StatelessScope
from keras.src.backend.common.stateless_scope import get_stateless_scope
from keras.src.backend.common.stateless_scope import in_stateless_scope
from keras.src.backend.common.symbolic_scope import SymbolicScope
from keras.src.backend.config import floatx
# Backend capability flags consumed by the Keras core.
SUPPORTS_SPARSE_TENSORS = False
SUPPORTS_RAGGED_TENSORS = False
IS_THREAD_SAFE = True

# Default device selection, in priority order: MPS, CUDA, XPU, CPU.
# Some operators such as 'aten::_foreach_mul_.Scalar'
# are not currently implemented for the MPS device.
# check https://github.com/pytorch/pytorch/issues/77764.
if torch.backends.mps.is_available():
    DEFAULT_DEVICE = "mps"
elif torch.cuda.is_available():
    DEFAULT_DEVICE = "cuda"
elif hasattr(torch, "xpu") and torch.xpu.is_available():
    DEFAULT_DEVICE = "xpu"
else:
    DEFAULT_DEVICE = "cpu"

# Mapping from Keras dtype strings to torch dtypes. Unsigned widths torch
# lacks are widened to the next signed type that can represent them.
TORCH_DTYPES = {
    "float16": torch.float16,
    "float32": torch.float32,
    "float64": torch.float64,
    "uint8": torch.uint8,
    "uint16": torch.int32,  # TODO: Torch doesn't have `uint16` dtype.
    "uint32": torch.int64,  # TODO: Torch doesn't have `uint32` dtype.
    "int8": torch.int8,
    "int16": torch.int16,
    "int32": torch.int32,
    "int64": torch.int64,
    "bfloat16": torch.bfloat16,
    "bool": torch.bool,
    "float8_e4m3fn": torch.float8_e4m3fn,
    "float8_e5m2": torch.float8_e5m2,
    "complex32": torch.complex32,
    "complex64": torch.complex64,
    "complex128": torch.complex128,
}
@contextlib.contextmanager
def device_scope(device_name):
    """Context manager placing torch ops on `device_name` within its scope.

    Restores the previously scoped device on exit, even on error.
    """
    prior_device = global_state.get_global_attribute("torch_device", None)
    resolved = _parse_device_input(device_name)
    global_state.set_global_attribute("torch_device", resolved)
    try:
        yield torch.device(resolved)
    finally:
        global_state.set_global_attribute("torch_device", prior_device)
def get_device():
    """Return the device name currently in scope, or the backend default."""
    scoped = global_state.get_global_attribute("torch_device", None)
    return DEFAULT_DEVICE if scoped is None else scoped
def _parse_device_input(device_name):
if isinstance(device_name, str):
# We support string value like "cpu:0", "gpu:1", and need to convert
# "gpu" to "cuda"
device_name = device_name.lower()
if "gpu" in device_name:
device_name = device_name.replace("gpu", "cuda")
else:
raise ValueError(
"Invalid value for argument `device_name`. "
"Expected a string like 'gpu:0' or 'cpu'. "
f"Received: device_name='{device_name}'"
)
# The torch.Device instance can be used directly.
return device_name
def to_torch_dtype(dtype):
    """Map a Keras dtype identifier to the corresponding torch dtype.

    Raises `ValueError` when no torch equivalent exists.
    """
    torch_dtype = TORCH_DTYPES.get(standardize_dtype(dtype))
    if torch_dtype is None:
        raise ValueError(f"Unsupported dtype for PyTorch: {dtype}")
    return torch_dtype
class Variable(KerasVariable):
    """Torch-backend Keras variable, backed by a `torch.nn.Parameter`."""

    def _initialize(self, value):
        if isinstance(value, torch.nn.Parameter):
            # Reuse same parameter
            self._value = value
        else:
            self._value = torch.nn.Parameter(
                convert_to_tensor(value, dtype=self._dtype),
                requires_grad=self.trainable,
            ).to(get_device())

    def _direct_assign(self, value):
        # In-place copy preserves the Parameter object's identity.
        with torch.no_grad():
            self.value.copy_(value)

    def _convert_to_tensor(self, value, dtype=None):
        return convert_to_tensor(value, dtype=dtype)

    # Overload native accessor.
    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        # Unwrap Variables so torch ops operate on plain tensors.
        args = [arg.value if isinstance(arg, Variable) else arg for arg in args]
        if kwargs is None:
            kwargs = {}
        kwargs = {
            key: value.value if isinstance(value, Variable) else value
            for key, value in kwargs.items()
        }
        return func(*args, **kwargs)

    def __array__(self, dtype=None):
        value = convert_to_numpy(self.value)
        if dtype:
            return value.astype(dtype)
        return value

    @property
    def value(self):
        # We cannot chain super() here because it will fail TorchDynamo. The
        # reason why is unclear.
        def maybe_use_symbolic_tensor(value):
            # Create and use a symbolic tensor stub in symbolic calls.
            if str(get_device()) == "meta" and str(value.device) != "meta":
                return torch.nn.Parameter(
                    torch.empty(
                        size=self._shape,
                        dtype=to_torch_dtype(self._dtype),
                        device="meta",
                    ),
                    requires_grad=self.trainable,
                )
            return value

        if in_stateless_scope():
            scope = get_stateless_scope()
            value = scope.get_current_value(self)
            if value is not None:
                value = self._maybe_autocast(value)
                return maybe_use_symbolic_tensor(value)
        if self._value is None:
            # Uninitialized variable. Return a placeholder.
            # This is fine because it's only ever used
            # during shape inference / graph tracing
            # (anything else would be a bug, to be fixed.)
            value = self._maybe_autocast(
                self._initializer(self._shape, dtype=self._dtype)
            )
        else:
            value = self._maybe_autocast(self._value)
        return maybe_use_symbolic_tensor(value)

    @property
    def trainable(self):
        return self._trainable

    @trainable.setter
    def trainable(self, value):
        # Keep the underlying Parameter's grad requirement in sync.
        self._trainable = value
        if self._value is not None:
            self._value.requires_grad = value

    def __eq__(self, other):
        try:
            return super().__eq__(other)
        except Exception:
            return False
def convert_to_tensor(x, dtype=None, sparse=None, ragged=None):
    """Convert `x` to a torch tensor on the scoped device.

    Handles Keras Variables, existing tensors, Python scalars, nested
    lists/tuples of tensors, and numpy arrays (including dtypes torch
    lacks, such as uint32 and numpy-side bfloat16). `sparse` and
    `ragged` are unsupported on this backend and must be falsy.
    """
    if sparse:
        raise ValueError("`sparse=True` is not supported with torch backend")
    if ragged:
        raise ValueError("`ragged=True` is not supported with torch backend")
    if isinstance(x, Variable) or is_tensor(x):
        if isinstance(x, Variable):
            x = x.value
        device = get_device()
        if x.device != device:
            if x.is_meta:
                # Meta tensors carry no data; materialize a fresh buffer.
                x = torch.empty_like(x, device=device)
            else:
                x = x.to(device)
        if dtype is not None:
            x = x.to(to_torch_dtype(dtype))
        return x
    if dtype is None:
        # Python scalars get fixed default dtypes.
        if isinstance(x, bool):
            return torch.as_tensor(x, dtype=torch.bool, device=get_device())
        elif isinstance(x, int):
            return torch.as_tensor(x, dtype=torch.int32, device=get_device())
        elif isinstance(x, float):
            return torch.as_tensor(
                x, dtype=to_torch_dtype(floatx()), device=get_device()
            )

    # Convert to np in case of any array-like that is not list or tuple.
    if not isinstance(x, (list, tuple)):
        x = np.array(x)
    elif len(x) > 0 and any(isinstance(x1, torch.Tensor) for x1 in x):
        # Handle list or tuple of torch tensors
        return torch.stack([convert_to_tensor(x1) for x1 in x])
    if isinstance(x, np.ndarray):
        if x.dtype == np.uint32:
            # Torch backend does not support uint32.
            x = x.astype(np.int64)
        if standardize_dtype(x.dtype) == "bfloat16":
            # Torch backend does not support converting bfloat16 ndarray.
            x = x.astype(np.float32)
            dtype = "bfloat16"
        dtype = dtype or x.dtype
    if dtype is None:
        # Infer a common dtype from the leaves of a nested structure.
        dtype = result_type(
            *[getattr(item, "dtype", type(item)) for item in tree.flatten(x)]
        )
    dtype = to_torch_dtype(dtype)
    return torch.as_tensor(x, dtype=dtype, device=get_device())
def convert_to_numpy(x):
    """Convert `x` (tensor, list/tuple of tensors, or array-like) to numpy."""

    def _to_numpy(value):
        if is_tensor(value):
            if value.requires_grad:
                value = value.detach()
            # numpy conversion requires host-resident data.
            if value.device != torch.device("cpu"):
                value = value.cpu()
            if value.dtype == torch.bfloat16:
                # Attempting to call .numpy() on a bfloat16 torch tensor leads
                # to an immediate error. Instead we upcast to float32 and then
                # convert to the numpy friendly bfloat16 type.
                # https://github.com/pytorch/pytorch/issues/90574
                return np.array(value.to(torch.float32)).astype(
                    ml_dtypes.bfloat16
                )
        return np.array(value)

    if isinstance(x, (list, tuple)):
        return np.array([_to_numpy(element) for element in x])
    return _to_numpy(x)
def is_tensor(x):
    """Return True iff `x` is a `torch.Tensor`.

    Using the built-in `isinstance` is recommended by pytorch over
    `torch.is_tensor`
    (https://pytorch.org/docs/stable/generated/torch.is_tensor.html).
    Additionally, `torch.is_tensor()` causes issues with dynamo caching
    when a torch.Tensor and numpy.ndarray of the same size, shape, and
    dtype are passed: if called on a Tensor first, the second call with
    an ndarray returns `True`, and vice-versa.
    """
    return isinstance(x, torch.Tensor)
def shape(x):
    """Return the shape of `x` as a plain Python tuple (not `torch.Size`)."""
    return tuple(x.shape)
def cast(x, dtype):
    """Cast `x` to `dtype`, avoiding a copy when it already matches."""
    torch_dtype = to_torch_dtype(dtype)
    if isinstance(x, Variable):
        x = x.value
    if not is_tensor(x):
        return convert_to_tensor(x, torch_dtype)
    return x if x.dtype == torch_dtype else x.to(torch_dtype)
# Shape / dtype inference util
def compute_output_spec(fn, *args, **kwargs):
def has_none_shape(x):
"""Check for if a `KerasTensor` has dynamic shape."""
if isinstance(x, KerasTensor):
return None in x.shape
return False
def convert_keras_tensor_to_torch(x, fill_value=None):
"""Convert `KerasTensor`s to `torch.Tensor`s."""
if isinstance(x, KerasTensor):
shape = list(x.shape)
if fill_value:
for i, e in enumerate(shape):
if e is None:
shape[i] = fill_value
return torch.ones(
size=shape,
dtype=TORCH_DTYPES[x.dtype],
device=get_device(),
)
return x
def convert_torch_to_keras_tensor(x):
"""Convert `torch.Tensor`s to `KerasTensor`s."""
if is_tensor(x):
return KerasTensor(x.shape, standardize_dtype(x.dtype))
return x
def symbolic_call(fn, args, kwargs, fill_value):
"""Call `fn` to infer output shape and dtype."""
try:
# First try instantiating all tensors on the `"meta"` device,
# which should give a "zero flop" way to trace shape, but does
# not have universal support with torch operations.
with device_scope("meta"):
meta_args, meta_kwargs = tree.map_structure(
lambda x: convert_keras_tensor_to_torch(x, fill_value),
(args, kwargs),
)
return fn(*meta_args, **meta_kwargs)
except:
with device_scope(DEFAULT_DEVICE):
# If the `"meta"` device placement fails, fall back to tracing
# eagerly with tensors on the default device. This will be
# more robust, but more expensive.
eager_args, eager_kwargs = tree.map_structure(
lambda x: convert_keras_tensor_to_torch(x, fill_value),
(args, kwargs),
)
return fn(*eager_args, **eager_kwargs)
with StatelessScope(), SymbolicScope(), torch.no_grad():
outputs = symbolic_call(fn, args, kwargs, fill_value=83)
none_in_shape = any(
builtins.map(has_none_shape, tree.flatten((args, kwargs)))
)
if none_in_shape:
outputs_1 = outputs
outputs_2 = symbolic_call(fn, args, kwargs, fill_value=89)
flat_out_1 = tree.flatten(outputs_1)
flat_out_2 = tree.flatten(outputs_2)
flat_out = []
for x1, x2 in zip(flat_out_1, flat_out_2):
shape = list(x1.shape)
for i, e in enumerate(x2.shape):
if e != shape[i]:
shape[i] = None
flat_out.append(KerasTensor(shape, standardize_dtype(x1.dtype)))
outputs = tree.pack_sequence_as(outputs_1, flat_out)
output_spec = tree.map_structure(convert_torch_to_keras_tensor, outputs)
return output_spec
def cond(pred, true_fn, false_fn):
    """Conditional that always takes `true_fn` during symbolic execution.

    On the "meta" device (symbolic tracing) `pred` has no concrete value,
    so the true branch is used unconditionally.
    """
    if get_device() == "meta":
        return true_fn()
    return true_fn() if pred else false_fn()
def vectorized_map(function, elements):
    """Apply `function` across the leading axis of `elements` via vmap."""
    batched_fn = torch.vmap(function)
    return batched_fn(elements)
def map(f, xs):
    """Map `f` over the leading axis of `xs`, implemented via `scan`."""

    def _step(carry, x):
        # No carry is threaded; only the per-element output matters.
        return (), f(x)

    _, ys = scan(_step, (), xs)
    return ys
def scan(f, init, xs=None, length=None, reverse=False, unroll=1):
    """Scan `f` over the leading axis of `xs`, threading a carry.

    Mirrors `jax.lax.scan` semantics: `f(carry, x) -> (carry, y)`, with
    the per-step `y`s stacked into the second return value. `unroll` is
    validated but has no effect on this eager implementation.
    """
    # Ref: jax.lax.scan
    if not callable(f):
        raise TypeError(f"`f` should be a callable. Received: f={f}")
    if not isinstance(unroll, bool):
        if not isinstance(unroll, int) or unroll < 1:
            raise ValueError(
                "`unroll` must be an positive integer or boolean. "
                f"Received: unroll={unroll}"
            )
    if xs is None and length is None:
        raise ValueError("Got no `xs` to scan over and `length` not provided.")

    input_is_sequence = tree.is_nested(xs)
    output_is_sequence = tree.is_nested(init)

    def pack_input(x):
        return tree.pack_sequence_as(xs, x) if input_is_sequence else x[0]

    def pack_output(x):
        return tree.pack_sequence_as(init, x) if output_is_sequence else x[0]

    if xs is None:
        xs_flat = []
        n = int(length)
    else:
        xs_flat = tree.flatten(xs)
        xs_flat = [convert_to_tensor(elem) for elem in xs_flat]
        # `length`, when given, overrides the leading-axis size.
        n = int(length) if length is not None else shape(xs_flat[0])[0]

    init_flat = tree.flatten(init)
    init_flat = [convert_to_tensor(init) for init in init_flat]
    init = pack_output(init_flat)
    # Placeholder outputs used when `f` yields None for a step.
    dummy_y = [torch.zeros_like(init) for init in init_flat]

    carry = init
    ys = []
    # Iterate the leading axis backwards when `reverse` is set; the
    # collected ys are reversed again below so outputs stay aligned.
    maybe_reversed = reversed if reverse else lambda x: x
    for i in maybe_reversed(range(n)):
        xs_slice = [x[i] for x in xs_flat]
        packed_xs = pack_input(xs_slice) if len(xs_slice) > 0 else None
        carry, y = f(carry, packed_xs)
        ys.append(y if y is not None else dummy_y)
    stacked_y = tree.map_structure(
        lambda *ys: torch.stack(ys), *maybe_reversed(ys)
    )
    return carry, stacked_y
def associative_scan(f, elems, reverse=False, axis=0):
    # Ref: jax.lax.associative_scan
    """Parallel (Blelloch-style) inclusive scan of an associative op `f`.

    Args:
        f: Associative callable taking two structures shaped like `elems`.
        elems: (Possibly nested) structure of tensors to scan over `axis`.
        reverse: If `True`, scan from the end toward the start.
        axis: Axis along which to scan.

    Returns:
        A structure like `elems` holding the inclusive scan results.
    """
    if not callable(f):
        raise TypeError(f"`f` should be a callable. Received: f={f}")
    elems_flat = tree.flatten(elems)
    elems_flat = [convert_to_tensor(elem) for elem in elems_flat]
    if reverse:
        elems_flat = [torch.flip(elem, (axis,)) for elem in elems_flat]

    def _combine(a_flat, b_flat):
        # Apply the user's `f` on flat lists by repacking to their structure.
        a_flat = [convert_to_tensor(a) for a in a_flat]
        b_flat = [convert_to_tensor(b) for b in b_flat]
        a = tree.pack_sequence_as(elems, a_flat)
        b = tree.pack_sequence_as(elems, b_flat)
        c = f(a, b)
        c_flat = tree.flatten(c)
        return c_flat

    num_elems = int(elems_flat[0].shape[axis])
    if not all(int(elem.shape[axis]) == num_elems for elem in elems_flat[1:]):
        raise ValueError(
            "Array inputs to associative_scan must have the same "
            "first dimension. (saw: {})".format(
                [elem.shape for elem in elems_flat]
            )
        )

    def _interleave(a, b, axis):
        """Given two Tensors of static shape, interleave them along axis."""
        assert (
            a.shape[axis] == b.shape[axis] or a.shape[axis] == b.shape[axis] + 1
        )
        # we want to get a: [a1, a2], b: [b1, b2]
        # to a: [a1, 0, a2, 0], b: [0, b1, 0, b2]
        a_shape = list(a.shape)
        a_shape[axis] = a.shape[axis] * 2 - 1
        b_shape = list(b.shape)
        b_shape[axis] = b.shape[axis] * 2 - 1
        # Allocate the dilated buffers with the inputs' dtype and device.
        # (Plain `torch.zeros(shape)` made float32-on-CPU buffers, which
        # broke bool inputs — `bitwise_or` below rejects floats — and
        # silently downcast/moved non-float32 or non-CPU tensors.)
        a_dil = torch.zeros(a_shape, dtype=a.dtype, device=a.device)
        slice_along_axis(a_dil, 0, None, 2, axis).copy_(a)
        b_dil = torch.zeros(b_shape, dtype=b.dtype, device=b.device)
        slice_along_axis(b_dil, 0, None, 2, axis).copy_(b)
        # Pads are built innermost-dim-first as `tnn.pad` expects, hence the
        # reversal before flattening.
        a_pad = [[0, 0] for _ in range(a.dim())]
        a_pad[axis][-1] = 1 if a.shape[axis] == b.shape[axis] else 0
        a_pad = a_pad[::-1]
        a_pad = tree.flatten(a_pad)
        b_pad = [[0, 0] for _ in range(b.dim())]
        b_pad[axis] = [1, 0] if a.shape[axis] == b.shape[axis] else [1, 1]
        b_pad = b_pad[::-1]
        b_pad = tree.flatten(b_pad)
        # `add` on bool tensors is invalid; use logical OR for bool inputs.
        op = torch.bitwise_or if a.dtype == torch.bool else torch.add
        return op(
            torch.nn.functional.pad(a_dil, a_pad),
            torch.nn.functional.pad(b_dil, b_pad),
        )

    def _scan(elems):
        # Recursive halving: combine adjacent pairs, scan the halved
        # problem, then fix up even positions and interleave.
        num_elems = elems[0].shape[axis]
        if num_elems < 2:
            return elems
        reduced_elems = _combine(
            [
                slice_along_axis(elem, 0, -1, step=2, axis=axis)
                for elem in elems
            ],
            [
                slice_along_axis(elem, 1, None, step=2, axis=axis)
                for elem in elems
            ],
        )
        odd_elems = _scan(reduced_elems)
        if num_elems % 2 == 0:
            even_elems = _combine(
                [slice_along_axis(e, 0, -1, axis=axis) for e in odd_elems],
                [
                    slice_along_axis(e, 2, None, step=2, axis=axis)
                    for e in elems
                ],
            )
        else:
            even_elems = _combine(
                odd_elems,
                [
                    slice_along_axis(e, 2, None, step=2, axis=axis)
                    for e in elems
                ],
            )
        # The first element has no predecessor to combine with; prepend it.
        even_elems = [
            torch.cat(
                [slice_along_axis(elem, 0, 1, axis=axis), result],
                dim=axis,
            )
            for (elem, result) in zip(elems, even_elems)
        ]
        return list(
            builtins.map(
                functools.partial(_interleave, axis=axis), even_elems, odd_elems
            )
        )

    scans = _scan(elems_flat)
    if reverse:
        scans = [torch.flip(scanned, (axis,)) for scanned in scans]
    return tree.pack_sequence_as(elems, scans)
def scatter(indices, values, shape):
    """Scatter-add `values` into a zeros tensor of shape `shape`.

    Duplicate coordinates accumulate (sum), matching the per-row
    `zeros[idx] += value` semantics of the previous Python loop, but done
    in one vectorized `index_put_` call.

    Args:
        indices: Integer tensor of shape `(..., index_depth)` holding
            coordinates into the first `index_depth` dims of the output.
        values: Updates whose trailing dims match `shape[index_depth:]`.
        shape: Shape of the output tensor.

    Returns:
        Tensor of shape `shape` with `values` summed into the indexed
        positions and zeros elsewhere.
    """
    indices = convert_to_tensor(indices)
    values = convert_to_tensor(values)
    zeros = torch.zeros(shape, dtype=values.dtype, device=get_device())
    index_length = indices.shape[-1]
    value_shape = shape[index_length:]
    # `index_put_` requires int64 index tensors.
    indices = torch.reshape(indices, [-1, index_length]).to(torch.int64)
    values = torch.reshape(values, [-1] + list(value_shape))
    # One index tensor per output dimension, as advanced indexing expects.
    coords = tuple(indices[:, i] for i in range(index_length))
    zeros.index_put_(coords, values, accumulate=True)
    return zeros
def scatter_update(inputs, indices, updates):
    """Return a copy of `inputs` with `updates` written at `indices`."""
    inputs = convert_to_tensor(inputs)
    indices = convert_to_tensor(indices, dtype="int64")
    updates = convert_to_tensor(updates, dtype=inputs.dtype)
    result = torch.clone(inputs)
    # Each row of `indices` is one full coordinate; transposing yields one
    # index tensor per dimension, as advanced indexing expects.
    per_dim_indices = torch.transpose(indices, 0, 1)
    result[tuple(per_dim_indices)] = updates
    return result
def slice(inputs, start_indices, shape):
    """Extract a slice of `inputs` starting at `start_indices` of size `shape`.

    Args:
        inputs: Input tensor.
        start_indices: Per-dimension start offsets.
        shape: Per-dimension slice lengths.

    Returns:
        The sliced view of `inputs`.
    """
    # The builtin `slice` type is shadowed by this function's name. Use the
    # `builtins` module: indexing `__builtins__` (as before) only works when
    # this module is imported — under `__main__`, `__builtins__` is a module
    # object and subscripting it raises TypeError.
    import builtins

    shape_dtype = to_torch_dtype("int64")
    inputs = convert_to_tensor(inputs)
    start_indices = convert_to_tensor(start_indices).to(shape_dtype)
    shape = convert_to_tensor(shape).to(shape_dtype)
    slices = tuple(
        builtins.slice(start_index, start_index + length)
        for start_index, length in zip(start_indices, shape)
    )
    return inputs[slices]
def slice_update(inputs, start_indices, updates):
    """Return a copy of `inputs` with `updates` written at `start_indices`.

    Args:
        inputs: Input tensor.
        start_indices: Per-dimension start offsets for the update region.
        updates: Tensor to write; its shape determines the region size.

    Returns:
        A new tensor equal to `inputs` outside the updated region.
    """
    # The builtin `slice` type is shadowed by the sibling `slice` function.
    # Use the `builtins` module: indexing `__builtins__` (as before) breaks
    # when this module runs as `__main__`, where it is a module, not a dict.
    import builtins

    shape_dtype = to_torch_dtype("int64")
    inputs = convert_to_tensor(inputs)
    start_indices = convert_to_tensor(start_indices).to(shape_dtype)
    updates = convert_to_tensor(updates)
    slices = tuple(
        builtins.slice(start_index, start_index + update_length)
        for start_index, update_length in zip(start_indices, updates.shape)
    )
    outputs = torch.clone(inputs)
    outputs[slices] = updates
    return outputs
def switch(index, branches, *operands):
    """Call `branches[index](*operands)`, clamping `index` into range."""
    last_branch = len(branches) - 1
    # Out-of-range indices select the nearest valid branch instead of
    # raising, mirroring jax.lax.switch.
    index = torch.clamp(convert_to_tensor(index, "int32"), 0, last_branch)
    selected = branches[index]
    return selected(*operands)
def while_loop(
    cond,
    body,
    loop_vars,
    maximum_iterations=None,
):
    """Repeatedly apply `body` while `cond` holds (eager Python loop).

    `loop_vars` may be a single value or a tuple/list; the return value
    matches that shape. Iteration stops when `cond` is falsy or after
    `maximum_iterations` body calls, whichever comes first.
    """
    was_sequence = isinstance(loop_vars, (tuple, list))
    state = tuple(loop_vars) if was_sequence else (loop_vars,)
    state = tree.map_structure(convert_to_tensor, state)
    steps = 0
    while cond(*state):
        if maximum_iterations is not None and steps >= maximum_iterations:
            break
        new_state = body(*state)
        # `body` may return a bare value; normalize back to a tuple.
        if not isinstance(new_state, (list, tuple)):
            new_state = (new_state,)
        state = tuple(new_state)
        steps += 1
    return state if was_sequence else state[0]
def fori_loop(lower, upper, body_fun, init_val):
    """Fold `body_fun(i, carry)` over `i` in `[lower, upper)`."""
    carry = init_val
    index = lower
    while index < upper:
        carry = body_fun(index, carry)
        index += 1
    return carry
def stop_gradient(variable):
    """Return a tensor equal to `variable` that is detached from autograd.

    Accepts either a Keras `Variable` (its underlying tensor is used) or a
    plain torch tensor.
    """
    if isinstance(variable, Variable):
        variable = variable.value
    # We can't use `.requires_grad_(False)` here since it only
    # works when the tensor is a leaf node in the graph.
    return variable.detach()
def unstack(x, num=None, axis=0):
    """Split `x` along `axis` into a tuple of tensors.

    `num` is accepted for API compatibility; torch infers the count.
    """
    return x.unbind(dim=axis)
def random_seed_dtype():
    """Dtype used for RNG seeds: torch has no uint32, so int32 is used."""
    return "int32"
def remat(f):
    """Implementation of rematerialization.

    Args:
        f: The function or operation to rematerialize.

    Returns:
        A function wrapping f that defines a custom gradient, which
        recomputes f on the backwards pass of a gradient call.
    """

    def rematerialized(*args, **kwargs):
        # `use_reentrant=False` selects torch's non-reentrant checkpoint
        # implementation.
        return torch.utils.checkpoint.checkpoint(
            f, *args, use_reentrant=False, **kwargs
        )

    return rematerialized
class custom_gradient:
    """Decorator for custom gradients.

    Wraps a forward function so that calling the decorated function routes
    through `CustomGradientFunction.apply`, letting the forward function
    supply its own gradient function.

    Args:
        forward_fn: Forward pass function.
    """

    def __init__(self, forward_fn):
        # Stored and forwarded to the autograd Function on every call.
        self.forward_fn = forward_fn

    def __call__(self, *args, **kwargs):
        return CustomGradientFunction.apply(self.forward_fn, *args, **kwargs)
class CustomGradientFunction(torch.autograd.Function):
    """Enables custom forward & backward passes for gradient computation."""

    @staticmethod
    def forward(ctx, forward_fn, *args, **kwargs):
        """Forward pass computation specification.

        Args:
            ctx: Context object.
            forward_fn: Function to compute forward pass. Expected to return
                an `(output, grad_fn)` tuple; if it returns a bare output,
                gradients for it fall back to NaN.
            *args: Arguments for the forward pass (saved for backward).
            **kwargs: Keyword arguments for the forward pass.
        """
        ctx.forward_fn = forward_fn
        ctx.save_for_backward(*args)
        try:
            output, ctx.grad_fn = forward_fn(*args, **kwargs)
        except (TypeError, ValueError):
            # The unpack above fails (TypeError/ValueError) when `forward_fn`
            # does not return an `(output, grad_fn)` pair; re-run it and use
            # a NaN gradient. Previously a bare `except:` swallowed every
            # exception here, including KeyboardInterrupt and unrelated bugs.
            output = forward_fn(*args, **kwargs)
            ctx.grad_fn = lambda *args, **kwargs: torch.full((), float("nan"))
        return output

    @staticmethod
    def backward(ctx, grad_output):
        """Backward pass computation specification.

        Args:
            ctx: Context object.
            grad_output: Gradient with respect to the output.
        """
        args = ctx.saved_tensors
        grad_fn = ctx.grad_fn
        if grad_fn is None:
            raise ValueError("grad_fn must be provided for custom gradient")
        grads = grad_fn(*args, upstream=grad_output)
        if not isinstance(grads, tuple):
            grads = (grads,)
        # The leading None is the (non-differentiable) `forward_fn` argument.
        return (None,) + grads
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/torch/nn.py | keras/src/backend/torch/nn.py | import torch
import torch.nn.functional as tnn
from keras.src import backend
from keras.src.backend.common.backend_utils import (
compute_conv_transpose_padding_args_for_torch,
)
from keras.src.backend.torch.core import cast
from keras.src.backend.torch.core import convert_to_tensor
from keras.src.backend.torch.core import get_device
from keras.src.backend.torch.numpy import expand_dims
from keras.src.backend.torch.numpy import where
from keras.src.utils.argument_validation import standardize_tuple
def relu(x):
    """Element-wise rectified linear unit via `tnn.relu`."""
    return tnn.relu(convert_to_tensor(x))
def relu6(x):
    """Element-wise ReLU capped at 6, via `tnn.relu6`."""
    return tnn.relu6(convert_to_tensor(x))
def sigmoid(x):
    """Element-wise logistic sigmoid via `tnn.sigmoid`."""
    return tnn.sigmoid(convert_to_tensor(x))
def sparse_sigmoid(x):
    """Piecewise-linear sigmoid: 0 for x <= -1, 1 for x >= 1, else 0.5*(x+1)."""
    x = convert_to_tensor(x)
    zero = torch.tensor(0.0, device=x.device, dtype=x.dtype)
    one = torch.tensor(1.0, device=x.device, dtype=x.dtype)
    upper_or_linear = torch.where(x >= 1, one, 0.5 * (x + 1))
    return torch.where(x <= -1, zero, upper_or_linear)
def tanh(x):
    """Element-wise hyperbolic tangent via `tnn.tanh`."""
    return tnn.tanh(convert_to_tensor(x))
def tanh_shrink(x):
    """Element-wise `x - tanh(x)` via `tnn.tanhshrink`."""
    return tnn.tanhshrink(convert_to_tensor(x))
def softplus(x):
    """Element-wise softplus via `tnn.softplus`."""
    return tnn.softplus(convert_to_tensor(x))
def softsign(x):
    """Element-wise softsign via `tnn.softsign`."""
    return tnn.softsign(convert_to_tensor(x))
def soft_shrink(x, threshold=0.5):
    """Element-wise soft shrinkage via `tnn.softshrink` with `lambd=threshold`."""
    return tnn.softshrink(convert_to_tensor(x), lambd=threshold)
def sparse_plus(x):
    """Piecewise: 0 for x <= -1, (x+1)^2/4 for -1 < x < 1, x for x >= 1."""
    x = convert_to_tensor(x)
    quad_or_identity = torch.where(x < 1, (1 / 4) * (x + 1) ** 2, x)
    return torch.where(x <= -1, torch.zeros_like(x), quad_or_identity)
def silu(x):
    """Element-wise SiLU (x * sigmoid(x)) via `tnn.silu`."""
    return tnn.silu(convert_to_tensor(x))
def squareplus(x, b=4):
    """Squareplus activation: (x + sqrt(x^2 + b)) / 2."""
    x = convert_to_tensor(x)
    b = convert_to_tensor(b)
    return (x + torch.sqrt(x**2 + b)) / 2
def log_sigmoid(x):
    """Element-wise log(sigmoid(x)) via `tnn.logsigmoid`."""
    return tnn.logsigmoid(convert_to_tensor(x))
def leaky_relu(x, negative_slope=0.2):
    """Element-wise leaky ReLU via `tnn.leaky_relu`."""
    return tnn.leaky_relu(convert_to_tensor(x), negative_slope=negative_slope)
def hard_sigmoid(x):
    """Element-wise hard sigmoid via `tnn.hardsigmoid`."""
    return tnn.hardsigmoid(convert_to_tensor(x))
def hard_silu(x):
    """Element-wise hard SiLU via `tnn.hardswish`."""
    return tnn.hardswish(convert_to_tensor(x))
def elu(x, alpha=1.0):
    """Element-wise exponential linear unit via `tnn.elu`."""
    return tnn.elu(convert_to_tensor(x), alpha)
def selu(x):
    """Element-wise scaled exponential linear unit via `tnn.selu`."""
    return tnn.selu(convert_to_tensor(x))
def gelu(x, approximate=True):
    """GELU activation; `approximate=True` selects the tanh approximation."""
    x = convert_to_tensor(x)
    # torch expects a string flag ("tanh"/"none") rather than a boolean.
    mode = "tanh" if approximate else "none"
    return tnn.gelu(x, approximate=mode)
def celu(x, alpha=1.0):
    """Element-wise continuously-differentiable ELU via `tnn.celu`."""
    return tnn.celu(convert_to_tensor(x), alpha=alpha)
def glu(x, axis=-1):
    """Gated linear unit along `axis` via `tnn.glu`."""
    return tnn.glu(convert_to_tensor(x), dim=axis)
def hard_tanh(x):
    """Element-wise tanh clipped to [-1, 1] via `tnn.hardtanh`."""
    return tnn.hardtanh(convert_to_tensor(x), min_val=-1.0, max_val=1.0)
def hard_shrink(x, threshold=0.5):
    """Element-wise hard shrinkage via `tnn.hardshrink` with `lambd=threshold`."""
    return tnn.hardshrink(convert_to_tensor(x), lambd=threshold)
def threshold(x, threshold, default_value):
    """Element-wise thresholding via `tnn.threshold`."""
    converted = convert_to_tensor(x)
    return tnn.threshold(converted, threshold=threshold, value=default_value)
def softmax(x, axis=-1):
    """Softmax along `axis`; `axis=None` normalizes over all elements."""
    x = convert_to_tensor(x)
    input_dtype = backend.standardize_dtype(x.dtype)
    # tnn.softmax lacks float16 support on CPU; compute in float32 there and
    # cast the result back.
    if input_dtype == "float16" and get_device() == "cpu":
        x = cast(x, "float32")
    if axis is None:
        # Torch treats axis=None as axis=-1; flatten so the reduction spans
        # every dimension, then restore the original shape.
        flat = tnn.softmax(torch.reshape(x, [-1]), dim=-1)
        result = torch.reshape(flat, x.shape)
    else:
        result = tnn.softmax(x, dim=axis)
    return cast(result, input_dtype)
def log_softmax(x, axis=-1):
    """Log-softmax along `axis`; `axis=None` normalizes over all elements."""
    x = convert_to_tensor(x)
    input_dtype = backend.standardize_dtype(x.dtype)
    # tnn.log_softmax lacks float16 support on CPU; compute in float32 there
    # and cast the result back.
    if input_dtype == "float16" and get_device() == "cpu":
        x = cast(x, "float32")
    if axis is None:
        # Torch treats axis=None as axis=-1; flatten so the reduction spans
        # every dimension, then restore the original shape.
        flat = tnn.log_softmax(torch.reshape(x, [-1]), dim=-1)
        result = torch.reshape(flat, x.shape)
    else:
        result = tnn.log_softmax(x, dim=axis)
    return cast(result, input_dtype)
def sparsemax(x, axis=-1):
    """Sparsemax along `axis`: a sparse alternative to softmax.

    Finds the support set via the sorted cumulative-sum criterion, derives
    the threshold `tau`, and clamps `x - tau` at zero so out-of-support
    entries become exactly 0.
    """
    # Sort logits along the specified axis in descending order
    logits = convert_to_tensor(x)
    logits_sorted, _ = torch.sort(logits, dim=axis, descending=True)
    logits_cumsum = torch.cumsum(logits_sorted, dim=axis)
    # Ranks 1..K, broadcast along `axis`, for the support criterion below.
    r = torch.arange(
        1, logits.size(axis) + 1, device=logits.device, dtype=logits.dtype
    )
    r_shape = [1] * logits.ndim
    r_shape[axis] = -1  # Broadcast to match the target axis
    r = r.view(r_shape)
    # Entry k is in the support when sorted_logit_k > (cumsum_k - 1) / k.
    support = logits_sorted - (logits_cumsum - 1) / r > 0
    # Find the threshold
    k = torch.sum(support, dim=axis, keepdim=True)
    logits_cumsum_safe = torch.where(
        support, logits_cumsum, torch.tensor(0.0, device=logits.device)
    )
    tau = (torch.sum(logits_cumsum_safe, dim=axis, keepdim=True) - 1) / k
    output = torch.clamp(logits - tau, min=0.0)
    return output
def _compute_padding_length(
input_length, kernel_length, stride, dilation_rate=1
):
"""Compute padding length along one dimension with support
for asymmetric padding."""
effective_k_size = (kernel_length - 1) * dilation_rate + 1
if stride == 1:
# total padding is kernel_size - 1
total_padding = effective_k_size - 1
else:
# calc. needed padding for case with stride involved
output_size = (input_length + stride - 1) // stride
total_padding = max(
0, (output_size - 1) * stride + effective_k_size - input_length
)
# divide padding evenly, with extra pixel going at the end if needed
left_padding = total_padding // 2
right_padding = total_padding - left_padding
return (left_padding, right_padding)
def _apply_same_padding(
    inputs, kernel_size, strides, data_format, operation_type, dilation_rate=1
):
    """Apply same padding to the input tensor.

    This function will evaluate if the padding value is compatible with torch
    functions. To avoid calling `pad()` as much as possible, which may cause
    performance or memory issues, when compatible, it does not apply the padding
    to the tensor, but returns the input tensor and the padding value to pass to
    the torch functions. If not compatible, it returns the padded tensor and 0
    as the padding value.

    Args:
        inputs: Channels-first tensor `(batch, channels, *spatial)`.
        kernel_size: Per-dimension kernel sizes.
        strides: Per-dimension strides.
        data_format: Accepted for signature compatibility but currently
            unused — the body never reads it; inputs are already
            channels-first by the time callers invoke this helper.
        operation_type: `"pooling"` or a conv type; selects the pad mode and
            whether `dilation_rate` applies.
        dilation_rate: Dilation, ignored for pooling.

    Returns:
        tensor: A padded tensor or the inputs.
        padding: The padding value, ready to pass to the torch functions.
    """
    spatial_shape = inputs.shape[2:]
    num_spatial_dims = len(spatial_shape)
    padding = []
    if operation_type != "pooling":
        dilation_rate = standardize_tuple(
            dilation_rate, num_spatial_dims, "dilation_rate"
        )
    for i in range(num_spatial_dims):
        dil = 1 if operation_type == "pooling" else dilation_rate[i]
        pad = _compute_padding_length(
            spatial_shape[i], kernel_size[i], strides[i], dil
        )
        padding.append(pad)
    # convert padding to torch format
    if all(left == right for left, right in padding):
        # Symmetric padding can be passed straight to the torch op.
        return inputs, [left for left, _ in padding]
    # else, need to pad manually
    flattened_padding = []
    # `tnn.pad` expects pairs ordered from the last spatial dim backwards.
    for pad in reversed(padding):
        flattened_padding.extend(pad)
    mode = "replicate" if operation_type == "pooling" else "constant"
    return tnn.pad(inputs, pad=tuple(flattened_padding), mode=mode), 0
def _transpose_spatial_inputs(inputs):
"""Transpose inputs from channels_last to channels_first format."""
# Torch pooling does not support `channels_last` format, so
# we need to transpose to `channels_first` format.
ndim = inputs.ndim - 2
if ndim == 1: # 1D case
return torch.permute(inputs, (0, 2, 1))
elif ndim == 2: # 2D case
return torch.permute(inputs, (0, 3, 1, 2))
elif ndim == 3: # 3D case
return torch.permute(inputs, (0, 4, 1, 2, 3))
raise ValueError(
"Inputs must have ndim=3, 4 or 5, "
"corresponding to 1D, 2D and 3D inputs. "
f"Received input shape: {inputs.shape}."
)
def _transpose_spatial_outputs(outputs):
# Undo the transpose in `_transpose_spatial_inputs`.
num_spatial_dims = len(outputs.shape) - 2
if num_spatial_dims == 1:
outputs = torch.permute(outputs, (0, 2, 1))
elif num_spatial_dims == 2:
outputs = torch.permute(outputs, (0, 2, 3, 1))
elif num_spatial_dims == 3:
outputs = torch.permute(outputs, (0, 2, 3, 4, 1))
return outputs
def _transpose_conv_kernel(kernel):
# Torch requires conv kernel of format
# `(out_channels, in_channels, spatial_dims)`, we need to transpose.
num_spatial_dims = len(kernel.shape) - 2
if num_spatial_dims == 1:
kernel = torch.permute(kernel, (2, 1, 0))
elif num_spatial_dims == 2:
kernel = torch.permute(kernel, (3, 2, 0, 1))
elif num_spatial_dims == 3:
kernel = torch.permute(kernel, (4, 3, 0, 1, 2))
return kernel
def max_pool(
    inputs,
    pool_size,
    strides=None,
    padding="valid",
    data_format=None,
):
    """Max pooling for 1D/2D/3D inputs.

    Args:
        inputs: Tensor of rank 3, 4 or 5 (batch, spatial dims, channels).
        pool_size: Int or tuple of ints, the pooling window size.
        strides: Int or tuple of ints; defaults to `pool_size` when `None`.
        padding: `"valid"` or `"same"`.
        data_format: `"channels_last"`/`"channels_first"`; resolved via
            `backend.standardize_data_format` when `None`.

    Returns:
        Pooled tensor in the same data format as `inputs`.
    """
    inputs = convert_to_tensor(inputs)
    num_spatial_dims = inputs.ndim - 2
    pool_size = standardize_tuple(pool_size, num_spatial_dims, "pool_size")
    if strides is None:
        strides = pool_size
    else:
        strides = standardize_tuple(strides, num_spatial_dims, "strides")
    data_format = backend.standardize_data_format(data_format)
    if data_format == "channels_last":
        # Torch pooling ops only accept channels_first layouts.
        inputs = _transpose_spatial_inputs(inputs)
    if padding == "same":
        # Torch does not natively support `"same"` padding, we need to manually
        # apply the right amount of padding to `inputs`.
        inputs, padding = _apply_same_padding(
            inputs, pool_size, strides, data_format, "pooling"
        )
    else:
        padding = 0
    device = get_device()
    # Torch max pooling ops do not support symbolic tensors.
    # Create a real tensor to execute the ops.
    if device == "meta":
        inputs = torch.empty(
            size=inputs.shape, dtype=inputs.dtype, device="cpu"
        )
    if num_spatial_dims == 1:
        outputs = tnn.max_pool1d(
            inputs, kernel_size=pool_size, stride=strides, padding=padding
        )
    elif num_spatial_dims == 2:
        outputs = tnn.max_pool2d(
            inputs, kernel_size=pool_size, stride=strides, padding=padding
        )
    elif num_spatial_dims == 3:
        outputs = tnn.max_pool3d(
            inputs, kernel_size=pool_size, stride=strides, padding=padding
        )
    else:
        raise ValueError(
            "Inputs to pooling op must have ndim=3, 4 or 5, "
            "corresponding to 1D, 2D and 3D inputs. "
            f"Received input shape: {inputs.shape}."
        )
    # Move the result back to the originally requested device (see the
    # "meta" workaround above).
    outputs = outputs.to(device)
    if data_format == "channels_last":
        outputs = _transpose_spatial_outputs(outputs)
    return outputs
def average_pool(
    inputs,
    pool_size,
    strides=None,
    padding="valid",
    data_format=None,
):
    """Average pooling for 1D/2D/3D inputs.

    Args:
        inputs: Tensor of rank 3, 4 or 5 (batch, spatial dims, channels).
        pool_size: Int or tuple of ints, the pooling window size.
        strides: Int or tuple of ints; defaults to `pool_size` when `None`.
        padding: `"valid"` or `"same"`.
        data_format: `"channels_last"`/`"channels_first"`; resolved via
            `backend.standardize_data_format` when `None`.

    Returns:
        Pooled tensor in the same data format as `inputs`.
    """
    inputs = convert_to_tensor(inputs)
    num_spatial_dims = inputs.ndim - 2
    pool_size = standardize_tuple(pool_size, num_spatial_dims, "pool_size")
    strides = (
        pool_size
        if strides is None
        else standardize_tuple(strides, num_spatial_dims, "strides")
    )
    data_format = backend.standardize_data_format(data_format)
    # Remember the caller's format; `inputs` is transposed below.
    orig_format = data_format
    if data_format == "channels_last":
        inputs = _transpose_spatial_inputs(inputs)
    if padding == "same":
        # Torch does not natively support `"same"` padding, we need to manually
        # apply the right amount of padding to `inputs`.
        inputs, padding = _apply_same_padding(
            inputs,
            pool_size,
            strides,
            "channels_first",  # we're in channels_first here
            "pooling",
        )
    else:
        padding = 0
    # apply pooling; `count_include_pad=False` excludes padded cells from
    # each window's average.
    if num_spatial_dims == 1:
        outputs = tnn.avg_pool1d(
            inputs,
            kernel_size=pool_size,
            stride=strides,
            padding=padding,
            count_include_pad=False,
        )
    elif num_spatial_dims == 2:
        outputs = tnn.avg_pool2d(
            inputs,
            kernel_size=pool_size,
            stride=strides,
            padding=padding,
            count_include_pad=False,
        )
    elif num_spatial_dims == 3:
        outputs = tnn.avg_pool3d(
            inputs,
            kernel_size=pool_size,
            stride=strides,
            padding=padding,
            count_include_pad=False,
        )
    else:
        raise ValueError(
            "Inputs to pooling op must have ndim=3, 4 or 5, "
            "corresponding to 1D, 2D and 3D inputs. "
            f"Received input shape: {inputs.shape}."
        )
    if orig_format == "channels_last":
        outputs = _transpose_spatial_outputs(outputs)
    return outputs
def adaptive_average_pool(inputs, output_size, data_format=None):
    """Adaptive average pooling(1D/2D/3D) with channels_last support.

    Args:
        inputs: Tensor of rank 3, 4 or 5.
        output_size: Int or per-dimension tuple of target spatial sizes.
        data_format: `"channels_last"`/`"channels_first"`; resolved via
            `backend.standardize_data_format` when `None`.

    Returns:
        Pooled tensor in the same data format as `inputs`.
    """
    inputs = convert_to_tensor(inputs)
    num_spatial_dims = inputs.ndim - 2
    data_format = backend.standardize_data_format(data_format)
    orig_format = data_format
    if data_format == "channels_last":
        # Torch adaptive pooling only accepts channels_first layouts.
        inputs = _transpose_spatial_inputs(inputs)
    if isinstance(output_size, int):
        # 1D torch op takes a bare int; 2D/3D take a tuple per dim.
        torch_output_size = (
            output_size
            if num_spatial_dims == 1
            else (output_size,) * num_spatial_dims
        )
    else:
        torch_output_size = standardize_tuple(
            output_size, num_spatial_dims, "output_size"
        )
    # Symbolic ("meta") tensors are not supported by these ops; run on a
    # real CPU tensor instead.
    if get_device() == "meta":
        inputs = torch.empty(
            size=inputs.shape, dtype=inputs.dtype, device="cpu"
        )
    if num_spatial_dims == 1:
        outputs = tnn.adaptive_avg_pool1d(inputs, output_size=torch_output_size)
    elif num_spatial_dims == 2:
        outputs = tnn.adaptive_avg_pool2d(inputs, output_size=torch_output_size)
    elif num_spatial_dims == 3:
        outputs = tnn.adaptive_avg_pool3d(inputs, output_size=torch_output_size)
    else:
        raise ValueError(
            "Inputs to adaptive average pooling must have ndim=3, 4 or 5, "
            f"Received input shape: {inputs.shape}."
        )
    if orig_format == "channels_last":
        outputs = _transpose_spatial_outputs(outputs)
    return outputs
def adaptive_max_pool(inputs, output_size, data_format=None):
    """Adaptive max pooling(1D/2D/3D) with channels_last support.

    Args:
        inputs: Tensor of rank 3, 4 or 5.
        output_size: Int or per-dimension tuple of target spatial sizes.
        data_format: `"channels_last"`/`"channels_first"`; resolved via
            `backend.standardize_data_format` when `None`.

    Returns:
        Pooled tensor in the same data format as `inputs`.
    """
    inputs = convert_to_tensor(inputs)
    num_spatial_dims = inputs.ndim - 2
    data_format = backend.standardize_data_format(data_format)
    orig_format = data_format
    if data_format == "channels_last":
        # Torch adaptive pooling only accepts channels_first layouts.
        inputs = _transpose_spatial_inputs(inputs)
    if isinstance(output_size, int):
        # 1D torch op takes a bare int; 2D/3D take a tuple per dim.
        torch_output_size = (
            output_size
            if num_spatial_dims == 1
            else (output_size,) * num_spatial_dims
        )
    else:
        torch_output_size = standardize_tuple(
            output_size, num_spatial_dims, "output_size"
        )
    # Symbolic ("meta") tensors are not supported by these ops; run on a
    # real CPU tensor instead.
    if get_device() == "meta":
        inputs = torch.empty(
            size=inputs.shape, dtype=inputs.dtype, device="cpu"
        )
    if num_spatial_dims == 1:
        res = tnn.adaptive_max_pool1d(inputs, output_size=torch_output_size)
    elif num_spatial_dims == 2:
        res = tnn.adaptive_max_pool2d(inputs, output_size=torch_output_size)
    elif num_spatial_dims == 3:
        res = tnn.adaptive_max_pool3d(inputs, output_size=torch_output_size)
    else:
        raise ValueError(
            "Inputs to adaptive max pooling must have ndim=3, 4 or 5, "
            f"Received input shape: {inputs.shape}."
        )
    # The op may return (values, indices); keep only the pooled values.
    outputs = res[0] if isinstance(res, tuple) else res
    if orig_format == "channels_last":
        outputs = _transpose_spatial_outputs(outputs)
    return outputs
def conv(
    inputs,
    kernel,
    strides=1,
    padding="valid",
    data_format=None,
    dilation_rate=1,
):
    """General N-D convolution (1D/2D/3D) with grouped-conv support.

    Args:
        inputs: Tensor of rank 3, 4 or 5.
        kernel: Keras-format kernel `(*spatial, in_channels, out_channels)`.
        strides: Int or per-dimension tuple.
        padding: `"valid"` or `"same"`.
        data_format: `"channels_last"`/`"channels_first"`; resolved via
            `backend.standardize_data_format` when `None`.
        dilation_rate: Int or per-dimension tuple.

    Returns:
        Convolved tensor in the same data format as `inputs`.
    """
    inputs = convert_to_tensor(inputs)
    kernel = convert_to_tensor(kernel)
    num_spatial_dims = inputs.ndim - 2
    strides = standardize_tuple(strides, num_spatial_dims, "strides")
    data_format = backend.standardize_data_format(data_format)
    if data_format == "channels_last":
        inputs = _transpose_spatial_inputs(inputs)
    # Torch expects `(out_channels, in_channels // groups, *spatial)`.
    kernel = _transpose_conv_kernel(kernel)
    # The group count is implied by the input/kernel channel ratio; this is
    # how depthwise and grouped convolutions are expressed.
    in_channels = inputs.shape[1]
    kernel_in_channels = kernel.shape[1]
    if in_channels % kernel_in_channels != 0:
        raise ValueError(
            f"Input channels ({in_channels}) must be divisible by "
            f"kernel input channels ({kernel_in_channels})"
        )
    groups = in_channels // kernel_in_channels
    # handle padding
    if padding == "same":
        inputs, padding = _apply_same_padding(
            inputs,
            kernel.shape[2:],
            strides,
            data_format,
            "conv",
            dilation_rate,
        )
    else:
        padding = 0
    # apply convolution
    if num_spatial_dims == 1:
        outputs = tnn.conv1d(
            inputs,
            kernel,
            stride=strides,
            padding=padding,
            dilation=dilation_rate,
            groups=groups,
        )
    elif num_spatial_dims == 2:
        outputs = tnn.conv2d(
            inputs,
            kernel,
            stride=strides,
            padding=padding,
            dilation=dilation_rate,
            groups=groups,
        )
    elif num_spatial_dims == 3:
        outputs = tnn.conv3d(
            inputs,
            kernel,
            stride=strides,
            padding=padding,
            dilation=dilation_rate,
            groups=groups,
        )
    else:
        raise ValueError(
            "Inputs to conv operation should have ndim=3, 4, or 5,"
            "corresponding to 1D, 2D and 3D inputs. Received input "
            f"shape: {inputs.shape}."
        )
    if data_format == "channels_last":
        outputs = _transpose_spatial_outputs(outputs)
    return outputs
def depthwise_conv(
    inputs,
    kernel,
    strides=1,
    padding="valid",
    data_format=None,
    dilation_rate=1,
):
    """Depthwise convolution, expressed as a grouped call to `conv`."""
    kernel = convert_to_tensor(kernel)
    *spatial_dims, in_channels, channel_multiplier = kernel.shape
    # Fold the per-channel multiplier into the output-channel dimension with
    # a kernel-input-channel size of 1; `conv` then infers `groups ==
    # in_channels`, which is exactly a depthwise convolution.
    kernel = torch.reshape(
        kernel, (*spatial_dims, 1, in_channels * channel_multiplier)
    )
    return conv(inputs, kernel, strides, padding, data_format, dilation_rate)
def separable_conv(
    inputs,
    depthwise_kernel,
    pointwise_kernel,
    strides=1,
    padding="valid",
    data_format=None,
    dilation_rate=1,
):
    """Separable convolution: a depthwise pass followed by a pointwise pass.

    All spatial behavior (strides, padding) is applied in the depthwise
    stage; the pointwise stage runs with stride 1 and valid padding.
    """
    spatial_output = depthwise_conv(
        inputs,
        depthwise_kernel,
        strides,
        padding,
        data_format,
        dilation_rate,
    )
    return conv(
        spatial_output,
        pointwise_kernel,
        strides=1,
        padding="valid",
        data_format=data_format,
        dilation_rate=dilation_rate,
    )
def conv_transpose(
    inputs,
    kernel,
    strides=1,
    padding="valid",
    output_padding=None,
    data_format=None,
    dilation_rate=1,
):
    """Transposed (fractionally-strided) N-D convolution for 1D/2D/3D.

    Args:
        inputs: Tensor of rank 3, 4 or 5.
        kernel: Keras-format kernel.
        strides: Int or per-dimension tuple.
        padding: `"valid"` or `"same"`.
        output_padding: Optional extra size added to the output shape.
        data_format: `"channels_last"`/`"channels_first"`; resolved via
            `backend.standardize_data_format` when `None`.
        dilation_rate: Int or per-dimension tuple.

    Returns:
        Tensor in the same data format as `inputs`.
    """
    inputs = convert_to_tensor(inputs)
    kernel = convert_to_tensor(kernel)
    num_spatial_dims = inputs.ndim - 2
    strides = standardize_tuple(strides, num_spatial_dims, "strides")
    data_format = backend.standardize_data_format(data_format)
    # Translate Keras padding/output_padding semantics into the
    # `padding`/`output_padding` pair torch's transposed convs expect.
    (
        torch_padding,
        torch_output_padding,
    ) = compute_conv_transpose_padding_args_for_torch(
        input_shape=inputs.shape,
        kernel_shape=kernel.shape,
        strides=strides,
        padding=padding,
        output_padding=output_padding,
        dilation_rate=dilation_rate,
    )
    if data_format == "channels_last":
        inputs = _transpose_spatial_inputs(inputs)
    # Transpose kernel from keras format to torch format.
    kernel = _transpose_conv_kernel(kernel)
    kernel_spatial_shape = kernel.shape[2:]
    if isinstance(dilation_rate, int):
        dilation_rate = [dilation_rate] * len(kernel_spatial_shape)
    if num_spatial_dims == 1:
        outputs = tnn.conv_transpose1d(
            inputs,
            kernel,
            stride=strides,
            padding=torch_padding,
            output_padding=torch_output_padding,
            dilation=dilation_rate,
        )
    elif num_spatial_dims == 2:
        outputs = tnn.conv_transpose2d(
            inputs,
            kernel,
            stride=strides,
            padding=torch_padding,
            output_padding=torch_output_padding,
            dilation=dilation_rate,
        )
    elif num_spatial_dims == 3:
        outputs = tnn.conv_transpose3d(
            inputs,
            kernel,
            stride=strides,
            padding=torch_padding,
            output_padding=torch_output_padding,
            dilation=dilation_rate,
        )
    else:
        raise ValueError(
            "Inputs to conv transpose operation should have ndim=3, 4, or 5,"
            "corresponding to 1D, 2D and 3D inputs. Received input "
            f"shape: {inputs.shape}."
        )
    if data_format == "channels_last":
        outputs = _transpose_spatial_outputs(outputs)
    return outputs
def one_hot(x, num_classes, axis=-1, dtype=None, sparse=False):
    """One-hot encode integer tensor `x` into `num_classes` classes.

    Negative entries in `x` produce all-zero rows. `axis` selects where the
    new class dimension is placed in the output.
    """
    if sparse:
        raise ValueError("Unsupported value `sparse=True` with torch backend")
    # Axis is the output axis. By default, PyTorch, outputs to last axis.
    # If axis is not last, change output to axis and shift remaining elements.
    x = convert_to_tensor(x, dtype=torch.long)
    zero = convert_to_tensor(0, dtype=torch.long)
    # Torch one_hot does not natively handle negative values, so we add some
    # manual handling for negatives in the input to one_hot by using max(x, 0).
    # The output will have some invalid results, so we set them back to 0 using
    # `where` afterwards.
    output = tnn.one_hot(torch.clamp(x, min=0), num_classes)
    output = where(expand_dims(x, axis=-1) >= 0, output, zero)
    output = convert_to_tensor(output, dtype=dtype)
    dims = output.dim()
    if axis != -1 and axis != dims:
        new_axes_order = list(range(dims))
        new_axes_order[axis] = -1  # Shifts output to axis position
        # Shift remaining axes with offset by 1 since output moved to `axis`.
        for ax in range(axis + 1, dims):
            new_axes_order[ax] -= 1
        output = output.permute(new_axes_order)
    return output
def multi_hot(x, num_classes, axis=-1, dtype=None, sparse=False):
    """Multi-hot encode labels by taking the max over one-hot encodings."""
    if sparse:
        raise ValueError("Unsupported value `sparse=True` with torch backend")
    x = convert_to_tensor(x)
    # For batched inputs, collapse the per-sample label axis (1); for flat
    # inputs, collapse the single label axis (0).
    label_axis = 1 if len(x.shape) > 1 else 0
    encoded = one_hot(cast(x, "int32"), num_classes, axis=axis, dtype=dtype)
    return torch.amax(encoded, dim=label_axis)
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
    """Categorical crossentropy between dense `target` and `output`.

    `target` and `output` must share the same shape and be at least rank 1.
    """
    target = convert_to_tensor(target)
    output = convert_to_tensor(output)
    if target.shape != output.shape:
        raise ValueError(
            "Arguments `target` and `output` must have the same shape. "
            "Received: "
            f"target.shape={target.shape}, output.shape={output.shape}"
        )
    if len(target.shape) < 1:
        raise ValueError(
            "Arguments `target` and `output` must be at least rank 1. "
            "Received: "
            f"target.shape={target.shape}, output.shape={output.shape}"
        )
    if from_logits:
        log_prob = tnn.log_softmax(output, dim=axis)
    else:
        # Re-normalize probabilities and clip away 0/1 before taking logs.
        probs = output / torch.sum(output, dim=axis, keepdim=True)
        probs = torch.clip(probs, backend.epsilon(), 1.0 - backend.epsilon())
        log_prob = torch.log(probs)
    return -torch.sum(target * log_prob, dim=axis)
def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
    """Crossentropy between integer `target` labels and `output` scores.

    `target` holds class indices; `output` has one extra class dimension at
    `axis`. A trailing singleton dim on `target` is squeezed away.
    """
    target = convert_to_tensor(target, dtype=torch.long)
    output = convert_to_tensor(output)
    if len(target.shape) == len(output.shape) and target.shape[-1] == 1:
        target = torch.squeeze(target, dim=-1)
    if len(output.shape) < 1:
        raise ValueError(
            "Argument `output` must be at least rank 1. "
            "Received: "
            f"output.shape={output.shape}"
        )
    # `target` must match `output` with the class dimension removed.
    output_shape_without_class_dim = list(output.shape)
    del output_shape_without_class_dim[axis]
    if list(target.shape) != output_shape_without_class_dim:
        raise ValueError(
            "Arguments `target` and `output` must have the same shape "
            "up until the last dimension: "
            f"target.shape={target.shape}, output.shape={output.shape}"
        )
    if from_logits:
        log_prob = tnn.log_softmax(output, dim=axis)
    else:
        # Re-normalize probabilities and clip away 0/1 before taking logs.
        output = output / torch.sum(output, dim=axis, keepdim=True)
        output = torch.clip(output, backend.epsilon(), 1.0 - backend.epsilon())
        log_prob = torch.log(output)
    # Densify the labels and reduce as in `categorical_crossentropy`.
    target = one_hot(target, output.shape[axis], axis=axis)
    return -torch.sum(target * log_prob, dim=axis)
def binary_crossentropy(target, output, from_logits=False):
    """Element-wise binary crossentropy (no reduction).

    Args:
        target: Ground-truth tensor, same shape as `output`.
        output: Predicted probabilities, or logits if `from_logits=True`.
        from_logits: Whether `output` contains unnormalized logits.

    Returns:
        Per-element loss tensor.
    """
    target = convert_to_tensor(target)
    output = convert_to_tensor(output)
    # We only apply the squeeze fix if we are on an MPS device,
    # as this change breaks tests on other platforms that
    # expect the original tensor shape to be preserved.
    if (
        torch.backends.mps.is_available()
        and target.ndim > 1
        and output.ndim == target.ndim
        and target.shape[-1] == 1
        and output.shape[-1] == 1
    ):
        target = torch.squeeze(target, -1).contiguous()
        output = torch.squeeze(output, -1).contiguous()
    if target.shape != output.shape:
        raise ValueError(
            "Arguments `target` and `output` must have the same shape. "
            "Received: "
            f"target.shape={target.shape}, output.shape={output.shape}"
        )
    # By default, PyTorch, does reduction of `sum` over all rows,
    # change reduction to `none` to keep dim
    if from_logits:
        return tnn.binary_cross_entropy_with_logits(
            output, target, reduction="none"
        )
    else:
        # Clip away exact 0/1 probabilities before the log inside the loss.
        output = torch.clip(output, backend.epsilon(), 1.0 - backend.epsilon())
        return tnn.binary_cross_entropy(output, target, reduction="none")
def moments(x, axes, keepdims=False, synchronized=False):
    """Compute the mean and variance of `x` over `axes`.

    Args:
        x: Input tensor.
        axes: Axes to reduce over.
        keepdims: Whether reduced axes are kept with size 1.
        synchronized: Unsupported on torch; raises if `True`.

    Returns:
        Tuple `(mean, variance)` with the same dtype as `x`.
    """
    if synchronized:
        raise NotImplementedError(
            "Argument synchronized=True is not supported with PyTorch."
        )
    x = convert_to_tensor(x)
    # The dynamic range of float16 is too limited for statistics. As a
    # workaround, we simply perform the operations on float32 and convert back
    # to float16
    need_cast = False
    ori_dtype = backend.standardize_dtype(x.dtype)
    if ori_dtype == "float16":
        need_cast = True
        x = cast(x, "float32")
    mean = torch.mean(x, dim=axes, keepdim=True)
    # The variance is computed using $Var = E[|x|^2] - |E[x]|^2$, It is faster
    # but less numerically stable.
    # Note: stop_gradient does not change the gradient to the mean, because that
    # gradient is zero.
    variance = torch.mean(
        torch.square(x), dim=axes, keepdim=True
    ) - torch.square(mean)
    if not keepdims:
        mean = torch.squeeze(mean, axes)
        variance = torch.squeeze(variance, axes)
    if need_cast:
        # avoid overflow and underflow when casting from float16 to float32
        mean = torch.clip(
            mean,
            torch.finfo(torch.float16).min,
            torch.finfo(torch.float16).max,
        )
        variance = torch.clip(
            variance,
            torch.finfo(torch.float16).min,
            torch.finfo(torch.float16).max,
        )
        mean = cast(mean, ori_dtype)
        variance = cast(variance, ori_dtype)
    return mean, variance
def batch_normalization(
    x, mean, variance, axis, offset=None, scale=None, epsilon=1e-3
):
    """Normalize `x` with the given per-channel `mean`/`variance`.

    Computes `scale * (x - mean) / sqrt(variance + epsilon) + offset`, with
    `offset` defaulting to zeros and `scale` to ones.

    Args:
        x: Input tensor.
        mean: Per-channel mean, 1-D of length `x.shape[axis]`.
        variance: Per-channel variance, same length as `mean`.
        axis: Channel axis of `x` the statistics apply to.
        offset: Optional per-channel shift (beta).
        scale: Optional per-channel scale (gamma).
        epsilon: Small constant added to `variance` for stability.
    """
    x = convert_to_tensor(x)
    mean = convert_to_tensor(mean)
    variance = convert_to_tensor(variance)
    # Reshape the per-channel stats to broadcast against `x` along `axis`.
    shape = [1] * len(x.shape)
    shape[axis] = mean.shape[0]
    mean = torch.reshape(mean, shape)
    variance = torch.reshape(variance, shape)
    if offset is not None:
        offset = convert_to_tensor(offset)
        offset = torch.reshape(offset, shape)
    else:
        offset = torch.zeros_like(mean)
    if scale is not None:
        scale = convert_to_tensor(scale)
        scale = torch.reshape(scale, shape)
    else:
        scale = torch.ones_like(variance)
    # `subtract` (non-underscore) allocates a new tensor, so the in-place
    # `mul_`/`add_` below never modify the caller's `x`.
    return (
        x.subtract(mean)
        .mul_(variance.add(epsilon).rsqrt_().mul(scale))
        .add_(offset)
    )
def ctc_loss(target, output, target_length, output_length, mask_index=0):
    """Per-sample CTC loss via `tnn.ctc_loss` (no reduction).

    `mask_index` is the blank label index.
    """
    target = convert_to_tensor(target)
    output = convert_to_tensor(output)
    target_length = convert_to_tensor(target_length)
    output_length = convert_to_tensor(output_length)
    # Ensure that the dtype promotion behavior matches that of
    # `tf.nn.ctc_loss`.
    output = cast(output, backend.result_type(output.dtype, "float32"))
    # torch's CTC expects time-major (T, B, C) log-probabilities.
    log_probs = tnn.log_softmax(torch.transpose(output, 1, 0), dim=-1)
    return tnn.ctc_loss(
        log_probs,
        target,
        output_length,
        target_length,
        blank=mask_index,
        reduction="none",
    )
def _ctc_greedy_decode(
    inputs,
    sequence_lengths,
    merge_repeated=True,
    mask_index=None,
):
    """Greedy (best-path) CTC decoding.

    Args:
        inputs: Logits of shape `(batch, max_length, num_classes)`.
        sequence_lengths: Valid length per batch entry; cast to int32.
        merge_repeated: Whether consecutive repeated labels collapse to one.
        mask_index: Index of the blank class. Defaults to the last class.

    Returns:
        A tuple `(indices, scores)`: `indices` has shape
        `(1, batch, max_length)` with decoded labels left-aligned and `-1`
        padding, and `scores` has shape `(batch, 1)`.
    """
    inputs = convert_to_tensor(inputs)
    sequence_lengths = convert_to_tensor(sequence_lengths, dtype="int32")
    batch_size, max_length, num_classes = inputs.shape
    if mask_index is None:
        mask_index = num_classes - 1
    # Best class per frame, and its logit as the per-frame score.
    indices = torch.argmax(inputs, axis=-1)
    indices = cast(indices, "int32")
    scores = torch.max(inputs, axis=-1)[0]
    # Frames at or beyond each sequence's length are forced to blank and
    # contribute zero to the score.
    seqlen_mask = torch.arange(max_length, device=indices.device)[None, :]
    seqlen_mask = seqlen_mask >= sequence_lengths[:, None]
    indices = torch.where(seqlen_mask, mask_index, indices)
    scores = torch.where(seqlen_mask, 0.0, scores)
    if merge_repeated:
        # A frame that repeats its predecessor becomes blank; the pad
        # prepends a False column so the first frame is never a "repeat".
        repeat = indices[:, 1:] == indices[:, :-1]
        repeat = tnn.pad(repeat, (1, 0, 0, 0))
        indices = torch.where(repeat, mask_index, indices)
    # We set to -1 for blank labels
    invalid_mask = indices == mask_index
    indices = torch.where(invalid_mask, -1, indices)
    # We rearrange the indices by moving `mask_index` to the end of the array
    # via a stable argsort over per-frame positions, with blanks pushed past
    # `max_length`.
    order = torch.unsqueeze(
        torch.arange(max_length, device=indices.device), dim=0
    )  # [1, N]
    order = torch.tile(order, (batch_size, 1))  # [B, N]
    order = torch.where(invalid_mask, max_length, order)
    order = torch.argsort(order, dim=-1)
    indices = torch.take_along_dim(indices, order, dim=-1)
    # Negated sum of the best-path frame scores, shaped (batch, 1).
    scores = -torch.sum(scores, axis=1)[:, None]
    indices = torch.unsqueeze(indices, dim=0)
    return indices, scores
def ctc_decode(
    inputs,
    sequence_lengths,
    strategy="greedy",
    beam_width=100,
    top_paths=1,
    merge_repeated=True,
    mask_index=0,
):
    """Decodes the output of a CTC network.

    Args:
        inputs: Logits tensor of shape `(batch, max_length, num_classes)`.
        sequence_lengths: Valid length of each batch entry.
        strategy: Only `"greedy"` is implemented for the torch backend;
            `"beam_search"` raises `NotImplementedError`.
        beam_width: Unused by the greedy strategy.
        top_paths: Unused by the greedy strategy.
        merge_repeated: Whether to collapse consecutive repeated labels.
        mask_index: Index of the blank class.

    Returns:
        A tuple `(decoded_indices, scores)` from the greedy decoder.

    Raises:
        NotImplementedError: If `strategy="beam_search"`.
        ValueError: For any other unsupported strategy.
    """
    inputs = convert_to_tensor(inputs)
    dtype = backend.result_type(inputs.dtype, "float32")
    inputs = cast(inputs, dtype)
    if strategy == "greedy":
        return _ctc_greedy_decode(
            inputs,
            sequence_lengths,
            merge_repeated=merge_repeated,
            mask_index=mask_index,
        )
    elif strategy == "beam_search":
        # Fix: the original message concatenated "CTC" and "decoding."
        # without a space, rendering as "...for CTCdecoding.".
        raise NotImplementedError(
            "Torch backend doesn't yet support the beam search strategy "
            "for CTC decoding."
        )
    else:
        raise ValueError(
            f"Invalid strategy {strategy}. Supported values are "
            "'greedy' and 'beam_search'."
        )
def psnr(x1, x2, max_val):
    """Peak signal-to-noise ratio between two same-shape tensors, in dB.

    Computes `20 * log10(max_val) - 10 * log10(mse)` where `mse` is the
    mean squared error between `x1` and `x2`.
    """
    # Shape check runs on the raw inputs, before tensor conversion.
    if x1.shape != x2.shape:
        raise ValueError(
            f"Input shapes {x1.shape} and {x2.shape} must "
            "match for PSNR calculation. "
        )
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    max_val = convert_to_tensor(max_val, dtype=x1.dtype)
    mean_squared_error = torch.mean(torch.square(x1 - x2))
    return 20 * torch.log10(max_val) - 10 * torch.log10(mean_squared_error)
def _get_large_negative(dtype):
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/torch/linalg.py | keras/src/backend/torch/linalg.py | import torch
from keras.src.backend import config
from keras.src.backend import standardize_dtype
from keras.src.backend.common import dtypes
from keras.src.backend.torch.core import cast
from keras.src.backend.torch.core import convert_to_tensor
def cholesky(x, upper=False):
return torch.linalg.cholesky(x, upper=upper)
def cholesky_inverse(x, upper=False):
return torch.cholesky_inverse(x, upper=upper)
def det(x):
return torch.det(x)
def eig(x):
return torch.linalg.eig(x)
def eigh(x):
return torch.linalg.eigh(x)
def inv(x):
return torch.linalg.inv(x)
def lu_factor(x):
    """LU factorization of `x`.

    Returns the packed LU matrix and the pivot indices, converted from
    torch's 1-based (LAPACK) convention to 0-based indexing.
    """
    packed, one_based_pivots = torch.linalg.lu_factor(x)
    zero_based_pivots = one_based_pivots - 1
    return packed, zero_based_pivots
def norm(x, ord=None, axis=None, keepdims=False):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(x.dtype, float)
x = cast(x, dtype)
return torch.linalg.norm(x, ord=ord, dim=axis, keepdim=keepdims)
def qr(x, mode="reduced"):
    """QR decomposition of `x` via `torch.linalg.qr`.

    Only the `"reduced"` and `"complete"` modes are accepted; any other
    value raises a `ValueError`.
    """
    supported_modes = ("reduced", "complete")
    if mode not in supported_modes:
        raise ValueError(
            "`mode` argument value not supported. "
            "Expected one of {'reduced', 'complete'}. "
            f"Received: mode={mode}"
        )
    return torch.linalg.qr(x, mode=mode)
def solve(a, b):
return torch.linalg.solve(a, b)
def solve_triangular(a, b, lower=False):
    """Solves `a @ x = b` for a triangular matrix `a`.

    `b` may be a matrix (matching `a`'s rank) or a vector one rank lower;
    in the vector case the solution is returned with the same rank as `b`.
    """
    rhs_is_vector = b.ndim == a.ndim - 1
    if not rhs_is_vector:
        return torch.linalg.solve_triangular(a, b, upper=not lower)
    # Promote the vector RHS to a one-column matrix, solve, then drop the
    # extra column axis again.
    column = torch.unsqueeze(b, axis=-1)
    solution = torch.linalg.solve_triangular(a, column, upper=not lower)
    return solution.squeeze(axis=-1)
def svd(x, full_matrices=True, compute_uv=True):
if not compute_uv:
return torch.linalg.svdvals(x)
return torch.linalg.svd(x, full_matrices=full_matrices)
def lstsq(a, b, rcond=None):
a = convert_to_tensor(a)
b = convert_to_tensor(b)
return torch.linalg.lstsq(a, b, rcond=rcond)[0]
def jvp(fun, primals, tangents, has_aux=False):
return torch.func.jvp(fun, primals, tangents, has_aux=has_aux)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/torch/numpy.py | keras/src/backend/torch/numpy.py | import builtins
import math
import numpy as np
import torch
from keras.src.backend import KerasTensor
from keras.src.backend import config
from keras.src.backend.common import dtypes
from keras.src.backend.common.backend_utils import canonicalize_axis
from keras.src.backend.common.backend_utils import to_tuple_or_list
from keras.src.backend.common.backend_utils import vectorize_impl
from keras.src.backend.common.variables import standardize_dtype
from keras.src.backend.torch.core import cast
from keras.src.backend.torch.core import convert_to_tensor
from keras.src.backend.torch.core import get_device
from keras.src.backend.torch.core import is_tensor
from keras.src.backend.torch.core import to_torch_dtype
TORCH_INT_TYPES = (
torch.int8,
torch.int16,
torch.int32,
torch.int64,
)
def rot90(array, k=1, axes=(0, 1)):
    """Rotate an array by 90 degrees in the specified plane using PyTorch.

    Args:
        array: Input tensor
        k: Number of 90-degree rotations (default=1)
        axes: Tuple of two axes that define the
            plane of rotation (defaults to `(0, 1)`).

    Returns:
        Rotated tensor
    """
    array = convert_to_tensor(array)
    if array.ndim < 2:
        raise ValueError(
            "Input array must have at least 2 dimensions. "
            f"Received: array.ndim={array.ndim}"
        )
    if len(axes) != 2 or axes[0] == axes[1]:
        raise ValueError(
            f"Invalid axes: {axes}. Axes must be a tuple "
            "of two different dimensions."
        )
    # Normalize negative axes and validate the resulting indices.
    axes = tuple(axis if axis >= 0 else array.ndim + axis for axis in axes)
    if not builtins.all(0 <= axis < array.ndim for axis in axes):
        raise ValueError(
            f"Invalid axes {axes} for tensor with {array.ndim} dimensions"
        )
    # Fix: the original ended with `if isinstance(array, np.ndarray)` to
    # convert the result back to numpy, but `array` had already been rebound
    # to a torch tensor by `convert_to_tensor` above, so that branch was
    # unreachable dead code. A tensor is always returned, matching the other
    # backend functions in this file.
    return torch.rot90(array, k=k, dims=axes)
def add(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return torch.add(x1, x2)
def einsum(subscripts, *operands, **kwargs):
operands = [convert_to_tensor(operand) for operand in operands]
# When all operands are of int8, we cast the result to int32 to align with
# the behavior of jax.
dtypes_to_resolve = list(set(standardize_dtype(x.dtype) for x in operands))
if len(dtypes_to_resolve) == 1 and dtypes_to_resolve[0] == "int8":
compute_dtype = "int32"
if get_device() == "cuda":
# TODO: torch.einsum doesn't support int32 when using cuda
compute_dtype = config.floatx()
# prevent overflow
operands = [cast(operand, compute_dtype) for operand in operands]
return cast(torch.einsum(subscripts, *operands), "int32")
return torch.einsum(subscripts, *operands)
def subtract(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
# TODO: torch.subtract doesn't support bool
if standardize_dtype(x1.dtype) == "bool":
x1 = cast(x1, x2.dtype)
if standardize_dtype(x2.dtype) == "bool":
x2 = cast(x2, x1.dtype)
return torch.subtract(x1, x2)
def matmul(x1, x2):
    """Matrix product of `x1` and `x2` with torch dtype/device workarounds.

    int8 x int8 products are accumulated into an int32 result; several
    dtype/device combinations torch cannot handle natively are computed in
    a wider dtype and cast back to the expected result dtype.
    """
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    def can_use_int_matmul(x1, x2):
        # torch._int_mm only accepts the following conditions:
        # 1. cuda
        # 2. both inputs must have int8 dtype
        # 3. both inputs must be 2d
        # 4. x1.shape must be [>16, >= 16 and a multiplier of 8]
        # 5. x2.shape must be [>= 16 and a multiplier of 8, multiplier of 8]
        if get_device() != "cuda":
            return False
        x1_dtype = standardize_dtype(x1.dtype)
        x2_dtype = standardize_dtype(x2.dtype)
        if x1_dtype != "int8" or x2_dtype != "int8":
            return False
        x1_shape = x1.shape
        x2_shape = x2.shape
        if x1.ndim != 2 or x2.ndim != 2:
            return False
        if x1_shape[0] <= 16 or x1_shape[1] < 16 or x1_shape[1] % 8 != 0:
            return False
        if x2_shape[0] < 16 or x2_shape[0] % 8 != 0 or x2_shape[1] % 8 != 0:
            return False
        return True
    # Shortcut for torch._int_mm
    # TODO: Loosen the restriction of the usage of torch._int_mm
    # TODO: We should replace torch._int_mm with the public api if possible
    if can_use_int_matmul(x1, x2):
        return torch._int_mm(x1, x2)
    x1_dtype = standardize_dtype(x1.dtype)
    x2_dtype = standardize_dtype(x2.dtype)
    # int8 inputs widen to an int32 result (same rule as the einsum path).
    if x1_dtype == "int8" and x2_dtype == "int8":
        result_dtype = "int32"
    else:
        result_dtype = dtypes.result_type(x1.dtype, x2.dtype)
    compute_dtype = result_dtype
    # TODO: torch.matmul doesn't support bool
    if compute_dtype == "bool":
        compute_dtype = config.floatx()
    # TODO: torch.matmul doesn't support float16 with cpu
    if get_device() == "cpu" and compute_dtype == "float16":
        compute_dtype = "float32"
    # TODO: torch.matmul doesn't support integer types with cuda
    if get_device() == "cuda" and "int" in compute_dtype:
        compute_dtype = config.floatx()
    x1 = cast(x1, compute_dtype)
    x2 = cast(x2, compute_dtype)
    return cast(torch.matmul(x1, x2), result_dtype)
def multiply(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return torch.multiply(x1, x2)
def mean(x, axis=None, keepdims=False):
    """Arithmetic mean of `x` along `axis`.

    Integer and bool inputs are computed and returned in the promoted
    float dtype; floating inputs keep their original dtype.
    """
    if isinstance(x, (list, tuple)):
        x = stack(x)
    x = convert_to_tensor(x)
    if axis == () or axis == []:
        # Torch handles the empty axis case differently from numpy.
        return x
    axis = to_tuple_or_list(axis) # see [NB] below
    ori_dtype = standardize_dtype(x.dtype)
    # torch.mean only supports floating point inputs
    compute_dtype = dtypes.result_type(x.dtype, "float32")
    if "int" in ori_dtype or ori_dtype == "bool":
        result_dtype = compute_dtype
    else:
        result_dtype = ori_dtype
    # [NB] the python torch op torch.mean() is generated into
    # `torch._C._VariableFunctions.pyi`, and the method
    # signature is overloaded.
    # Dynamo won't actually find the correct signature of
    # `torch.mean()` if arguments are passed via kwargs
    # So we have to pass the arguments via positional args
    # EXCEPT for those that are forced as kwargs via the `*`
    # delimiter in the overloaded method signatures.
    # Additionally, we have to create a singleton-tuple
    # when `axis` is an int to match the existing fn signature
    result = torch.mean(
        x,
        axis,
        keepdims,
        dtype=to_torch_dtype(compute_dtype),
    )
    return cast(result, result_dtype)
def max(x, axis=None, keepdims=False, initial=None):
    """Maximum of `x` along `axis`, with an optional lower bound `initial`.

    Raises:
        ValueError: If `x` has a zero-sized dimension and `initial` is None.
    """
    x = convert_to_tensor(x)
    if 0 in x.shape:
        if initial is None:
            raise ValueError("Cannot compute the max of an empty tensor.")
        elif keepdims:
            # NOTE(review): `torch.full` here uses torch's default dtype and
            # device rather than `x`'s — confirm this is intended.
            return torch.full((1,) * len(x.shape), initial)
        else:
            return torch.tensor(initial)
    if axis is None:
        result = torch.max(x)
    else:
        result = amax(x, axis=axis, keepdims=keepdims)
    # Unwrap a (values, indices) result if one was produced.
    if isinstance(getattr(result, "values", None), torch.Tensor):
        result = result.values
    if initial is not None:
        # Clamp the reduced result from below by `initial`.
        dtype = to_torch_dtype(result.dtype)
        initial = convert_to_tensor(initial, dtype=dtype)
        return torch.maximum(
            result, torch.full(result.shape, initial, dtype=dtype)
        )
    return result
def ones(shape, dtype=None):
dtype = to_torch_dtype(dtype or config.floatx())
if isinstance(shape, int):
shape = (shape,)
return torch.ones(size=shape, dtype=dtype, device=get_device())
def zeros(shape, dtype=None):
dtype = to_torch_dtype(dtype or config.floatx())
if isinstance(shape, int):
shape = (shape,)
return torch.zeros(size=shape, dtype=dtype, device=get_device())
def zeros_like(x, dtype=None):
x = convert_to_tensor(x)
dtype = to_torch_dtype(dtype or x.dtype)
return torch.zeros_like(x, dtype=dtype)
def absolute(x):
x = convert_to_tensor(x)
# bool are always non-negative
if standardize_dtype(x.dtype) == "bool":
return x
return torch.abs(x)
def abs(x):
return absolute(x)
def all(x, axis=None, keepdims=False):
x = convert_to_tensor(x)
if axis is None:
return cast(torch.all(x), "bool")
axis = to_tuple_or_list(axis)
for a in axis:
# `torch.all` does not handle multiple axes.
x = torch.all(x, dim=a, keepdim=keepdims)
return cast(x, "bool")
def angle(x):
x = convert_to_tensor(x)
ori_dtype = standardize_dtype(x.dtype)
# torch.angle doesn't support float16 with cuda
if get_device() != "cpu" and ori_dtype == "float16":
x = cast(x, "float32")
return cast(torch.angle(x), "float16")
return torch.angle(x)
def any(x, axis=None, keepdims=False):
x = convert_to_tensor(x)
if axis is None:
return cast(torch.any(x), "bool")
axis = to_tuple_or_list(axis)
for a in axis:
# `torch.any` does not handle multiple axes.
x = torch.any(x, dim=a, keepdim=keepdims)
return cast(x, "bool")
def amax(x, axis=None, keepdims=False):
x = convert_to_tensor(x)
if axis is None:
return torch.amax(x)
if axis == () or axis == []:
# Torch handles the empty axis case differently from numpy.
return x
return torch.amax(x, dim=axis, keepdim=keepdims)
def amin(x, axis=None, keepdims=False):
x = convert_to_tensor(x)
if axis is None:
return torch.amin(x)
if axis == () or axis == []:
# Torch handles the empty axis case differently from numpy.
return x
return torch.amin(x, dim=axis, keepdim=keepdims)
def append(x1, x2, axis=None):
x1, x2 = convert_to_tensor(x1), convert_to_tensor(x2)
if axis is None:
return torch.cat((x1.flatten(), x2.flatten()))
return torch.cat((x1, x2), dim=axis)
def arange(start, stop=None, step=None, dtype=None):
if dtype is None:
dtypes_to_resolve = [getattr(start, "dtype", type(start))]
if stop is not None:
dtypes_to_resolve.append(getattr(stop, "dtype", type(stop)))
if step is not None:
dtypes_to_resolve.append(getattr(step, "dtype", type(step)))
dtype = dtypes.result_type(*dtypes_to_resolve)
dtype = to_torch_dtype(dtype)
if stop is None:
start, stop = 0, start
if step is None:
step = 1
return torch.arange(
start, stop, step=step, dtype=dtype, device=get_device()
)
def arccos(x):
x = convert_to_tensor(x)
return torch.arccos(x)
def arccosh(x):
x = convert_to_tensor(x)
return torch.arccosh(x)
def arcsin(x):
x = convert_to_tensor(x)
return torch.arcsin(x)
def arcsinh(x):
x = convert_to_tensor(x)
return torch.arcsinh(x)
def arctan(x):
x = convert_to_tensor(x)
return torch.arctan(x)
def arctan2(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
result_dtype = dtypes.result_type(x1.dtype, x2.dtype, float)
compute_dtype = result_dtype
# TODO: torch.arctan2 doesn't support float16 with cpu
if get_device() == "cpu" and compute_dtype == "float16":
compute_dtype = "float32"
x1 = cast(x1, compute_dtype)
x2 = cast(x2, compute_dtype)
return cast(torch.arctan2(x1, x2), result_dtype)
def arctanh(x):
x = convert_to_tensor(x)
return torch.arctanh(x)
def argmax(x, axis=None, keepdims=False):
x = convert_to_tensor(x)
# TODO: torch.argmax doesn't support bool
if standardize_dtype(x.dtype) == "bool":
x = cast(x, "uint8")
return cast(torch.argmax(x, dim=axis, keepdim=keepdims), dtype="int32")
def argmin(x, axis=None, keepdims=False):
x = convert_to_tensor(x)
# TODO: torch.argmin doesn't support bool
if standardize_dtype(x.dtype) == "bool":
x = cast(x, "uint8")
return cast(torch.argmin(x, dim=axis, keepdim=keepdims), dtype="int32")
def argsort(x, axis=-1):
x = convert_to_tensor(x)
# TODO: torch.argsort doesn't support bool
if standardize_dtype(x.dtype) == "bool":
x = cast(x, "uint8")
if axis is None:
axis = -1
x = x.reshape(-1)
return cast(torch.argsort(x, dim=axis, stable=True), dtype="int32")
def array(x, dtype=None):
return convert_to_tensor(x, dtype=dtype)
def view(x, dtype=None):
dtype = to_torch_dtype(dtype)
x = convert_to_tensor(x)
return x.view(dtype=dtype)
def average(x, axis=None, weights=None):
    """Weighted average of `x` along `axis`.

    With no `weights`, this is the plain mean. Inputs are promoted to the
    common float result dtype before reducing.
    """
    x = convert_to_tensor(x)
    dtypes_to_resolve = [x.dtype, float]
    if weights is not None:
        weights = convert_to_tensor(weights)
        dtypes_to_resolve.append(weights.dtype)
    dtype = dtypes.result_type(*dtypes_to_resolve)
    x = cast(x, dtype)
    if weights is not None:
        weights = cast(weights, dtype)
    if axis == () or axis == []:
        # Torch handles the empty axis case differently from numpy.
        return x
    if weights is not None:
        # NOTE(review): the numerator reduces over `axis` while the weight
        # sum reduces over `dim=-1`; these agree when `weights` is 1-D along
        # the reduced axis, but confirm the same-shape-weights case against
        # `np.average` semantics.
        return torch.sum(torch.mul(x, weights), dim=axis) / torch.sum(
            weights, dim=-1
        )
    return torch.mean(x, axis)
def bartlett(x):
x = convert_to_tensor(x)
return torch.signal.windows.bartlett(x)
def hamming(x):
x = convert_to_tensor(x)
return torch.signal.windows.hamming(x)
def hanning(x):
x = convert_to_tensor(x)
return torch.signal.windows.hann(x)
def heaviside(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
dtype = dtypes.result_type(x1.dtype, x2.dtype)
if dtype in ["int8", "int16", "int32", "uint8", "uint16", "uint32"]:
dtype = config.floatx()
elif dtype == "int64":
dtype = "float64"
x1 = cast(x1, dtype)
x2 = cast(x2, dtype)
return torch.heaviside(x1, x2)
def kaiser(x, beta):
x = convert_to_tensor(x)
return torch.signal.windows.kaiser(x, beta=beta)
def bincount(x, weights=None, minlength=0, sparse=False):
if sparse:
raise ValueError("Unsupported value `sparse=True` with torch backend")
x = convert_to_tensor(x)
dtypes_to_resolve = [x.dtype]
if weights is not None:
weights = convert_to_tensor(weights)
dtypes_to_resolve.append(weights.dtype)
dtype = dtypes.result_type(*dtypes_to_resolve)
else:
dtype = "int32"
if len(x.shape) == 2:
if weights is None:
def bincount_fn(arr):
return torch.bincount(arr, minlength=minlength)
bincounts = list(map(bincount_fn, x))
else:
def bincount_fn(arr_w):
return torch.bincount(
arr_w[0], weights=arr_w[1], minlength=minlength
)
bincounts = list(map(bincount_fn, zip(x, weights)))
return cast(torch.stack(bincounts), dtype)
return cast(torch.bincount(x, weights, minlength), dtype)
def bitwise_and(x, y):
x = convert_to_tensor(x)
y = convert_to_tensor(y)
return torch.bitwise_and(x, y)
def bitwise_invert(x):
x = convert_to_tensor(x)
return torch.bitwise_not(x)
def bitwise_not(x):
return bitwise_invert(x)
def bitwise_or(x, y):
x = convert_to_tensor(x)
y = convert_to_tensor(y)
return torch.bitwise_or(x, y)
def bitwise_xor(x, y):
x = convert_to_tensor(x)
y = convert_to_tensor(y)
return torch.bitwise_xor(x, y)
def bitwise_left_shift(x, y):
x = convert_to_tensor(x)
if not isinstance(y, int):
y = convert_to_tensor(y)
return torch.bitwise_left_shift(x, y)
def left_shift(x, y):
return bitwise_left_shift(x, y)
def bitwise_right_shift(x, y):
x = convert_to_tensor(x)
if not isinstance(y, int):
y = convert_to_tensor(y)
return torch.bitwise_right_shift(x, y)
def right_shift(x, y):
return bitwise_right_shift(x, y)
def blackman(x):
x = convert_to_tensor(x)
return torch.signal.windows.blackman(x)
def broadcast_to(x, shape):
x = convert_to_tensor(x)
return torch.broadcast_to(x, shape)
def cbrt(x):
x = convert_to_tensor(x)
dtype = standardize_dtype(x.dtype)
if dtype == "bool":
x = cast(x, "int32")
elif dtype == "int64":
x = cast(x, "float64")
return torch.sign(x) * torch.abs(x) ** (1.0 / 3.0)
def ceil(x):
x = convert_to_tensor(x)
ori_dtype = standardize_dtype(x.dtype)
# TODO: torch.ceil doesn't support bool
if ori_dtype == "bool":
x = cast(x, "uint8")
# TODO: torch.ceil doesn't support float16 with cpu
elif get_device() == "cpu" and ori_dtype == "float16":
x = cast(x, config.floatx())
if ori_dtype == "int64":
dtype = config.floatx()
else:
dtype = dtypes.result_type(ori_dtype, float)
return cast(torch.ceil(x), dtype=dtype)
def clip(x, x_min, x_max):
x = convert_to_tensor(x)
x_min = convert_to_tensor(x_min)
x_max = convert_to_tensor(x_max)
ori_dtype = standardize_dtype(x.dtype)
# TODO: torch.clip doesn't support float16 with cpu
if get_device() == "cpu" and ori_dtype == "float16":
x = cast(x, "float32")
return cast(torch.clip(x, min=x_min, max=x_max), "float16")
if ori_dtype == "bool":
x = cast(x, "int32")
return torch.clip(x, min=x_min, max=x_max)
def concatenate(xs, axis=0):
xs = [convert_to_tensor(x) for x in xs]
return torch.cat(xs, dim=axis)
def conjugate(x):
if not isinstance(x, torch.Tensor):
x = torch.from_numpy(x) # needed for complex type conversion
return torch.conj(x).resolve_conj()
def conj(x):
if not isinstance(x, torch.Tensor):
x = torch.from_numpy(x) # needed for complex type conversion
return torch.conj(x).resolve_conj()
def copy(x):
x = convert_to_tensor(x)
return torch.clone(x)
def cos(x):
x = convert_to_tensor(x)
return torch.cos(x)
def cosh(x):
x = convert_to_tensor(x)
return torch.cosh(x)
def count_nonzero(x, axis=None):
x = convert_to_tensor(x)
if axis == () or axis == []:
# Torch handles the empty axis case differently from numpy.
return cast(torch.ne(x, 0), "int32")
return cast(torch.count_nonzero(x, dim=axis).T, "int32")
def cross(x1, x2, axisa=-1, axisb=-1, axisc=-1, axis=None):
if axisa != -1 or axisb != -1 or axisc != -1:
raise ValueError(
"Torch backend does not support `axisa`, `axisb`, or `axisc`. "
f"Received: axisa={axisa}, axisb={axisb}, axisc={axisc}. Please "
"use `axis` arg in torch backend."
)
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
compute_dtype = dtypes.result_type(x1.dtype, x2.dtype)
result_dtype = compute_dtype
# TODO: torch.cross doesn't support bfloat16 with gpu
if get_device() == "cuda" and compute_dtype == "bfloat16":
compute_dtype = "float32"
# TODO: torch.cross doesn't support float16 with cpu
elif get_device() == "cpu" and compute_dtype == "float16":
compute_dtype = "float32"
x1 = cast(x1, compute_dtype)
x2 = cast(x2, compute_dtype)
return cast(torch.cross(x1, x2, dim=axis), result_dtype)
def cumprod(x, axis=None, dtype=None):
x = convert_to_tensor(x)
if axis is None:
x = x.flatten()
axis = 0
dtype = dtypes.result_type(dtype or x.dtype)
if dtype == "bool":
dtype = "int32"
# TODO: torch.cumprod doesn't support float16 with cpu
elif get_device() == "cpu" and dtype == "float16":
return cast(
torch.cumprod(x, dim=axis, dtype=to_torch_dtype("float32")),
"float16",
)
return torch.cumprod(x, dim=axis, dtype=to_torch_dtype(dtype))
def cumsum(x, axis=None, dtype=None):
x = convert_to_tensor(x)
if axis is None:
x = x.flatten()
axis = 0
dtype = dtypes.result_type(dtype or x.dtype)
if dtype == "bool":
dtype = "int32"
# TODO: torch.cumsum doesn't support float16 with cpu
elif get_device() == "cpu" and dtype == "float16":
return cast(
torch.cumsum(x, dim=axis, dtype=to_torch_dtype("float32")),
"float16",
)
return torch.cumsum(x, dim=axis, dtype=to_torch_dtype(dtype))
def deg2rad(x):
x = convert_to_tensor(x)
if standardize_dtype(x.dtype) == "int64":
return cast(torch.deg2rad(x), "float64")
return torch.deg2rad(x)
def diag(x, k=0):
x = convert_to_tensor(x)
return torch.diag(x, diagonal=k)
def diagflat(x, k=0):
x = convert_to_tensor(x)
return torch.diagflat(x, offset=k)
def diagonal(x, offset=0, axis1=0, axis2=1):
x = convert_to_tensor(x)
return torch.diagonal(
x,
offset=offset,
dim1=axis1,
dim2=axis2,
)
def diff(a, n=1, axis=-1):
a = convert_to_tensor(a)
return torch.diff(a, n=n, dim=axis)
def digitize(x, bins):
x = convert_to_tensor(x)
bins = convert_to_tensor(bins)
if standardize_dtype(x.dtype) == "bool":
x = cast(x, "uint8")
return cast(torch.bucketize(x, bins, right=True), "int32")
def dot(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
result_dtype = dtypes.result_type(x1.dtype, x2.dtype)
# GPU only supports float types
compute_dtype = dtypes.result_type(result_dtype, float)
# TODO: torch.matmul doesn't support float16 with cpu
if get_device() == "cpu" and compute_dtype == "float16":
compute_dtype = "float32"
x1 = cast(x1, compute_dtype)
x2 = cast(x2, compute_dtype)
if x1.ndim == 0 or x2.ndim == 0:
return cast(torch.multiply(x1, x2), result_dtype)
return cast(torch.matmul(x1, x2), result_dtype)
def empty(shape, dtype=None):
dtype = to_torch_dtype(dtype or config.floatx())
return torch.empty(size=shape, dtype=dtype, device=get_device())
def empty_like(x, dtype=None):
x = convert_to_tensor(x)
dtype = to_torch_dtype(dtype or x.dtype)
return torch.empty_like(x, dtype=dtype, device=get_device())
def equal(x1, x2):
x1, x2 = convert_to_tensor(x1), convert_to_tensor(x2)
return torch.eq(x1, x2)
def exp(x):
x = convert_to_tensor(x)
ori_dtype = standardize_dtype(x.dtype)
if "int" in ori_dtype or ori_dtype == "bool":
x = cast(x, config.floatx())
return torch.exp(x)
def exp2(x):
x = convert_to_tensor(x)
ori_dtype = standardize_dtype(x.dtype)
if "int" in ori_dtype or ori_dtype == "bool":
x = cast(x, config.floatx())
return torch.exp2(x)
def expand_dims(x, axis):
x = convert_to_tensor(x)
axis = to_tuple_or_list(axis)
out_ndim = len(x.shape) + len(axis)
axis = sorted([canonicalize_axis(a, out_ndim) for a in axis])
for a in axis:
x = torch.unsqueeze(x, dim=a)
return x
def expm1(x):
x = convert_to_tensor(x)
ori_dtype = standardize_dtype(x.dtype)
if "int" in ori_dtype or ori_dtype == "bool":
x = cast(x, config.floatx())
return torch.expm1(x)
def flip(x, axis=None):
x = convert_to_tensor(x)
if axis is None:
axis = tuple(range(x.ndim))
axis = to_tuple_or_list(axis)
return torch.flip(x, dims=axis)
def floor(x):
x = convert_to_tensor(x)
dtype = (
config.floatx()
if standardize_dtype(x.dtype) == "int64"
else dtypes.result_type(x.dtype, float)
)
x = cast(x, dtype)
return torch.floor(x)
def full(shape, fill_value, dtype=None):
dtype = to_torch_dtype(dtype)
fill_value = convert_to_tensor(fill_value, dtype=dtype)
if len(fill_value.shape) > 0:
# `torch.full` only supports scala `fill_value`.
expand_size = len(shape) - len(fill_value.shape)
tile_shape = tuple(shape[:expand_size]) + (1,) * len(fill_value.shape)
return torch.tile(fill_value, tile_shape)
return torch.full(
size=shape, fill_value=fill_value, dtype=dtype, device=get_device()
)
def full_like(x, fill_value, dtype=None):
dtype = dtype or x.dtype
return full(shape=x.shape, fill_value=fill_value, dtype=dtype)
def gcd(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return torch.gcd(x1, x2)
def greater(x1, x2):
x1, x2 = convert_to_tensor(x1), convert_to_tensor(x2)
return torch.greater(x1, x2)
def greater_equal(x1, x2):
x1, x2 = convert_to_tensor(x1), convert_to_tensor(x2)
return torch.greater_equal(x1, x2)
def hstack(xs):
xs = [convert_to_tensor(x) for x in xs]
return torch.hstack(xs)
def hypot(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
dtype = dtypes.result_type(x1.dtype, x2.dtype)
if dtype in ["int8", "int16", "int32", "uint8", "uint16", "uint32"]:
dtype = config.floatx()
elif dtype == "int64":
dtype = "float64"
x1 = cast(x1, dtype)
x2 = cast(x2, dtype)
return torch.hypot(x1, x2)
def identity(n, dtype=None):
dtype = to_torch_dtype(dtype or config.floatx())
# TODO: torch.eye doesn't support bfloat16 with cpu
if get_device() == "cpu" and dtype == torch.bfloat16:
return cast(
torch.eye(n, dtype=to_torch_dtype("float32"), device=get_device()),
dtype,
)
return torch.eye(n, dtype=dtype, device=get_device())
def imag(x):
if not isinstance(x, torch.Tensor):
x = torch.from_numpy(x) # needed for complex type conversion
return torch.imag(x)
def isclose(x1, x2, rtol=1e-5, atol=1e-8, equal_nan=False):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
result_dtype = dtypes.result_type(x1.dtype, x2.dtype)
x1 = cast(x1, result_dtype)
x2 = cast(x2, result_dtype)
return torch.isclose(x1, x2, rtol, atol, equal_nan)
def isfinite(x):
x = convert_to_tensor(x)
return torch.isfinite(x)
def isin(x1, x2, assume_unique=False, invert=False):
    """Tests element-wise whether each entry of `x1` is present in `x2`."""
    x1 = convert_to_tensor(x1)
    x2 = convert_to_tensor(x2)
    # torch.isin does not accept bool tensors, so promote them first.
    promoted = dtypes.result_type(x1.dtype, x2.dtype)
    if promoted == "bool":
        # Both inputs are bool: compare them as int32.
        x1 = cast(x1, "int32")
        x2 = cast(x2, "int32")
    elif standardize_dtype(x1.dtype) == "bool":
        x1 = cast(x1, x2.dtype)
    elif standardize_dtype(x2.dtype) == "bool":
        x2 = cast(x2, x1.dtype)
    return torch.isin(x1, x2, assume_unique=assume_unique, invert=invert)
def isinf(x):
x = convert_to_tensor(x)
return torch.isinf(x)
def isnan(x):
x = convert_to_tensor(x)
return torch.isnan(x)
def isneginf(x):
x = convert_to_tensor(x)
return torch.isneginf(x)
def isposinf(x):
x = convert_to_tensor(x)
return torch.isposinf(x)
def isreal(x):
x = convert_to_tensor(x)
return torch.isreal(x)
def kron(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return torch.kron(x1, x2)
def lcm(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
return torch.lcm(x1, x2)
def ldexp(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
dtype = dtypes.result_type(x1.dtype, x2.dtype, float)
if standardize_dtype(x2.dtype) not in dtypes.INT_TYPES:
raise TypeError(
f"ldexp exponent must be an integer type. "
f"Received: x2 dtype={x2.dtype}"
)
return cast(torch.ldexp(x1, x2), dtype)
def less(x1, x2):
x1, x2 = convert_to_tensor(x1), convert_to_tensor(x2)
return torch.less(x1, x2)
def less_equal(x1, x2):
x1, x2 = convert_to_tensor(x1), convert_to_tensor(x2)
return torch.less_equal(x1, x2)
def linspace(
    start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0
):
    """Evenly spaced values from `start` to `stop`, mirroring `np.linspace`.

    Scalar endpoints dispatch to `torch.linspace`; sequence endpoints are
    broadcast manually. `axis` must be 0 (torch has no axis support here).

    Raises:
        ValueError: If `axis != 0`.
    """
    if axis != 0:
        raise ValueError(
            "torch.linspace does not support an `axis` argument. "
            f"Received axis={axis}"
        )
    if dtype is None:
        # Resolve the result dtype from both endpoints, promoted to float.
        dtypes_to_resolve = [
            getattr(start, "dtype", type(start)),
            getattr(stop, "dtype", type(stop)),
            float,
        ]
        dtype = dtypes.result_type(*dtypes_to_resolve)
    dtype = to_torch_dtype(dtype)
    # Step defaults to NaN when it cannot be computed (e.g. num <= 1 with
    # endpoint=True), matching numpy's behavior for retstep.
    step = convert_to_tensor(torch.nan)
    if endpoint:
        if num > 1:
            step = (stop - start) / (num - 1)
    else:
        if num > 0:
            step = (stop - start) / num
        if num > 1:
            # Exclude the endpoint by shortening the interval.
            stop = stop - ((stop - start) / num)
    if hasattr(start, "__len__") and hasattr(stop, "__len__"):
        # Sequence endpoints: interpolate manually with broadcasting.
        # NOTE(review): `num == 1` divides by zero here and yields NaN steps
        # — confirm callers never hit this branch with num=1.
        start = convert_to_tensor(start, dtype=dtype)
        stop = convert_to_tensor(stop, dtype=dtype)
        steps = torch.arange(num, dtype=dtype, device=get_device()) / (num - 1)
        # reshape `steps` to allow for broadcasting
        for i in range(start.ndim):
            steps = steps.unsqueeze(-1)
        # increments from `start` to `stop` in each dimension
        linspace = start[None] + steps * (stop - start)[None]
    else:
        linspace = torch.linspace(
            start=start,
            end=stop,
            steps=num,
            dtype=dtype,
            device=get_device(),
        )
    if retstep is True:
        return (linspace, step)
    return linspace
def log(x):
x = convert_to_tensor(x)
return torch.log(x)
def log10(x):
x = convert_to_tensor(x)
return torch.log10(x)
def log1p(x):
x = convert_to_tensor(x)
return torch.log1p(x)
def log2(x):
x = convert_to_tensor(x)
return torch.log2(x)
def logaddexp(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
dtype = dtypes.result_type(x1.dtype, x2.dtype, float)
# TODO: torch.logaddexp doesn't support float16 with cpu
if get_device() == "cpu" and dtype == "float16":
x1 = cast(x1, "float32")
x2 = cast(x2, "float32")
return cast(torch.logaddexp(x1, x2), dtype)
else:
x1 = cast(x1, dtype)
x2 = cast(x2, dtype)
return torch.logaddexp(x1, x2)
def logaddexp2(x1, x2):
x1 = convert_to_tensor(x1)
x2 = convert_to_tensor(x2)
dtype = dtypes.result_type(x1.dtype, x2.dtype, float)
x1 = cast(x1, dtype)
x2 = cast(x2, dtype)
return torch.logaddexp2(x1, x2)
def logical_and(x1, x2):
x1, x2 = convert_to_tensor(x1), convert_to_tensor(x2)
return torch.logical_and(x1, x2)
def logical_not(x):
x = convert_to_tensor(x)
return torch.logical_not(x)
def logical_or(x1, x2):
x1, x2 = convert_to_tensor(x1), convert_to_tensor(x2)
return torch.logical_or(x1, x2)
def logspace(start, stop, num=50, endpoint=True, base=10, dtype=None, axis=0):
if axis != 0:
raise ValueError(
"torch.logspace does not support an `axis` argument. "
f"Received axis={axis}"
)
if dtype is None:
dtypes_to_resolve = [
getattr(start, "dtype", type(start)),
getattr(stop, "dtype", type(stop)),
float,
]
dtype = dtypes.result_type(*dtypes_to_resolve)
dtype = to_torch_dtype(dtype)
if endpoint is False:
stop = stop - ((stop - start) / num)
if hasattr(start, "__len__") and hasattr(stop, "__len__"):
start = convert_to_tensor(start, dtype=dtype)
stop = convert_to_tensor(stop, dtype=dtype)
steps = torch.arange(num, dtype=dtype, device=get_device()) / (num - 1)
# reshape `steps` to allow for broadcasting
for i in range(start.ndim):
steps = steps.unsqueeze(-1)
# increments from `start` to `stop` in each dimension
linspace = start[None] + steps * (stop - start)[None]
logspace = base**linspace
else:
compute_dtype = dtype
# TODO: torch.logspace doesn't support float16 with cpu
if get_device() == "cpu" and dtype == torch.float16:
compute_dtype = torch.float32
logspace = cast(
torch.logspace(
start=start,
end=stop,
steps=num,
base=base,
dtype=compute_dtype,
device=get_device(),
),
dtype,
)
return logspace
def maximum(x1, x2):
    """Elementwise maximum of `x1` and `x2` with Keras dtype promotion."""
    # Python scalars are left unconverted so `result_type` can apply its
    # weak-type promotion rules based on the Python type.
    x1 = x1 if isinstance(x1, (int, float)) else convert_to_tensor(x1)
    x2 = x2 if isinstance(x2, (int, float)) else convert_to_tensor(x2)
    dtype = dtypes.result_type(
        getattr(x1, "dtype", type(x1)),
        getattr(x2, "dtype", type(x2)),
    )
    return torch.maximum(
        convert_to_tensor(x1, dtype), convert_to_tensor(x2, dtype)
    )
def median(x, axis=None, keepdims=False):
x = convert_to_tensor(x)
compute_dtype = dtypes.result_type(x.dtype, "float32")
result_dtype = dtypes.result_type(x.dtype, float)
x = cast(x, compute_dtype)
if axis is None and keepdims is False:
return cast(torch.median(x), result_dtype)
elif isinstance(axis, int):
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/torch/trainer.py | keras/src/backend/torch/trainer.py | import warnings
import numpy as np
import torch
from packaging.version import parse
from keras.src import backend
from keras.src import callbacks as callbacks_module
from keras.src import optimizers as optimizers_module
from keras.src import tree
from keras.src.backend import config
from keras.src.trainers import trainer as base_trainer
from keras.src.trainers.data_adapters import array_slicing
from keras.src.trainers.data_adapters import data_adapter_utils
from keras.src.trainers.epoch_iterator import EpochIterator
from keras.src.utils import traceback_utils
class TorchTrainer(base_trainer.Trainer):
    """PyTorch implementation of the Keras training/evaluation/prediction
    loops (`fit`, `evaluate`, `predict` and their on-batch variants)."""
    def __init__(self):
        super().__init__()
        # Per-mode step functions, built lazily by the `make_*_function`
        # methods (optionally wrapped in `torch.compile`).
        self.train_function = None
        self.test_function = None
        self.predict_function = None
    def _should_torch_compile(self):
        """Return whether step functions should be wrapped in
        `torch.compile`, downgrading `jit_compile` on old torch."""
        # require torch>=2.1.0 to enable dynamo since it
        # includes many improvements/fixes to torch.compile()
        # TODO eventually we want to get rid of this when
        # torch is upgraded to >=2.1 (from 2.0.1) in g3
        if self.jit_compile and parse(torch.__version__) < parse("2.1.0"):
            warnings.warn(
                "Please upgrade to torch>=2.1.0 for `jit_compile=True` "
                "to take effect. Using `jit_compile=False`"
            )
            self.jit_compile = False
        return self.jit_compile
    def train_step(self, data):
        """Run forward pass, loss, backward pass and optimizer update for
        one batch; returns the metric logs."""
        x, y, sample_weight = data_adapter_utils.unpack_x_y_sample_weight(data)
        # Compute predictions
        if self._call_has_training_arg:
            y_pred = self(x, training=True)
        else:
            y_pred = self(x)
        # Call torch.nn.Module.zero_grad() to clear the leftover gradients
        # for the weights from the previous train step.
        self.zero_grad()
        loss = self._compute_loss(
            x=x, y=y, y_pred=y_pred, sample_weight=sample_weight, training=True
        )
        # Weight the loss metric by the batch size (taken from the first
        # non-None input structure element).
        self._loss_tracker.update_state(
            loss,
            sample_weight=next(
                i for i in tree.flatten(x) if i is not None
            ).shape[0],
        )
        if self.optimizer is not None:
            loss = self.optimizer.scale_loss(loss)
        # Compute gradients
        if self.trainable_weights:
            # Call torch.Tensor.backward() on the loss to compute gradients
            # for the weights.
            loss.backward()
            trainable_weights = self.trainable_weights[:]
            gradients = [v.value.grad for v in trainable_weights]
            # Update weights
            with torch.no_grad():
                self.optimizer.apply(gradients, trainable_weights)
        else:
            warnings.warn("The model does not have any trainable weights.")
        return self.compute_metrics(x, y, y_pred, sample_weight=sample_weight)
    def test_step(self, data):
        """Run forward pass and loss for one batch in inference mode;
        returns the metric logs."""
        (
            x,
            y,
            sample_weight,
        ) = data_adapter_utils.unpack_x_y_sample_weight(data)
        if self._call_has_training_arg:
            y_pred = self(x, training=False)
        else:
            y_pred = self(x)
        loss = self._compute_loss(
            x=x, y=y, y_pred=y_pred, sample_weight=sample_weight, training=False
        )
        self._loss_tracker.update_state(
            loss,
            sample_weight=next(
                i for i in tree.flatten(x) if i is not None
            ).shape[0],
        )
        return self.compute_metrics(x, y, y_pred, sample_weight=sample_weight)
    def predict_step(self, data):
        """Run the forward pass for one batch and return the predictions."""
        x, _, _ = data_adapter_utils.unpack_x_y_sample_weight(data)
        if self._call_has_training_arg:
            y_pred = self(x, training=False)
        else:
            y_pred = self(x)
        return y_pred
    def make_train_function(self, force=False):
        """Build (or reuse) `self.train_function`; rebuilt when `force`."""
        if self.train_function is not None and not force:
            return self.train_function
        if self.steps_per_execution > 1:
            raise ValueError(
                "`steps_per_execution` must be 1 with the PyTorch backend. "
                f"Received: steps_per_execution={self.steps_per_execution}"
            )
        def one_step_on_data(data):
            """Runs a single training step on a batch of data."""
            data = data[0]
            return self.train_step(data)
        if self._should_torch_compile():
            self.train_function = torch.compile(one_step_on_data)
        else:
            self.train_function = one_step_on_data
    def make_test_function(self, force=False):
        """Build (or reuse) `self.test_function`; rebuilt when `force`."""
        if self.test_function is not None and not force:
            return self.test_function
        if self.steps_per_execution > 1:
            raise ValueError(
                "`steps_per_execution` must be 1 with the PyTorch backend. "
                f"Received: steps_per_execution={self.steps_per_execution}"
            )
        def one_step_on_data(data):
            """Runs a single test step on a batch of data."""
            data = data[0]
            with torch.no_grad():
                return self.test_step(data)
        if self._should_torch_compile():
            self.test_function = torch.compile(one_step_on_data)
        else:
            self.test_function = one_step_on_data
    def make_predict_function(self, force=False):
        """Build (or reuse) `self.predict_function`; rebuilt when `force`."""
        if self.predict_function is not None and not force:
            return self.predict_function
        if self.steps_per_execution > 1:
            raise ValueError(
                "`steps_per_execution` must be 1 with the PyTorch backend. "
                f"Received: steps_per_execution={self.steps_per_execution}"
            )
        def one_step_on_data(data):
            """Runs a predict test step on a batch of data."""
            data = data[0]
            with torch.no_grad():
                return self.predict_step(data)
        if self._should_torch_compile():
            self.predict_function = torch.compile(one_step_on_data)
        else:
            self.predict_function = one_step_on_data
    @traceback_utils.filter_traceback
    def fit(
        self,
        x=None,
        y=None,
        batch_size=None,
        epochs=1,
        verbose="auto",
        callbacks=None,
        validation_split=0.0,
        validation_data=None,
        shuffle=True,
        class_weight=None,
        sample_weight=None,
        initial_epoch=0,
        steps_per_epoch=None,
        validation_steps=None,
        validation_batch_size=None,
        validation_freq=1,
    ):
        """Train the model; same contract as `base_trainer.Trainer.fit`.

        Returns the `History` callback. Raises `ValueError` if called
        before `compile()`.
        """
        if not self.compiled:
            raise ValueError(
                "You must call `compile()` before calling `fit()`."
            )
        # Possibly cap epochs for debugging runs.
        max_epochs = config.max_epochs()
        if max_epochs and max_epochs < epochs:
            warnings.warn("Limiting epochs to %d" % max_epochs)
            epochs = max_epochs
        # TODO: respect compiled trainable state
        self._eval_epoch_iterator = None
        if validation_split and validation_data is None:
            # Create the validation data using the training data. Only supported
            # for TF/numpy/jax arrays.
            # TODO: Support torch tensors for validation data.
            (
                (x, y, sample_weight),
                validation_data,
            ) = array_slicing.train_validation_split(
                (x, y, sample_weight), validation_split=validation_split
            )
        if validation_data is not None:
            (
                val_x,
                val_y,
                val_sample_weight,
            ) = data_adapter_utils.unpack_x_y_sample_weight(validation_data)
        # Create an iterator that yields batches for one epoch.
        epoch_iterator = TorchEpochIterator(
            x=x,
            y=y,
            sample_weight=sample_weight,
            batch_size=batch_size,
            steps_per_epoch=steps_per_epoch,
            shuffle=shuffle,
            class_weight=class_weight,
            steps_per_execution=self.steps_per_execution,
        )
        self._symbolic_build(iterator=epoch_iterator)
        epoch_iterator.reset()
        # Container that configures and calls callbacks.
        if not isinstance(callbacks, callbacks_module.CallbackList):
            callbacks = callbacks_module.CallbackList(
                callbacks,
                add_history=True,
                add_progbar=verbose != 0,
                verbose=verbose,
                epochs=epochs,
                steps=epoch_iterator.num_batches,
                model=self,
            )
        self.stop_training = False
        training_logs = {}
        self.make_train_function()
        callbacks.on_train_begin()
        initial_epoch = self._initial_epoch or initial_epoch
        for epoch in range(initial_epoch, epochs):
            self.reset_metrics()
            callbacks.on_epoch_begin(epoch)
            # Switch the torch Module to training mode. Inform torch layers to
            # do training behavior in case the user did not use `self.training`
            # when implementing a custom layer with torch layers.
            self.train()
            logs = {}
            for begin_step, end_step, data in epoch_iterator:
                # Callbacks
                callbacks.on_train_batch_begin(begin_step)
                logs = self.train_function(data)
                # Callbacks
                callbacks.on_train_batch_end(end_step, logs)
                if self.stop_training:
                    break
            # Override with model metrics instead of last step logs if needed.
            epoch_logs = dict(self._get_metrics_result_or_logs(logs))
            # Switch the torch Module back to testing mode.
            self.eval()
            # Run validation.
            if validation_data is not None and self._should_eval(
                epoch, validation_freq
            ):
                # Create TorchEpochIterator for evaluation and cache it.
                if getattr(self, "_eval_epoch_iterator", None) is None:
                    self._eval_epoch_iterator = TorchEpochIterator(
                        x=val_x,
                        y=val_y,
                        sample_weight=val_sample_weight,
                        batch_size=validation_batch_size or batch_size,
                        steps_per_execution=self.steps_per_execution,
                        steps_per_epoch=validation_steps,
                        shuffle=False,
                    )
                val_logs = self.evaluate(
                    x=val_x,
                    y=val_y,
                    sample_weight=val_sample_weight,
                    batch_size=validation_batch_size or batch_size,
                    steps=validation_steps,
                    callbacks=callbacks,
                    return_dict=True,
                    _use_cached_eval_dataset=True,
                )
                val_logs = {
                    f"val_{name}": val for name, val in val_logs.items()
                }
                epoch_logs.update(val_logs)
            callbacks.on_epoch_end(epoch, epoch_logs)
            training_logs = epoch_logs
            if self.stop_training:
                break
        if (
            isinstance(self.optimizer, optimizers_module.Optimizer)
            and epochs > 0
        ):
            self.optimizer.finalize_variable_values(self.trainable_weights)
        # If _eval_epoch_iterator exists, delete it after all epochs are done.
        if getattr(self, "_eval_epoch_iterator", None) is not None:
            del self._eval_epoch_iterator
        callbacks.on_train_end(logs=training_logs)
        return self.history
    @traceback_utils.filter_traceback
    def evaluate(
        self,
        x=None,
        y=None,
        batch_size=None,
        verbose="auto",
        sample_weight=None,
        steps=None,
        callbacks=None,
        return_dict=False,
        **kwargs,
    ):
        """Evaluate the model; same contract as the base `Trainer.evaluate`.

        The private `_use_cached_eval_dataset` kwarg (set by `fit`) reuses
        the iterator cached on `self._eval_epoch_iterator`.
        """
        # TODO: respect compiled trainable state
        use_cached_eval_dataset = kwargs.pop("_use_cached_eval_dataset", False)
        if kwargs:
            raise ValueError(f"Arguments not recognized: {kwargs}")
        if use_cached_eval_dataset:
            epoch_iterator = self._eval_epoch_iterator
        else:
            # Create an iterator that yields batches of input/target data.
            epoch_iterator = TorchEpochIterator(
                x=x,
                y=y,
                sample_weight=sample_weight,
                batch_size=batch_size,
                steps_per_epoch=steps,
                shuffle=False,
                steps_per_execution=self.steps_per_execution,
            )
        self._symbolic_build(iterator=epoch_iterator)
        epoch_iterator.reset()
        # Container that configures and calls callbacks.
        if not isinstance(callbacks, callbacks_module.CallbackList):
            callbacks = callbacks_module.CallbackList(
                callbacks,
                add_progbar=verbose != 0,
                verbose=verbose,
                epochs=1,
                steps=epoch_iterator.num_batches,
                model=self,
            )
        # Switch the torch Module back to testing mode.
        self.eval()
        self.make_test_function()
        self.stop_evaluating = False
        callbacks.on_test_begin()
        logs = {}
        self.reset_metrics()
        for begin_step, end_step, data in epoch_iterator:
            callbacks.on_test_batch_begin(begin_step)
            logs = self.test_function(data)
            callbacks.on_test_batch_end(end_step, logs)
            if self.stop_evaluating:
                break
        logs = self._get_metrics_result_or_logs(logs)
        callbacks.on_test_end(logs)
        if return_dict:
            return logs
        return self._flatten_metrics_in_order(logs)
    @traceback_utils.filter_traceback
    def predict(
        self, x, batch_size=None, verbose="auto", steps=None, callbacks=None
    ):
        """Generate predictions for `x`; returns numpy arrays concatenated
        across batches, matching the structure of a single batch output."""
        # Create an iterator that yields batches of input data.
        epoch_iterator = TorchEpochIterator(
            x=x,
            batch_size=batch_size,
            steps_per_epoch=steps,
            shuffle=False,
            steps_per_execution=self.steps_per_execution,
        )
        # Container that configures and calls callbacks.
        if not isinstance(callbacks, callbacks_module.CallbackList):
            callbacks = callbacks_module.CallbackList(
                callbacks,
                add_progbar=verbose != 0,
                verbose=verbose,
                epochs=1,
                steps=epoch_iterator.num_batches,
                model=self,
            )
        def append_to_outputs(batch_outputs, outputs):
            # Accumulate each batch's outputs into per-leaf lists,
            # preserving the (possibly nested) output structure.
            if outputs is None:
                outputs = tree.map_structure(
                    lambda batch_output: [batch_output],
                    batch_outputs,
                )
            else:
                tree.map_structure_up_to(
                    batch_outputs,
                    lambda output, batch_output: output.append(batch_output),
                    outputs,
                    batch_outputs,
                )
            return outputs
        # Switch the torch Module back to testing mode.
        self.eval()
        self.make_predict_function()
        self.stop_predicting = False
        callbacks.on_predict_begin()
        outputs = None
        for begin_step, end_step, data in epoch_iterator:
            callbacks.on_predict_batch_begin(begin_step)
            batch_outputs = self.predict_function(data)
            outputs = append_to_outputs(batch_outputs, outputs)
            callbacks.on_predict_batch_end(end_step, {"outputs": batch_outputs})
            if self.stop_predicting:
                break
        callbacks.on_predict_end()
        outputs = tree.map_structure(backend.convert_to_numpy, outputs)
        return tree.map_structure_up_to(batch_outputs, np.concatenate, outputs)
    def train_on_batch(
        self,
        x,
        y=None,
        sample_weight=None,
        class_weight=None,
        return_dict=False,
    ):
        """Run a single gradient update on one batch of data.

        Raises `ValueError` if both `sample_weight` and `class_weight`
        are given.
        """
        self._assert_compile_called("train_on_batch")
        if class_weight is not None:
            if sample_weight is not None:
                raise ValueError(
                    "Arguments `sample_weight` and `class_weight` "
                    "cannot be specified at the same time. "
                    f"Received: sample_weight={sample_weight}, "
                    f"class_weight={class_weight}"
                )
            sample_weight = data_adapter_utils.class_weight_to_sample_weights(
                y, class_weight
            )
        data = (x, y, sample_weight)
        # Maybe build model
        self._symbolic_build(data_batch=data)
        self.make_train_function()
        logs = self.train_function([data])
        logs = tree.map_structure(lambda x: np.array(x), logs)
        if return_dict:
            return logs
        return self._flatten_metrics_in_order(logs)
    def test_on_batch(
        self,
        x,
        y=None,
        sample_weight=None,
        return_dict=False,
    ):
        """Evaluate the model on a single batch of data."""
        self._assert_compile_called("test_on_batch")
        data = (x, y, sample_weight)
        # Maybe build model
        self._symbolic_build(data_batch=data)
        self.make_test_function()
        logs = self.test_function([data])
        logs = tree.map_structure(lambda x: np.array(x), logs)
        if return_dict:
            return logs
        return self._flatten_metrics_in_order(logs)
    def predict_on_batch(self, x):
        """Return predictions for a single batch, as numpy arrays."""
        self.make_predict_function()
        batch_outputs = self.predict_function([(x,)])
        batch_outputs = tree.map_structure(
            backend.convert_to_numpy, batch_outputs
        )
        return batch_outputs
class TorchEpochIterator(EpochIterator):
    """EpochIterator that yields batches through a torch `DataLoader`."""
    def _get_iterator(self):
        # Delegate to the data adapter's native torch DataLoader so the
        # batches arrive as torch tensors.
        return self.data_adapter.get_torch_dataloader()
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/torch/random.py | keras/src/backend/torch/random.py | import torch
import torch._dynamo as dynamo
import torch.nn.functional as tnn
from keras.src.backend.config import floatx
from keras.src.backend.torch.core import convert_to_tensor
from keras.src.backend.torch.core import get_device
from keras.src.backend.torch.core import to_torch_dtype
from keras.src.random.seed_generator import SeedGenerator
from keras.src.random.seed_generator import draw_seed
from keras.src.random.seed_generator import make_default_seed
# torch.Generator not supported with dynamo
# see: https://github.com/pytorch/pytorch/issues/88576
@dynamo.disable()
def torch_seed_generator(seed):
    """Build a `torch.Generator` seeded from the Keras seed state.

    Returns `None` on the meta (symbolic) device, where generators are
    not supported. Disabled under dynamo (see link above).
    """
    first_seed, second_seed = draw_seed(seed)
    device = get_device()
    if device == "meta":
        # Generator is not supported by the meta device.
        return None
    generator = torch.Generator(device=get_device())
    generator.manual_seed(int(first_seed + second_seed))
    return generator
def normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
    """Sample from a normal distribution with the given shape and dtype."""
    torch_dtype = to_torch_dtype(dtype or floatx())
    # Do not use generator during symbolic execution.
    if get_device() == "meta":
        return torch.normal(
            mean, stddev, size=shape, dtype=torch_dtype, device=get_device()
        )
    return torch.normal(
        mean,
        stddev,
        size=shape,
        generator=torch_seed_generator(seed),
        dtype=torch_dtype,
        device=get_device(),
    )
def categorical(logits, num_samples, dtype="int32", seed=None):
    """Draw `num_samples` class indices per row of `logits`."""
    out_dtype = to_torch_dtype(dtype)
    probs = torch.softmax(convert_to_tensor(logits), dim=-1)
    # Do not use generator during symbolic execution.
    if get_device() == "meta":
        samples = torch.multinomial(
            probs,
            num_samples,
            replacement=True,
        )
    else:
        samples = torch.multinomial(
            probs,
            num_samples,
            replacement=True,
            generator=torch_seed_generator(seed),
        )
    return samples.type(out_dtype)
def uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
    """Sample uniformly from `[minval, maxval)` with the given shape."""
    torch_dtype = to_torch_dtype(dtype or floatx())
    is_scalar = len(shape) == 0
    # torch.rand needs at least one dimension; draw a length-1 vector for
    # scalar requests and unwrap below.
    sample_shape = (1,) if is_scalar else shape
    # Do not use generator during symbolic execution.
    if get_device() == "meta":
        base = torch.rand(
            size=sample_shape, dtype=torch_dtype, device=get_device()
        )
    else:
        base = torch.rand(
            size=sample_shape,
            generator=torch_seed_generator(seed),
            dtype=torch_dtype,
            device=get_device(),
        )
    scaled = (maxval - minval) * base + minval
    return scaled[0] if is_scalar else scaled
def randint(shape, minval, maxval, dtype="int32", seed=None):
    """Sample integers uniformly from `[minval, maxval)`."""
    kwargs = dict(
        low=minval,
        high=maxval,
        size=shape,
        dtype=to_torch_dtype(dtype),
        device=get_device(),
    )
    # Do not use generator during symbolic execution.
    if get_device() != "meta":
        kwargs["generator"] = torch_seed_generator(seed)
    return torch.randint(**kwargs)
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
    """Sample from a normal distribution truncated to two standard
    deviations, then shifted/scaled by `mean` and `stddev`.

    Draws 4 standard-normal candidates per output element and keeps the
    first one that falls inside (-2, 2).
    """
    dtype = to_torch_dtype(dtype)
    # Take a larger standard normal dist, discard values outside 2 * stddev
    # Offset by mean and stddev
    x = normal(tuple(shape) + (4,), mean=0, stddev=1, dtype=dtype, seed=seed)
    valid = (x > -2) & (x < 2)
    # `max` over the boolean candidate axis yields the index of the first
    # in-range sample. NOTE(review): if none of the 4 candidates are in
    # range, index 0 is picked and an out-of-range value slips through —
    # presumably rare enough to be acceptable; confirm.
    indexes = valid.max(-1, keepdim=True)[1]
    trunc_x = torch.empty(shape, dtype=dtype, device=get_device())
    trunc_x.data.copy_(x.gather(-1, indexes).squeeze(-1))
    trunc_x.data.mul_(stddev).add_(mean)
    return trunc_x
def _get_concrete_noise_shape(inputs, noise_shape):
    """Resolve `noise_shape` against `inputs`, replacing `None` entries
    with the corresponding concrete input dimension."""
    if noise_shape is None:
        return inputs.shape
    return [
        inputs.shape[i] if dim is None else dim
        for i, dim in enumerate(noise_shape)
    ]
def dropout(inputs, rate, noise_shape=None, seed=None):
    """Randomly zero elements of `inputs` with probability `rate`,
    rescaling the kept elements by `1 / (1 - rate)`.

    A manual Bernoulli mask is used when the caller passed an explicit
    seed (anything other than a stateful `SeedGenerator` with no initial
    seed) or a custom `noise_shape`; otherwise the faster, unseedable
    `torch.nn.functional.dropout` is used.
    """
    if (
        seed is not None
        and not (isinstance(seed, SeedGenerator) and seed._initial_seed is None)
        or noise_shape is not None
    ):
        keep_prob = 1.0 - rate
        noise_shape = _get_concrete_noise_shape(inputs, noise_shape)
        keep_prob_matrix = torch.full(
            noise_shape, keep_prob, device=get_device()
        )
        generator = torch_seed_generator(seed)
        # Do not use generator during symbolic execution.
        if get_device() == "meta":
            mask = torch.bernoulli(keep_prob_matrix)
        else:
            mask = torch.bernoulli(keep_prob_matrix, generator=generator)
        mask = mask.bool()
        mask = torch.broadcast_to(mask, inputs.shape)
        return torch.where(
            mask,
            inputs / keep_prob,
            torch.zeros_like(inputs, dtype=inputs.dtype),
        )
    # Fast path, unseeded (since torch doesn't support seeding dropout!!!!)
    # Using the above implementation is possible, but much slower.
    return torch.nn.functional.dropout(
        inputs, p=rate, training=True, inplace=False
    )
def shuffle(x, axis=0, seed=None):
    """Shuffle `x` along `axis`.

    torch has no direct axis-shuffle op, so random keys are drawn and
    argsorted to produce a permutation, which is then applied via
    `gather` (see the linked issue).
    """
    # Ref: https://github.com/pytorch/pytorch/issues/71409
    x = convert_to_tensor(x)
    # Get permutation indices
    # Do not use generator during symbolic execution.
    if get_device() == "meta":
        row_perm = torch.rand(x.shape[: axis + 1], device=get_device()).argsort(
            axis
        )
    else:
        generator = torch_seed_generator(seed)
        row_perm = torch.rand(
            x.shape[: axis + 1], generator=generator, device=get_device()
        ).argsort(axis)
    # Expand the permutation with trailing singleton axes so it can be
    # tiled across the remaining dimensions of `x`.
    for _ in range(x.ndim - axis - 1):
        row_perm.unsqueeze_(-1)
    # Reformat this for the gather operation
    row_perm = row_perm.repeat(
        *[1 for _ in range(axis + 1)], *(x.shape[axis + 1 :])
    )
    return x.gather(axis, row_perm)
def gamma(shape, alpha, dtype=None, seed=None):
    """Sample from a Gamma(alpha, 1) distribution.

    `torch.distributions` does not accept a `Generator`, so the global
    RNG state is saved, seeded, and restored around the draw.
    """
    dtype = dtype or floatx()
    dtype = to_torch_dtype(dtype)
    alpha = torch.broadcast_to(convert_to_tensor(alpha), shape)
    # Rate parameter fixed at 1.
    beta = torch.ones(shape, device=get_device())
    prev_rng_state = torch.random.get_rng_state()
    # Do not draw seed during symbolic execution
    if not get_device() == "meta":
        first_seed, second_seed = draw_seed(seed)
        torch.manual_seed(first_seed + second_seed)
    gamma_distribution = torch.distributions.gamma.Gamma(alpha, beta)
    sample = gamma_distribution.sample().type(dtype)
    torch.random.set_rng_state(prev_rng_state)
    return sample
def binomial(shape, counts, probabilities, dtype=None, seed=None):
    """Sample from a Binomial(counts, probabilities) distribution.

    `torch.distributions` does not accept a `Generator`, so the global
    RNG state is saved, seeded, and restored around the draw.
    """
    dtype = dtype or floatx()
    dtype = to_torch_dtype(dtype)
    counts = torch.broadcast_to(convert_to_tensor(counts), shape)
    probabilities = torch.broadcast_to(convert_to_tensor(probabilities), shape)
    prev_rng_state = torch.random.get_rng_state()
    # Do not draw seed during symbolic execution
    if not get_device() == "meta":
        first_seed, second_seed = draw_seed(seed)
        torch.manual_seed(first_seed + second_seed)
    binomial_distribution = torch.distributions.binomial.Binomial(
        total_count=counts, probs=probabilities
    )
    sample = binomial_distribution.sample().type(dtype)
    torch.random.set_rng_state(prev_rng_state)
    return sample
def beta(shape, alpha, beta, dtype=None, seed=None):
    """Sample from a Beta(alpha, beta) distribution.

    `torch.distributions` does not accept a `Generator`, so the global
    RNG state is saved, seeded, and restored around the draw.
    """
    dtype = dtype or floatx()
    dtype = to_torch_dtype(dtype)
    alpha = torch.broadcast_to(convert_to_tensor(alpha), shape)
    beta = torch.broadcast_to(convert_to_tensor(beta), shape)
    prev_rng_state = torch.random.get_rng_state()
    # Do not draw seed during symbolic execution
    if not get_device() == "meta":
        first_seed, second_seed = draw_seed(seed)
        torch.manual_seed(first_seed + second_seed)
    beta_distribution = torch.distributions.beta.Beta(
        concentration1=alpha, concentration0=beta
    )
    sample = beta_distribution.sample().type(dtype)
    torch.random.set_rng_state(prev_rng_state)
    return sample
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/torch/__init__.py | keras/src/backend/torch/__init__.py | """Torch backend APIs.
# Note on device placement
Torch has a different device placement style compared to TF and JAX.
In short, variables/tensors are not created on GPU by default,
and the GPU cannot directly communicate with the CPU.
To bring Torch behavior in line with TF and JAX automated device placement,
we are doing the following to automate device placement if a GPU is available:
- Variables are created on GPU.
- Input data will be placed on GPU at the first `keras.layers.Layer` call.
- Tensor creation happens on GPU, e.g., `zeros()` will create a tensor on GPU.
- `convert_to_numpy` will bring the tensor to CPU before converting it to NumPy.
"""
from keras.src.backend.common.name_scope import name_scope
from keras.src.backend.torch import core
from keras.src.backend.torch import image
from keras.src.backend.torch import linalg
from keras.src.backend.torch import math
from keras.src.backend.torch import nn
from keras.src.backend.torch import numpy
from keras.src.backend.torch import random
from keras.src.backend.torch.core import IS_THREAD_SAFE
from keras.src.backend.torch.core import SUPPORTS_RAGGED_TENSORS
from keras.src.backend.torch.core import SUPPORTS_SPARSE_TENSORS
from keras.src.backend.torch.core import Variable
from keras.src.backend.torch.core import cast
from keras.src.backend.torch.core import compute_output_spec
from keras.src.backend.torch.core import cond
from keras.src.backend.torch.core import convert_to_numpy
from keras.src.backend.torch.core import convert_to_tensor
from keras.src.backend.torch.core import device_scope
from keras.src.backend.torch.core import is_tensor
from keras.src.backend.torch.core import random_seed_dtype
from keras.src.backend.torch.core import scatter
from keras.src.backend.torch.core import shape
from keras.src.backend.torch.core import stop_gradient
from keras.src.backend.torch.core import to_torch_dtype
from keras.src.backend.torch.core import vectorized_map
from keras.src.backend.torch.rnn import cudnn_ok
from keras.src.backend.torch.rnn import gru
from keras.src.backend.torch.rnn import lstm
from keras.src.backend.torch.rnn import rnn
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/torch/math.py | keras/src/backend/torch/math.py | import math
import torch
from keras.src.backend import config
from keras.src.backend import standardize_dtype
from keras.src.backend.common import dtypes
from keras.src.backend.torch.core import cast
from keras.src.backend.torch.core import convert_to_tensor
from keras.src.backend.torch.core import get_device
from keras.src.backend.torch.numpy import pad
def _segment_reduction_fn(data, segment_ids, reduction_method, num_segments):
    """Reduce rows of `data` into per-segment buckets via `scatter_reduce`.

    Args:
        data: Tensor whose leading axis is indexed by `segment_ids`.
        segment_ids: 1D integer ids, one per leading-axis row of `data`;
            negative or out-of-range ids are ignored.
        reduction_method: A `scatter_reduce` mode, e.g. `"sum"` or `"amax"`.
        num_segments: Number of output segments; inferred from the unique
            ids when `None`.

    Returns:
        Tensor of shape `(num_segments,) + data.shape[1:]` with `data`'s
        dtype.
    """
    num_repeats = torch.prod(
        torch.tensor(data.shape[1:], device=get_device())
    ).long()
    # To use `scatter_add` in torch, we need to replicate `segment_ids` into the
    # shape of `data`.
    segment_ids = (
        segment_ids.repeat_interleave(num_repeats)
        .view(*data.shape)
        .type(torch.int64)
    )
    num_segments = num_segments or len(torch.unique(segment_ids))
    # .scatter_add does not support -1 in the indices.
    # Add all out-of-bound indices value to an extra dimension after
    # num_segments, which is removed before returning the result.
    # Replacing the out-of-bound indices.
    segment_ids = torch.where(segment_ids >= 0, segment_ids, num_segments)
    segment_ids = torch.where(
        segment_ids < num_segments, segment_ids, num_segments
    )
    # Add one more dimension to the result shape with the "+1".
    shape = (num_segments + 1,) + tuple(data.shape[1:])
    if reduction_method == "amax":
        # -inf is the identity for max so untouched buckets stay -inf.
        result = torch.ones(*shape, device=get_device()) * -float("Inf")
    else:
        result = torch.zeros(*shape, device=get_device())
    # NOTE(review): the reduction runs in float32 (`data.float()` into a
    # float buffer) and is cast back below — confirm this precision loss
    # is acceptable for integer/float64 inputs.
    result = result.scatter_reduce(
        0, segment_ids, data.float(), reduction_method
    )
    # Removing the extra dimension.
    result = result[:-1, ...]
    return result.type(data.dtype)
def segment_sum(data, segment_ids, num_segments=None, sorted=False):
    """Sum rows of `data` into buckets selected by `segment_ids`."""
    return _segment_reduction_fn(
        convert_to_tensor(data),
        convert_to_tensor(segment_ids),
        "sum",
        num_segments,
    )
def segment_max(data, segment_ids, num_segments=None, sorted=False):
    """Take the max of `data` rows within buckets selected by `segment_ids`."""
    return _segment_reduction_fn(
        convert_to_tensor(data),
        convert_to_tensor(segment_ids),
        "amax",
        num_segments,
    )
def top_k(x, k, sorted=True):
    """Return the `k` largest values and their indices along the last axis."""
    return torch.topk(convert_to_tensor(x), k, sorted=sorted)
def in_top_k(targets, predictions, k):
    """Return whether each target index lands in the top-`k` predictions."""
    predictions = convert_to_tensor(predictions)
    targets = convert_to_tensor(targets).type(torch.int64)[:, None]
    top_values = top_k(predictions, k).values
    # A target is "in the top k" when its score ties or beats one of the
    # k largest predicted values for that row.
    target_scores = torch.take_along_dim(predictions, targets, dim=-1)
    return torch.any(target_scores >= top_values, axis=-1)
def logsumexp(x, axis=None, keepdims=False):
    """Numerically stable `log(sum(exp(x)))` over `axis` (all axes if None)."""
    x = convert_to_tensor(x)
    if axis is None:
        axis = tuple(range(x.dim()))
    return torch.logsumexp(x, dim=axis, keepdim=keepdims)
def qr(x, mode="reduced"):
    """Compute the QR decomposition of `x`.

    Args:
        x: Input tensor.
        mode: Either `"reduced"` or `"complete"`.

    Returns:
        The `(Q, R)` pair from `torch.linalg.qr`.

    Raises:
        ValueError: If `mode` is not `"reduced"` or `"complete"`.
    """
    # Validate `mode` before doing any conversion work.
    if mode not in {"reduced", "complete"}:
        raise ValueError(
            "`mode` argument value not supported. "
            "Expected one of {'reduced', 'complete'}. "
            f"Received: mode={mode}"
        )
    # The original implementation converted `x` twice; once is enough.
    x = convert_to_tensor(x)
    return torch.linalg.qr(x, mode=mode)
def extract_sequences(x, sequence_length, sequence_stride):
    """Frame the last axis of `x` into windows of `sequence_length`
    advancing by `sequence_stride`."""
    return torch.unfold_copy(
        convert_to_tensor(x),
        dimension=-1,
        size=sequence_length,
        step=sequence_stride,
    )
def _overlap_sequences(x, sequence_stride):
    """Overlap-add frames along the last two axes of `x`.

    Inverse of framing: sums windows of length `x.shape[-1]` placed
    `sequence_stride` apart into a single signal of length
    `sequence_stride * (num_sequences - 1) + sequence_length`.

    Raises:
        ValueError: If `sequence_stride` exceeds the frame length, or is
            smaller than `sequence_length / num_sequences`.
    """
    # Ref: https://github.com/google/jax/blob/main/jax/_src/scipy/signal.py
    x = convert_to_tensor(x)
    *batch_shape, num_sequences, sequence_length = x.shape
    if sequence_stride > sequence_length:
        raise ValueError(
            "`sequence_stride` must equal or less than x.shape[-1]. "
            f"Received: sequence_stride={sequence_stride}, "
            f"x.shape[-1]={sequence_length}"
        )
    if sequence_stride < (sequence_length / num_sequences):
        raise ValueError(
            "`sequence_stride` must equal or greater than "
            "x.shape[-1] / x.shape[-2]. "
            f"Received: sequence_stride={sequence_stride}, "
            f"x.shape[-1]={sequence_length}, x.shape[-2]={num_sequences}"
        )
    # Collapse the batch dimensions so the overlap-add works on a 3D view.
    flat_batchsize = math.prod(batch_shape)
    x = torch.reshape(x, (flat_batchsize, num_sequences, sequence_length))
    output_size = sequence_stride * (num_sequences - 1) + sequence_length
    nstep_per_segment = 1 + (sequence_length - 1) // sequence_stride
    # Here, we use shorter notation for axes.
    # B: batch_size, N: num_sequences, S: nstep_per_segment,
    # T: sequence_length divided by S
    padded_segment_len = nstep_per_segment * sequence_stride
    x = torch.nn.functional.pad(
        x, (0, padded_segment_len - sequence_length, 0, 0, 0, 0)
    )
    x = torch.reshape(
        x, (flat_batchsize, num_sequences, nstep_per_segment, sequence_stride)
    )
    # For obtaining shifted signals, this routine reinterprets flattened array
    # with a shrinked axis. With appropriate truncation/ padding, this
    # operation pushes the last padded elements of the previous row to the head
    # of the current row.
    # See implementation of `overlap_and_add` in Tensorflow for details.
    x = torch.permute(x, (0, 2, 1, 3))  # x: (B, S, N, T)
    x = torch.nn.functional.pad(x, (0, 0, 0, num_sequences, 0, 0, 0, 0))
    # x: (B, S, N*2, T)
    shrinked = x.shape[2] - 1
    x = torch.reshape(x, (flat_batchsize, -1))
    x = x[:, : (nstep_per_segment * shrinked * sequence_stride)]
    x = torch.reshape(
        x, (flat_batchsize, nstep_per_segment, shrinked * sequence_stride)
    )
    # Finally, sum shifted segments, and truncate results to the output_size.
    x = torch.sum(x, dim=1)[:, :output_size]
    # Restore the original batch dimensions.
    return torch.reshape(x, tuple(batch_shape) + (-1,))
def _get_complex_tensor_from_tuple(x):
    """Build a complex tensor from a `(real, imag)` pair.

    Raises:
        ValueError: If `x` is not a 2-element tuple/list, if the two
            parts differ in shape, or if either part is not floating
            point.
    """
    if not isinstance(x, (tuple, list)) or len(x) != 2:
        raise ValueError(
            "Input `x` should be a tuple of two tensors - real and imaginary."
            f"Received: x={x}"
        )
    real, imag = x
    # `convert_to_tensor` does not support passing complex tensors, so
    # the real and imaginary parts are converted separately.
    real = convert_to_tensor(real)
    imag = convert_to_tensor(imag)
    if real.shape != imag.shape:
        raise ValueError(
            "Input `x` should be a tuple of two tensors - real and imaginary."
            "Both the real and imaginary parts should have the same shape. "
            f"Received: x[0].shape = {real.shape}, x[1].shape = {imag.shape}"
        )
    if not torch.is_floating_point(real) or not torch.is_floating_point(imag):
        raise ValueError(
            "At least one tensor in input `x` is not of type float."
            f"Received: x={x}."
        )
    return torch.complex(real, imag)
def fft(x):
    """1D FFT; accepts and returns `(real, imag)` tensor pairs."""
    out = torch.fft.fft(_get_complex_tensor_from_tuple(x))
    return out.real, out.imag
def fft2(x):
    """2D FFT; accepts and returns `(real, imag)` tensor pairs."""
    out = torch.fft.fft2(_get_complex_tensor_from_tuple(x))
    return out.real, out.imag
def ifft2(x):
    """Inverse 2D FFT; accepts and returns `(real, imag)` tensor pairs."""
    out = torch.fft.ifft2(_get_complex_tensor_from_tuple(x))
    return out.real, out.imag
def rfft(x, fft_length=None):
    """Real-input FFT over the last axis, returned as `(real, imag)`."""
    out = torch.fft.rfft(
        convert_to_tensor(x), n=fft_length, dim=-1, norm="backward"
    )
    return out.real, out.imag
def irfft(x, fft_length=None):
    """Inverse of `rfft`; `x` is a `(real, imag)` pair, output is real."""
    complex_input = _get_complex_tensor_from_tuple(x)
    return torch.fft.irfft(
        complex_input, n=fft_length, dim=-1, norm="backward"
    )
def stft(
    x, sequence_length, sequence_stride, fft_length, window="hann", center=True
):
    """Short-time Fourier transform of `x`.

    Args:
        x: Real tensor of dtype float32 or float64; last axis is time.
        sequence_length: Window length in samples.
        sequence_stride: Hop length in samples.
        fft_length: FFT size; must be >= `sequence_length`.
        window: `"hann"`, `"hamming"`, a 1D tensor of length
            `sequence_length`, or `None` for a rectangular window.
        center: Whether frames are centered (passed to `torch.stft`).

    Returns:
        `(real, imag)` pair with frames on axis -2 and FFT bins on axis -1.

    Raises:
        TypeError: If `x` is not float32/float64.
        ValueError: If `fft_length < sequence_length` or `window` is
            invalid.
    """
    if standardize_dtype(x.dtype) not in {"float32", "float64"}:
        raise TypeError(
            "Invalid input type. Expected `float32` or `float64`. "
            f"Received: input type={x.dtype}"
        )
    if fft_length < sequence_length:
        raise ValueError(
            "`fft_length` must equal or larger than `sequence_length`. "
            f"Received: sequence_length={sequence_length}, "
            f"fft_length={fft_length}"
        )
    if isinstance(window, str):
        if window not in {"hann", "hamming"}:
            raise ValueError(
                "If a string is passed to `window`, it must be one of "
                f'`"hann"`, `"hamming"`. Received: window={window}'
            )
    x = convert_to_tensor(x)
    # Materialize the window tensor on the current device.
    if window is not None:
        if isinstance(window, str):
            if window == "hann":
                win = torch.hann_window(
                    sequence_length,
                    periodic=True,
                    dtype=x.dtype,
                    device=get_device(),
                )
            else:
                win = torch.hamming_window(
                    sequence_length,
                    periodic=True,
                    dtype=x.dtype,
                    device=get_device(),
                )
        else:
            win = convert_to_tensor(window, dtype=x.dtype)
            if len(win.shape) != 1 or win.shape[-1] != sequence_length:
                raise ValueError(
                    "The shape of `window` must be equal to [sequence_length]."
                    f"Received: window shape={win.shape}"
                )
    else:
        # No window requested: use a rectangular (all-ones) window.
        win = torch.ones((sequence_length,), dtype=x.dtype, device=get_device())
    # torch.stft only supports 1D/2D input, so collapse any extra batch
    # dimensions and restore them after the transform.
    need_unpack = False
    *batch_shape, samples = x.shape
    if len(x.shape) > 2:
        need_unpack = True
        flat_batchsize = math.prod(batch_shape)
        x = torch.reshape(x, (flat_batchsize, samples))
    x = torch.stft(
        x,
        n_fft=fft_length,
        hop_length=sequence_stride,
        win_length=sequence_length,
        window=win,
        center=center,
        return_complex=True,
    )
    if need_unpack:
        fft_unique_bins, num_sequences = x.shape[-2:]
        x = torch.reshape(x, (*batch_shape, fft_unique_bins, num_sequences))
    # torch returns (..., bins, frames); swap to (..., frames, bins).
    x = torch.swapaxes(x, -2, -1)
    return x.real, x.imag
def istft(
    x,
    sequence_length,
    sequence_stride,
    fft_length,
    length=None,
    window="hann",
    center=True,
):
    """Inverse short-time Fourier transform.

    Args:
        x: Tuple `(real, imag)` of the STFT, frames on axis -2 and
            frequency bins on axis -1 (the layout produced by `stft`).
        sequence_length: Window length in samples.
        sequence_stride: Hop size between consecutive frames.
        fft_length: FFT size used by the forward transform.
        length: Optional number of output samples to keep.
        window: `"hann"`, `"hamming"`, a 1-D tensor of shape
            `[sequence_length]`, or `None`.
        center: Whether the forward transform used centered frames.

    Returns:
        The reconstructed real signal.

    Raises:
        ValueError: If a tensor `window` has the wrong shape.
    """
    complex_input = _get_complex_tensor_from_tuple(x)
    dtype = complex_input.real.dtype
    win = None
    if window is not None:
        if isinstance(window, str):
            if window == "hann":
                win = torch.hann_window(
                    sequence_length,
                    periodic=True,
                    dtype=dtype,
                    device=get_device(),
                )
            else:
                win = torch.hamming_window(
                    sequence_length,
                    periodic=True,
                    dtype=dtype,
                    device=get_device(),
                )
        else:
            win = convert_to_tensor(window, dtype=dtype)
            if len(win.shape) != 1 or win.shape[-1] != sequence_length:
                raise ValueError(
                    "The shape of `window` must be equal to [sequence_length]."
                    f"Received: window shape={win.shape}"
                )
    if sequence_length == fft_length and center is True and win is not None:
        # can be fallen back to torch.istft
        # Fast path: torch.istft matches this configuration exactly.
        # It only accepts <=3-D input, so flatten leading batch dims.
        need_unpack = False
        *batch_shape, num_sequences, fft_unique_bins = complex_input.shape
        if len(complex_input.shape) > 3:
            need_unpack = True
            flat_batchsize = math.prod(batch_shape)
            complex_input = torch.reshape(
                complex_input, (flat_batchsize, num_sequences, fft_unique_bins)
            )
        # torch.istft expects (..., freq_bins, frames).
        complex_input = torch.swapaxes(complex_input, -2, -1)
        x = torch.istft(
            complex_input,
            n_fft=fft_length,
            hop_length=sequence_stride,
            win_length=sequence_length,
            window=win,
            center=center,
            length=length,
            return_complex=False,
        )
        if need_unpack:
            samples = x.shape[-1]
            x = torch.reshape(x, (*batch_shape, samples))
        return x
    # custom implementation with irfft and _overlap_sequences
    # references:
    # torch: aten/src/ATen/native/SpectralOps.cpp
    # tf: tf.signal.inverse_stft_window_fn
    x = irfft(x, fft_length)
    expected_output_len = fft_length + sequence_stride * (x.shape[-2] - 1)
    if win is not None:
        # Center the window inside the fft_length-sized frame.
        l_pad = (fft_length - sequence_length) // 2
        r_pad = fft_length - sequence_length - l_pad
        win = pad(win, [[l_pad, r_pad]], "constant")
        # square and sum
        # Build the overlap-add normalization denominator: the sum of
        # squared window values contributed by all overlapping frames.
        _sequence_length = sequence_length + l_pad + r_pad
        denom = torch.square(win)
        # Ceil division: number of frames overlapping any given sample.
        overlaps = -(-_sequence_length // sequence_stride)
        denom = pad(denom, [(0, overlaps * sequence_stride - _sequence_length)])
        denom = torch.reshape(denom, [overlaps, sequence_stride])
        denom = torch.sum(denom, 0, keepdims=True)
        denom = torch.tile(denom, [overlaps, 1])
        denom = torch.reshape(denom, [overlaps * sequence_stride])
        win = torch.divide(win, denom[:_sequence_length])
        x = torch.multiply(x, win)
    x = _overlap_sequences(x, sequence_stride)
    # Trim the padding introduced by centering and honor `length`.
    start = 0 if center is False else fft_length // 2
    if length is not None:
        end = start + length
    elif center is True:
        end = -(fft_length // 2)
    else:
        end = expected_output_len
    return x[..., start:end]
def rsqrt(x):
    """Elementwise reciprocal square root, `1 / sqrt(x)`."""
    return torch.rsqrt(convert_to_tensor(x))
def erf(x):
    """Elementwise Gauss error function."""
    return torch.erf(convert_to_tensor(x))
def erfinv(x):
    """Elementwise inverse of the Gauss error function."""
    return torch.erfinv(convert_to_tensor(x))
def solve(a, b):
    """Solve the linear system `a @ x = b` for `x`."""
    lhs = convert_to_tensor(a)
    rhs = convert_to_tensor(b)
    return torch.linalg.solve(lhs, rhs)
def norm(x, ord=None, axis=None, keepdims=False):
    """Matrix or vector norm via `torch.linalg.norm`.

    The input is promoted to a floating dtype before the computation:
    int64 maps to the configured floatx, everything else follows the
    usual type-promotion rules against `float`.
    """
    x = convert_to_tensor(x)
    if standardize_dtype(x.dtype) == "int64":
        compute_dtype = config.floatx()
    else:
        compute_dtype = dtypes.result_type(x.dtype, float)
    return torch.linalg.norm(
        cast(x, compute_dtype), ord=ord, dim=axis, keepdim=keepdims
    )
def logdet(x):
    """Natural log of the determinant of a square matrix."""
    return torch.logdet(convert_to_tensor(x))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/torch/optimizers/torch_lion.py | keras/src/backend/torch/optimizers/torch_lion.py | import torch
from keras.src import ops
from keras.src import optimizers
from keras.src.backend.torch.optimizers import torch_parallel_optimizer
class Lion(torch_parallel_optimizer.TorchParallelOptimizer, optimizers.Lion):
    """Lion optimizer backed by torch's fused `_foreach_*` kernels.

    Updates all variables in one batched step instead of looping over
    them individually.
    """

    def _parallel_update_step(
        self,
        grads,
        variables,
        learning_rate,
    ):
        # Unwrap Keras variables to raw torch tensors so the in-place
        # `_foreach_*` ops mutate the actual weights.
        keras_variables = variables
        variables = [v.value for v in variables]

        dtype = variables[0].dtype
        lr = ops.cast(learning_rate, dtype)
        m_list = [
            self._momentums[self._get_variable_index(variable)].value
            for variable in keras_variables
        ]
        # c_t = sign(beta_1 * m + (1 - beta_1) * g)
        c_t = torch._foreach_mul(m_list, self.beta_1)
        torch._foreach_add_(c_t, grads, alpha=1 - self.beta_1)
        c_t = [c.sign() for c in c_t]

        # w <- w - lr * c_t
        torch._foreach_add_(
            variables,
            torch._foreach_mul(c_t, lr),
            alpha=-1,
        )
        # m <- beta_2 * m + (1 - beta_2) * g
        torch._foreach_mul_(m_list, self.beta_2)
        torch._foreach_add_(m_list, grads, alpha=1 - self.beta_2)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/torch/optimizers/torch_nadam.py | keras/src/backend/torch/optimizers/torch_nadam.py | import torch
from keras.src import ops
from keras.src import optimizers
from keras.src.backend.torch import core
from keras.src.backend.torch.optimizers import torch_parallel_optimizer
class Nadam(torch_parallel_optimizer.TorchParallelOptimizer, optimizers.Nadam):
    """Nadam optimizer backed by torch's fused `_foreach_*` kernels."""

    def _parallel_update_step(
        self,
        grads,
        variables,
        learning_rate,
    ):
        # Unwrap Keras variables to raw torch tensors so the in-place
        # `_foreach_*` ops mutate the actual weights.
        keras_variables = variables
        variables = [v.value for v in variables]

        dtype = variables[0].dtype
        lr = ops.cast(learning_rate, dtype)
        local_step = ops.cast(self.iterations + 1, dtype)
        next_step = ops.cast(self.iterations + 2, dtype)
        decay = ops.cast(0.96, dtype)
        beta_1 = ops.cast(self.beta_1, dtype)
        beta_2 = ops.cast(self.beta_2, dtype)
        # Momentum-schedule coefficients for the current and next step,
        # and their running product (used for Nadam's bias correction).
        u_t = beta_1 * (1.0 - 0.5 * (ops.power(decay, local_step)))
        u_t_1 = beta_1 * (1.0 - 0.5 * (ops.power(decay, next_step)))
        u_product_t = self._u_product.value * u_t
        u_product_t_1 = u_product_t * u_t_1
        beta_2_power = ops.power(beta_2, local_step)
        self._u_product.assign(u_product_t)
        m_list = [
            self._momentums[self._get_variable_index(variable)].value
            for variable in keras_variables
        ]
        v_list = [
            self._velocities[self._get_variable_index(variable)].value
            for variable in keras_variables
        ]

        # m <- beta_1 * m + (1 - beta_1) * g
        torch._foreach_mul_(m_list, self.beta_1)
        torch._foreach_add_(m_list, grads, alpha=1 - self.beta_1)
        # v <- beta_2 * v + (1 - beta_2) * g^2
        torch._foreach_mul_(v_list, self.beta_2)
        torch._foreach_add_(
            v_list, torch._foreach_mul(grads, grads), alpha=1 - self.beta_2
        )
        # Nesterov-corrected, bias-corrected first moment estimate.
        m_hat_list = torch._foreach_add(
            torch._foreach_div(
                torch._foreach_mul(m_list, u_t_1),
                1 - core.convert_to_numpy(u_product_t_1),
            ),
            torch._foreach_div(
                torch._foreach_mul(grads, 1 - u_t),
                1 - core.convert_to_numpy(u_product_t),
            ),
        )
        v_hat_list = torch._foreach_div(v_list, 1 - beta_2_power)

        # w <- w - lr * m_hat / (sqrt(v_hat) + eps)
        torch._foreach_add_(
            variables,
            torch._foreach_div(
                torch._foreach_mul(m_hat_list, lr),
                torch._foreach_add(
                    torch._foreach_sqrt(v_hat_list), self.epsilon
                ),
            ),
            alpha=-1,
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/torch/optimizers/torch_adamax.py | keras/src/backend/torch/optimizers/torch_adamax.py | import torch
from keras.src import ops
from keras.src import optimizers
from keras.src.backend.torch.optimizers import torch_parallel_optimizer
class Adamax(
    torch_parallel_optimizer.TorchParallelOptimizer, optimizers.Adamax
):
    """Adamax optimizer backed by torch's fused `_foreach_*` kernels."""

    def _parallel_update_step(
        self,
        grads,
        variables,
        learning_rate,
    ):
        # Unwrap Keras variables to raw torch tensors so the in-place
        # `_foreach_*` ops mutate the actual weights.
        keras_variables = variables
        variables = [v.value for v in variables]

        dtype = variables[0].dtype
        lr = ops.cast(learning_rate, dtype)
        local_step = ops.cast(self.iterations + 1, dtype)
        beta_1_power = ops.power(ops.cast(self.beta_1, dtype), local_step)
        m_list = [
            self._m[self._get_variable_index(variable)].value
            for variable in keras_variables
        ]
        u_list = [
            self._u[self._get_variable_index(variable)].value
            for variable in keras_variables
        ]

        # m <- beta_1 * m + (1 - beta_1) * g
        torch._foreach_mul_(m_list, self.beta_1)
        torch._foreach_add_(m_list, grads, alpha=1 - self.beta_1)
        # u <- max(beta_2 * u, |g|)  (infinity-norm based second moment)
        torch._foreach_mul_(u_list, self.beta_2)
        torch._foreach_maximum_(u_list, torch._foreach_abs(grads))

        # w <- w - lr * m / ((u + eps) * (1 - beta_1^t))
        torch._foreach_add_(
            variables,
            torch._foreach_div(
                torch._foreach_mul(m_list, lr),
                torch._foreach_mul(
                    torch._foreach_add(u_list, self.epsilon),
                    1 - beta_1_power,
                ),
            ),
            alpha=-1,
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/torch/optimizers/torch_rmsprop.py | keras/src/backend/torch/optimizers/torch_rmsprop.py | import torch
from keras.src import ops
from keras.src import optimizers
from keras.src.backend.torch.optimizers import torch_parallel_optimizer
class RMSprop(
    torch_parallel_optimizer.TorchParallelOptimizer, optimizers.RMSprop
):
    """RMSprop optimizer backed by torch's fused `_foreach_*` kernels."""

    def _parallel_update_step(
        self,
        grads,
        variables,
        learning_rate,
    ):
        # Unwrap Keras variables to raw torch tensors so the in-place
        # `_foreach_*` ops mutate the actual weights.
        keras_variables = variables
        variables = [v.value for v in variables]

        dtype = variables[0].dtype
        lr = ops.cast(learning_rate, dtype)
        velocities = [
            self._velocities[self._get_variable_index(variable)].value
            for variable in keras_variables
        ]
        rho = self.rho

        # v <- rho * v + (1 - rho) * g^2
        torch._foreach_mul_(velocities, rho)
        torch._foreach_add_(
            velocities, torch._foreach_mul(grads, grads), alpha=1 - rho
        )
        denominators = torch._foreach_add(velocities, self.epsilon)
        if self.centered:
            # Centered variant: subtract the squared running mean of the
            # gradients from the denominator before the square root.
            average_grads = [
                self._average_gradients[
                    self._get_variable_index(variable)
                ].value
                for variable in keras_variables
            ]
            torch._foreach_mul_(average_grads, rho)
            torch._foreach_add_(average_grads, grads, alpha=1 - rho)
            torch._foreach_add_(
                denominators,
                torch._foreach_mul(average_grads, average_grads),
                alpha=-1,
            )
        torch._foreach_sqrt_(denominators)
        increments = torch._foreach_div(
            torch._foreach_mul(grads, lr), denominators
        )

        if self.momentum > 0:
            # buf <- momentum * buf + increment; w <- w - buf
            momentum_list = [
                self._momentums[self._get_variable_index(variable)].value
                for variable in keras_variables
            ]
            torch._foreach_mul_(momentum_list, self.momentum)
            torch._foreach_add_(momentum_list, increments)
            torch._foreach_add_(variables, momentum_list, alpha=-1)
        else:
            torch._foreach_add_(variables, increments, alpha=-1)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/torch/optimizers/torch_sgd.py | keras/src/backend/torch/optimizers/torch_sgd.py | import torch
from keras.src import optimizers
from keras.src.backend.torch.optimizers import torch_parallel_optimizer
class SGD(torch_parallel_optimizer.TorchParallelOptimizer, optimizers.SGD):
    """SGD (with optional momentum/Nesterov) using fused `_foreach_*` ops."""

    def _parallel_update_step(
        self,
        grads,
        variables,
        learning_rate,
    ):
        # Unwrap Keras variables to raw torch tensors so the in-place
        # `_foreach_*` ops mutate the actual weights.
        keras_variables = variables
        variables = [v.value for v in variables]
        if self.momentum != 0:
            bufs = [
                self.momentums[self._get_variable_index(variable)].value
                for variable in keras_variables
            ]

            # Defensive init: materialize any missing momentum buffer
            # from the current gradient before the batched ops run.
            for i in range(len(bufs)):
                if bufs[i] is None:
                    bufs[i] = torch.clone(grads[i]).detach()

            # buf <- momentum * buf - lr * g
            torch._foreach_mul_(bufs, self.momentum)
            torch._foreach_add_(bufs, grads, alpha=-learning_rate)

            if self.nesterov:
                # w <- w - lr * g + momentum * buf
                torch._foreach_add_(variables, grads, alpha=-learning_rate)
                torch._foreach_add_(variables, bufs, alpha=self.momentum)
            else:
                # w <- w + buf
                torch._foreach_add_(variables, bufs)
        else:
            # Plain gradient descent: w <- w - lr * g
            torch._foreach_add_(variables, grads, alpha=-learning_rate)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/torch/optimizers/torch_optimizer.py | keras/src/backend/torch/optimizers/torch_optimizer.py | import torch
from keras.src import optimizers
from keras.src.optimizers.base_optimizer import BaseOptimizer
from keras.src.utils import torch_utils
class TorchOptimizer(BaseOptimizer):
    """Dispatch wrapper that swaps in torch-fused optimizer subclasses.

    Instantiating a generic Keras optimizer under the torch backend
    returns the corresponding `_foreach`-based implementation when one
    exists, so variable updates run as batched multi-tensor kernels.
    """

    def __new__(cls, *args, **kwargs):
        # Import locally to avoid circular imports.
        from keras.src.backend.torch.optimizers import torch_adadelta
        from keras.src.backend.torch.optimizers import torch_adagrad
        from keras.src.backend.torch.optimizers import torch_adam
        from keras.src.backend.torch.optimizers import torch_adamax
        from keras.src.backend.torch.optimizers import torch_adamw
        from keras.src.backend.torch.optimizers import torch_lion
        from keras.src.backend.torch.optimizers import torch_nadam
        from keras.src.backend.torch.optimizers import torch_rmsprop
        from keras.src.backend.torch.optimizers import torch_sgd

        OPTIMIZERS = {
            optimizers.Adadelta: torch_adadelta.Adadelta,
            optimizers.Adagrad: torch_adagrad.Adagrad,
            optimizers.Adam: torch_adam.Adam,
            optimizers.Adamax: torch_adamax.Adamax,
            optimizers.AdamW: torch_adamw.AdamW,
            optimizers.Lion: torch_lion.Lion,
            optimizers.Nadam: torch_nadam.Nadam,
            optimizers.RMSprop: torch_rmsprop.RMSprop,
            optimizers.SGD: torch_sgd.SGD,
        }
        if cls in OPTIMIZERS:
            return OPTIMIZERS[cls](*args, **kwargs)
        return super().__new__(cls)

    @torch_utils.no_grad
    def _apply_weight_decay(self, variables):
        """Apply decoupled weight decay in place to eligible variables."""
        if self.weight_decay is None:
            return
        decayed = [
            v.value for v in variables if self._use_weight_decay(v)
        ]
        # Bug fix: `torch._foreach_mul_` rejects an empty tensor list, so
        # skip the call when no variable is subject to weight decay.
        if not decayed:
            return
        torch._foreach_mul_(
            decayed,
            1 - self.weight_decay * self._get_current_learning_rate(),
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/torch/optimizers/torch_adagrad.py | keras/src/backend/torch/optimizers/torch_adagrad.py | import torch
from keras.src import ops
from keras.src import optimizers
from keras.src.backend.torch.optimizers import torch_parallel_optimizer
class Adagrad(
    torch_parallel_optimizer.TorchParallelOptimizer, optimizers.Adagrad
):
    """Adagrad optimizer backed by torch's fused `_foreach_*` kernels."""

    def _parallel_update_step(
        self,
        grads,
        variables,
        learning_rate,
    ):
        # Unwrap Keras variables to raw torch tensors so the in-place
        # `_foreach_*` ops mutate the actual weights.
        keras_variables = variables
        variables = [v.value for v in variables]

        dtype = variables[0].dtype
        lr = ops.cast(learning_rate, dtype)
        accumulators = [
            self._accumulators[self._get_variable_index(variable)].value
            for variable in keras_variables
        ]
        # acc <- acc + g^2
        torch._foreach_add_(accumulators, torch._foreach_mul(grads, grads))
        # w <- w - lr * g / sqrt(acc + eps)
        torch._foreach_add_(
            variables,
            torch._foreach_div(
                torch._foreach_mul(grads, lr),
                torch._foreach_sqrt(
                    torch._foreach_add(accumulators, self.epsilon)
                ),
            ),
            alpha=-1,
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/torch/optimizers/torch_adamw.py | keras/src/backend/torch/optimizers/torch_adamw.py | from keras.src import optimizers
from keras.src.backend.torch.optimizers import torch_adam
class AdamW(torch_adam.Adam, optimizers.AdamW):
    # AdamW reuses Adam's fused `_parallel_update_step`; the AdamW-specific
    # configuration (e.g. weight decay defaults) comes from the
    # `optimizers.AdamW` base class.
    pass
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/torch/optimizers/torch_adadelta.py | keras/src/backend/torch/optimizers/torch_adadelta.py | import torch
from keras.src import ops
from keras.src import optimizers
from keras.src.backend.torch.optimizers import torch_parallel_optimizer
class Adadelta(
    torch_parallel_optimizer.TorchParallelOptimizer, optimizers.Adadelta
):
    """Adadelta optimizer backed by torch's fused `_foreach_*` kernels."""

    def _parallel_update_step(
        self,
        grads,
        variables,
        learning_rate,
    ):
        # Unwrap Keras variables to raw torch tensors so the in-place
        # `_foreach_*` ops mutate the actual weights.
        keras_variables = variables
        variables = [v.value for v in variables]

        dtype = variables[0].dtype
        lr = ops.cast(learning_rate, dtype)
        rho = self.rho
        accumulated_grads = [
            self._accumulated_grads[self._get_variable_index(variable)].value
            for variable in keras_variables
        ]
        accumulated_delta_vars = [
            self._accumulated_delta_vars[
                self._get_variable_index(variable)
            ].value
            for variable in keras_variables
        ]

        # acc_grad <- rho * acc_grad + (1 - rho) * g^2
        torch._foreach_mul_(accumulated_grads, rho)
        torch._foreach_add_(
            accumulated_grads, torch._foreach_mul(grads, grads), alpha=1 - rho
        )

        def rms(x):
            # Elementwise sqrt(x + eps), batched over the tensor list.
            return torch._foreach_sqrt(torch._foreach_add(x, self.epsilon))

        # delta <- -(rms(acc_delta) / rms(acc_grad)) * g
        delta_vars = torch._foreach_mul(
            torch._foreach_div(
                torch._foreach_mul(rms(accumulated_delta_vars), grads),
                rms(accumulated_grads),
            ),
            -1,
        )
        # acc_delta <- rho * acc_delta + (1 - rho) * delta^2
        torch._foreach_mul_(accumulated_delta_vars, rho)
        torch._foreach_add_(
            accumulated_delta_vars,
            torch._foreach_mul(delta_vars, delta_vars),
            alpha=1 - rho,
        )
        # w <- w + lr * delta
        torch._foreach_add_(variables, delta_vars, alpha=lr)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/torch/optimizers/__init__.py | keras/src/backend/torch/optimizers/__init__.py | from keras.src.backend.torch.optimizers.torch_optimizer import TorchOptimizer
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/torch/optimizers/torch_adam.py | keras/src/backend/torch/optimizers/torch_adam.py | import torch
from keras.src import ops
from keras.src import optimizers
from keras.src.backend.torch.optimizers import torch_parallel_optimizer
class Adam(torch_parallel_optimizer.TorchParallelOptimizer, optimizers.Adam):
    """Adam optimizer backed by torch's fused `_foreach_*` kernels."""

    def _parallel_update_step(
        self,
        grads,
        variables,
        learning_rate,
    ):
        # Unwrap Keras variables to raw torch tensors so the in-place
        # `_foreach_*` ops mutate the actual weights.
        keras_variables = variables
        variables = [v.value for v in variables]

        dtype = variables[0].dtype
        lr = ops.cast(learning_rate, dtype)
        local_step = ops.cast(self.iterations + 1, dtype)
        beta_1_power = ops.power(ops.cast(self.beta_1, dtype), local_step)
        beta_2_power = ops.power(ops.cast(self.beta_2, dtype), local_step)
        # Bias-corrected step size.
        alpha = lr * ops.sqrt(1 - beta_2_power) / (1 - beta_1_power)
        m_list = [
            self._momentums[self._get_variable_index(variable)].value
            for variable in keras_variables
        ]
        v_list = [
            self._velocities[self._get_variable_index(variable)].value
            for variable in keras_variables
        ]

        # m <- beta_1 * m + (1 - beta_1) * g
        torch._foreach_mul_(m_list, self.beta_1)
        torch._foreach_add_(m_list, grads, alpha=1 - self.beta_1)
        # v <- beta_2 * v + (1 - beta_2) * g^2
        torch._foreach_mul_(v_list, self.beta_2)
        torch._foreach_add_(
            v_list, torch._foreach_mul(grads, grads), alpha=1 - self.beta_2
        )
        if self.amsgrad:
            # AMSGrad: use the running elementwise max of v instead of v.
            v_hat_list = [
                self._velocity_hats[self._get_variable_index(variable)].value
                for variable in keras_variables
            ]
            torch._foreach_maximum_(v_hat_list, v_list)
            v_list = v_hat_list

        # w <- w - alpha * m / (sqrt(v) + eps)
        torch._foreach_add_(
            variables,
            torch._foreach_div(
                torch._foreach_mul(m_list, alpha),
                torch._foreach_add(torch._foreach_sqrt(v_list), self.epsilon),
            ),
            alpha=-1,
        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/backend/torch/optimizers/torch_parallel_optimizer.py | keras/src/backend/torch/optimizers/torch_parallel_optimizer.py | import torch
from keras.src.optimizers.base_optimizer import BaseOptimizer
from keras.src.utils import torch_utils
class TorchParallelOptimizer(BaseOptimizer):
    """Mixin that runs optimizer updates as batched multi-tensor torch ops.

    Subclasses implement `_parallel_update_step`, which updates all
    trainable variables at once with `torch._foreach_*` kernels.
    """

    @torch_utils.no_grad
    def _backend_update_step(self, grads, trainable_variables, learning_rate):
        # Delegate to the subclass's fused update. Runs without autograd
        # tracking since weights are mutated in place.
        self._parallel_update_step(
            grads,
            trainable_variables,
            learning_rate,
        )

    @torch_utils.no_grad
    def _backend_reset_gradient_accumulators(self):
        # Zero all gradient accumulators in place, in one batched call.
        acc_list = [
            v.value for v in self._accumulated_gradients if v is not None
        ]
        torch._foreach_mul_(acc_list, 0.0)

    @torch_utils.no_grad
    def _backend_increment_gradient_accumulators(self, grads, acc_grads):
        # acc <- acc + g, batched across all accumulators.
        acc_list = [v.value for v in acc_grads]
        torch._foreach_add_(acc_list, grads, alpha=1.0)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/constraints/constraints_test.py | keras/src/constraints/constraints_test.py | import numpy as np
from keras.src import backend
from keras.src import constraints
from keras.src import testing
def get_example_array():
    """Return a deterministic 100x100 float array in [-50, 50).

    Seeds numpy's global RNG so every call yields identical data, and
    plants an exact zero at [0, 0] to exercise a possible edge case.
    """
    np.random.seed(3537)
    values = np.random.random((100, 100)) * 100.0 - 50.0
    values[0, 0] = 0.0  # Possible edge case
    return values
class ConstraintsTest(testing.TestCase):
    """Unit tests for the built-in weight constraints."""

    def test_max_norm(self):
        constraint_fn = constraints.MaxNorm(2.0)
        x = np.array([[0, 0, 0], [1.0, 0, 0], [3, 0, 0], [3, 3, 3]]).T
        target = np.array(
            [
                [0, 0, 0],
                [1.0, 0, 0],
                [2.0, 0, 0],
                [2.0 / np.sqrt(3), 2.0 / np.sqrt(3), 2.0 / np.sqrt(3)],
            ]
        ).T
        output = constraint_fn(x)
        self.assertAllClose(target, output)

    def test_non_neg(self):
        constraint_fn = constraints.NonNeg()
        output = constraint_fn(get_example_array())
        output = backend.convert_to_numpy(output)
        self.assertTrue((np.min(output, axis=1) >= 0.0).all())

    def test_unit_norm(self):
        constraint_fn = constraints.UnitNorm()
        output = constraint_fn(get_example_array())
        output = backend.convert_to_numpy(output)
        l2 = np.sqrt(np.sum(np.square(output), axis=0))
        self.assertAllClose(l2, 1.0)

    def test_min_max_norm(self):
        constraint_fn = constraints.MinMaxNorm(min_value=0.2, max_value=0.5)
        output = constraint_fn(get_example_array())
        output = backend.convert_to_numpy(output)
        l2 = np.sqrt(np.sum(np.square(output), axis=0))
        self.assertTrue(np.all(l2 >= 0.2))
        self.assertTrue(np.all(l2 <= 0.5 + 1e-6))

    def test_get_method(self):
        # Bug fix: this previously used `assertTrue(obj, UnitNorm)`, which
        # treats the second argument as a failure *message* and only
        # checks truthiness — the type was never asserted.
        obj = constraints.get("unit_norm")
        self.assertIsInstance(obj, constraints.UnitNorm)

        obj = constraints.get(None)
        self.assertIsNone(obj)

        with self.assertRaises(ValueError):
            constraints.get("typo")

    def test_default_constraint_call(self):
        constraint_fn = constraints.Constraint()
        x = np.array([1.0, 2.0, 3.0])
        output = constraint_fn(x)
        self.assertAllClose(x, output)

    def test_constraint_get_config(self):
        constraint_fn = constraints.Constraint()
        config = constraint_fn.get_config()
        self.assertEqual(config, {})

    def test_constraint_from_config(self):
        constraint_fn = constraints.Constraint()
        config = constraint_fn.get_config()
        recreated_constraint_fn = constraints.Constraint.from_config(config)
        self.assertIsInstance(recreated_constraint_fn, constraints.Constraint)

    def test_max_norm_get_config(self):
        constraint_fn = constraints.MaxNorm(max_value=3.0, axis=1)
        config = constraint_fn.get_config()
        expected_config = {"max_value": 3.0, "axis": 1}
        self.assertEqual(config, expected_config)

    def test_unit_norm_get_config(self):
        constraint_fn = constraints.UnitNorm(axis=1)
        config = constraint_fn.get_config()
        expected_config = {"axis": 1}
        self.assertEqual(config, expected_config)

    def test_min_max_norm_get_config(self):
        constraint_fn = constraints.MinMaxNorm(
            min_value=0.5, max_value=2.0, rate=0.7, axis=1
        )
        config = constraint_fn.get_config()
        expected_config = {
            "min_value": 0.5,
            "max_value": 2.0,
            "rate": 0.7,
            "axis": 1,
        }
        self.assertEqual(config, expected_config)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/constraints/constraints.py | keras/src/constraints/constraints.py | from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
@keras_export("keras.constraints.Constraint")
class Constraint:
    """Base class for weight constraints.

    A `Constraint` behaves like a stateless function: subclasses override
    `__call__()`, which receives a single weight tensor and returns a
    projected version of it (e.g. normalized or clipped). Constraints are
    attached to Keras layers through arguments such as `kernel_constraint`
    and `bias_constraint`.

    Here's a simple example of a non-negative weight constraint:

    >>> class NonNegative(keras.constraints.Constraint):
    ...
    ...     def __call__(self, w):
    ...         return w * ops.cast(ops.greater_equal(w, 0.), dtype=w.dtype)

    >>> weight = ops.convert_to_tensor((-1.0, 1.0))
    >>> NonNegative()(weight)
    [0., 1.]

    Usage in a layer:

    >>> keras.layers.Dense(4, kernel_constraint=NonNegative())
    """

    def __call__(self, w):
        """Project the input weight variable.

        The base implementation is the identity; subclasses override this
        to implement an actual projection.

        Args:
            w: Input weight variable.

        Returns:
            Projected variable (unmodified by default).
        """
        return w

    def get_config(self):
        """Return a JSON-serializable dict describing this constraint.

        The config can be used to reinstantiate an identical object via
        `from_config()`.

        Returns:
            Python dict containing the configuration of the constraint.
        """
        return {}

    @classmethod
    def from_config(cls, config):
        """Instantiate a weight constraint from its `get_config()` output.

        Example:

        ```python
        constraint = UnitNorm()
        config = constraint.get_config()
        constraint = UnitNorm.from_config(config)
        ```

        Args:
            config: A Python dictionary, the output of `get_config()`.

        Returns:
            A `keras.constraints.Constraint` instance.
        """
        return cls(**config)
@keras_export(["keras.constraints.MaxNorm", "keras.constraints.max_norm"])
class MaxNorm(Constraint):
    """MaxNorm weight constraint.

    Caps the norm of the weights incident to each hidden unit at
    `max_value`. Also available via the shortcut function
    `keras.constraints.max_norm`.

    Args:
        max_value: the maximum norm value for the incoming weights.
        axis: integer, axis along which to calculate weight norms.
            For a `Dense` kernel of shape `(input_dim, output_dim)`, use
            `axis=0` to constrain each weight vector of length
            `(input_dim,)`. For a `Conv2D` kernel with
            `data_format="channels_last"` of shape
            `(rows, cols, input_depth, output_depth)`, use
            `axis=[0, 1, 2]` to constrain each filter tensor of size
            `(rows, cols, input_depth)`.
    """

    def __init__(self, max_value=2, axis=0):
        self.max_value = max_value
        self.axis = axis

    def __call__(self, w):
        w = backend.convert_to_tensor(w)
        norms = ops.sqrt(ops.sum(ops.square(w), axis=self.axis, keepdims=True))
        capped = ops.clip(norms, 0, self.max_value)
        # Rescale each weight vector by capped_norm / norm; epsilon guards
        # against division by zero for all-zero vectors.
        scale = capped / (backend.epsilon() + norms)
        return ops.cast(w, norms.dtype) * scale

    def get_config(self):
        return {"max_value": self.max_value, "axis": self.axis}
@keras_export(["keras.constraints.NonNeg", "keras.constraints.non_neg"])
class NonNeg(Constraint):
    """Constrains the weights to be non-negative."""

    def __call__(self, w):
        w = backend.convert_to_tensor(w)
        # Zero out negative entries by multiplying with a boolean mask.
        non_negative_mask = ops.greater_equal(w, 0.0)
        return ops.multiply(w, non_negative_mask)
@keras_export(["keras.constraints.UnitNorm", "keras.constraints.unit_norm"])
class UnitNorm(Constraint):
    """Constrains the weights incident to each hidden unit to have unit norm.

    Args:
        axis: integer, axis along which to calculate weight norms.
            For a `Dense` kernel of shape `(input_dim, output_dim)`, use
            `axis=0` to constrain each weight vector of length
            `(input_dim,)`. For a `Conv2D` kernel with
            `data_format="channels_last"` of shape
            `(rows, cols, input_depth, output_depth)`, use
            `axis=[0, 1, 2]` to constrain each filter tensor of size
            `(rows, cols, input_depth)`.
    """

    def __init__(self, axis=0):
        self.axis = axis

    def __call__(self, w):
        w = backend.convert_to_tensor(w)
        # Divide each weight vector by its L2 norm; epsilon guards against
        # division by zero for all-zero vectors.
        norms = ops.sqrt(ops.sum(ops.square(w), axis=self.axis, keepdims=True))
        return ops.cast(w, norms.dtype) / (backend.epsilon() + norms)

    def get_config(self):
        return {"axis": self.axis}
@keras_export(
    ["keras.constraints.MinMaxNorm", "keras.constraints.min_max_norm"]
)
class MinMaxNorm(Constraint):
    """MinMaxNorm weight constraint.

    Keeps the norm of the weights incident to each hidden unit between a
    lower and an upper bound.

    Args:
        min_value: the minimum norm for the incoming weights.
        max_value: the maximum norm for the incoming weights.
        rate: rate for enforcing the constraint: weights are rescaled to
            yield `(1 - rate) * norm + rate * norm.clip(min_value,
            max_value)`. With `rate=1.0` the constraint is enforced
            strictly; with `rate<1.0` the weights move gradually toward
            the desired interval at each step.
        axis: integer, axis along which to calculate weight norms.
            For a `Dense` kernel of shape `(input_dim, output_dim)`, use
            `axis=0` to constrain each weight vector of length
            `(input_dim,)`. For a `Conv2D` kernel with
            `data_format="channels_last"` of shape
            `(rows, cols, input_depth, output_depth)`, use
            `axis=[0, 1, 2]` to constrain each filter tensor of size
            `(rows, cols, input_depth)`.
    """

    def __init__(self, min_value=0.0, max_value=1.0, rate=1.0, axis=0):
        self.min_value = min_value
        self.max_value = max_value
        self.rate = rate
        self.axis = axis

    def __call__(self, w):
        w = backend.convert_to_tensor(w)
        norms = ops.sqrt(ops.sum(ops.square(w), axis=self.axis, keepdims=True))
        # Interpolate between the current norm and its clipped value.
        clipped = ops.clip(norms, self.min_value, self.max_value)
        target = self.rate * clipped + (1 - self.rate) * norms
        # Epsilon guards against division by zero for all-zero vectors.
        scale = target / (backend.epsilon() + norms)
        return ops.cast(w, norms.dtype) * scale

    def get_config(self):
        return {
            "min_value": self.min_value,
            "max_value": self.max_value,
            "rate": self.rate,
            "axis": self.axis,
        }
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/constraints/__init__.py | keras/src/constraints/__init__.py | import inspect
from keras.src.api_export import keras_export
from keras.src.constraints.constraints import Constraint
from keras.src.constraints.constraints import MaxNorm
from keras.src.constraints.constraints import MinMaxNorm
from keras.src.constraints.constraints import NonNeg
from keras.src.constraints.constraints import UnitNorm
from keras.src.saving import serialization_lib
from keras.src.utils.naming import to_snake_case
# All built-in constraint classes exposed by this module.
ALL_OBJECTS = {
    Constraint,
    MaxNorm,
    MinMaxNorm,
    NonNeg,
    UnitNorm,
}
# Lookup table used by `get()`/`deserialize()`: maps both the class name
# (e.g. "MaxNorm") and its snake_case alias (e.g. "max_norm") to the class.
ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
ALL_OBJECTS_DICT.update(
    {to_snake_case(cls.__name__): cls for cls in ALL_OBJECTS}
)
@keras_export("keras.constraints.serialize")
def serialize(constraint):
    """Return a config dict representing a Keras constraint object."""
    return serialization_lib.serialize_keras_object(constraint)
@keras_export("keras.constraints.deserialize")
def deserialize(config, custom_objects=None):
    """Return a Keras constraint object via its config.

    Args:
        config: Constraint config dict (or other identifier) to
            deserialize.
        custom_objects: Optional dict mapping names to custom classes
            considered during deserialization.
    """
    return serialization_lib.deserialize_keras_object(
        config,
        module_objects=ALL_OBJECTS_DICT,
        custom_objects=custom_objects,
    )
@keras_export("keras.constraints.get")
def get(identifier):
    """Retrieve a Keras constraint object via an identifier.

    Accepts `None`, a config dict, a registered string name, a constraint
    class, or a constraint instance. Classes are instantiated with no
    arguments; unknown identifiers raise `ValueError`.
    """
    if identifier is None:
        return None
    if isinstance(identifier, dict):
        candidate = deserialize(identifier)
    elif isinstance(identifier, str):
        candidate = ALL_OBJECTS_DICT.get(identifier, None)
    else:
        candidate = identifier
    if not callable(candidate):
        raise ValueError(
            f"Could not interpret constraint identifier: {identifier}"
        )
    return candidate() if inspect.isclass(candidate) else candidate
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/wrappers/sklearn_wrapper.py | keras/src/wrappers/sklearn_wrapper.py | import copy
import numpy as np
from keras.src.api_export import keras_export
from keras.src.models.cloning import clone_model
from keras.src.models.model import Model
from keras.src.wrappers.fixes import _routing_enabled
from keras.src.wrappers.fixes import _validate_data
from keras.src.wrappers.fixes import type_of_target
from keras.src.wrappers.utils import TargetReshaper
from keras.src.wrappers.utils import _check_model
from keras.src.wrappers.utils import assert_sklearn_installed
try:
    import sklearn
    from sklearn.base import BaseEstimator
    from sklearn.base import ClassifierMixin
    from sklearn.base import RegressorMixin
    from sklearn.base import TransformerMixin
except ImportError:
    # scikit-learn is an optional dependency. Define empty stand-in base
    # classes so this module still imports without it; a clear error is
    # raised later (via `assert_sklearn_installed` in the wrappers'
    # constructors) if the wrappers are actually used.
    sklearn = None

    class BaseEstimator:
        pass

    class ClassifierMixin:
        pass

    class RegressorMixin:
        pass

    class TransformerMixin:
        pass
class SKLBase(BaseEstimator):
    """Base class for scikit-learn wrappers.

    Note that there are sources of randomness in model initialization and
    training. Refer to [Reproducibility in Keras Models](
    https://keras.io/examples/keras_recipes/reproducibility_recipes/) on how to
    control randomness.

    Args:
        model: `Model`.
            An instance of `Model`, or a callable returning such an object.
            Note that if input is a `Model`, it will be cloned using
            `keras.models.clone_model` before being fitted, unless
            `warm_start=True`.
            The `Model` instance needs to be passed as already compiled.
            If callable, it must accept at least `X` and `y` as keyword
            arguments. Other arguments must be accepted if passed as
            `model_kwargs` by the user.
        warm_start: bool, defaults to `False`.
            Whether to reuse the model weights from the previous fit. If `True`,
            the given model won't be cloned and the weights from the previous
            fit will be reused.
        model_kwargs: dict, defaults to `None`.
            Keyword arguments passed to `model`, if `model` is callable.
        fit_kwargs: dict, defaults to `None`.
            Keyword arguments passed to `model.fit`. These can also be passed
            directly to the `fit` method of the scikit-learn wrapper. The
            values passed directly to the `fit` method take precedence over
            these.

    Attributes:
        model_ : `Model`
            The fitted model.
        history_ : dict
            The history of the fit, returned by `model.fit`.
    """

    def __init__(
        self,
        model,
        warm_start=False,
        model_kwargs=None,
        fit_kwargs=None,
    ):
        assert_sklearn_installed(self.__class__.__name__)
        # Per sklearn convention, __init__ only stores parameters verbatim.
        self.model = model
        self.warm_start = warm_start
        self.model_kwargs = model_kwargs
        self.fit_kwargs = fit_kwargs

    def _more_tags(self):
        # scikit-learn < 1.6 tags API.
        return {"non_deterministic": True}

    def __sklearn_tags__(self):
        # scikit-learn >= 1.6 tags API.
        tags = super().__sklearn_tags__()
        tags.non_deterministic = True
        return tags

    def __sklearn_clone__(self):
        """Return a deep copy of the model.

        This is used by the `sklearn.base.clone` function.
        """
        model = (
            self.model if callable(self.model) else copy.deepcopy(self.model)
        )
        # Forward *all* constructor parameters so the clone is an
        # equivalent, unfitted estimator. (`fit_kwargs` was previously
        # dropped here, silently losing user configuration on clone.)
        return type(self)(
            model=model,
            warm_start=self.warm_start,
            model_kwargs=self.model_kwargs,
            fit_kwargs=self.fit_kwargs,
        )

    @property
    def epoch_(self):
        """The current training epoch."""
        return getattr(self, "history_", {}).get("epoch", 0)

    def set_fit_request(self, **kwargs):
        """Set requested parameters by the fit method.

        Please see [scikit-learn's metadata routing](
        https://scikit-learn.org/stable/metadata_routing.html) for more
        details.

        Arguments:
            kwargs : dict
                Arguments should be of the form `param_name=alias`, and `alias`
                can be one of `{True, False, None, str}`.

        Returns:
            self
        """
        if not _routing_enabled():
            raise RuntimeError(
                "This method is only available when metadata routing is "
                "enabled. You can enable it using "
                "sklearn.set_config(enable_metadata_routing=True)."
            )

        self._metadata_request = sklearn.utils.metadata_routing.MetadataRequest(
            owner=self.__class__.__name__
        )
        for param, alias in kwargs.items():
            self._metadata_request.score.add_request(param=param, alias=alias)
        return self

    def _get_model(self, X, y):
        """Return a fresh model: a clone of `self.model`, or its output."""
        # NOTE(review): `warm_start` is documented to skip cloning, but a
        # `Model` instance is cloned unconditionally here — confirm intended.
        if isinstance(self.model, Model):
            return clone_model(self.model)
        else:
            args = self.model_kwargs or {}
            return self.model(X=X, y=y, **args)

    def fit(self, X, y, **kwargs):
        """Fit the model.

        Args:
            X: array-like, shape=(n_samples, n_features)
                The input samples.
            y: array-like, shape=(n_samples,) or (n_samples, n_outputs)
                The targets.
            **kwargs: keyword arguments passed to `model.fit`. These take
                precedence over the constructor's `fit_kwargs`.
        """
        X, y = _validate_data(self, X, y)
        y = self._process_target(y, reset=True)
        model = self._get_model(X, y)
        _check_model(model)

        # Merge into a *new* dict; calling `update` on `self.fit_kwargs`
        # would mutate the user-supplied hyperparameter in place, leaking
        # per-call `kwargs` into subsequent fits and cloned estimators.
        fit_kwargs = {**(self.fit_kwargs or {}), **kwargs}
        self.history_ = model.fit(X, y, **fit_kwargs)

        self.model_ = model
        return self

    def predict(self, X):
        """Predict using the model."""
        from sklearn.utils.validation import check_is_fitted

        check_is_fitted(self)
        X = _validate_data(self, X, reset=False)
        raw_output = self.model_.predict(X)
        return self._reverse_process_target(raw_output)

    def _process_target(self, y, reset=False):
        """Regressors are NOOP here, classifiers do OHE."""
        # This is here to raise the right error in case of invalid target
        type_of_target(y, raise_unknown=True)
        if reset:
            self._target_encoder = TargetReshaper().fit(y)
        return self._target_encoder.transform(y)

    def _reverse_process_target(self, y):
        """Regressors are NOOP here, classifiers reverse OHE."""
        return self._target_encoder.inverse_transform(y)
@keras_export("keras.wrappers.SKLearnClassifier")
class SKLearnClassifier(ClassifierMixin, SKLBase):
    """scikit-learn compatible classifier wrapper for Keras models.

    Note that there are sources of randomness in model initialization and
    training. Refer to [Reproducibility in Keras Models](
    https://keras.io/examples/keras_recipes/reproducibility_recipes/) on how to
    control randomness.

    Args:
        model: `Model`.
            An instance of `Model`, or a callable returning such an object.
            Note that if input is a `Model`, it will be cloned using
            `keras.models.clone_model` before being fitted, unless
            `warm_start=True`.
            The `Model` instance needs to be passed as already compiled.
            If callable, it must accept at least `X` and `y` as keyword
            arguments. Other arguments must be accepted if passed as
            `model_kwargs` by the user.
        warm_start: bool, defaults to `False`.
            Whether to reuse the model weights from the previous fit. If `True`,
            the given model won't be cloned and the weights from the previous
            fit will be reused.
        model_kwargs: dict, defaults to `None`.
            Keyword arguments passed to `model`, if `model` is callable.
        fit_kwargs: dict, defaults to `None`.
            Keyword arguments passed to `model.fit`. These can also be passed
            directly to the `fit` method of the scikit-learn wrapper. The
            values passed directly to the `fit` method take precedence over
            these.

    Attributes:
        model_ : `Model`
            The fitted model.
        history_ : dict
            The history of the fit, returned by `model.fit`.
        classes_ : array-like, shape=(n_classes,)
            The classes labels.

    Example:
        Here we use a function which creates a basic MLP model dynamically
        choosing the input and output shapes. We will use this to create our
        scikit-learn model.

    ``` python
    from keras.layers import Dense, Input
    from keras.models import Model

    def dynamic_model(X, y, loss, layers=[10]):
        # Creates a basic MLP model dynamically choosing the input and
        # output shapes.
        n_features_in = X.shape[1]
        inp = Input(shape=(n_features_in,))
        hidden = inp
        for layer_size in layers:
            hidden = Dense(layer_size, activation="relu")(hidden)
        n_outputs = y.shape[1] if len(y.shape) > 1 else 1
        out = Dense(n_outputs, activation="softmax")(hidden)
        model = Model(inp, out)
        model.compile(loss=loss, optimizer="rmsprop")
        return model
    ```

    You can then use this function to create a scikit-learn compatible model
    and fit it on some data.

    ``` python
    from sklearn.datasets import make_classification
    from keras.wrappers import SKLearnClassifier

    X, y = make_classification(n_samples=1000, n_features=10)
    est = SKLearnClassifier(
        model=dynamic_model,
        model_kwargs={
            "loss": "categorical_crossentropy",
            "layers": [20, 20, 20],
        },
    )
    est.fit(X, y, epochs=5)
    ```
    """

    def _process_target(self, y, reset=False):
        """Validate the target and one-hot encode it.

        Args:
            y: array-like targets; must be binary or multiclass.
            reset: when `True`, fit a new encoder and set `classes_`.

        Returns:
            The one-hot encoded targets.
        """
        # Reject targets that are neither binary nor multiclass (e.g.
        # continuous or multilabel) before any encoding happens.
        target_type = type_of_target(y, raise_unknown=True)
        if target_type not in ["binary", "multiclass"]:
            raise ValueError(
                "Only binary and multiclass target types are supported."
                f" Target type: {target_type}"
            )
        if reset:
            # Fit a fresh label pipeline: reshape 1D targets to a column,
            # then one-hot encode for use with a categorical loss.
            self._target_encoder = sklearn.pipeline.make_pipeline(
                TargetReshaper(),
                sklearn.preprocessing.OneHotEncoder(sparse_output=False),
            ).fit(y)
            self.classes_ = np.unique(y)
            if len(self.classes_) == 1:
                raise ValueError(
                    "Classifier can't train when only one class is present."
                )
        return self._target_encoder.transform(y)

    def _more_tags(self):
        # required to be compatible with scikit-learn<1.6
        return {"poor_score": True}

    def __sklearn_tags__(self):
        # scikit-learn >= 1.6 tags API.
        tags = super().__sklearn_tags__()
        tags.classifier_tags.poor_score = True
        return tags
@keras_export("keras.wrappers.SKLearnRegressor")
class SKLearnRegressor(RegressorMixin, SKLBase):
    """scikit-learn compatible regressor wrapper for Keras models.

    Note that there are sources of randomness in model initialization and
    training. Refer to [Reproducibility in Keras Models](
    https://keras.io/examples/keras_recipes/reproducibility_recipes/) on how to
    control randomness.

    Args:
        model: `Model`.
            An instance of `Model`, or a callable returning such an object.
            Note that if input is a `Model`, it will be cloned using
            `keras.models.clone_model` before being fitted, unless
            `warm_start=True`.
            The `Model` instance needs to be passed as already compiled.
            If callable, it must accept at least `X` and `y` as keyword
            arguments. Other arguments must be accepted if passed as
            `model_kwargs` by the user.
        warm_start: bool, defaults to `False`.
            Whether to reuse the model weights from the previous fit. If `True`,
            the given model won't be cloned and the weights from the previous
            fit will be reused.
        model_kwargs: dict, defaults to `None`.
            Keyword arguments passed to `model`, if `model` is callable.
        fit_kwargs: dict, defaults to `None`.
            Keyword arguments passed to `model.fit`. These can also be passed
            directly to the `fit` method of the scikit-learn wrapper. The
            values passed directly to the `fit` method take precedence over
            these.

    Attributes:
        model_ : `Model`
            The fitted model.

    Example:
        Here we use a function which creates a basic MLP model dynamically
        choosing the input and output shapes. We will use this to create our
        scikit-learn model.

    ``` python
    from keras.layers import Dense, Input
    from keras.models import Model

    def dynamic_model(X, y, loss, layers=[10]):
        # Creates a basic MLP model dynamically choosing the input and
        # output shapes.
        n_features_in = X.shape[1]
        inp = Input(shape=(n_features_in,))
        hidden = inp
        for layer_size in layers:
            hidden = Dense(layer_size, activation="relu")(hidden)
        n_outputs = y.shape[1] if len(y.shape) > 1 else 1
        out = Dense(n_outputs)(hidden)
        model = Model(inp, out)
        model.compile(loss=loss, optimizer="rmsprop")
        return model
    ```

    You can then use this function to create a scikit-learn compatible model
    and fit it on some data.

    ``` python
    from sklearn.datasets import make_regression
    from keras.wrappers import SKLearnRegressor

    X, y = make_regression(n_samples=1000, n_features=10)
    est = SKLearnRegressor(
        model=dynamic_model,
        model_kwargs={
            "loss": "mse",
            "layers": [20, 20, 20],
        },
    )
    est.fit(X, y, epochs=5)
    ```
    """

    # Target processing is inherited from SKLBase and is a pass-through
    # (regressors need no target encoding).

    def _more_tags(self):
        # required to be compatible with scikit-learn<1.6
        return {"poor_score": True}

    def __sklearn_tags__(self):
        # scikit-learn >= 1.6 tags API.
        tags = super().__sklearn_tags__()
        tags.regressor_tags.poor_score = True
        return tags
@keras_export("keras.wrappers.SKLearnTransformer")
class SKLearnTransformer(TransformerMixin, SKLBase):
    """scikit-learn compatible transformer wrapper for Keras models.

    Note that this is a scikit-learn compatible transformer, and not a
    transformer in the deep learning sense.

    Also note that there are sources of randomness in model initialization and
    training. Refer to [Reproducibility in Keras Models](
    https://keras.io/examples/keras_recipes/reproducibility_recipes/) on how to
    control randomness.

    Args:
        model: `Model`.
            An instance of `Model`, or a callable returning such an object.
            Note that if input is a `Model`, it will be cloned using
            `keras.models.clone_model` before being fitted, unless
            `warm_start=True`.
            The `Model` instance needs to be passed as already compiled.
            If callable, it must accept at least `X` and `y` as keyword
            arguments. Other arguments must be accepted if passed as
            `model_kwargs` by the user.
        warm_start: bool, defaults to `False`.
            Whether to reuse the model weights from the previous fit. If `True`,
            the given model won't be cloned and the weights from the previous
            fit will be reused.
        model_kwargs: dict, defaults to `None`.
            Keyword arguments passed to `model`, if `model` is callable.
        fit_kwargs: dict, defaults to `None`.
            Keyword arguments passed to `model.fit`. These can also be passed
            directly to the `fit` method of the scikit-learn wrapper. The
            values passed directly to the `fit` method take precedence over
            these.

    Attributes:
        model_ : `Model`
            The fitted model.
        history_ : dict
            The history of the fit, returned by `model.fit`.

    Example:
        A common use case for a scikit-learn transformer, is to have a step
        which gives you the embedding of your data. Here we assume
        `my_package.my_model` is a Keras model which takes the input and gives
        embeddings of the data, and `my_package.my_data` is your dataset loader.

    ``` python
    from my_package import my_model, my_data
    from keras.wrappers import SKLearnTransformer
    from sklearn.frozen import FrozenEstimator  # requires scikit-learn>=1.6
    from sklearn.pipeline import make_pipeline
    from sklearn.ensemble import HistGradientBoostingClassifier

    X, y = my_data()

    trs = FrozenEstimator(SKLearnTransformer(model=my_model))
    pipe = make_pipeline(trs, HistGradientBoostingClassifier())
    pipe.fit(X, y)
    ```

    Note that in the above example, `FrozenEstimator` prevents any further
    training of the transformer step in the pipeline, which can be the case
    if you don't want to change the embedding model at hand.
    """

    def transform(self, X):
        """Transform the data.

        Args:
            X: array-like, shape=(n_samples, n_features)
                The input samples.

        Returns:
            X_transformed: array-like, shape=(n_samples, n_features)
                The transformed data.
        """
        from sklearn.utils.validation import check_is_fitted

        check_is_fitted(self)
        X = _validate_data(self, X, reset=False)
        # The "transformation" is simply the fitted model's output (e.g.
        # an embedding), so transform == predict here.
        return self.model_.predict(X)

    def _more_tags(self):
        # required to be compatible with scikit-learn<1.6
        return {
            "preserves_dtype": [],
        }

    def __sklearn_tags__(self):
        # scikit-learn >= 1.6 tags API.
        tags = super().__sklearn_tags__()
        tags.transformer_tags.preserves_dtype = []
        return tags
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/wrappers/fixes.py | keras/src/wrappers/fixes.py | try:
import sklearn
except ImportError:
sklearn = None
def _validate_data(estimator, *args, **kwargs):
    """Validate the input data.

    Wrapper for `sklearn.utils.validation.validate_data` (scikit-learn
    >= 1.6) or `BaseEstimator._validate_data` (older versions).

    TODO: remove when minimum scikit-learn version is 1.6

    Args:
        estimator: The scikit-learn estimator validating its input.
        *args: Positional arguments forwarded to the validation function
            (typically `X`, and optionally `y`).
        **kwargs: Keyword arguments forwarded to the validation function.

    Returns:
        The validated data, as returned by scikit-learn.
    """
    try:
        # scikit-learn >= 1.6
        from sklearn.utils.validation import validate_data
    except ImportError:
        # Older scikit-learn: fall back to the estimator method.
        return estimator._validate_data(*args, **kwargs)
    # Call outside the `try` so an ImportError raised *inside*
    # `validate_data` is not mistaken for an old scikit-learn version.
    # (The former bare `except: raise` clause was a no-op and is removed.)
    return validate_data(estimator, *args, **kwargs)
def type_of_target(y, input_name="", *, raise_unknown=False):
    """Determine the kind of target `y` describes.

    Delegates to `sklearn.utils.multiclass.type_of_target`; when
    `raise_unknown` is set, an unrecognized target raises instead of
    returning the string `"unknown"`.
    """
    from sklearn.utils.multiclass import type_of_target as sk_type_of_target

    target_type = sk_type_of_target(y, input_name=input_name)
    if raise_unknown and target_type == "unknown":
        described = input_name if input_name else "data"
        raise ValueError(f"Unknown label type for {described}: {y!r}")
    return target_type
def _routing_enabled():
    """Return whether metadata routing is enabled.

    Returns:
        enabled : bool
            Whether metadata routing is enabled. If the config is not set, it
            defaults to False.

    TODO: remove when the config key is no longer available in scikit-learn
    """
    config = sklearn.get_config()
    return config.get("enable_metadata_routing", False)
def _raise_for_params(params, owner, method):
    """Raise an error if metadata routing is not enabled and params are passed.

    Parameters:
        params : dict
            The metadata passed to a method.
        owner : object
            The object to which the method belongs.
        method : str
            The name of the method, e.g. "fit".

    Raises:
        ValueError
            If metadata routing is not enabled and params are passed.
    """
    # Nothing to complain about when routing is on or no metadata was given.
    if _routing_enabled() or not params:
        return

    if method:
        caller = f"{owner.__class__.__name__}.{method}"
    else:
        caller = owner.__class__.__name__
    raise ValueError(
        f"Passing extra keyword arguments to {caller} is only supported if"
        " enable_metadata_routing=True, which you can set using"
        " `sklearn.set_config`. See the User Guide"
        " <https://scikit-learn.org/stable/metadata_routing.html> for more"
        f" details. Extra parameters passed are: {set(params)}"
    )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/wrappers/utils.py | keras/src/wrappers/utils.py | import numpy as np
try:
import sklearn
from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin
except ImportError:
sklearn = None
class BaseEstimator:
pass
class TransformerMixin:
pass
def assert_sklearn_installed(symbol_name):
    """Raise an informative error when scikit-learn is unavailable.

    Args:
        symbol_name: Name of the public symbol the user tried to use;
            included in the error message.
    """
    if sklearn is not None:
        return
    raise ImportError(
        f"{symbol_name} requires `scikit-learn` to be installed. "
        "Run `pip install scikit-learn` to install it."
    )
def _check_model(model):
"""Check whether the model need sto be compiled."""
# compile model if user gave us an un-compiled model
if not model.compiled or not model.loss or not model.optimizer:
raise RuntimeError(
"Given model needs to be compiled, and have a loss "
"and an optimizer."
)
class TargetReshaper(TransformerMixin, BaseEstimator):
    """Convert 1D targets to 2D and back.

    For use in pipelines with transformers that only accept
    2D inputs, like OneHotEncoder and OrdinalEncoder.

    Attributes:
        ndim_ : int
            Dimensions of y that the transformer was trained on.
    """

    def fit(self, y):
        """Record the dimensionality of the target `y`.

        Returns:
            TargetReshaper
                A reference to the current instance of TargetReshaper.
        """
        self.ndim_ = y.ndim
        return self

    def transform(self, y):
        """Reshape a 1D target into a column vector; pass 2D+ through.

        Args:
            y : np.ndarray
                Target y to be transformed.

        Returns:
            np.ndarray
                A numpy array, of dimension at least 2.
        """
        return y.reshape(-1, 1) if y.ndim == 1 else y

    def inverse_transform(self, y):
        """Revert the transformation of transform.

        Args:
            y: np.ndarray
                Transformed numpy array.

        Returns:
            np.ndarray
                If the transformer was fit to a 1D numpy array, and a 2D
                numpy array with a singleton second dimension is passed, it
                will be squeezed back to 1D. Otherwise, it will be left
                untouched.
        """
        from sklearn.utils.validation import check_is_fitted

        check_is_fitted(self)
        squeeze = self.ndim_ == 1 and y.ndim == 2
        return np.squeeze(y, axis=1) if squeeze else y
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/wrappers/__init__.py | keras/src/wrappers/__init__.py | from keras.src.wrappers.sklearn_wrapper import SKLearnClassifier
from keras.src.wrappers.sklearn_wrapper import SKLearnRegressor
from keras.src.wrappers.sklearn_wrapper import SKLearnTransformer
__all__ = ["SKLearnClassifier", "SKLearnRegressor", "SKLearnTransformer"]
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/wrappers/sklearn_test.py | keras/src/wrappers/sklearn_test.py | """Tests using Scikit-Learn's bundled estimator_checks."""
import unittest
from contextlib import contextmanager
import pytest
import sklearn
from packaging.version import parse as parse_version
from sklearn.utils.estimator_checks import parametrize_with_checks
import keras
from keras.src.backend import floatx
from keras.src.backend import set_floatx
from keras.src.layers import Dense
from keras.src.layers import Input
from keras.src.models import Model
from keras.src.wrappers import SKLearnClassifier
from keras.src.wrappers import SKLearnRegressor
from keras.src.wrappers import SKLearnTransformer
def wrapped_parametrize_with_checks(
    estimators,
    *,
    legacy=True,
    expected_failed_checks=None,
):
    """Wrapped `parametrize_with_checks` handling backwards compat.

    On scikit-learn >= 1.6, forwards everything to
    `parametrize_with_checks`. On older versions, emulates
    `expected_failed_checks` by patching each estimator class's
    `_more_tags` to carry the legacy `_xfail_checks` tag.

    Args:
        estimators: iterable of estimator instances to check.
        legacy: whether to include legacy checks (only honored on
            scikit-learn >= 1.6; ignored otherwise).
        expected_failed_checks: callable mapping an estimator to a dict of
            `{check_name: reason}` entries that are expected to fail.
    """
    sklearn_version = parse_version(
        parse_version(sklearn.__version__).base_version
    )

    if sklearn_version >= parse_version("1.6"):
        return parametrize_with_checks(
            estimators,
            legacy=legacy,
            expected_failed_checks=expected_failed_checks,
        )

    def patch_estimator_tags(estimator, failed_checks):
        # (Renamed: the outer helper and the inner replacement method were
        # both called `patched_more_tags`, shadowing each other.)
        import copy

        original_tags = copy.deepcopy(sklearn.utils._tags._safe_tags(estimator))

        def _more_tags_with_xfail(self):
            original_tags.update({"_xfail_checks": failed_checks})
            return original_tags

        # NOTE: this patches the *class*, affecting all of its instances;
        # acceptable here since it only ever runs inside this test module.
        estimator.__class__._more_tags = _more_tags_with_xfail
        return estimator

    estimators = [
        patch_estimator_tags(estimator, expected_failed_checks(estimator))
        for estimator in estimators
    ]

    # legacy is not supported and ignored
    return parametrize_with_checks(estimators)
def dynamic_model(X, y, loss, layers=(10,)):
    """Creates a basic MLP classifier dynamically choosing binary/multiclass
    classification loss and output activations.

    Args:
        X: array-like of shape (n_samples, n_features); only used to size
            the input layer.
        y: array-like targets; only used to size the output layer.
        loss: loss identifier passed to `model.compile`.
        layers: sequence of hidden-layer widths. The default is a tuple
            rather than a list to avoid a shared mutable default argument.

    Returns:
        A compiled Keras `Model`.
    """
    n_features_in = X.shape[1]
    inp = Input(shape=(n_features_in,))

    hidden = inp
    for layer_size in layers:
        hidden = Dense(layer_size, activation="relu")(hidden)

    n_outputs = y.shape[1] if len(y.shape) > 1 else 1
    out = [Dense(n_outputs, activation="softmax")(hidden)]
    model = Model(inp, out)
    model.compile(loss=loss, optimizer="rmsprop")
    return model
@contextmanager
def use_floatx(x):
    """Temporarily set the Keras backend float precision.

    Restores the previous precision on exit, even if the body raises.
    """
    previous = floatx()
    set_floatx(x)
    try:
        yield
    finally:
        set_floatx(previous)
# Known failures per wrapper class, mapping check name -> reason. Consumed
# through the `expected_failed_checks` argument of
# `wrapped_parametrize_with_checks` below.
EXPECTED_FAILED_CHECKS = {
    "SKLearnClassifier": {
        "check_classifiers_regression_target": "not an issue in sklearn>=1.6",
        "check_parameters_default_constructible": (
            "not an issue in sklearn>=1.6"
        ),
        "check_classifiers_one_label_sample_weights": (
            "0 sample weight is not ignored"
        ),
        "check_classifiers_classes": (
            "with small test cases the estimator returns not all classes "
            "sometimes"
        ),
        "check_classifier_data_not_an_array": (
            "This test assumes reproducibility in fit."
        ),
        "check_supervised_y_2d": "This test assumes reproducibility in fit.",
        "check_fit_idempotent": "This test assumes reproducibility in fit.",
    },
    "SKLearnRegressor": {
        "check_parameters_default_constructible": (
            "not an issue in sklearn>=1.6"
        ),
    },
    "SKLearnTransformer": {
        "check_parameters_default_constructible": (
            "not an issue in sklearn>=1.6"
        ),
    },
}
@wrapped_parametrize_with_checks(
    estimators=[
        SKLearnClassifier(
            model=dynamic_model,
            model_kwargs={
                "loss": "categorical_crossentropy",
                "layers": [20, 20, 20],
            },
            fit_kwargs={"epochs": 5},
        ),
        SKLearnRegressor(
            model=dynamic_model,
            model_kwargs={"loss": "mse"},
        ),
        SKLearnTransformer(
            model=dynamic_model,
            model_kwargs={"loss": "mse"},
        ),
    ],
    expected_failed_checks=lambda estimator: EXPECTED_FAILED_CHECKS[
        type(estimator).__name__
    ],
)
def test_sklearn_estimator_checks(estimator, check):
    """Checks that can be passed with sklearn's default tolerances
    and in a single epoch.
    """
    try:
        check(estimator)
    except Exception as exc:
        # Backends without full op coverage surface NotImplementedError,
        # sometimes embedded in another exception's message.
        if keras.config.backend() in ["numpy", "openvino"] and (
            isinstance(exc, NotImplementedError)
            or "NotImplementedError" in str(exc)
        ):
            pytest.xfail("Backend not implemented")
        elif isinstance(exc, unittest.SkipTest):
            # Workaround for https://github.com/pytest-dev/pytest/issues/13895
            pytest.skip(str(exc))
        else:
            raise
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/distribution/distribution_lib.py | keras/src/distribution/distribution_lib.py | """Unified high-level distribution APIs across backends.
Currently only the JAX backend is supported. The TensorFlow backend
will be supported in the future (via tf.dtensor API).
"""
import collections
import contextlib
import os
import re
import warnings
import numpy as np
from keras.src.api_export import keras_export
from keras.src.backend import KerasTensor
from keras.src.backend import distribution_lib
from keras.src.backend.common import global_state
DEFAULT_BATCH_DIM_NAME = "batch"
GLOBAL_ATTRIBUTE_NAME = "distribution"
@keras_export("keras.distribution.list_devices")
def list_devices(device_type=None):
    """Return all the available devices based on the device type.

    Note: in a distributed setting, global devices are returned.

    Args:
        device_type: string, one of `"cpu"`, `"gpu"` or `"tpu"`.
            Defaults to `"gpu"` or `"tpu"` if available when
            `device_type` is not provided. Otherwise
            will return the `"cpu"` devices.

    Return:
        List of devices that are available for distribute computation.
    """
    # Delegates to the active backend's implementation.
    return distribution_lib.list_devices(device_type)
@keras_export("keras.distribution.get_device_count")
def get_device_count(device_type=None):
    """Returns the number of available backend devices.

    Args:
        device_type: Optional device type to count (e.g., "cpu", "gpu", "tpu").
            If `None`, it defaults to counting "gpu" or "tpu" devices if
            available, otherwise it counts "cpu" devices. It does not
            return the sum of all device types.

    Returns:
        int: The total number of devices for the specified type.
    """
    # Delegates to the active backend's implementation.
    return distribution_lib.get_device_count(device_type=device_type)
@keras_export("keras.distribution.initialize")
def initialize(job_addresses=None, num_processes=None, process_id=None):
    """Initialize the distribution system for multi-host/process setting.

    Calling `initialize` will prepare the backend for execution on multi-host
    GPU or TPUs. It should be called before any computations.

    Each argument may instead be supplied through an environment variable,
    which is convenient when a launch script configures the cluster:
    `KERAS_DISTRIBUTION_JOB_ADDRESSES`, `KERAS_DISTRIBUTION_NUM_PROCESSES`
    and `KERAS_DISTRIBUTION_PROCESS_ID`. Explicitly passed arguments take
    precedence over the environment.

    Args:
        job_addresses: string. Comma separated IP addresses for all the jobs
            that will form the whole computation cluster. Note that for the
            JAX backend, only the address of job 0 (the coordinator) is
            needed. For certain runtimes like cloud TPU, this value can be
            `None`, and the backend will figure it out with the TPU
            environment variables.
        num_processes: int. The number of worker/processes that will form
            the whole computation cluster. For certain runtimes like cloud
            TPU, this value can be `None`, and the backend will figure it
            out with the TPU environment variables.
        process_id: int. The ID number of the current worker/process. The
            value should be ranged from `0` to `num_processes - 1`. `0`
            will indicate the current worker/process is the
            master/coordinate job.

    Example:

    Suppose there are two GPU processes, with process 0 running at
    `10.0.0.1:1234` and process 1 at `10.0.0.2:2345`.

    On process 0:

    ```python
    keras.distribute.initialize(
        job_addresses="10.0.0.1:1234,10.0.0.2:2345",
        num_processes=2,
        process_id=0)
    ```

    On process 1:

    ```python
    keras.distribute.initialize(
        job_addresses="10.0.0.1:1234,10.0.0.2:2345",
        num_processes=2,
        process_id=1)
    ```

    Alternatively, export the corresponding environment variables on each
    process and call `keras.distribute.initialize()` with no arguments.
    Also note that for the JAX backend, `job_addresses` can be further
    reduced to just the master/coordinator address, `10.0.0.1:1234`.
    """
    env = os.environ
    if job_addresses is None:
        job_addresses = env.get("KERAS_DISTRIBUTION_JOB_ADDRESSES")
    if num_processes is None and "KERAS_DISTRIBUTION_NUM_PROCESSES" in env:
        num_processes = int(env["KERAS_DISTRIBUTION_NUM_PROCESSES"])
    if process_id is None and "KERAS_DISTRIBUTION_PROCESS_ID" in env:
        process_id = int(env["KERAS_DISTRIBUTION_PROCESS_ID"])
    distribution_lib.initialize(job_addresses, num_processes, process_id)
@keras_export("keras.distribution.DeviceMesh")
class DeviceMesh:
    """A cluster of computation devices for distributed computation.

    This API is aligned with `jax.sharding.Mesh` and `tf.dtensor.Mesh`: it
    arranges the devices of the global context into a logical grid whose
    axes can be referenced by name when sharding data and variables.

    See more details in [jax.sharding.Mesh](
    https://jax.readthedocs.io/en/latest/jax.sharding.html#jax.sharding.Mesh)
    and [tf.dtensor.Mesh](
    https://www.tensorflow.org/api_docs/python/tf/experimental/dtensor/Mesh).

    Args:
        shape: tuple or list of integers, the grid shape, e.g. `(8,)` for a
            data parallel only distribution or `(4, 2)` for a model+data
            parallel distribution.
        axis_names: list of strings naming each axis of `shape`; must have
            the same length as `shape`. The names are matched against
            `TensorLayout` axes when distributing data and variables.
        devices: optional list of devices. Defaults to all the available
            devices locally from `keras.distribution.list_devices()`.
    """

    def __init__(self, shape, axis_names, devices=None):
        self._check_shape_and_names(shape, axis_names)

        if devices is None:
            devices = list_devices()
        device_array = np.array(devices)
        if np.prod(shape) != device_array.size:
            raise ValueError(
                "Shape does not match the number of devices. "
                f"Received: shape={shape}; devices.shape="
                f"{device_array.shape}"
            )

        self._shape = shape
        self._axis_names = axis_names
        self._devices = np.reshape(device_array, shape)

    @staticmethod
    def _check_shape_and_names(shape, axis_names):
        # Both must be non-empty and of matching length.
        if not shape or not axis_names:
            raise ValueError(
                "Shape and axis_names cannot be empty. Received: "
                f"shape={shape}, axis_names={axis_names}"
            )
        if len(shape) != len(axis_names):
            raise ValueError(
                "Shape and axis_names should have same size. "
                f"Received: shape={shape}, axis_names={axis_names}"
            )

    @property
    def shape(self):
        """Logical grid shape, as given at construction."""
        return self._shape

    @property
    def axis_names(self):
        """Axis names, parallel to `shape`."""
        return self._axis_names

    @property
    def devices(self):
        """Devices as an ndarray arranged with shape `shape`."""
        return self._devices

    @property
    def backend_mesh(self):
        """Backend-native mesh, created lazily and cached."""
        if not hasattr(self, "_backend_mesh"):
            self._backend_mesh = distribution_lib._to_backend_mesh(self)
        return self._backend_mesh

    def __repr__(self):
        cls_name = self.__class__.__name__
        return (
            f"<{cls_name} "
            f"shape={self.shape}, axis_names={self.axis_names}>"
        )

    def __str__(self):
        return repr(self)
@keras_export("keras.distribution.TensorLayout")
class TensorLayout:
    """A layout to apply to a tensor.

    This API is aligned with `jax.sharding.NamedSharding`
    and `tf.dtensor.Layout`.

    See more details in [jax.sharding.NamedSharding](
    https://jax.readthedocs.io/en/latest/jax.sharding.html#jax.sharding.NamedSharding)
    and [tf.dtensor.Layout](
    https://www.tensorflow.org/api_docs/python/tf/experimental/dtensor/Layout).

    Args:
        axes: tuple of strings mapping to the `axis_names` of a
            `DeviceMesh`; use `None` as a placeholder for dimensions that
            do not need sharding.
        device_mesh: optional `DeviceMesh` used to create the layout. The
            actual mapping of tensor to physical device is not known until
            the mesh is specified.
    """

    def __init__(self, axes, device_mesh=None):
        self._axes = tuple(axes)
        self._device_mesh = device_mesh
        self._validate_axes()

    @property
    def axes(self):
        """Per-dimension sharding axis names (or `None`)."""
        return self._axes

    @property
    def device_mesh(self):
        """The attached `DeviceMesh`, or `None` if not yet set."""
        return self._device_mesh

    @device_mesh.setter
    def device_mesh(self, device_mesh):
        # A mesh may be attached once, after construction, never replaced.
        if self._device_mesh is not None:
            raise ValueError(
                "Cannot override device mesh value. Existing "
                f"value is {self._device_mesh}"
            )
        self._device_mesh = device_mesh
        self._validate_axes()

    @property
    def backend_layout(self):
        """Backend-native layout, created lazily and cached."""
        if not hasattr(self, "_backend_layout"):
            self._backend_layout = distribution_lib._to_backend_layout(self)
        return self._backend_layout

    def _validate_axes(self):
        # Without a mesh there is nothing to validate against.
        if not self._device_mesh:
            return
        valid_axis_names = set(self._device_mesh.axis_names)
        axis_names = set(self._axes) - {None}
        if axis_names - valid_axis_names:
            raise ValueError(
                "Invalid axis names for Layout. Valid axis "
                f"names: {valid_axis_names}, Got {axis_names}"
            )

    def __repr__(self):
        cls_name = self.__class__.__name__
        return (
            f"<{cls_name} "
            f"axes={self.axes}, device_mesh={self.device_mesh}>"
        )

    def __str__(self):
        return repr(self)
class Distribution:
    """Base class for variable distribution strategies.

    A `Distribution` has the following key functionalities:

    1. Distribute the model variables to a `DeviceMesh`.
    2. Distribute the input data to a `DeviceMesh`.
    3. Distribute an intermediate state tensor in the model.

    It can create a context scope so that the framework can properly
    detect the `Distribution` and distribute the variable/data
    accordingly.

    Args:
        device_mesh: A `DeviceMesh` instance.
        batch_dim_name: Optional string name for the batch dimension.
            Defaults to None.
        auto_shard_dataset: Automatically shard the dataset amongst
            processes in a multi-process setting. Set to `False` if the
            dataset is already sharded across hosts. Defaults to `True`.
    """

    def __init__(
        self, device_mesh, batch_dim_name=None, auto_shard_dataset=True
    ):
        self._auto_shard_dataset = auto_shard_dataset
        self._batch_dim_name = batch_dim_name
        self._device_mesh = device_mesh

    def get_data_layout(self, data_shape):
        """Retrieve the `TensorLayout` for the input data.

        Args:
            data_shape: shape for the input data in list or tuple format.

        Returns:
            The `TensorLayout` for the data, which can be used by
            `backend.distribute_value()` to redistribute a input data.
        """
        raise NotImplementedError()

    def get_variable_layout(self, variable):
        """Retrieve the `TensorLayout` for the variable.

        Args:
            variable: A `Variable` instance.

        Returns:
            The `TensorLayout` for the variable, which can be used by
            `backend.distribute_value()` to redistribute a variable.
        """
        raise NotImplementedError()

    def get_tensor_layout(self, path):
        """Retrieve the `TensorLayout` for the intermediate tensor.

        Args:
            path: a string path for the corresponding tensor.

        Returns:
            The `TensorLayout` for the intermediate tensor, which can be
            used by `backend.relayout()` to reshard the tensor. Could
            also return None.
        """
        raise NotImplementedError()

    @contextlib.contextmanager
    def scope(self):
        """Context manager to make the `Distribution` current."""
        previous = distribution()
        set_distribution(self)
        try:
            yield
        finally:
            # Restore whatever distribution was active before entering.
            set_distribution(previous)

    @property
    def device_mesh(self):
        return self._device_mesh

    @property
    def batch_dim_name(self):
        return self._batch_dim_name

    @property
    def auto_shard_dataset(self):
        return self._auto_shard_dataset

    @auto_shard_dataset.setter
    def auto_shard_dataset(self, auto_shard_dataset):
        self._auto_shard_dataset = auto_shard_dataset

    def distribute_dataset(self, dataset):
        """Create a distributed dataset from the original global dataset.

        Args:
            dataset: the original global dataset instance.

        Returns:
            If `auto_shard_dataset` is `True`, returns a sharded dataset
            that only produces data for the current local worker/process.
            Otherwise, returns the original dataset.

        Raises:
            ValueError: if auto-sharding is requested in a multi-process
                setting, but the dataset type is not supported.
        """
        raise NotImplementedError()

    def __repr__(self):
        return "<{} device_mesh={}>".format(
            type(self).__name__, self.device_mesh
        )

    def __str__(self):
        return repr(self)
@keras_export("keras.distribution.DataParallel")
class DataParallel(Distribution):
    """Distribution for data parallelism.

    You can choose to create this instance by either specifying
    the `device_mesh` or `devices` arguments (but not both).

    The `device_mesh` argument is expected to be a `DeviceMesh` instance,
    and is expected to be 1D only. In case that the mesh has multiple axes,
    then the first axis will be treated as the data parallel dimension
    (and a warning will be raised).

    When a list of `devices` are provided, they will be used to construct a
    1D mesh.

    When both `mesh` and `devices` are absent, then `list_devices()`
    will be used to detect any available devices and create a 1D mesh from
    them.

    Args:
        device_mesh: Optional `DeviceMesh` instance.
        devices: Optional list of devices.
        auto_shard_dataset: Automatically shard the dataset amongst
            processes in a multi-process setting. Set to `False` if the
            dataset is already sharded across hosts. Defaults to `True`.
    """

    def __init__(self, device_mesh=None, devices=None, auto_shard_dataset=True):
        if device_mesh:
            self._initialize_with_device_mesh(device_mesh, auto_shard_dataset)
        elif devices:
            self._initialize_mesh_from_devices(devices, auto_shard_dataset)
        else:
            self._initialize_mesh_from_list_devices(auto_shard_dataset)
        # The following attributes might get converted to public methods.
        self._num_process = distribution_lib.num_processes()
        self._process_id = distribution_lib.process_id()
        self._is_multi_process = self._num_process > 1

    def _initialize_with_device_mesh(self, device_mesh, auto_shard_dataset):
        """Adopt a user-provided mesh; its first axis is the batch dim."""
        if not isinstance(device_mesh, DeviceMesh):
            raise ValueError(
                "Expect `mesh` to be an instance of `DeviceMesh`. "
                f"Received: mesh={device_mesh} (of type {type(device_mesh)})"
            )
        super().__init__(
            device_mesh, device_mesh.axis_names[0], auto_shard_dataset
        )
        if self.device_mesh.devices.ndim != 1:
            # Bug fix: `warnings.warn`'s second positional argument is
            # `category` and must be a `Warning` subclass. The previous
            # logging-style call passed the ndim integer there, which
            # raised a `TypeError` instead of emitting the warning.
            warnings.warn(
                "Expect the input mesh to be 1D, but received "
                f"mesh.devices.ndim={device_mesh.devices.ndim}. "
                "The first axis will be used for data-parallel sharding."
            )

    def _initialize_mesh_from_devices(self, devices, auto_shard_dataset):
        """Build a 1D mesh from an explicit list of devices."""
        devices = np.array(devices)
        device_mesh = DeviceMesh(
            shape=devices.shape,
            axis_names=[DEFAULT_BATCH_DIM_NAME],
            devices=devices,
        )
        super().__init__(
            device_mesh, DEFAULT_BATCH_DIM_NAME, auto_shard_dataset
        )

    def _initialize_mesh_from_list_devices(self, auto_shard_dataset):
        """Build a 1D mesh from all auto-detected devices."""
        devices = np.array(list_devices())
        device_mesh = DeviceMesh(
            shape=devices.shape,
            axis_names=[DEFAULT_BATCH_DIM_NAME],
            devices=devices,
        )
        super().__init__(
            device_mesh, DEFAULT_BATCH_DIM_NAME, auto_shard_dataset
        )

    def get_data_layout(self, data_shape):
        """Shard the batch (first) dimension of the data on the mesh."""
        data_shard_spec = [None] * len(data_shape)
        data_shard_spec[0] = self.batch_dim_name  # Shard on the first dim
        return TensorLayout(data_shard_spec, self.device_mesh)

    def get_variable_layout(self, variable):
        """Return the variable's own layout if set, else replicate it."""
        # First check if the variable already has a layout assigned.
        if getattr(variable, "_layout", None) is not None:
            return variable._layout
        # Otherwise, replicate variable.
        variable_shard_spec = [None] * len(variable.shape)
        return TensorLayout(variable_shard_spec, self.device_mesh)

    def get_tensor_layout(self, path):
        # For data parallel training, the intermediate state is not changed.
        return None

    def distribute_dataset(self, dataset):
        """Shard a global `tf.data.Dataset` across processes.

        No-op in single-process settings or when `auto_shard_dataset`
        is `False`.
        """
        if not self._is_multi_process or not self.auto_shard_dataset:
            return dataset

        # Try to distribute a global tf.data.Dataset.
        from keras.src.utils.module_utils import tensorflow as tf

        if not tf.available or not isinstance(dataset, tf.data.Dataset):
            raise ValueError(
                "Only `tf.data.Dataset` is supported for auto-sharding, "
                f"got {type(dataset)}"
            )

        from tensorflow.python.data.experimental.ops import (
            distribute as tf_data_distribute,
        )

        batch_size = tf_data_distribute.compute_batch_size(dataset)
        if batch_size.numpy() < 0:
            raise ValueError(
                "The batch size of the input dataset is "
                "unknown. Please config the batch size for "
                "the input dataset, e.g via `dataset.batch(batch_size)`"
            )
        per_worker_batch_size = tf_data_distribute.batch_sizes_for_worker(
            global_batch_size=batch_size,
            num_workers=self._num_process,
            num_replicas_per_worker=1,  # We hard code this for now.
            worker_index=self._process_id,
        )
        distributed_dataset = dataset.rebatch(per_worker_batch_size)
        distributed_dataset = tf_data_distribute._AutoShardDataset(
            distributed_dataset,
            num_workers=self._num_process,
            index=self._process_id,
            num_replicas=self._num_process,
        )
        return distributed_dataset.prefetch(tf.data.AUTOTUNE)
@keras_export("keras.distribution.ModelParallel")
class ModelParallel(Distribution):
    """Distribution that shards model variables.

    Compare to `DataParallel` which replicates the variables across all devices,
    `ModelParallel` allows you to shard variables in addition to the input data.

    To construct a `ModelParallel` distribution, you need to provide a
    `DeviceMesh` and a `LayoutMap`.

    1. `DeviceMesh` contains physical device information. The axis names in
        the mesh will be used to map the variable and data layout.
    2. `LayoutMap` contains the mapping between variable paths to their
        corresponding `TensorLayout`.

    Example:

    ```python
    devices = list_devices() # Assume there are 8 devices.

    # Create a mesh with 2 devices for data parallelism and 4 devices for
    # model parallelism.
    device_mesh = DeviceMesh(shape=(2, 4), axis_names=('batch', 'model'),
                             devices=devices)

    # Create a layout map that shard the `Dense` layer and `Conv2D`
    # layer variables on the last dimension.
    # Based on the `device_mesh`, this means the variables
    # will be split across 4 devices. Any other variable that doesn't
    # match any key in the layout map will be fully replicated.
    layout_map = LayoutMap(device_mesh)
    layout_map['dense.*kernel'] = (None, 'model')
    layout_map['dense.*bias'] = ('model',)
    layout_map['conv2d.*kernel'] = (None, None, None, 'model')
    layout_map['conv2d.*bias'] = ('model',)

    distribution = ModelParallel(
        layout_map=layout_map,
        batch_dim_name='batch',
    )

    # Set the global distribution, or via `with distribution.scope():`
    set_distribution(distribution)

    model = model_creation()
    model.compile()
    model.fit(data)
    ```

    You can quickly update the device mesh shape to change the sharding factor
    of the variables. E.g.

    ```python
    # With only the shape change for the device mesh, the variables will be
    # sharded across 8 devices instead of 4, which further reduces the memory
    # footprint of variables on each of the device.
    device_mesh = DeviceMesh(
        shape=(1, 8),
        axis_names=('batch', 'model'),
        devices=devices,
    )
    ```

    To figure out a proper layout mapping rule for all the model variables, you
    can first list out all the model variable paths, which will be used as the
    key to map the variables to `TensorLayout`.

    e.g.

    ```python
    model = create_model()
    for v in model.variables:
        print(v.path)
    ```

    Args:
        layout_map: `LayoutMap` instance which map the variable path to the
            corresponding tensor layout.
        batch_dim_name: Optional string, the axis name in the device mesh
            (of the `layout_map` object)
            that will be used to distribute data. If unspecified, the
            first axis from the device mesh will be used.
        auto_shard_dataset: Automatically shard the dataset amongst
            processes in a multi-process setting. Set to `False` if the dataset
            is already sharded across hosts. Defaults to `True`.
    """

    def __init__(
        self,
        *,
        layout_map=None,
        batch_dim_name=None,
        auto_shard_dataset=True,
        **kwargs,
    ):
        # Silently drop a `device_mesh` kwarg; the mesh is taken from the
        # `layout_map` instead. NOTE(review): presumably kept for backward
        # compatibility with older call sites — confirm before removing.
        kwargs.pop("device_mesh", None)
        if layout_map is None:
            raise ValueError("You must specify a layout_map argument.")
        if not isinstance(layout_map, LayoutMap):
            raise ValueError(
                "Argument `layout_map` must be a `LayoutMap` instance. "
                f"Received: layout_map={layout_map}"
            )
        device_mesh = layout_map.device_mesh
        # Default the batch dimension to the first mesh axis.
        batch_dim_name = batch_dim_name or device_mesh.axis_names[0]
        super().__init__(device_mesh, batch_dim_name, auto_shard_dataset)
        self._layout_map = layout_map
        # Those following attributes might get convert to public methods.
        self._num_process = distribution_lib.num_processes()
        self._process_id = distribution_lib.process_id()
        self._is_multi_process = self._num_process > 1

    def get_data_layout(self, data_shape):
        # Shard the batch (first) dimension of the input; replicate the rest.
        data_shard_spec = [None] * len(data_shape)
        data_shard_spec[0] = self.batch_dim_name  # Shard on the first dim
        return TensorLayout(data_shard_spec, self.device_mesh)

    def get_variable_layout(self, variable):
        # First check if the variable already has a layout assigned.
        if getattr(variable, "_layout", None) is not None:
            return variable._layout
        # Check the layout map.
        variable_layout = self._layout_map[variable.path]
        if variable_layout is not None:
            return variable_layout
        # No rule matched the variable path: fully replicate the variable.
        variable_shard_spec = [None] * len(variable.shape)
        return TensorLayout(variable_shard_spec, self.device_mesh)

    def get_tensor_layout(self, path):
        # Regex lookup in the layout map; may return None when nothing
        # matches.
        return self._layout_map[path]

    def distribute_dataset(self, dataset):
        """Shard a global `tf.data.Dataset` for the current process.

        See `Distribution.distribute_dataset` for the full contract.
        """
        if not self._is_multi_process or not self.auto_shard_dataset:
            return dataset
        # Try to distribute a global tf.data.Dataset.
        from keras.src.utils.module_utils import tensorflow as tf

        if not tf.available or not isinstance(dataset, tf.data.Dataset):
            raise ValueError(
                "Only `tf.data.Dataset` is supported for auto-sharding, "
                f"got {type(dataset)}"
            )
        from tensorflow.python.data.experimental.ops import (
            distribute as tf_data_distribute,
        )

        global_batch_size = tf_data_distribute.compute_batch_size(dataset)
        if global_batch_size.numpy() < 0:
            raise ValueError(
                "The batch size of the input dataset is "
                "unknown. Please config the batch size for "
                "the input dataset, e.g via `dataset.batch(batch_size)`"
            )
        # We need to compute the per-process/worker/host batch size.
        # This will depend on how many model replicas we have on each process.
        # Note that this might be smaller than one if model replicas are sharded
        # across multiple processes.
        mesh_batch_dim_index = self.device_mesh.axis_names.index(
            self.batch_dim_name
        )
        # Number of model replicas == size of the batch axis of the mesh.
        num_model_replicas = self.device_mesh.shape[mesh_batch_dim_index]
        if num_model_replicas == 1:
            # No sharding is needed in this case. Each process will have the
            # global batch size, and data from the iterator will need to be
            # replicated across all processes.
            return dataset.prefetch(tf.data.AUTOTUNE)
        num_model_replicas_per_process = num_model_replicas / self._num_process
        if num_model_replicas_per_process >= 1:
            # Each process will have one or more full model replicas. Data will
            # be sharded across all processes without replication.
            if global_batch_size % self._num_process != 0:
                raise ValueError(
                    "Global batch size must be divisible by the number of "
                    f"processes. `global_batch_size`={global_batch_size} and "
                    f"`num_process`={self._num_process}"
                )
            per_process_batch_size = global_batch_size // self._num_process
            distributed_dataset = dataset.rebatch(per_process_batch_size)
            distributed_dataset = distributed_dataset.shard(
                num_shards=self._num_process,
                index=self._process_id,
            )
            return distributed_dataset.prefetch(tf.data.AUTOTUNE)
        else:
            # Model replicas are sharded across multiple processes. Data will be
            # sharded across model replicas, and replicated across processes
            # within the same model replica.
            if global_batch_size % num_model_replicas != 0:
                raise ValueError(
                    "Global batch size must be divisible by the number of "
                    f"replicas. `global_batch_size`={global_batch_size} and "
                    f"`num_model_replicas`={num_model_replicas}"
                )
            per_process_batch_size = global_batch_size // num_model_replicas
            distributed_dataset = dataset.rebatch(per_process_batch_size)
            processes_per_replica = self._num_process // num_model_replicas
            # TODO: Figure out what the convention is for data sharding id.
            data_shard_id = self._process_id % processes_per_replica
            distributed_dataset = distributed_dataset.shard(
                num_shards=num_model_replicas,
                index=data_shard_id,
            )
            return distributed_dataset.prefetch(tf.data.AUTOTUNE)
@keras_export("keras.distribution.LayoutMap")
class LayoutMap(collections.abc.MutableMapping):
"""A dict-like object that maps string to `TensorLayout` instances.
`LayoutMap` uses a string as key and a `TensorLayout` as value. There is a
behavior difference between a normal Python dict and this class. The string
key will be treated as a regex when retrieving the value. See the docstring
of `get` for more details.
See below for a usage example. You can define the naming schema
of the `TensorLayout`, and then retrieve the corresponding
`TensorLayout` instance.
In the normal case, the key to query is usually the `variable.path`, which
is the identifier of the variable.
As shortcut, tuple or list of axis names are also allowed when inserting
as value, and will be converted to `TensorLayout`.
```python
layout_map = LayoutMap(device_mesh)
layout_map['dense.*kernel'] = (None, 'model')
layout_map['dense.*bias'] = ('model',)
layout_map['conv2d.*kernel'] = (None, None, None, 'model')
layout_map['conv2d.*bias'] = ('model',)
layout_1 = layout_map['dense_1.kernel'] # layout_1 == layout_2d
layout_2 = layout_map['dense_1.bias'] # layout_2 == layout_1d
layout_3 = layout_map['dense_2.kernel'] # layout_3 == layout_2d
layout_4 = layout_map['dense_2.bias'] # layout_4 == layout_1d
layout_5 = layout_map['my_model/conv2d_123/kernel'] # layout_5 == layout_4d
layout_6 = layout_map['my_model/conv2d_123/bias'] # layout_6 == layout_1d
layout_7 = layout_map['my_model/conv3d_1/kernel'] # layout_7 == None
layout_8 = layout_map['my_model/conv3d_1/bias'] # layout_8 == None
```
Args:
device_mesh: `keras.distribution.DeviceMesh` instance.
"""
def __init__(self, device_mesh):
self._layout_map = collections.OrderedDict()
self._device_mesh = device_mesh
def __getitem__(self, key):
"""Retrieves the corresponding layout by the string key.
When there isn't an exact match, all the existing keys in the layout map
will be treated as a regex and map against the input key again. When
there are multiple matches for the regex, an `ValueError` will be
raised. Returns `None` if there isn't any match found.
Args:
key: String key to query a layout.
Returns:
Corresponding layout based on the query.
"""
if key in self._layout_map:
return self._layout_map[key]
matching_keys = []
for k in self._layout_map:
if re.search(k, key):
matching_keys.append(k)
if len(matching_keys) > 1:
raise ValueError(
f"Path '{key}' matches multiple layout "
f"specification keys: {matching_keys}. Please make "
"sure each tensor/variable path only matches at most "
"one layout specification key in the LayoutMap."
)
elif len(matching_keys) == 1:
return self._layout_map[matching_keys[0]]
return None
def __setitem__(self, key, layout):
"""Insert TensorLayout to the LayoutMap.
Args:
key: String key for the `TensorLayout`.
layout: The `TensorLayout`. As a shortcut, tuple of string and None
are also acceptable, and will be converted to `TensorLayout`.
"""
if key in self._layout_map:
raise ValueError(
f"{key} already exist in the LayoutMap with "
f"value {self._layout_map[key]}. Please make sure to "
"not use duplicated keys."
)
if isinstance(layout, tuple):
layout = TensorLayout(axes=layout, device_mesh=None)
if not isinstance(layout, TensorLayout):
raise ValueError(
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
from keras.src.distribution.distribution_lib import DataParallel
from keras.src.distribution.distribution_lib import DeviceMesh
from keras.src.distribution.distribution_lib import Distribution
from keras.src.distribution.distribution_lib import LayoutMap
from keras.src.distribution.distribution_lib import ModelParallel
from keras.src.distribution.distribution_lib import TensorLayout
from keras.src.distribution.distribution_lib import distribute_tensor
from keras.src.distribution.distribution_lib import distribution
from keras.src.distribution.distribution_lib import initialize
from keras.src.distribution.distribution_lib import list_devices
from keras.src.distribution.distribution_lib import set_distribution
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
"""Test for distribution_lib.py."""
import os
from unittest import mock
import numpy as np
import pytest
import tensorflow as tf
from keras.src import backend
from keras.src import testing
from keras.src.backend import distribution_lib as backend_dlib
from keras.src.distribution import distribution_lib
@pytest.mark.skipif(
    backend.backend() != "jax",
    reason="Only JAX has the backend to mock at the moment",
)
@mock.patch.object(
    backend_dlib,
    "initialize",
    return_value=None,
)
class MultiProcessInitializeTest(testing.TestCase):
    """Checks that `distribution_lib.initialize` forwards its settings.

    The backend-level `initialize` is mocked out, so these tests only
    assert on the arguments it receives (explicit values or values read
    from the `KERAS_DISTRIBUTION_*` environment variables).
    """

    # Environment variables that the tests below may set.
    _ENV_KEYS = (
        "KERAS_DISTRIBUTION_JOB_ADDRESSES",
        "KERAS_DISTRIBUTION_NUM_PROCESSES",
        "KERAS_DISTRIBUTION_PROCESS_ID",
    )

    def tearDown(self):
        super().tearDown()
        # Bug fix: this previously called `os.environ.clear()`, which
        # wipes the *entire* process environment (PATH, backend flags,
        # ...) and leaks into every test that runs afterwards. Remove
        # only the variables these tests may have set.
        for key in self._ENV_KEYS:
            os.environ.pop(key, None)

    def test_initialize_with_explicit_param(self, mock_backend_initialize):
        """Explicit arguments are passed through to the backend unchanged."""
        job_addresses = "10.0.0.1:1234,10.0.0.2:2345"
        num_processes = 2
        current_process_id = 0
        distribution_lib.initialize(
            job_addresses, num_processes, current_process_id
        )
        mock_backend_initialize.assert_called_once_with(
            job_addresses, num_processes, current_process_id
        )

    def test_initialize_with_env_vars(self, mock_backend_initialize):
        """Values from the KERAS_DISTRIBUTION_* env vars are picked up."""
        job_addresses = "10.0.0.1:1234,10.0.0.2:2345"
        num_processes = 2
        current_process_id = 0
        os.environ["KERAS_DISTRIBUTION_JOB_ADDRESSES"] = job_addresses
        os.environ["KERAS_DISTRIBUTION_NUM_PROCESSES"] = str(num_processes)
        os.environ["KERAS_DISTRIBUTION_PROCESS_ID"] = str(current_process_id)
        distribution_lib.initialize()
        mock_backend_initialize.assert_called_once_with(
            job_addresses, num_processes, current_process_id
        )

    def test_init_with_nones(self, mock_backend_initialize):
        """All-`None` arguments are valid (e.g. Cloud TPU on JAX)."""
        distribution_lib.initialize()
        mock_backend_initialize.assert_called_once_with(None, None, None)
class DeviceMeshTest(testing.TestCase):
    """Unit tests for `distribution_lib.DeviceMesh` construction."""

    def test_mesh_creation(self):
        device_list = [f"cpu:{i}" for i in range(8)]
        mesh_shape = (4, 2)
        names = ["batch", "model"]
        mesh = distribution_lib.DeviceMesh(mesh_shape, names, device_list)
        self.assertEqual(mesh.shape, mesh_shape)
        self.assertEqual(mesh.axis_names, names)
        # The flat device list is reshaped to the mesh shape.
        self.assertEqual(mesh.devices.shape, mesh_shape)

    def test_input_validation(self):
        device_list = [f"cpu:{i}" for i in range(4)]
        with self.assertRaisesRegex(
            ValueError, "Shape and axis_names cannot be empty"
        ):
            distribution_lib.DeviceMesh((4,), "", device_list)
        with self.assertRaisesRegex(
            ValueError, "Shape and axis_names should have same size"
        ):
            distribution_lib.DeviceMesh((4, 2), ["batch"], device_list)
        # Only 4 devices provided for a mesh that needs 8.
        with self.assertRaisesRegex(
            ValueError, "Shape does not match the number of devices"
        ):
            distribution_lib.DeviceMesh(
                (4, 2), ["batch", "model"], device_list
            )
class TensorLayoutTest(testing.TestCase):
    """Unit tests for `distribution_lib.TensorLayout`."""

    def setUp(self):
        self.mesh = distribution_lib.DeviceMesh(
            (4, 2), ["data", "model"], [f"cpu:{i}" for i in range(8)]
        )

    def test_tensor_layout_creation(self):
        spec = ("data", None)
        layout = distribution_lib.TensorLayout(spec, self.mesh)
        self.assertEqual(layout.axes, spec)
        self.assertEqual(layout.device_mesh, self.mesh)

    def test_tensor_layout_validation(self):
        # "unknown" is not an axis name of the mesh.
        bad_spec = ("data", "unknown", None)
        with self.assertRaisesRegex(
            ValueError, "Invalid axis names for Layout"
        ):
            distribution_lib.TensorLayout(bad_spec, self.mesh)

    def test_lazy_device_mesh_injection(self):
        spec = ("data", None)
        layout = distribution_lib.TensorLayout(spec, None)
        self.assertIsNone(layout.device_mesh)
        self.assertEqual(layout.axes, spec)
        # Attach the mesh after construction; axes are preserved.
        layout.device_mesh = self.mesh
        self.assertEqual(layout.device_mesh, self.mesh)
        self.assertEqual(layout.axes, spec)

    def test_lazy_device_mesh_validation(self):
        bad_spec = ("data", "unknown", None)
        layout = distribution_lib.TensorLayout(bad_spec, None)
        self.assertIsNone(layout.device_mesh)
        self.assertEqual(layout.axes, bad_spec)
        # Validation is deferred until a mesh is attached.
        with self.assertRaisesRegex(
            ValueError, "Invalid axis names for Layout"
        ):
            layout.device_mesh = self.mesh
class DistributionTest(testing.TestCase):
    """Tests for the abstract `Distribution` base class."""

    def setUp(self):
        super().setUp()
        device_list = [f"cpu:{i}" for i in range(8)]
        self.device_mesh = distribution_lib.DeviceMesh(
            (4, 2), ["batch", "model"], device_list
        )

    def test_init_with_device_mesh(self):
        dist = distribution_lib.Distribution(self.device_mesh)
        self.assertIs(dist.device_mesh, self.device_mesh)

    def test_scope(self):
        outer = distribution_lib.Distribution(self.device_mesh)
        inner = distribution_lib.Distribution(self.device_mesh)
        # No global distribution is set outside of any scope.
        self.assertIsNone(distribution_lib.distribution())
        with outer.scope():
            self.assertIs(distribution_lib.distribution(), outer)
            with inner.scope():
                self.assertIs(distribution_lib.distribution(), inner)
            # Leaving the inner scope restores the outer distribution.
            self.assertIs(distribution_lib.distribution(), outer)
        self.assertIsNone(distribution_lib.distribution())
@pytest.mark.skipif(
    backend.backend() != "jax",
    reason="Only JAX has the proper backend distribution lib",
)
class DataParallelDistributionTest(testing.TestCase):
    """Tests for `distribution_lib.DataParallel` on an 8-device 1D mesh."""

    def setUp(self):
        super().setUp()
        self.devices = [f"cpu:{i}" for i in range(8)]
        shape = (8,)
        axis_names = ["data"]
        self.device_mesh = distribution_lib.DeviceMesh(
            shape, axis_names, self.devices
        )

    def test_create_with_device_mesh(self):
        distribution = distribution_lib.DataParallel(
            device_mesh=self.device_mesh
        )
        device_mesh = distribution.device_mesh
        self.assertEqual(len(device_mesh.devices), 8)
        self.assertEqual(device_mesh.axis_names, ["data"])
        # The mesh's first axis name becomes the batch dimension.
        self.assertEqual(distribution.batch_dim_name, "data")
        # Single-process environment in OSS testing.
        self.assertFalse(distribution._is_multi_process)
        self.assertEqual(distribution._process_id, 0)
        self.assertEqual(distribution._num_process, 1)

    def test_create_with_devices(self):
        distribution = distribution_lib.DataParallel(devices=self.devices)
        device_mesh = distribution.device_mesh
        self.assertEqual(len(device_mesh.devices), 8)
        # A mesh built from a device list uses the default batch axis name.
        self.assertEqual(device_mesh.axis_names, ["batch"])
        self.assertEqual(distribution.batch_dim_name, "batch")

    @mock.patch.object(
        distribution_lib,
        "list_devices",
        return_value=[f"cpu:{i}" for i in range(8)],
    )
    def test_create_with_list_devices(self, mock_list_devices):
        # With neither mesh nor devices given, devices are auto-detected.
        distribution = distribution_lib.DataParallel()
        mock_list_devices.assert_called_once()
        device_mesh = distribution.device_mesh
        self.assertEqual(len(device_mesh.devices), 8)
        self.assertEqual(device_mesh.axis_names, ["batch"])
        self.assertEqual(distribution.batch_dim_name, "batch")

    def test_get_data_layout(self):
        distribution = distribution_lib.DataParallel(
            device_mesh=self.device_mesh
        )
        data = np.arange(16).reshape((4, 2, 2))
        data_layout = distribution.get_data_layout(data.shape)
        self.assertIs(data_layout.device_mesh, self.device_mesh)
        # Only the batch (first) dimension is sharded.
        self.assertEqual(data_layout.axes, ("data", None, None))

    @pytest.mark.skipif(testing.jax_uses_gpu(), reason="CI segfault")
    def test_get_variable_layout(self):
        distribution = distribution_lib.DataParallel(
            device_mesh=self.device_mesh
        )
        variable = backend.Variable(initializer=[1, 2, 3])
        variable_layout = distribution.get_variable_layout(variable)
        self.assertIs(variable_layout.device_mesh, self.device_mesh)
        # Variables are fully replicated under data parallelism.
        self.assertEqual(variable_layout.axes, (None,))

    @pytest.mark.skipif(testing.jax_uses_gpu(), reason="CI segfault")
    def test_get_variable_layout_with_explicit_layout(self):
        distribution = distribution_lib.DataParallel(
            device_mesh=self.device_mesh
        )
        explicit_mesh = distribution_lib.DeviceMesh((8,), ["x"], self.devices)
        explicit_layout = distribution_lib.TensorLayout(["x"], explicit_mesh)
        variable = backend.Variable(initializer=[1, 2, 3])
        variable._layout = explicit_layout
        variable_layout = distribution.get_variable_layout(variable)
        # A layout already attached to the variable takes precedence.
        self.assertIs(variable_layout.device_mesh, explicit_mesh)
        self.assertEqual(variable_layout.axes, explicit_layout.axes)

    def test_get_tensor_layout(self):
        distribution = distribution_lib.DataParallel(
            device_mesh=self.device_mesh
        )
        path = "path/to/tensor"
        tensor_layout = distribution.get_tensor_layout(path)
        # Intermediate tensors are not resharded under data parallelism.
        self.assertIsNone(tensor_layout)

    def test_distribute_dataset(self):
        # We can only verify the single worker/process case in OSS for now.
        dataset = tf.data.Dataset.range(8)
        distribution = distribution_lib.DataParallel(
            device_mesh=self.device_mesh
        )
        distributed_dataset = distribution.distribute_dataset(dataset)
        # Single process: the dataset is returned unchanged.
        self.assertIs(dataset, distributed_dataset)
@pytest.mark.skipif(
    backend.backend() != "jax",
    reason="Only JAX has the proper backend distribution lib",
)
class ModelParallelDistributionTest(testing.TestCase):
    """Tests for `distribution_lib.ModelParallel` on a 2x4 device mesh."""

    def setUp(self):
        super().setUp()
        self.devices = [f"cpu:{i}" for i in range(8)]
        shape = (2, 4)
        axis_names = ["data", "model"]
        self.device_mesh = distribution_lib.DeviceMesh(
            shape, axis_names, self.devices
        )

    @pytest.mark.skipif(testing.jax_uses_gpu(), reason="CI segfault")
    def test_distribute_weights(self):
        layout_map = distribution_lib.LayoutMap(self.device_mesh)
        layout_map[".*kernel"] = distribution_lib.TensorLayout([None, "model"])
        layout_map[".*bias"] = distribution_lib.TensorLayout(["model"])
        distribution = distribution_lib.ModelParallel(
            layout_map=layout_map, batch_dim_name="data"
        )
        # Bug fix: `np.arange(8, 4)` (start=8, stop=4) is an *empty* 1-D
        # array, so the kernel was not the 2-D weight the test intends.
        # Use a real 8x4 matrix instead.
        kernel = backend.Variable(
            initializer=np.arange(32).reshape((8, 4)), name="kernel"
        )
        bias = backend.Variable(initializer=np.arange(4), name="bias")
        rng_seed = backend.Variable(initializer=[0, 1], name="seed")

        # Kernel and bias match the layout-map regexes by variable path.
        kernel_layout = distribution.get_variable_layout(kernel)
        self.assertIs(kernel_layout.device_mesh, self.device_mesh)
        self.assertEqual(kernel_layout.axes, (None, "model"))

        bias_layout = distribution.get_variable_layout(bias)
        self.assertIs(bias_layout.device_mesh, self.device_mesh)
        self.assertEqual(bias_layout.axes, ("model",))

        # Unmatched variables fall back to full replication.
        rng_seed_layout = distribution.get_variable_layout(rng_seed)
        self.assertIs(rng_seed_layout.device_mesh, self.device_mesh)
        self.assertEqual(rng_seed_layout.axes, (None,))

    def test_distribute_data(self):
        layout_map = distribution_lib.LayoutMap(self.device_mesh)
        distribution = distribution_lib.ModelParallel(
            layout_map=layout_map, batch_dim_name="data"
        )
        data = np.arange(16).reshape((4, 2, 2))
        data_layout = distribution.get_data_layout(data.shape)
        self.assertIs(data_layout.device_mesh, self.device_mesh)
        # Only the batch (first) dimension of the data is sharded.
        self.assertEqual(data_layout.axes, ("data", None, None))

    def test_get_tensor_layout(self):
        layout_map = distribution_lib.LayoutMap(self.device_mesh)
        layout_map[".*kernel"] = distribution_lib.TensorLayout([None, "model"])
        layout_map[".*bias"] = distribution_lib.TensorLayout(["model"])
        layout_map["/model/layer/tensor"] = ("data", None)
        distribution = distribution_lib.ModelParallel(
            layout_map=layout_map, batch_dim_name="data"
        )
        layout = distribution.get_tensor_layout("/model/layer/tensor")
        self.assertIs(layout.device_mesh, self.device_mesh)
        self.assertEqual(layout.axes, ("data", None))

        # Paths without a matching rule yield no layout.
        layout = distribution.get_tensor_layout("/model/layer/other_tensor")
        self.assertIsNone(layout)

    @pytest.mark.skipif(testing.jax_uses_gpu(), reason="CI segfault")
    def test_get_variable_layout_with_explicit_layout(self):
        layout_map = distribution_lib.LayoutMap(self.device_mesh)
        layout_map[".*kernel"] = distribution_lib.TensorLayout([None, "model"])
        distribution = distribution_lib.ModelParallel(
            layout_map=layout_map, batch_dim_name="data"
        )
        explicit_mesh = distribution_lib.DeviceMesh((8,), ["x"], self.devices)
        explicit_layout = distribution_lib.TensorLayout(["x"], explicit_mesh)
        variable = backend.Variable(initializer=[1, 2, 3], name="kernel")
        variable._layout = explicit_layout
        variable_layout = distribution.get_variable_layout(variable)
        # A layout already attached to the variable wins over the map.
        self.assertIs(variable_layout.device_mesh, explicit_mesh)
        self.assertEqual(variable_layout.axes, explicit_layout.axes)

    def test_distribute_dataset(self):
        # We can only verify the single worker/process case in OSS for now.
        dataset = tf.data.Dataset.range(8)
        layout_map = distribution_lib.LayoutMap(self.device_mesh)
        distribution = distribution_lib.ModelParallel(
            layout_map=layout_map, batch_dim_name="data"
        )
        distributed_dataset = distribution.distribute_dataset(dataset)
        self.assertIs(dataset, distributed_dataset)
class LayoutMapTest(testing.TestCase):
    """Tests for the dict-like, regex-keyed `distribution_lib.LayoutMap`."""

    def setUp(self):
        super().setUp()
        self.devices = [f"cpu:{i}" for i in range(8)]
        shape = (4, 2)
        axis_names = ["data", "model"]
        self.device_mesh = distribution_lib.DeviceMesh(
            shape, axis_names, self.devices
        )
        self.sharded_2d = distribution_lib.TensorLayout([None, "model"])
        self.sharded_1d = distribution_lib.TensorLayout(["model"])
        self.replicated_2d = distribution_lib.TensorLayout([None, None])
        self.replicated_1d = distribution_lib.TensorLayout([None])

    def test_add(self):
        layout_map = distribution_lib.LayoutMap(self.device_mesh)
        layout_map["dense/kernel"] = self.sharded_2d
        layout_map["dense/bias"] = self.sharded_1d
        # Test for adding list/tuple as shortcut for TensorLayout
        layout_map["conv/bias"] = ("model",)

        # Three entries are now in the map (the old comment said two).
        self.assertLen(layout_map, 3)

        kernel_layout = layout_map["dense/kernel"]
        self.assertEqual(kernel_layout.axes, (None, "model"))
        self.assertIs(kernel_layout.device_mesh, self.device_mesh)

        bias_layout = layout_map["dense/bias"]
        self.assertEqual(bias_layout.axes, ("model",))
        self.assertIs(bias_layout.device_mesh, self.device_mesh)

        conv_bias_layout = layout_map["conv/bias"]
        self.assertEqual(conv_bias_layout.axes, ("model",))
        # Bug fix: this previously re-checked `bias_layout` instead of
        # `conv_bias_layout` (copy-paste slip), so the tuple-shortcut
        # path's device-mesh attachment went untested.
        self.assertIs(conv_bias_layout.device_mesh, self.device_mesh)

        with self.assertRaisesRegex(ValueError, "dense/kernel already exist"):
            layout_map["dense/kernel"] = self.sharded_2d

        with self.assertRaisesRegex(ValueError, "should be a TensorLayout"):
            layout_map["conv.kernel"] = ["a", "b"]

    def test_get(self):
        layout_map = distribution_lib.LayoutMap(self.device_mesh)
        layout_map["dense/kernel"] = self.sharded_2d
        layout_map["dense/bias"] = self.sharded_1d
        # Broader regex rules registered after the exact keys above.
        layout_map["dense.*kernel"] = self.replicated_2d
        layout_map["dense.*bias"] = self.replicated_1d
        layout_map["bias"] = self.sharded_1d

        # Exact matches win over regex matches.
        self.assertEqual(layout_map["dense/kernel"], self.sharded_2d)
        self.assertEqual(layout_map["dense/bias"], self.sharded_1d)
        # No exact match: falls through to the regex rules.
        self.assertEqual(layout_map["dense_2/kernel"], self.replicated_2d)

        # Map against the wildcard bias rule for dense. This will cause a
        # ValueError
        with self.assertRaisesRegex(
            ValueError, "Path 'dense_2/bias' matches multiple layout"
        ):
            layout_map["dense_2/bias"]

        self.assertIsNone(layout_map["conv2d/kernel"])
        self.assertEqual(layout_map["conv2d/bias"], self.sharded_1d)

    def test_delete(self):
        layout_map = distribution_lib.LayoutMap(self.device_mesh)
        layout_map["dense/kernel"] = self.sharded_2d
        layout_map["dense/bias"] = self.sharded_1d

        self.assertEqual(layout_map.pop("dense/kernel"), self.sharded_2d)
        # Make sure to match against the exact string, not the regex
        with self.assertRaises(KeyError):
            layout_map.pop(".*bias")

        # Make sure del also works
        del layout_map["dense/bias"]
        self.assertLen(layout_map, 0)

    def test_len(self):
        layout_map = distribution_lib.LayoutMap(self.device_mesh)
        self.assertLen(layout_map, 0)
        layout_map["dense/kernel"] = self.sharded_2d
        layout_map["dense/bias"] = self.sharded_1d
        self.assertLen(layout_map, 2)

    def test_iter(self):
        layout_map = distribution_lib.LayoutMap(self.device_mesh)
        layout_map["dense/kernel"] = self.sharded_2d
        layout_map["dense/bias"] = self.sharded_1d

        # Make sure the items are ordered based on the insertion order.
        self.assertEqual(
            list(layout_map.keys()), ["dense/kernel", "dense/bias"]
        )

        keys = []
        values = []
        for k, v in layout_map.items():
            keys.append(k)
            values.append(v)
        self.assertEqual(keys, ["dense/kernel", "dense/bias"])
        self.assertEqual(values, [self.sharded_2d, self.sharded_1d])
# @pytest.mark.skipif(
# backend.backend() != "tensorflow",
# reason="Backend specific test",
# )
# class TensorflowDistributionLibTest(testing.TestCase):
# def setUp(self):
# super().setUp()
# # Config virtual devices for testing.
# cpus = tf.config.list_physical_devices("cpu")
# context._reset_context()
# tf.config.set_logical_device_configuration(
# cpus[0], [tf.config.LogicalDeviceConfiguration()] * 8
# )
# dtensor.initialize_accelerator_system("cpu")
# def tearDown(self) -> None:
# super().tearDown()
# dtensor.shutdown_accelerator_system()
# def test_list_devices(self):
# self.assertEqual(len(distribution_lib.list_devices()), 8)
# self.assertEqual(len(distribution_lib.list_devices("cpu")), 8)
# self.assertEqual(len(distribution_lib.list_devices("cpu")), 8)
# def test_to_dtensor_mesh(self):
# devices = [f"cpu:{i}" for i in range(8)]
# shape = (4, 2)
# axis_names = ["batch", "model"]
# mesh = distribution_lib.DeviceMesh(shape, axis_names, devices)
# dtensor_mesh = backend_dlib._to_dtensor_mesh(mesh)
# self.assertIsInstance(dtensor_mesh, dtensor.Mesh)
# self.assertEqual(dtensor_mesh.shape(), list(shape))
# self.assertEqual(dtensor_mesh.dim_names, axis_names)
# def test_to_dtensor_layout(self):
# axes = ["data", None]
# mesh = distribution_lib.DeviceMesh(
# (4, 2), ["data", "model"], [f"cpu:{i}" for i in range(8)]
# )
# layout = distribution_lib.TensorLayout(axes, mesh)
# dtensor_layout = backend_dlib._to_dtensor_layout(layout)
# dtensor_mesh = backend_dlib._to_dtensor_mesh(mesh)
# self.assertEqual(
# dtensor_layout,
# dtensor.Layout(["data", dtensor.UNSHARDED], dtensor_mesh),
# )
# def test_validation_for_device_mesh(self):
# axes = ["data", None]
# layout = distribution_lib.TensorLayout(axes, device_mesh=None)
# with self.assertRaisesRegex(
# ValueError, "Cannot create sharding when device mesh is not set"
# ):
# backend_dlib._to_dtensor_layout(layout)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/testing/test_utils_test.py | keras/src/testing/test_utils_test.py | import numpy as np
from absl.testing import parameterized
from keras.src.testing import test_case
from keras.src.testing import test_utils
class GetTestDataTest(test_case.TestCase):
    """Tests for `test_utils.get_test_data` synthetic dataset generation."""

    def setUp(self):
        # Shared generation parameters used by every test below.
        self.train_samples = 100
        self.test_samples = 50
        self.input_shape = (28, 28)
        self.num_classes = 10

    def test_labels_within_range(self):
        """Check if labels are within valid range."""
        (_, y_train), (_, y_test) = test_utils.get_test_data(
            self.train_samples,
            self.test_samples,
            self.input_shape,
            self.num_classes,
        )
        # Labels must fall in [0, num_classes) for both splits.
        self.assertTrue(np.all(y_train < self.num_classes))
        self.assertTrue(np.all(y_train >= 0))
        self.assertTrue(np.all(y_test < self.num_classes))
        self.assertTrue(np.all(y_test >= 0))

    def test_edge_cases_for_zero_samples(self):
        """Test when train or test samples are zero."""
        (x_train, _), (x_test, _) = test_utils.get_test_data(
            0, self.test_samples, self.input_shape, self.num_classes
        )
        self.assertEqual(len(x_train), 0)

        (x_train, _), (x_test, _) = test_utils.get_test_data(
            self.train_samples, 0, self.input_shape, self.num_classes
        )
        self.assertEqual(len(x_test), 0)

    def test_get_test_data_returns_correct_number_of_samples(self):
        """Check if returned samples count is correct."""
        (x_train, y_train), (x_test, y_test) = test_utils.get_test_data(
            self.train_samples,
            self.test_samples,
            self.input_shape,
            self.num_classes,
        )
        self.assertEqual(len(x_train), self.train_samples)
        self.assertEqual(len(y_train), self.train_samples)
        self.assertEqual(len(x_test), self.test_samples)
        self.assertEqual(len(y_test), self.test_samples)

    def test_get_test_data_returns_correct_shape_of_data(self):
        """Check if returned data shape is correct."""
        (x_train, y_train), (x_test, y_test) = test_utils.get_test_data(
            self.train_samples,
            self.test_samples,
            self.input_shape,
            self.num_classes,
        )
        self.assertEqual(
            x_train.shape, (self.train_samples,) + self.input_shape
        )
        self.assertEqual(y_train.shape, (self.train_samples,))
        self.assertEqual(x_test.shape, (self.test_samples,) + self.input_shape)
        self.assertEqual(y_test.shape, (self.test_samples,))

    def test_get_test_data_returns_different_data_for_different_seeds(self):
        """Test variability with different seeds."""
        (x_train_1, y_train_1), (x_test_1, y_test_1) = test_utils.get_test_data(
            self.train_samples,
            self.test_samples,
            self.input_shape,
            self.num_classes,
            random_seed=1,
        )
        (x_train_2, y_train_2), (x_test_2, y_test_2) = test_utils.get_test_data(
            self.train_samples,
            self.test_samples,
            self.input_shape,
            self.num_classes,
            random_seed=2,
        )
        # Distinct seeds should yield distinct datasets.
        self.assertFalse(np.array_equal(x_train_1, x_train_2))
        self.assertFalse(np.array_equal(y_train_1, y_train_2))
        self.assertFalse(np.array_equal(x_test_1, x_test_2))
        self.assertFalse(np.array_equal(y_test_1, y_test_2))

    def test_get_test_data_returns_consistent_data_for_same_seed(self):
        """Test consistency with the same seed."""
        (x_train_1, y_train_1), (x_test_1, y_test_1) = test_utils.get_test_data(
            self.train_samples,
            self.test_samples,
            self.input_shape,
            self.num_classes,
            random_seed=1,
        )
        (x_train_2, y_train_2), (x_test_2, y_test_2) = test_utils.get_test_data(
            self.train_samples,
            self.test_samples,
            self.input_shape,
            self.num_classes,
            random_seed=1,
        )
        # Same seed implies byte-identical outputs.
        self.assertTrue(np.array_equal(x_train_1, x_train_2))
        self.assertTrue(np.array_equal(y_train_1, y_train_2))
        self.assertTrue(np.array_equal(x_test_1, x_test_2))
        self.assertTrue(np.array_equal(y_test_1, y_test_2))

    def test_input_shape_variations(self):
        """Check function for different input shapes."""
        input_shape_3d = (28, 28, 3)
        (x_train_3d, _), (_, _) = test_utils.get_test_data(
            self.train_samples,
            self.test_samples,
            input_shape_3d,
            self.num_classes,
        )
        self.assertEqual(
            x_train_3d.shape, (self.train_samples,) + input_shape_3d
        )

    def test_all_classes_represented(self):
        """Ensure all classes are represented in the data."""
        (_, y_train), (_, y_test) = test_utils.get_test_data(
            self.train_samples,
            self.test_samples,
            self.input_shape,
            self.num_classes,
        )
        self.assertEqual(len(np.unique(y_train)), self.num_classes)
        self.assertEqual(len(np.unique(y_test)), self.num_classes)

    def test_data_type(self):
        """Validate the type of the generated data."""
        (x_train, _), (x_test, _) = test_utils.get_test_data(
            self.train_samples,
            self.test_samples,
            self.input_shape,
            self.num_classes,
        )
        self.assertEqual(x_train.dtype, np.float32)
        self.assertEqual(x_test.dtype, np.float32)

    def test_label_type(self):
        """Validate label type of the generated labels."""
        (_, y_train), (_, y_test) = test_utils.get_test_data(
            self.train_samples,
            self.test_samples,
            self.input_shape,
            self.num_classes,
        )
        self.assertEqual(y_train.dtype, np.int64)
        self.assertEqual(y_test.dtype, np.int64)
class ClassDistributionTests(test_case.TestCase):
    """Tests for class balance and stratification of `get_test_data`."""

    def setUp(self):
        # Shared generation parameters used by every test below.
        self.train_samples = 100
        self.test_samples = 50
        self.input_shape = (28, 28)
        self.num_classes = 10

    def test_equal_class_distribution(self):
        """Verify equal class distribution in train and test sets."""
        (_, y_train), (_, y_test) = test_utils.get_test_data(
            self.train_samples,
            self.test_samples,
            self.input_shape,
            self.num_classes,
        )
        _, counts_train = np.unique(y_train, return_counts=True)
        _, counts_test = np.unique(y_test, return_counts=True)
        # Sample counts divide evenly, so every class gets the same count.
        self.assertTrue(
            np.all(counts_train == self.train_samples // self.num_classes)
        )
        self.assertTrue(
            np.all(counts_test == self.test_samples // self.num_classes)
        )

    def test_uneven_samples_class_distribution(self):
        """Check class distribution with uneven samples."""
        train_samples = 103
        test_samples = 52
        (_, y_train), (_, y_test) = test_utils.get_test_data(
            train_samples,
            test_samples,
            self.input_shape,
            self.num_classes,
        )
        _, counts_train = np.unique(y_train, return_counts=True)
        _, counts_test = np.unique(y_test, return_counts=True)
        # With a remainder, per-class counts may differ by at most one.
        self.assertTrue(np.max(counts_train) - np.min(counts_train) <= 1)
        self.assertTrue(np.max(counts_test) - np.min(counts_test) <= 1)

    def test_randomness_in_class_distribution(self):
        """Ensure class distribution isn't too deterministic."""
        # No seed is passed, so two consecutive calls should differ.
        (_, y_train_1), (_, y_test_1) = test_utils.get_test_data(
            self.train_samples,
            self.test_samples,
            self.input_shape,
            self.num_classes,
        )
        (_, y_train_2), (_, y_test_2) = test_utils.get_test_data(
            self.train_samples,
            self.test_samples,
            self.input_shape,
            self.num_classes,
        )
        self.assertFalse(np.array_equal(y_train_1, y_train_2))
        self.assertFalse(np.array_equal(y_test_1, y_test_2))

    def test_large_number_of_classes(self):
        """Validate function with a large number of classes."""
        num_classes = 150
        train_samples = (
            num_classes * 10
        )  # 10 samples for each class in training
        test_samples = num_classes * 5  # 5 samples for each class in testing
        (_, y_train), (_, y_test) = test_utils.get_test_data(
            train_samples,
            test_samples,
            self.input_shape,
            num_classes,
        )
        self.assertEqual(len(np.unique(y_train)), num_classes)
        self.assertEqual(len(np.unique(y_test)), num_classes)

    def test_single_class(self):
        """Test with a single class."""
        num_classes = 1
        (_, y_train), (_, y_test) = test_utils.get_test_data(
            self.train_samples,
            self.test_samples,
            self.input_shape,
            num_classes,
        )
        # All labels must be the only available class, 0.
        self.assertTrue(np.all(y_train == 0))
        self.assertTrue(np.all(y_test == 0))
class NamedProductTest(parameterized.TestCase):
    """Tests for `test_utils.named_product` name generation and products."""

    def test_test_cases(self):
        # Product of explicit test-case dicts with one keyword parameter.
        all_tests = test_utils.named_product(
            [
                {"testcase_name": "negative", "x": -1},
                {"testcase_name": "positive", "x": 1},
                {"testcase_name": "zero", "x": 0},
            ],
            numeral_type=[float, int],
        )
        names = [test["testcase_name"] for test in all_tests]
        # Names are joined with "_" in group order: positional args first,
        # then keyword values.
        self.assertListEqual(
            names,
            [
                "negative_float",
                "positive_float",
                "zero_float",
                "negative_int",
                "positive_int",
                "zero_int",
            ],
        )

    def test_test_cases_no_product(self):
        # A single keyword argument yields one case per value.
        all_tests = test_utils.named_product(numeral_type=[float, int])
        names = [test["testcase_name"] for test in all_tests]
        self.assertListEqual(names, ["float", "int"])

    @parameterized.named_parameters(
        test_utils.named_product(
            [
                {"testcase_name": "negative", "x": -1},
                {"testcase_name": "positive", "x": 1},
                {"testcase_name": "zero", "x": 0},
            ],
            numeral_type=[float, int],
        )
    )
    def test_via_decorator(self, x, numeral_type):
        # Each generated case supplies one (x, numeral_type) combination.
        self.assertIn(x, (-1, 1, 0))
        self.assertIn(numeral_type, (float, int))

    @parameterized.named_parameters(
        test_utils.named_product(numeral_type=[float, int])
    )
    def test_via_decorator_no_product(self, numeral_type):
        self.assertIn(numeral_type, (float, int))
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/testing/test_utils.py | keras/src/testing/test_utils.py | import numpy as np
def get_test_data(
    train_samples, test_samples, input_shape, num_classes, random_seed=None
):
    """Generates balanced, stratified synthetic test data to train a model on.

    Args:
        train_samples: Integer, how many training samples to generate.
        test_samples: Integer, how many test samples to generate.
        input_shape: Tuple of integers, shape of the inputs.
        num_classes: Integer, number of classes for the data and targets.
        random_seed: Integer, random seed used by Numpy to generate data.

    Returns:
        A tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
    """
    # NOTE: seeds NumPy's *global* RNG — callers relying on global random
    # state should be aware of this side effect.
    np.random.seed(random_seed)

    # Total samples
    total_samples = train_samples + test_samples

    # Ensure that we generate a balanced dataset: one label block per class.
    # Fixed: use int64 throughout instead of mixing int32/int64 and relying
    # on np.concatenate's type promotion for the final label dtype.
    samples_per_class = total_samples // num_classes
    y = np.array(
        [i for i in range(num_classes) for _ in range(samples_per_class)],
        dtype=np.int64,
    )

    # Generate extra samples in a deterministic manner
    extra_samples = total_samples - len(y)
    y_extra = np.array(
        [i % num_classes for i in range(extra_samples)], dtype=np.int64
    )
    y = np.concatenate([y, y_extra])

    # Generate data: one random "template" per class, plus Gaussian noise
    # per sample so samples of the same class cluster together.
    templates = 2 * num_classes * np.random.random((num_classes,) + input_shape)
    x = np.zeros((total_samples,) + input_shape, dtype=np.float32)
    for i in range(total_samples):
        x[i] = templates[y[i]] + np.random.normal(
            loc=0, scale=1.0, size=input_shape
        )

    # Shuffle the entire dataset to ensure randomness based on seed
    indices = np.arange(total_samples)
    np.random.shuffle(indices)
    x, y = x[indices], y[indices]

    # Stratified split: `train_samples // num_classes` items of each class
    # go to training; the remainder of each class goes to testing.
    x_train, y_train, x_test, y_test = [], [], [], []
    for cls in range(num_classes):
        cls_indices = np.where(y == cls)[0]
        np.random.shuffle(cls_indices)
        train_count = int(train_samples / num_classes)
        x_train.extend(x[cls_indices[:train_count]])
        y_train.extend(y[cls_indices[:train_count]])
        x_test.extend(x[cls_indices[train_count:]])
        y_test.extend(y[cls_indices[train_count:]])

    # Convert to numpy arrays
    x_train, y_train = np.array(x_train), np.array(y_train)
    x_test, y_test = np.array(x_test), np.array(y_test)

    # Shuffle training and test sets after stratified split
    train_indices = np.arange(len(x_train))
    test_indices = np.arange(len(x_test))
    np.random.shuffle(train_indices)
    np.random.shuffle(test_indices)
    x_train, y_train = x_train[train_indices], y_train[train_indices]
    x_test, y_test = x_test[test_indices], y_test[test_indices]

    return (x_train, y_train), (x_test, y_test)
def named_product(*args, **kwargs):
    """Cartesian product of test parameters with generated test case names.

    Drop-in helper for `@parameterized.named_parameters`, replacing
    `@parameterized.product` while providing explicit test case names.

    Each positional argument is a sequence of keyword-arg dicts, each of
    which must carry a `"testcase_name"` entry. Each keyword argument maps
    a parameter name to a list/tuple of candidate values; a lowercase
    string form of each value is used for naming. One test case is
    produced per combination, with the individual names joined by `"_"`.

    For example:

    ```
    class NamedExample(parameterized.TestCase):
        @parameterized.named_parameters(
            named_product(
                [
                    {'testcase_name': 'negative', 'x': -1},
                    {'testcase_name': 'positive', 'x': 1},
                    {'testcase_name': 'zero', 'x': 0},
                ],
                numeral_type=[float, int],
            )
        )
        def test_conversion(self, x, numeral_type):
            self.assertEqual(numeral_type(x), x)
    ```

    produces six tests (note that absl will reorder them by name):
    `test_conversion_negative_float`, `test_conversion_positive_float`,
    `test_conversion_zero_float`, `test_conversion_negative_int`,
    `test_conversion_positive_int` and `test_conversion_zero_int`.

    Args:
        *args: Each positional parameter is a sequence of keyword arg
            dicts; every generated test case includes exactly one dict
            from each positional parameter.
        **kwargs: A mapping of parameter names to lists/tuples of values.

    Returns:
        A list of maps for the test parameters combinations to pass to
        `@parameterized.named_parameters`.
    """

    def _case_name(value):
        # Prefer the `__name__` of classes/functions; fall back to `str`.
        if hasattr(value, "__name__"):
            return value.__name__.lower()
        return str(value).lower()

    # Normalize keyword arguments into the same shape as positional ones:
    # a sequence of dicts, each carrying its own "testcase_name".
    groups = list(args)
    for key, values in kwargs.items():
        groups.append(
            [{"testcase_name": _case_name(v), key: v} for v in values]
        )

    # Fold each group into the running product, extending the name.
    combos = [{}]
    for group in groups:
        next_combos = []
        for case in group:
            for base in combos:
                merged = {**base, **case}
                prefix = base.get("testcase_name", "")
                if prefix:
                    merged["testcase_name"] = (
                        prefix + "_" + case["testcase_name"]
                    )
                next_combos.append(merged)
        combos = next_combos
    return combos
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/testing/test_case.py | keras/src/testing/test_case.py | import json
import shutil
import tempfile
import unittest
from pathlib import Path
import numpy as np
from absl.testing import parameterized
from keras.src import backend
from keras.src import distribution
from keras.src import ops
from keras.src import tree
from keras.src import utils
from keras.src.backend.common import is_float_dtype
from keras.src.backend.common import standardize_dtype
from keras.src.backend.common.global_state import clear_session
from keras.src.backend.common.keras_tensor import KerasTensor
from keras.src.losses.loss import Loss
from keras.src.models import Model
from keras.src.utils import traceback_utils
class TestCase(parameterized.TestCase, unittest.TestCase):
maxDiff = None
def __init__(self, *args, **kwargs):
    # Pass-through constructor; all setup happens in `setUp`.
    super().__init__(*args, **kwargs)
def setUp(self):
    """Reset global Keras state so that test cases are independent."""
    # Clear global state so that test cases are independent.
    # Required for the jit-enabled torch tests since dynamo has a global
    # cache for guards, compiled fn, etc.
    clear_session(free_memory=False)
    # Disable Keras traceback filtering so failures show full stacks.
    if traceback_utils.is_traceback_filtering_enabled():
        traceback_utils.disable_traceback_filtering()
def get_temp_dir(self):
    """Create a temporary directory that is removed at test teardown."""
    path = tempfile.mkdtemp()
    # Register removal directly with the cleanup machinery.
    self.addCleanup(shutil.rmtree, path)
    return path
def assertAllClose(
    self,
    x1,
    x2,
    atol=1e-6,
    rtol=1e-6,
    tpu_atol=None,
    tpu_rtol=None,
    msg=None,
):
    """Assert that two tensors/arrays are element-wise close.

    Args:
        x1: First value; converted to numpy if not already an ndarray.
        x2: Second value; converted to numpy if not already an ndarray.
        atol: Absolute tolerance.
        rtol: Relative tolerance.
        tpu_atol: Optional looser absolute tolerance used when running
            on TPU.
        tpu_rtol: Optional looser relative tolerance used when running
            on TPU.
        msg: Optional error message appended on failure.
    """
    # TPU numerics are typically less precise; widen tolerances if given.
    if tpu_atol is not None and uses_tpu():
        atol = tpu_atol
    if tpu_rtol is not None and uses_tpu():
        rtol = tpu_rtol
    if not isinstance(x1, np.ndarray):
        x1 = backend.convert_to_numpy(x1)
    if not isinstance(x2, np.ndarray):
        x2 = backend.convert_to_numpy(x2)
    np.testing.assert_allclose(x1, x2, atol=atol, rtol=rtol, err_msg=msg)
def assertNotAllClose(self, x1, x2, atol=1e-6, rtol=1e-6, msg=None):
    """Assert that `x1` and `x2` differ at one or more elements."""
    try:
        self.assertAllClose(x1, x2, atol=atol, rtol=rtol, msg=msg)
    except AssertionError:
        # The values differ somewhere — exactly what we want.
        return
    detail = msg or ""
    raise AssertionError(
        f"The two values are close at all elements. \n{detail}.\nValues: {x1}"
    )
def assertAlmostEqual(self, x1, x2, decimal=3, tpu_decimal=None, msg=None):
    """Assert near-equality up to `decimal` places (optionally looser on TPU).

    Args:
        x1: First value; converted to numpy if needed.
        x2: Second value; converted to numpy if needed.
        decimal: Number of decimal places used for the comparison.
        tpu_decimal: Optional looser precision applied when running on TPU.
        msg: Optional message appended to the failure output.
    """
    if tpu_decimal is not None and uses_tpu():
        decimal = tpu_decimal
    msg = msg or ""
    if not isinstance(x1, np.ndarray):
        x1 = backend.convert_to_numpy(x1)
    if not isinstance(x2, np.ndarray):
        x2 = backend.convert_to_numpy(x2)
    np.testing.assert_almost_equal(x1, x2, decimal=decimal, err_msg=msg)
def assertAllEqual(self, x1, x2, msg=None):
    """Assert element-wise equality of two (possibly nested) sequences.

    Recurses into nested lists/tuples; leaf elements are converted to
    numpy via the backend before being compared.
    """
    self.assertEqual(len(x1), len(x2), msg=msg)
    for e1, e2 in zip(x1, x2):
        if isinstance(e1, (list, tuple)) or isinstance(e2, (list, tuple)):
            # Recurse into nested structures.
            self.assertAllEqual(e1, e2, msg=msg)
        else:
            e1 = backend.convert_to_numpy(e1)
            e2 = backend.convert_to_numpy(e2)
            self.assertEqual(e1, e2, msg=msg)
def assertLen(self, iterable, expected_len, msg=None):
    """Assert that `iterable` contains exactly `expected_len` items."""
    actual_len = len(iterable)
    self.assertEqual(actual_len, expected_len, msg=msg)
def assertSparse(self, x, sparse=True):
    """Assert that `x` is (or is not) a sparse tensor.

    Handles symbolic `KerasTensor`s and backend-native tensors for the
    TensorFlow and JAX backends; other backends cannot represent sparse
    tensors, so `sparse=True` fails there.
    """
    if isinstance(x, KerasTensor):
        self.assertEqual(x.sparse, sparse)
    elif backend.backend() == "tensorflow":
        import tensorflow as tf

        if sparse:
            self.assertIsInstance(x, tf.SparseTensor)
        else:
            self.assertNotIsInstance(x, tf.SparseTensor)
    elif backend.backend() == "jax":
        import jax.experimental.sparse as jax_sparse

        if sparse:
            self.assertIsInstance(x, jax_sparse.JAXSparse)
        else:
            self.assertNotIsInstance(x, jax_sparse.JAXSparse)
    else:
        # Backend has no sparse representation; only `sparse=False` passes.
        self.assertFalse(
            sparse,
            f"Backend {backend.backend()} does not support sparse tensors",
        )
def assertRagged(self, x, ragged=True):
    """Assert that `x` is (or is not) a ragged tensor.

    Handles symbolic `KerasTensor`s and TensorFlow's `tf.RaggedTensor`;
    other backends cannot represent ragged tensors, so `ragged=True`
    fails there.
    """
    if isinstance(x, KerasTensor):
        self.assertEqual(x.ragged, ragged)
    elif backend.backend() == "tensorflow":
        import tensorflow as tf

        if ragged:
            self.assertIsInstance(x, tf.RaggedTensor)
        else:
            self.assertNotIsInstance(x, tf.RaggedTensor)
    else:
        # Backend has no ragged representation; only `ragged=False` passes.
        self.assertFalse(
            ragged,
            f"Backend {backend.backend()} does not support ragged tensors",
        )
def assertDType(self, x, dtype, msg=None):
    """Assert that `x` has the given dtype after standardization."""
    if hasattr(x, "dtype"):
        x_dtype = backend.standardize_dtype(x.dtype)
    else:
        # If x is a python number
        x_dtype = backend.standardize_dtype(type(x))
    standardized_dtype = backend.standardize_dtype(dtype)
    default_msg = (
        "The dtype of x does not match the expected one. "
        f"Received: x.dtype={x_dtype} and dtype={dtype}"
    )
    msg = msg or default_msg
    self.assertEqual(x_dtype, standardized_dtype, msg=msg)
def assertFileExists(self, path):
    """Assert that `path` refers to an existing regular file."""
    if Path(path).is_file():
        return
    raise AssertionError(f"File {path} does not exist")
def run_class_serialization_test(self, instance, custom_objects=None):
    """Round-trip `instance` through config and serialization APIs.

    Verifies both `get_config`/`from_config` and
    `serialize_keras_object`/`deserialize_keras_object` reproduce an
    object with an identical config (compared as JSON) and the same
    attribute surface (`dir`).

    Args:
        instance: The object to round-trip.
        custom_objects: Optional dict of custom objects required during
            deserialization.

    Returns:
        The instance revived via the serialization round-trip.
    """
    from keras.src.saving import custom_object_scope
    from keras.src.saving import deserialize_keras_object
    from keras.src.saving import serialize_keras_object

    # get_config roundtrip
    cls = instance.__class__
    config = instance.get_config()
    config_json = to_json_with_tuples(config)
    ref_dir = dir(instance)[:]
    with custom_object_scope(custom_objects):
        revived_instance = cls.from_config(config)
    revived_config = revived_instance.get_config()
    revived_config_json = to_json_with_tuples(revived_config)
    self.assertEqual(config_json, revived_config_json)
    self.assertEqual(set(ref_dir), set(dir(revived_instance)))

    # serialization roundtrip
    serialized = serialize_keras_object(instance)
    serialized_json = to_json_with_tuples(serialized)
    with custom_object_scope(custom_objects):
        revived_instance = deserialize_keras_object(
            from_json_with_tuples(serialized_json)
        )
    revived_config = revived_instance.get_config()
    revived_config_json = to_json_with_tuples(revived_config)
    self.assertEqual(config_json, revived_config_json)
    new_dir = dir(revived_instance)[:]
    # `__annotations__` may appear on only one side; ignore it when
    # comparing attribute surfaces.
    for lst in [ref_dir, new_dir]:
        if "__annotations__" in lst:
            lst.remove("__annotations__")
    self.assertEqual(set(ref_dir), set(new_dir))
    return revived_instance
def run_layer_test(
self,
layer_cls,
init_kwargs,
input_shape=None,
input_dtype=None,
input_sparse=False,
input_ragged=False,
input_data=None,
call_kwargs=None,
expected_output_shape=None,
expected_output_dtype=None,
expected_output_sparse=False,
expected_output_ragged=False,
expected_output=None,
expected_num_trainable_weights=None,
expected_num_non_trainable_weights=None,
expected_num_non_trainable_variables=None,
expected_num_seed_generators=None,
expected_num_losses=None,
supports_masking=None,
expected_mask_shape=None,
custom_objects=None,
run_training_check=True,
run_mixed_precision_check=True,
assert_built_after_instantiation=False,
tpu_atol=None,
tpu_rtol=None,
):
"""Run basic checks on a layer.
Args:
layer_cls: The class of the layer to test.
init_kwargs: Dict of arguments to be used to
instantiate the layer.
input_shape: Shape tuple (or list/dict of shape tuples)
to call the layer on.
input_dtype: Corresponding input dtype.
input_sparse: Whether the input is a sparse tensor (this requires
the backend to support sparse tensors).
input_ragged: Whether the input is a ragged tensor (this requires
the backend to support ragged tensors).
input_data: Tensor (or list/dict of tensors)
to call the layer on.
call_kwargs: Dict of arguments to use when calling the
layer (does not include the first input tensor argument)
expected_output_shape: Shape tuple
(or list/dict of shape tuples)
expected as output.
expected_output_dtype: dtype expected as output.
expected_output_sparse: Whether the output is expected to be sparse
(this requires the backend to support sparse tensors).
expected_output_ragged: Whether the output is expected to be ragged
(this requires the backend to support ragged tensors).
expected_output: Expected output tensor -- only
to be specified if input_data is provided.
expected_num_trainable_weights: Expected number
of trainable weights of the layer once built.
expected_num_non_trainable_weights: Expected number
of non-trainable weights of the layer once built.
expected_num_seed_generators: Expected number of
SeedGenerators objects of the layer once built.
expected_num_losses: Expected number of loss tensors
produced when calling the layer.
supports_masking: If True, will check that the layer
supports masking.
expected_mask_shape: Expected mask shape tuple
returned by compute_mask() (only supports 1 shape).
custom_objects: Dict of any custom objects to be
considered during deserialization.
run_training_check: Whether to attempt to train the layer
(if an input shape or input data was provided).
run_mixed_precision_check: Whether to test the layer with a mixed
precision dtype policy.
assert_built_after_instantiation: Whether to assert `built=True`
after the layer's instantiation.
"""
if input_shape is not None and input_data is not None:
raise ValueError(
"input_shape and input_data cannot be passed at the same time."
)
if expected_output_shape is not None and expected_output is not None:
raise ValueError(
"expected_output_shape and expected_output cannot be passed "
"at the same time."
)
if expected_output is not None and input_data is None:
raise ValueError(
"In order to use expected_output, input_data must be provided."
)
if expected_mask_shape is not None and supports_masking is not True:
raise ValueError(
"In order to use expected_mask_shape, supports_masking "
"must be True."
)
init_kwargs = init_kwargs or {}
call_kwargs = call_kwargs or {}
if input_shape is not None and input_dtype is not None:
if isinstance(input_shape, tuple) and is_shape_tuple(
input_shape[0]
):
self.assertIsInstance(input_dtype, tuple)
self.assertEqual(
len(input_shape),
len(input_dtype),
msg="The number of input shapes and dtypes does not match",
)
elif isinstance(input_shape, dict):
self.assertIsInstance(input_dtype, dict)
self.assertEqual(
set(input_shape.keys()),
set(input_dtype.keys()),
msg="The number of input shapes and dtypes does not match",
)
elif isinstance(input_shape, list):
self.assertIsInstance(input_dtype, list)
self.assertEqual(
len(input_shape),
len(input_dtype),
msg="The number of input shapes and dtypes does not match",
)
elif not isinstance(input_shape, tuple):
raise ValueError("The type of input_shape is not supported")
if input_shape is not None and input_dtype is None:
input_dtype = tree.map_shape_structure(
lambda _: "float32", input_shape
)
# Estimate actual number of weights, variables, seed generators if
# expected ones not set. When using layers uses composition it should
# build each sublayer manually.
if input_data is not None or input_shape is not None:
if input_data is None:
input_data = create_eager_tensors(
input_shape, input_dtype, input_sparse, input_ragged
)
layer = layer_cls(**init_kwargs)
if isinstance(input_data, dict):
layer(**input_data, **call_kwargs)
else:
layer(input_data, **call_kwargs)
if expected_num_trainable_weights is None:
expected_num_trainable_weights = len(layer.trainable_weights)
if expected_num_non_trainable_weights is None:
expected_num_non_trainable_weights = len(
layer.non_trainable_weights
)
if expected_num_non_trainable_variables is None:
expected_num_non_trainable_variables = len(
layer.non_trainable_variables
)
if expected_num_seed_generators is None:
expected_num_seed_generators = len(get_seed_generators(layer))
# Serialization test.
layer = layer_cls(**init_kwargs)
self.run_class_serialization_test(layer, custom_objects)
# Basic masking test.
if supports_masking is not None:
self.assertEqual(
layer.supports_masking,
supports_masking,
msg="Unexpected supports_masking value",
)
def run_build_asserts(layer):
self.assertTrue(layer.built)
if expected_num_trainable_weights is not None:
self.assertLen(
layer.trainable_weights,
expected_num_trainable_weights,
msg="Unexpected number of trainable_weights",
)
if expected_num_non_trainable_weights is not None:
self.assertLen(
layer.non_trainable_weights,
expected_num_non_trainable_weights,
msg="Unexpected number of non_trainable_weights",
)
if expected_num_non_trainable_variables is not None:
self.assertLen(
layer.non_trainable_variables,
expected_num_non_trainable_variables,
msg="Unexpected number of non_trainable_variables",
)
if expected_num_seed_generators is not None:
self.assertLen(
get_seed_generators(layer),
expected_num_seed_generators,
msg="Unexpected number of seed_generators",
)
if (
backend.backend() == "torch"
and expected_num_trainable_weights is not None
and expected_num_non_trainable_weights is not None
and expected_num_seed_generators is not None
):
self.assertLen(
layer.torch_params,
expected_num_trainable_weights
+ expected_num_non_trainable_weights
+ expected_num_seed_generators,
msg="Unexpected number of torch_params",
)
def run_output_asserts(
layer, output, eager=False, tpu_atol=None, tpu_rtol=None
):
if expected_output_shape is not None:
def verify_shape(expected_shape, x):
shape = x.shape
if len(shape) != len(expected_shape):
return False
for expected_dim, dim in zip(expected_shape, shape):
if expected_dim is not None and expected_dim != dim:
return False
return True
shapes_match = tree.map_structure_up_to(
output, verify_shape, expected_output_shape, output
)
self.assertTrue(
all(tree.flatten(shapes_match)),
msg=f"Expected output shapes {expected_output_shape} but "
f"received {tree.map_structure(lambda x: x.shape, output)}",
)
if expected_output_dtype is not None:
def verify_dtype(expected_dtype, x):
return expected_dtype == backend.standardize_dtype(x.dtype)
dtypes_match = tree.map_structure(
verify_dtype, expected_output_dtype, output
)
self.assertTrue(
all(tree.flatten(dtypes_match)),
msg=f"Expected output dtypes {expected_output_dtype} but "
f"received {tree.map_structure(lambda x: x.dtype, output)}",
)
if expected_output_sparse:
for x in tree.flatten(output):
self.assertSparse(x)
if expected_output_ragged:
for x in tree.flatten(output):
self.assertRagged(x)
if eager:
if expected_output is not None:
self.assertEqual(type(expected_output), type(output))
for ref_v, v in zip(
tree.flatten(expected_output), tree.flatten(output)
):
self.assertAllClose(
ref_v,
v,
msg="Unexpected output value",
tpu_atol=tpu_atol,
tpu_rtol=tpu_rtol,
)
if expected_num_losses is not None:
self.assertLen(layer.losses, expected_num_losses)
def run_training_step(layer, input_data, output_data):
    # Closure over `run_layer_test` locals (`input_sparse`). Wraps `layer`
    # in a minimal Model and runs a single `fit` step to verify the layer
    # survives compiled training.
    class TestModel(Model):
        def __init__(self, layer):
            super().__init__()
            self.layer = layer

        def call(self, x, training=False):
            return self.layer(x, training=training)

    model = TestModel(layer)

    data = (input_data, output_data)
    if backend.backend() == "torch":
        # Torch training works from numpy arrays here.
        data = tree.map_structure(backend.convert_to_numpy, data)

    def data_generator():
        while True:
            yield data

    # Single op loss to avoid compilation issues with ragged / sparse.
    class TestLoss(Loss):
        def __call__(self, y_true, y_pred, sample_weight=None):
            return ops.sum(y_pred)

    # test the "default" path for each backend by setting
    # jit_compile="auto".
    # for tensorflow and jax backends auto is jitted
    # Note that tensorflow cannot be jitted with sparse tensors
    # for torch backend auto is eager
    #
    # NB: for torch, jit_compile=True turns on torchdynamo
    # which may not always succeed in tracing depending
    # on the model. Run your program with these env vars
    # to get debug traces of dynamo:
    # TORCH_LOGS="+dynamo"
    # TORCHDYNAMO_VERBOSE=1
    # TORCHDYNAMO_REPORT_GUARD_FAILURES=1
    jit_compile = "auto"
    if backend.backend() == "tensorflow" and input_sparse:
        jit_compile = False
    model.compile(
        optimizer="sgd", loss=TestLoss(), jit_compile=jit_compile
    )
    # One step is enough to exercise forward + backward + update.
    model.fit(data_generator(), steps_per_epoch=1, verbose=0)
# Build test.
if input_data is not None or input_shape is not None:
if input_shape is None:
build_shape = tree.map_structure(
lambda x: ops.shape(x), input_data
)
else:
build_shape = input_shape
layer = layer_cls(**init_kwargs)
if isinstance(build_shape, dict):
layer.build(**build_shape)
else:
layer.build(build_shape)
run_build_asserts(layer)
# Symbolic call test.
if input_shape is None:
keras_tensor_inputs = tree.map_structure(
lambda x: create_keras_tensors(
ops.shape(x), x.dtype, input_sparse, input_ragged
),
input_data,
)
else:
keras_tensor_inputs = create_keras_tensors(
input_shape, input_dtype, input_sparse, input_ragged
)
layer = layer_cls(**init_kwargs)
if isinstance(keras_tensor_inputs, dict):
keras_tensor_outputs = layer(
**keras_tensor_inputs, **call_kwargs
)
else:
keras_tensor_outputs = layer(keras_tensor_inputs, **call_kwargs)
run_build_asserts(layer)
run_output_asserts(layer, keras_tensor_outputs, eager=False)
if expected_mask_shape is not None:
output_mask = layer.compute_mask(keras_tensor_inputs)
self.assertEqual(expected_mask_shape, output_mask.shape)
# The stateless layers should be built after instantiation.
if assert_built_after_instantiation:
layer = layer_cls(**init_kwargs)
self.assertTrue(
layer.built,
msg=(
f"{type(layer)} is stateless, so it should be built "
"after instantiation."
),
)
# Ensure that the subclass layer doesn't mark itself as built
# when `build` is overridden.
class ModifiedBuildLayer(layer_cls):
def build(self, *args, **kwargs):
pass
layer = ModifiedBuildLayer(**init_kwargs)
self.assertFalse(
layer.built,
msg=(
f"The `build` of {type(layer)} is overriden, so it "
"should not be built after instantiation."
),
)
# Eager call test and compiled training test.
if input_data is not None or input_shape is not None:
if input_data is None:
input_data = create_eager_tensors(
input_shape, input_dtype, input_sparse
)
layer = layer_cls(**init_kwargs)
if isinstance(input_data, dict):
output_data = layer(**input_data, **call_kwargs)
else:
output_data = layer(input_data, **call_kwargs)
run_output_asserts(
layer,
output_data,
eager=True,
tpu_atol=tpu_atol,
tpu_rtol=tpu_rtol,
)
if run_training_check:
run_training_step(layer, input_data, output_data)
# Never test mixed precision on torch CPU. Torch lacks support.
if run_mixed_precision_check and backend.backend() == "torch":
import torch
run_mixed_precision_check = torch.cuda.is_available()
if run_mixed_precision_check:
layer = layer_cls(**{**init_kwargs, "dtype": "mixed_float16"})
input_spec = tree.map_structure(
lambda spec: KerasTensor(
spec.shape,
dtype=(
layer.compute_dtype
if layer.autocast
and backend.is_float_dtype(spec.dtype)
else spec.dtype
),
),
keras_tensor_inputs,
)
if isinstance(input_data, dict):
output_data = layer(**input_data, **call_kwargs)
output_spec = layer.compute_output_spec(**input_spec)
else:
output_data = layer(input_data, **call_kwargs)
output_spec = layer.compute_output_spec(input_spec)
for tensor, spec in zip(
tree.flatten(output_data), tree.flatten(output_spec)
):
dtype = standardize_dtype(tensor.dtype)
self.assertEqual(
dtype,
spec.dtype,
f"expected output dtype {spec.dtype}, got {dtype}",
)
for weight in layer.weights:
dtype = standardize_dtype(weight.dtype)
if is_float_dtype(dtype):
self.assertEqual(dtype, "float32")
def tensorflow_uses_gpu():
    """Return True when the TensorFlow backend is active and a GPU is used."""
    if backend.backend() != "tensorflow":
        return False
    return uses_gpu()
def jax_uses_gpu():
    """Return True when the JAX backend is active and a GPU is used."""
    if backend.backend() != "jax":
        return False
    return uses_gpu()
def torch_uses_gpu():
    """Return True when the torch backend is active and running on CUDA."""
    using_torch = backend.backend() == "torch"
    if not using_torch:
        return False

    from keras.src.backend.torch.core import get_device

    device = get_device()
    return device == "cuda"
def uses_gpu():
    """Condition used to skip tests when using the GPU."""
    device_names = distribution.list_devices()
    return any(name.startswith("gpu") for name in device_names)
def uses_tpu():
    """Condition used to skip tests when using the TPU."""
    # Some distribution backends do not implement `list_devices`;
    # treat that as "no TPU available".
    try:
        return any(
            name.startswith("tpu") for name in distribution.list_devices()
        )
    except AttributeError:
        return False
def uses_cpu():
    """Return True when a CPU device is visible to the distribution API."""
    device_names = distribution.list_devices()
    return any(name.startswith("cpu") for name in device_names)
def create_keras_tensors(input_shape, dtype, sparse, ragged):
    """Build `KerasTensor` placeholders mirroring `input_shape`/`dtype`."""
    if isinstance(input_shape, dict):
        # Dict inputs use "<name>_shape" keys; strip the suffix to recover
        # the call-argument name.
        tensors = {}
        for key, shape in input_shape.items():
            arg_name = utils.removesuffix(key, "_shape")
            tensors[arg_name] = KerasTensor(
                shape, dtype=dtype[key], sparse=sparse, ragged=ragged
            )
        return tensors
    return map_shape_dtype_structure(
        lambda shape, dt: KerasTensor(
            shape, dtype=dt, sparse=sparse, ragged=ragged
        ),
        input_shape,
        dtype,
    )
def create_eager_tensors(input_shape, dtype, sparse, ragged):
    """Create concrete (eager) tensors matching `input_shape` / `dtype`.

    Dense tensors are supported on all backends; sparse tensors only on
    the tensorflow and jax backends; ragged tensors only on tensorflow.
    Unsupported combinations raise `ValueError`. Dict `input_shape`s map
    "<name>_shape" keys to call-argument names.
    """
    from keras.src.backend import random

    # Only standard numeric dtypes are supported for random generation.
    if set(tree.flatten(dtype)).difference(
        [
            "float16",
            "float32",
            "float64",
            "int8",
            "uint8",
            "int16",
            "uint16",
            "int32",
            "uint32",
            "int64",
            "uint64",
        ]
    ):
        raise ValueError(
            "dtype must be a standard float or int dtype. "
            f"Received: dtype={dtype}"
        )

    if sparse:
        if backend.backend() == "tensorflow":
            import tensorflow as tf

            def create_fn(shape, dt):
                # Fixed seed for reproducibility; zero out ~30% of entries
                # so the sparse tensor has genuine structural zeros.
                rng = np.random.default_rng(0)
                x = (4 * rng.standard_normal(shape)).astype(dt)
                x = np.multiply(x, rng.random(shape) < 0.7)
                return tf.sparse.from_dense(x)

        elif backend.backend() == "jax":
            import jax.experimental.sparse as jax_sparse

            def create_fn(shape, dt):
                rng = np.random.default_rng(0)
                x = (4 * rng.standard_normal(shape)).astype(dt)
                x = np.multiply(x, rng.random(shape) < 0.7)
                return jax_sparse.BCOO.fromdense(x, n_batch=1)

        else:
            raise ValueError(
                f"Sparse is unsupported with backend {backend.backend()}"
            )
    elif ragged:
        if backend.backend() == "tensorflow":
            import tensorflow as tf

            def create_fn(shape, dt):
                rng = np.random.default_rng(0)
                x = (4 * rng.standard_normal(shape)).astype(dt)
                x = np.multiply(x, rng.random(shape) < 0.7)
                # Zeros introduced above become the ragged padding.
                return tf.RaggedTensor.from_tensor(x, padding=0)

        else:
            raise ValueError(
                f"Ragged is unsupported with backend {backend.backend()}"
            )
    else:

        def create_fn(shape, dt):
            # Sample uniformly in float32, then cast to the target dtype
            # (also works for integer dtypes).
            return ops.cast(
                random.uniform(shape, dtype="float32") * 3, dtype=dt
            )

    if isinstance(input_shape, dict):
        # "<name>_shape" keys map to call-argument names.
        return {
            utils.removesuffix(k, "_shape"): create_fn(v, dtype[k])
            for k, v in input_shape.items()
        }
    return map_shape_dtype_structure(create_fn, input_shape, dtype)
def is_shape_tuple(x):
    """Return True if `x` is a flat list/tuple of ints and/or `None`."""
    if not isinstance(x, (list, tuple)):
        return False
    return all(e is None or isinstance(e, int) for e in x)
def map_shape_dtype_structure(fn, shape, dtype):
    """Variant of tree.map_structure that operates on shape tuples."""
    # A flat tuple/list of ints/None is a leaf: apply `fn` directly.
    if is_shape_tuple(shape):
        return fn(tuple(shape), dtype)
    if isinstance(shape, list):
        return [
            map_shape_dtype_structure(fn, sub_shape, sub_dtype)
            for sub_shape, sub_dtype in zip(shape, dtype)
        ]
    if isinstance(shape, tuple):
        return tuple(
            map_shape_dtype_structure(fn, sub_shape, sub_dtype)
            for sub_shape, sub_dtype in zip(shape, dtype)
        )
    if isinstance(shape, dict):
        return {
            key: map_shape_dtype_structure(fn, sub_shape, dtype[key])
            for key, sub_shape in shape.items()
        }
    raise ValueError(
        f"Cannot map function to unknown objects {shape} and {dtype}"
    )
def get_seed_generators(layer):
    """Get a List of all seed generators in the layer recursively."""
    found = []
    visited_ids = set()
    for sublayer in layer._flatten_layers(True, True):
        for generator in sublayer._seed_generators:
            # Deduplicate by identity: the same generator object may be
            # reachable through several sublayers.
            key = id(generator)
            if key in visited_ids:
                continue
            visited_ids.add(key)
            found.append(generator)
    return found
def to_json_with_tuples(value):
    """Serialize `value` to JSON while preserving tuples.

    Tuples are encoded as `{"__class__": "tuple", "__value__": [...]}` so
    that `from_json_with_tuples` can round-trip them (plain JSON would
    silently turn tuples into lists).

    Fix over the previous version: tuple elements are now encoded
    recursively, so tuples nested inside tuples are tagged too instead of
    degrading to lists on reload.

    Args:
        value: A JSON-serializable structure, possibly containing tuples.

    Returns:
        A JSON string with sorted keys and an indent of 4.
    """

    def _tuple_encode(obj):
        if isinstance(obj, tuple):
            # Recurse into the elements so nested tuples are tagged too.
            return {
                "__class__": "tuple",
                "__value__": [_tuple_encode(e) for e in obj],
            }
        if isinstance(obj, list):
            return [_tuple_encode(e) for e in obj]
        if isinstance(obj, dict):
            return {key: _tuple_encode(v) for key, v in obj.items()}
        return obj

    class _PreserveTupleJsonEncoder(json.JSONEncoder):
        def encode(self, obj):
            # Pre-process the structure before the standard encoding pass.
            obj = _tuple_encode(obj)
            return super().encode(obj)

    return _PreserveTupleJsonEncoder(sort_keys=True, indent=4).encode(value)
def from_json_with_tuples(value):
    """Deserialize JSON produced by `to_json_with_tuples`.

    Dicts tagged as `{"__class__": "tuple", "__value__": [...]}` are
    restored as tuples; everything else is returned unchanged. Because
    `object_hook` runs bottom-up, nested tagged dicts are restored too.

    Fix over the previous version: the decoder now checks that
    `"__class__"` is exactly `"tuple"` instead of only checking for the
    presence of the keys, so unrelated dicts that happen to contain both
    keys are no longer corrupted.

    Args:
        value: JSON string to decode.

    Returns:
        The decoded Python object.
    """

    def _tuple_decode(obj):
        if not isinstance(obj, dict):
            return obj
        if obj.get("__class__") != "tuple" or "__value__" not in obj:
            return obj
        return tuple(obj["__value__"])

    return json.loads(value, object_hook=_tuple_decode)
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/testing/__init__.py | keras/src/testing/__init__.py | from keras.src.testing.test_case import TestCase
from keras.src.testing.test_case import jax_uses_gpu
from keras.src.testing.test_case import tensorflow_uses_gpu
from keras.src.testing.test_case import torch_uses_gpu
from keras.src.testing.test_case import uses_gpu
from keras.src.testing.test_case import uses_tpu
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/input_spec.py | keras/src/layers/input_spec.py | from keras.src import backend
from keras.src import tree
from keras.src.api_export import keras_export
@keras_export(["keras.InputSpec", "keras.layers.InputSpec"])
class InputSpec:
    """Specifies the rank, dtype and shape of every input to a layer.

    Layers can expose (if appropriate) an `input_spec` attribute:
    an instance of `InputSpec`, or a nested structure of `InputSpec` instances
    (one per input tensor). These objects enable the layer to run input
    compatibility checks for input structure, input rank, input shape, and
    input dtype for the first argument of `Layer.__call__`.

    A `None` entry in a shape is compatible with any dimension.

    Args:
        dtype: Expected dtype of the input.
        shape: Shape tuple, expected shape of the input
            (may include `None` for dynamic axes).
            Includes the batch size.
        ndim: Integer, expected rank of the input.
        max_ndim: Integer, maximum rank of the input.
        min_ndim: Integer, minimum rank of the input.
        axes: Dictionary mapping integer axes to
            a specific dimension value.
        allow_last_axis_squeeze: If `True`, allow inputs of rank N+1 as long
            as the last axis of the input is 1, as well as inputs of rank N-1
            as long as the last axis of the spec is 1.
        name: Expected key corresponding to this input when passing data as
            a dictionary.
        optional: Boolean, whether the input is optional or not.
            An optional input can accept `None` values.

    Example:

    ```python
    class MyLayer(Layer):
        def __init__(self):
            super().__init__()
            # The layer will accept inputs with
            # shape (*, 28, 28) & (*, 28, 28, 1)
            # and raise an appropriate error message otherwise.
            self.input_spec = InputSpec(
                shape=(None, 28, 28, 1),
                allow_last_axis_squeeze=True)
    ```
    """

    def __init__(
        self,
        dtype=None,
        shape=None,
        ndim=None,
        max_ndim=None,
        min_ndim=None,
        axes=None,
        allow_last_axis_squeeze=False,
        name=None,
        optional=False,
    ):
        self.dtype = (
            backend.standardize_dtype(dtype) if dtype is not None else None
        )
        # An explicit `shape` determines `ndim`; any `ndim` argument is
        # ignored in that case.
        if shape is not None:
            self.shape = backend.standardize_shape(shape)
            self.ndim = len(shape)
        else:
            self.ndim = ndim
            self.shape = None
        self.max_ndim = max_ndim
        self.min_ndim = min_ndim
        self.name = name
        self.optional = optional
        self.allow_last_axis_squeeze = allow_last_axis_squeeze
        # Normalize `axes` keys to int; reject non-integer keys early.
        try:
            axes = axes or {}
            self.axes = {int(k): axes[k] for k in axes}
        except (ValueError, TypeError):
            raise TypeError(
                "Argument `axes` must be a dict with integer keys. "
                f"Received: axes={axes}"
            )

        # Validate that no constrained axis exceeds the declared rank.
        if self.axes and (self.ndim is not None or self.max_ndim is not None):
            max_dim = (self.ndim if self.ndim else self.max_ndim) - 1
            max_axis = max(self.axes)
            if max_axis > max_dim:
                raise ValueError(
                    "Axis {} is greater than the maximum "
                    "allowed value: {}".format(max_axis, max_dim)
                )

    def __repr__(self):
        # Falsy fields (None, 0, empty) are omitted from the repr.
        spec = [
            (f"dtype={str(self.dtype)}") if self.dtype else "",
            (f"shape={str(self.shape)}") if self.shape else "",
            (f"ndim={str(self.ndim)}") if self.ndim else "",
            (f"max_ndim={str(self.max_ndim)}") if self.max_ndim else "",
            (f"min_ndim={str(self.min_ndim)}") if self.min_ndim else "",
            (f"axes={str(self.axes)}") if self.axes else "",
        ]
        return f"InputSpec({', '.join(x for x in spec if x)})"

    def get_config(self):
        # NOTE: `name` and `allow_last_axis_squeeze` are intentionally not
        # serialized here.
        return {
            "dtype": self.dtype,
            "shape": self.shape,
            "ndim": self.ndim,
            "max_ndim": self.max_ndim,
            "min_ndim": self.min_ndim,
            "axes": self.axes,
            "optional": self.optional,
        }

    @classmethod
    def from_config(cls, config):
        return cls(**config)
def assert_input_compatibility(input_spec, inputs, layer_name):
    """Checks compatibility between the layer and provided inputs.

    This checks that the tensor(s) `inputs` verify the input assumptions
    of a layer (if any). If not, a clear and actionable exception gets
    raised.

    Args:
        input_spec: An InputSpec instance, list of InputSpec instances, a
            nested structure of InputSpec instances, or None.
        inputs: Input tensor, list of input tensors, or a nested structure of
            input tensors.
        layer_name: String, name of the layer (for error message formatting).

    Raises:
        ValueError: in case of mismatch between
            the provided inputs and the expectations of the layer.
    """
    if not input_spec:
        return

    input_spec = tree.flatten(input_spec)
    if isinstance(inputs, dict):
        # Flatten `inputs` by reference order if input spec names are provided
        names = [spec.name for spec in input_spec]
        if all(names):
            list_inputs = []
            for name in names:
                if name not in inputs:
                    raise ValueError(
                        f'Missing data for input "{name}". '
                        "You passed a data dictionary with keys "
                        f"{list(inputs.keys())}. "
                        f"Expected the following keys: {names}"
                    )
                list_inputs.append(inputs[name])
            inputs = list_inputs

    inputs = tree.flatten(inputs)
    if len(inputs) != len(input_spec):
        raise ValueError(
            f'Layer "{layer_name}" expects {len(input_spec)} input(s),'
            f" but it received {len(inputs)} input tensors. "
            f"Inputs received: {inputs}"
        )
    for input_index, (x, spec) in enumerate(zip(inputs, input_spec)):
        if spec is None:
            continue
        # Optional inputs may be `None`; skip all checks for them.
        if x is None and spec.optional:
            continue

        # Having a shape/dtype is the only commonality of the various
        # tensor-like objects that may be passed. The most common kind of
        # invalid type we are guarding for is a Layer instance (Functional API),
        # which does not have a `shape` attribute.
        if not hasattr(x, "shape"):
            raise ValueError(
                f"Inputs to a layer should be tensors. Got '{x}' "
                f"(of type {type(x)}) as input for layer '{layer_name}'."
            )

        shape = backend.standardize_shape(x.shape)
        ndim = len(shape)
        # Check ndim.
        # Exact-rank check is skipped when last-axis squeeze is allowed,
        # since rank N+1/N-1 may still be acceptable (see shape check below).
        if spec.ndim is not None and not spec.allow_last_axis_squeeze:
            if ndim != spec.ndim:
                raise ValueError(
                    f"Input {input_index} with name '{spec.name}' of layer "
                    f"'{layer_name}' is incompatible with the layer: "
                    f"expected ndim={spec.ndim}, found ndim={ndim}. "
                    f"Full shape received: {shape}"
                )
        if spec.max_ndim is not None:
            if ndim is not None and ndim > spec.max_ndim:
                raise ValueError(
                    f"Input {input_index} with name '{spec.name}' of layer "
                    f"'{layer_name}' is incompatible with the layer: "
                    f"expected max_ndim={spec.max_ndim}, "
                    f"found ndim={ndim}"
                )
        if spec.min_ndim is not None:
            if ndim is not None and ndim < spec.min_ndim:
                raise ValueError(
                    f"Input {input_index} with name '{spec.name}' of layer "
                    f"'{layer_name}' is incompatible with the layer: "
                    f"expected min_ndim={spec.min_ndim}, "
                    f"found ndim={ndim}. "
                    f"Full shape received: {shape}"
                )
        # Check dtype.
        if spec.dtype is not None:
            dtype = backend.standardize_dtype(x.dtype)
            if dtype != spec.dtype:
                raise ValueError(
                    f"Input {input_index} with name '{spec.name}' of layer "
                    f"'{layer_name}' is incompatible with the layer: "
                    f"expected dtype={spec.dtype}, "
                    f"found dtype={dtype}"
                )

        # Check specific shape axes.
        # `None` in the actual shape matches any constrained value.
        if spec.axes:
            for axis, value in spec.axes.items():
                if value is not None and shape[axis] not in {
                    value,
                    None,
                }:
                    raise ValueError(
                        f"Input {input_index} with name '{spec.name}' of layer "
                        f"'{layer_name}' is incompatible with the layer: "
                        f"expected axis {axis} of input shape to have value "
                        f"{value}, but received input with shape {shape}"
                    )
        # Check shape.
        if spec.shape is not None:
            spec_shape = spec.shape
            if spec.allow_last_axis_squeeze:
                # Drop a trailing 1 from either side before comparing, so
                # rank N+1 inputs / rank N-1 specs can still match.
                if shape and shape[-1] == 1:
                    shape = shape[:-1]
                if spec_shape and spec_shape[-1] == 1:
                    spec_shape = spec_shape[:-1]
            for spec_dim, dim in zip(spec_shape, shape):
                if spec_dim is not None and dim is not None:
                    if spec_dim != dim:
                        raise ValueError(
                            f"Input {input_index} with name '{spec.name}' of "
                            f"layer '{layer_name}' is incompatible with the "
                            f"layer: expected shape={spec.shape}, found "
                            f"shape={shape}"
                        )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/layer.py | keras/src/layers/layer.py | """Layer is an Operation with state.
Takes care of:
- Weights / variables (and tracking thereof)
- deferred build
- trainable argument value inference
- masking
- autocasting
And some more magic:
- add_loss
- metric tracking
- RNG seed tracking
- activity regularization
"""
import collections
import functools
import inspect
import math
import warnings
from functools import wraps
from keras.src import backend
from keras.src import constraints
from keras.src import dtype_policies
from keras.src import initializers
from keras.src import regularizers
from keras.src import tree
from keras.src import utils
from keras.src.api_export import keras_export
from keras.src.backend import KerasTensor
from keras.src.backend.common import global_state
from keras.src.backend.common import remat
from keras.src.backend.common.keras_tensor import any_symbolic_tensors
from keras.src.backend.common.name_scope import current_path
from keras.src.backend.common.remat import get_current_remat_mode
from keras.src.backend.common.symbolic_scope import in_symbolic_scope
from keras.src.backend.config import is_nnx_enabled
from keras.src.distribution import distribution_lib
from keras.src.dtype_policies import DTypePolicyMap
from keras.src.layers import input_spec
from keras.src.metrics.metric import Metric
from keras.src.ops.node import Node
from keras.src.ops.operation import Operation
from keras.src.quantizers.quantization_config import validate_and_resolve_config
from keras.src.utils import python_utils
from keras.src.utils import summary_utils
from keras.src.utils import traceback_utils
from keras.src.utils import tracking
if backend.backend() == "tensorflow":
from keras.src.backend.tensorflow.layer import TFLayer as BackendLayer
elif backend.backend() == "jax":
from keras.src.backend.jax.layer import JaxLayer as BackendLayer
elif backend.backend() == "torch":
from keras.src.backend.torch.layer import TorchLayer as BackendLayer
elif backend.backend() == "numpy":
from keras.src.backend.numpy.layer import NumpyLayer as BackendLayer
elif backend.backend() == "openvino":
from keras.src.backend.openvino.layer import OpenvinoLayer as BackendLayer
else:
raise RuntimeError(
f"Backend '{backend.backend()}' must implement a layer mixin class."
)
@keras_export(["keras.Layer", "keras.layers.Layer"])
class Layer(BackendLayer, Operation):
"""This is the class from which all layers inherit.
A layer is a callable object that takes as input one or more tensors and
that outputs one or more tensors. It involves *computation*, defined
in the `call()` method, and a *state* (weight variables). State can be
created:
* in `__init__()`, for instance via `self.add_weight()`;
* in the optional `build()` method, which is invoked by the first
`__call__()` to the layer, and supplies the shape(s) of the input(s),
which may not have been known at initialization time.
Layers are recursively composable: If you assign a Layer instance as an
attribute of another Layer, the outer layer will start tracking the weights
created by the inner layer. Nested layers should be instantiated in the
`__init__()` method or `build()` method.
Users will just instantiate a layer and then treat it as a callable.
Args:
trainable: Boolean, whether the layer's variables should be trainable.
name: String name of the layer.
dtype: The dtype of the layer's computations and weights. Can also be a
`keras.DTypePolicy`, which allows the computation and weight dtype
to differ. Defaults to `None`. `None` means to use
`keras.config.dtype_policy()`, which is a `float32` policy unless
set to different value (via `keras.config.set_dtype_policy()`).
Attributes:
name: The name of the layer (string).
dtype: Dtype of the layer's weights. Alias of `layer.variable_dtype`.
variable_dtype: Dtype of the layer's weights.
compute_dtype: The dtype of the layer's computations.
Layers automatically cast inputs to this dtype, which causes
the computations and output to also be in this dtype.
When mixed precision is used with a
`keras.DTypePolicy`, this will be different
than `variable_dtype`.
trainable_weights: List of variables to be included in backprop.
non_trainable_weights: List of variables that should not be
included in backprop.
weights: The concatenation of the lists trainable_weights and
non_trainable_weights (in this order).
trainable: Whether the layer should be trained (boolean), i.e.
whether its potentially-trainable weights should be returned
as part of `layer.trainable_weights`.
input_spec: Optional (list of) `InputSpec` object(s) specifying the
constraints on inputs that can be accepted by the layer.
We recommend that descendants of `Layer` implement the following methods:
* `__init__()`: Defines custom layer attributes, and creates layer weights
that do not depend on input shapes, using `add_weight()`,
or other state.
* `build(self, input_shape)`: This method can be used to create weights that
depend on the shape(s) of the input(s), using `add_weight()`, or other
state. `__call__()` will automatically build the layer
(if it has not been built yet) by calling `build()`.
* `call(self, *args, **kwargs)`: Called in `__call__` after making
sure `build()` has been called. `call()` performs the logic of applying
the layer to the input arguments.
Two reserved keyword arguments you can optionally use in `call()` are:
1. `training` (boolean, whether the call is in inference mode or
training mode).
2. `mask` (boolean tensor encoding masked timesteps in the input,
used e.g. in RNN layers).
A typical signature for this method is `call(self, inputs)`, and user
could optionally add `training` and `mask` if the layer need them.
* `get_config(self)`: Returns a dictionary containing the configuration
used to initialize this layer. If the keys differ from the arguments
in `__init__()`, then override `from_config(self)` as well.
This method is used when saving
the layer or a model that contains this layer.
Examples:
Here's a basic example: a layer with two variables, `w` and `b`,
that returns `y = w . x + b`.
It shows how to implement `build()` and `call()`.
Variables set as attributes of a layer are tracked as weights
of the layers (in `layer.weights`).
```python
class SimpleDense(Layer):
def __init__(self, units=32):
super().__init__()
self.units = units
# Create the state of the layer (weights)
def build(self, input_shape):
self.kernel = self.add_weight(
shape=(input_shape[-1], self.units),
initializer="glorot_uniform",
trainable=True,
name="kernel",
)
self.bias = self.add_weight(
shape=(self.units,),
initializer="zeros",
trainable=True,
name="bias",
)
# Defines the computation
def call(self, inputs):
return ops.matmul(inputs, self.kernel) + self.bias
# Instantiates the layer.
linear_layer = SimpleDense(4)
# This will also call `build(input_shape)` and create the weights.
y = linear_layer(ops.ones((2, 2)))
assert len(linear_layer.weights) == 2
# These weights are trainable, so they're listed in `trainable_weights`:
assert len(linear_layer.trainable_weights) == 2
```
Besides trainable weights, updated via backpropagation during training,
layers can also have non-trainable weights. These weights are meant to
be updated manually during `call()`. Here's a example layer that computes
the running sum of its inputs:
```python
class ComputeSum(Layer):
def __init__(self, input_dim):
super(ComputeSum, self).__init__()
# Create a non-trainable weight.
self.total = self.add_weight(
shape=(),
initializer="zeros",
trainable=False,
name="total",
)
def call(self, inputs):
self.total.assign(self.total + ops.sum(inputs))
return self.total
my_sum = ComputeSum(2)
x = ops.ones((2, 2))
y = my_sum(x)
assert my_sum.weights == [my_sum.total]
assert my_sum.non_trainable_weights == [my_sum.total]
assert my_sum.trainable_weights == []
```
"""
def __new__(cls, *args, **kwargs):
    # Instruments every new layer instance: wraps `build` (name scope,
    # build-config recording, built/lock bookkeeping) and `quantize`
    # (tracker unlock/relock) before `__init__` runs.
    obj = super().__new__(cls, *args, **kwargs)

    # Wrap the user-provided `build` method in the `build_wrapper`
    # to add name scope support and serialization support.
    original_build_method = obj.build

    @wraps(original_build_method)
    def build_wrapper(*args, **kwargs):
        with obj._open_name_scope():
            # The path is resolved inside the name scope so it reflects
            # the layer's position in the model hierarchy.
            obj._path = current_path()
            original_build_method(*args, **kwargs)
        # Record build config.
        signature = inspect.signature(original_build_method)
        obj._build_shapes_dict = signature.bind(*args, **kwargs).arguments
        # Set built, post build actions, and lock state.
        obj.built = True
        obj._post_build()
        obj._lock_state()

    obj.build = build_wrapper

    # Wrap the user-provided `quantize` method in the `quantize_wrapper`
    # to add tracker support.
    original_quantize_method = obj.quantize

    @wraps(original_quantize_method)
    def quantize_wrapper(mode=None, config=None, **kwargs):
        config = validate_and_resolve_config(mode, config)
        mode = config.mode
        obj._check_quantize_args(mode, obj.compute_dtype)
        # Quantization swaps/creates variables on a built layer, so the
        # state tracker must be unlocked; relock even on failure.
        obj._tracker.unlock()
        try:
            original_quantize_method(mode=mode, config=config, **kwargs)
        except Exception:
            raise
        finally:
            obj._tracker.lock()

    obj.quantize = quantize_wrapper

    return obj
def __init__(
    self,
    *,
    activity_regularizer=None,
    trainable=True,
    dtype=None,
    autocast=True,
    name=None,
    **kwargs,
):
    # Initialize both bases explicitly (multiple inheritance with the
    # backend-specific layer mixin).
    BackendLayer.__init__(self)
    self._lock = False
    Operation.__init__(self, name=name)
    self._dtype_policy = dtype_policies.get(dtype)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    # Legacy `input_dim` / `input_shape` kwargs are accepted for backward
    # compatibility but discouraged.
    input_dim_arg = kwargs.pop("input_dim", None)
    if input_dim_arg is not None:
        input_shape_arg = (input_dim_arg,)
    else:
        input_shape_arg = kwargs.pop("input_shape", None)
    if input_shape_arg is not None:
        warnings.warn(
            "Do not pass an `input_shape`/`input_dim` argument to "
            "a layer. When using Sequential models, "
            "prefer using an `Input(shape)` object as the "
            "first layer in the model instead.",
            stacklevel=2,
        )
    self._input_shape_arg = input_shape_arg
    # Any remaining kwargs are unknown: fail loudly rather than ignore.
    if kwargs:
        raise ValueError(
            "Unrecognized keyword arguments "
            f"passed to {self.__class__.__name__}: {kwargs}"
        )

    self._path = None  # Will be determined in `build_wrapper`
    self.built = False
    self.autocast = autocast
    self._input_spec = None
    self._called = False
    self.supports_jit = True

    self._trainable = trainable
    self._losses = []
    self._loss_ids = set()
    self._losses_override = []

    # Inspect `call` once so dispatch can know which reserved arguments
    # (`training`, `mask`) the user's implementation accepts.
    self._call_signature = inspect.signature(self.call)
    self.call_signature_parameters = [
        p.name for p in self._call_signature.parameters.values()
    ]
    self._call_has_training_arg = (
        "training" in self.call_signature_parameters
    )
    self._call_has_mask_arg = "mask" in self.call_signature_parameters

    # 1. collect names that should be auto-propagated
    self._call_context_args = {"training"}

    # 2. remember which of them exist in *this* call signature
    self._call_has_context_arg = {
        arg: (arg in self.call_signature_parameters)
        for arg in self._call_context_args
    }

    # A layer supports masking iff it overrides `compute_mask`.
    self._supports_masking = not utils.is_default(self.compute_mask)
    # Whether to automatically convert (+ auto-cast) inputs to `call()`.
    self._convert_input_args = True
    # Whether to allow non-tensors as positional arguments in `call()`.
    self._allow_non_tensor_positional_args = False
    # Dict of shapes that were used to call `build()`.
    self._build_shapes_dict = None
    # Parent path
    self._parent_path = None
    self._remat_mode = get_current_remat_mode()
    self._initialize_tracker()
@tracking.no_automatic_dependency_tracking
def _initialize_tracker(self):
    # Set up the attribute tracker that classifies objects assigned to
    # `self` (variables, sublayers, metrics, seed generators) into the
    # lists exposed via `trainable_variables`, `layers`, etc.
    # Idempotent: a second call is a no-op.
    if hasattr(self, "_tracker"):
        return

    trainable_variables = []
    non_trainable_variables = []
    layers = []
    metrics = []
    seed_generators = []
    self._tracker = tracking.Tracker(
        {
            "trainable_variables": (
                lambda x: isinstance(x, backend.Variable) and x.trainable,
                trainable_variables,
            ),
            "non_trainable_variables": (
                lambda x: isinstance(x, backend.Variable)
                and not x.trainable,
                non_trainable_variables,
            ),
            "metrics": (lambda x: isinstance(x, Metric), metrics),
            "layers": (
                lambda x: isinstance(x, Layer)
                and not isinstance(x, Metric),
                layers,
            ),
            "seed_generators": (
                lambda x: isinstance(x, backend.random.SeedGenerator),
                seed_generators,
            ),
        },
        exclusions={"non_trainable_variables": ["trainable_variables"]},
    )
    if backend.backend() == "tensorflow":
        # Remove attribute tracking for lists (TF-specific attribute)
        _self_setattr_tracking = getattr(
            self, "_self_setattr_tracking", True
        )
        self._self_setattr_tracking = False

    # These lists are shared with the tracker above: the tracker appends
    # into them as attributes are assigned.
    self._trainable_variables = trainable_variables
    self._non_trainable_variables = non_trainable_variables
    self._layers = layers
    self._metrics = metrics
    self._seed_generators = seed_generators

    if backend.backend() == "tensorflow":
        # Reset attribute tracking (TF-specific)
        self._self_setattr_tracking = _self_setattr_tracking
def _build_at_init(self):
    """Build the layer at `Layer.__init__`.

    We can only safely mark the layer as `built=True` in `Layer.__init__` if
    `build` is not overridden. Otherwise, it might cause the subclasses to
    ignore the user's `build`.
    """
    if utils.is_default(self.build):
        # No custom `build`: mark built immediately and lock state, the
        # same bookkeeping `build_wrapper` would perform.
        self.built = True
        self._post_build()
        self._lock_state()
@property
def path(self):
    """The path of the layer.

    If the layer has not been built yet, it will be `None`.
    """
    # `_path` is assigned inside `build_wrapper` (see `__new__`).
    return self._path
@property
def input_spec(self):
    # Optional `InputSpec` (or nested structure thereof) used to validate
    # inputs to `__call__`; `None` disables validation.
    return self._input_spec

@input_spec.setter
def input_spec(self, value):
    self._input_spec = value
@utils.default
def build(self, input_shape):
    """Default no-op `build`: marks the layer as built.

    Subclasses with state that depends on input shapes should override
    this. Warns when the layer looks like it has unbuilt state but no
    `build` override to create it.
    """
    self._check_super_called()
    if utils.is_default(self.build) and might_have_unbuilt_state(self):
        warnings.warn(
            f"`build()` was called on layer '{self.name}', however "
            "the layer does not have a `build()` method implemented "
            "and it looks like it has unbuilt state. This will cause "
            "the layer to be marked as built, despite not being "
            "actually built, which may cause failures down the line. "
            "Make sure to implement a proper `build()` method."
        )
    self.built = True
def _lock_state(self):
    """Prevent further state updates, called automatically in `build()`."""
    if not self._tracker.locked:
        self._tracker.lock(
            msg=(
                "You cannot add new elements of state "
                "(variables or sub-layers) "
                "to a layer that is already built. All state "
                "must be created in the `__init__()` method or "
                "in the `build()` method."
            )
        )
def get_build_config(self):
    """Returns a dictionary with the layer's input shape.

    This method returns a config dict that can be used by
    `build_from_config(config)` to create all states (e.g. Variables and
    Lookup tables) needed by the layer.

    By default, the config only contains the input shape that the layer
    was built with. If you're writing a custom layer that creates state in
    an unusual way, you should override this method to make sure this state
    is already created when Keras attempts to load its value upon model
    loading.

    Returns:
        A dict containing the input shape associated with the layer,
        or `None` if the layer was never built through `build_wrapper`.
    """
    if self._build_shapes_dict is not None:
        if len(self._build_shapes_dict) == 1:
            # Single-argument `build`: store it under "input_shape" for
            # symmetry with `build_from_config`.
            return {
                "input_shape": tuple(self._build_shapes_dict.values())[0],
            }
        else:
            return {"shapes_dict": self._build_shapes_dict}
    # Implicitly returns None when the layer has no recorded build shapes.
def build_from_config(self, config):
    """Builds the layer's states with the supplied config dict.

    By default, this method calls the `build(config["input_shape"])` method,
    which creates weights based on the layer's input shape in the supplied
    config. If your config contains other information needed to load the
    layer's state, you should override this method.

    Args:
        config: Dict containing the input shape associated with this layer.
    """
    # Empty/None config means there is nothing to build from.
    if not config:
        return
    if "input_shape" in config:
        self.build(config["input_shape"])
    elif "shapes_dict" in config:
        self.build(**config["shapes_dict"])
def _obj_type(self):
return "Layer"
def add_variable(
    self,
    shape,
    initializer,
    dtype=None,
    trainable=True,
    autocast=True,
    regularizer=None,
    constraint=None,
    name=None,
):
    """Add a weight variable to the layer.

    Alias of `add_weight()`; all arguments are forwarded unchanged.
    """
    forwarded = dict(
        shape=shape,
        initializer=initializer,
        dtype=dtype,
        trainable=trainable,
        autocast=autocast,
        regularizer=regularizer,
        constraint=constraint,
        name=name,
    )
    return self.add_weight(**forwarded)
def add_weight(
    self,
    shape=None,
    initializer=None,
    dtype=None,
    trainable=True,
    autocast=True,
    regularizer=None,
    constraint=None,
    aggregation="none",
    overwrite_with_gradient=False,
    name=None,
):
    """Add a weight variable to the layer.

    Args:
        shape: Shape tuple for the variable. Must be fully-defined
            (no `None` entries). Defaults to `()` (scalar) if unspecified.
        initializer: Initializer object to use to populate the initial
            variable value, or string name of a built-in initializer
            (e.g. `"random_normal"`). If unspecified, defaults to
            `"glorot_uniform"` for floating-point variables and to `"zeros"`
            for all other types (e.g. int, bool).
        dtype: Dtype of the variable to create, e.g. `"float32"`. If
            unspecified, defaults to the layer's variable dtype
            (which itself defaults to `"float32"` if unspecified).
        trainable: Boolean, whether the variable should be trainable via
            backprop or whether its updates are managed manually. Defaults
            to `True`.
        autocast: Boolean, whether to autocast layers variables when
            accessing them. Defaults to `True`.
        regularizer: Regularizer object to call to apply penalty on the
            weight. These penalties are summed into the loss function
            during optimization. Defaults to `None`.
        constraint: Constraint object to call on the variable after any
            optimizer update, or string name of a built-in constraint.
            Defaults to `None`.
        aggregation: Optional string, one of `None`, `"none"`, `"mean"`,
            `"sum"` or `"only_first_replica"`. Annotates the variable with
            the type of multi-replica aggregation to be used for this
            variable when writing custom data parallel training loops.
            Defaults to `"none"`.
        overwrite_with_gradient: Boolean, whether to overwrite the variable
            with the computed gradient. This is useful for float8 training.
            Defaults to `False`.
        name: String name of the variable. Useful for debugging purposes.

    Returns:
        The created `backend.Variable`, already tracked by this layer.
    """
    self._check_super_called()
    # A missing shape means a scalar variable.
    if shape is None:
        shape = ()
    # Resolve dtype: an explicit argument wins; otherwise fall back to
    # the layer's variable dtype.
    if dtype is not None:
        dtype = backend.standardize_dtype(dtype)
    else:
        dtype = self.variable_dtype
    # Choose a default initializer based on the dtype family: glorot for
    # floats, zeros for everything else (int, bool, ...).
    if initializer is None:
        if "float" in dtype:
            initializer = "glorot_uniform"
        else:
            initializer = "zeros"
    initializer = initializers.get(initializer)
    # Create the variable inside this layer's name scope so it is
    # attributed to this layer.
    with backend.name_scope(self.name, caller=self):
        variable = backend.Variable(
            initializer=initializer,
            shape=shape,
            dtype=dtype,
            trainable=trainable,
            autocast=autocast,
            aggregation=aggregation,
            name=name,
        )
    # Will be added to layer.losses
    variable.regularizer = regularizers.get(regularizer)
    variable.constraint = constraints.get(constraint)
    variable.overwrite_with_gradient = overwrite_with_gradient
    # Register with the tracker so the variable shows up in
    # `layer.weights` / `layer.variables`.
    self._track_variable(variable)
    return variable
@property
def trainable(self):
    """Settable boolean, whether this layer should be trainable or not."""
    return self._trainable

@trainable.setter
def trainable(self, value):
    """Sets trainable attribute for the layer and its sublayers.

    When this value is changed during training (e.g. with a
    `Callback`) you need to call the parent
    `Model.make_train_function` with `force=True` in order to
    recompile the training graph.

    Args:
        value: Boolean with the desired state for the layer's trainable
            attribute.
    """
    flag = bool(value)
    self._trainable = flag
    # Propagate to owned trainable variables and, recursively, to all
    # sub-layers.
    for variable in self._trainable_variables:
        variable.trainable = flag
    for sublayer in self._layers:
        sublayer.trainable = flag
@property
def variables(self):
    """List of all layer state, including random seeds.

    This extends `layer.weights` to include all state used by the layer
    including `SeedGenerator`s.

    Note that metrics variables are not included here, use
    `metrics_variables` to visit all the metric variables.
    """
    collected = []
    seen = set()

    def _maybe_add(item):
        # Deduplicate by object identity, preserving first-seen order.
        if id(item) not in seen:
            seen.add(id(item))
            collected.append(item)

    for item in self._trainable_variables:
        _maybe_add(item)
    for item in self._non_trainable_variables:
        _maybe_add(item)
    # Seed generator state is appended unconditionally and its id is not
    # recorded, mirroring how the original tracking treats seeds.
    for seed_generator in self._seed_generators:
        collected.append(seed_generator.state)
    for sublayer in self._layers:
        for item in sublayer.variables:
            _maybe_add(item)
    return collected
@property
def trainable_variables(self):
    """List of all trainable layer state.

    This is equivalent to `layer.trainable_weights`.
    """
    collected = []
    if self.trainable:
        for variable in self.variables:
            if variable.trainable:
                collected.append(variable)
    return collected
@property
def non_trainable_variables(self):
    """List of all non-trainable layer state.

    This extends `layer.non_trainable_weights` to include all state used by
    the layer including state for metrics and `SeedGenerator`s.
    """
    everything = self.variables
    if not self.trainable:
        # A frozen layer exposes all of its state as non-trainable.
        return everything
    return [item for item in everything if not item.trainable]
@property
def weights(self):
    """List of all weight variables of the layer.

    Unlike, `layer.variables` this excludes metric state and random seeds.
    """
    collected = []
    seen = set()
    # Own variables first (trainable then non-trainable), deduplicated
    # by identity while preserving order.
    for group in (self._trainable_variables, self._non_trainable_variables):
        for w in group:
            if id(w) in seen:
                continue
            seen.add(id(w))
            collected.append(w)
    # Then recurse into sub-layers, still deduplicating.
    for sublayer in self._layers:
        for w in sublayer.weights:
            if id(w) in seen:
                continue
            seen.add(id(w))
            collected.append(w)
    return collected
@property
def trainable_weights(self):
    """List of all trainable weight variables of the layer.

    These are the weights that get updated by the optimizer during training.
    """
    collected = []
    if self.trainable:
        for w in self.weights:
            if w.trainable:
                collected.append(w)
    return collected
@property
def non_trainable_weights(self):
    """List of all non-trainable weight variables of the layer.

    These are the weights that should not be updated by the optimizer during
    training. Unlike, `layer.non_trainable_variables` this excludes metric
    state and random seeds.
    """
    held = self.weights
    if not self.trainable:
        # A frozen layer reports every weight as non-trainable.
        return held
    return [w for w in held if not w.trainable]
@property
def metrics(self):
    """List of all metrics, including those of sub-layers."""
    collected = list(self._metrics)
    for sublayer in self._layers:
        collected += sublayer.metrics
    return collected
@property
def metrics_variables(self):
    """List of all metric variables.

    Concatenates the `variables` list of every metric returned by
    `self.metrics` (which already includes sub-layer metrics).
    """
    # Renamed from `vars`, which shadowed the `vars` builtin.
    variables = []
    for metric in self.metrics:
        variables.extend(metric.variables)
    return variables
def get_weights(self):
    """Fetch the current values of `layer.weights` as NumPy arrays."""
    return [weight.numpy() for weight in self.weights]
def set_weights(self, weights):
    """Sets the values of `layer.weights` from a list of NumPy arrays.

    Raises:
        ValueError: If the number of provided arrays does not match the
            number of layer weights, or if any array's shape does not
            match the corresponding variable's shape.
    """
    layer_weights = self.weights
    if len(layer_weights) != len(weights):
        raise ValueError(
            f"You called `set_weights(weights)` on layer '{self.name}' "
            f"with a weight list of length {len(weights)}, but the layer "
            f"was expecting {len(layer_weights)} weights."
        )
    # Assign one-by-one; a shape mismatch aborts at the offending pair
    # (earlier variables keep their newly assigned values).
    for variable, value in zip(layer_weights, weights):
        if variable.shape == value.shape:
            variable.assign(value)
        else:
            raise ValueError(
                f"Layer {self.name} weight shape {variable.shape} "
                "is not compatible with provided weight "
                f"shape {value.shape}."
            )
@property
def dtype_policy(self):
    """The dtype policy object currently attached to this layer."""
    return self._dtype_policy
@dtype_policy.setter
def dtype_policy(self, value):
    # Resolve strings/configs into a concrete policy object.
    policy = dtype_policies.get(value)
    if isinstance(self._dtype_policy, DTypePolicyMap) and self.path:
        # When policies are stored in a per-path map, replace this
        # layer's entry rather than the whole map.
        if self.path in self._dtype_policy:
            del self._dtype_policy[self.path]
        self._dtype_policy[self.path] = policy
    else:
        self._dtype_policy = policy
    if policy.quantization_mode is not None:
        # Assigning a quantized policy to an already-built layer that has
        # not been quantized yet triggers quantization immediately.
        if self.built and not getattr(self, "_is_quantized", False):
            if policy.quantization_mode == "gptq":
                # GPTQ cannot be enabled implicitly: it requires a
                # calibration dataset supplied through
                # `.quantize("gptq", config=...)`.
                raise ValueError(
                    "Implicitly enabling GPTQ quantization by setting "
                    f"`dtype_policy` to '{value}' is not supported. "
                    "GPTQ requires a calibration dataset and a "
                    "`GPTQConfig` object.\n\n"
                    "Please use the `.quantize('gptq', config=...)` method "
                    "on the layer or model instead."
                )
            self.quantize(policy.quantization_mode)
@property
def dtype(self):
    """Alias of `layer.variable_dtype`."""
    resolved = self.variable_dtype
    return resolved
@property
def compute_dtype(self):
    """The dtype of the computations performed by the layer."""
    policy = self._dtype_policy
    # A DTypePolicyMap holds per-path policies; pick this layer's entry.
    if isinstance(policy, DTypePolicyMap) and self.path:
        policy = policy[self.path]
    return policy.compute_dtype
@property
def variable_dtype(self):
    """The dtype of the state (weights) of the layer."""
    policy = self._dtype_policy
    # A DTypePolicyMap holds per-path policies; pick this layer's entry.
    if isinstance(policy, DTypePolicyMap) and self.path:
        policy = policy[self.path]
    return policy.variable_dtype
@property
def quantization_mode(self):
    """The quantization mode of this layer, `None` if not quantized."""
    policy = self._dtype_policy
    # A DTypePolicyMap holds per-path policies; pick this layer's entry.
    if isinstance(policy, DTypePolicyMap) and self.path:
        policy = policy[self.path]
    return policy.quantization_mode
@property
def input_dtype(self):
    """The dtype layer inputs should be converted to."""
    target = self.compute_dtype
    return target
@property
def supports_masking(self):
    """Whether this layer supports computing a mask using `compute_mask`."""
    return self._supports_masking

@supports_masking.setter
def supports_masking(self, value):
    # Stored verbatim; no coercion or validation applied.
    self._supports_masking = value
@utils.default
def compute_mask(self, inputs, previous_mask):
    """Default mask computation: propagate the incoming mask unchanged.

    Marked with `@utils.default` so the framework can detect whether a
    subclass provides its own `compute_mask` implementation.
    """
    return previous_mask
def symbolic_call(self, *args, **kwargs):
    """Run the layer symbolically by computing its output spec.

    Note: the graph `Node` is created at the end of `__call__`, not here.
    """
    spec_fn = self.compute_output_spec
    return spec_fn(*args, **kwargs)
@traceback_utils.filter_traceback
def __call__(self, *args, **kwargs):
self._check_super_called()
self._called = True
original_args = args
original_kwargs = kwargs
#############################################################
# 1. Convert any array arguments to tensors of correct dtype.
def maybe_convert(x):
# Prevent _keras_mask from disappearing
mask = backend.get_keras_mask(x)
y = self.dtype_policy.convert_input(
x, self.autocast, self.input_dtype
)
if mask is not None:
backend.set_keras_mask(y, mask)
return y
# Used to avoid expensive `tree` operations in the most common case.
if (
kwargs
or len(args) != 1
or not is_backend_tensor_or_symbolic(args[0], allow_none=False)
or backend.standardize_dtype(args[0].dtype) != self.input_dtype
) and self._convert_input_args:
args = tree.map_structure(maybe_convert, args)
kwargs = tree.map_structure(maybe_convert, kwargs)
##########################################################
# 2. Enforce that only tensors can be passed positionally.
if not self._allow_non_tensor_positional_args:
for arg in tree.flatten(args):
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | true |
keras-team/keras | https://github.com/keras-team/keras/blob/c67eddb4ff8b615886893ca996dc216bc923d598/keras/src/layers/__init__.py | keras/src/layers/__init__.py | from keras.src.api_export import keras_export
from keras.src.layers.activations.activation import Activation
from keras.src.layers.activations.elu import ELU
from keras.src.layers.activations.leaky_relu import LeakyReLU
from keras.src.layers.activations.prelu import PReLU
from keras.src.layers.activations.relu import ReLU
from keras.src.layers.activations.softmax import Softmax
from keras.src.layers.attention.additive_attention import AdditiveAttention
from keras.src.layers.attention.attention import Attention
from keras.src.layers.attention.grouped_query_attention import (
GroupedQueryAttention,
)
from keras.src.layers.attention.multi_head_attention import MultiHeadAttention
from keras.src.layers.convolutional.conv1d import Conv1D
from keras.src.layers.convolutional.conv1d_transpose import Conv1DTranspose
from keras.src.layers.convolutional.conv2d import Conv2D
from keras.src.layers.convolutional.conv2d_transpose import Conv2DTranspose
from keras.src.layers.convolutional.conv3d import Conv3D
from keras.src.layers.convolutional.conv3d_transpose import Conv3DTranspose
from keras.src.layers.convolutional.depthwise_conv1d import DepthwiseConv1D
from keras.src.layers.convolutional.depthwise_conv2d import DepthwiseConv2D
from keras.src.layers.convolutional.separable_conv1d import SeparableConv1D
from keras.src.layers.convolutional.separable_conv2d import SeparableConv2D
from keras.src.layers.core.dense import Dense
from keras.src.layers.core.einsum_dense import EinsumDense
from keras.src.layers.core.embedding import Embedding
from keras.src.layers.core.identity import Identity
from keras.src.layers.core.input_layer import Input
from keras.src.layers.core.input_layer import InputLayer
from keras.src.layers.core.lambda_layer import Lambda
from keras.src.layers.core.masking import Masking
from keras.src.layers.core.reversible_embedding import ReversibleEmbedding
from keras.src.layers.core.wrapper import Wrapper
from keras.src.layers.input_spec import InputSpec
from keras.src.layers.layer import Layer
from keras.src.layers.merging.add import Add
from keras.src.layers.merging.add import add
from keras.src.layers.merging.average import Average
from keras.src.layers.merging.average import average
from keras.src.layers.merging.concatenate import Concatenate
from keras.src.layers.merging.concatenate import concatenate
from keras.src.layers.merging.dot import Dot
from keras.src.layers.merging.dot import dot
from keras.src.layers.merging.maximum import Maximum
from keras.src.layers.merging.maximum import maximum
from keras.src.layers.merging.minimum import Minimum
from keras.src.layers.merging.minimum import minimum
from keras.src.layers.merging.multiply import Multiply
from keras.src.layers.merging.multiply import multiply
from keras.src.layers.merging.subtract import Subtract
from keras.src.layers.merging.subtract import subtract
from keras.src.layers.normalization.batch_normalization import (
BatchNormalization,
)
from keras.src.layers.normalization.group_normalization import (
GroupNormalization,
)
from keras.src.layers.normalization.layer_normalization import (
LayerNormalization,
)
from keras.src.layers.normalization.rms_normalization import RMSNormalization
from keras.src.layers.normalization.spectral_normalization import (
SpectralNormalization,
)
from keras.src.layers.normalization.unit_normalization import UnitNormalization
from keras.src.layers.pooling.adaptive_average_pooling1d import (
AdaptiveAveragePooling1D,
)
from keras.src.layers.pooling.adaptive_average_pooling2d import (
AdaptiveAveragePooling2D,
)
from keras.src.layers.pooling.adaptive_average_pooling3d import (
AdaptiveAveragePooling3D,
)
from keras.src.layers.pooling.adaptive_max_pooling1d import AdaptiveMaxPooling1D
from keras.src.layers.pooling.adaptive_max_pooling2d import AdaptiveMaxPooling2D
from keras.src.layers.pooling.adaptive_max_pooling3d import AdaptiveMaxPooling3D
from keras.src.layers.pooling.average_pooling1d import AveragePooling1D
from keras.src.layers.pooling.average_pooling2d import AveragePooling2D
from keras.src.layers.pooling.average_pooling3d import AveragePooling3D
from keras.src.layers.pooling.global_average_pooling1d import (
GlobalAveragePooling1D,
)
from keras.src.layers.pooling.global_average_pooling2d import (
GlobalAveragePooling2D,
)
from keras.src.layers.pooling.global_average_pooling3d import (
GlobalAveragePooling3D,
)
from keras.src.layers.pooling.global_max_pooling1d import GlobalMaxPooling1D
from keras.src.layers.pooling.global_max_pooling2d import GlobalMaxPooling2D
from keras.src.layers.pooling.global_max_pooling3d import GlobalMaxPooling3D
from keras.src.layers.pooling.max_pooling1d import MaxPooling1D
from keras.src.layers.pooling.max_pooling2d import MaxPooling2D
from keras.src.layers.pooling.max_pooling3d import MaxPooling3D
from keras.src.layers.preprocessing.category_encoding import CategoryEncoding
from keras.src.layers.preprocessing.discretization import Discretization
from keras.src.layers.preprocessing.hashed_crossing import HashedCrossing
from keras.src.layers.preprocessing.hashing import Hashing
from keras.src.layers.preprocessing.image_preprocessing.aug_mix import AugMix
from keras.src.layers.preprocessing.image_preprocessing.auto_contrast import (
AutoContrast,
)
from keras.src.layers.preprocessing.image_preprocessing.center_crop import (
CenterCrop,
)
from keras.src.layers.preprocessing.image_preprocessing.cut_mix import CutMix
from keras.src.layers.preprocessing.image_preprocessing.equalization import (
Equalization,
)
from keras.src.layers.preprocessing.image_preprocessing.max_num_bounding_box import (
MaxNumBoundingBoxes,
)
from keras.src.layers.preprocessing.image_preprocessing.mix_up import MixUp
from keras.src.layers.preprocessing.image_preprocessing.rand_augment import (
RandAugment,
)
from keras.src.layers.preprocessing.image_preprocessing.random_brightness import (
RandomBrightness,
)
from keras.src.layers.preprocessing.image_preprocessing.random_color_degeneration import (
RandomColorDegeneration,
)
from keras.src.layers.preprocessing.image_preprocessing.random_color_jitter import (
RandomColorJitter,
)
from keras.src.layers.preprocessing.image_preprocessing.random_contrast import (
RandomContrast,
)
from keras.src.layers.preprocessing.image_preprocessing.random_crop import (
RandomCrop,
)
from keras.src.layers.preprocessing.image_preprocessing.random_elastic_transform import (
RandomElasticTransform,
)
from keras.src.layers.preprocessing.image_preprocessing.random_erasing import (
RandomErasing,
)
from keras.src.layers.preprocessing.image_preprocessing.random_flip import (
RandomFlip,
)
from keras.src.layers.preprocessing.image_preprocessing.random_gaussian_blur import (
RandomGaussianBlur,
)
from keras.src.layers.preprocessing.image_preprocessing.random_grayscale import (
RandomGrayscale,
)
from keras.src.layers.preprocessing.image_preprocessing.random_hue import (
RandomHue,
)
from keras.src.layers.preprocessing.image_preprocessing.random_invert import (
RandomInvert,
)
from keras.src.layers.preprocessing.image_preprocessing.random_perspective import (
RandomPerspective,
)
from keras.src.layers.preprocessing.image_preprocessing.random_posterization import (
RandomPosterization,
)
from keras.src.layers.preprocessing.image_preprocessing.random_rotation import (
RandomRotation,
)
from keras.src.layers.preprocessing.image_preprocessing.random_saturation import (
RandomSaturation,
)
from keras.src.layers.preprocessing.image_preprocessing.random_sharpness import (
RandomSharpness,
)
from keras.src.layers.preprocessing.image_preprocessing.random_shear import (
RandomShear,
)
from keras.src.layers.preprocessing.image_preprocessing.random_translation import (
RandomTranslation,
)
from keras.src.layers.preprocessing.image_preprocessing.random_zoom import (
RandomZoom,
)
from keras.src.layers.preprocessing.image_preprocessing.resizing import Resizing
from keras.src.layers.preprocessing.image_preprocessing.solarization import (
Solarization,
)
from keras.src.layers.preprocessing.index_lookup import IndexLookup
from keras.src.layers.preprocessing.integer_lookup import IntegerLookup
from keras.src.layers.preprocessing.mel_spectrogram import MelSpectrogram
from keras.src.layers.preprocessing.normalization import Normalization
from keras.src.layers.preprocessing.pipeline import Pipeline
from keras.src.layers.preprocessing.rescaling import Rescaling
from keras.src.layers.preprocessing.stft_spectrogram import STFTSpectrogram
from keras.src.layers.preprocessing.string_lookup import StringLookup
from keras.src.layers.preprocessing.text_vectorization import TextVectorization
from keras.src.layers.regularization.activity_regularization import (
ActivityRegularization,
)
from keras.src.layers.regularization.alpha_dropout import AlphaDropout
from keras.src.layers.regularization.dropout import Dropout
from keras.src.layers.regularization.gaussian_dropout import GaussianDropout
from keras.src.layers.regularization.gaussian_noise import GaussianNoise
from keras.src.layers.regularization.spatial_dropout import SpatialDropout1D
from keras.src.layers.regularization.spatial_dropout import SpatialDropout2D
from keras.src.layers.regularization.spatial_dropout import SpatialDropout3D
from keras.src.layers.reshaping.cropping1d import Cropping1D
from keras.src.layers.reshaping.cropping2d import Cropping2D
from keras.src.layers.reshaping.cropping3d import Cropping3D
from keras.src.layers.reshaping.flatten import Flatten
from keras.src.layers.reshaping.permute import Permute
from keras.src.layers.reshaping.repeat_vector import RepeatVector
from keras.src.layers.reshaping.reshape import Reshape
from keras.src.layers.reshaping.up_sampling1d import UpSampling1D
from keras.src.layers.reshaping.up_sampling2d import UpSampling2D
from keras.src.layers.reshaping.up_sampling3d import UpSampling3D
from keras.src.layers.reshaping.zero_padding1d import ZeroPadding1D
from keras.src.layers.reshaping.zero_padding2d import ZeroPadding2D
from keras.src.layers.reshaping.zero_padding3d import ZeroPadding3D
from keras.src.layers.rnn.bidirectional import Bidirectional
from keras.src.layers.rnn.conv_lstm1d import ConvLSTM1D
from keras.src.layers.rnn.conv_lstm2d import ConvLSTM2D
from keras.src.layers.rnn.conv_lstm3d import ConvLSTM3D
from keras.src.layers.rnn.gru import GRU
from keras.src.layers.rnn.gru import GRUCell
from keras.src.layers.rnn.lstm import LSTM
from keras.src.layers.rnn.lstm import LSTMCell
from keras.src.layers.rnn.rnn import RNN
from keras.src.layers.rnn.simple_rnn import SimpleRNN
from keras.src.layers.rnn.simple_rnn import SimpleRNNCell
from keras.src.layers.rnn.stacked_rnn_cells import StackedRNNCells
from keras.src.layers.rnn.time_distributed import TimeDistributed
from keras.src.saving import serialization_lib
@keras_export("keras.layers.serialize")
def serialize(layer):
    """Returns the layer configuration as a Python dict.

    Args:
        layer: A `keras.layers.Layer` instance to serialize.

    Returns:
        Python dict which contains the configuration of the layer.
    """
    config = serialization_lib.serialize_keras_object(layer)
    return config
@keras_export("keras.layers.deserialize")
def deserialize(config, custom_objects=None):
    """Returns a Keras layer object via its configuration.

    Args:
        config: A python dict containing a serialized layer configuration.
        custom_objects: Optional dictionary mapping names (strings) to custom
            objects (classes and functions) to be considered during
            deserialization.

    Returns:
        A Keras layer instance.

    Raises:
        ValueError: If the deserialized object is not a `keras.layers.Layer`.
    """
    layer = serialization_lib.deserialize_keras_object(
        config,
        custom_objects=custom_objects,
    )
    if isinstance(layer, Layer):
        return layer
    raise ValueError(
        "`keras.layers.deserialize` was passed a `config` object that is "
        f"not a `keras.layers.Layer`. Received: {config}"
    )
| python | Apache-2.0 | c67eddb4ff8b615886893ca996dc216bc923d598 | 2026-01-04T14:38:29.819962Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.