repo | file_url | file_path | content | language | license | commit_sha | retrieved_at | truncated
|---|---|---|---|---|---|---|---|---|
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/manifold/__init__.py | sklearn/manifold/__init__.py | """Data embedding techniques."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from sklearn.manifold._classical_mds import ClassicalMDS
from sklearn.manifold._isomap import Isomap
from sklearn.manifold._locally_linear import (
LocallyLinearEmbedding,
locally_linear_embedding,
)
from sklearn.manifold._mds import MDS, smacof
from sklearn.manifold._spectral_embedding import SpectralEmbedding, spectral_embedding
from sklearn.manifold._t_sne import TSNE, trustworthiness
__all__ = [
"MDS",
"TSNE",
"ClassicalMDS",
"Isomap",
"LocallyLinearEmbedding",
"SpectralEmbedding",
"locally_linear_embedding",
"smacof",
"spectral_embedding",
"trustworthiness",
]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/manifold/_t_sne.py | sklearn/manifold/_t_sne.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# This is the exact and Barnes-Hut t-SNE implementation. There are other
# modifications of the algorithm:
# * Fast Optimization for t-SNE:
# https://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
from numbers import Integral, Real
from time import time
import numpy as np
from scipy import linalg
from scipy.sparse import csr_matrix, issparse
from scipy.spatial.distance import pdist, squareform
from sklearn.base import (
BaseEstimator,
ClassNamePrefixFeaturesOutMixin,
TransformerMixin,
_fit_context,
)
from sklearn.decomposition import PCA
# mypy error: Module 'sklearn.manifold' has no attribute '_utils'
# mypy error: Module 'sklearn.manifold' has no attribute '_barnes_hut_tsne'
from sklearn.manifold import _barnes_hut_tsne, _utils # type: ignore[attr-defined]
from sklearn.metrics.pairwise import _VALID_METRICS, pairwise_distances
from sklearn.neighbors import NearestNeighbors
from sklearn.utils import check_random_state
from sklearn.utils._openmp_helpers import _openmp_effective_n_threads
from sklearn.utils._param_validation import Interval, StrOptions, validate_params
from sklearn.utils.validation import _num_samples, check_non_negative, validate_data
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances.
Parameters
----------
distances : ndarray of shape (n_samples, n_samples)
Square matrix of (squared) pairwise distances between the samples.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : ndarray of shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
distances = distances.astype(np.float32, copy=False)
conditional_P = _utils._binary_search_perplexity(
distances, desired_perplexity, verbose
)
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
return P
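# A minimal usage sketch (illustrative addition, not part of the upstream
# module; the helper name is arbitrary): ``_joint_probabilities`` expects a
# square matrix of squared distances, as produced by
# ``pairwise_distances(..., squared=True)``, and returns the condensed
# joint-probability vector.
def _example_joint_probabilities():  # pragma: no cover - illustrative sketch
    rng = np.random.RandomState(0)
    X_toy = rng.randn(6, 3)
    D = pairwise_distances(X_toy, squared=True)  # square (6, 6) squared distances
    P = _joint_probabilities(D, desired_perplexity=2.0, verbose=0)
    assert P.shape == (15,)  # 6 * 5 / 2 entries, condensed form
    # Each symmetric pair p_ij is stored once, so the condensed vector sums to 0.5.
    assert abs(P.sum() - 0.5) < 1e-3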
def _joint_probabilities_nn(distances, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances using just nearest
neighbors.
This method is approximately equal to _joint_probabilities. The latter
is O(N^2) in the number of samples, while restricting the joint
probabilities to nearest neighbors reduces this to O(uN), where u is
the perplexity.
Parameters
----------
distances : sparse matrix of shape (n_samples, n_samples)
Distances from each sample to its n_neighbors nearest neighbors. All
other distances are left at zero (and are not materialized in memory).
Matrix should be of CSR format.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : sparse matrix of shape (n_samples, n_samples)
Sparse joint probability matrix restricted to nearest neighbors. Matrix
will be of CSR format.
"""
t0 = time()
# Compute conditional probabilities such that they approximately match
# the desired perplexity
distances.sort_indices()
n_samples = distances.shape[0]
distances_data = distances.data.reshape(n_samples, -1)
distances_data = distances_data.astype(np.float32, copy=False)
conditional_P = _utils._binary_search_perplexity(
distances_data, desired_perplexity, verbose
)
assert np.all(np.isfinite(conditional_P)), "All probabilities should be finite"
# Symmetrize the joint probability distribution using sparse operations
P = csr_matrix(
(conditional_P.ravel(), distances.indices, distances.indptr),
shape=(n_samples, n_samples),
)
P = P + P.T
# Normalize the joint probability distribution
sum_P = np.maximum(P.sum(), MACHINE_EPSILON)
P /= sum_P
assert np.all(np.abs(P.data) <= 1.0)
if verbose >= 2:
duration = time() - t0
print("[t-SNE] Computed conditional probabilities in {:.3f}s".format(duration))
return P
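# A minimal usage sketch (illustrative addition, not part of the upstream
# module; the helper name is arbitrary): the nearest-neighbors variant takes
# the CSR distance graph from ``NearestNeighbors.kneighbors_graph`` with its
# data squared, similar to how the Barnes-Hut fitting path builds it.
def _example_joint_probabilities_nn():  # pragma: no cover - illustrative sketch
    rng = np.random.RandomState(0)
    X_toy = rng.randn(10, 3)
    knn = NearestNeighbors(n_neighbors=6).fit(X_toy)
    D = knn.kneighbors_graph(mode="distance")
    D.data **= 2  # the perplexity search works on squared distances
    P = _joint_probabilities_nn(D, desired_perplexity=2.0, verbose=0)
    assert P.shape == (10, 10)
    # The symmetrized sparse matrix is a proper joint distribution.
    assert abs(P.sum() - 1.0) < 1e-3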
def _kl_divergence(
params,
P,
degrees_of_freedom,
n_samples,
n_components,
skip_num_points=0,
compute_error=True,
):
"""t-SNE objective function: gradient of the KL divergence
of p_ijs and q_ijs and the absolute error.
Parameters
----------
params : ndarray of shape (n_params,)
Unraveled embedding.
P : ndarray of shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
degrees_of_freedom : int
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
skip_num_points : int, default=0
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
compute_error: bool, default=True
If False, the kl_divergence is not computed and returns NaN.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : ndarray of shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
dist = pdist(X_embedded, "sqeuclidean")
dist /= degrees_of_freedom
dist += 1.0
dist **= (degrees_of_freedom + 1.0) / -2.0
Q = np.maximum(dist / (2.0 * np.sum(dist)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
if compute_error:
kl_divergence = 2.0 * np.dot(P, np.log(np.maximum(P, MACHINE_EPSILON) / Q))
else:
kl_divergence = np.nan
# Gradient: dC/dY
# pdist always returns double precision distances. Thus we allocate the
# gradient with the dtype of `params` so the result keeps the precision
# of the embedding.
grad = np.ndarray((n_samples, n_components), dtype=params.dtype)
PQd = squareform((P - Q) * dist)
for i in range(skip_num_points, n_samples):
grad[i] = np.dot(np.ravel(PQd[i], order="K"), X_embedded[i] - X_embedded)
grad = grad.ravel()
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad *= c
return kl_divergence, grad
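# A minimal sketch (illustrative addition, not part of the upstream module;
# the helper name is arbitrary) checking the analytic gradient of
# ``_kl_divergence`` against a central finite difference on one coordinate.
# ``P`` can be any condensed joint distribution summing to 0.5 (i.e. to 1 over
# the full symmetric matrix).
def _example_kl_divergence_gradient_check():  # pragma: no cover - illustrative sketch
    rng = np.random.RandomState(0)
    n_samples, n_components = 5, 2
    P = rng.rand(n_samples * (n_samples - 1) // 2)
    P /= 2.0 * P.sum()  # condensed vector sums to 0.5
    params = rng.randn(n_samples * n_components)
    _, grad = _kl_divergence(params, P, 1, n_samples, n_components)
    eps = 1e-5
    shifted = params.copy()
    shifted[0] += eps
    f_plus, _ = _kl_divergence(shifted, P, 1, n_samples, n_components)
    shifted[0] -= 2 * eps
    f_minus, _ = _kl_divergence(shifted, P, 1, n_samples, n_components)
    numeric = (f_plus - f_minus) / (2 * eps)
    assert np.isclose(numeric, grad[0], rtol=1e-3, atol=1e-7)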
def _kl_divergence_bh(
params,
P,
degrees_of_freedom,
n_samples,
n_components,
angle=0.5,
skip_num_points=0,
verbose=False,
compute_error=True,
num_threads=1,
):
"""t-SNE objective function: KL divergence of p_ijs and q_ijs.
Uses Barnes-Hut tree methods to calculate the gradient that
runs in O(NlogN) instead of O(N^2).
Parameters
----------
params : ndarray of shape (n_params,)
Unraveled embedding.
P : sparse matrix of shape (n_samples, n_samples)
Sparse approximate joint probability matrix, computed only for the
k nearest-neighbors and symmetrized. Matrix should be of CSR format.
degrees_of_freedom : int
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
angle : float, default=0.5
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
in the range of 0.2 - 0.8. An angle below 0.2 quickly increases the
computation time and an angle above 0.8 quickly increases the error.
skip_num_points : int, default=0
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
verbose : int, default=False
Verbosity level.
compute_error: bool, default=True
If False, the kl_divergence is not computed and returns NaN.
num_threads : int, default=1
Number of threads used to compute the gradient. This is set here to
avoid calling _openmp_effective_n_threads for each gradient step.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : ndarray of shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
params = params.astype(np.float32, copy=False)
X_embedded = params.reshape(n_samples, n_components)
val_P = P.data.astype(np.float32, copy=False)
neighbors = P.indices.astype(np.int64, copy=False)
indptr = P.indptr.astype(np.int64, copy=False)
grad = np.zeros(X_embedded.shape, dtype=np.float32)
error = _barnes_hut_tsne.gradient(
val_P,
X_embedded,
neighbors,
indptr,
grad,
angle,
n_components,
verbose,
dof=degrees_of_freedom,
compute_error=compute_error,
num_threads=num_threads,
)
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad = grad.ravel()
grad *= c
return error, grad
def _gradient_descent(
objective,
p0,
it,
max_iter,
n_iter_check=1,
n_iter_without_progress=300,
momentum=0.8,
learning_rate=200.0,
min_gain=0.01,
min_grad_norm=1e-7,
verbose=0,
args=None,
kwargs=None,
):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : callable
Should return a tuple of cost and gradient for a given parameter
vector. When expensive to compute, the cost can be skipped: this
function sets a ``compute_error`` keyword that is only True every
``n_iter_check`` steps (and on the last iteration), when the error is
actually needed.
p0 : array-like of shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
max_iter : int
Maximum number of gradient descent iterations.
n_iter_check : int, default=1
Number of iterations before evaluating the global error. If the error
is sufficiently low, we abort the optimization.
n_iter_without_progress : int, default=300
Maximum number of iterations without progress before we abort the
optimization.
momentum : float within (0.0, 1.0), default=0.8
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, default=200.0
The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
the learning rate is too high, the data may look like a 'ball' with any
point approximately equidistant from its nearest neighbours. If the
learning rate is too low, most points may look compressed in a dense
cloud with few outliers.
min_gain : float, default=0.01
Minimum individual gain for each parameter.
min_grad_norm : float, default=1e-7
If the gradient norm is below this threshold, the optimization will
be aborted.
verbose : int, default=0
Verbosity level.
args : sequence, default=None
Arguments to pass to objective function.
kwargs : dict, default=None
Keyword arguments to pass to objective function.
Returns
-------
p : ndarray of shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(float).max
best_error = np.finfo(float).max
best_iter = i = it
tic = time()
for i in range(it, max_iter):
check_convergence = (i + 1) % n_iter_check == 0
# only compute the error when needed
kwargs["compute_error"] = check_convergence or i == max_iter - 1
error, grad = objective(p, *args, **kwargs)
inc = update * grad < 0.0
dec = np.invert(inc)
gains[inc] += 0.2
gains[dec] *= 0.8
np.clip(gains, min_gain, np.inf, out=gains)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if check_convergence:
toc = time()
duration = toc - tic
tic = toc
grad_norm = linalg.norm(grad)
if verbose >= 2:
print(
"[t-SNE] Iteration %d: error = %.7f,"
" gradient norm = %.7f"
" (%s iterations in %0.3fs)"
% (i + 1, error, grad_norm, n_iter_check, duration)
)
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print(
"[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress)
)
break
if grad_norm <= min_grad_norm:
if verbose >= 2:
print(
"[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm)
)
break
return p, error, i
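# A minimal sketch (illustrative addition, not part of the upstream module;
# the helper name is arbitrary) of the optimiser's calling convention on a
# simple quadratic: the objective must return ``(error, gradient)`` and accept
# a ``compute_error`` keyword, which ``_gradient_descent`` toggles between
# progress checks. Momentum is set to 0 here only to keep the dynamics easy to
# reason about.
def _example_gradient_descent():  # pragma: no cover - illustrative sketch
    def quadratic(p, compute_error=True):
        error = np.sum(p**2) if compute_error else np.nan
        return error, 2.0 * p
    p0 = np.array([3.0, -4.0])
    p, error, _ = _gradient_descent(
        quadratic, p0, it=0, max_iter=200, learning_rate=0.1, momentum=0.0
    )
    assert error < 1e-6
    assert np.all(np.abs(p) < 1e-3)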
@validate_params(
{
"X": ["array-like", "sparse matrix"],
"X_embedded": ["array-like", "sparse matrix"],
"n_neighbors": [Interval(Integral, 1, None, closed="left")],
"metric": [StrOptions(set(_VALID_METRICS) | {"precomputed"}), callable],
},
prefer_skip_nested_validation=True,
)
def trustworthiness(X, X_embedded, *, n_neighbors=5, metric="euclidean"):
r"""Indicate to what extent the local structure is retained.
The trustworthiness is within [0, 1]. It is defined as
.. math::
T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
\sum_{j \in \mathcal{N}_{i}^{k}} \max(0, (r(i, j) - k))
where for each sample i, :math:`\mathcal{N}_{i}^{k}` are its k nearest
neighbors in the output space, and every sample j is its :math:`r(i, j)`-th
nearest neighbor in the input space. In other words, any unexpected nearest
neighbors in the output space are penalised in proportion to their rank in
the input space.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
(n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
X_embedded : {array-like, sparse matrix} of shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
n_neighbors : int, default=5
The number of neighbors that will be considered. Should be less than
`n_samples / 2` to ensure the trustworthiness lies within [0, 1], as
mentioned in [1]_. An error will be raised otherwise.
metric : str or callable, default='euclidean'
Which metric to use for computing pairwise distances between samples
from the original input space. If metric is 'precomputed', X must be a
matrix of pairwise distances or squared distances. Otherwise, for a list
of available metrics, see the documentation of argument metric in
`sklearn.pairwise.pairwise_distances` and metrics listed in
`sklearn.metrics.pairwise.PAIRWISE_DISTANCE_FUNCTIONS`. Note that the
"cosine" metric uses :func:`~sklearn.metrics.pairwise.cosine_distances`.
.. versionadded:: 0.20
Returns
-------
trustworthiness : float
Trustworthiness of the low-dimensional embedding.
References
----------
.. [1] Jarkko Venna and Samuel Kaski. 2001. Neighborhood
Preservation in Nonlinear Projection Methods: An Experimental Study.
In Proceedings of the International Conference on Artificial Neural Networks
(ICANN '01). Springer-Verlag, Berlin, Heidelberg, 485-491.
.. [2] Laurens van der Maaten. Learning a Parametric Embedding by Preserving
Local Structure. Proceedings of the Twelfth International Conference on
Artificial Intelligence and Statistics, PMLR 5:384-391, 2009.
Examples
--------
>>> from sklearn.datasets import make_blobs
>>> from sklearn.decomposition import PCA
>>> from sklearn.manifold import trustworthiness
>>> X, _ = make_blobs(n_samples=100, n_features=10, centers=3, random_state=42)
>>> X_embedded = PCA(n_components=2).fit_transform(X)
>>> print(f"{trustworthiness(X, X_embedded, n_neighbors=5):.2f}")
0.92
"""
n_samples = _num_samples(X)
if n_neighbors >= n_samples / 2:
raise ValueError(
f"n_neighbors ({n_neighbors}) should be less than n_samples / 2"
f" ({n_samples / 2})"
)
dist_X = pairwise_distances(X, metric=metric)
if metric == "precomputed":
dist_X = dist_X.copy()
# we set the diagonal to np.inf to exclude the points themselves from
# their own neighborhood
np.fill_diagonal(dist_X, np.inf)
ind_X = np.argsort(dist_X, axis=1)
# `ind_X[i]` is the index of sorted distances between i and other samples
ind_X_embedded = (
NearestNeighbors(n_neighbors=n_neighbors)
.fit(X_embedded)
.kneighbors(return_distance=False)
)
# We build an inverted index of neighbors in the input space: For sample i,
# we define `inverted_index[i]` as the inverted index of sorted distances:
# inverted_index[i][ind_X[i]] = np.arange(1, n_samples + 1)
inverted_index = np.zeros((n_samples, n_samples), dtype=int)
ordered_indices = np.arange(n_samples + 1)
inverted_index[ordered_indices[:-1, np.newaxis], ind_X] = ordered_indices[1:]
ranks = (
inverted_index[ordered_indices[:-1, np.newaxis], ind_X_embedded] - n_neighbors
)
t = np.sum(ranks[ranks > 0])
t = 1.0 - t * (
2.0 / (n_samples * n_neighbors * (2.0 * n_samples - 3.0 * n_neighbors - 1.0))
)
return t
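# A minimal sketch (illustrative addition, not part of the upstream module;
# the helper name is arbitrary) of the inverted-index trick used above:
# ``inverted_index[i, j]`` stores the rank (starting at 1) of sample ``j``
# among the input-space neighbours of ``i``, so indexing it with the
# embedded-space neighbours yields r(i, j) directly.
def _example_trustworthiness_ranks():  # pragma: no cover - illustrative sketch
    dist_X = np.array(
        [[np.inf, 1.0, 4.0], [1.0, np.inf, 2.0], [4.0, 2.0, np.inf]]
    )
    ind_X = np.argsort(dist_X, axis=1)  # neighbours of each sample by distance
    n = dist_X.shape[0]
    inverted_index = np.zeros((n, n), dtype=int)
    rows = np.arange(n)[:, np.newaxis]
    inverted_index[rows, ind_X] = np.arange(1, n + 1)
    # Sample 1 is the closest neighbour of sample 0, sample 2 the second closest.
    assert inverted_index[0, 1] == 1
    assert inverted_index[0, 2] == 2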
class TSNE(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
"""T-distributed Stochastic Neighbor Embedding.
t-SNE [1] is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples. For more tips see Laurens van der Maaten's FAQ [2].
Read more in the :ref:`User Guide <t_sne>`.
Parameters
----------
n_components : int, default=2
Dimension of the embedded space.
perplexity : float, default=30.0
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
usually require a larger perplexity. Consider selecting a value
between 5 and 50. Different values can result in significantly
different results. The perplexity must be less than the number
of samples.
early_exaggeration : float, default=12.0
Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high.
learning_rate : float or "auto", default="auto"
The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
the learning rate is too high, the data may look like a 'ball' with any
point approximately equidistant from its nearest neighbours. If the
learning rate is too low, most points may look compressed in a dense
cloud with few outliers. If the cost function gets stuck in a bad local
minimum increasing the learning rate may help.
Note that many other t-SNE implementations (bhtsne, FIt-SNE, openTSNE,
etc.) use a definition of learning_rate that is 4 times smaller than
ours. So our learning_rate=200 corresponds to learning_rate=800 in
those other implementations. The 'auto' option sets the learning_rate
to `max(N / early_exaggeration / 4, 50)` where N is the sample size,
following [4] and [5].
.. versionchanged:: 1.2
The default value changed to `"auto"`.
max_iter : int, default=1000
Maximum number of iterations for the optimization. Should be at
least 250.
.. versionchanged:: 1.5
Parameter name changed from `n_iter` to `max_iter`.
n_iter_without_progress : int, default=300
Maximum number of iterations without progress before we abort the
optimization, used after 250 initial iterations with early
exaggeration. Note that progress is only checked every 50 iterations so
this value is rounded to the next multiple of 50.
.. versionadded:: 0.17
parameter *n_iter_without_progress* to control stopping criteria.
min_grad_norm : float, default=1e-7
If the gradient norm is below this threshold, the optimization will
be stopped.
metric : str or callable, default='euclidean'
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
.. versionadded:: 1.1
init : {"random", "pca"} or ndarray of shape (n_samples, n_components), \
default="pca"
Initialization of embedding.
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization.
.. versionchanged:: 1.2
The default value changed to `"pca"`.
verbose : int, default=0
Verbosity level.
random_state : int, RandomState instance or None, default=None
Determines the random number generator. Pass an int for reproducible
results across multiple function calls. Note that different
initializations might result in different local minima of the cost
function. See :term:`Glossary <random_state>`.
method : {'barnes_hut', 'exact'}, default='barnes_hut'
By default the gradient calculation algorithm uses Barnes-Hut
approximation running in O(NlogN) time. method='exact'
will run on the slower, but exact, algorithm in O(N^2) time. The
exact algorithm should be used when nearest-neighbor errors need
to be better than 3%. However, the exact method cannot scale to
millions of examples.
.. versionadded:: 0.17
Approximate optimization *method* via the Barnes-Hut.
angle : float, default=0.5
Only used if method='barnes_hut'
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
in the range of 0.2 - 0.8. An angle below 0.2 quickly increases the
computation time and an angle above 0.8 quickly increases the error.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search. This parameter
has no impact when ``metric="precomputed"`` or
(``metric="euclidean"`` and ``method="exact"``).
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionadded:: 0.22
Attributes
----------
embedding_ : array-like of shape (n_samples, n_components)
Stores the embedding vectors.
kl_divergence_ : float
Kullback-Leibler divergence after optimization.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
learning_rate_ : float
Effective learning rate.
.. versionadded:: 1.2
n_iter_ : int
Number of iterations run.
See Also
--------
sklearn.decomposition.PCA : Principal component analysis that is a linear
dimensionality reduction method.
sklearn.decomposition.KernelPCA : Non-linear dimensionality reduction using
kernels and PCA.
MDS : Manifold learning using multidimensional scaling.
Isomap : Manifold learning based on Isometric Mapping.
LocallyLinearEmbedding : Manifold learning using Locally Linear Embedding.
SpectralEmbedding : Spectral embedding for non-linear dimensionality.
Notes
-----
For an example of using :class:`~sklearn.manifold.TSNE` in combination with
:class:`~sklearn.neighbors.KNeighborsTransformer` see
:ref:`sphx_glr_auto_examples_neighbors_approximate_nearest_neighbors.py`.
References
----------
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
https://lvdmaaten.github.io/tsne/
[3] L.J.P. van der Maaten. Accelerating t-SNE using Tree-Based Algorithms.
Journal of Machine Learning Research 15(Oct):3221-3245, 2014.
https://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf
[4] Belkina, A. C., Ciccolella, C. O., Anno, R., Halpert, R., Spidlen, J.,
& Snyder-Cappione, J. E. (2019). Automated optimized parameters for
T-distributed stochastic neighbor embedding improve visualization
and analysis of large datasets. Nature Communications, 10(1), 1-12.
[5] Kobak, D., & Berens, P. (2019). The art of using t-SNE for single-cell
transcriptomics. Nature Communications, 10(1), 1-14.
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> X_embedded = TSNE(n_components=2, learning_rate='auto',
... init='random', perplexity=3).fit_transform(X)
>>> X_embedded.shape
(4, 2)
"""
_parameter_constraints: dict = {
"n_components": [Interval(Integral, 1, None, closed="left")],
"perplexity": [Interval(Real, 0, None, closed="neither")],
"early_exaggeration": [Interval(Real, 1, None, closed="left")],
"learning_rate": [
StrOptions({"auto"}),
Interval(Real, 0, None, closed="neither"),
],
"max_iter": [Interval(Integral, 250, None, closed="left")],
"n_iter_without_progress": [Interval(Integral, -1, None, closed="left")],
"min_grad_norm": [Interval(Real, 0, None, closed="left")],
"metric": [StrOptions(set(_VALID_METRICS) | {"precomputed"}), callable],
"metric_params": [dict, None],
"init": [
StrOptions({"pca", "random"}),
np.ndarray,
],
"verbose": ["verbose"],
"random_state": ["random_state"],
"method": [StrOptions({"barnes_hut", "exact"})],
"angle": [Interval(Real, 0, 1, closed="both")],
"n_jobs": [None, Integral],
}
# Control the number of exploration iterations with early_exaggeration on
_EXPLORATION_MAX_ITER = 250
# Control the number of iterations between progress checks
_N_ITER_CHECK = 50
def __init__(
self,
n_components=2,
*,
perplexity=30.0,
early_exaggeration=12.0,
learning_rate="auto",
max_iter=1000,
n_iter_without_progress=300,
min_grad_norm=1e-7,
metric="euclidean",
metric_params=None,
init="pca",
verbose=0,
random_state=None,
method="barnes_hut",
angle=0.5,
n_jobs=None,
):
self.n_components = n_components
self.perplexity = perplexity
self.early_exaggeration = early_exaggeration
self.learning_rate = learning_rate
self.max_iter = max_iter
self.n_iter_without_progress = n_iter_without_progress
self.min_grad_norm = min_grad_norm
self.metric = metric
self.metric_params = metric_params
self.init = init
self.verbose = verbose
self.random_state = random_state
self.method = method
self.angle = angle
self.n_jobs = n_jobs
def _check_params_vs_input(self, X):
if self.perplexity >= X.shape[0]:
raise ValueError(
f"perplexity ({self.perplexity}) must be less "
f"than n_samples ({X.shape[0]})"
)
def _fit(self, X, skip_num_points=0):
"""Private function to fit the model using X as training data."""
if self.learning_rate == "auto":
# See issue #18018
self.learning_rate_ = X.shape[0] / self.early_exaggeration / 4
self.learning_rate_ = np.maximum(self.learning_rate_, 50)
else:
self.learning_rate_ = self.learning_rate
if self.method == "barnes_hut":
X = validate_data(
self,
X,
accept_sparse=["csr"],
ensure_min_samples=2,
dtype=[np.float32, np.float64],
)
else:
X = validate_data(
self,
X,
accept_sparse=["csr", "csc", "coo"],
dtype=[np.float32, np.float64],
)
if self.metric == "precomputed":
if isinstance(self.init, str) and self.init == "pca":
raise ValueError(
'The parameter init="pca" cannot be used with metric="precomputed".'
)
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square distance matrix")
check_non_negative(
X,
(
"TSNE.fit(). With metric='precomputed', X "
"should contain positive distances."
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/manifold/_locally_linear.py | sklearn/manifold/_locally_linear.py | """Locally Linear Embedding"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from numbers import Integral, Real
import numpy as np
from scipy.linalg import eigh, qr, solve, svd
from scipy.sparse import csr_matrix, eye, lil_matrix
from scipy.sparse.linalg import eigsh
from sklearn.base import (
BaseEstimator,
ClassNamePrefixFeaturesOutMixin,
TransformerMixin,
_fit_context,
_UnstableArchMixin,
)
from sklearn.neighbors import NearestNeighbors
from sklearn.utils import check_array, check_random_state
from sklearn.utils._arpack import _init_arpack_v0
from sklearn.utils._param_validation import Interval, StrOptions, validate_params
from sklearn.utils.validation import FLOAT_DTYPES, check_is_fitted, validate_data
def barycenter_weights(X, Y, indices, reg=1e-3):
"""Compute barycenter weights of X from Y along the first axis
We estimate the weights to assign to each point in Y[indices] to recover
the point X[i]. The barycenter weights sum to 1.
Parameters
----------
X : array-like, shape (n_samples, n_dim)
Y : array-like, shape (n_samples, n_dim)
indices : array-like, shape (n_samples, n_neighbors)
Indices of the points in Y used to compute the barycenter
reg : float, default=1e-3
Amount of regularization to add for the problem to be
well-posed in the case of n_neighbors > n_dim
Returns
-------
B : array-like, shape (n_samples, n_neighbors)
Notes
-----
See developers note for more information.
"""
X = check_array(X, dtype=FLOAT_DTYPES)
Y = check_array(Y, dtype=FLOAT_DTYPES)
indices = check_array(indices, dtype=int)
n_samples, n_neighbors = indices.shape
assert X.shape[0] == n_samples
B = np.empty((n_samples, n_neighbors), dtype=X.dtype)
v = np.ones(n_neighbors, dtype=X.dtype)
# this might raise a LinalgError if G is singular and has trace
# zero
for i, ind in enumerate(indices):
A = Y[ind]
C = A - X[i] # broadcasting
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = reg * trace
else:
R = reg
G.flat[:: n_neighbors + 1] += R
w = solve(G, v, assume_a="pos")
B[i, :] = w / np.sum(w)
return B
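# A minimal sketch (illustrative addition, not part of the upstream module;
# the helper name is arbitrary): when a point is exactly the barycenter of its
# neighbours, ``barycenter_weights`` recovers weights that sum to one and
# reconstruct the point.
def _example_barycenter_weights():  # pragma: no cover - illustrative sketch
    Y = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
    X = np.array([[0.5, 0.5]])  # center of the four neighbours below
    indices = np.array([[0, 1, 2, 3]])
    B = barycenter_weights(X, Y, indices)
    assert np.isclose(B.sum(), 1.0)
    assert np.allclose(B @ Y[indices[0]], X, atol=1e-6)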
def barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3, n_jobs=None):
"""Computes the barycenter weighted graph of k-Neighbors for points in X
Parameters
----------
X : {array-like, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array or a NearestNeighbors object.
n_neighbors : int
Number of neighbors for each sample.
reg : float, default=1e-3
Amount of regularization when solving the least-squares
problem. Only relevant if mode='barycenter'. If None, use the
default.
n_jobs : int or None, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
See Also
--------
sklearn.neighbors.kneighbors_graph
sklearn.neighbors.radius_neighbors_graph
"""
knn = NearestNeighbors(n_neighbors=n_neighbors + 1, n_jobs=n_jobs).fit(X)
X = knn._fit_X
n_samples = knn.n_samples_fit_
ind = knn.kneighbors(X, return_distance=False)[:, 1:]
data = barycenter_weights(X, X, ind, reg=reg)
indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
return csr_matrix((data.ravel(), ind.ravel(), indptr), shape=(n_samples, n_samples))
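# A minimal sketch (illustrative addition, not part of the upstream module;
# the helper name is arbitrary): each row of the barycenter k-neighbours graph
# holds the reconstruction weights of one sample, so every row sums to one.
def _example_barycenter_kneighbors_graph():  # pragma: no cover - illustrative sketch
    rng = np.random.RandomState(0)
    X = rng.rand(20, 3)
    W = barycenter_kneighbors_graph(X, n_neighbors=5)
    assert W.shape == (20, 20)
    assert np.allclose(np.asarray(W.sum(axis=1)).ravel(), 1.0)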
def null_space(
M, k, k_skip=1, eigen_solver="arpack", tol=1e-6, max_iter=100, random_state=None
):
"""
Find the null space of a matrix M.
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : int
Number of eigenvalues/vectors to return
k_skip : int, default=1
Number of low eigenvalues to skip.
eigen_solver : {'auto', 'arpack', 'dense'}, default='arpack'
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, default=1e-6
Tolerance for 'arpack' method.
Not used if eigen_solver=='dense'.
max_iter : int, default=100
Maximum number of iterations for 'arpack' method.
Not used if eigen_solver=='dense'
random_state : int, RandomState instance, default=None
Determines the random number generator when ``solver`` == 'arpack'.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
"""
if eigen_solver == "auto":
if M.shape[0] > 200 and k + k_skip < 10:
eigen_solver = "arpack"
else:
eigen_solver = "dense"
if eigen_solver == "arpack":
v0 = _init_arpack_v0(M.shape[0], random_state)
try:
eigen_values, eigen_vectors = eigsh(
M, k + k_skip, sigma=0.0, tol=tol, maxiter=max_iter, v0=v0
)
except RuntimeError as e:
raise ValueError(
"Error in determining null-space with ARPACK. Error message: "
"'%s'. Note that eigen_solver='arpack' can fail when the "
"weight matrix is singular or otherwise ill-behaved. In that "
"case, eigen_solver='dense' is recommended. See online "
"documentation for more information." % e
) from e
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == "dense":
if hasattr(M, "toarray"):
M = M.toarray()
eigen_values, eigen_vectors = eigh(
M, subset_by_index=(k_skip, k + k_skip - 1), overwrite_a=True
)
index = np.argsort(np.abs(eigen_values))
return eigen_vectors[:, index], np.sum(eigen_values)
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
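# A minimal sketch (illustrative addition, not part of the upstream module;
# the helper name is arbitrary): for a graph Laplacian the constant vector
# spans the exact null space, so ``k_skip=1`` discards it and the next
# eigenvector is returned. For a path graph that eigenvector varies
# monotonically along the path.
def _example_null_space():  # pragma: no cover - illustrative sketch
    n = 6
    adjacency = np.diag(np.ones(n - 1), 1) + np.diag(np.ones(n - 1), -1)
    laplacian = np.diag(adjacency.sum(axis=1)) - adjacency
    vectors, _ = null_space(laplacian, k=1, k_skip=1, eigen_solver="dense")
    steps = np.diff(vectors[:, 0])
    assert np.all(steps > 0) or np.all(steps < 0)  # monotone along the path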
def _locally_linear_embedding(
X,
*,
n_neighbors,
n_components,
reg=1e-3,
eigen_solver="auto",
tol=1e-6,
max_iter=100,
method="standard",
hessian_tol=1e-4,
modified_tol=1e-12,
random_state=None,
n_jobs=None,
):
nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1, n_jobs=n_jobs)
nbrs.fit(X)
X = nbrs._fit_X
N, d_in = X.shape
if n_components > d_in:
raise ValueError(
"output dimension must be less than or equal to input dimension"
)
if n_neighbors >= N:
raise ValueError(
"Expected n_neighbors < n_samples, but n_samples = %d, n_neighbors = %d"
% (N, n_neighbors)
)
M_sparse = eigen_solver != "dense"
M_container_constructor = lil_matrix if M_sparse else np.zeros
if method == "standard":
W = barycenter_kneighbors_graph(
nbrs, n_neighbors=n_neighbors, reg=reg, n_jobs=n_jobs
)
# we'll compute M = (I-W)'(I-W)
# depending on the solver, we'll do this differently
if M_sparse:
M = eye(*W.shape, format=W.format) - W
M = M.T @ M
else:
M = (W.T @ W - W.T - W).toarray()
M.flat[:: M.shape[0] + 1] += 1 # M = W' W - W' - W + I
elif method == "hessian":
dp = n_components * (n_components + 1) // 2
if n_neighbors <= n_components + dp:
raise ValueError(
"for method='hessian', n_neighbors must be "
"greater than "
"[n_components * (n_components + 3) / 2]"
)
neighbors = nbrs.kneighbors(
X, n_neighbors=n_neighbors + 1, return_distance=False
)
neighbors = neighbors[:, 1:]
Yi = np.empty((n_neighbors, 1 + n_components + dp), dtype=np.float64)
Yi[:, 0] = 1
M = M_container_constructor((N, N), dtype=np.float64)
use_svd = n_neighbors > d_in
for i in range(N):
Gi = X[neighbors[i]]
Gi -= Gi.mean(0)
# build Hessian estimator
if use_svd:
U = svd(Gi, full_matrices=0)[0]
else:
Ci = np.dot(Gi, Gi.T)
U = eigh(Ci)[1][:, ::-1]
Yi[:, 1 : 1 + n_components] = U[:, :n_components]
j = 1 + n_components
for k in range(n_components):
Yi[:, j : j + n_components - k] = U[:, k : k + 1] * U[:, k:n_components]
j += n_components - k
Q, R = qr(Yi)
w = Q[:, n_components + 1 :]
S = w.sum(0)
S[np.where(abs(S) < hessian_tol)] = 1
w /= S
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(w, w.T)
elif method == "modified":
if n_neighbors < n_components:
raise ValueError("modified LLE requires n_neighbors >= n_components")
neighbors = nbrs.kneighbors(
X, n_neighbors=n_neighbors + 1, return_distance=False
)
neighbors = neighbors[:, 1:]
# find the eigenvectors and eigenvalues of each local covariance
# matrix. We want V[i] to be a [n_neighbors x n_neighbors] matrix,
# where the columns are eigenvectors
V = np.zeros((N, n_neighbors, n_neighbors))
nev = min(d_in, n_neighbors)
evals = np.zeros([N, nev])
# choose the most efficient way to find the eigenvectors
use_svd = n_neighbors > d_in
if use_svd:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
V[i], evals[i], _ = svd(X_nbrs, full_matrices=True)
evals **= 2
else:
for i in range(N):
X_nbrs = X[neighbors[i]] - X[i]
C_nbrs = np.dot(X_nbrs, X_nbrs.T)
evi, vi = eigh(C_nbrs)
evals[i] = evi[::-1]
V[i] = vi[:, ::-1]
# find regularized weights: this is like normal LLE.
# because we've already computed the SVD of each covariance matrix,
# it's faster to use this rather than np.linalg.solve
reg = 1e-3 * evals.sum(1)
tmp = np.dot(V.transpose(0, 2, 1), np.ones(n_neighbors))
tmp[:, :nev] /= evals + reg[:, None]
tmp[:, nev:] /= reg[:, None]
w_reg = np.zeros((N, n_neighbors))
for i in range(N):
w_reg[i] = np.dot(V[i], tmp[i])
w_reg /= w_reg.sum(1)[:, None]
# calculate eta: the median of the ratio of small to large eigenvalues
# across the points. This is used to determine s_i, below
rho = evals[:, n_components:].sum(1) / evals[:, :n_components].sum(1)
eta = np.median(rho)
# find s_i, the size of the "almost null space" for each point:
# this is the size of the largest set of eigenvalues
# such that Sum[v; v in set]/Sum[v; v not in set] < eta
s_range = np.zeros(N, dtype=int)
evals_cumsum = np.cumsum(evals, 1)
eta_range = evals_cumsum[:, -1:] / evals_cumsum[:, :-1] - 1
for i in range(N):
s_range[i] = np.searchsorted(eta_range[i, ::-1], eta)
s_range += n_neighbors - nev # number of zero eigenvalues
# Now calculate M.
# This is the [N x N] matrix whose null space is the desired embedding
M = M_container_constructor((N, N), dtype=np.float64)
for i in range(N):
s_i = s_range[i]
# select bottom s_i eigenvectors and calculate alpha
Vi = V[i, :, n_neighbors - s_i :]
alpha_i = np.linalg.norm(Vi.sum(0)) / np.sqrt(s_i)
# compute Householder matrix which satisfies
# Hi*Vi.T*ones(n_neighbors) = alpha_i*ones(s)
# using prescription from paper
h = np.full(s_i, alpha_i) - np.dot(Vi.T, np.ones(n_neighbors))
norm_h = np.linalg.norm(h)
if norm_h < modified_tol:
h *= 0
else:
h /= norm_h
# Householder matrix is
# >> Hi = np.identity(s_i) - 2*np.outer(h,h)
# Then the weight matrix is
# >> Wi = np.dot(Vi,Hi) + (1-alpha_i) * w_reg[i,:,None]
# We do this much more efficiently:
Wi = Vi - 2 * np.outer(np.dot(Vi, h), h) + (1 - alpha_i) * w_reg[i, :, None]
# Update M as follows:
# >> W_hat = np.zeros( (N,s_i) )
# >> W_hat[neighbors[i],:] = Wi
# >> W_hat[i] -= 1
# >> M += np.dot(W_hat,W_hat.T)
# We can do this much more efficiently:
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] += np.dot(Wi, Wi.T)
Wi_sum1 = Wi.sum(1)
M[i, neighbors[i]] -= Wi_sum1
M[neighbors[i], [i]] -= Wi_sum1
M[i, i] += s_i
elif method == "ltsa":
neighbors = nbrs.kneighbors(
X, n_neighbors=n_neighbors + 1, return_distance=False
)
neighbors = neighbors[:, 1:]
M = M_container_constructor((N, N), dtype=np.float64)
use_svd = n_neighbors > d_in
for i in range(N):
Xi = X[neighbors[i]]
Xi -= Xi.mean(0)
# compute n_components largest eigenvalues of Xi @ Xi^T
if use_svd:
v = svd(Xi, full_matrices=True)[0]
else:
Ci = np.dot(Xi, Xi.T)
v = eigh(Ci)[1][:, ::-1]
Gi = np.zeros((n_neighbors, n_components + 1))
Gi[:, 1:] = v[:, :n_components]
Gi[:, 0] = 1.0 / np.sqrt(n_neighbors)
GiGiT = np.dot(Gi, Gi.T)
nbrs_x, nbrs_y = np.meshgrid(neighbors[i], neighbors[i])
M[nbrs_x, nbrs_y] -= GiGiT
M[neighbors[i], neighbors[i]] += np.ones(shape=n_neighbors)
if M_sparse:
M = M.tocsr()
return null_space(
M,
n_components,
k_skip=1,
eigen_solver=eigen_solver,
tol=tol,
max_iter=max_iter,
random_state=random_state,
)
@validate_params(
{
"X": ["array-like", NearestNeighbors],
"n_neighbors": [Interval(Integral, 1, None, closed="left")],
"n_components": [Interval(Integral, 1, None, closed="left")],
"reg": [Interval(Real, 0, None, closed="left")],
"eigen_solver": [StrOptions({"auto", "arpack", "dense"})],
"tol": [Interval(Real, 0, None, closed="left")],
"max_iter": [Interval(Integral, 1, None, closed="left")],
"method": [StrOptions({"standard", "hessian", "modified", "ltsa"})],
"hessian_tol": [Interval(Real, 0, None, closed="left")],
"modified_tol": [Interval(Real, 0, None, closed="left")],
"random_state": ["random_state"],
"n_jobs": [None, Integral],
},
prefer_skip_nested_validation=True,
)
def locally_linear_embedding(
X,
*,
n_neighbors,
n_components,
reg=1e-3,
eigen_solver="auto",
tol=1e-6,
max_iter=100,
method="standard",
hessian_tol=1e-4,
modified_tol=1e-12,
random_state=None,
n_jobs=None,
):
"""Perform a Locally Linear Embedding analysis on the data.
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
X : {array-like, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array or a NearestNeighbors object.
n_neighbors : int
Number of neighbors to consider for each point.
n_components : int
Number of coordinates for the manifold.
reg : float, default=1e-3
Regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : {'auto', 'arpack', 'dense'}, default='auto'
auto : algorithm will attempt to choose the best method for input data
arpack : use arnoldi iteration in shift-invert mode.
For this method, M may be a dense matrix, sparse matrix,
or general linear operator.
Warning: ARPACK can be unstable for some problems. It is
best to try several random seeds in order to check results.
dense : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array
or matrix type. This method should be avoided for
large problems.
tol : float, default=1e-6
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : int, default=100
Maximum number of iterations for the arpack solver.
method : {'standard', 'hessian', 'modified', 'ltsa'}, default='standard'
standard : use the standard locally linear embedding algorithm.
see reference [1]_
hessian : use the Hessian eigenmap method. This method requires
n_neighbors > n_components * (1 + (n_components + 1) / 2).
see reference [2]_
modified : use the modified locally linear embedding algorithm.
see reference [3]_
ltsa : use local tangent space alignment algorithm
see reference [4]_
hessian_tol : float, default=1e-4
Tolerance for Hessian eigenmapping method.
Only used if method == 'hessian'.
modified_tol : float, default=1e-12
Tolerance for modified LLE method.
Only used if method == 'modified'.
random_state : int, RandomState instance, default=None
Determines the random number generator when ``solver`` == 'arpack'.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
n_jobs : int or None, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Returns
-------
Y : ndarray of shape (n_samples, n_components)
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).
.. [2] Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.
<https://citeseerx.ist.psu.edu/doc_view/pid/0b060fdbd92cbcc66b383bcaa9ba5e5e624d7ee3>`_
.. [4] Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.manifold import locally_linear_embedding
>>> X, _ = load_digits(return_X_y=True)
>>> X.shape
(1797, 64)
>>> embedding, _ = locally_linear_embedding(X[:100], n_neighbors=5, n_components=2)
>>> embedding.shape
(100, 2)
"""
return _locally_linear_embedding(
X=X,
n_neighbors=n_neighbors,
n_components=n_components,
reg=reg,
eigen_solver=eigen_solver,
tol=tol,
max_iter=max_iter,
method=method,
hessian_tol=hessian_tol,
modified_tol=modified_tol,
random_state=random_state,
n_jobs=n_jobs,
)
class LocallyLinearEmbedding(
ClassNamePrefixFeaturesOutMixin,
TransformerMixin,
_UnstableArchMixin,
BaseEstimator,
):
"""Locally Linear Embedding.
Read more in the :ref:`User Guide <locally_linear_embedding>`.
Parameters
----------
n_neighbors : int, default=5
Number of neighbors to consider for each point.
n_components : int, default=2
Number of coordinates for the manifold.
reg : float, default=1e-3
Regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : {'auto', 'arpack', 'dense'}, default='auto'
The solver used to compute the eigenvectors. The available options are:
- `'auto'` : algorithm will attempt to choose the best method for input
data.
- `'arpack'` : use arnoldi iteration in shift-invert mode. For this
method, M may be a dense matrix, sparse matrix, or general linear
operator.
- `'dense'` : use standard dense matrix operations for the eigenvalue
decomposition. For this method, M must be an array or matrix type.
This method should be avoided for large problems.
.. warning::
ARPACK can be unstable for some problems. It is best to try several
random seeds in order to check results.
tol : float, default=1e-6
Tolerance for 'arpack' method
Not used if eigen_solver=='dense'.
max_iter : int, default=100
Maximum number of iterations for the arpack solver.
Not used if eigen_solver=='dense'.
method : {'standard', 'hessian', 'modified', 'ltsa'}, default='standard'
- `standard`: use the standard locally linear embedding algorithm. see
reference [1]_
- `hessian`: use the Hessian eigenmap method. This method requires
``n_neighbors > n_components * (1 + (n_components + 1) / 2)``. see
reference [2]_
- `modified`: use the modified locally linear embedding algorithm.
see reference [3]_
- `ltsa`: use local tangent space alignment algorithm. see
reference [4]_
hessian_tol : float, default=1e-4
Tolerance for Hessian eigenmapping method.
Only used if ``method == 'hessian'``.
modified_tol : float, default=1e-12
Tolerance for modified LLE method.
Only used if ``method == 'modified'``.
neighbors_algorithm : {'auto', 'brute', 'kd_tree', 'ball_tree'}, \
default='auto'
Algorithm to use for nearest neighbors search, passed to
:class:`~sklearn.neighbors.NearestNeighbors` instance.
random_state : int, RandomState instance, default=None
Determines the random number generator when
``eigen_solver`` == 'arpack'. Pass an int for reproducible results
across multiple function calls. See :term:`Glossary <random_state>`.
n_jobs : int or None, default=None
The number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Attributes
----------
embedding_ : array-like, shape [n_samples, n_components]
Stores the embedding vectors
reconstruction_error_ : float
Reconstruction error associated with `embedding_`
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
nbrs_ : NearestNeighbors object
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
See Also
--------
SpectralEmbedding : Spectral embedding for non-linear dimensionality
reduction.
TSNE : T-distributed Stochastic Neighbor Embedding.
References
----------
.. [1] Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).
.. [2] Donoho, D. & Grimes, C. Hessian eigenmaps: Locally
linear embedding techniques for high-dimensional data.
Proc Natl Acad Sci U S A. 100:5591 (2003).
.. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear
Embedding Using Multiple Weights.
<https://citeseerx.ist.psu.edu/doc_view/pid/0b060fdbd92cbcc66b383bcaa9ba5e5e624d7ee3>`_
.. [4] Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.manifold import LocallyLinearEmbedding
>>> X, _ = load_digits(return_X_y=True)
>>> X.shape
(1797, 64)
>>> embedding = LocallyLinearEmbedding(n_components=2)
>>> X_transformed = embedding.fit_transform(X[:100])
>>> X_transformed.shape
(100, 2)
"""
_parameter_constraints: dict = {
"n_neighbors": [Interval(Integral, 1, None, closed="left")],
"n_components": [Interval(Integral, 1, None, closed="left")],
"reg": [Interval(Real, 0, None, closed="left")],
"eigen_solver": [StrOptions({"auto", "arpack", "dense"})],
"tol": [Interval(Real, 0, None, closed="left")],
"max_iter": [Interval(Integral, 1, None, closed="left")],
"method": [StrOptions({"standard", "hessian", "modified", "ltsa"})],
"hessian_tol": [Interval(Real, 0, None, closed="left")],
"modified_tol": [Interval(Real, 0, None, closed="left")],
"neighbors_algorithm": [StrOptions({"auto", "brute", "kd_tree", "ball_tree"})],
"random_state": ["random_state"],
"n_jobs": [None, Integral],
}
def __init__(
self,
*,
n_neighbors=5,
n_components=2,
reg=1e-3,
eigen_solver="auto",
tol=1e-6,
max_iter=100,
method="standard",
hessian_tol=1e-4,
modified_tol=1e-12,
neighbors_algorithm="auto",
random_state=None,
n_jobs=None,
):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.reg = reg
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.method = method
self.hessian_tol = hessian_tol
self.modified_tol = modified_tol
self.random_state = random_state
self.neighbors_algorithm = neighbors_algorithm
self.n_jobs = n_jobs
def _fit_transform(self, X):
self.nbrs_ = NearestNeighbors(
n_neighbors=self.n_neighbors,
algorithm=self.neighbors_algorithm,
n_jobs=self.n_jobs,
)
random_state = check_random_state(self.random_state)
X = validate_data(self, X, dtype=float)
self.nbrs_.fit(X)
self.embedding_, self.reconstruction_error_ = _locally_linear_embedding(
X=self.nbrs_,
n_neighbors=self.n_neighbors,
n_components=self.n_components,
eigen_solver=self.eigen_solver,
tol=self.tol,
max_iter=self.max_iter,
method=self.method,
hessian_tol=self.hessian_tol,
modified_tol=self.modified_tol,
random_state=random_state,
reg=self.reg,
n_jobs=self.n_jobs,
)
self._n_features_out = self.embedding_.shape[1]
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None):
"""Compute the embedding vectors for data X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training set.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
Fitted `LocallyLinearEmbedding` class instance.
"""
self._fit_transform(X)
return self
@_fit_context(prefer_skip_nested_validation=True)
def fit_transform(self, X, y=None):
"""Compute the embedding vectors for data X and transform X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training set.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""
Transform new points into embedding space.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Points to be transformed into the embedding space.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Embedding of the new points in low-dimensional space.
Notes
-----
Because of scaling performed by this method, it is discouraged to use
it together with methods that are not scale-invariant (like SVMs).
"""
check_is_fitted(self)
X = validate_data(self, X, reset=False)
ind = self.nbrs_.kneighbors(
X, n_neighbors=self.n_neighbors, return_distance=False
)
weights = barycenter_weights(X, self.nbrs_._fit_X, ind, reg=self.reg)
X_new = np.empty((X.shape[0], self.n_components))
for i in range(X.shape[0]):
X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
return X_new
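# A minimal sketch (illustrative addition, not part of the upstream module;
# the helper name is arbitrary): fitting on a training set and then projecting
# held-out points with ``transform``, which re-uses the barycenter weights of
# each new point's training neighbours.
def _example_locally_linear_embedding_transform():  # pragma: no cover - illustrative sketch
    rng = np.random.RandomState(0)
    X_train = rng.rand(50, 5)
    X_new = rng.rand(3, 5)
    lle = LocallyLinearEmbedding(n_neighbors=8, n_components=2, random_state=0)
    lle.fit(X_train)
    assert lle.embedding_.shape == (50, 2)
    assert lle.transform(X_new).shape == (3, 2)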
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/manifold/_spectral_embedding.py | sklearn/manifold/_spectral_embedding.py | """Spectral Embedding."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from numbers import Integral, Real
import numpy as np
from scipy import sparse
from scipy.linalg import eigh
from scipy.sparse.csgraph import connected_components
from scipy.sparse.linalg import eigsh, lobpcg
from sklearn.base import BaseEstimator, _fit_context
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.neighbors import NearestNeighbors, kneighbors_graph
from sklearn.utils import check_array, check_random_state, check_symmetric
from sklearn.utils._arpack import _init_arpack_v0
from sklearn.utils._param_validation import Interval, StrOptions, validate_params
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.utils.fixes import laplacian as csgraph_laplacian
from sklearn.utils.fixes import parse_version, sp_version
from sklearn.utils.validation import validate_data
def _graph_connected_component(graph, node_id):
"""Find the connected component of the graph that contains the given
node.
Parameters
----------
graph : array-like of shape (n_samples, n_samples)
Adjacency matrix of the graph, non-zero weight means an edge
between the nodes.
node_id : int
The index of the query node of the graph.
Returns
-------
connected_components_matrix : array-like of shape (n_samples,)
A boolean array indicating which nodes belong to the connected
component containing the given query node.
"""
n_node = graph.shape[0]
if sparse.issparse(graph):
# speed up row-wise access to boolean connection mask
graph = graph.tocsr()
connected_nodes = np.zeros(n_node, dtype=bool)
nodes_to_explore = np.zeros(n_node, dtype=bool)
nodes_to_explore[node_id] = True
for _ in range(n_node):
last_num_component = connected_nodes.sum()
np.logical_or(connected_nodes, nodes_to_explore, out=connected_nodes)
if last_num_component >= connected_nodes.sum():
break
indices = np.where(nodes_to_explore)[0]
nodes_to_explore.fill(False)
for i in indices:
if sparse.issparse(graph):
# scipy not yet implemented 1D sparse slices; can be changed back to
# `neighbors = graph[i].toarray().ravel()` once implemented
neighbors = graph[[i], :].toarray().ravel()
else:
neighbors = graph[i]
np.logical_or(nodes_to_explore, neighbors, out=nodes_to_explore)
return connected_nodes
def _graph_is_connected(graph):
"""Return whether the graph is connected (True) or Not (False).
Parameters
----------
graph : {array-like, sparse matrix} of shape (n_samples, n_samples)
Adjacency matrix of the graph, non-zero weight means an edge
between the nodes.
Returns
-------
is_connected : bool
        True if the graph is fully connected, False otherwise.
"""
if sparse.issparse(graph):
# Before Scipy 1.11.3, `connected_components` only supports 32-bit indices.
# PR: https://github.com/scipy/scipy/pull/18913
# First integration in 1.11.3: https://github.com/scipy/scipy/pull/19279
# TODO(jjerphan): Once SciPy 1.11.3 is the minimum supported version, use
# `accept_large_sparse=True`.
accept_large_sparse = sp_version >= parse_version("1.11.3")
graph = check_array(
graph, accept_sparse=True, accept_large_sparse=accept_large_sparse
)
# sparse graph, find all the connected components
n_connected_components, _ = connected_components(graph)
return n_connected_components == 1
else:
# dense graph, find all connected components start from node 0
return _graph_connected_component(graph, 0).sum() == graph.shape[0]
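# Editor's note: the sketch below is not part of the upstream module. It is a
# small, hypothetical check of the connectivity helpers above on a hand-built
# adjacency matrix with two blocks; the `_sketch_` name is an illustrative
# addition.
def _sketch_graph_is_connected():
    adjacency = np.array(
        [
            [1.0, 1.0, 0.0, 0.0],
            [1.0, 1.0, 0.0, 0.0],
            [0.0, 0.0, 1.0, 1.0],
            [0.0, 0.0, 1.0, 1.0],
        ]
    )
    assert not _graph_is_connected(adjacency)  # two disconnected blocks
    adjacency[1, 2] = adjacency[2, 1] = 1.0  # add a bridge between the blocks
    assert _graph_is_connected(adjacency)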
def _set_diag(laplacian, value, norm_laplacian):
"""Set the diagonal of the laplacian matrix and convert it to a
sparse format well suited for eigenvalue decomposition.
Parameters
----------
laplacian : {ndarray, sparse matrix}
The graph laplacian.
value : float
The value of the diagonal.
norm_laplacian : bool
Whether the value of the diagonal should be changed or not.
Returns
-------
laplacian : {array, sparse matrix}
        An array or sparse matrix in a form that is well suited to fast
        eigenvalue decomposition, depending on the bandwidth of the
        matrix.
"""
n_nodes = laplacian.shape[0]
    # We need to set all diagonal entries to `value`
if not sparse.issparse(laplacian):
if norm_laplacian:
laplacian.flat[:: n_nodes + 1] = value
else:
laplacian = laplacian.tocoo()
if norm_laplacian:
diag_idx = laplacian.row == laplacian.col
laplacian.data[diag_idx] = value
# If the matrix has a small number of diagonals (as in the
# case of structured matrices coming from images), the
# dia format might be best suited for matvec products:
n_diags = np.unique(laplacian.row - laplacian.col).size
if n_diags <= 7:
            # 3 or fewer outer diagonals on each side
laplacian = laplacian.todia()
else:
# csr has the fastest matvec and is thus best suited to
# arpack
laplacian = laplacian.tocsr()
return laplacian
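# Editor's note: the sketch below is not part of the upstream module. It is a
# hypothetical illustration of `_set_diag` on a tiny sparse Laplacian: the
# diagonal is overwritten while the off-diagonal entries are kept, and a
# format suited to fast matvec products is returned.
def _sketch_set_diag():
    lap = sparse.csr_matrix(np.array([[2.0, -1.0], [-1.0, 2.0]]))
    lap = _set_diag(lap, 1, norm_laplacian=True)
    assert np.allclose(lap.toarray().diagonal(), 1.0)
    assert np.allclose(lap.toarray()[0, 1], -1.0)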
@validate_params(
{
"adjacency": ["array-like", "sparse matrix"],
"n_components": [Interval(Integral, 1, None, closed="left")],
"eigen_solver": [StrOptions({"arpack", "lobpcg", "amg"}), None],
"random_state": ["random_state"],
"eigen_tol": [Interval(Real, 0, None, closed="left"), StrOptions({"auto"})],
"norm_laplacian": ["boolean"],
"drop_first": ["boolean"],
},
prefer_skip_nested_validation=True,
)
def spectral_embedding(
adjacency,
*,
n_components=8,
eigen_solver=None,
random_state=None,
eigen_tol="auto",
norm_laplacian=True,
drop_first=True,
):
"""Project the sample on the first eigenvectors of the graph Laplacian.
The adjacency matrix is used to compute a normalized graph Laplacian
whose spectrum (especially the eigenvectors associated to the
smallest eigenvalues) has an interpretation in terms of minimal
number of cuts necessary to split the graph into comparably sized
components.
This embedding can also 'work' even if the ``adjacency`` variable is
not strictly the adjacency matrix of a graph but more generally
an affinity or similarity matrix between samples (for instance the
heat kernel of a euclidean distance matrix or a k-NN matrix).
    However, care must be taken to always make the affinity matrix symmetric
    so that the eigenvector decomposition works as expected.
Note : Laplacian Eigenmaps is the actual algorithm implemented here.
Read more in the :ref:`User Guide <spectral_embedding>`.
Parameters
----------
adjacency : {array-like, sparse graph} of shape (n_samples, n_samples)
The adjacency matrix of the graph to embed.
n_components : int, default=8
The dimension of the projection subspace.
eigen_solver : {'arpack', 'lobpcg', 'amg'}, default=None
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities. If None, then ``'arpack'`` is
used.
random_state : int, RandomState instance or None, default=None
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when `eigen_solver ==
'amg'`, and for the K-Means initialization. Use an int to make
the results deterministic across calls (See
:term:`Glossary <random_state>`).
.. note::
When using `eigen_solver == 'amg'`,
it is necessary to also fix the global numpy seed with
`np.random.seed(int)` to get deterministic results. See
https://github.com/pyamg/pyamg/issues/139 for further
information.
eigen_tol : float, default="auto"
Stopping criterion for eigendecomposition of the Laplacian matrix.
If `eigen_tol="auto"` then the passed tolerance will depend on the
`eigen_solver`:
- If `eigen_solver="arpack"`, then `eigen_tol=0.0`;
- If `eigen_solver="lobpcg"` or `eigen_solver="amg"`, then
`eigen_tol=None` which configures the underlying `lobpcg` solver to
          automatically resolve the value according to its heuristics. See
:func:`scipy.sparse.linalg.lobpcg` for details.
Note that when using `eigen_solver="amg"` values of `tol<1e-5` may lead
to convergence issues and should be avoided.
.. versionadded:: 1.2
Added 'auto' option.
norm_laplacian : bool, default=True
If True, then compute symmetric normalized Laplacian.
drop_first : bool, default=True
Whether to drop the first eigenvector. For spectral embedding, this
should be True as the first eigenvector should be constant vector for
connected graph, but for spectral clustering, this should be kept as
False to retain the first eigenvector.
Returns
-------
embedding : ndarray of shape (n_samples, n_components)
The reduced samples.
Notes
-----
Spectral Embedding (Laplacian Eigenmaps) is most useful when the graph
    has one connected component. If the graph has many components, the first
few eigenvectors will simply uncover the connected components of the graph.
References
----------
* https://en.wikipedia.org/wiki/LOBPCG
* :doi:`"Toward the Optimal Preconditioned Eigensolver: Locally Optimal
Block Preconditioned Conjugate Gradient Method",
Andrew V. Knyazev
<10.1137/S1064827500366124>`
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.neighbors import kneighbors_graph
>>> from sklearn.manifold import spectral_embedding
>>> X, _ = load_digits(return_X_y=True)
>>> X = X[:100]
>>> affinity_matrix = kneighbors_graph(
... X, n_neighbors=int(X.shape[0] / 10), include_self=True
... )
>>> # make the matrix symmetric
>>> affinity_matrix = 0.5 * (affinity_matrix + affinity_matrix.T)
>>> embedding = spectral_embedding(affinity_matrix, n_components=2, random_state=42)
>>> embedding.shape
(100, 2)
"""
random_state = check_random_state(random_state)
return _spectral_embedding(
adjacency,
n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol,
norm_laplacian=norm_laplacian,
drop_first=drop_first,
)
def _spectral_embedding(
adjacency,
*,
n_components=8,
eigen_solver=None,
random_state=None,
eigen_tol="auto",
norm_laplacian=True,
drop_first=True,
):
adjacency = check_symmetric(adjacency)
if eigen_solver == "amg":
try:
from pyamg import smoothed_aggregation_solver
except ImportError as e:
raise ValueError(
"The eigen_solver was set to 'amg', but pyamg is not available."
) from e
if eigen_solver is None:
eigen_solver = "arpack"
n_nodes = adjacency.shape[0]
# Whether to drop the first eigenvector
if drop_first:
n_components = n_components + 1
if not _graph_is_connected(adjacency):
warnings.warn(
"Graph is not fully connected, spectral embedding may not work as expected."
)
laplacian, dd = csgraph_laplacian(
adjacency, normed=norm_laplacian, return_diag=True
)
if eigen_solver == "arpack" or (
eigen_solver != "lobpcg"
and (not sparse.issparse(laplacian) or n_nodes < 5 * n_components)
):
# lobpcg used with eigen_solver='amg' has bugs for low number of nodes
# for details see the source code in scipy:
# https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen
# /lobpcg/lobpcg.py#L237
# or matlab:
# https://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m
laplacian = _set_diag(laplacian, 1, norm_laplacian)
# Here we'll use shift-invert mode for fast eigenvalues
# (see https://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
# for a short explanation of what this means)
# Because the normalized Laplacian has eigenvalues between 0 and 2,
# I - L has eigenvalues between -1 and 1. ARPACK is most efficient
# when finding eigenvalues of largest magnitude (keyword which='LM')
# and when these eigenvalues are very large compared to the rest.
# For very large, very sparse graphs, I - L can have many, many
# eigenvalues very near 1.0. This leads to slow convergence. So
# instead, we'll use ARPACK's shift-invert mode, asking for the
# eigenvalues near 1.0. This effectively spreads-out the spectrum
# near 1.0 and leads to much faster convergence: potentially an
# orders-of-magnitude speedup over simply using keyword which='LA'
# in standard mode.
try:
            # We are computing the opposite of the Laplacian in place so as
            # to spare a memory allocation of a possibly very large array
tol = 0 if eigen_tol == "auto" else eigen_tol
laplacian *= -1
v0 = _init_arpack_v0(laplacian.shape[0], random_state)
laplacian = check_array(
laplacian, accept_sparse="csr", accept_large_sparse=False
)
_, diffusion_map = eigsh(
laplacian, k=n_components, sigma=1.0, which="LM", tol=tol, v0=v0
)
embedding = diffusion_map.T[n_components::-1]
if norm_laplacian:
# recover u = D^-1/2 x from the eigenvector output x
embedding = embedding / dd
except RuntimeError:
# When submatrices are exactly singular, an LU decomposition
# in arpack fails. We fallback to lobpcg
eigen_solver = "lobpcg"
# Revert the laplacian to its opposite to have lobpcg work
laplacian *= -1
elif eigen_solver == "amg":
# Use AMG to get a preconditioner and speed up the eigenvalue
# problem.
if not sparse.issparse(laplacian):
warnings.warn("AMG works better for sparse matrices")
laplacian = check_array(
laplacian, dtype=[np.float64, np.float32], accept_sparse=True
)
laplacian = _set_diag(laplacian, 1, norm_laplacian)
# The Laplacian matrix is always singular, having at least one zero
# eigenvalue, corresponding to the trivial eigenvector, which is a
# constant. Using a singular matrix for preconditioning may result in
# random failures in LOBPCG and is not supported by the existing
# theory:
# see https://doi.org/10.1007/s10208-015-9297-1
        # Shift the Laplacian so its diagonal is not all ones. The shift
# does change the eigenpairs however, so we'll feed the shifted
# matrix to the solver and afterward set it back to the original.
diag_shift = 1e-5 * sparse.eye(laplacian.shape[0])
laplacian += diag_shift
if hasattr(sparse, "csr_array") and isinstance(laplacian, sparse.csr_array):
# `pyamg` does not work with `csr_array` and we need to convert it to a
# `csr_matrix` object.
laplacian = sparse.csr_matrix(laplacian)
ml = smoothed_aggregation_solver(check_array(laplacian, accept_sparse="csr"))
laplacian -= diag_shift
M = ml.aspreconditioner()
# Create initial approximation X to eigenvectors
X = random_state.standard_normal(size=(laplacian.shape[0], n_components + 1))
X[:, 0] = dd.ravel()
X = X.astype(laplacian.dtype)
tol = None if eigen_tol == "auto" else eigen_tol
_, diffusion_map = lobpcg(laplacian, X, M=M, tol=tol, largest=False)
embedding = diffusion_map.T
if norm_laplacian:
# recover u = D^-1/2 x from the eigenvector output x
embedding = embedding / dd
if embedding.shape[0] == 1:
raise ValueError
if eigen_solver == "lobpcg":
laplacian = check_array(
laplacian, dtype=[np.float64, np.float32], accept_sparse=True
)
if n_nodes < 5 * n_components + 1:
# see note above under arpack why lobpcg has problems with small
# number of nodes
# lobpcg will fallback to eigh, so we short circuit it
if sparse.issparse(laplacian):
laplacian = laplacian.toarray()
_, diffusion_map = eigh(laplacian, check_finite=False)
embedding = diffusion_map.T[:n_components]
if norm_laplacian:
# recover u = D^-1/2 x from the eigenvector output x
embedding = embedding / dd
else:
laplacian = _set_diag(laplacian, 1, norm_laplacian)
            # We increase the number of eigenvectors requested, as lobpcg
            # doesn't behave well in low dimension, and create an initial
            # approximation X to the eigenvectors
X = random_state.standard_normal(
size=(laplacian.shape[0], n_components + 1)
)
X[:, 0] = dd.ravel()
X = X.astype(laplacian.dtype)
tol = None if eigen_tol == "auto" else eigen_tol
_, diffusion_map = lobpcg(
laplacian, X, tol=tol, largest=False, maxiter=2000
)
embedding = diffusion_map.T[:n_components]
if norm_laplacian:
# recover u = D^-1/2 x from the eigenvector output x
embedding = embedding / dd
if embedding.shape[0] == 1:
raise ValueError
embedding = _deterministic_vector_sign_flip(embedding)
if drop_first:
return embedding[1:n_components].T
else:
return embedding[:n_components].T
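# Editor's note: the sketch below is not part of the upstream module. It is a
# hypothetical check that, for a tiny affinity matrix made of two weakly
# connected blocks, the first non-trivial eigenvector separates the blocks,
# which is the behaviour the solver branches above all aim to produce.
def _sketch_two_block_embedding():
    affinity = np.full((6, 6), 0.01)
    affinity[:3, :3] = 1.0
    affinity[3:, 3:] = 1.0
    emb = spectral_embedding(affinity, n_components=1, random_state=0)
    signs = np.sign(emb[:, 0])
    # All samples of a block share one sign, and the two blocks differ.
    assert abs(signs[:3].sum()) == 3
    assert abs(signs[3:].sum()) == 3
    assert signs[0] != signs[3]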
class SpectralEmbedding(BaseEstimator):
"""Spectral embedding for non-linear dimensionality reduction.
Forms an affinity matrix given by the specified function and
applies spectral decomposition to the corresponding graph laplacian.
The resulting transformation is given by the value of the
eigenvectors for each data point.
Note : Laplacian Eigenmaps is the actual algorithm implemented here.
Read more in the :ref:`User Guide <spectral_embedding>`.
Parameters
----------
n_components : int, default=2
The dimension of the projected subspace.
affinity : {'nearest_neighbors', 'rbf', 'precomputed', \
'precomputed_nearest_neighbors'} or callable, \
default='nearest_neighbors'
How to construct the affinity matrix.
- 'nearest_neighbors' : construct the affinity matrix by computing a
graph of nearest neighbors.
- 'rbf' : construct the affinity matrix by computing a radial basis
function (RBF) kernel.
- 'precomputed' : interpret ``X`` as a precomputed affinity matrix.
- 'precomputed_nearest_neighbors' : interpret ``X`` as a sparse graph
of precomputed nearest neighbors, and constructs the affinity matrix
by selecting the ``n_neighbors`` nearest neighbors.
        - callable : use the passed-in function as the affinity;
          the function takes a data matrix (n_samples, n_features)
          and returns an affinity matrix (n_samples, n_samples).
gamma : float, default=None
Kernel coefficient for rbf kernel. If None, gamma will be set to
1/n_features.
random_state : int, RandomState instance or None, default=None
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when `eigen_solver ==
'amg'`, and for the K-Means initialization. Use an int to make
the results deterministic across calls (See
:term:`Glossary <random_state>`).
.. note::
When using `eigen_solver == 'amg'`,
it is necessary to also fix the global numpy seed with
`np.random.seed(int)` to get deterministic results. See
https://github.com/pyamg/pyamg/issues/139 for further
information.
eigen_solver : {'arpack', 'lobpcg', 'amg'}, default=None
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems.
If None, then ``'arpack'`` is used.
eigen_tol : float, default="auto"
Stopping criterion for eigendecomposition of the Laplacian matrix.
If `eigen_tol="auto"` then the passed tolerance will depend on the
`eigen_solver`:
- If `eigen_solver="arpack"`, then `eigen_tol=0.0`;
- If `eigen_solver="lobpcg"` or `eigen_solver="amg"`, then
`eigen_tol=None` which configures the underlying `lobpcg` solver to
          automatically resolve the value according to its heuristics. See
:func:`scipy.sparse.linalg.lobpcg` for details.
Note that when using `eigen_solver="lobpcg"` or `eigen_solver="amg"`
values of `tol<1e-5` may lead to convergence issues and should be
avoided.
.. versionadded:: 1.2
n_neighbors : int, default=None
Number of nearest neighbors for nearest_neighbors graph building.
If None, n_neighbors will be set to max(n_samples/10, 1).
n_jobs : int, default=None
The number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Attributes
----------
embedding_ : ndarray of shape (n_samples, n_components)
Spectral embedding of the training matrix.
affinity_matrix_ : ndarray of shape (n_samples, n_samples)
        Affinity matrix constructed from samples or precomputed.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_neighbors_ : int
Number of nearest neighbors effectively used.
See Also
--------
Isomap : Non-linear dimensionality reduction through Isometric Mapping.
References
----------
- :doi:`A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
<10.1007/s11222-007-9033-z>`
- `On Spectral Clustering: Analysis and an algorithm, 2001
Andrew Y. Ng, Michael I. Jordan, Yair Weiss
<https://citeseerx.ist.psu.edu/doc_view/pid/796c5d6336fc52aa84db575fb821c78918b65f58>`_
- :doi:`Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
<10.1109/34.868688>`
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.manifold import SpectralEmbedding
>>> X, _ = load_digits(return_X_y=True)
>>> X.shape
(1797, 64)
>>> embedding = SpectralEmbedding(n_components=2)
>>> X_transformed = embedding.fit_transform(X[:100])
>>> X_transformed.shape
(100, 2)
"""
_parameter_constraints: dict = {
"n_components": [Interval(Integral, 1, None, closed="left")],
"affinity": [
StrOptions(
{
"nearest_neighbors",
"rbf",
"precomputed",
"precomputed_nearest_neighbors",
},
),
callable,
],
"gamma": [Interval(Real, 0, None, closed="left"), None],
"random_state": ["random_state"],
"eigen_solver": [StrOptions({"arpack", "lobpcg", "amg"}), None],
"eigen_tol": [Interval(Real, 0, None, closed="left"), StrOptions({"auto"})],
"n_neighbors": [Interval(Integral, 1, None, closed="left"), None],
"n_jobs": [None, Integral],
}
def __init__(
self,
n_components=2,
*,
affinity="nearest_neighbors",
gamma=None,
random_state=None,
eigen_solver=None,
eigen_tol="auto",
n_neighbors=None,
n_jobs=None,
):
self.n_components = n_components
self.affinity = affinity
self.gamma = gamma
self.random_state = random_state
self.eigen_solver = eigen_solver
self.eigen_tol = eigen_tol
self.n_neighbors = n_neighbors
self.n_jobs = n_jobs
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.sparse = True
tags.input_tags.pairwise = self.affinity in [
"precomputed",
"precomputed_nearest_neighbors",
]
return tags
def _get_affinity_matrix(self, X, Y=None):
"""Calculate the affinity matrix from data
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
If affinity is "precomputed"
X : array-like of shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
Y: Ignored
Returns
-------
affinity_matrix of shape (n_samples, n_samples)
"""
if self.affinity == "precomputed":
self.affinity_matrix_ = X
return self.affinity_matrix_
if self.affinity == "precomputed_nearest_neighbors":
estimator = NearestNeighbors(
n_neighbors=self.n_neighbors, n_jobs=self.n_jobs, metric="precomputed"
).fit(X)
connectivity = estimator.kneighbors_graph(X=X, mode="connectivity")
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
return self.affinity_matrix_
if self.affinity == "nearest_neighbors":
if sparse.issparse(X):
warnings.warn(
"Nearest neighbors affinity currently does "
"not support sparse input, falling back to "
"rbf affinity"
)
self.affinity = "rbf"
else:
self.n_neighbors_ = (
self.n_neighbors
if self.n_neighbors is not None
else max(int(X.shape[0] / 10), 1)
)
self.affinity_matrix_ = kneighbors_graph(
X, self.n_neighbors_, include_self=True, n_jobs=self.n_jobs
)
# currently only symmetric affinity_matrix supported
self.affinity_matrix_ = 0.5 * (
self.affinity_matrix_ + self.affinity_matrix_.T
)
return self.affinity_matrix_
if self.affinity == "rbf":
self.gamma_ = self.gamma if self.gamma is not None else 1.0 / X.shape[1]
self.affinity_matrix_ = rbf_kernel(X, gamma=self.gamma_)
return self.affinity_matrix_
self.affinity_matrix_ = self.affinity(X)
return self.affinity_matrix_
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
If affinity is "precomputed"
X : {array-like, sparse matrix}, shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
X = validate_data(self, X, accept_sparse="csr", ensure_min_samples=2)
random_state = check_random_state(self.random_state)
affinity_matrix = self._get_affinity_matrix(X)
self.embedding_ = _spectral_embedding(
affinity_matrix,
n_components=self.n_components,
eigen_solver=self.eigen_solver,
eigen_tol=self.eigen_tol,
random_state=random_state,
)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples
and `n_features` is the number of features.
If affinity is "precomputed"
X : {array-like, sparse matrix} of shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
X_new : array-like of shape (n_samples, n_components)
Spectral embedding of the training matrix.
"""
self.fit(X)
return self.embedding_
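# Editor's note: the sketch below is not part of the upstream module. It is a
# hypothetical check that the "rbf" branch of `_get_affinity_matrix` above is
# equivalent to calling `rbf_kernel` directly with gamma = 1 / n_features when
# `gamma` is left as None.
def _sketch_rbf_affinity_default_gamma():
    rng = np.random.RandomState(0)
    X = rng.rand(10, 4)
    est = SpectralEmbedding(affinity="rbf", gamma=None)
    affinity = est._get_affinity_matrix(X)
    assert np.allclose(affinity, rbf_kernel(X, gamma=1.0 / X.shape[1]))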
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/manifold/tests/test_locally_linear.py | sklearn/manifold/tests/test_locally_linear.py | from itertools import product
import numpy as np
import pytest
from scipy import linalg
from sklearn import manifold, neighbors
from sklearn.datasets import make_blobs
from sklearn.manifold._locally_linear import barycenter_kneighbors_graph
from sklearn.utils._testing import (
assert_allclose,
assert_array_equal,
ignore_warnings,
)
eigen_solvers = ["dense", "arpack"]
# ----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph(global_dtype):
X = np.array([[0, 1], [1.01, 1.0], [2, 0]], dtype=global_dtype)
graph = barycenter_kneighbors_graph(X, 1)
expected_graph = np.array(
[[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], dtype=global_dtype
)
assert graph.dtype == global_dtype
assert_allclose(graph.toarray(), expected_graph)
graph = barycenter_kneighbors_graph(X, 2)
    # check that the rows of the barycenter weights sum to one
assert_allclose(np.sum(graph.toarray(), axis=1), np.ones(3))
pred = np.dot(graph.toarray(), X)
assert linalg.norm(pred - X) / X.shape[0] < 1
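# Editor's note: the sketch below is not part of the upstream test module. It
# is a hypothetical illustration of the property exercised above: each row of
# the barycenter graph holds weights that sum to one and approximately
# reconstruct the corresponding sample from its nearest neighbors.
def _sketch_barycenter_reconstruction():
    rng = np.random.RandomState(0)
    X = rng.rand(20, 3)
    W = barycenter_kneighbors_graph(X, n_neighbors=5).toarray()
    assert_allclose(W.sum(axis=1), np.ones(20))
    # With 5 neighbors in 3 dimensions the reconstruction is nearly exact.
    assert linalg.norm(np.dot(W, X) - X) / X.shape[0] < 0.1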
# ----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid(global_dtype):
    # note: ARPACK is numerically unstable, so this test will fail for
    # some random seeds. We choose 42 because the tests pass.
    # For arm64 platforms, seed 2 makes the test fail.
    # TODO: rewrite this test to make it less sensitive to the random seed,
    # irrespective of the platform.
rng = np.random.RandomState(42)
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(5), repeat=2)))
X = X + 1e-10 * rng.uniform(size=X.shape)
X = X.astype(global_dtype, copy=False)
n_components = 2
clf = manifold.LocallyLinearEmbedding(
n_neighbors=5, n_components=n_components, random_state=rng
)
tol = 0.1
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X, "fro")
assert reconstruction_error < tol
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert clf.embedding_.shape[1] == n_components
reconstruction_error = (
linalg.norm(np.dot(N, clf.embedding_) - clf.embedding_, "fro") ** 2
)
assert reconstruction_error < tol
assert_allclose(clf.reconstruction_error_, reconstruction_error, atol=1e-1)
# re-embed a noisy version of X using the transform method
noise = rng.randn(*X.shape).astype(global_dtype, copy=False) / 100
X_reembedded = clf.transform(X + noise)
assert linalg.norm(X_reembedded - clf.embedding_) < tol
@pytest.mark.parametrize("method", ["standard", "hessian", "modified", "ltsa"])
@pytest.mark.parametrize("solver", eigen_solvers)
def test_lle_manifold(global_dtype, method, solver):
rng = np.random.RandomState(0)
# similar test on a slightly more complex manifold
X = np.array(list(product(np.arange(18), repeat=2)))
X = np.c_[X, X[:, 0] ** 2 / 18]
X = X + 1e-10 * rng.uniform(size=X.shape)
X = X.astype(global_dtype, copy=False)
n_components = 2
clf = manifold.LocallyLinearEmbedding(
n_neighbors=6, n_components=n_components, method=method, random_state=0
)
tol = 1.5 if method == "standard" else 3
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X)
assert reconstruction_error < tol
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert clf.embedding_.shape[1] == n_components
reconstruction_error = (
linalg.norm(np.dot(N, clf.embedding_) - clf.embedding_, "fro") ** 2
)
details = "solver: %s, method: %s" % (solver, method)
assert reconstruction_error < tol, details
assert (
np.abs(clf.reconstruction_error_ - reconstruction_error)
< tol * reconstruction_error
), details
def test_pipeline():
# check that LocallyLinearEmbedding works fine as a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
from sklearn import datasets, pipeline
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[
("filter", manifold.LocallyLinearEmbedding(random_state=0)),
("clf", neighbors.KNeighborsClassifier()),
]
)
clf.fit(X, y)
assert 0.9 < clf.score(X, y)
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
M = np.ones((200, 3))
f = ignore_warnings
with pytest.raises(ValueError, match="Error in determining null-space with ARPACK"):
f(
manifold.locally_linear_embedding(
M,
n_neighbors=2,
n_components=1,
method="standard",
eigen_solver="arpack",
)
)
# regression test for #6033
def test_integer_input():
rand = np.random.RandomState(0)
X = rand.randint(0, 100, size=(20, 3))
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(method=method, n_neighbors=10)
clf.fit(X) # this previously raised a TypeError
def test_get_feature_names_out():
"""Check get_feature_names_out for LocallyLinearEmbedding."""
X, y = make_blobs(random_state=0, n_features=4)
n_components = 2
iso = manifold.LocallyLinearEmbedding(n_components=n_components)
iso.fit(X)
names = iso.get_feature_names_out()
assert_array_equal(
[f"locallylinearembedding{i}" for i in range(n_components)], names
)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/manifold/tests/test_t_sne.py | sklearn/manifold/tests/test_t_sne.py | import re
import sys
from io import StringIO
import numpy as np
import pytest
import scipy.sparse as sp
from numpy.testing import assert_allclose
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist, squareform
from sklearn import config_context
from sklearn.datasets import make_blobs
# mypy error: Module 'sklearn.manifold' has no attribute '_barnes_hut_tsne'
from sklearn.manifold import ( # type: ignore[attr-defined]
TSNE,
_barnes_hut_tsne,
)
from sklearn.manifold._t_sne import (
_gradient_descent,
_joint_probabilities,
_joint_probabilities_nn,
_kl_divergence,
_kl_divergence_bh,
trustworthiness,
)
from sklearn.manifold._utils import _binary_search_perplexity
from sklearn.metrics.pairwise import (
cosine_distances,
manhattan_distances,
pairwise_distances,
)
from sklearn.neighbors import NearestNeighbors, kneighbors_graph
from sklearn.utils import check_random_state
from sklearn.utils._testing import (
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
skip_if_32bit,
)
from sklearn.utils.fixes import CSR_CONTAINERS, LIL_CONTAINERS
x = np.linspace(0, 1, 10)
xx, yy = np.meshgrid(x, x)
X_2d_grid = np.hstack(
[
xx.ravel().reshape(-1, 1),
yy.ravel().reshape(-1, 1),
]
)
def test_gradient_descent_stops(capsys):
# Test stopping conditions of gradient descent.
class ObjectiveSmallGradient:
def __init__(self):
self.it = -1
def __call__(self, _, compute_error=True):
self.it += 1
return (10 - self.it) / 10.0, np.array([1e-5])
def flat_function(_, compute_error=True):
return 0.0, np.ones(1)
# Gradient norm
_, error, it = _gradient_descent(
ObjectiveSmallGradient(),
np.zeros(1),
0,
max_iter=100,
n_iter_without_progress=100,
momentum=0.0,
learning_rate=0.0,
min_gain=0.0,
min_grad_norm=1e-5,
verbose=2,
)
assert error == 1.0
assert it == 0
assert "gradient norm" in capsys.readouterr().out
# Maximum number of iterations without improvement
_, error, it = _gradient_descent(
flat_function,
np.zeros(1),
0,
max_iter=100,
n_iter_without_progress=10,
momentum=0.0,
learning_rate=0.0,
min_gain=0.0,
min_grad_norm=0.0,
verbose=2,
)
assert error == 0.0
assert it == 11
assert "did not make any progress" in capsys.readouterr().out
# Maximum number of iterations
_, error, it = _gradient_descent(
ObjectiveSmallGradient(),
np.zeros(1),
0,
max_iter=11,
n_iter_without_progress=100,
momentum=0.0,
learning_rate=0.0,
min_gain=0.0,
min_grad_norm=0.0,
verbose=2,
)
assert error == 0.0
assert it == 10
assert "Iteration 10" in capsys.readouterr().out
def test_binary_search():
# Test if the binary search finds Gaussians with desired perplexity.
random_state = check_random_state(0)
data = random_state.randn(50, 5)
distances = pairwise_distances(data).astype(np.float32)
desired_perplexity = 25.0
P = _binary_search_perplexity(distances, desired_perplexity, verbose=0)
P = np.maximum(P, np.finfo(np.double).eps)
mean_perplexity = np.mean(
[np.exp(-np.sum(P[i] * np.log(P[i]))) for i in range(P.shape[0])]
)
assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
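# Editor's note: the sketch below is not part of the upstream test module. It
# is a hypothetical worked example of the perplexity definition used above:
# perplexity is the exponential of the Shannon entropy of a conditional
# distribution, so a uniform distribution over k neighbors has perplexity k.
def _sketch_perplexity_of_uniform_distribution():
    k = 8
    P_row = np.full(k, 1.0 / k)
    perplexity = np.exp(-np.sum(P_row * np.log(P_row)))
    assert_almost_equal(perplexity, k)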
def test_binary_search_underflow():
# Test if the binary search finds Gaussians with desired perplexity.
# A more challenging case than the one above, producing numeric
# underflow in float precision (see issue #19471 and PR #19472).
random_state = check_random_state(42)
data = random_state.randn(1, 90).astype(np.float32) + 100
desired_perplexity = 30.0
P = _binary_search_perplexity(data, desired_perplexity, verbose=0)
perplexity = 2 ** -np.nansum(P[0, 1:] * np.log2(P[0, 1:]))
assert_almost_equal(perplexity, desired_perplexity, decimal=3)
def test_binary_search_neighbors():
# Binary perplexity search approximation.
# Should be approximately equal to the slow method when we use
# all points as neighbors.
n_samples = 200
desired_perplexity = 25.0
random_state = check_random_state(0)
data = random_state.randn(n_samples, 2).astype(np.float32, copy=False)
distances = pairwise_distances(data)
P1 = _binary_search_perplexity(distances, desired_perplexity, verbose=0)
# Test that when we use all the neighbors the results are identical
n_neighbors = n_samples - 1
nn = NearestNeighbors().fit(data)
distance_graph = nn.kneighbors_graph(n_neighbors=n_neighbors, mode="distance")
distances_nn = distance_graph.data.astype(np.float32, copy=False)
distances_nn = distances_nn.reshape(n_samples, n_neighbors)
P2 = _binary_search_perplexity(distances_nn, desired_perplexity, verbose=0)
indptr = distance_graph.indptr
P1_nn = np.array(
[
P1[k, distance_graph.indices[indptr[k] : indptr[k + 1]]]
for k in range(n_samples)
]
)
assert_array_almost_equal(P1_nn, P2, decimal=4)
# Test that the highest P_ij are the same when fewer neighbors are used
for k in np.linspace(150, n_samples - 1, 5):
k = int(k)
topn = k * 10 # check the top 10 * k entries out of k * k entries
distance_graph = nn.kneighbors_graph(n_neighbors=k, mode="distance")
distances_nn = distance_graph.data.astype(np.float32, copy=False)
distances_nn = distances_nn.reshape(n_samples, k)
P2k = _binary_search_perplexity(distances_nn, desired_perplexity, verbose=0)
assert_array_almost_equal(P1_nn, P2, decimal=2)
idx = np.argsort(P1.ravel())[::-1]
P1top = P1.ravel()[idx][:topn]
idx = np.argsort(P2k.ravel())[::-1]
P2top = P2k.ravel()[idx][:topn]
assert_array_almost_equal(P1top, P2top, decimal=2)
def test_binary_perplexity_stability():
# Binary perplexity search should be stable.
# The binary_search_perplexity had a bug wherein the P array
# was uninitialized, leading to sporadically failing tests.
n_neighbors = 10
n_samples = 100
random_state = check_random_state(0)
data = random_state.randn(n_samples, 5)
nn = NearestNeighbors().fit(data)
distance_graph = nn.kneighbors_graph(n_neighbors=n_neighbors, mode="distance")
distances = distance_graph.data.astype(np.float32, copy=False)
distances = distances.reshape(n_samples, n_neighbors)
last_P = None
desired_perplexity = 3
for _ in range(100):
P = _binary_search_perplexity(distances.copy(), desired_perplexity, verbose=0)
P1 = _joint_probabilities_nn(distance_graph, desired_perplexity, verbose=0)
# Convert the sparse matrix to a dense one for testing
P1 = P1.toarray()
if last_P is None:
last_P = P
last_P1 = P1
else:
assert_array_almost_equal(P, last_P, decimal=4)
assert_array_almost_equal(P1, last_P1, decimal=4)
def test_gradient():
# Test gradient of Kullback-Leibler divergence.
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features).astype(np.float32)
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components).astype(np.float32)
P = _joint_probabilities(distances, desired_perplexity=25.0, verbose=0)
def fun(params):
return _kl_divergence(params, P, alpha, n_samples, n_components)[0]
def grad(params):
return _kl_divergence(params, P, alpha, n_samples, n_components)[1]
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0, decimal=5)
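# Editor's note: the sketch below is not part of the upstream test module. It
# is a hypothetical illustration of the finite-difference comparison performed
# by `check_grad` above, on a function whose gradient is known in closed form.
def _sketch_finite_difference_gradient_check():
    def fun(params):
        return np.sum(params**2)

    def grad(params):
        return 2 * params

    params = np.arange(5, dtype=np.float64)
    assert check_grad(fun, grad, params) < 1e-5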
def test_trustworthiness():
# Test trustworthiness score.
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert trustworthiness(X, 5.0 + X / 10.0) == 1.0
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert trustworthiness(X, X_embedded) < 0.6
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
def test_trustworthiness_n_neighbors_error():
"""Raise an error when n_neighbors >= n_samples / 2.
Non-regression test for #18567.
"""
regex = "n_neighbors .+ should be less than .+"
rng = np.random.RandomState(42)
X = rng.rand(7, 4)
X_embedded = rng.rand(7, 2)
with pytest.raises(ValueError, match=regex):
trustworthiness(X, X_embedded, n_neighbors=5)
trust = trustworthiness(X, X_embedded, n_neighbors=3)
assert 0 <= trust <= 1
@pytest.mark.parametrize("method", ["exact", "barnes_hut"])
@pytest.mark.parametrize("init", ("random", "pca"))
def test_preserve_trustworthiness_approximately(method, init):
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
n_components = 2
X = random_state.randn(50, n_components).astype(np.float32)
tsne = TSNE(
n_components=n_components,
init=init,
random_state=0,
method=method,
max_iter=700,
learning_rate="auto",
)
X_embedded = tsne.fit_transform(X)
t = trustworthiness(X, X_embedded, n_neighbors=1)
assert t > 0.85
def test_optimization_minimizes_kl_divergence():
"""t-SNE should give a lower KL divergence with more iterations."""
random_state = check_random_state(0)
X, _ = make_blobs(n_features=3, random_state=random_state)
kl_divergences = []
for max_iter in [250, 300, 350]:
tsne = TSNE(
n_components=2,
init="random",
perplexity=10,
learning_rate=100.0,
max_iter=max_iter,
random_state=0,
)
tsne.fit_transform(X)
kl_divergences.append(tsne.kl_divergence_)
assert kl_divergences[1] <= kl_divergences[0]
assert kl_divergences[2] <= kl_divergences[1]
@pytest.mark.parametrize("method", ["exact", "barnes_hut"])
@pytest.mark.parametrize("init", ["random", "pca"])
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_fit_transform_csr_matrix(method, init, csr_container):
# TODO: compare results on dense and sparse data as proposed in:
# https://github.com/scikit-learn/scikit-learn/pull/23585#discussion_r968388186
# X can be a sparse matrix.
rng = check_random_state(0)
X = rng.randn(50, 3)
X[(rng.randint(0, 50, 25), rng.randint(0, 3, 25))] = 0.0
X_csr = csr_container(X)
tsne = TSNE(
n_components=2,
init=init,
perplexity=10,
learning_rate=100.0,
random_state=0,
method=method,
max_iter=750,
)
X_embedded = tsne.fit_transform(X_csr)
assert_allclose(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0, rtol=1.1e-1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
for i in range(3):
X = random_state.randn(80, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(
n_components=2,
perplexity=2,
learning_rate=100.0,
early_exaggeration=2.0,
metric="precomputed",
random_state=i,
verbose=0,
max_iter=500,
init="random",
)
X_embedded = tsne.fit_transform(D)
t = trustworthiness(D, X_embedded, n_neighbors=1, metric="precomputed")
assert t > 0.95
def test_trustworthiness_not_euclidean_metric():
# Test trustworthiness with a metric different from 'euclidean' and
# 'precomputed'
random_state = check_random_state(0)
X = random_state.randn(100, 2)
assert trustworthiness(X, X, metric="cosine") == trustworthiness(
pairwise_distances(X, metric="cosine"), X, metric="precomputed"
)
@pytest.mark.parametrize(
"method, retype",
[
("exact", np.asarray),
("barnes_hut", np.asarray),
*[("barnes_hut", csr_container) for csr_container in CSR_CONTAINERS],
],
)
@pytest.mark.parametrize(
"D, message_regex",
[
([[0.0], [1.0]], ".* square distance matrix"),
([[0.0, -1.0], [1.0, 0.0]], ".* positive.*"),
],
)
def test_bad_precomputed_distances(method, D, retype, message_regex):
tsne = TSNE(
metric="precomputed",
method=method,
init="random",
random_state=42,
perplexity=1,
)
with pytest.raises(ValueError, match=message_regex):
tsne.fit_transform(retype(D))
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_exact_no_precomputed_sparse(csr_container):
tsne = TSNE(
metric="precomputed",
method="exact",
init="random",
random_state=42,
perplexity=1,
)
with pytest.raises(TypeError, match="sparse"):
tsne.fit_transform(csr_container([[0, 5], [5, 0]]))
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_high_perplexity_precomputed_sparse_distances(csr_container):
# Perplexity should be less than 50
dist = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]])
bad_dist = csr_container(dist)
tsne = TSNE(metric="precomputed", init="random", random_state=42, perplexity=1)
msg = "3 neighbors per samples are required, but some samples have only 1"
with pytest.raises(ValueError, match=msg):
tsne.fit_transform(bad_dist)
@pytest.mark.filterwarnings(
"ignore:Precomputed sparse input was not sorted by "
"row values:sklearn.exceptions.EfficiencyWarning"
)
@pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + LIL_CONTAINERS)
def test_sparse_precomputed_distance(sparse_container):
"""Make sure that TSNE works identically for sparse and dense matrix"""
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D_sparse = kneighbors_graph(X, n_neighbors=100, mode="distance", include_self=True)
D = pairwise_distances(X)
assert sp.issparse(D_sparse)
assert_almost_equal(D_sparse.toarray(), D)
tsne = TSNE(
metric="precomputed", random_state=0, init="random", learning_rate="auto"
)
Xt_dense = tsne.fit_transform(D)
Xt_sparse = tsne.fit_transform(sparse_container(D_sparse))
assert_almost_equal(Xt_dense, Xt_sparse)
def test_non_positive_computed_distances():
# Computed distance matrices must be positive.
def metric(x, y):
return -1
# Negative computed distances should be caught even if result is squared
tsne = TSNE(metric=metric, method="exact", perplexity=1)
X = np.array([[0.0, 0.0], [1.0, 1.0]])
with pytest.raises(ValueError, match="All distances .*metric given.*"):
tsne.fit_transform(X)
def test_init_ndarray():
# Initialize TSNE with ndarray and test fit
tsne = TSNE(init=np.zeros((100, 2)), learning_rate="auto")
X_embedded = tsne.fit_transform(np.ones((100, 5)))
assert_array_equal(np.zeros((100, 2)), X_embedded)
def test_init_ndarray_precomputed():
# Initialize TSNE with ndarray and metric 'precomputed'
# Make sure no FutureWarning is thrown from _fit
tsne = TSNE(
init=np.zeros((100, 2)),
metric="precomputed",
learning_rate=50.0,
)
tsne.fit(np.zeros((100, 100)))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
# Precomputed distance matrices cannot use PCA initialization.
tsne = TSNE(metric="precomputed", init="pca", perplexity=1)
with pytest.raises(
ValueError,
match='The parameter init="pca" cannot be used with metric="precomputed".',
):
tsne.fit_transform(np.array([[0.0], [1.0]]))
def test_n_components_range():
# barnes_hut method should only be used with n_components <= 3
tsne = TSNE(n_components=4, method="barnes_hut", perplexity=1)
with pytest.raises(ValueError, match="'n_components' should be .*"):
tsne.fit_transform(np.array([[0.0], [1.0]]))
def test_early_exaggeration_used():
# check that the ``early_exaggeration`` parameter has an effect
random_state = check_random_state(0)
n_components = 2
methods = ["exact", "barnes_hut"]
X = random_state.randn(25, n_components).astype(np.float32)
for method in methods:
tsne = TSNE(
n_components=n_components,
perplexity=1,
learning_rate=100.0,
init="pca",
random_state=0,
method=method,
early_exaggeration=1.0,
max_iter=250,
)
X_embedded1 = tsne.fit_transform(X)
tsne = TSNE(
n_components=n_components,
perplexity=1,
learning_rate=100.0,
init="pca",
random_state=0,
method=method,
early_exaggeration=10.0,
max_iter=250,
)
X_embedded2 = tsne.fit_transform(X)
assert not np.allclose(X_embedded1, X_embedded2)
def test_max_iter_used():
# check that the ``max_iter`` parameter has an effect
random_state = check_random_state(0)
n_components = 2
methods = ["exact", "barnes_hut"]
X = random_state.randn(25, n_components).astype(np.float32)
for method in methods:
for max_iter in [251, 500]:
tsne = TSNE(
n_components=n_components,
perplexity=1,
learning_rate=0.5,
init="random",
random_state=0,
method=method,
early_exaggeration=1.0,
max_iter=max_iter,
)
tsne.fit_transform(X)
assert tsne.n_iter_ == max_iter - 1
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_answer_gradient_two_points(csr_container):
# Test the tree with only a single set of children.
#
# These tests & answers have been checked against the reference
# implementation by LvdM.
pos_input = np.array([[1.0, 0.0], [0.0, 1.0]])
pos_output = np.array(
[[-4.961291e-05, -1.072243e-04], [9.259460e-05, 2.702024e-04]]
)
neighbors = np.array([[1], [0]])
grad_output = np.array(
[[-2.37012478e-05, -6.29044398e-05], [2.37012478e-05, 6.29044398e-05]]
)
_run_answer_test(pos_input, pos_output, neighbors, grad_output, csr_container)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_answer_gradient_four_points(csr_container):
# Four points tests the tree with multiple levels of children.
#
# These tests & answers have been checked against the reference
# implementation by LvdM.
pos_input = np.array([[1.0, 0.0], [0.0, 1.0], [5.0, 2.0], [7.3, 2.2]])
pos_output = np.array(
[
[6.080564e-05, -7.120823e-05],
[-1.718945e-04, -4.000536e-05],
[-2.271720e-04, 8.663310e-05],
[-1.032577e-04, -3.582033e-05],
]
)
neighbors = np.array([[1, 2, 3], [0, 2, 3], [1, 0, 3], [1, 2, 0]])
grad_output = np.array(
[
[5.81128448e-05, -7.78033454e-06],
[-5.81526851e-05, 7.80976444e-06],
[4.24275173e-08, -3.69569698e-08],
[-2.58720939e-09, 7.52706374e-09],
]
)
_run_answer_test(pos_input, pos_output, neighbors, grad_output, csr_container)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_skip_num_points_gradient(csr_container):
# Test the kwargs option skip_num_points.
#
    # skip_num_points should make it such that the Barnes-Hut gradient
    # is not calculated for indices below skip_num_points.
# Aside from skip_num_points=2 and the first two gradient rows
# being set to zero, these data points are the same as in
# test_answer_gradient_four_points()
pos_input = np.array([[1.0, 0.0], [0.0, 1.0], [5.0, 2.0], [7.3, 2.2]])
pos_output = np.array(
[
[6.080564e-05, -7.120823e-05],
[-1.718945e-04, -4.000536e-05],
[-2.271720e-04, 8.663310e-05],
[-1.032577e-04, -3.582033e-05],
]
)
neighbors = np.array([[1, 2, 3], [0, 2, 3], [1, 0, 3], [1, 2, 0]])
grad_output = np.array(
[
[0.0, 0.0],
[0.0, 0.0],
[4.24275173e-08, -3.69569698e-08],
[-2.58720939e-09, 7.52706374e-09],
]
)
_run_answer_test(
pos_input, pos_output, neighbors, grad_output, csr_container, False, 0.1, 2
)
def _run_answer_test(
pos_input,
pos_output,
neighbors,
grad_output,
csr_container,
verbose=False,
perplexity=0.1,
skip_num_points=0,
):
distances = pairwise_distances(pos_input).astype(np.float32)
args = distances, perplexity, verbose
pos_output = pos_output.astype(np.float32)
neighbors = neighbors.astype(np.int64, copy=False)
pij_input = _joint_probabilities(*args)
pij_input = squareform(pij_input).astype(np.float32)
grad_bh = np.zeros(pos_output.shape, dtype=np.float32)
P = csr_container(pij_input)
neighbors = P.indices.astype(np.int64)
indptr = P.indptr.astype(np.int64)
_barnes_hut_tsne.gradient(
        P.data, pos_output, neighbors, indptr, grad_bh, 0.5, 2, 1,
        skip_num_points=skip_num_points,
)
assert_array_almost_equal(grad_bh, grad_output, decimal=4)
@pytest.mark.thread_unsafe # manually captured stdout
def test_verbose():
# Verbose options write to stdout.
random_state = check_random_state(0)
tsne = TSNE(verbose=2, perplexity=4)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert "[t-SNE]" in out
assert "nearest neighbors..." in out
assert "Computed conditional probabilities" in out
assert "Mean sigma" in out
assert "early exaggeration" in out
def test_chebyshev_metric():
# t-SNE should allow metrics that cannot be squared (issue #3526).
random_state = check_random_state(0)
tsne = TSNE(metric="chebyshev", perplexity=4)
X = random_state.randn(5, 2)
tsne.fit_transform(X)
def test_reduction_to_one_component():
# t-SNE should allow reduction to one component (issue #4154).
random_state = check_random_state(0)
tsne = TSNE(n_components=1, perplexity=4)
X = random_state.randn(5, 2)
X_embedded = tsne.fit(X).embedding_
assert np.all(np.isfinite(X_embedded))
@pytest.mark.parametrize("method", ["barnes_hut", "exact"])
@pytest.mark.parametrize("dt", [np.float32, np.float64])
def test_64bit(method, dt):
# Ensure 64bit arrays are handled correctly.
random_state = check_random_state(0)
X = random_state.randn(10, 2).astype(dt, copy=False)
tsne = TSNE(
n_components=2,
perplexity=2,
learning_rate=100.0,
random_state=0,
method=method,
verbose=0,
max_iter=300,
init="random",
)
X_embedded = tsne.fit_transform(X)
effective_type = X_embedded.dtype
    # tsne cython code is only single precision, so the output will
    # always be single precision, irrespective of the input dtype
assert effective_type == np.float32
@pytest.mark.parametrize("method", ["barnes_hut", "exact"])
def test_kl_divergence_not_nan(method):
    # Ensure kl_divergence_ is computed at the last iteration
    # even though max_iter % n_iter_check != 0, i.e. 503 % 50 != 0
random_state = check_random_state(0)
X = random_state.randn(50, 2)
tsne = TSNE(
n_components=2,
perplexity=2,
learning_rate=100.0,
random_state=0,
method=method,
verbose=0,
max_iter=503,
init="random",
)
tsne.fit_transform(X)
assert not np.isnan(tsne.kl_divergence_)
def test_barnes_hut_angle():
# When Barnes-Hut's angle=0 this corresponds to the exact method.
angle = 0.0
perplexity = 10
n_samples = 100
for n_components in [2, 3]:
n_features = 5
degrees_of_freedom = float(n_components - 1.0)
random_state = check_random_state(0)
data = random_state.randn(n_samples, n_features)
distances = pairwise_distances(data)
params = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, perplexity, verbose=0)
kl_exact, grad_exact = _kl_divergence(
params, P, degrees_of_freedom, n_samples, n_components
)
n_neighbors = n_samples - 1
distances_csr = (
NearestNeighbors()
.fit(data)
.kneighbors_graph(n_neighbors=n_neighbors, mode="distance")
)
P_bh = _joint_probabilities_nn(distances_csr, perplexity, verbose=0)
kl_bh, grad_bh = _kl_divergence_bh(
params,
P_bh,
degrees_of_freedom,
n_samples,
n_components,
angle=angle,
skip_num_points=0,
verbose=0,
)
P = squareform(P)
P_bh = P_bh.toarray()
assert_array_almost_equal(P_bh, P, decimal=5)
assert_almost_equal(kl_exact, kl_bh, decimal=3)
@skip_if_32bit
def test_n_iter_without_progress(capsys):
# Use a dummy negative n_iter_without_progress and check output on stdout
random_state = check_random_state(0)
X = random_state.randn(100, 10)
for method in ["barnes_hut", "exact"]:
tsne = TSNE(
n_iter_without_progress=-1,
verbose=2,
learning_rate=1e8,
random_state=0,
method=method,
max_iter=351,
init="random",
)
tsne._N_ITER_CHECK = 1
tsne._EXPLORATION_MAX_ITER = 0
tsne.fit_transform(X)
# The output needs to contain the value of n_iter_without_progress
assert (
"did not make any progress during the last -1 episodes. Finished."
in capsys.readouterr().out
)
def test_min_grad_norm(capsys):
# Make sure that the parameter min_grad_norm is used correctly
random_state = check_random_state(0)
X = random_state.randn(100, 2)
min_grad_norm = 0.002
tsne = TSNE(min_grad_norm=min_grad_norm, verbose=2, random_state=0, method="exact")
tsne.fit_transform(X)
lines_out = capsys.readouterr().out.split("\n")
# extract the gradient norm from the verbose output
gradient_norm_values = []
for line in lines_out:
        # When the computation is "Finished", just an old gradient norm value
        # is repeated, which we do not need to store
if "Finished" in line:
break
start_grad_norm = line.find("gradient norm")
if start_grad_norm >= 0:
line = line[start_grad_norm:]
line = line.replace("gradient norm = ", "").split(" ")[0]
gradient_norm_values.append(float(line))
# Compute how often the gradient norm is smaller than min_grad_norm
gradient_norm_values = np.array(gradient_norm_values)
n_smaller_gradient_norms = len(
gradient_norm_values[gradient_norm_values <= min_grad_norm]
)
# The gradient norm can be smaller than min_grad_norm at most once,
    # because the moment it becomes smaller, the optimization stops
assert n_smaller_gradient_norms <= 1
def test_accessible_kl_divergence(capsys):
# Ensures that the accessible kl_divergence matches the computed value
random_state = check_random_state(0)
X = random_state.randn(50, 2)
tsne = TSNE(
n_iter_without_progress=2,
verbose=2,
random_state=0,
method="exact",
max_iter=500,
)
tsne.fit_transform(X)
# The output needs to contain the accessible kl_divergence as the error at
# the last iteration
for line in capsys.readouterr().out.split("\n")[::-1]:
if "Iteration" in line:
_, _, error = line.partition("error = ")
if error:
error, _, _ = error.partition(",")
break
assert_almost_equal(tsne.kl_divergence_, float(error), decimal=5)
@pytest.mark.parametrize("method", ["barnes_hut", "exact"])
def test_uniform_grid(method):
"""Make sure that TSNE can approximately recover a uniform 2D grid
    Due to ties in distances between points in X_2d_grid, this test is platform
dependent for ``method='barnes_hut'`` due to numerical imprecision.
Also, t-SNE is not assured to converge to the right solution because bad
    initialization can lead to convergence to a bad local minimum (the
optimization problem is non-convex). To avoid breaking the test too often,
we re-run t-SNE from the final point when the convergence is not good
enough.
"""
seeds = range(3)
max_iter = 500
for seed in seeds:
tsne = TSNE(
n_components=2,
init="random",
random_state=seed,
perplexity=50,
max_iter=max_iter,
method=method,
learning_rate="auto",
)
Y = tsne.fit_transform(X_2d_grid)
try_name = "{}_{}".format(method, seed)
try:
assert_uniform_grid(Y, try_name)
except AssertionError:
# If the test fails a first time, re-run with init=Y to see if
# this was caused by a bad initialization. Note that this will
# also run an early_exaggeration step.
try_name += ":rerun"
tsne.init = Y
Y = tsne.fit_transform(X_2d_grid)
assert_uniform_grid(Y, try_name)
def assert_uniform_grid(Y, try_name=None):
# Ensure that the resulting embedding leads to approximately
# uniformly spaced points: the distance to the closest neighbors
# should be non-zero and approximately constant.
nn = NearestNeighbors(n_neighbors=1).fit(Y)
dist_to_nn = nn.kneighbors(return_distance=True)[0].ravel()
assert dist_to_nn.min() > 0.1
smallest_to_mean = dist_to_nn.min() / np.mean(dist_to_nn)
largest_to_mean = dist_to_nn.max() / np.mean(dist_to_nn)
assert smallest_to_mean > 0.5, try_name
assert largest_to_mean < 2, try_name
def test_bh_match_exact():
    # check that the ``barnes_hut`` method matches the exact one when
# ``angle = 0`` and ``perplexity > n_samples / 3``
random_state = check_random_state(0)
n_features = 10
X = random_state.randn(30, n_features).astype(np.float32)
X_embeddeds = {}
max_iter = {}
for method in ["exact", "barnes_hut"]:
tsne = TSNE(
n_components=2,
method=method,
learning_rate=1.0,
init="random",
random_state=0,
max_iter=251,
perplexity=29.5,
angle=0,
)
# Kill the early_exaggeration
tsne._EXPLORATION_MAX_ITER = 0
X_embeddeds[method] = tsne.fit_transform(X)
max_iter[method] = tsne.n_iter_
assert max_iter["exact"] == max_iter["barnes_hut"]
assert_allclose(X_embeddeds["exact"], X_embeddeds["barnes_hut"], rtol=1e-4)
def test_gradient_bh_multithread_match_sequential():
# check that the bh gradient with different num_threads gives the same
# results
n_features = 10
n_samples = 30
n_components = 2
degrees_of_freedom = 1
angle = 3
perplexity = 5
random_state = check_random_state(0)
data = random_state.randn(n_samples, n_features).astype(np.float32)
params = random_state.randn(n_samples, n_components)
n_neighbors = n_samples - 1
distances_csr = (
NearestNeighbors()
.fit(data)
.kneighbors_graph(n_neighbors=n_neighbors, mode="distance")
)
P_bh = _joint_probabilities_nn(distances_csr, perplexity, verbose=0)
kl_sequential, grad_sequential = _kl_divergence_bh(
params,
P_bh,
degrees_of_freedom,
n_samples,
n_components,
angle=angle,
skip_num_points=0,
verbose=0,
num_threads=1,
)
for num_threads in [2, 4]:
kl_multithread, grad_multithread = _kl_divergence_bh(
params,
P_bh,
degrees_of_freedom,
n_samples,
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/manifold/tests/test_spectral_embedding.py | sklearn/manifold/tests/test_spectral_embedding.py | import itertools
from unittest.mock import Mock
import numpy as np
import pytest
from scipy import sparse
from scipy.linalg import eigh
from scipy.sparse.linalg import eigsh, lobpcg
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from sklearn.manifold import SpectralEmbedding, _spectral_embedding, spectral_embedding
from sklearn.manifold._spectral_embedding import (
_graph_connected_component,
_graph_is_connected,
)
from sklearn.metrics import normalized_mutual_info_score, pairwise_distances
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.neighbors import NearestNeighbors
from sklearn.utils._testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.utils.fixes import (
COO_CONTAINERS,
CSC_CONTAINERS,
CSR_CONTAINERS,
parse_version,
sp_version,
)
from sklearn.utils.fixes import laplacian as csgraph_laplacian
try:
from pyamg import smoothed_aggregation_solver # noqa: F401
pyamg_available = True
except ImportError:
pyamg_available = False
skip_if_no_pyamg = pytest.mark.skipif(
not pyamg_available, reason="PyAMG is required for the tests in this function."
)
# non centered, sparse centers to check the
centers = np.array(
[
[0.0, 5.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
]
)
n_samples = 1000
n_clusters, n_features = centers.shape
S, true_labels = make_blobs(
n_samples=n_samples, centers=centers, cluster_std=1.0, random_state=42
)
def _assert_equal_with_sign_flipping(A, B, tol=0.0):
"""Check array A and B are equal with possible sign flipping on
each column"""
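# Eigenvectors are only defined up to a sign, so embeddings returned by
# different eigensolvers may legitimately differ by a per-column sign flip.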
tol_squared = tol**2
for A_col, B_col in zip(A.T, B.T):
assert (
np.max((A_col - B_col) ** 2) <= tol_squared
or np.max((A_col + B_col) ** 2) <= tol_squared
)
@pytest.mark.parametrize("coo_container", COO_CONTAINERS)
def test_sparse_graph_connected_component(coo_container):
rng = np.random.RandomState(42)
n_samples = 300
boundaries = [0, 42, 121, 200, n_samples]
p = rng.permutation(n_samples)
connections = []
for start, stop in itertools.pairwise(boundaries):
group = p[start:stop]
# Connect all elements within the group at least once via an
# arbitrary path that spans the group.
for i in range(len(group) - 1):
connections.append((group[i], group[i + 1]))
# Add some more random connections within the group
min_idx, max_idx = 0, len(group) - 1
n_random_connections = 1000
source = rng.randint(min_idx, max_idx, size=n_random_connections)
target = rng.randint(min_idx, max_idx, size=n_random_connections)
connections.extend(zip(group[source], group[target]))
# Build a symmetric affinity matrix
row_idx, column_idx = tuple(np.array(connections).T)
data = rng.uniform(0.1, 42, size=len(connections))
affinity = coo_container((data, (row_idx, column_idx)))
affinity = 0.5 * (affinity + affinity.T)
for start, stop in itertools.pairwise(boundaries):
component_1 = _graph_connected_component(affinity, p[start])
component_size = stop - start
assert component_1.sum() == component_size
# We should retrieve the same component mask by starting by both ends
# of the group
component_2 = _graph_connected_component(affinity, p[stop - 1])
assert component_2.sum() == component_size
assert_array_equal(component_1, component_2)
# TODO: investigate why this test is seed-sensitive on 32-bit Python
# runtimes. Is this revealing a numerical stability problem? Or is it
# expected from the test's numerical design? In the latter case the test
# should be made less seed-sensitive instead.
@pytest.mark.parametrize(
"eigen_solver",
[
"arpack",
"lobpcg",
pytest.param("amg", marks=skip_if_no_pyamg),
],
)
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_spectral_embedding_two_components(eigen_solver, dtype, seed=0):
# Test spectral embedding with two components
random_state = np.random.RandomState(seed)
n_sample = 100
affinity = np.zeros(shape=[n_sample * 2, n_sample * 2])
# first component
affinity[0:n_sample, 0:n_sample] = (
np.abs(random_state.randn(n_sample, n_sample)) + 2
)
# second component
affinity[n_sample::, n_sample::] = (
np.abs(random_state.randn(n_sample, n_sample)) + 2
)
# Test of internal _graph_connected_component before connection
component = _graph_connected_component(affinity, 0)
assert component[:n_sample].all()
assert not component[n_sample:].any()
component = _graph_connected_component(affinity, -1)
assert not component[:n_sample].any()
assert component[n_sample:].all()
# connection
affinity[0, n_sample + 1] = 1
affinity[n_sample + 1, 0] = 1
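# Zero the diagonal: stepping through the flattened array with a stride of
# 2 * n_sample + 1 visits exactly the diagonal entries of the square matrix.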
affinity.flat[:: 2 * n_sample + 1] = 0
affinity = 0.5 * (affinity + affinity.T)
true_label = np.zeros(shape=2 * n_sample)
true_label[0:n_sample] = 1
se_precomp = SpectralEmbedding(
n_components=1,
affinity="precomputed",
random_state=np.random.RandomState(seed),
eigen_solver=eigen_solver,
)
embedded_coordinate = se_precomp.fit_transform(affinity.astype(dtype))
# thresholding on the first components using 0.
label_ = np.array(embedded_coordinate.ravel() < 0, dtype=np.int64)
assert normalized_mutual_info_score(true_label, label_) == pytest.approx(1.0)
@pytest.mark.parametrize("sparse_container", [None, *CSR_CONTAINERS])
@pytest.mark.parametrize(
"eigen_solver",
[
"arpack",
"lobpcg",
pytest.param("amg", marks=skip_if_no_pyamg),
],
)
@pytest.mark.parametrize("dtype", (np.float32, np.float64))
def test_spectral_embedding_precomputed_affinity(
sparse_container, eigen_solver, dtype, seed=36
):
# Test spectral embedding with precomputed kernel
gamma = 1.0
X = S if sparse_container is None else sparse_container(S)
se_precomp = SpectralEmbedding(
n_components=2,
affinity="precomputed",
random_state=np.random.RandomState(seed),
eigen_solver=eigen_solver,
)
se_rbf = SpectralEmbedding(
n_components=2,
affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed),
eigen_solver=eigen_solver,
)
embed_precomp = se_precomp.fit_transform(rbf_kernel(X.astype(dtype), gamma=gamma))
embed_rbf = se_rbf.fit_transform(X.astype(dtype))
assert_array_almost_equal(se_precomp.affinity_matrix_, se_rbf.affinity_matrix_)
_assert_equal_with_sign_flipping(embed_precomp, embed_rbf, 0.05)
def test_precomputed_nearest_neighbors_filtering():
# Test precomputed graph filtering when containing too many neighbors
n_neighbors = 2
results = []
for additional_neighbors in [0, 10]:
nn = NearestNeighbors(n_neighbors=n_neighbors + additional_neighbors).fit(S)
graph = nn.kneighbors_graph(S, mode="connectivity")
embedding = (
SpectralEmbedding(
random_state=0,
n_components=2,
affinity="precomputed_nearest_neighbors",
n_neighbors=n_neighbors,
)
.fit(graph)
.embedding_
)
results.append(embedding)
assert_array_equal(results[0], results[1])
@pytest.mark.parametrize("sparse_container", [None, *CSR_CONTAINERS])
def test_spectral_embedding_callable_affinity(sparse_container, seed=36):
# Test spectral embedding with callable affinity
gamma = 0.9
kern = rbf_kernel(S, gamma=gamma)
X = S if sparse_container is None else sparse_container(S)
se_callable = SpectralEmbedding(
n_components=2,
affinity=(lambda x: rbf_kernel(x, gamma=gamma)),
gamma=gamma,
random_state=np.random.RandomState(seed),
)
se_rbf = SpectralEmbedding(
n_components=2,
affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed),
)
embed_rbf = se_rbf.fit_transform(X)
embed_callable = se_callable.fit_transform(X)
assert_array_almost_equal(se_callable.affinity_matrix_, se_rbf.affinity_matrix_)
assert_array_almost_equal(kern, se_rbf.affinity_matrix_)
_assert_equal_with_sign_flipping(embed_rbf, embed_callable, 0.05)
@pytest.mark.skipif(
not pyamg_available, reason="PyAMG is required for the tests in this function."
)
@pytest.mark.parametrize("dtype", (np.float32, np.float64))
@pytest.mark.parametrize("coo_container", COO_CONTAINERS)
def test_spectral_embedding_amg_solver(dtype, coo_container, seed=36):
se_amg = SpectralEmbedding(
n_components=2,
affinity="nearest_neighbors",
eigen_solver="amg",
n_neighbors=5,
random_state=np.random.RandomState(seed),
)
se_arpack = SpectralEmbedding(
n_components=2,
affinity="nearest_neighbors",
eigen_solver="arpack",
n_neighbors=5,
random_state=np.random.RandomState(seed),
)
embed_amg = se_amg.fit_transform(S.astype(dtype))
embed_arpack = se_arpack.fit_transform(S.astype(dtype))
_assert_equal_with_sign_flipping(embed_amg, embed_arpack, 1e-5)
# same with special case in which amg is not actually used
# regression test for #10715
# affinity between nodes
row = np.array([0, 0, 1, 2, 3, 3, 4], dtype=np.int32)
col = np.array([1, 2, 2, 3, 4, 5, 5], dtype=np.int32)
val = np.array([100, 100, 100, 1, 100, 100, 100], dtype=np.int64)
affinity = coo_container(
(np.hstack([val, val]), (np.hstack([row, col]), np.hstack([col, row]))),
shape=(6, 6),
)
se_amg.affinity = "precomputed"
se_arpack.affinity = "precomputed"
embed_amg = se_amg.fit_transform(affinity.astype(dtype))
embed_arpack = se_arpack.fit_transform(affinity.astype(dtype))
_assert_equal_with_sign_flipping(embed_amg, embed_arpack, 1e-5)
# Check that passing a sparse matrix with `np.int64` indices dtype raises an error
# or is successful based on the version of SciPy which is installed.
# Use a CSR matrix to avoid any conversion during the validation
affinity = affinity.tocsr()
affinity.indptr = affinity.indptr.astype(np.int64)
affinity.indices = affinity.indices.astype(np.int64)
# PR: https://github.com/scipy/scipy/pull/18913
# First integration in 1.11.3: https://github.com/scipy/scipy/pull/19279
scipy_graph_traversal_supports_int64_index = sp_version >= parse_version("1.11.3")
if scipy_graph_traversal_supports_int64_index:
se_amg.fit_transform(affinity)
else:
err_msg = "Only sparse matrices with 32-bit integer indices are accepted"
with pytest.raises(ValueError, match=err_msg):
se_amg.fit_transform(affinity)
@pytest.mark.skipif(
not pyamg_available, reason="PyAMG is required for the tests in this function."
)
@pytest.mark.parametrize("dtype", (np.float32, np.float64))
def test_spectral_embedding_amg_solver_failure(dtype, seed=36):
# Non-regression test for amg solver failure (issue #13393 on github)
num_nodes = 100
X = sparse.rand(num_nodes, num_nodes, density=0.1, random_state=seed)
X = X.astype(dtype)
upper = sparse.triu(X) - sparse.diags(X.diagonal())
sym_matrix = upper + upper.T
embedding = spectral_embedding(
sym_matrix, n_components=10, eigen_solver="amg", random_state=0
)
# Check that the learned embedding is stable w.r.t. random solver init:
for i in range(3):
new_embedding = spectral_embedding(
sym_matrix, n_components=10, eigen_solver="amg", random_state=i + 1
)
_assert_equal_with_sign_flipping(embedding, new_embedding, tol=0.05)
def test_pipeline_spectral_clustering(seed=36):
# Test using pipeline to do spectral clustering
random_state = np.random.RandomState(seed)
se_rbf = SpectralEmbedding(
n_components=n_clusters, affinity="rbf", random_state=random_state
)
se_knn = SpectralEmbedding(
n_components=n_clusters,
affinity="nearest_neighbors",
n_neighbors=5,
random_state=random_state,
)
for se in [se_rbf, se_knn]:
km = KMeans(n_clusters=n_clusters, random_state=random_state, n_init=10)
km.fit(se.fit_transform(S))
assert_array_almost_equal(
normalized_mutual_info_score(km.labels_, true_labels), 1.0, 2
)
def test_connectivity(seed=36):
# Test that graph connectivity test works as expected
graph = np.array(
[
[1, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1],
]
)
assert not _graph_is_connected(graph)
for csr_container in CSR_CONTAINERS:
assert not _graph_is_connected(csr_container(graph))
for csc_container in CSC_CONTAINERS:
assert not _graph_is_connected(csc_container(graph))
graph = np.array(
[
[1, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1],
]
)
assert _graph_is_connected(graph)
for csr_container in CSR_CONTAINERS:
assert _graph_is_connected(csr_container(graph))
for csc_container in CSC_CONTAINERS:
assert _graph_is_connected(csc_container(graph))
def test_spectral_embedding_deterministic():
# Test that Spectral Embedding is deterministic
random_state = np.random.RandomState(36)
data = random_state.randn(10, 30)
sims = rbf_kernel(data)
embedding_1 = spectral_embedding(sims)
embedding_2 = spectral_embedding(sims)
assert_array_almost_equal(embedding_1, embedding_2)
def test_spectral_embedding_unnormalized():
# Test that spectral_embedding is also processing unnormalized laplacian
# correctly
random_state = np.random.RandomState(36)
data = random_state.randn(10, 30)
sims = rbf_kernel(data)
n_components = 8
embedding_1 = spectral_embedding(
sims, norm_laplacian=False, n_components=n_components, drop_first=False
)
# Verify using manual computation with dense eigh
laplacian, dd = csgraph_laplacian(sims, normed=False, return_diag=True)
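# scipy.linalg.eigh returns eigenvalues in ascending order, so taking the
# first n_components eigenvectors below selects the smallest eigenvalues of
# the unnormalized Laplacian, matching drop_first=False above.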
_, diffusion_map = eigh(laplacian)
embedding_2 = diffusion_map.T[:n_components]
embedding_2 = _deterministic_vector_sign_flip(embedding_2).T
assert_array_almost_equal(embedding_1, embedding_2)
def test_spectral_embedding_first_eigen_vector():
# Test that the first eigenvector of spectral_embedding
# is constant and that the second is not (for a connected graph)
random_state = np.random.RandomState(36)
data = random_state.randn(10, 30)
sims = rbf_kernel(data)
n_components = 2
for seed in range(10):
embedding = spectral_embedding(
sims,
norm_laplacian=False,
n_components=n_components,
drop_first=False,
random_state=seed,
)
assert np.std(embedding[:, 0]) == pytest.approx(0)
assert np.std(embedding[:, 1]) > 1e-3
@pytest.mark.parametrize(
"eigen_solver",
[
"arpack",
"lobpcg",
pytest.param("amg", marks=skip_if_no_pyamg),
],
)
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_spectral_embedding_preserves_dtype(eigen_solver, dtype):
"""Check that `SpectralEmbedding is preserving the dtype of the fitted
attribute and transformed data.
Ideally, this test should be covered by the common test
`check_transformer_preserve_dtypes`. However, this test only run
with transformers implementing `transform` while `SpectralEmbedding`
implements only `fit_transform`.
"""
X = S.astype(dtype)
se = SpectralEmbedding(
n_components=2, affinity="rbf", eigen_solver=eigen_solver, random_state=0
)
X_trans = se.fit_transform(X)
assert X_trans.dtype == dtype
assert se.embedding_.dtype == dtype
assert se.affinity_matrix_.dtype == dtype
@pytest.mark.skipif(
pyamg_available,
reason="PyAMG is installed and we should not test for an error.",
)
def test_error_pyamg_not_available():
se_precomp = SpectralEmbedding(
n_components=2,
affinity="rbf",
eigen_solver="amg",
)
err_msg = "The eigen_solver was set to 'amg', but pyamg is not available."
with pytest.raises(ValueError, match=err_msg):
se_precomp.fit_transform(S)
@pytest.mark.parametrize("solver", ["arpack", "amg", "lobpcg"])
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_spectral_eigen_tol_auto(monkeypatch, solver, csr_container):
"""Test that `eigen_tol="auto"` is resolved correctly"""
if solver == "amg" and not pyamg_available:
pytest.skip("PyAMG is not available.")
X, _ = make_blobs(
n_samples=200, random_state=0, centers=[[1, 1], [-1, -1]], cluster_std=0.01
)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
solver_func = eigsh if solver == "arpack" else lobpcg
default_value = 0 if solver == "arpack" else None
if solver == "amg":
S = csr_container(S)
mocked_solver = Mock(side_effect=solver_func)
monkeypatch.setattr(_spectral_embedding, solver_func.__qualname__, mocked_solver)
spectral_embedding(S, random_state=42, eigen_solver=solver, eigen_tol="auto")
mocked_solver.assert_called()
_, kwargs = mocked_solver.call_args
assert kwargs["tol"] == default_value
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/manifold/tests/test_classical_mds.py | sklearn/manifold/tests/test_classical_mds.py | import numpy as np
import pytest
from numpy.testing import assert_allclose
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.manifold import ClassicalMDS
from sklearn.metrics import euclidean_distances
def test_classical_mds_equivalent_to_pca():
X, _ = load_iris(return_X_y=True)
cmds = ClassicalMDS(n_components=2, metric="euclidean")
pca = PCA(n_components=2)
Z1 = cmds.fit_transform(X)
Z2 = pca.fit_transform(X)
# Swap the signs if necessary
for comp in range(2):
if Z1[0, comp] < 0 and Z2[0, comp] > 0:
Z2[:, comp] *= -1
assert_allclose(Z1, Z2)
assert_allclose(np.sqrt(cmds.eigenvalues_), pca.singular_values_)
def test_classical_mds_equivalent_on_data_and_distances():
X, _ = load_iris(return_X_y=True)
cmds = ClassicalMDS(n_components=2, metric="euclidean")
Z1 = cmds.fit_transform(X)
cmds = ClassicalMDS(n_components=2, metric="precomputed")
Z2 = cmds.fit_transform(euclidean_distances(X))
assert_allclose(Z1, Z2)
def test_classical_mds_wrong_inputs():
# Non-symmetric input
dissim = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
with pytest.raises(ValueError, match="Array must be symmetric"):
ClassicalMDS(metric="precomputed").fit(dissim)
# Non-square input
dissim = np.array([[0, 1, 2], [3, 4, 5]])
with pytest.raises(ValueError, match="array must be 2-dimensional and square"):
ClassicalMDS(metric="precomputed").fit(dissim)
def test_classical_mds_metric_params():
X, _ = load_iris(return_X_y=True)
cmds = ClassicalMDS(n_components=2, metric="euclidean")
Z1 = cmds.fit_transform(X)
cmds = ClassicalMDS(n_components=2, metric="minkowski", metric_params={"p": 2})
Z2 = cmds.fit_transform(X)
assert_allclose(Z1, Z2)
cmds = ClassicalMDS(n_components=2, metric="minkowski", metric_params={"p": 1})
Z3 = cmds.fit_transform(X)
assert not np.allclose(Z1, Z3)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/manifold/tests/__init__.py | sklearn/manifold/tests/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/manifold/tests/test_mds.py | sklearn/manifold/tests/test_mds.py | from unittest.mock import Mock
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_almost_equal, assert_equal
from sklearn.datasets import load_digits, load_iris
from sklearn.manifold import ClassicalMDS
from sklearn.manifold import _mds as mds
from sklearn.metrics import euclidean_distances
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4], [5, 0, 2, 2], [3, 2, 0, 1], [4, 2, 1, 0]])
Z = np.array([[-0.266, -0.539], [0.451, 0.252], [0.016, -0.238], [-0.200, 0.524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array(
[[-1.415, -2.471], [1.633, 1.107], [0.249, -0.067], [-0.468, 1.431]]
)
assert_array_almost_equal(X, X_true, decimal=3)
def test_nonmetric_lower_normalized_stress():
# Testing that nonmetric MDS results in lower normalized stress
# compared to metric MDS (non-regression test for issue 27028)
X, _ = load_iris(return_X_y=True)
sim = euclidean_distances(X)
np.random.seed(42)
Z = np.random.normal(size=(X.shape[0], 2))
_, stress1 = mds.smacof(
sim, init=Z, n_components=2, max_iter=1000, n_init=1, normalized_stress=True
)
_, stress2 = mds.smacof(
sim,
init=Z,
n_components=2,
max_iter=1000,
n_init=1,
normalized_stress=True,
metric=False,
)
assert stress1 > stress2
# A metric MDS solution (local minimum of the raw stress) can be rescaled to
# decrease the stress-1 (which is returned with normalized_stress=True).
# The optimal rescaling can be computed analytically, see Borg & Groenen,
# Modern Multidimensional Scaling, Chapter 11.1. After rescaling, stress-1
# becomes sqrt(s^2 / (1 + s^2)), where s is the value of stress-1 before
# rescaling.
stress1_rescaled = np.sqrt(stress1**2 / (1 + stress1**2))
assert stress1_rescaled > stress2
def test_nonmetric_mds_optimization():
# Test that stress is decreasing during nonmetric MDS optimization
# (non-regression test for issue 27028)
X, _ = load_digits(return_X_y=True)
rng = np.random.default_rng(seed=42)
ind_subset = rng.choice(len(X), size=200, replace=False)
X = X[ind_subset]
mds_est = mds.MDS(
n_components=2,
n_init=1,
max_iter=2,
metric_mds=False,
init="random",
random_state=42,
).fit(X)
stress_after_2_iter = mds_est.stress_
mds_est = mds.MDS(
n_components=2,
n_init=1,
max_iter=3,
metric_mds=False,
init="random",
random_state=42,
).fit(X)
stress_after_3_iter = mds_est.stress_
assert stress_after_2_iter > stress_after_3_iter
@pytest.mark.parametrize("metric_mds", [True, False])
def test_mds_recovers_true_data(metric_mds):
X = np.array([[1, 1], [1, 4], [1, 5], [3, 3]])
mds_est = mds.MDS(
n_components=2,
n_init=1,
eps=1e-15,
max_iter=1000,
metric_mds=metric_mds,
init="random",
random_state=42,
).fit(X)
stress = mds_est.stress_
assert_allclose(stress, 0, atol=1e-6)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4], [5, 0, 2, 2], [3, 2, 0, 1], [4, 2, 1, 0]])
with pytest.raises(ValueError):
mds.smacof(sim, n_init=1)
# Not squared similarity matrix:
sim = np.array([[0, 5, 9, 4], [5, 0, 2, 2], [4, 2, 1, 0]])
with pytest.raises(ValueError):
mds.smacof(sim, n_init=1)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4], [5, 0, 2, 2], [3, 2, 0, 1], [4, 2, 1, 0]])
Z = np.array([[-0.266, -0.539], [0.016, -0.238], [-0.200, 0.524]])
with pytest.raises(ValueError):
mds.smacof(sim, init=Z, n_init=1)
# TODO: remove mark once loky bug is fixed:
# https://github.com/joblib/loky/issues/458
@pytest.mark.thread_unsafe
def test_MDS():
sim = np.array([[0, 5, 3, 4], [5, 0, 2, 2], [3, 2, 0, 1], [4, 2, 1, 0]])
mds_clf = mds.MDS(
metric_mds=False,
n_jobs=3,
n_init=3,
metric="precomputed",
init="random",
)
mds_clf.fit(sim)
# TODO(1.10): remove warning filter
@pytest.mark.filterwarnings("ignore::FutureWarning")
@pytest.mark.parametrize("k", [0.5, 1.5, 2])
def test_normed_stress(k):
"""Test that non-metric MDS normalized stress is scale-invariant."""
sim = np.array([[0, 5, 3, 4], [5, 0, 2, 2], [3, 2, 0, 1], [4, 2, 1, 0]])
X1, stress1 = mds.smacof(sim, metric=False, max_iter=5, random_state=0)
X2, stress2 = mds.smacof(k * sim, metric=False, max_iter=5, random_state=0)
assert_allclose(stress1, stress2, rtol=1e-5)
assert_allclose(X1, X2, rtol=1e-5)
# TODO(1.10): remove warning filter
@pytest.mark.filterwarnings("ignore::FutureWarning")
@pytest.mark.parametrize("metric", [True, False])
def test_normalized_stress_auto(metric, monkeypatch):
rng = np.random.RandomState(0)
X = rng.randn(4, 3)
dist = euclidean_distances(X)
mock = Mock(side_effect=mds._smacof_single)
monkeypatch.setattr("sklearn.manifold._mds._smacof_single", mock)
est = mds.MDS(metric=metric, normalized_stress="auto", random_state=rng)
est.fit_transform(X)
assert mock.call_args[1]["normalized_stress"] != metric
mds.smacof(dist, metric=metric, normalized_stress="auto", random_state=rng)
assert mock.call_args[1]["normalized_stress"] != metric
def test_isotonic_outofbounds():
# This particular configuration can trigger out of bounds error
# in the isotonic regression (non-regression test for issue 26999)
dis = np.array(
[
[0.0, 1.732050807568877, 1.7320508075688772],
[1.732050807568877, 0.0, 6.661338147750939e-16],
[1.7320508075688772, 6.661338147750939e-16, 0.0],
]
)
init = np.array(
[
[0.08665881585055124, 0.7939114643387546],
[0.9959834154297658, 0.7555546025640025],
[0.8766008278401566, 0.4227358815811242],
]
)
mds.smacof(dis, init=init, metric=False, n_init=1)
# TODO(1.10): remove warning filter
@pytest.mark.filterwarnings("ignore::FutureWarning")
@pytest.mark.parametrize("normalized_stress", [True, False])
def test_returned_stress(normalized_stress):
# Test that the final stress corresponds to the final embedding
# (non-regression test for issue 16846)
X = np.array([[1, 1], [1, 4], [1, 5], [3, 3]])
D = euclidean_distances(X)
mds_est = mds.MDS(
n_components=2,
random_state=42,
normalized_stress=normalized_stress,
).fit(X)
Z = mds_est.embedding_
stress = mds_est.stress_
D_mds = euclidean_distances(Z)
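# The full distance matrices count every pair (i, j) twice (and include a
# zero diagonal), hence the division by 2 in the raw stress below.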
stress_Z = ((D_mds.ravel() - D.ravel()) ** 2).sum() / 2
if normalized_stress:
stress_Z = np.sqrt(stress_Z / ((D_mds.ravel() ** 2).sum() / 2))
assert_allclose(stress, stress_Z)
# TODO(1.10): remove warning filter
@pytest.mark.filterwarnings("ignore::FutureWarning")
@pytest.mark.parametrize("metric_mds", [True, False])
def test_convergence_does_not_depend_on_scale(metric_mds):
# Test that the number of iterations until convergence does not depend on
# the scale of the input data
X = np.array([[1, 1], [1, 4], [1, 5], [3, 3]])
mds_est = mds.MDS(
n_components=2,
random_state=42,
metric_mds=metric_mds,
)
mds_est.fit(X * 100)
n_iter1 = mds_est.n_iter_
mds_est.fit(X / 100)
n_iter2 = mds_est.n_iter_
assert_equal(n_iter1, n_iter2)
# TODO(1.9): delete this test
def test_future_warning_n_init():
X = np.array([[1, 1], [1, 4], [1, 5], [3, 3]])
sim = np.array([[0, 5, 3, 4], [5, 0, 2, 2], [3, 2, 0, 1], [4, 2, 1, 0]])
with pytest.warns(FutureWarning):
mds.smacof(sim)
with pytest.warns(FutureWarning):
mds.MDS(init="random").fit(X)
# TODO(1.9): delete the n_init warning check
# TODO(1.10): delete this test
def test_future_warning_init_and_metric():
X = np.array([[1, 1], [1, 4], [1, 5], [3, 3]])
sim = np.array([[0, 5, 3, 4], [5, 0, 2, 2], [3, 2, 0, 1], [4, 2, 1, 0]])
# dissimilarity argument deprecated
with pytest.warns(FutureWarning, match="`dissimilarity` parameter is"):
mds.MDS(dissimilarity="precomputed", init="random", n_init=1).fit(sim)
# metric=True deprecated
with pytest.warns(FutureWarning, match="Use metric_mds"):
mds.MDS(metric=True, init="random", n_init=1).fit(X)
# metric=False deprecated
with pytest.warns(FutureWarning, match="Use metric_mds"):
mds.MDS(metric=False, init="random", n_init=1).fit(X)
# default init will become classical_mds in the future
with pytest.warns(FutureWarning, match="The default value of `init`"):
mds.MDS(metric="euclidean", n_init=1).fit(X)
# TODO (1.9): delete this check
# n_init=1 will become default in the future
with pytest.warns(FutureWarning, match="The default value of `n_init`"):
mds.MDS(metric="euclidean", init="random").fit(X)
# providing both metric and dissimilarity raises an error
with pytest.raises(ValueError, match="provided both `dissimilarity`"):
mds.MDS(
metric="cosine", dissimilarity="euclidean", init="random", n_init=1
).fit(X)
# TODO(1.9): remove warning filter
@pytest.mark.filterwarnings("ignore::FutureWarning")
def test_classical_mds_init_to_mds():
X, _ = load_iris(return_X_y=True)
cmds = ClassicalMDS()
Z_classical = cmds.fit_transform(X)
mds1 = mds.MDS(init="classical_mds")
Z1 = mds1.fit_transform(X)
mds2 = mds.MDS(init="random")
Z2 = mds2.fit_transform(X, init=Z_classical)
assert_allclose(Z1, Z2)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/manifold/tests/test_isomap.py | sklearn/manifold/tests/test_isomap.py | import math
from itertools import product
import numpy as np
import pytest
from scipy.sparse import rand as sparse_rand
from sklearn import clone, datasets, manifold, neighbors, pipeline, preprocessing
from sklearn.datasets import make_blobs
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.utils._testing import (
assert_allclose,
assert_allclose_dense_sparse,
assert_array_equal,
)
from sklearn.utils.fixes import CSR_CONTAINERS
eigen_solvers = ["auto", "dense", "arpack"]
path_methods = ["auto", "FW", "D"]
def create_sample_data(dtype, n_pts=25, add_noise=False):
# grid of equidistant points in 2D, n_components = n_dim
n_per_side = int(math.sqrt(n_pts))
X = np.array(list(product(range(n_per_side), repeat=2))).astype(dtype, copy=False)
if add_noise:
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(n_pts, 1).astype(dtype, copy=False)
X = np.concatenate((X, noise), 1)
return X
@pytest.mark.parametrize("n_neighbors, radius", [(24, None), (None, np.inf)])
@pytest.mark.parametrize("eigen_solver", eigen_solvers)
@pytest.mark.parametrize("path_method", path_methods)
def test_isomap_simple_grid(
global_dtype, n_neighbors, radius, eigen_solver, path_method
):
# Isomap should preserve distances when all neighbors are used
n_pts = 25
X = create_sample_data(global_dtype, n_pts=n_pts, add_noise=False)
# distances from each point to all others
if n_neighbors is not None:
G = neighbors.kneighbors_graph(X, n_neighbors, mode="distance")
else:
G = neighbors.radius_neighbors_graph(X, radius, mode="distance")
clf = manifold.Isomap(
n_neighbors=n_neighbors,
radius=radius,
n_components=2,
eigen_solver=eigen_solver,
path_method=path_method,
)
clf.fit(X)
if n_neighbors is not None:
G_iso = neighbors.kneighbors_graph(clf.embedding_, n_neighbors, mode="distance")
else:
G_iso = neighbors.radius_neighbors_graph(
clf.embedding_, radius, mode="distance"
)
atol = 1e-5 if global_dtype == np.float32 else 0
assert_allclose_dense_sparse(G, G_iso, atol=atol)
@pytest.mark.parametrize("n_neighbors, radius", [(24, None), (None, np.inf)])
@pytest.mark.parametrize("eigen_solver", eigen_solvers)
@pytest.mark.parametrize("path_method", path_methods)
def test_isomap_reconstruction_error(
global_dtype, n_neighbors, radius, eigen_solver, path_method
):
if global_dtype is np.float32:
pytest.skip(
"Skipping test due to numerical instabilities on float32 data"
"from KernelCenterer used in the reconstruction_error method"
)
# Same setup as in test_isomap_simple_grid, with an added dimension
n_pts = 25
X = create_sample_data(global_dtype, n_pts=n_pts, add_noise=True)
# compute input kernel
if n_neighbors is not None:
G = neighbors.kneighbors_graph(X, n_neighbors, mode="distance").toarray()
else:
G = neighbors.radius_neighbors_graph(X, radius, mode="distance").toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G**2)
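# -0.5 * D**2 followed by double centering (KernelCenterer) converts a
# distance matrix into the Gram matrix on which Isomap's kernel PCA step
# operates.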
clf = manifold.Isomap(
n_neighbors=n_neighbors,
radius=radius,
n_components=2,
eigen_solver=eigen_solver,
path_method=path_method,
)
clf.fit(X)
# compute output kernel
if n_neighbors is not None:
G_iso = neighbors.kneighbors_graph(clf.embedding_, n_neighbors, mode="distance")
else:
G_iso = neighbors.radius_neighbors_graph(
clf.embedding_, radius, mode="distance"
)
G_iso = G_iso.toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso**2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / n_pts
atol = 1e-5 if global_dtype == np.float32 else 0
assert_allclose(reconstruction_error, clf.reconstruction_error(), atol=atol)
@pytest.mark.parametrize("n_neighbors, radius", [(2, None), (None, 0.5)])
def test_transform(global_dtype, n_neighbors, radius):
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.make_s_curve(n_samples, random_state=0)
X = X.astype(global_dtype, copy=False)
# Compute isomap embedding
iso = manifold.Isomap(
n_components=n_components, n_neighbors=n_neighbors, radius=radius
)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert np.sqrt(np.mean((X_iso - X_iso2) ** 2)) < 2 * noise_scale
@pytest.mark.parametrize("n_neighbors, radius", [(2, None), (None, 10.0)])
def test_pipeline(n_neighbors, radius, global_dtype):
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
X = X.astype(global_dtype, copy=False)
clf = pipeline.Pipeline(
[
("isomap", manifold.Isomap(n_neighbors=n_neighbors, radius=radius)),
("clf", neighbors.KNeighborsClassifier()),
]
)
clf.fit(X, y)
assert 0.9 < clf.score(X, y)
def test_pipeline_with_nearest_neighbors_transformer(global_dtype):
# Test chaining NearestNeighborsTransformer and Isomap with
# neighbors_algorithm='precomputed'
algorithm = "auto"
n_neighbors = 10
X, _ = datasets.make_blobs(random_state=0)
X2, _ = datasets.make_blobs(random_state=1)
X = X.astype(global_dtype, copy=False)
X2 = X2.astype(global_dtype, copy=False)
# compare the chained version and the compact version
est_chain = pipeline.make_pipeline(
neighbors.KNeighborsTransformer(
n_neighbors=n_neighbors, algorithm=algorithm, mode="distance"
),
manifold.Isomap(n_neighbors=n_neighbors, metric="precomputed"),
)
est_compact = manifold.Isomap(
n_neighbors=n_neighbors, neighbors_algorithm=algorithm
)
Xt_chain = est_chain.fit_transform(X)
Xt_compact = est_compact.fit_transform(X)
assert_allclose(Xt_chain, Xt_compact)
Xt_chain = est_chain.transform(X2)
Xt_compact = est_compact.transform(X2)
assert_allclose(Xt_chain, Xt_compact)
@pytest.mark.parametrize(
"metric, p, is_euclidean",
[
("euclidean", 2, True),
("manhattan", 1, False),
("minkowski", 1, False),
("minkowski", 2, True),
(lambda x1, x2: np.sqrt(np.sum(x1**2 + x2**2)), 2, False),
],
)
def test_different_metric(global_dtype, metric, p, is_euclidean):
# Isomap must work correctly with various metric parameters
# and must default to euclidean.
X, _ = datasets.make_blobs(random_state=0)
X = X.astype(global_dtype, copy=False)
reference = manifold.Isomap().fit_transform(X)
embedding = manifold.Isomap(metric=metric, p=p).fit_transform(X)
if is_euclidean:
assert_allclose(embedding, reference)
else:
with pytest.raises(AssertionError, match="Not equal to tolerance"):
assert_allclose(embedding, reference)
def test_isomap_clone_bug():
# regression test for bug reported in #6062
model = manifold.Isomap()
for n_neighbors in [10, 15, 20]:
model.set_params(n_neighbors=n_neighbors)
model.fit(np.random.rand(50, 2))
assert model.nbrs_.n_neighbors == n_neighbors
@pytest.mark.parametrize("eigen_solver", eigen_solvers)
@pytest.mark.parametrize("path_method", path_methods)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_sparse_input(
global_dtype, eigen_solver, path_method, global_random_seed, csr_container
):
# TODO: compare results on dense and sparse data as proposed in:
# https://github.com/scikit-learn/scikit-learn/pull/23585#discussion_r968388186
X = csr_container(
sparse_rand(
100,
3,
density=0.1,
format="csr",
dtype=global_dtype,
random_state=global_random_seed,
)
)
iso_dense = manifold.Isomap(
n_components=2,
eigen_solver=eigen_solver,
path_method=path_method,
n_neighbors=8,
)
iso_sparse = clone(iso_dense)
X_trans_dense = iso_dense.fit_transform(X.toarray())
X_trans_sparse = iso_sparse.fit_transform(X)
assert_allclose(X_trans_sparse, X_trans_dense, rtol=1e-4, atol=1e-4)
def test_isomap_fit_precomputed_radius_graph(global_dtype):
# Isomap.fit_transform must yield similar result when using
# a precomputed distance matrix.
X, y = datasets.make_s_curve(200, random_state=0)
X = X.astype(global_dtype, copy=False)
radius = 10
g = neighbors.radius_neighbors_graph(X, radius=radius, mode="distance")
isomap = manifold.Isomap(n_neighbors=None, radius=radius, metric="precomputed")
isomap.fit(g)
precomputed_result = isomap.embedding_
isomap = manifold.Isomap(n_neighbors=None, radius=radius, metric="minkowski")
result = isomap.fit_transform(X)
atol = 1e-5 if global_dtype == np.float32 else 0
assert_allclose(precomputed_result, result, atol=atol)
def test_isomap_fitted_attributes_dtype(global_dtype):
"""Check that the fitted attributes are stored accordingly to the
data type of X."""
iso = manifold.Isomap(n_neighbors=2)
X = np.array([[1, 2], [3, 4], [5, 6]], dtype=global_dtype)
iso.fit(X)
assert iso.dist_matrix_.dtype == global_dtype
assert iso.embedding_.dtype == global_dtype
def test_isomap_dtype_equivalence():
"""Check the equivalence of the results with 32 and 64 bits input."""
iso_32 = manifold.Isomap(n_neighbors=2)
X_32 = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32)
iso_32.fit(X_32)
iso_64 = manifold.Isomap(n_neighbors=2)
X_64 = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float64)
iso_64.fit(X_64)
assert_allclose(iso_32.dist_matrix_, iso_64.dist_matrix_)
def test_isomap_raise_error_when_neighbor_and_radius_both_set():
# Isomap.fit_transform must raise a ValueError if
# radius and n_neighbors are provided.
X, _ = datasets.load_digits(return_X_y=True)
isomap = manifold.Isomap(n_neighbors=3, radius=5.5)
msg = "Both n_neighbors and radius are provided"
with pytest.raises(ValueError, match=msg):
isomap.fit_transform(X)
def test_multiple_connected_components():
# Test that a warning is raised when the graph has multiple components
X = np.array([0, 1, 2, 5, 6, 7])[:, None]
with pytest.warns(UserWarning, match="number of connected components"):
manifold.Isomap(n_neighbors=2).fit(X)
def test_multiple_connected_components_metric_precomputed(global_dtype):
# Test that an error is raised when the graph has multiple components
# and when X is a precomputed neighbors graph.
X = np.array([0, 1, 2, 5, 6, 7])[:, None].astype(global_dtype, copy=False)
# works with a precomputed distance matrix (dense)
X_distances = pairwise_distances(X)
with pytest.warns(UserWarning, match="number of connected components"):
manifold.Isomap(n_neighbors=1, metric="precomputed").fit(X_distances)
# does not work with a precomputed neighbors graph (sparse)
X_graph = neighbors.kneighbors_graph(X, n_neighbors=2, mode="distance")
with pytest.raises(RuntimeError, match="number of connected components"):
manifold.Isomap(n_neighbors=1, metric="precomputed").fit(X_graph)
def test_get_feature_names_out():
"""Check get_feature_names_out for Isomap."""
X, y = make_blobs(random_state=0, n_features=4)
n_components = 2
iso = manifold.Isomap(n_components=n_components)
iso.fit_transform(X)
names = iso.get_feature_names_out()
assert_array_equal([f"isomap{i}" for i in range(n_components)], names)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/neural_network/_rbm.py | sklearn/neural_network/_rbm.py | """Restricted Boltzmann Machine"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import time
from numbers import Integral, Real
import numpy as np
import scipy.sparse as sp
from scipy.special import expit # logistic function
from sklearn.base import (
BaseEstimator,
ClassNamePrefixFeaturesOutMixin,
TransformerMixin,
_fit_context,
)
from sklearn.utils import check_random_state, gen_even_slices
from sklearn.utils._param_validation import Interval
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.validation import check_is_fitted, validate_data
class BernoulliRBM(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
"""Bernoulli Restricted Boltzmann Machine (RBM).
A Restricted Boltzmann Machine with binary visible units and
binary hidden units. Parameters are estimated using Stochastic Maximum
Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
[2].
The time complexity of this implementation is ``O(d ** 2)`` assuming
d ~ n_features ~ n_components.
Read more in the :ref:`User Guide <rbm>`.
Parameters
----------
n_components : int, default=256
Number of binary hidden units.
learning_rate : float, default=0.1
The learning rate for weight updates. It is *highly* recommended
to tune this hyper-parameter. Reasonable values are in the
10**[0., -3.] range.
batch_size : int, default=10
Number of examples per minibatch.
n_iter : int, default=10
Number of iterations/sweeps over the training dataset to perform
during training.
verbose : int, default=0
The verbosity level. The default, zero, means silent mode. Range
of values is [0, inf].
random_state : int, RandomState instance or None, default=None
Determines random number generation for:
- Gibbs sampling from visible and hidden layers.
- Initializing components, sampling from layers during fit.
- Corrupting the data when scoring samples.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
intercept_hidden_ : array-like of shape (n_components,)
Biases of the hidden units.
intercept_visible_ : array-like of shape (n_features,)
Biases of the visible units.
components_ : array-like of shape (n_components, n_features)
Weight matrix, where `n_features` is the number of
visible units and `n_components` is the number of hidden units.
h_samples_ : array-like of shape (batch_size, n_components)
Hidden Activation sampled from the model distribution,
where `batch_size` is the number of examples per minibatch and
`n_components` is the number of hidden units.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
sklearn.neural_network.MLPRegressor : Multi-layer Perceptron regressor.
sklearn.neural_network.MLPClassifier : Multi-layer Perceptron classifier.
sklearn.decomposition.PCA : An unsupervised linear dimensionality
reduction model.
References
----------
[1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
deep belief nets. Neural Computation 18, pp 1527-1554.
https://www.cs.toronto.edu/~hinton/absps/fastnc.pdf
[2] Tieleman, T. Training Restricted Boltzmann Machines using
Approximations to the Likelihood Gradient. International Conference
on Machine Learning (ICML) 2008
Examples
--------
>>> import numpy as np
>>> from sklearn.neural_network import BernoulliRBM
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = BernoulliRBM(n_components=2)
>>> model.fit(X)
BernoulliRBM(n_components=2)
For a more detailed example usage, see
:ref:`sphx_glr_auto_examples_neural_networks_plot_rbm_logistic_classification.py`.
"""
_parameter_constraints: dict = {
"n_components": [Interval(Integral, 1, None, closed="left")],
"learning_rate": [Interval(Real, 0, None, closed="neither")],
"batch_size": [Interval(Integral, 1, None, closed="left")],
"n_iter": [Interval(Integral, 0, None, closed="left")],
"verbose": ["verbose"],
"random_state": ["random_state"],
}
def __init__(
self,
n_components=256,
*,
learning_rate=0.1,
batch_size=10,
n_iter=10,
verbose=0,
random_state=None,
):
self.n_components = n_components
self.learning_rate = learning_rate
self.batch_size = batch_size
self.n_iter = n_iter
self.verbose = verbose
self.random_state = random_state
def transform(self, X):
"""Compute the hidden layer activation probabilities, P(h=1|v=X).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data to be transformed.
Returns
-------
h : ndarray of shape (n_samples, n_components)
Latent representations of the data.
"""
check_is_fitted(self)
X = validate_data(
self, X, accept_sparse="csr", reset=False, dtype=(np.float64, np.float32)
)
return self._mean_hiddens(X)
def _mean_hiddens(self, v):
"""Computes the probabilities P(h=1|v).
Parameters
----------
v : ndarray of shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
h : ndarray of shape (n_samples, n_components)
Corresponding mean field values for the hidden layer.
"""
p = safe_sparse_dot(v, self.components_.T)
p += self.intercept_hidden_
return expit(p, out=p)
def _sample_hiddens(self, v, rng):
"""Sample from the distribution P(h|v).
Parameters
----------
v : ndarray of shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState instance
Random number generator to use.
Returns
-------
h : ndarray of shape (n_samples, n_components)
Values of the hidden layer.
"""
p = self._mean_hiddens(v)
return rng.uniform(size=p.shape) < p
def _sample_visibles(self, h, rng):
"""Sample from the distribution P(v|h).
Parameters
----------
h : ndarray of shape (n_samples, n_components)
Values of the hidden layer to sample from.
rng : RandomState instance
Random number generator to use.
Returns
-------
v : ndarray of shape (n_samples, n_features)
Values of the visible layer.
"""
p = np.dot(h, self.components_)
p += self.intercept_visible_
expit(p, out=p)
return rng.uniform(size=p.shape) < p
def _free_energy(self, v):
"""Computes the free energy F(v) = - log sum_h exp(-E(v,h)).
Parameters
----------
v : ndarray of shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
free_energy : ndarray of shape (n_samples,)
The value of the free energy.
"""
return -safe_sparse_dot(v, self.intercept_visible_) - np.logaddexp(
0, safe_sparse_dot(v, self.components_.T) + self.intercept_hidden_
).sum(axis=1)
def gibbs(self, v):
"""Perform one Gibbs sampling step.
Parameters
----------
v : ndarray of shape (n_samples, n_features)
Values of the visible layer to start from.
Returns
-------
v_new : ndarray of shape (n_samples, n_features)
Values of the visible layer after one Gibbs step.
"""
check_is_fitted(self)
if not hasattr(self, "random_state_"):
self.random_state_ = check_random_state(self.random_state)
h_ = self._sample_hiddens(v, self.random_state_)
v_ = self._sample_visibles(h_, self.random_state_)
return v_
@_fit_context(prefer_skip_nested_validation=True)
def partial_fit(self, X, y=None):
"""Fit the model to the partial segment of the data X.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_outputs), default=None
Target values (None for unsupervised transformations).
Returns
-------
self : BernoulliRBM
The fitted model.
"""
first_pass = not hasattr(self, "components_")
X = validate_data(
self, X, accept_sparse="csr", dtype=np.float64, reset=first_pass
)
if not hasattr(self, "random_state_"):
self.random_state_ = check_random_state(self.random_state)
if not hasattr(self, "components_"):
self.components_ = np.asarray(
self.random_state_.normal(0, 0.01, (self.n_components, X.shape[1])),
order="F",
)
self._n_features_out = self.components_.shape[0]
if not hasattr(self, "intercept_hidden_"):
self.intercept_hidden_ = np.zeros(
self.n_components,
)
if not hasattr(self, "intercept_visible_"):
self.intercept_visible_ = np.zeros(
X.shape[1],
)
if not hasattr(self, "h_samples_"):
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
self._fit(X, self.random_state_)
def _fit(self, v_pos, rng):
"""Inner fit for one mini-batch.
Adjust the parameters to maximize the likelihood of v using
Stochastic Maximum Likelihood (SML).
Parameters
----------
v_pos : ndarray of shape (n_samples, n_features)
The data to use for training.
rng : RandomState instance
Random number generator to use for sampling.
"""
h_pos = self._mean_hiddens(v_pos)
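# Negative phase of persistent contrastive divergence: the visible units are
# sampled from the hidden states carried over from the previous minibatch
# (self.h_samples_), not from the current batch.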
v_neg = self._sample_visibles(self.h_samples_, rng)
h_neg = self._mean_hiddens(v_neg)
lr = float(self.learning_rate) / v_pos.shape[0]
update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
update -= np.dot(h_neg.T, v_neg)
self.components_ += lr * update
self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
self.intercept_visible_ += lr * (
np.asarray(v_pos.sum(axis=0)).squeeze() - v_neg.sum(axis=0)
)
h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial
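# np.floor (applied in-place) truncates the remaining probabilities to 0, so
# h_samples_ holds binary states that seed the persistent chain for the next
# minibatch.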
self.h_samples_ = np.floor(h_neg, h_neg)
def score_samples(self, X):
"""Compute the pseudo-likelihood of X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Values of the visible layer. Must be all-boolean (not checked).
Returns
-------
pseudo_likelihood : ndarray of shape (n_samples,)
Value of the pseudo-likelihood (proxy for likelihood).
Notes
-----
This method is not deterministic: it computes a quantity called the
free energy on X, then on a randomly corrupted version of X, and
returns the log of the logistic function of the difference.
"""
check_is_fitted(self)
v = validate_data(self, X, accept_sparse="csr", reset=False)
rng = check_random_state(self.random_state)
# Randomly corrupt one feature in each sample in v.
ind = (np.arange(v.shape[0]), rng.randint(0, v.shape[1], v.shape[0]))
if sp.issparse(v):
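# `data` holds the additive change that flips each selected binary entry:
# +1 where v was 0 and -1 where v was 1.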
data = -2 * v[ind] + 1
if isinstance(data, np.matrix): # v is a sparse matrix
v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
else: # v is a sparse array
v_ = v + sp.csr_array((data.ravel(), ind), shape=v.shape)
else:
v_ = v.copy()
v_[ind] = 1 - v_[ind]
fe = self._free_energy(v)
fe_ = self._free_energy(v_)
# log(expit(x)) = log(1 / (1 + exp(-x))) = -np.logaddexp(0, -x)
return -v.shape[1] * np.logaddexp(0, -(fe_ - fe))
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None):
"""Fit the model to the data X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_outputs), default=None
Target values (None for unsupervised transformations).
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = validate_data(self, X, accept_sparse="csr", dtype=(np.float64, np.float32))
n_samples = X.shape[0]
rng = check_random_state(self.random_state)
self.components_ = np.asarray(
rng.normal(0, 0.01, (self.n_components, X.shape[1])),
order="F",
dtype=X.dtype,
)
self._n_features_out = self.components_.shape[0]
self.intercept_hidden_ = np.zeros(self.n_components, dtype=X.dtype)
self.intercept_visible_ = np.zeros(X.shape[1], dtype=X.dtype)
self.h_samples_ = np.zeros((self.batch_size, self.n_components), dtype=X.dtype)
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
batch_slices = list(
gen_even_slices(n_batches * self.batch_size, n_batches, n_samples=n_samples)
)
verbose = self.verbose
begin = time.time()
for iteration in range(1, self.n_iter + 1):
for batch_slice in batch_slices:
self._fit(X[batch_slice], rng)
if verbose:
end = time.time()
print(
"[%s] Iteration %d, pseudo-likelihood = %.2f, time = %.2fs"
% (
type(self).__name__,
iteration,
self.score_samples(X).mean(),
end - begin,
)
)
begin = end
return self
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.sparse = True
tags.transformer_tags.preserves_dtype = ["float64", "float32"]
return tags
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/neural_network/_base.py | sklearn/neural_network/_base.py | """Utilities for the neural network modules"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from scipy.special import expit as logistic_sigmoid
from scipy.special import xlogy
def inplace_identity(X):
"""Simply leave the input array unchanged.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Data, where `n_samples` is the number of samples
and `n_features` is the number of features.
"""
# Nothing to do
def inplace_exp(X):
"""Compute the exponential inplace.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
"""
np.exp(X, out=X)
def inplace_logistic(X):
"""Compute the logistic function inplace.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
"""
logistic_sigmoid(X, out=X)
def inplace_tanh(X):
"""Compute the hyperbolic tan function inplace.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
"""
np.tanh(X, out=X)
def inplace_relu(X):
"""Compute the rectified linear unit function inplace.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
"""
np.maximum(X, 0, out=X)
def inplace_softmax(X):
"""Compute the K-way softmax function inplace.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
"""
tmp = X - X.max(axis=1)[:, np.newaxis]
np.exp(tmp, out=X)
X /= X.sum(axis=1)[:, np.newaxis]
ACTIVATIONS = {
"identity": inplace_identity,
"exp": inplace_exp,
"tanh": inplace_tanh,
"logistic": inplace_logistic,
"relu": inplace_relu,
"softmax": inplace_softmax,
}
def inplace_identity_derivative(Z, delta):
"""Apply the derivative of the identity function: do nothing.
Parameters
----------
Z : {array-like, sparse matrix}, shape (n_samples, n_features)
The data which was output from the identity activation function during
the forward pass.
delta : {array-like}, shape (n_samples, n_features)
The backpropagated error signal to be modified inplace.
"""
# Nothing to do
def inplace_logistic_derivative(Z, delta):
"""Apply the derivative of the logistic sigmoid function.
It exploits the fact that the derivative is a simple function of the output
value from logistic function.
Parameters
----------
Z : {array-like, sparse matrix}, shape (n_samples, n_features)
The data which was output from the logistic activation function during
the forward pass.
delta : {array-like}, shape (n_samples, n_features)
The backpropagated error signal to be modified inplace.
"""
delta *= Z
delta *= 1 - Z
def inplace_tanh_derivative(Z, delta):
"""Apply the derivative of the hyperbolic tanh function.
It exploits the fact that the derivative is a simple function of the output
value from hyperbolic tangent.
Parameters
----------
Z : {array-like, sparse matrix}, shape (n_samples, n_features)
The data which was output from the hyperbolic tangent activation
function during the forward pass.
delta : {array-like}, shape (n_samples, n_features)
The backpropagated error signal to be modified inplace.
"""
delta *= 1 - Z**2
def inplace_relu_derivative(Z, delta):
"""Apply the derivative of the relu function.
It exploits the fact that the derivative is a simple function of the output
value from rectified linear units activation function.
Parameters
----------
Z : {array-like, sparse matrix}, shape (n_samples, n_features)
The data which was output from the rectified linear units activation
function during the forward pass.
delta : {array-like}, shape (n_samples, n_features)
The backpropagated error signal to be modified inplace.
"""
delta[Z == 0] = 0
DERIVATIVES = {
"identity": inplace_identity_derivative,
"tanh": inplace_tanh_derivative,
"logistic": inplace_logistic_derivative,
"relu": inplace_relu_derivative,
}
def squared_loss(y_true, y_pred, sample_weight=None):
"""Compute the squared loss for regression.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) values.
y_pred : array-like or label indicator matrix
Predicted values, as returned by a regression estimator.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
loss : float
The degree to which the samples are correctly predicted.
"""
return (
0.5 * np.average((y_true - y_pred) ** 2, weights=sample_weight, axis=0).mean()
)
def poisson_loss(y_true, y_pred, sample_weight=None):
"""Compute (half of the) Poisson deviance loss for regression.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels.
y_pred : array-like or label indicator matrix
Predicted values, as returned by a regression estimator.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
loss : float
The degree to which the samples are correctly predicted.
"""
# TODO: Decide what to do with the term `xlogy(y_true, y_true) - y_true`. For now,
# it is included. But the _loss module doesn't use it (for performance reasons) and
# only adds it as return of constant_to_optimal_zero (mainly for testing).
return np.average(
xlogy(y_true, y_true / y_pred) - y_true + y_pred, weights=sample_weight, axis=0
).sum()
def log_loss(y_true, y_prob, sample_weight=None):
"""Compute Logistic loss for classification.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels.
y_prob : array-like of float, shape = (n_samples, n_classes)
Predicted probabilities, as returned by a classifier's
predict_proba method.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
loss : float
The degree to which the samples are correctly predicted.
"""
eps = np.finfo(y_prob.dtype).eps
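# Clip predicted probabilities away from 0 and 1 so that the log terms below
# are always finite.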
y_prob = np.clip(y_prob, eps, 1 - eps)
if y_prob.shape[1] == 1:
y_prob = np.append(1 - y_prob, y_prob, axis=1)
if y_true.shape[1] == 1:
y_true = np.append(1 - y_true, y_true, axis=1)
return -np.average(xlogy(y_true, y_prob), weights=sample_weight, axis=0).sum()
def binary_log_loss(y_true, y_prob, sample_weight=None):
"""Compute binary logistic loss for classification.
This is identical to log_loss in the binary classification case,
but is kept for its use in the multilabel case.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels.
y_prob : array-like of float, shape = (n_samples, 1)
Predicted probabilities, as returned by a classifier's
predict_proba method.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
loss : float
The degree to which the samples are correctly predicted.
"""
eps = np.finfo(y_prob.dtype).eps
y_prob = np.clip(y_prob, eps, 1 - eps)
return -np.average(
xlogy(y_true, y_prob) + xlogy(1 - y_true, 1 - y_prob),
weights=sample_weight,
axis=0,
).sum()
LOSS_FUNCTIONS = {
"squared_error": squared_loss,
"poisson": poisson_loss,
"log_loss": log_loss,
"binary_log_loss": binary_log_loss,
}
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/neural_network/_stochastic_optimizers.py | sklearn/neural_network/_stochastic_optimizers.py | """Stochastic optimization methods for MLP"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
class BaseOptimizer:
"""Base (Stochastic) gradient descent optimizer
Parameters
----------
learning_rate_init : float, default=0.1
The initial learning rate used. It controls the step-size in updating
the weights
Attributes
----------
learning_rate : float
the current learning rate
"""
def __init__(self, learning_rate_init=0.1):
self.learning_rate_init = learning_rate_init
self.learning_rate = float(learning_rate_init)
def update_params(self, params, grads):
"""Update parameters with given gradients
Parameters
----------
params : list of length = len(coefs_) + len(intercepts_)
The concatenated list containing coefs_ and intercepts_ in MLP
model. Used for initializing velocities and updating params
grads : list of length = len(params)
Containing gradients with respect to coefs_ and intercepts_ in MLP
model. So length should be aligned with params
"""
updates = self._get_updates(grads)
for param, update in zip(params, updates):
param += update
def iteration_ends(self, time_step):
"""Perform update to learning rate and potentially other states at the
end of an iteration
"""
pass
def trigger_stopping(self, msg, verbose):
"""Decides whether it is time to stop training
Parameters
----------
msg : str
Message passed in for verbose output
verbose : bool
Print message to stdout if True
Returns
-------
is_stopping : bool
True if training needs to stop
"""
if verbose:
print(msg + " Stopping.")
return True
class SGDOptimizer(BaseOptimizer):
"""Stochastic gradient descent optimizer with momentum
Parameters
----------
params : list, length = len(coefs_) + len(intercepts_)
The concatenated list containing coefs_ and intercepts_ in MLP model.
Used for initializing velocities and updating params
learning_rate_init : float, default=0.1
The initial learning rate used. It controls the step-size in updating
the weights
lr_schedule : {'constant', 'adaptive', 'invscaling'}, default='constant'
Learning rate schedule for weight updates.
-'constant', is a constant learning rate given by
'learning_rate_init'.
-'invscaling' gradually decreases the learning rate 'learning_rate_' at
each time step 't' using an inverse scaling exponent of 'power_t'.
learning_rate_ = learning_rate_init / pow(t, power_t)
-'adaptive', keeps the learning rate constant at
'learning_rate_init' as long as the training loss keeps decreasing.
Each time 2 consecutive epochs fail to decrease the training loss by
tol, or fail to increase validation score by tol if 'early_stopping'
is on, the current learning rate is divided by 5.
momentum : float, default=0.9
Value of momentum used, must be larger than or equal to 0
nesterov : bool, default=True
Whether to use nesterov's momentum or not. Use nesterov's if True
power_t : float, default=0.5
Power of time step 't' in inverse scaling. See `lr_schedule` for
more details.
Attributes
----------
learning_rate : float
the current learning rate
velocities : list, length = len(params)
velocities that are used to update params
"""
def __init__(
self,
params,
learning_rate_init=0.1,
lr_schedule="constant",
momentum=0.9,
nesterov=True,
power_t=0.5,
):
super().__init__(learning_rate_init)
self.lr_schedule = lr_schedule
self.momentum = momentum
self.nesterov = nesterov
self.power_t = power_t
self.velocities = [np.zeros_like(param) for param in params]
def iteration_ends(self, time_step):
"""Perform updates to learning rate and potential other states at the
end of an iteration
Parameters
----------
time_step : int
number of training samples trained on so far, used to update
learning rate for 'invscaling'
"""
if self.lr_schedule == "invscaling":
self.learning_rate = (
float(self.learning_rate_init) / (time_step + 1) ** self.power_t
)
def trigger_stopping(self, msg, verbose):
if self.lr_schedule != "adaptive":
if verbose:
print(msg + " Stopping.")
return True
if self.learning_rate <= 1e-6:
if verbose:
print(msg + " Learning rate too small. Stopping.")
return True
self.learning_rate /= 5.0
if verbose:
print(msg + " Setting learning rate to %f" % self.learning_rate)
return False
def _get_updates(self, grads):
"""Get the values used to update params with given gradients
Parameters
----------
grads : list, length = len(coefs_) + len(intercepts_)
Containing gradients with respect to coefs_ and intercepts_ in MLP
model. So length should be aligned with params
Returns
-------
updates : list, length = len(grads)
The values to add to params
"""
updates = [
self.momentum * velocity - self.learning_rate * grad
for velocity, grad in zip(self.velocities, grads)
]
self.velocities = updates
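# With Nesterov momentum, the update actually applied is recomputed from the
# freshly updated velocities (a "look-ahead" step); otherwise the new
# velocities themselves are the update.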
if self.nesterov:
updates = [
self.momentum * velocity - self.learning_rate * grad
for velocity, grad in zip(self.velocities, grads)
]
return updates
class AdamOptimizer(BaseOptimizer):
"""Stochastic gradient descent optimizer with Adam
Note: All default values are from the original Adam paper
Parameters
----------
params : list, length = len(coefs_) + len(intercepts_)
The concatenated list containing coefs_ and intercepts_ in MLP model.
Used for initializing velocities and updating params
learning_rate_init : float, default=0.001
The initial learning rate used. It controls the step-size in updating
the weights
beta_1 : float, default=0.9
Exponential decay rate for estimates of first moment vector, should be
in [0, 1)
beta_2 : float, default=0.999
Exponential decay rate for estimates of second moment vector, should be
in [0, 1)
epsilon : float, default=1e-8
Value for numerical stability
Attributes
----------
learning_rate : float
The current learning rate
t : int
Timestep
ms : list, length = len(params)
First moment vectors
vs : list, length = len(params)
Second moment vectors
References
----------
:arxiv:`Kingma, Diederik, and Jimmy Ba (2014) "Adam: A method for
stochastic optimization." <1412.6980>
"""
def __init__(
self, params, learning_rate_init=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8
):
super().__init__(learning_rate_init)
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.t = 0
self.ms = [np.zeros_like(param) for param in params]
self.vs = [np.zeros_like(param) for param in params]
def _get_updates(self, grads):
"""Get the values used to update params with given gradients
Parameters
----------
grads : list, length = len(coefs_) + len(intercepts_)
Containing gradients with respect to coefs_ and intercepts_ in MLP
model. So length should be aligned with params
Returns
-------
updates : list, length = len(grads)
The values to add to params
"""
self.t += 1
self.ms = [
self.beta_1 * m + (1 - self.beta_1) * grad
for m, grad in zip(self.ms, grads)
]
self.vs = [
self.beta_2 * v + (1 - self.beta_2) * (grad**2)
for v, grad in zip(self.vs, grads)
]
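# The next statement folds Adam's bias correction of the first and second
# moment estimates into an effective step size, as in Algorithm 1 of the
# original paper: alpha_t = alpha * sqrt(1 - beta_2**t) / (1 - beta_1**t).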
self.learning_rate = (
self.learning_rate_init
* np.sqrt(1 - self.beta_2**self.t)
/ (1 - self.beta_1**self.t)
)
updates = [
-self.learning_rate * m / (np.sqrt(v) + self.epsilon)
for m, v in zip(self.ms, self.vs)
]
return updates
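# Minimal usage sketch (illustrative only, not part of scikit-learn): one plain
# momentum step on a toy parameter, mirroring SGDOptimizer._get_updates above.
if __name__ == "__main__":
    _params = [np.array([1.0, -2.0])]
    _opt = SGDOptimizer(_params, learning_rate_init=0.1, momentum=0.9, nesterov=False)
    _opt.update_params(_params, [np.array([0.5, -0.5])])
    # velocity = 0.9 * 0 - 0.1 * grad = [-0.05, 0.05], so params become [0.95, -1.95]
    print(_params[0])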
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/neural_network/_multilayer_perceptron.py | sklearn/neural_network/_multilayer_perceptron.py | """Multi-layer Perceptron"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from abc import ABC, abstractmethod
from itertools import chain, pairwise
from numbers import Integral, Real
import numpy as np
import scipy.optimize
from sklearn.base import (
BaseEstimator,
ClassifierMixin,
RegressorMixin,
_fit_context,
is_classifier,
)
from sklearn.exceptions import ConvergenceWarning
from sklearn.metrics import accuracy_score, r2_score
from sklearn.model_selection import train_test_split
from sklearn.neural_network._base import ACTIVATIONS, DERIVATIVES, LOSS_FUNCTIONS
from sklearn.neural_network._stochastic_optimizers import AdamOptimizer, SGDOptimizer
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import (
_safe_indexing,
check_random_state,
column_or_1d,
gen_batches,
shuffle,
)
from sklearn.utils._param_validation import Interval, Options, StrOptions
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.fixes import _get_additional_lbfgs_options_dict
from sklearn.utils.metaestimators import available_if
from sklearn.utils.multiclass import (
_check_partial_fit_first_call,
type_of_target,
unique_labels,
)
from sklearn.utils.optimize import _check_optimize_result
from sklearn.utils.validation import (
_check_sample_weight,
check_is_fitted,
validate_data,
)
_STOCHASTIC_SOLVERS = ["sgd", "adam"]
def _pack(coefs_, intercepts_):
"""Pack the parameters into a single vector."""
return np.hstack([l.ravel() for l in coefs_ + intercepts_])
class BaseMultilayerPerceptron(BaseEstimator, ABC):
"""Base class for MLP classification and regression.
Warning: This class should not be used directly.
Use derived classes instead.
.. versionadded:: 0.18
"""
_parameter_constraints: dict = {
"hidden_layer_sizes": [
"array-like",
Interval(Integral, 1, None, closed="left"),
],
"activation": [StrOptions({"identity", "logistic", "tanh", "relu"})],
"solver": [StrOptions({"lbfgs", "sgd", "adam"})],
"alpha": [Interval(Real, 0, None, closed="left")],
"batch_size": [
StrOptions({"auto"}),
Interval(Integral, 1, None, closed="left"),
],
"learning_rate": [StrOptions({"constant", "invscaling", "adaptive"})],
"learning_rate_init": [Interval(Real, 0, None, closed="neither")],
"power_t": [Interval(Real, 0, None, closed="left")],
"max_iter": [Interval(Integral, 1, None, closed="left")],
"shuffle": ["boolean"],
"random_state": ["random_state"],
"tol": [Interval(Real, 0, None, closed="left")],
"verbose": ["verbose"],
"warm_start": ["boolean"],
"momentum": [Interval(Real, 0, 1, closed="both")],
"nesterovs_momentum": ["boolean"],
"early_stopping": ["boolean"],
"validation_fraction": [Interval(Real, 0, 1, closed="left")],
"beta_1": [Interval(Real, 0, 1, closed="left")],
"beta_2": [Interval(Real, 0, 1, closed="left")],
"epsilon": [Interval(Real, 0, None, closed="neither")],
"n_iter_no_change": [
Interval(Integral, 1, None, closed="left"),
Options(Real, {np.inf}),
],
"max_fun": [Interval(Integral, 1, None, closed="left")],
}
@abstractmethod
def __init__(
self,
hidden_layer_sizes,
activation,
solver,
alpha,
batch_size,
learning_rate,
learning_rate_init,
power_t,
max_iter,
loss,
shuffle,
random_state,
tol,
verbose,
warm_start,
momentum,
nesterovs_momentum,
early_stopping,
validation_fraction,
beta_1,
beta_2,
epsilon,
n_iter_no_change,
max_fun,
):
self.activation = activation
self.solver = solver
self.alpha = alpha
self.batch_size = batch_size
self.learning_rate = learning_rate
self.learning_rate_init = learning_rate_init
self.power_t = power_t
self.max_iter = max_iter
self.loss = loss
self.hidden_layer_sizes = hidden_layer_sizes
self.shuffle = shuffle
self.random_state = random_state
self.tol = tol
self.verbose = verbose
self.warm_start = warm_start
self.momentum = momentum
self.nesterovs_momentum = nesterovs_momentum
self.early_stopping = early_stopping
self.validation_fraction = validation_fraction
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.n_iter_no_change = n_iter_no_change
self.max_fun = max_fun
def _unpack(self, packed_parameters):
"""Extract the coefficients and intercepts from packed_parameters."""
for i in range(self.n_layers_ - 1):
start, end, shape = self._coef_indptr[i]
self.coefs_[i] = np.reshape(packed_parameters[start:end], shape)
start, end = self._intercept_indptr[i]
self.intercepts_[i] = packed_parameters[start:end]
def _forward_pass(self, activations):
"""Perform a forward pass on the network by computing the values
of the neurons in the hidden layers and the output layer.
Parameters
----------
activations : list, length = n_layers - 1
The ith element of the list holds the values of the ith layer.
"""
hidden_activation = ACTIVATIONS[self.activation]
# Iterate over the hidden layers
for i in range(self.n_layers_ - 1):
activations[i + 1] = safe_sparse_dot(activations[i], self.coefs_[i])
activations[i + 1] += self.intercepts_[i]
# For the hidden layers
if (i + 1) != (self.n_layers_ - 1):
hidden_activation(activations[i + 1])
# For the last layer
output_activation = ACTIVATIONS[self.out_activation_]
output_activation(activations[i + 1])
return activations
def _forward_pass_fast(self, X, check_input=True):
"""Predict using the trained model
This is the same as _forward_pass but does not record the activations
of all layers and only returns the last layer's activation.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
check_input : bool, default=True
Perform input data validation or not.
Returns
-------
y_pred : ndarray of shape (n_samples,) or (n_samples, n_outputs)
The decision function of the samples for each class in the model.
"""
if check_input:
X = validate_data(self, X, accept_sparse=["csr", "csc"], reset=False)
# Initialize first layer
activation = X
# Forward propagate
hidden_activation = ACTIVATIONS[self.activation]
for i in range(self.n_layers_ - 1):
activation = safe_sparse_dot(activation, self.coefs_[i])
activation += self.intercepts_[i]
if i != self.n_layers_ - 2:
hidden_activation(activation)
output_activation = ACTIVATIONS[self.out_activation_]
output_activation(activation)
return activation
def _compute_loss_grad(
self, layer, sw_sum, activations, deltas, coef_grads, intercept_grads
):
"""Compute the gradient of loss with respect to coefs and intercept for
specified layer.
This function does backpropagation for the specified one layer.
"""
coef_grads[layer] = safe_sparse_dot(activations[layer].T, deltas[layer])
coef_grads[layer] += self.alpha * self.coefs_[layer]
coef_grads[layer] /= sw_sum
intercept_grads[layer] = np.sum(deltas[layer], axis=0) / sw_sum
def _loss_grad_lbfgs(
self,
packed_coef_inter,
X,
y,
sample_weight,
activations,
deltas,
coef_grads,
intercept_grads,
):
"""Compute the MLP loss function and its corresponding derivatives
with respect to the different parameters given in the initialization.
Returned gradients are packed in a single vector so it can be used
in lbfgs
Parameters
----------
packed_coef_inter : ndarray
A vector comprising the flattened coefficients and intercepts.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
y : ndarray of shape (n_samples,)
The target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
activations : list, length = n_layers - 1
The ith element of the list holds the values of the ith layer.
deltas : list, length = n_layers - 1
The ith element of the list holds the difference between the
activations of the i + 1 layer and the backpropagated error.
More specifically, deltas are gradients of loss with respect to z
in each layer, where z = wx + b is the value of a particular layer
before passing through the activation function
coef_grads : list, length = n_layers - 1
The ith element contains the amount of change used to update the
coefficient parameters of the ith layer in an iteration.
intercept_grads : list, length = n_layers - 1
The ith element contains the amount of change used to update the
intercept parameters of the ith layer in an iteration.
Returns
-------
loss : float
grad : array-like, shape (number of nodes of all layers,)
"""
self._unpack(packed_coef_inter)
loss, coef_grads, intercept_grads = self._backprop(
X, y, sample_weight, activations, deltas, coef_grads, intercept_grads
)
grad = _pack(coef_grads, intercept_grads)
return loss, grad
def _backprop(
self, X, y, sample_weight, activations, deltas, coef_grads, intercept_grads
):
"""Compute the MLP loss function and its corresponding derivatives
with respect to each parameter: weights and bias vectors.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input data.
y : ndarray of shape (n_samples,)
The target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
activations : list, length = n_layers - 1
The ith element of the list holds the values of the ith layer.
deltas : list, length = n_layers - 1
The ith element of the list holds the difference between the
activations of the i + 1 layer and the backpropagated error.
More specifically, deltas are gradients of loss with respect to z
in each layer, where z = wx + b is the value of a particular layer
before passing through the activation function
coef_grads : list, length = n_layers - 1
The ith element contains the amount of change used to update the
coefficient parameters of the ith layer in an iteration.
intercept_grads : list, length = n_layers - 1
The ith element contains the amount of change used to update the
intercept parameters of the ith layer in an iteration.
Returns
-------
loss : float
coef_grads : list, length = n_layers - 1
intercept_grads : list, length = n_layers - 1
"""
n_samples = X.shape[0]
# Forward propagate
activations = self._forward_pass(activations)
# Get loss
loss_func_name = self.loss
if loss_func_name == "log_loss" and self.out_activation_ == "logistic":
loss_func_name = "binary_log_loss"
loss = LOSS_FUNCTIONS[loss_func_name](y, activations[-1], sample_weight)
# Add L2 regularization term to loss
values = 0
for s in self.coefs_:
s = s.ravel()
values += np.dot(s, s)
if sample_weight is None:
sw_sum = n_samples
else:
sw_sum = sample_weight.sum()
loss += (0.5 * self.alpha) * values / sw_sum
# Backward propagate
last = self.n_layers_ - 2
# The calculation of delta[last] is as follows:
# delta[last] = d/dz loss(y, act(z)) = act(z) - y
# with z=x@w + b being the output of the last layer before passing through the
# output activation, act(z) = activations[-1].
# The simple formula for delta[last] here works with the following (canonical
# loss-link) combinations of output activation and loss function:
# sigmoid and binary cross entropy, softmax and categorical cross
# entropy, and identity with squared loss
deltas[last] = activations[-1] - y
if sample_weight is not None:
deltas[last] *= sample_weight.reshape(-1, 1)
# Compute gradient for the last layer
self._compute_loss_grad(
last, sw_sum, activations, deltas, coef_grads, intercept_grads
)
inplace_derivative = DERIVATIVES[self.activation]
# Iterate over the hidden layers
for i in range(last, 0, -1):
deltas[i - 1] = safe_sparse_dot(deltas[i], self.coefs_[i].T)
inplace_derivative(activations[i], deltas[i - 1])
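# deltas[i - 1] now equals (deltas[i] @ W_i.T) * g'(z_i), the standard
# backpropagation recursion, with g' evaluated via the stored activations
# of layer i.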
self._compute_loss_grad(
i - 1, sw_sum, activations, deltas, coef_grads, intercept_grads
)
return loss, coef_grads, intercept_grads
def _initialize(self, y, layer_units, dtype):
# set all attributes, allocate weights etc. for first call
# Initialize parameters
self.n_iter_ = 0
self.t_ = 0
self.n_outputs_ = y.shape[1]
# Compute the number of layers
self.n_layers_ = len(layer_units)
# Output for regression
if not is_classifier(self):
if self.loss == "poisson":
self.out_activation_ = "exp"
else:
# loss = "squared_error"
self.out_activation_ = "identity"
# Output for multi class
elif self._label_binarizer.y_type_ == "multiclass":
self.out_activation_ = "softmax"
# Output for binary class and multi-label
else:
self.out_activation_ = "logistic"
# Initialize coefficient and intercept layers
self.coefs_ = []
self.intercepts_ = []
for i in range(self.n_layers_ - 1):
coef_init, intercept_init = self._init_coef(
layer_units[i], layer_units[i + 1], dtype
)
self.coefs_.append(coef_init)
self.intercepts_.append(intercept_init)
self._best_coefs = [c.copy() for c in self.coefs_]
self._best_intercepts = [i.copy() for i in self.intercepts_]
if self.solver in _STOCHASTIC_SOLVERS:
self.loss_curve_ = []
self._no_improvement_count = 0
if self.early_stopping:
self.validation_scores_ = []
self.best_validation_score_ = -np.inf
self.best_loss_ = None
else:
self.best_loss_ = np.inf
self.validation_scores_ = None
self.best_validation_score_ = None
def _init_coef(self, fan_in, fan_out, dtype):
# Use the initialization method recommended by
# Glorot et al.
factor = 6.0
if self.activation == "logistic":
factor = 2.0
init_bound = np.sqrt(factor / (fan_in + fan_out))
# Generate weights and bias:
coef_init = self._random_state.uniform(
-init_bound, init_bound, (fan_in, fan_out)
)
intercept_init = self._random_state.uniform(-init_bound, init_bound, fan_out)
coef_init = coef_init.astype(dtype, copy=False)
intercept_init = intercept_init.astype(dtype, copy=False)
return coef_init, intercept_init
def _fit(self, X, y, sample_weight=None, incremental=False):
# Make sure self.hidden_layer_sizes is a list
hidden_layer_sizes = self.hidden_layer_sizes
if not hasattr(hidden_layer_sizes, "__iter__"):
hidden_layer_sizes = [hidden_layer_sizes]
hidden_layer_sizes = list(hidden_layer_sizes)
if np.any(np.array(hidden_layer_sizes) <= 0):
raise ValueError(
"hidden_layer_sizes must be > 0, got %s." % hidden_layer_sizes
)
first_pass = not hasattr(self, "coefs_") or (
not self.warm_start and not incremental
)
X, y = self._validate_input(X, y, incremental, reset=first_pass)
n_samples, n_features = X.shape
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
# Ensure y is 2D
if y.ndim == 1:
y = y.reshape((-1, 1))
self.n_outputs_ = y.shape[1]
layer_units = [n_features] + hidden_layer_sizes + [self.n_outputs_]
# check random state
self._random_state = check_random_state(self.random_state)
if first_pass:
# First time training the model
self._initialize(y, layer_units, X.dtype)
# Initialize lists
activations = [X] + [None] * (len(layer_units) - 1)
deltas = [None] * (len(activations) - 1)
coef_grads = [
np.empty((n_fan_in_, n_fan_out_), dtype=X.dtype)
for n_fan_in_, n_fan_out_ in pairwise(layer_units)
]
intercept_grads = [
np.empty(n_fan_out_, dtype=X.dtype) for n_fan_out_ in layer_units[1:]
]
# Run the Stochastic optimization solver
if self.solver in _STOCHASTIC_SOLVERS:
self._fit_stochastic(
X,
y,
sample_weight,
activations,
deltas,
coef_grads,
intercept_grads,
layer_units,
incremental,
)
# Run the LBFGS solver
elif self.solver == "lbfgs":
self._fit_lbfgs(
X,
y,
sample_weight,
activations,
deltas,
coef_grads,
intercept_grads,
layer_units,
)
# validate parameter weights
weights = chain(self.coefs_, self.intercepts_)
if not all(np.isfinite(w).all() for w in weights):
raise ValueError(
"Solver produced non-finite parameter weights. The input data may"
" contain large values and need to be preprocessed."
)
return self
def _fit_lbfgs(
self,
X,
y,
sample_weight,
activations,
deltas,
coef_grads,
intercept_grads,
layer_units,
):
# Store meta information for the parameters
self._coef_indptr = []
self._intercept_indptr = []
start = 0
# Save sizes and indices of coefficients for faster unpacking
for i in range(self.n_layers_ - 1):
n_fan_in, n_fan_out = layer_units[i], layer_units[i + 1]
end = start + (n_fan_in * n_fan_out)
self._coef_indptr.append((start, end, (n_fan_in, n_fan_out)))
start = end
# Save sizes and indices of intercepts for faster unpacking
for i in range(self.n_layers_ - 1):
end = start + layer_units[i + 1]
self._intercept_indptr.append((start, end))
start = end
# Run LBFGS
packed_coef_inter = _pack(self.coefs_, self.intercepts_)
if self.verbose is True or self.verbose >= 1:
iprint = 1
else:
iprint = -1
opt_res = scipy.optimize.minimize(
self._loss_grad_lbfgs,
packed_coef_inter,
method="L-BFGS-B",
jac=True,
options={
"maxfun": self.max_fun,
"maxiter": self.max_iter,
"gtol": self.tol,
**_get_additional_lbfgs_options_dict("iprint", iprint),
},
args=(
X,
y,
sample_weight,
activations,
deltas,
coef_grads,
intercept_grads,
),
)
self.n_iter_ = _check_optimize_result("lbfgs", opt_res, self.max_iter)
self.loss_ = opt_res.fun
self._unpack(opt_res.x)
def _fit_stochastic(
self,
X,
y,
sample_weight,
activations,
deltas,
coef_grads,
intercept_grads,
layer_units,
incremental,
):
params = self.coefs_ + self.intercepts_
if not incremental or not hasattr(self, "_optimizer"):
if self.solver == "sgd":
self._optimizer = SGDOptimizer(
params,
self.learning_rate_init,
self.learning_rate,
self.momentum,
self.nesterovs_momentum,
self.power_t,
)
elif self.solver == "adam":
self._optimizer = AdamOptimizer(
params,
self.learning_rate_init,
self.beta_1,
self.beta_2,
self.epsilon,
)
# early_stopping in partial_fit doesn't make sense
if self.early_stopping and incremental:
raise ValueError("partial_fit does not support early_stopping=True")
early_stopping = self.early_stopping
if early_stopping:
# don't stratify in multilabel classification
should_stratify = is_classifier(self) and self.n_outputs_ == 1
stratify = y if should_stratify else None
if sample_weight is None:
X_train, X_val, y_train, y_val = train_test_split(
X,
y,
random_state=self._random_state,
test_size=self.validation_fraction,
stratify=stratify,
)
sample_weight_train = sample_weight_val = None
else:
# TODO: incorporate sample_weight in sampling here.
(
X_train,
X_val,
y_train,
y_val,
sample_weight_train,
sample_weight_val,
) = train_test_split(
X,
y,
sample_weight,
random_state=self._random_state,
test_size=self.validation_fraction,
stratify=stratify,
)
if X_val.shape[0] < 2:
raise ValueError(
"The validation set is too small. Increase 'validation_fraction' "
"or the size of your dataset."
)
if is_classifier(self):
y_val = self._label_binarizer.inverse_transform(y_val)
else:
X_train, y_train, sample_weight_train = X, y, sample_weight
X_val = y_val = sample_weight_val = None
n_samples = X_train.shape[0]
sample_idx = np.arange(n_samples, dtype=int)
if self.batch_size == "auto":
batch_size = min(200, n_samples)
else:
if self.batch_size > n_samples:
warnings.warn(
"Got `batch_size` less than 1 or larger than "
"sample size. It is going to be clipped"
)
batch_size = np.clip(self.batch_size, 1, n_samples)
try:
self.n_iter_ = 0
for it in range(self.max_iter):
if self.shuffle:
# Only shuffle the sample indices instead of X and y to
# reduce the memory footprint. These indices will be used
# to slice the X and y.
sample_idx = shuffle(sample_idx, random_state=self._random_state)
accumulated_loss = 0.0
for batch_slice in gen_batches(n_samples, batch_size):
if self.shuffle:
batch_idx = sample_idx[batch_slice]
X_batch = _safe_indexing(X_train, batch_idx)
else:
batch_idx = batch_slice
X_batch = X_train[batch_idx]
y_batch = y_train[batch_idx]
if sample_weight is None:
sample_weight_batch = None
else:
sample_weight_batch = sample_weight_train[batch_idx]
activations[0] = X_batch
batch_loss, coef_grads, intercept_grads = self._backprop(
X_batch,
y_batch,
sample_weight_batch,
activations,
deltas,
coef_grads,
intercept_grads,
)
accumulated_loss += batch_loss * (
batch_slice.stop - batch_slice.start
)
# update weights
grads = coef_grads + intercept_grads
self._optimizer.update_params(params, grads)
self.n_iter_ += 1
self.loss_ = accumulated_loss / X_train.shape[0]
self.t_ += n_samples
self.loss_curve_.append(self.loss_)
if self.verbose:
print("Iteration %d, loss = %.8f" % (self.n_iter_, self.loss_))
# update no_improvement_count based on training loss or
# validation score according to early_stopping
self._update_no_improvement_count(
early_stopping, X_val, y_val, sample_weight_val
)
# for learning rate that needs to be updated at iteration end
self._optimizer.iteration_ends(self.t_)
if self._no_improvement_count > self.n_iter_no_change:
# not better than last `n_iter_no_change` iterations by tol
# stop or decrease learning rate
if early_stopping:
msg = (
"Validation score did not improve more than "
"tol=%f for %d consecutive epochs."
% (self.tol, self.n_iter_no_change)
)
else:
msg = (
"Training loss did not improve more than tol=%f"
" for %d consecutive epochs."
% (self.tol, self.n_iter_no_change)
)
is_stopping = self._optimizer.trigger_stopping(msg, self.verbose)
if is_stopping:
break
else:
self._no_improvement_count = 0
if incremental:
break
if self.n_iter_ == self.max_iter:
warnings.warn(
"Stochastic Optimizer: Maximum iterations (%d) "
"reached and the optimization hasn't converged yet."
% self.max_iter,
ConvergenceWarning,
)
except KeyboardInterrupt:
warnings.warn("Training interrupted by user.")
if early_stopping:
# restore best weights
self.coefs_ = self._best_coefs
self.intercepts_ = self._best_intercepts
def _update_no_improvement_count(self, early_stopping, X, y, sample_weight):
if early_stopping:
# compute validation score (can be NaN), use that for stopping
val_score = self._score(X, y, sample_weight=sample_weight)
self.validation_scores_.append(val_score)
if self.verbose:
print("Validation score: %f" % self.validation_scores_[-1])
# update best parameters
# use validation_scores_, not loss_curve_
# let's hope no-one overloads .score with mse
last_valid_score = self.validation_scores_[-1]
if last_valid_score < (self.best_validation_score_ + self.tol):
self._no_improvement_count += 1
else:
self._no_improvement_count = 0
if last_valid_score > self.best_validation_score_:
self.best_validation_score_ = last_valid_score
self._best_coefs = [c.copy() for c in self.coefs_]
self._best_intercepts = [i.copy() for i in self.intercepts_]
else:
if self.loss_curve_[-1] > self.best_loss_ - self.tol:
self._no_improvement_count += 1
else:
self._no_improvement_count = 0
if self.loss_curve_[-1] < self.best_loss_:
self.best_loss_ = self.loss_curve_[-1]
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y, sample_weight=None):
"""Fit the model to data matrix X and target(s) y.
Parameters
----------
X : ndarray or sparse matrix of shape (n_samples, n_features)
The input data.
y : ndarray of shape (n_samples,) or (n_samples, n_outputs)
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
.. versionadded:: 1.7
Returns
-------
self : object
Returns a trained MLP model.
"""
return self._fit(X, y, sample_weight=sample_weight, incremental=False)
def _check_solver(self):
if self.solver not in _STOCHASTIC_SOLVERS:
raise AttributeError(
"partial_fit is only available for stochastic"
" optimizers. %s is not stochastic." % self.solver
)
return True
def _score_with_function(self, X, y, sample_weight, score_function):
"""Private score method without input validation."""
# Input validation would remove feature names, so we disable it
y_pred = self._predict(X, check_input=False)
if np.isnan(y_pred).any() or np.isinf(y_pred).any():
return np.nan
return score_function(y, y_pred, sample_weight=sample_weight)
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.sparse = True
return tags
class MLPClassifier(ClassifierMixin, BaseMultilayerPerceptron):
"""Multi-layer Perceptron classifier.
This model optimizes the log-loss function using LBFGS or stochastic
gradient descent.
.. versionadded:: 0.18
Parameters
----------
hidden_layer_sizes : array-like of shape(n_layers - 2,), default=(100,)
The ith element represents the number of neurons in the ith
hidden layer.
activation : {'identity', 'logistic', 'tanh', 'relu'}, default='relu'
Activation function for the hidden layer.
- 'identity', no-op activation, useful to implement linear bottleneck,
returns f(x) = x
- 'logistic', the logistic sigmoid function,
returns f(x) = 1 / (1 + exp(-x)).
- 'tanh', the hyperbolic tan function,
returns f(x) = tanh(x).
- 'relu', the rectified linear unit function,
returns f(x) = max(0, x)
solver : {'lbfgs', 'sgd', 'adam'}, default='adam'
The solver for weight optimization.
- 'lbfgs' is an optimizer in the family of quasi-Newton methods.
- 'sgd' refers to stochastic gradient descent.
- 'adam' refers to a stochastic gradient-based optimizer proposed
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/neural_network/__init__.py | sklearn/neural_network/__init__.py | """Models based on neural networks."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from sklearn.neural_network._multilayer_perceptron import MLPClassifier, MLPRegressor
from sklearn.neural_network._rbm import BernoulliRBM
__all__ = ["BernoulliRBM", "MLPClassifier", "MLPRegressor"]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/neural_network/tests/test_base.py | sklearn/neural_network/tests/test_base.py | import numpy as np
import pytest
from sklearn._loss import HalfPoissonLoss
from sklearn.neural_network._base import binary_log_loss, log_loss, poisson_loss
def test_binary_log_loss_1_prob_finite():
# y_prob equal to one should still result in a finite log loss
y_true = np.array([[0, 0, 1]]).T
y_prob = np.array([[0.9, 1.0, 1.0]]).T
loss = binary_log_loss(y_true, y_prob)
assert np.isfinite(loss)
@pytest.mark.parametrize(
"y_true, y_prob",
[
(
np.array([[1, 0, 0], [0, 1, 0]]),
np.array([[0.0, 1.0, 0.0], [0.9, 0.05, 0.05]]),
),
(np.array([[0, 0, 1]]).T, np.array([[0.9, 1.0, 1.0]]).T),
],
)
def test_log_loss_1_prob_finite(y_true, y_prob):
# y_prob equal to 1 should still result in a finite log loss
loss = log_loss(y_true, y_prob)
assert np.isfinite(loss)
def test_poisson_loss(global_random_seed):
"""Test Poisson loss against well tested HalfPoissonLoss."""
n = 1000
rng = np.random.default_rng(global_random_seed)
y_true = rng.integers(low=0, high=10, size=n).astype(float)
y_raw = rng.standard_normal(n)
y_pred = np.exp(y_raw)
sw = rng.uniform(low=0.1, high=10, size=n)
assert 0 in y_true
loss = poisson_loss(y_true=y_true, y_pred=y_pred, sample_weight=sw)
pl = HalfPoissonLoss()
loss_ref = (
pl(y_true=y_true, raw_prediction=y_raw, sample_weight=sw)
+ pl.constant_to_optimal_zero(y_true=y_true, sample_weight=sw).mean()
/ sw.mean()
)
assert loss == pytest.approx(loss_ref, rel=1e-12)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/neural_network/tests/test_mlp.py | sklearn/neural_network/tests/test_mlp.py | """
Testing for Multi-layer Perceptron module (sklearn.neural_network)
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import re
import warnings
import joblib
import numpy as np
import pytest
from sklearn.datasets import (
load_digits,
load_iris,
make_multilabel_classification,
make_regression,
)
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model import PoissonRegressor
from sklearn.metrics import roc_auc_score
from sklearn.neural_network import MLPClassifier, MLPRegressor
from sklearn.preprocessing import LabelBinarizer, MinMaxScaler, scale
from sklearn.utils._testing import (
assert_allclose,
assert_almost_equal,
assert_array_equal,
ignore_warnings,
)
from sklearn.utils.fixes import CSR_CONTAINERS
ACTIVATION_TYPES = ["identity", "logistic", "tanh", "relu"]
X_digits, y_digits = load_digits(n_class=3, return_X_y=True)
X_digits_multi = MinMaxScaler().fit_transform(X_digits[:200])
y_digits_multi = y_digits[:200]
X_digits, y_digits = load_digits(n_class=2, return_X_y=True)
X_digits_binary = MinMaxScaler().fit_transform(X_digits[:200])
y_digits_binary = y_digits[:200]
classification_datasets = [
(X_digits_multi, y_digits_multi),
(X_digits_binary, y_digits_binary),
]
X_reg, y_reg = make_regression(
n_samples=200, n_features=10, bias=20.0, noise=100.0, random_state=7
)
y_reg = scale(y_reg)
regression_datasets = [(X_reg, y_reg)]
iris = load_iris()
X_iris = iris.data
y_iris = iris.target
def test_alpha():
# Test that larger alpha yields weights closer to zero
X = X_digits_binary[:100]
y = y_digits_binary[:100]
alpha_vectors = []
alpha_values = np.arange(2)
absolute_sum = lambda x: np.sum(np.abs(x))
for alpha in alpha_values:
mlp = MLPClassifier(hidden_layer_sizes=10, alpha=alpha, random_state=1)
with ignore_warnings(category=ConvergenceWarning):
mlp.fit(X, y)
alpha_vectors.append(
np.array([absolute_sum(mlp.coefs_[0]), absolute_sum(mlp.coefs_[1])])
)
for i in range(len(alpha_values) - 1):
assert (alpha_vectors[i] > alpha_vectors[i + 1]).all()
def test_fit():
# Test that the algorithm solution is equal to a worked out example.
X = np.array([[0.6, 0.8, 0.7]])
y = np.array([0])
mlp = MLPClassifier(
solver="sgd",
learning_rate_init=0.1,
alpha=0.1,
activation="logistic",
random_state=1,
max_iter=1,
hidden_layer_sizes=2,
momentum=0,
)
# set weights
mlp.coefs_ = [0] * 2
mlp.intercepts_ = [0] * 2
mlp.n_outputs_ = 1
mlp.coefs_[0] = np.array([[0.1, 0.2], [0.3, 0.1], [0.5, 0]])
mlp.coefs_[1] = np.array([[0.1], [0.2]])
mlp.intercepts_[0] = np.array([0.1, 0.1])
mlp.intercepts_[1] = np.array([1.0])
mlp._coef_grads = [] * 2
mlp._intercept_grads = [] * 2
mlp.n_features_in_ = 3
# Initialize parameters
mlp.n_iter_ = 0
mlp.learning_rate_ = 0.1
# Compute the number of layers
mlp.n_layers_ = 3
# Pre-allocate gradient matrices
mlp._coef_grads = [0] * (mlp.n_layers_ - 1)
mlp._intercept_grads = [0] * (mlp.n_layers_ - 1)
mlp.out_activation_ = "logistic"
mlp.t_ = 0
mlp.best_loss_ = np.inf
mlp.loss_curve_ = []
mlp._no_improvement_count = 0
mlp._intercept_velocity = [
np.zeros_like(intercepts) for intercepts in mlp.intercepts_
]
mlp._coef_velocity = [np.zeros_like(coefs) for coefs in mlp.coefs_]
mlp.partial_fit(X, y, classes=[0, 1])
# Manually worked out example
# h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.1 + 0.8 * 0.3 + 0.7 * 0.5 + 0.1)
# = 0.679178699175393
# h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.2 + 0.8 * 0.1 + 0.7 * 0 + 0.1)
# = 0.574442516811659
# o1 = g(h * W2 + b21) = g(0.679 * 0.1 + 0.574 * 0.2 + 1)
# = 0.7654329236196236
# d21 = -(0 - 0.765) = 0.765
# d11 = (1 - 0.679) * 0.679 * 0.765 * 0.1 = 0.01667
# d12 = (1 - 0.574) * 0.574 * 0.765 * 0.2 = 0.0374
# W1grad11 = X1 * d11 + alpha * W11 = 0.6 * 0.01667 + 0.1 * 0.1 = 0.0200
# W1grad12 = X1 * d12 + alpha * W12 = 0.6 * 0.0374 + 0.1 * 0.2 = 0.04244
# W1grad21 = X2 * d11 + alpha * W13 = 0.8 * 0.01667 + 0.1 * 0.3 = 0.043336
# W1grad22 = X2 * d12 + alpha * W14 = 0.8 * 0.0374 + 0.1 * 0.1 = 0.03992
# W1grad31 = X3 * d11 + alpha * W15 = 0.6 * 0.01667 + 0.1 * 0.5 = 0.060002
# W1grad32 = X3 * d12 + alpha * W16 = 0.6 * 0.0374 + 0.1 * 0 = 0.02244
# W2grad1 = h1 * d21 + alpha * W21 = 0.679 * 0.765 + 0.1 * 0.1 = 0.5294
# W2grad2 = h2 * d21 + alpha * W22 = 0.574 * 0.765 + 0.1 * 0.2 = 0.45911
# b1grad1 = d11 = 0.01667
# b1grad2 = d12 = 0.0374
# b2grad = d21 = 0.765
# W1 = W1 - eta * [W1grad11, .., W1grad32] = [[0.1, 0.2], [0.3, 0.1],
# [0.5, 0]] - 0.1 * [[0.0200, 0.04244], [0.043336, 0.03992],
# [0.060002, 0.02244]] = [[0.098, 0.195756], [0.2956664,
# 0.096008], [0.4939998, -0.002244]]
# W2 = W2 - eta * [W2grad1, W2grad2] = [[0.1], [0.2]] - 0.1 *
# [[0.5294], [0.45911]] = [[0.04706], [0.154089]]
# b1 = b1 - eta * [b1grad1, b1grad2] = 0.1 - 0.1 * [0.01667, 0.0374]
# = [0.098333, 0.09626]
# b2 = b2 - eta * b2grad = 1.0 - 0.1 * 0.765 = 0.9235
assert_almost_equal(
mlp.coefs_[0],
np.array([[0.098, 0.195756], [0.2956664, 0.096008], [0.4939998, -0.002244]]),
decimal=3,
)
assert_almost_equal(mlp.coefs_[1], np.array([[0.04706], [0.154089]]), decimal=3)
assert_almost_equal(mlp.intercepts_[0], np.array([0.098333, 0.09626]), decimal=3)
assert_almost_equal(mlp.intercepts_[1], np.array(0.9235), decimal=3)
# Testing output
# h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.098 + 0.8 * 0.2956664 +
# 0.7 * 0.4939998 + 0.098333) = 0.677
# h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.195756 + 0.8 * 0.096008 +
# 0.7 * -0.002244 + 0.09626) = 0.572
# o1 = h * W2 + b21 = 0.677 * 0.04706 +
# 0.572 * 0.154089 + 0.9235 = 1.043
# prob = sigmoid(o1) = 0.739
assert_almost_equal(mlp.predict_proba(X)[0, 1], 0.739, decimal=3)
def test_gradient():
# Test gradient.
# This makes sure that the activation functions and their derivatives
# are correct. The numerical and analytical computation of the gradient
# should be close.
for n_labels in [2, 3]:
n_samples = 5
n_features = 10
random_state = np.random.RandomState(seed=42)
X = random_state.rand(n_samples, n_features)
y = 1 + np.mod(np.arange(n_samples) + 1, n_labels)
Y = LabelBinarizer().fit_transform(y)
for activation in ACTIVATION_TYPES:
mlp = MLPClassifier(
activation=activation,
hidden_layer_sizes=10,
solver="lbfgs",
alpha=1e-5,
learning_rate_init=0.2,
max_iter=1,
random_state=1,
)
with warnings.catch_warnings():
warnings.simplefilter("ignore", ConvergenceWarning)
mlp.fit(X, y)
theta = np.hstack([l.ravel() for l in mlp.coefs_ + mlp.intercepts_])
layer_units = [X.shape[1]] + [mlp.hidden_layer_sizes] + [mlp.n_outputs_]
activations = []
deltas = []
coef_grads = []
intercept_grads = []
activations.append(X)
for i in range(mlp.n_layers_ - 1):
activations.append(np.empty((X.shape[0], layer_units[i + 1])))
deltas.append(np.empty((X.shape[0], layer_units[i + 1])))
fan_in = layer_units[i]
fan_out = layer_units[i + 1]
coef_grads.append(np.empty((fan_in, fan_out)))
intercept_grads.append(np.empty(fan_out))
# analytically compute the gradients
def loss_grad_fun(t):
return mlp._loss_grad_lbfgs(
t, X, Y, None, activations, deltas, coef_grads, intercept_grads
)
[value, grad] = loss_grad_fun(theta)
numgrad = np.zeros(np.size(theta))
n = np.size(theta, 0)
E = np.eye(n)
epsilon = 1e-5
# numerically compute the gradients
for i in range(n):
dtheta = E[:, i] * epsilon
numgrad[i] = (
loss_grad_fun(theta + dtheta)[0] - loss_grad_fun(theta - dtheta)[0]
) / (epsilon * 2.0)
assert_almost_equal(numgrad, grad)
@pytest.mark.parametrize("X,y", classification_datasets)
def test_lbfgs_classification(X, y):
# Test lbfgs on classification.
# It should achieve a score higher than 0.95 for the binary and multi-class
# versions of the digits dataset.
X_train = X[:150]
y_train = y[:150]
X_test = X[150:]
expected_shape_dtype = (X_test.shape[0], y_train.dtype.kind)
for activation in ACTIVATION_TYPES:
mlp = MLPClassifier(
solver="lbfgs",
hidden_layer_sizes=50,
max_iter=150,
shuffle=True,
random_state=1,
activation=activation,
)
mlp.fit(X_train, y_train)
y_predict = mlp.predict(X_test)
assert mlp.score(X_train, y_train) > 0.95
assert (y_predict.shape[0], y_predict.dtype.kind) == expected_shape_dtype
@pytest.mark.parametrize("X,y", regression_datasets)
def test_lbfgs_regression(X, y):
# Test lbfgs on the regression dataset.
for activation in ACTIVATION_TYPES:
mlp = MLPRegressor(
solver="lbfgs",
hidden_layer_sizes=50,
max_iter=200,
tol=1e-3,
shuffle=True,
random_state=1,
activation=activation,
)
mlp.fit(X, y)
if activation == "identity":
assert mlp.score(X, y) > 0.80
else:
# Non linear models perform much better than linear bottleneck:
assert mlp.score(X, y) > 0.98
@pytest.mark.parametrize("X,y", classification_datasets)
def test_lbfgs_classification_maxfun(X, y):
# Test lbfgs parameter max_fun.
# It should independently limit the number of iterations for lbfgs.
max_fun = 10
# classification tests
for activation in ACTIVATION_TYPES:
mlp = MLPClassifier(
solver="lbfgs",
hidden_layer_sizes=50,
max_iter=150,
max_fun=max_fun,
shuffle=True,
random_state=1,
activation=activation,
)
with pytest.warns(ConvergenceWarning):
mlp.fit(X, y)
assert max_fun >= mlp.n_iter_
@pytest.mark.parametrize("X,y", regression_datasets)
def test_lbfgs_regression_maxfun(X, y):
# Test lbfgs parameter max_fun.
# It should independently limit the number of iterations for lbfgs.
max_fun = 10
# regression tests
for activation in ACTIVATION_TYPES:
mlp = MLPRegressor(
solver="lbfgs",
hidden_layer_sizes=50,
tol=0.0,
max_iter=150,
max_fun=max_fun,
shuffle=True,
random_state=1,
activation=activation,
)
with pytest.warns(ConvergenceWarning):
mlp.fit(X, y)
assert max_fun >= mlp.n_iter_
def test_learning_rate_warmstart():
# Tests that warm_start reuses past solutions.
X = [[3, 2], [1, 6], [5, 6], [-2, -4]]
y = [1, 1, 1, 0]
for learning_rate in ["invscaling", "constant"]:
mlp = MLPClassifier(
solver="sgd",
hidden_layer_sizes=4,
learning_rate=learning_rate,
max_iter=1,
power_t=0.25,
warm_start=True,
)
with ignore_warnings(category=ConvergenceWarning):
mlp.fit(X, y)
prev_eta = mlp._optimizer.learning_rate
mlp.fit(X, y)
post_eta = mlp._optimizer.learning_rate
if learning_rate == "constant":
assert prev_eta == post_eta
elif learning_rate == "invscaling":
assert mlp.learning_rate_init / pow(8 + 1, mlp.power_t) == post_eta
def test_multilabel_classification():
# Test that multi-label classification works as expected.
# test fit method
X, y = make_multilabel_classification(
n_samples=50, random_state=0, return_indicator=True
)
mlp = MLPClassifier(
solver="lbfgs",
hidden_layer_sizes=50,
alpha=1e-5,
max_iter=150,
random_state=0,
activation="logistic",
learning_rate_init=0.2,
)
mlp.fit(X, y)
assert mlp.score(X, y) > 0.97
# test partial fit method
mlp = MLPClassifier(
solver="sgd",
hidden_layer_sizes=50,
max_iter=150,
random_state=0,
activation="logistic",
alpha=1e-5,
learning_rate_init=0.2,
)
for i in range(100):
mlp.partial_fit(X, y, classes=[0, 1, 2, 3, 4])
assert mlp.score(X, y) > 0.9
# Make sure early stopping still works now that splitting is stratified by
# default (it is disabled for multilabel classification)
mlp = MLPClassifier(early_stopping=True)
mlp.fit(X, y).predict(X)
def test_multioutput_regression():
# Test that multi-output regression works as expected
X, y = make_regression(n_samples=200, n_targets=5, random_state=11)
mlp = MLPRegressor(
solver="lbfgs", hidden_layer_sizes=50, max_iter=200, tol=1e-2, random_state=1
)
mlp.fit(X, y)
assert mlp.score(X, y) > 0.9
def test_partial_fit_classes_error():
# Tests that passing different classes to partial_fit raises an error
X = [[3, 2]]
y = [0]
clf = MLPClassifier(solver="sgd")
clf.partial_fit(X, y, classes=[0, 1])
with pytest.raises(ValueError):
clf.partial_fit(X, y, classes=[1, 2])
def test_partial_fit_classification():
# Test partial_fit on classification.
# `partial_fit` should yield the same results as 'fit' for binary and
# multi-class classification.
for X, y in classification_datasets:
mlp = MLPClassifier(
solver="sgd",
max_iter=100,
random_state=1,
tol=0,
alpha=1e-5,
learning_rate_init=0.2,
)
with ignore_warnings(category=ConvergenceWarning):
mlp.fit(X, y)
pred1 = mlp.predict(X)
mlp = MLPClassifier(
solver="sgd", random_state=1, alpha=1e-5, learning_rate_init=0.2
)
for i in range(100):
mlp.partial_fit(X, y, classes=np.unique(y))
pred2 = mlp.predict(X)
assert_array_equal(pred1, pred2)
assert mlp.score(X, y) > 0.95
def test_partial_fit_unseen_classes():
# Non regression test for bug 6994
# Tests for labeling errors in partial fit
clf = MLPClassifier(random_state=0)
clf.partial_fit([[1], [2], [3]], ["a", "b", "c"], classes=["a", "b", "c", "d"])
clf.partial_fit([[4]], ["d"])
assert clf.score([[1], [2], [3], [4]], ["a", "b", "c", "d"]) > 0
def test_partial_fit_regression():
# Test partial_fit on regression.
# `partial_fit` should yield the same results as 'fit' for regression.
X = X_reg
y = y_reg
for momentum in [0, 0.9]:
mlp = MLPRegressor(
solver="sgd",
max_iter=100,
activation="relu",
random_state=1,
learning_rate_init=0.01,
batch_size=X.shape[0],
momentum=momentum,
)
with warnings.catch_warnings():
warnings.simplefilter("ignore", ConvergenceWarning)
mlp.fit(X, y)
pred1 = mlp.predict(X)
mlp = MLPRegressor(
solver="sgd",
activation="relu",
learning_rate_init=0.01,
random_state=1,
batch_size=X.shape[0],
momentum=momentum,
)
for i in range(100):
mlp.partial_fit(X, y)
pred2 = mlp.predict(X)
assert_allclose(pred1, pred2)
score = mlp.score(X, y)
assert score > 0.65
def test_partial_fit_errors():
# Test partial_fit error handling.
X = [[3, 2], [1, 6]]
y = [1, 0]
# no classes passed
with pytest.raises(ValueError):
MLPClassifier(solver="sgd").partial_fit(X, y, classes=[2])
# lbfgs doesn't support partial_fit
assert not hasattr(MLPClassifier(solver="lbfgs"), "partial_fit")
def test_nonfinite_params():
# Check that MLPRegressor throws ValueError when dealing with non-finite
# parameter values
rng = np.random.RandomState(0)
n_samples = 10
fmax = np.finfo(np.float64).max
X = fmax * rng.uniform(size=(n_samples, 2))
y = rng.standard_normal(size=n_samples)
clf = MLPRegressor()
msg = (
"Solver produced non-finite parameter weights. The input data may contain large"
" values and need to be preprocessed."
)
with pytest.raises(ValueError, match=msg):
with warnings.catch_warnings():
# RuntimeWarning: overflow encountered in square
warnings.simplefilter("ignore")
clf.fit(X, y)
def test_predict_proba_binary():
# Test that predict_proba works as expected for binary class.
X = X_digits_binary[:50]
y = y_digits_binary[:50]
clf = MLPClassifier(hidden_layer_sizes=5, activation="logistic", random_state=1)
with ignore_warnings(category=ConvergenceWarning):
clf.fit(X, y)
y_proba = clf.predict_proba(X)
y_log_proba = clf.predict_log_proba(X)
(n_samples, n_classes) = y.shape[0], 2
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
assert y_proba.shape == (n_samples, n_classes)
assert_array_equal(proba_max, proba_log_max)
assert_allclose(y_log_proba, np.log(y_proba))
assert roc_auc_score(y, y_proba[:, 1]) == 1.0
def test_predict_proba_multiclass():
# Test that predict_proba works as expected for multi class.
X = X_digits_multi[:10]
y = y_digits_multi[:10]
clf = MLPClassifier(hidden_layer_sizes=5)
with ignore_warnings(category=ConvergenceWarning):
clf.fit(X, y)
y_proba = clf.predict_proba(X)
y_log_proba = clf.predict_log_proba(X)
(n_samples, n_classes) = y.shape[0], np.unique(y).size
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
assert y_proba.shape == (n_samples, n_classes)
assert_array_equal(proba_max, proba_log_max)
assert_allclose(y_log_proba, np.log(y_proba))
def test_predict_proba_multilabel():
# Test that predict_proba works as expected for multilabel.
# Multilabel should not use softmax which makes probabilities sum to 1
X, Y = make_multilabel_classification(
n_samples=50, random_state=0, return_indicator=True
)
n_samples, n_classes = Y.shape
clf = MLPClassifier(solver="lbfgs", hidden_layer_sizes=30, random_state=0)
clf.fit(X, Y)
y_proba = clf.predict_proba(X)
assert y_proba.shape == (n_samples, n_classes)
assert_array_equal(y_proba > 0.5, Y)
y_log_proba = clf.predict_log_proba(X)
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
assert (y_proba.sum(1) - 1).dot(y_proba.sum(1) - 1) > 1e-10
assert_array_equal(proba_max, proba_log_max)
assert_allclose(y_log_proba, np.log(y_proba))
def test_shuffle():
# Test that the shuffle parameter affects the training process (it should)
X, y = make_regression(n_samples=50, n_features=5, n_targets=1, random_state=0)
# The coefficients will be identical if both do or do not shuffle
for shuffle in [True, False]:
mlp1 = MLPRegressor(
hidden_layer_sizes=1,
max_iter=1,
batch_size=1,
random_state=0,
shuffle=shuffle,
)
mlp2 = MLPRegressor(
hidden_layer_sizes=1,
max_iter=1,
batch_size=1,
random_state=0,
shuffle=shuffle,
)
with warnings.catch_warnings():
warnings.simplefilter("ignore", ConvergenceWarning)
mlp1.fit(X, y)
mlp2.fit(X, y)
assert np.array_equal(mlp1.coefs_[0], mlp2.coefs_[0])
# The coefficients will be slightly different if shuffle=True
mlp1 = MLPRegressor(
hidden_layer_sizes=1, max_iter=1, batch_size=1, random_state=0, shuffle=True
)
mlp2 = MLPRegressor(
hidden_layer_sizes=1, max_iter=1, batch_size=1, random_state=0, shuffle=False
)
with warnings.catch_warnings():
warnings.simplefilter("ignore", ConvergenceWarning)
mlp1.fit(X, y)
mlp2.fit(X, y)
assert not np.array_equal(mlp1.coefs_[0], mlp2.coefs_[0])
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_sparse_matrices(csr_container):
# Test that sparse and dense input matrices output the same results.
X = X_digits_binary[:50]
y = y_digits_binary[:50]
X_sparse = csr_container(X)
mlp = MLPClassifier(solver="lbfgs", hidden_layer_sizes=15, random_state=1)
mlp.fit(X, y)
pred1 = mlp.predict(X)
mlp.fit(X_sparse, y)
pred2 = mlp.predict(X_sparse)
assert_almost_equal(pred1, pred2)
pred1 = mlp.predict(X)
pred2 = mlp.predict(X_sparse)
assert_array_equal(pred1, pred2)
def test_tolerance():
# Test tolerance.
# It should force the solver to exit the loop when it converges.
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(tol=0.5, max_iter=3000, solver="sgd")
clf.fit(X, y)
assert clf.max_iter > clf.n_iter_
def test_verbose_sgd(capsys):
# Test verbose.
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(solver="sgd", max_iter=2, verbose=10, hidden_layer_sizes=2)
with ignore_warnings(category=ConvergenceWarning):
clf.fit(X, y)
clf.partial_fit(X, y)
out, _ = capsys.readouterr()
assert "Iteration" in out
@pytest.mark.parametrize("MLPEstimator", [MLPClassifier, MLPRegressor])
def test_early_stopping(MLPEstimator):
X = X_digits_binary[:100]
y = y_digits_binary[:100]
tol = 0.2
mlp_estimator = MLPEstimator(
tol=tol, max_iter=3000, solver="sgd", early_stopping=True
)
mlp_estimator.fit(X, y)
assert mlp_estimator.max_iter > mlp_estimator.n_iter_
assert mlp_estimator.best_loss_ is None
assert isinstance(mlp_estimator.validation_scores_, list)
valid_scores = mlp_estimator.validation_scores_
best_valid_score = mlp_estimator.best_validation_score_
assert max(valid_scores) == best_valid_score
assert best_valid_score + tol > valid_scores[-2]
assert best_valid_score + tol > valid_scores[-1]
# check that the attributes `validation_scores_` and `best_validation_score_`
# are set to None when `early_stopping=False`
mlp_estimator = MLPEstimator(
tol=tol, max_iter=3000, solver="sgd", early_stopping=False
)
mlp_estimator.fit(X, y)
assert mlp_estimator.validation_scores_ is None
assert mlp_estimator.best_validation_score_ is None
assert mlp_estimator.best_loss_ is not None
def test_adaptive_learning_rate():
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(tol=0.5, max_iter=3000, solver="sgd", learning_rate="adaptive")
clf.fit(X, y)
assert clf.max_iter > clf.n_iter_
assert 1e-6 > clf._optimizer.learning_rate
def test_warm_start():
X = X_iris
y = y_iris
y_2classes = np.array([0] * 75 + [1] * 75)
y_3classes = np.array([0] * 40 + [1] * 40 + [2] * 70)
y_3classes_alt = np.array([0] * 50 + [1] * 50 + [3] * 50)
y_4classes = np.array([0] * 37 + [1] * 37 + [2] * 38 + [3] * 38)
y_5classes = np.array([0] * 30 + [1] * 30 + [2] * 30 + [3] * 30 + [4] * 30)
# No error raised
clf = MLPClassifier(
hidden_layer_sizes=2, solver="lbfgs", warm_start=True, random_state=42, tol=1e-2
).fit(X, y)
clf.fit(X, y)
clf.fit(X, y_3classes)
for y_i in (y_2classes, y_3classes_alt, y_4classes, y_5classes):
clf = MLPClassifier(
hidden_layer_sizes=2,
solver="lbfgs",
warm_start=True,
random_state=42,
tol=1e-2,
).fit(X, y)
message = (
"warm_start can only be used where `y` has the same "
"classes as in the previous call to fit."
" Previously got [0 1 2], `y` has %s" % np.unique(y_i)
)
with pytest.raises(ValueError, match=re.escape(message)):
clf.fit(X, y_i)
@pytest.mark.parametrize("MLPEstimator", [MLPClassifier, MLPRegressor])
def test_warm_start_full_iteration(MLPEstimator):
# Non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/16812
# Check that the MLP estimator completes `max_iter` iterations with a
# warm started estimator.
X, y = X_iris, y_iris
max_iter = 3
clf = MLPEstimator(
hidden_layer_sizes=2, solver="sgd", warm_start=True, max_iter=max_iter
)
with warnings.catch_warnings():
warnings.simplefilter("ignore", ConvergenceWarning)
clf.fit(X, y)
assert max_iter == clf.n_iter_
clf.fit(X, y)
assert max_iter == clf.n_iter_
def test_n_iter_no_change():
# test n_iter_no_change using binary data set
# the classifier fitting process is not prone to loss curve fluctuations
X = X_digits_binary[:100]
y = y_digits_binary[:100]
tol = 0.01
max_iter = 3000
# test multiple n_iter_no_change
for n_iter_no_change in [2, 5, 10, 50, 100]:
clf = MLPClassifier(
tol=tol, max_iter=max_iter, solver="sgd", n_iter_no_change=n_iter_no_change
)
clf.fit(X, y)
# validate n_iter_no_change
assert clf._no_improvement_count == n_iter_no_change + 1
assert max_iter > clf.n_iter_
@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning")
def test_n_iter_no_change_inf():
# test n_iter_no_change using binary data set
# the fitting process should go to max_iter iterations
X = X_digits_binary[:100]
y = y_digits_binary[:100]
# set a ridiculous tolerance
# this should always trigger _update_no_improvement_count()
tol = 1e9
# fit
n_iter_no_change = np.inf
max_iter = 3000
clf = MLPClassifier(
tol=tol, max_iter=max_iter, solver="sgd", n_iter_no_change=n_iter_no_change
)
clf.fit(X, y)
# validate n_iter_no_change doesn't cause early stopping
assert clf.n_iter_ == max_iter
# validate _update_no_improvement_count() was always triggered
assert clf._no_improvement_count == clf.n_iter_ - 1
def test_early_stopping_stratified():
# Make sure data splitting for early stopping is stratified
X = [[1, 2], [2, 3], [3, 4], [4, 5]]
y = [0, 0, 0, 1]
mlp = MLPClassifier(early_stopping=True)
with pytest.raises(
ValueError,
match=(
r"The least populated classes in y have only 1 member.*Classes with "
r"too few members are: \['True'\]"
),
):
mlp.fit(X, y)
def test_mlp_classifier_dtypes_casting():
# Compare predictions for different dtypes
mlp_64 = MLPClassifier(
alpha=1e-5, hidden_layer_sizes=(5, 3), random_state=1, max_iter=100, tol=1e-1
)
mlp_64.fit(X_digits[:300], y_digits[:300])
pred_64 = mlp_64.predict(X_digits[300:])
proba_64 = mlp_64.predict_proba(X_digits[300:])
mlp_32 = MLPClassifier(
alpha=1e-5, hidden_layer_sizes=(5, 3), random_state=1, max_iter=100, tol=1e-1
)
mlp_32.fit(X_digits[:300].astype(np.float32), y_digits[:300])
pred_32 = mlp_32.predict(X_digits[300:].astype(np.float32))
proba_32 = mlp_32.predict_proba(X_digits[300:].astype(np.float32))
assert_array_equal(pred_64, pred_32)
assert_allclose(proba_64, proba_32, rtol=1e-02)
def test_mlp_regressor_dtypes_casting():
mlp_64 = MLPRegressor(
alpha=1e-5, hidden_layer_sizes=(5, 3), random_state=1, max_iter=150, tol=1e-3
)
mlp_64.fit(X_digits[:300], y_digits[:300])
pred_64 = mlp_64.predict(X_digits[300:])
mlp_32 = MLPRegressor(
alpha=1e-5, hidden_layer_sizes=(5, 3), random_state=1, max_iter=150, tol=1e-3
)
mlp_32.fit(X_digits[:300].astype(np.float32), y_digits[:300])
pred_32 = mlp_32.predict(X_digits[300:].astype(np.float32))
assert_allclose(pred_64, pred_32, rtol=5e-04)
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("Estimator", [MLPClassifier, MLPRegressor])
def test_mlp_param_dtypes(dtype, Estimator):
# Checks if input dtype is used for network parameters
# and predictions
X, y = X_digits.astype(dtype), y_digits
mlp = Estimator(
alpha=1e-5, hidden_layer_sizes=(5, 3), random_state=1, max_iter=50, tol=1e-1
)
mlp.fit(X[:300], y[:300])
pred = mlp.predict(X[300:])
assert all([intercept.dtype == dtype for intercept in mlp.intercepts_])
assert all([coef.dtype == dtype for coef in mlp.coefs_])
if Estimator == MLPRegressor:
assert pred.dtype == dtype
def test_mlp_loading_from_joblib_partial_fit(tmp_path):
"""Loading from MLP and partial fitting updates weights. Non-regression
test for #19626."""
pre_trained_estimator = MLPRegressor(
hidden_layer_sizes=(42,), random_state=42, learning_rate_init=0.01, max_iter=200
)
features, target = [[2]], [4]
# Fit on x=2, y=4
pre_trained_estimator.fit(features, target)
# dump and load model
pickled_file = tmp_path / "mlp.pkl"
joblib.dump(pre_trained_estimator, pickled_file)
load_estimator = joblib.load(pickled_file)
# Train for more epochs on the point x=2, y=1
fine_tune_features, fine_tune_target = [[2]], [1]
for _ in range(200):
load_estimator.partial_fit(fine_tune_features, fine_tune_target)
# finetuned model learned the new target
predicted_value = load_estimator.predict(fine_tune_features)
assert_allclose(predicted_value, fine_tune_target, rtol=1e-4)
@pytest.mark.parametrize("Estimator", [MLPClassifier, MLPRegressor])
def test_preserve_feature_names(Estimator):
"""Check that feature names are preserved when early stopping is enabled.
Feature names are required for consistency checks during scoring.
Non-regression test for gh-24846
"""
pd = pytest.importorskip("pandas")
rng = np.random.RandomState(0)
X = pd.DataFrame(data=rng.randn(10, 2), columns=["colname_a", "colname_b"])
y = pd.Series(data=np.full(10, 1), name="colname_y")
model = Estimator(early_stopping=True, validation_fraction=0.2)
with warnings.catch_warnings():
warnings.simplefilter("error", UserWarning)
model.fit(X, y)
@pytest.mark.parametrize("MLPEstimator", [MLPClassifier, MLPRegressor])
def test_mlp_warm_start_with_early_stopping(MLPEstimator):
"""Check that early stopping works with warm start."""
mlp = MLPEstimator(
max_iter=10, random_state=0, warm_start=True, early_stopping=True
)
with warnings.catch_warnings():
warnings.simplefilter("ignore", ConvergenceWarning)
mlp.fit(X_iris, y_iris)
n_validation_scores = len(mlp.validation_scores_)
mlp.set_params(max_iter=20)
mlp.fit(X_iris, y_iris)
assert len(mlp.validation_scores_) > n_validation_scores
@pytest.mark.parametrize("MLPEstimator", [MLPClassifier, MLPRegressor])
@pytest.mark.parametrize("solver", ["sgd", "adam", "lbfgs"])
def test_mlp_warm_start_no_convergence(MLPEstimator, solver):
"""Check that we stop the number of iteration at `max_iter` when warm starting.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/24764
"""
model = MLPEstimator(
solver=solver,
warm_start=True,
early_stopping=False,
max_iter=10,
n_iter_no_change=np.inf,
random_state=0,
)
with pytest.warns(ConvergenceWarning):
model.fit(X_iris, y_iris)
assert model.n_iter_ == 10
model.set_params(max_iter=20)
with pytest.warns(ConvergenceWarning):
model.fit(X_iris, y_iris)
assert model.n_iter_ == 20
@pytest.mark.parametrize("MLPEstimator", [MLPClassifier, MLPRegressor])
def test_mlp_partial_fit_after_fit(MLPEstimator):
"""Check partial fit does not fail after fit when early_stopping=True.
Non-regression test for gh-25693.
"""
mlp = MLPEstimator(early_stopping=True, random_state=0).fit(X_iris, y_iris)
msg = "partial_fit does not support early_stopping=True"
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/neural_network/tests/test_stochastic_optimizers.py | sklearn/neural_network/tests/test_stochastic_optimizers.py | import numpy as np
from sklearn.neural_network._stochastic_optimizers import (
AdamOptimizer,
BaseOptimizer,
SGDOptimizer,
)
from sklearn.utils._testing import assert_array_equal
shapes = [(4, 6), (6, 8), (7, 8, 9)]
def test_base_optimizer():
for lr in [10**i for i in range(-3, 4)]:
optimizer = BaseOptimizer(lr)
assert optimizer.trigger_stopping("", False)
def test_sgd_optimizer_no_momentum():
params = [np.zeros(shape) for shape in shapes]
rng = np.random.RandomState(0)
for lr in [10**i for i in range(-3, 4)]:
optimizer = SGDOptimizer(params, lr, momentum=0, nesterov=False)
grads = [rng.random_sample(shape) for shape in shapes]
expected = [param - lr * grad for param, grad in zip(params, grads)]
optimizer.update_params(params, grads)
for exp, param in zip(expected, params):
assert_array_equal(exp, param)
def test_sgd_optimizer_momentum():
params = [np.zeros(shape) for shape in shapes]
lr = 0.1
rng = np.random.RandomState(0)
for momentum in np.arange(0.5, 0.9, 0.1):
optimizer = SGDOptimizer(params, lr, momentum=momentum, nesterov=False)
velocities = [rng.random_sample(shape) for shape in shapes]
optimizer.velocities = velocities
grads = [rng.random_sample(shape) for shape in shapes]
updates = [
momentum * velocity - lr * grad for velocity, grad in zip(velocities, grads)
]
expected = [param + update for param, update in zip(params, updates)]
optimizer.update_params(params, grads)
for exp, param in zip(expected, params):
assert_array_equal(exp, param)
def test_sgd_optimizer_trigger_stopping():
params = [np.zeros(shape) for shape in shapes]
lr = 2e-6
optimizer = SGDOptimizer(params, lr, lr_schedule="adaptive")
assert not optimizer.trigger_stopping("", False)
assert lr / 5 == optimizer.learning_rate
assert optimizer.trigger_stopping("", False)
def test_sgd_optimizer_nesterovs_momentum():
params = [np.zeros(shape) for shape in shapes]
lr = 0.1
rng = np.random.RandomState(0)
for momentum in np.arange(0.5, 0.9, 0.1):
optimizer = SGDOptimizer(params, lr, momentum=momentum, nesterov=True)
velocities = [rng.random_sample(shape) for shape in shapes]
optimizer.velocities = velocities
grads = [rng.random_sample(shape) for shape in shapes]
updates = [
momentum * velocity - lr * grad for velocity, grad in zip(velocities, grads)
]
updates = [
momentum * update - lr * grad for update, grad in zip(updates, grads)
]
expected = [param + update for param, update in zip(params, updates)]
optimizer.update_params(params, grads)
for exp, param in zip(expected, params):
assert_array_equal(exp, param)
def test_adam_optimizer():
params = [np.zeros(shape) for shape in shapes]
lr = 0.001
epsilon = 1e-8
rng = np.random.RandomState(0)
for beta_1 in np.arange(0.9, 1.0, 0.05):
for beta_2 in np.arange(0.995, 1.0, 0.001):
optimizer = AdamOptimizer(params, lr, beta_1, beta_2, epsilon)
ms = [rng.random_sample(shape) for shape in shapes]
vs = [rng.random_sample(shape) for shape in shapes]
t = 10
optimizer.ms = ms
optimizer.vs = vs
optimizer.t = t - 1
grads = [rng.random_sample(shape) for shape in shapes]
ms = [beta_1 * m + (1 - beta_1) * grad for m, grad in zip(ms, grads)]
vs = [beta_2 * v + (1 - beta_2) * (grad**2) for v, grad in zip(vs, grads)]
learning_rate = lr * np.sqrt(1 - beta_2**t) / (1 - beta_1**t)
updates = [
-learning_rate * m / (np.sqrt(v) + epsilon) for m, v in zip(ms, vs)
]
expected = [param + update for param, update in zip(params, updates)]
optimizer.update_params(params, grads)
for exp, param in zip(expected, params):
assert_array_equal(exp, param)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/neural_network/tests/__init__.py | sklearn/neural_network/tests/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/neural_network/tests/test_rbm.py | sklearn/neural_network/tests/test_rbm.py | import re
import sys
from io import StringIO
import numpy as np
import pytest
from sklearn.datasets import load_digits
from sklearn.neural_network import BernoulliRBM
from sklearn.utils._testing import (
assert_allclose,
assert_almost_equal,
assert_array_equal,
)
from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS, LIL_CONTAINERS
from sklearn.utils.validation import assert_all_finite
Xdigits, _ = load_digits(return_X_y=True)
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(
n_components=64, learning_rate=0.1, batch_size=10, n_iter=7, random_state=9
)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21.0, decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(
n_components=64, learning_rate=0.1, batch_size=20, random_state=9
)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21.0, decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5, n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_small_sparse(csr_container):
# BernoulliRBM should work on small sparse matrices.
X = csr_container(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
@pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS)
def test_small_sparse_partial_fit(sparse_container):
X_sparse = sparse_container(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(
n_components=64, learning_rate=0.1, batch_size=10, random_state=9
)
rbm2 = BernoulliRBM(
n_components=64, learning_rate=0.1, batch_size=10, random_state=9
)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(
rbm1.score_samples(X).mean(), rbm2.score_samples(X).mean(), decimal=0
)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5, n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
def test_fit_gibbs(csc_container):
# XXX: this test is very seed-dependent! It probably needs to be rewritten.
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.0], [1.0]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2, n_iter=42, random_state=rng)
# this many iterations are needed
rbm1.fit(X)
assert_almost_equal(
rbm1.components_, np.array([[0.02649814], [0.02009084]]), decimal=4
)
assert_almost_equal(rbm1.gibbs(X), X)
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
# the same input even when the input is sparse, and test against non-sparse
rng = np.random.RandomState(42)
X = csc_container([[0.0], [1.0]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2, n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(
rbm2.components_, np.array([[0.02649814], [0.02009084]]), decimal=4
)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
# Check if we don't get NaNs sampling the full digits dataset.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40, n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert np.all((X_sampled != X_sampled2).max(axis=1))
@pytest.mark.parametrize("lil_containers", LIL_CONTAINERS)
def test_score_samples(lil_containers):
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2, n_iter=10, random_state=rng)
rbm1.fit(X)
assert (rbm1.score_samples(X) < -300).all()
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_containers(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under="ignore"):
rbm1.score_samples([np.arange(1000) * 100])
@pytest.mark.thread_unsafe # manually captured stdout
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
def test_sparse_and_verbose(csc_container, capsys):
# Make sure RBM works with sparse input when verbose=True
X = csc_container([[0.0], [1.0]])
rbm = BernoulliRBM(
n_components=2, batch_size=2, n_iter=1, random_state=42, verbose=True
)
rbm.fit(X)
# Make sure the captured standard output is sound.
assert re.match(
r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
capsys.readouterr().out,
)
@pytest.mark.parametrize(
"dtype_in, dtype_out",
[(np.float32, np.float32), (np.float64, np.float64), (int, np.float64)],
)
def test_transformer_dtypes_casting(dtype_in, dtype_out):
X = Xdigits[:100].astype(dtype_in)
rbm = BernoulliRBM(n_components=16, batch_size=5, n_iter=5, random_state=42)
Xt = rbm.fit_transform(X)
# dtype_in and dtype_out should be consistent
assert Xt.dtype == dtype_out, "transform dtype: {} - original dtype: {}".format(
Xt.dtype, X.dtype
)
def test_convergence_dtype_consistency():
# float 64 transformer
X_64 = Xdigits[:100].astype(np.float64)
rbm_64 = BernoulliRBM(n_components=16, batch_size=5, n_iter=5, random_state=42)
Xt_64 = rbm_64.fit_transform(X_64)
# float 32 transformer
X_32 = Xdigits[:100].astype(np.float32)
rbm_32 = BernoulliRBM(n_components=16, batch_size=5, n_iter=5, random_state=42)
Xt_32 = rbm_32.fit_transform(X_32)
# results and attributes should be close enough in 32 bit and 64 bit
assert_allclose(Xt_64, Xt_32, rtol=1e-06, atol=0)
assert_allclose(
rbm_64.intercept_hidden_, rbm_32.intercept_hidden_, rtol=1e-06, atol=0
)
assert_allclose(
rbm_64.intercept_visible_, rbm_32.intercept_visible_, rtol=1e-05, atol=0
)
assert_allclose(rbm_64.components_, rbm_32.components_, rtol=1e-03, atol=0)
assert_allclose(rbm_64.h_samples_, rbm_32.h_samples_)
@pytest.mark.parametrize("method", ["fit", "partial_fit"])
def test_feature_names_out(method):
"""Check `get_feature_names_out` for `BernoulliRBM`."""
n_components = 10
rbm = BernoulliRBM(n_components=n_components)
getattr(rbm, method)(Xdigits)
names = rbm.get_feature_names_out()
expected_names = [f"bernoullirbm{i}" for i in range(n_components)]
assert_array_equal(expected_names, names)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/_arff.py | sklearn/externals/_arff.py | # =============================================================================
# Federal University of Rio Grande do Sul (UFRGS)
# Connectionist Artificial Intelligence Laboratory (LIAC)
# Renato de Pontes Pereira - rppereira@inf.ufrgs.br
# =============================================================================
# Copyright (c) 2011 Renato de Pontes Pereira, renato.ppontes at gmail dot com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# =============================================================================
'''
The liac-arff module implements functions to read and write ARFF files in
Python. It was created in the Connectionist Artificial Intelligence Laboratory
(LIAC), based at the Federal University of Rio Grande do Sul (UFRGS), in
Brazil.
ARFF (Attribute-Relation File Format) is a file format specially created to
describe datasets which are commonly used for machine learning experiments and
software. This file format was created to be used in Weka, the most
representative software for automated machine learning experiments.
An ARFF file can be divided into two sections: header and data. The Header
describes the metadata of the dataset, including a general description of the
dataset, its name and its attributes. The source below is an example of a
header section in a XOR dataset::
%
% XOR Dataset
%
% Created by Renato Pereira
% rppereira@inf.ufrgs.br
% http://inf.ufrgs.br/~rppereira
%
%
@RELATION XOR
@ATTRIBUTE input1 REAL
@ATTRIBUTE input2 REAL
@ATTRIBUTE y REAL
The Data section of an ARFF file describes the observations of the dataset, in
the case of the XOR dataset::
@DATA
0.0,0.0,0.0
0.0,1.0,1.0
1.0,0.0,1.0
1.0,1.0,0.0
%
%
%
Notice that several lines start with a ``%`` symbol, denoting a comment;
thus, lines with ``%`` at the beginning will be ignored, except for the
description part at the beginning of the file. The declarations ``@RELATION``,
``@ATTRIBUTE``, and ``@DATA`` are all case insensitive and obligatory.
For more information and details about the ARFF file description, consult
http://www.cs.waikato.ac.nz/~ml/weka/arff.html
ARFF Files in Python
~~~~~~~~~~~~~~~~~~~~
This module uses built-in Python objects to represent a deserialized ARFF
file. A dictionary is used as the container of the data and metadata of ARFF,
and has the following keys:
- **description**: (OPTIONAL) a string with the description of the dataset.
- **relation**: (OBLIGATORY) a string with the name of the dataset.
- **attributes**: (OBLIGATORY) a list of attributes with the following
template::
(attribute_name, attribute_type)
the attribute_name is a string, and attribute_type must be a string
or a list of strings.
- **data**: (OBLIGATORY) a list of data instances. Each data instance must be
a list with values, depending on the attributes.
The above keys must follow the case described above, i.e., the keys are
case sensitive. The attribute type ``attribute_type`` must be one of these
strings (they are not case sensitive): ``NUMERIC``, ``INTEGER``, ``REAL`` or
``STRING``. For nominal attributes, the ``attribute_type`` must be a list of
strings.
In this format, the XOR dataset presented above can be represented as a python
object as::
xor_dataset = {
'description': 'XOR Dataset',
'relation': 'XOR',
'attributes': [
('input1', 'REAL'),
('input2', 'REAL'),
('y', 'REAL'),
],
'data': [
[0.0, 0.0, 0.0],
[0.0, 1.0, 1.0],
[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0]
]
}
Features
~~~~~~~~
This module provides several features, including:
- Read and write ARFF files using Python built-in structures, such as dictionaries
and lists;
- Supports `scipy.sparse.coo <http://docs.scipy
.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.html#scipy.sparse.coo_matrix>`_
and lists of dictionaries as used by SVMLight
- Supports the following attribute types: NUMERIC, REAL, INTEGER, STRING, and
NOMINAL;
- Has an interface similar to other built-in modules such as ``json``, or
``zipfile``;
- Supports reading and writing the descriptions of files;
- Supports missing values and names with spaces;
- Supports unicode values and names;
- Fully compatible with Python 2.7+, Python 3.5+, pypy and pypy3;
- Under `MIT License <http://opensource.org/licenses/MIT>`_
'''
__author__ = 'Renato de Pontes Pereira, Matthias Feurer, Joel Nothman'
__author_email__ = ('renato.ppontes@gmail.com, '
'feurerm@informatik.uni-freiburg.de, '
'joel.nothman@gmail.com')
__version__ = '2.4.0'
import re
import csv
from typing import TYPE_CHECKING
from typing import Optional, List, Dict, Any, Iterator, Union, Tuple
# CONSTANTS ===================================================================
_SIMPLE_TYPES = ['NUMERIC', 'REAL', 'INTEGER', 'STRING']
_TK_DESCRIPTION = '%'
_TK_COMMENT = '%'
_TK_RELATION = '@RELATION'
_TK_ATTRIBUTE = '@ATTRIBUTE'
_TK_DATA = '@DATA'
_RE_RELATION = re.compile(r'^([^\{\}%,\s]*|\".*\"|\'.*\')$', re.UNICODE)
_RE_ATTRIBUTE = re.compile(r'^(\".*\"|\'.*\'|[^\{\}%,\s]*)\s+(.+)$', re.UNICODE)
_RE_QUOTE_CHARS = re.compile(r'["\'\\\s%,\000-\031]', re.UNICODE)
_RE_ESCAPE_CHARS = re.compile(r'(?=["\'\\%])|[\n\r\t\000-\031]')
_RE_SPARSE_LINE = re.compile(r'^\s*\{.*\}\s*$', re.UNICODE)
_RE_NONTRIVIAL_DATA = re.compile('["\'{}\\s]', re.UNICODE)
ArffDenseDataType = Iterator[List]
ArffSparseDataType = Tuple[List, ...]
if TYPE_CHECKING:
# typing_extensions is available when mypy is installed
from typing_extensions import TypedDict
class ArffContainerType(TypedDict):
description: str
relation: str
attributes: List
data: Union[ArffDenseDataType, ArffSparseDataType]
else:
ArffContainerType = Dict[str, Any]
def _build_re_values():
quoted_re = r'''
" # open quote followed by zero or more of:
(?:
(?<!\\) # no additional backslash
(?:\\\\)* # maybe escaped backslashes
\\" # escaped quote
|
\\[^"] # escaping a non-quote
|
[^"\\] # non-quote char
)*
" # close quote
'''
# a value is surrounded by " or by ' or contains no quotables
value_re = r'''(?:
%s| # a value may be surrounded by "
%s| # or by '
[^,\s"'{}]+ # or may contain no characters requiring quoting
)''' % (quoted_re,
quoted_re.replace('"', "'"))
# This captures (value, error) groups. Because empty values are allowed,
# we cannot just look for empty values to handle syntax errors.
# We presume the line has had ',' prepended...
dense = re.compile(r'''(?x)
, # may follow ','
\s*
((?=,)|$|{value_re}) # empty or value
|
(\S.*) # error
'''.format(value_re=value_re))
# This captures (key, value) groups and will have an empty key/value
# in case of syntax errors.
# It does not ensure that the line starts with '{' or ends with '}'.
sparse = re.compile(r'''(?x)
(?:^\s*\{|,) # may follow ',', or '{' at line start
\s*
(\d+) # attribute key
\s+
(%(value_re)s) # value
|
(?!}\s*$) # not an error if it's }$
(?!^\s*{\s*}\s*$) # not an error if it's ^{}$
\S.* # error
''' % {'value_re': value_re})
return dense, sparse
_RE_DENSE_VALUES, _RE_SPARSE_KEY_VALUES = _build_re_values()
_ESCAPE_SUB_MAP = {
'\\\\': '\\',
'\\"': '"',
"\\'": "'",
'\\t': '\t',
'\\n': '\n',
'\\r': '\r',
'\\b': '\b',
'\\f': '\f',
'\\%': '%',
}
_UNESCAPE_SUB_MAP = {chr(i): '\\%03o' % i for i in range(32)}
_UNESCAPE_SUB_MAP.update({v: k for k, v in _ESCAPE_SUB_MAP.items()})
_UNESCAPE_SUB_MAP[''] = '\\'
_ESCAPE_SUB_MAP.update({'\\%d' % i: chr(i) for i in range(10)})
def _escape_sub_callback(match):
s = match.group()
if len(s) == 2:
try:
return _ESCAPE_SUB_MAP[s]
except KeyError:
raise ValueError('Unsupported escape sequence: %s' % s)
if s[1] == 'u':
return chr(int(s[2:], 16))
else:
return chr(int(s[1:], 8))
def _unquote(v):
if v[:1] in ('"', "'"):
return re.sub(r'\\([0-9]{1,3}|u[0-9a-f]{4}|.)', _escape_sub_callback,
v[1:-1])
elif v in ('?', ''):
return None
else:
return v
def _parse_values(s):
'''(INTERNAL) Split a line into a list of values'''
if not _RE_NONTRIVIAL_DATA.search(s):
# Fast path for trivial cases (unfortunately we have to handle missing
# values because of the empty string case :(.)
return [None if s in ('?', '') else s
for s in next(csv.reader([s]))]
# _RE_DENSE_VALUES tokenizes despite quoting, whitespace, etc.
values, errors = zip(*_RE_DENSE_VALUES.findall(',' + s))
if not any(errors):
return [_unquote(v) for v in values]
if _RE_SPARSE_LINE.match(s):
try:
return {int(k): _unquote(v)
for k, v in _RE_SPARSE_KEY_VALUES.findall(s)}
except ValueError:
# an ARFF syntax error in sparse data
for match in _RE_SPARSE_KEY_VALUES.finditer(s):
if not match.group(1):
raise BadLayout('Error parsing %r' % match.group())
raise BadLayout('Unknown parsing error')
else:
# an ARFF syntax error
for match in _RE_DENSE_VALUES.finditer(s):
if match.group(2):
raise BadLayout('Error parsing %r' % match.group())
raise BadLayout('Unknown parsing error')
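# Illustrative behaviour of ``_parse_values`` (comments added for clarity; not
# part of the original liac-arff module): dense lines yield lists and sparse
# lines yield dicts keyed by column index, e.g.
#   _parse_values("1.0,'a b',?")    -> ['1.0', 'a b', None]
#   _parse_values("{0 2.0, 3 'x'}") -> {0: '2.0', 3: 'x'}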
DENSE = 0 # Constant value representing a dense matrix
COO = 1 # Constant value representing a sparse matrix in coordinate format
LOD = 2 # Constant value representing a sparse matrix in list of
# dictionaries format
DENSE_GEN = 3 # Generator of lists (dense rows)
LOD_GEN = 4 # Generator of dictionaries
_SUPPORTED_DATA_STRUCTURES = [DENSE, COO, LOD, DENSE_GEN, LOD_GEN]
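# How the constants above map onto ``ArffDecoder.decode(..., return_type=...)``
# results (comments added for clarity; not part of the original module):
#   DENSE               -> 'data' is a list of row lists
#   COO                 -> 'data' is a (data, rows, cols) tuple, suitable for
#                          building a scipy.sparse coo matrix
#   LOD                 -> 'data' is a list of {column_index: value} dicts
#   DENSE_GEN / LOD_GEN -> the same layouts, yielded lazily by generators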
# EXCEPTIONS ==================================================================
class ArffException(Exception):
message: Optional[str] = None
def __init__(self):
self.line = -1
def __str__(self):
return self.message%self.line
class BadRelationFormat(ArffException):
'''Error raised when the relation declaration is in an invalid format.'''
message = 'Bad @RELATION format, at line %d.'
class BadAttributeFormat(ArffException):
'''Error raised when some attribute declaration is in an invalid format.'''
message = 'Bad @ATTRIBUTE format, at line %d.'
class BadDataFormat(ArffException):
'''Error raised when some data instance is in an invalid format.'''
def __init__(self, value):
super().__init__()
self.message = (
'Bad @DATA instance format in line %d: ' +
('%s' % value)
)
class BadAttributeType(ArffException):
'''Error raised when some invalid type is provided in the attribute
declaration.'''
message = 'Bad @ATTRIBUTE type, at line %d.'
class BadAttributeName(ArffException):
'''Error raised when an attribute name is provided twice in the attribute
declarations.'''
def __init__(self, value, value2):
super().__init__()
self.message = (
('Bad @ATTRIBUTE name %s at line' % value) +
' %d, this name is already in use in line' +
(' %d.' % value2)
)
class BadNominalValue(ArffException):
'''Error raised when a value is used in some data instance but is not
declared in its respective attribute declaration.'''
def __init__(self, value):
super().__init__()
self.message = (
('Data value %s not found in nominal declaration, ' % value)
+ 'at line %d.'
)
class BadNominalFormatting(ArffException):
'''Error raised when a nominal value with space is not properly quoted.'''
def __init__(self, value):
super().__init__()
self.message = (
('Nominal data value "%s" not properly quoted in line ' % value) +
'%d.'
)
class BadNumericalValue(ArffException):
'''Error raised when an invalid numerical value is used in some data
instance.'''
message = 'Invalid numerical value, at line %d.'
class BadStringValue(ArffException):
'''Error raised when a string contains a space but is not quoted.'''
message = 'Invalid string value at line %d.'
class BadLayout(ArffException):
'''Error raised when the layout of the ARFF file has something wrong.'''
message = 'Invalid layout of the ARFF file, at line %d.'
def __init__(self, msg=''):
super().__init__()
if msg:
self.message = BadLayout.message + ' ' + msg.replace('%', '%%')
class BadObject(ArffException):
'''Error raised when the object representing the ARFF file has something
wrong.'''
def __init__(self, msg='Invalid object.'):
self.msg = msg
def __str__(self):
return '%s' % self.msg
# =============================================================================
# INTERNAL ====================================================================
def _unescape_sub_callback(match):
return _UNESCAPE_SUB_MAP[match.group()]
def encode_string(s):
if _RE_QUOTE_CHARS.search(s):
return "'%s'" % _RE_ESCAPE_CHARS.sub(_unescape_sub_callback, s)
return s
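# Illustrative behaviour of ``encode_string`` (comments added for clarity; not
# part of the original module): values containing quotable characters are
# single-quoted, plain tokens are returned unchanged, e.g.
#   encode_string('hello world') -> "'hello world'"
#   encode_string('abc')         -> 'abc'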
class EncodedNominalConversor:
def __init__(self, values):
self.values = {v: i for i, v in enumerate(values)}
self.values[0] = 0
def __call__(self, value):
try:
return self.values[value]
except KeyError:
raise BadNominalValue(value)
class NominalConversor:
def __init__(self, values):
self.values = set(values)
self.zero_value = values[0]
def __call__(self, value):
if value not in self.values:
if value == 0:
# Sparse decode
# See issue #52: nominals should take their first value when
# unspecified in a sparse matrix. Naturally, this is consistent
# with EncodedNominalConversor.
return self.zero_value
raise BadNominalValue(value)
return str(value)
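# Illustrative behaviour of the two conversors above (comments added for
# clarity; not part of the original module), assuming nominal values
# ['a', 'b']:
#   EncodedNominalConversor(['a', 'b'])('b') -> 1    (label-encoded index)
#   EncodedNominalConversor(['a', 'b'])(0)   -> 0    (implicit sparse zero)
#   NominalConversor(['a', 'b'])('b')        -> 'b'
#   NominalConversor(['a', 'b'])(0)          -> 'a'  (first nominal value)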
class DenseGeneratorData:
'''Internal helper class to allow for different matrix types without
making the code a huge collection of if statements.'''
def decode_rows(self, stream, conversors):
for row in stream:
values = _parse_values(row)
if isinstance(values, dict):
if values and max(values) >= len(conversors):
raise BadDataFormat(row)
# XXX: int 0 is used for implicit values, not '0'
values = [values[i] if i in values else 0 for i in
range(len(conversors))]
else:
if len(values) != len(conversors):
raise BadDataFormat(row)
yield self._decode_values(values, conversors)
@staticmethod
def _decode_values(values, conversors):
try:
values = [None if value is None else conversor(value)
for conversor, value
in zip(conversors, values)]
except ValueError as exc:
if 'float: ' in str(exc):
raise BadNumericalValue()
return values
def encode_data(self, data, attributes):
'''(INTERNAL) Encodes a line of data.
Data instances follow the csv format, i.e., attribute values are
delimited by commas, after being converted from csv.
:param data: a list of values.
:param attributes: a list of attributes. Used to check if data is valid.
:return: a string with the encoded data line.
'''
current_row = 0
for inst in data:
if len(inst) != len(attributes):
raise BadObject(
'Instance %d has %d attributes, expected %d' %
(current_row, len(inst), len(attributes))
)
new_data = []
for value in inst:
if value is None or value == '' or value != value:
s = '?'
else:
s = encode_string(str(value))
new_data.append(s)
current_row += 1
yield ','.join(new_data)
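# Illustrative behaviour of ``DenseGeneratorData.encode_data`` (comments added
# for clarity; not part of the original module): with three attributes, the
# row ``[1.0, 'a b', None]`` is emitted as the ARFF data line ``1.0,'a b',?``.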
class _DataListMixin:
"""Mixin to return a list from decode_rows instead of a generator"""
def decode_rows(self, stream, conversors):
return list(super().decode_rows(stream, conversors))
class Data(_DataListMixin, DenseGeneratorData):
pass
class COOData:
def decode_rows(self, stream, conversors):
data, rows, cols = [], [], []
for i, row in enumerate(stream):
values = _parse_values(row)
if not isinstance(values, dict):
raise BadLayout()
if not values:
continue
row_cols, values = zip(*sorted(values.items()))
try:
values = [value if value is None else conversors[key](value)
for key, value in zip(row_cols, values)]
except ValueError as exc:
if 'float: ' in str(exc):
raise BadNumericalValue()
raise
except IndexError:
# conversor out of range
raise BadDataFormat(row)
data.extend(values)
rows.extend([i] * len(values))
cols.extend(row_cols)
return data, rows, cols
def encode_data(self, data, attributes):
num_attributes = len(attributes)
new_data = []
current_row = 0
row = data.row
col = data.col
data = data.data
# Check if the rows are sorted
if not all(row[i] <= row[i + 1] for i in range(len(row) - 1)):
raise ValueError("liac-arff can only output COO matrices with "
"sorted rows.")
for v, col, row in zip(data, col, row):
if row > current_row:
# Add empty rows if necessary
while current_row < row:
yield " ".join(["{", ','.join(new_data), "}"])
new_data = []
current_row += 1
if col >= num_attributes:
raise BadObject(
'Instance %d has at least %d attributes, expected %d' %
(current_row, col + 1, num_attributes)
)
if v is None or v == '' or v != v:
s = '?'
else:
s = encode_string(str(v))
new_data.append("%d %s" % (col, s))
yield " ".join(["{", ','.join(new_data), "}"])
class LODGeneratorData:
def decode_rows(self, stream, conversors):
for row in stream:
values = _parse_values(row)
if not isinstance(values, dict):
raise BadLayout()
try:
yield {key: None if value is None else conversors[key](value)
for key, value in values.items()}
except ValueError as exc:
if 'float: ' in str(exc):
raise BadNumericalValue()
raise
except IndexError:
# conversor out of range
raise BadDataFormat(row)
def encode_data(self, data, attributes):
current_row = 0
num_attributes = len(attributes)
for row in data:
new_data = []
if len(row) > 0 and max(row) >= num_attributes:
raise BadObject(
'Instance %d has %d attributes, expected %d' %
(current_row, max(row) + 1, num_attributes)
)
for col in sorted(row):
v = row[col]
if v is None or v == '' or v != v:
s = '?'
else:
s = encode_string(str(v))
new_data.append("%d %s" % (col, s))
current_row += 1
yield " ".join(["{", ','.join(new_data), "}"])
class LODData(_DataListMixin, LODGeneratorData):
pass
def _get_data_object_for_decoding(matrix_type):
if matrix_type == DENSE:
return Data()
elif matrix_type == COO:
return COOData()
elif matrix_type == LOD:
return LODData()
elif matrix_type == DENSE_GEN:
return DenseGeneratorData()
elif matrix_type == LOD_GEN:
return LODGeneratorData()
else:
raise ValueError("Matrix type %s not supported." % str(matrix_type))
def _get_data_object_for_encoding(matrix):
# Probably a scipy.sparse
if hasattr(matrix, 'format'):
if matrix.format == 'coo':
return COOData()
else:
raise ValueError('Cannot guess matrix format!')
elif isinstance(matrix[0], dict):
return LODData()
else:
return Data()
# =============================================================================
# ADVANCED INTERFACE ==========================================================
class ArffDecoder:
'''An ARFF decoder.'''
def __init__(self):
'''Constructor.'''
self._conversors = []
self._current_line = 0
def _decode_comment(self, s):
'''(INTERNAL) Decodes a comment line.
Comments are single line strings starting, obligatorily, with the ``%``
character, and can have any symbol, including whitespaces or special
characters.
This method must receive a normalized string, i.e., a string without
padding, including the "\r\n" characters.
:param s: a normalized string.
:return: a string with the decoded comment.
'''
res = re.sub(r'^\%( )?', '', s)
return res
def _decode_relation(self, s):
'''(INTERNAL) Decodes a relation line.
The relation declaration is a line with the format ``@RELATION
<relation-name>``, where ``relation-name`` is a string. The string must
start with an alphabetic character and must be quoted if the name includes
spaces, otherwise this method will raise a `BadRelationFormat` exception.
This method must receive a normalized string, i.e., a string without
padding, including the "\r\n" characters.
:param s: a normalized string.
:return: a string with the decoded relation name.
'''
_, v = s.split(' ', 1)
v = v.strip()
if not _RE_RELATION.match(v):
raise BadRelationFormat()
res = str(v.strip('"\''))
return res
def _decode_attribute(self, s):
'''(INTERNAL) Decodes an attribute line.
The attribute is the most complex declaration in an arff file. All
attributes must follow the template::
@attribute <attribute-name> <datatype>
where ``attribute-name`` is a string, quoted if the name contains any
whitespace, and ``datatype`` can be:
- Numerical attributes as ``NUMERIC``, ``INTEGER`` or ``REAL``.
- Strings as ``STRING``.
- Dates (NOT IMPLEMENTED).
- Nominal attributes with format:
{<nominal-name1>, <nominal-name2>, <nominal-name3>, ...}
The nominal names follow the rules for the attribute names, i.e., they
must be quoted if the name contains whitespaces.
This method must receive a normalized string, i.e., a string without
padding, including the "\r\n" characters.
:param s: a normalized string.
:return: a tuple (ATTRIBUTE_NAME, TYPE_OR_VALUES).
'''
_, v = s.split(' ', 1)
v = v.strip()
# Verify the general structure of declaration
m = _RE_ATTRIBUTE.match(v)
if not m:
raise BadAttributeFormat()
# Extracts the raw name and type
name, type_ = m.groups()
# Extracts the final name
name = str(name.strip('"\''))
# Extracts the final type
if type_[:1] == "{" and type_[-1:] == "}":
try:
type_ = _parse_values(type_.strip('{} '))
except Exception:
raise BadAttributeType()
if isinstance(type_, dict):
raise BadAttributeType()
else:
# If not nominal, verify the type name
type_ = str(type_).upper()
if type_ not in ['NUMERIC', 'REAL', 'INTEGER', 'STRING']:
raise BadAttributeType()
return (name, type_)
def _decode(self, s, encode_nominal=False, matrix_type=DENSE):
'''Do the actual work of ``decode``.'''
# Make sure this method is idempotent
self._current_line = 0
# If string, convert to a list of lines
if isinstance(s, str):
s = s.strip('\r\n ').replace('\r\n', '\n').split('\n')
# Create the return object
obj: ArffContainerType = {
'description': '',
'relation': '',
'attributes': [],
'data': []
}
attribute_names = {}
# Create the data helper object
data = _get_data_object_for_decoding(matrix_type)
# Read all lines
STATE = _TK_DESCRIPTION
s = iter(s)
for row in s:
self._current_line += 1
# Ignore empty lines
row = row.strip(' \r\n')
if not row: continue
u_row = row.upper()
# DESCRIPTION -----------------------------------------------------
if u_row.startswith(_TK_DESCRIPTION) and STATE == _TK_DESCRIPTION:
obj['description'] += self._decode_comment(row) + '\n'
# -----------------------------------------------------------------
# RELATION --------------------------------------------------------
elif u_row.startswith(_TK_RELATION):
if STATE != _TK_DESCRIPTION:
raise BadLayout()
STATE = _TK_RELATION
obj['relation'] = self._decode_relation(row)
# -----------------------------------------------------------------
# ATTRIBUTE -------------------------------------------------------
elif u_row.startswith(_TK_ATTRIBUTE):
if STATE != _TK_RELATION and STATE != _TK_ATTRIBUTE:
raise BadLayout()
STATE = _TK_ATTRIBUTE
attr = self._decode_attribute(row)
if attr[0] in attribute_names:
raise BadAttributeName(attr[0], attribute_names[attr[0]])
else:
attribute_names[attr[0]] = self._current_line
obj['attributes'].append(attr)
if isinstance(attr[1], (list, tuple)):
if encode_nominal:
conversor = EncodedNominalConversor(attr[1])
else:
conversor = NominalConversor(attr[1])
else:
CONVERSOR_MAP = {'STRING': str,
'INTEGER': lambda x: int(float(x)),
'NUMERIC': float,
'REAL': float}
conversor = CONVERSOR_MAP[attr[1]]
self._conversors.append(conversor)
# -----------------------------------------------------------------
# DATA ------------------------------------------------------------
elif u_row.startswith(_TK_DATA):
if STATE != _TK_ATTRIBUTE:
raise BadLayout()
break
# -----------------------------------------------------------------
# COMMENT ---------------------------------------------------------
elif u_row.startswith(_TK_COMMENT):
pass
# -----------------------------------------------------------------
else:
# Never found @DATA
raise BadLayout()
def stream():
for row in s:
self._current_line += 1
row = row.strip()
# Ignore empty lines and comment lines.
if row and not row.startswith(_TK_COMMENT):
yield row
# Alter the data object
obj['data'] = data.decode_rows(stream(), self._conversors)
if obj['description'].endswith('\n'):
obj['description'] = obj['description'][:-1]
return obj
def decode(self, s, encode_nominal=False, return_type=DENSE):
'''Returns the Python representation of a given ARFF file.
When a file object is passed as an argument, this method reads lines
iteratively, avoiding loading unnecessary information into memory.
:param s: a string or file object with the ARFF file.
:param encode_nominal: boolean, if True perform a label encoding
while reading the .arff file.
:param return_type: determines the data structure used to store the
dataset. Can be one of `arff.DENSE`, `arff.COO`, `arff.LOD`,
`arff.DENSE_GEN` or `arff.LOD_GEN`.
Consult the sections on `working with sparse data`_ and `loading
progressively`_.
'''
try:
return self._decode(s, encode_nominal=encode_nominal,
matrix_type=return_type)
except ArffException as e:
e.line = self._current_line
raise e
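# The following helper is an illustrative sketch (not part of the original
# liac-arff module): it shows the dictionary layout documented at the top of
# this file being produced by ``ArffDecoder.decode``. The helper name is
# hypothetical and exists only as executable documentation.
def _example_decode_usage():
    xor_arff = (
        "@RELATION XOR\n"
        "@ATTRIBUTE input1 REAL\n"
        "@ATTRIBUTE input2 REAL\n"
        "@ATTRIBUTE y REAL\n"
        "@DATA\n"
        "0.0,0.0,0.0\n"
        "0.0,1.0,1.0\n"
    )
    dataset = ArffDecoder().decode(xor_arff)
    assert dataset['relation'] == 'XOR'
    assert dataset['attributes'][0] == ('input1', 'REAL')
    assert dataset['data'][0] == [0.0, 0.0, 0.0]
    return dataset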
class ArffEncoder:
'''An ARFF encoder.'''
def _encode_comment(self, s=''):
'''(INTERNAL) Encodes a comment line.
Comments are single line strings starting, obligatorily, with the ``%``
character, and can have any symbol, including whitespaces or special
characters.
If ``s`` is None, this method will simply return an empty comment.
:param s: (OPTIONAL) string.
:return: a string with the encoded comment line.
'''
if s:
return '%s %s'%(_TK_COMMENT, s)
else:
return '%s' % _TK_COMMENT
def _encode_relation(self, name):
'''(INTERNAL) Encodes a relation line.
The relation declaration is a line with the format ``@RELATION
<relation-name>``, where ``relation-name`` is a string.
:param name: a string.
:return: a string with the encoded relation declaration.
'''
for char in ' %{},':
if char in name:
name = '"%s"'%name
break
return '%s %s'%(_TK_RELATION, name)
def _encode_attribute(self, name, type_):
'''(INTERNAL) Encodes an attribute line.
The attribute follows the template::
@attribute <attribute-name> <datatype>
where ``attribute-name`` is a string, and ``datatype`` can be:
- Numerical attributes as ``NUMERIC``, ``INTEGER`` or ``REAL``.
- Strings as ``STRING``.
- Dates (NOT IMPLEMENTED).
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/conftest.py | sklearn/externals/conftest.py | # Do not collect any tests in externals. This is more robust than using
# --ignore because --ignore needs a path and it is not convenient to pass in
# the externals path (very long install-dependent path in site-packages) when
# using --pyargs
def pytest_ignore_collect(collection_path, config):
return True
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/__init__.py | sklearn/externals/__init__.py |
"""
External, bundled dependencies.
"""
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/_array_api_compat_vendor.py | sklearn/externals/_array_api_compat_vendor.py | # DO NOT RENAME THIS FILE
# This is a hook for array_api_extra/_lib/_compat.py
# to co-vendor array_api_compat and potentially override its functions.
from .array_api_compat import * # noqa: F403
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/_scipy/__init__.py | sklearn/externals/_scipy/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/_scipy/sparse/__init__.py | sklearn/externals/_scipy/sparse/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/_scipy/sparse/csgraph/_laplacian.py | sklearn/externals/_scipy/sparse/csgraph/_laplacian.py | """
This file is a copy of the scipy.sparse.csgraph._laplacian module from SciPy 1.12.
scipy.sparse.csgraph.laplacian supports sparse arrays only starting from Scipy 1.12,
see https://github.com/scipy/scipy/pull/19156. This vendored file can be removed as
soon as Scipy 1.12 becomes the minimum supported version.
Laplacian of a compressed-sparse graph
"""
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from scipy.sparse import issparse
from scipy.sparse.linalg import LinearOperator
###############################################################################
# Graph laplacian
def laplacian(
csgraph,
normed=False,
return_diag=False,
use_out_degree=False,
*,
copy=True,
form="array",
dtype=None,
symmetrized=False,
):
"""
Return the Laplacian of a directed graph.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
Compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute symmetrically normalized Laplacian.
Default: False.
return_diag : bool, optional
If True, then also return an array related to vertex degrees.
Default: False.
use_out_degree : bool, optional
If True, then use out-degree instead of in-degree.
This distinction matters only if the graph is asymmetric.
Default: False.
copy : bool, optional
If False, then change `csgraph` in place if possible,
avoiding doubling the memory use.
Default: True, for backward compatibility.
form : 'array', or 'function', or 'lo'
Determines the format of the output Laplacian:
* 'array' is a numpy array;
* 'function' is a pointer to evaluating the Laplacian-vector
or Laplacian-matrix product;
* 'lo' results in the format of the `LinearOperator`.
Choosing 'function' or 'lo' always avoids doubling
the memory use, ignoring `copy` value.
Default: 'array', for backward compatibility.
dtype : None or one of numeric numpy dtypes, optional
The dtype of the output. If ``dtype=None``, the dtype of the
output matches the dtype of the input csgraph, except for
the case ``normed=True`` and integer-like csgraph, where
the output dtype is 'float' allowing accurate normalization,
but dramatically increasing the memory use.
Default: None, for backward compatibility.
symmetrized : bool, optional
If True, then the output Laplacian is symmetric/Hermitian.
The symmetrization is done by ``csgraph + csgraph.T.conj``
without dividing by 2 to preserve integer dtypes if possible
prior to the construction of the Laplacian.
The symmetrization will increase the memory footprint of
sparse matrices unless the sparsity pattern is symmetric or
`form` is 'function' or 'lo'.
Default: False, for backward compatibility.
Returns
-------
lap : ndarray, or sparse matrix, or `LinearOperator`
The N x N Laplacian of csgraph. It will be a NumPy array (dense)
if the input was dense, or a sparse matrix otherwise, or
the format of a function or `LinearOperator` if
`form` equals 'function' or 'lo', respectively.
diag : ndarray, optional
The length-N main diagonal of the Laplacian matrix.
For the normalized Laplacian, this is the array of square roots
of vertex degrees or 1 if the degree is zero.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchhoff matrix" or just the "Laplacian", and is useful in many
parts of spectral graph theory.
In particular, the eigen-decomposition of the Laplacian can give
insight into many properties of the graph, e.g.,
is commonly used for spectral data embedding and clustering.
The constructed Laplacian doubles the memory use if ``copy=True`` and
``form="array"`` which is the default.
Choosing ``copy=False`` has no effect unless ``form="array"``
or the matrix is sparse in the ``coo`` format, or dense array, except
for the integer input with ``normed=True`` that forces the float output.
Sparse input is reformatted into ``coo`` if ``form="array"``,
which is the default.
If the input adjacency matrix is not symmetric, the Laplacian is
also non-symmetric unless ``symmetrized=True`` is used.
Diagonal entries of the input adjacency matrix are ignored and
replaced with zeros for the purpose of normalization where ``normed=True``.
The normalization uses the inverse square roots of row-sums of the input
adjacency matrix, and thus may fail if the row-sums contain
negative values or complex values with a non-zero imaginary part.
The normalization is symmetric, making the normalized Laplacian also
symmetric if the input csgraph was symmetric.
References
----------
.. [1] Laplacian matrix. https://en.wikipedia.org/wiki/Laplacian_matrix
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csgraph
Our first illustration is the symmetric graph
>>> G = np.arange(4) * np.arange(4)[:, np.newaxis]
>>> G
array([[0, 0, 0, 0],
[0, 1, 2, 3],
[0, 2, 4, 6],
[0, 3, 6, 9]])
and its symmetric Laplacian matrix
>>> csgraph.laplacian(G)
array([[ 0, 0, 0, 0],
[ 0, 5, -2, -3],
[ 0, -2, 8, -6],
[ 0, -3, -6, 9]])
The non-symmetric graph
>>> G = np.arange(9).reshape(3, 3)
>>> G
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
has different row- and column sums, resulting in two varieties
of the Laplacian matrix, using an in-degree, which is the default
>>> L_in_degree = csgraph.laplacian(G)
>>> L_in_degree
array([[ 9, -1, -2],
[-3, 8, -5],
[-6, -7, 7]])
or alternatively an out-degree
>>> L_out_degree = csgraph.laplacian(G, use_out_degree=True)
>>> L_out_degree
array([[ 3, -1, -2],
[-3, 8, -5],
[-6, -7, 13]])
Constructing a symmetric Laplacian matrix, one can add the two as
>>> L_in_degree + L_out_degree.T
array([[ 12, -4, -8],
[ -4, 16, -12],
[ -8, -12, 20]])
or use the ``symmetrized=True`` option
>>> csgraph.laplacian(G, symmetrized=True)
array([[ 12, -4, -8],
[ -4, 16, -12],
[ -8, -12, 20]])
that is equivalent to symmetrizing the original graph
>>> csgraph.laplacian(G + G.T)
array([[ 12, -4, -8],
[ -4, 16, -12],
[ -8, -12, 20]])
The goal of normalization is to make the non-zero diagonal entries
of the Laplacian matrix all unit, scaling off-diagonal
entries correspondingly.
>>> G = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]])
>>> L, d = csgraph.laplacian(G, return_diag=True)
>>> L
array([[ 2, -1, -1],
[-1, 2, -1],
[-1, -1, 2]])
>>> d
array([2, 2, 2])
>>> scaling = np.sqrt(d)
>>> scaling
array([1.41421356, 1.41421356, 1.41421356])
>>> (1/scaling)*L*(1/scaling)
array([[ 1. , -0.5, -0.5],
[-0.5, 1. , -0.5],
[-0.5, -0.5, 1. ]])
Or using ``normed=True`` option
>>> L, d = csgraph.laplacian(G, return_diag=True, normed=True)
>>> L
array([[ 1. , -0.5, -0.5],
[-0.5, 1. , -0.5],
[-0.5, -0.5, 1. ]])
which now instead of the diagonal returns the scaling coefficients
>>> d
array([1.41421356, 1.41421356, 1.41421356])
Zero scaling coefficients are substituted with 1s, where scaling
has thus no effect, e.g.,
>>> G = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0]])
>>> G
array([[0, 0, 0],
[0, 0, 1],
[0, 1, 0]])
>>> L, d = csgraph.laplacian(G, return_diag=True, normed=True)
>>> L
array([[ 0., -0., -0.],
[-0., 1., -1.],
[-0., -1., 1.]])
>>> d
array([1., 1., 1.])
Only the symmetric normalization is implemented, resulting
in a symmetric Laplacian matrix if and only if its graph is symmetric
and has all non-negative degrees, like in the examples above.
The output Laplacian matrix is by default a dense array or a sparse matrix
inferring its shape, format, and dtype from the input graph matrix:
>>> G = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]]).astype(np.float32)
>>> G
array([[0., 1., 1.],
[1., 0., 1.],
[1., 1., 0.]], dtype=float32)
>>> csgraph.laplacian(G)
array([[ 2., -1., -1.],
[-1., 2., -1.],
[-1., -1., 2.]], dtype=float32)
but can alternatively be generated matrix-free as a LinearOperator:
>>> L = csgraph.laplacian(G, form="lo")
>>> L
<3x3 _CustomLinearOperator with dtype=float32>
>>> L(np.eye(3))
array([[ 2., -1., -1.],
[-1., 2., -1.],
[-1., -1., 2.]])
or as a lambda-function:
>>> L = csgraph.laplacian(G, form="function")
>>> L
<function _laplace.<locals>.<lambda> at 0x0000012AE6F5A598>
>>> L(np.eye(3))
array([[ 2., -1., -1.],
[-1., 2., -1.],
[-1., -1., 2.]])
The Laplacian matrix is used for
spectral data clustering and embedding
as well as for spectral graph partitioning.
Our final example illustrates the latter
for a noisy directed linear graph.
>>> from scipy.sparse import diags, random
>>> from scipy.sparse.linalg import lobpcg
Create a directed linear graph with ``N=35`` vertices
using a sparse adjacency matrix ``G``:
>>> N = 35
>>> G = diags(np.ones(N-1), 1, format="csr")
Fix a random seed ``rng`` and add a random sparse noise to the graph ``G``:
>>> rng = np.random.default_rng()
>>> G += 1e-2 * random(N, N, density=0.1, random_state=rng)
Set initial approximations for eigenvectors:
>>> X = rng.random((N, 2))
The constant vector of ones is always a trivial eigenvector
of the non-normalized Laplacian to be filtered out:
>>> Y = np.ones((N, 1))
Alternating (1) the sign of the graph weights allows determining
labels for spectral max- and min- cuts in a single loop.
Since the graph is undirected, the option ``symmetrized=True``
must be used in the construction of the Laplacian.
The option ``normed=True`` cannot be used in (2) for the negative weights
here as the symmetric normalization evaluates square roots.
The option ``form="lo"`` in (2) is matrix-free, i.e., guarantees
a fixed memory footprint and read-only access to the graph.
Calling the eigenvalue solver ``lobpcg`` (3) computes the Fiedler vector
that determines the labels as the signs of its components in (5).
Since the sign in an eigenvector is not deterministic and can flip,
we fix the sign of the first component to be always +1 in (4).
>>> for cut in ["max", "min"]:
... G = -G # 1.
... L = csgraph.laplacian(G, symmetrized=True, form="lo") # 2.
... _, eves = lobpcg(L, X, Y=Y, largest=False, tol=1e-3) # 3.
... eves *= np.sign(eves[0, 0]) # 4.
... print(cut + "-cut labels:\\n", 1 * (eves[:, 0]>0)) # 5.
max-cut labels:
[1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1]
min-cut labels:
[1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
As anticipated for a (slightly noisy) linear graph,
the max-cut strips all the edges of the graph coloring all
odd vertices into one color and all even vertices into another one,
while the balanced min-cut partitions the graph
in the middle by deleting a single edge.
Both determined partitions are optimal.
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError("csgraph must be a square matrix or array")
if normed and (
np.issubdtype(csgraph.dtype, np.signedinteger)
or np.issubdtype(csgraph.dtype, np.uint)
):
csgraph = csgraph.astype(np.float64)
if form == "array":
create_lap = _laplacian_sparse if issparse(csgraph) else _laplacian_dense
else:
create_lap = (
_laplacian_sparse_flo if issparse(csgraph) else _laplacian_dense_flo
)
degree_axis = 1 if use_out_degree else 0
lap, d = create_lap(
csgraph,
normed=normed,
axis=degree_axis,
copy=copy,
form=form,
dtype=dtype,
symmetrized=symmetrized,
)
if return_diag:
return lap, d
return lap
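# A minimal usage sketch of the vendored function above (comments added for
# clarity; not part of the SciPy source), matching the sparse-array use case
# that motivated the vendoring::
#
#     import numpy as np
#     from scipy.sparse import csr_array
#
#     A = csr_array(np.array([[0.0, 1.0], [1.0, 0.0]]))
#     L, d = laplacian(A, normed=True, return_diag=True)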
def _setdiag_dense(m, d):
step = len(d) + 1
m.flat[::step] = d
def _laplace(m, d):
return lambda v: v * d[:, np.newaxis] - m @ v
def _laplace_normed(m, d, nd):
laplace = _laplace(m, d)
return lambda v: nd[:, np.newaxis] * laplace(v * nd[:, np.newaxis])
def _laplace_sym(m, d):
return (
lambda v: v * d[:, np.newaxis]
- m @ v
- np.transpose(np.conjugate(np.transpose(np.conjugate(v)) @ m))
)
def _laplace_normed_sym(m, d, nd):
laplace_sym = _laplace_sym(m, d)
return lambda v: nd[:, np.newaxis] * laplace_sym(v * nd[:, np.newaxis])
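# Illustrative sketch (not part of the vendored SciPy code): the closures above
# implement matrix-free products with the graph Laplacian L = D - A, where D is the
# diagonal matrix of row (or column) sums of the adjacency matrix A. Applying the
# closure to the identity recovers the dense Laplacian, which is handy for sanity
# checks on tiny graphs. The helper name ``_laplace_example`` is hypothetical.
def _laplace_example():
    adj = np.array([[0.0, 1.0, 1.0],
                    [1.0, 0.0, 1.0],
                    [1.0, 1.0, 0.0]])
    deg = adj.sum(axis=1)        # vertex degrees, D = diag(deg)
    matvec = _laplace(adj, deg)  # matrix-free v -> L @ v
    return matvec(np.eye(3))     # [[ 2., -1., -1.], [-1., 2., -1.], [-1., -1., 2.]]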
def _linearoperator(mv, shape, dtype):
return LinearOperator(matvec=mv, matmat=mv, shape=shape, dtype=dtype)
def _laplacian_sparse_flo(graph, normed, axis, copy, form, dtype, symmetrized):
# The keyword argument `copy` is unused and has no effect here.
del copy
if dtype is None:
dtype = graph.dtype
graph_sum = np.asarray(graph.sum(axis=axis)).ravel()
graph_diagonal = graph.diagonal()
diag = graph_sum - graph_diagonal
if symmetrized:
graph_sum += np.asarray(graph.sum(axis=1 - axis)).ravel()
diag = graph_sum - graph_diagonal - graph_diagonal
if normed:
isolated_node_mask = diag == 0
w = np.where(isolated_node_mask, 1, np.sqrt(diag))
if symmetrized:
md = _laplace_normed_sym(graph, graph_sum, 1.0 / w)
else:
md = _laplace_normed(graph, graph_sum, 1.0 / w)
if form == "function":
return md, w.astype(dtype, copy=False)
elif form == "lo":
m = _linearoperator(md, shape=graph.shape, dtype=dtype)
return m, w.astype(dtype, copy=False)
else:
raise ValueError(f"Invalid form: {form!r}")
else:
if symmetrized:
md = _laplace_sym(graph, graph_sum)
else:
md = _laplace(graph, graph_sum)
if form == "function":
return md, diag.astype(dtype, copy=False)
elif form == "lo":
m = _linearoperator(md, shape=graph.shape, dtype=dtype)
return m, diag.astype(dtype, copy=False)
else:
raise ValueError(f"Invalid form: {form!r}")
def _laplacian_sparse(graph, normed, axis, copy, form, dtype, symmetrized):
# The keyword argument `form` is unused and has no effect here.
del form
if dtype is None:
dtype = graph.dtype
needs_copy = False
if graph.format in ("lil", "dok"):
m = graph.tocoo()
else:
m = graph
if copy:
needs_copy = True
if symmetrized:
m += m.T.conj()
w = np.asarray(m.sum(axis=axis)).ravel() - m.diagonal()
if normed:
m = m.tocoo(copy=needs_copy)
isolated_node_mask = w == 0
w = np.where(isolated_node_mask, 1, np.sqrt(w))
m.data /= w[m.row]
m.data /= w[m.col]
m.data *= -1
m.setdiag(1 - isolated_node_mask)
else:
if m.format == "dia":
m = m.copy()
else:
m = m.tocoo(copy=needs_copy)
m.data *= -1
m.setdiag(w)
return m.astype(dtype, copy=False), w.astype(dtype)
def _laplacian_dense_flo(graph, normed, axis, copy, form, dtype, symmetrized):
if copy:
m = np.array(graph)
else:
m = np.asarray(graph)
if dtype is None:
dtype = m.dtype
graph_sum = m.sum(axis=axis)
graph_diagonal = m.diagonal()
diag = graph_sum - graph_diagonal
if symmetrized:
graph_sum += m.sum(axis=1 - axis)
diag = graph_sum - graph_diagonal - graph_diagonal
if normed:
isolated_node_mask = diag == 0
w = np.where(isolated_node_mask, 1, np.sqrt(diag))
if symmetrized:
md = _laplace_normed_sym(m, graph_sum, 1.0 / w)
else:
md = _laplace_normed(m, graph_sum, 1.0 / w)
if form == "function":
return md, w.astype(dtype, copy=False)
elif form == "lo":
m = _linearoperator(md, shape=graph.shape, dtype=dtype)
return m, w.astype(dtype, copy=False)
else:
raise ValueError(f"Invalid form: {form!r}")
else:
if symmetrized:
md = _laplace_sym(m, graph_sum)
else:
md = _laplace(m, graph_sum)
if form == "function":
return md, diag.astype(dtype, copy=False)
elif form == "lo":
m = _linearoperator(md, shape=graph.shape, dtype=dtype)
return m, diag.astype(dtype, copy=False)
else:
raise ValueError(f"Invalid form: {form!r}")
def _laplacian_dense(graph, normed, axis, copy, form, dtype, symmetrized):
if form != "array":
raise ValueError(f'{form!r} must be "array"')
if dtype is None:
dtype = graph.dtype
if copy:
m = np.array(graph)
else:
m = np.asarray(graph)
if dtype is None:
dtype = m.dtype
if symmetrized:
m += m.T.conj()
np.fill_diagonal(m, 0)
w = m.sum(axis=axis)
if normed:
isolated_node_mask = w == 0
w = np.where(isolated_node_mask, 1, np.sqrt(w))
m /= w
m /= w[:, np.newaxis]
m *= -1
_setdiag_dense(m, 1 - isolated_node_mask)
else:
m *= -1
_setdiag_dense(m, w)
return m.astype(dtype, copy=False), w.astype(dtype, copy=False)
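# Illustrative sketch (not part of the vendored SciPy code): the dense helper
# returns the Laplacian together with the degree (or normalization) vector. The
# helper name ``_laplacian_dense_example`` is hypothetical.
def _laplacian_dense_example():
    adj = np.array([[0.0, 1.0], [1.0, 0.0]])
    lap, d = _laplacian_dense(
        adj, normed=False, axis=1, copy=True, form="array",
        dtype=None, symmetrized=False,
    )
    return lap, d  # lap == [[1., -1.], [-1., 1.]], d == [1., 1.]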
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/_scipy/sparse/csgraph/__init__.py | sklearn/externals/_scipy/sparse/csgraph/__init__.py | from ._laplacian import laplacian
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/_packaging/_structures.py | sklearn/externals/_packaging/_structures.py | """Vendoered from
https://github.com/pypa/packaging/blob/main/packaging/_structures.py
"""
# Copyright (c) Donald Stufft and individual contributors.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class InfinityType:
def __repr__(self) -> str:
return "Infinity"
def __hash__(self) -> int:
return hash(repr(self))
def __lt__(self, other: object) -> bool:
return False
def __le__(self, other: object) -> bool:
return False
def __eq__(self, other: object) -> bool:
return isinstance(other, self.__class__)
def __ne__(self, other: object) -> bool:
return not isinstance(other, self.__class__)
def __gt__(self, other: object) -> bool:
return True
def __ge__(self, other: object) -> bool:
return True
def __neg__(self: object) -> "NegativeInfinityType":
return NegativeInfinity
Infinity = InfinityType()
class NegativeInfinityType:
def __repr__(self) -> str:
return "-Infinity"
def __hash__(self) -> int:
return hash(repr(self))
def __lt__(self, other: object) -> bool:
return True
def __le__(self, other: object) -> bool:
return True
def __eq__(self, other: object) -> bool:
return isinstance(other, self.__class__)
def __ne__(self, other: object) -> bool:
return not isinstance(other, self.__class__)
def __gt__(self, other: object) -> bool:
return False
def __ge__(self, other: object) -> bool:
return False
def __neg__(self: object) -> InfinityType:
return Infinity
NegativeInfinity = NegativeInfinityType()
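# Illustrative sketch (not part of the vendored packaging code): the two sentinels
# compare above and below every other value, which is what the version sort keys in
# ``version.py`` rely on. The helper name ``_sentinel_example`` is hypothetical.
def _sentinel_example():
    assert Infinity > ("rc", 1) and not Infinity < ("rc", 1)
    assert NegativeInfinity < 0 and -Infinity is NegativeInfinity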
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/_packaging/version.py | sklearn/externals/_packaging/version.py | """Vendored from
https://github.com/pypa/packaging/blob/main/packaging/version.py
"""
# Copyright (c) Donald Stufft and individual contributors.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import collections
import itertools
import re
import warnings
from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union
from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType
__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"]
InfiniteTypes = Union[InfinityType, NegativeInfinityType]
PrePostDevType = Union[InfiniteTypes, Tuple[str, int]]
SubLocalType = Union[InfiniteTypes, int, str]
LocalType = Union[
NegativeInfinityType,
Tuple[
Union[
SubLocalType,
Tuple[SubLocalType, str],
Tuple[NegativeInfinityType, SubLocalType],
],
...,
],
]
CmpKey = Tuple[
int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType
]
LegacyCmpKey = Tuple[int, Tuple[str, ...]]
VersionComparisonMethod = Callable[
[Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool
]
_Version = collections.namedtuple(
"_Version", ["epoch", "release", "dev", "pre", "post", "local"]
)
def parse(version: str) -> Union["LegacyVersion", "Version"]:
"""Parse the given version from a string to an appropriate class.
Parameters
----------
version : str
Version in a string format, e.g. "0.9.1" or "1.2.dev0".
Returns
-------
version : :class:`Version` object or a :class:`LegacyVersion` object
The returned class depends on whether the given version
is a valid PEP 440 version or a legacy version.
"""
try:
return Version(version)
except InvalidVersion:
return LegacyVersion(version)
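# Illustrative sketch (not part of the vendored packaging code): a valid PEP 440
# string yields a ``Version``; anything else falls back to ``LegacyVersion``, which
# emits a DeprecationWarning. The helper name ``_parse_example`` is hypothetical.
def _parse_example():
    assert isinstance(parse("1.2.dev0"), Version)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", DeprecationWarning)
        assert isinstance(parse("french toast"), LegacyVersion)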
class InvalidVersion(ValueError):
"""
An invalid version was found, users should refer to PEP 440.
"""
class _BaseVersion:
_key: Union[CmpKey, LegacyCmpKey]
def __hash__(self) -> int:
return hash(self._key)
# Please keep the duplicated `isinstance` check
# in the six comparisons hereunder
# unless you find a way to avoid adding overhead function calls.
def __lt__(self, other: "_BaseVersion") -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key < other._key
def __le__(self, other: "_BaseVersion") -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key <= other._key
def __eq__(self, other: object) -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key == other._key
def __ge__(self, other: "_BaseVersion") -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key >= other._key
def __gt__(self, other: "_BaseVersion") -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key > other._key
def __ne__(self, other: object) -> bool:
if not isinstance(other, _BaseVersion):
return NotImplemented
return self._key != other._key
class LegacyVersion(_BaseVersion):
def __init__(self, version: str) -> None:
self._version = str(version)
self._key = _legacy_cmpkey(self._version)
warnings.warn(
"Creating a LegacyVersion has been deprecated and will be "
"removed in the next major release",
DeprecationWarning,
)
def __str__(self) -> str:
return self._version
def __repr__(self) -> str:
return f"<LegacyVersion('{self}')>"
@property
def public(self) -> str:
return self._version
@property
def base_version(self) -> str:
return self._version
@property
def epoch(self) -> int:
return -1
@property
def release(self) -> None:
return None
@property
def pre(self) -> None:
return None
@property
def post(self) -> None:
return None
@property
def dev(self) -> None:
return None
@property
def local(self) -> None:
return None
@property
def is_prerelease(self) -> bool:
return False
@property
def is_postrelease(self) -> bool:
return False
@property
def is_devrelease(self) -> bool:
return False
_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE)
_legacy_version_replacement_map = {
"pre": "c",
"preview": "c",
"-": "final-",
"rc": "c",
"dev": "@",
}
def _parse_version_parts(s: str) -> Iterator[str]:
for part in _legacy_version_component_re.split(s):
part = _legacy_version_replacement_map.get(part, part)
if not part or part == ".":
continue
if part[:1] in "0123456789":
# pad for numeric comparison
yield part.zfill(8)
else:
yield "*" + part
# ensure that alpha/beta/candidate are before final
yield "*final"
def _legacy_cmpkey(version: str) -> LegacyCmpKey:
# We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
# greater than or equal to 0. This effectively puts the LegacyVersion,
# which uses the de facto standard originally implemented by setuptools,
# before all PEP 440 versions.
epoch = -1
# This scheme is taken from pkg_resources.parse_version of setuptools prior to
# its adoption of the packaging library.
parts: List[str] = []
for part in _parse_version_parts(version.lower()):
if part.startswith("*"):
# remove "-" before a prerelease tag
if part < "*final":
while parts and parts[-1] == "*final-":
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == "00000000":
parts.pop()
parts.append(part)
return epoch, tuple(parts)
# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
VERSION_PATTERN = r"""
v?
(?:
(?:(?P<epoch>[0-9]+)!)? # epoch
(?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
(?P<pre> # pre-release
[-_\.]?
(?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
[-_\.]?
(?P<pre_n>[0-9]+)?
)?
(?P<post> # post release
(?:-(?P<post_n1>[0-9]+))
|
(?:
[-_\.]?
(?P<post_l>post|rev|r)
[-_\.]?
(?P<post_n2>[0-9]+)?
)
)?
(?P<dev> # dev release
[-_\.]?
(?P<dev_l>dev)
[-_\.]?
(?P<dev_n>[0-9]+)?
)?
)
(?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
"""
class Version(_BaseVersion):
_regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
def __init__(self, version: str) -> None:
# Validate the version and parse it into pieces
match = self._regex.search(version)
if not match:
raise InvalidVersion(f"Invalid version: '{version}'")
# Store the parsed out pieces of the version
self._version = _Version(
epoch=int(match.group("epoch")) if match.group("epoch") else 0,
release=tuple(int(i) for i in match.group("release").split(".")),
pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
post=_parse_letter_version(
match.group("post_l"), match.group("post_n1") or match.group("post_n2")
),
dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
local=_parse_local_version(match.group("local")),
)
# Generate a key which will be used for sorting
self._key = _cmpkey(
self._version.epoch,
self._version.release,
self._version.pre,
self._version.post,
self._version.dev,
self._version.local,
)
def __repr__(self) -> str:
return f"<Version('{self}')>"
def __str__(self) -> str:
parts = []
# Epoch
if self.epoch != 0:
parts.append(f"{self.epoch}!")
# Release segment
parts.append(".".join(str(x) for x in self.release))
# Pre-release
if self.pre is not None:
parts.append("".join(str(x) for x in self.pre))
# Post-release
if self.post is not None:
parts.append(f".post{self.post}")
# Development release
if self.dev is not None:
parts.append(f".dev{self.dev}")
# Local version segment
if self.local is not None:
parts.append(f"+{self.local}")
return "".join(parts)
@property
def epoch(self) -> int:
_epoch: int = self._version.epoch
return _epoch
@property
def release(self) -> Tuple[int, ...]:
_release: Tuple[int, ...] = self._version.release
return _release
@property
def pre(self) -> Optional[Tuple[str, int]]:
_pre: Optional[Tuple[str, int]] = self._version.pre
return _pre
@property
def post(self) -> Optional[int]:
return self._version.post[1] if self._version.post else None
@property
def dev(self) -> Optional[int]:
return self._version.dev[1] if self._version.dev else None
@property
def local(self) -> Optional[str]:
if self._version.local:
return ".".join(str(x) for x in self._version.local)
else:
return None
@property
def public(self) -> str:
return str(self).split("+", 1)[0]
@property
def base_version(self) -> str:
parts = []
# Epoch
if self.epoch != 0:
parts.append(f"{self.epoch}!")
# Release segment
parts.append(".".join(str(x) for x in self.release))
return "".join(parts)
@property
def is_prerelease(self) -> bool:
return self.dev is not None or self.pre is not None
@property
def is_postrelease(self) -> bool:
return self.post is not None
@property
def is_devrelease(self) -> bool:
return self.dev is not None
@property
def major(self) -> int:
return self.release[0] if len(self.release) >= 1 else 0
@property
def minor(self) -> int:
return self.release[1] if len(self.release) >= 2 else 0
@property
def micro(self) -> int:
return self.release[2] if len(self.release) >= 3 else 0
def _parse_letter_version(
letter: str, number: Union[str, bytes, SupportsInt]
) -> Optional[Tuple[str, int]]:
if letter:
# We consider there to be an implicit 0 in a pre-release if there is
# not a numeral associated with it.
if number is None:
number = 0
# We normalize any letters to their lower case form
letter = letter.lower()
# We consider some words to be alternate spellings of other words and
# in those cases we want to normalize the spellings to our preferred
# spelling.
if letter == "alpha":
letter = "a"
elif letter == "beta":
letter = "b"
elif letter in ["c", "pre", "preview"]:
letter = "rc"
elif letter in ["rev", "r"]:
letter = "post"
return letter, int(number)
if not letter and number:
# We assume that if we are given a number but not a letter,
# then this is using the implicit post release syntax (e.g. 1.0-1).
letter = "post"
return letter, int(number)
return None
_local_version_separators = re.compile(r"[\._-]")
def _parse_local_version(local: str) -> Optional[LocalType]:
"""
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
"""
if local is not None:
return tuple(
part.lower() if not part.isdigit() else int(part)
for part in _local_version_separators.split(local)
)
return None
def _cmpkey(
epoch: int,
release: Tuple[int, ...],
pre: Optional[Tuple[str, int]],
post: Optional[Tuple[str, int]],
dev: Optional[Tuple[str, int]],
local: Optional[Tuple[SubLocalType]],
) -> CmpKey:
# When we compare a release version, we want to compare it with all of the
# trailing zeros removed. So we'll reverse the list, drop all the now-leading
# zeros until we come to something non-zero, then re-reverse the rest back
# into the correct order, make it a tuple, and use that as our sorting key.
_release = tuple(
reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
)
# We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
# We'll do this by abusing the pre segment, but we _only_ want to do this
# if there is not a pre or a post segment. If we have one of those then
# the normal sorting rules will handle this case correctly.
if pre is None and post is None and dev is not None:
_pre: PrePostDevType = NegativeInfinity
# Versions without a pre-release (except as noted above) should sort after
# those with one.
elif pre is None:
_pre = Infinity
else:
_pre = pre
# Versions without a post segment should sort before those with one.
if post is None:
_post: PrePostDevType = NegativeInfinity
else:
_post = post
# Versions without a development segment should sort after those with one.
if dev is None:
_dev: PrePostDevType = Infinity
else:
_dev = dev
if local is None:
# Versions without a local segment should sort before those with one.
_local: LocalType = NegativeInfinity
else:
# Versions with a local segment need that segment parsed to implement
# the sorting rules in PEP440.
# - Alpha numeric segments sort before numeric segments
# - Alpha numeric segments sort lexicographically
# - Numeric segments sort numerically
# - Shorter versions sort before longer versions when the prefixes
# match exactly
_local = tuple(
(i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
)
return epoch, _release, _pre, _post, _dev, _local
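# Illustrative sketch (not part of the vendored packaging code): the key built
# above yields the PEP 440 ordering dev < pre < final < post, with a local version
# sorting just after its public counterpart. The helper name ``_ordering_example``
# is hypothetical.
def _ordering_example():
    ordered = ["1.0.dev0", "1.0a1", "1.0", "1.0+local", "1.0.post1"]
    assert sorted(ordered, key=Version) == ordered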
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/_packaging/__init__.py | sklearn/externals/_packaging/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_compat/_internal.py | sklearn/externals/array_api_compat/_internal.py | """
Internal helpers
"""
from collections.abc import Callable
from functools import wraps
from inspect import signature
from types import ModuleType
from typing import TypeVar
_T = TypeVar("_T")
def get_xp(xp: ModuleType) -> Callable[[Callable[..., _T]], Callable[..., _T]]:
"""
Decorator to automatically replace xp with the corresponding array module.
Use like
import numpy as np
@get_xp(np)
def func(x, /, xp, kwarg=None):
return xp.func(x, kwarg=kwarg)
Note that xp must be a keyword argument and come after all non-keyword
arguments.
"""
def inner(f: Callable[..., _T], /) -> Callable[..., _T]:
@wraps(f)
def wrapped_f(*args: object, **kwargs: object) -> object:
return f(*args, xp=xp, **kwargs)
sig = signature(f)
new_sig = sig.replace(
parameters=[par for i, par in sig.parameters.items() if i != "xp"]
)
if wrapped_f.__doc__ is None:
wrapped_f.__doc__ = f"""\
Array API compatibility wrapper for {f.__name__}.
See the corresponding documentation in NumPy/CuPy and/or the array API
specification for more details.
"""
wrapped_f.__signature__ = new_sig # pyright: ignore[reportAttributeAccessIssue]
return wrapped_f # pyright: ignore[reportReturnType]
return inner
__all__ = ["get_xp"]
def __dir__() -> list[str]:
return __all__
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_compat/__init__.py | sklearn/externals/array_api_compat/__init__.py | """
NumPy Array API compatibility library
This is a small wrapper around NumPy, CuPy, JAX, sparse and others that are
compatible with the Array API standard https://data-apis.org/array-api/latest/.
See also NEP 47 https://numpy.org/neps/nep-0047-array-api-standard.html.
Unlike array_api_strict, this is not a strict minimal implementation of the
Array API, but rather just an extension of the main NumPy namespace with
changes needed to be compliant with the Array API. See
https://numpy.org/doc/stable/reference/array_api.html for a full list of
changes. In particular, unlike array_api_strict, this package does not use a
separate Array object, but rather just uses numpy.ndarray directly.
Library authors using the Array API may wish to test against array_api_strict
to ensure they are not using functionality outside of the standard, but prefer
this implementation for the default when working with NumPy arrays.
"""
__version__ = '1.12.0'
from .common import * # noqa: F401, F403
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_compat/dask/__init__.py | sklearn/externals/array_api_compat/dask/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_compat/dask/array/_aliases.py | sklearn/externals/array_api_compat/dask/array/_aliases.py | # pyright: reportPrivateUsage=false
# pyright: reportUnknownArgumentType=false
# pyright: reportUnknownMemberType=false
# pyright: reportUnknownVariableType=false
from __future__ import annotations
from builtins import bool as py_bool
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from typing_extensions import TypeIs
import dask.array as da
import numpy as np
from numpy import bool_ as bool
from numpy import (
can_cast,
complex64,
complex128,
float32,
float64,
int8,
int16,
int32,
int64,
result_type,
uint8,
uint16,
uint32,
uint64,
)
from ..._internal import get_xp
from ...common import _aliases, _helpers, array_namespace
from ...common._typing import (
Array,
Device,
DType,
NestedSequence,
SupportsBufferProtocol,
)
from ._info import __array_namespace_info__
isdtype = get_xp(np)(_aliases.isdtype)
unstack = get_xp(da)(_aliases.unstack)
# da.astype doesn't respect copy=True
def astype(
x: Array,
dtype: DType,
/,
*,
copy: py_bool = True,
device: Device | None = None,
) -> Array:
"""
Array API compatibility wrapper for astype().
See the corresponding documentation in the array library and/or the array API
specification for more details.
"""
# TODO: respect device keyword?
_helpers._check_device(da, device)
if not copy and dtype == x.dtype:
return x
x = x.astype(dtype)
return x.copy() if copy else x
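# Illustrative sketch (not part of the wrapper itself): unlike ``da.Array.astype``,
# the wrapper honors ``copy=True`` even when the dtype is unchanged, and returns
# the input untouched for ``copy=False``. The helper name ``_astype_copy_example``
# is hypothetical.
def _astype_copy_example():
    x = da.zeros(3, dtype="float32")
    assert astype(x, x.dtype, copy=False) is x
    assert astype(x, x.dtype, copy=True) is not x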
# Common aliases
# This arange func is modified from the common one to
# not pass stop/step as keyword arguments, which will cause
# an error with dask
def arange(
start: float,
/,
stop: float | None = None,
step: float = 1,
*,
dtype: DType | None = None,
device: Device | None = None,
**kwargs: object,
) -> Array:
"""
Array API compatibility wrapper for arange().
See the corresponding documentation in the array library and/or the array API
specification for more details.
"""
# TODO: respect device keyword?
_helpers._check_device(da, device)
args: list[Any] = [start]
if stop is not None:
args.append(stop)
else:
# stop is None, so start is actually stop
# prepend the default value for start which is 0
args.insert(0, 0)
args.append(step)
return da.arange(*args, dtype=dtype, **kwargs)
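# Illustrative sketch (not part of the wrapper itself): with a single positional
# value that value is treated as ``stop`` and 0 is prepended as the start,
# mirroring ``np.arange``. The helper name ``_arange_example`` is hypothetical.
def _arange_example():
    return arange(5)  # lazily computes [0, 1, 2, 3, 4]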
eye = get_xp(da)(_aliases.eye)
linspace = get_xp(da)(_aliases.linspace)
UniqueAllResult = get_xp(da)(_aliases.UniqueAllResult)
UniqueCountsResult = get_xp(da)(_aliases.UniqueCountsResult)
UniqueInverseResult = get_xp(da)(_aliases.UniqueInverseResult)
unique_all = get_xp(da)(_aliases.unique_all)
unique_counts = get_xp(da)(_aliases.unique_counts)
unique_inverse = get_xp(da)(_aliases.unique_inverse)
unique_values = get_xp(da)(_aliases.unique_values)
permute_dims = get_xp(da)(_aliases.permute_dims)
std = get_xp(da)(_aliases.std)
var = get_xp(da)(_aliases.var)
cumulative_sum = get_xp(da)(_aliases.cumulative_sum)
cumulative_prod = get_xp(da)(_aliases.cumulative_prod)
empty = get_xp(da)(_aliases.empty)
empty_like = get_xp(da)(_aliases.empty_like)
full = get_xp(da)(_aliases.full)
full_like = get_xp(da)(_aliases.full_like)
ones = get_xp(da)(_aliases.ones)
ones_like = get_xp(da)(_aliases.ones_like)
zeros = get_xp(da)(_aliases.zeros)
zeros_like = get_xp(da)(_aliases.zeros_like)
reshape = get_xp(da)(_aliases.reshape)
matrix_transpose = get_xp(da)(_aliases.matrix_transpose)
vecdot = get_xp(da)(_aliases.vecdot)
nonzero = get_xp(da)(_aliases.nonzero)
ceil = get_xp(np)(_aliases.ceil)
floor = get_xp(np)(_aliases.floor)
trunc = get_xp(np)(_aliases.trunc)
matmul = get_xp(np)(_aliases.matmul)
tensordot = get_xp(np)(_aliases.tensordot)
sign = get_xp(np)(_aliases.sign)
finfo = get_xp(np)(_aliases.finfo)
iinfo = get_xp(np)(_aliases.iinfo)
# asarray also adds the copy keyword, which is not present in numpy 1.0.
def asarray(
obj: complex | NestedSequence[complex] | Array | SupportsBufferProtocol,
/,
*,
dtype: DType | None = None,
device: Device | None = None,
copy: py_bool | None = None,
**kwargs: object,
) -> Array:
"""
Array API compatibility wrapper for asarray().
See the corresponding documentation in the array library and/or the array API
specification for more details.
"""
# TODO: respect device keyword?
_helpers._check_device(da, device)
if isinstance(obj, da.Array):
if dtype is not None and dtype != obj.dtype:
if copy is False:
raise ValueError("Unable to avoid copy when changing dtype")
obj = obj.astype(dtype)
return obj.copy() if copy else obj # pyright: ignore[reportAttributeAccessIssue]
if copy is False:
raise ValueError(
"Unable to avoid copy when converting a non-dask object to dask"
)
# copy=None to be uniform across dask < 2024.12 and >= 2024.12
# see https://github.com/dask/dask/pull/11524/
obj = np.array(obj, dtype=dtype, copy=True)
return da.from_array(obj)
# Element wise aliases
from dask.array import arccos as acos
from dask.array import arccosh as acosh
from dask.array import arcsin as asin
from dask.array import arcsinh as asinh
from dask.array import arctan as atan
from dask.array import arctan2 as atan2
from dask.array import arctanh as atanh
# Other
from dask.array import concatenate as concat
from dask.array import invert as bitwise_invert
from dask.array import left_shift as bitwise_left_shift
from dask.array import power as pow
from dask.array import right_shift as bitwise_right_shift
# dask.array.clip does not work unless all three arguments are provided.
# Furthermore, the masking workaround in common._aliases.clip cannot work with
# dask (meaning uint64 promoting to float64 is going to just be unfixed for
# now).
def clip(
x: Array,
/,
min: float | Array | None = None,
max: float | Array | None = None,
) -> Array:
"""
Array API compatibility wrapper for clip().
See the corresponding documentation in the array library and/or the array API
specification for more details.
"""
def _isscalar(a: float | Array | None, /) -> TypeIs[float | None]:
return a is None or isinstance(a, (int, float))
min_shape = () if _isscalar(min) else min.shape
max_shape = () if _isscalar(max) else max.shape
# TODO: This won't handle dask unknown shapes
result_shape = np.broadcast_shapes(x.shape, min_shape, max_shape)
if min is not None:
min = da.broadcast_to(da.asarray(min), result_shape)
if max is not None:
max = da.broadcast_to(da.asarray(max), result_shape)
if min is None and max is None:
return da.positive(x)
if min is None:
return astype(da.minimum(x, max), x.dtype)
if max is None:
return astype(da.maximum(x, min), x.dtype)
return astype(da.minimum(da.maximum(x, min), max), x.dtype)
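# Illustrative sketch (not part of the wrapper itself): scalar bounds are broadcast
# to the result shape before ``da.maximum``/``da.minimum`` are applied, and the
# result is cast back to the input dtype. The helper name ``_clip_example`` is
# hypothetical.
def _clip_example():
    x = da.arange(6)
    return clip(x, 1, 4)  # lazily computes [1, 1, 2, 3, 4, 4]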
def _ensure_single_chunk(x: Array, axis: int) -> tuple[Array, Callable[[Array], Array]]:
"""
Make sure that Array is not broken into multiple chunks along axis.
Returns
-------
x : Array
The input Array with a single chunk along axis.
restore : Callable[Array, Array]
function to apply to the output to rechunk it back into reasonable chunks
"""
if axis < 0:
axis += x.ndim
if x.numblocks[axis] < 2:
return x, lambda x: x
# Break chunks on other axes in an attempt to keep chunk size low
x = x.rechunk({i: -1 if i == axis else "auto" for i in range(x.ndim)})
# Rather than reconstructing the original chunks, which can be a
# very expensive affair, just break down oversized chunks without
# incurring any transfers over the network.
# This has the downside of a risk of overchunking if the array is
# then used in operations against other arrays that match the
# original chunking pattern.
return x, lambda x: x.rechunk()
def sort(
x: Array,
/,
*,
axis: int = -1,
descending: py_bool = False,
stable: py_bool = True,
) -> Array:
"""
Array API compatibility layer around the lack of sort() in Dask.
Warnings
--------
This function temporarily rechunks the array along `axis` to a single chunk.
This can be extremely inefficient and can lead to out-of-memory errors.
See the corresponding documentation in the array library and/or the array API
specification for more details.
"""
x, restore = _ensure_single_chunk(x, axis)
meta_xp = array_namespace(x._meta)
x = da.map_blocks(
meta_xp.sort,
x,
axis=axis,
meta=x._meta,
dtype=x.dtype,
descending=descending,
stable=stable,
)
return restore(x)
def argsort(
x: Array,
/,
*,
axis: int = -1,
descending: py_bool = False,
stable: py_bool = True,
) -> Array:
"""
Array API compatibility layer around the lack of argsort() in Dask.
See the corresponding documentation in the array library and/or the array API
specification for more details.
Warnings
--------
This function temporarily rechunks the array along `axis` into a single chunk.
This can be extremely inefficient and can lead to out-of-memory errors.
"""
x, restore = _ensure_single_chunk(x, axis)
meta_xp = array_namespace(x._meta)
dtype = meta_xp.argsort(x._meta).dtype
meta = meta_xp.astype(x._meta, dtype)
x = da.map_blocks(
meta_xp.argsort,
x,
axis=axis,
meta=meta,
dtype=dtype,
descending=descending,
stable=stable,
)
return restore(x)
# dask.array.count_nonzero does not have keepdims
def count_nonzero(
x: Array,
axis: int | None = None,
keepdims: py_bool = False,
) -> Array:
result = da.count_nonzero(x, axis)
if keepdims:
if axis is None:
return da.reshape(result, [1] * x.ndim)
return da.expand_dims(result, axis)
return result
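# Illustrative sketch (not part of the wrapper itself): the wrapper adds the
# ``keepdims`` behavior that ``dask.array.count_nonzero`` lacks. The helper name
# ``_count_nonzero_example`` is hypothetical.
def _count_nonzero_example():
    x = da.eye(3)
    return count_nonzero(x, keepdims=True)  # lazily computes [[3]]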
__all__ = [
"__array_namespace_info__",
"count_nonzero",
"bool",
"int8", "int16", "int32", "int64",
"uint8", "uint16", "uint32", "uint64",
"float32", "float64",
"complex64", "complex128",
"asarray", "astype", "can_cast", "result_type",
"pow",
"concat",
"acos", "acosh", "asin", "asinh", "atan", "atan2", "atanh",
"bitwise_left_shift", "bitwise_right_shift", "bitwise_invert",
] # fmt: skip
__all__ += _aliases.__all__
_all_ignore = ["array_namespace", "get_xp", "da", "np"]
def __dir__() -> list[str]:
return __all__
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_compat/dask/array/linalg.py | sklearn/externals/array_api_compat/dask/array/linalg.py | from __future__ import annotations
from typing import Literal
import dask.array as da
# The `matmul` and `tensordot` functions are in both the main and linalg namespaces
from dask.array import matmul, outer, tensordot
# Exports
from dask.array.linalg import * # noqa: F403
from ..._internal import get_xp
from ...common import _linalg
from ...common._typing import Array as _Array
from ._aliases import matrix_transpose, vecdot
# dask.array.linalg doesn't have __all__. If it is added, replace this with
#
# from dask.array.linalg import __all__ as linalg_all
_n = {}
exec('from dask.array.linalg import *', _n)
for k in ('__builtins__', 'annotations', 'operator', 'warnings', 'Array'):
_n.pop(k, None)
linalg_all = list(_n)
del _n, k
EighResult = _linalg.EighResult
QRResult = _linalg.QRResult
SlogdetResult = _linalg.SlogdetResult
SVDResult = _linalg.SVDResult
# TODO: use the QR wrapper once dask
# supports the mode keyword on QR
# https://github.com/dask/dask/issues/10388
#qr = get_xp(da)(_linalg.qr)
def qr(
x: _Array,
mode: Literal["reduced", "complete"] = "reduced",
**kwargs: object,
) -> QRResult:
if mode != "reduced":
raise ValueError("dask arrays only support using mode='reduced'")
return QRResult(*da.linalg.qr(x, **kwargs))
trace = get_xp(da)(_linalg.trace)
cholesky = get_xp(da)(_linalg.cholesky)
matrix_rank = get_xp(da)(_linalg.matrix_rank)
matrix_norm = get_xp(da)(_linalg.matrix_norm)
# Wrap the svd functions to not pass full_matrices to dask
# when full_matrices=False (as that is the default behavior for dask),
# and dask doesn't have the full_matrices keyword
def svd(x: _Array, full_matrices: bool = True, **kwargs) -> SVDResult:
if full_matrices:
raise ValueError("full_matrics=True is not supported by dask.")
return da.linalg.svd(x, coerce_signs=False, **kwargs)
def svdvals(x: _Array) -> _Array:
# TODO: can't avoid computing U or V for dask
_, s, _ = svd(x)
return s
vector_norm = get_xp(da)(_linalg.vector_norm)
diagonal = get_xp(da)(_linalg.diagonal)
__all__ = linalg_all + ["trace", "outer", "matmul", "tensordot",
"matrix_transpose", "vecdot", "EighResult",
"QRResult", "SlogdetResult", "SVDResult", "qr",
"cholesky", "matrix_rank", "matrix_norm", "svdvals",
"vector_norm", "diagonal"]
_all_ignore = ['get_xp', 'da', 'linalg_all', 'warnings']
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_compat/dask/array/__init__.py | sklearn/externals/array_api_compat/dask/array/__init__.py | from typing import Final
from dask.array import * # noqa: F403
# These imports may overwrite names from the import * above.
from ._aliases import * # noqa: F403
__array_api_version__: Final = "2024.12"
# See the comment in the numpy __init__.py
__import__(__package__ + '.linalg')
__import__(__package__ + '.fft')
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_compat/dask/array/_info.py | sklearn/externals/array_api_compat/dask/array/_info.py | """
Array API Inspection namespace
This is the namespace for inspection functions as defined by the array API
standard. See
https://data-apis.org/array-api/latest/API_specification/inspection.html for
more details.
"""
# pyright: reportPrivateUsage=false
from __future__ import annotations
from typing import Literal as L
from typing import TypeAlias, overload
from numpy import bool_ as bool
from numpy import (
complex64,
complex128,
dtype,
float32,
float64,
int8,
int16,
int32,
int64,
intp,
uint8,
uint16,
uint32,
uint64,
)
from ...common._helpers import _DASK_DEVICE, _dask_device
from ...common._typing import (
Capabilities,
DefaultDTypes,
DType,
DTypeKind,
DTypesAll,
DTypesAny,
DTypesBool,
DTypesComplex,
DTypesIntegral,
DTypesNumeric,
DTypesReal,
DTypesSigned,
DTypesUnsigned,
)
_Device: TypeAlias = L["cpu"] | _dask_device
class __array_namespace_info__:
"""
Get the array API inspection namespace for Dask.
The array API inspection namespace defines the following functions:
- capabilities()
- default_device()
- default_dtypes()
- dtypes()
- devices()
See
https://data-apis.org/array-api/latest/API_specification/inspection.html
for more details.
Returns
-------
info : ModuleType
The array API inspection namespace for Dask.
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.default_dtypes()
{'real floating': dask.float64,
'complex floating': dask.complex128,
'integral': dask.int64,
'indexing': dask.int64}
"""
__module__ = "dask.array"
def capabilities(self) -> Capabilities:
"""
Return a dictionary of array API library capabilities.
The resulting dictionary has the following keys:
- **"boolean indexing"**: boolean indicating whether an array library
supports boolean indexing.
Dask supports boolean indexing as long as both the index
and the indexed arrays have known shapes.
Note however that the output .shape and .size properties
will contain a non-compliant math.nan instead of None.
- **"data-dependent shapes"**: boolean indicating whether an array
library supports data-dependent output shapes.
Dask implements unique_values et al.
Note however that the output .shape and .size properties
will contain a non-compliant math.nan instead of None.
- **"max dimensions"**: integer indicating the maximum number of
dimensions supported by the array library.
See
https://data-apis.org/array-api/latest/API_specification/generated/array_api.info.capabilities.html
for more details.
See Also
--------
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Returns
-------
capabilities : dict
A dictionary of array API library capabilities.
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.capabilities()
{'boolean indexing': True,
'data-dependent shapes': True,
'max dimensions': 64}
"""
return {
"boolean indexing": True,
"data-dependent shapes": True,
"max dimensions": 64,
}
def default_device(self) -> L["cpu"]:
"""
The default device used for new Dask arrays.
For Dask, this always returns ``'cpu'``.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Returns
-------
device : Device
The default device used for new Dask arrays.
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.default_device()
'cpu'
"""
return "cpu"
def default_dtypes(self, /, *, device: _Device | None = None) -> DefaultDTypes:
"""
The default data types used for new Dask arrays.
For Dask, this always returns the following dictionary:
- **"real floating"**: ``numpy.float64``
- **"complex floating"**: ``numpy.complex128``
- **"integral"**: ``numpy.intp``
- **"indexing"**: ``numpy.intp``
Parameters
----------
device : str, optional
The device to get the default data types for.
Returns
-------
dtypes : dict
A dictionary describing the default data types used for new Dask
arrays.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.default_dtypes()
{'real floating': dask.float64,
'complex floating': dask.complex128,
'integral': dask.int64,
'indexing': dask.int64}
"""
if device not in ["cpu", _DASK_DEVICE, None]:
raise ValueError(
f'Device not understood. Only "cpu" or _DASK_DEVICE is allowed, '
f"but received: {device!r}"
)
return {
"real floating": dtype(float64),
"complex floating": dtype(complex128),
"integral": dtype(intp),
"indexing": dtype(intp),
}
@overload
def dtypes(
self, /, *, device: _Device | None = None, kind: None = None
) -> DTypesAll: ...
@overload
def dtypes(
self, /, *, device: _Device | None = None, kind: L["bool"]
) -> DTypesBool: ...
@overload
def dtypes(
self, /, *, device: _Device | None = None, kind: L["signed integer"]
) -> DTypesSigned: ...
@overload
def dtypes(
self, /, *, device: _Device | None = None, kind: L["unsigned integer"]
) -> DTypesUnsigned: ...
@overload
def dtypes(
self, /, *, device: _Device | None = None, kind: L["integral"]
) -> DTypesIntegral: ...
@overload
def dtypes(
self, /, *, device: _Device | None = None, kind: L["real floating"]
) -> DTypesReal: ...
@overload
def dtypes(
self, /, *, device: _Device | None = None, kind: L["complex floating"]
) -> DTypesComplex: ...
@overload
def dtypes(
self, /, *, device: _Device | None = None, kind: L["numeric"]
) -> DTypesNumeric: ...
def dtypes(
self, /, *, device: _Device | None = None, kind: DTypeKind | None = None
) -> DTypesAny:
"""
The array API data types supported by Dask.
Note that this function only returns data types that are defined by
the array API.
Parameters
----------
device : str, optional
The device to get the data types for.
kind : str or tuple of str, optional
The kind of data types to return. If ``None``, all data types are
returned. If a string, only data types of that kind are returned.
If a tuple, a dictionary containing the union of the given kinds
is returned. The following kinds are supported:
- ``'bool'``: boolean data types (i.e., ``bool``).
- ``'signed integer'``: signed integer data types (i.e., ``int8``,
``int16``, ``int32``, ``int64``).
- ``'unsigned integer'``: unsigned integer data types (i.e.,
``uint8``, ``uint16``, ``uint32``, ``uint64``).
- ``'integral'``: integer data types. Shorthand for ``('signed
integer', 'unsigned integer')``.
- ``'real floating'``: real-valued floating-point data types
(i.e., ``float32``, ``float64``).
- ``'complex floating'``: complex floating-point data types (i.e.,
``complex64``, ``complex128``).
- ``'numeric'``: numeric data types. Shorthand for ``('integral',
'real floating', 'complex floating')``.
Returns
-------
dtypes : dict
A dictionary mapping the names of data types to the corresponding
Dask data types.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.devices
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.dtypes(kind='signed integer')
{'int8': dask.int8,
'int16': dask.int16,
'int32': dask.int32,
'int64': dask.int64}
"""
if device not in ["cpu", _DASK_DEVICE, None]:
raise ValueError(
'Device not understood. Only "cpu" or _DASK_DEVICE is allowed, but received:'
f" {device}"
)
if kind is None:
return {
"bool": dtype(bool),
"int8": dtype(int8),
"int16": dtype(int16),
"int32": dtype(int32),
"int64": dtype(int64),
"uint8": dtype(uint8),
"uint16": dtype(uint16),
"uint32": dtype(uint32),
"uint64": dtype(uint64),
"float32": dtype(float32),
"float64": dtype(float64),
"complex64": dtype(complex64),
"complex128": dtype(complex128),
}
if kind == "bool":
return {"bool": bool}
if kind == "signed integer":
return {
"int8": dtype(int8),
"int16": dtype(int16),
"int32": dtype(int32),
"int64": dtype(int64),
}
if kind == "unsigned integer":
return {
"uint8": dtype(uint8),
"uint16": dtype(uint16),
"uint32": dtype(uint32),
"uint64": dtype(uint64),
}
if kind == "integral":
return {
"int8": dtype(int8),
"int16": dtype(int16),
"int32": dtype(int32),
"int64": dtype(int64),
"uint8": dtype(uint8),
"uint16": dtype(uint16),
"uint32": dtype(uint32),
"uint64": dtype(uint64),
}
if kind == "real floating":
return {
"float32": dtype(float32),
"float64": dtype(float64),
}
if kind == "complex floating":
return {
"complex64": dtype(complex64),
"complex128": dtype(complex128),
}
if kind == "numeric":
return {
"int8": dtype(int8),
"int16": dtype(int16),
"int32": dtype(int32),
"int64": dtype(int64),
"uint8": dtype(uint8),
"uint16": dtype(uint16),
"uint32": dtype(uint32),
"uint64": dtype(uint64),
"float32": dtype(float32),
"float64": dtype(float64),
"complex64": dtype(complex64),
"complex128": dtype(complex128),
}
if isinstance(kind, tuple): # type: ignore[reportUnnecessaryIsinstanceCall]
res: dict[str, DType] = {}
for k in kind:
res.update(self.dtypes(kind=k))
return res
raise ValueError(f"unsupported kind: {kind!r}")
def devices(self) -> list[_Device]:
"""
The devices supported by Dask.
For Dask, this always returns ``['cpu', DASK_DEVICE]``.
Returns
-------
devices : list[Device]
The devices supported by Dask.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.dtypes
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.devices()
['cpu', DASK_DEVICE]
"""
return ["cpu", _DASK_DEVICE]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_compat/dask/array/fft.py | sklearn/externals/array_api_compat/dask/array/fft.py | from dask.array.fft import * # noqa: F403
# dask.array.fft doesn't have __all__. If it is added, replace this with
#
# from dask.array.fft import __all__ as fft_all
_n = {}
exec('from dask.array.fft import *', _n)
for k in ("__builtins__", "Sequence", "annotations", "warnings"):
_n.pop(k, None)
fft_all = list(_n)
del _n, k
from ...common import _fft
from ..._internal import get_xp
import dask.array as da
fftfreq = get_xp(da)(_fft.fftfreq)
rfftfreq = get_xp(da)(_fft.rfftfreq)
__all__ = fft_all + ["fftfreq", "rfftfreq"]
_all_ignore = ["da", "fft_all", "get_xp", "warnings"]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_compat/common/_typing.py | sklearn/externals/array_api_compat/common/_typing.py | from __future__ import annotations
from collections.abc import Mapping
from types import ModuleType as Namespace
from typing import (
TYPE_CHECKING,
Literal,
Protocol,
TypeAlias,
TypedDict,
TypeVar,
final,
)
if TYPE_CHECKING:
from _typeshed import Incomplete
SupportsBufferProtocol: TypeAlias = Incomplete
Array: TypeAlias = Incomplete
Device: TypeAlias = Incomplete
DType: TypeAlias = Incomplete
else:
SupportsBufferProtocol = object
Array = object
Device = object
DType = object
_T_co = TypeVar("_T_co", covariant=True)
# These "Just" types are equivalent to the `Just` type from the `optype` library,
# apart from them not being `@runtime_checkable`.
# - docs: https://github.com/jorenham/optype/blob/master/README.md#just
# - code: https://github.com/jorenham/optype/blob/master/optype/_core/_just.py
@final
class JustInt(Protocol):
@property
def __class__(self, /) -> type[int]: ...
@__class__.setter
def __class__(self, value: type[int], /) -> None: ... # pyright: ignore[reportIncompatibleMethodOverride]
@final
class JustFloat(Protocol):
@property
def __class__(self, /) -> type[float]: ...
@__class__.setter
def __class__(self, value: type[float], /) -> None: ... # pyright: ignore[reportIncompatibleMethodOverride]
@final
class JustComplex(Protocol):
@property
def __class__(self, /) -> type[complex]: ...
@__class__.setter
def __class__(self, value: type[complex], /) -> None: ... # pyright: ignore[reportIncompatibleMethodOverride]
#
class NestedSequence(Protocol[_T_co]):
def __getitem__(self, key: int, /) -> _T_co | NestedSequence[_T_co]: ...
def __len__(self, /) -> int: ...
class SupportsArrayNamespace(Protocol[_T_co]):
def __array_namespace__(self, /, *, api_version: str | None) -> _T_co: ...
class HasShape(Protocol[_T_co]):
@property
def shape(self, /) -> _T_co: ...
# Return type of `__array_namespace_info__.capabilities`
Capabilities = TypedDict(
"Capabilities",
{
"boolean indexing": bool,
"data-dependent shapes": bool,
"max dimensions": int,
},
)
# Return type of `__array_namespace_info__.default_dtypes`
DefaultDTypes = TypedDict(
"DefaultDTypes",
{
"real floating": DType,
"complex floating": DType,
"integral": DType,
"indexing": DType,
},
)
_DTypeKind: TypeAlias = Literal[
"bool",
"signed integer",
"unsigned integer",
"integral",
"real floating",
"complex floating",
"numeric",
]
# Type of the `kind` parameter in `__array_namespace_info__.dtypes`
DTypeKind: TypeAlias = _DTypeKind | tuple[_DTypeKind, ...]
# `__array_namespace_info__.dtypes(kind="bool")`
class DTypesBool(TypedDict):
bool: DType
# `__array_namespace_info__.dtypes(kind="signed integer")`
class DTypesSigned(TypedDict):
int8: DType
int16: DType
int32: DType
int64: DType
# `__array_namespace_info__.dtypes(kind="unsigned integer")`
class DTypesUnsigned(TypedDict):
uint8: DType
uint16: DType
uint32: DType
uint64: DType
# `__array_namespace_info__.dtypes(kind="integral")`
class DTypesIntegral(DTypesSigned, DTypesUnsigned):
pass
# `__array_namespace_info__.dtypes(kind="real floating")`
class DTypesReal(TypedDict):
float32: DType
float64: DType
# `__array_namespace_info__.dtypes(kind="complex floating")`
class DTypesComplex(TypedDict):
complex64: DType
complex128: DType
# `__array_namespace_info__.dtypes(kind="numeric")`
class DTypesNumeric(DTypesIntegral, DTypesReal, DTypesComplex):
pass
# `__array_namespace_info__.dtypes(kind=None)` (default)
class DTypesAll(DTypesBool, DTypesNumeric):
pass
# `__array_namespace_info__.dtypes(kind=?)` (fallback)
DTypesAny: TypeAlias = Mapping[str, DType]
__all__ = [
"Array",
"Capabilities",
"DType",
"DTypeKind",
"DTypesAny",
"DTypesAll",
"DTypesBool",
"DTypesNumeric",
"DTypesIntegral",
"DTypesSigned",
"DTypesUnsigned",
"DTypesReal",
"DTypesComplex",
"DefaultDTypes",
"Device",
"HasShape",
"Namespace",
"JustInt",
"JustFloat",
"JustComplex",
"NestedSequence",
"SupportsArrayNamespace",
"SupportsBufferProtocol",
]
def __dir__() -> list[str]:
return __all__
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_compat/common/_fft.py | sklearn/externals/array_api_compat/common/_fft.py | from __future__ import annotations
from collections.abc import Sequence
from typing import Literal, TypeAlias
from ._typing import Array, Device, DType, Namespace
_Norm: TypeAlias = Literal["backward", "ortho", "forward"]
# Note: NumPy fft functions improperly upcast float32 and complex64 to
# complex128, which is why we require wrapping them all here.
def fft(
x: Array,
/,
xp: Namespace,
*,
n: int | None = None,
axis: int = -1,
norm: _Norm = "backward",
) -> Array:
res = xp.fft.fft(x, n=n, axis=axis, norm=norm)
if x.dtype in [xp.float32, xp.complex64]:
return res.astype(xp.complex64)
return res
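# Illustrative sketch (assumption, not part of the wrapper set): the same
# dtype-preserving pattern is repeated below for every transform. With NumPy as the
# namespace, single-precision input stays single precision. The helper name
# ``_fft_dtype_example`` is hypothetical.
def _fft_dtype_example():
    import numpy as np
    x = np.ones(4, dtype=np.float32)
    assert np.fft.fft(x).dtype == np.complex128  # NumPy upcasts
    assert fft(x, np).dtype == np.complex64      # the wrapper casts back down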
def ifft(
x: Array,
/,
xp: Namespace,
*,
n: int | None = None,
axis: int = -1,
norm: _Norm = "backward",
) -> Array:
res = xp.fft.ifft(x, n=n, axis=axis, norm=norm)
if x.dtype in [xp.float32, xp.complex64]:
return res.astype(xp.complex64)
return res
def fftn(
x: Array,
/,
xp: Namespace,
*,
s: Sequence[int] | None = None,
axes: Sequence[int] | None = None,
norm: _Norm = "backward",
) -> Array:
res = xp.fft.fftn(x, s=s, axes=axes, norm=norm)
if x.dtype in [xp.float32, xp.complex64]:
return res.astype(xp.complex64)
return res
def ifftn(
x: Array,
/,
xp: Namespace,
*,
s: Sequence[int] | None = None,
axes: Sequence[int] | None = None,
norm: _Norm = "backward",
) -> Array:
res = xp.fft.ifftn(x, s=s, axes=axes, norm=norm)
if x.dtype in [xp.float32, xp.complex64]:
return res.astype(xp.complex64)
return res
def rfft(
x: Array,
/,
xp: Namespace,
*,
n: int | None = None,
axis: int = -1,
norm: _Norm = "backward",
) -> Array:
res = xp.fft.rfft(x, n=n, axis=axis, norm=norm)
if x.dtype == xp.float32:
return res.astype(xp.complex64)
return res
def irfft(
x: Array,
/,
xp: Namespace,
*,
n: int | None = None,
axis: int = -1,
norm: _Norm = "backward",
) -> Array:
res = xp.fft.irfft(x, n=n, axis=axis, norm=norm)
if x.dtype == xp.complex64:
return res.astype(xp.float32)
return res
def rfftn(
x: Array,
/,
xp: Namespace,
*,
s: Sequence[int] | None = None,
axes: Sequence[int] | None = None,
norm: _Norm = "backward",
) -> Array:
res = xp.fft.rfftn(x, s=s, axes=axes, norm=norm)
if x.dtype == xp.float32:
return res.astype(xp.complex64)
return res
def irfftn(
x: Array,
/,
xp: Namespace,
*,
s: Sequence[int] | None = None,
axes: Sequence[int] | None = None,
norm: _Norm = "backward",
) -> Array:
res = xp.fft.irfftn(x, s=s, axes=axes, norm=norm)
if x.dtype == xp.complex64:
return res.astype(xp.float32)
return res
def hfft(
x: Array,
/,
xp: Namespace,
*,
n: int | None = None,
axis: int = -1,
norm: _Norm = "backward",
) -> Array:
res = xp.fft.hfft(x, n=n, axis=axis, norm=norm)
if x.dtype in [xp.float32, xp.complex64]:
return res.astype(xp.float32)
return res
def ihfft(
x: Array,
/,
xp: Namespace,
*,
n: int | None = None,
axis: int = -1,
norm: _Norm = "backward",
) -> Array:
res = xp.fft.ihfft(x, n=n, axis=axis, norm=norm)
if x.dtype in [xp.float32, xp.complex64]:
return res.astype(xp.complex64)
return res
def fftfreq(
n: int,
/,
xp: Namespace,
*,
d: float = 1.0,
dtype: DType | None = None,
device: Device | None = None,
) -> Array:
if device not in ["cpu", None]:
raise ValueError(f"Unsupported device {device!r}")
res = xp.fft.fftfreq(n, d=d)
if dtype is not None:
return res.astype(dtype)
return res
def rfftfreq(
n: int,
/,
xp: Namespace,
*,
d: float = 1.0,
dtype: DType | None = None,
device: Device | None = None,
) -> Array:
if device not in ["cpu", None]:
raise ValueError(f"Unsupported device {device!r}")
res = xp.fft.rfftfreq(n, d=d)
if dtype is not None:
return res.astype(dtype)
return res
def fftshift(
x: Array, /, xp: Namespace, *, axes: int | Sequence[int] | None = None
) -> Array:
return xp.fft.fftshift(x, axes=axes)
def ifftshift(
x: Array, /, xp: Namespace, *, axes: int | Sequence[int] | None = None
) -> Array:
return xp.fft.ifftshift(x, axes=axes)
__all__ = [
"fft",
"ifft",
"fftn",
"ifftn",
"rfft",
"irfft",
"rfftn",
"irfftn",
"hfft",
"ihfft",
"fftfreq",
"rfftfreq",
"fftshift",
"ifftshift",
]
def __dir__() -> list[str]:
return __all__
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_compat/common/_aliases.py | sklearn/externals/array_api_compat/common/_aliases.py | """
These are functions that are just aliases of existing functions in NumPy.
"""
from __future__ import annotations
import inspect
from typing import TYPE_CHECKING, Any, NamedTuple, Optional, Sequence, cast
from ._helpers import _check_device, array_namespace
from ._helpers import device as _get_device
from ._helpers import is_cupy_namespace as _is_cupy_namespace
from ._typing import Array, Device, DType, Namespace
if TYPE_CHECKING:
# TODO: import from typing (requires Python >=3.13)
from typing_extensions import TypeIs
# These functions are modified from the NumPy versions.
# Creation functions add the device keyword (which does nothing for NumPy and Dask)
def arange(
start: float,
/,
stop: float | None = None,
step: float = 1,
*,
xp: Namespace,
dtype: DType | None = None,
device: Device | None = None,
**kwargs: object,
) -> Array:
_check_device(xp, device)
return xp.arange(start, stop=stop, step=step, dtype=dtype, **kwargs)
def empty(
shape: int | tuple[int, ...],
xp: Namespace,
*,
dtype: DType | None = None,
device: Device | None = None,
**kwargs: object,
) -> Array:
_check_device(xp, device)
return xp.empty(shape, dtype=dtype, **kwargs)
def empty_like(
x: Array,
/,
xp: Namespace,
*,
dtype: DType | None = None,
device: Device | None = None,
**kwargs: object,
) -> Array:
_check_device(xp, device)
return xp.empty_like(x, dtype=dtype, **kwargs)
def eye(
n_rows: int,
n_cols: int | None = None,
/,
*,
xp: Namespace,
k: int = 0,
dtype: DType | None = None,
device: Device | None = None,
**kwargs: object,
) -> Array:
_check_device(xp, device)
return xp.eye(n_rows, M=n_cols, k=k, dtype=dtype, **kwargs)
def full(
shape: int | tuple[int, ...],
fill_value: complex,
xp: Namespace,
*,
dtype: DType | None = None,
device: Device | None = None,
**kwargs: object,
) -> Array:
_check_device(xp, device)
return xp.full(shape, fill_value, dtype=dtype, **kwargs)
def full_like(
x: Array,
/,
fill_value: complex,
*,
xp: Namespace,
dtype: DType | None = None,
device: Device | None = None,
**kwargs: object,
) -> Array:
_check_device(xp, device)
return xp.full_like(x, fill_value, dtype=dtype, **kwargs)
def linspace(
start: float,
stop: float,
/,
num: int,
*,
xp: Namespace,
dtype: DType | None = None,
device: Device | None = None,
endpoint: bool = True,
**kwargs: object,
) -> Array:
_check_device(xp, device)
return xp.linspace(start, stop, num, dtype=dtype, endpoint=endpoint, **kwargs)
def ones(
shape: int | tuple[int, ...],
xp: Namespace,
*,
dtype: DType | None = None,
device: Device | None = None,
**kwargs: object,
) -> Array:
_check_device(xp, device)
return xp.ones(shape, dtype=dtype, **kwargs)
def ones_like(
x: Array,
/,
xp: Namespace,
*,
dtype: DType | None = None,
device: Device | None = None,
**kwargs: object,
) -> Array:
_check_device(xp, device)
return xp.ones_like(x, dtype=dtype, **kwargs)
def zeros(
shape: int | tuple[int, ...],
xp: Namespace,
*,
dtype: DType | None = None,
device: Device | None = None,
**kwargs: object,
) -> Array:
_check_device(xp, device)
return xp.zeros(shape, dtype=dtype, **kwargs)
def zeros_like(
x: Array,
/,
xp: Namespace,
*,
dtype: DType | None = None,
device: Device | None = None,
**kwargs: object,
) -> Array:
_check_device(xp, device)
return xp.zeros_like(x, dtype=dtype, **kwargs)
# np.unique() is split into four functions in the array API:
# unique_all, unique_counts, unique_inverse, and unique_values (this is done
# to remove polymorphic return types).
# The functions here return namedtuples (np.unique() returns a normal
# tuple).
# Note that these named tuples aren't actually part of the standard namespace,
# but I don't see any issue with exporting the names here regardless.
class UniqueAllResult(NamedTuple):
values: Array
indices: Array
inverse_indices: Array
counts: Array
class UniqueCountsResult(NamedTuple):
values: Array
counts: Array
class UniqueInverseResult(NamedTuple):
values: Array
inverse_indices: Array
def _unique_kwargs(xp: Namespace) -> dict[str, bool]:
# Older versions of NumPy and CuPy do not have equal_nan. Rather than
# trying to parse version numbers, just check if equal_nan is in the
# signature.
s = inspect.signature(xp.unique)
if "equal_nan" in s.parameters:
return {"equal_nan": False}
return {}
def unique_all(x: Array, /, xp: Namespace) -> UniqueAllResult:
kwargs = _unique_kwargs(xp)
values, indices, inverse_indices, counts = xp.unique(
x,
return_counts=True,
return_index=True,
return_inverse=True,
**kwargs,
)
# np.unique() flattens inverse indices, but they need to share x's shape
# See https://github.com/numpy/numpy/issues/20638
inverse_indices = inverse_indices.reshape(x.shape)
return UniqueAllResult(
values,
indices,
inverse_indices,
counts,
)
def unique_counts(x: Array, /, xp: Namespace) -> UniqueCountsResult:
kwargs = _unique_kwargs(xp)
res = xp.unique(
x, return_counts=True, return_index=False, return_inverse=False, **kwargs
)
return UniqueCountsResult(*res)
def unique_inverse(x: Array, /, xp: Namespace) -> UniqueInverseResult:
kwargs = _unique_kwargs(xp)
values, inverse_indices = xp.unique(
x,
return_counts=False,
return_index=False,
return_inverse=True,
**kwargs,
)
# xp.unique() flattens inverse indices, but they need to share x's shape
# See https://github.com/numpy/numpy/issues/20638
inverse_indices = inverse_indices.reshape(x.shape)
return UniqueInverseResult(values, inverse_indices)
def unique_values(x: Array, /, xp: Namespace) -> Array:
kwargs = _unique_kwargs(xp)
return xp.unique(
x,
return_counts=False,
return_index=False,
return_inverse=False,
**kwargs,
)
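# Hedged example (assumes NumPy as xp): unique_counts returns the sorted unique
# values together with their multiplicities as a namedtuple, e.g.
# >>> unique_counts(np.asarray([1, 1, 2]), np)
# UniqueCountsResult(values=array([1, 2]), counts=array([2, 1]))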
# These functions have different keyword argument names
def std(
x: Array,
/,
xp: Namespace,
*,
axis: int | tuple[int, ...] | None = None,
correction: float = 0.0, # correction instead of ddof
keepdims: bool = False,
**kwargs: object,
) -> Array:
return xp.std(x, axis=axis, ddof=correction, keepdims=keepdims, **kwargs)
def var(
x: Array,
/,
xp: Namespace,
*,
axis: int | tuple[int, ...] | None = None,
correction: float = 0.0, # correction instead of ddof
keepdims: bool = False,
**kwargs: object,
) -> Array:
return xp.var(x, axis=axis, ddof=correction, keepdims=keepdims, **kwargs)
# cumulative_sum is renamed from cumsum, and adds the include_initial keyword
# argument
def cumulative_sum(
x: Array,
/,
xp: Namespace,
*,
axis: int | None = None,
dtype: DType | None = None,
include_initial: bool = False,
**kwargs: object,
) -> Array:
wrapped_xp = array_namespace(x)
# TODO: The standard is not clear about what should happen when x.ndim == 0.
if axis is None:
if x.ndim > 1:
raise ValueError(
"axis must be specified in cumulative_sum for more than one dimension"
)
axis = 0
res = xp.cumsum(x, axis=axis, dtype=dtype, **kwargs)
# np.cumsum does not support include_initial
if include_initial:
initial_shape = list(x.shape)
initial_shape[axis] = 1
res = xp.concatenate(
[
wrapped_xp.zeros(
shape=initial_shape, dtype=res.dtype, device=_get_device(res)
),
res,
],
axis=axis,
)
return res
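# Hedged example (assumes NumPy as xp): with include_initial=True the result is
# one element longer along the scanned axis and starts from zero, e.g.
# >>> cumulative_sum(np.asarray([1, 2, 3]), np, include_initial=True)
# array([0, 1, 3, 6])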
def cumulative_prod(
x: Array,
/,
xp: Namespace,
*,
axis: int | None = None,
dtype: DType | None = None,
include_initial: bool = False,
**kwargs: object,
) -> Array:
wrapped_xp = array_namespace(x)
if axis is None:
if x.ndim > 1:
raise ValueError(
"axis must be specified in cumulative_prod for more than one dimension"
)
axis = 0
res = xp.cumprod(x, axis=axis, dtype=dtype, **kwargs)
# np.cumprod does not support include_initial
if include_initial:
initial_shape = list(x.shape)
initial_shape[axis] = 1
res = xp.concatenate(
[
wrapped_xp.ones(
shape=initial_shape, dtype=res.dtype, device=_get_device(res)
),
res,
],
axis=axis,
)
return res
# The min and max argument names in clip are different and not optional in numpy, and type
# promotion behavior is different.
def clip(
x: Array,
/,
min: float | Array | None = None,
max: float | Array | None = None,
*,
xp: Namespace,
# TODO: np.clip has other ufunc kwargs
out: Array | None = None,
) -> Array:
def _isscalar(a: object) -> TypeIs[int | float | None]:
return isinstance(a, (int, float, type(None)))
min_shape = () if _isscalar(min) else min.shape
max_shape = () if _isscalar(max) else max.shape
wrapped_xp = array_namespace(x)
result_shape = xp.broadcast_shapes(x.shape, min_shape, max_shape)
# np.clip does type promotion but the array API clip requires that the
# output have the same dtype as x. We do this instead of just downcasting
# the result of xp.clip() to handle some corner cases better (e.g.,
# avoiding uint64 -> float64 promotion).
# Note: cases where min or max overflow (integer) or round (float) in the
# wrong direction when downcasting to x.dtype are unspecified. This code
# just does whatever NumPy does when it downcasts in the assignment, but
# other behavior could be preferred, especially for integers. For example,
# this code produces:
# >>> clip(asarray(0, dtype=int8), asarray(128, dtype=int16), None)
# -128
# but an answer of 0 might be preferred. See
# https://github.com/numpy/numpy/issues/24976 for more discussion on this issue.
# At least handle the case of Python integers correctly (see
# https://github.com/numpy/numpy/pull/26892).
if wrapped_xp.isdtype(x.dtype, "integral"):
if type(min) is int and min <= wrapped_xp.iinfo(x.dtype).min:
min = None
if type(max) is int and max >= wrapped_xp.iinfo(x.dtype).max:
max = None
dev = _get_device(x)
if out is None:
out = wrapped_xp.empty(result_shape, dtype=x.dtype, device=dev)
assert out is not None # workaround for a type-narrowing issue in pyright
out[()] = x
if min is not None:
a = wrapped_xp.asarray(min, dtype=x.dtype, device=dev)
a = xp.broadcast_to(a, result_shape)
ia = (out < a) | xp.isnan(a)
out[ia] = a[ia]
if max is not None:
b = wrapped_xp.asarray(max, dtype=x.dtype, device=dev)
b = xp.broadcast_to(b, result_shape)
ib = (out > b) | xp.isnan(b)
out[ib] = b[ib]
# Return a scalar for 0-D
return out[()]
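# Hedged example (assumes NumPy as xp): unlike np.clip, the result keeps the
# dtype of x instead of promoting with the bounds, e.g.
# >>> clip(np.asarray([1, 5, 10], dtype=np.int8), 2, 8, xp=np)
# array([2, 5, 8], dtype=int8)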
# Unlike transpose(), the axes argument to permute_dims() is required.
def permute_dims(x: Array, /, axes: tuple[int, ...], xp: Namespace) -> Array:
return xp.transpose(x, axes)
# np.reshape calls the keyword argument 'newshape' instead of 'shape'
def reshape(
x: Array,
/,
shape: tuple[int, ...],
xp: Namespace,
*,
copy: Optional[bool] = None,
**kwargs: object,
) -> Array:
if copy is True:
x = x.copy()
elif copy is False:
y = x.view()
y.shape = shape
return y
return xp.reshape(x, shape, **kwargs)
# The descending keyword is new in sort and argsort, and 'kind' replaced with
# 'stable'
def argsort(
x: Array,
/,
xp: Namespace,
*,
axis: int = -1,
descending: bool = False,
stable: bool = True,
**kwargs: object,
) -> Array:
# Note: this keyword argument is different, and the default is different.
# We set it in kwargs like this because numpy.sort uses kind='quicksort'
# as the default whereas cupy.sort uses kind=None.
if stable:
kwargs["kind"] = "stable"
if not descending:
res = xp.argsort(x, axis=axis, **kwargs)
else:
# As NumPy has no native descending sort, we imitate it here. Note that
# simply flipping the results of xp.argsort(x, ...) would not
# respect the relative order like it would in native descending sorts.
res = xp.flip(
xp.argsort(xp.flip(x, axis=axis), axis=axis, **kwargs),
axis=axis,
)
# Rely on flip()/argsort() to validate axis
normalised_axis = axis if axis >= 0 else x.ndim + axis
max_i = x.shape[normalised_axis] - 1
res = max_i - res
return res
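# Hedged example (assumes NumPy as xp): the index arithmetic above keeps the
# descending sort stable, i.e. ties preserve their original relative order, e.g.
# >>> argsort(np.asarray([3, 1, 3]), np, descending=True)
# array([0, 2, 1])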
def sort(
x: Array,
/,
xp: Namespace,
*,
axis: int = -1,
descending: bool = False,
stable: bool = True,
**kwargs: object,
) -> Array:
# Note: this keyword argument is different, and the default is different.
# We set it in kwargs like this because numpy.sort uses kind='quicksort'
# as the default whereas cupy.sort uses kind=None.
if stable:
kwargs["kind"] = "stable"
res = xp.sort(x, axis=axis, **kwargs)
if descending:
res = xp.flip(res, axis=axis)
return res
# nonzero should error for zero-dimensional arrays
def nonzero(x: Array, /, xp: Namespace, **kwargs: object) -> tuple[Array, ...]:
if x.ndim == 0:
raise ValueError("nonzero() does not support zero-dimensional arrays")
return xp.nonzero(x, **kwargs)
# ceil, floor, and trunc return integers for integer inputs
def ceil(x: Array, /, xp: Namespace, **kwargs: object) -> Array:
if xp.issubdtype(x.dtype, xp.integer):
return x
return xp.ceil(x, **kwargs)
def floor(x: Array, /, xp: Namespace, **kwargs: object) -> Array:
if xp.issubdtype(x.dtype, xp.integer):
return x
return xp.floor(x, **kwargs)
def trunc(x: Array, /, xp: Namespace, **kwargs: object) -> Array:
if xp.issubdtype(x.dtype, xp.integer):
return x
return xp.trunc(x, **kwargs)
# linear algebra functions
def matmul(x1: Array, x2: Array, /, xp: Namespace, **kwargs: object) -> Array:
return xp.matmul(x1, x2, **kwargs)
# Unlike transpose, matrix_transpose only transposes the last two axes.
def matrix_transpose(x: Array, /, xp: Namespace) -> Array:
if x.ndim < 2:
raise ValueError("x must be at least 2-dimensional for matrix_transpose")
return xp.swapaxes(x, -1, -2)
def tensordot(
x1: Array,
x2: Array,
/,
xp: Namespace,
*,
axes: int | tuple[Sequence[int], Sequence[int]] = 2,
**kwargs: object,
) -> Array:
return xp.tensordot(x1, x2, axes=axes, **kwargs)
def vecdot(x1: Array, x2: Array, /, xp: Namespace, *, axis: int = -1) -> Array:
if x1.shape[axis] != x2.shape[axis]:
raise ValueError("x1 and x2 must have the same size along the given axis")
if hasattr(xp, "broadcast_tensors"):
_broadcast = xp.broadcast_tensors
else:
_broadcast = xp.broadcast_arrays
x1_ = xp.moveaxis(x1, axis, -1)
x2_ = xp.moveaxis(x2, axis, -1)
x1_, x2_ = _broadcast(x1_, x2_)
res = xp.conj(x1_[..., None, :]) @ x2_[..., None]
return res[..., 0, 0]
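# Hedged example (assumes NumPy as xp): the first argument is conjugated, so
# vecdot matches sum(conj(x1) * x2) along the chosen axis, e.g.
# vecdot(np.asarray([1j, 2.0]), np.asarray([1j, 2.0]), np) evaluates to (5+0j).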
# isdtype is a new function in the 2022.12 array API specification.
def isdtype(
dtype: DType,
kind: DType | str | tuple[DType | str, ...],
xp: Namespace,
*,
_tuple: bool = True, # Disallow nested tuples
) -> bool:
"""
Returns a boolean indicating whether a provided dtype is of a specified data type ``kind``.
Note that outside of this function, this compat library does not yet fully
support complex numbers.
See
https://data-apis.org/array-api/latest/API_specification/generated/array_api.isdtype.html
for more details
"""
if isinstance(kind, tuple) and _tuple:
return any(
isdtype(dtype, k, xp, _tuple=False)
for k in cast("tuple[DType | str, ...]", kind)
)
elif isinstance(kind, str):
if kind == "bool":
return dtype == xp.bool_
elif kind == "signed integer":
return xp.issubdtype(dtype, xp.signedinteger)
elif kind == "unsigned integer":
return xp.issubdtype(dtype, xp.unsignedinteger)
elif kind == "integral":
return xp.issubdtype(dtype, xp.integer)
elif kind == "real floating":
return xp.issubdtype(dtype, xp.floating)
elif kind == "complex floating":
return xp.issubdtype(dtype, xp.complexfloating)
elif kind == "numeric":
return xp.issubdtype(dtype, xp.number)
else:
raise ValueError(f"Unrecognized data type kind: {kind!r}")
else:
# This will allow things that aren't required by the spec, like
# isdtype(np.float64, float) or isdtype(np.int64, 'l'). Should we be
# more strict here to match the type annotation? Note that the
# array_api_strict implementation will be very strict.
return dtype == kind
# unstack is a new function in the 2023.12 array API standard
def unstack(x: Array, /, xp: Namespace, *, axis: int = 0) -> tuple[Array, ...]:
if x.ndim == 0:
raise ValueError("Input array must be at least 1-d.")
return tuple(xp.moveaxis(x, axis, 0))
# numpy 1.26 does not use the standard definition for sign on complex numbers
def sign(x: Array, /, xp: Namespace, **kwargs: object) -> Array:
if isdtype(x.dtype, "complex floating", xp=xp):
out = (x / xp.abs(x, **kwargs))[...]
# sign(0) = 0 but the above formula would give nan
out[x == 0j] = 0j
else:
out = xp.sign(x, **kwargs)
# CuPy sign() does not propagate nans. See
# https://github.com/data-apis/array-api-compat/issues/136
if _is_cupy_namespace(xp) and isdtype(x.dtype, "real floating", xp=xp):
out[xp.isnan(x)] = xp.nan
return out[()]
def finfo(type_: DType | Array, /, xp: Namespace) -> Any:
# It is surprisingly difficult to recognize a dtype apart from an array.
# np.int64 is not the same as np.asarray(1).dtype!
try:
return xp.finfo(type_)
except (ValueError, TypeError):
return xp.finfo(type_.dtype)
def iinfo(type_: DType | Array, /, xp: Namespace) -> Any:
try:
return xp.iinfo(type_)
except (ValueError, TypeError):
return xp.iinfo(type_.dtype)
__all__ = [
"arange",
"empty",
"empty_like",
"eye",
"full",
"full_like",
"linspace",
"ones",
"ones_like",
"zeros",
"zeros_like",
"UniqueAllResult",
"UniqueCountsResult",
"UniqueInverseResult",
"unique_all",
"unique_counts",
"unique_inverse",
"unique_values",
"std",
"var",
"cumulative_sum",
"cumulative_prod",
"clip",
"permute_dims",
"reshape",
"argsort",
"sort",
"nonzero",
"ceil",
"floor",
"trunc",
"matmul",
"matrix_transpose",
"tensordot",
"vecdot",
"isdtype",
"unstack",
"sign",
"finfo",
"iinfo",
]
_all_ignore = ["inspect", "array_namespace", "NamedTuple"]
def __dir__() -> list[str]:
return __all__
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_compat/common/_linalg.py | sklearn/externals/array_api_compat/common/_linalg.py | from __future__ import annotations
import math
from typing import Literal, NamedTuple, cast
import numpy as np
if np.__version__[0] == "2":
from numpy.lib.array_utils import normalize_axis_tuple
else:
from numpy.core.numeric import normalize_axis_tuple
from .._internal import get_xp
from ._aliases import isdtype, matmul, matrix_transpose, tensordot, vecdot
from ._typing import Array, DType, JustFloat, JustInt, Namespace
# These are in the main NumPy namespace but not in numpy.linalg
def cross(
x1: Array,
x2: Array,
/,
xp: Namespace,
*,
axis: int = -1,
**kwargs: object,
) -> Array:
return xp.cross(x1, x2, axis=axis, **kwargs)
def outer(x1: Array, x2: Array, /, xp: Namespace, **kwargs: object) -> Array:
return xp.outer(x1, x2, **kwargs)
class EighResult(NamedTuple):
eigenvalues: Array
eigenvectors: Array
class QRResult(NamedTuple):
Q: Array
R: Array
class SlogdetResult(NamedTuple):
sign: Array
logabsdet: Array
class SVDResult(NamedTuple):
U: Array
S: Array
Vh: Array
# These functions are the same as their NumPy counterparts except they return
# a namedtuple.
def eigh(x: Array, /, xp: Namespace, **kwargs: object) -> EighResult:
return EighResult(*xp.linalg.eigh(x, **kwargs))
def qr(
x: Array,
/,
xp: Namespace,
*,
mode: Literal["reduced", "complete"] = "reduced",
**kwargs: object,
) -> QRResult:
return QRResult(*xp.linalg.qr(x, mode=mode, **kwargs))
def slogdet(x: Array, /, xp: Namespace, **kwargs: object) -> SlogdetResult:
return SlogdetResult(*xp.linalg.slogdet(x, **kwargs))
def svd(
x: Array,
/,
xp: Namespace,
*,
full_matrices: bool = True,
**kwargs: object,
) -> SVDResult:
return SVDResult(*xp.linalg.svd(x, full_matrices=full_matrices, **kwargs))
# These functions have additional keyword arguments
# The upper keyword argument is new from NumPy
def cholesky(
x: Array,
/,
xp: Namespace,
*,
upper: bool = False,
**kwargs: object,
) -> Array:
L = xp.linalg.cholesky(x, **kwargs)
if upper:
U = get_xp(xp)(matrix_transpose)(L)
if get_xp(xp)(isdtype)(U.dtype, 'complex floating'):
U = xp.conj(U) # pyright: ignore[reportConstantRedefinition]
return U
return L
# The rtol keyword argument of matrix_rank() and pinv() is new from NumPy.
# Note that it has a different semantic meaning from tol and rcond.
def matrix_rank(
x: Array,
/,
xp: Namespace,
*,
rtol: float | Array | None = None,
**kwargs: object,
) -> Array:
    # this is different from xp.linalg.matrix_rank, which supports 1-dimensional
    # arrays.
if x.ndim < 2:
raise xp.linalg.LinAlgError("1-dimensional array given. Array must be at least two-dimensional")
S: Array = get_xp(xp)(svdvals)(x, **kwargs)
if rtol is None:
tol = S.max(axis=-1, keepdims=True) * max(x.shape[-2:]) * xp.finfo(S.dtype).eps
else:
# this is different from xp.linalg.matrix_rank, which does not
# multiply the tolerance by the largest singular value.
tol = S.max(axis=-1, keepdims=True)*xp.asarray(rtol)[..., xp.newaxis]
return xp.count_nonzero(S > tol, axis=-1)
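# Hedged example (assumes NumPy as xp): with the default rtol, tiny singular
# values count as zero, so matrix_rank(np.asarray([[1., 2.], [2., 4.]]), np)
# evaluates to 1 for this rank-deficient matrix.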
def pinv(
x: Array,
/,
xp: Namespace,
*,
rtol: float | Array | None = None,
**kwargs: object,
) -> Array:
# this is different from xp.linalg.pinv, which does not multiply the
# default tolerance by max(M, N).
if rtol is None:
rtol = max(x.shape[-2:]) * xp.finfo(x.dtype).eps
return xp.linalg.pinv(x, rcond=rtol, **kwargs)
# These functions are new in the array API spec
def matrix_norm(
x: Array,
/,
xp: Namespace,
*,
keepdims: bool = False,
ord: Literal[1, 2, -1, -2] | JustFloat | Literal["fro", "nuc"] | None = "fro",
) -> Array:
return xp.linalg.norm(x, axis=(-2, -1), keepdims=keepdims, ord=ord)
# svdvals is not in NumPy (but it is in SciPy). It is equivalent to
# xp.linalg.svd(compute_uv=False).
def svdvals(x: Array, /, xp: Namespace) -> Array | tuple[Array, ...]:
return xp.linalg.svd(x, compute_uv=False)
def vector_norm(
x: Array,
/,
xp: Namespace,
*,
axis: int | tuple[int, ...] | None = None,
keepdims: bool = False,
ord: JustInt | JustFloat = 2,
) -> Array:
# xp.linalg.norm tries to do a matrix norm whenever axis is a 2-tuple or
# when axis=None and the input is 2-D, so to force a vector norm, we make
# it so the input is 1-D (for axis=None), or reshape so that norm is done
# on a single dimension.
if axis is None:
# Note: xp.linalg.norm() doesn't handle 0-D arrays
_x = x.ravel()
_axis = 0
elif isinstance(axis, tuple):
# Note: The axis argument supports any number of axes, whereas
# xp.linalg.norm() only supports a single axis for vector norm.
normalized_axis = cast(
"tuple[int, ...]",
normalize_axis_tuple(axis, x.ndim), # pyright: ignore[reportCallIssue]
)
rest = tuple(i for i in range(x.ndim) if i not in normalized_axis)
newshape = axis + rest
_x = xp.transpose(x, newshape).reshape(
(math.prod([x.shape[i] for i in axis]), *[x.shape[i] for i in rest]))
_axis = 0
else:
_x = x
_axis = axis
res = xp.linalg.norm(_x, axis=_axis, ord=ord)
if keepdims:
# We can't reuse xp.linalg.norm(keepdims) because of the reshape hacks
# above to avoid matrix norm logic.
shape = list(x.shape)
_axis = cast(
"tuple[int, ...]",
normalize_axis_tuple( # pyright: ignore[reportCallIssue]
range(x.ndim) if axis is None else axis,
x.ndim,
),
)
for i in _axis:
shape[i] = 1
res = xp.reshape(res, tuple(shape))
return res
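# Hedged example (assumes NumPy as xp): a tuple axis is flattened into a single
# dimension first, so the result is a true vector norm rather than a matrix
# norm, e.g. vector_norm(np.ones((2, 2)), np, axis=(0, 1)) evaluates to 2.0.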
# xp.diagonal and xp.trace operate on the first two axes, whereas these
# operate on the last two.
def diagonal(x: Array, /, xp: Namespace, *, offset: int = 0, **kwargs: object) -> Array:
return xp.diagonal(x, offset=offset, axis1=-2, axis2=-1, **kwargs)
def trace(
x: Array,
/,
xp: Namespace,
*,
offset: int = 0,
dtype: DType | None = None,
**kwargs: object,
) -> Array:
return xp.asarray(
xp.trace(x, offset=offset, dtype=dtype, axis1=-2, axis2=-1, **kwargs)
)
__all__ = ['cross', 'matmul', 'outer', 'tensordot', 'EighResult',
'QRResult', 'SlogdetResult', 'SVDResult', 'eigh', 'qr', 'slogdet',
'svd', 'cholesky', 'matrix_rank', 'pinv', 'matrix_norm',
'matrix_transpose', 'svdvals', 'vecdot', 'vector_norm', 'diagonal',
'trace']
_all_ignore = ['math', 'normalize_axis_tuple', 'get_xp', 'np', 'isdtype']
def __dir__() -> list[str]:
return __all__
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_compat/common/_helpers.py | sklearn/externals/array_api_compat/common/_helpers.py | """
Various helper functions which are not part of the spec.
Functions which start with an underscore are for internal use only, but helpers
that are in __all__ are intended as additional helper functions for use by end
users of the compat library.
"""
from __future__ import annotations
import inspect
import math
import sys
import warnings
from collections.abc import Collection, Hashable
from functools import lru_cache
from typing import (
TYPE_CHECKING,
Any,
Final,
Literal,
SupportsIndex,
TypeAlias,
TypeGuard,
TypeVar,
cast,
overload,
)
from ._typing import Array, Device, HasShape, Namespace, SupportsArrayNamespace
if TYPE_CHECKING:
import dask.array as da
import jax
import ndonnx as ndx
import numpy as np
import numpy.typing as npt
import sparse # pyright: ignore[reportMissingTypeStubs]
import torch
# TODO: import from typing (requires Python >=3.13)
from typing_extensions import TypeIs, TypeVar
_SizeT = TypeVar("_SizeT", bound = int | None)
_ZeroGradientArray: TypeAlias = npt.NDArray[np.void]
_CupyArray: TypeAlias = Any # cupy has no py.typed
_ArrayApiObj: TypeAlias = (
npt.NDArray[Any]
| da.Array
| jax.Array
| ndx.Array
| sparse.SparseArray
| torch.Tensor
| SupportsArrayNamespace[Any]
| _CupyArray
)
_API_VERSIONS_OLD: Final = frozenset({"2021.12", "2022.12", "2023.12"})
_API_VERSIONS: Final = _API_VERSIONS_OLD | frozenset({"2024.12"})
@lru_cache(100)
def _issubclass_fast(cls: type, modname: str, clsname: str) -> bool:
try:
mod = sys.modules[modname]
except KeyError:
return False
parent_cls = getattr(mod, clsname)
return issubclass(cls, parent_cls)
def _is_jax_zero_gradient_array(x: object) -> TypeGuard[_ZeroGradientArray]:
"""Return True if `x` is a zero-gradient array.
These arrays are a design quirk of Jax that may one day be removed.
See https://github.com/google/jax/issues/20620.
"""
# Fast exit
try:
dtype = x.dtype # type: ignore[attr-defined]
except AttributeError:
return False
cls = cast(Hashable, type(dtype))
if not _issubclass_fast(cls, "numpy.dtypes", "VoidDType"):
return False
if "jax" not in sys.modules:
return False
import jax
# jax.float0 is a np.dtype([('float0', 'V')])
return dtype == jax.float0
def is_numpy_array(x: object) -> TypeGuard[npt.NDArray[Any]]:
"""
Return True if `x` is a NumPy array.
This function does not import NumPy if it has not already been imported
and is therefore cheap to use.
This also returns True for `ndarray` subclasses and NumPy scalar objects.
See Also
--------
array_namespace
is_array_api_obj
is_cupy_array
is_torch_array
is_ndonnx_array
is_dask_array
is_jax_array
is_pydata_sparse_array
"""
# TODO: Should we reject ndarray subclasses?
cls = cast(Hashable, type(x))
return (
_issubclass_fast(cls, "numpy", "ndarray")
or _issubclass_fast(cls, "numpy", "generic")
) and not _is_jax_zero_gradient_array(x)
def is_cupy_array(x: object) -> bool:
"""
Return True if `x` is a CuPy array.
This function does not import CuPy if it has not already been imported
and is therefore cheap to use.
This also returns True for `cupy.ndarray` subclasses and CuPy scalar objects.
See Also
--------
array_namespace
is_array_api_obj
is_numpy_array
is_torch_array
is_ndonnx_array
is_dask_array
is_jax_array
is_pydata_sparse_array
"""
cls = cast(Hashable, type(x))
return _issubclass_fast(cls, "cupy", "ndarray")
def is_torch_array(x: object) -> TypeIs[torch.Tensor]:
"""
Return True if `x` is a PyTorch tensor.
This function does not import PyTorch if it has not already been imported
and is therefore cheap to use.
See Also
--------
array_namespace
is_array_api_obj
is_numpy_array
is_cupy_array
is_dask_array
is_jax_array
is_pydata_sparse_array
"""
cls = cast(Hashable, type(x))
return _issubclass_fast(cls, "torch", "Tensor")
def is_ndonnx_array(x: object) -> TypeIs[ndx.Array]:
"""
Return True if `x` is a ndonnx Array.
This function does not import ndonnx if it has not already been imported
and is therefore cheap to use.
See Also
--------
array_namespace
is_array_api_obj
is_numpy_array
is_cupy_array
    is_torch_array
is_dask_array
is_jax_array
is_pydata_sparse_array
"""
cls = cast(Hashable, type(x))
return _issubclass_fast(cls, "ndonnx", "Array")
def is_dask_array(x: object) -> TypeIs[da.Array]:
"""
Return True if `x` is a dask.array Array.
This function does not import dask if it has not already been imported
and is therefore cheap to use.
See Also
--------
array_namespace
is_array_api_obj
is_numpy_array
is_cupy_array
is_torch_array
is_ndonnx_array
is_jax_array
is_pydata_sparse_array
"""
cls = cast(Hashable, type(x))
return _issubclass_fast(cls, "dask.array", "Array")
def is_jax_array(x: object) -> TypeIs[jax.Array]:
"""
Return True if `x` is a JAX array.
This function does not import JAX if it has not already been imported
and is therefore cheap to use.
See Also
--------
array_namespace
is_array_api_obj
is_numpy_array
is_cupy_array
is_torch_array
is_ndonnx_array
is_dask_array
is_pydata_sparse_array
"""
cls = cast(Hashable, type(x))
return _issubclass_fast(cls, "jax", "Array") or _is_jax_zero_gradient_array(x)
def is_pydata_sparse_array(x: object) -> TypeIs[sparse.SparseArray]:
"""
Return True if `x` is an array from the `sparse` package.
This function does not import `sparse` if it has not already been imported
and is therefore cheap to use.
See Also
--------
array_namespace
is_array_api_obj
is_numpy_array
is_cupy_array
is_torch_array
is_ndonnx_array
is_dask_array
is_jax_array
"""
# TODO: Account for other backends.
cls = cast(Hashable, type(x))
return _issubclass_fast(cls, "sparse", "SparseArray")
def is_array_api_obj(x: object) -> TypeIs[_ArrayApiObj]: # pyright: ignore[reportUnknownParameterType]
"""
Return True if `x` is an array API compatible array object.
See Also
--------
array_namespace
is_numpy_array
is_cupy_array
is_torch_array
is_ndonnx_array
is_dask_array
is_jax_array
"""
return (
hasattr(x, '__array_namespace__')
or _is_array_api_cls(cast(Hashable, type(x)))
)
@lru_cache(100)
def _is_array_api_cls(cls: type) -> bool:
return (
# TODO: drop support for numpy<2 which didn't have __array_namespace__
_issubclass_fast(cls, "numpy", "ndarray")
or _issubclass_fast(cls, "numpy", "generic")
or _issubclass_fast(cls, "cupy", "ndarray")
or _issubclass_fast(cls, "torch", "Tensor")
or _issubclass_fast(cls, "dask.array", "Array")
or _issubclass_fast(cls, "sparse", "SparseArray")
# TODO: drop support for jax<0.4.32 which didn't have __array_namespace__
or _issubclass_fast(cls, "jax", "Array")
)
def _compat_module_name() -> str:
assert __name__.endswith(".common._helpers")
return __name__.removesuffix(".common._helpers")
@lru_cache(100)
def is_numpy_namespace(xp: Namespace) -> bool:
"""
Returns True if `xp` is a NumPy namespace.
This includes both NumPy itself and the version wrapped by array-api-compat.
See Also
--------
array_namespace
is_cupy_namespace
is_torch_namespace
is_ndonnx_namespace
is_dask_namespace
is_jax_namespace
is_pydata_sparse_namespace
is_array_api_strict_namespace
"""
return xp.__name__ in {"numpy", _compat_module_name() + ".numpy"}
@lru_cache(100)
def is_cupy_namespace(xp: Namespace) -> bool:
"""
Returns True if `xp` is a CuPy namespace.
This includes both CuPy itself and the version wrapped by array-api-compat.
See Also
--------
array_namespace
is_numpy_namespace
is_torch_namespace
is_ndonnx_namespace
is_dask_namespace
is_jax_namespace
is_pydata_sparse_namespace
is_array_api_strict_namespace
"""
return xp.__name__ in {"cupy", _compat_module_name() + ".cupy"}
@lru_cache(100)
def is_torch_namespace(xp: Namespace) -> bool:
"""
Returns True if `xp` is a PyTorch namespace.
This includes both PyTorch itself and the version wrapped by array-api-compat.
See Also
--------
array_namespace
is_numpy_namespace
is_cupy_namespace
is_ndonnx_namespace
is_dask_namespace
is_jax_namespace
is_pydata_sparse_namespace
is_array_api_strict_namespace
"""
return xp.__name__ in {"torch", _compat_module_name() + ".torch"}
def is_ndonnx_namespace(xp: Namespace) -> bool:
"""
Returns True if `xp` is an NDONNX namespace.
See Also
--------
array_namespace
is_numpy_namespace
is_cupy_namespace
is_torch_namespace
is_dask_namespace
is_jax_namespace
is_pydata_sparse_namespace
is_array_api_strict_namespace
"""
return xp.__name__ == "ndonnx"
@lru_cache(100)
def is_dask_namespace(xp: Namespace) -> bool:
"""
Returns True if `xp` is a Dask namespace.
This includes both ``dask.array`` itself and the version wrapped by array-api-compat.
See Also
--------
array_namespace
is_numpy_namespace
is_cupy_namespace
is_torch_namespace
is_ndonnx_namespace
is_jax_namespace
is_pydata_sparse_namespace
is_array_api_strict_namespace
"""
return xp.__name__ in {"dask.array", _compat_module_name() + ".dask.array"}
def is_jax_namespace(xp: Namespace) -> bool:
"""
Returns True if `xp` is a JAX namespace.
This includes ``jax.numpy`` and ``jax.experimental.array_api`` which existed in
older versions of JAX.
See Also
--------
array_namespace
is_numpy_namespace
is_cupy_namespace
is_torch_namespace
is_ndonnx_namespace
is_dask_namespace
is_pydata_sparse_namespace
is_array_api_strict_namespace
"""
return xp.__name__ in {"jax.numpy", "jax.experimental.array_api"}
def is_pydata_sparse_namespace(xp: Namespace) -> bool:
"""
Returns True if `xp` is a pydata/sparse namespace.
See Also
--------
array_namespace
is_numpy_namespace
is_cupy_namespace
is_torch_namespace
is_ndonnx_namespace
is_dask_namespace
is_jax_namespace
is_array_api_strict_namespace
"""
return xp.__name__ == "sparse"
def is_array_api_strict_namespace(xp: Namespace) -> bool:
"""
Returns True if `xp` is an array-api-strict namespace.
See Also
--------
array_namespace
is_numpy_namespace
is_cupy_namespace
is_torch_namespace
is_ndonnx_namespace
is_dask_namespace
is_jax_namespace
is_pydata_sparse_namespace
"""
return xp.__name__ == "array_api_strict"
def _check_api_version(api_version: str | None) -> None:
if api_version in _API_VERSIONS_OLD:
warnings.warn(
f"The {api_version} version of the array API specification was requested but the returned namespace is actually version 2024.12"
)
elif api_version is not None and api_version not in _API_VERSIONS:
raise ValueError(
"Only the 2024.12 version of the array API specification is currently supported"
)
def array_namespace(
*xs: Array | complex | None,
api_version: str | None = None,
use_compat: bool | None = None,
) -> Namespace:
"""
Get the array API compatible namespace for the arrays `xs`.
Parameters
----------
xs: arrays
one or more arrays. xs can also be Python scalars (bool, int, float,
complex, or None), which are ignored.
api_version: str
The newest version of the spec that you need support for (currently
the compat library wrapped APIs support v2024.12).
use_compat: bool or None
If None (the default), the native namespace will be returned if it is
already array API compatible, otherwise a compat wrapper is used. If
True, the compat library wrapped library will be returned. If False,
the native library namespace is returned.
Returns
-------
out: namespace
The array API compatible namespace corresponding to the arrays in `xs`.
Raises
------
TypeError
If `xs` contains arrays from different array libraries or contains a
non-array.
Typical usage is to pass the arguments of a function to
`array_namespace()` at the top of a function to get the corresponding
array API namespace:
.. code:: python
def your_function(x, y):
xp = array_api_compat.array_namespace(x, y)
# Now use xp as the array library namespace
return xp.mean(x, axis=0) + 2*xp.std(y, axis=0)
Wrapped array namespaces can also be imported directly. For example,
`array_namespace(np.array(...))` will return `array_api_compat.numpy`.
This function will also work for any array library not wrapped by
array-api-compat if it explicitly defines `__array_namespace__
<https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__array_namespace__.html>`__
(the wrapped namespace is always preferred if it exists).
See Also
--------
is_array_api_obj
is_numpy_array
is_cupy_array
is_torch_array
is_dask_array
is_jax_array
is_pydata_sparse_array
"""
if use_compat not in [None, True, False]:
raise ValueError("use_compat must be None, True, or False")
_use_compat = use_compat in [None, True]
namespaces: set[Namespace] = set()
for x in xs:
if is_numpy_array(x):
import numpy as np
from .. import numpy as numpy_namespace
if use_compat is True:
_check_api_version(api_version)
namespaces.add(numpy_namespace)
elif use_compat is False:
namespaces.add(np)
else:
                # numpy 2.0+ ndarrays have __array_namespace__; however, numpy is not yet
                # fully array API compatible, so use the compat namespace anyway.
namespaces.add(numpy_namespace)
elif is_cupy_array(x):
if _use_compat:
_check_api_version(api_version)
from .. import cupy as cupy_namespace
namespaces.add(cupy_namespace)
else:
import cupy as cp # pyright: ignore[reportMissingTypeStubs]
namespaces.add(cp)
elif is_torch_array(x):
if _use_compat:
_check_api_version(api_version)
from .. import torch as torch_namespace
namespaces.add(torch_namespace)
else:
import torch
namespaces.add(torch)
elif is_dask_array(x):
if _use_compat:
_check_api_version(api_version)
from ..dask import array as dask_namespace
namespaces.add(dask_namespace)
else:
import dask.array as da
namespaces.add(da)
elif is_jax_array(x):
if use_compat is True:
_check_api_version(api_version)
raise ValueError("JAX does not have an array-api-compat wrapper")
elif use_compat is False:
import jax.numpy as jnp
else:
# JAX v0.4.32 and newer implements the array API directly in jax.numpy.
# For older JAX versions, it is available via jax.experimental.array_api.
import jax.numpy
if hasattr(jax.numpy, "__array_api_version__"):
jnp = jax.numpy
else:
import jax.experimental.array_api as jnp # pyright: ignore[reportMissingImports]
namespaces.add(jnp)
elif is_pydata_sparse_array(x):
if use_compat is True:
_check_api_version(api_version)
raise ValueError("`sparse` does not have an array-api-compat wrapper")
else:
import sparse # pyright: ignore[reportMissingTypeStubs]
# `sparse` is already an array namespace. We do not have a wrapper
# submodule for it.
namespaces.add(sparse)
elif hasattr(x, "__array_namespace__"):
if use_compat is True:
raise ValueError(
"The given array does not have an array-api-compat wrapper"
)
x = cast("SupportsArrayNamespace[Any]", x)
namespaces.add(x.__array_namespace__(api_version=api_version))
elif isinstance(x, (bool, int, float, complex, type(None))):
continue
else:
# TODO: Support Python scalars?
raise TypeError(f"{type(x).__name__} is not a supported array type")
if not namespaces:
raise TypeError("Unrecognized array input")
if len(namespaces) != 1:
raise TypeError(f"Multiple namespaces for array inputs: {namespaces}")
(xp,) = namespaces
return xp
# backwards compatibility alias
get_namespace = array_namespace
def _check_device(bare_xp: Namespace, device: Device) -> None: # pyright: ignore[reportUnusedFunction]
"""
Validate dummy device on device-less array backends.
Notes
-----
This function is also invoked by CuPy, which does have multiple devices
if there are multiple GPUs available.
However, CuPy multi-device support is currently impossible
without using the global device or a context manager:
https://github.com/data-apis/array-api-compat/pull/293
"""
if bare_xp is sys.modules.get("numpy"):
if device not in ("cpu", None):
raise ValueError(f"Unsupported device for NumPy: {device!r}")
elif bare_xp is sys.modules.get("dask.array"):
if device not in ("cpu", _DASK_DEVICE, None):
raise ValueError(f"Unsupported device for Dask: {device!r}")
# Placeholder object to represent the dask device
# when the array backend is not the CPU.
# (since it is not easy to tell which device a dask array is on)
class _dask_device:
def __repr__(self) -> Literal["DASK_DEVICE"]:
return "DASK_DEVICE"
_DASK_DEVICE = _dask_device()
# device() is not on numpy.ndarray or dask.array and to_device() is not on numpy.ndarray
# or cupy.ndarray. They are not included in array objects of this library
# because this library just reuses the respective ndarray classes without
# wrapping or subclassing them. These helper functions can be used instead of
# the wrapper functions for libraries that need to support both NumPy/CuPy and
# other libraries that use devices.
def device(x: _ArrayApiObj, /) -> Device:
"""
Hardware device the array data resides on.
This is equivalent to `x.device` according to the `standard
<https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.device.html>`__.
This helper is included because some array libraries either do not have
the `device` attribute or include it with an incompatible API.
Parameters
----------
x: array
array instance from an array API compatible library.
Returns
-------
out: device
a ``device`` object (see the `Device Support <https://data-apis.org/array-api/latest/design_topics/device_support.html>`__
section of the array API specification).
Notes
-----
    For NumPy the device is always `"cpu"`. For Dask, the device is `"cpu"` when
    the underlying chunks are NumPy arrays and otherwise a special `DASK_DEVICE`
    object.
See Also
--------
to_device : Move array data to a different device.
"""
if is_numpy_array(x):
return "cpu"
elif is_dask_array(x):
# Peek at the metadata of the Dask array to determine type
if is_numpy_array(x._meta): # pyright: ignore
# Must be on CPU since backed by numpy
return "cpu"
return _DASK_DEVICE
elif is_jax_array(x):
# FIXME Jitted JAX arrays do not have a device attribute
# https://github.com/jax-ml/jax/issues/26000
# Return None in this case. Note that this workaround breaks
# the standard and will result in new arrays being created on the
# default device instead of the same device as the input array(s).
x_device = getattr(x, "device", None)
# Older JAX releases had .device() as a method, which has been replaced
# with a property in accordance with the standard.
if inspect.ismethod(x_device):
return x_device()
else:
return x_device
elif is_pydata_sparse_array(x):
# `sparse` will gain `.device`, so check for this first.
x_device = getattr(x, "device", None)
if x_device is not None:
return x_device
# Everything but DOK has this attr.
try:
inner = x.data # pyright: ignore
except AttributeError:
return "cpu"
# Return the device of the constituent array
return device(inner) # pyright: ignore
return x.device # pyright: ignore
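# Hedged example: for NumPy-backed data the reported device is always the CPU,
# e.g. device(np.ones(3)) returns "cpu", while other backends return their own
# native device objects.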
# Prevent shadowing, used below
_device = device
# Based on cupy.array_api.Array.to_device
def _cupy_to_device(
x: _CupyArray,
device: Device,
/,
stream: int | Any | None = None,
) -> _CupyArray:
import cupy as cp
if device == "cpu":
# allowing us to use `to_device(x, "cpu")`
# is useful for portable test swapping between
# host and device backends
return x.get()
if not isinstance(device, cp.cuda.Device):
raise TypeError(f"Unsupported device type {device!r}")
if stream is None:
with device:
return cp.asarray(x)
# stream can be an int as specified in __dlpack__, or a CuPy stream
if isinstance(stream, int):
stream = cp.cuda.ExternalStream(stream)
elif not isinstance(stream, cp.cuda.Stream):
raise TypeError(f"Unsupported stream type {stream!r}")
with device, stream:
return cp.asarray(x)
def _torch_to_device(
x: torch.Tensor,
device: torch.device | str | int,
/,
stream: None = None,
) -> torch.Tensor:
if stream is not None:
raise NotImplementedError
return x.to(device)
def to_device(x: Array, device: Device, /, *, stream: int | Any | None = None) -> Array:
"""
Copy the array from the device on which it currently resides to the specified ``device``.
This is equivalent to `x.to_device(device, stream=stream)` according to
the `standard
<https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.to_device.html>`__.
This helper is included because some array libraries do not have the
`to_device` method.
Parameters
----------
x: array
array instance from an array API compatible library.
device: device
a ``device`` object (see the `Device Support <https://data-apis.org/array-api/latest/design_topics/device_support.html>`__
section of the array API specification).
stream: int | Any | None
stream object to use during copy. In addition to the types supported
in ``array.__dlpack__``, implementations may choose to support any
library-specific stream object with the caveat that any code using
such an object would not be portable.
Returns
-------
out: array
an array with the same data and data type as ``x`` and located on the
specified ``device``.
Notes
-----
For NumPy, this function effectively does nothing since the only supported
device is the CPU. For CuPy, this method supports CuPy CUDA
:external+cupy:class:`Device <cupy.cuda.Device>` and
:external+cupy:class:`Stream <cupy.cuda.Stream>` objects. For PyTorch,
this is the same as :external+torch:meth:`x.to(device) <torch.Tensor.to>`
(the ``stream`` argument is not supported in PyTorch).
See Also
--------
device : Hardware device the array data resides on.
"""
if is_numpy_array(x):
if stream is not None:
raise ValueError("The stream argument to to_device() is not supported")
if device == "cpu":
return x
raise ValueError(f"Unsupported device {device!r}")
elif is_cupy_array(x):
# cupy does not yet have to_device
return _cupy_to_device(x, device, stream=stream)
elif is_torch_array(x):
return _torch_to_device(x, device, stream=stream) # pyright: ignore[reportArgumentType]
elif is_dask_array(x):
if stream is not None:
raise ValueError("The stream argument to to_device() is not supported")
# TODO: What if our array is on the GPU already?
if device == "cpu":
return x
raise ValueError(f"Unsupported device {device!r}")
elif is_jax_array(x):
if not hasattr(x, "__array_namespace__"):
            # In JAX v0.4.31 and older, this import adds a to_device method to x...
import jax.experimental.array_api # noqa: F401 # pyright: ignore
# ... but only on eager JAX. It won't work inside jax.jit.
if not hasattr(x, "to_device"):
return x
return x.to_device(device, stream=stream)
elif is_pydata_sparse_array(x) and device == _device(x):
# Perform trivial check to return the same array if
# device is same instead of err-ing.
return x
return x.to_device(device, stream=stream) # pyright: ignore
@overload
def size(x: HasShape[Collection[SupportsIndex]]) -> int: ...
@overload
def size(x: HasShape[Collection[None]]) -> None: ...
@overload
def size(x: HasShape[Collection[SupportsIndex | None]]) -> int | None: ...
def size(x: HasShape[Collection[SupportsIndex | None]]) -> int | None:
"""
Return the total number of elements of x.
This is equivalent to `x.size` according to the `standard
<https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.size.html>`__.
This helper is included because PyTorch defines `size` in an
:external+torch:meth:`incompatible way <torch.Tensor.size>`.
It also fixes dask.array's behaviour which returns nan for unknown sizes, whereas
the standard requires None.
"""
# Lazy API compliant arrays, such as ndonnx, can contain None in their shape
if None in x.shape:
return None
out = math.prod(cast("Collection[SupportsIndex]", x.shape))
# dask.array.Array.shape can contain NaN
return None if math.isnan(out) else out
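# Hedged example: for an eager array the element count is a plain int, e.g.
# size(np.ones((2, 3))) returns 6, whereas a lazy array with an unknown
# dimension reports None instead of nan.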
@lru_cache(100)
def _is_writeable_cls(cls: type) -> bool | None:
if (
_issubclass_fast(cls, "numpy", "generic")
or _issubclass_fast(cls, "jax", "Array")
or _issubclass_fast(cls, "sparse", "SparseArray")
):
return False
if _is_array_api_cls(cls):
return True
return None
def is_writeable_array(x: object) -> bool:
"""
Return False if ``x.__setitem__`` is expected to raise; True otherwise.
Return False if `x` is not an array API compatible object.
Warning
-------
As there is no standard way to check if an array is writeable without actually
writing to it, this function blindly returns True for all unknown array types.
"""
cls = cast(Hashable, type(x))
if _issubclass_fast(cls, "numpy", "ndarray"):
return cast("npt.NDArray", x).flags.writeable
res = _is_writeable_cls(cls)
if res is not None:
return res
return hasattr(x, '__array_namespace__')
@lru_cache(100)
def _is_lazy_cls(cls: type) -> bool | None:
if (
_issubclass_fast(cls, "numpy", "ndarray")
or _issubclass_fast(cls, "numpy", "generic")
or _issubclass_fast(cls, "cupy", "ndarray")
or _issubclass_fast(cls, "torch", "Tensor")
or _issubclass_fast(cls, "sparse", "SparseArray")
):
return False
if (
_issubclass_fast(cls, "jax", "Array")
or _issubclass_fast(cls, "dask.array", "Array")
or _issubclass_fast(cls, "ndonnx", "Array")
):
return True
return None
def is_lazy_array(x: object) -> bool:
"""Return True if x is potentially a future or it may be otherwise impossible or
expensive to eagerly read its contents, regardless of their size, e.g. by
calling ``bool(x)`` or ``float(x)``.
Return False otherwise; e.g. ``bool(x)`` etc. is guaranteed to succeed and to be
cheap as long as the array has the right dtype and size.
Note
----
This function errs on the side of caution for array types that may or may not be
lazy, e.g. JAX arrays, by always returning True for them.
"""
# **JAX note:** while it is possible to determine if you're inside or outside
# jax.jit by testing the subclass of a jax.Array object, as well as testing bool()
# as we do below for unknown arrays, this is not recommended by JAX best practices.
# **Dask note:** Dask eagerly computes the graph on __bool__, __float__, and so on.
# This behaviour, while impossible to change without breaking backwards
# compatibility, is highly detrimental to performance as the whole graph will end
# up being computed multiple times.
# Note: skipping reclassification of JAX zero gradient arrays, as one will
# exclusively get them once they leave a jax.grad JIT context.
cls = cast(Hashable, type(x))
res = _is_lazy_cls(cls)
if res is not None:
return res
if not hasattr(x, "__array_namespace__"):
return False
# Unknown Array API compatible object. Note that this test may have dire consequences
# in terms of performance, e.g. for a lazy object that eagerly computes the graph
# on __bool__ (dask is one such example, which however is special-cased above).
# Select a single point of the array
s = size(cast("HasShape[Collection[SupportsIndex | None]]", x))
if s is None:
return True
xp = array_namespace(x)
if s > 1:
x = xp.reshape(x, (-1,))[0]
# Cast to dtype=bool and deal with size 0 arrays
x = xp.any(x)
try:
bool(x)
return False
    # The Array API standard dictates that __bool__ should raise TypeError if the
# output cannot be defined.
# Here we allow for it to raise arbitrary exceptions, e.g. like Dask does.
except Exception:
return True
__all__ = [
"array_namespace",
"device",
"get_namespace",
"is_array_api_obj",
"is_array_api_strict_namespace",
"is_cupy_array",
"is_cupy_namespace",
"is_dask_array",
"is_dask_namespace",
"is_jax_array",
"is_jax_namespace",
"is_numpy_array",
"is_numpy_namespace",
"is_torch_array",
"is_torch_namespace",
"is_ndonnx_array",
"is_ndonnx_namespace",
"is_pydata_sparse_array",
"is_pydata_sparse_namespace",
"is_writeable_array",
"is_lazy_array",
"size",
"to_device",
]
_all_ignore = ['lru_cache', 'sys', 'math', 'inspect', 'warnings']
def __dir__() -> list[str]:
return __all__
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_compat/common/__init__.py | sklearn/externals/array_api_compat/common/__init__.py | from ._helpers import * # noqa: F403
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_compat/cupy/_typing.py | sklearn/externals/array_api_compat/cupy/_typing.py | from __future__ import annotations
__all__ = ["Array", "DType", "Device"]
_all_ignore = ["cp"]
from typing import TYPE_CHECKING
import cupy as cp
from cupy import ndarray as Array
from cupy.cuda.device import Device
if TYPE_CHECKING:
# NumPy 1.x on Python 3.10 fails to parse np.dtype[]
DType = cp.dtype[
cp.intp
| cp.int8
| cp.int16
| cp.int32
| cp.int64
| cp.uint8
| cp.uint16
| cp.uint32
| cp.uint64
| cp.float32
| cp.float64
| cp.complex64
| cp.complex128
| cp.bool_
]
else:
DType = cp.dtype
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_compat/cupy/_aliases.py | sklearn/externals/array_api_compat/cupy/_aliases.py | from __future__ import annotations
from typing import Optional
import cupy as cp
from ..common import _aliases, _helpers
from ..common._typing import NestedSequence, SupportsBufferProtocol
from .._internal import get_xp
from ._info import __array_namespace_info__
from ._typing import Array, Device, DType
bool = cp.bool_
# Basic renames
acos = cp.arccos
acosh = cp.arccosh
asin = cp.arcsin
asinh = cp.arcsinh
atan = cp.arctan
atan2 = cp.arctan2
atanh = cp.arctanh
bitwise_left_shift = cp.left_shift
bitwise_invert = cp.invert
bitwise_right_shift = cp.right_shift
concat = cp.concatenate
pow = cp.power
arange = get_xp(cp)(_aliases.arange)
empty = get_xp(cp)(_aliases.empty)
empty_like = get_xp(cp)(_aliases.empty_like)
eye = get_xp(cp)(_aliases.eye)
full = get_xp(cp)(_aliases.full)
full_like = get_xp(cp)(_aliases.full_like)
linspace = get_xp(cp)(_aliases.linspace)
ones = get_xp(cp)(_aliases.ones)
ones_like = get_xp(cp)(_aliases.ones_like)
zeros = get_xp(cp)(_aliases.zeros)
zeros_like = get_xp(cp)(_aliases.zeros_like)
UniqueAllResult = get_xp(cp)(_aliases.UniqueAllResult)
UniqueCountsResult = get_xp(cp)(_aliases.UniqueCountsResult)
UniqueInverseResult = get_xp(cp)(_aliases.UniqueInverseResult)
unique_all = get_xp(cp)(_aliases.unique_all)
unique_counts = get_xp(cp)(_aliases.unique_counts)
unique_inverse = get_xp(cp)(_aliases.unique_inverse)
unique_values = get_xp(cp)(_aliases.unique_values)
std = get_xp(cp)(_aliases.std)
var = get_xp(cp)(_aliases.var)
cumulative_sum = get_xp(cp)(_aliases.cumulative_sum)
cumulative_prod = get_xp(cp)(_aliases.cumulative_prod)
clip = get_xp(cp)(_aliases.clip)
permute_dims = get_xp(cp)(_aliases.permute_dims)
reshape = get_xp(cp)(_aliases.reshape)
argsort = get_xp(cp)(_aliases.argsort)
sort = get_xp(cp)(_aliases.sort)
nonzero = get_xp(cp)(_aliases.nonzero)
ceil = get_xp(cp)(_aliases.ceil)
floor = get_xp(cp)(_aliases.floor)
trunc = get_xp(cp)(_aliases.trunc)
matmul = get_xp(cp)(_aliases.matmul)
matrix_transpose = get_xp(cp)(_aliases.matrix_transpose)
tensordot = get_xp(cp)(_aliases.tensordot)
sign = get_xp(cp)(_aliases.sign)
finfo = get_xp(cp)(_aliases.finfo)
iinfo = get_xp(cp)(_aliases.iinfo)
# asarray also adds the copy keyword, which is not present in numpy 1.0.
def asarray(
obj: (
Array
| bool | int | float | complex
| NestedSequence[bool | int | float | complex]
| SupportsBufferProtocol
),
/,
*,
dtype: Optional[DType] = None,
device: Optional[Device] = None,
copy: Optional[bool] = None,
**kwargs,
) -> Array:
"""
Array API compatibility wrapper for asarray().
See the corresponding documentation in the array library and/or the array API
specification for more details.
"""
with cp.cuda.Device(device):
if copy is None:
return cp.asarray(obj, dtype=dtype, **kwargs)
else:
res = cp.array(obj, dtype=dtype, copy=copy, **kwargs)
if not copy and res is not obj:
raise ValueError("Unable to avoid copy while creating an array as requested")
return res
def astype(
x: Array,
dtype: DType,
/,
*,
copy: bool = True,
device: Optional[Device] = None,
) -> Array:
if device is None:
return x.astype(dtype=dtype, copy=copy)
out = _helpers.to_device(x.astype(dtype=dtype, copy=False), device)
return out.copy() if copy and out is x else out
# cupy.count_nonzero does not have keepdims
def count_nonzero(
x: Array,
axis=None,
keepdims=False
) -> Array:
result = cp.count_nonzero(x, axis)
if keepdims:
if axis is None:
return cp.reshape(result, [1]*x.ndim)
return cp.expand_dims(result, axis)
return result
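# Hedged example (requires CuPy and a CUDA device): the keepdims emulation
# above keeps reduced axes as size-1 dimensions, e.g.
# count_nonzero(cp.asarray([[1, 0], [2, 3]]), axis=1, keepdims=True)
# has shape (2, 1).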
# take_along_axis: axis defaults to -1 but in cupy (and numpy) axis is a required arg
def take_along_axis(x: Array, indices: Array, /, *, axis: int = -1):
return cp.take_along_axis(x, indices, axis=axis)
# These functions are completely new here. If the library already has them
# (i.e., numpy 2.0), use the library version instead of our wrapper.
if hasattr(cp, 'vecdot'):
vecdot = cp.vecdot
else:
vecdot = get_xp(cp)(_aliases.vecdot)
if hasattr(cp, 'isdtype'):
isdtype = cp.isdtype
else:
isdtype = get_xp(cp)(_aliases.isdtype)
if hasattr(cp, 'unstack'):
unstack = cp.unstack
else:
unstack = get_xp(cp)(_aliases.unstack)
__all__ = _aliases.__all__ + ['__array_namespace_info__', 'asarray', 'astype',
'acos', 'acosh', 'asin', 'asinh', 'atan',
'atan2', 'atanh', 'bitwise_left_shift',
'bitwise_invert', 'bitwise_right_shift',
'bool', 'concat', 'count_nonzero', 'pow', 'sign',
'take_along_axis']
_all_ignore = ['cp', 'get_xp']
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_compat/cupy/linalg.py | sklearn/externals/array_api_compat/cupy/linalg.py | from cupy.linalg import * # noqa: F403
# cupy.linalg doesn't have __all__. If it is added, replace this with
#
# from cupy.linalg import __all__ as linalg_all
_n = {}
exec('from cupy.linalg import *', _n)
del _n['__builtins__']
linalg_all = list(_n)
del _n
from ..common import _linalg
from .._internal import get_xp
import cupy as cp
# These functions are in both the main and linalg namespaces
from ._aliases import matmul, matrix_transpose, tensordot, vecdot # noqa: F401
cross = get_xp(cp)(_linalg.cross)
outer = get_xp(cp)(_linalg.outer)
EighResult = _linalg.EighResult
QRResult = _linalg.QRResult
SlogdetResult = _linalg.SlogdetResult
SVDResult = _linalg.SVDResult
eigh = get_xp(cp)(_linalg.eigh)
qr = get_xp(cp)(_linalg.qr)
slogdet = get_xp(cp)(_linalg.slogdet)
svd = get_xp(cp)(_linalg.svd)
cholesky = get_xp(cp)(_linalg.cholesky)
matrix_rank = get_xp(cp)(_linalg.matrix_rank)
pinv = get_xp(cp)(_linalg.pinv)
matrix_norm = get_xp(cp)(_linalg.matrix_norm)
svdvals = get_xp(cp)(_linalg.svdvals)
diagonal = get_xp(cp)(_linalg.diagonal)
trace = get_xp(cp)(_linalg.trace)
# These functions are completely new here. If the library already has them
# (i.e., numpy 2.0), use the library version instead of our wrapper.
if hasattr(cp.linalg, 'vector_norm'):
vector_norm = cp.linalg.vector_norm
else:
vector_norm = get_xp(cp)(_linalg.vector_norm)
__all__ = linalg_all + _linalg.__all__
del get_xp
del cp
del linalg_all
del _linalg
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_compat/cupy/__init__.py | sklearn/externals/array_api_compat/cupy/__init__.py | from cupy import * # noqa: F403
# from cupy import * doesn't overwrite these builtin names
from cupy import abs, max, min, round # noqa: F401
# These imports may overwrite names from the import * above.
from ._aliases import * # noqa: F403
# See the comment in the numpy __init__.py
__import__(__package__ + '.linalg')
__import__(__package__ + '.fft')
__array_api_version__ = '2024.12'
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_compat/cupy/_info.py | sklearn/externals/array_api_compat/cupy/_info.py | """
Array API Inspection namespace
This is the namespace for inspection functions as defined by the array API
standard. See
https://data-apis.org/array-api/latest/API_specification/inspection.html for
more details.
"""
from cupy import (
dtype,
cuda,
bool_ as bool,
intp,
int8,
int16,
int32,
int64,
uint8,
uint16,
uint32,
uint64,
float32,
float64,
complex64,
complex128,
)
class __array_namespace_info__:
"""
Get the array API inspection namespace for CuPy.
The array API inspection namespace defines the following functions:
- capabilities()
- default_device()
- default_dtypes()
- dtypes()
- devices()
See
https://data-apis.org/array-api/latest/API_specification/inspection.html
for more details.
Returns
-------
info : ModuleType
The array API inspection namespace for CuPy.
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.default_dtypes()
{'real floating': cupy.float64,
'complex floating': cupy.complex128,
'integral': cupy.int64,
'indexing': cupy.int64}
"""
__module__ = 'cupy'
def capabilities(self):
"""
Return a dictionary of array API library capabilities.
The resulting dictionary has the following keys:
- **"boolean indexing"**: boolean indicating whether an array library
supports boolean indexing. Always ``True`` for CuPy.
- **"data-dependent shapes"**: boolean indicating whether an array
library supports data-dependent output shapes. Always ``True`` for
CuPy.
See
https://data-apis.org/array-api/latest/API_specification/generated/array_api.info.capabilities.html
for more details.
See Also
--------
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Returns
-------
capabilities : dict
A dictionary of array API library capabilities.
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.capabilities()
{'boolean indexing': True,
'data-dependent shapes': True,
'max dimensions': 64}
"""
return {
"boolean indexing": True,
"data-dependent shapes": True,
"max dimensions": 64,
}
def default_device(self):
"""
The default device used for new CuPy arrays.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Returns
-------
device : Device
The default device used for new CuPy arrays.
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.default_device()
Device(0)
Notes
-----
This method returns the static default device when CuPy is initialized.
However, the *current* device used by creation functions (``empty`` etc.)
can be changed globally or with a context manager.
See Also
--------
https://github.com/data-apis/array-api/issues/835
"""
return cuda.Device(0)
def default_dtypes(self, *, device=None):
"""
The default data types used for new CuPy arrays.
For CuPy, this always returns the following dictionary:
- **"real floating"**: ``cupy.float64``
- **"complex floating"**: ``cupy.complex128``
- **"integral"**: ``cupy.intp``
- **"indexing"**: ``cupy.intp``
Parameters
----------
device : str, optional
The device to get the default data types for.
Returns
-------
dtypes : dict
A dictionary describing the default data types used for new CuPy
arrays.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.default_dtypes()
{'real floating': cupy.float64,
'complex floating': cupy.complex128,
'integral': cupy.int64,
'indexing': cupy.int64}
"""
# TODO: Does this depend on device?
return {
"real floating": dtype(float64),
"complex floating": dtype(complex128),
"integral": dtype(intp),
"indexing": dtype(intp),
}
def dtypes(self, *, device=None, kind=None):
"""
The array API data types supported by CuPy.
Note that this function only returns data types that are defined by
the array API.
Parameters
----------
device : str, optional
The device to get the data types for.
kind : str or tuple of str, optional
The kind of data types to return. If ``None``, all data types are
returned. If a string, only data types of that kind are returned.
If a tuple, a dictionary containing the union of the given kinds
is returned. The following kinds are supported:
- ``'bool'``: boolean data types (i.e., ``bool``).
- ``'signed integer'``: signed integer data types (i.e., ``int8``,
``int16``, ``int32``, ``int64``).
- ``'unsigned integer'``: unsigned integer data types (i.e.,
``uint8``, ``uint16``, ``uint32``, ``uint64``).
- ``'integral'``: integer data types. Shorthand for ``('signed
integer', 'unsigned integer')``.
- ``'real floating'``: real-valued floating-point data types
(i.e., ``float32``, ``float64``).
- ``'complex floating'``: complex floating-point data types (i.e.,
``complex64``, ``complex128``).
- ``'numeric'``: numeric data types. Shorthand for ``('integral',
'real floating', 'complex floating')``.
Returns
-------
dtypes : dict
A dictionary mapping the names of data types to the corresponding
CuPy data types.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.devices
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.dtypes(kind='signed integer')
{'int8': cupy.int8,
'int16': cupy.int16,
'int32': cupy.int32,
'int64': cupy.int64}
"""
# TODO: Does this depend on device?
if kind is None:
return {
"bool": dtype(bool),
"int8": dtype(int8),
"int16": dtype(int16),
"int32": dtype(int32),
"int64": dtype(int64),
"uint8": dtype(uint8),
"uint16": dtype(uint16),
"uint32": dtype(uint32),
"uint64": dtype(uint64),
"float32": dtype(float32),
"float64": dtype(float64),
"complex64": dtype(complex64),
"complex128": dtype(complex128),
}
if kind == "bool":
return {"bool": bool}
if kind == "signed integer":
return {
"int8": dtype(int8),
"int16": dtype(int16),
"int32": dtype(int32),
"int64": dtype(int64),
}
if kind == "unsigned integer":
return {
"uint8": dtype(uint8),
"uint16": dtype(uint16),
"uint32": dtype(uint32),
"uint64": dtype(uint64),
}
if kind == "integral":
return {
"int8": dtype(int8),
"int16": dtype(int16),
"int32": dtype(int32),
"int64": dtype(int64),
"uint8": dtype(uint8),
"uint16": dtype(uint16),
"uint32": dtype(uint32),
"uint64": dtype(uint64),
}
if kind == "real floating":
return {
"float32": dtype(float32),
"float64": dtype(float64),
}
if kind == "complex floating":
return {
"complex64": dtype(complex64),
"complex128": dtype(complex128),
}
if kind == "numeric":
return {
"int8": dtype(int8),
"int16": dtype(int16),
"int32": dtype(int32),
"int64": dtype(int64),
"uint8": dtype(uint8),
"uint16": dtype(uint16),
"uint32": dtype(uint32),
"uint64": dtype(uint64),
"float32": dtype(float32),
"float64": dtype(float64),
"complex64": dtype(complex64),
"complex128": dtype(complex128),
}
if isinstance(kind, tuple):
res = {}
for k in kind:
res.update(self.dtypes(kind=k))
return res
raise ValueError(f"unsupported kind: {kind!r}")
def devices(self):
"""
The devices supported by CuPy.
Returns
-------
devices : list[Device]
The devices supported by CuPy.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.dtypes
"""
return [cuda.Device(i) for i in range(cuda.runtime.getDeviceCount())]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_compat/cupy/fft.py | sklearn/externals/array_api_compat/cupy/fft.py | from cupy.fft import * # noqa: F403
# cupy.fft doesn't have __all__. If it is added, replace this with
#
# from cupy.fft import __all__ as fft_all
_n = {}
exec('from cupy.fft import *', _n)
del _n['__builtins__']
fft_all = list(_n)
del _n
from ..common import _fft
from .._internal import get_xp
import cupy as cp
fft = get_xp(cp)(_fft.fft)
ifft = get_xp(cp)(_fft.ifft)
fftn = get_xp(cp)(_fft.fftn)
ifftn = get_xp(cp)(_fft.ifftn)
rfft = get_xp(cp)(_fft.rfft)
irfft = get_xp(cp)(_fft.irfft)
rfftn = get_xp(cp)(_fft.rfftn)
irfftn = get_xp(cp)(_fft.irfftn)
hfft = get_xp(cp)(_fft.hfft)
ihfft = get_xp(cp)(_fft.ihfft)
fftfreq = get_xp(cp)(_fft.fftfreq)
rfftfreq = get_xp(cp)(_fft.rfftfreq)
fftshift = get_xp(cp)(_fft.fftshift)
ifftshift = get_xp(cp)(_fft.ifftshift)
__all__ = fft_all + _fft.__all__
del get_xp
del cp
del fft_all
del _fft
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_compat/numpy/_typing.py | sklearn/externals/array_api_compat/numpy/_typing.py | from __future__ import annotations
from typing import TYPE_CHECKING, Any, Literal, TypeAlias
import numpy as np
Device: TypeAlias = Literal["cpu"]
if TYPE_CHECKING:
# NumPy 1.x on Python 3.10 fails to parse np.dtype[]
DType: TypeAlias = np.dtype[
np.bool_
| np.integer[Any]
| np.float32
| np.float64
| np.complex64
| np.complex128
]
Array: TypeAlias = np.ndarray[Any, DType]
else:
DType: TypeAlias = np.dtype
Array: TypeAlias = np.ndarray
__all__ = ["Array", "DType", "Device"]
_all_ignore = ["np"]
def __dir__() -> list[str]:
return __all__
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_compat/numpy/_aliases.py | sklearn/externals/array_api_compat/numpy/_aliases.py | # pyright: reportPrivateUsage=false
from __future__ import annotations
from builtins import bool as py_bool
from typing import TYPE_CHECKING, Any, Literal, TypeAlias, cast
import numpy as np
from .._internal import get_xp
from ..common import _aliases, _helpers
from ..common._typing import NestedSequence, SupportsBufferProtocol
from ._info import __array_namespace_info__
from ._typing import Array, Device, DType
if TYPE_CHECKING:
from typing_extensions import Buffer, TypeIs
# The values of the `_CopyMode` enum can be either `False`, `True`, or `2`:
# https://github.com/numpy/numpy/blob/5a8a6a79d9c2fff8f07dcab5d41e14f8508d673f/numpy/_globals.pyi#L7-L10
_Copy: TypeAlias = py_bool | Literal[2] | np._CopyMode
bool = np.bool_
# Basic renames
acos = np.arccos
acosh = np.arccosh
asin = np.arcsin
asinh = np.arcsinh
atan = np.arctan
atan2 = np.arctan2
atanh = np.arctanh
bitwise_left_shift = np.left_shift
bitwise_invert = np.invert
bitwise_right_shift = np.right_shift
concat = np.concatenate
pow = np.power
arange = get_xp(np)(_aliases.arange)
empty = get_xp(np)(_aliases.empty)
empty_like = get_xp(np)(_aliases.empty_like)
eye = get_xp(np)(_aliases.eye)
full = get_xp(np)(_aliases.full)
full_like = get_xp(np)(_aliases.full_like)
linspace = get_xp(np)(_aliases.linspace)
ones = get_xp(np)(_aliases.ones)
ones_like = get_xp(np)(_aliases.ones_like)
zeros = get_xp(np)(_aliases.zeros)
zeros_like = get_xp(np)(_aliases.zeros_like)
UniqueAllResult = get_xp(np)(_aliases.UniqueAllResult)
UniqueCountsResult = get_xp(np)(_aliases.UniqueCountsResult)
UniqueInverseResult = get_xp(np)(_aliases.UniqueInverseResult)
unique_all = get_xp(np)(_aliases.unique_all)
unique_counts = get_xp(np)(_aliases.unique_counts)
unique_inverse = get_xp(np)(_aliases.unique_inverse)
unique_values = get_xp(np)(_aliases.unique_values)
std = get_xp(np)(_aliases.std)
var = get_xp(np)(_aliases.var)
cumulative_sum = get_xp(np)(_aliases.cumulative_sum)
cumulative_prod = get_xp(np)(_aliases.cumulative_prod)
clip = get_xp(np)(_aliases.clip)
permute_dims = get_xp(np)(_aliases.permute_dims)
reshape = get_xp(np)(_aliases.reshape)
argsort = get_xp(np)(_aliases.argsort)
sort = get_xp(np)(_aliases.sort)
nonzero = get_xp(np)(_aliases.nonzero)
ceil = get_xp(np)(_aliases.ceil)
floor = get_xp(np)(_aliases.floor)
trunc = get_xp(np)(_aliases.trunc)
matmul = get_xp(np)(_aliases.matmul)
matrix_transpose = get_xp(np)(_aliases.matrix_transpose)
tensordot = get_xp(np)(_aliases.tensordot)
sign = get_xp(np)(_aliases.sign)
finfo = get_xp(np)(_aliases.finfo)
iinfo = get_xp(np)(_aliases.iinfo)
def _supports_buffer_protocol(obj: object) -> TypeIs[Buffer]: # pyright: ignore[reportUnusedFunction]
try:
memoryview(obj) # pyright: ignore[reportArgumentType]
except TypeError:
return False
return True
# asarray also adds the copy keyword, which is not present in numpy 1.0.
# asarray() is different enough between numpy, cupy, and dask, and the logic
# is complicated enough, that it's easier to define it separately for each
# module rather than trying to combine everything into one function in common/
def asarray(
obj: Array | complex | NestedSequence[complex] | SupportsBufferProtocol,
/,
*,
dtype: DType | None = None,
device: Device | None = None,
copy: _Copy | None = None,
**kwargs: Any,
) -> Array:
"""
Array API compatibility wrapper for asarray().
See the corresponding documentation in the array library and/or the array API
specification for more details.
"""
_helpers._check_device(np, device)
if copy is None:
copy = np._CopyMode.IF_NEEDED
elif copy is False:
copy = np._CopyMode.NEVER
elif copy is True:
copy = np._CopyMode.ALWAYS
return np.array(obj, copy=copy, dtype=dtype, **kwargs) # pyright: ignore
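# A minimal sketch of the _CopyMode mapping above (illustrative only; guarded
# so it never runs at import time):
if __name__ == "__main__":
    a = np.arange(3)
    assert asarray(a, copy=False) is a       # maps to NEVER, so `a` is reused
    assert asarray(a, copy=True) is not a    # maps to ALWAYS, a fresh copy
    try:
        asarray(a, dtype=np.float64, copy=False)   # a copy cannot be avoided
    except ValueError:
        pass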
def astype(
x: Array,
dtype: DType,
/,
*,
copy: py_bool = True,
device: Device | None = None,
) -> Array:
_helpers._check_device(np, device)
return x.astype(dtype=dtype, copy=copy)
# count_nonzero returns a python int for axis=None and keepdims=False
# https://github.com/numpy/numpy/issues/17562
def count_nonzero(
x: Array,
axis: int | tuple[int, ...] | None = None,
keepdims: py_bool = False,
) -> Array:
# NOTE: this is currently incorrectly typed in numpy, but will be fixed in
# numpy 2.2.5 and 2.3.0: https://github.com/numpy/numpy/pull/28750
result = cast("Any", np.count_nonzero(x, axis=axis, keepdims=keepdims)) # pyright: ignore[reportArgumentType, reportCallIssue]
if axis is None and not keepdims:
return np.asarray(result)
return result
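# Quick sketch of the axis=None special case above (illustrative only,
# script-only):
if __name__ == "__main__":
    x = np.asarray([[0, 1], [2, 0]])
    assert isinstance(count_nonzero(x), np.ndarray)       # not a Python int
    assert count_nonzero(x, keepdims=True).shape == (1, 1)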
# take_along_axis: axis defaults to -1 but in numpy axis is a required arg
def take_along_axis(x: Array, indices: Array, /, *, axis: int = -1):
return np.take_along_axis(x, indices, axis=axis)
# These functions are completely new here. If the library already has them
# (i.e., numpy 2.0), use the library version instead of our wrapper.
if hasattr(np, "vecdot"):
vecdot = np.vecdot
else:
vecdot = get_xp(np)(_aliases.vecdot)
if hasattr(np, "isdtype"):
isdtype = np.isdtype
else:
isdtype = get_xp(np)(_aliases.isdtype)
if hasattr(np, "unstack"):
unstack = np.unstack
else:
unstack = get_xp(np)(_aliases.unstack)
__all__ = [
"__array_namespace_info__",
"asarray",
"astype",
"acos",
"acosh",
"asin",
"asinh",
"atan",
"atan2",
"atanh",
"bitwise_left_shift",
"bitwise_invert",
"bitwise_right_shift",
"bool",
"concat",
"count_nonzero",
"pow",
"take_along_axis"
]
__all__ += _aliases.__all__
_all_ignore = ["np", "get_xp"]
def __dir__() -> list[str]:
return __all__
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_compat/numpy/linalg.py | sklearn/externals/array_api_compat/numpy/linalg.py | # pyright: reportAttributeAccessIssue=false
# pyright: reportUnknownArgumentType=false
# pyright: reportUnknownMemberType=false
# pyright: reportUnknownVariableType=false
from __future__ import annotations
import numpy as np
# intersection of `np.linalg.__all__` on numpy 1.22 and 2.2, minus `_linalg.__all__`
from numpy.linalg import (
LinAlgError,
cond,
det,
eig,
eigvals,
eigvalsh,
inv,
lstsq,
matrix_power,
multi_dot,
norm,
tensorinv,
tensorsolve,
)
from .._internal import get_xp
from ..common import _linalg
# These functions are in both the main and linalg namespaces
from ._aliases import matmul, matrix_transpose, tensordot, vecdot # noqa: F401
from ._typing import Array
cross = get_xp(np)(_linalg.cross)
outer = get_xp(np)(_linalg.outer)
EighResult = _linalg.EighResult
QRResult = _linalg.QRResult
SlogdetResult = _linalg.SlogdetResult
SVDResult = _linalg.SVDResult
eigh = get_xp(np)(_linalg.eigh)
qr = get_xp(np)(_linalg.qr)
slogdet = get_xp(np)(_linalg.slogdet)
svd = get_xp(np)(_linalg.svd)
cholesky = get_xp(np)(_linalg.cholesky)
matrix_rank = get_xp(np)(_linalg.matrix_rank)
pinv = get_xp(np)(_linalg.pinv)
matrix_norm = get_xp(np)(_linalg.matrix_norm)
svdvals = get_xp(np)(_linalg.svdvals)
diagonal = get_xp(np)(_linalg.diagonal)
trace = get_xp(np)(_linalg.trace)
# Note: unlike np.linalg.solve, the array API solve() only accepts x2 as a
# vector when it is exactly 1-dimensional. All other cases treat x2 as a stack
# of matrices. The np.linalg.solve behavior of allowing stacks of both
# matrices and vectors is ambiguous c.f.
# https://github.com/numpy/numpy/issues/15349 and
# https://github.com/data-apis/array-api/issues/285.
# To work around this, the below is the code from np.linalg.solve except
# only calling solve1 in the exactly 1D case.
# This code is here instead of in common because it is numpy specific. Also
# note that CuPy's solve() does not currently support broadcasting (see
# https://github.com/cupy/cupy/blob/main/cupy/cublas.py#L43).
def solve(x1: Array, x2: Array, /) -> Array:
try:
from numpy.linalg._linalg import (
_assert_stacked_2d,
_assert_stacked_square,
_commonType,
_makearray,
_raise_linalgerror_singular,
isComplexType,
)
except ImportError:
from numpy.linalg.linalg import (
_assert_stacked_2d,
_assert_stacked_square,
_commonType,
_makearray,
_raise_linalgerror_singular,
isComplexType,
)
from numpy.linalg import _umath_linalg
x1, _ = _makearray(x1)
_assert_stacked_2d(x1)
_assert_stacked_square(x1)
x2, wrap = _makearray(x2)
t, result_t = _commonType(x1, x2)
# This part is different from np.linalg.solve
gufunc: np.ufunc
if x2.ndim == 1:
gufunc = _umath_linalg.solve1
else:
gufunc = _umath_linalg.solve
    # Select the real or complex gufunc signature; complex dtypes were added
    # to the array API spec in its 2022.12 revision, so both branches are used.
signature = "DD->D" if isComplexType(t) else "dd->d"
with np.errstate(
call=_raise_linalgerror_singular,
invalid="call",
over="ignore",
divide="ignore",
under="ignore",
):
r: Array = gufunc(x1, x2, signature=signature)
return wrap(r.astype(result_t, copy=False))
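# A small sketch of the 1-D vs. stacked dispatch above (illustrative only,
# script-only):
if __name__ == "__main__":
    A = np.eye(2)
    b = np.ones(2)                     # exactly 1-D: solved as a single vector
    assert solve(A, b).shape == (2,)
    B = np.ones((3, 2, 2))             # anything else: treated as a stack of matrices
    assert solve(A, B).shape == (3, 2, 2)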
# These functions are completely new here. If the library already has them
# (i.e., numpy 2.0), use the library version instead of our wrapper.
if hasattr(np.linalg, "vector_norm"):
vector_norm = np.linalg.vector_norm
else:
vector_norm = get_xp(np)(_linalg.vector_norm)
__all__ = [
"LinAlgError",
"cond",
"det",
"eig",
"eigvals",
"eigvalsh",
"inv",
"lstsq",
"matrix_power",
"multi_dot",
"norm",
"tensorinv",
"tensorsolve",
]
__all__ += _linalg.__all__
__all__ += ["solve", "vector_norm"]
def __dir__() -> list[str]:
return __all__
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_compat/numpy/__init__.py | sklearn/externals/array_api_compat/numpy/__init__.py | # ruff: noqa: PLC0414
from typing import Final
from numpy import * # noqa: F403 # pyright: ignore[reportWildcardImportFromLibrary]
# from numpy import * doesn't overwrite these builtin names
from numpy import abs as abs
from numpy import max as max
from numpy import min as min
from numpy import round as round
# These imports may overwrite names from the import * above.
from ._aliases import * # noqa: F403
# Don't know why, but we have to do an absolute import to import linalg. If we
# instead do
#
# from . import linalg
#
# It doesn't overwrite np.linalg from above. The import is generated
# dynamically so that the library can be vendored.
__import__(__package__ + ".linalg")
__import__(__package__ + ".fft")
from .linalg import matrix_transpose, vecdot # type: ignore[no-redef] # noqa: F401
__array_api_version__: Final = "2024.12"
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_compat/numpy/_info.py | sklearn/externals/array_api_compat/numpy/_info.py | """
Array API Inspection namespace
This is the namespace for inspection functions as defined by the array API
standard. See
https://data-apis.org/array-api/latest/API_specification/inspection.html for
more details.
"""
from __future__ import annotations
from numpy import bool_ as bool
from numpy import (
complex64,
complex128,
dtype,
float32,
float64,
int8,
int16,
int32,
int64,
intp,
uint8,
uint16,
uint32,
uint64,
)
from ._typing import Device, DType
class __array_namespace_info__:
"""
Get the array API inspection namespace for NumPy.
The array API inspection namespace defines the following functions:
- capabilities()
- default_device()
- default_dtypes()
- dtypes()
- devices()
See
https://data-apis.org/array-api/latest/API_specification/inspection.html
for more details.
Returns
-------
info : ModuleType
The array API inspection namespace for NumPy.
Examples
--------
>>> info = np.__array_namespace_info__()
>>> info.default_dtypes()
{'real floating': numpy.float64,
'complex floating': numpy.complex128,
'integral': numpy.int64,
'indexing': numpy.int64}
"""
__module__ = 'numpy'
def capabilities(self):
"""
Return a dictionary of array API library capabilities.
The resulting dictionary has the following keys:
- **"boolean indexing"**: boolean indicating whether an array library
supports boolean indexing. Always ``True`` for NumPy.
- **"data-dependent shapes"**: boolean indicating whether an array
library supports data-dependent output shapes. Always ``True`` for
NumPy.
See
https://data-apis.org/array-api/latest/API_specification/generated/array_api.info.capabilities.html
for more details.
See Also
--------
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Returns
-------
capabilities : dict
A dictionary of array API library capabilities.
Examples
--------
>>> info = np.__array_namespace_info__()
>>> info.capabilities()
{'boolean indexing': True,
'data-dependent shapes': True,
'max dimensions': 64}
"""
return {
"boolean indexing": True,
"data-dependent shapes": True,
"max dimensions": 64,
}
def default_device(self):
"""
The default device used for new NumPy arrays.
For NumPy, this always returns ``'cpu'``.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Returns
-------
device : Device
The default device used for new NumPy arrays.
Examples
--------
>>> info = np.__array_namespace_info__()
>>> info.default_device()
'cpu'
"""
return "cpu"
def default_dtypes(
self,
*,
device: Device | None = None,
) -> dict[str, dtype[intp | float64 | complex128]]:
"""
The default data types used for new NumPy arrays.
For NumPy, this always returns the following dictionary:
- **"real floating"**: ``numpy.float64``
- **"complex floating"**: ``numpy.complex128``
- **"integral"**: ``numpy.intp``
- **"indexing"**: ``numpy.intp``
Parameters
----------
device : str, optional
The device to get the default data types for. For NumPy, only
``'cpu'`` is allowed.
Returns
-------
dtypes : dict
A dictionary describing the default data types used for new NumPy
arrays.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Examples
--------
>>> info = np.__array_namespace_info__()
>>> info.default_dtypes()
{'real floating': numpy.float64,
'complex floating': numpy.complex128,
'integral': numpy.int64,
'indexing': numpy.int64}
"""
if device not in ["cpu", None]:
raise ValueError(
'Device not understood. Only "cpu" is allowed, but received:'
f' {device}'
)
return {
"real floating": dtype(float64),
"complex floating": dtype(complex128),
"integral": dtype(intp),
"indexing": dtype(intp),
}
def dtypes(
self,
*,
device: Device | None = None,
kind: str | tuple[str, ...] | None = None,
) -> dict[str, DType]:
"""
The array API data types supported by NumPy.
Note that this function only returns data types that are defined by
the array API.
Parameters
----------
device : str, optional
The device to get the data types for. For NumPy, only ``'cpu'`` is
allowed.
kind : str or tuple of str, optional
The kind of data types to return. If ``None``, all data types are
returned. If a string, only data types of that kind are returned.
If a tuple, a dictionary containing the union of the given kinds
is returned. The following kinds are supported:
- ``'bool'``: boolean data types (i.e., ``bool``).
- ``'signed integer'``: signed integer data types (i.e., ``int8``,
``int16``, ``int32``, ``int64``).
- ``'unsigned integer'``: unsigned integer data types (i.e.,
``uint8``, ``uint16``, ``uint32``, ``uint64``).
- ``'integral'``: integer data types. Shorthand for ``('signed
integer', 'unsigned integer')``.
- ``'real floating'``: real-valued floating-point data types
(i.e., ``float32``, ``float64``).
- ``'complex floating'``: complex floating-point data types (i.e.,
``complex64``, ``complex128``).
- ``'numeric'``: numeric data types. Shorthand for ``('integral',
'real floating', 'complex floating')``.
Returns
-------
dtypes : dict
A dictionary mapping the names of data types to the corresponding
NumPy data types.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.devices
Examples
--------
>>> info = np.__array_namespace_info__()
>>> info.dtypes(kind='signed integer')
{'int8': numpy.int8,
'int16': numpy.int16,
'int32': numpy.int32,
'int64': numpy.int64}
"""
if device not in ["cpu", None]:
raise ValueError(
'Device not understood. Only "cpu" is allowed, but received:'
f' {device}'
)
if kind is None:
return {
"bool": dtype(bool),
"int8": dtype(int8),
"int16": dtype(int16),
"int32": dtype(int32),
"int64": dtype(int64),
"uint8": dtype(uint8),
"uint16": dtype(uint16),
"uint32": dtype(uint32),
"uint64": dtype(uint64),
"float32": dtype(float32),
"float64": dtype(float64),
"complex64": dtype(complex64),
"complex128": dtype(complex128),
}
if kind == "bool":
return {"bool": dtype(bool)}
if kind == "signed integer":
return {
"int8": dtype(int8),
"int16": dtype(int16),
"int32": dtype(int32),
"int64": dtype(int64),
}
if kind == "unsigned integer":
return {
"uint8": dtype(uint8),
"uint16": dtype(uint16),
"uint32": dtype(uint32),
"uint64": dtype(uint64),
}
if kind == "integral":
return {
"int8": dtype(int8),
"int16": dtype(int16),
"int32": dtype(int32),
"int64": dtype(int64),
"uint8": dtype(uint8),
"uint16": dtype(uint16),
"uint32": dtype(uint32),
"uint64": dtype(uint64),
}
if kind == "real floating":
return {
"float32": dtype(float32),
"float64": dtype(float64),
}
if kind == "complex floating":
return {
"complex64": dtype(complex64),
"complex128": dtype(complex128),
}
if kind == "numeric":
return {
"int8": dtype(int8),
"int16": dtype(int16),
"int32": dtype(int32),
"int64": dtype(int64),
"uint8": dtype(uint8),
"uint16": dtype(uint16),
"uint32": dtype(uint32),
"uint64": dtype(uint64),
"float32": dtype(float32),
"float64": dtype(float64),
"complex64": dtype(complex64),
"complex128": dtype(complex128),
}
if isinstance(kind, tuple):
res: dict[str, DType] = {}
for k in kind:
res.update(self.dtypes(kind=k))
return res
raise ValueError(f"unsupported kind: {kind!r}")
def devices(self) -> list[Device]:
"""
The devices supported by NumPy.
For NumPy, this always returns ``['cpu']``.
Returns
-------
devices : list[Device]
The devices supported by NumPy.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.dtypes
Examples
--------
>>> info = np.__array_namespace_info__()
>>> info.devices()
['cpu']
"""
return ["cpu"]
__all__ = ["__array_namespace_info__"]
def __dir__() -> list[str]:
return __all__
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_compat/numpy/fft.py | sklearn/externals/array_api_compat/numpy/fft.py | import numpy as np
from numpy.fft import __all__ as fft_all
from numpy.fft import fft2, ifft2, irfft2, rfft2
from .._internal import get_xp
from ..common import _fft
fft = get_xp(np)(_fft.fft)
ifft = get_xp(np)(_fft.ifft)
fftn = get_xp(np)(_fft.fftn)
ifftn = get_xp(np)(_fft.ifftn)
rfft = get_xp(np)(_fft.rfft)
irfft = get_xp(np)(_fft.irfft)
rfftn = get_xp(np)(_fft.rfftn)
irfftn = get_xp(np)(_fft.irfftn)
hfft = get_xp(np)(_fft.hfft)
ihfft = get_xp(np)(_fft.ihfft)
fftfreq = get_xp(np)(_fft.fftfreq)
rfftfreq = get_xp(np)(_fft.rfftfreq)
fftshift = get_xp(np)(_fft.fftshift)
ifftshift = get_xp(np)(_fft.ifftshift)
__all__ = ["rfft2", "irfft2", "fft2", "ifft2"]
__all__ += _fft.__all__
def __dir__() -> list[str]:
return __all__
del get_xp
del np
del fft_all
del _fft
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_compat/torch/_typing.py | sklearn/externals/array_api_compat/torch/_typing.py | __all__ = ["Array", "Device", "DType"]
from torch import device as Device, dtype as DType, Tensor as Array
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_compat/torch/_aliases.py | sklearn/externals/array_api_compat/torch/_aliases.py | from __future__ import annotations
from functools import reduce as _reduce, wraps as _wraps
from builtins import all as _builtin_all, any as _builtin_any
from typing import Any, List, Optional, Sequence, Tuple, Union, Literal
import torch
from .._internal import get_xp
from ..common import _aliases
from ..common._typing import NestedSequence, SupportsBufferProtocol
from ._info import __array_namespace_info__
from ._typing import Array, Device, DType
_int_dtypes = {
torch.uint8,
torch.int8,
torch.int16,
torch.int32,
torch.int64,
}
try:
# torch >=2.3
_int_dtypes |= {torch.uint16, torch.uint32, torch.uint64}
except AttributeError:
pass
_array_api_dtypes = {
torch.bool,
*_int_dtypes,
torch.float32,
torch.float64,
torch.complex64,
torch.complex128,
}
_promotion_table = {
# ints
(torch.int8, torch.int16): torch.int16,
(torch.int8, torch.int32): torch.int32,
(torch.int8, torch.int64): torch.int64,
(torch.int16, torch.int32): torch.int32,
(torch.int16, torch.int64): torch.int64,
(torch.int32, torch.int64): torch.int64,
# ints and uints (mixed sign)
(torch.uint8, torch.int8): torch.int16,
(torch.uint8, torch.int16): torch.int16,
(torch.uint8, torch.int32): torch.int32,
(torch.uint8, torch.int64): torch.int64,
# floats
(torch.float32, torch.float64): torch.float64,
# complexes
(torch.complex64, torch.complex128): torch.complex128,
# Mixed float and complex
(torch.float32, torch.complex64): torch.complex64,
(torch.float32, torch.complex128): torch.complex128,
(torch.float64, torch.complex64): torch.complex128,
(torch.float64, torch.complex128): torch.complex128,
}
_promotion_table.update({(b, a): c for (a, b), c in _promotion_table.items()})
_promotion_table.update({(a, a): a for a in _array_api_dtypes})
def _two_arg(f):
@_wraps(f)
def _f(x1, x2, /, **kwargs):
x1, x2 = _fix_promotion(x1, x2)
return f(x1, x2, **kwargs)
if _f.__doc__ is None:
_f.__doc__ = f"""\
Array API compatibility wrapper for torch.{f.__name__}.
See the corresponding PyTorch documentation and/or the array API specification
for more details.
"""
return _f
def _fix_promotion(x1, x2, only_scalar=True):
if not isinstance(x1, torch.Tensor) or not isinstance(x2, torch.Tensor):
return x1, x2
if x1.dtype not in _array_api_dtypes or x2.dtype not in _array_api_dtypes:
return x1, x2
# If an argument is 0-D pytorch downcasts the other argument
if not only_scalar or x1.shape == ():
dtype = result_type(x1, x2)
x2 = x2.to(dtype)
if not only_scalar or x2.shape == ():
dtype = result_type(x1, x2)
x1 = x1.to(dtype)
return x1, x2
_py_scalars = (bool, int, float, complex)
def result_type(
*arrays_and_dtypes: Array | DType | bool | int | float | complex
) -> DType:
num = len(arrays_and_dtypes)
if num == 0:
raise ValueError("At least one array or dtype must be provided")
elif num == 1:
x = arrays_and_dtypes[0]
if isinstance(x, torch.dtype):
return x
return x.dtype
if num == 2:
x, y = arrays_and_dtypes
return _result_type(x, y)
else:
# sort scalars so that they are treated last
scalars, others = [], []
for x in arrays_and_dtypes:
if isinstance(x, _py_scalars):
scalars.append(x)
else:
others.append(x)
if not others:
raise ValueError("At least one array or dtype must be provided")
# combine left-to-right
return _reduce(_result_type, others + scalars)
def _result_type(
x: Array | DType | bool | int | float | complex,
y: Array | DType | bool | int | float | complex,
) -> DType:
if not (isinstance(x, _py_scalars) or isinstance(y, _py_scalars)):
xdt = x if isinstance(x, torch.dtype) else x.dtype
ydt = y if isinstance(y, torch.dtype) else y.dtype
try:
return _promotion_table[xdt, ydt]
except KeyError:
pass
    # This doesn't handle result_type(dtype, dtype) for non-array API dtypes
    # because torch.result_type only accepts tensors. This does, however, allow
    # cross-kind promotion.
x = torch.tensor([], dtype=x) if isinstance(x, torch.dtype) else x
y = torch.tensor([], dtype=y) if isinstance(y, torch.dtype) else y
return torch.result_type(x, y)
def can_cast(from_: Union[DType, Array], to: DType, /) -> bool:
if not isinstance(from_, torch.dtype):
from_ = from_.dtype
return torch.can_cast(from_, to)
# Basic renames
bitwise_invert = torch.bitwise_not
newaxis = None
# torch.conj sets the conjugation bit, which breaks conversion to other
# libraries. See https://github.com/data-apis/array-api-compat/issues/173
conj = torch.conj_physical
# Two-arg elementwise functions
# These require a wrapper to do the correct type promotion on 0-D tensors
add = _two_arg(torch.add)
atan2 = _two_arg(torch.atan2)
bitwise_and = _two_arg(torch.bitwise_and)
bitwise_left_shift = _two_arg(torch.bitwise_left_shift)
bitwise_or = _two_arg(torch.bitwise_or)
bitwise_right_shift = _two_arg(torch.bitwise_right_shift)
bitwise_xor = _two_arg(torch.bitwise_xor)
copysign = _two_arg(torch.copysign)
divide = _two_arg(torch.divide)
# Also a rename. torch.equal does not broadcast
equal = _two_arg(torch.eq)
floor_divide = _two_arg(torch.floor_divide)
greater = _two_arg(torch.greater)
greater_equal = _two_arg(torch.greater_equal)
hypot = _two_arg(torch.hypot)
less = _two_arg(torch.less)
less_equal = _two_arg(torch.less_equal)
logaddexp = _two_arg(torch.logaddexp)
# logical functions are not included here because they only accept bool in the
# spec, so type promotion is irrelevant.
maximum = _two_arg(torch.maximum)
minimum = _two_arg(torch.minimum)
multiply = _two_arg(torch.multiply)
not_equal = _two_arg(torch.not_equal)
pow = _two_arg(torch.pow)
remainder = _two_arg(torch.remainder)
subtract = _two_arg(torch.subtract)
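# A short sketch of why the 0-D promotion fix above matters (illustrative
# only; any CPU build of torch suffices, and the block is script-only):
if __name__ == "__main__":
    x = torch.ones(3, dtype=torch.float32)
    s = torch.tensor(1.0, dtype=torch.float64)     # 0-D operand
    assert (x + s).dtype == torch.float32          # torch ignores the 0-D dtype
    assert add(x, s).dtype == torch.float64        # wrapper promotes per the spec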
def asarray(
obj: (
Array
| bool | int | float | complex
| NestedSequence[bool | int | float | complex]
| SupportsBufferProtocol
),
/,
*,
dtype: DType | None = None,
device: Device | None = None,
copy: bool | None = None,
**kwargs: Any,
) -> Array:
# torch.asarray does not respect input->output device propagation
# https://github.com/pytorch/pytorch/issues/150199
if device is None and isinstance(obj, torch.Tensor):
device = obj.device
return torch.asarray(obj, dtype=dtype, device=device, copy=copy, **kwargs)
# These wrappers are mostly based on the fact that pytorch uses 'dim' instead
# of 'axis'.
# torch.min and torch.max return a tuple and don't support multiple axes https://github.com/pytorch/pytorch/issues/58745
def max(x: Array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> Array:
# https://github.com/pytorch/pytorch/issues/29137
if axis == ():
return torch.clone(x)
return torch.amax(x, axis, keepdims=keepdims)
def min(x: Array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> Array:
# https://github.com/pytorch/pytorch/issues/29137
if axis == ():
return torch.clone(x)
return torch.amin(x, axis, keepdims=keepdims)
clip = get_xp(torch)(_aliases.clip)
unstack = get_xp(torch)(_aliases.unstack)
cumulative_sum = get_xp(torch)(_aliases.cumulative_sum)
cumulative_prod = get_xp(torch)(_aliases.cumulative_prod)
finfo = get_xp(torch)(_aliases.finfo)
iinfo = get_xp(torch)(_aliases.iinfo)
# torch.sort also returns a tuple
# https://github.com/pytorch/pytorch/issues/70921
def sort(x: Array, /, *, axis: int = -1, descending: bool = False, stable: bool = True, **kwargs) -> Array:
return torch.sort(x, dim=axis, descending=descending, stable=stable, **kwargs).values
def _normalize_axes(axis, ndim):
axes = []
if ndim == 0 and axis:
# Better error message in this case
raise IndexError(f"Dimension out of range: {axis[0]}")
lower, upper = -ndim, ndim - 1
for a in axis:
if a < lower or a > upper:
# Match torch error message (e.g., from sum())
raise IndexError(f"Dimension out of range (expected to be in range of [{lower}, {upper}], but got {a}")
if a < 0:
a = a + ndim
if a in axes:
# Use IndexError instead of RuntimeError, and "axis" instead of "dim"
raise IndexError(f"Axis {a} appears multiple times in the list of axes")
axes.append(a)
return sorted(axes)
def _axis_none_keepdims(x, ndim, keepdims):
# Apply keepdims when axis=None
# (https://github.com/pytorch/pytorch/issues/71209)
# Note that this is only valid for the axis=None case.
if keepdims:
for i in range(ndim):
x = torch.unsqueeze(x, 0)
return x
def _reduce_multiple_axes(f, x, axis, keepdims=False, **kwargs):
# Some reductions don't support multiple axes
# (https://github.com/pytorch/pytorch/issues/56586).
axes = _normalize_axes(axis, x.ndim)
for a in reversed(axes):
x = torch.movedim(x, a, -1)
x = torch.flatten(x, -len(axes))
out = f(x, -1, **kwargs)
if keepdims:
for a in axes:
out = torch.unsqueeze(out, a)
return out
def _sum_prod_no_axis(x: Array, dtype: DType | None) -> Array:
"""
Implements `sum(..., axis=())` and `prod(..., axis=())`.
Works around https://github.com/pytorch/pytorch/issues/29137
"""
if dtype is not None:
return x.clone() if dtype == x.dtype else x.to(dtype)
# We can't upcast uint8 according to the spec because there is no
# torch.uint64, so at least upcast to int64 which is what prod does
# when axis=None.
if x.dtype in (torch.uint8, torch.int8, torch.int16, torch.int32):
return x.to(torch.int64)
return x.clone()
def prod(x: Array,
/,
*,
axis: Optional[Union[int, Tuple[int, ...]]] = None,
dtype: Optional[DType] = None,
keepdims: bool = False,
**kwargs) -> Array:
if axis == ():
return _sum_prod_no_axis(x, dtype)
# torch.prod doesn't support multiple axes
# (https://github.com/pytorch/pytorch/issues/56586).
if isinstance(axis, tuple):
return _reduce_multiple_axes(torch.prod, x, axis, keepdims=keepdims, dtype=dtype, **kwargs)
if axis is None:
# torch doesn't support keepdims with axis=None
# (https://github.com/pytorch/pytorch/issues/71209)
res = torch.prod(x, dtype=dtype, **kwargs)
res = _axis_none_keepdims(res, x.ndim, keepdims)
return res
return torch.prod(x, axis, dtype=dtype, keepdims=keepdims, **kwargs)
def sum(x: Array,
/,
*,
axis: Optional[Union[int, Tuple[int, ...]]] = None,
dtype: Optional[DType] = None,
keepdims: bool = False,
**kwargs) -> Array:
if axis == ():
return _sum_prod_no_axis(x, dtype)
if axis is None:
# torch doesn't support keepdims with axis=None
# (https://github.com/pytorch/pytorch/issues/71209)
res = torch.sum(x, dtype=dtype, **kwargs)
res = _axis_none_keepdims(res, x.ndim, keepdims)
return res
return torch.sum(x, axis, dtype=dtype, keepdims=keepdims, **kwargs)
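# A quick sketch of the axis=() / axis=None handling above (illustrative only,
# script-only):
if __name__ == "__main__":
    x = torch.arange(6).reshape(2, 3)
    assert sum(x, axis=()).shape == (2, 3)              # no reduction performed
    assert sum(x, axis=None, keepdims=True).shape == (1, 1)
    assert prod(x, axis=(0, 1)).item() == 0             # multi-axis reduction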
def any(x: Array,
/,
*,
axis: Optional[Union[int, Tuple[int, ...]]] = None,
keepdims: bool = False,
**kwargs) -> Array:
if axis == ():
return x.to(torch.bool)
# torch.any doesn't support multiple axes
# (https://github.com/pytorch/pytorch/issues/56586).
if isinstance(axis, tuple):
res = _reduce_multiple_axes(torch.any, x, axis, keepdims=keepdims, **kwargs)
return res.to(torch.bool)
if axis is None:
# torch doesn't support keepdims with axis=None
# (https://github.com/pytorch/pytorch/issues/71209)
res = torch.any(x, **kwargs)
res = _axis_none_keepdims(res, x.ndim, keepdims)
return res.to(torch.bool)
# torch.any doesn't return bool for uint8
return torch.any(x, axis, keepdims=keepdims).to(torch.bool)
def all(x: Array,
/,
*,
axis: Optional[Union[int, Tuple[int, ...]]] = None,
keepdims: bool = False,
**kwargs) -> Array:
if axis == ():
return x.to(torch.bool)
# torch.all doesn't support multiple axes
# (https://github.com/pytorch/pytorch/issues/56586).
if isinstance(axis, tuple):
res = _reduce_multiple_axes(torch.all, x, axis, keepdims=keepdims, **kwargs)
return res.to(torch.bool)
if axis is None:
# torch doesn't support keepdims with axis=None
# (https://github.com/pytorch/pytorch/issues/71209)
res = torch.all(x, **kwargs)
res = _axis_none_keepdims(res, x.ndim, keepdims)
return res.to(torch.bool)
# torch.all doesn't return bool for uint8
return torch.all(x, axis, keepdims=keepdims).to(torch.bool)
def mean(x: Array,
/,
*,
axis: Optional[Union[int, Tuple[int, ...]]] = None,
keepdims: bool = False,
**kwargs) -> Array:
# https://github.com/pytorch/pytorch/issues/29137
if axis == ():
return torch.clone(x)
if axis is None:
# torch doesn't support keepdims with axis=None
# (https://github.com/pytorch/pytorch/issues/71209)
res = torch.mean(x, **kwargs)
res = _axis_none_keepdims(res, x.ndim, keepdims)
return res
return torch.mean(x, axis, keepdims=keepdims, **kwargs)
def std(x: Array,
/,
*,
axis: Optional[Union[int, Tuple[int, ...]]] = None,
correction: Union[int, float] = 0.0,
keepdims: bool = False,
**kwargs) -> Array:
# Note, float correction is not supported
# https://github.com/pytorch/pytorch/issues/61492. We don't try to
# implement it here for now.
if isinstance(correction, float):
_correction = int(correction)
if correction != _correction:
raise NotImplementedError("float correction in torch std() is not yet supported")
else:
_correction = correction
# https://github.com/pytorch/pytorch/issues/29137
if axis == ():
return torch.zeros_like(x)
if isinstance(axis, int):
axis = (axis,)
if axis is None:
# torch doesn't support keepdims with axis=None
# (https://github.com/pytorch/pytorch/issues/71209)
res = torch.std(x, tuple(range(x.ndim)), correction=_correction, **kwargs)
res = _axis_none_keepdims(res, x.ndim, keepdims)
return res
return torch.std(x, axis, correction=_correction, keepdims=keepdims, **kwargs)
def var(x: Array,
/,
*,
axis: Optional[Union[int, Tuple[int, ...]]] = None,
correction: Union[int, float] = 0.0,
keepdims: bool = False,
**kwargs) -> Array:
# Note, float correction is not supported
# https://github.com/pytorch/pytorch/issues/61492. We don't try to
# implement it here for now.
# if isinstance(correction, float):
# correction = int(correction)
# https://github.com/pytorch/pytorch/issues/29137
if axis == ():
return torch.zeros_like(x)
if isinstance(axis, int):
axis = (axis,)
if axis is None:
# torch doesn't support keepdims with axis=None
# (https://github.com/pytorch/pytorch/issues/71209)
res = torch.var(x, tuple(range(x.ndim)), correction=correction, **kwargs)
res = _axis_none_keepdims(res, x.ndim, keepdims)
return res
return torch.var(x, axis, correction=correction, keepdims=keepdims, **kwargs)
# torch.concat doesn't support dim=None
# https://github.com/pytorch/pytorch/issues/70925
def concat(arrays: Union[Tuple[Array, ...], List[Array]],
/,
*,
axis: Optional[int] = 0,
**kwargs) -> Array:
if axis is None:
arrays = tuple(ar.flatten() for ar in arrays)
axis = 0
return torch.concat(arrays, axis, **kwargs)
# torch.squeeze only accepts int dim and doesn't require it
# https://github.com/pytorch/pytorch/issues/70924. Support for tuple dim was
# added at https://github.com/pytorch/pytorch/pull/89017.
def squeeze(x: Array, /, axis: Union[int, Tuple[int, ...]]) -> Array:
if isinstance(axis, int):
axis = (axis,)
for a in axis:
if x.shape[a] != 1:
raise ValueError("squeezed dimensions must be equal to 1")
axes = _normalize_axes(axis, x.ndim)
# Remove this once pytorch 1.14 is released with the above PR #89017.
sequence = [a - i for i, a in enumerate(axes)]
for a in sequence:
x = torch.squeeze(x, a)
return x
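# Sketch of the squeeze() validation above (illustrative only, script-only):
if __name__ == "__main__":
    x = torch.zeros(1, 2, 1)
    assert squeeze(x, axis=(0, 2)).shape == (2,)
    try:
        squeeze(x, axis=1)             # a dimension of size 2 cannot be squeezed
    except ValueError:
        pass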
# torch.broadcast_to uses size instead of shape
def broadcast_to(x: Array, /, shape: Tuple[int, ...], **kwargs) -> Array:
return torch.broadcast_to(x, shape, **kwargs)
# torch.permute uses dims instead of axes
def permute_dims(x: Array, /, axes: Tuple[int, ...]) -> Array:
return torch.permute(x, axes)
# The axis parameter doesn't work for flip() and roll()
# https://github.com/pytorch/pytorch/issues/71210. Also torch.flip() doesn't
# accept axis=None
def flip(x: Array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, **kwargs) -> Array:
if axis is None:
axis = tuple(range(x.ndim))
# torch.flip doesn't accept dim as an int but the method does
# https://github.com/pytorch/pytorch/issues/18095
return x.flip(axis, **kwargs)
def roll(x: Array, /, shift: Union[int, Tuple[int, ...]], *, axis: Optional[Union[int, Tuple[int, ...]]] = None, **kwargs) -> Array:
return torch.roll(x, shift, axis, **kwargs)
def nonzero(x: Array, /, **kwargs) -> Tuple[Array, ...]:
if x.ndim == 0:
raise ValueError("nonzero() does not support zero-dimensional arrays")
return torch.nonzero(x, as_tuple=True, **kwargs)
# torch uses `dim` instead of `axis`
def diff(
x: Array,
/,
*,
axis: int = -1,
n: int = 1,
prepend: Optional[Array] = None,
append: Optional[Array] = None,
) -> Array:
return torch.diff(x, dim=axis, n=n, prepend=prepend, append=append)
# torch uses `dim` instead of `axis`, does not have keepdims
def count_nonzero(
x: Array,
/,
*,
axis: Optional[Union[int, Tuple[int, ...]]] = None,
keepdims: bool = False,
) -> Array:
result = torch.count_nonzero(x, dim=axis)
if keepdims:
if isinstance(axis, int):
return result.unsqueeze(axis)
elif isinstance(axis, tuple):
n_axis = [x.ndim + ax if ax < 0 else ax for ax in axis]
sh = [1 if i in n_axis else x.shape[i] for i in range(x.ndim)]
return torch.reshape(result, sh)
return _axis_none_keepdims(result, x.ndim, keepdims)
else:
return result
# "repeat" is torch.repeat_interleave; also the dim argument
def repeat(x: Array, repeats: int | Array, /, *, axis: int | None = None) -> Array:
return torch.repeat_interleave(x, repeats, axis)
def where(
condition: Array,
x1: Array | bool | int | float | complex,
x2: Array | bool | int | float | complex,
/,
) -> Array:
x1, x2 = _fix_promotion(x1, x2)
return torch.where(condition, x1, x2)
# torch.reshape doesn't have the copy keyword
def reshape(x: Array,
/,
shape: Tuple[int, ...],
*,
copy: Optional[bool] = None,
**kwargs) -> Array:
if copy is not None:
raise NotImplementedError("torch.reshape doesn't yet support the copy keyword")
return torch.reshape(x, shape, **kwargs)
# torch.arange doesn't support returning empty arrays
# (https://github.com/pytorch/pytorch/issues/70915), and doesn't support some
# keyword argument combinations
# (https://github.com/pytorch/pytorch/issues/70914)
def arange(start: Union[int, float],
/,
stop: Optional[Union[int, float]] = None,
step: Union[int, float] = 1,
*,
dtype: Optional[DType] = None,
device: Optional[Device] = None,
**kwargs) -> Array:
if stop is None:
start, stop = 0, start
if step > 0 and stop <= start or step < 0 and stop >= start:
if dtype is None:
if _builtin_all(isinstance(i, int) for i in [start, stop, step]):
dtype = torch.int64
else:
dtype = torch.float32
return torch.empty(0, dtype=dtype, device=device, **kwargs)
return torch.arange(start, stop, step, dtype=dtype, device=device, **kwargs)
# torch.eye does not accept None as a default for the second argument and
# doesn't support off-diagonals (https://github.com/pytorch/pytorch/issues/70910)
def eye(n_rows: int,
n_cols: Optional[int] = None,
/,
*,
k: int = 0,
dtype: Optional[DType] = None,
device: Optional[Device] = None,
**kwargs) -> Array:
if n_cols is None:
n_cols = n_rows
z = torch.zeros(n_rows, n_cols, dtype=dtype, device=device, **kwargs)
if abs(k) <= n_rows + n_cols:
z.diagonal(k).fill_(1)
return z
# torch.linspace doesn't have the endpoint parameter
def linspace(start: Union[int, float],
stop: Union[int, float],
/,
num: int,
*,
dtype: Optional[DType] = None,
device: Optional[Device] = None,
endpoint: bool = True,
**kwargs) -> Array:
if not endpoint:
return torch.linspace(start, stop, num+1, dtype=dtype, device=device, **kwargs)[:-1]
return torch.linspace(start, stop, num, dtype=dtype, device=device, **kwargs)
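# Sketch of the endpoint emulation above (illustrative only, script-only):
if __name__ == "__main__":
    closed = linspace(0.0, 1.0, 5)                     # includes the stop value
    half_open = linspace(0.0, 1.0, 5, endpoint=False)  # numpy-style endpoint=False
    assert closed[-1] == 1.0
    assert torch.isclose(half_open[-1], torch.tensor(0.8))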
# torch.full does not accept an int size
# https://github.com/pytorch/pytorch/issues/70906
def full(shape: Union[int, Tuple[int, ...]],
fill_value: bool | int | float | complex,
*,
dtype: Optional[DType] = None,
device: Optional[Device] = None,
**kwargs) -> Array:
if isinstance(shape, int):
shape = (shape,)
return torch.full(shape, fill_value, dtype=dtype, device=device, **kwargs)
# ones, zeros, and empty do not accept shape as a keyword argument
def ones(shape: Union[int, Tuple[int, ...]],
*,
dtype: Optional[DType] = None,
device: Optional[Device] = None,
**kwargs) -> Array:
return torch.ones(shape, dtype=dtype, device=device, **kwargs)
def zeros(shape: Union[int, Tuple[int, ...]],
*,
dtype: Optional[DType] = None,
device: Optional[Device] = None,
**kwargs) -> Array:
return torch.zeros(shape, dtype=dtype, device=device, **kwargs)
def empty(shape: Union[int, Tuple[int, ...]],
*,
dtype: Optional[DType] = None,
device: Optional[Device] = None,
**kwargs) -> Array:
return torch.empty(shape, dtype=dtype, device=device, **kwargs)
# tril and triu do not name the keyword argument k (torch calls it "diagonal")
def tril(x: Array, /, *, k: int = 0) -> Array:
return torch.tril(x, k)
def triu(x: Array, /, *, k: int = 0) -> Array:
return torch.triu(x, k)
# Functions that aren't in torch https://github.com/pytorch/pytorch/issues/58742
def expand_dims(x: Array, /, *, axis: int = 0) -> Array:
return torch.unsqueeze(x, axis)
def astype(
x: Array,
dtype: DType,
/,
*,
copy: bool = True,
device: Optional[Device] = None,
) -> Array:
if device is not None:
return x.to(device, dtype=dtype, copy=copy)
return x.to(dtype=dtype, copy=copy)
def broadcast_arrays(*arrays: Array) -> List[Array]:
shape = torch.broadcast_shapes(*[a.shape for a in arrays])
return [torch.broadcast_to(a, shape) for a in arrays]
# Note that these named tuples aren't actually part of the standard namespace,
# but I don't see any issue with exporting the names here regardless.
from ..common._aliases import (UniqueAllResult, UniqueCountsResult,
UniqueInverseResult)
# https://github.com/pytorch/pytorch/issues/70920
def unique_all(x: Array) -> UniqueAllResult:
# torch.unique doesn't support returning indices.
# https://github.com/pytorch/pytorch/issues/36748. The workaround
# suggested in that issue doesn't actually function correctly (it relies
# on non-deterministic behavior of scatter()).
raise NotImplementedError("unique_all() not yet implemented for pytorch (see https://github.com/pytorch/pytorch/issues/36748)")
# values, inverse_indices, counts = torch.unique(x, return_counts=True, return_inverse=True)
# # torch.unique incorrectly gives a 0 count for nan values.
# # https://github.com/pytorch/pytorch/issues/94106
# counts[torch.isnan(values)] = 1
# return UniqueAllResult(values, indices, inverse_indices, counts)
def unique_counts(x: Array) -> UniqueCountsResult:
values, counts = torch.unique(x, return_counts=True)
# torch.unique incorrectly gives a 0 count for nan values.
# https://github.com/pytorch/pytorch/issues/94106
counts[torch.isnan(values)] = 1
return UniqueCountsResult(values, counts)
def unique_inverse(x: Array) -> UniqueInverseResult:
values, inverse = torch.unique(x, return_inverse=True)
return UniqueInverseResult(values, inverse)
def unique_values(x: Array) -> Array:
return torch.unique(x)
def matmul(x1: Array, x2: Array, /, **kwargs) -> Array:
    # torch.matmul doesn't do array API type promotion (and not only for 0-D
    # operands), hence only_scalar=False
x1, x2 = _fix_promotion(x1, x2, only_scalar=False)
return torch.matmul(x1, x2, **kwargs)
matrix_transpose = get_xp(torch)(_aliases.matrix_transpose)
_vecdot = get_xp(torch)(_aliases.vecdot)
def vecdot(x1: Array, x2: Array, /, *, axis: int = -1) -> Array:
x1, x2 = _fix_promotion(x1, x2, only_scalar=False)
return _vecdot(x1, x2, axis=axis)
# torch.tensordot uses dims instead of axes
def tensordot(
x1: Array,
x2: Array,
/,
*,
axes: Union[int, Tuple[Sequence[int], Sequence[int]]] = 2,
**kwargs,
) -> Array:
# Note: torch.tensordot fails with integer dtypes when there is only 1
# element in the axis (https://github.com/pytorch/pytorch/issues/84530).
x1, x2 = _fix_promotion(x1, x2, only_scalar=False)
return torch.tensordot(x1, x2, dims=axes, **kwargs)
def isdtype(
dtype: DType, kind: Union[DType, str, Tuple[Union[DType, str], ...]],
*, _tuple=True, # Disallow nested tuples
) -> bool:
"""
Returns a boolean indicating whether a provided dtype is of a specified data type ``kind``.
Note that outside of this function, this compat library does not yet fully
support complex numbers.
See
https://data-apis.org/array-api/latest/API_specification/generated/array_api.isdtype.html
for more details
"""
if isinstance(kind, tuple) and _tuple:
return _builtin_any(isdtype(dtype, k, _tuple=False) for k in kind)
elif isinstance(kind, str):
if kind == 'bool':
return dtype == torch.bool
elif kind == 'signed integer':
return dtype in _int_dtypes and dtype.is_signed
elif kind == 'unsigned integer':
return dtype in _int_dtypes and not dtype.is_signed
elif kind == 'integral':
return dtype in _int_dtypes
elif kind == 'real floating':
return dtype.is_floating_point
elif kind == 'complex floating':
return dtype.is_complex
elif kind == 'numeric':
return isdtype(dtype, ('integral', 'real floating', 'complex floating'))
else:
raise ValueError(f"Unrecognized data type kind: {kind!r}")
else:
return dtype == kind
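# --- Illustrative sketch (added for clarity; not part of the upstream file):
# how the `kind` matching above behaves, including the tuple-as-union case.
def _isdtype_demo() -> None:  # hypothetical helper, for illustration only
    assert isdtype(torch.float32, "real floating")
    assert isdtype(torch.int64, ("integral", "bool"))  # tuple acts as a union
    assert isdtype(torch.complex64, "numeric")
    assert not isdtype(torch.bool, "numeric")  # bool is not part of "numeric"
    assert isdtype(torch.float64, torch.float64)  # a dtype kind compares by equality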
def take(x: Array, indices: Array, /, *, axis: Optional[int] = None, **kwargs) -> Array:
if axis is None:
if x.ndim != 1:
raise ValueError("axis must be specified when ndim > 1")
axis = 0
return torch.index_select(x, axis, indices, **kwargs)
def take_along_axis(x: Array, indices: Array, /, *, axis: int = -1) -> Array:
return torch.take_along_dim(x, indices, dim=axis)
def sign(x: Array, /) -> Array:
# torch sign() does not support complex numbers and does not propagate
# nans. See https://github.com/data-apis/array-api-compat/issues/136
if x.dtype.is_complex:
out = x/torch.abs(x)
# sign(0) = 0 but the above formula would give nan
out[x == 0+0j] = 0+0j
return out
else:
out = torch.sign(x)
if x.dtype.is_floating_point:
out[torch.isnan(x)] = torch.nan
return out
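# --- Illustrative sketch (added for clarity; not part of the upstream file):
# the wrapper above returns z/|z| for complex input (with sign(0) == 0) and
# propagates NaNs for real floating input.
def _sign_demo() -> None:  # hypothetical helper, for illustration only
    z = sign(torch.tensor([3 + 4j, 0j]))
    assert torch.allclose(z, torch.tensor([0.6 + 0.8j, 0j]))
    f = sign(torch.tensor([-2.0, float("nan")]))
    assert f[0] == -1.0
    assert torch.isnan(f[1])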
def meshgrid(*arrays: Array, indexing: Literal['xy', 'ij'] = 'xy') -> List[Array]:
# enforce the default of 'xy'
# TODO: is the return type a list or a tuple
return list(torch.meshgrid(*arrays, indexing=indexing))
__all__ = ['__array_namespace_info__', 'asarray', 'result_type', 'can_cast',
'permute_dims', 'bitwise_invert', 'newaxis', 'conj', 'add',
'atan2', 'bitwise_and', 'bitwise_left_shift', 'bitwise_or',
'bitwise_right_shift', 'bitwise_xor', 'copysign', 'count_nonzero',
'diff', 'divide',
'equal', 'floor_divide', 'greater', 'greater_equal', 'hypot',
'less', 'less_equal', 'logaddexp', 'maximum', 'minimum',
'multiply', 'not_equal', 'pow', 'remainder', 'subtract', 'max',
'min', 'clip', 'unstack', 'cumulative_sum', 'cumulative_prod', 'sort', 'prod', 'sum',
'any', 'all', 'mean', 'std', 'var', 'concat', 'squeeze',
'broadcast_to', 'flip', 'roll', 'nonzero', 'where', 'reshape',
'arange', 'eye', 'linspace', 'full', 'ones', 'zeros', 'empty',
'tril', 'triu', 'expand_dims', 'astype', 'broadcast_arrays',
'UniqueAllResult', 'UniqueCountsResult', 'UniqueInverseResult',
'unique_all', 'unique_counts', 'unique_inverse', 'unique_values',
'matmul', 'matrix_transpose', 'vecdot', 'tensordot', 'isdtype',
'take', 'take_along_axis', 'sign', 'finfo', 'iinfo', 'repeat', 'meshgrid']
_all_ignore = ['torch', 'get_xp']
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_compat/torch/linalg.py | sklearn/externals/array_api_compat/torch/linalg.py | from __future__ import annotations
import torch
from typing import Optional, Union, Tuple
from torch.linalg import * # noqa: F403
# torch.linalg doesn't define __all__
# from torch.linalg import __all__ as linalg_all
from torch import linalg as torch_linalg
linalg_all = [i for i in dir(torch_linalg) if not i.startswith('_')]
# outer is implemented in torch but isn't in the linalg namespace
from torch import outer
from ._aliases import _fix_promotion, sum
# These functions are in both the main and linalg namespaces
from ._aliases import matmul, matrix_transpose, tensordot
from ._typing import Array, DType
from ..common._typing import JustInt, JustFloat
# Note: torch.linalg.cross does not default to axis=-1 (it defaults to the
# first axis with size 3), see https://github.com/pytorch/pytorch/issues/58743
# torch.cross also does not support broadcasting when it would add new
# dimensions https://github.com/pytorch/pytorch/issues/39656
def cross(x1: Array, x2: Array, /, *, axis: int = -1) -> Array:
x1, x2 = _fix_promotion(x1, x2, only_scalar=False)
if not (-min(x1.ndim, x2.ndim) <= axis < max(x1.ndim, x2.ndim)):
raise ValueError(f"axis {axis} out of bounds for cross product of arrays with shapes {x1.shape} and {x2.shape}")
if not (x1.shape[axis] == x2.shape[axis] == 3):
raise ValueError(f"cross product axis must have size 3, got {x1.shape[axis]} and {x2.shape[axis]}")
x1, x2 = torch.broadcast_tensors(x1, x2)
return torch_linalg.cross(x1, x2, dim=axis)
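# --- Illustrative sketch (added for clarity; not part of the upstream file):
# unlike torch.linalg.cross (which picks the first axis of size 3 by default),
# this wrapper follows the array API default of axis=-1 and validates the
# shapes explicitly.
def _cross_demo() -> None:  # hypothetical helper, for illustration only
    e1 = torch.tensor([1.0, 0.0, 0.0])
    e2 = torch.tensor([0.0, 1.0, 0.0])
    assert torch.equal(cross(e1, e2), torch.tensor([0.0, 0.0, 1.0]))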
def vecdot(x1: Array, x2: Array, /, *, axis: int = -1, **kwargs) -> Array:
from ._aliases import isdtype
x1, x2 = _fix_promotion(x1, x2, only_scalar=False)
# torch.linalg.vecdot incorrectly allows broadcasting along the contracted dimension
if x1.shape[axis] != x2.shape[axis]:
raise ValueError("x1 and x2 must have the same size along the given axis")
# torch.linalg.vecdot doesn't support integer dtypes
if isdtype(x1.dtype, 'integral') or isdtype(x2.dtype, 'integral'):
if kwargs:
raise RuntimeError("vecdot kwargs not supported for integral dtypes")
x1_ = torch.moveaxis(x1, axis, -1)
x2_ = torch.moveaxis(x2, axis, -1)
x1_, x2_ = torch.broadcast_tensors(x1_, x2_)
res = x1_[..., None, :] @ x2_[..., None]
return res[..., 0, 0]
return torch.linalg.vecdot(x1, x2, dim=axis, **kwargs)
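# --- Illustrative sketch (added for clarity; not part of the upstream file):
# for integral dtypes the wrapper above falls back to a batched matmul,
# because torch.linalg.vecdot only accepts floating-point inputs.
def _vecdot_int_demo() -> None:  # hypothetical helper, for illustration only
    x = torch.tensor([[1, 2, 3], [4, 5, 6]])
    assert torch.equal(vecdot(x, x), torch.tensor([14, 77]))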
def solve(x1: Array, x2: Array, /, **kwargs) -> Array:
x1, x2 = _fix_promotion(x1, x2, only_scalar=False)
# Torch tries to emulate NumPy 1 solve behavior by using batched 1-D solve
# whenever
# 1. x1.ndim - 1 == x2.ndim
# 2. x1.shape[:-1] == x2.shape
#
# See linalg_solve_is_vector_rhs in
# aten/src/ATen/native/LinearAlgebraUtils.h and
# TORCH_META_FUNC(_linalg_solve_ex) in
# aten/src/ATen/native/BatchLinearAlgebra.cpp in the PyTorch source code.
#
# The easiest way to work around this is to prepend a size 1 dimension to
# x2, since x2 is already one dimension less than x1.
#
# See https://github.com/pytorch/pytorch/issues/52915
if x2.ndim != 1 and x1.ndim - 1 == x2.ndim and x1.shape[:-1] == x2.shape:
x2 = x2[None]
return torch.linalg.solve(x1, x2, **kwargs)
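# --- Illustrative sketch (added for clarity; not part of the upstream file):
# when x2 looks like a batch of vectors (x1.shape[:-1] == x2.shape), plain
# torch.linalg.solve falls back to NumPy-1 style batched 1-D solves per the
# workaround notes above; the wrapper forces the array API's matrix
# interpretation instead, so the right-hand side broadcasts against the batch.
def _solve_demo() -> None:  # hypothetical helper, for illustration only
    A = torch.stack([torch.eye(2), 2.0 * torch.eye(2)])  # shape (2, 2, 2)
    B = torch.ones(2, 2)                                  # shape (2, 2)
    assert solve(A, B).shape == (2, 2, 2)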
# torch.trace doesn't support the offset argument and doesn't support stacking
def trace(x: Array, /, *, offset: int = 0, dtype: Optional[DType] = None) -> Array:
# Use our wrapped sum to make sure it does upcasting correctly
return sum(torch.diagonal(x, offset=offset, dim1=-2, dim2=-1), axis=-1, dtype=dtype)
def vector_norm(
x: Array,
/,
*,
axis: Optional[Union[int, Tuple[int, ...]]] = None,
keepdims: bool = False,
# JustFloat stands for inf | -inf, which are not valid for Literal
ord: JustInt | JustFloat = 2,
**kwargs,
) -> Array:
# torch.vector_norm incorrectly treats axis=() the same as axis=None
if axis == ():
out = kwargs.get('out')
if out is None:
dtype = None
if x.dtype == torch.complex64:
dtype = torch.float32
elif x.dtype == torch.complex128:
dtype = torch.float64
out = torch.zeros_like(x, dtype=dtype)
# The norm of a single scalar works out to abs(x) in every case except
# for ord=0, which is x != 0.
if ord == 0:
out[:] = (x != 0)
else:
out[:] = torch.abs(x)
return out
return torch.linalg.vector_norm(x, ord=ord, axis=axis, keepdim=keepdims, **kwargs)
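# --- Illustrative sketch (added for clarity; not part of the upstream file):
# axis=() means "reduce over no axes", i.e. an elementwise norm: |x| in
# general and (x != 0) for ord=0, which the branch above computes by hand
# because torch.linalg.vector_norm would reduce over all axes instead.
def _vector_norm_demo() -> None:  # hypothetical helper, for illustration only
    x = torch.tensor([-3.0, 0.0, 4.0])
    assert torch.equal(vector_norm(x, axis=()), torch.tensor([3.0, 0.0, 4.0]))
    assert torch.equal(vector_norm(x, axis=(), ord=0), torch.tensor([1.0, 0.0, 1.0]))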
__all__ = linalg_all + ['outer', 'matmul', 'matrix_transpose', 'tensordot',
'cross', 'vecdot', 'solve', 'trace', 'vector_norm']
_all_ignore = ['torch_linalg', 'sum']
del linalg_all
def __dir__() -> list[str]:
return __all__
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_compat/torch/__init__.py | sklearn/externals/array_api_compat/torch/__init__.py | from torch import * # noqa: F403
# Several names are not included in the above import *
import torch
for n in dir(torch):
if (n.startswith('_')
or n.endswith('_')
or 'cuda' in n
or 'cpu' in n
or 'backward' in n):
continue
exec(f"{n} = torch.{n}")
del n
# These imports may overwrite names from the import * above.
from ._aliases import * # noqa: F403
# See the comment in the numpy __init__.py
__import__(__package__ + '.linalg')
__import__(__package__ + '.fft')
__array_api_version__ = '2024.12'
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_compat/torch/_info.py | sklearn/externals/array_api_compat/torch/_info.py | """
Array API Inspection namespace
This is the namespace for inspection functions as defined by the array API
standard. See
https://data-apis.org/array-api/latest/API_specification/inspection.html for
more details.
"""
import torch
from functools import cache
class __array_namespace_info__:
"""
Get the array API inspection namespace for PyTorch.
The array API inspection namespace defines the following functions:
- capabilities()
- default_device()
- default_dtypes()
- dtypes()
- devices()
See
https://data-apis.org/array-api/latest/API_specification/inspection.html
for more details.
Returns
-------
info : ModuleType
The array API inspection namespace for PyTorch.
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.default_dtypes()
{'real floating': torch.float32,
'complex floating': torch.complex64,
'integral': torch.int64,
'indexing': torch.int64}
"""
__module__ = 'torch'
def capabilities(self):
"""
Return a dictionary of array API library capabilities.
The resulting dictionary has the following keys:
- **"boolean indexing"**: boolean indicating whether an array library
supports boolean indexing. Always ``True`` for PyTorch.
- **"data-dependent shapes"**: boolean indicating whether an array
library supports data-dependent output shapes. Always ``True`` for
PyTorch.
See
https://data-apis.org/array-api/latest/API_specification/generated/array_api.info.capabilities.html
for more details.
See Also
--------
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Returns
-------
capabilities : dict
A dictionary of array API library capabilities.
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.capabilities()
{'boolean indexing': True,
'data-dependent shapes': True,
'max dimensions': 64}
"""
return {
"boolean indexing": True,
"data-dependent shapes": True,
"max dimensions": 64,
}
def default_device(self):
"""
The default device used for new PyTorch arrays.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Returns
-------
device : Device
The default device used for new PyTorch arrays.
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.default_device()
device(type='cpu')
Notes
-----
This method returns the static default device when PyTorch is initialized.
However, the *current* device used by creation functions (``empty`` etc.)
can be changed at runtime.
See Also
--------
https://github.com/data-apis/array-api/issues/835
"""
return torch.device("cpu")
def default_dtypes(self, *, device=None):
"""
The default data types used for new PyTorch arrays.
Parameters
----------
device : Device, optional
The device to get the default data types for.
Unused for PyTorch, as all devices use the same default dtypes.
Returns
-------
dtypes : dict
A dictionary describing the default data types used for new PyTorch
arrays.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.dtypes,
__array_namespace_info__.devices
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.default_dtypes()
{'real floating': torch.float32,
'complex floating': torch.complex64,
'integral': torch.int64,
'indexing': torch.int64}
"""
# Note: if the default is set to float64, devices like MPS that don't
# support float64 will error. We still return the default_dtype
# value here because this error doesn't represent a different default
# per-device.
default_floating = torch.get_default_dtype()
default_complex = torch.complex64 if default_floating == torch.float32 else torch.complex128
default_integral = torch.int64
return {
"real floating": default_floating,
"complex floating": default_complex,
"integral": default_integral,
"indexing": default_integral,
}
def _dtypes(self, kind):
bool = torch.bool
int8 = torch.int8
int16 = torch.int16
int32 = torch.int32
int64 = torch.int64
uint8 = torch.uint8
# uint16, uint32, and uint64 are present in newer versions of pytorch,
# but they aren't generally supported by the array API functions, so
# we omit them from this function.
float32 = torch.float32
float64 = torch.float64
complex64 = torch.complex64
complex128 = torch.complex128
if kind is None:
return {
"bool": bool,
"int8": int8,
"int16": int16,
"int32": int32,
"int64": int64,
"uint8": uint8,
"float32": float32,
"float64": float64,
"complex64": complex64,
"complex128": complex128,
}
if kind == "bool":
return {"bool": bool}
if kind == "signed integer":
return {
"int8": int8,
"int16": int16,
"int32": int32,
"int64": int64,
}
if kind == "unsigned integer":
return {
"uint8": uint8,
}
if kind == "integral":
return {
"int8": int8,
"int16": int16,
"int32": int32,
"int64": int64,
"uint8": uint8,
}
if kind == "real floating":
return {
"float32": float32,
"float64": float64,
}
if kind == "complex floating":
return {
"complex64": complex64,
"complex128": complex128,
}
if kind == "numeric":
return {
"int8": int8,
"int16": int16,
"int32": int32,
"int64": int64,
"uint8": uint8,
"float32": float32,
"float64": float64,
"complex64": complex64,
"complex128": complex128,
}
if isinstance(kind, tuple):
res = {}
for k in kind:
res.update(self.dtypes(kind=k))
return res
raise ValueError(f"unsupported kind: {kind!r}")
@cache
def dtypes(self, *, device=None, kind=None):
"""
The array API data types supported by PyTorch.
Note that this function only returns data types that are defined by
the array API.
Parameters
----------
device : Device, optional
The device to get the data types for.
Unused for PyTorch, as all devices use the same dtypes.
kind : str or tuple of str, optional
The kind of data types to return. If ``None``, all data types are
returned. If a string, only data types of that kind are returned.
If a tuple, a dictionary containing the union of the given kinds
is returned. The following kinds are supported:
- ``'bool'``: boolean data types (i.e., ``bool``).
- ``'signed integer'``: signed integer data types (i.e., ``int8``,
``int16``, ``int32``, ``int64``).
- ``'unsigned integer'``: unsigned integer data types (i.e.,
``uint8``, ``uint16``, ``uint32``, ``uint64``).
- ``'integral'``: integer data types. Shorthand for ``('signed
integer', 'unsigned integer')``.
- ``'real floating'``: real-valued floating-point data types
(i.e., ``float32``, ``float64``).
- ``'complex floating'``: complex floating-point data types (i.e.,
``complex64``, ``complex128``).
- ``'numeric'``: numeric data types. Shorthand for ``('integral',
'real floating', 'complex floating')``.
Returns
-------
dtypes : dict
A dictionary mapping the names of data types to the corresponding
PyTorch data types.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.devices
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.dtypes(kind='signed integer')
{'int8': torch.int8,
'int16': torch.int16,
'int32': torch.int32,
'int64': torch.int64}
"""
res = self._dtypes(kind)
for k, v in res.copy().items():
try:
torch.empty((0,), dtype=v, device=device)
except:
del res[k]
return res
@cache
def devices(self):
"""
The devices supported by PyTorch.
Returns
-------
devices : list[Device]
The devices supported by PyTorch.
See Also
--------
__array_namespace_info__.capabilities,
__array_namespace_info__.default_device,
__array_namespace_info__.default_dtypes,
__array_namespace_info__.dtypes
Examples
--------
>>> info = xp.__array_namespace_info__()
>>> info.devices()
[device(type='cpu'), device(type='mps', index=0), device(type='meta')]
"""
# Torch doesn't have a straightforward way to get the list of all
# currently supported devices. To do this, we first parse the error
# message of torch.device to get the list of all possible types of
# device:
try:
torch.device('notadevice')
raise AssertionError("unreachable") # pragma: nocover
except RuntimeError as e:
# The error message is something like:
# "Expected one of cpu, cuda, ipu, xpu, mkldnn, opengl, opencl, ideep, hip, ve, fpga, ort, xla, lazy, vulkan, mps, meta, hpu, mtia, privateuseone device type at start of device string: notadevice"
devices_names = e.args[0].split('Expected one of ')[1].split(' device type')[0].split(', ')
# Next we need to check for different indices for different devices.
# device(device_name, index=index) doesn't actually check if the
# device name or index is valid. We have to try to create a tensor
# with it (which is why this function is cached).
devices = []
for device_name in devices_names:
i = 0
while True:
try:
a = torch.empty((0,), device=torch.device(device_name, index=i))
if a.device in devices:
break
devices.append(a.device)
except:
break
i += 1
return devices
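# --- Illustrative sketch (added for clarity; not part of the upstream file):
# the error-message trick used by devices() above, in isolation. The set of
# device *types* torch was built with is only exposed through the RuntimeError
# raised for an unknown device string.
def _probe_device_types() -> list:  # hypothetical helper, for illustration only
    try:
        torch.device("notadevice")
    except RuntimeError as e:
        msg = e.args[0]
        return msg.split("Expected one of ")[1].split(" device type")[0].split(", ")
    return []  # unreachable in practice; torch.device() raises above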
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_compat/torch/fft.py | sklearn/externals/array_api_compat/torch/fft.py | from __future__ import annotations
from typing import Union, Sequence, Literal
import torch
import torch.fft
from torch.fft import * # noqa: F403
from ._typing import Array
# Several torch fft functions do not map axes to dim
def fftn(
x: Array,
/,
*,
s: Sequence[int] = None,
axes: Sequence[int] = None,
norm: Literal["backward", "ortho", "forward"] = "backward",
**kwargs,
) -> Array:
return torch.fft.fftn(x, s=s, dim=axes, norm=norm, **kwargs)
def ifftn(
x: Array,
/,
*,
s: Sequence[int] = None,
axes: Sequence[int] = None,
norm: Literal["backward", "ortho", "forward"] = "backward",
**kwargs,
) -> Array:
return torch.fft.ifftn(x, s=s, dim=axes, norm=norm, **kwargs)
def rfftn(
x: Array,
/,
*,
s: Sequence[int] = None,
axes: Sequence[int] = None,
norm: Literal["backward", "ortho", "forward"] = "backward",
**kwargs,
) -> Array:
return torch.fft.rfftn(x, s=s, dim=axes, norm=norm, **kwargs)
def irfftn(
x: Array,
/,
*,
s: Sequence[int] = None,
axes: Sequence[int] = None,
norm: Literal["backward", "ortho", "forward"] = "backward",
**kwargs,
) -> Array:
return torch.fft.irfftn(x, s=s, dim=axes, norm=norm, **kwargs)
def fftshift(
x: Array,
/,
*,
axes: Union[int, Sequence[int]] = None,
**kwargs,
) -> Array:
return torch.fft.fftshift(x, dim=axes, **kwargs)
def ifftshift(
x: Array,
/,
*,
axes: Union[int, Sequence[int]] = None,
**kwargs,
) -> Array:
return torch.fft.ifftshift(x, dim=axes, **kwargs)
__all__ = torch.fft.__all__ + [
"fftn",
"ifftn",
"rfftn",
"irfftn",
"fftshift",
"ifftshift",
]
_all_ignore = ['torch']
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/_numpydoc/docscrape.py | sklearn/externals/_numpydoc/docscrape.py | """Extract reference documentation from the NumPy source tree."""
import copy
import inspect
import pydoc
import re
import sys
import textwrap
from collections import namedtuple
from collections.abc import Callable, Mapping
from functools import cached_property
from warnings import warn
def strip_blank_lines(l):
"Remove leading and trailing blank lines from a list of lines"
while l and not l[0].strip():
del l[0]
while l and not l[-1].strip():
del l[-1]
return l
class Reader:
"""A line-based string reader."""
def __init__(self, data):
"""
Parameters
----------
data : str
String with lines separated by '\\n'.
"""
if isinstance(data, list):
self._str = data
else:
self._str = data.split("\n") # store string as list of lines
self.reset()
def __getitem__(self, n):
return self._str[n]
def reset(self):
self._l = 0 # current line nr
def read(self):
if not self.eof():
out = self[self._l]
self._l += 1
return out
else:
return ""
def seek_next_non_empty_line(self):
for l in self[self._l :]:
if l.strip():
break
else:
self._l += 1
def eof(self):
return self._l >= len(self._str)
def read_to_condition(self, condition_func):
start = self._l
for line in self[start:]:
if condition_func(line):
return self[start : self._l]
self._l += 1
if self.eof():
return self[start : self._l + 1]
return []
def read_to_next_empty_line(self):
self.seek_next_non_empty_line()
def is_empty(line):
return not line.strip()
return self.read_to_condition(is_empty)
def read_to_next_unindented_line(self):
def is_unindented(line):
return line.strip() and (len(line.lstrip()) == len(line))
return self.read_to_condition(is_unindented)
def peek(self, n=0):
if self._l + n < len(self._str):
return self[self._l + n]
else:
return ""
def is_empty(self):
return not "".join(self._str).strip()
class ParseError(Exception):
def __str__(self):
message = self.args[0]
if hasattr(self, "docstring"):
message = f"{message} in {self.docstring!r}"
return message
Parameter = namedtuple("Parameter", ["name", "type", "desc"])
class NumpyDocString(Mapping):
"""Parses a numpydoc string to an abstract representation
Instances define a mapping from section title to structured data.
"""
sections = {
"Signature": "",
"Summary": [""],
"Extended Summary": [],
"Parameters": [],
"Attributes": [],
"Methods": [],
"Returns": [],
"Yields": [],
"Receives": [],
"Other Parameters": [],
"Raises": [],
"Warns": [],
"Warnings": [],
"See Also": [],
"Notes": [],
"References": "",
"Examples": "",
"index": {},
}
def __init__(self, docstring, config=None):
orig_docstring = docstring
docstring = textwrap.dedent(docstring).split("\n")
self._doc = Reader(docstring)
self._parsed_data = copy.deepcopy(self.sections)
try:
self._parse()
except ParseError as e:
e.docstring = orig_docstring
raise
def __getitem__(self, key):
return self._parsed_data[key]
def __setitem__(self, key, val):
if key not in self._parsed_data:
self._error_location(f"Unknown section {key}", error=False)
else:
self._parsed_data[key] = val
def __iter__(self):
return iter(self._parsed_data)
def __len__(self):
return len(self._parsed_data)
def _is_at_section(self):
self._doc.seek_next_non_empty_line()
if self._doc.eof():
return False
l1 = self._doc.peek().strip() # e.g. Parameters
if l1.startswith(".. index::"):
return True
l2 = self._doc.peek(1).strip() # ---------- or ==========
if len(l2) >= 3 and (set(l2) in ({"-"}, {"="})) and len(l2) != len(l1):
snip = "\n".join(self._doc._str[:2]) + "..."
self._error_location(
f"potentially wrong underline length... \n{l1} \n{l2} in \n{snip}",
error=False,
)
return l2.startswith("-" * len(l1)) or l2.startswith("=" * len(l1))
def _strip(self, doc):
i = 0
j = 0
for i, line in enumerate(doc):
if line.strip():
break
for j, line in enumerate(doc[::-1]):
if line.strip():
break
return doc[i : len(doc) - j]
def _read_to_next_section(self):
section = self._doc.read_to_next_empty_line()
while not self._is_at_section() and not self._doc.eof():
if not self._doc.peek(-1).strip(): # previous line was empty
section += [""]
section += self._doc.read_to_next_empty_line()
return section
def _read_sections(self):
while not self._doc.eof():
data = self._read_to_next_section()
name = data[0].strip()
if name.startswith(".."): # index section
yield name, data[1:]
elif len(data) < 2:
yield StopIteration
else:
yield name, self._strip(data[2:])
def _parse_param_list(self, content, single_element_is_type=False):
content = dedent_lines(content)
r = Reader(content)
params = []
while not r.eof():
header = r.read().strip()
if " : " in header:
arg_name, arg_type = header.split(" : ", maxsplit=1)
else:
# NOTE: a param line with a single element should never have a
# " :" before the description line, so this should probably
# warn.
header = header.removesuffix(" :")
if single_element_is_type:
arg_name, arg_type = "", header
else:
arg_name, arg_type = header, ""
desc = r.read_to_next_unindented_line()
desc = dedent_lines(desc)
desc = strip_blank_lines(desc)
params.append(Parameter(arg_name, arg_type, desc))
return params
# See also supports the following formats.
#
# <FUNCNAME>
# <FUNCNAME> SPACE* COLON SPACE+ <DESC> SPACE*
# <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)+ (COMMA | PERIOD)? SPACE*
# <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)* SPACE* COLON SPACE+ <DESC> SPACE*
# <FUNCNAME> is one of
# <PLAIN_FUNCNAME>
# COLON <ROLE> COLON BACKTICK <PLAIN_FUNCNAME> BACKTICK
# where
# <PLAIN_FUNCNAME> is a legal function name, and
# <ROLE> is any nonempty sequence of word characters.
# Examples: func_f1 :meth:`func_h1` :obj:`~baz.obj_r` :class:`class_j`
# <DESC> is a string describing the function.
_role = r":(?P<role>(py:)?\w+):"
_funcbacktick = r"`(?P<name>(?:~\w+\.)?[a-zA-Z0-9_\.-]+)`"
_funcplain = r"(?P<name2>[a-zA-Z0-9_\.-]+)"
_funcname = r"(" + _role + _funcbacktick + r"|" + _funcplain + r")"
_funcnamenext = _funcname.replace("role", "rolenext")
_funcnamenext = _funcnamenext.replace("name", "namenext")
_description = r"(?P<description>\s*:(\s+(?P<desc>\S+.*))?)?\s*$"
_func_rgx = re.compile(r"^\s*" + _funcname + r"\s*")
_line_rgx = re.compile(
r"^\s*"
+ r"(?P<allfuncs>"
+ _funcname # group for all function names
+ r"(?P<morefuncs>([,]\s+"
+ _funcnamenext
+ r")*)"
+ r")"
+ r"(?P<trailing>[,\.])?" # end of "allfuncs"
+ _description # Some function lists have a trailing comma (or period) '\s*'
)
# Empty <DESC> elements are replaced with '..'
empty_description = ".."
def _parse_see_also(self, content):
"""
func_name : Descriptive text
continued text
another_func_name : Descriptive text
func_name1, func_name2, :meth:`func_name`, func_name3
"""
content = dedent_lines(content)
items = []
def parse_item_name(text):
"""Match ':role:`name`' or 'name'."""
m = self._func_rgx.match(text)
if not m:
self._error_location(f"Error parsing See Also entry {line!r}")
role = m.group("role")
name = m.group("name") if role else m.group("name2")
return name, role, m.end()
rest = []
for line in content:
if not line.strip():
continue
line_match = self._line_rgx.match(line)
description = None
if line_match:
description = line_match.group("desc")
if line_match.group("trailing") and description:
self._error_location(
"Unexpected comma or period after function list at index %d of "
'line "%s"' % (line_match.end("trailing"), line),
error=False,
)
if not description and line.startswith(" "):
rest.append(line.strip())
elif line_match:
funcs = []
text = line_match.group("allfuncs")
while True:
if not text.strip():
break
name, role, match_end = parse_item_name(text)
funcs.append((name, role))
text = text[match_end:].strip()
if text and text[0] == ",":
text = text[1:].strip()
rest = list(filter(None, [description]))
items.append((funcs, rest))
else:
self._error_location(f"Error parsing See Also entry {line!r}")
return items
def _parse_index(self, section, content):
"""
.. index:: default
:refguide: something, else, and more
"""
def strip_each_in(lst):
return [s.strip() for s in lst]
out = {}
section = section.split("::")
if len(section) > 1:
out["default"] = strip_each_in(section[1].split(","))[0]
for line in content:
line = line.split(":")
if len(line) > 2:
out[line[1]] = strip_each_in(line[2].split(","))
return out
def _parse_summary(self):
"""Grab signature (if given) and summary"""
if self._is_at_section():
return
# If several signatures present, take the last one
while True:
summary = self._doc.read_to_next_empty_line()
summary_str = " ".join([s.strip() for s in summary]).strip()
compiled = re.compile(r"^([\w., ]+=)?\s*[\w\.]+\(.*\)$")
if compiled.match(summary_str):
self["Signature"] = summary_str
if not self._is_at_section():
continue
break
if summary is not None:
self["Summary"] = summary
if not self._is_at_section():
self["Extended Summary"] = self._read_to_next_section()
def _parse(self):
self._doc.reset()
self._parse_summary()
sections = list(self._read_sections())
section_names = {section for section, content in sections}
has_yields = "Yields" in section_names
# We could do more tests, but we are not. Arbitrarily.
if not has_yields and "Receives" in section_names:
msg = "Docstring contains a Receives section but not Yields."
raise ValueError(msg)
for section, content in sections:
if not section.startswith(".."):
section = (s.capitalize() for s in section.split(" "))
section = " ".join(section)
if self.get(section):
self._error_location(
"The section %s appears twice in %s"
% (section, "\n".join(self._doc._str))
)
if section in ("Parameters", "Other Parameters", "Attributes", "Methods"):
self[section] = self._parse_param_list(content)
elif section in ("Returns", "Yields", "Raises", "Warns", "Receives"):
self[section] = self._parse_param_list(
content, single_element_is_type=True
)
elif section.startswith(".. index::"):
self["index"] = self._parse_index(section, content)
elif section == "See Also":
self["See Also"] = self._parse_see_also(content)
else:
self[section] = content
@property
def _obj(self):
if hasattr(self, "_cls"):
return self._cls
elif hasattr(self, "_f"):
return self._f
return None
def _error_location(self, msg, error=True):
if self._obj is not None:
# we know where the docs came from:
try:
filename = inspect.getsourcefile(self._obj)
except TypeError:
filename = None
# Make UserWarning more descriptive via object introspection.
# Skip if introspection fails
name = getattr(self._obj, "__name__", None)
if name is None:
name = getattr(getattr(self._obj, "__class__", None), "__name__", None)
if name is not None:
msg += f" in the docstring of {name}"
msg += f" in {filename}." if filename else ""
if error:
raise ValueError(msg)
else:
warn(msg, stacklevel=3)
# string conversion routines
def _str_header(self, name, symbol="-"):
return [name, len(name) * symbol]
def _str_indent(self, doc, indent=4):
return [" " * indent + line for line in doc]
def _str_signature(self):
if self["Signature"]:
return [self["Signature"].replace("*", r"\*")] + [""]
return [""]
def _str_summary(self):
if self["Summary"]:
return self["Summary"] + [""]
return []
def _str_extended_summary(self):
if self["Extended Summary"]:
return self["Extended Summary"] + [""]
return []
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_header(name)
for param in self[name]:
parts = []
if param.name:
parts.append(param.name)
if param.type:
parts.append(param.type)
out += [" : ".join(parts)]
if param.desc and "".join(param.desc).strip():
out += self._str_indent(param.desc)
out += [""]
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += self[name]
out += [""]
return out
def _str_see_also(self, func_role):
if not self["See Also"]:
return []
out = []
out += self._str_header("See Also")
out += [""]
last_had_desc = True
for funcs, desc in self["See Also"]:
assert isinstance(funcs, list)
links = []
for func, role in funcs:
if role:
link = f":{role}:`{func}`"
elif func_role:
link = f":{func_role}:`{func}`"
else:
link = f"`{func}`_"
links.append(link)
link = ", ".join(links)
out += [link]
if desc:
out += self._str_indent([" ".join(desc)])
last_had_desc = True
else:
last_had_desc = False
out += self._str_indent([self.empty_description])
if last_had_desc:
out += [""]
out += [""]
return out
def _str_index(self):
idx = self["index"]
out = []
output_index = False
default_index = idx.get("default", "")
if default_index:
output_index = True
out += [f".. index:: {default_index}"]
for section, references in idx.items():
if section == "default":
continue
output_index = True
out += [f" :{section}: {', '.join(references)}"]
if output_index:
return out
return ""
def __str__(self, func_role=""):
out = []
out += self._str_signature()
out += self._str_summary()
out += self._str_extended_summary()
out += self._str_param_list("Parameters")
for param_list in ("Attributes", "Methods"):
out += self._str_param_list(param_list)
for param_list in (
"Returns",
"Yields",
"Receives",
"Other Parameters",
"Raises",
"Warns",
):
out += self._str_param_list(param_list)
out += self._str_section("Warnings")
out += self._str_see_also(func_role)
for s in ("Notes", "References", "Examples"):
out += self._str_section(s)
out += self._str_index()
return "\n".join(out)
def dedent_lines(lines):
"""Deindent a list of lines maximally"""
return textwrap.dedent("\n".join(lines)).split("\n")
class FunctionDoc(NumpyDocString):
def __init__(self, func, role="func", doc=None, config=None):
self._f = func
self._role = role # e.g. "func" or "meth"
if doc is None:
if func is None:
raise ValueError("No function or docstring given")
doc = inspect.getdoc(func) or ""
if config is None:
config = {}
NumpyDocString.__init__(self, doc, config)
def get_func(self):
func_name = getattr(self._f, "__name__", self.__class__.__name__)
if inspect.isclass(self._f):
func = getattr(self._f, "__call__", self._f.__init__)
else:
func = self._f
return func, func_name
def __str__(self):
out = ""
func, func_name = self.get_func()
roles = {"func": "function", "meth": "method"}
if self._role:
if self._role not in roles:
print(f"Warning: invalid role {self._role}")
out += f".. {roles.get(self._role, '')}:: {func_name}\n \n\n"
out += super().__str__(func_role=self._role)
return out
class ObjDoc(NumpyDocString):
def __init__(self, obj, doc=None, config=None):
self._f = obj
if config is None:
config = {}
NumpyDocString.__init__(self, doc, config=config)
class ClassDoc(NumpyDocString):
extra_public_methods = ["__call__"]
def __init__(self, cls, doc=None, modulename="", func_doc=FunctionDoc, config=None):
if not inspect.isclass(cls) and cls is not None:
raise ValueError(f"Expected a class or None, but got {cls!r}")
self._cls = cls
if "sphinx" in sys.modules:
from sphinx.ext.autodoc import ALL
else:
ALL = object()
if config is None:
config = {}
self.show_inherited_members = config.get("show_inherited_class_members", True)
if modulename and not modulename.endswith("."):
modulename += "."
self._mod = modulename
if doc is None:
if cls is None:
raise ValueError("No class or documentation string given")
doc = pydoc.getdoc(cls)
NumpyDocString.__init__(self, doc)
_members = config.get("members", [])
if _members is ALL:
_members = None
_exclude = config.get("exclude-members", [])
if config.get("show_class_members", True) and _exclude is not ALL:
def splitlines_x(s):
if not s:
return []
else:
return s.splitlines()
for field, items in [
("Methods", self.methods),
("Attributes", self.properties),
]:
if not self[field]:
doc_list = []
for name in sorted(items):
if name in _exclude or (_members and name not in _members):
continue
try:
doc_item = pydoc.getdoc(getattr(self._cls, name))
doc_list.append(Parameter(name, "", splitlines_x(doc_item)))
except AttributeError:
pass # method doesn't exist
self[field] = doc_list
@property
def methods(self):
if self._cls is None:
return []
return [
name
for name, func in inspect.getmembers(self._cls)
if (
(not name.startswith("_") or name in self.extra_public_methods)
and isinstance(func, Callable)
and self._is_show_member(name)
)
]
@property
def properties(self):
if self._cls is None:
return []
return [
name
for name, func in inspect.getmembers(self._cls)
if (
not name.startswith("_")
and not self._should_skip_member(name, self._cls)
and (
func is None
or isinstance(func, (property, cached_property))
or inspect.isdatadescriptor(func)
)
and self._is_show_member(name)
)
]
@staticmethod
def _should_skip_member(name, klass):
return (
# Namedtuples should skip everything in their ._fields as the
# docstrings for each of the members is: "Alias for field number X"
issubclass(klass, tuple)
and hasattr(klass, "_asdict")
and hasattr(klass, "_fields")
and name in klass._fields
)
def _is_show_member(self, name):
return (
# show all class members
self.show_inherited_members
# or class member is not inherited
or name in self._cls.__dict__
)
def get_doc_object(
obj,
what=None,
doc=None,
config=None,
class_doc=ClassDoc,
func_doc=FunctionDoc,
obj_doc=ObjDoc,
):
if what is None:
if inspect.isclass(obj):
what = "class"
elif inspect.ismodule(obj):
what = "module"
elif isinstance(obj, Callable):
what = "function"
else:
what = "object"
if config is None:
config = {}
if what == "class":
return class_doc(obj, func_doc=func_doc, doc=doc, config=config)
elif what in ("function", "method"):
return func_doc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return obj_doc(obj, doc, config=config) | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
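# --- Illustrative usage sketch (added for clarity; not part of the upstream
# file): parsing a small numpydoc-style docstring with the classes above.
def _numpydoc_demo() -> None:  # hypothetical helper, for illustration only
    doc = NumpyDocString(
        "Add two numbers.\n"
        "\n"
        "Parameters\n"
        "----------\n"
        "a : int\n"
        "    First operand.\n"
        "b : int\n"
        "    Second operand.\n"
    )
    # Each entry is a Parameter(name, type, desc) namedtuple.
    assert [p.name for p in doc["Parameters"]] == ["a", "b"]
    assert doc["Parameters"][0].type == "int"
    assert doc["Parameters"][0].desc == ["First operand."]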
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_extra/testing.py | sklearn/externals/array_api_extra/testing.py | """
Public testing utilities.
See also _lib._testing for additional private testing utilities.
"""
from __future__ import annotations
import contextlib
import enum
import warnings
from collections.abc import Callable, Generator, Iterator, Sequence
from functools import wraps
from types import ModuleType
from typing import TYPE_CHECKING, Any, ParamSpec, TypeVar, cast
from ._lib._utils._compat import is_dask_namespace, is_jax_namespace
from ._lib._utils._helpers import jax_autojit, pickle_flatten, pickle_unflatten
__all__ = ["lazy_xp_function", "patch_lazy_xp_functions"]
if TYPE_CHECKING: # pragma: no cover
# TODO import override from typing (requires Python >=3.12)
import pytest
from dask.typing import Graph, Key, SchedulerGetCallable
from typing_extensions import override
else:
# Sphinx hacks
SchedulerGetCallable = object
def override(func):
return func
P = ParamSpec("P")
T = TypeVar("T")
_ufuncs_tags: dict[object, dict[str, Any]] = {}
class Deprecated(enum.Enum):
"""Unique type for deprecated parameters."""
DEPRECATED = 1
DEPRECATED = Deprecated.DEPRECATED
def lazy_xp_function(
func: Callable[..., Any],
*,
allow_dask_compute: bool | int = False,
jax_jit: bool = True,
static_argnums: Deprecated = DEPRECATED,
static_argnames: Deprecated = DEPRECATED,
) -> None: # numpydoc ignore=GL07
"""
Tag a function to be tested on lazy backends.
Tag a function so that when any tests are executed with ``xp=jax.numpy`` the
function is replaced with a jitted version of itself, and when it is executed with
``xp=dask.array`` the function will raise if it attempts to materialize the graph.
This will be later expanded to provide test coverage for other lazy backends.
In order for the tag to be effective, the test or a fixture must call
:func:`patch_lazy_xp_functions`.
Parameters
----------
func : callable
Function to be tested.
allow_dask_compute : bool | int, optional
Whether `func` is allowed to internally materialize the Dask graph, or maximum
number of times it is allowed to do so. This is typically triggered by
``bool()``, ``float()``, or ``np.asarray()``.
Set to 1 if you are aware that `func` converts the input parameters to NumPy and
want to let it do so at least for the time being, knowing that it is going to be
extremely detrimental for performance.
If a test needs values higher than 1 to pass, it is a canary that the conversion
to NumPy/bool/float is happening multiple times, which translates to multiple
computations of the whole graph. Short of making the function fully lazy, you
should at least add explicit calls to ``np.asarray()`` early in the function.
*Note:* the counter of `allow_dask_compute` resets after each call to `func`, so
a test function that invokes `func` multiple times should still work with this
parameter set to 1.
Set to True to allow `func` to materialize the graph an unlimited number
of times.
Default: False, meaning that `func` must be fully lazy and never materialize the
graph.
jax_jit : bool, optional
Set to True to replace `func` with a smart variant of ``jax.jit(func)`` after
calling the :func:`patch_lazy_xp_functions` test helper with ``xp=jax.numpy``.
This is the default behaviour.
Set to False if `func` is only compatible with eager (non-jitted) JAX.
Unlike with vanilla ``jax.jit``, all arguments and return types that are not JAX
arrays are treated as static; the function can accept and return arbitrary
wrappers around JAX arrays. This difference is because, in real life, most users
won't wrap the function directly with ``jax.jit`` but rather they will use it
within their own code, which is itself then wrapped by ``jax.jit``, and
internally consume the function's outputs.
In other words, the pattern that is being tested is::
>>> @jax.jit
... def user_func(x):
... y = user_prepares_inputs(x)
... z = func(y, some_static_arg=True)
... return user_consumes(z)
Default: True.
static_argnums :
Deprecated; ignored
static_argnames :
Deprecated; ignored
See Also
--------
patch_lazy_xp_functions : Companion function to call from the test or fixture.
jax.jit : JAX function to compile a function for performance.
Examples
--------
In ``test_mymodule.py``::
from array_api_extra.testing import lazy_xp_function
from mymodule import myfunc
lazy_xp_function(myfunc)
def test_myfunc(xp):
a = xp.asarray([1, 2])
# When xp=jax.numpy, this is similar to `b = jax.jit(myfunc)(a)`
# When xp=dask.array, crash on compute() or persist()
b = myfunc(a)
Notes
-----
In order for this tag to be effective, the test function must be imported into the
test module globals without its namespace; alternatively its namespace must be
declared in a ``lazy_xp_modules`` list in the test module globals.
Example 1::
from mymodule import myfunc
lazy_xp_function(myfunc)
def test_myfunc(xp):
x = myfunc(xp.asarray([1, 2]))
Example 2::
import mymodule
lazy_xp_modules = [mymodule]
lazy_xp_function(mymodule.myfunc)
def test_myfunc(xp):
x = mymodule.myfunc(xp.asarray([1, 2]))
A test function can circumvent this monkey-patching system by using a namespace
outside of the two above patterns. You need to sanitize your code to make sure this
only happens intentionally.
Example 1::
import mymodule
from mymodule import myfunc
lazy_xp_function(myfunc)
def test_myfunc(xp):
a = xp.asarray([1, 2])
b = myfunc(a) # This is wrapped when xp=jax.numpy or xp=dask.array
c = mymodule.myfunc(a) # This is not
Example 2::
import mymodule
class naked:
myfunc = mymodule.myfunc
lazy_xp_modules = [mymodule]
lazy_xp_function(mymodule.myfunc)
def test_myfunc(xp):
a = xp.asarray([1, 2])
b = mymodule.myfunc(a) # This is wrapped when xp=jax.numpy or xp=dask.array
c = naked.myfunc(a) # This is not
"""
if static_argnums is not DEPRECATED or static_argnames is not DEPRECATED:
warnings.warn(
(
"The `static_argnums` and `static_argnames` parameters are deprecated "
"and ignored. They will be removed in a future version."
),
DeprecationWarning,
stacklevel=2,
)
tags = {
"allow_dask_compute": allow_dask_compute,
"jax_jit": jax_jit,
}
try:
func._lazy_xp_function = tags # type: ignore[attr-defined] # pylint: disable=protected-access # pyright: ignore[reportFunctionMemberAccess]
except AttributeError: # @cython.vectorize
_ufuncs_tags[func] = tags
def patch_lazy_xp_functions(
request: pytest.FixtureRequest,
monkeypatch: pytest.MonkeyPatch | None = None,
*,
xp: ModuleType,
) -> contextlib.AbstractContextManager[None]:
"""
Test lazy execution of functions tagged with :func:`lazy_xp_function`.
If ``xp==jax.numpy``, search for all functions which have been tagged with
:func:`lazy_xp_function` in the globals of the module that defines the current test,
as well as in the ``lazy_xp_modules`` list in the globals of the same module,
and wrap them with :func:`jax.jit`. Unwrap them at the end of the test.
If ``xp==dask.array``, wrap the functions with a decorator that disables
``compute()`` and ``persist()`` and ensures that exceptions and warnings are raised
eagerly.
This function should be typically called by your library's `xp` fixture that runs
tests on multiple backends::
@pytest.fixture(params=[
numpy,
array_api_strict,
pytest.param(jax.numpy, marks=pytest.mark.thread_unsafe),
pytest.param(dask.array, marks=pytest.mark.thread_unsafe),
])
def xp(request):
with patch_lazy_xp_functions(request, xp=request.param):
yield request.param
but it can otherwise be called by the test itself too.
Parameters
----------
request : pytest.FixtureRequest
Pytest fixture, as acquired by the test itself or by one of its fixtures.
monkeypatch : pytest.MonkeyPatch
Deprecated
xp : array_namespace
Array namespace to be tested.
See Also
--------
lazy_xp_function : Tag a function to be tested on lazy backends.
pytest.FixtureRequest : `request` test function parameter.
Notes
-----
This context manager monkey-patches modules and as such is thread unsafe
on Dask and JAX. If you run your test suite with
`pytest-run-parallel <https://github.com/Quansight-Labs/pytest-run-parallel/>`_,
you should mark these backends with ``@pytest.mark.thread_unsafe``, as shown in
the example above.
"""
mod = cast(ModuleType, request.module)
mods = [mod, *cast(list[ModuleType], getattr(mod, "lazy_xp_modules", []))]
to_revert: list[tuple[ModuleType, str, object]] = []
def temp_setattr(mod: ModuleType, name: str, func: object) -> None:
"""
Variant of monkeypatch.setattr, which allows monkey-patching only selected
parameters of a test so that pytest-run-parallel can run on the remainder.
"""
assert hasattr(mod, name)
to_revert.append((mod, name, getattr(mod, name)))
setattr(mod, name, func)
if monkeypatch is not None:
warnings.warn(
(
"The `monkeypatch` parameter is deprecated and will be removed in a "
"future version. "
"Use `patch_lazy_xp_function` as a context manager instead."
),
DeprecationWarning,
stacklevel=2,
)
# Enable using patch_lazy_xp_function not as a context manager
temp_setattr = monkeypatch.setattr # type: ignore[assignment] # pyright: ignore[reportAssignmentType]
def iter_tagged() -> Iterator[
tuple[ModuleType, str, Callable[..., Any], dict[str, Any]]
]:
for mod in mods:
for name, func in mod.__dict__.items():
tags: dict[str, Any] | None = None
with contextlib.suppress(AttributeError):
tags = func._lazy_xp_function # pylint: disable=protected-access
if tags is None:
with contextlib.suppress(KeyError, TypeError):
tags = _ufuncs_tags[func]
if tags is not None:
yield mod, name, func, tags
if is_dask_namespace(xp):
for mod, name, func, tags in iter_tagged():
n = tags["allow_dask_compute"]
if n is True:
n = 1_000_000
elif n is False:
n = 0
wrapped = _dask_wrap(func, n)
temp_setattr(mod, name, wrapped)
elif is_jax_namespace(xp):
for mod, name, func, tags in iter_tagged():
if tags["jax_jit"]:
wrapped = jax_autojit(func)
temp_setattr(mod, name, wrapped)
# We can't just decorate patch_lazy_xp_functions with
# @contextlib.contextmanager because it would not work with the
# deprecated monkeypatch when not used as a context manager.
@contextlib.contextmanager
def revert_on_exit() -> Generator[None]:
try:
yield
finally:
for mod, name, orig_func in to_revert:
setattr(mod, name, orig_func)
return revert_on_exit()
class CountingDaskScheduler(SchedulerGetCallable):
"""
Dask scheduler that counts how many times `dask.compute` is called.
If the number of times exceeds 'max_count', it raises an error.
This is a wrapper around Dask's own 'synchronous' scheduler.
Parameters
----------
max_count : int
Maximum number of allowed calls to `dask.compute`.
msg : str
Assertion to raise when the count exceeds `max_count`.
"""
count: int
max_count: int
msg: str
def __init__(self, max_count: int, msg: str): # numpydoc ignore=GL08
self.count = 0
self.max_count = max_count
self.msg = msg
@override
def __call__(
self, dsk: Graph, keys: Sequence[Key] | Key, **kwargs: Any
) -> Any: # numpydoc ignore=GL08
import dask
self.count += 1
# This should yield a nice traceback to the
# offending line in the user's code
assert self.count <= self.max_count, self.msg
return dask.get(dsk, keys, **kwargs) # type: ignore[attr-defined] # pyright: ignore[reportPrivateImportUsage]
def _dask_wrap(
func: Callable[P, T], n: int
) -> Callable[P, T]: # numpydoc ignore=PR01,RT01
"""
Wrap `func` to raise if it attempts to call `dask.compute` more than `n` times.
After the function returns, materialize the graph in order to re-raise exceptions.
"""
import dask
import dask.array as da
func_name = getattr(func, "__name__", str(func))
n_str = f"only up to {n}" if n else "no"
msg = (
f"Called `dask.compute()` or `dask.persist()` {n + 1} times, "
f"but {n_str} calls are allowed. Set "
f"`lazy_xp_function({func_name}, allow_dask_compute={n + 1})` "
"to allow for more (but note that this will harm performance). "
)
@wraps(func)
def wrapper(*args: P.args, **kwargs: P.kwargs) -> T: # numpydoc ignore=GL08
scheduler = CountingDaskScheduler(n, msg)
with dask.config.set({"scheduler": scheduler}): # pyright: ignore[reportPrivateImportUsage]
out = func(*args, **kwargs)
# Block until the graph materializes and reraise exceptions. This allows
# `pytest.raises` and `pytest.warns` to work as expected. Note that this would
# not work on scheduler='distributed', as it would not block.
arrays, rest = pickle_flatten(out, da.Array)
arrays = dask.persist(arrays, scheduler="threads")[0] # type: ignore[attr-defined,no-untyped-call] # pyright: ignore[reportPrivateImportUsage]
return pickle_unflatten(arrays, rest) # pyright: ignore[reportUnknownArgumentType]
return wrapper
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_extra/_delegation.py | sklearn/externals/array_api_extra/_delegation.py | """Delegation to existing implementations for Public API Functions."""
from collections.abc import Sequence
from types import ModuleType
from typing import Literal
from ._lib import _funcs
from ._lib._utils._compat import (
array_namespace,
is_cupy_namespace,
is_dask_namespace,
is_jax_namespace,
is_numpy_namespace,
is_pydata_sparse_namespace,
is_torch_namespace,
)
from ._lib._utils._compat import device as get_device
from ._lib._utils._helpers import asarrays
from ._lib._utils._typing import Array, DType
__all__ = ["isclose", "nan_to_num", "one_hot", "pad"]
def isclose(
a: Array | complex,
b: Array | complex,
*,
rtol: float = 1e-05,
atol: float = 1e-08,
equal_nan: bool = False,
xp: ModuleType | None = None,
) -> Array:
"""
Return a boolean array where two arrays are element-wise equal within a tolerance.
The tolerance values are positive, typically very small numbers. The relative
difference ``(rtol * abs(b))`` and the absolute difference `atol` are added together
to compare against the absolute difference between `a` and `b`.
NaNs are treated as equal if they are in the same place and if ``equal_nan=True``.
Infs are treated as equal if they are in the same place and of the same sign in both
arrays.
Parameters
----------
a, b : Array | int | float | complex | bool
Input objects to compare. At least one must be an array.
rtol : array_like, optional
The relative tolerance parameter (see Notes).
atol : array_like, optional
The absolute tolerance parameter (see Notes).
equal_nan : bool, optional
Whether to compare NaN's as equal. If True, NaN's in `a` will be considered
equal to NaN's in `b` in the output array.
xp : array_namespace, optional
The standard-compatible namespace for `a` and `b`. Default: infer.
Returns
-------
Array
A boolean array of shape broadcasted from `a` and `b`, containing ``True`` where
`a` is close to `b`, and ``False`` otherwise.
Warnings
--------
The default `atol` is not appropriate for comparing numbers with magnitudes much
smaller than one (see notes).
See Also
--------
math.isclose : Similar function in stdlib for Python scalars.
Notes
-----
For finite values, `isclose` uses the following equation to test whether two
floating point values are equivalent::
absolute(a - b) <= (atol + rtol * absolute(b))
Unlike the built-in `math.isclose`,
the above equation is not symmetric in `a` and `b`,
so that ``isclose(a, b)`` might be different from ``isclose(b, a)`` in some rare
cases.
The default value of `atol` is not appropriate when the reference value `b` has
magnitude smaller than one. For example, it is unlikely that ``a = 1e-9`` and
``b = 2e-9`` should be considered "close", yet ``isclose(1e-9, 2e-9)`` is ``True``
with default settings. Be sure to select `atol` for the use case at hand, especially
for defining the threshold below which a non-zero value in `a` will be considered
"close" to a very small or zero value in `b`.
The comparison of `a` and `b` uses standard broadcasting, which means that `a` and
`b` need not have the same shape in order for ``isclose(a, b)`` to evaluate to
``True``.
`isclose` is not defined for non-numeric data types.
``bool`` is considered a numeric data-type for this purpose.
"""
xp = array_namespace(a, b) if xp is None else xp
if (
is_numpy_namespace(xp)
or is_cupy_namespace(xp)
or is_dask_namespace(xp)
or is_jax_namespace(xp)
):
return xp.isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
if is_torch_namespace(xp):
a, b = asarrays(a, b, xp=xp) # Array API 2024.12 support
return xp.isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
return _funcs.isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan, xp=xp)
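# --- Illustrative sketch (added for clarity; not part of the upstream file;
# assumes NumPy is installed): with the default atol, values much smaller than
# one compare as "close" even when they differ by a factor of two; tighten
# atol for that use case (see the Notes above).
def _isclose_atol_demo() -> None:  # hypothetical helper, for illustration only
    import numpy as np
    assert bool(isclose(np.asarray(1e-9), np.asarray(2e-9)))  # atol=1e-8 dominates
    assert not bool(isclose(np.asarray(1e-9), np.asarray(2e-9), atol=0.0))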
def nan_to_num(
x: Array | float | complex,
/,
*,
fill_value: int | float = 0.0,
xp: ModuleType | None = None,
) -> Array:
"""
Replace NaN with zero and infinity with large finite numbers (default behaviour).
If `x` is inexact, NaN is replaced by zero or by the user defined value in the
`fill_value` keyword, infinity is replaced by the largest finite floating
point value representable by ``x.dtype``, and -infinity is replaced by the
most negative finite floating point value representable by ``x.dtype``.
For complex dtypes, the above is applied to each of the real and
imaginary components of `x` separately.
Parameters
----------
x : array | float | complex
Input data.
fill_value : int | float, optional
Value to be used to fill NaN values. If no value is passed
then NaN values will be replaced with 0.0.
xp : array_namespace, optional
The standard-compatible namespace for `x`. Default: infer.
Returns
-------
array
`x`, with the non-finite values replaced.
See Also
--------
array_api.isnan : Shows which elements are Not a Number (NaN).
Examples
--------
>>> import array_api_extra as xpx
>>> import array_api_strict as xp
>>> xpx.nan_to_num(xp.inf)
1.7976931348623157e+308
>>> xpx.nan_to_num(-xp.inf)
-1.7976931348623157e+308
>>> xpx.nan_to_num(xp.nan)
0.0
>>> x = xp.asarray([xp.inf, -xp.inf, xp.nan, -128, 128])
>>> xpx.nan_to_num(x)
array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, # may vary
-1.28000000e+002, 1.28000000e+002])
>>> y = xp.asarray([complex(xp.inf, xp.nan), xp.nan, complex(xp.nan, xp.inf)])
>>> xpx.nan_to_num(y)
array([ 1.79769313e+308 +0.00000000e+000j, # may vary
0.00000000e+000 +0.00000000e+000j,
0.00000000e+000 +1.79769313e+308j])
"""
if isinstance(fill_value, complex):
msg = "Complex fill values are not supported."
raise TypeError(msg)
xp = array_namespace(x) if xp is None else xp
# for scalars we want to output an array
y = xp.asarray(x)
if (
is_cupy_namespace(xp)
or is_jax_namespace(xp)
or is_numpy_namespace(xp)
or is_torch_namespace(xp)
):
return xp.nan_to_num(y, nan=fill_value)
return _funcs.nan_to_num(y, fill_value=fill_value, xp=xp)
def one_hot(
x: Array,
/,
num_classes: int,
*,
dtype: DType | None = None,
axis: int = -1,
xp: ModuleType | None = None,
) -> Array:
"""
One-hot encode the given indices.
Each index in the input `x` is encoded as a vector of zeros of length `num_classes`
with the element at the given index set to one.
Parameters
----------
x : array
An array with integral dtype whose values are between `0` and `num_classes - 1`.
num_classes : int
Number of classes in the one-hot dimension.
dtype : DType, optional
The dtype of the return value. Defaults to the default float dtype (usually
float64).
axis : int, optional
Position in the expanded axes where the new axis is placed. Default: -1.
xp : array_namespace, optional
The standard-compatible namespace for `x`. Default: infer.
Returns
-------
array
An array having the same shape as `x` except for a new axis at the position
given by `axis` having size `num_classes`. If `axis` is unspecified, it
defaults to -1, which appends a new axis.
If ``x < 0`` or ``x >= num_classes``, then the result is undefined, may raise
an exception, or may even cause a bad state. `x` is not checked.
Examples
--------
>>> import array_api_extra as xpx
>>> import array_api_strict as xp
>>> xpx.one_hot(xp.asarray([1, 2, 0]), 3)
Array([[0., 1., 0.],
[0., 0., 1.],
[1., 0., 0.]], dtype=array_api_strict.float64)
"""
# Validate inputs.
if xp is None:
xp = array_namespace(x)
if not xp.isdtype(x.dtype, "integral"):
msg = "x must have an integral dtype."
raise TypeError(msg)
if dtype is None:
dtype = _funcs.default_dtype(xp, device=get_device(x))
# Delegate where possible.
if is_jax_namespace(xp):
from jax.nn import one_hot as jax_one_hot
return jax_one_hot(x, num_classes, dtype=dtype, axis=axis)
if is_torch_namespace(xp):
from torch.nn.functional import one_hot as torch_one_hot
x = xp.astype(x, xp.int64) # PyTorch only supports int64 here.
try:
out = torch_one_hot(x, num_classes)
except RuntimeError as e:
raise IndexError from e
else:
out = _funcs.one_hot(x, num_classes, xp=xp)
out = xp.astype(out, dtype, copy=False)
if axis != -1:
out = xp.moveaxis(out, -1, axis)
return out
def pad(
x: Array,
pad_width: int | tuple[int, int] | Sequence[tuple[int, int]],
mode: Literal["constant"] = "constant",
*,
constant_values: complex = 0,
xp: ModuleType | None = None,
) -> Array:
"""
Pad the input array.
Parameters
----------
x : array
Input array.
pad_width : int or tuple of ints or sequence of pairs of ints
Pad the input array with this many elements from each side.
If a sequence of tuples, ``[(before_0, after_0), ... (before_N, after_N)]``,
each pair applies to the corresponding axis of ``x``.
A single tuple, ``(before, after)``, is equivalent to a list of ``x.ndim``
copies of this tuple.
mode : str, optional
Only "constant" mode is currently supported, which pads with
the value passed to `constant_values`.
constant_values : python scalar, optional
Use this value to pad the input. Default is zero.
xp : array_namespace, optional
The standard-compatible namespace for `x`. Default: infer.
Returns
-------
array
The input array,
padded with ``pad_width`` elements equal to ``constant_values``.
"""
xp = array_namespace(x) if xp is None else xp
if mode != "constant":
msg = "Only `'constant'` mode is currently supported"
raise NotImplementedError(msg)
if (
is_numpy_namespace(xp)
or is_cupy_namespace(xp)
or is_jax_namespace(xp)
or is_pydata_sparse_namespace(xp)
):
return xp.pad(x, pad_width, mode, constant_values=constant_values)
# https://github.com/pytorch/pytorch/blob/cf76c05b4dc629ac989d1fb8e789d4fac04a095a/torch/_numpy/_funcs_impl.py#L2045-L2056
if is_torch_namespace(xp):
pad_width = xp.asarray(pad_width)
pad_width = xp.broadcast_to(pad_width, (x.ndim, 2))
pad_width = xp.flip(pad_width, axis=(0,)).flatten()
return xp.nn.functional.pad(x, tuple(pad_width), value=constant_values) # type: ignore[arg-type] # pyright: ignore[reportArgumentType]
return _funcs.pad(x, pad_width, constant_values=constant_values, xp=xp)
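# Hedged usage sketch (not part of the original module): pad a small NumPy array
# on every axis with the `pad` function defined above. The input data below is
# made up purely for illustration.
def _example_pad():  # pragma: no cover
    import numpy as np
    x = np.ones((2, 3))
    # One element of padding before and after each axis -> shape (4, 5),
    # filled with zeros (the default `constant_values`).
    return pad(x, 1)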
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_extra/__init__.py | sklearn/externals/array_api_extra/__init__.py | """Extra array functions built on top of the array API standard."""
from ._delegation import isclose, nan_to_num, one_hot, pad
from ._lib._at import at
from ._lib._funcs import (
apply_where,
atleast_nd,
broadcast_shapes,
cov,
create_diagonal,
default_dtype,
expand_dims,
kron,
nunique,
setdiff1d,
sinc,
)
from ._lib._lazy import lazy_apply
__version__ = "0.8.2"
# pylint: disable=duplicate-code
__all__ = [
"__version__",
"apply_where",
"at",
"atleast_nd",
"broadcast_shapes",
"cov",
"create_diagonal",
"default_dtype",
"expand_dims",
"isclose",
"kron",
"lazy_apply",
"nan_to_num",
"nunique",
"one_hot",
"pad",
"setdiff1d",
"sinc",
]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_extra/_lib/_lazy.py | sklearn/externals/array_api_extra/_lib/_lazy.py | """Public API Functions."""
from __future__ import annotations
import math
from collections.abc import Callable, Sequence
from functools import partial, wraps
from types import ModuleType
from typing import TYPE_CHECKING, Any, ParamSpec, TypeAlias, cast, overload
from ._funcs import broadcast_shapes
from ._utils import _compat
from ._utils._compat import (
array_namespace,
is_dask_namespace,
is_jax_namespace,
)
from ._utils._helpers import is_python_scalar
from ._utils._typing import Array, DType
if TYPE_CHECKING: # pragma: no cover
import numpy as np
from numpy.typing import ArrayLike
NumPyObject: TypeAlias = np.ndarray[Any, Any] | np.generic
else:
# Sphinx hack
NumPyObject = Any
P = ParamSpec("P")
@overload
def lazy_apply( # type: ignore[valid-type]
func: Callable[P, Array | ArrayLike],
*args: Array | complex | None,
shape: tuple[int | None, ...] | None = None,
dtype: DType | None = None,
as_numpy: bool = False,
xp: ModuleType | None = None,
**kwargs: P.kwargs, # pyright: ignore[reportGeneralTypeIssues]
) -> Array: ... # numpydoc ignore=GL08
@overload
def lazy_apply( # type: ignore[valid-type]
func: Callable[P, Sequence[Array | ArrayLike]],
*args: Array | complex | None,
shape: Sequence[tuple[int | None, ...]],
dtype: Sequence[DType] | None = None,
as_numpy: bool = False,
xp: ModuleType | None = None,
**kwargs: P.kwargs, # pyright: ignore[reportGeneralTypeIssues]
) -> tuple[Array, ...]: ... # numpydoc ignore=GL08
def lazy_apply( # type: ignore[valid-type] # numpydoc ignore=GL07,SA04
func: Callable[P, Array | ArrayLike | Sequence[Array | ArrayLike]],
*args: Array | complex | None,
shape: tuple[int | None, ...] | Sequence[tuple[int | None, ...]] | None = None,
dtype: DType | Sequence[DType] | None = None,
as_numpy: bool = False,
xp: ModuleType | None = None,
**kwargs: P.kwargs, # pyright: ignore[reportGeneralTypeIssues]
) -> Array | tuple[Array, ...]:
"""
Lazily apply an eager function.
If the backend of the input arrays is lazy, e.g. Dask or jitted JAX, the execution
of the function is delayed until the graph is materialized; if it's eager, the
function is executed immediately.
Parameters
----------
func : callable
The function to apply.
It must accept one or more array API compliant arrays as positional arguments.
If `as_numpy=True`, inputs are converted to NumPy before they are passed to
`func`.
It must return either a single array-like or a sequence of array-likes.
`func` must be a pure function, i.e. without side effects, as depending on the
backend it may be executed more than once or never.
*args : Array | int | float | complex | bool | None
One or more Array API compliant arrays, Python scalars, or ``None`` values.
If `as_numpy=True`, you need to be able to apply :func:`numpy.asarray` to
non-None args to convert them to NumPy; read notes below about specific
backends.
shape : tuple[int | None, ...] | Sequence[tuple[int | None, ...]], optional
Output shape or sequence of output shapes, one for each output of `func`.
Default: assume single output and broadcast shapes of the input arrays.
dtype : DType | Sequence[DType], optional
Output dtype or sequence of output dtypes, one for each output of `func`.
dtype(s) must belong to the same array namespace as the input arrays.
Default: infer the result type(s) from the input arrays.
as_numpy : bool, optional
If True, convert the input arrays to NumPy before passing them to `func`.
This is particularly useful to make NumPy-only functions, e.g. written in Cython
or Numba, work transparently with array API-compliant arrays.
Default: False.
xp : array_namespace, optional
The standard-compatible namespace for `args`. Default: infer.
**kwargs : Any, optional
Additional keyword arguments to pass verbatim to `func`.
They cannot contain Array objects.
Returns
-------
Array | tuple[Array, ...]
The result(s) of `func` applied to the input arrays, wrapped in the same
array namespace as the inputs.
If shape is omitted or a single `tuple[int | None, ...]`, return a single array.
Otherwise, return a tuple of arrays.
Notes
-----
JAX
This allows applying eager functions to jitted JAX arrays, which are lazy.
The function won't be applied until the JAX array is materialized.
When running inside ``jax.jit``, `shape` must be fully known, i.e. it cannot
contain any `None` elements.
.. warning::
`func` must never raise inside ``jax.jit``, as the resulting behavior is
undefined.
Using this with `as_numpy=False` is particularly useful to apply non-jittable
JAX functions to arrays on GPU devices.
If ``as_numpy=True``, the :doc:`jax:transfer_guard` may prevent arrays on a GPU
device from being transferred back to CPU. This is treated as an implicit
transfer.
PyTorch, CuPy
If ``as_numpy=True``, these backends raise by default if you attempt to convert
arrays on a GPU device to NumPy.
Sparse
If ``as_numpy=True``, by default sparse prevents implicit densification through
:func:`numpy.asarray`. `This safety mechanism can be disabled
<https://sparse.pydata.org/en/stable/operations.html#package-configuration>`_.
Dask
This allows applying eager functions to Dask arrays.
The Dask graph won't be computed until the user calls ``compute()`` or
``persist()`` down the line.
The function name will be prominently visible on the user-facing Dask
dashboard and on Prometheus metrics, so it is recommended for it to be
meaningful.
`lazy_apply` doesn't know if `func` reduces along any axes; also, shape
changes are non-trivial in chunked Dask arrays. For these reasons, all inputs
will be rechunked into a single chunk.
.. warning::
The whole operation needs to fit in memory all at once on a single worker.
The outputs will also be returned as a single chunk and you should consider
rechunking them into smaller chunks afterwards.
If you want to distribute the calculation across multiple workers, you
should use :func:`dask.array.map_blocks`, :func:`dask.array.map_overlap`,
:func:`dask.array.blockwise`, or a native Dask wrapper instead of
`lazy_apply`.
Dask wrapping around other backends
If ``as_numpy=False``, `func` will receive in input eager arrays of the meta
namespace, as defined by the ``._meta`` attribute of the input Dask arrays.
The outputs of `func` will be wrapped by the meta namespace, and then wrapped
again by Dask.
Raises
------
ValueError
When ``xp=jax.numpy``, the output `shape` is unknown (it contains ``None`` on
one or more axes) and this function was called inside ``jax.jit``.
RuntimeError
When ``xp=sparse`` and auto-densification is disabled.
Exception (backend-specific)
When the backend disallows implicit device to host transfers and the input
arrays are on a non-CPU device, e.g. on GPU.
See Also
--------
jax.transfer_guard
jax.pure_callback
dask.array.map_blocks
dask.array.map_overlap
dask.array.blockwise
"""
args_not_none = [arg for arg in args if arg is not None]
array_args = [arg for arg in args_not_none if not is_python_scalar(arg)]
if not array_args:
msg = "Must have at least one argument array"
raise ValueError(msg)
if xp is None:
xp = array_namespace(*args)
# Normalize and validate shape and dtype
shapes: list[tuple[int | None, ...]]
dtypes: list[DType]
multi_output = False
if shape is None:
shapes = [broadcast_shapes(*(arg.shape for arg in array_args))]
elif all(isinstance(s, int | None) for s in shape):
# Do not test for shape to be a tuple
# https://github.com/data-apis/array-api/issues/891#issuecomment-2637430522
shapes = [cast(tuple[int | None, ...], shape)]
else:
shapes = list(shape) # type: ignore[arg-type] # pyright: ignore[reportAssignmentType]
multi_output = True
if dtype is None:
dtypes = [xp.result_type(*args_not_none)] * len(shapes)
elif multi_output:
if not isinstance(dtype, Sequence):
msg = "Got multiple shapes but only one dtype"
raise ValueError(msg)
dtypes = list(dtype) # pyright: ignore[reportUnknownArgumentType]
else:
if isinstance(dtype, Sequence):
msg = "Got single shape but multiple dtypes"
raise ValueError(msg)
dtypes = [dtype]
if len(shapes) != len(dtypes):
msg = f"Got {len(shapes)} shapes and {len(dtypes)} dtypes"
raise ValueError(msg)
del shape
del dtype
# End of shape and dtype parsing
# Backend-specific branches
if is_dask_namespace(xp):
import dask
metas: list[Array] = [arg._meta for arg in array_args] # pylint: disable=protected-access # pyright: ignore[reportAttributeAccessIssue]
meta_xp = array_namespace(*metas)
wrapped = dask.delayed( # type: ignore[attr-defined] # pyright: ignore[reportPrivateImportUsage]
_lazy_apply_wrapper(func, as_numpy, multi_output, meta_xp),
pure=True,
)
# This finalizes each arg, which is the same as arg.rechunk(-1).
# Please read docstring above for why we're not using
# dask.array.map_blocks or dask.array.blockwise!
delayed_out = wrapped(*args, **kwargs)
out = tuple(
xp.from_delayed(
delayed_out[i], # pyright: ignore[reportIndexIssue]
# Dask's unknown shapes diverge from the Array API specification
shape=tuple(math.nan if s is None else s for s in shape),
dtype=dtype,
meta=metas[0],
)
for i, (shape, dtype) in enumerate(zip(shapes, dtypes, strict=True))
)
elif is_jax_namespace(xp) and _is_jax_jit_enabled(xp):
# Delay calling func with jax.pure_callback, which will forward to func eager
# JAX arrays. Do not use jax.pure_callback when running outside of the JIT,
# as it does not support raising exceptions:
# https://github.com/jax-ml/jax/issues/26102
import jax
if any(None in shape for shape in shapes):
msg = "Output shape must be fully known when running inside jax.jit"
raise ValueError(msg)
# Shield kwargs from being coerced into JAX arrays.
# jax.pure_callback calls jax.jit under the hood, but without the chance of
# passing static_argnames / static_argnums.
wrapped = _lazy_apply_wrapper(
partial(func, **kwargs), as_numpy, multi_output, xp
)
# suppress unused-ignore to run mypy in -e lint as well as -e dev
out = cast( # type: ignore[bad-cast,unused-ignore]
tuple[Array, ...],
jax.pure_callback(
wrapped,
tuple(
jax.ShapeDtypeStruct(shape, dtype) # pyright: ignore[reportUnknownArgumentType]
for shape, dtype in zip(shapes, dtypes, strict=True)
),
*args,
),
)
else:
# Eager backends, including non-jitted JAX
wrapped = _lazy_apply_wrapper(func, as_numpy, multi_output, xp)
out = wrapped(*args, **kwargs)
return out if multi_output else out[0]
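# Illustrative sketch (not part of the original module): wrapping a NumPy-only
# callable with `lazy_apply`. The helper name and input data are hypothetical;
# with eager NumPy inputs the wrapped function simply runs immediately, while on
# Dask or jitted JAX the same call would be deferred.
def _example_lazy_apply():  # pragma: no cover
    import numpy as np
    def eager_median(a):  # stand-in for a NumPy-only function (e.g. Cython/Numba)
        return np.median(a, axis=-1)
    x = np.arange(12.0).reshape(3, 4)
    # The output drops the last axis, so the expected shape is passed explicitly.
    return lazy_apply(eager_median, x, shape=(3,), dtype=x.dtype, as_numpy=True)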
def _is_jax_jit_enabled(xp: ModuleType) -> bool: # numpydoc ignore=PR01,RT01
"""Return True if this function is being called inside ``jax.jit``."""
import jax # pylint: disable=import-outside-toplevel
x = xp.asarray(False)
try:
return bool(x)
except jax.errors.TracerBoolConversionError:
return True
def _lazy_apply_wrapper( # numpydoc ignore=PR01,RT01
func: Callable[..., Array | ArrayLike | Sequence[Array | ArrayLike]],
as_numpy: bool,
multi_output: bool,
xp: ModuleType,
) -> Callable[..., tuple[Array, ...]]:
"""
Helper of `lazy_apply`.
Given a function that accepts one or more arrays as positional arguments and returns
a single array-like or a sequence of array-likes, return a function that accepts the
same number of Array API arrays and always returns a tuple of Array API arrays.
Any keyword arguments are passed through verbatim to the wrapped function.
"""
# On Dask, @wraps causes the graph key to contain the wrapped function's name
@wraps(func)
def wrapper(
*args: Array | complex | None, **kwargs: Any
) -> tuple[Array, ...]: # numpydoc ignore=GL08
args_list = []
device = None
for arg in args:
if arg is not None and not is_python_scalar(arg):
if device is None:
device = _compat.device(arg)
if as_numpy:
import numpy as np
arg = cast(Array, np.asarray(arg)) # pyright: ignore[reportInvalidCast] # noqa: PLW2901
args_list.append(arg)
assert device is not None
out = func(*args_list, **kwargs)
if multi_output:
assert isinstance(out, Sequence)
return tuple(xp.asarray(o, device=device) for o in out)
return (xp.asarray(out, device=device),)
return wrapper
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_extra/_lib/_funcs.py | sklearn/externals/array_api_extra/_lib/_funcs.py | """Array-agnostic implementations for the public API."""
import math
import warnings
from collections.abc import Callable, Sequence
from types import ModuleType, NoneType
from typing import Literal, cast, overload
from ._at import at
from ._utils import _compat, _helpers
from ._utils._compat import array_namespace, is_dask_namespace, is_jax_array
from ._utils._helpers import (
asarrays,
capabilities,
eager_shape,
meta_namespace,
ndindex,
)
from ._utils._typing import Array, Device, DType
__all__ = [
"apply_where",
"atleast_nd",
"broadcast_shapes",
"cov",
"create_diagonal",
"expand_dims",
"kron",
"nunique",
"pad",
"setdiff1d",
"sinc",
]
@overload
def apply_where( # numpydoc ignore=GL08
cond: Array,
args: Array | tuple[Array, ...],
f1: Callable[..., Array],
f2: Callable[..., Array],
/,
*,
xp: ModuleType | None = None,
) -> Array: ...
@overload
def apply_where( # numpydoc ignore=GL08
cond: Array,
args: Array | tuple[Array, ...],
f1: Callable[..., Array],
/,
*,
fill_value: Array | complex,
xp: ModuleType | None = None,
) -> Array: ...
def apply_where( # numpydoc ignore=PR01,PR02
cond: Array,
args: Array | tuple[Array, ...],
f1: Callable[..., Array],
f2: Callable[..., Array] | None = None,
/,
*,
fill_value: Array | complex | None = None,
xp: ModuleType | None = None,
) -> Array:
"""
Run one of two elementwise functions depending on a condition.
Equivalent to ``f1(*args) if cond else fill_value`` performed elementwise
when `fill_value` is defined, otherwise to ``f1(*args) if cond else f2(*args)``.
Parameters
----------
cond : array
The condition, expressed as a boolean array.
args : Array or tuple of Arrays
Argument(s) to `f1` (and `f2`). Must be broadcastable with `cond`.
f1 : callable
Elementwise function of `args`, returning a single array.
Where `cond` is True, output will be ``f1(arg0[cond], arg1[cond], ...)``.
f2 : callable, optional
Elementwise function of `args`, returning a single array.
Where `cond` is False, output will be ``f2(arg0[cond], arg1[cond], ...)``.
Mutually exclusive with `fill_value`.
fill_value : Array or scalar, optional
If provided, value with which to fill output array where `cond` is False.
It does not need to be a scalar; it does, however, need to be broadcastable with
`cond` and `args`.
Mutually exclusive with `f2`. You must provide one or the other.
xp : array_namespace, optional
The standard-compatible namespace for `cond` and `args`. Default: infer.
Returns
-------
Array
An array with elements from the output of `f1` where `cond` is True and either
the output of `f2` or `fill_value` where `cond` is False. The returned array has
data type determined by type promotion rules between the output of `f1` and
either `fill_value` or the output of `f2`.
Notes
-----
``xp.where(cond, f1(*args), f2(*args))`` requires explicitly evaluating `f1` even
where `cond` is False, and `f2` where `cond` is True. This function evaluates each
function only where its condition is met, if the backend allows for it.
On Dask, `f1` and `f2` are applied to the individual chunks and should use functions
from the namespace of the chunks.
Examples
--------
>>> import array_api_strict as xp
>>> import array_api_extra as xpx
>>> a = xp.asarray([5, 4, 3])
>>> b = xp.asarray([0, 2, 2])
>>> def f(a, b):
... return a // b
>>> xpx.apply_where(b != 0, (a, b), f, fill_value=xp.nan)
array([ nan, 2., 1.])
"""
# Parse and normalize arguments
if (f2 is None) == (fill_value is None):
msg = "Exactly one of `fill_value` or `f2` must be given."
raise TypeError(msg)
args_ = list(args) if isinstance(args, tuple) else [args]
del args
xp = array_namespace(cond, fill_value, *args_) if xp is None else xp
if isinstance(fill_value, int | float | complex | NoneType):
cond, *args_ = xp.broadcast_arrays(cond, *args_)
else:
cond, fill_value, *args_ = xp.broadcast_arrays(cond, fill_value, *args_)
if is_dask_namespace(xp):
meta_xp = meta_namespace(cond, fill_value, *args_, xp=xp)
# map_blocks doesn't descend into tuples of Arrays
return xp.map_blocks(_apply_where, cond, f1, f2, fill_value, *args_, xp=meta_xp)
return _apply_where(cond, f1, f2, fill_value, *args_, xp=xp)
def _apply_where( # numpydoc ignore=PR01,RT01
cond: Array,
f1: Callable[..., Array],
f2: Callable[..., Array] | None,
fill_value: Array | int | float | complex | bool | None,
*args: Array,
xp: ModuleType,
) -> Array:
"""Helper of `apply_where`. On Dask, this runs on a single chunk."""
if not capabilities(xp, device=_compat.device(cond))["boolean indexing"]:
# jax.jit does not support assignment by boolean mask
return xp.where(cond, f1(*args), f2(*args) if f2 is not None else fill_value)
temp1 = f1(*(arr[cond] for arr in args))
if f2 is None:
dtype = xp.result_type(temp1, fill_value)
if isinstance(fill_value, int | float | complex):
out = xp.full_like(cond, dtype=dtype, fill_value=fill_value)
else:
out = xp.astype(fill_value, dtype, copy=True)
else:
ncond = ~cond
temp2 = f2(*(arr[ncond] for arr in args))
dtype = xp.result_type(temp1, temp2)
out = xp.empty_like(cond, dtype=dtype)
out = at(out, ncond).set(temp2)
return at(out, cond).set(temp1)
def atleast_nd(x: Array, /, *, ndim: int, xp: ModuleType | None = None) -> Array:
"""
Recursively expand the dimension of an array to at least `ndim`.
Parameters
----------
x : array
Input array.
ndim : int
The minimum number of dimensions for the result.
xp : array_namespace, optional
The standard-compatible namespace for `x`. Default: infer.
Returns
-------
array
An array with ``res.ndim`` >= `ndim`.
If ``x.ndim`` >= `ndim`, `x` is returned.
If ``x.ndim`` < `ndim`, `x` is expanded by prepending new axes
until ``res.ndim`` equals `ndim`.
Examples
--------
>>> import array_api_strict as xp
>>> import array_api_extra as xpx
>>> x = xp.asarray([1])
>>> xpx.atleast_nd(x, ndim=3, xp=xp)
Array([[[1]]], dtype=array_api_strict.int64)
>>> x = xp.asarray([[[1, 2],
... [3, 4]]])
>>> xpx.atleast_nd(x, ndim=1, xp=xp) is x
True
"""
if xp is None:
xp = array_namespace(x)
if x.ndim < ndim:
x = xp.expand_dims(x, axis=0)
x = atleast_nd(x, ndim=ndim, xp=xp)
return x
# `float` in signature to accept `math.nan` for Dask.
# `int`s are still accepted because type checkers treat `int` as compatible with `float`.
def broadcast_shapes(*shapes: tuple[float | None, ...]) -> tuple[int | None, ...]:
"""
Compute the shape of the broadcasted arrays.
Duplicates :func:`numpy.broadcast_shapes`, with additional support for
None and NaN sizes.
This is equivalent to ``xp.broadcast_arrays(arr1, arr2, ...)[0].shape``
without needing to worry about the backend potentially deep copying
the arrays.
Parameters
----------
*shapes : tuple[int | None, ...]
Shapes of the arrays to broadcast.
Returns
-------
tuple[int | None, ...]
The shape of the broadcasted arrays.
See Also
--------
numpy.broadcast_shapes : Equivalent NumPy function.
array_api.broadcast_arrays : Function to broadcast actual arrays.
Notes
-----
This function accepts the Array API's ``None`` for unknown sizes,
as well as Dask's non-standard ``math.nan``.
Regardless of input, the output always contains ``None`` for unknown sizes.
Examples
--------
>>> import array_api_extra as xpx
>>> xpx.broadcast_shapes((2, 3), (2, 1))
(2, 3)
>>> xpx.broadcast_shapes((4, 2, 3), (2, 1), (1, 3))
(4, 2, 3)
"""
if not shapes:
return () # Match NumPy output
ndim = max(len(shape) for shape in shapes)
out: list[int | None] = []
for axis in range(-ndim, 0):
sizes = {shape[axis] for shape in shapes if axis >= -len(shape)}
# Dask uses NaN for unknown shape, which predates the Array API spec for None
none_size = None in sizes or math.nan in sizes # noqa: PLW0177
sizes -= {1, None, math.nan}
if len(sizes) > 1:
msg = (
"shape mismatch: objects cannot be broadcast to a single shape: "
f"{shapes}."
)
raise ValueError(msg)
out.append(None if none_size else cast(int, sizes.pop()) if sizes else 1)
return tuple(out)
def cov(m: Array, /, *, xp: ModuleType | None = None) -> Array:
"""
Estimate a covariance matrix.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element :math:`C_{ij}` is the covariance of
:math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
of :math:`x_i`.
This provides a subset of the functionality of ``numpy.cov``.
Parameters
----------
m : array
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables.
xp : array_namespace, optional
The standard-compatible namespace for `m`. Default: infer.
Returns
-------
array
The covariance matrix of the variables.
Examples
--------
>>> import array_api_strict as xp
>>> import array_api_extra as xpx
Consider two variables, :math:`x_0` and :math:`x_1`, which
correlate perfectly, but in opposite directions:
>>> x = xp.asarray([[0, 2], [1, 1], [2, 0]]).T
>>> x
Array([[0, 1, 2],
[2, 1, 0]], dtype=array_api_strict.int64)
Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
matrix shows this clearly:
>>> xpx.cov(x, xp=xp)
Array([[ 1., -1.],
[-1., 1.]], dtype=array_api_strict.float64)
Note that element :math:`C_{0,1}`, which shows the correlation between
:math:`x_0` and :math:`x_1`, is negative.
Further, note how `x` and `y` are combined:
>>> x = xp.asarray([-2.1, -1, 4.3])
>>> y = xp.asarray([3, 1.1, 0.12])
>>> X = xp.stack((x, y), axis=0)
>>> xpx.cov(X, xp=xp)
Array([[11.71 , -4.286 ],
[-4.286 , 2.14413333]], dtype=array_api_strict.float64)
>>> xpx.cov(x, xp=xp)
Array(11.71, dtype=array_api_strict.float64)
>>> xpx.cov(y, xp=xp)
Array(2.14413333, dtype=array_api_strict.float64)
"""
if xp is None:
xp = array_namespace(m)
m = xp.asarray(m, copy=True)
dtype = (
xp.float64 if xp.isdtype(m.dtype, "integral") else xp.result_type(m, xp.float64)
)
m = atleast_nd(m, ndim=2, xp=xp)
m = xp.astype(m, dtype)
avg = _helpers.mean(m, axis=1, xp=xp)
m_shape = eager_shape(m)
fact = m_shape[1] - 1
if fact <= 0:
warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning, stacklevel=2)
fact = 0
m -= avg[:, None]
m_transpose = m.T
if xp.isdtype(m_transpose.dtype, "complex floating"):
m_transpose = xp.conj(m_transpose)
c = m @ m_transpose
c /= fact
axes = tuple(axis for axis, length in enumerate(c.shape) if length == 1)
return xp.squeeze(c, axis=axes)
def one_hot(
x: Array,
/,
num_classes: int,
*,
xp: ModuleType,
) -> Array: # numpydoc ignore=PR01,RT01
"""See docstring in `array_api_extra._delegation.py`."""
# TODO: Benchmark whether this is faster on the NumPy backend:
# if is_numpy_array(x):
# out = xp.zeros((x.size, num_classes), dtype=dtype)
# out[xp.arange(x.size), xp.reshape(x, (-1,))] = 1
# return xp.reshape(out, (*x.shape, num_classes))
range_num_classes = xp.arange(num_classes, dtype=x.dtype, device=_compat.device(x))
return x[..., xp.newaxis] == range_num_classes
def create_diagonal(
x: Array, /, *, offset: int = 0, xp: ModuleType | None = None
) -> Array:
"""
Construct a diagonal array.
Parameters
----------
x : array
An array having shape ``(*batch_dims, k)``.
offset : int, optional
Offset from the leading diagonal (default is ``0``).
Use positive ints for diagonals above the leading diagonal,
and negative ints for diagonals below the leading diagonal.
xp : array_namespace, optional
The standard-compatible namespace for `x`. Default: infer.
Returns
-------
array
An array having shape ``(*batch_dims, k+abs(offset), k+abs(offset))`` with `x`
on the diagonal (offset by `offset`).
Examples
--------
>>> import array_api_strict as xp
>>> import array_api_extra as xpx
>>> x = xp.asarray([2, 4, 8])
>>> xpx.create_diagonal(x, xp=xp)
Array([[2, 0, 0],
[0, 4, 0],
[0, 0, 8]], dtype=array_api_strict.int64)
>>> xpx.create_diagonal(x, offset=-2, xp=xp)
Array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[2, 0, 0, 0, 0],
[0, 4, 0, 0, 0],
[0, 0, 8, 0, 0]], dtype=array_api_strict.int64)
"""
if xp is None:
xp = array_namespace(x)
if x.ndim == 0:
err_msg = "`x` must be at least 1-dimensional."
raise ValueError(err_msg)
x_shape = eager_shape(x)
batch_dims = x_shape[:-1]
n = x_shape[-1] + abs(offset)
diag = xp.zeros((*batch_dims, n**2), dtype=x.dtype, device=_compat.device(x))
target_slice = slice(
offset if offset >= 0 else abs(offset) * n,
min(n * (n - offset), diag.shape[-1]),
n + 1,
)
for index in ndindex(*batch_dims):
diag = at(diag)[(*index, target_slice)].set(x[(*index, slice(None))])
return xp.reshape(diag, (*batch_dims, n, n))
def default_dtype(
xp: ModuleType,
kind: Literal[
"real floating", "complex floating", "integral", "indexing"
] = "real floating",
*,
device: Device | None = None,
) -> DType:
"""
Return the default dtype for the given namespace and device.
This is a convenience shorthand for
``xp.__array_namespace_info__().default_dtypes(device=device)[kind]``.
Parameters
----------
xp : array_namespace
The standard-compatible namespace for which to get the default dtype.
kind : {'real floating', 'complex floating', 'integral', 'indexing'}, optional
The kind of dtype to return. Default is 'real floating'.
device : Device, optional
The device for which to get the default dtype. Default: current device.
Returns
-------
dtype
The default dtype for the given namespace, kind, and device.
"""
dtypes = xp.__array_namespace_info__().default_dtypes(device=device)
try:
return dtypes[kind]
except KeyError as e:
domain = ("real floating", "complex floating", "integral", "indexing")
assert set(dtypes) == set(domain), f"Non-compliant namespace: {dtypes}"
msg = f"Unknown kind '{kind}'. Expected one of {domain}."
raise ValueError(msg) from e
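# Minimal sketch (assumes NumPy >= 2.0, which exposes the array API inspection
# utilities used above): query the default real-floating dtype of a namespace.
def _example_default_dtype():  # pragma: no cover
    import numpy as np
    return default_dtype(np, "real floating")  # float64 on most platforms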
def expand_dims(
a: Array, /, *, axis: int | tuple[int, ...] = (0,), xp: ModuleType | None = None
) -> Array:
"""
Expand the shape of an array.
Insert (a) new axis/axes that will appear at the position(s) specified by
`axis` in the expanded array shape.
This is ``xp.expand_dims`` for `axis` an int *or a tuple of ints*.
Roughly equivalent to ``numpy.expand_dims`` for NumPy arrays.
Parameters
----------
a : array
Array to have its shape expanded.
axis : int or tuple of ints, optional
Position(s) in the expanded axes where the new axis (or axes) is/are placed.
If multiple positions are provided, they should be unique (note that a position
given by a positive index could also be referred to by a negative index -
that will also result in an error).
Default: ``(0,)``.
xp : array_namespace, optional
The standard-compatible namespace for `a`. Default: infer.
Returns
-------
array
`a` with an expanded shape.
Examples
--------
>>> import array_api_strict as xp
>>> import array_api_extra as xpx
>>> x = xp.asarray([1, 2])
>>> x.shape
(2,)
The following is equivalent to ``x[xp.newaxis, :]`` or ``x[xp.newaxis]``:
>>> y = xpx.expand_dims(x, axis=0, xp=xp)
>>> y
Array([[1, 2]], dtype=array_api_strict.int64)
>>> y.shape
(1, 2)
The following is equivalent to ``x[:, xp.newaxis]``:
>>> y = xpx.expand_dims(x, axis=1, xp=xp)
>>> y
Array([[1],
[2]], dtype=array_api_strict.int64)
>>> y.shape
(2, 1)
``axis`` may also be a tuple:
>>> y = xpx.expand_dims(x, axis=(0, 1), xp=xp)
>>> y
Array([[[1, 2]]], dtype=array_api_strict.int64)
>>> y = xpx.expand_dims(x, axis=(2, 0), xp=xp)
>>> y
Array([[[1],
[2]]], dtype=array_api_strict.int64)
"""
if xp is None:
xp = array_namespace(a)
if not isinstance(axis, tuple):
axis = (axis,)
ndim = a.ndim + len(axis)
if axis != () and (min(axis) < -ndim or max(axis) >= ndim):
err_msg = (
f"a provided axis position is out of bounds for array of dimension {a.ndim}"
)
raise IndexError(err_msg)
axis = tuple(dim % ndim for dim in axis)
if len(set(axis)) != len(axis):
err_msg = "Duplicate dimensions specified in `axis`."
raise ValueError(err_msg)
for i in sorted(axis):
a = xp.expand_dims(a, axis=i)
return a
def isclose(
a: Array | complex,
b: Array | complex,
*,
rtol: float = 1e-05,
atol: float = 1e-08,
equal_nan: bool = False,
xp: ModuleType,
) -> Array: # numpydoc ignore=PR01,RT01
"""See docstring in array_api_extra._delegation."""
a, b = asarrays(a, b, xp=xp)
a_inexact = xp.isdtype(a.dtype, ("real floating", "complex floating"))
b_inexact = xp.isdtype(b.dtype, ("real floating", "complex floating"))
if a_inexact or b_inexact:
# prevent warnings on NumPy and Dask on inf - inf
mxp = meta_namespace(a, b, xp=xp)
out = apply_where(
xp.isinf(a) | xp.isinf(b),
(a, b),
lambda a, b: mxp.isinf(a) & mxp.isinf(b) & (mxp.sign(a) == mxp.sign(b)), # pyright: ignore[reportUnknownArgumentType]
# Note: inf <= inf is True!
lambda a, b: mxp.abs(a - b) <= (atol + rtol * mxp.abs(b)), # pyright: ignore[reportUnknownArgumentType]
xp=xp,
)
if equal_nan:
out = xp.where(xp.isnan(a) & xp.isnan(b), True, out)
return out
if xp.isdtype(a.dtype, "bool") or xp.isdtype(b.dtype, "bool"):
if atol >= 1 or rtol >= 1:
return xp.ones_like(a == b)
return a == b
# integer types
atol = int(atol)
if rtol == 0:
return xp.abs(a - b) <= atol
# Don't rely on OverflowError, as it is not guaranteed by the Array API.
nrtol = int(1.0 / rtol)
if nrtol > xp.iinfo(b.dtype).max:
# rtol * max_int < 1, so it's inconsequential
return xp.abs(a - b) <= atol
return xp.abs(a - b) <= (atol + xp.abs(b) // nrtol)
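# Sketch of the integer code path above (assumes NumPy >= 2.0 passed explicitly
# as `xp`, since this private helper always requires the namespace argument).
def _example_isclose_int():  # pragma: no cover
    import numpy as np
    a = np.asarray([100, 200])
    b = np.asarray([101, 205])
    return isclose(a, b, rtol=0, atol=2, xp=np)  # -> array([True, False])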
def kron(
a: Array | complex,
b: Array | complex,
/,
*,
xp: ModuleType | None = None,
) -> Array:
"""
Kronecker product of two arrays.
Computes the Kronecker product, a composite array made of blocks of the
second array scaled by the first.
Equivalent to ``numpy.kron`` for NumPy arrays.
Parameters
----------
a, b : Array | int | float | complex
Input arrays or scalars. At least one must be an array.
xp : array_namespace, optional
The standard-compatible namespace for `a` and `b`. Default: infer.
Returns
-------
array
The Kronecker product of `a` and `b`.
Notes
-----
The function assumes that the number of dimensions of `a` and `b`
are the same, if necessary prepending the smallest with ones.
If ``a.shape = (r0,r1,...,rN)`` and ``b.shape = (s0,s1,...,sN)``,
the Kronecker product has shape ``(r0*s0, r1*s1, ..., rN*sN)``.
The elements are products of elements from `a` and `b`, organized
explicitly by::
kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN]
where::
kt = it * st + jt, t = 0,...,N
In the common 2-D case (N=1), the block structure can be visualized::
[[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ],
[ ... ... ],
[ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]]
Examples
--------
>>> import array_api_strict as xp
>>> import array_api_extra as xpx
>>> xpx.kron(xp.asarray([1, 10, 100]), xp.asarray([5, 6, 7]), xp=xp)
Array([ 5, 6, 7, 50, 60, 70, 500,
600, 700], dtype=array_api_strict.int64)
>>> xpx.kron(xp.asarray([5, 6, 7]), xp.asarray([1, 10, 100]), xp=xp)
Array([ 5, 50, 500, 6, 60, 600, 7,
70, 700], dtype=array_api_strict.int64)
>>> xpx.kron(xp.eye(2), xp.ones((2, 2)), xp=xp)
Array([[1., 1., 0., 0.],
[1., 1., 0., 0.],
[0., 0., 1., 1.],
[0., 0., 1., 1.]], dtype=array_api_strict.float64)
>>> a = xp.reshape(xp.arange(100), (2, 5, 2, 5))
>>> b = xp.reshape(xp.arange(24), (2, 3, 4))
>>> c = xpx.kron(a, b, xp=xp)
>>> c.shape
(2, 10, 6, 20)
>>> I = (1, 3, 0, 2)
>>> J = (0, 2, 1)
>>> J1 = (0,) + J # extend to ndim=4
>>> S1 = (1,) + b.shape
>>> K = tuple(xp.asarray(I) * xp.asarray(S1) + xp.asarray(J1))
>>> c[K] == a[I]*b[J]
Array(True, dtype=array_api_strict.bool)
"""
if xp is None:
xp = array_namespace(a, b)
a, b = asarrays(a, b, xp=xp)
singletons = (1,) * (b.ndim - a.ndim)
a = cast(Array, xp.broadcast_to(a, singletons + a.shape))
nd_b, nd_a = b.ndim, a.ndim
nd_max = max(nd_b, nd_a)
if nd_a == 0 or nd_b == 0:
return xp.multiply(a, b)
a_shape = eager_shape(a)
b_shape = eager_shape(b)
# Equalise the shapes by prepending smaller one with 1s
a_shape = (1,) * max(0, nd_b - nd_a) + a_shape
b_shape = (1,) * max(0, nd_a - nd_b) + b_shape
# Insert empty dimensions
a_arr = expand_dims(a, axis=tuple(range(nd_b - nd_a)), xp=xp)
b_arr = expand_dims(b, axis=tuple(range(nd_a - nd_b)), xp=xp)
# Compute the product
a_arr = expand_dims(a_arr, axis=tuple(range(1, nd_max * 2, 2)), xp=xp)
b_arr = expand_dims(b_arr, axis=tuple(range(0, nd_max * 2, 2)), xp=xp)
result = xp.multiply(a_arr, b_arr)
# Reshape back and return
res_shape = tuple(a_s * b_s for a_s, b_s in zip(a_shape, b_shape, strict=True))
return xp.reshape(result, res_shape)
def nan_to_num( # numpydoc ignore=PR01,RT01
x: Array,
/,
fill_value: int | float = 0.0,
*,
xp: ModuleType,
) -> Array:
"""See docstring in `array_api_extra._delegation.py`."""
def perform_replacements( # numpydoc ignore=PR01,RT01
x: Array,
fill_value: int | float,
xp: ModuleType,
) -> Array:
"""Internal function to perform the replacements."""
x = xp.where(xp.isnan(x), fill_value, x)
# convert infinities to finite values
finfo = xp.finfo(x.dtype)
idx_posinf = xp.isinf(x) & ~xp.signbit(x)
idx_neginf = xp.isinf(x) & xp.signbit(x)
x = xp.where(idx_posinf, finfo.max, x)
return xp.where(idx_neginf, finfo.min, x)
if xp.isdtype(x.dtype, "complex floating"):
return perform_replacements(
xp.real(x),
fill_value,
xp,
) + 1j * perform_replacements(
xp.imag(x),
fill_value,
xp,
)
if xp.isdtype(x.dtype, "numeric"):
return perform_replacements(x, fill_value, xp)
return x
def nunique(x: Array, /, *, xp: ModuleType | None = None) -> Array:
"""
Count the number of unique elements in an array.
Compatible with JAX and Dask, whose laziness would be otherwise
problematic.
Parameters
----------
x : Array
Input array.
xp : array_namespace, optional
The standard-compatible namespace for `x`. Default: infer.
Returns
-------
array: 0-dimensional integer array
The number of unique elements in `x`. It can be lazy.
"""
if xp is None:
xp = array_namespace(x)
if is_jax_array(x):
# size= is JAX-specific
# https://github.com/data-apis/array-api/issues/883
_, counts = xp.unique_counts(x, size=_compat.size(x))
return (counts > 0).sum()
# There are 3 general use cases:
# 1. backend has unique_counts and it returns an array with known shape
# 2. backend has unique_counts and it returns a None-sized array;
# e.g. Dask, ndonnx
# 3. backend does not have unique_counts; e.g. wrapped JAX
if capabilities(xp, device=_compat.device(x))["data-dependent shapes"]:
# xp has unique_counts; O(n) complexity
_, counts = xp.unique_counts(x)
n = _compat.size(counts)
if n is None:
return xp.sum(xp.ones_like(counts))
return xp.asarray(n, device=_compat.device(x))
# xp does not have unique_counts; O(n*logn) complexity
x = xp.reshape(x, (-1,))
x = xp.sort(x)
mask = x != xp.roll(x, -1)
default_int = default_dtype(xp, "integral", device=_compat.device(x))
return xp.maximum(
# Special cases:
# - array is size 0
# - array has all elements equal to each other
xp.astype(xp.any(~mask), default_int),
xp.sum(xp.astype(mask, default_int)),
)
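# Illustrative sketch (plain, eager NumPy input): `nunique` returns a
# 0-dimensional array rather than a Python int, so the same call also composes
# with lazy backends such as Dask or jitted JAX.
def _example_nunique():  # pragma: no cover
    import numpy as np
    x = np.asarray([1, 1, 2, 3, 3, 3])
    return nunique(x)  # 0-d array containing 3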
def pad(
x: Array,
pad_width: int | tuple[int, int] | Sequence[tuple[int, int]],
*,
constant_values: complex = 0,
xp: ModuleType,
) -> Array: # numpydoc ignore=PR01,RT01
"""See docstring in `array_api_extra._delegation.py`."""
# make pad_width a list of length-2 tuples of ints
if isinstance(pad_width, int):
pad_width_seq = [(pad_width, pad_width)] * x.ndim
elif (
isinstance(pad_width, tuple)
and len(pad_width) == 2
and all(isinstance(i, int) for i in pad_width)
):
pad_width_seq = [cast(tuple[int, int], pad_width)] * x.ndim
else:
pad_width_seq = cast(list[tuple[int, int]], list(pad_width))
slices: list[slice] = []
newshape: list[int] = []
for ax, w_tpl in enumerate(pad_width_seq):
if len(w_tpl) != 2:
msg = f"expect a 2-tuple (before, after), got {w_tpl}."
raise ValueError(msg)
sh = eager_shape(x)[ax]
if w_tpl[0] == 0 and w_tpl[1] == 0:
sl = slice(None, None, None)
else:
stop: int | None
start, stop = w_tpl
stop = None if stop == 0 else -stop
sl = slice(start, stop, None)
sh += w_tpl[0] + w_tpl[1]
newshape.append(sh)
slices.append(sl)
padded = xp.full(
tuple(newshape),
fill_value=constant_values,
dtype=x.dtype,
device=_compat.device(x),
)
return at(padded, tuple(slices)).set(x)
def setdiff1d(
x1: Array | complex,
x2: Array | complex,
/,
*,
assume_unique: bool = False,
xp: ModuleType | None = None,
) -> Array:
"""
Find the set difference of two arrays.
Return the unique values in `x1` that are not in `x2`.
Parameters
----------
x1 : array | int | float | complex | bool
Input array.
x2 : array
Input comparison array.
assume_unique : bool
If ``True``, the input arrays are both assumed to be unique, which
can speed up the calculation. Default is ``False``.
xp : array_namespace, optional
The standard-compatible namespace for `x1` and `x2`. Default: infer.
Returns
-------
array
1D array of values in `x1` that are not in `x2`. The result
is sorted when `assume_unique` is ``False``, but otherwise only sorted
if the input is sorted.
Examples
--------
>>> import array_api_strict as xp
>>> import array_api_extra as xpx
>>> x1 = xp.asarray([1, 2, 3, 2, 4, 1])
>>> x2 = xp.asarray([3, 4, 5, 6])
>>> xpx.setdiff1d(x1, x2, xp=xp)
Array([1, 2], dtype=array_api_strict.int64)
"""
if xp is None:
xp = array_namespace(x1, x2)
# https://github.com/microsoft/pyright/issues/10103
x1_, x2_ = asarrays(x1, x2, xp=xp)
if assume_unique:
x1_ = xp.reshape(x1_, (-1,))
x2_ = xp.reshape(x2_, (-1,))
else:
x1_ = xp.unique_values(x1_)
x2_ = xp.unique_values(x2_)
return x1_[_helpers.in1d(x1_, x2_, assume_unique=True, invert=True, xp=xp)]
def sinc(x: Array, /, *, xp: ModuleType | None = None) -> Array:
r"""
Return the normalized sinc function.
The sinc function is equal to :math:`\sin(\pi x)/(\pi x)` for any argument
:math:`x\ne 0`. ``sinc(0)`` takes the limit value 1, making ``sinc`` not
only everywhere continuous but also infinitely differentiable.
.. note::
Note the normalization factor of ``pi`` used in the definition.
This is the most commonly used definition in signal processing.
Use ``sinc(x / xp.pi)`` to obtain the unnormalized sinc function
:math:`\sin(x)/x` that is more common in mathematics.
Parameters
----------
x : array
Array (possibly multi-dimensional) of values for which to calculate
``sinc(x)``. Must have a real floating point dtype.
xp : array_namespace, optional
The standard-compatible namespace for `x`. Default: infer.
Returns
-------
array
``sinc(x)`` calculated elementwise, which has the same shape as the input.
Notes
-----
The name sinc is short for "sine cardinal" or "sinus cardinalis".
The sinc function is used in various signal processing applications,
including in anti-aliasing, in the construction of a Lanczos resampling
filter, and in interpolation.
For bandlimited interpolation of discrete-time signals, the ideal
interpolation kernel is proportional to the sinc function.
References
----------
#. Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
Resource. https://mathworld.wolfram.com/SincFunction.html
#. Wikipedia, "Sinc function",
https://en.wikipedia.org/wiki/Sinc_function
Examples
--------
>>> import array_api_strict as xp
>>> import array_api_extra as xpx
>>> x = xp.linspace(-4, 4, 41)
>>> xpx.sinc(x, xp=xp)
Array([-3.89817183e-17, -4.92362781e-02,
-8.40918587e-02, -8.90384387e-02,
-5.84680802e-02, 3.89817183e-17,
6.68206631e-02, 1.16434881e-01,
1.26137788e-01, 8.50444803e-02,
-3.89817183e-17, -1.03943254e-01,
-1.89206682e-01, -2.16236208e-01,
-1.55914881e-01, 3.89817183e-17,
2.33872321e-01, 5.04551152e-01,
7.56826729e-01, 9.35489284e-01,
1.00000000e+00, 9.35489284e-01,
7.56826729e-01, 5.04551152e-01,
2.33872321e-01, 3.89817183e-17,
-1.55914881e-01, -2.16236208e-01,
-1.89206682e-01, -1.03943254e-01,
-3.89817183e-17, 8.50444803e-02,
1.26137788e-01, 1.16434881e-01,
6.68206631e-02, 3.89817183e-17,
-5.84680802e-02, -8.90384387e-02,
-8.40918587e-02, -4.92362781e-02,
-3.89817183e-17], dtype=array_api_strict.float64)
"""
if xp is None:
xp = array_namespace(x)
if not xp.isdtype(x.dtype, "real floating"):
err_msg = "`x` must have a real floating data type."
raise ValueError(err_msg)
# no scalars in `where` - array-api#807
y = xp.pi * xp.where(
xp.astype(x, xp.bool),
x,
xp.asarray(xp.finfo(x.dtype).eps, dtype=x.dtype, device=_compat.device(x)),
)
return xp.sin(y) / y
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_extra/_lib/_testing.py | sklearn/externals/array_api_extra/_lib/_testing.py | """
Testing utilities.
Note that this is private API; don't expect it to be stable.
See also ..testing for public testing utilities.
"""
from __future__ import annotations
import math
from types import ModuleType
from typing import Any, cast
import numpy as np
import pytest
from ._utils._compat import (
array_namespace,
is_array_api_strict_namespace,
is_cupy_namespace,
is_dask_namespace,
is_jax_namespace,
is_numpy_namespace,
is_pydata_sparse_namespace,
is_torch_array,
is_torch_namespace,
to_device,
)
from ._utils._typing import Array, Device
__all__ = ["as_numpy_array", "xp_assert_close", "xp_assert_equal", "xp_assert_less"]
def _check_ns_shape_dtype(
actual: Array,
desired: Array,
check_dtype: bool,
check_shape: bool,
check_scalar: bool,
) -> ModuleType: # numpydoc ignore=RT03
"""
Assert that namespace, shape and dtype of the two arrays match.
Parameters
----------
actual : Array
The array produced by the tested function.
desired : Array
The expected array (typically hardcoded).
check_dtype, check_shape : bool, default: True
Whether to check agreement between actual and desired dtypes and shapes
check_scalar : bool, default: False
NumPy only: whether to check agreement between actual and desired types -
0d array vs scalar.
Returns
-------
Arrays namespace.
"""
actual_xp = array_namespace(actual) # Raises on scalars and lists
desired_xp = array_namespace(desired)
msg = f"namespaces do not match: {actual_xp} != f{desired_xp}"
assert actual_xp == desired_xp, msg
# Dask uses nan instead of None for unknown shapes
actual_shape = cast(tuple[float, ...], actual.shape)
desired_shape = cast(tuple[float, ...], desired.shape)
assert None not in actual_shape # Requires explicit support
assert None not in desired_shape
if is_dask_namespace(desired_xp):
if any(math.isnan(i) for i in actual_shape):
actual_shape = actual.compute().shape # type: ignore[attr-defined] # pyright: ignore[reportAttributeAccessIssue]
if any(math.isnan(i) for i in desired_shape):
desired_shape = desired.compute().shape # type: ignore[attr-defined] # pyright: ignore[reportAttributeAccessIssue]
if check_shape:
msg = f"shapes do not match: {actual_shape} != f{desired_shape}"
assert actual_shape == desired_shape, msg
else:
# Ignore shape, but check flattened size. This is normally done by
# np.testing.assert_array_equal etc even when strict=False, but not for
# non-materializable arrays.
actual_size = math.prod(actual_shape) # pyright: ignore[reportUnknownArgumentType]
desired_size = math.prod(desired_shape) # pyright: ignore[reportUnknownArgumentType]
msg = f"sizes do not match: {actual_size} != f{desired_size}"
assert actual_size == desired_size, msg
if check_dtype:
msg = f"dtypes do not match: {actual.dtype} != {desired.dtype}"
assert actual.dtype == desired.dtype, msg
if is_numpy_namespace(actual_xp) and check_scalar:
# only NumPy distinguishes between scalars and arrays; we do if check_scalar.
_msg = (
"array-ness does not match:\n Actual: "
f"{type(actual)}\n Desired: {type(desired)}"
)
assert np.isscalar(actual) == np.isscalar(desired), _msg
return desired_xp
def _is_materializable(x: Array) -> bool:
"""
Return True if you can call `as_numpy_array(x)`; False otherwise.
"""
# Important: here we assume that we're not tracing -
# e.g. we're not inside `jax.jit`` nor `cupy.cuda.Stream.begin_capture`.
return not is_torch_array(x) or x.device.type != "meta" # type: ignore[attr-defined] # pyright: ignore[reportAttributeAccessIssue]
def as_numpy_array(array: Array, *, xp: ModuleType) -> np.typing.NDArray[Any]:
"""
Convert array to NumPy, bypassing GPU-CPU transfer guards and densification guards.
"""
if is_cupy_namespace(xp):
return xp.asnumpy(array)
if is_pydata_sparse_namespace(xp):
return array.todense() # type: ignore[attr-defined] # pyright: ignore[reportAttributeAccessIssue]
if is_torch_namespace(xp):
array = to_device(array, "cpu")
if is_array_api_strict_namespace(xp):
cpu: Device = xp.Device("CPU_DEVICE")
array = to_device(array, cpu)
if is_jax_namespace(xp):
import jax
# Note: only needed if the transfer guard is enabled
cpu = cast(Device, jax.devices("cpu")[0])
array = to_device(array, cpu)
return np.asarray(array)
def xp_assert_equal(
actual: Array,
desired: Array,
*,
err_msg: str = "",
check_dtype: bool = True,
check_shape: bool = True,
check_scalar: bool = False,
) -> None:
"""
Array-API compatible version of `np.testing.assert_array_equal`.
Parameters
----------
actual : Array
The array produced by the tested function.
desired : Array
The expected array (typically hardcoded).
err_msg : str, optional
Error message to display on failure.
check_dtype, check_shape : bool, default: True
Whether to check agreement between actual and desired dtypes and shapes
check_scalar : bool, default: False
NumPy only: whether to check agreement between actual and desired types -
0d array vs scalar.
See Also
--------
xp_assert_close : Similar function for inexact equality checks.
numpy.testing.assert_array_equal : Similar function for NumPy arrays.
"""
xp = _check_ns_shape_dtype(actual, desired, check_dtype, check_shape, check_scalar)
if not _is_materializable(actual):
return
actual_np = as_numpy_array(actual, xp=xp)
desired_np = as_numpy_array(desired, xp=xp)
np.testing.assert_array_equal(actual_np, desired_np, err_msg=err_msg)
def xp_assert_less(
x: Array,
y: Array,
*,
err_msg: str = "",
check_dtype: bool = True,
check_shape: bool = True,
check_scalar: bool = False,
) -> None:
"""
Array-API compatible version of `np.testing.assert_array_less`.
Parameters
----------
x, y : Array
The arrays to compare according to ``x < y`` (elementwise).
err_msg : str, optional
Error message to display on failure.
check_dtype, check_shape : bool, default: True
Whether to check agreement between actual and desired dtypes and shapes
check_scalar : bool, default: False
NumPy only: whether to check agreement between actual and desired types -
0d array vs scalar.
See Also
--------
xp_assert_close : Similar function for inexact equality checks.
numpy.testing.assert_array_equal : Similar function for NumPy arrays.
"""
xp = _check_ns_shape_dtype(x, y, check_dtype, check_shape, check_scalar)
if not _is_materializable(x):
return
x_np = as_numpy_array(x, xp=xp)
y_np = as_numpy_array(y, xp=xp)
np.testing.assert_array_less(x_np, y_np, err_msg=err_msg)
def xp_assert_close(
actual: Array,
desired: Array,
*,
rtol: float | None = None,
atol: float = 0,
err_msg: str = "",
check_dtype: bool = True,
check_shape: bool = True,
check_scalar: bool = False,
) -> None:
"""
Array-API compatible version of `np.testing.assert_allclose`.
Parameters
----------
actual : Array
The array produced by the tested function.
desired : Array
The expected array (typically hardcoded).
rtol : float, optional
Relative tolerance. Default: dtype-dependent.
atol : float, optional
Absolute tolerance. Default: 0.
err_msg : str, optional
Error message to display on failure.
check_dtype, check_shape : bool, default: True
Whether to check agreement between actual and desired dtypes and shapes
check_scalar : bool, default: False
NumPy only: whether to check agreement between actual and desired types -
0d array vs scalar.
See Also
--------
xp_assert_equal : Similar function for exact equality checks.
isclose : Public function for checking closeness.
numpy.testing.assert_allclose : Similar function for NumPy arrays.
Notes
-----
The default `atol` and `rtol` differ from those used by ``xp.all(xpx.isclose(a, b))``.
"""
xp = _check_ns_shape_dtype(actual, desired, check_dtype, check_shape, check_scalar)
if not _is_materializable(actual):
return
if rtol is None:
if xp.isdtype(actual.dtype, ("real floating", "complex floating")):
# multiplier of 4 is used as for `np.float64` this puts the default `rtol`
# roughly half way between sqrt(eps) and the default for
# `numpy.testing.assert_allclose`, 1e-7
rtol = xp.finfo(actual.dtype).eps ** 0.5 * 4
else:
rtol = 1e-7
actual_np = as_numpy_array(actual, xp=xp)
desired_np = as_numpy_array(desired, xp=xp)
np.testing.assert_allclose( # pyright: ignore[reportCallIssue]
actual_np,
desired_np,
rtol=rtol, # pyright: ignore[reportArgumentType]
atol=atol,
err_msg=err_msg,
)
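# Hedged usage sketch (private testing helper; the inputs are made up): compare
# two NumPy arrays with the backend-agnostic closeness assertion defined above.
def _example_xp_assert_close():  # pragma: no cover
    actual = np.asarray([1.0, 2.0])
    desired = np.asarray([1.0, 2.0 + 1e-12])
    xp_assert_close(actual, desired)  # passes with the dtype-dependent rtol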
def xfail(
request: pytest.FixtureRequest, *, reason: str, strict: bool | None = None
) -> None:
"""
XFAIL the currently running test.
Unlike ``pytest.xfail``, allow the rest of the test to execute instead of
immediately halting it, so that it may result in an XPASS.
xref https://github.com/pandas-dev/pandas/issues/38902
Parameters
----------
request : pytest.FixtureRequest
``request`` argument of the test function.
reason : str
Reason for the expected failure.
strict : bool, optional
If True, the test will be marked as failed if it passes.
If False, the test will be marked as passed if it fails.
Default: ``xfail_strict`` value in ``pyproject.toml``, or False if absent.
"""
if strict is not None:
marker = pytest.mark.xfail(reason=reason, strict=strict)
else:
marker = pytest.mark.xfail(reason=reason)
request.node.add_marker(marker)
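# Sketch of the intended call pattern (hypothetical test body): mark the running
# test as an expected failure but keep executing it, so an unexpected pass is
# reported as XPASS rather than halting early.
def _example_test_with_xfail(request: pytest.FixtureRequest):  # pragma: no cover
    xfail(request, reason="illustrative known issue on this backend", strict=False)
    assert False, "the failure this xfail is expected to cover"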
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_extra/_lib/__init__.py | sklearn/externals/array_api_extra/_lib/__init__.py | """Internals of array-api-extra."""
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_extra/_lib/_backends.py | sklearn/externals/array_api_extra/_lib/_backends.py | """Backends against which array-api-extra runs its tests."""
from __future__ import annotations
from enum import Enum
from typing import Any
import numpy as np
import pytest
__all__ = ["NUMPY_VERSION", "Backend"]
NUMPY_VERSION = tuple(int(v) for v in np.__version__.split(".")[:3]) # pyright: ignore[reportUnknownArgumentType]
class Backend(Enum): # numpydoc ignore=PR02
"""
All array library backends explicitly tested by array-api-extra.
Parameters
----------
value : str
Tag of the backend's module, in the format ``<namespace>[:<extra tag>]``.
"""
# Use :<tag> to prevent Enum from deduplicating items with the same value
ARRAY_API_STRICT = "array_api_strict"
ARRAY_API_STRICTEST = "array_api_strict:strictest"
NUMPY = "numpy"
NUMPY_READONLY = "numpy:readonly"
CUPY = "cupy"
TORCH = "torch"
TORCH_GPU = "torch:gpu"
DASK = "dask.array"
SPARSE = "sparse"
JAX = "jax.numpy"
JAX_GPU = "jax.numpy:gpu"
@property
def modname(self) -> str: # numpydoc ignore=RT01
"""Module name to be imported."""
return self.value.split(":")[0]
def like(self, *others: Backend) -> bool: # numpydoc ignore=PR01,RT01
"""Check if this backend uses the same module as others."""
return any(self.modname == other.modname for other in others)
def pytest_param(self) -> Any:
"""
Backend as a pytest parameter.
Returns
-------
pytest.mark.ParameterSet
"""
id_ = (
self.name.lower().replace("_gpu", ":gpu").replace("_readonly", ":readonly")
)
marks = []
if self.like(Backend.ARRAY_API_STRICT):
marks.append(
pytest.mark.skipif(
NUMPY_VERSION < (1, 26),
reason="array_api_strict is untested on NumPy <1.26",
)
)
if self.like(Backend.DASK, Backend.JAX):
# Monkey-patched by lazy_xp_function
marks.append(pytest.mark.thread_unsafe)
return pytest.param(self, id=id_, marks=marks) # pyright: ignore[reportUnknownArgumentType]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_extra/_lib/_at.py | sklearn/externals/array_api_extra/_lib/_at.py | """Update operations for read-only arrays."""
from __future__ import annotations
import operator
from collections.abc import Callable
from enum import Enum
from types import ModuleType
from typing import TYPE_CHECKING, ClassVar, cast
from ._utils import _compat
from ._utils._compat import (
array_namespace,
is_dask_array,
is_jax_array,
is_torch_array,
is_writeable_array,
)
from ._utils._helpers import meta_namespace
from ._utils._typing import Array, SetIndex
if TYPE_CHECKING: # pragma: no cover
# TODO import from typing (requires Python >=3.11)
from typing_extensions import Self
class _AtOp(Enum):
"""Operations for use in `xpx.at`."""
SET = "set"
ADD = "add"
SUBTRACT = "subtract"
MULTIPLY = "multiply"
DIVIDE = "divide"
POWER = "power"
MIN = "min"
MAX = "max"
# @override from Python 3.12
def __str__(self) -> str: # pyright: ignore[reportImplicitOverride]
"""
Return string representation (useful for pytest logs).
Returns
-------
str
The operation's name.
"""
return self.value
class Undef(Enum):
"""Sentinel for undefined values."""
UNDEF = 0
_undef = Undef.UNDEF
class at: # pylint: disable=invalid-name # numpydoc ignore=PR02
"""
Update operations for read-only arrays.
This implements ``jax.numpy.ndarray.at`` for all writeable
backends (those that support ``__setitem__``) and routes
to the ``.at[]`` method for JAX arrays.
Parameters
----------
x : array
Input array.
idx : index, optional
Only `array API standard compliant indices
<https://data-apis.org/array-api/latest/API_specification/indexing.html>`_
are supported.
You may use two alternate syntaxes::
>>> import array_api_extra as xpx
>>> xpx.at(x, idx).set(value) # or add(value), etc.
>>> xpx.at(x)[idx].set(value)
copy : bool, optional
None (default)
The array parameter *may* be modified in place if it is
possible and beneficial for performance.
You should not reuse it after calling this function.
True
Ensure that the inputs are not modified.
False
Ensure that the update operation writes back to the input.
Raise ``ValueError`` if a copy cannot be avoided.
xp : array_namespace, optional
The standard-compatible namespace for `x`. Default: infer.
Returns
-------
Updated input array.
Warnings
--------
(a) When you omit the ``copy`` parameter, you should never reuse the parameter
array later on; ideally, you should reassign it immediately::
>>> import array_api_extra as xpx
>>> x = xpx.at(x, 0).set(2)
The above best practice pattern ensures that the behaviour won't change depending
on whether ``x`` is writeable or not, as the original ``x`` object is dereferenced
as soon as ``xpx.at`` returns; this way there is no risk of accidentally updating it
twice.
Conversely, the anti-pattern below must be avoided, as it will result in
different behaviour on read-only versus writeable arrays::
>>> x = xp.asarray([0, 0, 0])
>>> y = xpx.at(x, 0).set(2)
>>> z = xpx.at(x, 1).set(3)
In the above example, both calls to ``xpx.at`` update ``x`` in place *if possible*.
This causes the behaviour to diverge depending on whether ``x`` is writeable or not:
- If ``x`` is writeable, then after the snippet above you'll have
``x == y == z == [2, 3, 0]``
- If ``x`` is read-only, then you'll end up with
``x == [0, 0, 0]``, ``y == [2, 0, 0]`` and ``z == [0, 3, 0]``.
The correct pattern to use if you want diverging outputs from the same input is
to enforce copies::
>>> x = xp.asarray([0, 0, 0])
>>> y = xpx.at(x, 0).set(2, copy=True) # Never updates x
>>> z = xpx.at(x, 1).set(3) # May or may not update x in place
>>> del x # avoid accidental reuse of x as we don't know its state anymore
(b) The array API standard does not support integer array indices.
The behaviour of update methods when the index is an array of integers is
undefined and will vary between backends; this is particularly true when the
index contains multiple occurrences of the same index, e.g.::
>>> import numpy as np
>>> import jax.numpy as jnp
>>> import array_api_extra as xpx
>>> xpx.at(np.asarray([123]), np.asarray([0, 0])).add(1)
array([124])
>>> xpx.at(jnp.asarray([123]), jnp.asarray([0, 0])).add(1)
Array([125], dtype=int32)
See Also
--------
jax.numpy.ndarray.at : Equivalent array method in JAX.
Notes
-----
`sparse <https://sparse.pydata.org/>`_, as well as read-only arrays from libraries
not explicitly covered by ``array-api-compat``, are not supported by update
methods.
Boolean masks are supported on Dask and jitted JAX arrays exclusively
when `idx` has the same shape as `x` and `y` is 0-dimensional.
Note that this support is not available in JAX's native
``x.at[mask].set(y)``.
This pattern::
>>> mask = m(x)
>>> x[mask] = f(x[mask])
Can't be replaced by `at`, as it won't work on Dask and JAX inside jax.jit::
>>> mask = m(x)
>>> x = xpx.at(x, mask).set(f(x[mask]))  # Crash on Dask and jax.jit
You should instead use::
>>> x = xp.where(m(x), f(x), x)
Examples
--------
Given either of these equivalent expressions::
>>> import array_api_extra as xpx
>>> x = xpx.at(x)[1].add(2)
>>> x = xpx.at(x, 1).add(2)
If x is a JAX array, they are the same as::
>>> x = x.at[1].add(2)
If x is a read-only NumPy array, they are the same as::
>>> x = x.copy()
>>> x[1] += 2
For other known backends, they are the same as::
>>> x[1] += 2
"""
_x: Array
_idx: SetIndex | Undef
__slots__: ClassVar[tuple[str, ...]] = ("_idx", "_x")
def __init__(
self, x: Array, idx: SetIndex | Undef = _undef, /
) -> None: # numpydoc ignore=GL08
self._x = x
self._idx = idx
def __getitem__(self, idx: SetIndex, /) -> Self: # numpydoc ignore=PR01,RT01
"""
Allow for the alternate syntax ``at(x)[start:stop:step]``.
It looks prettier than ``at(x, slice(start, stop, step))``
and feels more intuitive coming from the JAX documentation.
"""
if self._idx is not _undef:
msg = "Index has already been set"
raise ValueError(msg)
return type(self)(self._x, idx)
def _op(
self,
at_op: _AtOp,
in_place_op: Callable[[Array, Array | complex], Array] | None,
out_of_place_op: Callable[[Array, Array], Array] | None,
y: Array | complex,
/,
copy: bool | None,
xp: ModuleType | None,
) -> Array:
"""
Implement all update operations.
Parameters
----------
at_op : _AtOp
Method of JAX's Array.at[].
in_place_op : Callable[[Array, Array | complex], Array] | None
In-place operation to apply on mutable backends::
x[idx] = in_place_op(x[idx], y)
If None::
x[idx] = y
out_of_place_op : Callable[[Array, Array], Array] | None
Out-of-place operation to apply when idx is a boolean mask and the backend
doesn't support in-place updates::
x = xp.where(idx, out_of_place_op(x, y), x)
If None::
x = xp.where(idx, y, x)
y : array or complex
Right-hand side of the operation.
copy : bool or None
Whether to copy the input array. See the class docstring for details.
xp : array_namespace, optional
The array namespace for the input array. Default: infer.
Returns
-------
Array
Updated `x`.
"""
from ._funcs import apply_where # pylint: disable=cyclic-import
x, idx = self._x, self._idx
xp = array_namespace(x, y) if xp is None else xp
if isinstance(idx, Undef):
msg = (
"Index has not been set.\n"
"Usage: either\n"
" at(x, idx).set(value)\n"
"or\n"
" at(x)[idx].set(value)\n"
"(same for all other methods)."
)
raise ValueError(msg)
if copy not in (True, False, None):
msg = f"copy must be True, False, or None; got {copy!r}"
raise ValueError(msg)
writeable = None if copy else is_writeable_array(x)
# JAX inside jax.jit doesn't support in-place updates with boolean
# masks; Dask supports __setitem__ but not in-place operators (iops).
# We can handle the common special case of 0-dimensional y
# with where(idx, y, x) instead.
if (
(is_dask_array(idx) or is_jax_array(idx))
and idx.dtype == xp.bool
and idx.shape == x.shape
):
y_xp = xp.asarray(y, dtype=x.dtype, device=_compat.device(x))
if y_xp.ndim == 0:
if out_of_place_op: # add(), subtract(), ...
# suppress inf warnings on Dask
out = apply_where(
idx, (x, y_xp), out_of_place_op, fill_value=x, xp=xp
)
# Undo int->float promotion on JAX after _AtOp.DIVIDE
out = xp.astype(out, x.dtype, copy=False)
else: # set()
out = xp.where(idx, y_xp, x)
if copy is False:
x[()] = out
return x
return out
# else: this will work on eager JAX and crash on jax.jit and Dask
if copy or (copy is None and not writeable):
if is_jax_array(x):
# Use JAX's at[]
func = cast(
Callable[[Array | complex], Array],
getattr(x.at[idx], at_op.value), # type: ignore[attr-defined] # pyright: ignore[reportAttributeAccessIssue,reportUnknownArgumentType]
)
out = func(y)
# Undo int->float promotion on JAX after _AtOp.DIVIDE
return xp.astype(out, x.dtype, copy=False)
# Emulate at[] behaviour for non-JAX arrays
# with a copy followed by an update
x = xp.asarray(x, copy=True)
# A copy of a read-only numpy array is writeable
# Note: this assumes that a copy of a writeable array is writeable
assert not writeable
writeable = None
if writeable is None:
writeable = is_writeable_array(x)
if not writeable:
# sparse crashes here
msg = f"Can't update read-only array {x}"
raise ValueError(msg)
# Work around bug in PyTorch where __setitem__ doesn't
# always support mismatched dtypes
# https://github.com/pytorch/pytorch/issues/150017
if is_torch_array(y):
y = xp.astype(y, x.dtype, copy=False)
# Backends without boolean indexing (other than JAX) crash here
if in_place_op: # add(), subtract(), ...
x[idx] = in_place_op(x[idx], y)
else: # set()
x[idx] = y
return x
def set(
self,
y: Array | complex,
/,
copy: bool | None = None,
xp: ModuleType | None = None,
) -> Array: # numpydoc ignore=PR01,RT01
"""Apply ``x[idx] = y`` and return the update array."""
return self._op(_AtOp.SET, None, None, y, copy=copy, xp=xp)
def add(
self,
y: Array | complex,
/,
copy: bool | None = None,
xp: ModuleType | None = None,
) -> Array: # numpydoc ignore=PR01,RT01
"""Apply ``x[idx] += y`` and return the updated array."""
# Note for this and all other methods based on _iop:
# operator.iadd and operator.add subtly differ in behaviour, as
# only iadd will trigger exceptions when y has an incompatible dtype.
return self._op(_AtOp.ADD, operator.iadd, operator.add, y, copy=copy, xp=xp)
def subtract(
self,
y: Array | complex,
/,
copy: bool | None = None,
xp: ModuleType | None = None,
) -> Array: # numpydoc ignore=PR01,RT01
"""Apply ``x[idx] -= y`` and return the updated array."""
return self._op(
_AtOp.SUBTRACT, operator.isub, operator.sub, y, copy=copy, xp=xp
)
def multiply(
self,
y: Array | complex,
/,
copy: bool | None = None,
xp: ModuleType | None = None,
) -> Array: # numpydoc ignore=PR01,RT01
"""Apply ``x[idx] *= y`` and return the updated array."""
return self._op(
_AtOp.MULTIPLY, operator.imul, operator.mul, y, copy=copy, xp=xp
)
def divide(
self,
y: Array | complex,
/,
copy: bool | None = None,
xp: ModuleType | None = None,
) -> Array: # numpydoc ignore=PR01,RT01
"""Apply ``x[idx] /= y`` and return the updated array."""
return self._op(
_AtOp.DIVIDE, operator.itruediv, operator.truediv, y, copy=copy, xp=xp
)
def power(
self,
y: Array | complex,
/,
copy: bool | None = None,
xp: ModuleType | None = None,
) -> Array: # numpydoc ignore=PR01,RT01
"""Apply ``x[idx] **= y`` and return the updated array."""
return self._op(_AtOp.POWER, operator.ipow, operator.pow, y, copy=copy, xp=xp)
def min(
self,
y: Array | complex,
/,
copy: bool | None = None,
xp: ModuleType | None = None,
) -> Array: # numpydoc ignore=PR01,RT01
"""Apply ``x[idx] = minimum(x[idx], y)`` and return the updated array."""
# On Dask, this function runs on the chunks, so we need to determine the
# namespace that Dask is wrapping.
# Note that da.minimum _incidentally_ works on NumPy, CuPy, and sparse
# thanks to all these meta-namespaces implementing the __array_ufunc__
# interface, but there's no guarantee that it will work for other
# wrapped libraries in the future.
xp = array_namespace(self._x) if xp is None else xp
mxp = meta_namespace(self._x, xp=xp)
y = xp.asarray(y)
return self._op(_AtOp.MIN, mxp.minimum, mxp.minimum, y, copy=copy, xp=xp)
def max(
self,
y: Array | complex,
/,
copy: bool | None = None,
xp: ModuleType | None = None,
) -> Array: # numpydoc ignore=PR01,RT01
"""Apply ``x[idx] = maximum(x[idx], y)`` and return the updated array."""
# See note on min()
xp = array_namespace(self._x) if xp is None else xp
mxp = meta_namespace(self._x, xp=xp)
y = xp.asarray(y)
return self._op(_AtOp.MAX, mxp.maximum, mxp.maximum, y, copy=copy, xp=xp)
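# --- Illustrative usage (editor's sketch, not part of the original module) ---
# A minimal sketch of the copy semantics documented in the class docstring,
# using plain NumPy arrays (NumPy and array-api-compat are already required by
# this module). ``copy=True`` always leaves the input untouched; with the
# default ``copy=None`` a read-only input is copied while a writeable one may
# be updated in place.
if __name__ == "__main__":
    import numpy as np
    x = np.asarray([0.0, 0.0, 0.0])
    y = at(x, 0).set(2.0, copy=True)  # x is guaranteed to stay unchanged
    assert x[0] == 0.0 and y[0] == 2.0
    x.flags.writeable = False
    z = at(x)[1].add(3.0)  # read-only input -> a copy is returned
    assert x[1] == 0.0 and z[1] == 3.0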
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_extra/_lib/_utils/_typing.py | sklearn/externals/array_api_extra/_lib/_utils/_typing.py | # numpydoc ignore=GL08
# pylint: disable=missing-module-docstring,duplicate-code
Array = object
DType = object
Device = object
GetIndex = object
SetIndex = object
__all__ = ["Array", "DType", "Device", "GetIndex", "SetIndex"]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_extra/_lib/_utils/_compat.py | sklearn/externals/array_api_extra/_lib/_utils/_compat.py | """Acquire helpers from array-api-compat."""
# Allow packages that vendor both `array-api-extra` and
# `array-api-compat` to override the import location
# pylint: disable=duplicate-code
try:
from ...._array_api_compat_vendor import (
array_namespace,
device,
is_array_api_obj,
is_array_api_strict_namespace,
is_cupy_array,
is_cupy_namespace,
is_dask_array,
is_dask_namespace,
is_jax_array,
is_jax_namespace,
is_lazy_array,
is_numpy_array,
is_numpy_namespace,
is_pydata_sparse_array,
is_pydata_sparse_namespace,
is_torch_array,
is_torch_namespace,
is_writeable_array,
size,
to_device,
)
except ImportError:
from array_api_compat import (
array_namespace,
device,
is_array_api_obj,
is_array_api_strict_namespace,
is_cupy_array,
is_cupy_namespace,
is_dask_array,
is_dask_namespace,
is_jax_array,
is_jax_namespace,
is_lazy_array,
is_numpy_array,
is_numpy_namespace,
is_pydata_sparse_array,
is_pydata_sparse_namespace,
is_torch_array,
is_torch_namespace,
is_writeable_array,
size,
to_device,
)
__all__ = [
"array_namespace",
"device",
"is_array_api_obj",
"is_array_api_strict_namespace",
"is_cupy_array",
"is_cupy_namespace",
"is_dask_array",
"is_dask_namespace",
"is_jax_array",
"is_jax_namespace",
"is_lazy_array",
"is_numpy_array",
"is_numpy_namespace",
"is_pydata_sparse_array",
"is_pydata_sparse_namespace",
"is_torch_array",
"is_torch_namespace",
"is_writeable_array",
"size",
"to_device",
]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_extra/_lib/_utils/_helpers.py | sklearn/externals/array_api_extra/_lib/_utils/_helpers.py | """Helper functions used by `array_api_extra/_funcs.py`."""
from __future__ import annotations
import io
import math
import pickle
import types
from collections.abc import Callable, Generator, Iterable
from functools import wraps
from types import ModuleType
from typing import (
TYPE_CHECKING,
Any,
ClassVar,
Generic,
Literal,
ParamSpec,
TypeAlias,
TypeVar,
cast,
)
from . import _compat
from ._compat import (
array_namespace,
is_array_api_obj,
is_dask_namespace,
is_jax_namespace,
is_numpy_array,
is_pydata_sparse_namespace,
is_torch_namespace,
)
from ._typing import Array, Device
if TYPE_CHECKING: # pragma: no cover
# TODO import from typing (override requires Python >=3.12; TypeIs requires >=3.13)
from typing_extensions import TypeIs, override
else:
def override(func):
return func
P = ParamSpec("P")
T = TypeVar("T")
__all__ = [
"asarrays",
"capabilities",
"eager_shape",
"in1d",
"is_python_scalar",
"jax_autojit",
"mean",
"meta_namespace",
"pickle_flatten",
"pickle_unflatten",
]
def in1d(
x1: Array,
x2: Array,
/,
*,
assume_unique: bool = False,
invert: bool = False,
xp: ModuleType | None = None,
) -> Array: # numpydoc ignore=PR01,RT01
"""
Check whether each element of an array is also present in a second array.
Returns a boolean array the same length as `x1` that is True
where an element of `x1` is in `x2` and False otherwise.
This function has been adapted using the original implementation
present in numpy:
https://github.com/numpy/numpy/blob/v1.26.0/numpy/lib/arraysetops.py#L524-L758
"""
if xp is None:
xp = array_namespace(x1, x2)
x1_shape = eager_shape(x1)
x2_shape = eager_shape(x2)
# For small x2, a brute-force scan over its elements is significantly faster
if x2_shape[0] < 10 * x1_shape[0] ** 0.145 and isinstance(x2, Iterable):
if invert:
mask = xp.ones(x1_shape[0], dtype=xp.bool, device=_compat.device(x1))
for a in x2:
mask &= x1 != a
else:
mask = xp.zeros(x1_shape[0], dtype=xp.bool, device=_compat.device(x1))
for a in x2:
mask |= x1 == a
return mask
rev_idx = xp.empty(0) # placeholder
if not assume_unique:
x1, rev_idx = xp.unique_inverse(x1)
x2 = xp.unique_values(x2)
ar = xp.concat((x1, x2))
device_ = _compat.device(ar)
# We need this to be a stable sort.
order = xp.argsort(ar, stable=True)
reverse_order = xp.argsort(order, stable=True)
sar = xp.take(ar, order, axis=0)
ar_size = _compat.size(sar)
assert ar_size is not None, "xp.unique*() on lazy backends raises"
if ar_size >= 1:
bool_ar = sar[1:] != sar[:-1] if invert else sar[1:] == sar[:-1]
else:
bool_ar = xp.asarray([False]) if invert else xp.asarray([True])
flag = xp.concat((bool_ar, xp.asarray([invert], device=device_)))
ret = xp.take(flag, reverse_order, axis=0)
if assume_unique:
return ret[: x1.shape[0]]
return xp.take(ret, rev_idx, axis=0)
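# --- Illustrative usage (editor's sketch, not part of the original module) ---
# A minimal sketch of ``in1d`` with NumPy inputs; on 1-D data the result
# matches ``numpy.isin``. The arrays below are illustrative only.
if __name__ == "__main__":
    import numpy as np
    x1 = np.asarray([0, 1, 2, 5, 0])
    x2 = np.asarray([0, 2])
    assert in1d(x1, x2).tolist() == [True, False, True, False, True]
    assert in1d(x1, x2, invert=True).tolist() == [False, True, False, True, False]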
def mean(
x: Array,
/,
*,
axis: int | tuple[int, ...] | None = None,
keepdims: bool = False,
xp: ModuleType | None = None,
) -> Array: # numpydoc ignore=PR01,RT01
"""
Complex mean, https://github.com/data-apis/array-api/issues/846.
"""
if xp is None:
xp = array_namespace(x)
if xp.isdtype(x.dtype, "complex floating"):
x_real = xp.real(x)
x_imag = xp.imag(x)
mean_real = xp.mean(x_real, axis=axis, keepdims=keepdims)
mean_imag = xp.mean(x_imag, axis=axis, keepdims=keepdims)
return mean_real + (mean_imag * xp.asarray(1j))
return xp.mean(x, axis=axis, keepdims=keepdims)
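# --- Illustrative usage (editor's sketch, not part of the original module) ---
# A minimal sketch of the complex-aware ``mean`` above: real and imaginary
# parts are averaged separately and recombined, matching NumPy's own complex
# mean. Assumes NumPy only.
if __name__ == "__main__":
    import numpy as np
    z = np.asarray([1 + 1j, 3 + 5j])
    assert mean(z) == np.mean(z)  # (2+3j) either way
    assert mean(np.asarray([1.0, 3.0])) == 2.0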
def is_python_scalar(x: object) -> TypeIs[complex]: # numpydoc ignore=PR01,RT01
"""Return True if `x` is a Python scalar, False otherwise."""
# isinstance(x, float) returns True for np.float64
# isinstance(x, complex) returns True for np.complex128
# bool is a subclass of int
return isinstance(x, int | float | complex) and not is_numpy_array(x)
def asarrays(
a: Array | complex,
b: Array | complex,
xp: ModuleType,
) -> tuple[Array, Array]:
"""
Ensure both `a` and `b` are arrays.
If `b` is a python scalar, it is converted to the same dtype as `a`, and vice versa.
Behavior is not specified when mixing a Python ``float`` and an array with an
integer data type; this may give ``float32``, ``float64``, or raise an exception.
Behavior is implementation-specific.
Similarly, behavior is not specified when mixing a Python ``complex`` and an array
with a real-valued data type; this may give ``complex64``, ``complex128``, or raise
an exception. Behavior is implementation-specific.
Parameters
----------
a, b : Array | int | float | complex | bool
Input arrays or scalars. At least one must be an array.
xp : array_namespace, optional
The standard-compatible namespace for `x`. Default: infer.
Returns
-------
Array, Array
The input arrays, possibly converted to arrays if they were scalars.
See Also
--------
mixing-arrays-with-python-scalars : Array API specification for the behavior.
"""
a_scalar = is_python_scalar(a)
b_scalar = is_python_scalar(b)
if not a_scalar and not b_scalar:
# This includes misc. malformed input e.g. str
return a, b # type: ignore[return-value]
swap = False
if a_scalar:
swap = True
b, a = a, b
if is_array_api_obj(a):
# a is an Array API object
# b is a int | float | complex | bool
xa = a
# https://data-apis.org/array-api/draft/API_specification/type_promotion.html#mixing-arrays-with-python-scalars
same_dtype = {
bool: "bool",
int: ("integral", "real floating", "complex floating"),
float: ("real floating", "complex floating"),
complex: "complex floating",
}
kind = same_dtype[type(cast(complex, b))]
if xp.isdtype(a.dtype, kind):
xb = xp.asarray(b, dtype=a.dtype)
else:
# Undefined behaviour. Let the function deal with it, if it can.
xb = xp.asarray(b)
else:
# Neither a nor b are Array API objects.
# Note: we can only reach this point when one explicitly passes
# xp=xp to the calling function; otherwise we fail earlier on
# array_namespace(a, b).
xa, xb = xp.asarray(a), xp.asarray(b)
return (xb, xa) if swap else (xa, xb)
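# --- Illustrative usage (editor's sketch, not part of the original module) ---
# A minimal sketch of ``asarrays``: a Python scalar is promoted to an array
# with the other operand's dtype when the scalar's kind is compatible, and the
# array/scalar order is preserved in the output. Assumes NumPy only; the
# namespace comes from ``array_namespace`` imported at the top of this module.
if __name__ == "__main__":
    import numpy as np
    a = np.asarray([1.0, 2.0], dtype=np.float32)
    xa, xb = asarrays(a, 3, array_namespace(a))
    assert xb.dtype == np.float32 and float(xb) == 3.0
    yb, ya = asarrays(3, a, array_namespace(a))
    assert yb.dtype == np.float32  # order of array/scalar does not matter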
def ndindex(*x: int) -> Generator[tuple[int, ...]]:
"""
Generate all N-dimensional indices for a given array shape.
Given the shape of an array, an ndindex instance iterates over the N-dimensional
index of the array. At each iteration a tuple of indices is returned, the last
dimension is iterated over first.
This has an identical API to numpy.ndindex.
Parameters
----------
*x : int
The shape of the array.
"""
if not x:
yield ()
return
for i in ndindex(*x[:-1]):
for j in range(x[-1]):
yield *i, j
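# --- Illustrative usage (editor's sketch, not part of the original module) ---
# A minimal sketch of ``ndindex``: the last dimension varies fastest, mirroring
# ``numpy.ndindex``. No extra dependencies are needed.
if __name__ == "__main__":
    assert list(ndindex(2, 2)) == [(0, 0), (0, 1), (1, 0), (1, 1)]
    assert list(ndindex()) == [()]  # zero dimensions -> a single empty index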
def eager_shape(x: Array, /) -> tuple[int, ...]:
"""
Return shape of an array. Raise if shape is not fully defined.
Parameters
----------
x : Array
Input array.
Returns
-------
tuple[int, ...]
Shape of the array.
"""
shape = x.shape
# Dask arrays use non-standard NaN instead of None
if any(s is None or math.isnan(s) for s in shape):
msg = "Unsupported lazy shape"
raise TypeError(msg)
return cast(tuple[int, ...], shape)
def meta_namespace(
*arrays: Array | complex | None, xp: ModuleType | None = None
) -> ModuleType:
"""
Get the namespace of Dask chunks.
On all other backends, just return the namespace of the arrays.
Parameters
----------
*arrays : Array | int | float | complex | bool | None
Input arrays.
xp : array_namespace, optional
The standard-compatible namespace for the input arrays. Default: infer.
Returns
-------
array_namespace
If xp is Dask, the namespace of the Dask chunks;
otherwise, the namespace of the arrays.
"""
xp = array_namespace(*arrays) if xp is None else xp
if not is_dask_namespace(xp):
return xp
# Quietly skip scalars and None's
metas = [cast(Array | None, getattr(a, "_meta", None)) for a in arrays]
return array_namespace(*metas)
def capabilities(
xp: ModuleType, *, device: Device | None = None
) -> dict[str, int | None]:
"""
Return patched ``xp.__array_namespace_info__().capabilities()``.
TODO this helper should be eventually removed once all the special cases
it handles are fixed in the respective backends.
Parameters
----------
xp : array_namespace
The standard-compatible namespace.
device : Device, optional
The device to use.
Returns
-------
dict
Capabilities of the namespace.
"""
out = xp.__array_namespace_info__().capabilities()
if is_pydata_sparse_namespace(xp):
if out["boolean indexing"]:
# FIXME https://github.com/pydata/sparse/issues/876
# boolean indexing is supported, but not when the index is a sparse array.
# boolean indexing by list or numpy array is not part of the Array API.
out = out.copy()
out["boolean indexing"] = False
elif is_jax_namespace(xp):
if out["boolean indexing"]: # pragma: no cover
# Backwards compatibility with jax <0.6.0
# https://github.com/jax-ml/jax/issues/27418
out = out.copy()
out["boolean indexing"] = False
elif is_torch_namespace(xp):
# FIXME https://github.com/data-apis/array-api/issues/945
device = xp.get_default_device() if device is None else xp.device(device)
if device.type == "meta": # type: ignore[union-attr] # pyright: ignore[reportAttributeAccessIssue,reportOptionalMemberAccess]
out = out.copy()
out["boolean indexing"] = False
out["data-dependent shapes"] = False
return out
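# --- Illustrative usage (editor's sketch, not part of the original module) ---
# A minimal sketch of ``capabilities``: for plain NumPy none of the patches
# above apply, so the dict simply mirrors
# ``xp.__array_namespace_info__().capabilities()``. This assumes the standalone
# array-api-compat package is importable; within scikit-learn the vendored
# equivalent namespace would be used instead.
if __name__ == "__main__":
    import array_api_compat.numpy as np_xp
    caps = capabilities(np_xp)
    assert caps["boolean indexing"]
    assert caps["data-dependent shapes"]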
_BASIC_PICKLED_TYPES = frozenset((
bool, int, float, complex, str, bytes, bytearray,
list, tuple, dict, set, frozenset, range, slice,
types.NoneType, types.EllipsisType,
)) # fmt: skip
_BASIC_REST_TYPES = frozenset((
type, types.BuiltinFunctionType, types.FunctionType, types.ModuleType
)) # fmt: skip
FlattenRest: TypeAlias = tuple[object, ...]
def pickle_flatten(
obj: object, cls: type[T] | tuple[type[T], ...]
) -> tuple[list[T], FlattenRest]:
"""
Use the pickle machinery to extract objects out of an arbitrary container.
Unlike regular ``pickle.dumps``, this function always succeeds.
Parameters
----------
obj : object
The object to pickle.
cls : type | tuple[type, ...]
One or multiple classes to extract from the object.
The instances of these classes inside ``obj`` will not be pickled.
Returns
-------
instances : list[cls]
All instances of ``cls`` found inside ``obj`` (not pickled).
rest
Opaque object containing the pickled bytes plus all other objects where
``__reduce__`` / ``__reduce_ex__`` is either not implemented or raised.
These are unpickleable objects, types, modules, and functions.
This object is *typically* hashable save for fairly exotic objects
that are neither pickleable nor hashable.
This object is pickleable if everything except ``instances`` was pickleable
in the input object.
See Also
--------
pickle_unflatten : Reverse function.
Examples
--------
>>> class A:
... def __repr__(self):
... return "<A>"
>>> class NS:
... def __repr__(self):
... return "<NS>"
... def __reduce__(self):
... assert False, "not serializable"
>>> obj = {1: A(), 2: [A(), NS(), A()]}
>>> instances, rest = pickle_flatten(obj, A)
>>> instances
[<A>, <A>, <A>]
>>> pickle_unflatten(instances, rest)
{1: <A>, 2: [<A>, <NS>, <A>]}
This can also be used to swap inner objects; the only constraint is that
the number of objects in and out must be the same:
>>> pickle_unflatten(["foo", "bar", "baz"], rest)
{1: "foo", 2: ["bar", <NS>, "baz"]}
"""
instances: list[T] = []
rest: list[object] = []
class Pickler(pickle.Pickler): # numpydoc ignore=GL08
"""
Use the `pickle.Pickler.persistent_id` hook to extract objects.
"""
@override
def persistent_id(
self, obj: object
) -> Literal[0, 1, None]: # numpydoc ignore=GL08
if isinstance(obj, cls):
instances.append(obj) # type: ignore[arg-type]
return 0
typ_ = type(obj)
if typ_ in _BASIC_PICKLED_TYPES: # No subclasses!
# If obj is a collection, recursively descend inside it
return None
if typ_ in _BASIC_REST_TYPES:
rest.append(obj)
return 1
try:
# Note: a class that defines __slots__ without defining __getstate__
# cannot be pickled with __reduce__(), but can with __reduce_ex__(5)
_ = obj.__reduce_ex__(pickle.HIGHEST_PROTOCOL)
except Exception: # pylint: disable=broad-exception-caught
rest.append(obj)
return 1
# Object can be pickled. Let the Pickler recursively descend inside it.
return None
f = io.BytesIO()
p = Pickler(f, protocol=pickle.HIGHEST_PROTOCOL)
p.dump(obj)
return instances, (f.getvalue(), *rest)
def pickle_unflatten(instances: Iterable[object], rest: FlattenRest) -> Any:
"""
Reverse of ``pickle_flatten``.
Parameters
----------
instances : Iterable
Inner objects to be reinserted into the flattened container.
rest : FlattenRest
Extra bits, as returned by ``pickle_flatten``.
Returns
-------
object
The outer object originally passed to ``pickle_flatten`` after a
pickle->unpickle round-trip.
See Also
--------
pickle_flatten : Serializing function.
pickle.loads : Standard unpickle function.
Notes
-----
The `instances` iterable must yield at least the same number of elements as the ones
returned by ``pickle_flatten``, but the elements do not need to be the same objects
or even the same types of objects. Excess elements, if any, will be left untouched.
"""
iters = iter(instances), iter(rest)
pik = cast(bytes, next(iters[1]))
class Unpickler(pickle.Unpickler): # numpydoc ignore=GL08
"""Mirror of the overridden Pickler in pickle_flatten."""
@override
def persistent_load(self, pid: Literal[0, 1]) -> object: # numpydoc ignore=GL08
try:
return next(iters[pid])
except StopIteration as e:
msg = "Not enough objects to unpickle"
raise ValueError(msg) from e
f = io.BytesIO(pik)
return Unpickler(f).load()
class _AutoJITWrapper(Generic[T]): # numpydoc ignore=PR01
"""
Helper of :func:`jax_autojit`.
Wrap arbitrary inputs and outputs of the jitted function and
convert them to/from PyTrees.
"""
obj: T
_registered: ClassVar[bool] = False
__slots__: tuple[str, ...] = ("obj",)
def __init__(self, obj: T) -> None: # numpydoc ignore=GL08
self._register()
self.obj = obj
@classmethod
def _register(cls) -> None: # numpydoc ignore=SS06
"""
Register upon first use instead of at import time, to avoid
globally importing JAX.
"""
if not cls._registered:
import jax
jax.tree_util.register_pytree_node(
cls,
lambda obj: pickle_flatten(obj, jax.Array), # pyright: ignore[reportUnknownArgumentType]
lambda aux_data, children: pickle_unflatten(children, aux_data), # pyright: ignore[reportUnknownArgumentType]
)
cls._registered = True
def jax_autojit(
func: Callable[P, T],
) -> Callable[P, T]: # numpydoc ignore=PR01,RT01,SS03
"""
Wrap `func` with ``jax.jit``, with the following differences:
- Python scalar arguments and return values are not automatically converted to
``jax.Array`` objects.
- All non-array arguments are automatically treated as static.
Unlike ``jax.jit``, static arguments must be either hashable or serializable with
``pickle``.
- Unlike ``jax.jit``, non-array arguments and return values are not limited to
tuple/list/dict, but can be any object serializable with ``pickle``.
- Automatically descend into non-array arguments and find ``jax.Array`` objects
inside them, then rebuild the arguments when entering `func`, swapping the JAX
concrete arrays with tracer objects.
- Automatically descend into non-array return values and find ``jax.Array`` objects
inside them, then rebuild them downstream of exiting the JIT, swapping the JAX
tracer objects with concrete arrays.
See Also
--------
jax.jit : JAX JIT compilation function.
Notes
-----
These are useful choices *for testing purposes only*, which is how this function is
intended to be used. The output of ``jax.jit`` is a C++ level callable, that
directly dispatches to the compiled kernel after the initial call. In comparison,
``jax_autojit`` incurs a much higher dispatch time.
Additionally, consider::
def f(x: Array, y: float, plus: bool) -> Array:
return x + y if plus else x - y
j1 = jax.jit(f, static_argnames="plus")
j2 = jax_autojit(f)
In the above example, ``j2`` requires a lot less setup to be tested effectively than
``j1``, but on the flip side it means that it will be re-traced for every different
value of ``y``, which likely makes it not fit for purpose in production.
"""
import jax
@jax.jit # type: ignore[misc] # pyright: ignore[reportUntypedFunctionDecorator]
def inner( # numpydoc ignore=GL08
wargs: _AutoJITWrapper[Any],
) -> _AutoJITWrapper[T]:
args, kwargs = wargs.obj
res = func(*args, **kwargs) # pyright: ignore[reportCallIssue]
return _AutoJITWrapper(res)
@wraps(func)
def outer(*args: P.args, **kwargs: P.kwargs) -> T: # numpydoc ignore=GL08
wargs = _AutoJITWrapper((args, kwargs))
return inner(wargs).obj
return outer
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/externals/array_api_extra/_lib/_utils/__init__.py | sklearn/externals/array_api_extra/_lib/_utils/__init__.py | """Modules housing private utility functions."""
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/svm/_base.py | sklearn/svm/_base.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from abc import ABCMeta, abstractmethod
from numbers import Integral, Real
import numpy as np
import scipy.sparse as sp
from sklearn.base import BaseEstimator, ClassifierMixin, _fit_context
from sklearn.exceptions import ConvergenceWarning, NotFittedError
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import _liblinear as liblinear # type: ignore[attr-defined]
# mypy error: error: Module 'sklearn.svm' has no attribute '_libsvm'
# (and same for other imports)
from sklearn.svm import _libsvm as libsvm # type: ignore[attr-defined]
from sklearn.svm import _libsvm_sparse as libsvm_sparse # type: ignore[attr-defined]
from sklearn.utils import (
check_array,
check_random_state,
column_or_1d,
compute_class_weight,
)
from sklearn.utils._param_validation import Interval, StrOptions
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.metaestimators import available_if
from sklearn.utils.multiclass import (
_ovr_decision_function,
check_classification_targets,
)
from sklearn.utils.validation import (
_check_large_sparse,
_check_sample_weight,
_num_samples,
check_consistent_length,
check_is_fitted,
validate_data,
)
LIBSVM_IMPL = ["c_svc", "nu_svc", "one_class", "epsilon_svr", "nu_svr"]
def _one_vs_one_coef(dual_coef, n_support, support_vectors):
"""Generate primal coefficients from dual coefficients
for the one-vs-one multiclass LibSVM in the case
of a linear kernel."""
# get 1vs1 weights for all n*(n-1) classifiers.
# this is somewhat messy.
# shape of dual_coef_ is nSV * (n_classes -1)
# see docs for details
n_class = dual_coef.shape[0] + 1
# XXX we could do preallocation of coef but
# would have to take care in the sparse case
coef = []
sv_locs = np.cumsum(np.hstack([[0], n_support]))
for class1 in range(n_class):
# SVs for class1:
sv1 = support_vectors[sv_locs[class1] : sv_locs[class1 + 1], :]
for class2 in range(class1 + 1, n_class):
# SVs for class2:
sv2 = support_vectors[sv_locs[class2] : sv_locs[class2 + 1], :]
# dual coef for class1 SVs:
alpha1 = dual_coef[class2 - 1, sv_locs[class1] : sv_locs[class1 + 1]]
# dual coef for class2 SVs:
alpha2 = dual_coef[class1, sv_locs[class2] : sv_locs[class2 + 1]]
# build weight for class1 vs class2
coef.append(safe_sparse_dot(alpha1, sv1) + safe_sparse_dot(alpha2, sv2))
return coef
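# --- Illustrative example (editor's sketch, not part of scikit-learn) ---
# A minimal sketch of the helper above with 3 classes and one support vector
# per class: ``dual_coef`` has shape (n_classes - 1, n_SV) and the result is
# one primal weight vector per class pair, i.e. 3 * 2 / 2 = 3 vectors here.
# The numbers are made up purely to show the shapes involved.
if __name__ == "__main__":
    support_vectors = np.asarray([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
    n_support = np.asarray([1, 1, 1])
    dual_coef = np.asarray([[0.5, -0.5, 0.25], [0.1, 0.2, -0.3]])
    pairwise_coef = _one_vs_one_coef(dual_coef, n_support, support_vectors)
    assert len(pairwise_coef) == 3  # pairs (0, 1), (0, 2), (1, 2)
    assert pairwise_coef[0].shape == (2,)  # one weight per feature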
class BaseLibSVM(BaseEstimator, metaclass=ABCMeta):
"""Base class for estimators that use libsvm as backing library.
This implements support vector machine classification and regression.
Parameter documentation is in the derived `SVC` class.
"""
_parameter_constraints: dict = {
"kernel": [
StrOptions({"linear", "poly", "rbf", "sigmoid", "precomputed"}),
callable,
],
"degree": [Interval(Integral, 0, None, closed="left")],
"gamma": [
StrOptions({"scale", "auto"}),
Interval(Real, 0.0, None, closed="left"),
],
"coef0": [Interval(Real, None, None, closed="neither")],
"tol": [Interval(Real, 0.0, None, closed="neither")],
"C": [Interval(Real, 0.0, None, closed="right")],
"nu": [Interval(Real, 0.0, 1.0, closed="right")],
"epsilon": [Interval(Real, 0.0, None, closed="left")],
"shrinking": ["boolean"],
"probability": ["boolean"],
"cache_size": [Interval(Real, 0, None, closed="neither")],
"class_weight": [StrOptions({"balanced"}), dict, None],
"verbose": ["verbose"],
"max_iter": [Interval(Integral, -1, None, closed="left")],
"random_state": ["random_state"],
}
# The order of these must match the integer values in LibSVM.
# XXX These are actually the same in the dense case. Need to factor
# this out.
_sparse_kernels = ["linear", "poly", "rbf", "sigmoid", "precomputed"]
@abstractmethod
def __init__(
self,
kernel,
degree,
gamma,
coef0,
tol,
C,
nu,
epsilon,
shrinking,
probability,
cache_size,
class_weight,
verbose,
max_iter,
random_state,
):
if self._impl not in LIBSVM_IMPL:
raise ValueError(
"impl should be one of %s, %s was given" % (LIBSVM_IMPL, self._impl)
)
self.kernel = kernel
self.degree = degree
self.gamma = gamma
self.coef0 = coef0
self.tol = tol
self.C = C
self.nu = nu
self.epsilon = epsilon
self.shrinking = shrinking
self.probability = probability
self.cache_size = cache_size
self.class_weight = class_weight
self.verbose = verbose
self.max_iter = max_iter
self.random_state = random_state
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
# Used by cross_val_score.
tags.input_tags.pairwise = self.kernel == "precomputed"
tags.input_tags.sparse = self.kernel != "precomputed"
return tags
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y, sample_weight=None):
"""Fit the SVM model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) \
or (n_samples, n_samples)
Training vectors, where `n_samples` is the number of samples
and `n_features` is the number of features.
For kernel="precomputed", the expected shape of X is
(n_samples, n_samples).
y : array-like of shape (n_samples,)
Target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape (n_samples,), default=None
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Fitted estimator.
Notes
-----
If X and y are not C-ordered and contiguous arrays of np.float64 and
X is not a scipy.sparse.csr_matrix, X and/or y may be copied.
If X is a dense array, then the other methods will not support sparse
matrices as input.
"""
rnd = check_random_state(self.random_state)
sparse = sp.issparse(X)
if sparse and self.kernel == "precomputed":
raise TypeError("Sparse precomputed kernels are not supported.")
self._sparse = sparse and not callable(self.kernel)
if callable(self.kernel):
check_consistent_length(X, y)
else:
X, y = validate_data(
self,
X,
y,
dtype=np.float64,
order="C",
accept_sparse="csr",
accept_large_sparse=False,
)
y = self._validate_targets(y)
sample_weight = np.asarray(
[] if sample_weight is None else sample_weight, dtype=np.float64
)
solver_type = LIBSVM_IMPL.index(self._impl)
# input validation
n_samples = _num_samples(X)
if solver_type != 2 and n_samples != y.shape[0]:
raise ValueError(
"X and y have incompatible shapes.\n"
+ "X has %s samples, but y has %s." % (n_samples, y.shape[0])
)
if self.kernel == "precomputed" and n_samples != X.shape[1]:
raise ValueError(
"Precomputed matrix must be a square matrix."
" Input is a {}x{} matrix.".format(X.shape[0], X.shape[1])
)
if sample_weight.shape[0] > 0 and sample_weight.shape[0] != n_samples:
raise ValueError(
"sample_weight and X have incompatible shapes: "
"%r vs %r\n"
"Note: Sparse matrices cannot be indexed w/"
"boolean masks (use `indices=True` in CV)."
% (sample_weight.shape, X.shape)
)
kernel = "precomputed" if callable(self.kernel) else self.kernel
if kernel == "precomputed":
# unused but needs to be a float for cython code that ignores
# it anyway
self._gamma = 0.0
elif isinstance(self.gamma, str):
if self.gamma == "scale":
# var = E[X^2] - E[X]^2 if sparse
X_var = (X.multiply(X)).mean() - (X.mean()) ** 2 if sparse else X.var()
self._gamma = 1.0 / (X.shape[1] * X_var) if X_var != 0 else 1.0
elif self.gamma == "auto":
self._gamma = 1.0 / X.shape[1]
elif isinstance(self.gamma, Real):
self._gamma = self.gamma
fit = self._sparse_fit if self._sparse else self._dense_fit
if self.verbose:
print("[LibSVM]", end="")
seed = rnd.randint(np.iinfo("i").max)
fit(X, y, sample_weight, solver_type, kernel, random_seed=seed)
# see comment on the other call to np.iinfo in this file
self.shape_fit_ = X.shape if hasattr(X, "shape") else (n_samples,)
# In binary case, we need to flip the sign of coef, intercept and
# decision function. Use self._intercept_ and self._dual_coef_
# internally.
self._intercept_ = self.intercept_.copy()
self._dual_coef_ = self.dual_coef_
if self._impl in ["c_svc", "nu_svc"] and len(self.classes_) == 2:
self.intercept_ *= -1
self.dual_coef_ = -self.dual_coef_
dual_coef = self._dual_coef_.data if self._sparse else self._dual_coef_
intercept_finiteness = np.isfinite(self._intercept_).all()
dual_coef_finiteness = np.isfinite(dual_coef).all()
if not (intercept_finiteness and dual_coef_finiteness):
raise ValueError(
"The dual coefficients or intercepts are not finite."
" The input data may contain large values and need to be"
" preprocessed."
)
# Since, in the case of SVC and NuSVC, the number of models optimized by
# libSVM could be greater than one (depending on the input), `n_iter_`
# stores an ndarray.
# For the other sub-classes (SVR, NuSVR, and OneClassSVM), the number of
# models optimized by libSVM is always one, so `n_iter_` stores an
# integer.
if self._impl in ["c_svc", "nu_svc"]:
self.n_iter_ = self._num_iter
else:
self.n_iter_ = self._num_iter.item()
return self
def _validate_targets(self, y):
"""Validation of y and class_weight.
Default implementation for SVR and one-class; overridden in BaseSVC.
"""
return column_or_1d(y, warn=True).astype(np.float64, copy=False)
def _warn_from_fit_status(self):
assert self.fit_status_ in (0, 1)
if self.fit_status_ == 1:
warnings.warn(
"Solver terminated early (max_iter=%i)."
" Consider pre-processing your data with"
" StandardScaler or MinMaxScaler." % self.max_iter,
ConvergenceWarning,
)
def _dense_fit(self, X, y, sample_weight, solver_type, kernel, random_seed):
if callable(self.kernel):
# you must store a reference to X to compute the kernel in predict
# TODO: add keyword copy to copy on demand
self.__Xfit = X
X = self._compute_kernel(X)
if X.shape[0] != X.shape[1]:
raise ValueError("X.shape[0] should be equal to X.shape[1]")
libsvm.set_verbosity_wrap(self.verbose)
# we don't pass **self.get_params() to allow subclasses to
# add other parameters to __init__
(
self.support_,
self.support_vectors_,
self._n_support,
self.dual_coef_,
self.intercept_,
self._probA,
self._probB,
self.fit_status_,
self._num_iter,
) = libsvm.fit(
X,
y,
svm_type=solver_type,
sample_weight=sample_weight,
class_weight=getattr(self, "class_weight_", np.empty(0)),
kernel=kernel,
C=self.C,
nu=self.nu,
probability=self.probability,
degree=self.degree,
shrinking=self.shrinking,
tol=self.tol,
cache_size=self.cache_size,
coef0=self.coef0,
gamma=self._gamma,
epsilon=self.epsilon,
max_iter=self.max_iter,
random_seed=random_seed,
)
self._warn_from_fit_status()
def _sparse_fit(self, X, y, sample_weight, solver_type, kernel, random_seed):
X.data = np.asarray(X.data, dtype=np.float64, order="C")
X.sort_indices()
kernel_type = self._sparse_kernels.index(kernel)
libsvm_sparse.set_verbosity_wrap(self.verbose)
(
self.support_,
self.support_vectors_,
dual_coef_data,
self.intercept_,
self._n_support,
self._probA,
self._probB,
self.fit_status_,
self._num_iter,
) = libsvm_sparse.libsvm_sparse_train(
X.shape[1],
X.data,
X.indices,
X.indptr,
y,
solver_type,
kernel_type,
self.degree,
self._gamma,
self.coef0,
self.tol,
self.C,
getattr(self, "class_weight_", np.empty(0)),
sample_weight,
self.nu,
self.cache_size,
self.epsilon,
int(self.shrinking),
int(self.probability),
self.max_iter,
random_seed,
)
self._warn_from_fit_status()
if hasattr(self, "classes_"):
n_class = len(self.classes_) - 1
else: # regression
n_class = 1
n_SV = self.support_vectors_.shape[0]
dual_coef_indices = np.tile(np.arange(n_SV), n_class)
if not n_SV:
self.dual_coef_ = sp.csr_matrix([])
else:
dual_coef_indptr = np.arange(
0, dual_coef_indices.size + 1, dual_coef_indices.size / n_class
)
self.dual_coef_ = sp.csr_matrix(
(dual_coef_data, dual_coef_indices, dual_coef_indptr), (n_class, n_SV)
)
def predict(self, X):
"""Perform regression on samples in X.
For a one-class model, +1 (inlier) or -1 (outlier) is returned.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
(n_samples_test, n_samples_train).
Returns
-------
y_pred : ndarray of shape (n_samples,)
The predicted values.
"""
X = self._validate_for_predict(X)
predict = self._sparse_predict if self._sparse else self._dense_predict
return predict(X)
def _dense_predict(self, X):
X = self._compute_kernel(X)
if X.ndim == 1:
X = check_array(X, order="C", accept_large_sparse=False)
kernel = self.kernel
if callable(self.kernel):
kernel = "precomputed"
if X.shape[1] != self.shape_fit_[0]:
raise ValueError(
"X.shape[1] = %d should be equal to %d, "
"the number of samples at training time"
% (X.shape[1], self.shape_fit_[0])
)
svm_type = LIBSVM_IMPL.index(self._impl)
return libsvm.predict(
X,
self.support_,
self.support_vectors_,
self._n_support,
self._dual_coef_,
self._intercept_,
self._probA,
self._probB,
svm_type=svm_type,
kernel=kernel,
degree=self.degree,
coef0=self.coef0,
gamma=self._gamma,
cache_size=self.cache_size,
)
def _sparse_predict(self, X):
# Precondition: X is a csr_matrix of dtype np.float64.
kernel = self.kernel
if callable(kernel):
kernel = "precomputed"
kernel_type = self._sparse_kernels.index(kernel)
C = 0.0 # C is not useful here
return libsvm_sparse.libsvm_sparse_predict(
X.data,
X.indices,
X.indptr,
self.support_vectors_.data,
self.support_vectors_.indices,
self.support_vectors_.indptr,
self._dual_coef_.data,
self._intercept_,
LIBSVM_IMPL.index(self._impl),
kernel_type,
self.degree,
self._gamma,
self.coef0,
self.tol,
C,
getattr(self, "class_weight_", np.empty(0)),
self.nu,
self.epsilon,
self.shrinking,
self.probability,
self._n_support,
self._probA,
self._probB,
)
def _compute_kernel(self, X):
"""Return the data transformed by a callable kernel"""
if callable(self.kernel):
# in the case of precomputed kernel given as a function, we
# have to compute explicitly the kernel matrix
kernel = self.kernel(X, self.__Xfit)
if sp.issparse(kernel):
kernel = kernel.toarray()
X = np.asarray(kernel, dtype=np.float64, order="C")
return X
def _decision_function(self, X):
"""Evaluates the decision function for the samples in X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
X : array-like of shape (n_samples, n_class * (n_class-1) / 2)
Returns the decision function of the sample for each class
in the model.
"""
# NOTE: _validate_for_predict contains check for is_fitted
# hence must be placed before any other attributes are used.
X = self._validate_for_predict(X)
X = self._compute_kernel(X)
if self._sparse:
dec_func = self._sparse_decision_function(X)
else:
dec_func = self._dense_decision_function(X)
# In binary case, we need to flip the sign of coef, intercept and
# decision function.
if self._impl in ["c_svc", "nu_svc"] and len(self.classes_) == 2:
return -dec_func.ravel()
return dec_func
def _dense_decision_function(self, X):
X = check_array(X, dtype=np.float64, order="C", accept_large_sparse=False)
kernel = self.kernel
if callable(kernel):
kernel = "precomputed"
return libsvm.decision_function(
X,
self.support_,
self.support_vectors_,
self._n_support,
self._dual_coef_,
self._intercept_,
self._probA,
self._probB,
svm_type=LIBSVM_IMPL.index(self._impl),
kernel=kernel,
degree=self.degree,
cache_size=self.cache_size,
coef0=self.coef0,
gamma=self._gamma,
)
def _sparse_decision_function(self, X):
X.data = np.asarray(X.data, dtype=np.float64, order="C")
kernel = self.kernel
if hasattr(kernel, "__call__"):
kernel = "precomputed"
kernel_type = self._sparse_kernels.index(kernel)
return libsvm_sparse.libsvm_sparse_decision_function(
X.data,
X.indices,
X.indptr,
self.support_vectors_.data,
self.support_vectors_.indices,
self.support_vectors_.indptr,
self._dual_coef_.data,
self._intercept_,
LIBSVM_IMPL.index(self._impl),
kernel_type,
self.degree,
self._gamma,
self.coef0,
self.tol,
self.C,
getattr(self, "class_weight_", np.empty(0)),
self.nu,
self.epsilon,
self.shrinking,
self.probability,
self._n_support,
self._probA,
self._probB,
)
def _validate_for_predict(self, X):
check_is_fitted(self)
if not callable(self.kernel):
X = validate_data(
self,
X,
accept_sparse="csr",
dtype=np.float64,
order="C",
accept_large_sparse=False,
reset=False,
)
if self._sparse and not sp.issparse(X):
X = sp.csr_matrix(X)
if self._sparse:
X.sort_indices()
if sp.issparse(X) and not self._sparse and not callable(self.kernel):
raise ValueError(
"cannot use sparse input in %r trained on dense data"
% type(self).__name__
)
if self.kernel == "precomputed":
if X.shape[1] != self.shape_fit_[0]:
raise ValueError(
"X.shape[1] = %d should be equal to %d, "
"the number of samples at training time"
% (X.shape[1], self.shape_fit_[0])
)
# Fixes https://nvd.nist.gov/vuln/detail/CVE-2020-28975
# Check that _n_support is consistent with support_vectors
sv = self.support_vectors_
if not self._sparse and sv.size > 0 and self.n_support_.sum() != sv.shape[0]:
raise ValueError(
f"The internal representation of {self.__class__.__name__} was altered"
)
return X
@property
def coef_(self):
"""Weights assigned to the features when `kernel="linear"`.
Returns
-------
ndarray of shape (n_features, n_classes)
"""
if self.kernel != "linear":
raise AttributeError("coef_ is only available when using a linear kernel")
coef = self._get_coef()
# coef_ being a read-only property, it's better to mark the value as
# immutable to avoid hiding potential bugs for the unsuspecting user.
if sp.issparse(coef):
# sparse matrix do not have global flags
coef.data.flags.writeable = False
else:
# regular dense array
coef.flags.writeable = False
return coef
def _get_coef(self):
return safe_sparse_dot(self._dual_coef_, self.support_vectors_)
@property
def n_support_(self):
"""Number of support vectors for each class."""
try:
check_is_fitted(self)
except NotFittedError:
raise AttributeError
svm_type = LIBSVM_IMPL.index(self._impl)
if svm_type in (0, 1):
return self._n_support
else:
# SVR and OneClass
# _n_support has size 2, we make it size 1
return np.array([self._n_support[0]])
class BaseSVC(ClassifierMixin, BaseLibSVM, metaclass=ABCMeta):
"""ABC for LibSVM-based classifiers."""
_parameter_constraints: dict = {
**BaseLibSVM._parameter_constraints,
"decision_function_shape": [StrOptions({"ovr", "ovo"})],
"break_ties": ["boolean"],
}
for unused_param in ["epsilon", "nu"]:
_parameter_constraints.pop(unused_param)
@abstractmethod
def __init__(
self,
kernel,
degree,
gamma,
coef0,
tol,
C,
nu,
shrinking,
probability,
cache_size,
class_weight,
verbose,
max_iter,
decision_function_shape,
random_state,
break_ties,
):
self.decision_function_shape = decision_function_shape
self.break_ties = break_ties
super().__init__(
kernel=kernel,
degree=degree,
gamma=gamma,
coef0=coef0,
tol=tol,
C=C,
nu=nu,
epsilon=0.0,
shrinking=shrinking,
probability=probability,
cache_size=cache_size,
class_weight=class_weight,
verbose=verbose,
max_iter=max_iter,
random_state=random_state,
)
def _validate_targets(self, y):
y_ = column_or_1d(y, warn=True)
check_classification_targets(y)
cls, y = np.unique(y_, return_inverse=True)
self.class_weight_ = compute_class_weight(self.class_weight, classes=cls, y=y_)
if len(cls) < 2:
raise ValueError(
"The number of classes has to be greater than one; got %d class"
% len(cls)
)
self.classes_ = cls
return np.asarray(y, dtype=np.float64, order="C")
def decision_function(self, X):
"""Evaluate the decision function for the samples in X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
Returns
-------
X : ndarray of shape (n_samples, n_classes * (n_classes-1) / 2)
Returns the decision function of the sample for each class
in the model.
If decision_function_shape='ovr', the shape is (n_samples,
n_classes).
Notes
-----
If decision_function_shape='ovo', the function values are proportional
to the distance of the samples X to the separating hyperplane. If the
exact distances are required, divide the function values by the norm of
the weight vector (``coef_``). See also `this question
<https://stats.stackexchange.com/questions/14876/
interpreting-distance-from-hyperplane-in-svm>`_ for further details.
If decision_function_shape='ovr', the decision function is a monotonic
transformation of ovo decision function.
"""
dec = self._decision_function(X)
if self.decision_function_shape == "ovr" and len(self.classes_) > 2:
return _ovr_decision_function(dec < 0, -dec, len(self.classes_))
return dec
def predict(self, X):
"""Perform classification on samples in X.
For a one-class model, +1 or -1 is returned.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
(n_samples_test, n_samples_train)
For kernel="precomputed", the expected shape of X is
(n_samples_test, n_samples_train).
Returns
-------
y_pred : ndarray of shape (n_samples,)
Class labels for samples in X.
"""
check_is_fitted(self)
if self.break_ties and self.decision_function_shape == "ovo":
raise ValueError(
"break_ties must be False when decision_function_shape is 'ovo'"
)
if (
self.break_ties
and self.decision_function_shape == "ovr"
and len(self.classes_) > 2
):
y = np.argmax(self.decision_function(X), axis=1)
else:
y = super().predict(X)
return self.classes_.take(np.asarray(y, dtype=np.intp))
# Hacky way of getting predict_proba to raise an AttributeError when
# probability=False using properties. Do not use this in new code; when
# probabilities are not available depending on a setting, introduce two
# estimators.
def _check_proba(self):
if not self.probability:
raise AttributeError(
"predict_proba is not available when probability=False"
)
if self._impl not in ("c_svc", "nu_svc"):
raise AttributeError("predict_proba only implemented for SVC and NuSVC")
return True
@available_if(_check_proba)
def predict_proba(self, X):
"""Compute probabilities of possible outcomes for samples in X.
The model needs to have probability information computed at training
time: fit with attribute `probability` set to True.
Parameters
----------
X : array-like of shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
(n_samples_test, n_samples_train).
Returns
-------
T : ndarray of shape (n_samples, n_classes)
Returns the probability of the sample for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute :term:`classes_`.
Notes
-----
The probability model is created using cross validation, so
the results can be slightly different than those obtained by
predict. Also, it will produce meaningless results on very small
datasets.
"""
X = self._validate_for_predict(X)
if self.probA_.size == 0 or self.probB_.size == 0:
raise NotFittedError(
"predict_proba is not available when fitted with probability=False"
)
pred_proba = (
self._sparse_predict_proba if self._sparse else self._dense_predict_proba
)
return pred_proba(X)
@available_if(_check_proba)
def predict_log_proba(self, X):
"""Compute log probabilities of possible outcomes for samples in X.
The model needs to have probability information computed at training
time: fit with attribute `probability` set to True.
Parameters
----------
X : array-like of shape (n_samples, n_features) or \
(n_samples_test, n_samples_train)
For kernel="precomputed", the expected shape of X is
(n_samples_test, n_samples_train).
Returns
-------
T : ndarray of shape (n_samples, n_classes)
Returns the log-probabilities of the sample for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute :term:`classes_`.
Notes
-----
The probability model is created using cross validation, so
the results can be slightly different than those obtained by
predict. Also, it will produce meaningless results on very small
datasets.
"""
return np.log(self.predict_proba(X))
def _dense_predict_proba(self, X):
X = self._compute_kernel(X)
kernel = self.kernel
if callable(kernel):
kernel = "precomputed"
svm_type = LIBSVM_IMPL.index(self._impl)
pprob = libsvm.predict_proba(
X,
self.support_,
self.support_vectors_,
self._n_support,
self._dual_coef_,
self._intercept_,
self._probA,
self._probB,
svm_type=svm_type,
kernel=kernel,
degree=self.degree,
cache_size=self.cache_size,
coef0=self.coef0,
gamma=self._gamma,
)
return pprob
def _sparse_predict_proba(self, X):
X.data = np.asarray(X.data, dtype=np.float64, order="C")
kernel = self.kernel
if callable(kernel):
kernel = "precomputed"
kernel_type = self._sparse_kernels.index(kernel)
return libsvm_sparse.libsvm_sparse_predict_proba(
X.data,
X.indices,
X.indptr,
self.support_vectors_.data,
self.support_vectors_.indices,
self.support_vectors_.indptr,
self._dual_coef_.data,
self._intercept_,
LIBSVM_IMPL.index(self._impl),
kernel_type,
self.degree,
self._gamma,
self.coef0,
self.tol,
self.C,
getattr(self, "class_weight_", np.empty(0)),
self.nu,
self.epsilon,
self.shrinking,
self.probability,
self._n_support,
self._probA,
self._probB,
)
def _get_coef(self):
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/svm/__init__.py | sklearn/svm/__init__.py | """Support vector machine algorithms."""
# See http://scikit-learn.sourceforge.net/modules/svm.html for complete
# documentation.
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from sklearn.svm._bounds import l1_min_c
from sklearn.svm._classes import (
SVC,
SVR,
LinearSVC,
LinearSVR,
NuSVC,
NuSVR,
OneClassSVM,
)
__all__ = [
"SVC",
"SVR",
"LinearSVC",
"LinearSVR",
"NuSVC",
"NuSVR",
"OneClassSVM",
"l1_min_c",
]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/svm/_classes.py | sklearn/svm/_classes.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from numbers import Integral, Real
import numpy as np
from sklearn.base import BaseEstimator, OutlierMixin, RegressorMixin, _fit_context
from sklearn.linear_model._base import (
LinearClassifierMixin,
LinearModel,
SparseCoefMixin,
)
from sklearn.svm._base import (
BaseLibSVM,
BaseSVC,
_fit_liblinear,
_get_liblinear_solver_type,
)
from sklearn.utils._param_validation import Interval, StrOptions
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.validation import _num_samples, validate_data
def _validate_dual_parameter(dual, loss, penalty, multi_class, X):
"""Helper function to assign the value of dual parameter."""
if dual == "auto":
if X.shape[0] < X.shape[1]:
try:
_get_liblinear_solver_type(multi_class, penalty, loss, True)
return True
except ValueError: # dual not supported for the combination
return False
else:
try:
_get_liblinear_solver_type(multi_class, penalty, loss, False)
return False
except ValueError: # primal not supported by the combination
return True
else:
return dual
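# --- Illustrative sketch (editor's addition, not part of the upstream module) --
# A minimal, hedged example of how the helper above resolves ``dual="auto"`` for
# the default LinearSVC loss/penalty combination; the arrays are hypothetical and
# only their shapes matter. The function is defined but never called, so module
# import behaviour is unchanged.
def _demo_validate_dual_parameter():  # pragma: no cover - illustration only
    X_tall = np.zeros((100, 5))  # n_samples >= n_features -> primal is preferred
    X_wide = np.zeros((5, 100))  # n_samples < n_features -> dual is preferred
    assert _validate_dual_parameter("auto", "squared_hinge", "l2", "ovr", X_tall) is False
    assert _validate_dual_parameter("auto", "squared_hinge", "l2", "ovr", X_wide) is True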
class LinearSVC(LinearClassifierMixin, SparseCoefMixin, BaseEstimator):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
The main differences between :class:`~sklearn.svm.LinearSVC` and
:class:`~sklearn.svm.SVC` lie in the loss function used by default, and in
the handling of intercept regularization between those two implementations.
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
penalty : {'l1', 'l2'}, default='l2'
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to ``coef_``
vectors that are sparse.
loss : {'hinge', 'squared_hinge'}, default='squared_hinge'
Specifies the loss function. 'hinge' is the standard SVM loss
(used e.g. by the SVC class) while 'squared_hinge' is the
square of the hinge loss. The combination of ``penalty='l1'``
and ``loss='hinge'`` is not supported.
dual : "auto" or bool, default="auto"
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
`dual="auto"` will choose the value of the parameter automatically,
based on the values of `n_samples`, `n_features`, `loss`, `multi_class`
        and `penalty`. If `n_samples` < `n_features` and the optimizer supports
        the chosen `loss`, `multi_class` and `penalty`, then dual will be set to
        True, otherwise it will be set to False.
.. versionchanged:: 1.3
The `"auto"` option is added in version 1.3 and will be the default
in version 1.5.
tol : float, default=1e-4
Tolerance for stopping criteria.
C : float, default=1.0
Regularization parameter. The strength of the regularization is
inversely proportional to C. Must be strictly positive.
For an intuitive visualization of the effects of scaling
the regularization parameter C, see
:ref:`sphx_glr_auto_examples_svm_plot_svm_scale_c.py`.
multi_class : {'ovr', 'crammer_singer'}, default='ovr'
Determines the multi-class strategy if `y` contains more than
two classes.
``"ovr"`` trains n_classes one-vs-rest classifiers, while
``"crammer_singer"`` optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
as it is consistent, it is seldom used in practice as it rarely leads
to better accuracy and is more expensive to compute.
If ``"crammer_singer"`` is chosen, the options loss, penalty and dual
will be ignored.
fit_intercept : bool, default=True
Whether or not to fit an intercept. If set to True, the feature vector
is extended to include an intercept term: `[x_1, ..., x_n, 1]`, where
1 corresponds to the intercept. If set to False, no intercept will be
used in calculations (i.e. data is expected to be already centered).
intercept_scaling : float, default=1.0
When `fit_intercept` is True, the instance vector x becomes ``[x_1,
..., x_n, intercept_scaling]``, i.e. a "synthetic" feature with a
constant value equal to `intercept_scaling` is appended to the instance
vector. The intercept becomes intercept_scaling * synthetic feature
weight. Note that liblinear internally penalizes the intercept,
treating it like any other term in the feature vector. To reduce the
impact of the regularization on the intercept, the `intercept_scaling`
parameter can be set to a value greater than 1; the higher the value of
`intercept_scaling`, the lower the impact of regularization on it.
Then, the weights become `[w_x_1, ..., w_x_n,
w_intercept*intercept_scaling]`, where `w_x_1, ..., w_x_n` represent
the feature weights and the intercept weight is scaled by
`intercept_scaling`. This scaling allows the intercept term to have a
different regularization behavior compared to the other features.
class_weight : dict or 'balanced', default=None
Set the parameter C of class i to ``class_weight[i]*C`` for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
verbose : int, default=0
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int, RandomState instance or None, default=None
Controls the pseudo random number generation for shuffling the data for
the dual coordinate descent (if ``dual=True``). When ``dual=False`` the
underlying implementation of :class:`LinearSVC` is not random and
``random_state`` has no effect on the results.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
max_iter : int, default=1000
The maximum number of iterations to be run.
Attributes
----------
coef_ : ndarray of shape (1, n_features) if n_classes == 2 \
else (n_classes, n_features)
Weights assigned to the features (coefficients in the primal
problem).
``coef_`` is a readonly property derived from ``raw_coef_`` that
follows the internal memory layout of liblinear.
intercept_ : ndarray of shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
classes_ : ndarray of shape (n_classes,)
The unique classes labels.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_iter_ : int
Maximum number of iterations run across all classes.
See Also
--------
SVC : Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
        scale to large numbers of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`~sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier : SGDClassifier can optimize the same
cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller ``tol`` parameter.
The underlying implementation, liblinear, uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
`LIBLINEAR: A Library for Large Linear Classification
<https://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
Examples
--------
>>> from sklearn.svm import LinearSVC
>>> from sklearn.pipeline import make_pipeline
>>> from sklearn.preprocessing import StandardScaler
>>> from sklearn.datasets import make_classification
>>> X, y = make_classification(n_features=4, random_state=0)
>>> clf = make_pipeline(StandardScaler(),
... LinearSVC(random_state=0, tol=1e-5))
>>> clf.fit(X, y)
Pipeline(steps=[('standardscaler', StandardScaler()),
('linearsvc', LinearSVC(random_state=0, tol=1e-05))])
>>> print(clf.named_steps['linearsvc'].coef_)
[[0.141 0.526 0.679 0.493]]
>>> print(clf.named_steps['linearsvc'].intercept_)
[0.1693]
>>> print(clf.predict([[0, 0, 0, 0]]))
[1]
"""
_parameter_constraints: dict = {
"penalty": [StrOptions({"l1", "l2"})],
"loss": [StrOptions({"hinge", "squared_hinge"})],
"dual": ["boolean", StrOptions({"auto"})],
"tol": [Interval(Real, 0.0, None, closed="neither")],
"C": [Interval(Real, 0.0, None, closed="neither")],
"multi_class": [StrOptions({"ovr", "crammer_singer"})],
"fit_intercept": ["boolean"],
"intercept_scaling": [Interval(Real, 0, None, closed="neither")],
"class_weight": [None, dict, StrOptions({"balanced"})],
"verbose": ["verbose"],
"random_state": ["random_state"],
"max_iter": [Interval(Integral, 0, None, closed="left")],
}
def __init__(
self,
penalty="l2",
loss="squared_hinge",
*,
dual="auto",
tol=1e-4,
C=1.0,
multi_class="ovr",
fit_intercept=True,
intercept_scaling=1,
class_weight=None,
verbose=0,
random_state=None,
max_iter=1000,
):
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.penalty = penalty
self.loss = loss
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target vector relative to X.
sample_weight : array-like of shape (n_samples,), default=None
Array of weights that are assigned to individual
samples. If not provided,
then each sample is given unit weight.
.. versionadded:: 0.18
Returns
-------
self : object
An instance of the estimator.
"""
X, y = validate_data(
self,
X,
y,
accept_sparse="csr",
dtype=np.float64,
order="C",
accept_large_sparse=False,
)
check_classification_targets(y)
self.classes_ = np.unique(y)
_dual = _validate_dual_parameter(
self.dual, self.loss, self.penalty, self.multi_class, X
)
self.coef_, self.intercept_, n_iter_ = _fit_liblinear(
X,
y,
self.C,
self.fit_intercept,
self.intercept_scaling,
self.class_weight,
self.penalty,
_dual,
self.verbose,
self.max_iter,
self.tol,
self.random_state,
self.multi_class,
self.loss,
sample_weight=sample_weight,
)
# Backward compatibility: _fit_liblinear is used both by LinearSVC/R
# and LogisticRegression but LogisticRegression sets a structured
# `n_iter_` attribute with information about the underlying OvR fits
# while LinearSVC/R only reports the maximum value.
self.n_iter_ = n_iter_.max().item()
if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
if self.fit_intercept:
intercept = self.intercept_[1] - self.intercept_[0]
self.intercept_ = np.array([intercept])
return self
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.sparse = True
return tags
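# --- Illustrative sketch (editor's addition, not part of the upstream module) --
# A hedged example of the binary "crammer_singer" collapse performed at the end
# of ``LinearSVC.fit`` above: on a two-class problem the per-class coefficients
# are reduced to a single row, matching the usual binary layout. The dataset and
# names below are hypothetical; the function is defined but never called.
def _demo_crammer_singer_binary_shapes():  # pragma: no cover - illustration only
    from sklearn.datasets import make_classification
    X_demo, y_demo = make_classification(n_features=4, random_state=0)
    clf = LinearSVC(multi_class="crammer_singer", random_state=0).fit(X_demo, y_demo)
    assert clf.coef_.shape == (1, 4)
    assert clf.intercept_.shape == (1,)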
class LinearSVR(RegressorMixin, LinearModel):
"""Linear Support Vector Regression.
Similar to SVR with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
The main differences between :class:`~sklearn.svm.LinearSVR` and
:class:`~sklearn.svm.SVR` lie in the loss function used by default, and in
the handling of intercept regularization between those two implementations.
This class supports both dense and sparse input.
Read more in the :ref:`User Guide <svm_regression>`.
.. versionadded:: 0.16
Parameters
----------
epsilon : float, default=0.0
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set ``epsilon=0``.
tol : float, default=1e-4
Tolerance for stopping criteria.
C : float, default=1.0
Regularization parameter. The strength of the regularization is
inversely proportional to C. Must be strictly positive.
loss : {'epsilon_insensitive', 'squared_epsilon_insensitive'}, \
default='epsilon_insensitive'
Specifies the loss function. The epsilon-insensitive loss
(standard SVR) is the L1 loss, while the squared epsilon-insensitive
loss ('squared_epsilon_insensitive') is the L2 loss.
fit_intercept : bool, default=True
Whether or not to fit an intercept. If set to True, the feature vector
is extended to include an intercept term: `[x_1, ..., x_n, 1]`, where
1 corresponds to the intercept. If set to False, no intercept will be
used in calculations (i.e. data is expected to be already centered).
intercept_scaling : float, default=1.0
When `fit_intercept` is True, the instance vector x becomes `[x_1, ...,
x_n, intercept_scaling]`, i.e. a "synthetic" feature with a constant
value equal to `intercept_scaling` is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight.
Note that liblinear internally penalizes the intercept, treating it
like any other term in the feature vector. To reduce the impact of the
regularization on the intercept, the `intercept_scaling` parameter can
be set to a value greater than 1; the higher the value of
`intercept_scaling`, the lower the impact of regularization on it.
Then, the weights become `[w_x_1, ..., w_x_n,
w_intercept*intercept_scaling]`, where `w_x_1, ..., w_x_n` represent
the feature weights and the intercept weight is scaled by
`intercept_scaling`. This scaling allows the intercept term to have a
different regularization behavior compared to the other features.
dual : "auto" or bool, default="auto"
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
`dual="auto"` will choose the value of the parameter automatically,
based on the values of `n_samples`, `n_features` and `loss`. If
        `n_samples` < `n_features` and the optimizer supports the chosen `loss`,
then dual will be set to True, otherwise it will be set to False.
.. versionchanged:: 1.3
The `"auto"` option is added in version 1.3 and will be the default
in version 1.5.
verbose : int, default=0
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int, RandomState instance or None, default=None
Controls the pseudo random number generation for shuffling the data.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
max_iter : int, default=1000
The maximum number of iterations to be run.
Attributes
----------
    coef_ : ndarray of shape (n_features,)
Weights assigned to the features (coefficients in the primal
problem).
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
    intercept_ : ndarray of shape (1,)
Constants in decision function.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_iter_ : int
Maximum number of iterations run across all classes.
See Also
--------
LinearSVC : Implementation of Support Vector Machine classifier using the
same library as this class (liblinear).
SVR : Implementation of Support Vector Machine regression using libsvm:
the kernel can be non-linear but its SMO algorithm does not scale to
        large numbers of samples as :class:`~sklearn.svm.LinearSVR` does.
sklearn.linear_model.SGDRegressor : SGDRegressor can optimize the same cost
function as LinearSVR
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
Examples
--------
>>> from sklearn.svm import LinearSVR
>>> from sklearn.pipeline import make_pipeline
>>> from sklearn.preprocessing import StandardScaler
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(n_features=4, random_state=0)
>>> regr = make_pipeline(StandardScaler(),
... LinearSVR(random_state=0, tol=1e-5))
>>> regr.fit(X, y)
Pipeline(steps=[('standardscaler', StandardScaler()),
('linearsvr', LinearSVR(random_state=0, tol=1e-05))])
>>> print(regr.named_steps['linearsvr'].coef_)
[18.582 27.023 44.357 64.522]
>>> print(regr.named_steps['linearsvr'].intercept_)
[-4.]
>>> print(regr.predict([[0, 0, 0, 0]]))
[-2.384]
"""
_parameter_constraints: dict = {
"epsilon": [Real],
"tol": [Interval(Real, 0.0, None, closed="neither")],
"C": [Interval(Real, 0.0, None, closed="neither")],
"loss": [StrOptions({"epsilon_insensitive", "squared_epsilon_insensitive"})],
"fit_intercept": ["boolean"],
"intercept_scaling": [Interval(Real, 0, None, closed="neither")],
"dual": ["boolean", StrOptions({"auto"})],
"verbose": ["verbose"],
"random_state": ["random_state"],
"max_iter": [Interval(Integral, 0, None, closed="left")],
}
def __init__(
self,
*,
epsilon=0.0,
tol=1e-4,
C=1.0,
loss="epsilon_insensitive",
fit_intercept=True,
intercept_scaling=1.0,
dual="auto",
verbose=0,
random_state=None,
max_iter=1000,
):
self.tol = tol
self.C = C
self.epsilon = epsilon
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.dual = dual
self.loss = loss
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target vector relative to X.
sample_weight : array-like of shape (n_samples,), default=None
Array of weights that are assigned to individual
samples. If not provided,
then each sample is given unit weight.
.. versionadded:: 0.18
Returns
-------
self : object
An instance of the estimator.
"""
X, y = validate_data(
self,
X,
y,
accept_sparse="csr",
dtype=np.float64,
order="C",
accept_large_sparse=False,
)
penalty = "l2" # SVR only accepts l2 penalty
_dual = _validate_dual_parameter(self.dual, self.loss, penalty, "ovr", X)
self.coef_, self.intercept_, n_iter_ = _fit_liblinear(
X,
y,
self.C,
self.fit_intercept,
self.intercept_scaling,
None,
penalty,
_dual,
self.verbose,
self.max_iter,
self.tol,
self.random_state,
loss=self.loss,
epsilon=self.epsilon,
sample_weight=sample_weight,
)
self.coef_ = self.coef_.ravel()
# Backward compatibility: _fit_liblinear is used both by LinearSVC/R
# and LogisticRegression but LogisticRegression sets a structured
# `n_iter_` attribute with information about the underlying OvR fits
# while LinearSVC/R only reports the maximum value.
self.n_iter_ = n_iter_.max().item()
return self
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.sparse = True
return tags
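# --- Illustrative sketch (editor's addition, not part of the upstream module) --
# A hedged example of the fitted attribute shapes documented above: LinearSVR is
# a regressor, so ``coef_`` is one-dimensional (it is ravelled at the end of
# ``fit``) and ``intercept_`` has a single entry. The dataset and names below are
# hypothetical; the function is defined but never called.
def _demo_linear_svr_shapes():  # pragma: no cover - illustration only
    from sklearn.datasets import make_regression
    X_demo, y_demo = make_regression(n_features=4, random_state=0)
    reg = LinearSVR(random_state=0, max_iter=10000).fit(X_demo, y_demo)
    assert reg.coef_.shape == (4,)
    assert reg.intercept_.shape == (1,)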
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time scales at least
quadratically with the number of samples and may be impractical
beyond tens of thousands of samples. For large datasets
consider using :class:`~sklearn.svm.LinearSVC` or
:class:`~sklearn.linear_model.SGDClassifier` instead, possibly after a
:class:`~sklearn.kernel_approximation.Nystroem` transformer or
other :ref:`kernel_approximation`.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each
other, see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
To learn how to tune SVC's hyperparameters, see the following example:
:ref:`sphx_glr_auto_examples_model_selection_plot_nested_cross_validation_iris.py`
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, default=1.0
Regularization parameter. The strength of the regularization is
inversely proportional to C. Must be strictly positive. The penalty
is a squared l2 penalty. For an intuitive visualization of the effects
of scaling the regularization parameter C, see
:ref:`sphx_glr_auto_examples_svm_plot_svm_scale_c.py`.
kernel : {'linear', 'poly', 'rbf', 'sigmoid', 'precomputed'} or callable, \
default='rbf'
Specifies the kernel type to be used in the algorithm. If
none is given, 'rbf' will be used. If a callable is given it is used to
pre-compute the kernel matrix from data matrices; that matrix should be
an array of shape ``(n_samples, n_samples)``. For an intuitive
visualization of different kernel types see
:ref:`sphx_glr_auto_examples_svm_plot_svm_kernels.py`.
degree : int, default=3
Degree of the polynomial kernel function ('poly').
Must be non-negative. Ignored by all other kernels.
gamma : {'scale', 'auto'} or float, default='scale'
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
- if ``gamma='scale'`` (default) is passed then it uses
1 / (n_features * X.var()) as value of gamma,
- if 'auto', uses 1 / n_features
- if float, must be non-negative.
.. versionchanged:: 0.22
The default value of ``gamma`` changed from 'auto' to 'scale'.
coef0 : float, default=0.0
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : bool, default=True
Whether to use the shrinking heuristic.
See the :ref:`User Guide <shrinking_svm>`.
probability : bool, default=False
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, will slow down that method as it internally uses
5-fold cross-validation, and `predict_proba` may be inconsistent with
`predict`. Read more in the :ref:`User Guide <scores_probabilities>`.
tol : float, default=1e-3
Tolerance for stopping criterion.
cache_size : float, default=200
Specify the size of the kernel cache (in MB).
class_weight : dict or 'balanced', default=None
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
verbose : bool, default=False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, default=-1
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : {'ovo', 'ovr'}, default='ovr'
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2). However, note that
internally, one-vs-one ('ovo') is always used as a multi-class strategy
to train models; an ovr matrix is only constructed from the ovo matrix.
The parameter is ignored for binary classification.
.. versionchanged:: 0.19
decision_function_shape is 'ovr' by default.
.. versionadded:: 0.17
*decision_function_shape='ovr'* is recommended.
.. versionchanged:: 0.17
Deprecated *decision_function_shape='ovo' and None*.
break_ties : bool, default=False
If true, ``decision_function_shape='ovr'``, and number of classes > 2,
:term:`predict` will break ties according to the confidence values of
:term:`decision_function`; otherwise the first class among the tied
classes is returned. Please note that breaking ties comes at a
relatively high computational cost compared to a simple predict. See
:ref:`sphx_glr_auto_examples_svm_plot_svm_tie_breaking.py` for an
example of its usage with ``decision_function_shape='ovr'``.
.. versionadded:: 0.22
random_state : int, RandomState instance or None, default=None
Controls the pseudo random number generation for shuffling the data for
probability estimates. Ignored when `probability` is False.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
class_weight_ : ndarray of shape (n_classes,)
Multipliers of parameter C for each class.
Computed based on the ``class_weight`` parameter.
classes_ : ndarray of shape (n_classes,)
The classes labels.
coef_ : ndarray of shape (n_classes * (n_classes - 1) / 2, n_features)
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
dual_coef_ : ndarray of shape (n_classes -1, n_SV)
Dual coefficients of the support vector in the decision
function (see :ref:`sgd_mathematical_formulation`), multiplied by
their targets.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the :ref:`multi-class section of the User Guide
<svm_multi_class>` for details.
fit_status_ : int
0 if correctly fitted, 1 otherwise (will raise warning)
intercept_ : ndarray of shape (n_classes * (n_classes - 1) / 2,)
Constants in decision function.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_iter_ : ndarray of shape (n_classes * (n_classes - 1) // 2,)
Number of iterations run by the optimization routine to fit the model.
The shape of this attribute depends on the number of models optimized
which in turn depends on the number of classes.
.. versionadded:: 1.1
support_ : ndarray of shape (n_SV)
Indices of support vectors.
support_vectors_ : ndarray of shape (n_SV, n_features)
Support vectors. An empty array if kernel is precomputed.
n_support_ : ndarray of shape (n_classes,), dtype=int32
Number of support vectors for each class.
probA_ : ndarray of shape (n_classes * (n_classes - 1) / 2)
probB_ : ndarray of shape (n_classes * (n_classes - 1) / 2)
If `probability=True`, it corresponds to the parameters learned in
Platt scaling to produce probability estimates from decision values.
If `probability=False`, it's an empty array. Platt scaling uses the
logistic function
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/svm/_bounds.py | sklearn/svm/_bounds.py | """Determination of parameter bounds"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from numbers import Real
import numpy as np
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils._param_validation import Interval, StrOptions, validate_params
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.validation import check_array, check_consistent_length
@validate_params(
{
"X": ["array-like", "sparse matrix"],
"y": ["array-like"],
"loss": [StrOptions({"squared_hinge", "log"})],
"fit_intercept": ["boolean"],
"intercept_scaling": [Interval(Real, 0, None, closed="neither")],
},
prefer_skip_nested_validation=True,
)
def l1_min_c(X, y, *, loss="squared_hinge", fit_intercept=True, intercept_scaling=1.0):
"""Return the lowest bound for `C`.
The lower bound for `C` is computed such that for `C` in `(l1_min_C, infinity)`
the model is guaranteed not to be empty. This applies to l1 penalized
classifiers, such as :class:`sklearn.svm.LinearSVC` with penalty='l1' and
:class:`sklearn.linear_model.LogisticRegression` with `l1_ratio=1`.
This value is valid if `class_weight` parameter in `fit()` is not set.
For an example of how to use this function, see
:ref:`sphx_glr_auto_examples_linear_model_plot_logistic_path.py`.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target vector relative to X.
loss : {'squared_hinge', 'log'}, default='squared_hinge'
Specifies the loss function.
With 'squared_hinge' it is the squared hinge loss (a.k.a. L2 loss).
With 'log' it is the loss of logistic regression models.
fit_intercept : bool, default=True
Specifies if the intercept should be fitted by the model.
It must match the fit() method parameter.
intercept_scaling : float, default=1.0
        When fit_intercept is True, the instance vector x becomes
        [x, intercept_scaling],
        i.e. a "synthetic" feature with a constant value equal to
        intercept_scaling is appended to the instance vector.
It must match the fit() method parameter.
Returns
-------
l1_min_c : float
Minimum value for C.
Examples
--------
>>> from sklearn.svm import l1_min_c
>>> from sklearn.datasets import make_classification
>>> X, y = make_classification(n_samples=100, n_features=20, random_state=42)
>>> print(f"{l1_min_c(X, y, loss='squared_hinge', fit_intercept=True):.4f}")
0.0044
"""
X = check_array(X, accept_sparse="csc")
check_consistent_length(X, y)
Y = LabelBinarizer(neg_label=-1).fit_transform(y).T
# maximum absolute value over classes and features
den = np.max(np.abs(safe_sparse_dot(Y, X)))
if fit_intercept:
bias = np.full(
(np.size(y), 1), intercept_scaling, dtype=np.array(intercept_scaling).dtype
)
den = max(den, abs(np.dot(Y, bias)).max())
if den == 0.0:
raise ValueError(
"Ill-posed l1_min_c calculation: l1 will always "
"select zero coefficients for this data"
)
if loss == "squared_hinge":
return 0.5 / den
else: # loss == 'log':
return 2.0 / den
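# --- Illustrative sketch (editor's addition, not part of the upstream module) --
# A hedged example recomputing the bound by hand for a tiny dataset with
# ``fit_intercept=False``, to make the closed form above concrete
# (0.5 / max|Y @ X| for the squared hinge loss). The data and names below are
# hypothetical; the function is defined but never called.
def _demo_l1_min_c_by_hand():  # pragma: no cover - illustration only
    X_demo = np.array([[-1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
    y_demo = [0, 1, 1]
    Y_demo = LabelBinarizer(neg_label=-1).fit_transform(y_demo).T  # shape (1, 3)
    den = np.max(np.abs(Y_demo @ X_demo))
    assert np.isclose(l1_min_c(X_demo, y_demo, fit_intercept=False), 0.5 / den)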
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/svm/tests/test_sparse.py | sklearn/svm/tests/test_sparse.py | import numpy as np
import pytest
from scipy import sparse
from sklearn import base, datasets, linear_model, svm
from sklearn.datasets import load_digits, make_blobs, make_classification
from sklearn.exceptions import ConvergenceWarning
from sklearn.svm.tests import test_svm
from sklearn.utils._testing import (
assert_allclose,
assert_array_almost_equal,
assert_array_equal,
ignore_warnings,
skip_if_32bit,
)
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.fixes import (
CSR_CONTAINERS,
DOK_CONTAINERS,
LIL_CONTAINERS,
)
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array(
[
[0, 0, 0],
[1, 1, 1],
[2, 0, 0],
[0, 0, 2],
[3, 3, 3],
]
)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0)
def check_svm_model_equal(dense_svm, X_train, y_train, X_test):
# Use the original svm model for dense fit and clone an exactly same
# svm model for sparse fit
sparse_svm = base.clone(dense_svm)
dense_svm.fit(X_train.toarray(), y_train)
if sparse.issparse(X_test):
X_test_dense = X_test.toarray()
else:
X_test_dense = X_test
sparse_svm.fit(X_train, y_train)
assert sparse.issparse(sparse_svm.support_vectors_)
assert sparse.issparse(sparse_svm.dual_coef_)
assert_allclose(dense_svm.support_vectors_, sparse_svm.support_vectors_.toarray())
assert_allclose(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray())
if dense_svm.kernel == "linear":
assert sparse.issparse(sparse_svm.coef_)
assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray())
assert_allclose(dense_svm.support_, sparse_svm.support_)
assert_allclose(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test))
assert_array_almost_equal(
dense_svm.decision_function(X_test_dense), sparse_svm.decision_function(X_test)
)
assert_array_almost_equal(
dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test_dense),
)
if isinstance(dense_svm, svm.OneClassSVM):
msg = "cannot use sparse input in 'OneClassSVM' trained on dense data"
else:
if hasattr(dense_svm, "predict_proba"):
assert_array_almost_equal(
dense_svm.predict_proba(X_test_dense),
sparse_svm.predict_proba(X_test),
decimal=4,
)
msg = "cannot use sparse input in 'SVC' trained on dense data"
if sparse.issparse(X_test):
with pytest.raises(ValueError, match=msg):
dense_svm.predict(X_test)
# XXX: probability=True is not thread-safe:
# https://github.com/scikit-learn/scikit-learn/issues/31885
@pytest.mark.thread_unsafe
@skip_if_32bit
@pytest.mark.parametrize(
"X_train, y_train, X_test",
[
[X, Y, T],
[X2, Y2, T2],
[X_blobs[:80], y_blobs[:80], X_blobs[80:]],
[iris.data, iris.target, iris.data],
],
)
@pytest.mark.parametrize("kernel", ["linear", "poly", "rbf", "sigmoid"])
@pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + LIL_CONTAINERS)
def test_svc(X_train, y_train, X_test, kernel, sparse_container):
"""Check that sparse SVC gives the same result as SVC."""
X_train = sparse_container(X_train)
clf = svm.SVC(
gamma=1,
kernel=kernel,
probability=True,
random_state=0,
decision_function_shape="ovo",
)
check_svm_model_equal(clf, X_train, y_train, X_test)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_unsorted_indices(csr_container):
# test that the result with sorted and unsorted indices in csr is the same
# we use a subset of digits as iris, blobs or make_classification didn't
# show the problem
X, y = load_digits(return_X_y=True)
X_test = csr_container(X[50:100])
X, y = X[:50], y[:50]
tols = dict(rtol=1e-12, atol=1e-14)
X_sparse = csr_container(X)
coef_dense = (
svm.SVC(kernel="linear", probability=True, random_state=0).fit(X, y).coef_
)
sparse_svc = svm.SVC(kernel="linear", probability=True, random_state=0).fit(
X_sparse, y
)
coef_sorted = sparse_svc.coef_
# make sure dense and sparse SVM give the same result
assert_allclose(coef_dense, coef_sorted.toarray(), **tols)
# reverse each row's indices
def scramble_indices(X):
new_data = []
new_indices = []
for i in range(1, len(X.indptr)):
row_slice = slice(*X.indptr[i - 1 : i + 1])
new_data.extend(X.data[row_slice][::-1])
new_indices.extend(X.indices[row_slice][::-1])
return csr_container((new_data, new_indices, X.indptr), shape=X.shape)
X_sparse_unsorted = scramble_indices(X_sparse)
X_test_unsorted = scramble_indices(X_test)
assert not X_sparse_unsorted.has_sorted_indices
assert not X_test_unsorted.has_sorted_indices
unsorted_svc = svm.SVC(kernel="linear", probability=True, random_state=0).fit(
X_sparse_unsorted, y
)
coef_unsorted = unsorted_svc.coef_
# make sure unsorted indices give same result
assert_allclose(coef_unsorted.toarray(), coef_sorted.toarray(), **tols)
assert_allclose(
sparse_svc.predict_proba(X_test_unsorted),
sparse_svc.predict_proba(X_test),
**tols,
)
@pytest.mark.parametrize("lil_container", LIL_CONTAINERS)
def test_svc_with_custom_kernel(lil_container):
def kfunc(x, y):
return safe_sparse_dot(x, y.T)
X_sp = lil_container(X)
clf_lin = svm.SVC(kernel="linear").fit(X_sp, Y)
clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
@skip_if_32bit
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
@pytest.mark.parametrize("kernel", ["linear", "poly", "rbf"])
def test_svc_iris(csr_container, kernel):
# Test the sparse SVC with the iris dataset
iris_data_sp = csr_container(iris.data)
sp_clf = svm.SVC(kernel=kernel).fit(iris_data_sp, iris.target)
clf = svm.SVC(kernel=kernel).fit(iris.data, iris.target)
assert_allclose(clf.support_vectors_, sp_clf.support_vectors_.toarray())
assert_allclose(clf.dual_coef_, sp_clf.dual_coef_.toarray())
assert_allclose(clf.predict(iris.data), sp_clf.predict(iris_data_sp))
if kernel == "linear":
assert_allclose(clf.coef_, sp_clf.coef_.toarray())
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_sparse_decision_function(csr_container):
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
iris_data_sp = csr_container(iris.data)
svc = svm.SVC(kernel="linear", C=0.1, decision_function_shape="ovo")
clf = svc.fit(iris_data_sp, iris.target)
dec = safe_sparse_dot(iris_data_sp, clf.coef_.T) + clf.intercept_
assert_allclose(dec, clf.decision_function(iris_data_sp))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_allclose(dec.ravel(), clf.decision_function(X))
assert_allclose(
prediction, clf.classes_[(clf.decision_function(X) > 0).astype(int).ravel()]
)
expected = np.array([-1.0, -0.66, -1.0, 0.66, 1.0, 1.0])
assert_array_almost_equal(clf.decision_function(X), expected, decimal=2)
@pytest.mark.parametrize("lil_container", LIL_CONTAINERS)
def test_error(lil_container):
# Test that it gives proper exception on deficient input
clf = svm.SVC()
X_sp = lil_container(X)
Y2 = Y[:-1] # wrong dimensions for labels
with pytest.raises(ValueError):
clf.fit(X_sp, Y2)
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(T), true_result)
@pytest.mark.parametrize(
"lil_container, dok_container", zip(LIL_CONTAINERS, DOK_CONTAINERS)
)
def test_linearsvc(lil_container, dok_container):
# Similar to test_SVC
X_sp = lil_container(X)
X2_sp = dok_container(X2)
clf = svm.LinearSVC(random_state=0).fit(X, Y)
sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)
assert sp_clf.fit_intercept
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
assert_allclose(clf.predict(X), sp_clf.predict(X_sp))
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_linearsvc_iris(csr_container):
# Test the sparse LinearSVC with the iris dataset
iris_data_sp = csr_container(iris.data)
sp_clf = svm.LinearSVC(random_state=0).fit(iris_data_sp, iris.target)
clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
assert clf.fit_intercept == sp_clf.fit_intercept
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)
assert_allclose(clf.predict(iris.data), sp_clf.predict(iris_data_sp))
# check decision_function
pred = np.argmax(sp_clf.decision_function(iris_data_sp), axis=1)
assert_allclose(pred, clf.predict(iris.data))
# sparsify the coefficients on both models and check that they still
# produce the same results
clf.sparsify()
assert_array_equal(pred, clf.predict(iris_data_sp))
sp_clf.sparsify()
assert_array_equal(pred, sp_clf.predict(iris_data_sp))
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_weight(csr_container):
# Test class weights
X_, y_ = make_classification(
n_samples=200, n_features=100, weights=[0.833, 0.167], random_state=0
)
X_ = csr_container(X_)
for clf in (
linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0),
svm.SVC(),
):
clf.set_params(class_weight={0: 5})
clf.fit(X_[:180], y_[:180])
y_pred = clf.predict(X_[180:])
assert np.sum(y_pred == y_[180:]) >= 11
@pytest.mark.parametrize("lil_container", LIL_CONTAINERS)
def test_sample_weights(lil_container):
# Test weights on individual samples
X_sp = lil_container(X)
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict([X[2]]), [1.0])
sample_weight = [0.1] * 3 + [10] * 3
clf.fit(X_sp, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.0])
def test_sparse_liblinear_intercept_handling():
# Test that sparse liblinear honours intercept_scaling param
test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
@pytest.mark.parametrize(
"X_train, y_train, X_test",
[
[X, None, T],
[X2, None, T2],
[X_blobs[:80], None, X_blobs[80:]],
[iris.data, None, iris.data],
],
)
@pytest.mark.parametrize("kernel", ["linear", "poly", "rbf", "sigmoid"])
@pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + LIL_CONTAINERS)
@skip_if_32bit
def test_sparse_oneclasssvm(X_train, y_train, X_test, kernel, sparse_container):
# Check that sparse OneClassSVM gives the same result as dense OneClassSVM
X_train = sparse_container(X_train)
clf = svm.OneClassSVM(gamma=1, kernel=kernel)
check_svm_model_equal(clf, X_train, y_train, X_test)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_sparse_realdata(csr_container):
# Test on a subset from the 20newsgroups dataset.
# This catches some bugs if input is not correctly converted into
# sparse format or weights are not correctly initialized.
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
# SVC does not support large sparse, so we specify int32 indices
# In this case, `csr_matrix` automatically uses int32 regardless of the dtypes of
# `indices` and `indptr` but `csr_array` may or may not use the same dtype as
# `indices` and `indptr`, which would be int64 if not specified
indices = np.array([6, 5, 35, 31], dtype=np.int32)
indptr = np.array([0] * 8 + [1] * 32 + [2] * 38 + [4] * 3, dtype=np.int32)
X = csr_container((data, indices, indptr))
y = np.array(
[
1.0,
0.0,
2.0,
2.0,
1.0,
1.0,
1.0,
2.0,
2.0,
0.0,
1.0,
2.0,
2.0,
0.0,
2.0,
0.0,
3.0,
0.0,
3.0,
0.0,
1.0,
1.0,
3.0,
2.0,
3.0,
2.0,
0.0,
3.0,
1.0,
0.0,
2.0,
1.0,
2.0,
0.0,
1.0,
0.0,
2.0,
3.0,
1.0,
3.0,
0.0,
1.0,
0.0,
0.0,
2.0,
0.0,
1.0,
2.0,
2.0,
2.0,
3.0,
2.0,
0.0,
3.0,
2.0,
1.0,
2.0,
3.0,
2.0,
2.0,
0.0,
1.0,
0.0,
1.0,
2.0,
3.0,
0.0,
0.0,
2.0,
2.0,
1.0,
3.0,
1.0,
1.0,
0.0,
1.0,
2.0,
1.0,
1.0,
3.0,
]
)
clf = svm.SVC(kernel="linear").fit(X.toarray(), y)
sp_clf = svm.SVC(kernel="linear").fit(X.tocoo(), y)
assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
@pytest.mark.parametrize("lil_container", LIL_CONTAINERS)
def test_sparse_svc_clone_with_callable_kernel(lil_container):
# Test that the "dense_fit" is called even though we use sparse input
# meaning that everything works fine.
a = svm.SVC(C=1, kernel=lambda x, y: x @ y.T, probability=True, random_state=0)
b = base.clone(a)
X_sp = lil_container(X)
b.fit(X_sp, Y)
pred = b.predict(X_sp)
b.predict_proba(X_sp)
dense_svm = svm.SVC(
C=1, kernel=lambda x, y: np.dot(x, y.T), probability=True, random_state=0
)
pred_dense = dense_svm.fit(X, Y).predict(X)
assert_array_equal(pred_dense, pred)
# b.decision_function(X_sp) # XXX : should be supported
@pytest.mark.parametrize("lil_container", LIL_CONTAINERS)
def test_timeout(lil_container):
sp = svm.SVC(
C=1, kernel=lambda x, y: x @ y.T, probability=True, random_state=0, max_iter=1
)
warning_msg = (
r"Solver terminated early \(max_iter=1\). Consider pre-processing "
r"your data with StandardScaler or MinMaxScaler."
)
with pytest.warns(ConvergenceWarning, match=warning_msg):
sp.fit(lil_container(X), Y)
# XXX: probability=True is not thread-safe:
# https://github.com/scikit-learn/scikit-learn/issues/31885
@pytest.mark.thread_unsafe
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
with ignore_warnings(category=ConvergenceWarning):
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
with ignore_warnings(category=ConvergenceWarning):
proba_2 = a.fit(X, Y).predict_proba(X)
assert_allclose(proba_1, proba_2)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/svm/tests/__init__.py | sklearn/svm/tests/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/svm/tests/test_bounds.py | sklearn/svm/tests/test_bounds.py | import numpy as np
import pytest
from scipy import stats
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
from sklearn.svm._bounds import l1_min_c
from sklearn.svm._newrand import bounded_rand_int_wrap, set_seed_wrap
from sklearn.utils.fixes import CSR_CONTAINERS
@pytest.mark.parametrize("X_container", CSR_CONTAINERS + [np.array])
@pytest.mark.parametrize("loss", ["squared_hinge", "log"])
@pytest.mark.parametrize("intercept_label", ["no-intercept", "fit-intercept"])
def test_l1_min_c(X_container, loss, intercept_label):
intercepts = {
"no-intercept": {"fit_intercept": False},
"fit-intercept": {"fit_intercept": True, "intercept_scaling": 10},
}
X = X_container([[-1, 0], [0, 1], [1, 1], [1, 1]])
Y = [0, 1, 1, 1]
intercept_params = intercepts[intercept_label]
check_l1_min_c(X, Y, loss, **intercept_params)
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=1.0):
min_c = l1_min_c(
X,
y,
loss=loss,
fit_intercept=fit_intercept,
intercept_scaling=intercept_scaling,
)
clf = {
"log": LogisticRegression(l1_ratio=1, solver="liblinear"),
"squared_hinge": LinearSVC(loss="squared_hinge", penalty="l1", dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert (np.asarray(clf.coef_) == 0).all()
assert (np.asarray(clf.intercept_) == 0).all()
clf.C = min_c * 1.01
clf.fit(X, y)
assert (np.asarray(clf.coef_) != 0).any() or (np.asarray(clf.intercept_) != 0).any()
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
with pytest.raises(ValueError):
l1_min_c(X, y)
_MAX_UNSIGNED_INT = 4294967295
def test_newrand_default():
"""Test that bounded_rand_int_wrap without seeding respects the range
    Note this test should pass either if executed alone, or in conjunction
    with other tests that call set_seed explicitly, in any order: it checks
invariants on the RNG instead of specific values.
"""
generated = [bounded_rand_int_wrap(100) for _ in range(10)]
assert all(0 <= x < 100 for x in generated)
assert not all(x == generated[0] for x in generated)
@pytest.mark.thread_unsafe
@pytest.mark.parametrize("seed, expected", [(0, 54), (_MAX_UNSIGNED_INT, 9)])
def test_newrand_set_seed(seed, expected):
"""Test that `set_seed` produces deterministic results"""
set_seed_wrap(seed)
generated = bounded_rand_int_wrap(100)
assert generated == expected
@pytest.mark.parametrize("seed", [-1, _MAX_UNSIGNED_INT + 1])
def test_newrand_set_seed_overflow(seed):
"""Test that `set_seed_wrap` is defined for unsigned 32bits ints"""
with pytest.raises(OverflowError):
set_seed_wrap(seed)
@pytest.mark.thread_unsafe
@pytest.mark.parametrize("range_, n_pts", [(_MAX_UNSIGNED_INT, 10000), (100, 25)])
def test_newrand_bounded_rand_int(range_, n_pts):
"""Test that `bounded_rand_int` follows a uniform distribution"""
# XXX: this test is very seed sensitive: either it is wrong (too strict?)
# or the wrapped RNG is not uniform enough, at least on some platforms.
set_seed_wrap(42)
n_iter = 100
ks_pvals = []
uniform_dist = stats.uniform(loc=0, scale=range_)
# perform multiple samplings to make chance of outlier sampling negligible
for _ in range(n_iter):
# Deterministic random sampling
sample = [bounded_rand_int_wrap(range_) for _ in range(n_pts)]
res = stats.kstest(sample, uniform_dist.cdf)
ks_pvals.append(res.pvalue)
    # Null hypothesis = samples come from a uniform distribution.
# Under the null hypothesis, p-values should be uniformly distributed
# and not concentrated on low values
# (this may seem counter-intuitive but is backed by multiple refs)
# So we can do two checks:
# (1) check uniformity of p-values
uniform_p_vals_dist = stats.uniform(loc=0, scale=1)
res_pvals = stats.kstest(ks_pvals, uniform_p_vals_dist.cdf)
assert res_pvals.pvalue > 0.05, (
"Null hypothesis rejected: generated random numbers are not uniform."
" Details: the (meta) p-value of the test of uniform distribution"
f" of p-values is {res_pvals.pvalue} which is not > 0.05"
)
# (2) (safety belt) check that 90% of p-values are above 0.05
min_10pct_pval = np.percentile(ks_pvals, q=10)
# lower 10th quantile pvalue <= 0.05 means that the test rejects the
# null hypothesis that the sample came from the uniform distribution
assert min_10pct_pval > 0.05, (
"Null hypothesis rejected: generated random numbers are not uniform. "
f"Details: lower 10th quantile p-value of {min_10pct_pval} not > 0.05."
)
@pytest.mark.parametrize("range_", [-1, _MAX_UNSIGNED_INT + 1])
def test_newrand_bounded_rand_int_limits(range_):
"""Test that `bounded_rand_int_wrap` is defined for unsigned 32bits ints"""
with pytest.raises(OverflowError):
bounded_rand_int_wrap(range_)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/svm/tests/test_svm.py | sklearn/svm/tests/test_svm.py | """
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
import pytest
from numpy.testing import (
assert_allclose,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
)
from sklearn import base, datasets, linear_model, metrics, svm
from sklearn.datasets import make_blobs, make_classification, make_regression
from sklearn.exceptions import (
ConvergenceWarning,
NotFittedError,
)
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier
# mypy error: Module 'sklearn.svm' has no attribute '_libsvm'
from sklearn.svm import ( # type: ignore[attr-defined]
SVR,
LinearSVC,
LinearSVR,
NuSVR,
OneClassSVM,
_libsvm,
)
from sklearn.svm._classes import _validate_dual_parameter
from sklearn.utils import check_random_state, shuffle
from sklearn.utils.fixes import _IS_32BIT, CSR_CONTAINERS, LIL_CONTAINERS
from sklearn.utils.validation import _num_samples
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
def get_iris_dataset(random_seed):
iris = datasets.load_iris()
rng = check_random_state(random_seed)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
return iris
def test_libsvm_parameters():
# Test parameters on classes that make use of libsvm.
clf = svm.SVC(kernel="linear").fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-0.25, 0.25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.support_vectors_, (X[1], X[3]))
assert_array_equal(clf.intercept_, [0.0])
assert_array_equal(clf.predict(X), Y)
# XXX: this test is thread-unsafe because it uses _libsvm.cross_validation:
# https://github.com/scikit-learn/scikit-learn/issues/31885
@pytest.mark.thread_unsafe
def test_libsvm_iris(global_random_seed):
# Check consistency on dataset iris.
iris = get_iris_dataset(global_random_seed)
# shuffle the dataset so that labels are not ordered
for k in ("linear", "rbf"):
clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
assert np.mean(clf.predict(iris.data) == iris.target) > 0.9
assert hasattr(clf, "coef_") == (k == "linear")
assert_array_equal(clf.classes_, np.sort(clf.classes_))
# check also the low-level API
# We unpack the values to create a dictionary with some of the return values
# from Libsvm's fit.
(
libsvm_support,
libsvm_support_vectors,
libsvm_n_class_SV,
libsvm_sv_coef,
libsvm_intercept,
libsvm_probA,
libsvm_probB,
# libsvm_fit_status and libsvm_n_iter won't be used below.
libsvm_fit_status,
libsvm_n_iter,
) = _libsvm.fit(iris.data, iris.target.astype(np.float64))
model_params = {
"support": libsvm_support,
"SV": libsvm_support_vectors,
"nSV": libsvm_n_class_SV,
"sv_coef": libsvm_sv_coef,
"intercept": libsvm_intercept,
"probA": libsvm_probA,
"probB": libsvm_probB,
}
pred = _libsvm.predict(iris.data, **model_params)
assert np.mean(pred == iris.target) > 0.95
# We unpack the values to create a dictionary with some of the return values
# from Libsvm's fit.
(
libsvm_support,
libsvm_support_vectors,
libsvm_n_class_SV,
libsvm_sv_coef,
libsvm_intercept,
libsvm_probA,
libsvm_probB,
# libsvm_fit_status and libsvm_n_iter won't be used below.
libsvm_fit_status,
libsvm_n_iter,
) = _libsvm.fit(iris.data, iris.target.astype(np.float64), kernel="linear")
model_params = {
"support": libsvm_support,
"SV": libsvm_support_vectors,
"nSV": libsvm_n_class_SV,
"sv_coef": libsvm_sv_coef,
"intercept": libsvm_intercept,
"probA": libsvm_probA,
"probB": libsvm_probB,
}
pred = _libsvm.predict(iris.data, **model_params, kernel="linear")
assert np.mean(pred == iris.target) > 0.95
pred = _libsvm.cross_validation(
iris.data, iris.target.astype(np.float64), 5, kernel="linear", random_seed=0
)
assert np.mean(pred == iris.target) > 0.95
# If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
# we should get deterministic results (assuming that there is no other
# thread calling this wrapper calling `srand` concurrently).
pred2 = _libsvm.cross_validation(
iris.data, iris.target.astype(np.float64), 5, kernel="linear", random_seed=0
)
assert_array_equal(pred, pred2)
def test_precomputed():
# SVC with a precomputed kernel.
# We test it with a toy dataset and with iris.
clf = svm.SVC(kernel="precomputed")
# Gram matrix for train data (square matrix)
# (we use just a linear kernel)
K = np.dot(X, np.array(X).T)
clf.fit(K, Y)
# Gram matrix for test data (rectangular matrix)
KT = np.dot(T, np.array(X).T)
pred = clf.predict(KT)
with pytest.raises(ValueError):
clf.predict(KT.T)
assert_array_equal(clf.dual_coef_, [[-0.25, 0.25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
KT = np.zeros_like(KT)
for i in range(len(T)):
for j in clf.support_:
KT[i, j] = np.dot(T[i], X[j])
pred = clf.predict(KT)
assert_array_equal(pred, true_result)
# same as before, but using a callable function instead of the kernel
# matrix. kernel is just a linear kernel
def kfunc(x, y):
return np.dot(x, y.T)
clf = svm.SVC(kernel=kfunc)
clf.fit(np.array(X), Y)
pred = clf.predict(T)
assert_array_equal(clf.dual_coef_, [[-0.25, 0.25]])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# test a precomputed kernel with the iris dataset
# and check parameters against a linear SVC
clf = svm.SVC(kernel="precomputed")
clf2 = svm.SVC(kernel="linear")
iris = get_iris_dataset(42)
K = np.dot(iris.data, iris.data.T)
clf.fit(K, iris.target)
clf2.fit(iris.data, iris.target)
pred = clf.predict(K)
assert_array_almost_equal(clf.support_, clf2.support_)
assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
assert_array_almost_equal(clf.intercept_, clf2.intercept_)
assert_almost_equal(np.mean(pred == iris.target), 0.99, decimal=2)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
K = np.zeros_like(K)
for i in range(len(iris.data)):
for j in clf.support_:
K[i, j] = np.dot(iris.data[i], iris.data[j])
pred = clf.predict(K)
assert_almost_equal(np.mean(pred == iris.target), 0.99, decimal=2)
    clf = svm.SVC(kernel=kfunc)
    clf.fit(iris.data, iris.target)
    pred = clf.predict(iris.data)
    assert_almost_equal(np.mean(pred == iris.target), 0.99, decimal=2)
def test_svr():
# Test Support Vector Regression
diabetes = datasets.load_diabetes()
for clf in (
svm.NuSVR(kernel="linear", nu=0.4, C=1.0),
svm.NuSVR(kernel="linear", nu=0.4, C=10.0),
svm.SVR(kernel="linear", C=10.0),
svm.LinearSVR(C=10.0),
svm.LinearSVR(C=10.0),
):
clf.fit(diabetes.data, diabetes.target)
assert clf.score(diabetes.data, diabetes.target) > 0.02
# non-regression test; previously, BaseLibSVM would check that
# len(np.unique(y)) < 2, which must only be done for SVC
svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score1 = lsvr.score(diabetes.data, diabetes.target)
svr = svm.SVR(kernel="linear", C=1e3).fit(diabetes.data, diabetes.target)
score2 = svr.score(diabetes.data, diabetes.target)
assert_allclose(np.linalg.norm(lsvr.coef_), np.linalg.norm(svr.coef_), 1, 0.0001)
assert_almost_equal(score1, score2, 2)
def test_linearsvr_fit_sampleweight(global_random_seed):
# check correct result when sample_weight is 1
# (fitting with unit weights must match fitting without sample_weight)
diabetes = datasets.load_diabetes()
n_samples = len(diabetes.target)
unit_weight = np.ones(n_samples)
lsvr = svm.LinearSVR(C=1e3, tol=1e-12, max_iter=10000).fit(
diabetes.data, diabetes.target, sample_weight=unit_weight
)
score1 = lsvr.score(diabetes.data, diabetes.target)
lsvr_no_weight = svm.LinearSVR(C=1e3, tol=1e-12, max_iter=10000).fit(
diabetes.data, diabetes.target
)
score2 = lsvr_no_weight.score(diabetes.data, diabetes.target)
assert_allclose(
np.linalg.norm(lsvr.coef_), np.linalg.norm(lsvr_no_weight.coef_), 1, 0.0001
)
assert_almost_equal(score1, score2, 2)
# check that fit(X) = fit([X1, X2, X3], sample_weight = [n1, n2, n3]) where
# X = X1 repeated n1 times, X2 repeated n2 times and so forth
rng = np.random.RandomState(global_random_seed)
random_weight = rng.randint(0, 10, n_samples)
lsvr_unflat = svm.LinearSVR(C=1e3, tol=1e-12, max_iter=10000).fit(
diabetes.data, diabetes.target, sample_weight=random_weight
)
score3 = lsvr_unflat.score(
diabetes.data, diabetes.target, sample_weight=random_weight
)
X_flat = np.repeat(diabetes.data, random_weight, axis=0)
y_flat = np.repeat(diabetes.target, random_weight, axis=0)
lsvr_flat = svm.LinearSVR(C=1e3, tol=1e-12, max_iter=10000).fit(X_flat, y_flat)
score4 = lsvr_flat.score(X_flat, y_flat)
assert_almost_equal(score3, score4, 2)
def test_svr_errors():
X = [[0.0], [1.0]]
y = [0.0, 0.5]
# Bad kernel
clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
clf.fit(X, y)
with pytest.raises(ValueError):
clf.predict(X)
def test_oneclass():
# Test OneClassSVM
clf = svm.OneClassSVM()
clf.fit(X)
pred = clf.predict(T)
assert_array_equal(pred, [1, -1, -1])
assert pred.dtype == np.dtype("intp")
assert_array_almost_equal(clf.intercept_, [-1.218], decimal=3)
assert_array_almost_equal(clf.dual_coef_, [[0.750, 0.750, 0.750, 0.750]], decimal=3)
with pytest.raises(AttributeError):
(lambda: clf.coef_)()
# TODO: rework this test to be independent of the random seeds.
def test_oneclass_decision_function():
# Test OneClassSVM decision function
clf = svm.OneClassSVM()
rnd = check_random_state(2)
# Generate train data
X = 0.3 * rnd.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rnd.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
# predict things
y_pred_test = clf.predict(X_test)
assert np.mean(y_pred_test == 1) > 0.9
y_pred_outliers = clf.predict(X_outliers)
assert np.mean(y_pred_outliers == -1) > 0.9
dec_func_test = clf.decision_function(X_test)
assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
dec_func_outliers = clf.decision_function(X_outliers)
assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_oneclass_score_samples():
X_train = [[1, 1], [1, 2], [2, 1]]
clf = svm.OneClassSVM(gamma=1).fit(X_train)
assert_array_equal(
clf.score_samples([[2.0, 2.0]]),
clf.decision_function([[2.0, 2.0]]) + clf.offset_,
)
def test_tweak_params():
# Make sure some tweaking of parameters works.
# We change clf.dual_coef_ at run time and expect .predict() to change
# accordingly. Notice that this is not trivial since it involves a lot
# of C/Python copying in the libsvm bindings.
# The success of this test ensures that the mapping between libsvm and
# the python classifier is complete.
clf = svm.SVC(kernel="linear", C=1.0)
clf.fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-0.25, 0.25]])
assert_array_equal(clf.predict([[-0.1, -0.1]]), [1])
clf._dual_coef_ = np.array([[0.0, 1.0]])
assert_array_equal(clf.predict([[-0.1, -0.1]]), [2])
# XXX: this test is thread-unsafe because it uses probability=True:
# https://github.com/scikit-learn/scikit-learn/issues/31885
@pytest.mark.thread_unsafe
def test_probability(global_random_seed):
# Predict probabilities using SVC
# This uses cross validation, so we use a slightly bigger testing set.
iris = get_iris_dataset(global_random_seed)
for clf in (
svm.SVC(probability=True, random_state=global_random_seed, C=1.0),
svm.NuSVC(probability=True, random_state=global_random_seed),
):
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert np.mean(np.argmax(prob_predict, 1) == clf.predict(iris.data)) > 0.9
assert_almost_equal(
clf.predict_proba(iris.data), np.exp(clf.predict_log_proba(iris.data)), 8
)
def test_decision_function(global_random_seed):
iris = get_iris_dataset(global_random_seed)
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel="linear", C=0.1, decision_function_shape="ovo").fit(
iris.data, iris.target
)
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction, clf.classes_[(clf.decision_function(X) > 0).astype(int)]
)
expected = np.array([-1.0, -0.66, -1.0, 0.66, 1.0, 1.0])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
# kernel binary:
clf = svm.SVC(kernel="rbf", gamma=1, decision_function_shape="ovo")
clf.fit(X, Y)
rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
@pytest.mark.parametrize("SVM", (svm.SVC, svm.NuSVC))
def test_decision_function_shape(SVM, global_random_seed):
# check that decision_function_shape='ovr' or 'ovo' gives
# correct shape and is consistent with predict
iris = get_iris_dataset(global_random_seed)
linear_ovr_svm = SVM(
kernel="linear",
decision_function_shape="ovr",
random_state=global_random_seed,
break_ties=True,
)
# we need to use break_ties here so that the prediction won't break ties randomly
# but use the argmax of the decision function.
linear_ovr_svm.fit(iris.data, iris.target)
dec = linear_ovr_svm.decision_function(iris.data)
assert dec.shape == (len(iris.data), 3)
assert_array_equal(linear_ovr_svm.predict(iris.data), np.argmax(dec, axis=1))
# with five classes:
X, y = make_blobs(n_samples=80, centers=5, random_state=global_random_seed)
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=global_random_seed
)
linear_ovr_svm.fit(X_train, y_train)
dec = linear_ovr_svm.decision_function(X_test)
assert dec.shape == (len(X_test), 5)
assert_array_equal(linear_ovr_svm.predict(X_test), np.argmax(dec, axis=1))
# check shape of ovo_decision_function=True
linear_ovo_svm = SVM(
kernel="linear",
decision_function_shape="ovo",
random_state=global_random_seed,
break_ties=True,
)
linear_ovo_svm.fit(X_train, y_train)
dec = linear_ovo_svm.decision_function(X_train)
assert dec.shape == (len(X_train), 10)
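# Illustrative note (not executed by the suite): with n_classes classes, the
# "ovo" decision function has one column per unordered pair of classes,
# i.e. n_classes * (n_classes - 1) / 2 columns -- 10 for the 5-class blobs
# above -- while "ovr" always has exactly n_classes columns. The helper name
# below is made up for this example.
def _n_ovo_columns(n_classes):
    return n_classes * (n_classes - 1) // 2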
def test_svr_predict(global_random_seed):
# Test SVR's decision_function
# Sanity check, test that predict implemented in python
# returns the same as the one in libsvm
iris = get_iris_dataset(global_random_seed)
X = iris.data
y = iris.target
# linear kernel
reg = svm.SVR(kernel="linear", C=0.1).fit(X, y)
dec = np.dot(X, reg.coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())
# rbf kernel
reg = svm.SVR(kernel="rbf", gamma=1).fit(X, y)
rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())
# TODO: rework this test to be independent of the random seeds.
def test_weight():
# Test class weights
clf = svm.SVC(class_weight={1: 0.1})
# we give a small weight to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(
n_samples=200,
n_features=10,
weights=[0.833, 0.167],
random_state=2,
)
for clf in (
linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0),
svm.SVC(),
):
clf.set_params(class_weight={0: 0.1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert f1_score(y_[100:], y_pred) > 0.3
@pytest.mark.parametrize("estimator", [svm.SVC(C=1e-2), svm.NuSVC()])
def test_svm_classifier_sided_sample_weight(estimator):
estimator = base.clone(estimator) # Avoid side effects from previous tests.
# fit a linear SVM and check that giving more weight to opposed samples
# in the space will flip the decision toward these samples.
X = [[-2, 0], [-1, -1], [0, -2], [0, 2], [1, 1], [2, 0]]
estimator.set_params(kernel="linear")
# check that with unit weights, the test sample is predicted to lie on
# the decision boundary
sample_weight = [1] * 6
estimator.fit(X, Y, sample_weight=sample_weight)
y_pred = estimator.decision_function([[-1.0, 1.0]])
assert y_pred == pytest.approx(0)
# give more weights to opposed samples
sample_weight = [10.0, 0.1, 0.1, 0.1, 0.1, 10]
estimator.fit(X, Y, sample_weight=sample_weight)
y_pred = estimator.decision_function([[-1.0, 1.0]])
assert y_pred < 0
sample_weight = [1.0, 0.1, 10.0, 10.0, 0.1, 0.1]
estimator.fit(X, Y, sample_weight=sample_weight)
y_pred = estimator.decision_function([[-1.0, 1.0]])
assert y_pred > 0
@pytest.mark.parametrize("estimator", [svm.SVR(C=1e-2), svm.NuSVR(C=1e-2)])
def test_svm_regressor_sided_sample_weight(estimator):
estimator = base.clone(estimator) # Avoid side effects from previous tests.
# similar test to test_svm_classifier_sided_sample_weight but for
# SVM regressors
X = [[-2, 0], [-1, -1], [0, -2], [0, 2], [1, 1], [2, 0]]
estimator.set_params(kernel="linear")
# check that with unit weights, the test sample is predicted to lie on
# the decision boundary
sample_weight = [1] * 6
estimator.fit(X, Y, sample_weight=sample_weight)
y_pred = estimator.predict([[-1.0, 1.0]])
assert y_pred == pytest.approx(1.5)
# give more weights to opposed samples
sample_weight = [10.0, 0.1, 0.1, 0.1, 0.1, 10]
estimator.fit(X, Y, sample_weight=sample_weight)
y_pred = estimator.predict([[-1.0, 1.0]])
assert y_pred < 1.5
sample_weight = [1.0, 0.1, 10.0, 10.0, 0.1, 0.1]
estimator.fit(X, Y, sample_weight=sample_weight)
y_pred = estimator.predict([[-1.0, 1.0]])
assert y_pred > 1.5
def test_svm_equivalence_sample_weight_C():
# test that rescaling all samples is the same as changing C
clf = svm.SVC()
clf.fit(X, Y)
dual_coef_no_weight = clf.dual_coef_
clf.set_params(C=100)
clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
assert_allclose(dual_coef_no_weight, clf.dual_coef_)
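# Illustrative sketch (not executed by the suite): the same equivalence in the
# other direction -- multiplying every sample weight by 10 matches multiplying
# C by 10, because libsvm applies the per-sample penalty C * sample_weight.
# The helper name below is made up for this example.
def _demo_sample_weight_C_equivalence():
    clf_weighted = svm.SVC(C=1.0).fit(X, Y, sample_weight=np.full(len(X), 10.0))
    clf_scaled_C = svm.SVC(C=10.0).fit(X, Y)
    return np.allclose(clf_weighted.dual_coef_, clf_scaled_C.dual_coef_)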
@pytest.mark.parametrize(
"Estimator, err_msg",
[
(svm.SVC, "Invalid input - all samples have zero or negative weights."),
(svm.NuSVC, "(negative dimensions are not allowed|nu is infeasible)"),
(svm.SVR, "Invalid input - all samples have zero or negative weights."),
(svm.NuSVR, "Invalid input - all samples have zero or negative weights."),
(svm.OneClassSVM, "Invalid input - all samples have zero or negative weights."),
],
ids=["SVC", "NuSVC", "SVR", "NuSVR", "OneClassSVM"],
)
@pytest.mark.parametrize(
"sample_weight",
[[0] * len(Y), [-0.3] * len(Y)],
ids=["weights-are-zero", "weights-are-negative"],
)
def test_negative_sample_weights_mask_all_samples(Estimator, err_msg, sample_weight):
est = Estimator(kernel="linear")
with pytest.raises(ValueError, match=err_msg):
est.fit(X, Y, sample_weight=sample_weight)
@pytest.mark.parametrize(
"Classifier, err_msg",
[
(
svm.SVC,
(
"Invalid input - all samples with positive weights belong to the same"
" class"
),
),
(svm.NuSVC, "specified nu is infeasible"),
],
ids=["SVC", "NuSVC"],
)
@pytest.mark.parametrize(
"sample_weight",
[[0, -0.5, 0, 1, 1, 1], [1, 1, 1, 0, -0.1, -0.3]],
ids=["mask-label-1", "mask-label-2"],
)
def test_negative_weights_svc_leave_just_one_label(Classifier, err_msg, sample_weight):
clf = Classifier(kernel="linear")
with pytest.raises(ValueError, match=err_msg):
clf.fit(X, Y, sample_weight=sample_weight)
@pytest.mark.parametrize(
"Classifier, model",
[
(svm.SVC, {"when-left": [0.3998, 0.4], "when-right": [0.4, 0.3999]}),
(svm.NuSVC, {"when-left": [0.3333, 0.3333], "when-right": [0.3333, 0.3333]}),
],
ids=["SVC", "NuSVC"],
)
@pytest.mark.parametrize(
"sample_weight, mask_side",
[([1, -0.5, 1, 1, 1, 1], "when-left"), ([1, 1, 1, 0, 1, 1], "when-right")],
ids=["partial-mask-label-1", "partial-mask-label-2"],
)
def test_negative_weights_svc_leave_two_labels(
Classifier, model, sample_weight, mask_side
):
clf = Classifier(kernel="linear")
clf.fit(X, Y, sample_weight=sample_weight)
assert_allclose(clf.coef_, [model[mask_side]], rtol=1e-3)
@pytest.mark.parametrize(
"Estimator", [svm.SVC, svm.NuSVC, svm.NuSVR], ids=["SVC", "NuSVC", "NuSVR"]
)
@pytest.mark.parametrize(
"sample_weight",
[[1, -0.5, 1, 1, 1, 1], [1, 1, 1, 0, 1, 1]],
ids=["partial-mask-label-1", "partial-mask-label-2"],
)
def test_negative_weight_equal_coeffs(Estimator, sample_weight):
# model generates equal coefficients
est = Estimator(kernel="linear")
est.fit(X, Y, sample_weight=sample_weight)
coef = np.abs(est.coef_).ravel()
assert coef[0] == pytest.approx(coef[1], rel=1e-3)
# TODO: rework this test to be independent of the random seeds.
def test_auto_weight():
# Test class weights for imbalanced data
from sklearn.linear_model import LogisticRegression
# We take as dataset the two-dimensional projection of iris so
# that it is not separable and remove half of the samples from
# the last class.
# We add one to the targets as a non-regression test:
# class_weight="balanced"
# used to work only when the labels were in the range [0..K).
from sklearn.utils import compute_class_weight
iris = get_iris_dataset(42)
X, y = iris.data[:, :2], iris.target + 1
unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
classes = np.unique(y[unbalanced])
class_weights = compute_class_weight("balanced", classes=classes, y=y[unbalanced])
assert np.argmax(class_weights) == 2
for clf in (
svm.SVC(kernel="linear"),
svm.LinearSVC(random_state=0),
LogisticRegression(),
):
# check that score is better when class='balanced' is set.
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
clf.set_params(class_weight="balanced")
y_pred_balanced = clf.fit(
X[unbalanced],
y[unbalanced],
).predict(X)
assert metrics.f1_score(y, y_pred, average="macro") <= metrics.f1_score(
y, y_pred_balanced, average="macro"
)
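# Illustrative sketch (not executed by the suite): what class_weight="balanced"
# computes, namely n_samples / (n_classes * np.bincount(y)). The helper name
# below is made up for this example.
def _demo_balanced_class_weight():
    from sklearn.utils import compute_class_weight
    y = np.array([0, 0, 0, 0, 1, 1])
    classes = np.unique(y)
    manual = len(y) / (len(classes) * np.bincount(y))  # [0.75, 1.5]
    auto = compute_class_weight("balanced", classes=classes, y=y)
    return np.allclose(manual, auto)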
@pytest.mark.parametrize("lil_container", LIL_CONTAINERS)
def test_bad_input(lil_container, global_random_seed):
# Test dimensions for labels
Y2 = Y[:-1] # wrong dimensions for labels
with pytest.raises(ValueError):
svm.SVC().fit(X, Y2)
# Test with arrays that are non-contiguous.
for clf in (svm.SVC(), svm.LinearSVC(random_state=global_random_seed)):
Xf = np.asfortranarray(X)
assert not Xf.flags["C_CONTIGUOUS"]
yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
yf = yf[:, -1]
assert not yf.flags["F_CONTIGUOUS"]
assert not yf.flags["C_CONTIGUOUS"]
clf.fit(Xf, yf)
assert_array_equal(clf.predict(T), true_result)
# error for precomputed kernels
clf = svm.SVC(kernel="precomputed")
with pytest.raises(ValueError):
clf.fit(X, Y)
# predict with sparse input when trained with dense
clf = svm.SVC().fit(X, Y)
with pytest.raises(ValueError):
clf.predict(lil_container(X))
Xt = np.array(X).T
clf.fit(np.dot(X, Xt), Y)
with pytest.raises(ValueError):
clf.predict(X)
clf = svm.SVC()
clf.fit(X, Y)
with pytest.raises(ValueError):
clf.predict(Xt)
def test_svc_nonfinite_params(global_random_seed):
# Check SVC throws ValueError when dealing with non-finite parameter values
rng = np.random.RandomState(global_random_seed)
n_samples = 10
fmax = np.finfo(np.float64).max
X = fmax * rng.uniform(size=(n_samples, 2))
y = rng.randint(0, 2, size=n_samples)
clf = svm.SVC()
msg = "The dual coefficients or intercepts are not finite"
with pytest.raises(ValueError, match=msg):
clf.fit(X, y)
def test_unicode_kernel(global_random_seed):
# Test that a unicode kernel name does not cause a TypeError
iris = get_iris_dataset(global_random_seed)
clf = svm.SVC(kernel="linear", probability=True)
clf.fit(X, Y)
clf.predict_proba(T)
_libsvm.cross_validation(
iris.data, iris.target.astype(np.float64), 5, kernel="linear", random_seed=0
)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_sparse_precomputed(csr_container):
clf = svm.SVC(kernel="precomputed")
sparse_gram = csr_container([[1, 0], [0, 1]])
with pytest.raises(TypeError, match="Sparse precomputed"):
clf.fit(sparse_gram, [0, 1])
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_sparse_fit_support_vectors_empty(csr_container):
# Regression test for #14893
X_train = csr_container([[0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [0, 0, 0, 1]])
y_train = np.array([0.04, 0.04, 0.10, 0.16])
model = svm.SVR(kernel="linear")
model.fit(X_train, y_train)
assert not model.support_vectors_.data.size
assert not model.dual_coef_.data.size
@pytest.mark.parametrize("loss", ["hinge", "squared_hinge"])
@pytest.mark.parametrize("penalty", ["l1", "l2"])
@pytest.mark.parametrize("dual", [True, False])
def test_linearsvc_parameters(loss, penalty, dual, global_random_seed):
# Test possible parameter combinations in LinearSVC
# Generate list of possible parameter combinations
X, y = make_classification(
n_samples=5, n_features=5, random_state=global_random_seed
)
clf = svm.LinearSVC(
penalty=penalty, loss=loss, dual=dual, random_state=global_random_seed
)
if (
(loss, penalty) == ("hinge", "l1")
or (loss, penalty, dual) == ("hinge", "l2", False)
or (penalty, dual) == ("l1", True)
):
with pytest.raises(
ValueError,
match="Unsupported set of arguments.*penalty='%s.*loss='%s.*dual=%s"
% (penalty, loss, dual),
):
clf.fit(X, y)
else:
clf.fit(X, y)
def test_linearsvc(global_random_seed):
# Test basic routines using LinearSVC
clf = svm.LinearSVC(random_state=global_random_seed).fit(X, Y)
# by default should have intercept
assert clf.fit_intercept
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.intercept_, [0], decimal=3)
# the same with l1 penalty
clf = svm.LinearSVC(
penalty="l1",
loss="squared_hinge",
dual=False,
random_state=global_random_seed,
).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty with dual formulation
clf = svm.LinearSVC(penalty="l2", dual=True, random_state=global_random_seed).fit(
X, Y
)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty, l1 loss
clf = svm.LinearSVC(
penalty="l2", loss="hinge", dual=True, random_state=global_random_seed
)
clf.fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# test also decision function
dec = clf.decision_function(T)
res = (dec > 0).astype(int) + 1
assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer(global_random_seed):
# Test LinearSVC with crammer_singer multi-class svm
iris = get_iris_dataset(global_random_seed)
ovr_clf = svm.LinearSVC(random_state=global_random_seed).fit(iris.data, iris.target)
cs_clf = svm.LinearSVC(
multi_class="crammer_singer", random_state=global_random_seed
)
cs_clf.fit(iris.data, iris.target)
# similar prediction for ovr and crammer-singer:
assert (ovr_clf.predict(iris.data) == cs_clf.predict(iris.data)).mean() > 0.9
# classifiers shouldn't be the same
assert (ovr_clf.coef_ != cs_clf.coef_).all()
# test decision function
assert_array_equal(
cs_clf.predict(iris.data),
np.argmax(cs_clf.decision_function(iris.data), axis=1),
)
dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_linearsvc_fit_sampleweight(global_random_seed):
# check correct result when sample_weight is 1
n_samples = len(X)
unit_weight = np.ones(n_samples)
clf = svm.LinearSVC(random_state=global_random_seed, tol=1e-12, max_iter=1000).fit(
X, Y
)
clf_unitweight = svm.LinearSVC(
random_state=global_random_seed, tol=1e-12, max_iter=1000
).fit(X, Y, sample_weight=unit_weight)
# check if same as sample_weight=None
assert_array_equal(clf_unitweight.predict(T), clf.predict(T))
assert_allclose(clf.coef_, clf_unitweight.coef_, 1, 0.0001)
# check that fit(X) = fit([X1, X2, X3], sample_weight = [n1, n2, n3]) where
# X = X1 repeated n1 times, X2 repeated n2 times and so forth
random_weight = np.random.RandomState(global_random_seed).randint(0, 10, n_samples)
lsvc_unflat = svm.LinearSVC(
random_state=global_random_seed, tol=1e-12, max_iter=1000
).fit(X, Y, sample_weight=random_weight)
pred1 = lsvc_unflat.predict(T)
X_flat = np.repeat(X, random_weight, axis=0)
y_flat = np.repeat(Y, random_weight, axis=0)
lsvc_flat = svm.LinearSVC(
random_state=global_random_seed, tol=1e-12, max_iter=1000
).fit(X_flat, y_flat)
pred2 = lsvc_flat.predict(T)
assert_array_equal(pred1, pred2)
assert_allclose(lsvc_unflat.coef_, lsvc_flat.coef_, 1, 0.0001)
def test_crammer_singer_binary(global_random_seed):
# Test Crammer-Singer formulation in the binary case
X, y = make_classification(
n_classes=2, class_sep=1.5, random_state=global_random_seed
)
for fit_intercept in (True, False):
acc = (
svm.LinearSVC(
fit_intercept=fit_intercept,
multi_class="crammer_singer",
random_state=global_random_seed,
)
.fit(X, y)
.score(X, y)
)
assert acc > 0.9
def test_linearsvc_iris(global_random_seed):
iris = get_iris_dataset(global_random_seed)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/tree/__init__.py | sklearn/tree/__init__.py | """Decision tree based models for classification and regression."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from sklearn.tree._classes import (
BaseDecisionTree,
DecisionTreeClassifier,
DecisionTreeRegressor,
ExtraTreeClassifier,
ExtraTreeRegressor,
)
from sklearn.tree._export import export_graphviz, export_text, plot_tree
__all__ = [
"BaseDecisionTree",
"DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor",
"export_graphviz",
"export_text",
"plot_tree",
]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/tree/_classes.py | sklearn/tree/_classes.py | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import copy
import numbers
from abc import ABCMeta, abstractmethod
from math import ceil
from numbers import Integral, Real
import numpy as np
from scipy.sparse import issparse
from sklearn.base import (
BaseEstimator,
ClassifierMixin,
MultiOutputMixin,
RegressorMixin,
_fit_context,
clone,
is_classifier,
)
from sklearn.tree import _criterion, _splitter, _tree
from sklearn.tree._criterion import Criterion
from sklearn.tree._splitter import Splitter
from sklearn.tree._tree import (
BestFirstTreeBuilder,
DepthFirstTreeBuilder,
Tree,
_build_pruned_tree_ccp,
ccp_pruning_path,
)
from sklearn.tree._utils import _any_isnan_axis0
from sklearn.utils import (
Bunch,
check_random_state,
compute_sample_weight,
metadata_routing,
)
from sklearn.utils._param_validation import Hidden, Interval, RealNotInt, StrOptions
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.validation import (
_assert_all_finite_element_wise,
_check_n_features,
_check_sample_weight,
assert_all_finite,
check_is_fitted,
validate_data,
)
__all__ = [
"DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor",
]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {
"gini": _criterion.Gini,
"log_loss": _criterion.Entropy,
"entropy": _criterion.Entropy,
}
CRITERIA_REG = {
"squared_error": _criterion.MSE,
"friedman_mse": _criterion.FriedmanMSE,
"absolute_error": _criterion.MAE,
"poisson": _criterion.Poisson,
}
DENSE_SPLITTERS = {"best": _splitter.BestSplitter, "random": _splitter.RandomSplitter}
SPARSE_SPLITTERS = {
"best": _splitter.BestSparseSplitter,
"random": _splitter.RandomSparseSplitter,
}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(MultiOutputMixin, BaseEstimator, metaclass=ABCMeta):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
# "check_input" is used for optimisation and isn't something to be passed
# around in a pipeline.
__metadata_request__predict = {"check_input": metadata_routing.UNUSED}
_parameter_constraints: dict = {
"splitter": [StrOptions({"best", "random"})],
"max_depth": [Interval(Integral, 1, None, closed="left"), None],
"min_samples_split": [
Interval(Integral, 2, None, closed="left"),
Interval(RealNotInt, 0.0, 1.0, closed="right"),
],
"min_samples_leaf": [
Interval(Integral, 1, None, closed="left"),
Interval(RealNotInt, 0.0, 1.0, closed="neither"),
],
"min_weight_fraction_leaf": [Interval(Real, 0.0, 0.5, closed="both")],
"max_features": [
Interval(Integral, 1, None, closed="left"),
Interval(RealNotInt, 0.0, 1.0, closed="right"),
StrOptions({"sqrt", "log2"}),
None,
],
"random_state": ["random_state"],
"max_leaf_nodes": [Interval(Integral, 2, None, closed="left"), None],
"min_impurity_decrease": [Interval(Real, 0.0, None, closed="left")],
"ccp_alpha": [Interval(Real, 0.0, None, closed="left")],
"monotonic_cst": ["array-like", None],
}
@abstractmethod
def __init__(
self,
*,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
min_impurity_decrease,
class_weight=None,
ccp_alpha=0.0,
monotonic_cst=None,
):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.random_state = random_state
self.min_impurity_decrease = min_impurity_decrease
self.class_weight = class_weight
self.ccp_alpha = ccp_alpha
self.monotonic_cst = monotonic_cst
def get_depth(self):
"""Return the depth of the decision tree.
The depth of a tree is the maximum distance between the root
and any leaf.
Returns
-------
self.tree_.max_depth : int
The maximum depth of the tree.
"""
check_is_fitted(self)
return self.tree_.max_depth
def get_n_leaves(self):
"""Return the number of leaves of the decision tree.
Returns
-------
self.tree_.n_leaves : int
Number of leaves.
"""
check_is_fitted(self)
return self.tree_.n_leaves
def _support_missing_values(self, X):
return (
not issparse(X)
and self.__sklearn_tags__().input_tags.allow_nan
and self.monotonic_cst is None
)
def _compute_missing_values_in_feature_mask(self, X, estimator_name=None):
"""Return boolean mask denoting if there are missing values for each feature.
This method also ensures that X is finite.
Parameters
----------
X : array-like of shape (n_samples, n_features), dtype=DOUBLE
Input data.
estimator_name : str or None, default=None
Name to use when raising an error. Defaults to the class name.
Returns
-------
missing_values_in_feature_mask : ndarray of shape (n_features,), or None
Missing value mask. If missing values are not supported or there
are no missing values, return None.
"""
estimator_name = estimator_name or self.__class__.__name__
common_kwargs = dict(estimator_name=estimator_name, input_name="X")
if not self._support_missing_values(X):
assert_all_finite(X, **common_kwargs)
return None
with np.errstate(over="ignore"):
overall_sum = np.sum(X)
if not np.isfinite(overall_sum):
# Raise a ValueError in case of the presence of an infinite element.
_assert_all_finite_element_wise(X, xp=np, allow_nan=True, **common_kwargs)
# If the sum is not nan, then there are no missing values
if not np.isnan(overall_sum):
return None
missing_values_in_feature_mask = _any_isnan_axis0(X)
return missing_values_in_feature_mask
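# Illustrative sketch (standalone, not part of the estimator): the single-sum
# trick used above -- one np.sum call distinguishes "all finite" (finite sum),
# "contains NaN" (NaN sum) and "contains only +/-inf" (infinite, non-NaN sum).
# The helper name below is made up for this example.
def _demo_sum_based_finiteness_check():
    clean = np.array([[1.0, 2.0], [3.0, 4.0]])
    with_nan = np.array([[1.0, np.nan], [3.0, 4.0]])
    with_inf = np.array([[1.0, np.inf], [3.0, 4.0]])
    return (
        np.isfinite(np.sum(clean)),  # True
        np.isnan(np.sum(with_nan)),  # True
        np.isinf(np.sum(with_inf)),  # True
    )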
def _fit(
self,
X,
y,
sample_weight=None,
check_input=True,
missing_values_in_feature_mask=None,
):
random_state = check_random_state(self.random_state)
if check_input:
# Need to validate separately here.
# We can't pass multi_output=True because that would allow y to be
# csr.
# _compute_missing_values_in_feature_mask will check for finite values and
# compute the missing mask if the tree supports missing values
check_X_params = dict(
dtype=DTYPE, accept_sparse="csc", ensure_all_finite=False
)
check_y_params = dict(ensure_2d=False, dtype=None)
X, y = validate_data(
self, X, y, validate_separately=(check_X_params, check_y_params)
)
missing_values_in_feature_mask = (
self._compute_missing_values_in_feature_mask(X)
)
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError(
"No support for np.int64 index based sparse matrices"
)
if self.criterion == "poisson":
if np.any(y < 0):
raise ValueError(
"Some value(s) of y are negative which is"
" not allowed for Poisson regression."
)
if np.sum(y) <= 0:
raise ValueError(
"Sum of y is not positive which is "
"necessary for Poisson regression."
)
# Determine output settings
n_samples, self.n_features_in_ = X.shape
is_classification = is_classifier(self)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity;
# indexing with [:, np.newaxis] does not preserve it.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
check_classification_targets(y)
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_encoded = np.zeros(y.shape, dtype=int)
for k in range(self.n_outputs_):
classes_k, y_encoded[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_encoded
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original
)
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
max_depth = np.iinfo(np.int32).max if self.max_depth is None else self.max_depth
if isinstance(self.min_samples_leaf, numbers.Integral):
min_samples_leaf = self.min_samples_leaf
else: # float
min_samples_leaf = ceil(self.min_samples_leaf * n_samples)
if isinstance(self.min_samples_split, numbers.Integral):
min_samples_split = self.min_samples_split
else: # float
min_samples_split = ceil(self.min_samples_split * n_samples)
min_samples_split = max(2, min_samples_split)
min_samples_split = max(min_samples_split, 2 * min_samples_leaf)
if isinstance(self.max_features, str):
if self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_in_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_in_)))
elif self.max_features is None:
max_features = self.n_features_in_
elif isinstance(self.max_features, numbers.Integral):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_in_))
else:
max_features = 0
self.max_features_ = max_features
max_leaf_nodes = -1 if self.max_leaf_nodes is None else self.max_leaf_nodes
if len(y) != n_samples:
raise ValueError(
"Number of labels=%d does not match number of samples=%d"
% (len(y), n_samples)
)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if sample_weight is None:
min_weight_leaf = self.min_weight_fraction_leaf * n_samples
else:
min_weight_leaf = self.min_weight_fraction_leaf * np.sum(sample_weight)
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](
self.n_outputs_, self.n_classes_
)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_, n_samples)
else:
# Make a deepcopy in case the criterion has mutable attributes that
# might be shared and modified concurrently during parallel fitting
criterion = copy.deepcopy(criterion)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if self.monotonic_cst is None:
monotonic_cst = None
else:
if self.n_outputs_ > 1:
raise ValueError(
"Monotonicity constraints are not supported with multiple outputs."
)
# Check that the monotonicity constraints are correctly specified:
# every entry must be -1, 0 or 1.
# Note: we do not cast `np.asarray(self.monotonic_cst, dtype=np.int8)`
# straight away here so as to generate error messages for invalid
# values using the original values prior to any dtype related conversion.
monotonic_cst = np.asarray(self.monotonic_cst)
if monotonic_cst.shape[0] != X.shape[1]:
raise ValueError(
"monotonic_cst has shape {} but the input data "
"X has {} features.".format(monotonic_cst.shape[0], X.shape[1])
)
valid_constraints = np.isin(monotonic_cst, (-1, 0, 1))
if not np.all(valid_constraints):
unique_constraint_values = np.unique(monotonic_cst)
raise ValueError(
"monotonic_cst must be None or an array-like of -1, 0 or 1, but"
f" got {unique_constraint_values}"
)
monotonic_cst = np.asarray(monotonic_cst, dtype=np.int8)
if is_classifier(self):
if self.n_classes_[0] > 2:
raise ValueError(
"Monotonicity constraints are not supported with multiclass "
"classification"
)
# Binary classification trees are built by constraining probabilities
# of the *negative class* in order to make the implementation similar
# to regression trees.
# Since self.monotonic_cst encodes constraints on probabilities of the
# *positive class*, all signs must be flipped.
monotonic_cst *= -1
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](
criterion,
self.max_features_,
min_samples_leaf,
min_weight_leaf,
random_state,
monotonic_cst,
)
if is_classifier(self):
self.tree_ = Tree(self.n_features_in_, self.n_classes_, self.n_outputs_)
else:
self.tree_ = Tree(
self.n_features_in_,
# TODO: tree shouldn't need this in this case
np.array([1] * self.n_outputs_, dtype=np.intp),
self.n_outputs_,
)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(
splitter,
min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth,
self.min_impurity_decrease,
)
else:
builder = BestFirstTreeBuilder(
splitter,
min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes,
self.min_impurity_decrease,
)
builder.build(self.tree_, X, y, sample_weight, missing_values_in_feature_mask)
if self.n_outputs_ == 1 and is_classifier(self):
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
self._prune_tree()
return self
def _validate_X_predict(self, X, check_input):
"""Validate the training data on predict (probabilities)."""
if check_input:
if self._support_missing_values(X):
ensure_all_finite = "allow-nan"
else:
ensure_all_finite = True
X = validate_data(
self,
X,
dtype=DTYPE,
accept_sparse="csr",
reset=False,
ensure_all_finite=ensure_all_finite,
)
if issparse(X) and (
X.indices.dtype != np.intc or X.indptr.dtype != np.intc
):
raise ValueError("No support for np.int64 index based sparse matrices")
else:
# The number of features is checked regardless of `check_input`
_check_n_features(self, X, reset=False)
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : bool, default=True
Allows bypassing several input checks.
Don't use this parameter unless you know what you're doing.
Returns
-------
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The predicted classes, or the predict values.
"""
check_is_fitted(self)
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if is_classifier(self):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
class_type = self.classes_[0].dtype
predictions = np.zeros((n_samples, self.n_outputs_), dtype=class_type)
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1), axis=0
)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
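# Illustrative sketch (standalone, not part of the estimator): how the class
# prediction above is recovered for a single-output classification tree --
# argmax over the per-class values, mapped back through classes_. The names
# below are made up for this example.
def _demo_argmax_to_class():
    classes = np.array(["cat", "dog", "fish"])
    proba = np.array([[0.1, 0.7, 0.2], [0.9, 0.05, 0.05]])
    return classes.take(np.argmax(proba, axis=1), axis=0)  # ["dog", "cat"]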
def apply(self, X, check_input=True):
"""Return the index of the leaf that each sample is predicted as.
.. versionadded:: 0.17
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : bool, default=True
Allows bypassing several input checks.
Don't use this parameter unless you know what you're doing.
Returns
-------
X_leaves : array-like of shape (n_samples,)
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
check_is_fitted(self)
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
def decision_path(self, X, check_input=True):
"""Return the decision path in the tree.
.. versionadded:: 0.18
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : bool, default=True
Allows bypassing several input checks.
Don't use this parameter unless you know what you're doing.
Returns
-------
indicator : sparse matrix of shape (n_samples, n_nodes)
Return a node indicator CSR matrix where non-zero elements
indicate that the sample goes through the node.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.decision_path(X)
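# Illustrative sketch (standalone, not part of the estimator): reading the CSR
# node indicator returned by decision_path -- row i lists every node that
# sample i traverses from the root down to its leaf. The dataset and helper
# name below are made up for this example.
def _demo_decision_path():
    from sklearn.tree import DecisionTreeClassifier
    X = np.array([[0.0], [1.0], [2.0], [3.0]])
    y = np.array([0, 0, 1, 1])
    clf = DecisionTreeClassifier(random_state=0).fit(X, y)
    indicator = clf.decision_path(X)
    # Node ids visited by the first sample.
    return indicator.indices[indicator.indptr[0] : indicator.indptr[1]]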
def _prune_tree(self):
"""Prune tree using Minimal Cost-Complexity Pruning."""
check_is_fitted(self)
if self.ccp_alpha == 0.0:
return
# build pruned tree
if is_classifier(self):
n_classes = np.atleast_1d(self.n_classes_)
pruned_tree = Tree(self.n_features_in_, n_classes, self.n_outputs_)
else:
pruned_tree = Tree(
self.n_features_in_,
# TODO: the tree shouldn't need this param
np.array([1] * self.n_outputs_, dtype=np.intp),
self.n_outputs_,
)
_build_pruned_tree_ccp(pruned_tree, self.tree_, self.ccp_alpha)
self.tree_ = pruned_tree
def cost_complexity_pruning_path(self, X, y, sample_weight=None):
"""Compute the pruning path during Minimal Cost-Complexity Pruning.
See :ref:`minimal_cost_complexity_pruning` for details on the pruning
process.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
The target values (class labels) as integers or strings.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. Splits are also
ignored if they would result in any single class carrying a
negative weight in either child node.
Returns
-------
ccp_path : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
ccp_alphas : ndarray
Effective alphas of subtree during pruning.
impurities : ndarray
Sum of the impurities of the subtree leaves for the
corresponding alpha value in ``ccp_alphas``.
"""
est = clone(self).set_params(ccp_alpha=0.0)
est.fit(X, y, sample_weight=sample_weight)
return Bunch(**ccp_pruning_path(est.tree_))
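# Illustrative sketch (standalone, not part of the estimator): the usual
# pruning workflow built on top of cost_complexity_pruning_path -- fit one
# tree per candidate alpha and keep the alpha with the best held-out score.
# The dataset and helper name below are made up for this example.
def _demo_cost_complexity_pruning():
    from sklearn.datasets import load_iris
    from sklearn.model_selection import train_test_split
    from sklearn.tree import DecisionTreeClassifier
    X, y = load_iris(return_X_y=True)
    X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)
    path = DecisionTreeClassifier(random_state=0).cost_complexity_pruning_path(
        X_tr, y_tr
    )
    scores = [
        DecisionTreeClassifier(random_state=0, ccp_alpha=a)
        .fit(X_tr, y_tr)
        .score(X_te, y_te)
        for a in path.ccp_alphas
    ]
    return path.ccp_alphas[int(np.argmax(scores))]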
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
Returns
-------
feature_importances_ : ndarray of shape (n_features,)
Normalized total reduction of criteria by feature
(Gini importance).
"""
check_is_fitted(self)
return self.tree_.compute_feature_importances()
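# Illustrative sketch (standalone, not part of the estimator): comparing the
# impurity-based importances above with permutation importances, as suggested
# in the warning. The dataset and helper name below are made up for this
# example.
def _demo_compare_importances():
    from sklearn.datasets import load_iris
    from sklearn.inspection import permutation_importance
    from sklearn.tree import DecisionTreeClassifier
    X, y = load_iris(return_X_y=True)
    clf = DecisionTreeClassifier(random_state=0).fit(X, y)
    impurity_based = clf.feature_importances_
    permutation_based = permutation_importance(clf, X, y, random_state=0)
    return impurity_based, permutation_based.importances_mean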
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.sparse = True
return tags
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(ClassifierMixin, BaseDecisionTree):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : {"gini", "entropy", "log_loss"}, default="gini"
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "log_loss" and "entropy" both for the
Shannon information gain, see :ref:`tree_mathematical_formulation`.
splitter : {"best", "random"}, default="best"
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_depth : int, default=None
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int or float, default=2
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for fractions.
min_samples_leaf : int or float, default=1
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` training samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression.
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for fractions.
min_weight_fraction_leaf : float, default=0.0
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : int, float or {"sqrt", "log2"}, default=None
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a fraction and
`max(1, int(max_features * n_features_in_))` features are considered at
each split.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
.. note::
The search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
random_state : int, RandomState instance or None, default=None
Controls the randomness of the estimator. The features are always
randomly permuted at each split, even if ``splitter`` is set to
``"best"``. When ``max_features < n_features``, the algorithm will
select ``max_features`` at random at each split before finding the best
split among them. But the best found split may vary across different
runs, even if ``max_features=n_features``. That is the case, if the
improvement of the criterion is identical for several splits and one
split has to be selected at random. To obtain a deterministic behaviour
during fitting, ``random_state`` has to be fixed to an integer.
See :term:`Glossary <random_state>` for details.
max_leaf_nodes : int, default=None
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, default=0.0
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
class_weight : dict, list of dict or "balanced", default=None
Weights associated with classes in the form ``{class_label: weight}``.
If None, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
Note that for multioutput (including multilabel) weights should be
defined for each class of every column in its own dict. For example,
for four-class multilabel classification weights should be
[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
[{1:1}, {2:5}, {3:1}, {4:1}].
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
ccp_alpha : non-negative float, default=0.0
Complexity parameter used for Minimal Cost-Complexity Pruning. The
subtree with the largest cost complexity that is smaller than
``ccp_alpha`` will be chosen. By default, no pruning is performed. See
:ref:`minimal_cost_complexity_pruning` for details. See
:ref:`sphx_glr_auto_examples_tree_plot_cost_complexity_pruning.py`
for an example of such pruning.
.. versionadded:: 0.22
monotonic_cst : array-like of int of shape (n_features), default=None
Indicates the monotonicity constraint to enforce on each feature.
- 1: monotonic increase
- 0: no constraint
- -1: monotonic decrease
If monotonic_cst is None, no constraints are applied.
Monotonicity constraints are not supported for:
- multiclass classifications (i.e. when `n_classes > 2`),
- multioutput classifications (i.e. when `n_outputs_ > 1`),
- classifications trained on data with missing values.
The constraints hold over the probability of the positive class.
Read more in the :ref:`User Guide <monotonic_cst_gbdt>`.
.. versionadded:: 1.4
Attributes
----------
classes_ : ndarray of shape (n_classes,) or list of ndarray
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : ndarray of shape (n_features,)
The impurity-based feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the (normalized)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/tree/_reingold_tilford.py | sklearn/tree/_reingold_tilford.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
class DrawTree:
def __init__(self, tree, parent=None, depth=0, number=1):
self.x = -1.0
self.y = depth
self.tree = tree
self.children = [
DrawTree(c, self, depth + 1, i + 1) for i, c in enumerate(tree.children)
]
self.parent = parent
self.thread = None
self.mod = 0
self.ancestor = self
self.change = self.shift = 0
self._lmost_sibling = None
# this is the number of the node in its group of siblings 1..n
self.number = number
def left(self):
return self.thread or (len(self.children) and self.children[0])
def right(self):
return self.thread or (len(self.children) and self.children[-1])
def lbrother(self):
n = None
if self.parent:
for node in self.parent.children:
if node == self:
return n
else:
n = node
return n
def get_lmost_sibling(self):
if not self._lmost_sibling and self.parent and self != self.parent.children[0]:
self._lmost_sibling = self.parent.children[0]
return self._lmost_sibling
lmost_sibling = property(get_lmost_sibling)
def __str__(self):
return "%s: x=%s mod=%s" % (self.tree, self.x, self.mod)
def __repr__(self):
return self.__str__()
def max_extents(self):
extents = [c.max_extents() for c in self.children]
extents.append((self.x, self.y))
return np.max(extents, axis=0)
def buchheim(tree):
dt = first_walk(DrawTree(tree))
min = second_walk(dt)
if min < 0:
third_walk(dt, -min)
return dt
def third_walk(tree, n):
tree.x += n
for c in tree.children:
third_walk(c, n)
def first_walk(v, distance=1.0):
if len(v.children) == 0:
if v.lmost_sibling:
v.x = v.lbrother().x + distance
else:
v.x = 0.0
else:
default_ancestor = v.children[0]
for w in v.children:
first_walk(w)
default_ancestor = apportion(w, default_ancestor, distance)
# print("finished v =", v.tree, "children")
execute_shifts(v)
midpoint = (v.children[0].x + v.children[-1].x) / 2
w = v.lbrother()
if w:
v.x = w.x + distance
v.mod = v.x - midpoint
else:
v.x = midpoint
return v
def apportion(v, default_ancestor, distance):
w = v.lbrother()
if w is not None:
# in buchheim notation:
# i == inner; o == outer; r == right; l == left; r = +; l = -
vir = vor = v
vil = w
vol = v.lmost_sibling
sir = sor = v.mod
sil = vil.mod
sol = vol.mod
while vil.right() and vir.left():
vil = vil.right()
vir = vir.left()
vol = vol.left()
vor = vor.right()
vor.ancestor = v
shift = (vil.x + sil) - (vir.x + sir) + distance
if shift > 0:
move_subtree(ancestor(vil, v, default_ancestor), v, shift)
sir = sir + shift
sor = sor + shift
sil += vil.mod
sir += vir.mod
sol += vol.mod
sor += vor.mod
if vil.right() and not vor.right():
vor.thread = vil.right()
vor.mod += sil - sor
else:
if vir.left() and not vol.left():
vol.thread = vir.left()
vol.mod += sir - sol
default_ancestor = v
return default_ancestor
def move_subtree(wl, wr, shift):
subtrees = wr.number - wl.number
# print(wl.tree, "is conflicted with", wr.tree, 'moving', subtrees,
# 'shift', shift)
# print wl, wr, wr.number, wl.number, shift, subtrees, shift/subtrees
wr.change -= shift / subtrees
wr.shift += shift
wl.change += shift / subtrees
wr.x += shift
wr.mod += shift
def execute_shifts(v):
shift = change = 0
for w in v.children[::-1]:
# print("shift:", w, shift, w.change)
w.x += shift
w.mod += shift
change += w.change
shift += w.shift + change
def ancestor(vil, v, default_ancestor):
# the relevant text is at the bottom of page 7 of
# "Improving Walker's Algorithm to Run in Linear Time" by Buchheim et al,
# (2002)
# https://citeseerx.ist.psu.edu/doc_view/pid/1f41c3c2a4880dc49238e46d555f16d28da2940d
if vil.ancestor in v.parent.children:
return vil.ancestor
else:
return default_ancestor
def second_walk(v, m=0, depth=0, min=None):
v.x += m
v.y = depth
if min is None or v.x < min:
min = v.x
for w in v.children:
min = second_walk(w, m + v.mod, depth + 1, min)
return min
class Tree:
def __init__(self, label="", node_id=-1, *children):
self.label = label
self.node_id = node_id
if children:
self.children = children
else:
self.children = []
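# Illustrative sketch (standalone, not used by the exporter): laying out a tiny
# tree with buchheim() above and collecting the (x, y) coordinate assigned to
# each node. The labels and helper name below are made up for this example.
def _demo_buchheim_layout():
    root = Tree("root", 0, Tree("left", 1), Tree("right", 2, Tree("grandchild", 3)))
    drawn = buchheim(root)
    coords = {}
    stack = [drawn]
    while stack:
        node = stack.pop()
        coords[node.tree.label] = (node.x, node.y)
        stack.extend(node.children)
    return coords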
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/tree/_export.py | sklearn/tree/_export.py | """
This module defines export functions for decision trees.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from collections.abc import Iterable
from io import StringIO
from numbers import Integral
import numpy as np
from sklearn.base import is_classifier
from sklearn.tree import (
DecisionTreeClassifier,
DecisionTreeRegressor,
_criterion,
_tree,
)
from sklearn.tree._reingold_tilford import Tree, buchheim
from sklearn.utils._param_validation import (
HasMethods,
Interval,
StrOptions,
validate_params,
)
from sklearn.utils.validation import check_array, check_is_fitted
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360.0 / n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.0
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [
(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0),
]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))), (int(255 * (g + m))), (int(255 * (b + m)))]
color_list.append(rgb)
return color_list
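# Illustrative sketch (standalone, not used by the exporters): _color_brew
# returns n (R, G, B) triples with evenly spaced hues; here they are turned
# into the "#RRGGBB" hex strings used when filling nodes. The helper name
# below is made up for this example.
def _demo_color_brew_hex(n=3):
    return ["#%02x%02x%02x" % tuple(rgb) for rgb in _color_brew(n)]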
class Sentinel:
def __repr__(self):
return '"tree.dot"'
SENTINEL = Sentinel()
@validate_params(
{
"decision_tree": [DecisionTreeClassifier, DecisionTreeRegressor],
"max_depth": [Interval(Integral, 0, None, closed="left"), None],
"feature_names": ["array-like", None],
"class_names": ["array-like", "boolean", None],
"label": [StrOptions({"all", "root", "none"})],
"filled": ["boolean"],
"impurity": ["boolean"],
"node_ids": ["boolean"],
"proportion": ["boolean"],
"rounded": ["boolean"],
"precision": [Interval(Integral, 0, None, closed="left"), None],
"ax": "no_validation", # delegate validation to matplotlib
"fontsize": [Interval(Integral, 0, None, closed="left"), None],
},
prefer_skip_nested_validation=True,
)
def plot_tree(
decision_tree,
*,
max_depth=None,
feature_names=None,
class_names=None,
label="all",
filled=False,
impurity=True,
node_ids=False,
proportion=False,
rounded=False,
precision=3,
ax=None,
fontsize=None,
):
"""Plot a decision tree.
The sample counts that are shown are weighted with any sample_weights that
might be present.
The visualization is fit automatically to the size of the axis.
Use the ``figsize`` or ``dpi`` arguments of ``plt.figure`` to control
the size of the rendering.
Read more in the :ref:`User Guide <tree>`.
.. versionadded:: 0.21
Parameters
----------
decision_tree : decision tree regressor or classifier
The decision tree to be plotted.
max_depth : int, default=None
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : array-like of str, default=None
Names of each of the features.
If None, generic names will be used ("x[0]", "x[1]", ...).
class_names : array-like of str or True, default=None
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, default='all'
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, default=False
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
impurity : bool, default=True
When set to ``True``, show the impurity at each node.
node_ids : bool, default=False
When set to ``True``, show the ID number on each node.
proportion : bool, default=False
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rounded : bool, default=False
When set to ``True``, draw node boxes with rounded corners and use
Helvetica fonts instead of Times-Roman.
precision : int, default=3
Number of digits of precision for floating point in the values of
impurity, threshold and value attributes of each node.
ax : matplotlib axis, default=None
Axes to plot to. If None, use current axis. Any previous content
is cleared.
fontsize : int, default=None
Size of text font. If None, determined automatically to fit figure.
Returns
-------
annotations : list of artists
List containing the artists for the annotation boxes making up the
tree.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.plot_tree(clf)
[...]
"""
check_is_fitted(decision_tree)
exporter = _MPLTreeExporter(
max_depth=max_depth,
feature_names=feature_names,
class_names=class_names,
label=label,
filled=filled,
impurity=impurity,
node_ids=node_ids,
proportion=proportion,
rounded=rounded,
precision=precision,
fontsize=fontsize,
)
return exporter.export(decision_tree, ax=ax)
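# Usage sketch (not part of the library; assumes matplotlib is installed and
# reuses ``clf`` and ``iris`` from the docstring example above): because the
# visualization is fit to the axes, the rendering size is controlled through
# the Matplotlib figure rather than through ``plot_tree`` itself:
#
#     import matplotlib.pyplot as plt
#     fig, ax = plt.subplots(figsize=(12, 8), dpi=150)
#     plot_tree(clf, feature_names=iris.feature_names, filled=True, ax=ax)
#     fig.savefig("tree.png")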
class _BaseTreeExporter:
def __init__(
self,
max_depth=None,
feature_names=None,
class_names=None,
label="all",
filled=False,
impurity=True,
node_ids=False,
proportion=False,
rounded=False,
precision=3,
fontsize=None,
):
self.max_depth = max_depth
self.feature_names = feature_names
self.class_names = class_names
self.label = label
self.filled = filled
self.impurity = impurity
self.node_ids = node_ids
self.proportion = proportion
self.rounded = rounded
self.precision = precision
self.fontsize = fontsize
def get_color(self, value):
# Find the appropriate color & intensity for a node
if self.colors["bounds"] is None:
# Classification tree
color = list(self.colors["rgb"][np.argmax(value)])
sorted_values = sorted(value, reverse=True)
if len(sorted_values) == 1:
alpha = 0.0
else:
alpha = (sorted_values[0] - sorted_values[1]) / (1 - sorted_values[1])
else:
# Regression tree or multi-output
color = list(self.colors["rgb"][0])
alpha = (value - self.colors["bounds"][0]) / (
self.colors["bounds"][1] - self.colors["bounds"][0]
)
# compute the color as alpha against white
color = [int(round(alpha * c + (1 - alpha) * 255, 0)) for c in color]
# Return html color code in #RRGGBB format
return "#%2x%2x%2x" % tuple(color)
def get_fill_color(self, tree, node_id):
# Fetch appropriate color for node
if "rgb" not in self.colors:
# Initialize colors and bounds if required
self.colors["rgb"] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
# Find max and min impurities for multi-output
# The next line uses -max(impurity) instead of min(-impurity)
# and -min(impurity) instead of max(-impurity) on purpose, in
# order to avoid what looks like an issue with SIMD on non
# memory aligned arrays on 32bit OS. For more details see
# https://github.com/scikit-learn/scikit-learn/issues/27506.
self.colors["bounds"] = (-np.max(tree.impurity), -np.min(tree.impurity))
elif tree.n_classes[0] == 1 and len(np.unique(tree.value)) != 1:
# Find max and min values in leaf nodes for regression
self.colors["bounds"] = (np.min(tree.value), np.max(tree.value))
if tree.n_outputs == 1:
node_val = tree.value[node_id][0, :]
if (
tree.n_classes[0] == 1
and isinstance(node_val, Iterable)
and self.colors["bounds"] is not None
):
# Unpack the float only for the regression tree case.
# Classification tree requires an Iterable in `get_color`.
node_val = node_val.item()
else:
# If multi-output color node by impurity
node_val = -tree.impurity[node_id]
return self.get_color(node_val)
def node_to_str(self, tree, node_id, criterion):
# Generate the node content string
if tree.n_outputs == 1:
value = tree.value[node_id][0, :]
else:
value = tree.value[node_id]
# Should labels be shown?
labels = (self.label == "root" and node_id == 0) or self.label == "all"
characters = self.characters
node_string = characters[-1]
# Write node ID
if self.node_ids:
if labels:
node_string += "node "
node_string += characters[0] + str(node_id) + characters[4]
# Write decision criteria
if tree.children_left[node_id] != _tree.TREE_LEAF:
# Always write node decision criteria, except for leaves
if self.feature_names is not None:
feature = self.feature_names[tree.feature[node_id]]
feature = self.str_escape(feature)
else:
feature = "x%s%s%s" % (
characters[1],
tree.feature[node_id],
characters[2],
)
node_string += "%s %s %s%s" % (
feature,
characters[3],
round(tree.threshold[node_id], self.precision),
characters[4],
)
# Write impurity
if self.impurity:
if isinstance(criterion, _criterion.FriedmanMSE):
criterion = "friedman_mse"
elif isinstance(criterion, _criterion.MSE) or criterion == "squared_error":
criterion = "squared_error"
elif not isinstance(criterion, str):
criterion = "impurity"
if labels:
node_string += "%s = " % criterion
node_string += (
str(round(tree.impurity[node_id], self.precision)) + characters[4]
)
# Write node sample count
if labels:
node_string += "samples = "
if self.proportion:
percent = (
100.0 * tree.n_node_samples[node_id] / float(tree.n_node_samples[0])
)
node_string += str(round(percent, 1)) + "%" + characters[4]
else:
node_string += str(tree.n_node_samples[node_id]) + characters[4]
# Write node class distribution / regression value
if not self.proportion and tree.n_classes[0] != 1:
            # tree.value stores class proportions; scale them back to weighted
            # sample counts for display when proportions were not requested
value = value * tree.weighted_n_node_samples[node_id]
if labels:
node_string += "value = "
if tree.n_classes[0] == 1:
# Regression
value_text = np.around(value, self.precision)
elif self.proportion:
# Classification
value_text = np.around(value, self.precision)
elif np.all(np.equal(np.mod(value, 1), 0)):
# Classification without floating-point weights
value_text = value.astype(int)
else:
# Classification with floating-point weights
value_text = np.around(value, self.precision)
# Strip whitespace
value_text = str(value_text.astype("S32")).replace("b'", "'")
value_text = value_text.replace("' '", ", ").replace("'", "")
if tree.n_classes[0] == 1 and tree.n_outputs == 1:
value_text = value_text.replace("[", "").replace("]", "")
value_text = value_text.replace("\n ", characters[4])
node_string += value_text + characters[4]
# Write node majority class
if (
self.class_names is not None
and tree.n_classes[0] != 1
and tree.n_outputs == 1
):
# Only done for single-output classification trees
if labels:
node_string += "class = "
if self.class_names is not True:
class_name = self.class_names[np.argmax(value)]
class_name = self.str_escape(class_name)
else:
class_name = "y%s%s%s" % (
characters[1],
np.argmax(value),
characters[2],
)
node_string += class_name
# Clean up any trailing newlines
if node_string.endswith(characters[4]):
node_string = node_string[: -len(characters[4])]
return node_string + characters[5]
def str_escape(self, string):
return string
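# For reference, a split node label is built as decision rule, impurity,
# sample count and value, joined by characters[4] -- e.g. "x[0] <= 0.0",
# "gini = 0.5", "samples = 6", "value = [3, 3]" in the toy examples exercised
# by the export tests further down.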
class _DOTTreeExporter(_BaseTreeExporter):
def __init__(
self,
out_file=SENTINEL,
max_depth=None,
feature_names=None,
class_names=None,
label="all",
filled=False,
leaves_parallel=False,
impurity=True,
node_ids=False,
proportion=False,
rotate=False,
rounded=False,
special_characters=False,
precision=3,
fontname="helvetica",
):
super().__init__(
max_depth=max_depth,
feature_names=feature_names,
class_names=class_names,
label=label,
filled=filled,
impurity=impurity,
node_ids=node_ids,
proportion=proportion,
rounded=rounded,
precision=precision,
)
self.leaves_parallel = leaves_parallel
self.out_file = out_file
self.special_characters = special_characters
self.fontname = fontname
self.rotate = rotate
# PostScript compatibility for special characters
if special_characters:
self.characters = ["#", "<SUB>", "</SUB>", "≤", "<br/>", ">", "<"]
else:
self.characters = ["#", "[", "]", "<=", "\\n", '"', '"']
# The depth of each node for plotting with 'leaf' option
self.ranks = {"leaves": []}
# The colors to render each node with
self.colors = {"bounds": None}
def export(self, decision_tree):
# Check length of feature_names before getting into the tree node
# Raise error if length of feature_names does not match
# n_features_in_ in the decision_tree
if self.feature_names is not None:
if len(self.feature_names) != decision_tree.n_features_in_:
raise ValueError(
"Length of feature_names, %d does not match number of features, %d"
% (len(self.feature_names), decision_tree.n_features_in_)
)
# each part writes to out_file
self.head()
# Now recurse the tree and add node & edge attributes
if isinstance(decision_tree, _tree.Tree):
self.recurse(decision_tree, 0, criterion="impurity")
else:
self.recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
self.tail()
def tail(self):
# If required, draw leaf nodes at same depth as each other
if self.leaves_parallel:
for rank in sorted(self.ranks):
self.out_file.write(
"{rank=same ; " + "; ".join(r for r in self.ranks[rank]) + "} ;\n"
)
self.out_file.write("}")
def head(self):
self.out_file.write("digraph Tree {\n")
# Specify node aesthetics
self.out_file.write("node [shape=box")
rounded_filled = []
if self.filled:
rounded_filled.append("filled")
if self.rounded:
rounded_filled.append("rounded")
if len(rounded_filled) > 0:
self.out_file.write(
', style="%s", color="black"' % ", ".join(rounded_filled)
)
self.out_file.write(', fontname="%s"' % self.fontname)
self.out_file.write("] ;\n")
# Specify graph & edge aesthetics
if self.leaves_parallel:
self.out_file.write("graph [ranksep=equally, splines=polyline] ;\n")
self.out_file.write('edge [fontname="%s"] ;\n' % self.fontname)
if self.rotate:
self.out_file.write("rankdir=LR ;\n")
def recurse(self, tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if self.max_depth is None or depth <= self.max_depth:
# Collect ranks for 'leaf' option in plot_options
if left_child == _tree.TREE_LEAF:
self.ranks["leaves"].append(str(node_id))
elif str(depth) not in self.ranks:
self.ranks[str(depth)] = [str(node_id)]
else:
self.ranks[str(depth)].append(str(node_id))
self.out_file.write(
"%d [label=%s" % (node_id, self.node_to_str(tree, node_id, criterion))
)
if self.filled:
self.out_file.write(
', fillcolor="%s"' % self.get_fill_color(tree, node_id)
)
self.out_file.write("] ;\n")
if parent is not None:
# Add edge to parent
self.out_file.write("%d -> %d" % (parent, node_id))
if parent == 0:
# Draw True/False labels if parent is root node
angles = np.array([45, -45]) * ((self.rotate - 0.5) * -2)
self.out_file.write(" [labeldistance=2.5, labelangle=")
if node_id == 1:
self.out_file.write('%d, headlabel="True"]' % angles[0])
else:
self.out_file.write('%d, headlabel="False"]' % angles[1])
self.out_file.write(" ;\n")
if left_child != _tree.TREE_LEAF:
self.recurse(
tree,
left_child,
criterion=criterion,
parent=node_id,
depth=depth + 1,
)
self.recurse(
tree,
right_child,
criterion=criterion,
parent=node_id,
depth=depth + 1,
)
else:
self.ranks["leaves"].append(str(node_id))
self.out_file.write('%d [label="(...)"' % node_id)
if self.filled:
# color cropped nodes grey
self.out_file.write(', fillcolor="#C0C0C0"')
self.out_file.write("] ;\n" % node_id)
if parent is not None:
# Add edge to parent
self.out_file.write("%d -> %d ;\n" % (parent, node_id))
def str_escape(self, string):
# override default escaping for graphviz
return string.replace('"', r"\"")
class _MPLTreeExporter(_BaseTreeExporter):
def __init__(
self,
max_depth=None,
feature_names=None,
class_names=None,
label="all",
filled=False,
impurity=True,
node_ids=False,
proportion=False,
rounded=False,
precision=3,
fontsize=None,
):
super().__init__(
max_depth=max_depth,
feature_names=feature_names,
class_names=class_names,
label=label,
filled=filled,
impurity=impurity,
node_ids=node_ids,
proportion=proportion,
rounded=rounded,
precision=precision,
)
self.fontsize = fontsize
# The depth of each node for plotting with 'leaf' option
self.ranks = {"leaves": []}
# The colors to render each node with
self.colors = {"bounds": None}
self.characters = ["#", "[", "]", "<=", "\n", "", ""]
self.bbox_args = dict()
if self.rounded:
self.bbox_args["boxstyle"] = "round"
self.arrow_args = dict(arrowstyle="<-")
def _make_tree(self, node_id, et, criterion, depth=0):
# traverses _tree.Tree recursively, builds intermediate
# "_reingold_tilford.Tree" object
name = self.node_to_str(et, node_id, criterion=criterion)
if et.children_left[node_id] != _tree.TREE_LEAF and (
self.max_depth is None or depth <= self.max_depth
):
children = [
self._make_tree(
et.children_left[node_id], et, criterion, depth=depth + 1
),
self._make_tree(
et.children_right[node_id], et, criterion, depth=depth + 1
),
]
else:
return Tree(name, node_id)
return Tree(name, node_id, *children)
def export(self, decision_tree, ax=None):
import matplotlib.pyplot as plt
from matplotlib.text import Annotation
if ax is None:
ax = plt.gca()
ax.clear()
ax.set_axis_off()
my_tree = self._make_tree(0, decision_tree.tree_, decision_tree.criterion)
draw_tree = buchheim(my_tree)
# important to make sure we're still
# inside the axis after drawing the box
# this makes sense because the width of a box
# is about the same as the distance between boxes
max_x, max_y = draw_tree.max_extents() + 1
ax_width = ax.get_window_extent().width
ax_height = ax.get_window_extent().height
scale_x = ax_width / max_x
scale_y = ax_height / max_y
self.recurse(draw_tree, decision_tree.tree_, ax, max_x, max_y)
anns = [ann for ann in ax.get_children() if isinstance(ann, Annotation)]
# update sizes of all bboxes
renderer = ax.figure.canvas.get_renderer()
for ann in anns:
ann.update_bbox_position_size(renderer)
if self.fontsize is None:
# get figure to data transform
# adjust fontsize to avoid overlap
# get max box width and height
extents = [
bbox_patch.get_window_extent()
for ann in anns
if (bbox_patch := ann.get_bbox_patch()) is not None
]
max_width = max([extent.width for extent in extents])
max_height = max([extent.height for extent in extents])
# width should be around scale_x in axis coordinates
size = anns[0].get_fontsize() * min(
scale_x / max_width, scale_y / max_height
)
for ann in anns:
ann.set_fontsize(size)
return anns
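    # Note on the auto-sizing above: if, say, the widest label box is twice as
    # wide as the horizontal slot available per node (max_width == 2 *
    # scale_x), the shared font size is halved so that every box fits; the
    # same reasoning applies in the vertical direction.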
def recurse(self, node, tree, ax, max_x, max_y, depth=0):
import matplotlib.pyplot as plt
# kwargs for annotations without a bounding box
common_kwargs = dict(
zorder=100 - 10 * depth,
xycoords="axes fraction",
)
if self.fontsize is not None:
common_kwargs["fontsize"] = self.fontsize
# kwargs for annotations with a bounding box
kwargs = dict(
ha="center",
va="center",
bbox=self.bbox_args.copy(),
arrowprops=self.arrow_args.copy(),
**common_kwargs,
)
kwargs["arrowprops"]["edgecolor"] = plt.rcParams["text.color"]
# offset things by .5 to center them in plot
xy = ((node.x + 0.5) / max_x, (max_y - node.y - 0.5) / max_y)
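        # (buchheim places the root at y = 0 and children at increasing y, so
        # "max_y - node.y" flips the layout vertically to put the root at the
        # top; both coordinates are expressed as axes fractions.)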
if self.max_depth is None or depth <= self.max_depth:
if self.filled:
kwargs["bbox"]["fc"] = self.get_fill_color(tree, node.tree.node_id)
else:
kwargs["bbox"]["fc"] = ax.get_facecolor()
if node.parent is None:
# root
ax.annotate(node.tree.label, xy, **kwargs)
else:
xy_parent = (
(node.parent.x + 0.5) / max_x,
(max_y - node.parent.y - 0.5) / max_y,
)
ax.annotate(node.tree.label, xy_parent, xy, **kwargs)
# Draw True/False labels if parent is root node
if node.parent.parent is None:
# Adjust the position for the text to be slightly above the arrow
text_pos = (
(xy_parent[0] + xy[0]) / 2,
(xy_parent[1] + xy[1]) / 2,
)
# Annotate the arrow with the edge label to indicate the child
# where the sample-split condition is satisfied
if node.parent.left() == node:
label_text, label_ha = ("True ", "right")
else:
label_text, label_ha = (" False", "left")
ax.annotate(label_text, text_pos, ha=label_ha, **common_kwargs)
for child in node.children:
self.recurse(child, tree, ax, max_x, max_y, depth=depth + 1)
else:
xy_parent = (
(node.parent.x + 0.5) / max_x,
(max_y - node.parent.y - 0.5) / max_y,
)
kwargs["bbox"]["fc"] = "grey"
ax.annotate("\n (...) \n", xy_parent, xy, **kwargs)
@validate_params(
{
"decision_tree": "no_validation",
"out_file": [str, None, HasMethods("write")],
"max_depth": [Interval(Integral, 0, None, closed="left"), None],
"feature_names": ["array-like", None],
"class_names": ["array-like", "boolean", None],
"label": [StrOptions({"all", "root", "none"})],
"filled": ["boolean"],
"leaves_parallel": ["boolean"],
"impurity": ["boolean"],
"node_ids": ["boolean"],
"proportion": ["boolean"],
"rotate": ["boolean"],
"rounded": ["boolean"],
"special_characters": ["boolean"],
"precision": [Interval(Integral, 0, None, closed="left"), None],
"fontname": [str],
},
prefer_skip_nested_validation=True,
)
def export_graphviz(
decision_tree,
out_file=None,
*,
max_depth=None,
feature_names=None,
class_names=None,
label="all",
filled=False,
leaves_parallel=False,
impurity=True,
node_ids=False,
proportion=False,
rotate=False,
rounded=False,
special_characters=False,
precision=3,
fontname="helvetica",
):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
decision_tree : object
The decision tree estimator to be exported to GraphViz.
out_file : object or str, default=None
Handle or name of the output file. If ``None``, the result is
returned as a string.
.. versionchanged:: 0.20
Default of out_file changed from "tree.dot" to None.
max_depth : int, default=None
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : array-like of shape (n_features,), default=None
An array containing the feature names.
If None, generic names will be used ("x[0]", "x[1]", ...).
class_names : array-like of shape (n_classes,) or bool, default=None
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, default='all'
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, default=False
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
leaves_parallel : bool, default=False
When set to ``True``, draw all leaf nodes at the bottom of the tree.
impurity : bool, default=True
When set to ``True``, show the impurity at each node.
node_ids : bool, default=False
When set to ``True``, show the ID number on each node.
proportion : bool, default=False
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rotate : bool, default=False
When set to ``True``, orient tree left to right rather than top-down.
rounded : bool, default=False
When set to ``True``, draw node boxes with rounded corners.
special_characters : bool, default=False
When set to ``False``, ignore special characters for PostScript
compatibility.
precision : int, default=3
Number of digits of precision for floating point in the values of
impurity, threshold and value attributes of each node.
fontname : str, default='helvetica'
Name of font used to render text.
Returns
-------
dot_data : str
String representation of the input tree in GraphViz dot format.
Only returned if ``out_file`` is None.
.. versionadded:: 0.18
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf)
'digraph Tree {...
"""
if feature_names is not None:
if any((not isinstance(name, str) for name in feature_names)):
raise ValueError("All feature names must be strings.")
feature_names = check_array(
feature_names, ensure_2d=False, dtype=None, ensure_min_samples=0
)
if class_names is not None and not isinstance(class_names, bool):
class_names = check_array(
class_names, ensure_2d=False, dtype=None, ensure_min_samples=0
)
check_is_fitted(decision_tree)
own_file = False
return_string = False
try:
if isinstance(out_file, str):
out_file = open(out_file, "w", encoding="utf-8")
own_file = True
if out_file is None:
return_string = True
out_file = StringIO()
exporter = _DOTTreeExporter(
out_file=out_file,
max_depth=max_depth,
feature_names=feature_names,
class_names=class_names,
label=label,
filled=filled,
leaves_parallel=leaves_parallel,
impurity=impurity,
node_ids=node_ids,
proportion=proportion,
rotate=rotate,
rounded=rounded,
special_characters=special_characters,
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/tree/tests/test_export.py | sklearn/tree/tests/test_export.py | """
Testing for export functions of decision trees (sklearn.tree.export).
"""
from io import StringIO
from re import finditer, search
from textwrap import dedent
import numpy as np
import pytest
from numpy.random import RandomState
from sklearn.base import is_classifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.exceptions import NotFittedError
from sklearn.tree import (
DecisionTreeClassifier,
DecisionTreeRegressor,
export_graphviz,
export_text,
plot_tree,
)
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
y2 = [[-1, 1], [-1, 1], [-1, 1], [1, 2], [1, 2], [1, 3]]
w = [1, 1, 1, 0.5, 0.5, 0.5]
y_degraded = [1, 1, 1, 1, 1, 1]
def test_graphviz_toy():
# Check correctness of export_graphviz
clf = DecisionTreeClassifier(
max_depth=3, min_samples_split=2, criterion="gini", random_state=2
)
clf.fit(X, y)
# Test export code
contents1 = export_graphviz(clf, out_file=None)
contents2 = (
"digraph Tree {\n"
'node [shape=box, fontname="helvetica"] ;\n'
'edge [fontname="helvetica"] ;\n'
'0 [label="x[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n'
'value = [3, 3]"] ;\n'
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n'
"0 -> 1 [labeldistance=2.5, labelangle=45, "
'headlabel="True"] ;\n'
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n'
"0 -> 2 [labeldistance=2.5, labelangle=-45, "
'headlabel="False"] ;\n'
"}"
)
assert contents1 == contents2
# Test with feature_names
contents1 = export_graphviz(
clf, feature_names=["feature0", "feature1"], out_file=None
)
contents2 = (
"digraph Tree {\n"
'node [shape=box, fontname="helvetica"] ;\n'
'edge [fontname="helvetica"] ;\n'
'0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n'
'value = [3, 3]"] ;\n'
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n'
"0 -> 1 [labeldistance=2.5, labelangle=45, "
'headlabel="True"] ;\n'
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n'
"0 -> 2 [labeldistance=2.5, labelangle=-45, "
'headlabel="False"] ;\n'
"}"
)
assert contents1 == contents2
# Test with feature_names (escaped)
contents1 = export_graphviz(
clf, feature_names=['feature"0"', 'feature"1"'], out_file=None
)
contents2 = (
"digraph Tree {\n"
'node [shape=box, fontname="helvetica"] ;\n'
'edge [fontname="helvetica"] ;\n'
'0 [label="feature\\"0\\" <= 0.0\\n'
"gini = 0.5\\nsamples = 6\\n"
'value = [3, 3]"] ;\n'
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n'
"0 -> 1 [labeldistance=2.5, labelangle=45, "
'headlabel="True"] ;\n'
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n'
"0 -> 2 [labeldistance=2.5, labelangle=-45, "
'headlabel="False"] ;\n'
"}"
)
assert contents1 == contents2
# Test with class_names
contents1 = export_graphviz(clf, class_names=["yes", "no"], out_file=None)
contents2 = (
"digraph Tree {\n"
'node [shape=box, fontname="helvetica"] ;\n'
'edge [fontname="helvetica"] ;\n'
'0 [label="x[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n'
'value = [3, 3]\\nclass = yes"] ;\n'
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n'
'class = yes"] ;\n'
"0 -> 1 [labeldistance=2.5, labelangle=45, "
'headlabel="True"] ;\n'
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n'
'class = no"] ;\n'
"0 -> 2 [labeldistance=2.5, labelangle=-45, "
'headlabel="False"] ;\n'
"}"
)
assert contents1 == contents2
# Test with class_names (escaped)
contents1 = export_graphviz(clf, class_names=['"yes"', '"no"'], out_file=None)
contents2 = (
"digraph Tree {\n"
'node [shape=box, fontname="helvetica"] ;\n'
'edge [fontname="helvetica"] ;\n'
'0 [label="x[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n'
'value = [3, 3]\\nclass = \\"yes\\""] ;\n'
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n'
'class = \\"yes\\""] ;\n'
"0 -> 1 [labeldistance=2.5, labelangle=45, "
'headlabel="True"] ;\n'
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n'
'class = \\"no\\""] ;\n'
"0 -> 2 [labeldistance=2.5, labelangle=-45, "
'headlabel="False"] ;\n'
"}"
)
assert contents1 == contents2
# Test plot_options
contents1 = export_graphviz(
clf,
filled=True,
impurity=False,
proportion=True,
special_characters=True,
rounded=True,
out_file=None,
fontname="sans",
)
contents2 = (
"digraph Tree {\n"
'node [shape=box, style="filled, rounded", color="black", '
'fontname="sans"] ;\n'
'edge [fontname="sans"] ;\n'
"0 [label=<x<SUB>0</SUB> ≤ 0.0<br/>samples = 100.0%<br/>"
'value = [0.5, 0.5]>, fillcolor="#ffffff"] ;\n'
"1 [label=<samples = 50.0%<br/>value = [1.0, 0.0]>, "
'fillcolor="#e58139"] ;\n'
"0 -> 1 [labeldistance=2.5, labelangle=45, "
'headlabel="True"] ;\n'
"2 [label=<samples = 50.0%<br/>value = [0.0, 1.0]>, "
'fillcolor="#399de5"] ;\n'
"0 -> 2 [labeldistance=2.5, labelangle=-45, "
'headlabel="False"] ;\n'
"}"
)
assert contents1 == contents2
# Test max_depth
contents1 = export_graphviz(clf, max_depth=0, class_names=True, out_file=None)
contents2 = (
"digraph Tree {\n"
'node [shape=box, fontname="helvetica"] ;\n'
'edge [fontname="helvetica"] ;\n'
'0 [label="x[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n'
'value = [3, 3]\\nclass = y[0]"] ;\n'
'1 [label="(...)"] ;\n'
"0 -> 1 ;\n"
'2 [label="(...)"] ;\n'
"0 -> 2 ;\n"
"}"
)
assert contents1 == contents2
# Test max_depth with plot_options
contents1 = export_graphviz(
clf, max_depth=0, filled=True, out_file=None, node_ids=True
)
contents2 = (
"digraph Tree {\n"
'node [shape=box, style="filled", color="black", '
'fontname="helvetica"] ;\n'
'edge [fontname="helvetica"] ;\n'
'0 [label="node #0\\nx[0] <= 0.0\\ngini = 0.5\\n'
'samples = 6\\nvalue = [3, 3]", fillcolor="#ffffff"] ;\n'
'1 [label="(...)", fillcolor="#C0C0C0"] ;\n'
"0 -> 1 ;\n"
'2 [label="(...)", fillcolor="#C0C0C0"] ;\n'
"0 -> 2 ;\n"
"}"
)
assert contents1 == contents2
# Test multi-output with weighted samples
clf = DecisionTreeClassifier(
max_depth=2, min_samples_split=2, criterion="gini", random_state=2
)
clf = clf.fit(X, y2, sample_weight=w)
contents1 = export_graphviz(clf, filled=True, impurity=False, out_file=None)
contents2 = (
"digraph Tree {\n"
'node [shape=box, style="filled", color="black", '
'fontname="helvetica"] ;\n'
'edge [fontname="helvetica"] ;\n'
'0 [label="x[0] <= 0.0\\nsamples = 6\\n'
"value = [[3.0, 1.5, 0.0]\\n"
'[3.0, 1.0, 0.5]]", fillcolor="#ffffff"] ;\n'
'1 [label="samples = 3\\nvalue = [[3, 0, 0]\\n'
'[3, 0, 0]]", fillcolor="#e58139"] ;\n'
"0 -> 1 [labeldistance=2.5, labelangle=45, "
'headlabel="True"] ;\n'
'2 [label="x[0] <= 1.5\\nsamples = 3\\n'
"value = [[0.0, 1.5, 0.0]\\n"
'[0.0, 1.0, 0.5]]", fillcolor="#f1bd97"] ;\n'
"0 -> 2 [labeldistance=2.5, labelangle=-45, "
'headlabel="False"] ;\n'
'3 [label="samples = 2\\nvalue = [[0, 1, 0]\\n'
'[0, 1, 0]]", fillcolor="#e58139"] ;\n'
"2 -> 3 ;\n"
'4 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n'
'[0.0, 0.0, 0.5]]", fillcolor="#e58139"] ;\n'
"2 -> 4 ;\n"
"}"
)
assert contents1 == contents2
# Test regression output with plot_options
clf = DecisionTreeRegressor(
max_depth=3, min_samples_split=2, criterion="squared_error", random_state=2
)
clf.fit(X, y)
contents1 = export_graphviz(
clf,
filled=True,
leaves_parallel=True,
out_file=None,
rotate=True,
rounded=True,
fontname="sans",
)
contents2 = (
"digraph Tree {\n"
'node [shape=box, style="filled, rounded", color="black", '
'fontname="sans"] ;\n'
"graph [ranksep=equally, splines=polyline] ;\n"
'edge [fontname="sans"] ;\n'
"rankdir=LR ;\n"
'0 [label="x[0] <= 0.0\\nsquared_error = 1.0\\nsamples = 6\\n'
'value = 0.0", fillcolor="#f2c09c"] ;\n'
'1 [label="squared_error = 0.0\\nsamples = 3\\'
'nvalue = -1.0", '
'fillcolor="#ffffff"] ;\n'
"0 -> 1 [labeldistance=2.5, labelangle=-45, "
'headlabel="True"] ;\n'
'2 [label="squared_error = 0.0\\nsamples = 3\\nvalue = 1.0", '
'fillcolor="#e58139"] ;\n'
"0 -> 2 [labeldistance=2.5, labelangle=45, "
'headlabel="False"] ;\n'
"{rank=same ; 0} ;\n"
"{rank=same ; 1; 2} ;\n"
"}"
)
assert contents1 == contents2
# Test classifier with degraded learning set
clf = DecisionTreeClassifier(max_depth=3)
clf.fit(X, y_degraded)
contents1 = export_graphviz(clf, filled=True, out_file=None)
contents2 = (
"digraph Tree {\n"
'node [shape=box, style="filled", color="black", '
'fontname="helvetica"] ;\n'
'edge [fontname="helvetica"] ;\n'
'0 [label="gini = 0.0\\nsamples = 6\\nvalue = 6.0", '
'fillcolor="#ffffff"] ;\n'
"}"
    )
    assert contents1 == contents2
@pytest.mark.parametrize("constructor", [list, np.array])
def test_graphviz_feature_class_names_array_support(constructor):
# Check that export_graphviz treats feature names
# and class names correctly and supports arrays
clf = DecisionTreeClassifier(
max_depth=3, min_samples_split=2, criterion="gini", random_state=2
)
clf.fit(X, y)
# Test with feature_names
contents1 = export_graphviz(
clf, feature_names=constructor(["feature0", "feature1"]), out_file=None
)
contents2 = (
"digraph Tree {\n"
'node [shape=box, fontname="helvetica"] ;\n'
'edge [fontname="helvetica"] ;\n'
'0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n'
'value = [3, 3]"] ;\n'
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n'
"0 -> 1 [labeldistance=2.5, labelangle=45, "
'headlabel="True"] ;\n'
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n'
"0 -> 2 [labeldistance=2.5, labelangle=-45, "
'headlabel="False"] ;\n'
"}"
)
assert contents1 == contents2
# Test with class_names
contents1 = export_graphviz(
clf, class_names=constructor(["yes", "no"]), out_file=None
)
contents2 = (
"digraph Tree {\n"
'node [shape=box, fontname="helvetica"] ;\n'
'edge [fontname="helvetica"] ;\n'
'0 [label="x[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n'
'value = [3, 3]\\nclass = yes"] ;\n'
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n'
'class = yes"] ;\n'
"0 -> 1 [labeldistance=2.5, labelangle=45, "
'headlabel="True"] ;\n'
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n'
'class = no"] ;\n'
"0 -> 2 [labeldistance=2.5, labelangle=-45, "
'headlabel="False"] ;\n'
"}"
)
assert contents1 == contents2
def test_graphviz_errors():
# Check for errors of export_graphviz
clf = DecisionTreeClassifier(max_depth=3, min_samples_split=2)
# Check not-fitted decision tree error
out = StringIO()
with pytest.raises(NotFittedError):
export_graphviz(clf, out)
clf.fit(X, y)
    # Check that an error is raised when the length of feature_names
    # does not match the number of features
message = "Length of feature_names, 1 does not match number of features, 2"
with pytest.raises(ValueError, match=message):
export_graphviz(clf, None, feature_names=["a"])
message = "Length of feature_names, 3 does not match number of features, 2"
with pytest.raises(ValueError, match=message):
export_graphviz(clf, None, feature_names=["a", "b", "c"])
# Check error when feature_names contains non-string elements
message = "All feature names must be strings."
with pytest.raises(ValueError, match=message):
export_graphviz(clf, None, feature_names=["a", 1])
# Check error when argument is not an estimator
message = "is not an estimator instance"
with pytest.raises(TypeError, match=message):
export_graphviz(clf.fit(X, y).tree_)
# Check class_names error
out = StringIO()
with pytest.raises(IndexError):
export_graphviz(clf, out, class_names=[])
def test_friedman_mse_in_graphviz():
clf = DecisionTreeRegressor(criterion="friedman_mse", random_state=0)
clf.fit(X, y)
dot_data = StringIO()
export_graphviz(clf, out_file=dot_data)
clf = GradientBoostingClassifier(n_estimators=2, random_state=0)
clf.fit(X, y)
for estimator in clf.estimators_:
export_graphviz(estimator[0], out_file=dot_data)
for finding in finditer(r"\[.*?samples.*?\]", dot_data.getvalue()):
assert "friedman_mse" in finding.group()
def test_precision():
rng_reg = RandomState(2)
rng_clf = RandomState(8)
for X, y, clf in zip(
(rng_reg.random_sample((5, 2)), rng_clf.random_sample((1000, 4))),
(rng_reg.random_sample((5,)), rng_clf.randint(2, size=(1000,))),
(
DecisionTreeRegressor(
criterion="friedman_mse", random_state=0, max_depth=1
),
DecisionTreeClassifier(max_depth=1, random_state=0),
),
):
clf.fit(X, y)
for precision in (4, 3):
dot_data = export_graphviz(
clf, out_file=None, precision=precision, proportion=True
)
            # With the current random state, the impurity and the threshold
            # are reported with the number of decimal digits requested through
            # the `precision` argument of export_graphviz, so we check them
            # with strict equality. The reported node values may have fewer
            # decimal digits, so they are only checked with a
            # less-than-or-equal comparison.
# check value
for finding in finditer(r"value = \d+\.\d+", dot_data):
assert len(search(r"\.\d+", finding.group()).group()) <= precision + 1
# check impurity
if is_classifier(clf):
pattern = r"gini = \d+\.\d+"
else:
pattern = r"friedman_mse = \d+\.\d+"
# check impurity
for finding in finditer(pattern, dot_data):
assert len(search(r"\.\d+", finding.group()).group()) == precision + 1
# check threshold
for finding in finditer(r"<= \d+\.\d+", dot_data):
assert len(search(r"\.\d+", finding.group()).group()) == precision + 1
def test_export_text_errors():
clf = DecisionTreeClassifier(max_depth=2, random_state=0)
clf.fit(X, y)
err_msg = "feature_names must contain 2 elements, got 1"
with pytest.raises(ValueError, match=err_msg):
export_text(clf, feature_names=["a"])
err_msg = (
"When `class_names` is an array, it should contain as"
" many items as `decision_tree.classes_`. Got 1 while"
" the tree was fitted with 2 classes."
)
with pytest.raises(ValueError, match=err_msg):
export_text(clf, class_names=["a"])
def test_export_text():
clf = DecisionTreeClassifier(max_depth=2, random_state=0)
clf.fit(X, y)
expected_report = dedent(
"""
|--- feature_1 <= 0.00
| |--- class: -1
|--- feature_1 > 0.00
| |--- class: 1
"""
).lstrip()
assert export_text(clf) == expected_report
# testing that leaves at level 1 are not truncated
assert export_text(clf, max_depth=0) == expected_report
    # testing that a max_depth larger than the tree depth changes nothing
assert export_text(clf, max_depth=10) == expected_report
expected_report = dedent(
"""
|--- feature_1 <= 0.00
| |--- weights: [3.00, 0.00] class: -1
|--- feature_1 > 0.00
| |--- weights: [0.00, 3.00] class: 1
"""
).lstrip()
assert export_text(clf, show_weights=True) == expected_report
expected_report = dedent(
"""
|- feature_1 <= 0.00
| |- class: -1
|- feature_1 > 0.00
| |- class: 1
"""
).lstrip()
assert export_text(clf, spacing=1) == expected_report
X_l = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-1, 1]]
y_l = [-1, -1, -1, 1, 1, 1, 2]
clf = DecisionTreeClassifier(max_depth=4, random_state=0)
clf.fit(X_l, y_l)
expected_report = dedent(
"""
|--- feature_1 <= 0.00
| |--- class: -1
|--- feature_1 > 0.00
| |--- truncated branch of depth 2
"""
).lstrip()
assert export_text(clf, max_depth=0) == expected_report
X_mo = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_mo = [[-1, -1], [-1, -1], [-1, -1], [1, 1], [1, 1], [1, 1]]
reg = DecisionTreeRegressor(max_depth=2, random_state=0)
reg.fit(X_mo, y_mo)
expected_report = dedent(
"""
|--- feature_1 <= 0.0
| |--- value: [-1.0, -1.0]
|--- feature_1 > 0.0
| |--- value: [1.0, 1.0]
"""
).lstrip()
assert export_text(reg, decimals=1) == expected_report
assert export_text(reg, decimals=1, show_weights=True) == expected_report
X_single = [[-2], [-1], [-1], [1], [1], [2]]
reg = DecisionTreeRegressor(max_depth=2, random_state=0)
reg.fit(X_single, y_mo)
expected_report = dedent(
"""
|--- first <= 0.0
| |--- value: [-1.0, -1.0]
|--- first > 0.0
| |--- value: [1.0, 1.0]
"""
).lstrip()
assert export_text(reg, decimals=1, feature_names=["first"]) == expected_report
assert (
export_text(reg, decimals=1, show_weights=True, feature_names=["first"])
== expected_report
)
@pytest.mark.parametrize("constructor", [list, np.array])
def test_export_text_feature_class_names_array_support(constructor):
    # Check that export_text treats feature names
# and class names correctly and supports arrays
clf = DecisionTreeClassifier(max_depth=2, random_state=0)
clf.fit(X, y)
expected_report = dedent(
"""
|--- b <= 0.00
| |--- class: -1
|--- b > 0.00
| |--- class: 1
"""
).lstrip()
assert export_text(clf, feature_names=constructor(["a", "b"])) == expected_report
expected_report = dedent(
"""
|--- feature_1 <= 0.00
| |--- class: cat
|--- feature_1 > 0.00
| |--- class: dog
"""
).lstrip()
assert export_text(clf, class_names=constructor(["cat", "dog"])) == expected_report
def test_plot_tree_entropy(pyplot):
# mostly smoke tests
    # Check correctness of plot_tree for criterion = entropy
clf = DecisionTreeClassifier(
max_depth=3, min_samples_split=2, criterion="entropy", random_state=2
)
clf.fit(X, y)
# Test export code
feature_names = ["first feat", "sepal_width"]
nodes = plot_tree(clf, feature_names=feature_names)
assert len(nodes) == 5
assert (
nodes[0].get_text()
== "first feat <= 0.0\nentropy = 1.0\nsamples = 6\nvalue = [3, 3]"
)
assert nodes[1].get_text() == "entropy = 0.0\nsamples = 3\nvalue = [3, 0]"
assert nodes[2].get_text() == "True "
assert nodes[3].get_text() == "entropy = 0.0\nsamples = 3\nvalue = [0, 3]"
assert nodes[4].get_text() == " False"
@pytest.mark.parametrize("fontsize", [None, 10, 20])
def test_plot_tree_gini(pyplot, fontsize):
# mostly smoke tests
    # Check correctness of plot_tree for criterion = gini
clf = DecisionTreeClassifier(
max_depth=3,
min_samples_split=2,
criterion="gini",
random_state=2,
)
clf.fit(X, y)
# Test export code
feature_names = ["first feat", "sepal_width"]
nodes = plot_tree(clf, feature_names=feature_names, fontsize=fontsize)
assert len(nodes) == 5
if fontsize is not None:
assert all(node.get_fontsize() == fontsize for node in nodes)
assert (
nodes[0].get_text()
== "first feat <= 0.0\ngini = 0.5\nsamples = 6\nvalue = [3, 3]"
)
assert nodes[1].get_text() == "gini = 0.0\nsamples = 3\nvalue = [3, 0]"
assert nodes[2].get_text() == "True "
assert nodes[3].get_text() == "gini = 0.0\nsamples = 3\nvalue = [0, 3]"
assert nodes[4].get_text() == " False"
def test_not_fitted_tree(pyplot):
# Testing if not fitted tree throws the correct error
clf = DecisionTreeRegressor()
with pytest.raises(NotFittedError):
plot_tree(clf)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/tree/tests/test_reingold_tilford.py | sklearn/tree/tests/test_reingold_tilford.py | import numpy as np
import pytest
from sklearn.tree._reingold_tilford import Tree, buchheim
simple_tree = Tree("", 0, Tree("", 1), Tree("", 2))
bigger_tree = Tree(
"",
0,
Tree(
"",
1,
Tree("", 3),
Tree("", 4, Tree("", 7), Tree("", 8)),
),
Tree("", 2, Tree("", 5), Tree("", 6)),
)
@pytest.mark.parametrize("tree, n_nodes", [(simple_tree, 3), (bigger_tree, 9)])
def test_buchheim(tree, n_nodes):
def walk_tree(draw_tree):
res = [(draw_tree.x, draw_tree.y)]
for child in draw_tree.children:
# parents higher than children:
assert child.y == draw_tree.y + 1
res.extend(walk_tree(child))
if len(draw_tree.children):
# these trees are always binary
# parents are centered above children
assert (
draw_tree.x == (draw_tree.children[0].x + draw_tree.children[1].x) / 2
)
return res
layout = buchheim(tree)
coordinates = walk_tree(layout)
assert len(coordinates) == n_nodes
# test that x values are unique per depth / level
# we could also do it quicker using defaultdicts..
depth = 0
while True:
x_at_this_depth = [node[0] for node in coordinates if node[1] == depth]
if not x_at_this_depth:
            # reached all leaves
break
assert len(np.unique(x_at_this_depth)) == len(x_at_this_depth)
depth += 1
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/tree/tests/test_monotonic_tree.py | sklearn/tree/tests/test_monotonic_tree.py | import numpy as np
import pytest
from sklearn.datasets import make_classification, make_regression
from sklearn.ensemble import (
ExtraTreesClassifier,
ExtraTreesRegressor,
RandomForestClassifier,
RandomForestRegressor,
)
from sklearn.tree import (
DecisionTreeClassifier,
DecisionTreeRegressor,
ExtraTreeClassifier,
ExtraTreeRegressor,
)
from sklearn.utils._testing import assert_allclose
from sklearn.utils.fixes import CSC_CONTAINERS
TREE_CLASSIFIER_CLASSES = [DecisionTreeClassifier, ExtraTreeClassifier]
TREE_REGRESSOR_CLASSES = [DecisionTreeRegressor, ExtraTreeRegressor]
TREE_BASED_CLASSIFIER_CLASSES = TREE_CLASSIFIER_CLASSES + [
RandomForestClassifier,
ExtraTreesClassifier,
]
TREE_BASED_REGRESSOR_CLASSES = TREE_REGRESSOR_CLASSES + [
RandomForestRegressor,
ExtraTreesRegressor,
]
@pytest.mark.parametrize("TreeClassifier", TREE_BASED_CLASSIFIER_CLASSES)
@pytest.mark.parametrize("depth_first_builder", (True, False))
@pytest.mark.parametrize("sparse_splitter", (True, False))
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
def test_monotonic_constraints_classifications(
TreeClassifier,
depth_first_builder,
sparse_splitter,
global_random_seed,
csc_container,
):
n_samples = 1000
n_samples_train = 900
X, y = make_classification(
n_samples=n_samples,
n_classes=2,
n_features=5,
n_informative=5,
n_redundant=0,
random_state=global_random_seed,
)
X_train, y_train = X[:n_samples_train], y[:n_samples_train]
X_test, _ = X[n_samples_train:], y[n_samples_train:]
X_test_0incr, X_test_0decr = np.copy(X_test), np.copy(X_test)
X_test_1incr, X_test_1decr = np.copy(X_test), np.copy(X_test)
X_test_0incr[:, 0] += 10
X_test_0decr[:, 0] -= 10
X_test_1incr[:, 1] += 10
X_test_1decr[:, 1] -= 10
monotonic_cst = np.zeros(X.shape[1])
monotonic_cst[0] = 1
monotonic_cst[1] = -1
if depth_first_builder:
est = TreeClassifier(max_depth=None, monotonic_cst=monotonic_cst)
else:
est = TreeClassifier(
max_depth=None,
monotonic_cst=monotonic_cst,
max_leaf_nodes=n_samples_train,
)
if hasattr(est, "random_state"):
est.set_params(**{"random_state": global_random_seed})
if hasattr(est, "n_estimators"):
est.set_params(**{"n_estimators": 5})
if sparse_splitter:
X_train = csc_container(X_train)
est.fit(X_train, y_train)
proba_test = est.predict_proba(X_test)
assert np.logical_and(proba_test >= 0.0, proba_test <= 1.0).all(), (
"Probability should always be in [0, 1] range."
)
assert_allclose(proba_test.sum(axis=1), 1.0)
# Monotonic increase constraint, it applies to the positive class
assert np.all(est.predict_proba(X_test_0incr)[:, 1] >= proba_test[:, 1])
assert np.all(est.predict_proba(X_test_0decr)[:, 1] <= proba_test[:, 1])
# Monotonic decrease constraint, it applies to the positive class
assert np.all(est.predict_proba(X_test_1incr)[:, 1] <= proba_test[:, 1])
assert np.all(est.predict_proba(X_test_1decr)[:, 1] >= proba_test[:, 1])
@pytest.mark.parametrize("TreeRegressor", TREE_BASED_REGRESSOR_CLASSES)
@pytest.mark.parametrize("depth_first_builder", (True, False))
@pytest.mark.parametrize("sparse_splitter", (True, False))
@pytest.mark.parametrize("criterion", ("absolute_error", "squared_error"))
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
def test_monotonic_constraints_regressions(
TreeRegressor,
depth_first_builder,
sparse_splitter,
criterion,
global_random_seed,
csc_container,
):
n_samples = 1000
n_samples_train = 900
# Build a regression task using 5 informative features
X, y = make_regression(
n_samples=n_samples,
n_features=5,
n_informative=5,
random_state=global_random_seed,
)
train = np.arange(n_samples_train)
test = np.arange(n_samples_train, n_samples)
X_train = X[train]
y_train = y[train]
X_test = np.copy(X[test])
X_test_incr = np.copy(X_test)
X_test_decr = np.copy(X_test)
X_test_incr[:, 0] += 10
X_test_decr[:, 1] += 10
monotonic_cst = np.zeros(X.shape[1])
monotonic_cst[0] = 1
monotonic_cst[1] = -1
if depth_first_builder:
est = TreeRegressor(
max_depth=None,
monotonic_cst=monotonic_cst,
criterion=criterion,
)
else:
est = TreeRegressor(
max_depth=8,
monotonic_cst=monotonic_cst,
criterion=criterion,
max_leaf_nodes=n_samples_train,
)
if hasattr(est, "random_state"):
est.set_params(random_state=global_random_seed)
if hasattr(est, "n_estimators"):
est.set_params(**{"n_estimators": 5})
if sparse_splitter:
X_train = csc_container(X_train)
est.fit(X_train, y_train)
y = est.predict(X_test)
# Monotonic increase constraint
y_incr = est.predict(X_test_incr)
# y_incr should always be greater than y
assert np.all(y_incr >= y)
# Monotonic decrease constraint
y_decr = est.predict(X_test_decr)
# y_decr should always be lower than y
assert np.all(y_decr <= y)
@pytest.mark.parametrize("TreeClassifier", TREE_BASED_CLASSIFIER_CLASSES)
def test_multiclass_raises(TreeClassifier):
X, y = make_classification(
n_samples=100, n_features=5, n_classes=3, n_informative=3, random_state=0
)
y[0] = 0
monotonic_cst = np.zeros(X.shape[1])
monotonic_cst[0] = -1
monotonic_cst[1] = 1
est = TreeClassifier(max_depth=None, monotonic_cst=monotonic_cst, random_state=0)
msg = "Monotonicity constraints are not supported with multiclass classification"
with pytest.raises(ValueError, match=msg):
est.fit(X, y)
@pytest.mark.parametrize("TreeClassifier", TREE_BASED_CLASSIFIER_CLASSES)
def test_multiple_output_raises(TreeClassifier):
X = [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]
y = [[1, 0, 1, 0, 1], [1, 0, 1, 0, 1]]
est = TreeClassifier(
max_depth=None, monotonic_cst=np.array([-1, 1]), random_state=0
)
msg = "Monotonicity constraints are not supported with multiple output"
with pytest.raises(ValueError, match=msg):
est.fit(X, y)
@pytest.mark.parametrize(
"Tree",
[
DecisionTreeClassifier,
DecisionTreeRegressor,
ExtraTreeClassifier,
ExtraTreeRegressor,
],
)
def test_missing_values_raises(Tree):
X, y = make_classification(
n_samples=100, n_features=5, n_classes=2, n_informative=3, random_state=0
)
X[0, 0] = np.nan
monotonic_cst = np.zeros(X.shape[1])
monotonic_cst[0] = 1
est = Tree(max_depth=None, monotonic_cst=monotonic_cst, random_state=0)
msg = "Input X contains NaN"
with pytest.raises(ValueError, match=msg):
est.fit(X, y)
@pytest.mark.parametrize("TreeClassifier", TREE_BASED_CLASSIFIER_CLASSES)
def test_bad_monotonic_cst_raises(TreeClassifier):
X = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
y = [1, 0, 1, 0, 1]
msg = "monotonic_cst has shape 3 but the input data X has 2 features."
est = TreeClassifier(
max_depth=None, monotonic_cst=np.array([-1, 1, 0]), random_state=0
)
with pytest.raises(ValueError, match=msg):
est.fit(X, y)
msg = "monotonic_cst must be None or an array-like of -1, 0 or 1."
est = TreeClassifier(
max_depth=None, monotonic_cst=np.array([-2, 2]), random_state=0
)
with pytest.raises(ValueError, match=msg):
est.fit(X, y)
est = TreeClassifier(
max_depth=None, monotonic_cst=np.array([-1, 0.8]), random_state=0
)
with pytest.raises(ValueError, match=msg + "(.*)0.8]"):
est.fit(X, y)
def assert_1d_reg_tree_children_monotonic_bounded(tree_, monotonic_sign):
values = tree_.value
for i in range(tree_.node_count):
if tree_.children_left[i] > i and tree_.children_right[i] > i:
# Check monotonicity on children
i_left = tree_.children_left[i]
i_right = tree_.children_right[i]
if monotonic_sign == 1:
assert values[i_left] <= values[i_right]
elif monotonic_sign == -1:
assert values[i_left] >= values[i_right]
val_middle = (values[i_left] + values[i_right]) / 2
# Check bounds on grand-children, filtering out leaf nodes
if tree_.feature[i_left] >= 0:
i_left_right = tree_.children_right[i_left]
if monotonic_sign == 1:
assert values[i_left_right] <= val_middle
elif monotonic_sign == -1:
assert values[i_left_right] >= val_middle
if tree_.feature[i_right] >= 0:
i_right_left = tree_.children_left[i_right]
if monotonic_sign == 1:
assert val_middle <= values[i_right_left]
elif monotonic_sign == -1:
assert val_middle >= values[i_right_left]
def test_assert_1d_reg_tree_children_monotonic_bounded():
X = np.linspace(-1, 1, 7).reshape(-1, 1)
y = np.sin(2 * np.pi * X.ravel())
reg = DecisionTreeRegressor(max_depth=None, random_state=0).fit(X, y)
with pytest.raises(AssertionError):
assert_1d_reg_tree_children_monotonic_bounded(reg.tree_, 1)
with pytest.raises(AssertionError):
assert_1d_reg_tree_children_monotonic_bounded(reg.tree_, -1)
def assert_1d_reg_monotonic(clf, monotonic_sign, min_x, max_x, n_steps):
X_grid = np.linspace(min_x, max_x, n_steps).reshape(-1, 1)
y_pred_grid = clf.predict(X_grid)
if monotonic_sign == 1:
assert (np.diff(y_pred_grid) >= 0.0).all()
elif monotonic_sign == -1:
assert (np.diff(y_pred_grid) <= 0.0).all()
@pytest.mark.parametrize("TreeRegressor", TREE_REGRESSOR_CLASSES)
def test_1d_opposite_monotonicity_cst_data(TreeRegressor):
# Check that positive monotonic data with negative monotonic constraint
# yield constant predictions, equal to the average of target values
X = np.linspace(-2, 2, 10).reshape(-1, 1)
y = X.ravel()
clf = TreeRegressor(monotonic_cst=[-1])
clf.fit(X, y)
assert clf.tree_.node_count == 1
assert clf.tree_.value[0] == 0.0
# Swap monotonicity
clf = TreeRegressor(monotonic_cst=[1])
clf.fit(X, -y)
assert clf.tree_.node_count == 1
assert clf.tree_.value[0] == 0.0
@pytest.mark.parametrize("TreeRegressor", TREE_REGRESSOR_CLASSES)
@pytest.mark.parametrize("monotonic_sign", (-1, 1))
@pytest.mark.parametrize("depth_first_builder", (True, False))
@pytest.mark.parametrize("criterion", ("absolute_error", "squared_error"))
def test_1d_tree_nodes_values(
TreeRegressor, monotonic_sign, depth_first_builder, criterion, global_random_seed
):
# Adaptation from test_nodes_values in test_monotonic_constraints.py
# in sklearn.ensemble._hist_gradient_boosting
# Build a single tree with only one feature, and make sure the node
# values respect the monotonicity constraints.
# Considering the following tree with a monotonic +1 constraint, we
# should have:
#
# root
# / \
# a b
# / \ / \
# c d e f
#
# a <= root <= b
# c <= d <= (a + b) / 2 <= e <= f
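    # For instance (hypothetical numbers, only to illustrate the bound checked
    # by assert_1d_reg_tree_children_monotonic_bounded): with root = 0.5,
    # a = 0.3 and b = 0.7 under a +1 constraint, the grand-children under a
    # may not exceed (a + b) / 2 = 0.5 and those under b may not go below it.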
rng = np.random.RandomState(global_random_seed)
n_samples = 1000
n_features = 1
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
if depth_first_builder:
# No max_leaf_nodes, default depth first tree builder
clf = TreeRegressor(
monotonic_cst=[monotonic_sign],
criterion=criterion,
random_state=global_random_seed,
)
else:
# max_leaf_nodes triggers best first tree builder
clf = TreeRegressor(
monotonic_cst=[monotonic_sign],
max_leaf_nodes=n_samples,
criterion=criterion,
random_state=global_random_seed,
)
clf.fit(X, y)
assert_1d_reg_tree_children_monotonic_bounded(clf.tree_, monotonic_sign)
assert_1d_reg_monotonic(clf, monotonic_sign, np.min(X), np.max(X), 100)
def assert_nd_reg_tree_children_monotonic_bounded(tree_, monotonic_cst):
upper_bound = np.full(tree_.node_count, np.inf)
lower_bound = np.full(tree_.node_count, -np.inf)
for i in range(tree_.node_count):
feature = tree_.feature[i]
node_value = tree_.value[i][0][0] # unpack value from nx1x1 array
# While building the tree, the computed middle value is slightly
# different from the average of the siblings values, because
# sum_right / weighted_n_right
# is slightly different from the value of the right sibling.
# This can cause a discrepancy up to numerical noise when clipping,
# which is resolved by comparing with some loss of precision.
assert np.float32(node_value) <= np.float32(upper_bound[i])
assert np.float32(node_value) >= np.float32(lower_bound[i])
if feature < 0:
# Leaf: nothing to do
continue
# Split node: check and update bounds for the children.
i_left = tree_.children_left[i]
i_right = tree_.children_right[i]
# unpack value from nx1x1 array
middle_value = (tree_.value[i_left][0][0] + tree_.value[i_right][0][0]) / 2
if monotonic_cst[feature] == 0:
# Feature without monotonicity constraint: propagate bounds
# down the tree to both children.
# Otherwise, with 2 features and a monotonic increase constraint
# (encoded by +1) on feature 0, the following tree can be accepted,
# although it does not respect the monotonic increase constraint:
#
# X[0] <= 0
# value = 100
# / \
# X[0] <= -1 X[1] <= 0
# value = 50 value = 150
# / \ / \
# leaf leaf leaf leaf
# value = 25 value = 75 value = 50 value = 250
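            # (In that example the left subtree contains a leaf predicting 75
            # while the right subtree contains a leaf predicting 50, so a
            # sample with a smaller X[0] can receive a larger prediction;
            # propagating the bounds is what lets the checks below catch
            # this even though each individual split looks monotonic.)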
lower_bound[i_left] = lower_bound[i]
upper_bound[i_left] = upper_bound[i]
lower_bound[i_right] = lower_bound[i]
upper_bound[i_right] = upper_bound[i]
elif monotonic_cst[feature] == 1:
# Feature with constraint: check monotonicity
assert tree_.value[i_left] <= tree_.value[i_right]
# Propagate bounds down the tree to both children.
lower_bound[i_left] = lower_bound[i]
upper_bound[i_left] = middle_value
lower_bound[i_right] = middle_value
upper_bound[i_right] = upper_bound[i]
elif monotonic_cst[feature] == -1:
# Feature with constraint: check monotonicity
assert tree_.value[i_left] >= tree_.value[i_right]
# Update and propagate bounds down the tree to both children.
lower_bound[i_left] = middle_value
upper_bound[i_left] = upper_bound[i]
lower_bound[i_right] = lower_bound[i]
upper_bound[i_right] = middle_value
else: # pragma: no cover
raise ValueError(f"monotonic_cst[{feature}]={monotonic_cst[feature]}")
def test_assert_nd_reg_tree_children_monotonic_bounded():
# Check that assert_nd_reg_tree_children_monotonic_bounded can detect
# non-monotonic tree predictions.
X = np.linspace(0, 2 * np.pi, 30).reshape(-1, 1)
y = np.sin(X).ravel()
reg = DecisionTreeRegressor(max_depth=None, random_state=0).fit(X, y)
with pytest.raises(AssertionError):
assert_nd_reg_tree_children_monotonic_bounded(reg.tree_, [1])
with pytest.raises(AssertionError):
assert_nd_reg_tree_children_monotonic_bounded(reg.tree_, [-1])
assert_nd_reg_tree_children_monotonic_bounded(reg.tree_, [0])
# Check that assert_nd_reg_tree_children_monotonic_bounded raises
# when the data (and therefore the model) is naturally monotonic in the
# opposite direction.
X = np.linspace(-5, 5, 5).reshape(-1, 1)
y = X.ravel() ** 3
reg = DecisionTreeRegressor(max_depth=None, random_state=0).fit(X, y)
with pytest.raises(AssertionError):
assert_nd_reg_tree_children_monotonic_bounded(reg.tree_, [-1])
# For completeness, check that the converse holds when swapping the sign.
reg = DecisionTreeRegressor(max_depth=None, random_state=0).fit(X, -y)
with pytest.raises(AssertionError):
assert_nd_reg_tree_children_monotonic_bounded(reg.tree_, [1])
@pytest.mark.parametrize("TreeRegressor", TREE_REGRESSOR_CLASSES)
@pytest.mark.parametrize("monotonic_sign", (-1, 1))
@pytest.mark.parametrize("depth_first_builder", (True, False))
@pytest.mark.parametrize("criterion", ("absolute_error", "squared_error"))
def test_nd_tree_nodes_values(
TreeRegressor, monotonic_sign, depth_first_builder, criterion, global_random_seed
):
# Build tree with several features, and make sure the nodes
# values respect the monotonicity constraints.
# Considering the following tree with a monotonic increase constraint on X[0],
# we should have:
#
# root
# X[0]<=t
# / \
# a b
# X[0]<=u X[1]<=v
# / \ / \
# c d e f
#
# i) a <= root <= b
# ii) c <= a <= d <= (a+b)/2
# iii) (a+b)/2 <= min(e,f)
# For iii) we check that each node value is within the proper lower and
# upper bounds.
rng = np.random.RandomState(global_random_seed)
n_samples = 1000
n_features = 2
monotonic_cst = [monotonic_sign, 0]
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
if depth_first_builder:
# No max_leaf_nodes, default depth first tree builder
clf = TreeRegressor(
monotonic_cst=monotonic_cst,
criterion=criterion,
random_state=global_random_seed,
)
else:
# max_leaf_nodes triggers best first tree builder
clf = TreeRegressor(
monotonic_cst=monotonic_cst,
max_leaf_nodes=n_samples,
criterion=criterion,
random_state=global_random_seed,
)
clf.fit(X, y)
assert_nd_reg_tree_children_monotonic_bounded(clf.tree_, monotonic_cst)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/tree/tests/test_tree.py | sklearn/tree/tests/test_tree.py | """
Testing for the tree module (sklearn.tree).
"""
import copy
import copyreg
import io
import pickle
import re
import struct
from itertools import chain, pairwise, product
import joblib
import numpy as np
import pytest
from joblib.numpy_pickle import NumpyPickler
from numpy.testing import assert_allclose
from sklearn import clone, datasets, tree
from sklearn.dummy import DummyRegressor
from sklearn.exceptions import NotFittedError
from sklearn.impute import SimpleImputer
from sklearn.metrics import (
accuracy_score,
mean_absolute_error,
mean_poisson_deviance,
mean_squared_error,
)
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.random_projection import _sparse_random_matrix
from sklearn.tree import (
DecisionTreeClassifier,
DecisionTreeRegressor,
ExtraTreeClassifier,
ExtraTreeRegressor,
)
from sklearn.tree._classes import (
CRITERIA_CLF,
CRITERIA_REG,
DENSE_SPLITTERS,
SPARSE_SPLITTERS,
)
from sklearn.tree._criterion import _py_precompute_absolute_errors
from sklearn.tree._partitioner import _py_sort
from sklearn.tree._tree import (
NODE_DTYPE,
TREE_LEAF,
TREE_UNDEFINED,
_build_pruned_tree_py,
_check_n_classes,
_check_node_ndarray,
_check_value_ndarray,
)
from sklearn.tree._tree import Tree as CythonTree
from sklearn.utils import compute_sample_weight
from sklearn.utils._array_api import xpx
from sklearn.utils._testing import (
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
create_memmap_backed_data,
ignore_warnings,
)
from sklearn.utils.fixes import (
_IS_32BIT,
COO_CONTAINERS,
CSC_CONTAINERS,
CSR_CONTAINERS,
)
from sklearn.utils.stats import _weighted_percentile
from sklearn.utils.validation import check_random_state
CLF_CRITERIONS = ("gini", "log_loss")
REG_CRITERIONS = ("squared_error", "absolute_error", "friedman_mse", "poisson")
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES: dict = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = [
"DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor",
]
X_small = np.array(
[
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0],
]
)
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0]
y_small_reg = [
1.0,
2.1,
1.2,
0.05,
10,
2.4,
3.1,
1.01,
0.01,
2.98,
3.1,
1.1,
0.0,
1.2,
2,
11,
0,
0,
4.5,
0.201,
1.06,
0.9,
0,
]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the diabetes dataset
# and randomly permute it
diabetes = datasets.load_diabetes()
perm = rng.permutation(diabetes.target.size)
diabetes.data = diabetes.data[perm]
diabetes.target = diabetes.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, n_samples=30, n_features=10
)
# NB: despite their names X_sparse_* are numpy arrays (and not sparse matrices)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.0
y_random = random_state.randint(0, 4, size=(20,))
X_sparse_mix = _sparse_random_matrix(20, 10, density=0.25, random_state=0).toarray()
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"diabetes": {"X": diabetes.data, "y": diabetes.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": -X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random},
}
def assert_tree_equal(d, s, message):
    assert s.node_count == d.node_count, (
        "{0}: unequal number of nodes ({1} != {2})".format(
            message, s.node_count, d.node_count
        )
    )
    assert_array_equal(
        d.children_right, s.children_right, message + ": unequal children_right"
    )
    assert_array_equal(
        d.children_left, s.children_left, message + ": unequal children_left"
    )
external = d.children_right == TREE_LEAF
internal = np.logical_not(external)
    assert_array_equal(
        d.feature[internal], s.feature[internal], message + ": unequal features"
    )
    assert_array_equal(
        d.threshold[internal], s.threshold[internal], message + ": unequal threshold"
    )
    assert_array_equal(
        d.n_node_samples.sum(),
        s.n_node_samples.sum(),
        message + ": unequal sum(n_node_samples)",
    )
    assert_array_equal(
        d.n_node_samples, s.n_node_samples, message + ": unequal n_node_samples"
    )
    assert_almost_equal(d.impurity, s.impurity, err_msg=message + ": unequal impurity")
    assert_array_almost_equal(
        d.value[external], s.value[external], err_msg=message + ": unequal value"
    )
def test_classification_toy():
# Check classification on a toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name))
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.full(len(X), 0.5))
assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name))
@pytest.mark.parametrize("Tree", REG_TREES.values())
@pytest.mark.parametrize("criterion", REG_CRITERIONS)
def test_regression_toy(Tree, criterion):
# Check regression on a toy dataset.
if criterion == "poisson":
# make target positive while not touching the original y and
# true_result
a = np.abs(np.min(y)) + 1
y_train = np.array(y) + a
y_test = np.array(true_result) + a
else:
y_train = y
y_test = true_result
reg = Tree(criterion=criterion, random_state=1)
reg.fit(X, y_train)
assert_allclose(reg.predict(T), y_test)
clf = Tree(criterion=criterion, max_features=1, random_state=1)
clf.fit(X, y_train)
    assert_allclose(clf.predict(T), y_test)
def test_xor():
# Check on an XOR problem
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert clf.score(X, y) == 1.0, "Failed with {0}".format(name)
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert clf.score(X, y) == 1.0, "Failed with {0}".format(name)
def test_iris():
# Check consistency on dataset iris.
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert score > 0.9, "Failed with {0}, criterion = {1} and score = {2}".format(
name, criterion, score
)
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert score > 0.5, "Failed with {0}, criterion = {1} and score = {2}".format(
name, criterion, score
)
@pytest.mark.parametrize("name, Tree", REG_TREES.items())
@pytest.mark.parametrize("criterion", REG_CRITERIONS)
def test_diabetes_overfit(name, Tree, criterion):
# check consistency of overfitted trees on the diabetes dataset
# since the trees will overfit, we expect an MSE of 0
reg = Tree(criterion=criterion, random_state=0)
reg.fit(diabetes.data, diabetes.target)
score = mean_squared_error(diabetes.target, reg.predict(diabetes.data))
assert score == pytest.approx(0), (
f"Failed with {name}, criterion = {criterion} and score = {score}"
)
@pytest.mark.parametrize("Tree", REG_TREES.values())
@pytest.mark.parametrize(
"criterion, metric",
[
("squared_error", mean_squared_error),
("absolute_error", mean_absolute_error),
("friedman_mse", mean_squared_error),
("poisson", mean_poisson_deviance),
],
)
def test_diabetes_underfit(Tree, criterion, metric, global_random_seed):
# check consistency of trees when the depth and the number of features are
# limited
kwargs = dict(criterion=criterion, max_features=6, random_state=global_random_seed)
X, y = diabetes.data, diabetes.target
loss1 = metric(y, Tree(**kwargs, max_depth=1).fit(X, y).predict(X))
loss4 = metric(y, Tree(**kwargs, max_depth=4).fit(X, y).predict(X))
loss7 = metric(y, Tree(**kwargs, max_depth=7).fit(X, y).predict(X))
# less depth => higher error
# diabetes.data.shape[0] > 2^7 so it can't overfit to get a 0 error
assert 0 < loss7 < loss4 < loss1, (loss7, loss4, loss1)
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name),
)
assert_array_equal(
np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name),
)
assert_almost_equal(
clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)),
8,
err_msg="Failed with {0}".format(name),
)
def test_arrayrepr():
# Check the array representation.
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
# Check when y is pure.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y, err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
assert_almost_equal(reg.predict(X), y, err_msg="Failed with {0}".format(name))
def test_numerical_stability():
# Check numerical stability.
X = np.array(
[
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774],
]
)
y = np.array([1.0, 0.70209277, 0.53896582, 0.0, 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(
n_samples=5000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0,
)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert importances.shape[0] == 10, "Failed with {0}".format(name)
assert n_important == 3, "Failed with {0}".format(name)
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0, max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_, clf2.feature_importances_)
def test_importances_raises():
# Check if variable importance before fit raises ValueError.
clf = DecisionTreeClassifier()
with pytest.raises(ValueError):
getattr(clf, "feature_importances_")
def test_importances_gini_equal_squared_error():
# Check that gini is equivalent to squared_error for binary output variable
X, y = datasets.make_classification(
n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0,
)
    # The gini index and the mean squared error (variance) might differ due
    # to numerical instability. Since those instabilities mainly occur at
    # high tree depth, we restrict the maximal depth.
clf = DecisionTreeClassifier(criterion="gini", max_depth=5, random_state=0).fit(
X, y
)
reg = DecisionTreeRegressor(
criterion="squared_error", max_depth=5, random_state=0
).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
# Check max_features.
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert est.max_features_ == int(np.sqrt(iris.data.shape[1]))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert est.max_features_ == int(np.log2(iris.data.shape[1]))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert est.max_features_ == 1
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert est.max_features_ == 3
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert est.max_features_ == 1
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert est.max_features_ == int(0.5 * iris.data.shape[1])
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert est.max_features_ == iris.data.shape[1]
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert est.max_features_ == iris.data.shape[1]
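# Hedged, illustrative sketch (not part of the original test suite): how string
# and float values of max_features resolve to max_features_ after fit. The
# 9-feature random data and the helper name are assumptions for illustration only.
def _example_max_features_resolution():
    import numpy as np
    from sklearn.tree import DecisionTreeRegressor

    rng = np.random.RandomState(0)
    X, y = rng.rand(30, 9), rng.rand(30)
    # "sqrt" -> int(sqrt(9)) == 3, "log2" -> int(log2(9)) == 3, 0.5 -> int(0.5 * 9) == 4
    assert DecisionTreeRegressor(max_features="sqrt").fit(X, y).max_features_ == 3
    assert DecisionTreeRegressor(max_features="log2").fit(X, y).max_features_ == 3
    assert DecisionTreeRegressor(max_features=0.5).fit(X, y).max_features_ == 4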
def test_error():
# Test that it gives proper exception on deficient input.
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
with pytest.raises(NotFittedError):
est.predict_proba(X)
est.fit(X, y)
X2 = [[-2, -1, 1]] # wrong feature shape for sample
with pytest.raises(ValueError):
est.predict_proba(X2)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
with pytest.raises(ValueError):
est.fit(X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
with pytest.raises(NotFittedError):
est.predict(T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
with pytest.raises(ValueError):
est.predict(t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
with pytest.raises(ValueError):
est.predict(X)
with pytest.raises(ValueError):
est.apply(X)
clf = TreeEstimator()
clf.fit(X, y)
with pytest.raises(ValueError):
clf.predict(Xt)
with pytest.raises(ValueError):
clf.apply(Xt)
# apply before fitting
est = TreeEstimator()
with pytest.raises(NotFittedError):
est.apply(T)
# non positive target for Poisson splitting Criterion
est = DecisionTreeRegressor(criterion="poisson")
with pytest.raises(ValueError, match="y is not positive.*Poisson"):
est.fit([[0, 1, 2]], [0, 0, 0])
with pytest.raises(ValueError, match="Some.*y are negative.*Poisson"):
est.fit([[0, 1, 2]], [5, -0.1, 2])
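# Hedged, illustrative sketch (not part of the original test suite): the Poisson
# criterion only requires non-negative targets with a strictly positive sum, so a
# fit like the one below should not raise. Data and helper name are illustrative only.
def _example_poisson_fit_with_valid_target():
    import numpy as np
    from sklearn.tree import DecisionTreeRegressor

    X = np.arange(6, dtype=float).reshape(-1, 1)
    y = np.array([0.0, 1.0, 2.0, 0.0, 3.0, 1.0])  # non-negative, positive sum
    DecisionTreeRegressor(criterion="poisson", random_state=0).fit(X, y)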
def test_min_samples_split():
"""Test min_samples_split parameter"""
X = np.asfortranarray(iris.data, dtype=tree._tree.DTYPE)
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# test for integer parameter
est = TreeEstimator(
min_samples_split=10, max_leaf_nodes=max_leaf_nodes, random_state=0
)
est.fit(X, y)
# count samples on nodes, -1 means it is a leaf
node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert np.min(node_samples) > 9, "Failed with {0}".format(name)
# test for float parameter
est = TreeEstimator(
min_samples_split=0.2, max_leaf_nodes=max_leaf_nodes, random_state=0
)
est.fit(X, y)
# count samples on nodes, -1 means it is a leaf
node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert np.min(node_samples) > 9, "Failed with {0}".format(name)
def test_min_samples_leaf():
    # Test that leaves contain at least min_samples_leaf training examples
X = np.asfortranarray(iris.data, dtype=tree._tree.DTYPE)
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# test integer parameter
est = TreeEstimator(
min_samples_leaf=5, max_leaf_nodes=max_leaf_nodes, random_state=0
)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert np.min(leaf_count) > 4, "Failed with {0}".format(name)
# test float parameter
est = TreeEstimator(
min_samples_leaf=0.1, max_leaf_nodes=max_leaf_nodes, random_state=0
)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert np.min(leaf_count) > 4, "Failed with {0}".format(name)
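# Hedged, illustrative sketch (not part of the original test suite): a float
# min_samples_leaf is a fraction of n_samples (rounded up), so 0.1 on 150 samples
# behaves like the integer 15. Data and helper name are assumptions for illustration.
def _example_min_samples_leaf_fraction_equivalence():
    import numpy as np
    from sklearn.tree import DecisionTreeClassifier

    rng = np.random.RandomState(0)
    X, y = rng.rand(150, 4), rng.randint(0, 3, size=150)
    a = DecisionTreeClassifier(min_samples_leaf=15, random_state=0).fit(X, y)
    b = DecisionTreeClassifier(min_samples_leaf=0.1, random_state=0).fit(X, y)
    # Same effective constraint, data and seed -> identical tree structure.
    assert a.tree_.node_count == b.tree_.node_count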
def check_min_weight_fraction_leaf(name, datasets, sparse_container=None):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
X = DATASETS[datasets]["X"].astype(np.float32)
if sparse_container is not None:
X = sparse_container(X)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(
min_weight_fraction_leaf=frac, max_leaf_nodes=max_leaf_nodes, random_state=0
)
est.fit(X, y, sample_weight=weights)
if sparse_container is not None:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert np.min(leaf_weights) >= total_weight * est.min_weight_fraction_leaf, (
"Failed with {0} min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf
)
)
# test case with no weights passed in
total_weight = X.shape[0]
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(
min_weight_fraction_leaf=frac, max_leaf_nodes=max_leaf_nodes, random_state=0
)
est.fit(X, y)
if sparse_container is not None:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert np.min(leaf_weights) >= total_weight * est.min_weight_fraction_leaf, (
"Failed with {0} min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf
)
)
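# Hedged, illustrative sketch (not part of the original helpers): the invariant
# checked above, on a small unweighted fit. With no sample weights each sample has
# weight 1, so every leaf must hold at least min_weight_fraction_leaf of the
# samples. Data and helper name are assumptions made for illustration only.
def _example_min_weight_fraction_leaf_invariant():
    import numpy as np
    from sklearn.tree import DecisionTreeRegressor

    rng = np.random.RandomState(0)
    X, y = rng.rand(100, 3), rng.rand(100)
    reg = DecisionTreeRegressor(min_weight_fraction_leaf=0.2, random_state=0).fit(X, y)
    leaf_sizes = np.bincount(reg.apply(X))
    assert leaf_sizes[leaf_sizes > 0].min() >= 0.2 * X.shape[0]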
@pytest.mark.parametrize("name", ALL_TREES)
def test_min_weight_fraction_leaf_on_dense_input(name):
check_min_weight_fraction_leaf(name, "iris")
@pytest.mark.parametrize("name", SPARSE_TREES)
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
def test_min_weight_fraction_leaf_on_sparse_input(name, csc_container):
check_min_weight_fraction_leaf(name, "multilabel", sparse_container=csc_container)
def check_min_weight_fraction_leaf_with_min_samples_leaf(
name, datasets, sparse_container=None
):
"""Test the interaction between min_weight_fraction_leaf and
    min_samples_leaf when sample_weight is not provided in fit."""
X = DATASETS[datasets]["X"].astype(np.float32)
if sparse_container is not None:
X = sparse_container(X)
y = DATASETS[datasets]["y"]
total_weight = X.shape[0]
TreeEstimator = ALL_TREES[name]
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 3)):
# test integer min_samples_leaf
est = TreeEstimator(
min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
min_samples_leaf=5,
random_state=0,
)
est.fit(X, y)
if sparse_container is not None:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert np.min(leaf_weights) >= max(
(total_weight * est.min_weight_fraction_leaf), 5
), "Failed with {0} min_weight_fraction_leaf={1}, min_samples_leaf={2}".format(
name, est.min_weight_fraction_leaf, est.min_samples_leaf
)
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 3)):
# test float min_samples_leaf
est = TreeEstimator(
min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
min_samples_leaf=0.1,
random_state=0,
)
est.fit(X, y)
if sparse_container is not None:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert np.min(leaf_weights) >= max(
(total_weight * est.min_weight_fraction_leaf),
(total_weight * est.min_samples_leaf),
), "Failed with {0} min_weight_fraction_leaf={1}, min_samples_leaf={2}".format(
name, est.min_weight_fraction_leaf, est.min_samples_leaf
)
@pytest.mark.parametrize("name", ALL_TREES)
def test_min_weight_fraction_leaf_with_min_samples_leaf_on_dense_input(name):
check_min_weight_fraction_leaf_with_min_samples_leaf(name, "iris")
@pytest.mark.parametrize("name", SPARSE_TREES)
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
def test_min_weight_fraction_leaf_with_min_samples_leaf_on_sparse_input(
name, csc_container
):
check_min_weight_fraction_leaf_with_min_samples_leaf(
name, "multilabel", sparse_container=csc_container
)
def test_min_impurity_decrease(global_random_seed):
    # test that min_impurity_decrease ensures a split is made only if
    # the impurity decrease is at least that value
X, y = datasets.make_classification(n_samples=100, random_state=global_random_seed)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
        # Check the default min_impurity_decrease (0.0, below the 1e-7 threshold
        # used in the check below)
est1 = TreeEstimator(max_leaf_nodes=max_leaf_nodes, random_state=0)
# Check with explicit value of 0.05
est2 = TreeEstimator(
max_leaf_nodes=max_leaf_nodes, min_impurity_decrease=0.05, random_state=0
)
# Check with a much lower value of 0.0001
est3 = TreeEstimator(
max_leaf_nodes=max_leaf_nodes, min_impurity_decrease=0.0001, random_state=0
)
        # Check with a much higher value of 0.1
est4 = TreeEstimator(
max_leaf_nodes=max_leaf_nodes, min_impurity_decrease=0.1, random_state=0
)
for est, expected_decrease in (
(est1, 1e-7),
(est2, 0.05),
(est3, 0.0001),
(est4, 0.1),
):
assert est.min_impurity_decrease <= expected_decrease, (
"Failed, min_impurity_decrease = {0} > {1}".format(
est.min_impurity_decrease, expected_decrease
)
)
est.fit(X, y)
for node in range(est.tree_.node_count):
                # If the current node is not a leaf node, check that the split
                # was justified w.r.t. the min_impurity_decrease
if est.tree_.children_left[node] != TREE_LEAF:
imp_parent = est.tree_.impurity[node]
wtd_n_node = est.tree_.weighted_n_node_samples[node]
left = est.tree_.children_left[node]
wtd_n_left = est.tree_.weighted_n_node_samples[left]
imp_left = est.tree_.impurity[left]
wtd_imp_left = wtd_n_left * imp_left
right = est.tree_.children_right[node]
wtd_n_right = est.tree_.weighted_n_node_samples[right]
imp_right = est.tree_.impurity[right]
wtd_imp_right = wtd_n_right * imp_right
wtd_avg_left_right_imp = wtd_imp_right + wtd_imp_left
wtd_avg_left_right_imp /= wtd_n_node
fractional_node_weight = (
est.tree_.weighted_n_node_samples[node] / X.shape[0]
)
actual_decrease = fractional_node_weight * (
imp_parent - wtd_avg_left_right_imp
)
assert actual_decrease >= expected_decrease, (
"Failed with {0} expected min_impurity_decrease={1}".format(
actual_decrease, expected_decrease
)
)
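# Hedged, illustrative sketch (not part of the original test suite): the weighted
# impurity decrease verified in the loop above, written as a stand-alone helper.
# It mirrors N_t / N * (impurity - (N_t_L * left_impurity + N_t_R * right_impurity) / N_t).
# The helper name and arguments are assumptions made for illustration only.
def _example_impurity_decrease(tree_, node, n_samples):
    left = tree_.children_left[node]
    right = tree_.children_right[node]
    n_t = tree_.weighted_n_node_samples[node]
    children_impurity = (
        tree_.weighted_n_node_samples[left] * tree_.impurity[left]
        + tree_.weighted_n_node_samples[right] * tree_.impurity[right]
    ) / n_t
    return n_t / n_samples * (tree_.impurity[node] - children_impurity)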
def test_pickle():
"""Test pickling preserves Tree properties and performance."""
for name, TreeEstimator in ALL_TREES.items():
if "Classifier" in name:
X, y = iris.data, iris.target
else:
X, y = diabetes.data, diabetes.target
est = TreeEstimator(random_state=0)
est.fit(X, y)
score = est.score(X, y)
# test that all class properties are maintained
attributes = [
"max_depth",
"node_count",
"capacity",
"n_classes",
"children_left",
"children_right",
"n_leaves",
"feature",
"threshold",
"impurity",
"n_node_samples",
"weighted_n_node_samples",
"value",
]
fitted_attribute = {
attribute: getattr(est.tree_, attribute) for attribute in attributes
}
serialized_object = pickle.dumps(est)
est2 = pickle.loads(serialized_object)
assert type(est2) == est.__class__
score2 = est2.score(X, y)
assert score == score2, (
"Failed to generate same score after pickling with {0}".format(name)
)
for attribute in fitted_attribute:
assert_array_equal(
getattr(est2.tree_, attribute),
fitted_attribute[attribute],
err_msg=(
f"Failed to generate same attribute {attribute} after pickling with"
f" {name}"
),
)
@pytest.mark.parametrize(
"Tree, criterion",
[
*product(REG_TREES.values(), REG_CRITERIONS),
*product(CLF_TREES.values(), CLF_CRITERIONS),
],
)
def test_multioutput(Tree, criterion):
# Check estimators on multi-output problems.
X = [
[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2],
]
y = np.array(
[
[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3],
]
)
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/tree/tests/__init__.py | sklearn/tree/tests/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/tree/tests/test_fenwick.py | sklearn/tree/tests/test_fenwick.py | import numpy as np
from sklearn.tree._utils import PytestWeightedFenwickTree
def test_cython_weighted_fenwick_tree(global_random_seed):
"""
Test Cython's weighted Fenwick tree implementation
"""
rng = np.random.default_rng(global_random_seed)
n = 100
indices = rng.permutation(n)
y = rng.normal(size=n)
w = rng.integers(0, 4, size=n)
y_included_so_far = np.zeros_like(y)
w_included_so_far = np.zeros_like(w)
tree = PytestWeightedFenwickTree(n)
tree.py_reset(n)
for i in range(n):
idx = indices[i]
tree.py_add(idx, y[idx], w[idx])
y_included_so_far[idx] = y[idx]
w_included_so_far[idx] = w[idx]
target = rng.uniform(0, w_included_so_far.sum())
t_idx_low, t_idx, cw, cwy = tree.py_search(target)
# check the aggregates are consistent with the returned idx
assert np.isclose(cw, np.sum(w_included_so_far[:t_idx]))
assert np.isclose(
cwy, np.sum(w_included_so_far[:t_idx] * y_included_so_far[:t_idx])
)
# check if the cumulative weight is less than or equal to the target
# depending on t_idx_low and t_idx
if t_idx_low == t_idx:
assert cw < target
else:
assert cw == target
# check that if we add the next non-null weight, we are above the target:
next_weights = w_included_so_far[t_idx:][w_included_so_far[t_idx:] > 0]
if next_weights.size > 0:
assert cw + next_weights[0] > target
# and not below the target for `t_idx_low`:
next_weights = w_included_so_far[t_idx_low:][w_included_so_far[t_idx_low:] > 0]
if next_weights.size > 0:
assert cw + next_weights[0] >= target
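# Hedged, pure-Python sketch (not part of scikit-learn): a minimal Fenwick
# (binary indexed) tree for prefix weight sums, illustrating the add / prefix
# query operations that the Cython WeightedFenwickTree tested above builds on.
# The class name and API below are assumptions made for illustration only.
class _ExampleFenwickTree:
    def __init__(self, n):
        self.n = n
        self._tree = [0.0] * (n + 1)

    def add(self, i, w):
        # Add weight w at 0-based position i.
        i += 1
        while i <= self.n:
            self._tree[i] += w
            i += i & (-i)

    def prefix_sum(self, i):
        # Sum of the weights at positions 0 .. i-1.
        total = 0.0
        while i > 0:
            total += self._tree[i]
            i -= i & (-i)
        return total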
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/frozen/_frozen.py | sklearn/frozen/_frozen.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from copy import deepcopy
from sklearn.base import BaseEstimator
from sklearn.exceptions import NotFittedError
from sklearn.utils import get_tags
from sklearn.utils.metaestimators import available_if
from sklearn.utils.validation import check_is_fitted
def _estimator_has(attr):
"""Check that final_estimator has `attr`.
Used together with `available_if`.
"""
def check(self):
# raise original `AttributeError` if `attr` does not exist
getattr(self.estimator, attr)
return True
return check
class FrozenEstimator(BaseEstimator):
"""Estimator that wraps a fitted estimator to prevent re-fitting.
This meta-estimator takes an estimator and freezes it, in the sense that calling
`fit` on it has no effect. `fit_predict` and `fit_transform` are also disabled.
All other methods are delegated to the original estimator and original estimator's
attributes are accessible as well.
This is particularly useful when you have a fitted or a pre-trained model as a
transformer in a pipeline, and you'd like `pipeline.fit` to have no effect on this
step.
Parameters
----------
estimator : estimator
The estimator which is to be kept frozen.
See Also
--------
None: No similar entry in the scikit-learn documentation.
Examples
--------
>>> from sklearn.datasets import make_classification
>>> from sklearn.frozen import FrozenEstimator
>>> from sklearn.linear_model import LogisticRegression
>>> X, y = make_classification(random_state=0)
>>> clf = LogisticRegression(random_state=0).fit(X, y)
>>> frozen_clf = FrozenEstimator(clf)
>>> frozen_clf.fit(X, y) # No-op
FrozenEstimator(estimator=LogisticRegression(random_state=0))
>>> frozen_clf.predict(X) # Predictions from `clf.predict`
array(...)
"""
def __init__(self, estimator):
self.estimator = estimator
@available_if(_estimator_has("__getitem__"))
def __getitem__(self, *args, **kwargs):
"""__getitem__ is defined in :class:`~sklearn.pipeline.Pipeline` and \
:class:`~sklearn.compose.ColumnTransformer`.
"""
return self.estimator.__getitem__(*args, **kwargs)
def __getattr__(self, name):
# `estimator`'s attributes are now accessible except `fit_predict` and
# `fit_transform`
if name in ["fit_predict", "fit_transform"]:
raise AttributeError(f"{name} is not available for frozen estimators.")
return getattr(self.estimator, name)
def __sklearn_clone__(self):
return self
def __sklearn_is_fitted__(self):
try:
check_is_fitted(self.estimator)
return True
except NotFittedError:
return False
def fit(self, X, y, *args, **kwargs):
"""No-op.
As a frozen estimator, calling `fit` has no effect.
Parameters
----------
X : object
Ignored.
y : object
Ignored.
*args : tuple
Additional positional arguments. Ignored, but present for API compatibility
with `self.estimator`.
**kwargs : dict
Additional keyword arguments. Ignored, but present for API compatibility
with `self.estimator`.
Returns
-------
self : object
Returns the instance itself.
"""
check_is_fitted(self.estimator)
return self
def set_params(self, **kwargs):
"""Set the parameters of this estimator.
The only valid key here is `estimator`. You cannot set the parameters of the
inner estimator.
Parameters
----------
**kwargs : dict
Estimator parameters.
Returns
-------
self : FrozenEstimator
This estimator.
"""
estimator = kwargs.pop("estimator", None)
if estimator is not None:
self.estimator = estimator
if kwargs:
raise ValueError(
"You cannot set parameters of the inner estimator in a frozen "
"estimator since calling `fit` has no effect. You can use "
"`frozenestimator.estimator.set_params` to set parameters of the inner "
"estimator."
)
def get_params(self, deep=True):
"""Get parameters for this estimator.
Returns a `{"estimator": estimator}` dict. The parameters of the inner
estimator are not included.
Parameters
----------
deep : bool, default=True
Ignored.
Returns
-------
params : dict
Parameter names mapped to their values.
"""
return {"estimator": self.estimator}
def __sklearn_tags__(self):
tags = deepcopy(get_tags(self.estimator))
tags._skip_test = True
return tags
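# Hedged usage sketch (not part of this module): freezing a pre-fitted
# transformer inside a pipeline so that `pipeline.fit` refits only the final
# estimator and leaves the frozen step untouched. The dataset and estimator
# choices below are assumptions made for illustration only.
if __name__ == "__main__":  # pragma: no cover
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression
    from sklearn.pipeline import make_pipeline
    from sklearn.preprocessing import StandardScaler

    X, y = make_classification(random_state=0)
    scaler = StandardScaler().fit(X)
    pipe = make_pipeline(FrozenEstimator(scaler), LogisticRegression())
    # Fitting the pipeline is a no-op for the frozen scaler.
    pipe.fit(X, y)
    print(pipe.score(X, y))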
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/frozen/__init__.py | sklearn/frozen/__init__.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from sklearn.frozen._frozen import FrozenEstimator
__all__ = ["FrozenEstimator"]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/frozen/tests/__init__.py | sklearn/frozen/tests/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/frozen/tests/test_frozen.py | sklearn/frozen/tests/test_frozen.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import re
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from sklearn import config_context
from sklearn.base import (
BaseEstimator,
clone,
is_classifier,
is_clusterer,
is_outlier_detector,
is_regressor,
)
from sklearn.cluster import KMeans
from sklearn.compose import make_column_transformer
from sklearn.datasets import make_classification, make_regression
from sklearn.exceptions import NotFittedError, UnsetMetadataPassedError
from sklearn.frozen import FrozenEstimator
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.neighbors import LocalOutlierFactor
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler, StandardScaler
from sklearn.utils._testing import set_random_state
from sklearn.utils.validation import check_is_fitted
@pytest.fixture
def regression_dataset():
return make_regression()
@pytest.fixture
def classification_dataset():
return make_classification()
@pytest.mark.parametrize(
"estimator, dataset",
[
(LinearRegression(), "regression_dataset"),
(LogisticRegression(), "classification_dataset"),
(make_pipeline(StandardScaler(), LinearRegression()), "regression_dataset"),
(
make_pipeline(StandardScaler(), LogisticRegression()),
"classification_dataset",
),
(StandardScaler(), "regression_dataset"),
(KMeans(), "regression_dataset"),
(LocalOutlierFactor(), "regression_dataset"),
(
make_column_transformer(
(StandardScaler(), [0]),
(RobustScaler(), [1]),
),
"regression_dataset",
),
],
)
@pytest.mark.parametrize(
"method",
["predict", "predict_proba", "predict_log_proba", "decision_function", "transform"],
)
def test_frozen_methods(estimator, dataset, request, method):
"""Test that frozen.fit doesn't do anything, and that all other methods are
exposed by the frozen estimator and return the same values as the estimator.
"""
estimator = clone(estimator)
X, y = request.getfixturevalue(dataset)
set_random_state(estimator)
estimator.fit(X, y)
frozen = FrozenEstimator(estimator)
# this should be no-op
frozen.fit([[1]], [1])
if hasattr(estimator, method):
assert_array_equal(getattr(estimator, method)(X), getattr(frozen, method)(X))
assert is_classifier(estimator) == is_classifier(frozen)
assert is_regressor(estimator) == is_regressor(frozen)
assert is_clusterer(estimator) == is_clusterer(frozen)
assert is_outlier_detector(estimator) == is_outlier_detector(frozen)
@config_context(enable_metadata_routing=True)
def test_frozen_metadata_routing(regression_dataset):
"""Test that metadata routing works with frozen estimators."""
class ConsumesMetadata(BaseEstimator):
def __init__(self, on_fit=None, on_predict=None):
self.on_fit = on_fit
self.on_predict = on_predict
def fit(self, X, y, metadata=None):
if self.on_fit:
assert metadata is not None
self.fitted_ = True
return self
def predict(self, X, metadata=None):
if self.on_predict:
assert metadata is not None
return np.ones(len(X))
X, y = regression_dataset
pipeline = make_pipeline(
ConsumesMetadata(on_fit=True, on_predict=True)
.set_fit_request(metadata=True)
.set_predict_request(metadata=True)
)
pipeline.fit(X, y, metadata="test")
frozen = FrozenEstimator(pipeline)
pipeline.predict(X, metadata="test")
frozen.predict(X, metadata="test")
frozen["consumesmetadata"].set_predict_request(metadata=False)
with pytest.raises(
TypeError,
match=re.escape(
"Pipeline.predict got unexpected argument(s) {'metadata'}, which are not "
"routed to any object."
),
):
frozen.predict(X, metadata="test")
frozen["consumesmetadata"].set_predict_request(metadata=None)
with pytest.raises(UnsetMetadataPassedError):
frozen.predict(X, metadata="test")
def test_composite_fit(classification_dataset):
"""Test that calling fit_transform and fit_predict doesn't call fit."""
class Estimator(BaseEstimator):
def fit(self, X, y):
try:
self._fit_counter += 1
except AttributeError:
self._fit_counter = 1
return self
def fit_transform(self, X, y=None):
# only here to test that it doesn't get called
... # pragma: no cover
def fit_predict(self, X, y=None):
# only here to test that it doesn't get called
... # pragma: no cover
X, y = classification_dataset
est = Estimator().fit(X, y)
frozen = FrozenEstimator(est)
with pytest.raises(AttributeError):
frozen.fit_predict(X, y)
with pytest.raises(AttributeError):
frozen.fit_transform(X, y)
assert frozen._fit_counter == 1
def test_clone_frozen(regression_dataset):
"""Test that cloning a frozen estimator keeps the frozen state."""
X, y = regression_dataset
estimator = LinearRegression().fit(X, y)
frozen = FrozenEstimator(estimator)
cloned = clone(frozen)
assert cloned.estimator is estimator
def test_check_is_fitted(regression_dataset):
"""Test that check_is_fitted works on frozen estimators."""
X, y = regression_dataset
estimator = LinearRegression()
frozen = FrozenEstimator(estimator)
with pytest.raises(NotFittedError):
check_is_fitted(frozen)
estimator = LinearRegression().fit(X, y)
frozen = FrozenEstimator(estimator)
check_is_fitted(frozen)
def test_frozen_tags():
"""Test that frozen estimators have the same tags as the original estimator
except for the skip_test tag."""
class Estimator(BaseEstimator):
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.categorical = True
return tags
estimator = Estimator()
frozen = FrozenEstimator(estimator)
frozen_tags = frozen.__sklearn_tags__()
estimator_tags = estimator.__sklearn_tags__()
assert frozen_tags._skip_test is True
assert estimator_tags._skip_test is False
assert estimator_tags.input_tags.categorical is True
assert frozen_tags.input_tags.categorical is True
def test_frozen_params():
"""Test that FrozenEstimator only exposes the estimator parameter."""
est = LogisticRegression()
frozen = FrozenEstimator(est)
with pytest.raises(ValueError, match="You cannot set parameters of the inner"):
frozen.set_params(estimator__C=1)
assert frozen.get_params() == {"estimator": est}
other_est = LocalOutlierFactor()
frozen.set_params(estimator=other_est)
assert frozen.get_params() == {"estimator": other_est}
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |