repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/tests/test_theil_sen.py | sklearn/linear_model/tests/test_theil_sen.py | """
Testing for Theil-Sen module (sklearn.linear_model.theil_sen)
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import os
import re
import sys
from contextlib import contextmanager
import numpy as np
import pytest
from numpy.testing import (
assert_array_almost_equal,
assert_array_equal,
assert_array_less,
)
from scipy.linalg import norm
from scipy.optimize import fmin_bfgs
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model._theil_sen import (
_breakdown_point,
_modified_weiszfeld_step,
_spatial_median,
)
from sklearn.utils._testing import assert_almost_equal
@contextmanager
def no_stdout_stderr():
    """Temporarily redirect sys.stdout and sys.stderr to os.devnull.

    The original streams are restored in a ``finally`` clause so that an
    exception raised inside the ``with`` block cannot leave the process
    with silenced output (the previous implementation skipped restoration
    on error).
    """
    old_stdout = sys.stdout
    old_stderr = sys.stderr
    try:
        with open(os.devnull, "w") as devnull:
            sys.stdout = devnull
            sys.stderr = devnull
            yield
            devnull.flush()
    finally:
        # Always restore, even when the wrapped block raises.
        sys.stdout = old_stdout
        sys.stderr = old_stderr
def gen_toy_problem_1d(intercept=True):
    """Build a 1-d regression toy problem y = 3*x + c + noise with outliers.

    Returns ``(X, y, w, c)`` where ``X`` has shape ``(n_samples, 1)``,
    ``w`` is the true slope and ``c`` the true offset.
    """
    rng = np.random.RandomState(0)
    # Linear model y = 3*x + N(2, 0.1**2)
    slope = 3.0
    if intercept:
        offset, n_samples = 2.0, 50
    else:
        offset, n_samples = 0.1, 100
    x = rng.normal(size=n_samples)
    noise = 0.1 * rng.normal(size=n_samples)
    y = slope * x + offset + noise
    # Plant a handful of gross outliers at fixed positions.
    if intercept:
        planted = {42: (-2, 4), 43: (-2.5, 8), 33: (2.5, 1), 49: (2.1, 2)}
    else:
        planted = {
            42: (-2, 4),
            43: (-2.5, 8),
            53: (2.5, 1),
            60: (2.1, 2),
            72: (1.8, -7),
        }
    for idx, (x_val, y_val) in planted.items():
        x[idx] = x_val
        y[idx] = y_val
    return x[:, np.newaxis], y, slope, offset
def gen_toy_problem_2d():
    """Build a 2-d regression toy problem with ~10% corrupted targets.

    Returns ``(X, y, w, c)`` with true coefficients ``w = [5, 10]`` and
    intercept ``c = 1``.
    """
    rng = np.random.RandomState(0)
    n_samples = 100
    # Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2)
    X = rng.normal(size=(n_samples, 2))
    coefs = np.array([5.0, 10.0])
    intercept = 1.0
    y = X @ coefs + intercept + 0.1 * rng.normal(size=n_samples)
    # Replace a random tenth of the targets with large noise (outliers).
    n_outliers = n_samples // 10
    bad_rows = rng.randint(0, n_samples, size=n_outliers)
    y[bad_rows] = 50 * rng.normal(size=n_outliers)
    return X, y, coefs, intercept
def gen_toy_problem_4d():
    """Build a 4-d regression toy problem with ~10% corrupted targets.

    Returns ``(X, y, w, c)`` with true coefficients ``w = [5, 10, 42, 7]``
    and intercept ``c = 1``.
    """
    rng = np.random.RandomState(0)
    n_samples = 10000
    # Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2)
    X = rng.normal(size=(n_samples, 4))
    coefs = np.array([5.0, 10.0, 42.0, 7.0])
    intercept = 1.0
    y = X @ coefs + intercept + 0.1 * rng.normal(size=n_samples)
    # Replace a random tenth of the targets with large noise (outliers).
    n_outliers = n_samples // 10
    bad_rows = rng.randint(0, n_samples, size=n_outliers)
    y[bad_rows] = 50 * rng.normal(size=n_outliers)
    return X, y, coefs, intercept
def test_modweiszfeld_step_1d():
    """One modified Weiszfeld step in 1-d moves toward the spatial median."""
    X = np.array([1.0, 2.0, 3.0]).reshape(3, 1)
    # Check startvalue is element of X and solution
    median = 2.0
    new_y = _modified_weiszfeld_step(X, median)
    assert_array_almost_equal(new_y, median)
    # Check startvalue is not the solution
    y = 2.5
    new_y = _modified_weiszfeld_step(X, y)
    assert_array_less(median, new_y)
    assert_array_less(new_y, y)
    # Check startvalue is not the solution but element of X
    y = 3.0
    new_y = _modified_weiszfeld_step(X, y)
    assert_array_less(median, new_y)
    assert_array_less(new_y, y)
    # Check that a single vector is identity
    X = np.array([1.0, 2.0, 3.0]).reshape(1, 3)
    y = X[0]
    new_y = _modified_weiszfeld_step(X, y)
    assert_array_equal(y, new_y)
def test_modweiszfeld_step_2d():
    """Modified Weiszfeld iterations in 2-d match precomputed values."""
    X = np.array([0.0, 0.0, 1.0, 1.0, 0.0, 1.0]).reshape(3, 2)
    y = np.array([0.5, 0.5])
    # Check first two iterations
    new_y = _modified_weiszfeld_step(X, y)
    assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3]))
    new_y = _modified_weiszfeld_step(X, new_y)
    assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592]))
    # Check fix point
    y = np.array([0.21132505, 0.78867497])
    new_y = _modified_weiszfeld_step(X, y)
    assert_array_almost_equal(new_y, y)
def test_spatial_median_1d():
    """In 1-d the spatial median coincides with the ordinary median."""
    X = np.array([1.0, 2.0, 3.0]).reshape(3, 1)
    true_median = 2.0
    _, median = _spatial_median(X)
    assert_array_almost_equal(median, true_median)
    # Test larger problem and for exact solution in 1d case
    random_state = np.random.RandomState(0)
    X = random_state.randint(100, size=(1000, 1))
    true_median = np.median(X.ravel())
    _, median = _spatial_median(X)
    assert_array_equal(median, true_median)
def test_spatial_median_2d():
    """Spatial median solves Fermat-Weber; non-convergence emits a warning."""
    X = np.array([0.0, 0.0, 1.0, 1.0, 0.0, 1.0]).reshape(3, 2)
    _, median = _spatial_median(X, max_iter=100, tol=1.0e-6)
    def cost_func(y):
        # Sum of Euclidean distances from y to every row of X.
        dists = np.array([norm(x - y) for x in X])
        return np.sum(dists)
    # Check if median is solution of the Fermat-Weber location problem
    fermat_weber = fmin_bfgs(cost_func, median, disp=False)
    assert_array_almost_equal(median, fermat_weber)
    # Check when maximum iteration is exceeded a warning is emitted
    warning_message = "Maximum number of iterations 30 reached in spatial median."
    with pytest.warns(ConvergenceWarning, match=warning_message):
        _spatial_median(X, max_iter=30, tol=0.0)
def test_theil_sen_1d():
    """Theil-Sen recovers a 1-d model that OLS gets wrong due to outliers."""
    X, y, true_slope, true_offset = gen_toy_problem_1d()
    # Ordinary least squares is thrown far off by the planted outliers...
    ols = LinearRegression().fit(X, y)
    assert np.abs(ols.coef_ - true_slope) > 0.9
    # ...while the robust Theil-Sen estimate stays close to the truth.
    robust = TheilSenRegressor(random_state=0).fit(X, y)
    assert_array_almost_equal(robust.coef_, true_slope, 1)
    assert_array_almost_equal(robust.intercept_, true_offset, 1)
def test_theil_sen_1d_no_intercept():
    """Theil-Sen with fit_intercept=False recovers the combined slope."""
    X, y, w, c = gen_toy_problem_1d(intercept=False)
    # Check that Least Squares fails
    lstq = LinearRegression(fit_intercept=False).fit(X, y)
    assert np.abs(lstq.coef_ - w - c) > 0.5
    # Check that Theil-Sen works
    theil_sen = TheilSenRegressor(fit_intercept=False, random_state=0).fit(X, y)
    assert_array_almost_equal(theil_sen.coef_, w + c, 1)
    assert_almost_equal(theil_sen.intercept_, 0.0)
    # non-regression test for #18104
    theil_sen.score(X, y)
def test_theil_sen_2d():
    """Theil-Sen recovers a 2-d model that OLS gets wrong due to outliers."""
    X, y, true_coefs, true_intercept = gen_toy_problem_2d()
    # Least squares is pulled away from the true coefficients...
    ols = LinearRegression().fit(X, y)
    assert norm(ols.coef_ - true_coefs) > 1.0
    # ...while Theil-Sen (with a capped subpopulation) stays close.
    robust = TheilSenRegressor(max_subpopulation=1e3, random_state=0).fit(X, y)
    assert_array_almost_equal(robust.coef_, true_coefs, 1)
    assert_array_almost_equal(robust.intercept_, true_intercept, 1)
def test_calc_breakdown_point():
    """For huge n_samples the breakdown point approaches 1 - 1/sqrt(2)."""
    bp = _breakdown_point(1e10, 2)
    assert np.abs(bp - 1 + 1 / (np.sqrt(2))) < 1.0e-6
@pytest.mark.parametrize(
    "param, ExceptionCls, match",
    [
        (
            {"n_subsamples": 1},
            ValueError,
            re.escape("Invalid parameter since n_features+1 > n_subsamples (2 > 1)"),
        ),
        (
            {"n_subsamples": 101},
            ValueError,
            re.escape("Invalid parameter since n_subsamples > n_samples (101 > 50)"),
        ),
    ],
)
def test_checksubparams_invalid_input(param, ExceptionCls, match):
    """Invalid n_subsamples values must raise with an informative message."""
    X, y, w, c = gen_toy_problem_1d()
    theil_sen = TheilSenRegressor(**param, random_state=0)
    with pytest.raises(ExceptionCls, match=match):
        theil_sen.fit(X, y)
def test_checksubparams_n_subsamples_if_less_samples_than_features():
    """Fitting with n_samples < n_features and a small n_subsamples raises."""
    rng = np.random.RandomState(0)
    n_samples, n_features = 10, 20
    X = rng.normal(size=(n_samples, n_features))
    y = rng.normal(size=n_samples)
    estimator = TheilSenRegressor(n_subsamples=9, random_state=0)
    # n_subsamples=9 is invalid for this wide (10 x 20) problem.
    with pytest.raises(ValueError):
        estimator.fit(X, y)
def test_subpopulation():
    """A capped random subpopulation still recovers the 4-d model."""
    X, y, true_coefs, true_intercept = gen_toy_problem_4d()
    estimator = TheilSenRegressor(max_subpopulation=250, random_state=0)
    estimator.fit(X, y)
    assert_array_almost_equal(estimator.coef_, true_coefs, 1)
    assert_array_almost_equal(estimator.intercept_, true_intercept, 1)
def test_subsamples():
    """With n_subsamples == n_samples Theil-Sen matches least squares."""
    X, y, w, c = gen_toy_problem_4d()
    theil_sen = TheilSenRegressor(n_subsamples=X.shape[0], random_state=0).fit(X, y)
    lstq = LinearRegression().fit(X, y)
    # Check for exact the same results as Least Squares
    assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9)
@pytest.mark.thread_unsafe  # manually captured stdout
def test_verbosity():
    """Smoke test: verbose fitting must not crash (output is discarded)."""
    X, y, w, c = gen_toy_problem_1d()
    # Check that Theil-Sen can be verbose
    with no_stdout_stderr():
        TheilSenRegressor(verbose=True, random_state=0).fit(X, y)
        TheilSenRegressor(verbose=True, max_subpopulation=10, random_state=0).fit(X, y)
def test_theil_sen_parallel():
    """Parallel fitting (n_jobs=2) still recovers the model on outlier data."""
    X, y, w, c = gen_toy_problem_2d()
    # Check that Least Squares fails
    lstq = LinearRegression().fit(X, y)
    assert norm(lstq.coef_ - w) > 1.0
    # Check that Theil-Sen works
    theil_sen = TheilSenRegressor(n_jobs=2, random_state=0, max_subpopulation=2e3).fit(
        X, y
    )
    assert_array_almost_equal(theil_sen.coef_, w, 1)
    assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_less_samples_than_features():
    """With n_samples < n_features Theil-Sen falls back to least squares."""
    random_state = np.random.RandomState(0)
    n_samples, n_features = 10, 20
    X = random_state.normal(size=(n_samples, n_features))
    y = random_state.normal(size=n_samples)
    # Check that Theil-Sen falls back to Least Squares if fit_intercept=False
    theil_sen = TheilSenRegressor(fit_intercept=False, random_state=0).fit(X, y)
    lstq = LinearRegression(fit_intercept=False).fit(X, y)
    assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12)
    # Check fit_intercept=True case. This will not be equal to the Least
    # Squares solution since the intercept is calculated differently.
    theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y)
    y_pred = theil_sen.predict(X)
    assert_array_almost_equal(y_pred, y, 12)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/tests/__init__.py | sklearn/linear_model/tests/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/tests/test_coordinate_descent.py | sklearn/linear_model/tests/test_coordinate_descent.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from copy import deepcopy
import joblib
import numpy as np
import pytest
from scipy import interpolate, sparse
from sklearn.base import clone, config_context
from sklearn.datasets import load_diabetes, make_regression
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model import (
ElasticNet,
ElasticNetCV,
Lasso,
LassoCV,
LassoLarsCV,
LinearRegression,
MultiTaskElasticNet,
MultiTaskElasticNetCV,
MultiTaskLasso,
MultiTaskLassoCV,
Ridge,
enet_path,
lars_path,
lasso_path,
)
from sklearn.linear_model import _cd_fast as cd_fast # type: ignore[attr-defined]
from sklearn.linear_model._coordinate_descent import _set_order
from sklearn.model_selection import (
BaseCrossValidator,
GridSearchCV,
LeaveOneGroupOut,
)
from sklearn.model_selection._split import GroupsConsumerMixin
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.utils import check_array
from sklearn.utils._testing import (
TempMemmap,
assert_allclose,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
assert_array_less,
ignore_warnings,
)
from sklearn.utils.fixes import COO_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS
@pytest.mark.parametrize("order", ["C", "F"])
@pytest.mark.parametrize("input_order", ["C", "F"])
def test_set_order_dense(order, input_order):
    """Check that _set_order returns arrays with promised order."""
    X = np.array([[0], [0], [0]], order=input_order)
    y = np.array([0, 0, 0], order=input_order)
    X2, y2 = _set_order(X, y, order=order)
    if order == "C":
        assert X2.flags["C_CONTIGUOUS"]
        assert y2.flags["C_CONTIGUOUS"]
    elif order == "F":
        assert X2.flags["F_CONTIGUOUS"]
        assert y2.flags["F_CONTIGUOUS"]
    if order == input_order:
        # No conversion needed: the very same objects must be returned.
        assert X is X2
        assert y is y2
@pytest.mark.parametrize("order", ["C", "F"])
@pytest.mark.parametrize("input_order", ["C", "F"])
@pytest.mark.parametrize("coo_container", COO_CONTAINERS)
def test_set_order_sparse(order, input_order, coo_container):
    """Check that _set_order returns sparse matrices in promised format."""
    X = coo_container(np.array([[0], [0], [0]]))
    y = coo_container(np.array([0, 0, 0]))
    sparse_format = "csc" if input_order == "F" else "csr"
    X = X.asformat(sparse_format)
    # Bug fix: this previously read ``y = X.asformat(sparse_format)``,
    # silently replacing y with X so the y conversion was never exercised.
    y = y.asformat(sparse_format)
    X2, y2 = _set_order(X, y, order=order)
    format = "csc" if order == "F" else "csr"
    assert sparse.issparse(X2) and X2.format == format
    assert sparse.issparse(y2) and y2.format == format
def test_cython_solver_equivalence():
    """Test that all 3 Cython solvers for 1-d targets give same results."""
    X, y = make_regression()
    X_mean = X.mean(axis=0)
    X_centered = np.asfortranarray(X - X_mean)
    y -= y.mean()
    # alpha_max is the smallest penalty for which all coefficients are zero
    # (asserted below); alpha is one tenth of it so some stay active.
    alpha_max = np.linalg.norm(X.T @ y, ord=np.inf)
    alpha = alpha_max / 10
    params = {
        "beta": 0,
        "max_iter": 100,
        "tol": 1e-10,
        "rng": np.random.RandomState(0),  # not used, but needed as argument
        "random": False,
        "positive": False,
    }
    def zc():
        """Create a new zero coefficient array (zc)."""
        return np.zeros(X.shape[1])
    # For alpha_max, coefficients must all be zero.
    coef_1 = zc()
    for do_screening in [True, False]:
        cd_fast.enet_coordinate_descent(
            w=coef_1,
            alpha=alpha_max,
            X=X_centered,
            y=y,
            **params,
            do_screening=do_screening,
        )
        assert_allclose(coef_1, 0)
    # Without gap safe screening rules
    coef_1 = zc()
    cd_fast.enet_coordinate_descent(
        w=coef_1, alpha=alpha, X=X_centered, y=y, **params, do_screening=False
    )
    # At least 2 coefficients are non-zero
    assert 2 <= np.sum(np.abs(coef_1) > 1e-8) < X.shape[1]
    # With gap safe screening rules
    coef_2 = zc()
    cd_fast.enet_coordinate_descent(
        w=coef_2, alpha=alpha, X=X_centered, y=y, **params, do_screening=True
    )
    assert_allclose(coef_2, coef_1)
    # Sparse solver must agree with the dense result above.
    Xs = sparse.csc_matrix(X)
    for do_screening in [True, False]:
        coef_3 = zc()
        cd_fast.sparse_enet_coordinate_descent(
            w=coef_3,
            alpha=alpha,
            X_data=Xs.data,
            X_indices=Xs.indices,
            X_indptr=Xs.indptr,
            y=y,
            sample_weight=None,
            X_mean=X_mean,
            **params,
            do_screening=do_screening,
        )
        assert_allclose(coef_3, coef_1)
    # Gram-matrix solver must agree with the dense result as well.
    for do_screening in [True, False]:
        coef_4 = zc()
        cd_fast.enet_coordinate_descent_gram(
            w=coef_4,
            alpha=alpha,
            Q=X_centered.T @ X_centered,
            q=X_centered.T @ y,
            y=y,
            **params,
            do_screening=do_screening,
        )
        assert_allclose(coef_4, coef_1)
def test_lasso_zero():
    """Fitting Lasso on all-zero data must not crash; model is trivial."""
    X = [[0], [0], [0]]
    y = [0, 0, 0]
    model = Lasso(alpha=0.1).fit(X, y)
    predictions = model.predict([[1], [2], [3]])
    assert_array_almost_equal(model.coef_, [0])
    assert_array_almost_equal(predictions, [0, 0, 0])
    assert_almost_equal(model.dual_gap_, 0)
@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning")
@pytest.mark.filterwarnings("ignore::RuntimeWarning")  # overflow and similar
def test_enet_nonfinite_params():
    # Check ElasticNet throws ValueError when dealing with non-finite parameter
    # values
    rng = np.random.RandomState(0)
    n_samples = 10
    # Scale features by float64 max so coordinate descent overflows.
    fmax = np.finfo(np.float64).max
    X = fmax * rng.uniform(size=(n_samples, 2))
    y = rng.randint(0, 2, size=n_samples)
    clf = ElasticNet(alpha=0.1)
    msg = "Coordinate descent iterations resulted in non-finite parameter values"
    with pytest.raises(ValueError, match=msg):
        clf.fit(X, y)
def test_lasso_toy():
    # Test Lasso on a toy example for various values of alpha.
    # When validating this against glmnet notice that glmnet divides it
    # against nobs.
    X = [[-1], [0], [1]]
    Y = [-1, 0, 1]  # just a straight line
    T = [[2], [3], [4]]  # test sample
    # Near-zero penalty: coefficient approaches the OLS solution (1).
    clf = Lasso(alpha=1e-8)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [1])
    assert_array_almost_equal(pred, [2, 3, 4])
    assert_almost_equal(clf.dual_gap_, 0)
    # Moderate penalty shrinks the coefficient.
    clf = Lasso(alpha=0.1)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.85])
    assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
    assert_almost_equal(clf.dual_gap_, 0)
    clf = Lasso(alpha=0.5)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.25])
    assert_array_almost_equal(pred, [0.5, 0.75, 1.0])
    assert_almost_equal(clf.dual_gap_, 0)
    # Large penalty zeroes the coefficient entirely.
    clf = Lasso(alpha=1)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.0])
    assert_array_almost_equal(pred, [0, 0, 0])
    assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
    # Test ElasticNet for various parameters of alpha and l1_ratio.
    # Actually, the parameters alpha = 0 should not be allowed. However,
    # we test it as a border case.
    # ElasticNet is tested with and without precomputed Gram matrix
    X = np.array([[-1.0], [0.0], [1.0]])
    Y = [-1, 0, 1]  # just a straight line
    T = [[2.0], [3.0], [4.0]]  # test sample
    # this should be the same as lasso
    clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [1])
    assert_array_almost_equal(pred, [2, 3, 4])
    assert_almost_equal(clf.dual_gap_, 0)
    # Same model fit three ways: no Gram, precomputed Gram, explicit Gram.
    clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100, precompute=False)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
    assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
    assert_almost_equal(clf.dual_gap_, 0)
    clf.set_params(max_iter=100, precompute=True)
    clf.fit(X, Y)  # with Gram
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
    assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
    assert_almost_equal(clf.dual_gap_, 0)
    clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
    clf.fit(X, Y)  # with Gram
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
    assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
    assert_almost_equal(clf.dual_gap_, 0)
    clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_almost_equal(clf.coef_, [0.45454], 3)
    assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
    assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_dual_gap():
    """
    Check that Lasso.dual_gap_ matches its objective formulation, with the
    datafit normalized by n_samples
    """
    X, y, _, _ = build_dataset(n_samples=10, n_features=30)
    n_samples = len(y)
    alpha = 0.01 * np.max(np.abs(X.T @ y)) / n_samples
    clf = Lasso(alpha=alpha, fit_intercept=False).fit(X, y)
    w = clf.coef_
    R = y - X @ w
    # Primal objective: mean squared residual / 2 + l1 penalty.
    primal = 0.5 * np.mean(R**2) + clf.alpha * np.sum(np.abs(w))
    # dual pt: R / n_samples, dual constraint: norm(X.T @ theta, inf) <= alpha
    R /= np.max(np.abs(X.T @ R) / (n_samples * alpha))
    dual = 0.5 * (np.mean(y**2) - np.mean((y - R) ** 2))
    assert_allclose(clf.dual_gap_, primal - dual)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10, n_targets=1):
    """
    build an ill-posed linear regression problem with many noisy features and
    comparatively few samples
    """
    rng = np.random.RandomState(0)
    # True weights: only the first n_informative_features are non-zero.
    if n_targets > 1:
        true_w = rng.randn(n_features, n_targets)
    else:
        true_w = rng.randn(n_features)
    true_w[n_informative_features:] = 0.0
    X_train = rng.randn(n_samples, n_features)
    y_train = X_train @ true_w
    X_test = rng.randn(n_samples, n_features)
    y_test = X_test @ true_w
    return X_train, y_train, X_test, y_test
def test_lasso_cv():
    """LassoCV selects the expected alpha and agrees with LassoLarsCV."""
    X, y, X_test, y_test = build_dataset()
    max_iter = 150
    clf = LassoCV(alphas=10, eps=1e-3, max_iter=max_iter, cv=3).fit(X, y)
    assert_almost_equal(clf.alpha_, 0.056, 2)
    clf = LassoCV(alphas=10, eps=1e-3, max_iter=max_iter, precompute=True, cv=3)
    clf.fit(X, y)
    assert_almost_equal(clf.alpha_, 0.056, 2)
    # Check that the lars and the coordinate descent implementation
    # select a similar alpha
    lars = LassoLarsCV(max_iter=30, cv=3).fit(X, y)
    # for this we check that they don't fall in the grid of
    # clf.alphas further than 1
    assert (
        np.abs(
            np.searchsorted(clf.alphas_[::-1], lars.alpha_)
            - np.searchsorted(clf.alphas_[::-1], clf.alpha_)
        )
        <= 1
    )
    # check that they also give a similar MSE
    mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.mse_path_.T)
    assert_allclose(mse_lars(clf.alphas_[5]).mean(), clf.mse_path_[5].mean(), rtol=1e-2)
    # test set
    assert clf.score(X_test, y_test) > 0.99
def test_lasso_cv_with_some_model_selection():
    """LassoCV must accept an arbitrary CV splitter inside a pipeline."""
    from sklearn import datasets
    from sklearn.model_selection import ShuffleSplit

    diabetes = datasets.load_diabetes()
    pipeline = make_pipeline(
        StandardScaler(), LassoCV(cv=ShuffleSplit(random_state=0))
    )
    pipeline.fit(diabetes.data, diabetes.target)
def test_lasso_cv_positive_constraint():
    """positive=True in LassoCV forces all coefficients to be non-negative."""
    X, y, X_test, y_test = build_dataset()
    max_iter = 500
    # Ensure the unconstrained fit has a negative coefficient
    clf_unconstrained = LassoCV(alphas=3, eps=1e-1, max_iter=max_iter, cv=2, n_jobs=1)
    clf_unconstrained.fit(X, y)
    assert min(clf_unconstrained.coef_) < 0
    # On same data, constrained fit has non-negative coefficients
    clf_constrained = LassoCV(
        alphas=3, eps=1e-1, max_iter=max_iter, positive=True, cv=2, n_jobs=1
    )
    clf_constrained.fit(X, y)
    assert min(clf_constrained.coef_) >= 0
@pytest.mark.parametrize(
    "alphas, err_type, err_msg",
    [
        ((1, -1, -100), ValueError, r"alphas\[1\] == -1, must be >= 0.0."),
        (
            (-0.1, -1.0, -10.0),
            ValueError,
            r"alphas\[0\] == -0.1, must be >= 0.0.",
        ),
        (
            (1, 1.0, "1"),
            TypeError,
            r"alphas\[2\] must be an instance of float, not str",
        ),
    ],
)
def test_lassocv_alphas_validation(alphas, err_type, err_msg):
    """Check the `alphas` validation in LassoCV."""
    n_samples, n_features = 5, 5
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features)
    y = rng.randint(0, 2, n_samples)
    lassocv = LassoCV(alphas=alphas)
    with pytest.raises(err_type, match=err_msg):
        lassocv.fit(X, y)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
    # Test that lasso_path with lars_path style output gives the
    # same result
    # Some toy data
    X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
    y = np.array([1, 2, 3.1])
    alphas = [5.0, 1.0, 0.5]
    # Use lars_path and lasso_path(new output) with 1D linear interpolation
    # to compute the same path
    alphas_lars, _, coef_path_lars = lars_path(X, y, method="lasso")
    coef_path_cont_lars = interpolate.interp1d(
        alphas_lars[::-1], coef_path_lars[:, ::-1]
    )
    alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas)
    coef_path_cont_lasso = interpolate.interp1d(
        alphas_lasso2[::-1], coef_path_lasso2[:, ::-1]
    )
    # The two interpolated paths must agree on the shared alpha grid.
    assert_array_almost_equal(
        coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas), decimal=1
    )
def test_enet_path():
    """ElasticNetCV picks the expected alpha/l1_ratio for single and multi-task."""
    # We use a large number of samples and of informative features so that
    # the l1_ratio selected is more toward ridge than lasso
    X, y, X_test, y_test = build_dataset(
        n_samples=200, n_features=100, n_informative_features=100
    )
    max_iter = 150
    # Here we have a small number of iterations, and thus the
    # ElasticNet might not converge. This is to speed up tests
    clf = ElasticNetCV(
        alphas=[0.01, 0.05, 0.1], eps=2e-3, l1_ratio=[0.5, 0.7], cv=3, max_iter=max_iter
    )
    clf.fit(X, y)
    # Well-conditioned settings, we should have selected our
    # smallest penalty
    assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
    # that is closer to ridge than to lasso
    assert clf.l1_ratio_ == min(clf.l1_ratio)
    clf = ElasticNetCV(
        alphas=[0.01, 0.05, 0.1],
        eps=2e-3,
        l1_ratio=[0.5, 0.7],
        cv=3,
        max_iter=max_iter,
        precompute=True,
    )
    clf.fit(X, y)
    # Well-conditioned settings, we should have selected our
    # smallest penalty
    assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
    # that is closer to ridge than to lasso
    assert clf.l1_ratio_ == min(clf.l1_ratio)
    # We are in well-conditioned settings with low noise: we should
    # have a good test-set performance
    assert clf.score(X_test, y_test) > 0.99
    # Multi-output/target case
    X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
    clf = MultiTaskElasticNetCV(
        alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7], cv=3, max_iter=max_iter
    )
    clf.fit(X, y)
    # We are in well-conditioned settings with low noise: we should
    # have a good test-set performance
    assert clf.score(X_test, y_test) > 0.99
    assert clf.coef_.shape == (3, 10)
    # Mono-output should have same cross-validated alpha_ and l1_ratio_
    # in both cases.
    X, y, _, _ = build_dataset(n_features=10)
    clf1 = ElasticNetCV(alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
    clf1.fit(X, y)
    clf2 = MultiTaskElasticNetCV(alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
    clf2.fit(X, y[:, np.newaxis])
    assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
    assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_warm_start():
    """Two warm-started 5-iteration fits must equal one cold 10-iteration fit."""
    X, y, _, _ = build_dataset()
    warm = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
    ignore_warnings(warm.fit)(X, y)
    # Second round continues from the coefficients of the first.
    ignore_warnings(warm.fit)(X, y)
    cold = ElasticNet(alpha=0.1, max_iter=10)
    ignore_warnings(cold.fit)(X, y)
    assert_array_almost_equal(cold.coef_, warm.coef_)
@pytest.mark.filterwarnings("ignore:.*with no regularization.*:UserWarning")
def test_lasso_alpha_warning():
    """Fitting Lasso with alpha=0 must emit the LinearRegression advice."""
    X = [[-1], [0], [1]]
    Y = [-1, 0, 1]  # just a straight line
    clf = Lasso(alpha=0)
    warning_message = (
        "With alpha=0, this algorithm does not "
        "converge well. You are advised to use the "
        "LinearRegression estimator"
    )
    with pytest.warns(UserWarning, match=warning_message):
        clf.fit(X, Y)
def test_lasso_positive_constraint():
    """positive=True keeps Lasso coefficients non-negative, with/without Gram."""
    X = [[-1], [0], [1]]
    y = [1, 0, -1]  # just a straight line with negative slope
    for precompute in (False, True):
        model = Lasso(alpha=0.1, precompute=precompute, positive=True)
        model.fit(X, y)
        assert min(model.coef_) >= 0
def test_enet_positive_constraint():
    """positive=True keeps ElasticNet coefficients non-negative."""
    X = [[-1], [0], [1]]
    y = [1, 0, -1]  # just a straight line with negative slope
    model = ElasticNet(alpha=0.1, positive=True)
    model.fit(X, y)
    assert min(model.coef_) >= 0
def test_enet_cv_positive_constraint():
    """positive=True in ElasticNetCV forces non-negative coefficients."""
    X, y, X_test, y_test = build_dataset()
    max_iter = 500
    # Ensure the unconstrained fit has a negative coefficient
    enetcv_unconstrained = ElasticNetCV(
        alphas=3, eps=1e-1, max_iter=max_iter, cv=2, n_jobs=1
    )
    enetcv_unconstrained.fit(X, y)
    assert min(enetcv_unconstrained.coef_) < 0
    # On same data, constrained fit has non-negative coefficients
    enetcv_constrained = ElasticNetCV(
        alphas=3, eps=1e-1, max_iter=max_iter, cv=2, positive=True, n_jobs=1
    )
    enetcv_constrained.fit(X, y)
    assert min(enetcv_constrained.coef_) >= 0
def test_uniform_targets():
    """Constant targets are predicted exactly; alpha grid collapses.

    With a uniform target the CV estimators must reproduce it exactly and
    the automatically generated alphas degenerate to the float resolution.
    """
    enet = ElasticNetCV(alphas=3)
    m_enet = MultiTaskElasticNetCV(alphas=3)
    lasso = LassoCV(alphas=3)
    m_lasso = MultiTaskLassoCV(alphas=3)
    models_single_task = (enet, lasso)
    models_multi_task = (m_enet, m_lasso)
    rng = np.random.RandomState(0)
    X_train = rng.random_sample(size=(10, 3))
    X_test = rng.random_sample(size=(10, 3))
    y1 = np.empty(10)
    y2 = np.empty((10, 2))
    for model in models_single_task:
        for y_values in (0, 5):
            y1.fill(y_values)
            assert_array_equal(model.fit(X_train, y1).predict(X_test), y1)
            assert_array_equal(model.alphas_, [np.finfo(float).resolution] * 3)
    for model in models_multi_task:
        for y_values in (0, 5):
            y2[:, 0].fill(y_values)
            y2[:, 1].fill(2 * y_values)
            assert_array_equal(model.fit(X_train, y2).predict(X_test), y2)
            assert_array_equal(model.alphas_, [np.finfo(float).resolution] * 3)
@pytest.mark.filterwarnings("error::sklearn.exceptions.ConvergenceWarning")
def test_multi_task_lasso_vs_skglm():
    """Test that MultiTaskLasso gives same results as the one from skglm.
    To reproduce numbers, just use
    from skglm import MultiTaskLasso
    """
    # Numbers are with skglm version 0.5.
    n_samples, n_features, n_tasks = 5, 4, 3
    X = np.vander(np.arange(n_samples), n_features)
    Y = np.arange(n_samples * n_tasks).reshape(n_samples, n_tasks)
    def obj(W, X, y, alpha):
        # NOTE(review): the body uses the closed-over ``Y``, not the ``y``
        # parameter (the call site passes the same array for both) — confirm
        # whether ``y`` was meant to be used here.
        intercept = W[:, -1]
        W = W[:, :-1]
        l21_norm = np.sqrt(np.sum(W**2, axis=0)).sum()
        return (
            np.linalg.norm(Y - X @ W.T - intercept, ord="fro") ** 2 / (2 * n_samples)
            + alpha * l21_norm
        )
    alpha = 0.1
    # TODO: The high number of iterations are required for convergence and show room
    # for improvement of the CD algorithm.
    m = MultiTaskLasso(alpha=alpha, tol=1e-10, max_iter=5000).fit(X, Y)
    assert_allclose(
        obj(np.c_[m.coef_, m.intercept_], X, Y, alpha=alpha),
        0.4965993692547902,
        rtol=1e-10,
    )
    assert_allclose(
        m.intercept_, [0.219942959407, 1.219942959407, 2.219942959407], rtol=1e-7
    )
    assert_allclose(
        m.coef_,
        np.tile([-0.032075014794, 0.25430904614, 2.44785152982, 0], (n_tasks, 1)),
        rtol=1e-6,
    )
def test_multi_task_lasso_and_enet():
    """Duplicated targets yield identical per-task coefficient rows."""
    X, y, X_test, y_test = build_dataset()
    Y = np.c_[y, y]
    # Y_test = np.c_[y_test, y_test]
    clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
    assert 0 < clf.dual_gap_ < 1e-5
    assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
    clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
    assert 0 < clf.dual_gap_ < 1e-5
    assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
    # One iteration is not enough to converge: a warning must be raised.
    clf = MultiTaskElasticNet(alpha=1.0, tol=1e-8, max_iter=1)
    warning_message = (
        "Objective did not converge. You might want to "
        "increase the number of iterations."
    )
    with pytest.warns(ConvergenceWarning, match=warning_message):
        clf.fit(X, Y)
def test_lasso_readonly_data():
    """Lasso must fit read-only (memmapped) input without error."""
    X = np.array([[-1], [0], [1]])
    Y = np.array([-1, 0, 1])  # just a straight line
    T = np.array([[2], [3], [4]])  # test sample
    with TempMemmap((X, Y)) as (X, Y):
        clf = Lasso(alpha=0.5)
        clf.fit(X, Y)
        pred = clf.predict(T)
        assert_array_almost_equal(clf.coef_, [0.25])
        assert_array_almost_equal(pred, [0.5, 0.75, 1.0])
        assert_almost_equal(clf.dual_gap_, 0)
def test_multi_task_lasso_readonly_data():
    """MultiTaskLasso must fit read-only (memmapped) input without error."""
    X, y, X_test, y_test = build_dataset()
    Y = np.c_[y, y]
    with TempMemmap((X, Y)) as (X, Y):
        # Bug fix: a stray ``Y = np.c_[y, y]`` here used to replace the
        # read-only memmap with a fresh writable array, so the read-only
        # code path for Y was never actually exercised.
        clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
        assert 0 < clf.dual_gap_ < 1e-5
        assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
    """Multi-target ElasticNet equals per-target single fits."""
    n_targets = 3
    X, y, _, _ = build_dataset(
        n_samples=10, n_features=8, n_informative_features=10, n_targets=n_targets
    )
    estimator = ElasticNet(alpha=0.01)
    estimator.fit(X, y)
    # Save the multi-target results before refitting per target.
    coef, intercept, dual_gap = (
        estimator.coef_,
        estimator.intercept_,
        estimator.dual_gap_,
    )
    for k in range(n_targets):
        estimator.fit(X, y[:, k])
        assert_array_almost_equal(coef[k, :], estimator.coef_)
        assert_array_almost_equal(intercept[k], estimator.intercept_)
        assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
    """ElasticNetCV rejects 2-d targets with a ValueError."""
    rng = np.random.RandomState(0)
    X = rng.randn(10, 2)
    y = rng.randn(10, 2)
    estimator = ElasticNetCV()
    with pytest.raises(ValueError):
        estimator.fit(X, y)
def test_multitask_enet_and_lasso_cv():
    """Multi-task CV estimators select expected alphas and attribute shapes."""
    X, y, _, _ = build_dataset(n_features=50, n_targets=3)
    clf = MultiTaskElasticNetCV(cv=3).fit(X, y)
    assert_almost_equal(clf.alpha_, 0.00556, 3)
    clf = MultiTaskLassoCV(cv=3, tol=1e-6).fit(X, y)
    assert_almost_equal(clf.alpha_, 0.00278, 3)
    X, y, _, _ = build_dataset(n_targets=3)
    clf = MultiTaskElasticNetCV(
        alphas=10, eps=1e-3, max_iter=200, l1_ratio=[0.3, 0.5], tol=1e-3, cv=3
    )
    clf.fit(X, y)
    # Shapes: (n_l1_ratio, n_alphas, n_folds) for mse_path_, etc.
    assert 0.5 == clf.l1_ratio_
    assert (3, X.shape[1]) == clf.coef_.shape
    assert (3,) == clf.intercept_.shape
    assert (2, 10, 3) == clf.mse_path_.shape
    assert (2, 10) == clf.alphas_.shape
    X, y, _, _ = build_dataset(n_targets=3)
    clf = MultiTaskLassoCV(alphas=10, eps=1e-3, max_iter=500, tol=1e-3, cv=3)
    clf.fit(X, y)
    assert (3, X.shape[1]) == clf.coef_.shape
    assert (3,) == clf.intercept_.shape
    assert (10, 3) == clf.mse_path_.shape
    assert 10 == len(clf.alphas_)
def test_1d_multioutput_enet_and_multitask_enet_cv():
    """A 1-column multi-task fit must match the plain single-output fit."""
    X, y, _, _ = build_dataset(n_features=10)
    y = y[:, np.newaxis]
    clf = ElasticNetCV(alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
    clf.fit(X, y[:, 0])
    clf1 = MultiTaskElasticNetCV(alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
    clf1.fit(X, y)
    assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
    assert_almost_equal(clf.alpha_, clf1.alpha_)
    assert_almost_equal(clf.coef_, clf1.coef_[0])
    assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
    """A 1-column MultiTaskLassoCV fit must match the plain LassoCV fit."""
    X, y, _, _ = build_dataset(n_features=10)
    y = y[:, np.newaxis]
    clf = LassoCV(alphas=5, eps=2e-3)
    clf.fit(X, y[:, 0])
    clf1 = MultiTaskLassoCV(alphas=5, eps=2e-3)
    clf1.fit(X, y)
    assert_almost_equal(clf.alpha_, clf1.alpha_)
    assert_almost_equal(clf.coef_, clf1.coef_[0])
    assert_almost_equal(clf.intercept_, clf1.intercept_[0])
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_sparse_input_dtype_enet_and_lassocv(csr_container):
    """float32 and float64 sparse input must yield (nearly) the same model."""
    X, y, _, _ = build_dataset(n_features=10)
    clf = ElasticNetCV(alphas=5)
    clf.fit(csr_container(X), y)
    clf1 = ElasticNetCV(alphas=5)
    clf1.fit(csr_container(X, dtype=np.float32), y)
    assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
    assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
    clf = LassoCV(alphas=5)
    clf.fit(csr_container(X), y)
    clf1 = LassoCV(alphas=5)
    clf1.fit(csr_container(X, dtype=np.float32), y)
    assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
    assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
def test_elasticnet_precompute_incorrect_gram():
# check that passing an invalid precomputed Gram matrix will raise an
# error.
X, y, _, _ = build_dataset()
rng = np.random.RandomState(0)
X_centered = X - np.average(X, axis=0)
garbage = rng.standard_normal(X.shape)
precompute = np.dot(garbage.T, garbage)
clf = ElasticNet(alpha=0.01, precompute=precompute)
msg = "Gram matrix.*did not pass validation.*"
with pytest.raises(ValueError, match=msg):
clf.fit(X_centered, y)
def test_elasticnet_precompute_gram_weighted_samples():
# check the equivalence between passing a precomputed Gram matrix and
# internal computation using sample weights.
X, y, _, _ = build_dataset()
rng = np.random.RandomState(0)
sample_weight = rng.lognormal(size=y.shape)
w_norm = sample_weight * (y.shape / np.sum(sample_weight))
X_c = X - np.average(X, axis=0, weights=w_norm)
X_r = X_c * np.sqrt(w_norm)[:, np.newaxis]
gram = np.dot(X_r.T, X_r)
clf1 = ElasticNet(alpha=0.01, precompute=gram)
clf1.fit(X_c, y, sample_weight=sample_weight)
clf2 = ElasticNet(alpha=0.01, precompute=False)
clf2.fit(X, y, sample_weight=sample_weight)
assert_allclose(clf1.coef_, clf2.coef_)
def test_elasticnet_precompute_gram():
# Check the dtype-aware check for a precomputed Gram matrix
# (see https://github.com/scikit-learn/scikit-learn/pull/22059
# and https://github.com/scikit-learn/scikit-learn/issues/21997).
# Here: (X_c.T, X_c)[2, 3] is not equal to np.dot(X_c[:, 2], X_c[:, 3])
# but within tolerance for np.float32
rng = np.random.RandomState(58)
X = rng.binomial(1, 0.25, (1000, 4)).astype(np.float32)
y = rng.rand(1000).astype(np.float32)
X_c = X - np.average(X, axis=0)
gram = np.dot(X_c.T, X_c)
clf1 = ElasticNet(alpha=0.01, precompute=gram)
clf1.fit(X_c, y)
clf2 = ElasticNet(alpha=0.01, precompute=False)
clf2.fit(X, y)
assert_allclose(clf1.coef_, clf2.coef_)
@pytest.mark.parametrize("sparse_X", [True, False])
def test_warm_start_convergence(sparse_X):
X, y, _, _ = build_dataset()
if sparse_X:
X = sparse.csr_matrix(X)
model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
n_iter_reference = model.n_iter_
# This dataset is not trivial enough for the model to converge in one pass.
assert n_iter_reference > 2
# Check that n_iter_ is invariant to multiple calls to fit
# when warm_start=False, all else being equal.
model.fit(X, y)
n_iter_cold_start = model.n_iter_
assert n_iter_cold_start == n_iter_reference
model.set_params(warm_start=True)
model.fit(X, y)
n_iter_warm_start = model.n_iter_
# coordinate descent checks dual gap before entering the main loop
assert n_iter_warm_start == 0
def test_warm_start_convergence_with_regularizer_decrement():
X, y = load_diabetes(return_X_y=True)
# Train a model to converge on a lightly regularized problem
final_alpha = 1e-5
low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
# Fitting a new model on a more regularized version of the same problem.
# Fitting with high regularization is easier it should converge faster
# in general.
high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
assert low_reg_model.n_iter_ > high_reg_model.n_iter_
# Fit the solution to the original, less regularized version of the
# problem but from the solution of the highly regularized variant of
# the problem as a better starting point. This should also converge
# faster than the original model that starts from zero.
warm_low_reg_model = deepcopy(high_reg_model)
warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha)
warm_low_reg_model.fit(X, y)
assert low_reg_model.n_iter_ > warm_low_reg_model.n_iter_
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_random_descent(csr_container):
# Test that both random and cyclic selection give the same results.
# Ensure that the test models fully converge and check a wide
# range of conditions.
# This uses the coordinate descent algo using the gram trick.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
clf_cyclic = ElasticNet(selection="cyclic", tol=1e-8)
clf_cyclic.fit(X, y)
clf_random = ElasticNet(selection="random", tol=1e-8, random_state=42)
clf_random.fit(X, y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# This uses the descent algo without the gram trick
clf_cyclic = ElasticNet(selection="cyclic", tol=1e-8)
clf_cyclic.fit(X.T, y[:20])
clf_random = ElasticNet(selection="random", tol=1e-8, random_state=42)
clf_random.fit(X.T, y[:20])
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Sparse Case
clf_cyclic = ElasticNet(selection="cyclic", tol=1e-8)
clf_cyclic.fit(csr_container(X), y)
clf_random = ElasticNet(selection="random", tol=1e-8, random_state=42)
clf_random.fit(csr_container(X), y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Multioutput case.
new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
clf_cyclic = MultiTaskElasticNet(selection="cyclic", tol=1e-8)
clf_cyclic.fit(X, new_y)
clf_random = MultiTaskElasticNet(selection="random", tol=1e-8, random_state=42)
clf_random.fit(X, new_y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
def test_enet_path_positive():
# Test positive parameter
X, Y, _, _ = build_dataset(n_samples=50, n_features=50, n_targets=2)
# For mono output
# Test that the coefs returned by positive=True in enet_path are positive
for path in [enet_path, lasso_path]:
pos_path_coef = path(X, Y[:, 0], positive=True)[1]
assert np.all(pos_path_coef >= 0)
# For multi output, positive parameter is not allowed
# Test that an error is raised
for path in [enet_path, lasso_path]:
with pytest.raises(ValueError):
path(X, Y, positive=True)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_sparse_dense_descent_paths(csr_container):
# Test that dense and sparse input give the same input for descent paths.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
csr = csr_container(X)
for path in [enet_path, lasso_path]:
_, coefs, _ = path(X, y, tol=1e-10)
_, sparse_coefs, _ = path(csr, y, tol=1e-10)
assert_allclose(coefs, sparse_coefs)
@pytest.mark.parametrize("path_func", [enet_path, lasso_path])
def test_path_unknown_parameter(path_func):
"""Check that passing parameter not used by the coordinate descent solver
will raise an error."""
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
err_msg = "Unexpected parameters in params"
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/tests/test_perceptron.py | sklearn/linear_model/tests/test_perceptron.py | import numpy as np
import pytest
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
from sklearn.utils import check_random_state
from sklearn.utils._testing import assert_allclose, assert_array_almost_equal
from sklearn.utils.fixes import CSR_CONTAINERS
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
class MyPerceptron:
def __init__(self, n_iter=1):
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
if self.predict(X[i])[0] != y[i]:
self.w += y[i] * X[i]
self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
@pytest.mark.parametrize("container", CSR_CONTAINERS + [np.array])
def test_perceptron_accuracy(container):
data = container(X)
clf = Perceptron(max_iter=100, tol=None, shuffle=False)
clf.fit(data, y)
score = clf.score(data, y)
assert score > 0.7
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(max_iter=2, shuffle=False, tol=None)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
clf = Perceptron(max_iter=100)
for meth in ("predict_proba", "predict_log_proba"):
with pytest.raises(AttributeError):
getattr(clf, meth)
def test_perceptron_l1_ratio():
"""Check that `l1_ratio` has an impact when `penalty='elasticnet'`"""
clf1 = Perceptron(l1_ratio=0, penalty="elasticnet")
clf1.fit(X, y)
clf2 = Perceptron(l1_ratio=0.15, penalty="elasticnet")
clf2.fit(X, y)
assert clf1.score(X, y) != clf2.score(X, y)
# check that the bounds of elastic net which should correspond to an l1 or
# l2 penalty depending of `l1_ratio` value.
clf_l1 = Perceptron(penalty="l1").fit(X, y)
clf_elasticnet = Perceptron(l1_ratio=1, penalty="elasticnet").fit(X, y)
assert_allclose(clf_l1.coef_, clf_elasticnet.coef_)
clf_l2 = Perceptron(penalty="l2").fit(X, y)
clf_elasticnet = Perceptron(l1_ratio=0, penalty="elasticnet").fit(X, y)
assert_allclose(clf_l2.coef_, clf_elasticnet.coef_)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/tests/test_ransac.py | sklearn/linear_model/tests/test_ransac.py | import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal, assert_array_equal
from sklearn.datasets import make_regression
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model import (
LinearRegression,
OrthogonalMatchingPursuit,
RANSACRegressor,
Ridge,
)
from sklearn.linear_model._ransac import _dynamic_max_trials
from sklearn.utils import check_random_state
from sklearn.utils._testing import assert_allclose
from sklearn.utils.fixes import COO_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add some faulty data
rng = np.random.RandomState(1000)
outliers = np.unique(rng.randint(len(X), size=200))
data[outliers, :] += 50 + rng.rand(len(outliers), 2) * 10
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
def test_ransac_inliers_outliers():
estimator = LinearRegression()
ransac_estimator = RANSACRegressor(
estimator, min_samples=2, residual_threshold=5, random_state=0
)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_is_data_valid():
def is_data_valid(X, y):
assert X.shape[0] == 2
assert y.shape[0] == 2
return False
rng = np.random.RandomState(0)
X = rng.rand(10, 2)
y = rng.rand(10, 1)
estimator = LinearRegression()
ransac_estimator = RANSACRegressor(
estimator,
min_samples=2,
residual_threshold=5,
is_data_valid=is_data_valid,
random_state=0,
)
with pytest.raises(ValueError):
ransac_estimator.fit(X, y)
def test_ransac_is_model_valid():
def is_model_valid(estimator, X, y):
assert X.shape[0] == 2
assert y.shape[0] == 2
return False
estimator = LinearRegression()
ransac_estimator = RANSACRegressor(
estimator,
min_samples=2,
residual_threshold=5,
is_model_valid=is_model_valid,
random_state=0,
)
with pytest.raises(ValueError):
ransac_estimator.fit(X, y)
def test_ransac_max_trials():
estimator = LinearRegression()
ransac_estimator = RANSACRegressor(
estimator,
min_samples=2,
residual_threshold=5,
max_trials=0,
random_state=0,
)
with pytest.raises(ValueError):
ransac_estimator.fit(X, y)
# there is a 1e-9 chance it will take these many trials. No good reason
# 1e-2 isn't enough, can still happen
# 2 is the what ransac defines as min_samples = X.shape[1] + 1
max_trials = _dynamic_max_trials(len(X) - len(outliers), X.shape[0], 2, 1 - 1e-9)
ransac_estimator = RANSACRegressor(estimator, min_samples=2)
for i in range(50):
ransac_estimator.set_params(min_samples=2, random_state=i)
ransac_estimator.fit(X, y)
assert ransac_estimator.n_trials_ < max_trials + 1
def test_ransac_stop_n_inliers():
estimator = LinearRegression()
ransac_estimator = RANSACRegressor(
estimator,
min_samples=2,
residual_threshold=5,
stop_n_inliers=2,
random_state=0,
)
ransac_estimator.fit(X, y)
assert ransac_estimator.n_trials_ == 1
def test_ransac_stop_score():
estimator = LinearRegression()
ransac_estimator = RANSACRegressor(
estimator,
min_samples=2,
residual_threshold=5,
stop_score=0,
random_state=0,
)
ransac_estimator.fit(X, y)
assert ransac_estimator.n_trials_ == 1
def test_ransac_score():
X = np.arange(100)[:, None]
y = np.zeros((100,))
y[0] = 1
y[1] = 100
estimator = LinearRegression()
ransac_estimator = RANSACRegressor(
estimator, min_samples=2, residual_threshold=0.5, random_state=0
)
ransac_estimator.fit(X, y)
assert ransac_estimator.score(X[2:], y[2:]) == 1
assert ransac_estimator.score(X[:2], y[:2]) < 1
def test_ransac_predict():
X = np.arange(100)[:, None]
y = np.zeros((100,))
y[0] = 1
y[1] = 100
estimator = LinearRegression()
ransac_estimator = RANSACRegressor(
estimator, min_samples=2, residual_threshold=0.5, random_state=0
)
ransac_estimator.fit(X, y)
assert_array_equal(ransac_estimator.predict(X), np.zeros(100))
def test_ransac_no_valid_data():
def is_data_valid(X, y):
return False
estimator = LinearRegression()
ransac_estimator = RANSACRegressor(
estimator, is_data_valid=is_data_valid, max_trials=5
)
msg = "RANSAC could not find a valid consensus set"
with pytest.raises(ValueError, match=msg):
ransac_estimator.fit(X, y)
assert ransac_estimator.n_skips_no_inliers_ == 0
assert ransac_estimator.n_skips_invalid_data_ == 5
assert ransac_estimator.n_skips_invalid_model_ == 0
def test_ransac_no_valid_model():
def is_model_valid(estimator, X, y):
return False
estimator = LinearRegression()
ransac_estimator = RANSACRegressor(
estimator, is_model_valid=is_model_valid, max_trials=5
)
msg = "RANSAC could not find a valid consensus set"
with pytest.raises(ValueError, match=msg):
ransac_estimator.fit(X, y)
assert ransac_estimator.n_skips_no_inliers_ == 0
assert ransac_estimator.n_skips_invalid_data_ == 0
assert ransac_estimator.n_skips_invalid_model_ == 5
def test_ransac_exceed_max_skips():
def is_data_valid(X, y):
return False
estimator = LinearRegression()
ransac_estimator = RANSACRegressor(
estimator, is_data_valid=is_data_valid, max_trials=5, max_skips=3
)
msg = "RANSAC skipped more iterations than `max_skips`"
with pytest.raises(ValueError, match=msg):
ransac_estimator.fit(X, y)
assert ransac_estimator.n_skips_no_inliers_ == 0
assert ransac_estimator.n_skips_invalid_data_ == 4
assert ransac_estimator.n_skips_invalid_model_ == 0
def test_ransac_warn_exceed_max_skips():
class IsDataValid:
def __init__(self):
self.call_counter = 0
def __call__(self, X, y):
result = self.call_counter == 0
self.call_counter += 1
return result
estimator = LinearRegression()
ransac_estimator = RANSACRegressor(
estimator, is_data_valid=IsDataValid(), max_skips=3, max_trials=5
)
warning_message = (
"RANSAC found a valid consensus set but exited "
"early due to skipping more iterations than "
"`max_skips`. See estimator attributes for "
"diagnostics."
)
with pytest.warns(ConvergenceWarning, match=warning_message):
ransac_estimator.fit(X, y)
assert ransac_estimator.n_skips_no_inliers_ == 0
assert ransac_estimator.n_skips_invalid_data_ == 4
assert ransac_estimator.n_skips_invalid_model_ == 0
@pytest.mark.parametrize(
"sparse_container", COO_CONTAINERS + CSR_CONTAINERS + CSC_CONTAINERS
)
def test_ransac_sparse(sparse_container):
X_sparse = sparse_container(X)
estimator = LinearRegression()
ransac_estimator = RANSACRegressor(
estimator, min_samples=2, residual_threshold=5, random_state=0
)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_none_estimator():
estimator = LinearRegression()
ransac_estimator = RANSACRegressor(
estimator, min_samples=2, residual_threshold=5, random_state=0
)
ransac_none_estimator = RANSACRegressor(
None, min_samples=2, residual_threshold=5, random_state=0
)
ransac_estimator.fit(X, y)
ransac_none_estimator.fit(X, y)
assert_array_almost_equal(
ransac_estimator.predict(X), ransac_none_estimator.predict(X)
)
def test_ransac_min_n_samples():
estimator = LinearRegression()
ransac_estimator1 = RANSACRegressor(
estimator, min_samples=2, residual_threshold=5, random_state=0
)
ransac_estimator2 = RANSACRegressor(
estimator,
min_samples=2.0 / X.shape[0],
residual_threshold=5,
random_state=0,
)
ransac_estimator5 = RANSACRegressor(
estimator, min_samples=2, residual_threshold=5, random_state=0
)
ransac_estimator6 = RANSACRegressor(estimator, residual_threshold=5, random_state=0)
ransac_estimator7 = RANSACRegressor(
estimator, min_samples=X.shape[0] + 1, residual_threshold=5, random_state=0
)
# GH #19390
ransac_estimator8 = RANSACRegressor(
Ridge(), min_samples=None, residual_threshold=5, random_state=0
)
ransac_estimator1.fit(X, y)
ransac_estimator2.fit(X, y)
ransac_estimator5.fit(X, y)
ransac_estimator6.fit(X, y)
assert_array_almost_equal(
ransac_estimator1.predict(X), ransac_estimator2.predict(X)
)
assert_array_almost_equal(
ransac_estimator1.predict(X), ransac_estimator5.predict(X)
)
assert_array_almost_equal(
ransac_estimator1.predict(X), ransac_estimator6.predict(X)
)
with pytest.raises(ValueError):
ransac_estimator7.fit(X, y)
err_msg = "`min_samples` needs to be explicitly set"
with pytest.raises(ValueError, match=err_msg):
ransac_estimator8.fit(X, y)
def test_ransac_multi_dimensional_targets():
estimator = LinearRegression()
ransac_estimator = RANSACRegressor(
estimator, min_samples=2, residual_threshold=5, random_state=0
)
# 3-D target values
yyy = np.column_stack([y, y, y])
# Estimate parameters of corrupted data
ransac_estimator.fit(X, yyy)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_residual_loss():
def loss_multi1(y_true, y_pred):
return np.sum(np.abs(y_true - y_pred), axis=1)
def loss_multi2(y_true, y_pred):
return np.sum((y_true - y_pred) ** 2, axis=1)
def loss_mono(y_true, y_pred):
return np.abs(y_true - y_pred)
yyy = np.column_stack([y, y, y])
estimator = LinearRegression()
ransac_estimator0 = RANSACRegressor(
estimator, min_samples=2, residual_threshold=5, random_state=0
)
ransac_estimator1 = RANSACRegressor(
estimator,
min_samples=2,
residual_threshold=5,
random_state=0,
loss=loss_multi1,
)
ransac_estimator2 = RANSACRegressor(
estimator,
min_samples=2,
residual_threshold=5,
random_state=0,
loss=loss_multi2,
)
# multi-dimensional
ransac_estimator0.fit(X, yyy)
ransac_estimator1.fit(X, yyy)
ransac_estimator2.fit(X, yyy)
assert_array_almost_equal(
ransac_estimator0.predict(X), ransac_estimator1.predict(X)
)
assert_array_almost_equal(
ransac_estimator0.predict(X), ransac_estimator2.predict(X)
)
# one-dimensional
ransac_estimator0.fit(X, y)
ransac_estimator2.loss = loss_mono
ransac_estimator2.fit(X, y)
assert_array_almost_equal(
ransac_estimator0.predict(X), ransac_estimator2.predict(X)
)
ransac_estimator3 = RANSACRegressor(
estimator,
min_samples=2,
residual_threshold=5,
random_state=0,
loss="squared_error",
)
ransac_estimator3.fit(X, y)
assert_array_almost_equal(
ransac_estimator0.predict(X), ransac_estimator2.predict(X)
)
def test_ransac_default_residual_threshold():
estimator = LinearRegression()
ransac_estimator = RANSACRegressor(estimator, min_samples=2, random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_dynamic_max_trials():
# Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
# Hartley, R.~I. and Zisserman, A., 2004,
# Multiple View Geometry in Computer Vision, Second Edition,
# Cambridge University Press, ISBN: 0521540518
# e = 0%, min_samples = X
assert _dynamic_max_trials(100, 100, 2, 0.99) == 1
# e = 5%, min_samples = 2
assert _dynamic_max_trials(95, 100, 2, 0.99) == 2
# e = 10%, min_samples = 2
assert _dynamic_max_trials(90, 100, 2, 0.99) == 3
# e = 30%, min_samples = 2
assert _dynamic_max_trials(70, 100, 2, 0.99) == 7
# e = 50%, min_samples = 2
assert _dynamic_max_trials(50, 100, 2, 0.99) == 17
# e = 5%, min_samples = 8
assert _dynamic_max_trials(95, 100, 8, 0.99) == 5
# e = 10%, min_samples = 8
assert _dynamic_max_trials(90, 100, 8, 0.99) == 9
# e = 30%, min_samples = 8
assert _dynamic_max_trials(70, 100, 8, 0.99) == 78
# e = 50%, min_samples = 8
assert _dynamic_max_trials(50, 100, 8, 0.99) == 1177
# e = 0%, min_samples = 10
assert _dynamic_max_trials(1, 100, 10, 0) == 0
assert _dynamic_max_trials(1, 100, 10, 1) == float("inf")
def test_ransac_fit_sample_weight():
ransac_estimator = RANSACRegressor(random_state=0)
n_samples = y.shape[0]
weights = np.ones(n_samples)
ransac_estimator.fit(X, y, sample_weight=weights)
# sanity check
assert ransac_estimator.inlier_mask_.shape[0] == n_samples
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_).astype(np.bool_)
ref_inlier_mask[outliers] = False
# check that mask is correct
assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
# check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
# X = X1 repeated n1 times, X2 repeated n2 times and so forth
random_state = check_random_state(0)
X_ = random_state.randint(0, 200, [10, 1])
y_ = np.ndarray.flatten(0.2 * X_ + 2)
sample_weight = random_state.randint(0, 10, 10)
outlier_X = random_state.randint(0, 1000, [1, 1])
outlier_weight = random_state.randint(0, 10, 1)
outlier_y = random_state.randint(-1000, 0, 1)
X_flat = np.append(
np.repeat(X_, sample_weight, axis=0),
np.repeat(outlier_X, outlier_weight, axis=0),
axis=0,
)
y_flat = np.ndarray.flatten(
np.append(
np.repeat(y_, sample_weight, axis=0),
np.repeat(outlier_y, outlier_weight, axis=0),
axis=0,
)
)
ransac_estimator.fit(X_flat, y_flat)
ref_coef_ = ransac_estimator.estimator_.coef_
sample_weight = np.append(sample_weight, outlier_weight)
X_ = np.append(X_, outlier_X, axis=0)
y_ = np.append(y_, outlier_y)
ransac_estimator.fit(X_, y_, sample_weight=sample_weight)
assert_allclose(ransac_estimator.estimator_.coef_, ref_coef_)
# check that if estimator.fit doesn't support
# sample_weight, raises error
estimator = OrthogonalMatchingPursuit()
ransac_estimator = RANSACRegressor(estimator, min_samples=10)
err_msg = f"{estimator.__class__.__name__} does not support sample_weight."
with pytest.raises(ValueError, match=err_msg):
ransac_estimator.fit(X, y, sample_weight=weights)
def test_ransac_final_model_fit_sample_weight():
X, y = make_regression(n_samples=1000, random_state=10)
rng = check_random_state(42)
sample_weight = rng.randint(1, 4, size=y.shape[0])
sample_weight = sample_weight / sample_weight.sum()
ransac = RANSACRegressor(random_state=0)
ransac.fit(X, y, sample_weight=sample_weight)
final_model = LinearRegression()
mask_samples = ransac.inlier_mask_
final_model.fit(
X[mask_samples], y[mask_samples], sample_weight=sample_weight[mask_samples]
)
assert_allclose(ransac.estimator_.coef_, final_model.coef_, atol=1e-12)
def test_perfect_horizontal_line():
"""Check that we can fit a line where all samples are inliers.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/19497
"""
X = np.arange(100)[:, None]
y = np.zeros((100,))
estimator = LinearRegression()
ransac_estimator = RANSACRegressor(estimator, random_state=0)
ransac_estimator.fit(X, y)
assert_allclose(ransac_estimator.estimator_.coef_, 0.0)
assert_allclose(ransac_estimator.estimator_.intercept_, 0.0)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/tests/test_huber.py | sklearn/linear_model/tests/test_huber.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
import pytest
from scipy import optimize
from sklearn.datasets import make_regression
from sklearn.linear_model import HuberRegressor, LinearRegression, Ridge, SGDRegressor
from sklearn.linear_model._huber import _huber_loss_and_gradient
from sklearn.utils._testing import (
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
)
from sklearn.utils.fixes import CSR_CONTAINERS
def make_regression_with_outliers(n_samples=50, n_features=20):
rng = np.random.RandomState(0)
# Generate data with outliers by replacing 10% of the samples with noise.
X, y = make_regression(
n_samples=n_samples, n_features=n_features, random_state=0, noise=0.05
)
# Replace 10% of the sample with noise.
num_noise = int(0.1 * n_samples)
random_samples = rng.randint(0, n_samples, num_noise)
X[random_samples, :] = 2.0 * rng.normal(0, 1, (num_noise, X.shape[1]))
return X, y
def test_huber_equals_lr_for_high_epsilon():
# Test that Ridge matches LinearRegression for large epsilon
X, y = make_regression_with_outliers()
lr = LinearRegression()
lr.fit(X, y)
huber = HuberRegressor(epsilon=1e3, alpha=0.0)
huber.fit(X, y)
assert_almost_equal(huber.coef_, lr.coef_, 3)
assert_almost_equal(huber.intercept_, lr.intercept_, 2)
def test_huber_max_iter():
X, y = make_regression_with_outliers()
huber = HuberRegressor(max_iter=1)
huber.fit(X, y)
assert huber.n_iter_ == huber.max_iter
def test_huber_gradient():
# Test that the gradient calculated by _huber_loss_and_gradient is correct
rng = np.random.RandomState(1)
X, y = make_regression_with_outliers()
sample_weight = rng.randint(1, 3, (y.shape[0]))
def loss_func(x, *args):
return _huber_loss_and_gradient(x, *args)[0]
def grad_func(x, *args):
return _huber_loss_and_gradient(x, *args)[1]
# Check using optimize.check_grad that the gradients are equal.
for _ in range(5):
# Check for both fit_intercept and otherwise.
for n_features in [X.shape[1] + 1, X.shape[1] + 2]:
w = rng.randn(n_features)
w[-1] = np.abs(w[-1])
grad_same = optimize.check_grad(
loss_func, grad_func, w, X, y, 0.01, 0.1, sample_weight
)
assert_almost_equal(grad_same, 1e-6, 4)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_huber_sample_weights(csr_container):
# Test sample_weights implementation in HuberRegressor"""
X, y = make_regression_with_outliers()
huber = HuberRegressor()
huber.fit(X, y)
huber_coef = huber.coef_
huber_intercept = huber.intercept_
# Rescale coefs before comparing with assert_array_almost_equal to make
# sure that the number of decimal places used is somewhat insensitive to
# the amplitude of the coefficients and therefore to the scale of the
# data and the regularization parameter
scale = max(np.mean(np.abs(huber.coef_)), np.mean(np.abs(huber.intercept_)))
huber.fit(X, y, sample_weight=np.ones(y.shape[0]))
assert_array_almost_equal(huber.coef_ / scale, huber_coef / scale)
assert_array_almost_equal(huber.intercept_ / scale, huber_intercept / scale)
X, y = make_regression_with_outliers(n_samples=5, n_features=20)
X_new = np.vstack((X, np.vstack((X[1], X[1], X[3]))))
y_new = np.concatenate((y, [y[1]], [y[1]], [y[3]]))
huber.fit(X_new, y_new)
huber_coef = huber.coef_
huber_intercept = huber.intercept_
sample_weight = np.ones(X.shape[0])
sample_weight[1] = 3
sample_weight[3] = 2
huber.fit(X, y, sample_weight=sample_weight)
assert_array_almost_equal(huber.coef_ / scale, huber_coef / scale)
assert_array_almost_equal(huber.intercept_ / scale, huber_intercept / scale)
# Test sparse implementation with sample weights.
X_csr = csr_container(X)
huber_sparse = HuberRegressor()
huber_sparse.fit(X_csr, y, sample_weight=sample_weight)
assert_array_almost_equal(huber_sparse.coef_ / scale, huber_coef / scale)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_huber_sparse(csr_container):
X, y = make_regression_with_outliers()
huber = HuberRegressor(alpha=0.1)
huber.fit(X, y)
X_csr = csr_container(X)
huber_sparse = HuberRegressor(alpha=0.1)
huber_sparse.fit(X_csr, y)
assert_array_almost_equal(huber_sparse.coef_, huber.coef_)
assert_array_equal(huber.outliers_, huber_sparse.outliers_)
def test_huber_scaling_invariant():
# Test that outliers filtering is scaling independent.
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=False, alpha=0.0)
huber.fit(X, y)
n_outliers_mask_1 = huber.outliers_
assert not np.all(n_outliers_mask_1)
huber.fit(X, 2.0 * y)
n_outliers_mask_2 = huber.outliers_
assert_array_equal(n_outliers_mask_2, n_outliers_mask_1)
huber.fit(2.0 * X, 2.0 * y)
n_outliers_mask_3 = huber.outliers_
assert_array_equal(n_outliers_mask_3, n_outliers_mask_1)
def test_huber_and_sgd_same_results():
# Test they should converge to same coefficients for same parameters
X, y = make_regression_with_outliers(n_samples=10, n_features=2)
# Fit once to find out the scale parameter. Scale down X and y by scale
# so that the scale parameter is optimized to 1.0
huber = HuberRegressor(fit_intercept=False, alpha=0.0, epsilon=1.35)
huber.fit(X, y)
X_scale = X / huber.scale_
y_scale = y / huber.scale_
huber.fit(X_scale, y_scale)
assert_almost_equal(huber.scale_, 1.0, 3)
sgdreg = SGDRegressor(
alpha=0.0,
loss="huber",
shuffle=True,
random_state=0,
max_iter=10000,
fit_intercept=False,
epsilon=1.35,
tol=None,
)
sgdreg.fit(X_scale, y_scale)
assert_array_almost_equal(huber.coef_, sgdreg.coef_, 1)
def test_huber_warm_start():
X, y = make_regression_with_outliers()
huber_warm = HuberRegressor(alpha=1.0, max_iter=10000, warm_start=True, tol=1e-1)
huber_warm.fit(X, y)
huber_warm_coef = huber_warm.coef_.copy()
huber_warm.fit(X, y)
# SciPy performs the tol check after doing the coef updates, so
# these would be almost same but not equal.
assert_array_almost_equal(huber_warm.coef_, huber_warm_coef, 1)
assert huber_warm.n_iter_ == 0
def test_huber_better_r2_score():
# Test that huber returns a better r2 score than non-outliers"""
X, y = make_regression_with_outliers()
huber = HuberRegressor(alpha=0.01)
huber.fit(X, y)
linear_loss = np.dot(X, huber.coef_) + huber.intercept_ - y
mask = np.abs(linear_loss) < huber.epsilon * huber.scale_
huber_score = huber.score(X[mask], y[mask])
huber_outlier_score = huber.score(X[~mask], y[~mask])
# The Ridge regressor should be influenced by the outliers and hence
# give a worse score on the non-outliers as compared to the huber
# regressor.
ridge = Ridge(alpha=0.01)
ridge.fit(X, y)
ridge_score = ridge.score(X[mask], y[mask])
ridge_outlier_score = ridge.score(X[~mask], y[~mask])
assert huber_score > ridge_score
# The huber model should also fit poorly on the outliers.
assert ridge_outlier_score > huber_outlier_score
def test_huber_bool():
# Test that it does not crash with bool data
X, y = make_regression(n_samples=200, n_features=2, noise=4.0, random_state=0)
X_bool = X > 0
HuberRegressor().fit(X_bool, y)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/tests/test_linear_loss.py | sklearn/linear_model/tests/test_linear_loss.py | """
Tests for LinearModelLoss
Note that correctness of losses (which compose LinearModelLoss) is already well
covered in the _loss module.
"""
import numpy as np
import pytest
from numpy.testing import assert_allclose
from scipy import linalg, optimize
from sklearn._loss.loss import (
HalfBinomialLoss,
HalfMultinomialLoss,
HalfPoissonLoss,
)
from sklearn.datasets import make_low_rank_matrix
from sklearn.linear_model._linear_loss import LinearModelLoss
from sklearn.utils.extmath import squared_norm
from sklearn.utils.fixes import CSR_CONTAINERS
# We do not need to test all losses, just what LinearModelLoss does on top of the
# base losses.
LOSSES = [HalfBinomialLoss, HalfMultinomialLoss, HalfPoissonLoss]
def random_X_y_coef(
    linear_model_loss, n_samples, n_features, coef_bound=(-2, 2), seed=42
):
    """Random generate y, X and coef in valid range.

    Returns a design matrix X, a target y drawn consistently with the loss's
    link function, and a coef array shaped as ``linear_model_loss`` expects
    (2d with one row per class for multiclass, 1d otherwise; the intercept,
    if any, occupies the last column/entry).
    """
    rng = np.random.RandomState(seed)
    # Degrees of freedom per class: features plus one slot for the intercept.
    n_dof = n_features + linear_model_loss.fit_intercept
    X = make_low_rank_matrix(
        n_samples=n_samples,
        n_features=n_features,
        random_state=rng,
    )
    coef = linear_model_loss.init_zero_coef(X)

    if linear_model_loss.base_loss.is_multiclass:
        n_classes = linear_model_loss.base_loss.n_classes
        coef.flat[:] = rng.uniform(
            low=coef_bound[0],
            high=coef_bound[1],
            size=n_classes * n_dof,
        )
        if linear_model_loss.fit_intercept:
            raw_prediction = X @ coef[:, :-1].T + coef[:, -1]
        else:
            raw_prediction = X @ coef.T
        proba = linear_model_loss.base_loss.link.inverse(raw_prediction)

        # y = rng.choice(np.arange(n_classes), p=proba) does not work.
        # See https://stackoverflow.com/a/34190035/16761084
        def choice_vectorized(items, p):
            # Sample one class per row by inverting the per-row CDF.
            s = p.cumsum(axis=1)
            r = rng.rand(p.shape[0])[:, None]
            k = (s < r).sum(axis=1)
            return items[k]

        y = choice_vectorized(np.arange(n_classes), p=proba).astype(np.float64)
    else:
        coef.flat[:] = rng.uniform(
            low=coef_bound[0],
            high=coef_bound[1],
            size=n_dof,
        )
        if linear_model_loss.fit_intercept:
            raw_prediction = X @ coef[:-1] + coef[-1]
        else:
            raw_prediction = X @ coef
        # Perturb the raw prediction before applying the inverse link so that
        # y stays in the valid range of the loss but is not a perfect fit.
        y = linear_model_loss.base_loss.link.inverse(
            raw_prediction + rng.uniform(low=-1, high=1, size=n_samples)
        )

    return X, y, coef
@pytest.mark.parametrize("base_loss", LOSSES)
@pytest.mark.parametrize("fit_intercept", [False, True])
@pytest.mark.parametrize("n_features", [0, 1, 10])
@pytest.mark.parametrize("dtype", [None, np.float32, np.float64, np.int64])
def test_init_zero_coef(
    base_loss, fit_intercept, n_features, dtype, global_random_seed
):
    """Test that init_zero_coef initializes coef correctly.

    Checks shape (with/without intercept column), memory layout for the
    multiclass case, dtype handling and that all entries are zero.
    """
    loss = LinearModelLoss(base_loss=base_loss(), fit_intercept=fit_intercept)
    rng = np.random.RandomState(global_random_seed)
    X = rng.normal(size=(5, n_features))
    coef = loss.init_zero_coef(X, dtype=dtype)
    if loss.base_loss.is_multiclass:
        n_classes = loss.base_loss.n_classes
        # One row per class; the extra column (if any) is the intercept.
        assert coef.shape == (n_classes, n_features + fit_intercept)
        assert coef.flags["F_CONTIGUOUS"]
    else:
        assert coef.shape == (n_features + fit_intercept,)

    if dtype is None:
        # Without an explicit dtype, coef inherits X's dtype.
        assert coef.dtype == X.dtype
    else:
        assert coef.dtype == dtype

    assert np.count_nonzero(coef) == 0
@pytest.mark.parametrize("base_loss", LOSSES)
@pytest.mark.parametrize("fit_intercept", [False, True])
@pytest.mark.parametrize("sample_weight", [None, "range"])
@pytest.mark.parametrize("l2_reg_strength", [0, 1])
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_loss_grad_hess_are_the_same(
    base_loss,
    fit_intercept,
    sample_weight,
    l2_reg_strength,
    csr_container,
    global_random_seed,
):
    """Test that loss and gradient are the same across different functions.

    Cross-checks loss / gradient / loss_gradient / gradient_hessian_product /
    gradient_hessian against each other, for dense and sparse X, and verifies
    that the out-parameters are used and the inputs are left unmodified.
    """
    loss = LinearModelLoss(base_loss=base_loss(), fit_intercept=fit_intercept)
    X, y, coef = random_X_y_coef(
        linear_model_loss=loss, n_samples=10, n_features=5, seed=global_random_seed
    )
    # Keep copies to verify the API does not mutate its inputs.
    X_old, y_old, coef_old = X.copy(), y.copy(), coef.copy()

    if sample_weight == "range":
        sample_weight = np.linspace(1, y.shape[0], num=y.shape[0])

    l1 = loss.loss(
        coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
    )
    g1 = loss.gradient(
        coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
    )
    l2, g2 = loss.loss_gradient(
        coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
    )
    g3, h3 = loss.gradient_hessian_product(
        coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
    )
    g4, h4, _ = loss.gradient_hessian(
        coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
    )
    assert_allclose(l1, l2)
    assert_allclose(g1, g2)
    assert_allclose(g1, g3)
    assert_allclose(g1, g4)
    # Compare the full hessian h4 with the hessian-vector product h3.
    # The ravelling only takes effect for multiclass.
    assert_allclose(h4 @ g4.ravel(order="F"), h3(g3).ravel(order="F"))

    # Test that gradient_out and hessian_out are considered properly.
    g_out = np.empty_like(coef)
    h_out = np.empty_like(coef, shape=(coef.size, coef.size))
    g5, h5, _ = loss.gradient_hessian(
        coef,
        X,
        y,
        sample_weight=sample_weight,
        l2_reg_strength=l2_reg_strength,
        gradient_out=g_out,
        hessian_out=h_out,
    )
    # Results must be written into the provided buffers, not new arrays.
    assert np.shares_memory(g5, g_out)
    assert np.shares_memory(h5, h_out)
    assert_allclose(g5, g_out)
    assert_allclose(h5, h_out)
    assert_allclose(g1, g5)
    assert_allclose(h5, h4)

    # same for sparse X
    Xs = csr_container(X)
    l1_sp = loss.loss(
        coef, Xs, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
    )
    g1_sp = loss.gradient(
        coef, Xs, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
    )
    l2_sp, g2_sp = loss.loss_gradient(
        coef, Xs, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
    )
    g3_sp, h3_sp = loss.gradient_hessian_product(
        coef, Xs, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
    )
    g4_sp, h4_sp, _ = loss.gradient_hessian(
        coef, Xs, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
    )
    # Sparse results must match the dense reference values.
    assert_allclose(l1, l1_sp)
    assert_allclose(l1, l2_sp)
    assert_allclose(g1, g1_sp)
    assert_allclose(g1, g2_sp)
    assert_allclose(g1, g3_sp)
    assert_allclose(h3(g1), h3_sp(g1_sp))
    assert_allclose(g1, g4_sp)
    assert_allclose(h4, h4_sp)

    # X, y and coef should not have changed
    assert_allclose(X, X_old)
    assert_allclose(Xs.toarray(), X_old)
    assert_allclose(y, y_old)
    assert_allclose(coef, coef_old)
@pytest.mark.parametrize("base_loss", LOSSES)
@pytest.mark.parametrize("sample_weight", [None, "range"])
@pytest.mark.parametrize("l2_reg_strength", [0, 1])
@pytest.mark.parametrize("X_container", CSR_CONTAINERS + [None])
def test_loss_gradients_hessp_intercept(
    base_loss, sample_weight, l2_reg_strength, X_container, global_random_seed
):
    """Test that loss and gradient handle intercept correctly.

    Fitting without intercept on X whose last column is all ones must match
    fitting with intercept on X without that column, up to the L2 penalty
    which applies to the fake "intercept" feature but never to a true
    intercept.
    """
    loss = LinearModelLoss(base_loss=base_loss(), fit_intercept=False)
    loss_inter = LinearModelLoss(base_loss=base_loss(), fit_intercept=True)
    n_samples, n_features = 10, 5
    X, y, coef = random_X_y_coef(
        linear_model_loss=loss,
        n_samples=n_samples,
        n_features=n_features,
        seed=global_random_seed,
    )

    X[:, -1] = 1  # make last column of 1 to mimic intercept term
    X_inter = X[
        :, :-1
    ]  # exclude intercept column as it is added automatically by loss_inter

    if X_container is not None:
        X = X_container(X)

    if sample_weight == "range":
        sample_weight = np.linspace(1, y.shape[0], num=y.shape[0])

    l, g = loss.loss_gradient(
        coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
    )
    _, hessp = loss.gradient_hessian_product(
        coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
    )
    l_inter, g_inter = loss_inter.loss_gradient(
        coef, X_inter, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
    )
    _, hessp_inter = loss_inter.gradient_hessian_product(
        coef, X_inter, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
    )

    # Note, that intercept gets no L2 penalty.
    assert l == pytest.approx(
        l_inter + 0.5 * l2_reg_strength * squared_norm(coef.T[-1])
    )

    # Add the missing penalty gradient on the last coefficient before comparing.
    g_inter_corrected = g_inter
    g_inter_corrected.T[-1] += l2_reg_strength * coef.T[-1]
    assert_allclose(g, g_inter_corrected)

    s = np.random.RandomState(global_random_seed).randn(*coef.shape)
    h = hessp(s)
    h_inter = hessp_inter(s)
    h_inter_corrected = h_inter
    h_inter_corrected.T[-1] += l2_reg_strength * s.T[-1]
    assert_allclose(h, h_inter_corrected)
@pytest.mark.parametrize("base_loss", LOSSES)
@pytest.mark.parametrize("fit_intercept", [False, True])
@pytest.mark.parametrize("sample_weight", [None, "range"])
@pytest.mark.parametrize("l2_reg_strength", [0, 1])
def test_gradients_hessians_numerically(
    base_loss, fit_intercept, sample_weight, l2_reg_strength, global_random_seed
):
    """Test gradients and hessians with numerical derivatives.

    Gradient should equal the numerical derivatives of the loss function.
    Hessians should equal the numerical derivatives of gradients.
    """
    loss = LinearModelLoss(base_loss=base_loss(), fit_intercept=fit_intercept)
    n_samples, n_features = 10, 5
    X, y, coef = random_X_y_coef(
        linear_model_loss=loss,
        n_samples=n_samples,
        n_features=n_features,
        seed=global_random_seed,
    )
    coef = coef.ravel(order="F")  # this is important only for multinomial loss

    if sample_weight == "range":
        sample_weight = np.linspace(1, y.shape[0], num=y.shape[0])

    # 1. Check gradients numerically
    eps = 1e-6
    g, hessp = loss.gradient_hessian_product(
        coef, X, y, sample_weight=sample_weight, l2_reg_strength=l2_reg_strength
    )
    # Use a trick to get central finite difference of accuracy 4 (five-point stencil)
    # https://en.wikipedia.org/wiki/Numerical_differentiation
    # https://en.wikipedia.org/wiki/Finite_difference_coefficient
    # approx_g1 = (f(x + eps) - f(x - eps)) / (2*eps)
    approx_g1 = optimize.approx_fprime(
        coef,
        lambda coef: loss.loss(
            coef - eps,
            X,
            y,
            sample_weight=sample_weight,
            l2_reg_strength=l2_reg_strength,
        ),
        2 * eps,
    )
    # approx_g2 = (f(x + 2*eps) - f(x - 2*eps)) / (4*eps)
    approx_g2 = optimize.approx_fprime(
        coef,
        lambda coef: loss.loss(
            coef - 2 * eps,
            X,
            y,
            sample_weight=sample_weight,
            l2_reg_strength=l2_reg_strength,
        ),
        4 * eps,
    )
    # Five-point stencil approximation
    # See: https://en.wikipedia.org/wiki/Five-point_stencil#1D_first_derivative
    approx_g = (4 * approx_g1 - approx_g2) / 3
    assert_allclose(g, approx_g, rtol=1e-2, atol=1e-8)

    # 2. Check hessp numerically along the second direction of the gradient
    vector = np.zeros_like(g)
    vector[1] = 1
    hess_col = hessp(vector)
    # Computation of the Hessian is particularly fragile to numerical errors when doing
    # simple finite differences. Here we compute the grad along a path in the direction
    # of the vector and then use a least-square regression to estimate the slope
    eps = 1e-3
    d_x = np.linspace(-eps, eps, 30)
    d_grad = np.array(
        [
            loss.gradient(
                coef + t * vector,
                X,
                y,
                sample_weight=sample_weight,
                l2_reg_strength=l2_reg_strength,
            )
            for t in d_x
        ]
    )
    # Center the gradients so the regression has no intercept term.
    d_grad -= d_grad.mean(axis=0)
    approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
    assert_allclose(approx_hess_col, hess_col, rtol=1e-3)
@pytest.mark.parametrize("fit_intercept", [False, True])
def test_multinomial_coef_shape(fit_intercept, global_random_seed):
    """Test that multinomial LinearModelLoss respects shape of coef.

    Both the 2d (n_classes, n_dof) form and its F-order raveled 1d form must
    be accepted and give consistent results.
    """
    loss = LinearModelLoss(base_loss=HalfMultinomialLoss(), fit_intercept=fit_intercept)
    n_samples, n_features = 10, 5
    X, y, coef = random_X_y_coef(
        linear_model_loss=loss,
        n_samples=n_samples,
        n_features=n_features,
        seed=global_random_seed,
    )
    s = np.random.RandomState(global_random_seed).randn(*coef.shape)

    l, g = loss.loss_gradient(coef, X, y)
    g1 = loss.gradient(coef, X, y)
    g2, hessp = loss.gradient_hessian_product(coef, X, y)
    h = hessp(s)
    # Gradients and hessian products keep the 2d shape of coef.
    assert g.shape == coef.shape
    assert h.shape == coef.shape
    assert_allclose(g, g1)
    assert_allclose(g, g2)

    g3, hess, _ = loss.gradient_hessian(coef, X, y)
    assert g3.shape == coef.shape
    # But full hessian is always 2d.
    assert hess.shape == (coef.size, coef.size)

    coef_r = coef.ravel(order="F")
    s_r = s.ravel(order="F")
    l_r, g_r = loss.loss_gradient(coef_r, X, y)
    g1_r = loss.gradient(coef_r, X, y)
    g2_r, hessp_r = loss.gradient_hessian_product(coef_r, X, y)
    h_r = hessp_r(s_r)
    # With a raveled coef, results come back raveled as well.
    assert g_r.shape == coef_r.shape
    assert h_r.shape == coef_r.shape
    assert_allclose(g_r, g1_r)
    assert_allclose(g_r, g2_r)

    # 2d and raveled results must agree after reshaping.
    assert_allclose(g, g_r.reshape(loss.base_loss.n_classes, -1, order="F"))
    assert_allclose(h, h_r.reshape(loss.base_loss.n_classes, -1, order="F"))
@pytest.mark.parametrize("sample_weight", [None, "range"])
def test_multinomial_hessian_3_classes(sample_weight, global_random_seed):
    """Test multinomial hessian for 3 classes and 2 points.

    For n_classes = 3 and n_samples = 2, we have
        p0 = [p0_0, p0_1]
        p1 = [p1_0, p1_1]
        p2 = [p2_0, p2_1]
    and with 2 x 2 diagonal subblocks
        H = [p0 * (1-p0),     -p0 * p1,    -p0 * p2]
            [   -p0 * p1,  p1 * (1-p1),    -p1 * p2]
            [   -p0 * p2,     -p1 * p2, p2 * (1-p2)]
        hess = X' H X
    """
    n_samples, n_features, n_classes = 2, 5, 3
    loss = LinearModelLoss(
        base_loss=HalfMultinomialLoss(n_classes=n_classes), fit_intercept=False
    )
    X, y, coef = random_X_y_coef(
        linear_model_loss=loss,
        n_samples=n_samples,
        n_features=n_features,
        seed=global_random_seed,
    )
    coef = coef.ravel(order="F")  # this is important only for multinomial loss

    if sample_weight == "range":
        sample_weight = np.linspace(1, y.shape[0], num=y.shape[0])

    grad, hess, _ = loss.gradient_hessian(
        coef,
        X,
        y,
        sample_weight=sample_weight,
        l2_reg_strength=0,
    )
    # Hessian must be a symmetric matrix.
    assert_allclose(hess, hess.T)

    weights, intercept, raw_prediction = loss.weight_intercept_raw(coef, X)
    grad_pointwise, proba = loss.base_loss.gradient_proba(
        y_true=y,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
    )

    # Build the reference Hessian explicitly from the class probabilities.
    p0d, p1d, p2d, oned = (
        np.diag(proba[:, 0]),
        np.diag(proba[:, 1]),
        np.diag(proba[:, 2]),
        np.diag(np.ones(2)),
    )
    h = np.block(
        [
            [p0d * (oned - p0d), -p0d * p1d, -p0d * p2d],
            [-p0d * p1d, p1d * (oned - p1d), -p1d * p2d],
            [-p0d * p2d, -p1d * p2d, p2d * (oned - p2d)],
        ]
    )
    h = h.reshape((n_classes, n_samples, n_classes, n_samples))
    if sample_weight is None:
        h /= n_samples
    else:
        h *= sample_weight / np.sum(sample_weight)
    # hess_expected.shape = (n_features, n_classes, n_classes, n_features)
    hess_expected = np.einsum("ij, mini, ik->jmnk", X, h, X)
    hess_expected = np.moveaxis(hess_expected, 2, 3)
    hess_expected = hess_expected.reshape(
        n_classes * n_features, n_classes * n_features, order="C"
    )
    assert_allclose(hess_expected, hess_expected.T)
    assert_allclose(hess, hess_expected)
def test_linear_loss_gradient_hessian_raises_wrong_out_parameters():
    """Test that wrong gradient_out and hessian_out raises errors.

    Covers wrong-shape buffers for the binomial case and non-contiguous
    buffers for the multinomial case.
    """
    n_samples, n_features, n_classes = 5, 2, 3
    loss = LinearModelLoss(base_loss=HalfBinomialLoss(), fit_intercept=False)
    X = np.ones((n_samples, n_features))
    y = np.ones(n_samples)
    coef = loss.init_zero_coef(X)

    # Buffer with the wrong shape must be rejected.
    gradient_out = np.zeros(1)
    with pytest.raises(
        ValueError, match="gradient_out is required to have shape coef.shape"
    ):
        loss.gradient_hessian(
            coef=coef,
            X=X,
            y=y,
            gradient_out=gradient_out,
            hessian_out=None,
        )
    hessian_out = np.zeros(1)
    with pytest.raises(ValueError, match="hessian_out is required to have shape"):
        loss.gradient_hessian(
            coef=coef,
            X=X,
            y=y,
            gradient_out=None,
            hessian_out=hessian_out,
        )

    loss = LinearModelLoss(base_loss=HalfMultinomialLoss(), fit_intercept=False)
    coef = loss.init_zero_coef(X)
    # Slicing with a step produces non-contiguous arrays, which the
    # multinomial path must reject.
    gradient_out = np.zeros((2 * n_classes, n_features))[::2]
    with pytest.raises(ValueError, match="gradient_out must be F-contiguous"):
        loss.gradient_hessian(
            coef=coef,
            X=X,
            y=y,
            gradient_out=gradient_out,
        )
    hessian_out = np.zeros((2 * n_classes * n_features, n_classes * n_features))[::2]
    with pytest.raises(ValueError, match="hessian_out must be contiguous"):
        loss.gradient_hessian(
            coef=coef,
            X=X,
            y=y,
            gradient_out=None,
            hessian_out=hessian_out,
        )
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/tests/test_sag.py | sklearn/linear_model/tests/test_sag.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import math
import re
import numpy as np
import pytest
from sklearn.base import clone
from sklearn.datasets import load_iris, make_blobs, make_classification
from sklearn.linear_model import LogisticRegression, Ridge
from sklearn.linear_model._sag import get_auto_step_size
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import check_random_state, compute_class_weight
from sklearn.utils._testing import (
assert_allclose,
assert_almost_equal,
assert_array_almost_equal,
)
from sklearn.utils.extmath import row_norms
from sklearn.utils.fixes import CSR_CONTAINERS
# Module-level dataset, loaded once at import time for the sag tests below.
iris = load_iris()
# this is used for sag classification
def log_dloss(p, y):
    """Derivative of the logistic loss w.r.t. the raw prediction ``p``.

    Uses asymptotic approximations for large |p * y| so that ``exp`` never
    overflows; ``y`` is expected in {-1, +1}.
    """
    margin = p * y
    if margin > 18.0:
        # 1 / (exp(margin) + 1) ~= exp(-margin) for large margins.
        return -y * math.exp(-margin)
    if margin < -18.0:
        # exp(margin) is negligible compared to 1.
        return -y
    return -y / (1.0 + math.exp(margin))
def log_loss(p, y):
    """Mean logistic loss, log(1 + exp(-y * p)) averaged over samples."""
    margins = -y * p
    return np.log(1.0 + np.exp(margins)).mean()
# this is used for sag regression
def squared_dloss(p, y):
    """Derivative of the squared loss 0.5 * (p - y)**2 w.r.t. the prediction."""
    residual = p - y
    return residual
def squared_loss(p, y):
    """Mean squared-error loss, 0.5 * (p - y)**2 averaged over samples."""
    residual = p - y
    return np.mean(0.5 * residual * residual)
# function for measuring the log loss
def get_pobj(w, alpha, myX, myy, loss):
w = w.ravel()
pred = np.dot(myX, w)
p = loss(pred, myy)
p += alpha * w.dot(w) / 2.0
return p
def sag(
    X,
    y,
    step_size,
    alpha,
    n_iter=1,
    dloss=None,
    sparse=False,
    sample_weight=None,
    fit_intercept=True,
    saga=False,
):
    """Pure-Python reference implementation of the dense SAG/SAGA update.

    Stores one full gradient per sample in ``gradient_memory`` and returns
    the learned ``(weights, intercept)`` so tests can compare against the
    Cython solver used by the scikit-learn estimators.
    """
    n_samples, n_features = X.shape[0], X.shape[1]

    weights = np.zeros(X.shape[1])
    sum_gradient = np.zeros(X.shape[1])
    gradient_memory = np.zeros((n_samples, n_features))

    intercept = 0.0
    intercept_sum_gradient = 0.0
    intercept_gradient_memory = np.zeros(n_samples)

    # Fixed seed so the sampling order matches across calls in the tests.
    rng = np.random.RandomState(77)
    decay = 1.0
    seen = set()

    # sparse data has a fixed decay of .01
    if sparse:
        decay = 0.01

    for epoch in range(n_iter):
        for k in range(n_samples):
            # Sample one index uniformly at random per inner step.
            idx = int(rng.rand() * n_samples)
            # idx = k
            entry = X[idx]
            seen.add(idx)
            p = np.dot(entry, weights) + intercept
            gradient = dloss(p, y[idx])
            if sample_weight is not None:
                gradient *= sample_weight[idx]
            # Regularized per-sample gradient; replace the memorized one.
            update = entry * gradient + alpha * weights
            gradient_correction = update - gradient_memory[idx]
            sum_gradient += gradient_correction
            gradient_memory[idx] = update
            if saga:
                # SAGA applies the (unbiased) correction term directly.
                weights -= gradient_correction * step_size * (1 - 1.0 / len(seen))

            if fit_intercept:
                gradient_correction = gradient - intercept_gradient_memory[idx]
                intercept_gradient_memory[idx] = gradient
                intercept_sum_gradient += gradient_correction
                gradient_correction *= step_size * (1.0 - 1.0 / len(seen))
                if saga:
                    intercept -= (
                        step_size * intercept_sum_gradient / len(seen) * decay
                    ) + gradient_correction
                else:
                    intercept -= step_size * intercept_sum_gradient / len(seen) * decay

            # Average-gradient step over the samples seen so far.
            weights -= step_size * sum_gradient / len(seen)

    return weights, intercept
def sag_sparse(
    X,
    y,
    step_size,
    alpha,
    n_iter=1,
    dloss=None,
    sample_weight=None,
    sparse=False,
    fit_intercept=True,
    saga=False,
    random_state=0,
):
    """Reference SAG/SAGA with lazy (just-in-time) weight updates.

    Mirrors the sparse-friendly variant of the solver: the L2 shrinkage is
    kept in the scalar ``wscale`` and per-feature updates are deferred via
    the cumulative sums in ``c_sum`` / ``last_updated`` until a feature is
    touched again (or until the final sweep).
    """
    if step_size * alpha == 1.0:
        # wscale would hit exactly zero and the c_sum bookkeeping divides by it.
        raise ZeroDivisionError(
            "Sparse sag does not handle the case step_size * alpha == 1"
        )
    n_samples, n_features = X.shape[0], X.shape[1]

    weights = np.zeros(n_features)
    sum_gradient = np.zeros(n_features)
    last_updated = np.zeros(n_features, dtype=int)
    gradient_memory = np.zeros(n_samples)
    rng = check_random_state(random_state)
    intercept = 0.0
    intercept_sum_gradient = 0.0
    wscale = 1.0
    decay = 1.0
    seen = set()

    c_sum = np.zeros(n_iter * n_samples)

    # sparse data has a fixed decay of .01
    if sparse:
        decay = 0.01

    counter = 0
    for epoch in range(n_iter):
        for k in range(n_samples):
            # idx = k
            idx = int(rng.rand() * n_samples)
            entry = X[idx]
            seen.add(idx)

            if counter >= 1:
                # Catch up every feature with the deferred average-gradient
                # steps accumulated since it was last updated.
                for j in range(n_features):
                    if last_updated[j] == 0:
                        weights[j] -= c_sum[counter - 1] * sum_gradient[j]
                    else:
                        weights[j] -= (
                            c_sum[counter - 1] - c_sum[last_updated[j] - 1]
                        ) * sum_gradient[j]
                    last_updated[j] = counter

            p = (wscale * np.dot(entry, weights)) + intercept
            gradient = dloss(p, y[idx])

            if sample_weight is not None:
                gradient *= sample_weight[idx]

            update = entry * gradient
            gradient_correction = update - (gradient_memory[idx] * entry)
            sum_gradient += gradient_correction
            if saga:
                for j in range(n_features):
                    # Divide by wscale because weights are stored unscaled.
                    weights[j] -= (
                        gradient_correction[j]
                        * step_size
                        * (1 - 1.0 / len(seen))
                        / wscale
                    )

            if fit_intercept:
                gradient_correction = gradient - gradient_memory[idx]
                intercept_sum_gradient += gradient_correction
                gradient_correction *= step_size * (1.0 - 1.0 / len(seen))
                if saga:
                    intercept -= (
                        step_size * intercept_sum_gradient / len(seen) * decay
                    ) + gradient_correction
                else:
                    intercept -= step_size * intercept_sum_gradient / len(seen) * decay

            gradient_memory[idx] = gradient

            # Apply the L2 shrinkage lazily through the scalar scale.
            wscale *= 1.0 - alpha * step_size
            if counter == 0:
                c_sum[0] = step_size / (wscale * len(seen))
            else:
                c_sum[counter] = c_sum[counter - 1] + step_size / (wscale * len(seen))

            if counter >= 1 and wscale < 1e-9:
                # Numerical rescue: materialize pending updates and reset the
                # scale before it underflows.
                for j in range(n_features):
                    if last_updated[j] == 0:
                        weights[j] -= c_sum[counter] * sum_gradient[j]
                    else:
                        weights[j] -= (
                            c_sum[counter] - c_sum[last_updated[j] - 1]
                        ) * sum_gradient[j]
                    last_updated[j] = counter + 1
                c_sum[counter] = 0
                weights *= wscale
                wscale = 1.0

            counter += 1

    # Final sweep: flush all still-deferred updates and undo the scaling.
    for j in range(n_features):
        if last_updated[j] == 0:
            weights[j] -= c_sum[counter - 1] * sum_gradient[j]
        else:
            weights[j] -= (
                c_sum[counter - 1] - c_sum[last_updated[j] - 1]
            ) * sum_gradient[j]
    weights *= wscale
    return weights, intercept
def get_step_size(X, alpha, fit_intercept, classification=True):
    """Reference SAG step size: inverse of the max per-sample smoothness constant."""
    max_row_sq_norm = np.max(np.sum(X * X, axis=1))
    if classification:
        # Log loss has Lipschitz constant ||x||^2 / 4 (+ intercept + alpha).
        return 4.0 / (max_row_sq_norm + fit_intercept + 4.0 * alpha)
    return 1.0 / (max_row_sq_norm + fit_intercept + alpha)
def test_classifier_matching():
    """LogisticRegression with sag/saga must match both Python reference solvers."""
    n_samples = 20
    X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0, cluster_std=0.1)
    # y must be 0 or 1
    alpha = 1.1
    fit_intercept = True
    step_size = get_step_size(X, alpha, fit_intercept)
    for solver in ["sag", "saga"]:
        if solver == "sag":
            n_iter = 80
        else:
            # SAGA variance w.r.t. stream order is higher
            n_iter = 300
        clf = LogisticRegression(
            solver=solver,
            fit_intercept=fit_intercept,
            tol=1e-11,
            # C and alpha parameterize the same penalty on different scales.
            C=1.0 / alpha / n_samples,
            max_iter=n_iter,
            random_state=10,
        )
        clf.fit(X, y)

        weights, intercept = sag_sparse(
            X,
            2 * y - 1,  # y must be -1 or +1
            step_size,
            alpha,
            n_iter=n_iter,
            dloss=log_dloss,
            fit_intercept=fit_intercept,
            saga=solver == "saga",
        )
        weights2, intercept2 = sag(
            X,
            2 * y - 1,  # y must be -1 or +1
            step_size,
            alpha,
            n_iter=n_iter,
            dloss=log_dloss,
            fit_intercept=fit_intercept,
            saga=solver == "saga",
        )
        # Match coef_/intercept_ shapes (2d coef, 1d intercept) for comparison.
        weights = np.atleast_2d(weights)
        intercept = np.atleast_1d(intercept)
        weights2 = np.atleast_2d(weights2)
        intercept2 = np.atleast_1d(intercept2)

        assert_array_almost_equal(weights, clf.coef_, decimal=9)
        assert_array_almost_equal(intercept, clf.intercept_, decimal=9)
        assert_array_almost_equal(weights2, clf.coef_, decimal=9)
        assert_array_almost_equal(intercept2, clf.intercept_, decimal=9)
def test_regressor_matching():
    """Ridge(solver='sag') must match both Python reference SAG implementations."""
    n_samples = 10
    n_features = 5

    rng = np.random.RandomState(10)
    X = rng.normal(size=(n_samples, n_features))
    true_w = rng.normal(size=n_features)
    # Noiseless linear target so all solvers can agree to high precision.
    y = X.dot(true_w)

    alpha = 1.0
    n_iter = 100
    fit_intercept = True

    step_size = get_step_size(X, alpha, fit_intercept, classification=False)
    # Ridge's alpha is not scaled by n_samples, hence the multiplication.
    clf = Ridge(
        fit_intercept=fit_intercept,
        tol=0.00000000001,
        solver="sag",
        alpha=alpha * n_samples,
        max_iter=n_iter,
    )
    clf.fit(X, y)

    weights1, intercept1 = sag_sparse(
        X,
        y,
        step_size,
        alpha,
        n_iter=n_iter,
        dloss=squared_dloss,
        fit_intercept=fit_intercept,
    )
    weights2, intercept2 = sag(
        X,
        y,
        step_size,
        alpha,
        n_iter=n_iter,
        dloss=squared_dloss,
        fit_intercept=fit_intercept,
    )

    assert_allclose(weights1, clf.coef_)
    assert_allclose(intercept1, clf.intercept_)
    assert_allclose(weights2, clf.coef_)
    assert_allclose(intercept2, clf.intercept_)
@pytest.mark.filterwarnings("ignore:The max_iter was reached")
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_sag_pobj_matches_logistic_regression(csr_container):
    """tests if the sag pobj matches log reg

    The primal objective reached by sag (dense and sparse input) should match
    the one reached by the default LogisticRegression solver.
    """
    n_samples = 100
    alpha = 1.0
    max_iter = 20
    X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0, cluster_std=0.1)

    clf1 = LogisticRegression(
        solver="sag",
        fit_intercept=False,
        tol=0.0000001,
        C=1.0 / alpha / n_samples,
        max_iter=max_iter,
        random_state=10,
    )
    clf2 = clone(clf1)
    # clf3 uses the default (non-sag) solver as reference.
    clf3 = LogisticRegression(
        fit_intercept=False,
        tol=0.0000001,
        C=1.0 / alpha / n_samples,
        max_iter=max_iter,
        random_state=10,
    )

    clf1.fit(X, y)
    clf2.fit(csr_container(X), y)
    clf3.fit(X, y)

    pobj1 = get_pobj(clf1.coef_, alpha, X, y, log_loss)
    pobj2 = get_pobj(clf2.coef_, alpha, X, y, log_loss)
    pobj3 = get_pobj(clf3.coef_, alpha, X, y, log_loss)
    assert_array_almost_equal(pobj1, pobj2, decimal=4)
    assert_array_almost_equal(pobj2, pobj3, decimal=4)
    assert_array_almost_equal(pobj3, pobj1, decimal=4)
@pytest.mark.filterwarnings("ignore:The max_iter was reached")
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_sag_pobj_matches_ridge_regression(csr_container):
    """tests if the sag pobj matches ridge reg

    The primal objective reached by sag (dense and sparse input) should match
    the one reached by the lsqr solver on the same ridge problem.
    """
    n_samples = 100
    n_features = 10
    alpha = 1.0
    n_iter = 100
    fit_intercept = False
    rng = np.random.RandomState(10)
    X = rng.normal(size=(n_samples, n_features))
    true_w = rng.normal(size=n_features)
    y = X.dot(true_w)

    clf1 = Ridge(
        fit_intercept=fit_intercept,
        tol=0.00000000001,
        solver="sag",
        alpha=alpha,
        max_iter=n_iter,
        random_state=42,
    )
    clf2 = clone(clf1)
    # Exact-ish reference solver for the same objective.
    clf3 = Ridge(
        fit_intercept=fit_intercept,
        tol=0.00001,
        solver="lsqr",
        alpha=alpha,
        max_iter=n_iter,
        random_state=42,
    )

    clf1.fit(X, y)
    clf2.fit(csr_container(X), y)
    clf3.fit(X, y)

    pobj1 = get_pobj(clf1.coef_, alpha, X, y, squared_loss)
    pobj2 = get_pobj(clf2.coef_, alpha, X, y, squared_loss)
    pobj3 = get_pobj(clf3.coef_, alpha, X, y, squared_loss)
    assert_array_almost_equal(pobj1, pobj2, decimal=4)
    assert_array_almost_equal(pobj1, pobj3, decimal=4)
    assert_array_almost_equal(pobj3, pobj2, decimal=4)
@pytest.mark.filterwarnings("ignore:The max_iter was reached")
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_sag_regressor_computed_correctly(csr_container):
    """tests if the sag regressor is computed correctly"""
    alpha = 0.1
    n_features = 10
    n_samples = 40
    max_iter = 100
    tol = 0.000001
    fit_intercept = True
    rng = np.random.RandomState(0)
    X = rng.normal(size=(n_samples, n_features))
    w = rng.normal(size=n_features)
    # Linear target with a constant offset of 2 (picked up by the intercept).
    y = np.dot(X, w) + 2.0
    step_size = get_step_size(X, alpha, fit_intercept, classification=False)

    clf1 = Ridge(
        fit_intercept=fit_intercept,
        tol=tol,
        solver="sag",
        alpha=alpha * n_samples,
        max_iter=max_iter,
        random_state=rng,
    )
    clf2 = clone(clf1)

    clf1.fit(X, y)
    clf2.fit(csr_container(X), y)

    spweights1, spintercept1 = sag_sparse(
        X,
        y,
        step_size,
        alpha,
        n_iter=max_iter,
        dloss=squared_dloss,
        fit_intercept=fit_intercept,
        random_state=rng,
    )

    spweights2, spintercept2 = sag_sparse(
        X,
        y,
        step_size,
        alpha,
        n_iter=max_iter,
        dloss=squared_dloss,
        sparse=True,
        fit_intercept=fit_intercept,
        random_state=rng,
    )

    assert_array_almost_equal(clf1.coef_.ravel(), spweights1.ravel(), decimal=3)
    assert_almost_equal(clf1.intercept_, spintercept1, decimal=1)

    # TODO: uncomment when sparse Ridge with intercept will be fixed (#4710)
    # assert_array_almost_equal(clf2.coef_.ravel(),
    #                           spweights2.ravel(),
    #                           decimal=3)
    # assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
def test_get_auto_step_size():
    """get_auto_step_size must match the closed-form SAG/SAGA step sizes
    for the squared and log losses, and reject unknown loss names."""
    X = np.array([[1, 2, 3], [2, 3, 4], [2, 3, 2]], dtype=np.float64)
    alpha = 1.2
    fit_intercept = False
    # sum the squares of the second sample because that's the largest
    max_squared_sum = 4 + 9 + 16
    max_squared_sum_ = row_norms(X, squared=True).max()
    n_samples = X.shape[0]
    assert_almost_equal(max_squared_sum, max_squared_sum_, decimal=4)

    for saga in [True, False]:
        for fit_intercept in (True, False):
            if saga:
                # SAGA step size uses the strong-convexity constant mu as well.
                L_sqr = max_squared_sum + alpha + int(fit_intercept)
                L_log = (max_squared_sum + 4.0 * alpha + int(fit_intercept)) / 4.0
                mun_sqr = min(2 * n_samples * alpha, L_sqr)
                mun_log = min(2 * n_samples * alpha, L_log)
                step_size_sqr = 1 / (2 * L_sqr + mun_sqr)
                step_size_log = 1 / (2 * L_log + mun_log)
            else:
                step_size_sqr = 1.0 / (max_squared_sum + alpha + int(fit_intercept))
                step_size_log = 4.0 / (
                    max_squared_sum + 4.0 * alpha + int(fit_intercept)
                )

            step_size_sqr_ = get_auto_step_size(
                max_squared_sum_,
                alpha,
                "squared",
                fit_intercept,
                n_samples=n_samples,
                is_saga=saga,
            )
            step_size_log_ = get_auto_step_size(
                max_squared_sum_,
                alpha,
                "log",
                fit_intercept,
                n_samples=n_samples,
                is_saga=saga,
            )

            assert_almost_equal(step_size_sqr, step_size_sqr_, decimal=4)
            assert_almost_equal(step_size_log, step_size_log_, decimal=4)

    msg = "Unknown loss function for SAG solver, got wrong instead of"
    with pytest.raises(ValueError, match=msg):
        get_auto_step_size(max_squared_sum_, alpha, "wrong", fit_intercept)
@pytest.mark.parametrize("seed", range(3))  # locally tested with 1000 seeds
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_sag_regressor(seed, csr_container):
    """tests if the sag regressor performs well

    Checks R^2 on a noiseless and a noisy linear problem, for dense and
    sparse input.
    """
    xmin, xmax = -5, 5
    n_samples = 300
    tol = 0.001
    max_iter = 100
    alpha = 0.1
    rng = np.random.RandomState(seed)
    X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)

    # simple linear function without noise
    y = 0.5 * X.ravel()

    clf1 = Ridge(
        tol=tol,
        solver="sag",
        max_iter=max_iter,
        alpha=alpha * n_samples,
        random_state=rng,
    )
    clf2 = clone(clf1)
    clf1.fit(X, y)
    clf2.fit(csr_container(X), y)
    score1 = clf1.score(X, y)
    score2 = clf2.score(X, y)
    assert score1 > 0.98
    assert score2 > 0.98

    # simple linear function with noise
    y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()

    clf1 = Ridge(
        tol=tol,
        solver="sag",
        max_iter=max_iter,
        alpha=alpha * n_samples,
        random_state=rng,
    )
    clf2 = clone(clf1)
    clf1.fit(X, y)
    clf2.fit(csr_container(X), y)
    score1 = clf1.score(X, y)
    score2 = clf2.score(X, y)
    # Looser bound because the noise dominates part of the variance.
    assert score1 > 0.45
    assert score2 > 0.45
@pytest.mark.filterwarnings("ignore:The max_iter was reached")
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_sag_classifier_computed_correctly(csr_container):
    """tests if the binary classifier is computed correctly"""
    alpha = 0.1
    n_samples = 50
    n_iter = 50
    tol = 0.00001
    fit_intercept = True
    X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0, cluster_std=0.1)
    step_size = get_step_size(X, alpha, fit_intercept, classification=True)
    classes = np.unique(y)
    # Re-encode the labels as -1/+1 for the reference implementation.
    y_tmp = np.ones(n_samples)
    y_tmp[y != classes[1]] = -1
    y = y_tmp

    clf1 = LogisticRegression(
        solver="sag",
        C=1.0 / alpha / n_samples,
        max_iter=n_iter,
        tol=tol,
        random_state=77,
        fit_intercept=fit_intercept,
    )
    clf2 = clone(clf1)

    clf1.fit(X, y)
    clf2.fit(csr_container(X), y)

    spweights, spintercept = sag_sparse(
        X,
        y,
        step_size,
        alpha,
        n_iter=n_iter,
        dloss=log_dloss,
        fit_intercept=fit_intercept,
    )
    spweights2, spintercept2 = sag_sparse(
        X,
        y,
        step_size,
        alpha,
        n_iter=n_iter,
        dloss=log_dloss,
        sparse=True,
        fit_intercept=fit_intercept,
    )

    assert_array_almost_equal(clf1.coef_.ravel(), spweights.ravel(), decimal=2)
    assert_almost_equal(clf1.intercept_, spintercept, decimal=1)

    assert_array_almost_equal(clf2.coef_.ravel(), spweights2.ravel(), decimal=2)
    assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@pytest.mark.filterwarnings("ignore:The max_iter was reached")
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_sag_multiclass_computed_correctly(csr_container):
    """tests if the multiclass classifier is computed correctly

    Compares a one-vs-rest LogisticRegression(sag) against per-class runs of
    the Python reference solver on -1/+1 encoded labels.
    """
    alpha = 0.1
    n_samples = 20
    tol = 1e-5
    max_iter = 70
    fit_intercept = True
    X, y = make_blobs(n_samples=n_samples, centers=3, random_state=0, cluster_std=0.1)
    step_size = get_step_size(X, alpha, fit_intercept, classification=True)
    classes = np.unique(y)

    clf1 = OneVsRestClassifier(
        LogisticRegression(
            solver="sag",
            C=1.0 / alpha / n_samples,
            max_iter=max_iter,
            tol=tol,
            random_state=77,
            fit_intercept=fit_intercept,
        )
    )
    clf2 = clone(clf1)

    clf1.fit(X, y)
    clf2.fit(csr_container(X), y)

    coef1 = []
    intercept1 = []
    coef2 = []
    intercept2 = []
    for cl in classes:
        # One binary -1/+1 problem per class, as OvR does internally.
        y_encoded = np.ones(n_samples)
        y_encoded[y != cl] = -1

        spweights1, spintercept1 = sag_sparse(
            X,
            y_encoded,
            step_size,
            alpha,
            dloss=log_dloss,
            n_iter=max_iter,
            fit_intercept=fit_intercept,
        )
        spweights2, spintercept2 = sag_sparse(
            X,
            y_encoded,
            step_size,
            alpha,
            dloss=log_dloss,
            n_iter=max_iter,
            sparse=True,
            fit_intercept=fit_intercept,
        )
        coef1.append(spweights1)
        intercept1.append(spintercept1)
        coef2.append(spweights2)
        intercept2.append(spintercept2)

    coef1 = np.vstack(coef1)
    intercept1 = np.array(intercept1)
    coef2 = np.vstack(coef2)
    intercept2 = np.array(intercept2)

    for i, cl in enumerate(classes):
        assert_allclose(clf1.estimators_[i].coef_.ravel(), coef1[i], rtol=1e-2)
        assert_allclose(clf1.estimators_[i].intercept_, intercept1[i], rtol=1e-1)

        assert_allclose(clf2.estimators_[i].coef_.ravel(), coef2[i], rtol=1e-2)
        # Note the very crude accuracy, i.e. high rtol.
        assert_allclose(clf2.estimators_[i].intercept_, intercept2[i], rtol=5e-1)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_classifier_results(csr_container):
"""tests if classifier results match target"""
alpha = 0.1
n_features = 20
n_samples = 10
tol = 0.01
max_iter = 200
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
y = np.dot(X, w)
y = np.sign(y)
clf1 = LogisticRegression(
solver="sag",
C=1.0 / alpha / n_samples,
max_iter=max_iter,
tol=tol,
random_state=77,
)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(csr_container(X), y)
pred1 = clf1.predict(X)
pred2 = clf2.predict(X)
assert_almost_equal(pred1, y, decimal=12)
assert_almost_equal(pred2, y, decimal=12)
@pytest.mark.filterwarnings("ignore:The max_iter was reached")
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_binary_classifier_class_weight(csr_container):
"""tests binary classifier with classweights for each class"""
alpha = 0.1
n_samples = 50
n_iter = 20
tol = 0.00001
fit_intercept = True
X, y = make_blobs(n_samples=n_samples, centers=2, random_state=10, cluster_std=0.1)
step_size = get_step_size(X, alpha, fit_intercept, classification=True)
classes = np.unique(y)
y_tmp = np.ones(n_samples)
y_tmp[y != classes[1]] = -1
y = y_tmp
class_weight = {1: 0.45, -1: 0.55}
clf1 = LogisticRegression(
solver="sag",
C=1.0 / alpha / n_samples,
max_iter=n_iter,
tol=tol,
random_state=77,
fit_intercept=fit_intercept,
class_weight=class_weight,
)
clf2 = clone(clf1)
clf1.fit(X, y)
clf2.fit(csr_container(X), y)
le = LabelEncoder()
class_weight_ = compute_class_weight(class_weight, classes=np.unique(y), y=y)
sample_weight = class_weight_[le.fit_transform(y)]
spweights, spintercept = sag_sparse(
X,
y,
step_size,
alpha,
n_iter=n_iter,
dloss=log_dloss,
sample_weight=sample_weight,
fit_intercept=fit_intercept,
)
spweights2, spintercept2 = sag_sparse(
X,
y,
step_size,
alpha,
n_iter=n_iter,
dloss=log_dloss,
sparse=True,
sample_weight=sample_weight,
fit_intercept=fit_intercept,
)
assert_array_almost_equal(clf1.coef_.ravel(), spweights.ravel(), decimal=2)
assert_almost_equal(clf1.intercept_, spintercept, decimal=1)
assert_array_almost_equal(clf2.coef_.ravel(), spweights2.ravel(), decimal=2)
assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
def test_classifier_single_class():
    """A target with a single class must raise an informative ValueError."""
    X = [[1, 2], [3, 4]]
    y = [1, 1]
    expected_msg = "This solver needs samples of at least 2 classes in the data"
    clf = LogisticRegression(solver="sag")
    with pytest.raises(ValueError, match=expected_msg):
        clf.fit(X, y)
def test_step_size_alpha_error():
    # The sag solver cannot handle step_size * alpha_scaled == 1 and is
    # expected to fail loudly in that degenerate configuration, for both
    # classification and regression estimators.
    X = [[0, 0], [0, 0]]
    y = [1, -1]
    fit_intercept = False
    alpha = 1.0
    msg = re.escape(
        "Current sag implementation does not handle the case"
        " step_size * alpha_scaled == 1"
    )

    log_reg = LogisticRegression(
        solver="sag", C=1.0 / alpha, fit_intercept=fit_intercept
    )
    with pytest.raises(ZeroDivisionError, match=msg):
        log_reg.fit(X, y)

    ridge = Ridge(fit_intercept=fit_intercept, solver="sag", alpha=alpha)
    with pytest.raises(ZeroDivisionError, match=msg):
        ridge.fit(X, y)
@pytest.mark.parametrize("solver", ["sag", "saga"])
def test_sag_classifier_raises_error(solver):
# Following #13316, the error handling behavior changed in cython sag. This
# is simply a non-regression test to make sure numerical errors are
# properly raised.
# Train a classifier on a simple problem
rng = np.random.RandomState(42)
X, y = make_classification(random_state=rng)
clf = LogisticRegression(solver=solver, random_state=rng, warm_start=True)
clf.fit(X, y)
# Trigger a numerical error by:
# - corrupting the fitted coefficients of the classifier
# - fit it again starting from its current state thanks to warm_start
clf.coef_[:] = np.nan
with pytest.raises(ValueError, match="Floating-point under-/overflow"):
clf.fit(X, y)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/tests/test_logistic.py | sklearn/linear_model/tests/test_logistic.py | import itertools
import os
import re
import warnings
import numpy as np
import pytest
from numpy.testing import (
assert_allclose,
assert_array_almost_equal,
assert_array_equal,
)
from scipy import sparse
from scipy.linalg import LinAlgWarning, svd
from sklearn import config_context
from sklearn._loss import HalfMultinomialLoss
from sklearn.base import clone
from sklearn.datasets import load_iris, make_classification, make_low_rank_matrix
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV, SGDClassifier
from sklearn.linear_model._logistic import (
_log_reg_scoring_path,
_logistic_regression_path,
)
from sklearn.metrics import brier_score_loss, get_scorer, log_loss, make_scorer
from sklearn.model_selection import (
GridSearchCV,
KFold,
LeaveOneGroupOut,
StratifiedKFold,
cross_val_score,
train_test_split,
)
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import LabelEncoder, StandardScaler, scale
from sklearn.svm import l1_min_c
from sklearn.utils import compute_class_weight, shuffle
from sklearn.utils._testing import ignore_warnings
from sklearn.utils.fixes import _IS_32BIT, COO_CONTAINERS, CSR_CONTAINERS
# Module-wide warning configuration. pytest reads ``pytestmark`` exactly once,
# so multiple marks must be collected in a single list: a second assignment
# would silently overwrite (and thereby disable) the first filter. The
# original code assigned ``pytestmark`` twice, dropping the rule that turns
# ConvergenceWarning into an error.
pytestmark = [
    pytest.mark.filterwarnings(
        "error::sklearn.exceptions.ConvergenceWarning:sklearn.*"
    ),
    # TODO(1.10): remove filterwarnings for l1_ratios after default changed.
    pytest.mark.filterwarnings(
        "ignore:The default value for l1_ratios.*:FutureWarning"
    ),
]

SOLVERS = ("lbfgs", "liblinear", "newton-cg", "newton-cholesky", "sag", "saga")

# Tiny deterministic toy problem shared by several tests below:
# binary target Y1 and 3-class target Y2 over the same 3-sample X.
X = [[-1, 0], [0, 1], [1, 1]]
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
def check_predictions(clf, X, y):
    """Fit ``clf`` on (X, y) and verify that it reproduces the labels.

    Checks the learned class set, the hard predictions, and that the
    predicted probabilities are normalized and consistent with them.
    """
    expected_classes = np.unique(y)
    n_samples, n_classes = len(y), expected_classes.shape[0]

    y_pred = clf.fit(X, y).predict(X)
    assert_array_equal(clf.classes_, expected_classes)
    assert y_pred.shape == (n_samples,)
    assert_array_equal(y_pred, y)

    proba = clf.predict_proba(X)
    assert proba.shape == (n_samples, n_classes)
    # Rows of predict_proba must sum to one and argmax must agree with y.
    assert_array_almost_equal(proba.sum(axis=1), np.ones(n_samples))
    assert_array_equal(proba.argmax(axis=1), y)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_predict_2_classes(csr_container):
# Simple sanity check on a 2 classes dataset
# Make sure it predicts the correct result on simple datasets.
check_predictions(LogisticRegression(), X, Y1)
check_predictions(LogisticRegression(), csr_container(X), Y1)
check_predictions(LogisticRegression(C=100), X, Y1)
check_predictions(LogisticRegression(C=100), csr_container(X), Y1)
check_predictions(LogisticRegression(fit_intercept=False), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False), csr_container(X), Y1)
def test_logistic_cv_mock_scorer():
    """Test that LogisticRegressionCV calls the scorer."""

    class MockScorer:
        # Deterministic scorer: cycles through a fixed list of scores and
        # counts how often it is invoked.
        def __init__(self):
            self.calls = 0
            self.scores = [0.1, 0.4, 0.8, 0.5]

        def __call__(self, model, X, y, sample_weight=None):
            score = self.scores[self.calls % len(self.scores)]
            self.calls += 1
            return score

    mock_scorer = MockScorer()
    Cs = [1, 2, 3, 4]
    cv = 2
    lr = LogisticRegressionCV(
        Cs=Cs,
        l1_ratios=(0,),  # TODO(1.10): remove with new default of l1_ratios
        scoring=mock_scorer,
        cv=cv,
        use_legacy_attributes=False,
    )
    X, y = make_classification(random_state=0)
    lr.fit(X, y)
    # Cs[2] has the highest score (0.8) from MockScorer
    assert lr.C_ == Cs[2]
    # scorer called 8 times (cv*len(Cs))
    assert mock_scorer.calls == cv * len(Cs)
    # reset mock_scorer
    mock_scorer.calls = 0
    # .score delegates to the stored scorer; the mock ignores its y argument,
    # so the first score in the cycle is returned.
    custom_score = lr.score(X, lr.predict(X))
    assert custom_score == mock_scorer.scores[0]
    assert mock_scorer.calls == 1
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_predict_3_classes(csr_container):
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), csr_container(X), Y2)
@pytest.mark.parametrize(
    "clf",
    [
        LogisticRegression(C=len(iris.data), solver="lbfgs", max_iter=200),
        LogisticRegression(C=len(iris.data), solver="newton-cg"),
        LogisticRegression(
            C=len(iris.data),
            solver="sag",
            tol=1e-2,
        ),
        LogisticRegression(
            C=len(iris.data),
            solver="saga",
            tol=1e-2,
        ),
        LogisticRegression(C=len(iris.data), solver="newton-cholesky"),
        OneVsRestClassifier(LogisticRegression(C=len(iris.data), solver="liblinear")),
    ],
)
def test_predict_iris(clf, global_random_seed):
    """Test logistic regression with the iris dataset.

    Test that different solvers handle multiclass data correctly and
    give good accuracy score (>0.95) for the training data.
    """
    clf = clone(clf)  # Avoid side effects from shared instances
    n_samples, _ = iris.data.shape
    # Use string class names as targets to also exercise label encoding.
    target = iris.target_names[iris.target]

    # Stochastic / randomized solvers need an explicit seed for this check.
    if getattr(clf, "solver", None) in ("sag", "saga", "liblinear"):
        clf.set_params(random_state=global_random_seed)
    clf.fit(iris.data, target)
    assert_array_equal(np.unique(target), clf.classes_)

    pred = clf.predict(iris.data)
    assert np.mean(pred == target) > 0.95

    probabilities = clf.predict_proba(iris.data)
    assert_allclose(probabilities.sum(axis=1), np.ones(n_samples))

    pred = iris.target_names[probabilities.argmax(axis=1)]
    assert np.mean(pred == target) > 0.95
@pytest.mark.filterwarnings("error::sklearn.exceptions.ConvergenceWarning")
@pytest.mark.parametrize("solver", ["lbfgs", "newton-cholesky"])
def test_logistic_glmnet(solver):
"""Compare Logistic regression with L2 regularization to glmnet"""
# 2 classes
# library("glmnet")
# options(digits=10)
# df <- data.frame(a=-4:4, b=c(0,0,1,0,1,1,1,0,0), y=c(0,0,0,1,1,1,1,1,1))
# x <- data.matrix(df[,c("a", "b")])
# y <- df$y
# fit <- glmnet(x=x, y=y, alpha=0, lambda=1, intercept=T, family="binomial",
# standardize=F, thresh=1e-10, nlambda=1)
# coef(fit, s=1)
# (Intercept) 0.89230405539
# a 0.44464569182
# b 0.01457563448
X = np.array([[-4, -3, -2, -1, 0, 1, 2, 3, 4], [0, 0, 1, 0, 1, 1, 1, 0, 0]]).T
y = np.array([0, 0, 0, 1, 1, 1, 1, 1, 1])
glm = LogisticRegression(
C=1 / 1 / y.shape[0], # C=1.0 / L2-penalty (Ridge) / n_samples
fit_intercept=True,
tol=1e-8,
max_iter=300,
solver=solver,
)
glm.fit(X, y)
assert_allclose(glm.intercept_, 0.89230405539, rtol=1e-5)
assert_allclose(glm.coef_, [[0.44464569182, 0.01457563448]], rtol=1e-5)
# 3 classes
# y <- c(0,0,0,1,1,1,2,2,2)
# fit <- glmnet(x=x, y=y, alpha=0, lambda=1, intercept=T, family="multinomial",
# standardize=F, thresh=1e-12, nlambda=1)
# coef(fit, s=1)
# $`0`
# 3 x 1 sparse Matrix of class "dgCMatrix"
# s=1
# (Intercept) -0.12004759652
# a -0.38023389305
# b -0.01226499932
#
# $`1`
# 3 x 1 sparse Matrix of class "dgCMatrix"
# s=1
# (Intercept) 2.251747383e-01
# a -8.164030176e-05
# b 4.734548012e-02
#
# $`2`
# 3 x 1 sparse Matrix of class "dgCMatrix"
# s=1
# (Intercept) -0.1051271418
# a 0.3803155334
# b -0.0350804808
y = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
glm.fit(X, y)
assert_allclose(
glm.intercept_, [-0.12004759652, 2.251747383e-01, -0.1051271418], rtol=1e-5
)
assert_allclose(
glm.coef_,
[
[-0.38023389305, -0.01226499932],
[-8.164030176e-05, 4.734548012e-02],
[0.3803155334, -0.0350804808],
],
rtol=1e-5,
atol=1e-8,
)
# TODO(1.10): remove filterwarnings with deprecation period of use_legacy_attributes
@pytest.mark.filterwarnings("ignore:.*use_legacy_attributes.*:FutureWarning")
@pytest.mark.parametrize("LR", [LogisticRegression, LogisticRegressionCV])
def test_check_solver_option(LR):
    """Check informative errors for invalid solver/penalty/dual combinations."""
    X, y = iris.data, iris.target

    # only 'liblinear' solver
    for solver in ["liblinear"]:
        msg = f"The '{solver}' solver does not support multiclass classification."
        lr = LR(solver=solver)
        with pytest.raises(ValueError, match=msg):
            lr.fit(X, y)

    # all solvers except 'liblinear' and 'saga'
    for solver in ["lbfgs", "newton-cg", "newton-cholesky", "sag"]:
        msg = "Solver %s supports only 'l2' or None penalties," % solver
        if LR == LogisticRegression:
            lr = LR(solver=solver, l1_ratio=1)
        else:
            lr = LR(solver=solver, l1_ratios=(1,))
        with pytest.raises(ValueError, match=msg):
            lr.fit(X, y)

    # dual formulation is only supported by liblinear
    for solver in ["lbfgs", "newton-cg", "newton-cholesky", "sag", "saga"]:
        msg = "Solver %s supports only dual=False, got dual=True" % solver
        lr = LR(solver=solver, dual=True)
        with pytest.raises(ValueError, match=msg):
            lr.fit(X, y)

    # only saga supports elasticnet. We only test for liblinear because the
    # error is raised before for the other solvers (solver %s supports only l2
    # penalties)
    for solver in ["liblinear"]:
        msg = f"Only 'saga' solver supports elasticnet penalty, got solver={solver}."
        if LR == LogisticRegression:
            lr = LR(solver=solver, l1_ratio=0.5)
        else:
            lr = LR(solver=solver, l1_ratios=(0.5,))
        with pytest.raises(ValueError, match=msg):
            lr.fit(X, y)

    # liblinear does not support penalty='none'
    # (LogisticRegressionCV does not supports penalty='none' at all)
    if LR is LogisticRegression:
        msg = "penalty=None is not supported for the liblinear solver"
        lr = LR(C=np.inf, solver="liblinear")
        with pytest.raises(ValueError, match=msg):
            lr.fit(X, y)
# TODO(1.10): remove test with removal of penalty
@pytest.mark.filterwarnings("ignore::FutureWarning")
@pytest.mark.parametrize(
    ["LR", "arg"],
    [(LogisticRegression, "l1_ratio"), (LogisticRegressionCV, "l1_ratios")],
)
def test_elasticnet_l1_ratio_err_helpful(LR, arg):
    # penalty="elasticnet" without an l1 ratio must raise an error whose
    # message points at the missing parameter.
    X_toy = np.array([[1, 2], [3, 4]])
    y_toy = np.array([0, 1])
    model = LR(penalty="elasticnet", solver="saga", **{arg: None})
    with pytest.raises(ValueError, match=r".*l1_ratio.*"):
        model.fit(X_toy, y_toy)
@pytest.mark.parametrize("coo_container", COO_CONTAINERS)
def test_sparsify(coo_container):
# Test sparsify and densify members.
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
X = scale(iris.data)
clf = LogisticRegression().fit(X, target)
pred_d_d = clf.decision_function(X)
clf.sparsify()
assert sparse.issparse(clf.coef_)
pred_s_d = clf.decision_function(X)
sp_data = coo_container(X)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
    # Test that an exception is raised on inconsistent input
    rng = np.random.RandomState(0)
    X_ = rng.random_sample((5, 10))
    y_ = np.ones(X_.shape[0])
    y_[0] = 0
    clf = LogisticRegression(random_state=0)

    # Wrong dimensions for training data
    y_wrong = y_[:-1]
    # NOTE: this uses the module-level 3-sample ``X``, not the local ``X_``,
    # so the 4-element y_wrong has a mismatched length either way.
    with pytest.raises(ValueError):
        clf.fit(X, y_wrong)

    # Wrong dimensions for test data: fitted on 10 features, predict with 12.
    with pytest.raises(ValueError):
        clf.fit(X_, y_).predict(rng.random_sample((3, 12)))
def test_write_parameters():
    # Non-regression check: coef_ and intercept_ must be writable in place.
    model = LogisticRegression().fit(X, Y1)
    model.coef_[:] = 0
    model.intercept_[:] = 0
    # With all-zero parameters the decision function vanishes everywhere.
    assert_array_almost_equal(model.decision_function(X), 0)
def test_nan():
    # Regression test for Issue #252: a NaN in the input used to send fit
    # into an infinite loop; it must raise a ValueError instead.
    X_with_nan = np.array(X, dtype=np.float64)
    X_with_nan[0, 1] = np.nan
    with pytest.raises(ValueError):
        LogisticRegression().fit(X_with_nan, Y1)
def test_consistency_path(global_random_seed):
    # Test that the path algorithm is consistent
    rng = np.random.RandomState(global_random_seed)
    X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
    y = [1] * 100 + [-1] * 100
    Cs = np.logspace(0, 4, 10)

    f = ignore_warnings
    # can't test with fit_intercept=True since LIBLINEAR
    # penalizes the intercept
    for solver in ["sag", "saga"]:
        # Coefficients along the regularization path should match a separate
        # fit at each C value.
        coefs, Cs, _ = f(_logistic_regression_path)(
            X,
            y,
            classes=[0, 1],
            Cs=Cs,
            fit_intercept=False,
            tol=1e-5,
            solver=solver,
            max_iter=1000,
            random_state=global_random_seed,
        )
        for i, C in enumerate(Cs):
            lr = LogisticRegression(
                C=C,
                fit_intercept=False,
                tol=1e-5,
                solver=solver,
                random_state=global_random_seed,
                max_iter=1000,
            )
            lr.fit(X, y)
            lr_coef = lr.coef_.ravel()
            assert_array_almost_equal(
                lr_coef, coefs[i], decimal=4, err_msg="with solver = %s" % solver
            )

    # test for fit_intercept=True
    for solver in ("lbfgs", "newton-cg", "newton-cholesky", "liblinear", "sag", "saga"):
        Cs = [1e3]
        coefs, Cs, _ = f(_logistic_regression_path)(
            X,
            y,
            classes=[0, 1],
            Cs=Cs,
            tol=1e-6,
            solver=solver,
            intercept_scaling=10000.0,
            random_state=global_random_seed,
        )
        lr = LogisticRegression(
            C=Cs[0],
            tol=1e-6,
            intercept_scaling=10000.0,
            random_state=global_random_seed,
            solver=solver,
        )
        lr.fit(X, y)
        # The path returns the intercept appended to the coefficients.
        lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
        assert_array_almost_equal(
            lr_coef, coefs[0], decimal=4, err_msg="with solver = %s" % solver
        )
def test_logistic_regression_path_convergence_fail():
    rng = np.random.RandomState(0)
    X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
    y = [1] * 100 + [-1] * 100
    Cs = [1e3]

    # Check that the convergence message points to both a model agnostic
    # advice (scaling the data) and to the logistic regression specific
    # documentation that includes hints on the solver configuration.
    # max_iter=1 with tol=0 guarantees non-convergence.
    with pytest.warns(ConvergenceWarning) as record:
        _logistic_regression_path(
            X, y, classes=[0, 1], Cs=Cs, tol=0.0, max_iter=1, random_state=0, verbose=0
        )

    assert len(record) == 1
    warn_msg = record[0].message.args[0]
    assert "lbfgs failed to converge after 1 iteration(s)" in warn_msg
    assert "Increase the number of iterations" in warn_msg
    assert "scale the data" in warn_msg
    assert "linear_model.html#logistic-regression" in warn_msg
# XXX: investigate thread-safety bug that might be related to:
# https://github.com/scikit-learn/scikit-learn/issues/31883
@pytest.mark.thread_unsafe
def test_liblinear_dual_random_state(global_random_seed):
    # random_state is relevant for liblinear solver only if dual=True
    X, y = make_classification(n_samples=20, random_state=global_random_seed)
    # lr1 and lr2 share a seed; lr3 uses a different one.
    lr1 = LogisticRegression(
        random_state=global_random_seed,
        dual=True,
        tol=1e-3,
        solver="liblinear",
    )
    lr1.fit(X, y)
    lr2 = LogisticRegression(
        random_state=global_random_seed,
        dual=True,
        tol=1e-3,
        solver="liblinear",
    )
    lr2.fit(X, y)
    lr3 = LogisticRegression(
        random_state=global_random_seed + 1,
        dual=True,
        tol=1e-3,
        solver="liblinear",
    )
    lr3.fit(X, y)

    # same result for same random state
    assert_array_almost_equal(lr1.coef_, lr2.coef_)
    # different results for different random states
    msg = "Arrays are not almost equal to 6 decimals"
    with pytest.raises(AssertionError, match=msg):
        assert_array_almost_equal(lr1.coef_, lr3.coef_)
# TODO(1.12): remove deprecated use_legacy_attributes
@pytest.mark.parametrize("use_legacy_attributes", [True, False])
def test_logistic_cv(global_random_seed, use_legacy_attributes):
    # test for LogisticRegressionCV object
    n_samples, n_features, n_cv = 50, 5, 3
    rng = np.random.RandomState(global_random_seed)
    X_ref = rng.randn(n_samples, n_features)
    y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
    X_ref -= X_ref.mean()
    X_ref /= X_ref.std()
    # With a single C, the CV estimator must match a plain fit at that C.
    lr_cv = LogisticRegressionCV(
        Cs=[1.0],
        l1_ratios=(0.0,),  # TODO(1.10): remove because it is default now.
        fit_intercept=False,
        random_state=global_random_seed,
        solver="liblinear",
        cv=n_cv,
        use_legacy_attributes=use_legacy_attributes,
    )
    lr_cv.fit(X_ref, y)
    lr = LogisticRegression(
        C=1.0, fit_intercept=False, random_state=global_random_seed, solver="liblinear"
    )
    lr.fit(X_ref, y)
    assert_array_almost_equal(lr.coef_, lr_cv.coef_)

    assert lr_cv.coef_.shape == (1, n_features)
    assert_array_equal(lr_cv.classes_, [-1, 1])
    assert len(lr_cv.classes_) == 2

    assert lr_cv.Cs_.shape == (1,)
    n_Cs = lr_cv.Cs_.shape[0]
    assert lr_cv.l1_ratios_.shape == (1,)
    n_l1_ratios = lr_cv.l1_ratios_.shape[0]

    # The legacy attribute layout is per-class dicts; the new layout stores
    # plain arrays with explicit fold/ratio/C axes.
    if use_legacy_attributes:
        coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
        assert coefs_paths.shape == (1, n_cv, n_Cs, n_l1_ratios, n_features)
        scores = np.asarray(list(lr_cv.scores_.values()))
        assert scores.shape == (1, n_cv, n_Cs, n_l1_ratios)
    else:
        assert lr_cv.coefs_paths_.shape == (n_cv, n_l1_ratios, n_Cs, 1, n_features)
        assert isinstance(lr_cv.C_, float)
        assert isinstance(lr_cv.l1_ratio_, float)
        assert lr_cv.scores_.shape == (n_cv, n_l1_ratios, n_Cs)
@pytest.mark.parametrize(
    "scoring, multiclass_agg_list",
    [
        ("accuracy", [""]),
        ("precision", ["_macro", "_weighted"]),
        # no need to test for micro averaging because it
        # is the same as accuracy for f1, precision,
        # and recall (see https://github.com/
        # scikit-learn/scikit-learn/pull/
        # 11578#discussion_r203250062)
        ("f1", ["_macro", "_weighted"]),
        ("neg_log_loss", [""]),
        ("recall", ["_macro", "_weighted"]),
    ],
)
def test_logistic_cv_multinomial_score(
    global_random_seed, scoring, multiclass_agg_list
):
    # test that LogisticRegressionCV uses the right score to compute its
    # cross-validation scores when using a multinomial scoring
    # see https://github.com/scikit-learn/scikit-learn/issues/8720
    X, y = make_classification(
        n_samples=100, random_state=global_random_seed, n_classes=3, n_informative=6
    )
    train, test = np.arange(80), np.arange(80, 100)
    lr = LogisticRegression(C=1.0)
    # we use lbfgs to support multinomial
    params = lr.get_params()
    # Replace default penalty='deprecated' in 1.8 by the equivalent value that
    # can be used by _log_reg_scoring_path
    # TODO(1.10) for consistency we may want to adapt _log_reg_scoring_path to
    # use only l1_ratio rather than penalty + l1_ratio
    params["penalty"] = "l2"
    # we store the params to set them further in _log_reg_scoring_path
    for key in ["C", "n_jobs", "warm_start"]:
        del params[key]
    lr.fit(X[train], y[train])
    for averaging in multiclass_agg_list:
        scorer = get_scorer(scoring + averaging)
        # The score recorded along the path must equal the scorer applied to
        # an equivalent estimator fit on the same train split.
        assert_array_almost_equal(
            _log_reg_scoring_path(
                X,
                y,
                train,
                test,
                classes=np.unique(y),
                Cs=[1.0],
                scoring=scorer,
                max_squared_sum=None,
                sample_weight=None,
                score_params=None,
                **params,
            )[2][0],
            scorer(lr, X[test], y[test]),
        )
def test_multinomial_logistic_regression_string_inputs():
    """Test internally encode labels"""
    n_samples, n_features, n_classes = 50, 5, 3
    X_ref, y = make_classification(
        n_samples=n_samples,
        n_features=n_features,
        n_classes=n_classes,
        n_informative=3,
        random_state=0,
    )
    # Map the integer classes {0, 1, 2} onto string labels.
    y_str = LabelEncoder().fit(["bar", "baz", "foo"]).inverse_transform(y)
    # For numerical labels, let y values be taken from set (-1, 0, 1)
    y = np.array(y) - 1
    # Test for string labels
    lr = LogisticRegression()
    lr_cv = LogisticRegressionCV(Cs=3, use_legacy_attributes=False)
    lr_str = LogisticRegression()
    lr_cv_str = LogisticRegressionCV(Cs=3, use_legacy_attributes=False)

    lr.fit(X_ref, y)
    lr_cv.fit(X_ref, y)
    lr_str.fit(X_ref, y_str)
    lr_cv_str.fit(X_ref, y_str)

    # Fitted models must be identical regardless of the label type.
    assert_allclose(lr.coef_, lr_str.coef_)
    assert_allclose(lr.predict_proba(X_ref), lr_str.predict_proba(X_ref))
    assert sorted(lr_str.classes_) == ["bar", "baz", "foo"]
    assert_allclose(lr_cv.coef_, lr_cv_str.coef_)
    assert_allclose(lr_cv.predict_proba(X_ref), lr_cv_str.predict_proba(X_ref))
    assert sorted(lr_str.classes_) == ["bar", "baz", "foo"]
    assert sorted(lr_cv_str.classes_) == ["bar", "baz", "foo"]

    # The predictions should be in original labels
    assert sorted(np.unique(lr_str.predict(X_ref))) == ["bar", "baz", "foo"]
    # CV does not necessarily predict all labels
    assert set(np.unique(lr_cv_str.predict(X_ref))) <= {"bar", "baz", "foo"}
    # We use explicit Cs parameter to make sure all labels are predicted for each C.
    lr_cv_str = LogisticRegressionCV(Cs=[1, 2, 10], use_legacy_attributes=False).fit(
        X_ref, y_str
    )
    assert sorted(np.unique(lr_cv_str.predict(X_ref))) == ["bar", "baz", "foo"]

    # Make sure class weights can be given with string labels
    # (weight 0 on "foo" effectively removes that class from predictions).
    lr_cv_str = LogisticRegression(class_weight={"bar": 1, "baz": 2, "foo": 0}).fit(
        X_ref, y_str
    )
    assert sorted(np.unique(lr_cv_str.predict(X_ref))) == ["bar", "baz"]
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_logistic_cv_sparse(global_random_seed, csr_container):
X, y = make_classification(
n_samples=100, n_features=5, random_state=global_random_seed
)
X[X < 1.0] = 0.0
csr = csr_container(X)
clf = LogisticRegressionCV(use_legacy_attributes=False)
clf.fit(X, y)
clfs = LogisticRegressionCV(use_legacy_attributes=False)
clfs.fit(csr, y)
assert_array_almost_equal(clfs.coef_, clf.coef_)
assert_array_almost_equal(clfs.intercept_, clf.intercept_)
assert clfs.C_ == clf.C_
# TODO(1.12): remove deprecated use_legacy_attributes
@pytest.mark.parametrize("use_legacy_attributes", [True, False])
def test_multinomial_cv_iris(use_legacy_attributes):
    # Test that multinomial LogisticRegressionCV is correct using the iris dataset.
    X, y = iris.data, iris.target
    n_samples, n_features = X.shape
    # The cv indices from stratified kfold
    n_cv = 2
    cv = StratifiedKFold(n_cv)
    precomputed_folds = list(cv.split(X, y))
    # Train clf on the original dataset
    clf = LogisticRegressionCV(
        cv=precomputed_folds, solver="newton-cholesky", use_legacy_attributes=True
    )
    clf.fit(X, y)
    # Test the shape of various attributes.
    assert clf.coef_.shape == (3, n_features)
    assert_array_equal(clf.classes_, [0, 1, 2])
    coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
    assert coefs_paths.shape == (3, n_cv, 10, n_features + 1)
    assert clf.Cs_.shape == (10,)
    scores = np.asarray(list(clf.scores_.values()))
    assert scores.shape == (3, n_cv, 10)
    # Test that for the iris data multinomial gives a better accuracy than OvR
    clf_ovr = GridSearchCV(
        OneVsRestClassifier(LogisticRegression(solver="newton-cholesky")),
        {"estimator__C": np.logspace(-4, 4, num=10)},
    ).fit(X, y)
    for solver in ["lbfgs", "newton-cg", "sag", "saga"]:
        # sag/saga converge slowly on this problem, hence the larger budget
        # and looser tolerance.
        max_iter = 500 if solver in ["sag", "saga"] else 30
        clf_multi = LogisticRegressionCV(
            solver=solver,
            max_iter=max_iter,
            random_state=42,
            tol=1e-3 if solver in ["sag", "saga"] else 1e-2,
            cv=2,
            use_legacy_attributes=use_legacy_attributes,
        )
        if solver == "lbfgs":
            # lbfgs requires scaling to avoid convergence warnings
            X = scale(X)
        clf_multi.fit(X, y)
        multi_score = clf_multi.score(X, y)
        ovr_score = clf_ovr.score(X, y)
        assert multi_score > ovr_score
        # Test attributes of LogisticRegressionCV
        assert clf.coef_.shape == clf_multi.coef_.shape
        assert_array_equal(clf_multi.classes_, [0, 1, 2])
        if use_legacy_attributes:
            coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
            assert coefs_paths.shape == (3, n_cv, 10, n_features + 1)
            assert clf_multi.Cs_.shape == (10,)
            scores = np.asarray(list(clf_multi.scores_.values()))
            assert scores.shape == (3, n_cv, 10)
            # Norm of coefficients should increase with increasing C.
            for fold in range(clf_multi.coefs_paths_[0].shape[0]):
                # with use_legacy_attributes=True, coefs_paths_ is a dict whose
                # keys are classes and, per the shape assertion above, each
                # value has shape (n_folds, n_cs, n_features + 1).
                # Note that we have to exclude the intercept, hence the ':-1'
                # on the last dimension
                coefs = [
                    clf_multi.coefs_paths_[c][fold, :, :-1] for c in clf_multi.classes_
                ]
                coefs = np.swapaxes(coefs, 1, 0).reshape(len(clf_multi.Cs_), -1)
                norms = np.sum(coefs * coefs, axis=1)  # L2 norm for each C
                assert np.all(np.diff(norms) >= 0)
        else:
            n_folds, n_cs, n_l1_ratios, n_classes, n_dof = 2, 10, 1, 3, n_features + 1
            assert clf_multi.coefs_paths_.shape == (
                n_folds,
                n_l1_ratios,
                n_cs,
                n_classes,
                n_dof,
            )
            assert isinstance(clf_multi.C_, float)
            assert isinstance(clf_multi.l1_ratio_, float)
            assert clf_multi.scores_.shape == (n_folds, n_l1_ratios, n_cs)
            # Norm of coefficients should increase with increasing C.
            for fold in range(clf_multi.coefs_paths_.shape[0]):
                # with use_legacy_attributes=False, coefs_paths_ has shape
                # (n_folds, n_l1_ratios, n_Cs, n_classes, n_features + 1)
                # Note that we have to exclude the intercept, hence the ':-1'
                # on the last dimension
                coefs = clf_multi.coefs_paths_[fold, 0, :, :, :-1]
                norms = np.sum(coefs * coefs, axis=(-2, -1))  # L2 norm for each C
                assert np.all(np.diff(norms) >= 0)
    # Test CV folds with missing class labels:
    # The iris target variable has 3 classes and is ordered such that a simple
    # CV split with 3 folds separates the classes.
    cv = KFold(n_splits=3)
    # Check this assumption.
    classes = np.unique(y)
    assert len(classes) == 3
    for train, test in cv.split(X, y):
        assert len(np.unique(y[train])) == 2
        assert len(np.unique(y[test])) == 1
        assert set(y[train]) & set(y[test]) == set()
    clf = LogisticRegressionCV(cv=cv, use_legacy_attributes=False).fit(X, y)
    # We expect accuracy to be exactly 0 because train and test sets have
    # non-overlapping labels
    assert np.all(clf.scores_ == 0.0)
    # We use a proper scoring rule, i.e. the Brier score, to evaluate our classifier.
    # Because of a bug in LogisticRegressionCV, we need to create our own scoring
    # function to pass explicitly the labels.
    scoring = make_scorer(
        brier_score_loss,
        greater_is_better=False,
        response_method="predict_proba",
        scale_by_half=True,
        labels=classes,
    )
    # We set small Cs, that is strong penalty as the best C is likely the smallest one.
    clf = LogisticRegressionCV(
        cv=cv, scoring=scoring, Cs=np.logspace(-6, 3, 10), use_legacy_attributes=False
    ).fit(X, y)
    assert clf.C_ == 1e-6  # smallest value of provided Cs
    brier_scores = -clf.scores_
    # We expect the scores to be bad because train and test sets have
    # non-overlapping labels
    assert np.all(brier_scores > 0.7)
    # But the best score should be better than the worst value of 1.
    assert np.min(brier_scores) < 0.8
def test_logistic_regression_solvers(global_random_seed):
    """Test solvers converge to the same result."""
    X, y = make_classification(
        n_samples=200, n_features=10, n_informative=5, random_state=global_random_seed
    )
    common = dict(C=0.1, fit_intercept=False, random_state=global_random_seed)
    fitted = {}
    for solver in SOLVERS:
        fitted[solver] = LogisticRegression(solver=solver, **common).fit(X, y)

    # Every pair of solvers must agree on the coefficients up to tolerance.
    for first, second in itertools.combinations(fitted, r=2):
        assert_allclose(
            fitted[first].coef_,
            fitted[second].coef_,
            atol=1e-3,
            rtol=1e-4,
            err_msg=f"Compare {first} vs {second}",
        )
# FIXME: the random state is fixed in the following test because SAG fails
# to converge to the same results as BFGS for 20% of the cases. Usually it
# means that there is one coefficient that is slightly different.
@pytest.mark.parametrize("fit_intercept", [False, True])
def test_logistic_regression_solvers_multiclass(fit_intercept):
    """Test solvers converge to the same result for multiclass problems."""
    X, y = make_classification(
        n_samples=20,
        n_features=20,
        n_informative=10,
        n_classes=3,
        random_state=0,
    )
    tol = 1e-8
    params = dict(fit_intercept=fit_intercept, tol=tol, random_state=42)
    # Override max iteration count for specific solvers to allow for
    # proper convergence.
    solver_max_iter = {"lbfgs": 200, "sag": 10_000, "saga": 10_000}
    # liblinear is excluded: it does not support multinomial multiclass.
    classifiers = {
        solver: LogisticRegression(
            solver=solver, max_iter=solver_max_iter.get(solver, 100), **params
        ).fit(X, y)
        for solver in set(SOLVERS) - set(["liblinear"])
    }
    for solver_1, solver_2 in itertools.combinations(classifiers, r=2):
        # saga needs a looser tolerance than the deterministic solvers.
        assert_allclose(
            classifiers[solver_1].coef_,
            classifiers[solver_2].coef_,
            rtol=5e-3 if (solver_1 == "saga" or solver_2 == "saga") else 1e-3,
            err_msg=f"{solver_1} vs {solver_2}",
        )
        if fit_intercept:
            assert_allclose(
                classifiers[solver_1].intercept_,
                classifiers[solver_2].intercept_,
                rtol=5e-3 if (solver_1 == "saga" or solver_2 == "saga") else 1e-3,
                err_msg=f"{solver_1} vs {solver_2}",
            )
@pytest.mark.parametrize("fit_intercept", [False, True])
def test_logistic_regression_solvers_multiclass_unpenalized(
fit_intercept, global_random_seed
):
"""Test and compare solver results for unpenalized multinomial multiclass."""
# We want to avoid perfect separation.
n_samples, n_features, n_classes = 100, 4, 3
rng = np.random.RandomState(global_random_seed)
X = make_low_rank_matrix(
n_samples=n_samples,
n_features=n_features + fit_intercept,
effective_rank=n_features + fit_intercept,
tail_strength=0.1,
random_state=rng,
)
if fit_intercept:
X[:, -1] = 1
U, s, Vt = svd(X)
assert np.all(s > 1e-3) # to be sure that X is not singular
assert np.max(s) / np.min(s) < 100 # condition number of X
if fit_intercept:
X = X[:, :-1]
coef = rng.uniform(low=1, high=3, size=n_features * n_classes)
coef = coef.reshape(n_classes, n_features)
intercept = rng.uniform(low=-1, high=1, size=n_classes) * fit_intercept
raw_prediction = X @ coef.T + intercept
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/tests/test_bayes.py | sklearn/linear_model/tests/test_bayes.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from math import log
import numpy as np
import pytest
from sklearn import datasets
from sklearn.linear_model import ARDRegression, BayesianRidge, Ridge
from sklearn.utils import check_random_state
from sklearn.utils._testing import (
_convert_container,
assert_allclose,
assert_almost_equal,
assert_array_almost_equal,
assert_array_less,
)
from sklearn.utils.extmath import fast_logdet
diabetes = datasets.load_diabetes()
def test_bayesian_ridge_scores():
"""Check scores attribute shape"""
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
assert clf.scores_.shape == (clf.n_iter_ + 1,)
def test_bayesian_ridge_score_values():
"""Check value of score on toy example.
Compute log marginal likelihood with equation (36) in Sparse Bayesian
Learning and the Relevance Vector Machine (Tipping, 2001):
- 0.5 * (log |Id/alpha + X.X^T/lambda| +
y^T.(Id/alpha + X.X^T/lambda).y + n * log(2 * pi))
+ lambda_1 * log(lambda) - lambda_2 * lambda
+ alpha_1 * log(alpha) - alpha_2 * alpha
and check equality with the score computed during training.
"""
X, y = diabetes.data, diabetes.target
n_samples = X.shape[0]
# check with initial values of alpha and lambda (see code for the values)
eps = np.finfo(np.float64).eps
alpha_ = 1.0 / (np.var(y) + eps)
lambda_ = 1.0
# value of the parameters of the Gamma hyperpriors
alpha_1 = 0.1
alpha_2 = 0.1
lambda_1 = 0.1
lambda_2 = 0.1
# compute score using formula of docstring
score = lambda_1 * log(lambda_) - lambda_2 * lambda_
score += alpha_1 * log(alpha_) - alpha_2 * alpha_
M = 1.0 / alpha_ * np.eye(n_samples) + 1.0 / lambda_ * np.dot(X, X.T)
M_inv_dot_y = np.linalg.solve(M, y)
score += -0.5 * (
fast_logdet(M) + np.dot(y.T, M_inv_dot_y) + n_samples * log(2 * np.pi)
)
# compute score with BayesianRidge
clf = BayesianRidge(
alpha_1=alpha_1,
alpha_2=alpha_2,
lambda_1=lambda_1,
lambda_2=lambda_2,
max_iter=1,
fit_intercept=False,
compute_score=True,
)
clf.fit(X, y)
assert_almost_equal(clf.scores_[0], score, decimal=9)
def test_bayesian_ridge_parameter():
# Test correctness of lambda_ and alpha_ parameters (GitHub issue #8224)
X = np.array([[1, 1], [3, 4], [5, 7], [4, 1], [2, 6], [3, 10], [3, 2]])
y = np.array([1, 2, 3, 2, 0, 4, 5]).T
# A Ridge regression model using an alpha value equal to the ratio of
# lambda_ and alpha_ from the Bayesian Ridge model must be identical
br_model = BayesianRidge(compute_score=True).fit(X, y)
rr_model = Ridge(alpha=br_model.lambda_ / br_model.alpha_).fit(X, y)
assert_array_almost_equal(rr_model.coef_, br_model.coef_)
assert_almost_equal(rr_model.intercept_, br_model.intercept_)
@pytest.mark.parametrize("n_samples, n_features", [(10, 20), (20, 10)])
def test_bayesian_covariance_matrix(n_samples, n_features, global_random_seed):
"""Check the posterior covariance matrix sigma_
Non-regression test for https://github.com/scikit-learn/scikit-learn/issues/31093
"""
X, y = datasets.make_regression(
n_samples, n_features, random_state=global_random_seed
)
reg = BayesianRidge(fit_intercept=False).fit(X, y)
covariance_matrix = np.linalg.inv(
reg.lambda_ * np.identity(n_features) + reg.alpha_ * np.dot(X.T, X)
)
assert_allclose(reg.sigma_, covariance_matrix, rtol=1e-6)
def test_bayesian_sample_weights():
# Test correctness of the sample_weights method
X = np.array([[1, 1], [3, 4], [5, 7], [4, 1], [2, 6], [3, 10], [3, 2]])
y = np.array([1, 2, 3, 2, 0, 4, 5]).T
w = np.array([4, 3, 3, 1, 1, 2, 3]).T
# A Ridge regression model using an alpha value equal to the ratio of
# lambda_ and alpha_ from the Bayesian Ridge model must be identical
br_model = BayesianRidge(compute_score=True).fit(X, y, sample_weight=w)
rr_model = Ridge(alpha=br_model.lambda_ / br_model.alpha_).fit(
X, y, sample_weight=w
)
assert_array_almost_equal(rr_model.coef_, br_model.coef_)
assert_almost_equal(rr_model.intercept_, br_model.intercept_)
def test_toy_bayesian_ridge_object():
# Test BayesianRidge on toy
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_bayesian_initial_params():
# Test BayesianRidge with initial values (alpha_init, lambda_init)
X = np.vander(np.linspace(0, 4, 5), 4)
y = np.array([0.0, 1.0, 0.0, -1.0, 0.0]) # y = (x^3 - 6x^2 + 8x) / 3
# In this case, starting from the default initial values will increase
# the bias of the fitted curve. So, lambda_init should be small.
reg = BayesianRidge(alpha_init=1.0, lambda_init=1e-3)
# Check the R2 score nearly equals to one.
r2 = reg.fit(X, y).score(X, y)
assert_almost_equal(r2, 1.0)
def test_prediction_bayesian_ridge_ard_with_constant_input():
# Test BayesianRidge and ARDRegression predictions for edge case of
# constant target vectors
n_samples = 4
n_features = 5
random_state = check_random_state(42)
constant_value = random_state.rand()
X = random_state.random_sample((n_samples, n_features))
y = np.full(n_samples, constant_value, dtype=np.array(constant_value).dtype)
expected = np.full(n_samples, constant_value, dtype=np.array(constant_value).dtype)
for clf in [BayesianRidge(), ARDRegression()]:
y_pred = clf.fit(X, y).predict(X)
assert_array_almost_equal(y_pred, expected)
def test_std_bayesian_ridge_ard_with_constant_input():
# Test BayesianRidge and ARDRegression standard dev. for edge case of
# constant target vector
# The standard dev. should be relatively small (< 0.01 is tested here)
n_samples = 10
n_features = 5
random_state = check_random_state(42)
constant_value = random_state.rand()
X = random_state.random_sample((n_samples, n_features))
y = np.full(n_samples, constant_value, dtype=np.array(constant_value).dtype)
expected_upper_boundary = 0.01
for clf in [BayesianRidge(), ARDRegression()]:
_, y_std = clf.fit(X, y).predict(X, return_std=True)
assert_array_less(y_std, expected_upper_boundary)
def test_update_of_sigma_in_ard():
# Checks that `sigma_` is updated correctly after the last iteration
# of the ARDRegression algorithm. See issue #10128.
X = np.array([[1, 0], [0, 0]])
y = np.array([0, 0])
clf = ARDRegression(max_iter=1)
clf.fit(X, y)
# With the inputs above, ARDRegression prunes both of the two coefficients
# in the first iteration. Hence, the expected shape of `sigma_` is (0, 0).
assert clf.sigma_.shape == (0, 0)
# Ensure that no error is thrown at prediction stage
clf.predict(X, return_std=True)
def test_toy_ard_object():
# Test BayesianRegression ARD classifier
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
@pytest.mark.parametrize("n_samples, n_features", ((10, 100), (100, 10)))
def test_ard_accuracy_on_easy_problem(global_random_seed, n_samples, n_features):
# Check that ARD converges with reasonable accuracy on an easy problem
# (Github issue #14055)
X = np.random.RandomState(global_random_seed).normal(size=(250, 3))
y = X[:, 1]
regressor = ARDRegression()
regressor.fit(X, y)
abs_coef_error = np.abs(1 - regressor.coef_[1])
assert abs_coef_error < 1e-10
@pytest.mark.parametrize("constructor_name", ["array", "dataframe"])
def test_return_std(constructor_name):
# Test return_std option for both Bayesian regressors
def f(X):
return np.dot(X, w) + b
def f_noise(X, noise_mult):
return f(X) + np.random.randn(X.shape[0]) * noise_mult
d = 5
n_train = 50
n_test = 10
w = np.array([1.0, 0.0, 1.0, -1.0, 0.0])
b = 1.0
X = np.random.random((n_train, d))
X = _convert_container(X, constructor_name)
X_test = np.random.random((n_test, d))
X_test = _convert_container(X_test, constructor_name)
for decimal, noise_mult in enumerate([1, 0.1, 0.01]):
y = f_noise(X, noise_mult)
m1 = BayesianRidge()
m1.fit(X, y)
y_mean1, y_std1 = m1.predict(X_test, return_std=True)
assert_array_almost_equal(y_std1, noise_mult, decimal=decimal)
m2 = ARDRegression()
m2.fit(X, y)
y_mean2, y_std2 = m2.predict(X_test, return_std=True)
assert_array_almost_equal(y_std2, noise_mult, decimal=decimal)
def test_update_sigma(global_random_seed):
# make sure the two update_sigma() helpers are equivalent. The woodbury
# formula is used when n_samples < n_features, and the other one is used
# otherwise.
rng = np.random.RandomState(global_random_seed)
# set n_samples == n_features to avoid instability issues when inverting
# the matrices. Using the woodbury formula would be unstable when
# n_samples > n_features
n_samples = n_features = 10
X = rng.randn(n_samples, n_features)
alpha = 1
lmbda = np.arange(1, n_features + 1)
keep_lambda = np.array([True] * n_features)
reg = ARDRegression()
sigma = reg._update_sigma(X, alpha, lmbda, keep_lambda)
sigma_woodbury = reg._update_sigma_woodbury(X, alpha, lmbda, keep_lambda)
np.testing.assert_allclose(sigma, sigma_woodbury)
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("Estimator", [BayesianRidge, ARDRegression])
def test_dtype_match(dtype, Estimator):
# Test that np.float32 input data is not cast to np.float64 when possible
X = np.array([[1, 1], [3, 4], [5, 7], [4, 1], [2, 6], [3, 10], [3, 2]], dtype=dtype)
y = np.array([1, 2, 3, 2, 0, 4, 5]).T
model = Estimator()
# check type consistency
model.fit(X, y)
attributes = ["coef_", "sigma_"]
for attribute in attributes:
assert getattr(model, attribute).dtype == X.dtype
y_mean, y_std = model.predict(X, return_std=True)
assert y_mean.dtype == X.dtype
assert y_std.dtype == X.dtype
@pytest.mark.parametrize("Estimator", [BayesianRidge, ARDRegression])
def test_dtype_correctness(Estimator):
X = np.array([[1, 1], [3, 4], [5, 7], [4, 1], [2, 6], [3, 10], [3, 2]])
y = np.array([1, 2, 3, 2, 0, 4, 5]).T
model = Estimator()
coef_32 = model.fit(X.astype(np.float32), y).coef_
coef_64 = model.fit(X.astype(np.float64), y).coef_
np.testing.assert_allclose(coef_32, coef_64, rtol=1e-4)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/tests/test_ridge.py | sklearn/linear_model/tests/test_ridge.py | import warnings
from itertools import product
import numpy as np
import pytest
from scipy import linalg
from sklearn import config_context, datasets
from sklearn.base import clone
from sklearn.datasets import (
make_classification,
make_low_rank_matrix,
make_multilabel_classification,
make_regression,
)
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model import (
LinearRegression,
Ridge,
RidgeClassifier,
RidgeClassifierCV,
RidgeCV,
ridge_regression,
)
from sklearn.linear_model._ridge import (
_check_gcv_mode,
_RidgeGCV,
_solve_cholesky,
_solve_cholesky_kernel,
_solve_lbfgs,
_solve_svd,
_X_CenterStackOp,
)
from sklearn.metrics import get_scorer, make_scorer, mean_squared_error
from sklearn.model_selection import (
GridSearchCV,
GroupKFold,
KFold,
LeaveOneOut,
cross_val_predict,
)
from sklearn.preprocessing import minmax_scale
from sklearn.utils import check_random_state
from sklearn.utils._array_api import (
_NUMPY_NAMESPACE_NAMES,
_atol_for_type,
_convert_to_numpy,
_get_namespace_device_dtype_ids,
_max_precision_float_dtype,
yield_namespace_device_dtype_combinations,
yield_namespaces,
)
from sklearn.utils._test_common.instance_generator import _get_check_estimator_ids
from sklearn.utils._testing import (
assert_allclose,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
ignore_warnings,
)
from sklearn.utils.estimator_checks import (
_array_api_for_tests,
check_array_api_input_and_values,
)
from sklearn.utils.fixes import (
_IS_32BIT,
COO_CONTAINERS,
CSC_CONTAINERS,
CSR_CONTAINERS,
DOK_CONTAINERS,
LIL_CONTAINERS,
)
SOLVERS = ["svd", "sparse_cg", "cholesky", "lsqr", "sag", "saga"]
SPARSE_SOLVERS_WITH_INTERCEPT = ("sparse_cg", "sag")
SPARSE_SOLVERS_WITHOUT_INTERCEPT = ("sparse_cg", "cholesky", "lsqr", "sag", "saga")
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
iris = datasets.load_iris()
X_iris, y_iris = iris.data, iris.target
def _accuracy_callable(y_test, y_pred, **kwargs):
return np.mean(y_test == y_pred)
def _mean_squared_error_callable(y_test, y_pred):
return ((y_test - y_pred) ** 2).mean()
@pytest.fixture(params=["long", "wide"])
def ols_ridge_dataset(global_random_seed, request):
"""Dataset with OLS and Ridge solutions, well conditioned X.
The construction is based on the SVD decomposition of X = U S V'.
Parameters
----------
type : {"long", "wide"}
If "long", then n_samples > n_features.
If "wide", then n_features > n_samples.
For "wide", we return the minimum norm solution w = X' (XX')^-1 y:
min ||w||_2 subject to X w = y
Returns
-------
X : ndarray
Last column of 1, i.e. intercept.
y : ndarray
coef_ols : ndarray of shape
Minimum norm OLS solutions, i.e. min ||X w - y||_2_2 (with minimum ||w||_2 in
case of ambiguity)
Last coefficient is intercept.
coef_ridge : ndarray of shape (5,)
Ridge solution with alpha=1, i.e. min ||X w - y||_2_2 + ||w||_2^2.
Last coefficient is intercept.
"""
# Make larger dim more than double as big as the smaller one.
# This helps when constructing singular matrices like (X, X).
if request.param == "long":
n_samples, n_features = 12, 4
else:
n_samples, n_features = 4, 12
k = min(n_samples, n_features)
rng = np.random.RandomState(global_random_seed)
X = make_low_rank_matrix(
n_samples=n_samples, n_features=n_features, effective_rank=k, random_state=rng
)
X[:, -1] = 1 # last columns acts as intercept
U, s, Vt = linalg.svd(X)
assert np.all(s > 1e-3) # to be sure
U1, U2 = U[:, :k], U[:, k:]
Vt1, _ = Vt[:k, :], Vt[k:, :]
if request.param == "long":
# Add a term that vanishes in the product X'y
coef_ols = rng.uniform(low=-10, high=10, size=n_features)
y = X @ coef_ols
y += U2 @ rng.normal(size=n_samples - n_features) ** 2
else:
y = rng.uniform(low=-10, high=10, size=n_samples)
# w = X'(XX')^-1 y = V s^-1 U' y
coef_ols = Vt1.T @ np.diag(1 / s) @ U1.T @ y
# Add penalty alpha * ||coef||_2^2 for alpha=1 and solve via normal equations.
# Note that the problem is well conditioned such that we get accurate results.
alpha = 1
d = alpha * np.identity(n_features)
d[-1, -1] = 0 # intercept gets no penalty
coef_ridge = linalg.solve(X.T @ X + d, X.T @ y)
# To be sure
R_OLS = y - X @ coef_ols
R_Ridge = y - X @ coef_ridge
assert np.linalg.norm(R_OLS) < np.linalg.norm(R_Ridge)
return X, y, coef_ols, coef_ridge
@pytest.mark.parametrize("solver", SOLVERS)
@pytest.mark.parametrize("fit_intercept", [True, False])
def test_ridge_regression(solver, fit_intercept, ols_ridge_dataset, global_random_seed):
"""Test that Ridge converges for all solvers to correct solution.
We work with a simple constructed data set with known solution.
"""
X, y, _, coef = ols_ridge_dataset
alpha = 1.0 # because ols_ridge_dataset uses this.
params = dict(
alpha=alpha,
fit_intercept=True,
solver=solver,
tol=1e-15 if solver in ("sag", "saga") else 1e-10,
random_state=global_random_seed,
)
# Calculate residuals and R2.
res_null = y - np.mean(y)
res_Ridge = y - X @ coef
R2_Ridge = 1 - np.sum(res_Ridge**2) / np.sum(res_null**2)
model = Ridge(**params)
X = X[:, :-1] # remove intercept
if fit_intercept:
intercept = coef[-1]
else:
X = X - X.mean(axis=0)
y = y - y.mean()
intercept = 0
model.fit(X, y)
coef = coef[:-1]
assert model.intercept_ == pytest.approx(intercept)
assert_allclose(model.coef_, coef)
assert model.score(X, y) == pytest.approx(R2_Ridge)
# Same with sample_weight.
model = Ridge(**params).fit(X, y, sample_weight=np.ones(X.shape[0]))
assert model.intercept_ == pytest.approx(intercept)
assert_allclose(model.coef_, coef)
assert model.score(X, y) == pytest.approx(R2_Ridge)
assert model.solver_ == solver
@pytest.mark.parametrize("solver", SOLVERS)
@pytest.mark.parametrize("fit_intercept", [True, False])
def test_ridge_regression_hstacked_X(
solver, fit_intercept, ols_ridge_dataset, global_random_seed
):
"""Test that Ridge converges for all solvers to correct solution on hstacked data.
We work with a simple constructed data set with known solution.
Fit on [X] with alpha is the same as fit on [X, X]/2 with alpha/2.
For long X, [X, X] is a singular matrix.
"""
X, y, _, coef = ols_ridge_dataset
n_samples, n_features = X.shape
alpha = 1.0 # because ols_ridge_dataset uses this.
model = Ridge(
alpha=alpha / 2,
fit_intercept=fit_intercept,
solver=solver,
tol=1e-15 if solver in ("sag", "saga") else 1e-10,
random_state=global_random_seed,
)
X = X[:, :-1] # remove intercept
X = 0.5 * np.concatenate((X, X), axis=1)
assert np.linalg.matrix_rank(X) <= min(n_samples, n_features - 1)
if fit_intercept:
intercept = coef[-1]
else:
X = X - X.mean(axis=0)
y = y - y.mean()
intercept = 0
model.fit(X, y)
coef = coef[:-1]
assert model.intercept_ == pytest.approx(intercept)
# coefficients are not all on the same magnitude, adding a small atol to
# make this test less brittle
assert_allclose(model.coef_, np.r_[coef, coef], atol=1e-8)
@pytest.mark.parametrize("solver", SOLVERS)
@pytest.mark.parametrize("fit_intercept", [True, False])
def test_ridge_regression_vstacked_X(
solver, fit_intercept, ols_ridge_dataset, global_random_seed
):
"""Test that Ridge converges for all solvers to correct solution on vstacked data.
We work with a simple constructed data set with known solution.
Fit on [X] with alpha is the same as fit on [X], [y]
[X], [y] with 2 * alpha.
For wide X, [X', X'] is a singular matrix.
"""
X, y, _, coef = ols_ridge_dataset
n_samples, n_features = X.shape
alpha = 1.0 # because ols_ridge_dataset uses this.
model = Ridge(
alpha=2 * alpha,
fit_intercept=fit_intercept,
solver=solver,
tol=1e-15 if solver in ("sag", "saga") else 1e-10,
random_state=global_random_seed,
)
X = X[:, :-1] # remove intercept
X = np.concatenate((X, X), axis=0)
assert np.linalg.matrix_rank(X) <= min(n_samples, n_features)
y = np.r_[y, y]
if fit_intercept:
intercept = coef[-1]
else:
X = X - X.mean(axis=0)
y = y - y.mean()
intercept = 0
model.fit(X, y)
coef = coef[:-1]
assert model.intercept_ == pytest.approx(intercept)
# coefficients are not all on the same magnitude, adding a small atol to
# make this test less brittle
assert_allclose(model.coef_, coef, atol=1e-8)
@pytest.mark.parametrize("solver", SOLVERS)
@pytest.mark.parametrize("fit_intercept", [True, False])
def test_ridge_regression_unpenalized(
solver, fit_intercept, ols_ridge_dataset, global_random_seed
):
"""Test that unpenalized Ridge = OLS converges for all solvers to correct solution.
We work with a simple constructed data set with known solution.
Note: This checks the minimum norm solution for wide X, i.e.
n_samples < n_features:
min ||w||_2 subject to X w = y
"""
X, y, coef, _ = ols_ridge_dataset
n_samples, n_features = X.shape
alpha = 0 # OLS
params = dict(
alpha=alpha,
fit_intercept=fit_intercept,
solver=solver,
tol=1e-15 if solver in ("sag", "saga") else 1e-10,
random_state=global_random_seed,
)
model = Ridge(**params)
# Note that cholesky might give a warning: "Singular matrix in solving dual
# problem. Using least-squares solution instead."
if fit_intercept:
X = X[:, :-1] # remove intercept
intercept = coef[-1]
coef = coef[:-1]
else:
intercept = 0
model.fit(X, y)
# FIXME: `assert_allclose(model.coef_, coef)` should work for all cases but fails
# for the wide/fat case with n_features > n_samples. The current Ridge solvers do
# NOT return the minimum norm solution with fit_intercept=True.
if n_samples > n_features or not fit_intercept:
assert model.intercept_ == pytest.approx(intercept)
assert_allclose(model.coef_, coef)
else:
# As it is an underdetermined problem, residuals = 0. This shows that we get
# a solution to X w = y ....
assert_allclose(model.predict(X), y)
assert_allclose(X @ coef + intercept, y)
# But it is not the minimum norm solution. (This should be equal.)
assert np.linalg.norm(np.r_[model.intercept_, model.coef_]) > np.linalg.norm(
np.r_[intercept, coef]
)
pytest.xfail(reason="Ridge does not provide the minimum norm solution.")
assert model.intercept_ == pytest.approx(intercept)
assert_allclose(model.coef_, coef)
@pytest.mark.parametrize("solver", SOLVERS)
@pytest.mark.parametrize("fit_intercept", [True, False])
def test_ridge_regression_unpenalized_hstacked_X(
solver, fit_intercept, ols_ridge_dataset, global_random_seed
):
"""Test that unpenalized Ridge = OLS converges for all solvers to correct solution.
We work with a simple constructed data set with known solution.
OLS fit on [X] is the same as fit on [X, X]/2.
For long X, [X, X] is a singular matrix and we check against the minimum norm
solution:
min ||w||_2 subject to min ||X w - y||_2
"""
X, y, coef, _ = ols_ridge_dataset
n_samples, n_features = X.shape
alpha = 0 # OLS
model = Ridge(
alpha=alpha,
fit_intercept=fit_intercept,
solver=solver,
tol=1e-15 if solver in ("sag", "saga") else 1e-10,
random_state=global_random_seed,
)
if fit_intercept:
X = X[:, :-1] # remove intercept
intercept = coef[-1]
coef = coef[:-1]
else:
intercept = 0
X = 0.5 * np.concatenate((X, X), axis=1)
assert np.linalg.matrix_rank(X) <= min(n_samples, n_features)
model.fit(X, y)
if n_samples > n_features or not fit_intercept:
assert model.intercept_ == pytest.approx(intercept)
if solver == "cholesky":
# Cholesky is a bad choice for singular X.
pytest.skip()
assert_allclose(model.coef_, np.r_[coef, coef])
else:
# FIXME: Same as in test_ridge_regression_unpenalized.
# As it is an underdetermined problem, residuals = 0. This shows that we get
# a solution to X w = y ....
assert_allclose(model.predict(X), y)
# But it is not the minimum norm solution. (This should be equal.)
assert np.linalg.norm(np.r_[model.intercept_, model.coef_]) > np.linalg.norm(
np.r_[intercept, coef, coef]
)
pytest.xfail(reason="Ridge does not provide the minimum norm solution.")
assert model.intercept_ == pytest.approx(intercept)
assert_allclose(model.coef_, np.r_[coef, coef])
@pytest.mark.parametrize("solver", SOLVERS)
@pytest.mark.parametrize("fit_intercept", [True, False])
def test_ridge_regression_unpenalized_vstacked_X(
solver, fit_intercept, ols_ridge_dataset, global_random_seed
):
"""Test that unpenalized Ridge = OLS converges for all solvers to correct solution.
We work with a simple constructed data set with known solution.
OLS fit on [X] is the same as fit on [X], [y]
[X], [y].
For wide X, [X', X'] is a singular matrix and we check against the minimum norm
solution:
min ||w||_2 subject to X w = y
"""
X, y, coef, _ = ols_ridge_dataset
n_samples, n_features = X.shape
alpha = 0 # OLS
model = Ridge(
alpha=alpha,
fit_intercept=fit_intercept,
solver=solver,
tol=1e-15 if solver in ("sag", "saga") else 1e-10,
random_state=global_random_seed,
)
if fit_intercept:
X = X[:, :-1] # remove intercept
intercept = coef[-1]
coef = coef[:-1]
else:
intercept = 0
X = np.concatenate((X, X), axis=0)
assert np.linalg.matrix_rank(X) <= min(n_samples, n_features)
y = np.r_[y, y]
model.fit(X, y)
if n_samples > n_features or not fit_intercept:
assert model.intercept_ == pytest.approx(intercept)
assert_allclose(model.coef_, coef)
else:
# FIXME: Same as in test_ridge_regression_unpenalized.
# As it is an underdetermined problem, residuals = 0. This shows that we get
# a solution to X w = y ....
assert_allclose(model.predict(X), y)
# But it is not the minimum norm solution. (This should be equal.)
assert np.linalg.norm(np.r_[model.intercept_, model.coef_]) > np.linalg.norm(
np.r_[intercept, coef]
)
pytest.xfail(reason="Ridge does not provide the minimum norm solution.")
assert model.intercept_ == pytest.approx(intercept)
assert_allclose(model.coef_, coef)
@pytest.mark.parametrize("solver", SOLVERS)
@pytest.mark.parametrize("fit_intercept", [True, False])
@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS)
@pytest.mark.parametrize("alpha", [1.0, 1e-2])
def test_ridge_regression_sample_weights(
solver,
fit_intercept,
sparse_container,
alpha,
ols_ridge_dataset,
global_random_seed,
):
"""Test that Ridge with sample weights gives correct results.
We use the following trick:
||y - Xw||_2 = (z - Aw)' W (z - Aw)
for z=[y, y], A' = [X', X'] (vstacked), and W[:n/2] + W[n/2:] = 1, W=diag(W)
"""
if sparse_container is not None:
if fit_intercept and solver not in SPARSE_SOLVERS_WITH_INTERCEPT:
pytest.skip()
elif not fit_intercept and solver not in SPARSE_SOLVERS_WITHOUT_INTERCEPT:
pytest.skip()
X, y, _, coef = ols_ridge_dataset
n_samples, n_features = X.shape
sw = rng.uniform(low=0, high=1, size=n_samples)
model = Ridge(
alpha=alpha,
fit_intercept=fit_intercept,
solver=solver,
tol=1e-15 if solver in ["sag", "saga"] else 1e-10,
max_iter=100_000,
random_state=global_random_seed,
)
X = X[:, :-1] # remove intercept
X = np.concatenate((X, X), axis=0)
y = np.r_[y, y]
sw = np.r_[sw, 1 - sw] * alpha
if fit_intercept:
intercept = coef[-1]
else:
X = X - X.mean(axis=0)
y = y - y.mean()
intercept = 0
if sparse_container is not None:
X = sparse_container(X)
model.fit(X, y, sample_weight=sw)
coef = coef[:-1]
assert model.intercept_ == pytest.approx(intercept)
assert_allclose(model.coef_, coef)
def test_primal_dual_relationship():
y = y_diabetes.reshape(-1, 1)
coef = _solve_cholesky(X_diabetes, y, alpha=[1e-2])
K = np.dot(X_diabetes, X_diabetes.T)
dual_coef = _solve_cholesky_kernel(K, y, alpha=[1e-2])
coef2 = np.dot(X_diabetes.T, dual_coef).T
assert_array_almost_equal(coef, coef2)
def test_ridge_regression_convergence_fail():
rng = np.random.RandomState(0)
y = rng.randn(5)
X = rng.randn(5, 10)
warning_message = r"sparse_cg did not converge after [0-9]+ iterations."
with pytest.warns(ConvergenceWarning, match=warning_message):
ridge_regression(
X, y, alpha=1.0, solver="sparse_cg", tol=0.0, max_iter=None, verbose=1
)
def test_ridge_shapes_type():
# Test shape of coef_ and intercept_
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y1 = y[:, np.newaxis]
Y = np.c_[y, 1 + y]
ridge = Ridge()
ridge.fit(X, y)
assert ridge.coef_.shape == (n_features,)
assert ridge.intercept_.shape == ()
assert isinstance(ridge.coef_, np.ndarray)
assert isinstance(ridge.intercept_, float)
ridge.fit(X, Y1)
assert ridge.coef_.shape == (n_features,)
assert ridge.intercept_.shape == (1,)
assert isinstance(ridge.coef_, np.ndarray)
assert isinstance(ridge.intercept_, np.ndarray)
ridge.fit(X, Y)
assert ridge.coef_.shape == (2, n_features)
assert ridge.intercept_.shape == (2,)
assert isinstance(ridge.coef_, np.ndarray)
assert isinstance(ridge.intercept_, np.ndarray)
def test_ridge_intercept():
# Test intercept with multiple targets GH issue #708
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y = np.c_[y, 1.0 + y]
ridge = Ridge()
ridge.fit(X, y)
intercept = ridge.intercept_
ridge.fit(X, Y)
assert_almost_equal(ridge.intercept_[0], intercept)
assert_almost_equal(ridge.intercept_[1], intercept + 1.0)
def test_ridge_vs_lstsq():
# On alpha=0., Ridge and OLS yield the same solution.
rng = np.random.RandomState(0)
# we need more samples than features
n_samples, n_features = 5, 4
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=0.0, fit_intercept=False)
ols = LinearRegression(fit_intercept=False)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
def test_ridge_individual_penalties():
# Tests the ridge object using individual penalties
rng = np.random.RandomState(42)
n_samples, n_features, n_targets = 20, 10, 5
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples, n_targets)
penalties = np.arange(n_targets)
coef_cholesky = np.array(
[
Ridge(alpha=alpha, solver="cholesky").fit(X, target).coef_
for alpha, target in zip(penalties, y.T)
]
)
coefs_indiv_pen = [
Ridge(alpha=penalties, solver=solver, tol=1e-12).fit(X, y).coef_
for solver in ["svd", "sparse_cg", "lsqr", "cholesky", "sag", "saga"]
]
for coef_indiv_pen in coefs_indiv_pen:
assert_array_almost_equal(coef_cholesky, coef_indiv_pen)
# Test error is raised when number of targets and penalties do not match.
ridge = Ridge(alpha=penalties[:-1])
err_msg = "Number of targets and number of penalties do not correspond: 4 != 5"
with pytest.raises(ValueError, match=err_msg):
ridge.fit(X, y)
@pytest.mark.parametrize("n_col", [(), (1,), (3,)])
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_X_CenterStackOp(n_col, csr_container):
rng = np.random.RandomState(0)
X = rng.randn(11, 8)
X_m = rng.randn(8)
sqrt_sw = rng.randn(len(X))
Y = rng.randn(11, *n_col)
A = rng.randn(9, *n_col)
operator = _X_CenterStackOp(csr_container(X), X_m, sqrt_sw)
reference_operator = np.hstack([X - sqrt_sw[:, None] * X_m, sqrt_sw[:, None]])
assert_allclose(reference_operator.dot(A), operator.dot(A))
assert_allclose(reference_operator.T.dot(Y), operator.T.dot(Y))
@pytest.mark.parametrize("shape", [(10, 1), (13, 9), (3, 7), (2, 2), (20, 20)])
@pytest.mark.parametrize("uniform_weights", [True, False])
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_compute_gram(shape, uniform_weights, csr_container):
rng = np.random.RandomState(0)
X = rng.randn(*shape)
if uniform_weights:
sw = np.ones(X.shape[0])
else:
sw = rng.chisquare(1, shape[0])
sqrt_sw = np.sqrt(sw)
X_mean = np.average(X, axis=0, weights=sw)
X_centered = (X - X_mean) * sqrt_sw[:, None]
true_gram = X_centered.dot(X_centered.T)
X_sparse = csr_container(X * sqrt_sw[:, None])
gcv = _RidgeGCV(fit_intercept=True)
computed_gram, computed_mean = gcv._compute_gram(X_sparse, sqrt_sw)
assert_allclose(X_mean, computed_mean)
assert_allclose(true_gram, computed_gram)
@pytest.mark.parametrize("shape", [(10, 1), (13, 9), (3, 7), (2, 2), (20, 20)])
@pytest.mark.parametrize("uniform_weights", [True, False])
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_compute_covariance(shape, uniform_weights, csr_container):
rng = np.random.RandomState(0)
X = rng.randn(*shape)
if uniform_weights:
sw = np.ones(X.shape[0])
else:
sw = rng.chisquare(1, shape[0])
sqrt_sw = np.sqrt(sw)
X_mean = np.average(X, axis=0, weights=sw)
X_centered = (X - X_mean) * sqrt_sw[:, None]
true_covariance = X_centered.T.dot(X_centered)
X_sparse = csr_container(X * sqrt_sw[:, None])
gcv = _RidgeGCV(fit_intercept=True)
computed_cov, computed_mean = gcv._compute_covariance(X_sparse, sqrt_sw)
assert_allclose(X_mean, computed_mean)
assert_allclose(true_covariance, computed_cov)
def _make_sparse_offset_regression(
n_samples=100,
n_features=100,
proportion_nonzero=0.5,
n_informative=10,
n_targets=1,
bias=13.0,
X_offset=30.0,
noise=30.0,
shuffle=True,
coef=False,
positive=False,
random_state=None,
):
X, y, c = make_regression(
n_samples=n_samples,
n_features=n_features,
n_informative=n_informative,
n_targets=n_targets,
bias=bias,
noise=noise,
shuffle=shuffle,
coef=True,
random_state=random_state,
)
if n_features == 1:
c = np.asarray([c])
X += X_offset
mask = (
np.random.RandomState(random_state).binomial(1, proportion_nonzero, X.shape) > 0
)
removed_X = X.copy()
X[~mask] = 0.0
removed_X[mask] = 0.0
y -= removed_X.dot(c)
if positive:
y += X.dot(np.abs(c) + 1 - c)
c = np.abs(c) + 1
if n_features == 1:
c = c[0]
if coef:
return X, y, c
return X, y
@pytest.mark.parametrize(
    "solver, sparse_container",
    (
        (solver, sparse_container)
        for (solver, sparse_container) in product(
            ["cholesky", "sag", "sparse_cg", "lsqr", "saga", "ridgecv"],
            [None] + CSR_CONTAINERS,
        )
        # Only pair sparse input with solvers exercised on sparse data here.
        if sparse_container is None or solver in ["sparse_cg", "ridgecv"]
    ),
)
@pytest.mark.parametrize(
    "n_samples,dtype,proportion_nonzero",
    [(20, "float32", 0.1), (40, "float32", 1.0), (20, "float64", 0.2)],
)
def test_solver_consistency(
    solver, proportion_nonzero, n_samples, dtype, sparse_container, global_random_seed
):
    """All Ridge solvers (and RidgeCV with a single alpha) should agree with
    the exact "svd" solver on the same data, within a loose tolerance."""
    alpha = 1.0
    # Use more noise for mostly-zero designs; the svd reference is fitted on
    # the very same data, so the comparison itself is unaffected.
    noise = 50.0 if proportion_nonzero > 0.9 else 500.0
    X, y = _make_sparse_offset_regression(
        bias=10,
        n_features=30,
        proportion_nonzero=proportion_nonzero,
        noise=noise,
        random_state=global_random_seed,
        n_samples=n_samples,
    )
    # Manually scale the data to avoid pathological cases. We use
    # minmax_scale to deal with the sparse case without breaking
    # the sparsity pattern.
    X = minmax_scale(X)
    # Reference fit with the exact solver, before any dtype casting.
    svd_ridge = Ridge(solver="svd", alpha=alpha).fit(X, y)
    X = X.astype(dtype, copy=False)
    y = y.astype(dtype, copy=False)
    if sparse_container is not None:
        X = sparse_container(X)
    if solver == "ridgecv":
        # RidgeCV with a single candidate alpha behaves like a plain fit.
        ridge = RidgeCV(alphas=[alpha])
    else:
        if solver.startswith("sag"):
            # Avoid ConvergenceWarning for sag and saga solvers.
            tol = 1e-7
            max_iter = 100_000
        else:
            tol = 1e-10
            max_iter = None
        ridge = Ridge(
            alpha=alpha,
            solver=solver,
            max_iter=max_iter,
            tol=tol,
            random_state=global_random_seed,
        )
    ridge.fit(X, y)
    assert_allclose(ridge.coef_, svd_ridge.coef_, atol=1e-3, rtol=1e-3)
    assert_allclose(ridge.intercept_, svd_ridge.intercept_, atol=1e-3, rtol=1e-3)
@pytest.mark.parametrize("gcv_mode", ["svd", "eigen"])
@pytest.mark.parametrize("X_container", [np.asarray] + CSR_CONTAINERS)
@pytest.mark.parametrize("X_shape", [(11, 8), (11, 20)])
@pytest.mark.parametrize("fit_intercept", [True, False])
@pytest.mark.parametrize(
    "y_shape, noise",
    [
        ((11,), 1.0),
        ((11, 1), 30.0),
        ((11, 3), 150.0),
    ],
)
def test_ridge_gcv_vs_ridge_loo_cv(
    gcv_mode, X_container, X_shape, y_shape, fit_intercept, noise
):
    """RidgeCV's closed-form GCV (both "svd" and "eigen" modes, dense and
    sparse input) must match an explicit leave-one-out cross-validation."""
    n_samples, n_features = X_shape
    n_targets = y_shape[-1] if len(y_shape) == 2 else 1
    X, y = _make_sparse_offset_regression(
        n_samples=n_samples,
        n_features=n_features,
        n_targets=n_targets,
        random_state=0,
        shuffle=False,
        noise=noise,
        n_informative=5,
    )
    y = y.reshape(y_shape)
    alphas = [1e-3, 0.1, 1.0, 10.0, 1e3]
    # Explicit LOO: cv=n_samples, scored with negative MSE (the criterion the
    # GCV shortcut optimizes by default).
    loo_ridge = RidgeCV(
        cv=n_samples,
        fit_intercept=fit_intercept,
        alphas=alphas,
        scoring="neg_mean_squared_error",
    )
    gcv_ridge = RidgeCV(
        gcv_mode=gcv_mode,
        fit_intercept=fit_intercept,
        alphas=alphas,
    )
    loo_ridge.fit(X, y)
    # Only the GCV path is exercised on the (possibly sparse) container.
    X_gcv = X_container(X)
    gcv_ridge.fit(X_gcv, y)
    assert gcv_ridge.alpha_ == pytest.approx(loo_ridge.alpha_)
    assert_allclose(gcv_ridge.coef_, loo_ridge.coef_, rtol=1e-3)
    assert_allclose(gcv_ridge.intercept_, loo_ridge.intercept_, rtol=1e-3)
def test_ridge_loo_cv_asym_scoring():
    """GCV must agree with explicit leave-one-out CV when the scoring
    function is asymmetric (explained variance)."""
    scoring = "explained_variance"
    n_samples = 10
    X, y = _make_sparse_offset_regression(
        n_samples=n_samples,
        n_features=5,
        n_targets=1,
        random_state=0,
        shuffle=False,
        noise=1,
        n_informative=5,
    )
    candidate_alphas = [1e-3, 0.1, 1.0, 10.0, 1e3]
    loo_ridge = RidgeCV(
        cv=n_samples, fit_intercept=True, alphas=candidate_alphas, scoring=scoring
    ).fit(X, y)
    gcv_ridge = RidgeCV(
        fit_intercept=True, alphas=candidate_alphas, scoring=scoring
    ).fit(X, y)
    assert gcv_ridge.alpha_ == pytest.approx(loo_ridge.alpha_), (
        f"{gcv_ridge.alpha_=}, {loo_ridge.alpha_=}"
    )
    assert_allclose(gcv_ridge.coef_, loo_ridge.coef_, rtol=1e-3)
    assert_allclose(gcv_ridge.intercept_, loo_ridge.intercept_, rtol=1e-3)
@pytest.mark.parametrize("gcv_mode", ["svd", "eigen"])
@pytest.mark.parametrize("X_container", [np.asarray] + CSR_CONTAINERS)
@pytest.mark.parametrize("n_features", [8, 20])
@pytest.mark.parametrize(
    "y_shape, fit_intercept, noise",
    [
        ((11,), True, 1.0),
        ((11, 1), True, 20.0),
        ((11, 3), True, 150.0),
        ((11, 3), False, 30.0),
    ],
)
def test_ridge_gcv_sample_weights(
    gcv_mode, X_container, fit_intercept, n_features, y_shape, noise
):
    """GCV with sample weights must match cross-validation on a dataset where
    each sample is repeated as many times as its (integer) weight, using
    GroupKFold so that all copies of a sample land in the same fold."""
    alphas = [1e-3, 0.1, 1.0, 10.0, 1e3]
    rng = np.random.RandomState(0)
    n_targets = y_shape[-1] if len(y_shape) == 2 else 1
    X, y = _make_sparse_offset_regression(
        n_samples=11,
        n_features=n_features,
        n_targets=n_targets,
        random_state=0,
        shuffle=False,
        noise=noise,
    )
    y = y.reshape(y_shape)
    # Draw integer weights >= 1 so weighting can be emulated exactly by
    # repeating each sample `weight` times.
    sample_weight = 3 * rng.randn(len(X))
    sample_weight = (sample_weight - sample_weight.min() + 1).astype(int)
    # `indices[i]` maps each row of the tiled dataset back to its original
    # sample; it also serves as the group label for GroupKFold.
    indices = np.repeat(np.arange(X.shape[0]), sample_weight)
    sample_weight = sample_weight.astype(float)
    X_tiled, y_tiled = X[indices], y[indices]
    # One fold per original sample: leave-one-(weighted)-sample-out.
    cv = GroupKFold(n_splits=X.shape[0])
    splits = cv.split(X_tiled, y_tiled, groups=indices)
    kfold = RidgeCV(
        alphas=alphas,
        cv=splits,
        scoring="neg_mean_squared_error",
        fit_intercept=fit_intercept,
    )
    kfold.fit(X_tiled, y_tiled)
    ridge_reg = Ridge(alpha=kfold.alpha_, fit_intercept=fit_intercept)
    # `splits` above is a consumed generator; recreate it for cross_val_predict.
    splits = cv.split(X_tiled, y_tiled, groups=indices)
    predictions = cross_val_predict(ridge_reg, X_tiled, y_tiled, cv=splits)
    if predictions.shape != y_tiled.shape:
        predictions = predictions.reshape(y_tiled.shape)
    # Per-original-sample squared errors, summing over the repeated copies.
    kfold_errors = (y_tiled - predictions) ** 2
    kfold_errors = [
        np.sum(kfold_errors[indices == i], axis=0) for i in np.arange(X.shape[0])
    ]
    kfold_errors = np.asarray(kfold_errors)
    X_gcv = X_container(X)
    gcv_ridge = RidgeCV(
        alphas=alphas,
        store_cv_results=True,
        gcv_mode=gcv_mode,
        fit_intercept=fit_intercept,
    )
    gcv_ridge.fit(X_gcv, y, sample_weight=sample_weight)
    # cv_results_ is indexed by alpha on its last axis; it has an extra target
    # axis when y is 2-dimensional.
    if len(y_shape) == 2:
        gcv_errors = gcv_ridge.cv_results_[:, :, alphas.index(kfold.alpha_)]
    else:
        gcv_errors = gcv_ridge.cv_results_[:, alphas.index(kfold.alpha_)]
    assert kfold.alpha_ == pytest.approx(gcv_ridge.alpha_)
    assert_allclose(gcv_errors, kfold_errors, rtol=1e-3)
    assert_allclose(gcv_ridge.coef_, kfold.coef_, rtol=1e-3)
    assert_allclose(gcv_ridge.intercept_, kfold.intercept_, rtol=1e-3)
@pytest.mark.parametrize("sparse_container", [None] + CSR_CONTAINERS)
@pytest.mark.parametrize(
    "mode, mode_n_greater_than_p, mode_p_greater_than_n",
    [
        (None, "svd", "eigen"),
        ("auto", "svd", "eigen"),
        ("eigen", "eigen", "eigen"),
        ("svd", "svd", "svd"),
    ],
)
def test_check_gcv_mode_choice(
    sparse_container, mode, mode_n_greater_than_p, mode_p_greater_than_n
):
    """_check_gcv_mode must pick "svd" for tall and "eigen" for wide designs
    when mode is None/"auto", and honor an explicit mode otherwise."""
    X, _ = make_regression(n_samples=5, n_features=2)
    if sparse_container is not None:
        X = sparse_container(X)
    # X is tall (n_samples > n_features); X.T is wide.
    cases = ((X, mode_n_greater_than_p), (X.T, mode_p_greater_than_n))
    for design, expected_mode in cases:
        assert _check_gcv_mode(design, mode) == expected_mode
def _test_ridge_loo(sparse_container):
# test that can work with both dense or sparse matrices
n_samples = X_diabetes.shape[0]
ret = []
if sparse_container is None:
X, fit_intercept = X_diabetes, True
else:
X, fit_intercept = sparse_container(X_diabetes), False
ridge_gcv = _RidgeGCV(fit_intercept=fit_intercept)
# check best alpha
ridge_gcv.fit(X, y_diabetes)
alpha_ = ridge_gcv.alpha_
ret.append(alpha_)
# check that we get same best alpha with custom loss_func
f = ignore_warnings
scoring = make_scorer(mean_squared_error, greater_is_better=False)
ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv2.fit)(X, y_diabetes)
assert ridge_gcv2.alpha_ == pytest.approx(alpha_)
# check that we get same best alpha with custom score_func
def func(x, y):
return -mean_squared_error(x, y)
scoring = make_scorer(func)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/_glm/glm.py | sklearn/linear_model/_glm/glm.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
"""
Generalized Linear Models with Exponential Dispersion Family
"""
from numbers import Integral, Real
import numpy as np
import scipy.optimize
from sklearn._loss.loss import (
HalfGammaLoss,
HalfPoissonLoss,
HalfSquaredError,
HalfTweedieLoss,
HalfTweedieLossIdentity,
)
from sklearn.base import BaseEstimator, RegressorMixin, _fit_context
from sklearn.linear_model._glm._newton_solver import NewtonCholeskySolver, NewtonSolver
from sklearn.linear_model._linear_loss import LinearModelLoss
from sklearn.utils import check_array
from sklearn.utils._openmp_helpers import _openmp_effective_n_threads
from sklearn.utils._param_validation import Hidden, Interval, StrOptions
from sklearn.utils.fixes import _get_additional_lbfgs_options_dict
from sklearn.utils.optimize import _check_optimize_result
from sklearn.utils.validation import (
_check_sample_weight,
check_is_fitted,
validate_data,
)
class _GeneralizedLinearRegressor(RegressorMixin, BaseEstimator):
    """Regression via a penalized Generalized Linear Model (GLM).

    GLMs based on a reproductive Exponential Dispersion Model (EDM) aim at fitting and
    predicting the mean of the target y as y_pred=h(X*w) with coefficients w.
    Therefore, the fit minimizes the following objective function with L2 priors as
    regularizer::

        1/(2*sum(s_i)) * sum(s_i * deviance(y_i, h(x_i*w))) + 1/2 * alpha * ||w||_2^2

    with inverse link function h, s=sample_weight and per observation (unit) deviance
    deviance(y_i, h(x_i*w)). Note that for an EDM, 1/2 * deviance is the negative
    log-likelihood up to a constant (in w) term.
    The parameter ``alpha`` corresponds to the lambda parameter in glmnet.

    Instead of implementing the EDM family and a link function separately, we directly
    use the loss functions `from sklearn._loss` which have the link functions included
    in them for performance reasons. We pick the loss functions that implement
    (1/2 times) EDM deviances.

    Read more in the :ref:`User Guide <Generalized_linear_models>`.

    .. versionadded:: 0.23

    Parameters
    ----------
    alpha : float, default=1
        Constant that multiplies the penalty term and thus determines the
        regularization strength. ``alpha = 0`` is equivalent to unpenalized
        GLMs. In this case, the design matrix `X` must have full column rank
        (no collinearities).
        Values must be in the range `[0.0, inf)`.

    fit_intercept : bool, default=True
        Specifies if a constant (a.k.a. bias or intercept) should be
        added to the linear predictor (X @ coef + intercept).

    solver : {'lbfgs', 'newton-cholesky'}, default='lbfgs'
        Algorithm to use in the optimization problem:

        'lbfgs'
            Calls scipy's L-BFGS-B optimizer.

        'newton-cholesky'
            Uses Newton-Raphson steps (in arbitrary precision arithmetic equivalent to
            iterated reweighted least squares) with an inner Cholesky based solver.
            This solver is a good choice for `n_samples` >> `n_features`, especially
            with one-hot encoded categorical features with rare categories. Be aware
            that the memory usage of this solver has a quadratic dependency on
            `n_features` because it explicitly computes the Hessian matrix.

            .. versionadded:: 1.2

    max_iter : int, default=100
        The maximal number of iterations for the solver.
        Values must be in the range `[1, inf)`.

    tol : float, default=1e-4
        Stopping criterion. For the lbfgs solver,
        the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol``
        where ``g_j`` is the j-th component of the gradient (derivative) of
        the objective function.
        Values must be in the range `(0.0, inf)`.

    warm_start : bool, default=False
        If set to ``True``, reuse the solution of the previous call to ``fit``
        as initialization for ``coef_`` and ``intercept_``.

    verbose : int, default=0
        For the lbfgs solver set verbose to any positive number for verbosity.
        Values must be in the range `[0, inf)`.

    Attributes
    ----------
    coef_ : array of shape (n_features,)
        Estimated coefficients for the linear predictor (`X @ coef_ +
        intercept_`) in the GLM.

    intercept_ : float
        Intercept (a.k.a. bias) added to linear predictor.

    n_iter_ : int
        Actual number of iterations used in the solver.

    _base_loss : BaseLoss, default=HalfSquaredError()
        This is set during fit via `self._get_loss()`.
        A `_base_loss` contains a specific loss function as well as the link
        function. The loss to be minimized specifies the distributional assumption of
        the GLM, i.e. the distribution from the EDM. Here are some examples:

        ======================= ======== ==========================
        _base_loss              Link     Target Domain
        ======================= ======== ==========================
        HalfSquaredError        identity y any real number
        HalfPoissonLoss         log      0 <= y
        HalfGammaLoss           log      0 < y
        HalfTweedieLoss         log      dependent on tweedie power
        HalfTweedieLossIdentity identity dependent on tweedie power
        ======================= ======== ==========================

        The link function of the GLM, i.e. mapping from linear predictor
        `X @ coeff + intercept` to prediction `y_pred`. For instance, with a log link,
        we have `y_pred = exp(X @ coeff + intercept)`.
    """

    # We allow for NewtonSolver classes for the "solver" parameter but do not
    # make them public in the docstrings. This facilitates testing and
    # benchmarking.
    _parameter_constraints: dict = {
        "alpha": [Interval(Real, 0.0, None, closed="left")],
        "fit_intercept": ["boolean"],
        "solver": [
            StrOptions({"lbfgs", "newton-cholesky"}),
            Hidden(type),
        ],
        "max_iter": [Interval(Integral, 1, None, closed="left")],
        "tol": [Interval(Real, 0.0, None, closed="neither")],
        "warm_start": ["boolean"],
        "verbose": ["verbose"],
    }

    def __init__(
        self,
        *,
        alpha=1.0,
        fit_intercept=True,
        solver="lbfgs",
        max_iter=100,
        tol=1e-4,
        warm_start=False,
        verbose=0,
    ):
        self.alpha = alpha
        self.fit_intercept = fit_intercept
        self.solver = solver
        self.max_iter = max_iter
        self.tol = tol
        self.warm_start = warm_start
        self.verbose = verbose

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y, sample_weight=None):
        """Fit a Generalized Linear Model.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training data.

        y : array-like of shape (n_samples,)
            Target values.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights.

        Returns
        -------
        self : object
            Fitted model.
        """
        X, y = validate_data(
            self,
            X,
            y,
            accept_sparse=["csc", "csr"],
            dtype=[np.float64, np.float32],
            y_numeric=True,
            multi_output=False,
        )

        # required by losses
        if self.solver == "lbfgs":
            # lbfgs will force coef and therefore raw_prediction to be float64. The
            # base_loss needs y, X @ coef and sample_weight all of same dtype
            # (and contiguous).
            loss_dtype = np.float64
        else:
            loss_dtype = min(max(y.dtype, X.dtype), np.float64)
        y = check_array(y, dtype=loss_dtype, order="C", ensure_2d=False)

        if sample_weight is not None:
            # Note that _check_sample_weight calls check_array(order="C") required by
            # losses.
            sample_weight = _check_sample_weight(sample_weight, X, dtype=loss_dtype)

        n_samples, n_features = X.shape
        self._base_loss = self._get_loss()

        linear_loss = LinearModelLoss(
            base_loss=self._base_loss,
            fit_intercept=self.fit_intercept,
        )

        if not linear_loss.base_loss.in_y_true_range(y):
            raise ValueError(
                "Some value(s) of y are out of the valid range of the loss"
                f" {self._base_loss.__class__.__name__!r}."
            )

        # TODO: if alpha=0 check that X is not rank deficient

        # NOTE: Rescaling of sample_weight:
        # We want to minimize
        #     obj = 1/(2 * sum(sample_weight)) * sum(sample_weight * deviance)
        #           + 1/2 * alpha * L2,
        # with
        #     deviance = 2 * loss.
        # The objective is invariant to multiplying sample_weight by a constant. We
        # could choose this constant such that sum(sample_weight) = 1 in order to end
        # up with
        #     obj = sum(sample_weight * loss) + 1/2 * alpha * L2.
        # But LinearModelLoss.loss() already computes
        #     average(loss, weights=sample_weight)
        # Thus, without rescaling, we have
        #     obj = LinearModelLoss.loss(...)

        if self.warm_start and hasattr(self, "coef_"):
            if self.fit_intercept:
                # LinearModelLoss needs intercept at the end of coefficient array.
                coef = np.concatenate((self.coef_, np.array([self.intercept_])))
            else:
                coef = self.coef_
            coef = coef.astype(loss_dtype, copy=False)
        else:
            coef = linear_loss.init_zero_coef(X, dtype=loss_dtype)
            if self.fit_intercept:
                # Start the intercept at the link-transformed weighted mean of y,
                # i.e. the optimum of an intercept-only model.
                coef[-1] = linear_loss.base_loss.link.link(
                    np.average(y, weights=sample_weight)
                )

        l2_reg_strength = self.alpha
        n_threads = _openmp_effective_n_threads()

        # Algorithms for optimization:
        # Note again that our losses implement 1/2 * deviance.
        if self.solver == "lbfgs":
            func = linear_loss.loss_gradient

            opt_res = scipy.optimize.minimize(
                func,
                coef,
                method="L-BFGS-B",
                jac=True,
                options={
                    "maxiter": self.max_iter,
                    "maxls": 50,  # default is 20
                    "gtol": self.tol,
                    # The constant 64 was found empirically to pass the test suite.
                    # The point is that ftol is very small, but a bit larger than
                    # machine precision for float64, which is the dtype used by lbfgs.
                    "ftol": 64 * np.finfo(float).eps,
                    **_get_additional_lbfgs_options_dict("iprint", self.verbose - 1),
                },
                args=(X, y, sample_weight, l2_reg_strength, n_threads),
            )
            self.n_iter_ = _check_optimize_result(
                "lbfgs", opt_res, max_iter=self.max_iter
            )
            coef = opt_res.x
        elif self.solver == "newton-cholesky":
            sol = NewtonCholeskySolver(
                coef=coef,
                linear_loss=linear_loss,
                l2_reg_strength=l2_reg_strength,
                tol=self.tol,
                max_iter=self.max_iter,
                n_threads=n_threads,
                verbose=self.verbose,
            )
            coef = sol.solve(X, y, sample_weight)
            self.n_iter_ = sol.iteration
        elif issubclass(self.solver, NewtonSolver):
            # Private extension point: a NewtonSolver subclass passed directly
            # (hidden from the public parameter docs, see _parameter_constraints).
            sol = self.solver(
                coef=coef,
                linear_loss=linear_loss,
                l2_reg_strength=l2_reg_strength,
                tol=self.tol,
                max_iter=self.max_iter,
                n_threads=n_threads,
            )
            coef = sol.solve(X, y, sample_weight)
            self.n_iter_ = sol.iteration
        else:
            raise ValueError(f"Invalid solver={self.solver}.")

        if self.fit_intercept:
            self.intercept_ = coef[-1]
            self.coef_ = coef[:-1]
        else:
            # set intercept to zero as the other linear models do
            self.intercept_ = 0.0
            self.coef_ = coef

        return self

    def _linear_predictor(self, X):
        """Compute the linear_predictor = `X @ coef_ + intercept_`.

        Note that we often use the term raw_prediction instead of linear predictor.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Samples.

        Returns
        -------
        y_pred : array of shape (n_samples,)
            Returns predicted values of linear predictor.
        """
        check_is_fitted(self)
        X = validate_data(
            self,
            X,
            accept_sparse=["csr", "csc", "coo"],
            dtype=[np.float64, np.float32],
            ensure_2d=True,
            allow_nd=False,
            reset=False,
        )
        return X @ self.coef_ + self.intercept_

    def predict(self, X):
        """Predict using GLM with feature matrix X.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Samples.

        Returns
        -------
        y_pred : array of shape (n_samples,)
            Returns predicted values.
        """
        # check_array is done in _linear_predictor
        raw_prediction = self._linear_predictor(X)
        y_pred = self._base_loss.link.inverse(raw_prediction)
        return y_pred

    def score(self, X, y, sample_weight=None):
        """Compute D^2, the percentage of deviance explained.

        D^2 is a generalization of the coefficient of determination R^2.
        R^2 uses squared error and D^2 uses the deviance of this GLM, see the
        :ref:`User Guide <regression_metrics>`.

        D^2 is defined as
        :math:`D^2 = 1-\\frac{D(y_{true},y_{pred})}{D_{null}}`,
        :math:`D_{null}` is the null deviance, i.e. the deviance of a model
        with intercept alone, which corresponds to :math:`y_{pred} = \\bar{y}`.
        The mean :math:`\\bar{y}` is averaged by sample_weight.
        Best possible score is 1.0 and it can be negative (because the model
        can be arbitrarily worse).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Test samples.

        y : array-like of shape (n_samples,)
            True values of target.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights.

        Returns
        -------
        score : float
            D^2 of self.predict(X) w.r.t. y.
        """
        # TODO: Adapt link to User Guide in the docstring, once
        # https://github.com/scikit-learn/scikit-learn/pull/22118 is merged.
        #
        # Note, default score defined in RegressorMixin is R^2 score.
        # TODO: make D^2 a score function in module metrics (and thereby get
        # input validation and so on)
        raw_prediction = self._linear_predictor(X)  # validates X
        # required by losses
        y = check_array(y, dtype=raw_prediction.dtype, order="C", ensure_2d=False)

        if sample_weight is not None:
            # Note that _check_sample_weight calls check_array(order="C") required by
            # losses.
            sample_weight = _check_sample_weight(sample_weight, X, dtype=y.dtype)

        base_loss = self._base_loss

        if not base_loss.in_y_true_range(y):
            raise ValueError(
                "Some value(s) of y are out of the valid range of the loss"
                # base_loss is a loss *instance* and has no `__name__` attribute;
                # use the class name (consistent with the error raised in fit),
                # otherwise this would raise AttributeError instead of ValueError.
                f" {base_loss.__class__.__name__}."
            )

        constant = np.average(
            base_loss.constant_to_optimal_zero(y_true=y, sample_weight=None),
            weights=sample_weight,
        )

        # Missing factor of 2 in deviance cancels out.
        deviance = base_loss(
            y_true=y,
            raw_prediction=raw_prediction,
            sample_weight=sample_weight,
            n_threads=1,
        )
        y_mean = base_loss.link.link(np.average(y, weights=sample_weight))
        deviance_null = base_loss(
            y_true=y,
            raw_prediction=np.tile(y_mean, y.shape[0]),
            sample_weight=sample_weight,
            n_threads=1,
        )
        return 1 - (deviance + constant) / (deviance_null + constant)

    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        tags.input_tags.sparse = True
        try:
            # Create instance of BaseLoss if fit wasn't called yet. This is necessary as
            # TweedieRegressor might set the used loss during fit different from
            # self._base_loss.
            base_loss = self._get_loss()
            tags.target_tags.positive_only = not base_loss.in_y_true_range(-1.0)
        except (ValueError, AttributeError, TypeError):
            # This happens when the link or power parameter of TweedieRegressor is
            # invalid. We fallback on the default tags in that case.
            pass  # pragma: no cover
        return tags

    def _get_loss(self):
        """This is only necessary because of the link and power arguments of the
        TweedieRegressor.

        Note that we do not need to pass sample_weight to the loss class as this is
        only needed to set loss.constant_hessian on which GLMs do not rely.
        """
        return HalfSquaredError()
class PoissonRegressor(_GeneralizedLinearRegressor):
    """Generalized Linear Model with a Poisson distribution.

    This regressor uses the 'log' link function.

    Read more in the :ref:`User Guide <Generalized_linear_models>`.

    .. versionadded:: 0.23

    Parameters
    ----------
    alpha : float, default=1
        Constant that multiplies the L2 penalty term and determines the
        regularization strength. ``alpha = 0`` is equivalent to unpenalized
        GLMs. In this case, the design matrix `X` must have full column rank
        (no collinearities).
        Values of `alpha` must be in the range `[0.0, inf)`.

    fit_intercept : bool, default=True
        Specifies if a constant (a.k.a. bias or intercept) should be
        added to the linear predictor (`X @ coef + intercept`).

    solver : {'lbfgs', 'newton-cholesky'}, default='lbfgs'
        Algorithm to use in the optimization problem:

        'lbfgs'
            Calls scipy's L-BFGS-B optimizer.

        'newton-cholesky'
            Uses Newton-Raphson steps (in arbitrary precision arithmetic equivalent to
            iterated reweighted least squares) with an inner Cholesky based solver.
            This solver is a good choice for `n_samples` >> `n_features`, especially
            with one-hot encoded categorical features with rare categories. Be aware
            that the memory usage of this solver has a quadratic dependency on
            `n_features` because it explicitly computes the Hessian matrix.

            .. versionadded:: 1.2

    max_iter : int, default=100
        The maximal number of iterations for the solver.
        Values must be in the range `[1, inf)`.

    tol : float, default=1e-4
        Stopping criterion. For the lbfgs solver,
        the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol``
        where ``g_j`` is the j-th component of the gradient (derivative) of
        the objective function.
        Values must be in the range `(0.0, inf)`.

    warm_start : bool, default=False
        If set to ``True``, reuse the solution of the previous call to ``fit``
        as initialization for ``coef_`` and ``intercept_`` .

    verbose : int, default=0
        For the lbfgs solver set verbose to any positive number for verbosity.
        Values must be in the range `[0, inf)`.

    Attributes
    ----------
    coef_ : array of shape (n_features,)
        Estimated coefficients for the linear predictor (`X @ coef_ +
        intercept_`) in the GLM.

    intercept_ : float
        Intercept (a.k.a. bias) added to linear predictor.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    n_iter_ : int
        Actual number of iterations used in the solver.

    See Also
    --------
    TweedieRegressor : Generalized Linear Model with a Tweedie distribution.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.PoissonRegressor()
    >>> X = [[1, 2], [2, 3], [3, 4], [4, 3]]
    >>> y = [12, 17, 22, 21]
    >>> clf.fit(X, y)
    PoissonRegressor()
    >>> clf.score(X, y)
    np.float64(0.990)
    >>> clf.coef_
    array([0.121, 0.158])
    >>> clf.intercept_
    np.float64(2.088)
    >>> clf.predict([[1, 1], [3, 4]])
    array([10.676, 21.875])
    """

    # Same hyper-parameters as the base GLM; no Poisson-specific constraints.
    _parameter_constraints: dict = {
        **_GeneralizedLinearRegressor._parameter_constraints
    }

    def __init__(
        self,
        *,
        alpha=1.0,
        fit_intercept=True,
        solver="lbfgs",
        max_iter=100,
        tol=1e-4,
        warm_start=False,
        verbose=0,
    ):
        # Pure passthrough: only re-declared to expose the signature explicitly.
        super().__init__(
            alpha=alpha,
            fit_intercept=fit_intercept,
            solver=solver,
            max_iter=max_iter,
            tol=tol,
            warm_start=warm_start,
            verbose=verbose,
        )

    def _get_loss(self):
        # Half Poisson deviance with the log link built in (see the loss/link
        # table in _GeneralizedLinearRegressor's docstring).
        return HalfPoissonLoss()
class GammaRegressor(_GeneralizedLinearRegressor):
    """Generalized Linear Model with a Gamma distribution.

    This regressor uses the 'log' link function.

    Read more in the :ref:`User Guide <Generalized_linear_models>`.

    .. versionadded:: 0.23

    Parameters
    ----------
    alpha : float, default=1
        Constant that multiplies the L2 penalty term and determines the
        regularization strength. ``alpha = 0`` is equivalent to unpenalized
        GLMs. In this case, the design matrix `X` must have full column rank
        (no collinearities).
        Values of `alpha` must be in the range `[0.0, inf)`.

    fit_intercept : bool, default=True
        Specifies if a constant (a.k.a. bias or intercept) should be
        added to the linear predictor `X @ coef_ + intercept_`.

    solver : {'lbfgs', 'newton-cholesky'}, default='lbfgs'
        Algorithm to use in the optimization problem:

        'lbfgs'
            Calls scipy's L-BFGS-B optimizer.

        'newton-cholesky'
            Uses Newton-Raphson steps (in arbitrary precision arithmetic equivalent to
            iterated reweighted least squares) with an inner Cholesky based solver.
            This solver is a good choice for `n_samples` >> `n_features`, especially
            with one-hot encoded categorical features with rare categories. Be aware
            that the memory usage of this solver has a quadratic dependency on
            `n_features` because it explicitly computes the Hessian matrix.

            .. versionadded:: 1.2

    max_iter : int, default=100
        The maximal number of iterations for the solver.
        Values must be in the range `[1, inf)`.

    tol : float, default=1e-4
        Stopping criterion. For the lbfgs solver,
        the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol``
        where ``g_j`` is the j-th component of the gradient (derivative) of
        the objective function.
        Values must be in the range `(0.0, inf)`.

    warm_start : bool, default=False
        If set to ``True``, reuse the solution of the previous call to ``fit``
        as initialization for `coef_` and `intercept_`.

    verbose : int, default=0
        For the lbfgs solver set verbose to any positive number for verbosity.
        Values must be in the range `[0, inf)`.

    Attributes
    ----------
    coef_ : array of shape (n_features,)
        Estimated coefficients for the linear predictor (`X @ coef_ +
        intercept_`) in the GLM.

    intercept_ : float
        Intercept (a.k.a. bias) added to linear predictor.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    n_iter_ : int
        Actual number of iterations used in the solver.

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    PoissonRegressor : Generalized Linear Model with a Poisson distribution.
    TweedieRegressor : Generalized Linear Model with a Tweedie distribution.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.GammaRegressor()
    >>> X = [[1, 2], [2, 3], [3, 4], [4, 3]]
    >>> y = [19, 26, 33, 30]
    >>> clf.fit(X, y)
    GammaRegressor()
    >>> clf.score(X, y)
    np.float64(0.773)
    >>> clf.coef_
    array([0.073, 0.067])
    >>> clf.intercept_
    np.float64(2.896)
    >>> clf.predict([[1, 0], [2, 8]])
    array([19.483, 35.795])
    """

    # Same hyper-parameters as the base GLM; no Gamma-specific constraints.
    _parameter_constraints: dict = {
        **_GeneralizedLinearRegressor._parameter_constraints
    }

    def __init__(
        self,
        *,
        alpha=1.0,
        fit_intercept=True,
        solver="lbfgs",
        max_iter=100,
        tol=1e-4,
        warm_start=False,
        verbose=0,
    ):
        # Pure passthrough: only re-declared to expose the signature explicitly.
        super().__init__(
            alpha=alpha,
            fit_intercept=fit_intercept,
            solver=solver,
            max_iter=max_iter,
            tol=tol,
            warm_start=warm_start,
            verbose=verbose,
        )

    def _get_loss(self):
        # Half Gamma deviance with the log link built in (see the loss/link
        # table in _GeneralizedLinearRegressor's docstring).
        return HalfGammaLoss()
class TweedieRegressor(_GeneralizedLinearRegressor):
"""Generalized Linear Model with a Tweedie distribution.
This estimator can be used to model different GLMs depending on the
``power`` parameter, which determines the underlying distribution.
Read more in the :ref:`User Guide <Generalized_linear_models>`.
.. versionadded:: 0.23
Parameters
----------
power : float, default=0
The power determines the underlying target distribution according
to the following table:
+-------+------------------------+
| Power | Distribution |
+=======+========================+
| 0 | Normal |
+-------+------------------------+
| 1 | Poisson |
+-------+------------------------+
| (1,2) | Compound Poisson Gamma |
+-------+------------------------+
| 2 | Gamma |
+-------+------------------------+
| 3 | Inverse Gaussian |
+-------+------------------------+
For ``0 < power < 1``, no distribution exists.
alpha : float, default=1
Constant that multiplies the L2 penalty term and determines the
regularization strength. ``alpha = 0`` is equivalent to unpenalized
GLMs. In this case, the design matrix `X` must have full column rank
(no collinearities).
Values of `alpha` must be in the range `[0.0, inf)`.
fit_intercept : bool, default=True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the linear predictor (`X @ coef + intercept`).
link : {'auto', 'identity', 'log'}, default='auto'
The link function of the GLM, i.e. mapping from linear predictor
`X @ coeff + intercept` to prediction `y_pred`. Option 'auto' sets
the link depending on the chosen `power` parameter as follows:
- 'identity' for ``power <= 0``, e.g. for the Normal distribution
- 'log' for ``power > 0``, e.g. for Poisson, Gamma and Inverse Gaussian
distributions
solver : {'lbfgs', 'newton-cholesky'}, default='lbfgs'
Algorithm to use in the optimization problem:
'lbfgs'
Calls scipy's L-BFGS-B optimizer.
'newton-cholesky'
Uses Newton-Raphson steps (in arbitrary precision arithmetic equivalent to
iterated reweighted least squares) with an inner Cholesky based solver.
This solver is a good choice for `n_samples` >> `n_features`, especially
with one-hot encoded categorical features with rare categories. Be aware
that the memory usage of this solver has a quadratic dependency on
`n_features` because it explicitly computes the Hessian matrix.
.. versionadded:: 1.2
max_iter : int, default=100
The maximal number of iterations for the solver.
Values must be in the range `[1, inf)`.
tol : float, default=1e-4
Stopping criterion. For the lbfgs solver,
the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol``
where ``g_j`` is the j-th component of the gradient (derivative) of
the objective function.
Values must be in the range `(0.0, inf)`.
warm_start : bool, default=False
If set to ``True``, reuse the solution of the previous call to ``fit``
as initialization for ``coef_`` and ``intercept_`` .
verbose : int, default=0
For the lbfgs solver set verbose to any positive number for verbosity.
Values must be in the range `[0, inf)`.
Attributes
----------
coef_ : array of shape (n_features,)
Estimated coefficients for the linear predictor (`X @ coef_ +
intercept_`) in the GLM.
intercept_ : float
Intercept (a.k.a. bias) added to linear predictor.
n_iter_ : int
Actual number of iterations used in the solver.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
PoissonRegressor : Generalized Linear Model with a Poisson distribution.
GammaRegressor : Generalized Linear Model with a Gamma distribution.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.TweedieRegressor()
>>> X = [[1, 2], [2, 3], [3, 4], [4, 3]]
>>> y = [2, 3.5, 5, 5.5]
>>> clf.fit(X, y)
TweedieRegressor()
>>> clf.score(X, y)
np.float64(0.839)
>>> clf.coef_
array([0.599, 0.299])
>>> clf.intercept_
np.float64(1.600)
>>> clf.predict([[1, 1], [3, 4]])
array([2.500, 4.599])
"""
_parameter_constraints: dict = {
**_GeneralizedLinearRegressor._parameter_constraints,
"power": [Interval(Real, None, None, closed="neither")],
"link": [StrOptions({"auto", "identity", "log"})],
}
    def __init__(
        self,
        *,
        power=0.0,
        alpha=1.0,
        fit_intercept=True,
        link="auto",
        solver="lbfgs",
        max_iter=100,
        tol=1e-4,
        warm_start=False,
        verbose=0,
    ):
        # Generic GLM options are forwarded to the base class; only the
        # Tweedie-specific `link` and `power` are stored here (both are read
        # later by `_get_loss`).
        super().__init__(
            alpha=alpha,
            fit_intercept=fit_intercept,
            solver=solver,
            max_iter=max_iter,
            tol=tol,
            warm_start=warm_start,
            verbose=verbose,
        )
        self.link = link
        self.power = power
def _get_loss(self):
if self.link == "auto":
if self.power <= 0:
# identity link
return HalfTweedieLossIdentity(power=self.power)
else:
# log link
return HalfTweedieLoss(power=self.power)
if self.link == "log":
return HalfTweedieLoss(power=self.power)
if self.link == "identity":
return HalfTweedieLossIdentity(power=self.power)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/_glm/_newton_solver.py | sklearn/linear_model/_glm/_newton_solver.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
"""
Newton solver for Generalized Linear Models
"""
import warnings
from abc import ABC, abstractmethod
import numpy as np
import scipy.linalg
import scipy.optimize
from sklearn._loss.loss import HalfSquaredError
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model._linear_loss import LinearModelLoss
from sklearn.utils.fixes import _get_additional_lbfgs_options_dict
from sklearn.utils.optimize import _check_optimize_result
class NewtonSolver(ABC):
    """Newton solver for GLMs.

    This class implements Newton/2nd-order optimization routines for GLMs. Each Newton
    iteration aims at finding the Newton step which is done by the inner solver. With
    Hessian H, gradient g and coefficients coef, one step solves:

        H @ coef_newton = -g

    For our GLM / LinearModelLoss, we have gradient g and Hessian H:

        g = X.T @ loss.gradient + l2_reg_strength * coef
        H = X.T @ diag(loss.hessian) @ X + l2_reg_strength * identity

    Backtracking line search updates coef = coef_old + t * coef_newton for some t in
    (0, 1].

    This is a base class, actual implementations (child classes) may deviate from the
    above pattern and use structure specific tricks.

    Usage pattern:
        - initialize solver: sol = NewtonSolver(...)
        - solve the problem: sol.solve(X, y, sample_weight)

    References
    ----------
    - Jorge Nocedal, Stephen J. Wright. (2006) "Numerical Optimization"
      2nd edition
      https://doi.org/10.1007/978-0-387-40065-5
    - Stephen P. Boyd, Lieven Vandenberghe. (2004) "Convex Optimization."
      Cambridge University Press, 2004.
      https://web.stanford.edu/~boyd/cvxbook/bv_cvxbook.pdf

    Parameters
    ----------
    coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
        Initial coefficients of a linear model.
        If shape (n_classes * n_dof,), the classes of one feature are contiguous,
        i.e. one reconstructs the 2d-array via
        coef.reshape((n_classes, -1), order="F").
    linear_loss : LinearModelLoss
        The loss to be minimized.
    l2_reg_strength : float, default=0.0
        L2 regularization strength.
    tol : float, default=1e-4
        The optimization problem is solved when each of the following condition is
        fulfilled:
        1. maximum |gradient| <= tol
        2. Newton decrement d: 1/2 * d^2 <= tol
    max_iter : int, default=100
        Maximum number of Newton steps allowed.
    n_threads : int, default=1
        Number of OpenMP threads to use for the computation of the Hessian and gradient
        of the loss function.

    Attributes
    ----------
    coef_old : ndarray of shape coef.shape
        Coefficient of previous iteration.
    coef_newton : ndarray of shape coef.shape
        Newton step.
    gradient : ndarray of shape coef.shape
        Gradient of the loss w.r.t. the coefficients.
    gradient_old : ndarray of shape coef.shape
        Gradient of previous iteration.
    loss_value : float
        Value of objective function = loss + penalty.
    loss_value_old : float
        Value of objective function of previous iteration.
    raw_prediction : ndarray of shape (n_samples,) or (n_samples, n_classes)
        Raw prediction for the current coefficients, as computed by
        ``linear_loss.weight_intercept_raw``.
    converged : bool
        Indicator for convergence of the solver.
    iteration : int
        Number of Newton steps, i.e. calls to inner_solve
    use_fallback_lbfgs_solve : bool
        If set to True, the solver will resort to call LBFGS to finish the optimisation
        procedure in case of convergence issues.
    gradient_times_newton : float
        gradient @ coef_newton, set in inner_solve and used by line_search. If the
        Newton step is a descent direction, this is negative.
    """

    def __init__(
        self,
        *,
        coef,
        linear_loss=LinearModelLoss(base_loss=HalfSquaredError(), fit_intercept=True),
        l2_reg_strength=0.0,
        tol=1e-4,
        max_iter=100,
        n_threads=1,
        verbose=0,
    ):
        self.coef = coef
        self.linear_loss = linear_loss
        self.l2_reg_strength = l2_reg_strength
        self.tol = tol
        self.max_iter = max_iter
        self.n_threads = n_threads
        self.verbose = verbose

    def setup(self, X, y, sample_weight):
        """Precomputations

        If None, initializes:
            - self.coef

        Sets:
            - self.raw_prediction
            - self.loss_value
        """
        _, _, self.raw_prediction = self.linear_loss.weight_intercept_raw(self.coef, X)
        self.loss_value = self.linear_loss.loss(
            coef=self.coef,
            X=X,
            y=y,
            sample_weight=sample_weight,
            l2_reg_strength=self.l2_reg_strength,
            n_threads=self.n_threads,
            raw_prediction=self.raw_prediction,
        )

    @abstractmethod
    def update_gradient_hessian(self, X, y, sample_weight):
        """Update gradient and Hessian."""

    @abstractmethod
    def inner_solve(self, X, y, sample_weight):
        """Compute Newton step.

        Sets:
            - self.coef_newton
            - self.gradient_times_newton
        """

    def fallback_lbfgs_solve(self, X, y, sample_weight):
        """Fallback solver in case of emergency.

        If a solver detects convergence problems, it may fall back to this method in
        the hope to exit with success instead of raising an error.

        Sets:
            - self.coef
            - self.converged
        """
        # Use the remaining Newton iteration budget for the lbfgs iterations.
        max_iter = self.max_iter - self.iteration
        opt_res = scipy.optimize.minimize(
            self.linear_loss.loss_gradient,
            self.coef,
            method="L-BFGS-B",
            jac=True,
            options={
                "maxiter": max_iter,
                "maxls": 50,  # default is 20
                "gtol": self.tol,
                "ftol": 64 * np.finfo(np.float64).eps,
                **_get_additional_lbfgs_options_dict("iprint", self.verbose - 1),
            },
            args=(X, y, sample_weight, self.l2_reg_strength, self.n_threads),
        )
        self.iteration += _check_optimize_result("lbfgs", opt_res, max_iter=max_iter)
        self.coef = opt_res.x
        self.converged = opt_res.status == 0

    def line_search(self, X, y, sample_weight):
        """Backtracking line search.

        Sets:
            - self.coef_old
            - self.coef
            - self.loss_value_old
            - self.loss_value
            - self.gradient_old
            - self.gradient
            - self.raw_prediction
        """
        # line search parameters
        beta, sigma = 0.5, 0.00048828125  # 1/2, 1/2**11
        eps = 16 * np.finfo(self.loss_value.dtype).eps
        t = 1  # step size

        # gradient_times_newton = self.gradient @ self.coef_newton
        # was computed in inner_solve.
        armijo_term = sigma * self.gradient_times_newton
        # raw_prediction is linear in coef, so the candidate raw prediction for
        # each step size t can be formed without another matrix product.
        _, _, raw_prediction_newton = self.linear_loss.weight_intercept_raw(
            self.coef_newton, X
        )

        self.coef_old = self.coef
        self.loss_value_old = self.loss_value
        self.gradient_old = self.gradient

        # np.sum(np.abs(self.gradient_old))
        sum_abs_grad_old = -1

        is_verbose = self.verbose >= 2
        if is_verbose:
            print("  Backtracking Line Search")
            print(f"    eps=16 * finfo.eps={eps}")

        for i in range(21):  # until and including t = beta**20 ~ 1e-6
            self.coef = self.coef_old + t * self.coef_newton
            raw = self.raw_prediction + t * raw_prediction_newton
            self.loss_value, self.gradient = self.linear_loss.loss_gradient(
                coef=self.coef,
                X=X,
                y=y,
                sample_weight=sample_weight,
                l2_reg_strength=self.l2_reg_strength,
                n_threads=self.n_threads,
                raw_prediction=raw,
            )
            # Note: If coef_newton is too large, loss_gradient may produce inf values,
            # potentially accompanied by a RuntimeWarning.
            # This case will be captured by the Armijo condition.

            # 1. Check Armijo / sufficient decrease condition.
            # The smaller (more negative) the better.
            loss_improvement = self.loss_value - self.loss_value_old
            check = loss_improvement <= t * armijo_term
            if is_verbose:
                print(
                    f"    line search iteration={i + 1}, step size={t}\n"
                    f"      check loss improvement <= armijo term: {loss_improvement} "
                    f"<= {t * armijo_term} {check}"
                )
            if check:
                break
            # 2. Deal with relative loss differences around machine precision.
            tiny_loss = np.abs(self.loss_value_old * eps)
            check = np.abs(loss_improvement) <= tiny_loss
            if is_verbose:
                print(
                    "      check loss |improvement| <= eps * |loss_old|:"
                    f" {np.abs(loss_improvement)} <= {tiny_loss} {check}"
                )
            if check:
                if sum_abs_grad_old < 0:
                    # Computed lazily: only needed once the loss change is at
                    # machine-precision level.
                    sum_abs_grad_old = scipy.linalg.norm(self.gradient_old, ord=1)
                # 2.1 Check sum of absolute gradients as alternative condition.
                sum_abs_grad = scipy.linalg.norm(self.gradient, ord=1)
                check = sum_abs_grad < sum_abs_grad_old
                if is_verbose:
                    print(
                        "      check sum(|gradient|) < sum(|gradient_old|): "
                        f"{sum_abs_grad} < {sum_abs_grad_old} {check}"
                    )
                if check:
                    break

            t *= beta
        else:
            warnings.warn(
                (
                    f"Line search of Newton solver {self.__class__.__name__} at"
                    f" iteration #{self.iteration} did no converge after 21 line search"
                    " refinement iterations. It will now resort to lbfgs instead."
                ),
                ConvergenceWarning,
            )
            if self.verbose:
                print("  Line search did not converge and resorts to lbfgs instead.")
            self.use_fallback_lbfgs_solve = True
            return

        self.raw_prediction = raw
        if is_verbose:
            print(
                f"    line search successful after {i + 1} iterations with "
                f"loss={self.loss_value}."
            )

    def check_convergence(self, X, y, sample_weight):
        """Check for convergence.

        Sets self.converged.
        """
        if self.verbose:
            print("  Check Convergence")
        # Note: Checking maximum relative change of coefficient <= tol is a bad
        # convergence criterion because even a large step could have brought us close
        # to the true minimum.
        # coef_step = self.coef - self.coef_old
        # change = np.max(np.abs(coef_step) / np.maximum(1, np.abs(self.coef_old)))
        # check = change <= tol

        # 1. Criterion: maximum |gradient| <= tol
        #    The gradient was already updated in line_search()
        g_max_abs = np.max(np.abs(self.gradient))
        check = g_max_abs <= self.tol
        if self.verbose:
            print(f"    1. max |gradient| {g_max_abs} <= {self.tol} {check}")
        if not check:
            return

        # 2. Criterion: For Newton decrement d, check 1/2 * d^2 <= tol
        #    d = sqrt(grad @ hessian^-1 @ grad)
        #      = sqrt(coef_newton @ hessian @ coef_newton)
        #    See Boyd, Vanderberghe (2009) "Convex Optimization" Chapter 9.5.1.
        d2 = self.coef_newton @ self.hessian @ self.coef_newton
        check = 0.5 * d2 <= self.tol
        if self.verbose:
            print(f"    2. Newton decrement {0.5 * d2} <= {self.tol} {check}")
        if not check:
            return

        if self.verbose:
            loss_value = self.linear_loss.loss(
                coef=self.coef,
                X=X,
                y=y,
                sample_weight=sample_weight,
                l2_reg_strength=self.l2_reg_strength,
                n_threads=self.n_threads,
            )
            print(f"  Solver did converge at loss = {loss_value}.")
        self.converged = True

    def finalize(self, X, y, sample_weight):
        """Finalize the solvers results.

        Some solvers may need this, others not.
        """
        pass

    def solve(self, X, y, sample_weight):
        """Solve the optimization problem.

        This is the main routine.

        Order of calls:
            self.setup()
            while iteration:
                self.update_gradient_hessian()
                self.inner_solve()
                self.line_search()
                self.check_convergence()
            self.finalize()

        Returns
        -------
        coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)
            Solution of the optimization problem.
        """
        # setup usually:
        #   - initializes self.coef if needed
        #   - initializes and calculates self.raw_predictions, self.loss_value
        self.setup(X=X, y=y, sample_weight=sample_weight)

        self.iteration = 1
        self.converged = False
        self.use_fallback_lbfgs_solve = False

        while self.iteration <= self.max_iter and not self.converged:
            if self.verbose:
                print(f"Newton iter={self.iteration}")

            self.use_fallback_lbfgs_solve = False  # Fallback solver.

            # 1. Update Hessian and gradient
            self.update_gradient_hessian(X=X, y=y, sample_weight=sample_weight)

            # TODO:
            # if iteration == 1:
            # We might stop early, e.g. we already are close to the optimum,
            # usually detected by zero gradients at this stage.

            # 2. Inner solver
            #    Calculate Newton step/direction
            #    This usually sets self.coef_newton and self.gradient_times_newton.
            self.inner_solve(X=X, y=y, sample_weight=sample_weight)
            if self.use_fallback_lbfgs_solve:
                break

            # 3. Backtracking line search
            #    This usually sets self.coef_old, self.coef, self.loss_value_old
            #    self.loss_value, self.gradient_old, self.gradient,
            #    self.raw_prediction.
            self.line_search(X=X, y=y, sample_weight=sample_weight)
            if self.use_fallback_lbfgs_solve:
                break

            # 4. Check convergence
            #    Sets self.converged.
            self.check_convergence(X=X, y=y, sample_weight=sample_weight)

            # 5. Next iteration
            self.iteration += 1

        if not self.converged:
            if self.use_fallback_lbfgs_solve:
                # Note: The fallback solver circumvents check_convergence and relies on
                # the convergence checks of lbfgs instead. Enough warnings have been
                # raised on the way.
                self.fallback_lbfgs_solve(X=X, y=y, sample_weight=sample_weight)
            else:
                warnings.warn(
                    (
                        f"Newton solver did not converge after {self.iteration - 1} "
                        "iterations."
                    ),
                    ConvergenceWarning,
                )

        # `iteration` was incremented past the last performed step above.
        self.iteration -= 1
        self.finalize(X=X, y=y, sample_weight=sample_weight)
        return self.coef
class NewtonCholeskySolver(NewtonSolver):
    """Cholesky based Newton solver.

    Inner solver for finding the Newton step H w_newton = -g uses Cholesky based linear
    solver. If the Hessian turns out to be singular, ill-conditioned or not a descent
    direction, the solver falls back to lbfgs (see ``inner_solve``).
    """

    def setup(self, X, y, sample_weight):
        super().setup(X=X, y=y, sample_weight=sample_weight)
        if self.linear_loss.base_loss.is_multiclass:
            # Easier with ravelled arrays, e.g., for scipy.linalg.solve.
            # As with LinearModelLoss, we always are contiguous in n_classes.
            self.coef = self.coef.ravel(order="F")
        # Note that the computation of gradient in LinearModelLoss follows the shape of
        # coef.
        self.gradient = np.empty_like(self.coef)
        # But the hessian is always 2d.
        n = self.coef.size
        self.hessian = np.empty_like(self.coef, shape=(n, n))
        # To help case distinctions.
        self.is_multinomial_with_intercept = (
            self.linear_loss.base_loss.is_multiclass and self.linear_loss.fit_intercept
        )
        self.is_multinomial_no_penalty = (
            self.linear_loss.base_loss.is_multiclass and self.l2_reg_strength == 0
        )
        if self.is_multinomial_no_penalty:
            # See inner_solve. The provided coef might not adhere to the convention
            # that the last class is set to zero.
            # This is done by the usual freedom of a (overparametrized) multinomial to
            # add a constant to all classes which doesn't change predictions.
            n_classes = self.linear_loss.base_loss.n_classes
            coef = self.coef.reshape(n_classes, -1, order="F")  # easier as 2d
            coef -= coef[-1, :]  # coef -= coef of last class
        elif self.is_multinomial_with_intercept:
            # See inner_solve. Same as above, but only for the intercept.
            n_classes = self.linear_loss.base_loss.n_classes
            # intercept -= intercept of last class
            self.coef[-n_classes:] -= self.coef[-1]

    def update_gradient_hessian(self, X, y, sample_weight):
        # gradient and hessian are written in place into self.gradient and
        # self.hessian via the *_out arguments; only the warning flag is returned.
        _, _, self.hessian_warning = self.linear_loss.gradient_hessian(
            coef=self.coef,
            X=X,
            y=y,
            sample_weight=sample_weight,
            l2_reg_strength=self.l2_reg_strength,
            n_threads=self.n_threads,
            gradient_out=self.gradient,
            hessian_out=self.hessian,
            raw_prediction=self.raw_prediction,  # this was updated in line_search
        )

    def inner_solve(self, X, y, sample_weight):
        if self.hessian_warning:
            warnings.warn(
                (
                    f"The inner solver of {self.__class__.__name__} detected a "
                    "pointwise hessian with many negative values at iteration "
                    f"#{self.iteration}. It will now resort to lbfgs instead."
                ),
                ConvergenceWarning,
            )
            if self.verbose:
                print(
                    "  The inner solver detected a pointwise Hessian with many "
                    "negative values and resorts to lbfgs instead."
                )
            self.use_fallback_lbfgs_solve = True
            return

        # Note: The following case distinction could also be shifted to the
        # implementation of HalfMultinomialLoss instead of here within the solver.
        if self.is_multinomial_no_penalty:
            # The multinomial loss is overparametrized for each unpenalized feature, so
            # at least the intercepts. This can be seen by noting that predicted
            # probabilities are invariant under shifting all coefficients of a single
            # feature j for all classes by the same amount c:
            #   coef[k, :] -> coef[k, :] + c  =>  proba stays the same
            # where we have assumed coef.shape = (n_classes, n_features).
            # Therefore, also the loss (-log-likelihood), gradient and hessian stay the
            # same, see
            # Noah Simon and Jerome Friedman and Trevor Hastie. (2013) "A Blockwise
            # Descent Algorithm for Group-penalized Multiresponse and Multinomial
            # Regression". https://doi.org/10.48550/arXiv.1311.6529
            #
            # We choose the standard approach and set all the coefficients of the last
            # class to zero, for all features including the intercept.
            # Note that coef was already dealt with in setup.
            n_classes = self.linear_loss.base_loss.n_classes
            n_dof = self.coef.size // n_classes  # degree of freedom per class
            n = self.coef.size - n_dof  # effective size
            self.gradient[n_classes - 1 :: n_classes] = 0
            self.hessian[n_classes - 1 :: n_classes, :] = 0
            self.hessian[:, n_classes - 1 :: n_classes] = 0
            # We also need the reduced variants of gradient and hessian where the
            # entries set to zero are removed. For 2 features and 3 classes with
            # arbitrary values, "x" means removed:
            #     gradient = [0, 1, x, 3, 4, x]
            #
            #     hessian = [0,  1, x,  3,  4, x]
            #               [1,  7, x,  9, 10, x]
            #               [x,  x, x,  x,  x, x]
            #               [3,  9, x, 21, 22, x]
            #               [4, 10, x, 22, 28, x]
            #               [x,  x, x,  x,  x, x]
            # The following slicing triggers copies of gradient and hessian.
            gradient = self.gradient.reshape(-1, n_classes)[:, :-1].flatten()
            hessian = self.hessian.reshape(n_dof, n_classes, n_dof, n_classes)[
                :, :-1, :, :-1
            ].reshape(n, n)
        elif self.is_multinomial_with_intercept:
            # Here, only intercepts are unpenalized. We again choose the last class and
            # set its intercept to zero.
            # Note that coef was already dealt with in setup.
            self.gradient[-1] = 0
            self.hessian[-1, :] = 0
            self.hessian[:, -1] = 0
            gradient, hessian = self.gradient[:-1], self.hessian[:-1, :-1]
        else:
            gradient, hessian = self.gradient, self.hessian

        try:
            with warnings.catch_warnings():
                warnings.simplefilter("error", scipy.linalg.LinAlgWarning)
                self.coef_newton = scipy.linalg.solve(
                    hessian, -gradient, check_finite=False, assume_a="sym"
                )
            # Re-insert the zeroed entries of the last class that were removed
            # from the reduced system above.
            if self.is_multinomial_no_penalty:
                self.coef_newton = np.c_[
                    self.coef_newton.reshape(n_dof, n_classes - 1), np.zeros(n_dof)
                ].reshape(-1)
                assert self.coef_newton.flags.f_contiguous
            elif self.is_multinomial_with_intercept:
                self.coef_newton = np.r_[self.coef_newton, 0]
            self.gradient_times_newton = self.gradient @ self.coef_newton
            if self.gradient_times_newton > 0:
                if self.verbose:
                    print(
                        "  The inner solver found a Newton step that is not a "
                        "descent direction and resorts to LBFGS steps instead."
                    )
                self.use_fallback_lbfgs_solve = True
                return
        except (np.linalg.LinAlgError, scipy.linalg.LinAlgWarning) as e:
            warnings.warn(
                f"The inner solver of {self.__class__.__name__} stumbled upon a "
                "singular or very ill-conditioned Hessian matrix at iteration "
                f"{self.iteration}. It will now resort to lbfgs instead.\n"
                "Further options are to use another solver or to avoid such situation "
                "in the first place. Possible remedies are removing collinear features"
                " of X or increasing the penalization strengths.\n"
                "The original Linear Algebra message was:\n" + str(e),
                scipy.linalg.LinAlgWarning,
            )
            # Possible causes:
            # 1. hess_pointwise is negative. But this is already taken care in
            #    LinearModelLoss.gradient_hessian.
            # 2. X is singular or ill-conditioned
            #    This might be the most probable cause.
            #
            # There are many possible ways to deal with this situation. Most of them
            # add, explicitly or implicitly, a matrix to the hessian to make it
            # positive definite, confer to Chapter 3.4 of Nocedal & Wright 2nd ed.
            # Instead, we resort to lbfgs.
            if self.verbose:
                print(
                    "  The inner solver stumbled upon a singular or ill-conditioned "
                    "Hessian matrix and resorts to LBFGS instead."
                )
            self.use_fallback_lbfgs_solve = True
            return

    def finalize(self, X, y, sample_weight):
        if self.is_multinomial_no_penalty:
            # Our convention is usually the symmetric parametrization where
            # sum(coef[classes, features], axis=0) = 0.
            # We convert now to this convention. Note that it does not change
            # the predicted probabilities.
            n_classes = self.linear_loss.base_loss.n_classes
            self.coef = self.coef.reshape(n_classes, -1, order="F")
            self.coef -= np.mean(self.coef, axis=0)
        elif self.is_multinomial_with_intercept:
            # Only the intercept needs an update to the symmetric parametrization.
            n_classes = self.linear_loss.base_loss.n_classes
            self.coef[-n_classes:] -= np.mean(self.coef[-n_classes:])
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/_glm/__init__.py | sklearn/linear_model/_glm/__init__.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from sklearn.linear_model._glm.glm import (
GammaRegressor,
PoissonRegressor,
TweedieRegressor,
_GeneralizedLinearRegressor,
)
__all__ = [
"GammaRegressor",
"PoissonRegressor",
"TweedieRegressor",
"_GeneralizedLinearRegressor",
]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/_glm/tests/__init__.py | sklearn/linear_model/_glm/tests/__init__.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/linear_model/_glm/tests/test_glm.py | sklearn/linear_model/_glm/tests/test_glm.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import itertools
import warnings
from functools import partial
import numpy as np
import pytest
import scipy
from scipy import linalg
from scipy.optimize import minimize, root
from sklearn._loss import HalfBinomialLoss, HalfPoissonLoss, HalfTweedieLoss
from sklearn._loss.link import IdentityLink, LogLink
from sklearn.base import clone
from sklearn.datasets import make_low_rank_matrix, make_regression
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model import (
GammaRegressor,
PoissonRegressor,
Ridge,
TweedieRegressor,
)
from sklearn.linear_model._glm import _GeneralizedLinearRegressor
from sklearn.linear_model._glm._newton_solver import NewtonCholeskySolver
from sklearn.linear_model._linear_loss import LinearModelLoss
from sklearn.metrics import d2_tweedie_score, mean_poisson_deviance
from sklearn.model_selection import train_test_split
from sklearn.utils._testing import assert_allclose
# GLM solvers exercised by the parametrized tests in this module.
SOLVERS = ["lbfgs", "newton-cholesky"]
class BinomialRegressor(_GeneralizedLinearRegressor):
    """GLM with the half binomial loss, defined for testing purposes only."""

    def _get_loss(self):
        return HalfBinomialLoss()
def _special_minimize(fun, grad, x, tol_NM, tol):
# Find good starting point by Nelder-Mead
res_NM = minimize(
fun, x, method="Nelder-Mead", options={"xatol": tol_NM, "fatol": tol_NM}
)
# Now refine via root finding on the gradient of the function, which is
# more precise than minimizing the function itself.
res = root(
grad,
res_NM.x,
method="lm",
options={"ftol": tol, "xtol": tol, "gtol": tol},
)
return res.x
@pytest.fixture(scope="module")
def regression_data():
    """Dense random regression problem, created once and shared per module."""
    # NOTE(review): n_informative=80 > n_features=10; make_regression caps
    # n_informative at n_features — confirm this is intended.
    X, y = make_regression(
        n_samples=107, n_features=10, n_informative=80, noise=0.5, random_state=2
    )
    return X, y
@pytest.fixture(
    params=itertools.product(
        ["long", "wide"],
        [
            BinomialRegressor(),
            PoissonRegressor(),
            GammaRegressor(),
            # TweedieRegressor(power=3.0),  # too difficult
            # TweedieRegressor(power=0, link="log"),  # too difficult
            TweedieRegressor(power=1.5),
        ],
    ),
    ids=lambda param: f"{param[0]}-{param[1]}",
)
def glm_dataset(global_random_seed, request):
    """Dataset with GLM solutions, well conditioned X.

    This is inspired by ols_ridge_dataset in test_ridge.py.

    The construction is based on the SVD decomposition of X = U S V'.

    Parameters
    ----------
    type : {"long", "wide"}
        If "long", then n_samples > n_features.
        If "wide", then n_features > n_samples.
    model : a GLM model

    For "wide", we return the minimum norm solution:

        min ||w||_2 subject to w = argmin deviance(X, y, w)

    Note that the deviance is always minimized if y = inverse_link(X w) is possible to
    achieve, which it is in the wide data case. Therefore, we can construct the
    solution with minimum norm like (wide) OLS:

        min ||w||_2 subject to link(y) = raw_prediction = X w

    Returns
    -------
    model : GLM model
    X : ndarray
        Last column of 1, i.e. intercept.
    y : ndarray
    coef_unpenalized : ndarray
        Minimum norm solutions, i.e. min sum(loss(w)) (with minimum ||w||_2 in
        case of ambiguity)
        Last coefficient is intercept.
    coef_penalized : ndarray
        GLM solution with alpha=l2_reg_strength=1, i.e.
        min 1/n * sum(loss) + ||w[:-1]||_2^2.
        Last coefficient is intercept.
    l2_reg_strength : float
        Always equal 1.
    """
    data_type, model = request.param
    # Make larger dim more than double as big as the smaller one.
    # This helps when constructing singular matrices like (X, X).
    if data_type == "long":
        n_samples, n_features = 12, 4
    else:
        n_samples, n_features = 4, 12
    k = min(n_samples, n_features)
    rng = np.random.RandomState(global_random_seed)
    X = make_low_rank_matrix(
        n_samples=n_samples,
        n_features=n_features,
        effective_rank=k,
        tail_strength=0.1,
        random_state=rng,
    )
    X[:, -1] = 1  # last column acts as intercept
    U, s, Vt = linalg.svd(X, full_matrices=False)
    assert np.all(s > 1e-3)  # to be sure
    assert np.max(s) / np.min(s) < 100  # condition number of X

    if data_type == "long":
        coef_unpenalized = rng.uniform(low=1, high=3, size=n_features)
        coef_unpenalized *= rng.choice([-1, 1], size=n_features)
        raw_prediction = X @ coef_unpenalized
    else:
        raw_prediction = rng.uniform(low=-3, high=3, size=n_samples)
        # minimum norm solution min ||w||_2 such that raw_prediction = X w:
        # w = X'(XX')^-1 raw_prediction = V s^-1 U' raw_prediction
        coef_unpenalized = Vt.T @ np.diag(1 / s) @ U.T @ raw_prediction

    linear_loss = LinearModelLoss(base_loss=model._get_loss(), fit_intercept=True)
    sw = np.full(shape=n_samples, fill_value=1 / n_samples)
    y = linear_loss.base_loss.link.inverse(raw_prediction)

    # Add penalty l2_reg_strength * ||coef||_2^2 for l2_reg_strength=1 and solve with
    # optimizer. Note that the problem is well conditioned such that we get accurate
    # results.
    l2_reg_strength = 1
    fun = partial(
        linear_loss.loss,
        X=X[:, :-1],
        y=y,
        sample_weight=sw,
        l2_reg_strength=l2_reg_strength,
    )
    grad = partial(
        linear_loss.gradient,
        X=X[:, :-1],
        y=y,
        sample_weight=sw,
        l2_reg_strength=l2_reg_strength,
    )
    coef_penalized_with_intercept = _special_minimize(
        fun, grad, coef_unpenalized, tol_NM=1e-6, tol=1e-14
    )

    # Same solve but without an intercept term in the model.
    linear_loss = LinearModelLoss(base_loss=model._get_loss(), fit_intercept=False)
    fun = partial(
        linear_loss.loss,
        X=X[:, :-1],
        y=y,
        sample_weight=sw,
        l2_reg_strength=l2_reg_strength,
    )
    grad = partial(
        linear_loss.gradient,
        X=X[:, :-1],
        y=y,
        sample_weight=sw,
        l2_reg_strength=l2_reg_strength,
    )
    coef_penalized_without_intercept = _special_minimize(
        fun, grad, coef_unpenalized[:-1], tol_NM=1e-6, tol=1e-14
    )

    # To be sure
    assert np.linalg.norm(coef_penalized_with_intercept) < np.linalg.norm(
        coef_unpenalized
    )

    return (
        model,
        X,
        y,
        coef_unpenalized,
        coef_penalized_with_intercept,
        coef_penalized_without_intercept,
        l2_reg_strength,
    )
@pytest.mark.parametrize("solver", SOLVERS)
@pytest.mark.parametrize("fit_intercept", [False, True])
def test_glm_regression(solver, fit_intercept, glm_dataset):
    """Test that GLM converges for all solvers to correct solution.

    We work with a simple constructed data set with known solution.
    """
    model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset
    params = dict(
        alpha=alpha,
        fit_intercept=fit_intercept,
        solver=solver,
        tol=1e-12,
        max_iter=1000,
    )
    model = clone(model).set_params(**params)
    X = X[:, :-1]  # remove intercept
    if fit_intercept:
        coef = coef_with_intercept
        intercept = coef[-1]
        coef = coef[:-1]
    else:
        coef = coef_without_intercept
        intercept = 0
    model.fit(X, y)
    # lbfgs gets a looser tolerance than the Newton solver.
    rtol = 5e-5 if solver == "lbfgs" else 1e-9
    assert model.intercept_ == pytest.approx(intercept, rel=rtol)
    assert_allclose(model.coef_, coef, rtol=rtol)
    # Same with sample_weight: unit weights must give the identical solution.
    model = (
        clone(model).set_params(**params).fit(X, y, sample_weight=np.ones(X.shape[0]))
    )
    assert model.intercept_ == pytest.approx(intercept, rel=rtol)
    assert_allclose(model.coef_, coef, rtol=rtol)
@pytest.mark.parametrize("solver", SOLVERS)
@pytest.mark.parametrize("fit_intercept", [True, False])
def test_glm_regression_hstacked_X(solver, fit_intercept, glm_dataset):
    """Test that GLM converges for all solvers to correct solution on hstacked data.

    We work with a simple constructed data set with known solution.
    Fit on [X] with alpha is the same as fit on [X, X]/2 with alpha/2.
    For long X, [X, X] is still a long but singular matrix.
    """
    model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset
    n_samples, n_features = X.shape
    params = dict(
        alpha=alpha / 2,
        fit_intercept=fit_intercept,
        solver=solver,
        tol=1e-12,
        max_iter=1000,
    )
    model = clone(model).set_params(**params)
    X = X[:, :-1]  # remove intercept
    # Duplicating the (halved) columns keeps X @ [coef, coef] identical to the
    # original X @ coef, but makes X singular.
    X = 0.5 * np.concatenate((X, X), axis=1)
    assert np.linalg.matrix_rank(X) <= min(n_samples, n_features - 1)
    if fit_intercept:
        coef = coef_with_intercept
        intercept = coef[-1]
        coef = coef[:-1]
    else:
        coef = coef_without_intercept
        intercept = 0
    with warnings.catch_warnings():
        # XXX: Investigate if the ConvergenceWarning that can appear in some
        # cases should be considered a bug or not. In the mean time we don't
        # fail when the assertions below pass irrespective of the presence of
        # the warning.
        warnings.simplefilter("ignore", ConvergenceWarning)
        model.fit(X, y)
    rtol = 2e-4 if solver == "lbfgs" else 5e-9
    assert model.intercept_ == pytest.approx(intercept, rel=rtol)
    assert_allclose(model.coef_, np.r_[coef, coef], rtol=rtol)
@pytest.mark.parametrize("solver", SOLVERS)
@pytest.mark.parametrize("fit_intercept", [True, False])
def test_glm_regression_vstacked_X(solver, fit_intercept, glm_dataset):
    """Test that GLM converges for all solvers to correct solution on vstacked data.

    We work with a simple constructed data set with known solution.
    Fitting on X, y with alpha is the same as fitting on the vertically stacked
    [X; X], [y; y] with the same (1 *) alpha, because the average loss stays the
    same. For wide X, [X', X'] is a singular matrix.
    """
    model, X, y, _, coef_with_intercept, coef_without_intercept, alpha = glm_dataset
    n_samples, n_features = X.shape
    params = dict(
        alpha=alpha,
        fit_intercept=fit_intercept,
        solver=solver,
        tol=1e-12,
        max_iter=1000,
    )
    model = clone(model).set_params(**params)
    X = X[:, :-1]  # remove intercept
    # Duplicate the rows (and targets below) to build the stacked problem.
    X = np.concatenate((X, X), axis=0)
    assert np.linalg.matrix_rank(X) <= min(n_samples, n_features)
    y = np.r_[y, y]
    if fit_intercept:
        coef = coef_with_intercept
        intercept = coef[-1]
        coef = coef[:-1]
    else:
        coef = coef_without_intercept
        intercept = 0
    model.fit(X, y)
    rtol = 3e-5 if solver == "lbfgs" else 5e-9
    assert model.intercept_ == pytest.approx(intercept, rel=rtol)
    assert_allclose(model.coef_, coef, rtol=rtol)
@pytest.mark.parametrize("solver", SOLVERS)
@pytest.mark.parametrize("fit_intercept", [True, False])
def test_glm_regression_unpenalized(solver, fit_intercept, glm_dataset):
    """Test that unpenalized GLM converges for all solvers to correct solution.

    We work with a simple constructed data set with known solution.
    Note: This checks the minimum norm solution for wide X, i.e.
    n_samples < n_features:
        min ||w||_2 subject to w = argmin deviance(X, y, w)
    """
    model, X, y, coef, _, _, _ = glm_dataset
    n_samples, n_features = X.shape
    alpha = 0  # unpenalized
    params = dict(
        alpha=alpha,
        fit_intercept=fit_intercept,
        solver=solver,
        tol=1e-12,
        max_iter=1000,
    )
    model = clone(model).set_params(**params)
    if fit_intercept:
        X = X[:, :-1]  # remove intercept
        intercept = coef[-1]
        coef = coef[:-1]
    else:
        intercept = 0
    with warnings.catch_warnings():
        if solver.startswith("newton") and n_samples < n_features:
            # The newton solvers should warn and automatically fallback to LBFGS
            # in this case. The model should still converge.
            warnings.filterwarnings("ignore", category=scipy.linalg.LinAlgWarning)
        # XXX: Investigate if the ConvergenceWarning that can appear in some
        # cases should be considered a bug or not. In the mean time we don't
        # fail when the assertions below pass irrespective of the presence of
        # the warning.
        warnings.filterwarnings("ignore", category=ConvergenceWarning)
        model.fit(X, y)
    # FIXME: `assert_allclose(model.coef_, coef)` should work for all cases but fails
    # for the wide/fat case with n_features > n_samples. Most current GLM solvers do
    # NOT return the minimum norm solution with fit_intercept=True.
    if n_samples > n_features:
        # Well determined problem: the solution is unique, check it directly.
        rtol = 5e-5 if solver == "lbfgs" else 1e-7
        assert model.intercept_ == pytest.approx(intercept)
        assert_allclose(model.coef_, coef, rtol=rtol)
    else:
        # As it is an underdetermined problem, prediction = y. The following shows that
        # we get a solution, i.e. a (non-unique) minimum of the objective function ...
        rtol = 5e-5
        if solver == "newton-cholesky":
            rtol = 5e-4
        assert_allclose(model.predict(X), y, rtol=rtol)
        # Compare the norm of the fitted parameter vector against the norm of
        # the known minimum norm solution to decide which assertion applies.
        norm_solution = np.linalg.norm(np.r_[intercept, coef])
        norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_])
        if solver == "newton-cholesky":
            # XXX: This solver shows random behaviour. Sometimes it finds solutions
            # with norm_model <= norm_solution! So we check conditionally.
            if norm_model < (1 + 1e-12) * norm_solution:
                assert model.intercept_ == pytest.approx(intercept)
                assert_allclose(model.coef_, coef, rtol=rtol)
        elif solver == "lbfgs" and fit_intercept:
            # But it is not the minimum norm solution. Otherwise the norms would be
            # equal.
            assert norm_model > (1 + 1e-12) * norm_solution
            # See https://github.com/scikit-learn/scikit-learn/issues/23670.
            # Note: Even adding a tiny penalty does not give the minimal norm solution.
            # XXX: We could have naively expected LBFGS to find the minimal norm
            # solution by adding a very small penalty. Even that fails for a reason we
            # do not properly understand at this point.
        else:
            # When `fit_intercept=False`, LBFGS naturally converges to the minimum norm
            # solution on this problem.
            # XXX: Do we have any theoretical guarantees why this should be the case?
            assert model.intercept_ == pytest.approx(intercept, rel=rtol)
            assert_allclose(model.coef_, coef, rtol=rtol)
@pytest.mark.parametrize("solver", SOLVERS)
@pytest.mark.parametrize("fit_intercept", [True, False])
def test_glm_regression_unpenalized_hstacked_X(solver, fit_intercept, glm_dataset):
    """Test that unpenalized GLM converges for all solvers to correct solution.

    We work with a simple constructed data set with known solution.
    GLM fit on [X] is the same as fit on [X, X]/2.
    For long X, [X, X] is a singular matrix and we check against the minimum norm
    solution:
        min ||w||_2 subject to w = argmin deviance(X, y, w)
    """
    model, X, y, coef, _, _, _ = glm_dataset
    n_samples, n_features = X.shape
    alpha = 0  # unpenalized
    params = dict(
        alpha=alpha,
        fit_intercept=fit_intercept,
        solver=solver,
        tol=1e-12,
        max_iter=1000,
    )
    model = clone(model).set_params(**params)
    if fit_intercept:
        intercept = coef[-1]
        coef = coef[:-1]
        if n_samples > n_features:
            X = X[:, :-1]  # remove intercept
            X = 0.5 * np.concatenate((X, X), axis=1)
        else:
            # To know the minimum norm solution, we keep one intercept column and do
            # not divide by 2. Later on, we must take special care.
            X = np.c_[X[:, :-1], X[:, :-1], X[:, -1]]
    else:
        intercept = 0
        X = 0.5 * np.concatenate((X, X), axis=1)
    assert np.linalg.matrix_rank(X) <= min(n_samples, n_features)
    with warnings.catch_warnings():
        if solver.startswith("newton"):
            # The newton solvers should warn and automatically fallback to LBFGS
            # in this case. The model should still converge.
            warnings.filterwarnings("ignore", category=scipy.linalg.LinAlgWarning)
        # XXX: Investigate if the ConvergenceWarning that can appear in some
        # cases should be considered a bug or not. In the mean time we don't
        # fail when the assertions below pass irrespective of the presence of
        # the warning.
        warnings.filterwarnings("ignore", category=ConvergenceWarning)
        model.fit(X, y)
    if fit_intercept and n_samples < n_features:
        # Here we take special care.
        # Undo the construction above: features were duplicated without the
        # factor 1/2, so the fitted parameters carry a factor 1/2 each.
        model_intercept = 2 * model.intercept_
        model_coef = 2 * model.coef_[:-1]  # exclude the other intercept term.
        # For minimum norm solution, we would have
        # assert model.intercept_ == pytest.approx(model.coef_[-1])
    else:
        model_intercept = model.intercept_
        model_coef = model.coef_
    if n_samples > n_features:
        assert model_intercept == pytest.approx(intercept)
        rtol = 1e-4
        assert_allclose(model_coef, np.r_[coef, coef], rtol=rtol)
    else:
        # As it is an underdetermined problem, prediction = y. The following shows that
        # we get a solution, i.e. a (non-unique) minimum of the objective function ...
        rtol = 1e-6 if solver == "lbfgs" else 5e-6
        assert_allclose(model.predict(X), y, rtol=rtol)
        if (solver == "lbfgs" and fit_intercept) or solver == "newton-cholesky":
            # Same as in test_glm_regression_unpenalized.
            # But it is not the minimum norm solution. Otherwise the norms would be
            # equal.
            norm_solution = np.linalg.norm(
                0.5 * np.r_[intercept, intercept, coef, coef]
            )
            norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_])
            assert norm_model > (1 + 1e-12) * norm_solution
            # For minimum norm solution, we would have
            # assert model.intercept_ == pytest.approx(model.coef_[-1])
        else:
            assert model_intercept == pytest.approx(intercept, rel=5e-6)
            assert_allclose(model_coef, np.r_[coef, coef], rtol=1e-4)
@pytest.mark.parametrize("solver", SOLVERS)
@pytest.mark.parametrize("fit_intercept", [True, False])
def test_glm_regression_unpenalized_vstacked_X(solver, fit_intercept, glm_dataset):
    """Test that unpenalized GLM converges for all solvers to correct solution.

    We work with a simple constructed data set with known solution.
    GLM fit on [X] is the same as fit on the vertically stacked data
    [X; X] with targets [y; y].
    For wide X, [X', X'] is a singular matrix and we check against the minimum norm
    solution:
        min ||w||_2 subject to w = argmin deviance(X, y, w)
    """
    model, X, y, coef, _, _, _ = glm_dataset
    n_samples, n_features = X.shape
    alpha = 0  # unpenalized
    params = dict(
        alpha=alpha,
        fit_intercept=fit_intercept,
        solver=solver,
        tol=1e-12,
        max_iter=1000,
    )
    model = clone(model).set_params(**params)
    if fit_intercept:
        X = X[:, :-1]  # remove intercept
        intercept = coef[-1]
        coef = coef[:-1]
    else:
        intercept = 0
    X = np.concatenate((X, X), axis=0)
    assert np.linalg.matrix_rank(X) <= min(n_samples, n_features)
    y = np.r_[y, y]
    with warnings.catch_warnings():
        if solver.startswith("newton") and n_samples < n_features:
            # The newton solvers should warn and automatically fallback to LBFGS
            # in this case. The model should still converge.
            warnings.filterwarnings("ignore", category=scipy.linalg.LinAlgWarning)
        # XXX: Investigate if the ConvergenceWarning that can appear in some
        # cases should be considered a bug or not. In the mean time we don't
        # fail when the assertions below pass irrespective of the presence of
        # the warning.
        warnings.filterwarnings("ignore", category=ConvergenceWarning)
        model.fit(X, y)
    if n_samples > n_features:
        # Well determined problem: the solution is unique, check it directly.
        rtol = 5e-5 if solver == "lbfgs" else 1e-6
        assert model.intercept_ == pytest.approx(intercept)
        assert_allclose(model.coef_, coef, rtol=rtol)
    else:
        # As it is an underdetermined problem, prediction = y. The following shows that
        # we get a solution, i.e. a (non-unique) minimum of the objective function ...
        rtol = 1e-6 if solver == "lbfgs" else 5e-6
        assert_allclose(model.predict(X), y, rtol=rtol)
        # Compare the norm of the fitted parameter vector against the norm of
        # the known minimum norm solution to decide which assertion applies.
        norm_solution = np.linalg.norm(np.r_[intercept, coef])
        norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_])
        if solver == "newton-cholesky":
            # XXX: This solver shows random behaviour. Sometimes it finds solutions
            # with norm_model <= norm_solution! So we check conditionally.
            if not (norm_model > (1 + 1e-12) * norm_solution):
                assert model.intercept_ == pytest.approx(intercept)
                assert_allclose(model.coef_, coef, rtol=1e-4)
        elif solver == "lbfgs" and fit_intercept:
            # Same as in test_glm_regression_unpenalized.
            # But it is not the minimum norm solution. Otherwise the norms would be
            # equal.
            assert norm_model > (1 + 1e-12) * norm_solution
        else:
            rtol = 1e-5 if solver == "newton-cholesky" else 1e-4
            assert model.intercept_ == pytest.approx(intercept, rel=rtol)
            assert_allclose(model.coef_, coef, rtol=rtol)
def test_sample_weights_validation():
    """Test the raised errors in the validation of sample_weight."""
    X = [[1]]
    y = [1]
    glm = _GeneralizedLinearRegressor()
    # A positive scalar weight is accepted.
    glm.fit(X, y, sample_weight=1)
    # A 2d array is rejected.
    weights = [[0]]
    with pytest.raises(ValueError, match="must be 1D array or scalar"):
        glm.fit(X, y, weights)
    # A 1d array with the wrong length is rejected.
    weights = [1, 0]
    msg = r"sample_weight.shape == \(2,\), expected \(1,\)!"
    with pytest.raises(ValueError, match=msg):
        glm.fit(X, y, weights)
@pytest.mark.parametrize(
    "glm",
    [
        TweedieRegressor(power=3),
        PoissonRegressor(),
        GammaRegressor(),
        TweedieRegressor(power=1.5),
    ],
)
def test_glm_wrong_y_range(glm):
    """Check the error raised when `y` lies outside the distribution's support.

    Poisson, Gamma and Tweedie (power > 1) regressors all require non-negative
    targets; fitting on a `y` that contains a negative value must raise a
    ValueError with an informative message.
    """
    X = np.array([[1], [1]])
    y = np.array([-1, 2])
    expected_msg = r"Some value\(s\) of y are out of the valid range of the loss"
    with pytest.raises(ValueError, match=expected_msg):
        glm.fit(X, y)
@pytest.mark.parametrize("fit_intercept", [False, True])
def test_glm_identity_regression(fit_intercept):
    """GLM with identity link recovers an exact linear model."""
    coef = [1.0, 2.0]
    X = np.array([[1, 1, 1, 1, 1], [0, 1, 2, 3, 4]]).T
    y = X @ np.asarray(coef)
    glm = _GeneralizedLinearRegressor(
        alpha=0,
        fit_intercept=fit_intercept,
        tol=1e-12,
    )
    if not fit_intercept:
        glm.fit(X, y)
        assert_allclose(glm.coef_, coef)
    else:
        # Drop the constant column and let the estimator fit the intercept.
        glm.fit(X[:, 1:], y)
        assert_allclose(glm.coef_, coef[1:])
        assert_allclose(glm.intercept_, coef[0])
@pytest.mark.parametrize("fit_intercept", [False, True])
@pytest.mark.parametrize("alpha", [0.0, 1.0])
@pytest.mark.parametrize(
    "GLMEstimator", [_GeneralizedLinearRegressor, PoissonRegressor, GammaRegressor]
)
def test_glm_sample_weight_consistency(fit_intercept, alpha, GLMEstimator):
    """Test that the impact of sample_weight is consistent"""
    rng = np.random.RandomState(0)
    n_samples, n_features = 10, 5
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    glm_params = dict(alpha=alpha, fit_intercept=fit_intercept)
    glm = GLMEstimator(**glm_params).fit(X, y)
    coef_ref = glm.coef_.copy()

    # Unit weights must be equivalent to sample_weight=None.
    glm.fit(X, y, sample_weight=np.ones_like(y))
    assert_allclose(glm.coef_, coef_ref)

    # Weights are normalized, so a constant rescaling has no effect.
    glm.fit(X, y, sample_weight=2 * np.ones_like(y))
    assert_allclose(glm.coef_, coef_ref)

    # A zero weight on one sample is equivalent to dropping that sample.
    weights = np.ones_like(y)
    weights[-1] = 0
    glm.fit(X, y, sample_weight=weights)
    coef_zero_weight = glm.coef_.copy()
    glm.fit(X[:-1], y[:-1])
    assert_allclose(glm.coef_, coef_zero_weight)

    # Doubling a weight is equivalent to repeating the sample.
    half = n_samples // 2
    X_rep = np.concatenate([X, X[:half]], axis=0)
    y_rep = np.concatenate([y, y[:half]])
    doubled = np.ones(len(y))
    doubled[:half] = 2
    glm_weighted = GLMEstimator(**glm_params).fit(X, y, sample_weight=doubled)
    glm_repeated = GLMEstimator(**glm_params).fit(X_rep, y_rep, sample_weight=None)
    assert_allclose(glm_weighted.coef_, glm_repeated.coef_)
@pytest.mark.parametrize("solver", SOLVERS)
@pytest.mark.parametrize("fit_intercept", [True, False])
@pytest.mark.parametrize(
    "estimator",
    [
        PoissonRegressor(),
        GammaRegressor(),
        TweedieRegressor(power=3.0),
        TweedieRegressor(power=0, link="log"),
        TweedieRegressor(power=1.5),
        TweedieRegressor(power=4.5),
    ],
)
def test_glm_log_regression(solver, fit_intercept, estimator):
    """GLMs with a log link recover an exact log-linear model."""
    coef = [0.2, -0.1]
    X = np.array([[0, 1, 2, 3, 4], [1, 1, 1, 1, 1]]).T
    y = np.exp(X @ np.asarray(coef))
    est = clone(estimator).set_params(
        alpha=0,
        fit_intercept=fit_intercept,
        solver=solver,
        tol=1e-8,
    )
    if fit_intercept:
        # Drop the constant column; its coefficient becomes the intercept.
        fitted = est.fit(X[:, :-1], y)
        assert_allclose(fitted.coef_, coef[:-1], rtol=1e-6)
        assert_allclose(fitted.intercept_, coef[-1], rtol=1e-6)
    else:
        fitted = est.fit(X, y)
        assert_allclose(fitted.coef_, coef, rtol=2e-6)
@pytest.mark.parametrize("solver", SOLVERS)
@pytest.mark.parametrize("fit_intercept", [True, False])
def test_warm_start(solver, fit_intercept, global_random_seed):
    """
    Test that `warm_start=True` enables incremental fitting in PoissonRegressor.

    This test verifies that when using `warm_start=True`, the model continues
    optimizing from previous coefficients instead of restarting from scratch.
    It ensures that after an initial fit with `max_iter=1`, the model has a
    higher objective function value (indicating incomplete optimization).
    The test then checks whether allowing additional iterations enables
    convergence to a solution comparable to a fresh training run (`warm_start=False`).
    """
    n_samples, n_features = 100, 10
    X, y = make_regression(
        n_samples=n_samples,
        n_features=n_features,
        n_informative=n_features - 2,
        bias=fit_intercept * 1.0,
        noise=1.0,
        random_state=global_random_seed,
    )
    y = np.abs(y)  # Poisson requires non-negative targets.
    alpha = 1
    params = {
        "solver": solver,
        "fit_intercept": fit_intercept,
        "tol": 1e-10,
    }
    # Reference model: fully converged fit without warm starting.
    glm1 = PoissonRegressor(warm_start=False, max_iter=1000, alpha=alpha, **params)
    glm1.fit(X, y)
    glm2 = PoissonRegressor(warm_start=True, max_iter=1, alpha=alpha, **params)
    # As we intentionally set max_iter=1 such that the solver should raise a
    # ConvergenceWarning.
    with pytest.warns(ConvergenceWarning):
        glm2.fit(X, y)
    # Evaluate both models under the same penalized objective the solvers
    # minimize internally (mean loss with uniform weights + L2 penalty).
    linear_loss = LinearModelLoss(
        base_loss=glm1._get_loss(),
        fit_intercept=fit_intercept,
    )
    sw = np.full_like(y, fill_value=1 / n_samples)
    objective_glm1 = linear_loss.loss(
        coef=np.r_[glm1.coef_, glm1.intercept_] if fit_intercept else glm1.coef_,
        X=X,
        y=y,
        sample_weight=sw,
        l2_reg_strength=alpha,
    )
    objective_glm2 = linear_loss.loss(
        coef=np.r_[glm2.coef_, glm2.intercept_] if fit_intercept else glm2.coef_,
        X=X,
        y=y,
        sample_weight=sw,
        l2_reg_strength=alpha,
    )
    # After a single iteration the warm-startable model must still have a
    # strictly worse objective than the fully converged reference.
    assert objective_glm1 < objective_glm2
    glm2.set_params(max_iter=1000)
    glm2.fit(X, y)
    # The two models are not exactly identical since the lbfgs solver
    # computes the approximate hessian from previous iterations, which
    # will not be strictly identical in the case of a warm start.
    rtol = 2e-4 if solver == "lbfgs" else 1e-9
    assert_allclose(glm1.coef_, glm2.coef_, rtol=rtol)
    assert_allclose(glm1.score(X, y), glm2.score(X, y), rtol=1e-5)
@pytest.mark.parametrize("n_samples, n_features", [(100, 10), (10, 100)])
@pytest.mark.parametrize("fit_intercept", [True, False])
@pytest.mark.parametrize("sample_weight", [None, True])
def test_normal_ridge_comparison(
    n_samples, n_features, fit_intercept, sample_weight, request
):
    """Compare with Ridge regression for Normal distributions."""
    test_size = 10
    X, y = make_regression(
        n_samples=n_samples + test_size,
        n_features=n_features,
        n_informative=n_features - 2,
        noise=0.5,
        random_state=42,
    )
    # SVD handles the well-determined case; SAGA the wide (singular) one.
    if n_samples > n_features:
        ridge_params = {"solver": "svd"}
    else:
        ridge_params = {"solver": "saga", "max_iter": 1000000, "tol": 1e-7}
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=test_size, random_state=0
    )
    alpha = 1.0
    # GLM has 1/(2*n) * Loss + 1/2*L2, Ridge has Loss + L2, so Ridge's alpha
    # must be scaled by the (weighted) number of samples.
    if sample_weight is None:
        sw_train, alpha_ridge = None, alpha * n_samples
    else:
        sw_train = np.random.RandomState(0).rand(len(y_train))
        alpha_ridge = alpha * sw_train.sum()
    ridge = Ridge(
        alpha=alpha_ridge,
        random_state=42,
        fit_intercept=fit_intercept,
        **ridge_params,
    )
    ridge.fit(X_train, y_train, sample_weight=sw_train)
    glm = _GeneralizedLinearRegressor(
        alpha=alpha,
        fit_intercept=fit_intercept,
        max_iter=300,
        tol=1e-5,
    )
    glm.fit(X_train, y_train, sample_weight=sw_train)
    assert glm.coef_.shape == (X.shape[1],)
    assert_allclose(glm.coef_, ridge.coef_, atol=5e-5)
    assert_allclose(glm.intercept_, ridge.intercept_, rtol=1e-5)
    assert_allclose(glm.predict(X_train), ridge.predict(X_train), rtol=2e-4)
    assert_allclose(glm.predict(X_test), ridge.predict(X_test), rtol=2e-4)
@pytest.mark.parametrize("solver", ["lbfgs", "newton-cholesky"])
def test_poisson_glmnet(solver):
    """Compare Poisson regression with L2 regularization and LogLink to glmnet.

    Reference values obtained with R:
        library("glmnet")
        options(digits=10)
        df <- data.frame(a=c(-2,-1,1,2), b=c(0,0,1,1), y=c(0,1,1,2))
        x <- data.matrix(df[,c("a", "b")])
        y <- df$y
        fit <- glmnet(x=x, y=y, alpha=0, intercept=T, family="poisson",
                      standardize=F, thresh=1e-10, nlambda=10000)
        coef(fit, s=1)
        # (Intercept) -0.12889386979
        # a            0.29019207995
        # b            0.03741173122
    """
    X = np.array([[-2, 0], [-1, 0], [1, 1], [2, 1]])
    y = np.array([0, 1, 1, 2])
    glm = PoissonRegressor(
        alpha=1,
        fit_intercept=True,
        tol=1e-7,
        max_iter=300,
        solver=solver,
    ).fit(X, y)
    assert_allclose(glm.intercept_, -0.12889386979, rtol=1e-5)
    assert_allclose(glm.coef_, [0.29019207995, 0.03741173122], rtol=1e-5)
def test_convergence_warning(regression_data):
    """A single iteration with an unreachable tolerance must warn."""
    X, y = regression_data
    glm = _GeneralizedLinearRegressor(max_iter=1, tol=1e-20)
    with pytest.warns(ConvergenceWarning):
        glm.fit(X, y)
@pytest.mark.parametrize(
"name, link_class", [("identity", IdentityLink), ("log", LogLink)]
)
def test_tweedie_link_argument(name, link_class):
"""Test GLM link argument set as string."""
y = np.array([0.1, 0.5]) # in range of all distributions
X = np.array([[1], [2]])
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/compose/_target.py | sklearn/compose/_target.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
import numpy as np
from sklearn.base import BaseEstimator, RegressorMixin, _fit_context, clone
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import FunctionTransformer
from sklearn.utils import Bunch, _safe_indexing, check_array
from sklearn.utils._metadata_requests import (
MetadataRouter,
MethodMapping,
_routing_enabled,
process_routing,
)
from sklearn.utils._param_validation import HasMethods
from sklearn.utils._tags import get_tags
from sklearn.utils.validation import check_is_fitted
__all__ = ["TransformedTargetRegressor"]
class TransformedTargetRegressor(RegressorMixin, BaseEstimator):
    """Meta-estimator to regress on a transformed target.

    Useful for applying a non-linear transformation to the target `y` in
    regression problems. This transformation can be given as a Transformer
    such as the :class:`~sklearn.preprocessing.QuantileTransformer` or as a
    function and its inverse such as `np.log` and `np.exp`.

    The computation during :meth:`fit` is::

        regressor.fit(X, func(y))

    or::

        regressor.fit(X, transformer.transform(y))

    The computation during :meth:`predict` is::

        inverse_func(regressor.predict(X))

    or::

        transformer.inverse_transform(regressor.predict(X))

    Read more in the :ref:`User Guide <transformed_target_regressor>`.

    .. versionadded:: 0.20

    Parameters
    ----------
    regressor : object, default=None
        Regressor object such as derived from
        :class:`~sklearn.base.RegressorMixin`. This regressor will
        automatically be cloned each time prior to fitting. If `regressor is
        None`, :class:`~sklearn.linear_model.LinearRegression` is created and used.

    transformer : object, default=None
        Estimator object such as derived from
        :class:`~sklearn.base.TransformerMixin`. Cannot be set at the same time
        as `func` and `inverse_func`. If `transformer is None` as well as
        `func` and `inverse_func`, the transformer will be an identity
        transformer. Note that the transformer will be cloned during fitting.
        Also, the transformer is restricting `y` to be a numpy array.

    func : function, default=None
        Function to apply to `y` before passing to :meth:`fit`. Cannot be set
        at the same time as `transformer`. If `func is None`, the function used will be
        the identity function. If `func` is set, `inverse_func` also needs to be
        provided. The function needs to return a 2-dimensional array.

    inverse_func : function, default=None
        Function to apply to the prediction of the regressor. Cannot be set at
        the same time as `transformer`. The inverse function is used to return
        predictions to the same space of the original training labels. If
        `inverse_func` is set, `func` also needs to be provided. The inverse
        function needs to return a 2-dimensional array.

    check_inverse : bool, default=True
        Whether to check that `transform` followed by `inverse_transform`
        or `func` followed by `inverse_func` leads to the original targets.

    Attributes
    ----------
    regressor_ : object
        Fitted regressor.

    transformer_ : object
        Transformer used in :meth:`fit` and :meth:`predict`.

    n_features_in_ : int
        Number of features seen during :term:`fit`. Only defined if the
        underlying regressor exposes such an attribute when fit.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    sklearn.preprocessing.FunctionTransformer : Construct a transformer from an
        arbitrary callable.

    Notes
    -----
    Internally, the target `y` is always converted into a 2-dimensional array
    to be used by scikit-learn transformers. At the time of prediction, the
    output will be reshaped to a have the same number of dimensions as `y`.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.linear_model import LinearRegression
    >>> from sklearn.compose import TransformedTargetRegressor
    >>> tt = TransformedTargetRegressor(regressor=LinearRegression(),
    ...                                 func=np.log, inverse_func=np.exp)
    >>> X = np.arange(4).reshape(-1, 1)
    >>> y = np.exp(2 * X).ravel()
    >>> tt.fit(X, y)
    TransformedTargetRegressor(...)
    >>> tt.score(X, y)
    1.0
    >>> tt.regressor_.coef_
    array([2.])

    For a more detailed example use case refer to
    :ref:`sphx_glr_auto_examples_compose_plot_transformed_target.py`.
    """

    _parameter_constraints: dict = {
        "regressor": [HasMethods(["fit", "predict"]), None],
        "transformer": [HasMethods("transform"), None],
        "func": [callable, None],
        "inverse_func": [callable, None],
        "check_inverse": ["boolean"],
    }

    def __init__(
        self,
        regressor=None,
        *,
        transformer=None,
        func=None,
        inverse_func=None,
        check_inverse=True,
    ):
        self.regressor = regressor
        self.transformer = transformer
        self.func = func
        self.inverse_func = inverse_func
        self.check_inverse = check_inverse

    def _fit_transformer(self, y):
        """Check transformer and fit transformer.

        Create the default transformer, fit it and make additional inverse
        check on a subset (optional).
        """
        # `transformer` and `func`/`inverse_func` are mutually exclusive ways
        # of specifying the target transformation.
        if self.transformer is not None and (
            self.func is not None or self.inverse_func is not None
        ):
            raise ValueError(
                "'transformer' and functions 'func'/'inverse_func' cannot both be set."
            )
        elif self.transformer is not None:
            self.transformer_ = clone(self.transformer)
        else:
            if (self.func is not None and self.inverse_func is None) or (
                self.func is None and self.inverse_func is not None
            ):
                lacking_param, existing_param = (
                    ("func", "inverse_func")
                    if self.func is None
                    else ("inverse_func", "func")
                )
                raise ValueError(
                    f"When '{existing_param}' is provided, '{lacking_param}' must also"
                    f" be provided. If {lacking_param} is supposed to be the default,"
                    " you need to explicitly pass it the identity function."
                )
            self.transformer_ = FunctionTransformer(
                func=self.func,
                inverse_func=self.inverse_func,
                validate=True,
                check_inverse=self.check_inverse,
            )
            # We are transforming the target here and not the features, so we set the
            # output of FunctionTransformer() to be a numpy array (default) and to not
            # depend on the global configuration:
            self.transformer_.set_output(transform="default")
        # XXX: sample_weight is not currently passed to the
        # transformer. However, if transformer starts using sample_weight, the
        # code should be modified accordingly. At the time to consider the
        # sample_prop feature, it is also a good use case to be considered.
        self.transformer_.fit(y)
        if self.check_inverse:
            # Round-trip roughly 10 evenly spread samples through
            # transform/inverse_transform to verify the functions are inverses.
            idx_selected = slice(None, None, max(1, y.shape[0] // 10))
            y_sel = _safe_indexing(y, idx_selected)
            y_sel_t = self.transformer_.transform(y_sel)
            if not np.allclose(y_sel, self.transformer_.inverse_transform(y_sel_t)):
                warnings.warn(
                    (
                        "The provided functions or transformer are"
                        " not strictly inverse of each other. If"
                        " you are sure you want to proceed regardless"
                        ", set 'check_inverse=False'"
                    ),
                    UserWarning,
                )

    @_fit_context(
        # TransformedTargetRegressor.regressor/transformer are not validated yet.
        prefer_skip_nested_validation=False
    )
    def fit(self, X, y, **fit_params):
        """Fit the model according to the given training data.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training vector, where `n_samples` is the number of samples and
            `n_features` is the number of features.

        y : array-like of shape (n_samples,)
            Target values.

        **fit_params : dict
            - If `enable_metadata_routing=False` (default): Parameters directly passed
              to the `fit` method of the underlying regressor.

            - If `enable_metadata_routing=True`: Parameters safely routed to the `fit`
              method of the underlying regressor.

            .. versionchanged:: 1.6
                See :ref:`Metadata Routing User Guide <metadata_routing>` for
                more details.

        Returns
        -------
        self : object
            Fitted estimator.
        """
        if y is None:
            raise ValueError(
                f"This {self.__class__.__name__} estimator "
                "requires y to be passed, but the target y is None."
            )
        y = check_array(
            y,
            input_name="y",
            accept_sparse=False,
            ensure_all_finite=True,
            ensure_2d=False,
            dtype="numeric",
            allow_nd=True,
        )
        # store the number of dimension of the target to predict an array of
        # similar shape at predict
        self._training_dim = y.ndim
        # transformers are designed to modify X which is 2d dimensional, we
        # need to modify y accordingly.
        if y.ndim == 1:
            y_2d = y.reshape(-1, 1)
        else:
            y_2d = y
        self._fit_transformer(y_2d)
        # transform y and convert back to 1d array if needed
        y_trans = self.transformer_.transform(y_2d)
        # FIXME: a FunctionTransformer can return a 1D array even when validate
        # is set to True. Therefore, we need to check the number of dimension
        # first.
        if y_trans.ndim == 2 and y_trans.shape[1] == 1 and self._training_dim == 1:
            y_trans = y_trans.squeeze(axis=1)
        self.regressor_ = self._get_regressor(get_clone=True)
        if _routing_enabled():
            routed_params = process_routing(self, "fit", **fit_params)
        else:
            # Without routing, all fit params go straight to the regressor.
            routed_params = Bunch(regressor=Bunch(fit=fit_params))
        self.regressor_.fit(X, y_trans, **routed_params.regressor.fit)
        # Expose the fitted regressor's input feature names on the
        # meta-estimator when available.
        if hasattr(self.regressor_, "feature_names_in_"):
            self.feature_names_in_ = self.regressor_.feature_names_in_
        return self

    def predict(self, X, **predict_params):
        """Predict using the base regressor, applying inverse.

        The regressor is used to predict and the `inverse_func` or
        `inverse_transform` is applied before returning the prediction.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Samples.

        **predict_params : dict of str -> object
            - If `enable_metadata_routing=False` (default): Parameters directly passed
              to the `predict` method of the underlying regressor.

            - If `enable_metadata_routing=True`: Parameters safely routed to the
              `predict` method of the underlying regressor.

            .. versionchanged:: 1.6
                See :ref:`Metadata Routing User Guide <metadata_routing>`
                for more details.

        Returns
        -------
        y_hat : ndarray of shape (n_samples,)
            Predicted values.
        """
        check_is_fitted(self)
        if _routing_enabled():
            routed_params = process_routing(self, "predict", **predict_params)
        else:
            routed_params = Bunch(regressor=Bunch(predict=predict_params))
        pred = self.regressor_.predict(X, **routed_params.regressor.predict)
        # The transformer expects 2d input, so reshape 1d predictions first.
        if pred.ndim == 1:
            pred_trans = self.transformer_.inverse_transform(pred.reshape(-1, 1))
        else:
            pred_trans = self.transformer_.inverse_transform(pred)
        # Restore the dimensionality of the training target (see fit).
        if (
            self._training_dim == 1
            and pred_trans.ndim == 2
            and pred_trans.shape[1] == 1
        ):
            pred_trans = pred_trans.squeeze(axis=1)
        return pred_trans

    def __sklearn_tags__(self):
        # Delegate input/target capability tags to the wrapped regressor.
        regressor = self._get_regressor()
        tags = super().__sklearn_tags__()
        tags.regressor_tags.poor_score = True
        tags.input_tags.sparse = get_tags(regressor).input_tags.sparse
        tags.target_tags.multi_output = get_tags(regressor).target_tags.multi_output
        return tags

    @property
    def n_features_in_(self):
        """Number of features seen during :term:`fit`."""
        # For consistency with other estimators we raise an AttributeError so
        # that hasattr() returns False the estimator isn't fitted.
        try:
            check_is_fitted(self)
        except NotFittedError as nfe:
            raise AttributeError(
                "{} object has no n_features_in_ attribute.".format(
                    self.__class__.__name__
                )
            ) from nfe
        return self.regressor_.n_features_in_

    def get_metadata_routing(self):
        """Get metadata routing of this object.

        Please check :ref:`User Guide <metadata_routing>` on how the routing
        mechanism works.

        .. versionadded:: 1.6

        Returns
        -------
        routing : MetadataRouter
            A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
            routing information.
        """
        router = MetadataRouter(owner=self).add(
            regressor=self._get_regressor(),
            method_mapping=MethodMapping()
            .add(caller="fit", callee="fit")
            .add(caller="predict", callee="predict"),
        )
        return router

    def _get_regressor(self, get_clone=False):
        # Return the user-supplied regressor (cloned on request) or a default
        # LinearRegression when none was given.
        if self.regressor is None:
            return LinearRegression()
        return clone(self.regressor) if get_clone else self.regressor
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/compose/_column_transformer.py | sklearn/compose/_column_transformer.py | """
The :mod:`sklearn.compose._column_transformer` module implements utilities
to work with heterogeneous data and to apply different transformers to
different columns.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from collections import Counter
from functools import partial
from itertools import chain
from numbers import Integral, Real
import numpy as np
from scipy import sparse
from sklearn.base import TransformerMixin, _fit_context, clone
from sklearn.pipeline import _fit_transform_one, _name_estimators, _transform_one
from sklearn.preprocessing import FunctionTransformer
from sklearn.utils import Bunch
from sklearn.utils._dataframe import is_pandas_df
from sklearn.utils._indexing import (
_determine_key_type,
_get_column_indices,
_safe_indexing,
)
from sklearn.utils._metadata_requests import METHODS
from sklearn.utils._param_validation import HasMethods, Hidden, Interval, StrOptions
from sklearn.utils._repr_html.estimator import _VisualBlock
from sklearn.utils._set_output import (
_get_container_adapter,
_get_output_config,
_safe_set_output,
)
from sklearn.utils._tags import get_tags
from sklearn.utils.metadata_routing import (
MetadataRouter,
MethodMapping,
_raise_for_params,
_routing_enabled,
process_routing,
)
from sklearn.utils.metaestimators import _BaseComposition
from sklearn.utils.parallel import Parallel, delayed
from sklearn.utils.validation import (
_check_feature_names_in,
_check_n_features,
_get_feature_names,
_num_samples,
check_array,
check_is_fitted,
validate_data,
)
__all__ = ["ColumnTransformer", "make_column_selector", "make_column_transformer"]
_ERR_MSG_1DCOLUMN = (
"1D data passed to a transformer that expects 2D data. "
"Try to specify the column selection as a list of one "
"item instead of a scalar."
)
class ColumnTransformer(TransformerMixin, _BaseComposition):
"""Applies transformers to columns of an array or pandas DataFrame.
This estimator allows different columns or column subsets of the input
to be transformed separately and the features generated by each transformer
will be concatenated to form a single feature space.
This is useful for heterogeneous or columnar data, to combine several
feature extraction mechanisms or transformations into a single transformer.
Read more in the :ref:`User Guide <column_transformer>`.
.. versionadded:: 0.20
Parameters
----------
transformers : list of tuples
List of (name, transformer, columns) tuples specifying the
transformer objects to be applied to subsets of the data.
name : str
Like in Pipeline and FeatureUnion, this allows the transformer and
its parameters to be set using ``set_params`` and searched in grid
search.
transformer : {'drop', 'passthrough'} or estimator
Estimator must support :term:`fit` and :term:`transform`.
Special-cased strings 'drop' and 'passthrough' are accepted as
well, to indicate to drop the columns or to pass them through
untransformed, respectively.
columns : str, array-like of str, int, array-like of int, \
array-like of bool, slice or callable
Indexes the data on its second axis. Integers are interpreted as
positional columns, while strings can reference DataFrame columns
by name. A scalar string or int should be used where
``transformer`` expects X to be a 1d array-like (vector),
otherwise a 2d array will be passed to the transformer.
A callable is passed the input data `X` and can return any of the
above. To select multiple columns by name or dtype, you can use
:obj:`make_column_selector`.
remainder : {'drop', 'passthrough'} or estimator, default='drop'
By default, only the specified columns in `transformers` are
transformed and combined in the output, and the non-specified
columns are dropped. (default of ``'drop'``).
By specifying ``remainder='passthrough'``, all remaining columns that
were not specified in `transformers`, but present in the data passed
to `fit` will be automatically passed through. This subset of columns
is concatenated with the output of the transformers. For dataframes,
extra columns not seen during `fit` will be excluded from the output
of `transform`.
By setting ``remainder`` to be an estimator, the remaining
non-specified columns will use the ``remainder`` estimator. The
estimator must support :term:`fit` and :term:`transform`.
Note that using this feature requires that the DataFrame columns
input at :term:`fit` and :term:`transform` have identical order.
sparse_threshold : float, default=0.3
If the output of the different transformers contains sparse matrices,
these will be stacked as a sparse matrix if the overall density is
lower than this value. Use ``sparse_threshold=0`` to always return
dense. When the transformed output consists of all dense data, the
stacked result will be dense, and this keyword will be ignored.
n_jobs : int, default=None
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
transformer_weights : dict, default=None
Multiplicative weights for features per transformer. The output of the
transformer is multiplied by these weights. Keys are transformer names,
values the weights.
verbose : bool, default=False
If True, the time elapsed while fitting each transformer will be
printed as it is completed.
verbose_feature_names_out : bool, str or Callable[[str, str], str], default=True
- If True, :meth:`ColumnTransformer.get_feature_names_out` will prefix
all feature names with the name of the transformer that generated that
feature. It is equivalent to setting
`verbose_feature_names_out="{transformer_name}__{feature_name}"`.
- If False, :meth:`ColumnTransformer.get_feature_names_out` will not
prefix any feature names and will error if feature names are not
unique.
- If ``Callable[[str, str], str]``,
:meth:`ColumnTransformer.get_feature_names_out` will rename all the features
using the name of the transformer. The first argument of the callable is the
transformer name and the second argument is the feature name. The returned
string will be the new feature name.
- If ``str``, it must be a string ready for formatting. The given string will
be formatted using two field names: ``transformer_name`` and ``feature_name``.
e.g. ``"{feature_name}__{transformer_name}"``. See :meth:`str.format` method
from the standard library for more info.
.. versionadded:: 1.0
.. versionchanged:: 1.6
`verbose_feature_names_out` can be a callable or a string to be formatted.
force_int_remainder_cols : bool, default=False
This parameter has no effect.
.. note::
If you do not access the list of columns for the remainder columns
in the `transformers_` fitted attribute, you do not need to set
this parameter.
.. versionadded:: 1.5
.. versionchanged:: 1.7
The default value for `force_int_remainder_cols` will change from
`True` to `False` in version 1.7.
.. deprecated:: 1.7
`force_int_remainder_cols` is deprecated and will be removed in 1.9.
Attributes
----------
transformers_ : list
The collection of fitted transformers as tuples of (name,
fitted_transformer, column). `fitted_transformer` can be an estimator,
or `'drop'`; `'passthrough'` is replaced with an equivalent
:class:`~sklearn.preprocessing.FunctionTransformer`. In case there were
no columns selected, this will be the unfitted transformer. If there
are remaining columns, the final element is a tuple of the form:
('remainder', transformer, remaining_columns) corresponding to the
``remainder`` parameter. If there are remaining columns, then
``len(transformers_)==len(transformers)+1``, otherwise
``len(transformers_)==len(transformers)``.
.. versionadded:: 1.7
The format of the remaining columns now attempts to match that of the other
transformers: if all columns were provided as column names (`str`), the
remaining columns are stored as column names; if all columns were provided
as mask arrays (`bool`), so are the remaining columns; in all other cases
the remaining columns are stored as indices (`int`).
named_transformers_ : :class:`~sklearn.utils.Bunch`
Read-only attribute to access any transformer by given name.
Keys are transformer names and values are the fitted transformer
objects.
sparse_output_ : bool
Boolean flag indicating whether the output of ``transform`` is a
sparse matrix or a dense numpy array, which depends on the output
of the individual transformers and the `sparse_threshold` keyword.
output_indices_ : dict
A dictionary from each transformer name to a slice, where the slice
corresponds to indices in the transformed output. This is useful to
inspect which transformer is responsible for which transformed
feature(s).
.. versionadded:: 1.0
n_features_in_ : int
Number of features seen during :term:`fit`. Only defined if the
underlying transformers expose such an attribute when fit.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
make_column_transformer : Convenience function for
combining the outputs of multiple transformer objects applied to
column subsets of the original feature space.
make_column_selector : Convenience function for selecting
columns based on datatype or the columns name with a regex pattern.
Notes
-----
The order of the columns in the transformed feature matrix follows the
order of how the columns are specified in the `transformers` list.
Columns of the original feature matrix that are not specified are
dropped from the resulting transformed feature matrix, unless specified
in the `passthrough` keyword. Those columns specified with `passthrough`
are added at the right to the output of the transformers.
Examples
--------
>>> import numpy as np
>>> from sklearn.compose import ColumnTransformer
>>> from sklearn.preprocessing import Normalizer
>>> ct = ColumnTransformer(
... [("norm1", Normalizer(norm='l1'), [0, 1]),
... ("norm2", Normalizer(norm='l1'), slice(2, 4))])
>>> X = np.array([[0., 1., 2., 2.],
... [1., 1., 0., 1.]])
>>> # Normalizer scales each row of X to unit norm. A separate scaling
>>> # is applied for the two first and two last elements of each
>>> # row independently.
>>> ct.fit_transform(X)
array([[0. , 1. , 0.5, 0.5],
[0.5, 0.5, 0. , 1. ]])
:class:`ColumnTransformer` can be configured with a transformer that requires
a 1d array by setting the column to a string:
>>> from sklearn.feature_extraction.text import CountVectorizer
>>> from sklearn.preprocessing import MinMaxScaler
>>> import pandas as pd # doctest: +SKIP
>>> X = pd.DataFrame({
... "documents": ["First item", "second one here", "Is this the last?"],
... "width": [3, 4, 5],
... }) # doctest: +SKIP
>>> # "documents" is a string which configures ColumnTransformer to
>>> # pass the documents column as a 1d array to the CountVectorizer
>>> ct = ColumnTransformer(
... [("text_preprocess", CountVectorizer(), "documents"),
... ("num_preprocess", MinMaxScaler(), ["width"])])
>>> X_trans = ct.fit_transform(X) # doctest: +SKIP
For a more detailed example of usage, see
:ref:`sphx_glr_auto_examples_compose_plot_column_transformer_mixed_types.py`.
"""
_parameter_constraints: dict = {
"transformers": [list, Hidden(tuple)],
"remainder": [
StrOptions({"drop", "passthrough"}),
HasMethods(["fit", "transform"]),
HasMethods(["fit_transform", "transform"]),
],
"sparse_threshold": [Interval(Real, 0, 1, closed="both")],
"n_jobs": [Integral, None],
"transformer_weights": [dict, None],
"verbose": ["verbose"],
"verbose_feature_names_out": ["boolean", str, callable],
"force_int_remainder_cols": ["boolean", Hidden(StrOptions({"deprecated"}))],
}
    def __init__(
        self,
        transformers,
        *,
        remainder="drop",
        sparse_threshold=0.3,
        n_jobs=None,
        transformer_weights=None,
        verbose=False,
        verbose_feature_names_out=True,
        force_int_remainder_cols="deprecated",
    ):
        # Store constructor arguments unchanged (scikit-learn convention:
        # no validation or processing in __init__ so that get_params /
        # set_params round-trip cleanly). See the class docstring for the
        # meaning of each parameter.
        self.transformers = transformers
        self.remainder = remainder
        self.sparse_threshold = sparse_threshold
        self.n_jobs = n_jobs
        self.transformer_weights = transformer_weights
        self.verbose = verbose
        self.verbose_feature_names_out = verbose_feature_names_out
        self.force_int_remainder_cols = force_int_remainder_cols
@property
def _transformers(self):
"""
Internal list of transformer only containing the name and
transformers, dropping the columns.
DO NOT USE: This is for the implementation of get_params via
BaseComposition._get_params which expects lists of tuples of len 2.
To iterate through the transformers, use ``self._iter`` instead.
"""
try:
return [(name, trans) for name, trans, _ in self.transformers]
except (TypeError, ValueError):
return self.transformers
@_transformers.setter
def _transformers(self, value):
"""DO NOT USE: This is for the implementation of set_params via
BaseComposition._get_params which gives lists of tuples of len 2.
"""
try:
self.transformers = [
(name, trans, col)
for ((name, trans), (_, _, col)) in zip(value, self.transformers)
]
except (TypeError, ValueError):
self.transformers = value
def set_output(self, *, transform=None):
"""Set the output container when `"transform"` and `"fit_transform"` are called.
Calling `set_output` will set the output of all estimators in `transformers`
and `transformers_`.
Parameters
----------
transform : {"default", "pandas", "polars"}, default=None
Configure output of `transform` and `fit_transform`.
- `"default"`: Default output format of a transformer
- `"pandas"`: DataFrame output
- `"polars"`: Polars output
- `None`: Transform configuration is unchanged
.. versionadded:: 1.4
`"polars"` option was added.
Returns
-------
self : estimator instance
Estimator instance.
"""
super().set_output(transform=transform)
transformers = (
trans
for _, trans, _ in chain(
self.transformers, getattr(self, "transformers_", [])
)
if trans not in {"passthrough", "drop"}
)
for trans in transformers:
_safe_set_output(trans, transform=transform)
if self.remainder not in {"passthrough", "drop"}:
_safe_set_output(self.remainder, transform=transform)
return self
    def get_params(self, deep=True):
        """Get parameters for this estimator.

        Returns the parameters given in the constructor as well as the
        estimators contained within the `transformers` of the
        `ColumnTransformer`.

        Parameters
        ----------
        deep : bool, default=True
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.

        Returns
        -------
        params : dict
            Parameter names mapped to their values.
        """
        # Delegates to _BaseComposition via the `_transformers` property so
        # that nested estimator parameters are included.
        return self._get_params("_transformers", deep=deep)
    def set_params(self, **kwargs):
        """Set the parameters of this estimator.

        Valid parameter keys can be listed with ``get_params()``. Note that you
        can directly set the parameters of the estimators contained in
        `transformers` of `ColumnTransformer`.

        Parameters
        ----------
        **kwargs : dict
            Estimator parameters.

        Returns
        -------
        self : ColumnTransformer
            This estimator.
        """
        # Delegates to _BaseComposition via the `_transformers` property,
        # whose setter re-attaches the column specifications.
        self._set_params("_transformers", **kwargs)
        return self
    def _iter(self, fitted, column_as_labels, skip_drop, skip_empty_columns):
        """
        Generate (name, trans, columns, weight) tuples.

        Parameters
        ----------
        fitted : bool
            If True, use the fitted transformers (``self.transformers_``) to
            iterate through transformers, else use the transformers passed by
            the user (``self.transformers``).

        column_as_labels : bool
            If True, columns are returned as string labels. If False, columns
            are returned as they were given by the user. This can only be True
            if the ``ColumnTransformer`` is already fitted.

        skip_drop : bool
            If True, 'drop' transformers are filtered out.

        skip_empty_columns : bool
            If True, transformers with empty selected columns are filtered out.

        Yields
        ------
        A generator of tuples containing:
            - name : the name of the transformer
            - transformer : the transformer object
            - columns : the columns for that transformer
            - weight : the weight of the transformer
        """
        if fitted:
            transformers = self.transformers_
        else:
            # interleave the validated column specifiers
            transformers = [
                (name, trans, column)
                for (name, trans, _), column in zip(self.transformers, self._columns)
            ]
            # add transformer tuple for remainder, but only when there are
            # remaining columns (self._remainder[2] is the column selection)
            if self._remainder[2]:
                transformers = chain(transformers, [self._remainder])
        # Names missing from transformer_weights get a weight of None.
        get_weight = (self.transformer_weights or {}).get

        for name, trans, columns in transformers:
            if skip_drop and trans == "drop":
                continue
            if skip_empty_columns and _is_empty_column_selection(columns):
                continue

            if column_as_labels:
                # Convert all columns to using their string labels
                columns_is_scalar = np.isscalar(columns)

                indices = self._transformer_to_input_indices[name]
                columns = self.feature_names_in_[indices]

                if columns_is_scalar:
                    # selection is done with one dimension
                    columns = columns[0]

            yield (name, trans, columns, get_weight(name))
def _validate_transformers(self):
"""Validate names of transformers and the transformers themselves.
This checks whether given transformers have the required methods, i.e.
`fit` or `fit_transform` and `transform` implemented.
"""
if not self.transformers:
return
names, transformers, _ = zip(*self.transformers)
# validate names
self._validate_names(names)
# validate estimators
self._check_estimators_are_instances(transformers)
for t in transformers:
if t in ("drop", "passthrough"):
continue
if not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not hasattr(
t, "transform"
):
# Used to validate the transformers in the `transformers` list
raise TypeError(
"All estimators should implement fit and "
"transform, or can be 'drop' or 'passthrough' "
"specifiers. '%s' (type %s) doesn't." % (t, type(t))
)
def _validate_column_callables(self, X):
"""
Converts callable column specifications.
This stores a dictionary of the form `{step_name: column_indices}` and
calls the `columns` on `X` if `columns` is a callable for a given
transformer.
The results are then stored in `self._transformer_to_input_indices`.
"""
all_columns = []
transformer_to_input_indices = {}
for name, _, columns in self.transformers:
if callable(columns):
columns = columns(X)
all_columns.append(columns)
transformer_to_input_indices[name] = _get_column_indices(X, columns)
self._columns = all_columns
self._transformer_to_input_indices = transformer_to_input_indices
def _validate_remainder(self, X):
"""
Validates ``remainder`` and defines ``_remainder`` targeting
the remaining columns.
"""
cols = set(chain(*self._transformer_to_input_indices.values()))
remaining = sorted(set(range(self.n_features_in_)) - cols)
self._transformer_to_input_indices["remainder"] = remaining
remainder_cols = self._get_remainder_cols(remaining)
self._remainder = ("remainder", self.remainder, remainder_cols)
def _get_remainder_cols_dtype(self):
try:
all_dtypes = {_determine_key_type(c) for (*_, c) in self.transformers}
if len(all_dtypes) == 1:
return next(iter(all_dtypes))
except ValueError:
# _determine_key_type raises a ValueError if some transformer
# columns are Callables
return "int"
return "int"
def _get_remainder_cols(self, indices):
dtype = self._get_remainder_cols_dtype()
if dtype == "str":
return list(self.feature_names_in_[indices])
if dtype == "bool":
return [i in indices for i in range(self.n_features_in_)]
return indices
@property
def named_transformers_(self):
"""Access the fitted transformer by name.
Read-only attribute to access any transformer by given name.
Keys are transformer names and values are the fitted transformer
objects.
"""
# Use Bunch object to improve autocomplete
return Bunch(**{name: trans for name, trans, _ in self.transformers_})
def _get_feature_name_out_for_transformer(self, name, trans, feature_names_in):
"""Gets feature names of transformer.
Used in conjunction with self._iter(fitted=True) in get_feature_names_out.
"""
column_indices = self._transformer_to_input_indices[name]
names = feature_names_in[column_indices]
# An actual transformer
if not hasattr(trans, "get_feature_names_out"):
raise AttributeError(
f"Transformer {name} (type {type(trans).__name__}) does "
"not provide get_feature_names_out."
)
return trans.get_feature_names_out(names)
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Input features.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then the following input feature names are generated:
`["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
check_is_fitted(self)
input_features = _check_feature_names_in(self, input_features)
# List of tuples (name, feature_names_out)
transformer_with_feature_names_out = []
for name, trans, *_ in self._iter(
fitted=True,
column_as_labels=False,
skip_empty_columns=True,
skip_drop=True,
):
feature_names_out = self._get_feature_name_out_for_transformer(
name, trans, input_features
)
if feature_names_out is None:
continue
transformer_with_feature_names_out.append((name, feature_names_out))
if not transformer_with_feature_names_out:
# No feature names
return np.array([], dtype=object)
return self._add_prefix_for_feature_names_out(
transformer_with_feature_names_out
)
    def _add_prefix_for_feature_names_out(self, transformer_with_feature_names_out):
        """Add prefix for feature names out that includes the transformer names.

        Parameters
        ----------
        transformer_with_feature_names_out : list of tuples of (str, array-like of str)
            The tuple consistent of the transformer's name and its feature names out.

        Returns
        -------
        feature_names_out : ndarray of shape (n_features,), dtype=str
            Transformed feature names.
        """
        # Resolve `verbose_feature_names_out` into a callable mapping
        # (transformer_name, feature_name) -> output name, or None when no
        # renaming is requested (verbose_feature_names_out=False).
        feature_names_out_callable = None
        if callable(self.verbose_feature_names_out):
            feature_names_out_callable = self.verbose_feature_names_out
        elif isinstance(self.verbose_feature_names_out, str):
            # A format string with `transformer_name` and `feature_name` fields.
            feature_names_out_callable = partial(
                _feature_names_out_with_str_format,
                str_format=self.verbose_feature_names_out,
            )
        elif self.verbose_feature_names_out is True:
            # Default prefixing scheme: "<transformer>__<feature>".
            feature_names_out_callable = partial(
                _feature_names_out_with_str_format,
                str_format="{transformer_name}__{feature_name}",
            )

        if feature_names_out_callable is not None:
            # Prefix the feature names out with the transformers name
            names = list(
                chain.from_iterable(
                    (feature_names_out_callable(name, i) for i in feature_names_out)
                    for name, feature_names_out in transformer_with_feature_names_out
                )
            )
            return np.asarray(names, dtype=object)

        # verbose_feature_names_out is False
        # Check that names are all unique without a prefix
        feature_names_count = Counter(
            chain.from_iterable(s for _, s in transformer_with_feature_names_out)
        )
        # Collect up to six duplicated names so the error message can show at
        # most five and still indicate that more exist.
        top_6_overlap = [
            name for name, count in feature_names_count.most_common(6) if count > 1
        ]
        top_6_overlap.sort()
        if top_6_overlap:
            if len(top_6_overlap) == 6:
                # There are more than 5 overlapping names, we only show the 5
                # of the feature names
                names_repr = str(top_6_overlap[:5])[:-1] + ", ...]"
            else:
                names_repr = str(top_6_overlap)
            raise ValueError(
                f"Output feature names: {names_repr} are not unique. Please set "
                "verbose_feature_names_out=True to add prefixes to feature names"
            )

        return np.concatenate(
            [name for _, name in transformer_with_feature_names_out],
        )
    def _update_fitted_transformers(self, transformers):
        """Set self.transformers_ from given transformers.

        Parameters
        ----------
        transformers : list of estimators
            The fitted estimators as the output of
            `self._call_func_on_transformers(func=_fit_transform_one, ...)`.
            That function doesn't include 'drop' or transformers for which no
            column is selected. 'drop' is kept as is, and for the no-column
            transformers the unfitted transformer is put in
            `self.transformers_`.
        """
        # transformers are fitted; excludes 'drop' cases
        fitted_transformers = iter(transformers)
        transformers_ = []

        # Walk the *unfitted* specification in order, consuming one fitted
        # estimator per entry that was actually fitted; 'drop' and
        # empty-column entries did not produce a fitted estimator.
        for name, old, column, _ in self._iter(
            fitted=False,
            column_as_labels=False,
            skip_drop=False,
            skip_empty_columns=False,
        ):
            if old == "drop":
                trans = "drop"
            elif _is_empty_column_selection(column):
                # No columns selected: keep the unfitted transformer as-is.
                trans = old
            else:
                trans = next(fitted_transformers)
            transformers_.append((name, trans, column))

        # sanity check that transformers is exhausted
        assert not list(fitted_transformers)
        self.transformers_ = transformers_
    def _validate_output(self, result):
        """
        Ensure that the output of each transformer is 2D. Otherwise
        hstack can raise an error or produce incorrect results.

        Also raises when a transformer's pandas output uses extension dtypes
        whose missing values are pandas.NA, since stacking those into a numpy
        array is error-prone downstream.
        """
        # Names of the transformers, in the same order as the results.
        names = [
            name
            for name, _, _, _ in self._iter(
                fitted=True,
                column_as_labels=False,
                skip_drop=True,
                skip_empty_columns=True,
            )
        ]
        for Xs, name in zip(result, names):
            if not getattr(Xs, "ndim", 0) == 2 and not hasattr(Xs, "__dataframe__"):
                raise ValueError(
                    "The output of the '{0}' transformer should be 2D (numpy array, "
                    "scipy sparse array, dataframe).".format(name)
                )
        # The pandas.NA check below only matters when results will be stacked
        # into a numpy array; pandas output keeps the extension dtypes.
        if _get_output_config("transform", self)["dense"] == "pandas":
            return

        try:
            import pandas as pd
        except ImportError:
            return

        for Xs, name in zip(result, names):
            if not is_pandas_df(Xs):
                continue

            for col_name, dtype in Xs.dtypes.to_dict().items():
                # Only extension dtypes whose missing-value marker is pd.NA
                # are problematic here.
                if getattr(dtype, "na_value", None) is not pd.NA:
                    continue
                if pd.NA not in Xs[col_name].values:
                    continue
                class_name = self.__class__.__name__
                raise ValueError(
                    f"The output of the '{name}' transformer for column"
                    f" '{col_name}' has dtype {dtype} and uses pandas.NA to"
                    " represent null values. Storing this output in a numpy array"
                    " can cause errors in downstream scikit-learn estimators, and"
                    " inefficiencies. To avoid this problem you can (i)"
                    " store the output in a pandas DataFrame by using"
                    f" {class_name}.set_output(transform='pandas') or (ii) modify"
                    f" the input data or the '{name}' transformer to avoid the"
                    " presence of pandas.NA (for example by using"
                    " pandas.DataFrame.astype)."
                )
def _record_output_indices(self, Xs):
"""
Record which transformer produced which column.
"""
idx = 0
self.output_indices_ = {}
for transformer_idx, (name, _, _, _) in enumerate(
self._iter(
fitted=True,
column_as_labels=False,
skip_drop=True,
skip_empty_columns=True,
)
):
n_columns = Xs[transformer_idx].shape[1]
self.output_indices_[name] = slice(idx, idx + n_columns)
idx += n_columns
# `_iter` only generates transformers that have a non empty
# selection. Here we set empty slices for transformers that
# generate no output, which are safe for indexing
all_names = [t[0] for t in self.transformers] + ["remainder"]
for name in all_names:
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/compose/__init__.py | sklearn/compose/__init__.py | """Meta-estimators for building composite models with transformers.
In addition to its current contents, this module will eventually be home to
refurbished versions of :class:`~sklearn.pipeline.Pipeline` and
:class:`~sklearn.pipeline.FeatureUnion`.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from sklearn.compose._column_transformer import (
ColumnTransformer,
make_column_selector,
make_column_transformer,
)
from sklearn.compose._target import TransformedTargetRegressor
__all__ = [
"ColumnTransformer",
"TransformedTargetRegressor",
"make_column_selector",
"make_column_transformer",
]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/compose/tests/test_target.py | sklearn/compose/tests/test_target.py | import warnings
import numpy as np
import pytest
from sklearn import config_context, datasets
from sklearn.base import BaseEstimator, TransformerMixin, clone
from sklearn.compose import TransformedTargetRegressor
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import LinearRegression, OrthogonalMatchingPursuit
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer, StandardScaler
from sklearn.utils._testing import assert_allclose
# Shared (X, y) fixture: a deterministic Friedman #1 regression problem used
# by all tests below.
friedman = datasets.make_friedman1(random_state=0)
def test_transform_target_regressor_error():
    """Invalid configurations of TransformedTargetRegressor raise at fit."""
    X, y = friedman

    # Supplying a transformer together with func/inverse_func is ambiguous.
    model = TransformedTargetRegressor(
        regressor=LinearRegression(),
        transformer=StandardScaler(),
        func=np.exp,
        inverse_func=np.log,
    )
    with pytest.raises(
        ValueError,
        match="'transformer' and functions 'func'/'inverse_func' cannot both be set.",
    ):
        model.fit(X, y)

    # sample_weight is forwarded to the underlying regressor, which does not
    # accept it here.
    sample_weight = np.ones((y.shape[0],))
    model = TransformedTargetRegressor(
        regressor=OrthogonalMatchingPursuit(), transformer=StandardScaler()
    )
    with pytest.raises(
        TypeError,
        match=r"fit\(\) got an unexpected keyword argument 'sample_weight'",
    ):
        model.fit(X, y, sample_weight=sample_weight)

    # func and inverse_func must be provided together.
    model = TransformedTargetRegressor(func=np.exp)
    with pytest.raises(
        ValueError,
        match="When 'func' is provided, 'inverse_func' must also be provided",
    ):
        model.fit(X, y)

    model = TransformedTargetRegressor(inverse_func=np.log)
    with pytest.raises(
        ValueError,
        match="When 'inverse_func' is provided, 'func' must also be provided",
    ):
        model.fit(X, y)
def test_transform_target_regressor_invertible():
    """check_inverse warns on non-inverse func pairs and can be disabled."""
    X, y = friedman

    # sqrt and log are not inverses of each other: a warning is expected.
    model = TransformedTargetRegressor(
        regressor=LinearRegression(),
        func=np.sqrt,
        inverse_func=np.log,
        check_inverse=True,
    )
    with pytest.warns(
        UserWarning,
        match=(r"The provided functions.* are not strictly inverse of each other"),
    ):
        model.fit(X, y)

    # With the check disabled, fitting must not emit any UserWarning.
    model = TransformedTargetRegressor(
        regressor=LinearRegression(), func=np.sqrt, inverse_func=np.log
    )
    model.set_params(check_inverse=False)
    with warnings.catch_warnings():
        warnings.simplefilter("error", UserWarning)
        model.fit(X, y)
def _check_standard_scaled(y, y_pred):
    """Assert that ``y_pred`` equals ``y`` centered and scaled along axis 0."""
    centered = y - np.mean(y, axis=0)
    assert_allclose(centered / np.std(y, axis=0), y_pred)
def _check_shifted_by_one(y, y_pred):
    """Assert that ``y_pred`` equals ``y`` shifted up by one."""
    expected = y + 1
    assert_allclose(expected, y_pred)
def test_transform_target_regressor_functions():
    """func/inverse_func round-trip and match a manually transformed fit."""
    X, y = friedman
    model = TransformedTargetRegressor(
        regressor=LinearRegression(), func=np.log, inverse_func=np.exp
    )
    y_pred = model.fit(X, y).predict(X)

    # The internal transformer applies func / inverse_func.
    y_transformed = model.transformer_.transform(y.reshape(-1, 1)).squeeze()
    assert_allclose(np.log(y), y_transformed)
    assert_allclose(
        y,
        model.transformer_.inverse_transform(y_transformed.reshape(-1, 1)).squeeze(),
    )
    assert y.shape == y_pred.shape
    assert_allclose(y_pred, model.inverse_func(model.regressor_.predict(X)))

    # The wrapped regressor is fit on the transformed targets.
    ols = LinearRegression().fit(X, model.func(y))
    assert_allclose(model.regressor_.coef_.ravel(), ols.coef_.ravel())
def test_transform_target_regressor_functions_multioutput():
    """Same as the functions test, but with a two-column target."""
    X = friedman[0]
    y = np.vstack((friedman[1], friedman[1] ** 2 + 1)).T
    model = TransformedTargetRegressor(
        regressor=LinearRegression(), func=np.log, inverse_func=np.exp
    )
    y_pred = model.fit(X, y).predict(X)

    # The internal transformer applies func / inverse_func column-wise.
    y_transformed = model.transformer_.transform(y)
    assert_allclose(np.log(y), y_transformed)
    assert_allclose(y, model.transformer_.inverse_transform(y_transformed))
    assert y.shape == y_pred.shape
    assert_allclose(y_pred, model.inverse_func(model.regressor_.predict(X)))

    # The wrapped regressor is fit on the transformed targets.
    ols = LinearRegression().fit(X, model.func(y))
    assert_allclose(model.regressor_.coef_.ravel(), ols.coef_.ravel())
@pytest.mark.parametrize(
    "X,y", [friedman, (friedman[0], np.vstack((friedman[1], friedman[1] ** 2 + 1)).T)]
)
def test_transform_target_regressor_1d_transformer(X, y):
    """Consistency checks with a transformer accepting 1D input (1D and 2D y)."""
    # All transformer in scikit-learn expect 2D data. FunctionTransformer with
    # validate=False lift this constraint without checking that the input is a
    # 2D vector. We check the consistency of the data shape using a 1D and 2D y
    # array.
    transformer = FunctionTransformer(
        func=lambda x: x + 1, inverse_func=lambda x: x - 1
    )
    regr = TransformedTargetRegressor(
        regressor=LinearRegression(), transformer=transformer
    )
    y_pred = regr.fit(X, y).predict(X)
    assert y.shape == y_pred.shape
    # consistency forward transform
    y_tran = regr.transformer_.transform(y)
    _check_shifted_by_one(y, y_tran)
    assert y.shape == y_pred.shape
    # consistency inverse transform
    assert_allclose(y, regr.transformer_.inverse_transform(y_tran).squeeze())
    # consistency of the regressor: fitting on manually transformed targets
    # must give the same coefficients and predictions
    lr = LinearRegression()
    transformer2 = clone(transformer)
    lr.fit(X, transformer2.fit_transform(y))
    y_lr_pred = lr.predict(X)
    assert_allclose(y_pred, transformer2.inverse_transform(y_lr_pred))
    assert_allclose(regr.regressor_.coef_, lr.coef_)
@pytest.mark.parametrize(
    "X,y", [friedman, (friedman[0], np.vstack((friedman[1], friedman[1] ** 2 + 1)).T)]
)
def test_transform_target_regressor_2d_transformer(X, y):
    """Consistency checks with a transformer requiring 2D input (1D and 2D y)."""
    # Check consistency with transformer accepting only 2D array and a 1D/2D y
    # array.
    transformer = StandardScaler()
    regr = TransformedTargetRegressor(
        regressor=LinearRegression(), transformer=transformer
    )
    y_pred = regr.fit(X, y).predict(X)
    assert y.shape == y_pred.shape
    # consistency forward transform
    if y.ndim == 1:  # create a 2D array and squeeze results
        y_tran = regr.transformer_.transform(y.reshape(-1, 1))
    else:
        y_tran = regr.transformer_.transform(y)
    _check_standard_scaled(y, y_tran.squeeze())
    assert y.shape == y_pred.shape
    # consistency inverse transform
    assert_allclose(y, regr.transformer_.inverse_transform(y_tran).squeeze())
    # consistency of the regressor: fitting on manually transformed targets
    # must give the same coefficients and predictions
    lr = LinearRegression()
    transformer2 = clone(transformer)
    if y.ndim == 1:  # create a 2D array and squeeze results
        lr.fit(X, transformer2.fit_transform(y.reshape(-1, 1)).squeeze())
        y_lr_pred = lr.predict(X).reshape(-1, 1)
        y_pred2 = transformer2.inverse_transform(y_lr_pred).squeeze()
    else:
        lr.fit(X, transformer2.fit_transform(y))
        y_lr_pred = lr.predict(X)
        y_pred2 = transformer2.inverse_transform(y_lr_pred)
    assert_allclose(y_pred, y_pred2)
    assert_allclose(regr.regressor_.coef_, lr.coef_)
def test_transform_target_regressor_2d_transformer_multioutput():
    """Check consistency with a 2D-only transformer and a 2D (multioutput) y."""
    # Check consistency with transformer accepting only 2D array and a 2D y
    # array.
    X = friedman[0]
    y = np.vstack((friedman[1], friedman[1] ** 2 + 1)).T
    transformer = StandardScaler()
    regr = TransformedTargetRegressor(
        regressor=LinearRegression(), transformer=transformer
    )
    y_pred = regr.fit(X, y).predict(X)
    assert y.shape == y_pred.shape
    # consistency forward transform
    y_tran = regr.transformer_.transform(y)
    _check_standard_scaled(y, y_tran)
    assert y.shape == y_pred.shape
    # consistency inverse transform
    assert_allclose(y, regr.transformer_.inverse_transform(y_tran).squeeze())
    # consistency of the regressor
    lr = LinearRegression()
    transformer2 = clone(transformer)
    lr.fit(X, transformer2.fit_transform(y))
    y_lr_pred = lr.predict(X)
    assert_allclose(y_pred, transformer2.inverse_transform(y_lr_pred))
    assert_allclose(regr.regressor_.coef_, lr.coef_)
def test_transform_target_regressor_3d_target():
    """Check a 3D target works when the transformer flattens/unflattens it."""
    # Non-regression test for:
    # https://github.com/scikit-learn/scikit-learn/issues/18866
    # Check with a 3D target with a transformer that reshapes the target
    X = friedman[0]
    y = np.tile(friedman[1].reshape(-1, 1, 1), [1, 3, 2])
    def flatten_data(data):
        # (n_samples, 3, 2) -> (n_samples, 6)
        return data.reshape(data.shape[0], -1)
    def unflatten_data(data):
        # (n_samples, 6) -> (n_samples, 3, 2)
        return data.reshape(data.shape[0], -1, 2)
    transformer = FunctionTransformer(func=flatten_data, inverse_func=unflatten_data)
    regr = TransformedTargetRegressor(
        regressor=LinearRegression(), transformer=transformer
    )
    y_pred = regr.fit(X, y).predict(X)
    assert y.shape == y_pred.shape
def test_transform_target_regressor_multi_to_single():
    """Check a func mapping a 2D target to a single output (1D or 2D return)."""
    X = friedman[0]
    y = np.transpose([friedman[1], (friedman[1] ** 2 + 1)])
    def func(y):
        # Euclidean norm per sample, kept 2D.
        out = np.sqrt(y[:, 0] ** 2 + y[:, 1] ** 2)
        return out[:, np.newaxis]
    def inverse_func(y):
        return y
    tt = TransformedTargetRegressor(
        func=func, inverse_func=inverse_func, check_inverse=False
    )
    tt.fit(X, y)
    y_pred_2d_func = tt.predict(X)
    assert y_pred_2d_func.shape == (100, 1)
    # force that the function only return a 1D array
    def func(y):
        return np.sqrt(y[:, 0] ** 2 + y[:, 1] ** 2)
    tt = TransformedTargetRegressor(
        func=func, inverse_func=inverse_func, check_inverse=False
    )
    tt.fit(X, y)
    y_pred_1d_func = tt.predict(X)
    # 1D and 2D funcs must yield the same (n_samples, 1) prediction
    assert y_pred_1d_func.shape == (100, 1)
    assert_allclose(y_pred_1d_func, y_pred_2d_func)
class DummyCheckerArrayTransformer(TransformerMixin, BaseEstimator):
    """Identity transformer asserting it always receives a numpy array."""
    def fit(self, X, y=None):
        assert isinstance(X, np.ndarray)
        return self
    def transform(self, X):
        assert isinstance(X, np.ndarray)
        return X
    def inverse_transform(self, X):
        assert isinstance(X, np.ndarray)
        return X
class DummyCheckerListRegressor(DummyRegressor):
    """DummyRegressor asserting it always receives ``X`` as a plain list."""
    def fit(self, X, y, sample_weight=None):
        assert isinstance(X, list)
        return super().fit(X, y, sample_weight)
    def predict(self, X):
        assert isinstance(X, list)
        return super().predict(X)
def test_transform_target_regressor_ensure_y_array():
    """Check y is converted to ndarray for the transformer while X is untouched."""
    # check that the target ``y`` passed to the transformer will always be a
    # numpy array. Similarly, if ``X`` is passed as a list, we check that the
    # predictor receives it as is.
    X, y = friedman
    tt = TransformedTargetRegressor(
        transformer=DummyCheckerArrayTransformer(),
        regressor=DummyCheckerListRegressor(),
        check_inverse=False,
    )
    tt.fit(X.tolist(), y.tolist())
    tt.predict(X.tolist())
    # passing an ndarray X must trip the list-asserting regressor
    with pytest.raises(AssertionError):
        tt.fit(X, y.tolist())
    with pytest.raises(AssertionError):
        tt.predict(X)
class DummyTransformer(TransformerMixin, BaseEstimator):
    """Identity transformer that records how many times ``fit`` was invoked."""

    def __init__(self, fit_counter=0):
        self.fit_counter = fit_counter

    def fit(self, X, y=None):
        # Bump the call counter; otherwise fitting is a no-op.
        self.fit_counter = self.fit_counter + 1
        return self

    def transform(self, X):
        # Identity mapping.
        return X

    def inverse_transform(self, X):
        # Identity mapping.
        return X
@pytest.mark.parametrize("check_inverse", [False, True])
def test_transform_target_regressor_count_fit(check_inverse):
    """Check the transformer is fitted exactly once, whatever check_inverse is."""
    # regression test for gh-issue #11618
    # check that we only call a single time fit for the transformer
    X, y = friedman
    ttr = TransformedTargetRegressor(
        transformer=DummyTransformer(), check_inverse=check_inverse
    )
    ttr.fit(X, y)
    assert ttr.transformer_.fit_counter == 1
class DummyRegressorWithExtraFitParams(DummyRegressor):
    """DummyRegressor whose ``fit`` takes an extra ``check_input`` kwarg."""
    def fit(self, X, y, sample_weight=None, check_input=True):
        # on the test below we force this to false, we make sure this is
        # actually passed to the regressor
        assert not check_input
        return super().fit(X, y, sample_weight)
def test_transform_target_regressor_pass_fit_parameters():
    """Check extra fit kwargs are forwarded to the wrapped regressor."""
    X, y = friedman
    regr = TransformedTargetRegressor(
        regressor=DummyRegressorWithExtraFitParams(), transformer=DummyTransformer()
    )
    regr.fit(X, y, check_input=False)
    assert regr.transformer_.fit_counter == 1
def test_transform_target_regressor_route_pipeline():
    """Check fit kwargs are routed through a Pipeline to the inner regressor."""
    X, y = friedman
    regr = TransformedTargetRegressor(
        regressor=DummyRegressorWithExtraFitParams(), transformer=DummyTransformer()
    )
    estimators = [("normalize", StandardScaler()), ("est", regr)]
    pip = Pipeline(estimators)
    # the est__ prefix routes check_input to the TransformedTargetRegressor step
    pip.fit(X, y, **{"est__check_input": False})
    assert regr.transformer_.fit_counter == 1
class DummyRegressorWithExtraPredictParams(DummyRegressor):
    """DummyRegressor whose ``predict`` takes an extra ``check_input`` kwarg."""
    def predict(self, X, check_input=True):
        # In the test below we make sure that the check input parameter is
        # passed as false
        self.predict_called = True
        assert not check_input
        return super().predict(X)
def test_transform_target_regressor_pass_extra_predict_parameters():
    """Check predict kwargs are forwarded to the wrapped regressor."""
    # Checks that predict kwargs are passed to regressor.
    X, y = friedman
    regr = TransformedTargetRegressor(
        regressor=DummyRegressorWithExtraPredictParams(), transformer=DummyTransformer()
    )
    regr.fit(X, y)
    regr.predict(X, check_input=False)
    assert regr.regressor_.predict_called
@pytest.mark.parametrize("output_format", ["pandas", "polars"])
def test_transform_target_regressor_not_warns_with_global_output_set(output_format):
    """Test that TransformedTargetRegressor will not raise warnings if
    set_config(transform_output="pandas"/"polars") is set globally; regression test for
    issue #29361."""
    X, y = datasets.make_regression()
    # shift the target to be strictly positive so np.log is valid
    y = np.abs(y) + 1
    with config_context(transform_output=output_format):
        # turn any warning into an error to assert none is raised
        with warnings.catch_warnings():
            warnings.simplefilter("error")
            TransformedTargetRegressor(
                regressor=LinearRegression(), func=np.log, inverse_func=np.exp
            ).fit(X, y)
class ValidateDimensionRegressor(BaseEstimator):
    """A regressor that expects the target to have a specific number of dimensions."""
    def __init__(self, ndim):
        # expected dimensionality of y as seen by fit
        self.ndim = ndim
    def fit(self, X, y):
        assert y.ndim == self.ndim
    def predict(self, X):
        pass  # pragma: no cover
@pytest.mark.parametrize("ndim", [1, 2])
def test_transform_target_regressor_preserves_input_shape(ndim):
    """Check that TransformedTargetRegressor internally preserves the shape of the input
    non-regression test for issue #26530.
    """
    X, y = datasets.make_regression(n_samples=10, n_features=5, random_state=42)
    if ndim == 2:
        y = y.reshape(-1, 1)
    regr = TransformedTargetRegressor(regressor=ValidateDimensionRegressor(ndim))
    regr.fit(X, y)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/compose/tests/test_column_transformer.py | sklearn/compose/tests/test_column_transformer.py | """
Test the ColumnTransformer.
"""
import pickle
import re
import warnings
import joblib
import numpy as np
import pytest
from numpy.testing import assert_allclose
from scipy import sparse
from sklearn import config_context
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.compose import (
ColumnTransformer,
make_column_selector,
make_column_transformer,
)
from sklearn.exceptions import NotFittedError
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import VarianceThreshold
from sklearn.preprocessing import (
FunctionTransformer,
Normalizer,
OneHotEncoder,
StandardScaler,
)
from sklearn.tests.metadata_routing_common import (
ConsumingTransformer,
_Registry,
check_recorded_metadata,
)
from sklearn.utils._indexing import _safe_indexing
from sklearn.utils._testing import (
_convert_container,
assert_allclose_dense_sparse,
assert_almost_equal,
assert_array_equal,
)
from sklearn.utils.fixes import CSR_CONTAINERS, parse_version
class Trans(TransformerMixin, BaseEstimator):
    """Pass-through transformer that promotes 1D input to a 2D output."""

    def fit(self, X, y=None):
        # Stateless: nothing to learn.
        return self

    def transform(self, X, y=None):
        # pandas Series -> single-column DataFrame
        to_frame = getattr(X, "to_frame", None)
        if to_frame is not None:
            return to_frame()
        # 1D ndarray -> column vector
        if getattr(X, "ndim", 2) == 1:
            return np.atleast_2d(X).T
        # already 2D: pass through unchanged
        return X
class DoubleTrans(BaseEstimator):
    """Stateless transformer that doubles every input value."""

    def fit(self, X, y=None):
        # Nothing to learn.
        return self

    def transform(self, X):
        return X * 2
class SparseMatrixTrans(BaseEstimator):
    """Transformer returning an (n_samples, n_samples) sparse identity matrix."""
    def __init__(self, csr_container):
        # factory used to wrap the sparse identity (e.g. csr_matrix / csr_array)
        self.csr_container = csr_container
    def fit(self, X, y=None):
        return self
    def transform(self, X, y=None):
        n_samples = len(X)
        return self.csr_container(sparse.eye(n_samples, n_samples))
class TransNo2D(BaseEstimator):
    """Identity transformer that does NOT promote 1D input to 2D."""
    def fit(self, X, y=None):
        return self
    def transform(self, X, y=None):
        return X
class TransRaise(BaseEstimator):
    """Transformer that raises ValueError from both fit and transform."""
    def fit(self, X, y=None):
        raise ValueError("specific message")
    def transform(self, X, y=None):
        raise ValueError("specific message")
@pytest.mark.parametrize(
    "transformers",
    [
        [("trans1", Trans, [0]), ("trans2", Trans(), [1])],
        [("trans1", Trans(), [0]), ("trans2", Trans, [1])],
        [("drop", "drop", [0]), ("trans2", Trans, [1])],
        [("trans1", Trans, [0]), ("passthrough", "passthrough", [1])],
    ],
)
def test_column_transformer_raises_class_not_instance_error(transformers):
    """Check a helpful TypeError when a class (not an instance) is passed."""
    # non-regression tests for https://github.com/scikit-learn/scikit-learn/issues/32719
    ct = ColumnTransformer(transformers)
    msg = "Expected an estimator instance (.*()), got estimator class instead (.*)."
    with pytest.raises(TypeError, match=msg):
        ct.fit([[1]])
def test_column_transformer():
    """Check basic ColumnTransformer behavior for all column-selection types."""
    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
    X_res_first1D = np.array([0, 1, 2])
    X_res_second1D = np.array([2, 4, 6])
    X_res_first = X_res_first1D.reshape(-1, 1)
    X_res_both = X_array
    cases = [
        # single column 1D / 2D
        (0, X_res_first),
        ([0], X_res_first),
        # list-like
        ([0, 1], X_res_both),
        (np.array([0, 1]), X_res_both),
        # slice
        (slice(0, 1), X_res_first),
        (slice(0, 2), X_res_both),
        # boolean mask
        (np.array([True, False]), X_res_first),
        ([True, False], X_res_first),
        (np.array([True, True]), X_res_both),
        ([True, True], X_res_both),
    ]
    for selection, res in cases:
        ct = ColumnTransformer([("trans", Trans(), selection)], remainder="drop")
        assert_array_equal(ct.fit_transform(X_array), res)
        assert_array_equal(ct.fit(X_array).transform(X_array), res)
        # callable that returns any of the allowed specifiers
        ct = ColumnTransformer(
            [("trans", Trans(), lambda x: selection)], remainder="drop"
        )
        assert_array_equal(ct.fit_transform(X_array), res)
        assert_array_equal(ct.fit(X_array).transform(X_array), res)
    ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", Trans(), [1])])
    assert_array_equal(ct.fit_transform(X_array), X_res_both)
    assert_array_equal(ct.fit(X_array).transform(X_array), X_res_both)
    assert len(ct.transformers_) == 2
    # test with transformer_weights
    transformer_weights = {"trans1": 0.1, "trans2": 10}
    both = ColumnTransformer(
        [("trans1", Trans(), [0]), ("trans2", Trans(), [1])],
        transformer_weights=transformer_weights,
    )
    res = np.vstack(
        [
            transformer_weights["trans1"] * X_res_first1D,
            transformer_weights["trans2"] * X_res_second1D,
        ]
    ).T
    assert_array_equal(both.fit_transform(X_array), res)
    assert_array_equal(both.fit(X_array).transform(X_array), res)
    assert len(both.transformers_) == 2
    both = ColumnTransformer(
        [("trans", Trans(), [0, 1])], transformer_weights={"trans": 0.1}
    )
    assert_array_equal(both.fit_transform(X_array), 0.1 * X_res_both)
    assert_array_equal(both.fit(X_array).transform(X_array), 0.1 * X_res_both)
    assert len(both.transformers_) == 1
def test_column_transformer_tuple_transformers_parameter():
    """Check that a tuple of transformers behaves like a list of transformers."""
    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
    transformers = [("trans1", Trans(), [0]), ("trans2", Trans(), [1])]
    ct_with_list = ColumnTransformer(transformers)
    ct_with_tuple = ColumnTransformer(tuple(transformers))
    assert_array_equal(
        ct_with_list.fit_transform(X_array), ct_with_tuple.fit_transform(X_array)
    )
    assert_array_equal(
        ct_with_list.fit(X_array).transform(X_array),
        ct_with_tuple.fit(X_array).transform(X_array),
    )
@pytest.mark.parametrize("constructor_name", ["dataframe", "polars"])
def test_column_transformer_dataframe(constructor_name):
    """Check ColumnTransformer column selection on pandas/polars dataframes."""
    if constructor_name == "dataframe":
        dataframe_lib = pytest.importorskip("pandas")
    else:
        dataframe_lib = pytest.importorskip(constructor_name)
    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
    X_df = _convert_container(
        X_array, constructor_name, columns_name=["first", "second"]
    )
    X_res_first = np.array([0, 1, 2]).reshape(-1, 1)
    X_res_both = X_array
    cases = [
        # String keys: label based
        # list
        (["first"], X_res_first),
        (["first", "second"], X_res_both),
        # slice
        (slice("first", "second"), X_res_both),
        # int keys: positional
        # list
        ([0], X_res_first),
        ([0, 1], X_res_both),
        (np.array([0, 1]), X_res_both),
        # slice
        (slice(0, 1), X_res_first),
        (slice(0, 2), X_res_both),
        # boolean mask
        (np.array([True, False]), X_res_first),
        ([True, False], X_res_first),
    ]
    if constructor_name == "dataframe":
        # Scalars are only supported for pandas dataframes.
        cases.extend(
            [
                # scalar
                (0, X_res_first),
                ("first", X_res_first),
                (
                    dataframe_lib.Series([True, False], index=["first", "second"]),
                    X_res_first,
                ),
            ]
        )
    for selection, res in cases:
        ct = ColumnTransformer([("trans", Trans(), selection)], remainder="drop")
        assert_array_equal(ct.fit_transform(X_df), res)
        assert_array_equal(ct.fit(X_df).transform(X_df), res)
        # callable that returns any of the allowed specifiers
        ct = ColumnTransformer(
            [("trans", Trans(), lambda X: selection)], remainder="drop"
        )
        assert_array_equal(ct.fit_transform(X_df), res)
        assert_array_equal(ct.fit(X_df).transform(X_df), res)
    ct = ColumnTransformer(
        [("trans1", Trans(), ["first"]), ("trans2", Trans(), ["second"])]
    )
    assert_array_equal(ct.fit_transform(X_df), X_res_both)
    assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both)
    assert len(ct.transformers_) == 2
    assert ct.transformers_[-1][0] != "remainder"
    ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", Trans(), [1])])
    assert_array_equal(ct.fit_transform(X_df), X_res_both)
    assert_array_equal(ct.fit(X_df).transform(X_df), X_res_both)
    assert len(ct.transformers_) == 2
    assert ct.transformers_[-1][0] != "remainder"
    # test with transformer_weights
    transformer_weights = {"trans1": 0.1, "trans2": 10}
    both = ColumnTransformer(
        [("trans1", Trans(), ["first"]), ("trans2", Trans(), ["second"])],
        transformer_weights=transformer_weights,
    )
    res = np.vstack(
        [
            transformer_weights["trans1"] * X_df["first"],
            transformer_weights["trans2"] * X_df["second"],
        ]
    ).T
    assert_array_equal(both.fit_transform(X_df), res)
    assert_array_equal(both.fit(X_df).transform(X_df), res)
    assert len(both.transformers_) == 2
    assert both.transformers_[-1][0] != "remainder"
    # test multiple columns
    both = ColumnTransformer(
        [("trans", Trans(), ["first", "second"])], transformer_weights={"trans": 0.1}
    )
    assert_array_equal(both.fit_transform(X_df), 0.1 * X_res_both)
    assert_array_equal(both.fit(X_df).transform(X_df), 0.1 * X_res_both)
    assert len(both.transformers_) == 1
    assert both.transformers_[-1][0] != "remainder"
    both = ColumnTransformer(
        [("trans", Trans(), [0, 1])], transformer_weights={"trans": 0.1}
    )
    assert_array_equal(both.fit_transform(X_df), 0.1 * X_res_both)
    assert_array_equal(both.fit(X_df).transform(X_df), 0.1 * X_res_both)
    assert len(both.transformers_) == 1
    assert both.transformers_[-1][0] != "remainder"
    # ensure pandas object is passed through
    class TransAssert(BaseEstimator):
        # Helper asserting the type of the object received by transform.
        def __init__(self, expected_type_transform):
            self.expected_type_transform = expected_type_transform
        def fit(self, X, y=None):
            return self
        def transform(self, X, y=None):
            assert isinstance(X, self.expected_type_transform)
            if isinstance(X, dataframe_lib.Series):
                X = X.to_frame()
            return X
    ct = ColumnTransformer(
        [
            (
                "trans",
                TransAssert(expected_type_transform=dataframe_lib.DataFrame),
                ["first", "second"],
            )
        ]
    )
    ct.fit_transform(X_df)
    if constructor_name == "dataframe":
        # DataFrame protocol does not have 1d columns, so we only test on Pandas
        # dataframes.
        ct = ColumnTransformer(
            [
                (
                    "trans",
                    TransAssert(expected_type_transform=dataframe_lib.Series),
                    "first",
                )
            ],
            remainder="drop",
        )
        ct.fit_transform(X_df)
        # Only test on pandas because the dataframe protocol requires string column
        # names
        # integer column spec + integer column names -> still use positional
        X_df2 = X_df.copy()
        X_df2.columns = [1, 0]
        ct = ColumnTransformer([("trans", Trans(), 0)], remainder="drop")
        assert_array_equal(ct.fit_transform(X_df2), X_res_first)
        assert_array_equal(ct.fit(X_df2).transform(X_df2), X_res_first)
        assert len(ct.transformers_) == 2
        assert ct.transformers_[-1][0] == "remainder"
        assert ct.transformers_[-1][1] == "drop"
        assert_array_equal(ct.transformers_[-1][2], [1])
@pytest.mark.parametrize("pandas", [True, False], ids=["pandas", "numpy"])
@pytest.mark.parametrize(
    "column_selection",
    [[], np.array([False, False]), [False, False]],
    ids=["list", "bool", "bool_int"],
)
@pytest.mark.parametrize("callable_column", [False, True])
def test_column_transformer_empty_columns(pandas, column_selection, callable_column):
    """Check a transformer with an empty column selection is skipped, not fitted."""
    # test case that ensures that the column transformer does also work when
    # a given transformer doesn't have any columns to work on
    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
    X_res_both = X_array
    if pandas:
        pd = pytest.importorskip("pandas")
        X = pd.DataFrame(X_array, columns=["first", "second"])
    else:
        X = X_array
    if callable_column:
        column = lambda X: column_selection  # noqa: E731
    else:
        column = column_selection
    # TransRaise would fail loudly if it were actually fitted
    ct = ColumnTransformer(
        [("trans1", Trans(), [0, 1]), ("trans2", TransRaise(), column)]
    )
    assert_array_equal(ct.fit_transform(X), X_res_both)
    assert_array_equal(ct.fit(X).transform(X), X_res_both)
    assert len(ct.transformers_) == 2
    assert isinstance(ct.transformers_[1][1], TransRaise)
    ct = ColumnTransformer(
        [("trans1", TransRaise(), column), ("trans2", Trans(), [0, 1])]
    )
    assert_array_equal(ct.fit_transform(X), X_res_both)
    assert_array_equal(ct.fit(X).transform(X), X_res_both)
    assert len(ct.transformers_) == 2
    assert isinstance(ct.transformers_[0][1], TransRaise)
    ct = ColumnTransformer([("trans", TransRaise(), column)], remainder="passthrough")
    assert_array_equal(ct.fit_transform(X), X_res_both)
    assert_array_equal(ct.fit(X).transform(X), X_res_both)
    assert len(ct.transformers_) == 2  # including remainder
    assert isinstance(ct.transformers_[0][1], TransRaise)
    fixture = np.array([[], [], []])
    ct = ColumnTransformer([("trans", TransRaise(), column)], remainder="drop")
    assert_array_equal(ct.fit_transform(X), fixture)
    assert_array_equal(ct.fit(X).transform(X), fixture)
    assert len(ct.transformers_) == 2  # including remainder
    assert isinstance(ct.transformers_[0][1], TransRaise)
def test_column_transformer_output_indices():
    """Check the output_indices_ attribute maps names to output column slices."""
    # Checks for the output_indices_ attribute
    X_array = np.arange(6).reshape(3, 2)
    ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", Trans(), [1])])
    X_trans = ct.fit_transform(X_array)
    assert ct.output_indices_ == {
        "trans1": slice(0, 1),
        "trans2": slice(1, 2),
        "remainder": slice(0, 0),
    }
    assert_array_equal(X_trans[:, [0]], X_trans[:, ct.output_indices_["trans1"]])
    assert_array_equal(X_trans[:, [1]], X_trans[:, ct.output_indices_["trans2"]])
    # test with transformer_weights and multiple columns
    ct = ColumnTransformer(
        [("trans", Trans(), [0, 1])], transformer_weights={"trans": 0.1}
    )
    X_trans = ct.fit_transform(X_array)
    assert ct.output_indices_ == {"trans": slice(0, 2), "remainder": slice(0, 0)}
    assert_array_equal(X_trans[:, [0, 1]], X_trans[:, ct.output_indices_["trans"]])
    assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["remainder"]])
    # test case that ensures that the attribute does also work when
    # a given transformer doesn't have any columns to work on
    ct = ColumnTransformer([("trans1", Trans(), [0, 1]), ("trans2", TransRaise(), [])])
    X_trans = ct.fit_transform(X_array)
    assert ct.output_indices_ == {
        "trans1": slice(0, 2),
        "trans2": slice(0, 0),
        "remainder": slice(0, 0),
    }
    assert_array_equal(X_trans[:, [0, 1]], X_trans[:, ct.output_indices_["trans1"]])
    assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["trans2"]])
    assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["remainder"]])
    ct = ColumnTransformer([("trans", TransRaise(), [])], remainder="passthrough")
    X_trans = ct.fit_transform(X_array)
    assert ct.output_indices_ == {"trans": slice(0, 0), "remainder": slice(0, 2)}
    assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["trans"]])
    assert_array_equal(X_trans[:, [0, 1]], X_trans[:, ct.output_indices_["remainder"]])
def test_column_transformer_output_indices_df():
    """Check output_indices_ with dataframe input (label and positional specs)."""
    # Checks for the output_indices_ attribute with data frames
    pd = pytest.importorskip("pandas")
    X_df = pd.DataFrame(np.arange(6).reshape(3, 2), columns=["first", "second"])
    ct = ColumnTransformer(
        [("trans1", Trans(), ["first"]), ("trans2", Trans(), ["second"])]
    )
    X_trans = ct.fit_transform(X_df)
    assert ct.output_indices_ == {
        "trans1": slice(0, 1),
        "trans2": slice(1, 2),
        "remainder": slice(0, 0),
    }
    assert_array_equal(X_trans[:, [0]], X_trans[:, ct.output_indices_["trans1"]])
    assert_array_equal(X_trans[:, [1]], X_trans[:, ct.output_indices_["trans2"]])
    assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["remainder"]])
    ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", Trans(), [1])])
    X_trans = ct.fit_transform(X_df)
    assert ct.output_indices_ == {
        "trans1": slice(0, 1),
        "trans2": slice(1, 2),
        "remainder": slice(0, 0),
    }
    assert_array_equal(X_trans[:, [0]], X_trans[:, ct.output_indices_["trans1"]])
    assert_array_equal(X_trans[:, [1]], X_trans[:, ct.output_indices_["trans2"]])
    assert_array_equal(X_trans[:, []], X_trans[:, ct.output_indices_["remainder"]])
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_column_transformer_sparse_array(csr_container):
    """Check column selection and remainder handling on sparse input."""
    X_sparse = csr_container(sparse.eye(3, 2))
    # no distinction between 1D and 2D
    X_res_first = X_sparse[:, [0]]
    X_res_both = X_sparse
    for col in [(0,), [0], slice(0, 1)]:
        for remainder, res in [("drop", X_res_first), ("passthrough", X_res_both)]:
            ct = ColumnTransformer(
                [("trans", Trans(), col)], remainder=remainder, sparse_threshold=0.8
            )
            assert sparse.issparse(ct.fit_transform(X_sparse))
            assert_allclose_dense_sparse(ct.fit_transform(X_sparse), res)
            assert_allclose_dense_sparse(ct.fit(X_sparse).transform(X_sparse), res)
    for col in [[0, 1], slice(0, 2)]:
        ct = ColumnTransformer([("trans", Trans(), col)], sparse_threshold=0.8)
        assert sparse.issparse(ct.fit_transform(X_sparse))
        assert_allclose_dense_sparse(ct.fit_transform(X_sparse), X_res_both)
        assert_allclose_dense_sparse(ct.fit(X_sparse).transform(X_sparse), X_res_both)
def test_column_transformer_list():
    """Check that a list of lists is accepted as input."""
    X_list = [[1, float("nan"), "a"], [0, 0, "b"]]
    expected_result = np.array(
        [
            [1, float("nan"), 1, 0],
            [-1, 0, 0, 1],
        ]
    )
    ct = ColumnTransformer(
        [
            ("numerical", StandardScaler(), [0, 1]),
            ("categorical", OneHotEncoder(), [2]),
        ]
    )
    assert_array_equal(ct.fit_transform(X_list), expected_result)
    assert_array_equal(ct.fit(X_list).transform(X_list), expected_result)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
@pytest.mark.parametrize("constructor_name", ["array", "pandas", "polars"])
def test_column_transformer_sparse_stacking(csr_container, constructor_name):
    """Check dense/sparse stacking depending on the sparse_threshold value."""
    X = np.array([[0, 1, 2], [2, 4, 6]]).T
    X = _convert_container(X, constructor_name, columns_name=["first", "second"])
    col_trans = ColumnTransformer(
        [("trans1", Trans(), [0]), ("trans2", SparseMatrixTrans(csr_container), 1)],
        sparse_threshold=0.8,
    )
    col_trans.fit(X)
    X_trans = col_trans.transform(X)
    # high threshold: sparse output is kept sparse
    assert sparse.issparse(X_trans)
    assert X_trans.shape == (X_trans.shape[0], X_trans.shape[0] + 1)
    assert_array_equal(X_trans.toarray()[:, 1:], np.eye(X_trans.shape[0]))
    assert len(col_trans.transformers_) == 2
    assert col_trans.transformers_[-1][0] != "remainder"
    col_trans = ColumnTransformer(
        [("trans1", Trans(), [0]), ("trans2", SparseMatrixTrans(csr_container), 1)],
        sparse_threshold=0.1,
    )
    col_trans.fit(X)
    X_trans = col_trans.transform(X)
    # low threshold: output is densified
    assert not sparse.issparse(X_trans)
    assert X_trans.shape == (X_trans.shape[0], X_trans.shape[0] + 1)
    assert_array_equal(X_trans[:, 1:], np.eye(X_trans.shape[0]))
def test_column_transformer_mixed_cols_sparse():
    """Check sparse output with mixed column dtypes (bool coercible, str not)."""
    df = np.array([["a", 1, True], ["b", 2, False]], dtype="O")
    ct = make_column_transformer(
        (OneHotEncoder(), [0]), ("passthrough", [1, 2]), sparse_threshold=1.0
    )
    # this shouldn't fail, since boolean can be coerced into a numeric
    # See: https://github.com/scikit-learn/scikit-learn/issues/11912
    X_trans = ct.fit_transform(df)
    assert X_trans.format == "csr"
    assert_array_equal(X_trans.toarray(), np.array([[1, 0, 1, 1], [0, 1, 2, 0]]))
    ct = make_column_transformer(
        (OneHotEncoder(), [0]), ("passthrough", [0]), sparse_threshold=1.0
    )
    with pytest.raises(ValueError, match="For a sparse output, all columns should"):
        # this fails since strings `a` and `b` cannot be
        # coerced into a numeric.
        ct.fit_transform(df)
def test_column_transformer_sparse_threshold():
    """Check sparse_threshold decides sparse vs. dense output by density."""
    X_array = np.array([["a", "b"], ["A", "B"]], dtype=object).T
    # above data has sparsity of 4 / 8 = 0.5
    # apply threshold even if all sparse
    col_trans = ColumnTransformer(
        [("trans1", OneHotEncoder(), [0]), ("trans2", OneHotEncoder(), [1])],
        sparse_threshold=0.2,
    )
    res = col_trans.fit_transform(X_array)
    assert not sparse.issparse(res)
    assert not col_trans.sparse_output_
    # mixed -> sparsity of (4 + 2) / 8 = 0.75
    for thres in [0.75001, 1]:
        col_trans = ColumnTransformer(
            [
                ("trans1", OneHotEncoder(sparse_output=True), [0]),
                ("trans2", OneHotEncoder(sparse_output=False), [1]),
            ],
            sparse_threshold=thres,
        )
        res = col_trans.fit_transform(X_array)
        assert sparse.issparse(res)
        assert col_trans.sparse_output_
    for thres in [0.75, 0]:
        col_trans = ColumnTransformer(
            [
                ("trans1", OneHotEncoder(sparse_output=True), [0]),
                ("trans2", OneHotEncoder(sparse_output=False), [1]),
            ],
            sparse_threshold=thres,
        )
        res = col_trans.fit_transform(X_array)
        assert not sparse.issparse(res)
        assert not col_trans.sparse_output_
    # if nothing is sparse -> no sparse
    for thres in [0.33, 0, 1]:
        col_trans = ColumnTransformer(
            [
                ("trans1", OneHotEncoder(sparse_output=False), [0]),
                ("trans2", OneHotEncoder(sparse_output=False), [1]),
            ],
            sparse_threshold=thres,
        )
        res = col_trans.fit_transform(X_array)
        assert not sparse.issparse(res)
        assert not col_trans.sparse_output_
def test_column_transformer_error_msg_1D():
    """Check the error message when a transformer receives 1D data."""
    X_array = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T
    col_trans = ColumnTransformer([("trans", StandardScaler(), 0)])
    msg = "1D data passed to a transformer"
    with pytest.raises(ValueError, match=msg):
        col_trans.fit(X_array)
    with pytest.raises(ValueError, match=msg):
        col_trans.fit_transform(X_array)
    # errors raised by the transformer itself are propagated unchanged
    col_trans = ColumnTransformer([("trans", TransRaise(), 0)])
    for func in [col_trans.fit, col_trans.fit_transform]:
        with pytest.raises(ValueError, match="specific message"):
            func(X_array)
def test_2D_transformer_output():
    """Check the error names the right transformer when its output is not 2D."""
    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
    # if one transformer is dropped, test that name is still correct
    ct = ColumnTransformer([("trans1", "drop", 0), ("trans2", TransNo2D(), 1)])
    msg = "the 'trans2' transformer should be 2D"
    with pytest.raises(ValueError, match=msg):
        ct.fit_transform(X_array)
    # because fit is also doing transform, this raises already on fit
    with pytest.raises(ValueError, match=msg):
        ct.fit(X_array)
def test_2D_transformer_output_pandas():
    """Check the non-2D-output error with dataframe input and column labels."""
    pd = pytest.importorskip("pandas")
    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
    X_df = pd.DataFrame(X_array, columns=["col1", "col2"])
    # if one transformer is dropped, test that name is still correct
    ct = ColumnTransformer([("trans1", TransNo2D(), "col1")])
    msg = "the 'trans1' transformer should be 2D"
    with pytest.raises(ValueError, match=msg):
        ct.fit_transform(X_df)
    # because fit is also doing transform, this raises already on fit
    with pytest.raises(ValueError, match=msg):
        ct.fit(X_df)
@pytest.mark.parametrize("remainder", ["drop", "passthrough"])
def test_column_transformer_invalid_columns(remainder):
    """Check errors for invalid column specifiers and feature-count mismatch."""
    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
    # general invalid
    for col in [1.5, ["string", 1], slice(1, "s"), np.array([1.0])]:
        ct = ColumnTransformer([("trans", Trans(), col)], remainder=remainder)
        with pytest.raises(ValueError, match="No valid specification"):
            ct.fit(X_array)
    # invalid for arrays
    for col in ["string", ["string", "other"], slice("a", "b")]:
        ct = ColumnTransformer([("trans", Trans(), col)], remainder=remainder)
        with pytest.raises(ValueError, match="Specifying the columns"):
            ct.fit(X_array)
    # transformed n_features does not match fitted n_features
    col = [0, 1]
    ct = ColumnTransformer([("trans", Trans(), col)], remainder=remainder)
    ct.fit(X_array)
    X_array_more = np.array([[0, 1, 2], [2, 4, 6], [3, 6, 9]]).T
    msg = "X has 3 features, but ColumnTransformer is expecting 2 features as input."
    with pytest.raises(ValueError, match=msg):
        ct.transform(X_array_more)
    X_array_fewer = np.array(
        [
            [0, 1, 2],
        ]
    ).T
    err_msg = (
        "X has 1 features, but ColumnTransformer is expecting 2 features as input."
    )
    with pytest.raises(ValueError, match=err_msg):
        ct.transform(X_array_fewer)
def test_column_transformer_invalid_transformer():
    """Check a TypeError when an estimator has no transform method."""
    class NoTrans(BaseEstimator):
        # has fit/predict but no transform -> not a valid transformer
        def fit(self, X, y=None):
            return self
        def predict(self, X):
            return X
    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
    ct = ColumnTransformer([("trans", NoTrans(), [0])])
    msg = "All estimators should implement fit and transform"
    with pytest.raises(TypeError, match=msg):
        ct.fit(X_array)
def test_make_column_transformer():
    """Check make_column_transformer auto-generates names from estimator types."""
    scaler = StandardScaler()
    norm = Normalizer()
    ct = make_column_transformer((scaler, "first"), (norm, ["second"]))
    names, transformers, columns = zip(*ct.transformers)
    assert names == ("standardscaler", "normalizer")
    assert transformers == (scaler, norm)
    assert columns == ("first", ["second"])
def test_make_column_transformer_pandas():
    """Check make_column_transformer matches ColumnTransformer on a dataframe."""
    pd = pytest.importorskip("pandas")
    X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
    X_df = pd.DataFrame(X_array, columns=["first", "second"])
    norm = Normalizer()
    ct1 = ColumnTransformer([("norm", Normalizer(), X_df.columns)])
    ct2 = make_column_transformer((norm, X_df.columns))
    assert_almost_equal(ct1.fit_transform(X_df), ct2.fit_transform(X_df))
def test_make_column_transformer_kwargs():
    """Check supported kwargs are forwarded and unsupported ones raise."""
    scaler = StandardScaler()
    norm = Normalizer()
    ct = make_column_transformer(
        (scaler, "first"),
        (norm, ["second"]),
        n_jobs=3,
        remainder="drop",
        sparse_threshold=0.5,
    )
    assert (
        ct.transformers
        == make_column_transformer((scaler, "first"), (norm, ["second"])).transformers
    )
    assert ct.n_jobs == 3
    assert ct.remainder == "drop"
    assert ct.sparse_threshold == 0.5
    # invalid keyword parameters should raise an error message
    msg = re.escape(
        "make_column_transformer() got an unexpected "
        "keyword argument 'transformer_weights'"
    )
    with pytest.raises(TypeError, match=msg):
        make_column_transformer(
            (scaler, "first"),
            (norm, ["second"]),
            transformer_weights={"pca": 10, "Transf": 1},
        )
def test_make_column_transformer_remainder_transformer():
    """An estimator passed via `remainder` is stored on the instance as-is."""
    remainder_est = StandardScaler()
    ct = make_column_transformer(
        (StandardScaler(), "first"),
        (Normalizer(), ["second"]),
        remainder=remainder_est,
    )
    assert ct.remainder == remainder_est
def test_column_transformer_get_set_params():
    """get_params/set_params expose both top-level and nested parameters."""
    ct = ColumnTransformer(
        [("trans1", StandardScaler(), [0]), ("trans2", StandardScaler(), [1])]
    )
    expected = {
        "force_int_remainder_cols": "deprecated",
        "n_jobs": None,
        "remainder": "drop",
        "sparse_threshold": 0.3,
        "trans1": ct.transformers[0][1],
        "trans1__copy": True,
        "trans1__with_mean": True,
        "trans1__with_std": True,
        "trans2": ct.transformers[1][1],
        "trans2__copy": True,
        "trans2__with_mean": True,
        "trans2__with_std": True,
        "transformer_weights": None,
        "transformers": ct.transformers,
        "verbose": False,
        "verbose_feature_names_out": True,
    }
    assert ct.get_params() == expected

    # nested parameters can be updated through the double-underscore syntax
    ct.set_params(trans1__with_mean=False)
    assert not ct.get_params()["trans1__with_mean"]

    # replacing a step drops its nested parameters from get_params
    ct.set_params(trans1="passthrough")
    expected = {
        "force_int_remainder_cols": "deprecated",
        "n_jobs": None,
        "remainder": "drop",
        "sparse_threshold": 0.3,
        "trans1": "passthrough",
        "trans2": ct.transformers[1][1],
        "trans2__copy": True,
        "trans2__with_mean": True,
        "trans2__with_std": True,
        "transformer_weights": None,
        "transformers": ct.transformers,
        "verbose": False,
        "verbose_feature_names_out": True,
    }
    assert ct.get_params() == expected
def test_column_transformer_named_estimators():
    """named_transformers_ offers dict-style and attribute access to fitted steps."""
    data = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T
    ct = ColumnTransformer(
        [
            ("trans1", StandardScaler(), [0]),
            ("trans2", StandardScaler(with_std=False), [1]),
        ]
    )
    assert not hasattr(ct, "transformers_")
    ct.fit(data)
    assert hasattr(ct, "transformers_")
    for name in ("trans1", "trans2"):
        assert isinstance(ct.named_transformers_[name], StandardScaler)
        assert isinstance(getattr(ct.named_transformers_, name), StandardScaler)
    assert not ct.named_transformers_.trans2.with_std
    # the exposed transformers must be the fitted clones, not the inputs
    assert ct.named_transformers_.trans1.mean_ == 1.0
def test_column_transformer_cloning():
    """fit/fit_transform clone the given transformers, leaving inputs unfitted."""
    data = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T
    for method in ("fit", "fit_transform"):
        ct = ColumnTransformer([("trans", StandardScaler(), [0])])
        getattr(ct, method)(data)
        # the user-supplied instance is untouched ...
        assert not hasattr(ct.transformers[0][1], "mean_")
        # ... while the fitted clone carries the learned state
        assert hasattr(ct.transformers_[0][1], "mean_")
def test_column_transformer_get_feature_names():
    """get_feature_names_out errors when unfitted or unsupported by a step."""
    data = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T
    ct = ColumnTransformer([("trans", Trans(), [0, 1])])

    # raise correct error when not fitted
    with pytest.raises(NotFittedError):
        ct.get_feature_names_out()

    # raise correct error when no feature names are available
    ct.fit(data)
    expected_msg = re.escape(
        "Transformer trans (type Trans) does not provide get_feature_names_out"
    )
    with pytest.raises(AttributeError, match=expected_msg):
        ct.get_feature_names_out()
def test_column_transformer_special_strings():
    """'drop' removes the selected columns and 'passthrough' forwards them."""
    data = np.array([[0.0, 1.0, 2.0], [2.0, 4.0, 6.0]]).T

    # one 'drop' -> ignore
    ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", "drop", [1])])
    expected = np.array([[0.0], [1.0], [2.0]])
    assert_array_equal(ct.fit_transform(data), expected)
    assert_array_equal(ct.fit(data).transform(data), expected)
    assert len(ct.transformers_) == 2
    assert ct.transformers_[-1][0] != "remainder"

    # all 'drop' -> return shape 0 array
    ct = ColumnTransformer([("trans1", "drop", [0]), ("trans2", "drop", [1])])
    assert_array_equal(ct.fit(data).transform(data).shape, (3, 0))
    assert_array_equal(ct.fit_transform(data).shape, (3, 0))
    assert len(ct.transformers_) == 2
    assert ct.transformers_[-1][0] != "remainder"

    # 'passthrough' -> the column is forwarded untouched
    ct = ColumnTransformer([("trans1", Trans(), [0]), ("trans2", "passthrough", [1])])
    assert_array_equal(ct.fit_transform(data), data)
    assert_array_equal(ct.fit(data).transform(data), data)
    assert len(ct.transformers_) == 2
    assert ct.transformers_[-1][0] != "remainder"
def test_column_transformer_remainder():
X_array = np.array([[0, 1, 2], [2, 4, 6]]).T
X_res_first = np.array([0, 1, 2]).reshape(-1, 1)
X_res_second = np.array([2, 4, 6]).reshape(-1, 1)
X_res_both = X_array
# default drop
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/compose/tests/__init__.py | sklearn/compose/tests/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/inspection/_pd_utils.py | sklearn/inspection/_pd_utils.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
def _check_feature_names(X, feature_names=None):
"""Check feature names.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
feature_names : None or array-like of shape (n_names,), dtype=str
Feature names to check or `None`.
Returns
-------
feature_names : list of str
Feature names validated. If `feature_names` is `None`, then a list of
feature names is provided, i.e. the column names of a pandas dataframe
or a generic list of feature names (e.g. `["x0", "x1", ...]`) for a
NumPy array.
"""
if feature_names is None:
if hasattr(X, "columns") and hasattr(X.columns, "tolist"):
# get the column names for a pandas dataframe
feature_names = X.columns.tolist()
else:
# define a list of numbered indices for a numpy array
feature_names = [f"x{i}" for i in range(X.shape[1])]
elif hasattr(feature_names, "tolist"):
# convert numpy array or pandas index to a list
feature_names = feature_names.tolist()
if len(set(feature_names)) != len(feature_names):
raise ValueError("feature_names should not contain duplicates.")
return feature_names
def _get_feature_index(fx, feature_names=None):
"""Get feature index.
Parameters
----------
fx : int or str
Feature index or name.
feature_names : list of str, default=None
All feature names from which to search the indices.
Returns
-------
idx : int
Feature index.
"""
if isinstance(fx, str):
if feature_names is None:
raise ValueError(
f"Cannot plot partial dependence for feature {fx!r} since "
"the list of feature names was not provided, neither as "
"column names of a pandas data-frame nor via the feature_names "
"parameter."
)
try:
return feature_names.index(fx)
except ValueError as e:
raise ValueError(f"Feature {fx!r} not in feature_names") from e
return fx
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/inspection/_partial_dependence.py | sklearn/inspection/_partial_dependence.py | """Partial dependence plots for regression and classification models."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from collections.abc import Iterable
import numpy as np
from scipy import sparse
from scipy.stats.mstats import mquantiles
from sklearn.base import is_classifier, is_regressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble._gb import BaseGradientBoosting
from sklearn.ensemble._hist_gradient_boosting.gradient_boosting import (
BaseHistGradientBoosting,
)
from sklearn.inspection._pd_utils import _check_feature_names, _get_feature_index
from sklearn.tree import DecisionTreeRegressor
from sklearn.utils import Bunch, _safe_indexing, check_array
from sklearn.utils._indexing import (
_determine_key_type,
_get_column_indices,
_safe_assign,
)
from sklearn.utils._optional_dependencies import check_matplotlib_support # noqa: F401
from sklearn.utils._param_validation import (
HasMethods,
Integral,
Interval,
StrOptions,
validate_params,
)
from sklearn.utils._response import _get_response_values
from sklearn.utils.extmath import cartesian
from sklearn.utils.validation import _check_sample_weight, check_is_fitted
__all__ = [
"partial_dependence",
]
def _grid_from_X(X, percentiles, is_categorical, grid_resolution, custom_values):
    """Generate a grid of points based on the percentiles of X.

    The grid is a cartesian product between the columns of ``values``. The
    ith column of ``values`` consists in ``grid_resolution`` equally-spaced
    points between the percentiles of the jth column of X.

    If ``grid_resolution`` is bigger than the number of unique values in the
    j-th column of X or if the feature is a categorical feature (by inspecting
    `is_categorical`) , then those unique values will be used instead.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_target_features)
        The data.

    percentiles : tuple of float
        The percentiles which are used to construct the extreme values of
        the grid. Must be in [0, 1].

    is_categorical : list of bool
        For each feature, tells whether it is categorical or not. If a feature
        is categorical, then the values used will be the unique ones
        (i.e. categories) instead of the percentiles.

    grid_resolution : int
        The number of equally spaced points to be placed on the grid for each
        feature.

    custom_values : dict
        Mapping from column index of X to an array-like of values where
        the partial dependence should be calculated for that feature.
        Features present here bypass both the percentile-based axis and the
        unique-value axis.

    Returns
    -------
    grid : ndarray of shape (n_points, n_target_features)
        A value for each feature at each point in the grid. ``n_points`` is
        always ``<= grid_resolution ** X.shape[1]``.

    values : list of 1d ndarrays
        The values with which the grid has been created. The size of each
        array ``values[j]`` is either ``grid_resolution`` or the number of
        unique values in ``X[:, j]``, if j is not in ``custom_values``.
        If j is in ``custom_values``, then it is the length of
        ``custom_values[j]``.

    Raises
    ------
    ValueError
        If `percentiles` or `grid_resolution` are invalid, if a custom grid
        is not one-dimensional, or if a column mixes `np.nan` with `str`
        categories.
    """
    # Validate `percentiles` and `grid_resolution` before touching the data.
    if not isinstance(percentiles, Iterable) or len(percentiles) != 2:
        raise ValueError("'percentiles' must be a sequence of 2 elements.")
    if not all(0 <= x <= 1 for x in percentiles):
        raise ValueError("'percentiles' values must be in [0, 1].")
    if percentiles[0] >= percentiles[1]:
        raise ValueError("percentiles[0] must be strictly less than percentiles[1].")
    if grid_resolution <= 1:
        raise ValueError("'grid_resolution' must be strictly greater than 1.")

    def _convert_custom_values(values):
        # Convert custom types such that object types are always used for string arrays
        dtype = object if any(isinstance(v, str) for v in values) else None
        return np.asarray(values, dtype=dtype)

    custom_values = {k: _convert_custom_values(v) for k, v in custom_values.items()}
    if any(v.ndim != 1 for v in custom_values.values()):
        # Report every offending feature in a single error message.
        error_string = ", ".join(
            f"Feature {k}: {v.ndim} dimensions"
            for k, v in custom_values.items()
            if v.ndim != 1
        )

        raise ValueError(
            "The custom grid for some features is not a one-dimensional array. "
            f"{error_string}"
        )

    values = []
    # TODO: we should handle missing values (i.e. `np.nan`) specifically and store them
    # in a different Bunch attribute.
    for feature, is_cat in enumerate(is_categorical):
        if feature in custom_values:
            # Use values in the custom range
            axis = custom_values[feature]
        else:
            try:
                uniques = np.unique(_safe_indexing(X, feature, axis=1))
            except TypeError as exc:
                # `np.unique` will fail in the presence of `np.nan` and `str` categories
                # due to sorting. Temporarily, we reraise an error explaining the
                # problem.
                raise ValueError(
                    f"The column #{feature} contains mixed data types. Finding unique "
                    "categories fail due to sorting. It usually means that the column "
                    "contains `np.nan` values together with `str` categories. Such use "
                    "case is not yet supported in scikit-learn."
                ) from exc

            if is_cat or uniques.shape[0] < grid_resolution:
                # Use the unique values either because:
                # - feature has low resolution use unique values
                # - feature is categorical
                axis = uniques
            else:
                # create axis based on percentiles and grid resolution
                emp_percentiles = mquantiles(
                    _safe_indexing(X, feature, axis=1), prob=percentiles, axis=0
                )
                if np.allclose(emp_percentiles[0], emp_percentiles[1]):
                    raise ValueError(
                        "percentiles are too close to each other, "
                        "unable to build the grid. Please choose percentiles "
                        "that are further apart."
                    )
                axis = np.linspace(
                    emp_percentiles[0],
                    emp_percentiles[1],
                    num=grid_resolution,
                    endpoint=True,
                )
        values.append(axis)

    return cartesian(values), values
def _partial_dependence_recursion(est, grid, features):
"""Calculate partial dependence via the recursion method.
The recursion method is in particular enabled for tree-based estimators.
For each `grid` value, a weighted tree traversal is performed: if a split node
involves an input feature of interest, the corresponding left or right branch
is followed; otherwise both branches are followed, each branch being weighted
by the fraction of training samples that entered that branch. Finally, the
partial dependence is given by a weighted average of all the visited leaves
values.
This method is more efficient in terms of speed than the `'brute'` method
(:func:`~sklearn.inspection._partial_dependence._partial_dependence_brute`).
However, here, the partial dependence computation is done explicitly with the
`X` used during training of `est`.
Parameters
----------
est : BaseEstimator
A fitted estimator object implementing :term:`predict` or
:term:`decision_function`. Multioutput-multiclass classifiers are not
supported. Note that `'recursion'` is only supported for some tree-based
estimators (namely
:class:`~sklearn.ensemble.GradientBoostingClassifier`,
:class:`~sklearn.ensemble.GradientBoostingRegressor`,
:class:`~sklearn.ensemble.HistGradientBoostingClassifier`,
:class:`~sklearn.ensemble.HistGradientBoostingRegressor`,
:class:`~sklearn.tree.DecisionTreeRegressor`,
:class:`~sklearn.ensemble.RandomForestRegressor`,
).
grid : array-like of shape (n_points, n_target_features)
The grid of feature values for which the partial dependence is calculated.
Note that `n_points` is the number of points in the grid and `n_target_features`
is the number of features you are doing partial dependence at.
features : array-like of {int, str}
The feature (e.g. `[0]`) or pair of interacting features
(e.g. `[(0, 1)]`) for which the partial dependency should be computed.
Returns
-------
averaged_predictions : array-like of shape (n_targets, n_points)
The averaged predictions for the given `grid` of features values.
Note that `n_targets` is the number of targets (e.g. 1 for binary
classification, `n_tasks` for multi-output regression, and `n_classes` for
multiclass classification) and `n_points` is the number of points in the `grid`.
"""
averaged_predictions = est._compute_partial_dependence_recursion(grid, features)
if averaged_predictions.ndim == 1:
# reshape to (1, n_points) for consistency with
# _partial_dependence_brute
averaged_predictions = averaged_predictions.reshape(1, -1)
return averaged_predictions
def _partial_dependence_brute(
    est, grid, features, X, response_method, sample_weight=None
):
    """Calculate partial dependence via the brute force method.

    The brute method explicitly averages the predictions of an estimator over a
    grid of feature values.

    For each `grid` value, all the samples from `X` have their variables of
    interest replaced by that specific `grid` value. The predictions are then made
    and averaged across the samples.

    This method is slower than the `'recursion'`
    (:func:`~sklearn.inspection._partial_dependence._partial_dependence_recursion`)
    version for estimators with this second option. However, with the `'brute'`
    force method, the average will be done with the given `X` and not the `X`
    used during training, as it is done in the `'recursion'` version. Therefore
    the average can always accept `sample_weight` (even when the estimator was
    fitted without).

    Parameters
    ----------
    est : BaseEstimator
        A fitted estimator object implementing :term:`predict`,
        :term:`predict_proba`, or :term:`decision_function`.
        Multioutput-multiclass classifiers are not supported.

    grid : array-like of shape (n_points, n_target_features)
        The grid of feature values for which the partial dependence is calculated.
        Note that `n_points` is the number of points in the grid and `n_target_features`
        is the number of features you are doing partial dependence at.

    features : array-like of {int, str}
        The feature (e.g. `[0]`) or pair of interacting features
        (e.g. `[(0, 1)]`) for which the partial dependency should be computed.

    X : array-like of shape (n_samples, n_features)
        `X` is used to generate values for the complement features. That is, for
        each value in `grid`, the method will average the prediction of each
        sample from `X` having that grid value for `features`.

    response_method : {'auto', 'predict_proba', 'decision_function'}, \
            default='auto'
        Specifies whether to use :term:`predict_proba` or
        :term:`decision_function` as the target response. For regressors
        this parameter is ignored and the response is always the output of
        :term:`predict`. By default, :term:`predict_proba` is tried first
        and we revert to :term:`decision_function` if it doesn't exist.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights are used to calculate weighted means when averaging the
        model output. If `None`, then samples are equally weighted. Note that
        `sample_weight` does not change the individual predictions.

    Returns
    -------
    averaged_predictions : array-like of shape (n_targets, n_points)
        The averaged predictions for the given `grid` of features values.
        Note that `n_targets` is the number of targets (e.g. 1 for binary
        classification, `n_tasks` for multi-output regression, and `n_classes` for
        multiclass classification) and `n_points` is the number of points in the `grid`.

    predictions : array-like
        The predictions for the given `grid` of features values over the samples
        from `X`. For non-multioutput regression and binary classification the
        shape is `(n_instances, n_points)` and for multi-output regression and
        multiclass classification the shape is `(n_targets, n_instances, n_points)`,
        where `n_targets` is the number of targets (`n_tasks` for multi-output
        regression, and `n_classes` for multiclass classification), `n_instances`
        is the number of instances in `X`, and `n_points` is the number of points
        in the `grid`.
    """
    predictions = []
    averaged_predictions = []

    if response_method == "auto":
        response_method = (
            "predict" if is_regressor(est) else ["predict_proba", "decision_function"]
        )

    # Work on a copy so the caller's `X` is never mutated by `_safe_assign`.
    X_eval = X.copy()
    for new_values in grid:
        # Overwrite the features of interest with this grid point's values;
        # all complement features keep their original per-sample values.
        for i, variable in enumerate(features):
            _safe_assign(X_eval, new_values[i], column_indexer=variable)

        # Note: pred is of shape
        # (n_samples,) for non-multioutput regressors
        # (n_samples, n_tasks) for multioutput regressors
        # (n_samples, 1) for the regressors in cross_decomposition (I think)
        # (n_samples, 1) for binary classification (positive class already selected)
        # (n_samples, n_classes) for multiclass classification
        pred, _ = _get_response_values(est, X_eval, response_method=response_method)

        predictions.append(pred)

        # average over samples
        averaged_predictions.append(np.average(pred, axis=0, weights=sample_weight))

    n_samples = X.shape[0]

    # reshape to (n_targets, n_instances, n_points) where n_targets is:
    # - 1 for non-multioutput regression and binary classification (shape is
    #   already correct in those cases)
    # - n_tasks for multi-output regression
    # - n_classes for multiclass classification.
    predictions = np.array(predictions).T
    if is_regressor(est) and predictions.ndim == 2:
        # non-multioutput regression, shape is (n_instances, n_points,)
        predictions = predictions.reshape(n_samples, -1)
    elif is_classifier(est) and predictions.shape[0] == 2:
        # Binary classification, shape is (2, n_instances, n_points).
        # we output the effect of **positive** class
        predictions = predictions[1]
        predictions = predictions.reshape(n_samples, -1)

    # reshape averaged_predictions to (n_targets, n_points) where n_targets is:
    # - 1 for non-multioutput regression and binary classification (shape is
    #   already correct in those cases)
    # - n_tasks for multi-output regression
    # - n_classes for multiclass classification.
    averaged_predictions = np.array(averaged_predictions).T
    if averaged_predictions.ndim == 1:
        # reshape to (1, n_points) for consistency with
        # _partial_dependence_recursion
        averaged_predictions = averaged_predictions.reshape(1, -1)

    return averaged_predictions, predictions
@validate_params(
{
"estimator": [
HasMethods(["fit", "predict"]),
HasMethods(["fit", "predict_proba"]),
HasMethods(["fit", "decision_function"]),
],
"X": ["array-like", "sparse matrix"],
"features": ["array-like", Integral, str],
"sample_weight": ["array-like", None],
"categorical_features": ["array-like", None],
"feature_names": ["array-like", None],
"response_method": [StrOptions({"auto", "predict_proba", "decision_function"})],
"percentiles": [tuple],
"grid_resolution": [Interval(Integral, 1, None, closed="left")],
"method": [StrOptions({"auto", "recursion", "brute"})],
"kind": [StrOptions({"average", "individual", "both"})],
"custom_values": [dict, None],
},
prefer_skip_nested_validation=True,
)
def partial_dependence(
estimator,
X,
features,
*,
sample_weight=None,
categorical_features=None,
feature_names=None,
response_method="auto",
percentiles=(0.05, 0.95),
grid_resolution=100,
custom_values=None,
method="auto",
kind="average",
):
"""Partial dependence of ``features``.
Partial dependence of a feature (or a set of features) corresponds to
the average response of an estimator for each possible value of the
feature.
Read more in
:ref:`sphx_glr_auto_examples_inspection_plot_partial_dependence.py`
and the :ref:`User Guide <partial_dependence>`.
.. warning::
For :class:`~sklearn.ensemble.GradientBoostingClassifier` and
:class:`~sklearn.ensemble.GradientBoostingRegressor`, the
`'recursion'` method (used by default) will not account for the `init`
predictor of the boosting process. In practice, this will produce
the same values as `'brute'` up to a constant offset in the target
response, provided that `init` is a constant estimator (which is the
default). However, if `init` is not a constant estimator, the
partial dependence values are incorrect for `'recursion'` because the
offset will be sample-dependent. It is preferable to use the `'brute'`
method. Note that this only applies to
:class:`~sklearn.ensemble.GradientBoostingClassifier` and
:class:`~sklearn.ensemble.GradientBoostingRegressor`, not to
:class:`~sklearn.ensemble.HistGradientBoostingClassifier` and
:class:`~sklearn.ensemble.HistGradientBoostingRegressor`.
Parameters
----------
estimator : BaseEstimator
A fitted estimator object implementing :term:`predict`,
:term:`predict_proba`, or :term:`decision_function`.
Multioutput-multiclass classifiers are not supported.
X : {array-like, sparse matrix or dataframe} of shape (n_samples, n_features)
``X`` is used to generate a grid of values for the target
``features`` (where the partial dependence will be evaluated), and
also to generate values for the complement features when the
`method` is 'brute'.
features : array-like of {int, str, bool} or int or str
The feature (e.g. `[0]`) or pair of interacting features
(e.g. `[(0, 1)]`) for which the partial dependency should be computed.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights are used to calculate weighted means when averaging the
model output. If `None`, then samples are equally weighted. If
`sample_weight` is not `None`, then `method` will be set to `'brute'`.
Note that `sample_weight` is ignored for `kind='individual'`.
.. versionadded:: 1.3
categorical_features : array-like of shape (n_features,) or shape \
(n_categorical_features,), dtype={bool, int, str}, default=None
Indicates the categorical features.
- `None`: no feature will be considered categorical;
- boolean array-like: boolean mask of shape `(n_features,)`
indicating which features are categorical. Thus, this array has
the same shape has `X.shape[1]`;
- integer or string array-like: integer indices or strings
indicating categorical features.
.. versionadded:: 1.2
feature_names : array-like of shape (n_features,), dtype=str, default=None
Name of each feature; `feature_names[i]` holds the name of the feature
with index `i`.
By default, the name of the feature corresponds to their numerical
index for NumPy array and their column name for pandas dataframe.
.. versionadded:: 1.2
response_method : {'auto', 'predict_proba', 'decision_function'}, \
default='auto'
Specifies whether to use :term:`predict_proba` or
:term:`decision_function` as the target response. For regressors
this parameter is ignored and the response is always the output of
:term:`predict`. By default, :term:`predict_proba` is tried first
and we revert to :term:`decision_function` if it doesn't exist. If
``method`` is 'recursion', the response is always the output of
:term:`decision_function`.
percentiles : tuple of float, default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the grid. Must be in [0, 1].
This parameter is overridden by `custom_values` if that parameter is set.
grid_resolution : int, default=100
The number of equally spaced points on the grid, for each target
feature.
This parameter is overridden by `custom_values` if that parameter is set.
custom_values : dict
A dictionary mapping the index of an element of `features` to an array
of values where the partial dependence should be calculated
for that feature. Setting a range of values for a feature overrides
`grid_resolution` and `percentiles`.
See :ref:`how to use partial_dependence
<plt_partial_dependence_custom_values>` for an example of how this parameter can
be used.
.. versionadded:: 1.7
method : {'auto', 'recursion', 'brute'}, default='auto'
The method used to calculate the averaged predictions:
- `'recursion'` is only supported for some tree-based estimators
(namely
:class:`~sklearn.ensemble.GradientBoostingClassifier`,
:class:`~sklearn.ensemble.GradientBoostingRegressor`,
:class:`~sklearn.ensemble.HistGradientBoostingClassifier`,
:class:`~sklearn.ensemble.HistGradientBoostingRegressor`,
:class:`~sklearn.tree.DecisionTreeRegressor`,
:class:`~sklearn.ensemble.RandomForestRegressor`,
) when `kind='average'`.
This is more efficient in terms of speed.
With this method, the target response of a
classifier is always the decision function, not the predicted
probabilities. Since the `'recursion'` method implicitly computes
the average of the Individual Conditional Expectation (ICE) by
design, it is not compatible with ICE and thus `kind` must be
`'average'`.
- `'brute'` is supported for any estimator, but is more
computationally intensive.
- `'auto'`: the `'recursion'` is used for estimators that support it,
and `'brute'` is used otherwise. If `sample_weight` is not `None`,
then `'brute'` is used regardless of the estimator.
Please see :ref:`this note <pdp_method_differences>` for
differences between the `'brute'` and `'recursion'` method.
kind : {'average', 'individual', 'both'}, default='average'
Whether to return the partial dependence averaged across all the
samples in the dataset or one value per sample or both.
See Returns below.
Note that the fast `method='recursion'` option is only available for
`kind='average'` and `sample_weights=None`. Computing individual
dependencies and doing weighted averages requires using the slower
`method='brute'`.
.. versionadded:: 0.24
Returns
-------
predictions : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
individual : ndarray of shape (n_outputs, n_instances, \
len(values[0]), len(values[1]), ...)
The predictions for all the points in the grid for all
samples in X. This is also known as Individual
Conditional Expectation (ICE).
Only available when `kind='individual'` or `kind='both'`.
average : ndarray of shape (n_outputs, len(values[0]), \
len(values[1]), ...)
The predictions for all the points in the grid, averaged
over all samples in X (or over the training data if
`method` is 'recursion').
Only available when `kind='average'` or `kind='both'`.
grid_values : seq of 1d ndarrays
The values with which the grid has been created. The generated
grid is a cartesian product of the arrays in `grid_values` where
`len(grid_values) == len(features)`. The size of each array
`grid_values[j]` is either `grid_resolution`, or the number of
unique values in `X[:, j]`, whichever is smaller.
.. versionadded:: 1.3
`n_outputs` corresponds to the number of classes in a multi-class
setting, or to the number of tasks for multi-output regression.
For classical regression and binary classification `n_outputs==1`.
`n_values_feature_j` corresponds to the size `grid_values[j]`.
See Also
--------
PartialDependenceDisplay.from_estimator : Plot Partial Dependence.
PartialDependenceDisplay : Partial Dependence visualization.
Examples
--------
>>> X = [[0, 0, 2], [1, 0, 0]]
>>> y = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(X, y)
>>> partial_dependence(gb, features=[0], X=X, percentiles=(0, 1),
... grid_resolution=2) # doctest: +SKIP
(array([[-4.52, 4.52]]), [array([ 0., 1.])])
"""
check_is_fitted(estimator)
if not (is_classifier(estimator) or is_regressor(estimator)):
raise ValueError("'estimator' must be a fitted regressor or classifier.")
if is_classifier(estimator) and isinstance(estimator.classes_[0], np.ndarray):
raise ValueError("Multiclass-multioutput estimators are not supported")
# Use check_array only on lists and other non-array-likes / sparse. Do not
# convert DataFrame into a NumPy array.
if not (hasattr(X, "__array__") or sparse.issparse(X)):
X = check_array(X, ensure_all_finite="allow-nan", dtype=object)
if is_regressor(estimator) and response_method != "auto":
raise ValueError(
"The response_method parameter is ignored for regressors and "
"must be 'auto'."
)
if kind != "average":
if method == "recursion":
raise ValueError(
"The 'recursion' method only applies when 'kind' is set to 'average'"
)
method = "brute"
if method == "recursion" and sample_weight is not None:
raise ValueError(
"The 'recursion' method can only be applied when sample_weight is None."
)
if method == "auto":
if sample_weight is not None:
method = "brute"
elif isinstance(estimator, BaseGradientBoosting) and estimator.init is None:
method = "recursion"
elif isinstance(
estimator,
(BaseHistGradientBoosting, DecisionTreeRegressor, RandomForestRegressor),
):
method = "recursion"
else:
method = "brute"
if method == "recursion":
if not isinstance(
estimator,
(
BaseGradientBoosting,
BaseHistGradientBoosting,
DecisionTreeRegressor,
RandomForestRegressor,
),
):
supported_classes_recursion = (
"GradientBoostingClassifier",
"GradientBoostingRegressor",
"HistGradientBoostingClassifier",
"HistGradientBoostingRegressor",
"HistGradientBoostingRegressor",
"DecisionTreeRegressor",
"RandomForestRegressor",
)
raise ValueError(
"Only the following estimators support the 'recursion' "
"method: {}. Try using method='brute'.".format(
", ".join(supported_classes_recursion)
)
)
if response_method == "auto":
response_method = "decision_function"
if response_method != "decision_function":
raise ValueError(
"With the 'recursion' method, the response_method must be "
"'decision_function'. Got {}.".format(response_method)
)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
if _determine_key_type(features, accept_slice=False) == "int":
# _get_column_indices() supports negative indexing. Here, we limit
# the indexing to be positive. The upper bound will be checked
# by _get_column_indices()
if np.any(np.less(features, 0)):
raise ValueError("all features must be in [0, {}]".format(X.shape[1] - 1))
features_indices = np.asarray(
_get_column_indices(X, features), dtype=np.intp, order="C"
).ravel()
feature_names = _check_feature_names(X, feature_names)
n_features = X.shape[1]
if categorical_features is None:
is_categorical = [False] * len(features_indices)
else:
categorical_features = np.asarray(categorical_features)
if categorical_features.size == 0:
raise ValueError(
"Passing an empty list (`[]`) to `categorical_features` is not "
"supported. Use `None` instead to indicate that there are no "
"categorical features."
)
if categorical_features.dtype.kind == "b":
# categorical features provided as a list of boolean
if categorical_features.size != n_features:
raise ValueError(
"When `categorical_features` is a boolean array-like, "
"the array should be of shape (n_features,). Got "
f"{categorical_features.size} elements while `X` contains "
f"{n_features} features."
)
is_categorical = [categorical_features[idx] for idx in features_indices]
elif categorical_features.dtype.kind in ("i", "O", "U"):
# categorical features provided as a list of indices or feature names
categorical_features_idx = [
_get_feature_index(cat, feature_names=feature_names)
for cat in categorical_features
]
is_categorical = [
idx in categorical_features_idx for idx in features_indices
]
else:
raise ValueError(
"Expected `categorical_features` to be an array-like of boolean,"
f" integer, or string. Got {categorical_features.dtype} instead."
)
custom_values = custom_values or {}
if isinstance(features, (str, int)):
features = [features]
for feature_idx, feature, is_cat in zip(features_indices, features, is_categorical):
if is_cat:
continue
if _safe_indexing(X, feature_idx, axis=1).dtype.kind in "iu":
# TODO(1.9): raise a ValueError instead.
warnings.warn(
f"The column {feature!r} contains integer data. Partial "
"dependence plots are not supported for integer data: this "
"can lead to implicit rounding with NumPy arrays or even errors "
"with newer pandas versions. Please convert numerical features"
"to floating point dtypes ahead of time to avoid problems. "
"This will raise ValueError in scikit-learn 1.9.",
FutureWarning,
)
# Do not warn again for other features to avoid spamming the caller.
break
X_subset = _safe_indexing(X, features_indices, axis=1)
custom_values_for_X_subset = {
index: custom_values.get(feature)
for index, feature in enumerate(features)
if feature in custom_values
}
grid, values = _grid_from_X(
X_subset,
percentiles,
is_categorical,
grid_resolution,
custom_values_for_X_subset,
)
if method == "brute":
averaged_predictions, predictions = _partial_dependence_brute(
estimator, grid, features_indices, X, response_method, sample_weight
)
# reshape predictions to
# (n_outputs, n_instances, n_values_feature_0, n_values_feature_1, ...)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/inspection/_permutation_importance.py | sklearn/inspection/_permutation_importance.py | """Permutation importance for estimators."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numbers
import numpy as np
from sklearn.ensemble._bagging import _generate_indices
from sklearn.metrics import check_scoring, get_scorer_names
from sklearn.model_selection._validation import _aggregate_score_dicts
from sklearn.utils import Bunch, _safe_indexing, check_array, check_random_state
from sklearn.utils._param_validation import (
HasMethods,
Integral,
Interval,
RealNotInt,
StrOptions,
validate_params,
)
from sklearn.utils.parallel import Parallel, delayed
def _weights_scorer(scorer, estimator, X, y, sample_weight):
if sample_weight is not None:
return scorer(estimator, X, y, sample_weight=sample_weight)
return scorer(estimator, X, y)
def _calculate_permutation_scores(
    estimator,
    X,
    y,
    sample_weight,
    col_idx,
    random_state,
    n_repeats,
    scorer,
    max_samples,
):
    """Calculate score when `col_idx` is permuted.

    Column `col_idx` of (a copy of) `X` is shuffled in place `n_repeats`
    times and the estimator is re-scored after each shuffle.

    Parameters
    ----------
    estimator : object
        Fitted estimator passed through to `scorer`.
    X : ndarray or DataFrame
        Data on which the permuted scores are computed.
    y : array-like or None
        Targets forwarded to `scorer`.
    sample_weight : array-like or None
        Optional sample weights forwarded to `scorer`.
    col_idx : int
        Positional index of the column to permute.
    random_state : int, RandomState instance or None
        Controls both the optional row subsampling and the shuffles.
    n_repeats : int
        Number of times the column is permuted and re-scored.
    scorer : callable
        Scorer with signature ``scorer(estimator, X, y[, sample_weight])``;
        may return a single score or a dict of scores (multi-metric).
    max_samples : int
        Number of rows to draw without replacement before permuting; when
        equal to ``X.shape[0]`` all rows are used.

    Returns
    -------
    scores : ndarray of shape (n_repeats,) or dict of ndarray
        One score per repeat; a dict of arrays when `scorer` returns a dict.
    """
    random_state = check_random_state(random_state)

    # Work on a copy of X to ensure thread-safety in case of threading based
    # parallelism. Furthermore, making a copy is also useful when the joblib
    # backend is 'loky' (default) or the old 'multiprocessing': in those cases,
    # if X is large it will be automatically be backed by a readonly memory map
    # (memmap). X.copy() on the other hand is always guaranteed to return a
    # writable data-structure whose columns can be shuffled inplace.
    if max_samples < X.shape[0]:
        # Subsample rows without replacement (bootstrap=False); y and
        # sample_weight must be subset consistently with X.
        row_indices = _generate_indices(
            random_state=random_state,
            bootstrap=False,
            n_population=X.shape[0],
            n_samples=max_samples,
        )
        X_permuted = _safe_indexing(X, row_indices, axis=0)
        y = _safe_indexing(y, row_indices, axis=0)
        if sample_weight is not None:
            sample_weight = _safe_indexing(sample_weight, row_indices, axis=0)
    else:
        X_permuted = X.copy()

    scores = []
    # Reuse one index buffer and re-shuffle it in place for every repeat.
    shuffling_idx = np.arange(X_permuted.shape[0])
    for _ in range(n_repeats):
        random_state.shuffle(shuffling_idx)
        if hasattr(X_permuted, "iloc"):
            # pandas path: take the shuffled column, then restore the frame's
            # index so the assignment below is positional instead of being
            # realigned by label (which would undo the shuffle).
            col = X_permuted.iloc[shuffling_idx, col_idx]
            col.index = X_permuted.index
            X_permuted[X_permuted.columns[col_idx]] = col
        else:
            X_permuted[:, col_idx] = X_permuted[shuffling_idx, col_idx]
        scores.append(_weights_scorer(scorer, estimator, X_permuted, y, sample_weight))

    # Multi-metric scorers return one dict per repeat; regroup them into a
    # dict of per-metric arrays. Single-metric scores become a 1d array.
    if isinstance(scores[0], dict):
        scores = _aggregate_score_dicts(scores)
    else:
        scores = np.array(scores)

    return scores
def _create_importances_bunch(baseline_score, permuted_score):
    """Package permutation importances as the decrease in score.

    Parameters
    ----------
    baseline_score : ndarray of shape (n_features,)
        The baseline score without permutation.

    permuted_score : ndarray of shape (n_features, n_repeats)
        The permuted scores for the `n` repetitions.

    Returns
    -------
    importances : :class:`~sklearn.utils.Bunch`
        Dictionary-like object, with the following attributes.

        importances_mean : ndarray, shape (n_features, )
            Mean of feature importance over `n_repeats`.
        importances_std : ndarray, shape (n_features, )
            Standard deviation over `n_repeats`.
        importances : ndarray, shape (n_features, n_repeats)
            Raw permutation importance scores.
    """
    # Importance of a feature = how much the score dropped when it was permuted.
    decrease = baseline_score - permuted_score
    return Bunch(
        importances_mean=decrease.mean(axis=1),
        importances_std=decrease.std(axis=1),
        importances=decrease,
    )
@validate_params(
    {
        "estimator": [HasMethods(["fit"])],
        "X": ["array-like"],
        "y": ["array-like", None],
        "scoring": [
            StrOptions(set(get_scorer_names())),
            callable,
            list,
            tuple,
            dict,
            None,
        ],
        "n_repeats": [Interval(Integral, 1, None, closed="left")],
        "n_jobs": [Integral, None],
        "random_state": ["random_state"],
        "sample_weight": ["array-like", None],
        "max_samples": [
            Interval(Integral, 1, None, closed="left"),
            Interval(RealNotInt, 0, 1, closed="right"),
        ],
    },
    prefer_skip_nested_validation=True,
)
def permutation_importance(
    estimator,
    X,
    y,
    *,
    scoring=None,
    n_repeats=5,
    n_jobs=None,
    random_state=None,
    sample_weight=None,
    max_samples=1.0,
):
    """Permutation importance for feature evaluation [BRE]_.

    The :term:`estimator` is required to be a fitted estimator. `X` can be the
    data set used to train the estimator or a hold-out set. The permutation
    importance of a feature is calculated as follows. First, a baseline metric,
    defined by :term:`scoring`, is evaluated on a (potentially different)
    dataset defined by the `X`. Next, a feature column from the validation set
    is permuted and the metric is evaluated again. The permutation importance
    is defined to be the difference between the baseline metric and metric from
    permuting the feature column.

    Read more in the :ref:`User Guide <permutation_importance>`.

    Parameters
    ----------
    estimator : object
        An estimator that has already been :term:`fitted` and is compatible
        with :term:`scorer`.

    X : ndarray or DataFrame, shape (n_samples, n_features)
        Data on which permutation importance will be computed.

    y : array-like or None, shape (n_samples, ) or (n_samples, n_classes)
        Targets for supervised or `None` for unsupervised.

    scoring : str, callable, list, tuple, or dict, default=None
        Scorer to use.
        If `scoring` represents a single score, one can use:

        - str: see :ref:`scoring_string_names` for options.
        - callable: a scorer callable object (e.g., function) with signature
          ``scorer(estimator, X, y)``. See :ref:`scoring_callable` for details.
        - `None`: the `estimator`'s
          :ref:`default evaluation criterion <scoring_api_overview>` is used.

        If `scoring` represents multiple scores, one can use:

        - a list or tuple of unique strings;
        - a callable returning a dictionary where the keys are the metric
          names and the values are the metric scores;
        - a dictionary with metric names as keys and callables as values.

        Passing multiple scores to `scoring` is more efficient than calling
        `permutation_importance` for each of the scores as it reuses
        predictions to avoid redundant computation.

    n_repeats : int, default=5
        Number of times to permute a feature.

    n_jobs : int or None, default=None
        Number of jobs to run in parallel. The computation is done by computing
        permutation score for each column and parallelized over the columns.
        `None` means 1 unless in a :obj:`joblib.parallel_backend` context.
        `-1` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    random_state : int, RandomState instance, default=None
        Pseudo-random number generator to control the permutations of each
        feature.
        Pass an int to get reproducible results across function calls.
        See :term:`Glossary <random_state>`.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights used in scoring.

        .. versionadded:: 0.24

    max_samples : int or float, default=1.0
        The number of samples to draw from X to compute feature importance
        in each repeat (without replacement).

        - If int, then draw `max_samples` samples.
        - If float, then draw `max_samples * X.shape[0]` samples.
        - If `max_samples` is equal to `1.0` or `X.shape[0]`, all samples
          will be used.

        While using this option may provide less accurate importance estimates,
        it keeps the method tractable when evaluating feature importance on
        large datasets. In combination with `n_repeats`, this allows to control
        the computational speed vs statistical accuracy trade-off of this method.

        .. versionadded:: 1.0

    Returns
    -------
    result : :class:`~sklearn.utils.Bunch` or dict of such instances
        Dictionary-like object, with the following attributes.

        importances_mean : ndarray of shape (n_features, )
            Mean of feature importance over `n_repeats`.
        importances_std : ndarray of shape (n_features, )
            Standard deviation over `n_repeats`.
        importances : ndarray of shape (n_features, n_repeats)
            Raw permutation importance scores.

        If there are multiple scoring metrics in the scoring parameter
        `result` is a dict with scorer names as keys (e.g. 'roc_auc') and
        `Bunch` objects like above as values.

    References
    ----------
    .. [BRE] :doi:`L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32,
        2001. <10.1023/A:1010933404324>`

    Examples
    --------
    >>> from sklearn.linear_model import LogisticRegression
    >>> from sklearn.inspection import permutation_importance
    >>> X = [[1, 9, 9],[1, 9, 9],[1, 9, 9],
    ...      [0, 9, 9],[0, 9, 9],[0, 9, 9]]
    >>> y = [1, 1, 1, 0, 0, 0]
    >>> clf = LogisticRegression().fit(X, y)
    >>> result = permutation_importance(clf, X, y, n_repeats=10,
    ...                                 random_state=0)
    >>> result.importances_mean
    array([0.4666, 0.       , 0. ])
    >>> result.importances_std
    array([0.2211, 0.       , 0. ])
    """
    # Dataframes are passed through untouched so that the per-column
    # permutation helper can use the `.iloc` code path; everything else is
    # validated/converted once here.
    if not hasattr(X, "iloc"):
        X = check_array(X, ensure_all_finite="allow-nan", dtype=None)

    # Precompute random seed from the random state to be used
    # to get a fresh independent RandomState instance for each
    # parallel call to _calculate_permutation_scores, irrespective of
    # the fact that variables are shared or not depending on the active
    # joblib backend (sequential, thread-based or process-based).
    random_state = check_random_state(random_state)
    random_seed = random_state.randint(np.iinfo(np.int32).max + 1)

    # A float max_samples is a fraction of n_samples; an int must not exceed
    # the number of available rows.
    if not isinstance(max_samples, numbers.Integral):
        max_samples = int(max_samples * X.shape[0])
    elif max_samples > X.shape[0]:
        raise ValueError("max_samples must be <= n_samples")

    scorer = check_scoring(estimator, scoring=scoring)
    baseline_score = _weights_scorer(scorer, estimator, X, y, sample_weight)

    # One parallel task per feature column; each task reuses the same seed so
    # results do not depend on the joblib backend.
    scores = Parallel(n_jobs=n_jobs)(
        delayed(_calculate_permutation_scores)(
            estimator,
            X,
            y,
            sample_weight,
            col_idx,
            random_seed,
            n_repeats,
            scorer,
            max_samples,
        )
        for col_idx in range(X.shape[1])
    )

    # Multi-metric scoring yields a dict of baseline scores: build one Bunch
    # per metric. Otherwise return a single Bunch.
    if isinstance(baseline_score, dict):
        return {
            name: _create_importances_bunch(
                baseline_score[name],
                # unpack the permuted scores
                np.array([scores[col_idx][name] for col_idx in range(X.shape[1])]),
            )
            for name in baseline_score
        }
    else:
        return _create_importances_bunch(baseline_score, np.array(scores))
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/inspection/__init__.py | sklearn/inspection/__init__.py | """Tools for model inspection."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from sklearn.inspection._partial_dependence import partial_dependence
from sklearn.inspection._permutation_importance import permutation_importance
from sklearn.inspection._plot.decision_boundary import DecisionBoundaryDisplay
from sklearn.inspection._plot.partial_dependence import PartialDependenceDisplay
__all__ = [
"DecisionBoundaryDisplay",
"PartialDependenceDisplay",
"partial_dependence",
"permutation_importance",
]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/inspection/_plot/decision_boundary.py | sklearn/inspection/_plot/decision_boundary.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
import numpy as np
from sklearn.base import is_regressor
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import _safe_indexing
from sklearn.utils._dataframe import is_pandas_df, is_polars_df
from sklearn.utils._optional_dependencies import check_matplotlib_support
from sklearn.utils._response import _get_response_values
from sklearn.utils._set_output import _get_adapter_from_container
from sklearn.utils.validation import (
_is_arraylike_not_scalar,
_num_features,
check_is_fitted,
)
def _check_boundary_response_method(estimator, response_method):
"""Validate the response methods to be used with the fitted estimator.
Parameters
----------
estimator : object
Fitted estimator to check.
response_method : {'auto', 'decision_function', 'predict_proba', 'predict'}
Specifies whether to use :term:`decision_function`, :term:`predict_proba`,
:term:`predict` as the target response. If set to 'auto', the response method is
tried in the before mentioned order.
Returns
-------
prediction_method : list of str or str
The name or list of names of the response methods to use.
"""
has_classes = hasattr(estimator, "classes_")
if has_classes and _is_arraylike_not_scalar(estimator.classes_[0]):
msg = "Multi-label and multi-output multi-class classifiers are not supported"
raise ValueError(msg)
if response_method == "auto":
if is_regressor(estimator):
prediction_method = "predict"
else:
prediction_method = ["decision_function", "predict_proba", "predict"]
else:
prediction_method = response_method
return prediction_method
class DecisionBoundaryDisplay:
    """Decision boundary visualization.

    It is recommended to use
    :func:`~sklearn.inspection.DecisionBoundaryDisplay.from_estimator`
    to create a :class:`DecisionBoundaryDisplay`. All parameters are stored as
    attributes.

    Read more in the :ref:`User Guide <visualizations>`.

    For a detailed example comparing the decision boundaries of multinomial and
    one-vs-rest logistic regression, please see
    :ref:`sphx_glr_auto_examples_linear_model_plot_logistic_multinomial.py`.

    .. versionadded:: 1.1

    Parameters
    ----------
    xx0 : ndarray of shape (grid_resolution, grid_resolution)
        First output of :func:`meshgrid <numpy.meshgrid>`.

    xx1 : ndarray of shape (grid_resolution, grid_resolution)
        Second output of :func:`meshgrid <numpy.meshgrid>`.

    response : ndarray of shape (grid_resolution, grid_resolution) or \
            (grid_resolution, grid_resolution, n_classes)
        Values of the response function.

    multiclass_colors : list of str or str, default=None
        Specifies how to color each class when plotting all classes of multiclass
        problem. Ignored for binary problems and multiclass problems when plotting a
        single prediction value per point.

        Possible inputs are:

        * list: list of Matplotlib
          `color <https://matplotlib.org/stable/users/explain/colors/colors.html#colors-def>`_
          strings, of length `n_classes`
        * str: name of :class:`matplotlib.colors.Colormap`
        * None: 'viridis' colormap is used to sample colors

        Single color colormaps will be generated from the colors in the list or
        colors taken from the colormap and passed to the `cmap` parameter of
        the `plot_method`.

        .. versionadded:: 1.7

    xlabel : str, default=None
        Default label to place on x axis.

    ylabel : str, default=None
        Default label to place on y axis.

    Attributes
    ----------
    surface_ : matplotlib `QuadContourSet` or `QuadMesh` or list of such objects
        If `plot_method` is 'contour' or 'contourf', `surface_` is
        :class:`QuadContourSet <matplotlib.contour.QuadContourSet>`. If
        `plot_method` is 'pcolormesh', `surface_` is
        :class:`QuadMesh <matplotlib.collections.QuadMesh>`.

    multiclass_colors_ : array of shape (n_classes, 4)
        Colors used to plot each class in multiclass problems.
        Only defined when `class_of_interest` is None.

        .. versionadded:: 1.7

    ax_ : matplotlib Axes
        Axes with decision boundary.

    figure_ : matplotlib Figure
        Figure containing the decision boundary.

    See Also
    --------
    DecisionBoundaryDisplay.from_estimator : Plot decision boundary given an estimator.

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> import numpy as np
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.inspection import DecisionBoundaryDisplay
    >>> from sklearn.tree import DecisionTreeClassifier
    >>> iris = load_iris()
    >>> feature_1, feature_2 = np.meshgrid(
    ...     np.linspace(iris.data[:, 0].min(), iris.data[:, 0].max()),
    ...     np.linspace(iris.data[:, 1].min(), iris.data[:, 1].max())
    ... )
    >>> grid = np.vstack([feature_1.ravel(), feature_2.ravel()]).T
    >>> tree = DecisionTreeClassifier().fit(iris.data[:, :2], iris.target)
    >>> y_pred = np.reshape(tree.predict(grid), feature_1.shape)
    >>> display = DecisionBoundaryDisplay(
    ...     xx0=feature_1, xx1=feature_2, response=y_pred
    ... )
    >>> display.plot()
    <...>
    >>> display.ax_.scatter(
    ...     iris.data[:, 0], iris.data[:, 1], c=iris.target, edgecolor="black"
    ... )
    <...>
    >>> plt.show()
    """

    def __init__(
        self, *, xx0, xx1, response, multiclass_colors=None, xlabel=None, ylabel=None
    ):
        # Store everything as-is; all plotting happens in `plot`.
        self.xx0 = xx0
        self.xx1 = xx1
        self.response = response
        self.multiclass_colors = multiclass_colors
        self.xlabel = xlabel
        self.ylabel = ylabel

    def plot(self, plot_method="contourf", ax=None, xlabel=None, ylabel=None, **kwargs):
        """Plot visualization.

        Parameters
        ----------
        plot_method : {'contourf', 'contour', 'pcolormesh'}, default='contourf'
            Plotting method to call when plotting the response. Please refer
            to the following matplotlib documentation for details:
            :func:`contourf <matplotlib.pyplot.contourf>`,
            :func:`contour <matplotlib.pyplot.contour>`,
            :func:`pcolormesh <matplotlib.pyplot.pcolormesh>`.

        ax : Matplotlib axes, default=None
            Axes object to plot on. If `None`, a new figure and axes is
            created.

        xlabel : str, default=None
            Overwrite the x-axis label.

        ylabel : str, default=None
            Overwrite the y-axis label.

        **kwargs : dict
            Additional keyword arguments to be passed to the `plot_method`.

        Returns
        -------
        display: :class:`~sklearn.inspection.DecisionBoundaryDisplay`
            Object that stores computed values.
        """
        check_matplotlib_support("DecisionBoundaryDisplay.plot")
        # Imported lazily so that matplotlib stays an optional dependency.
        import matplotlib as mpl
        import matplotlib.pyplot as plt

        if plot_method not in ("contourf", "contour", "pcolormesh"):
            raise ValueError(
                "plot_method must be 'contourf', 'contour', or 'pcolormesh'. "
                f"Got {plot_method} instead."
            )

        if ax is None:
            _, ax = plt.subplots()

        plot_func = getattr(ax, plot_method)
        if self.response.ndim == 2:
            # Single value per grid point: one surface is enough.
            self.surface_ = plot_func(self.xx0, self.xx1, self.response, **kwargs)
        else:  # self.response.ndim == 3
            # One response value per class: colors are controlled by
            # `multiclass_colors`, so user-supplied cmap/colors are dropped.
            n_responses = self.response.shape[-1]
            for kwarg in ("cmap", "colors"):
                if kwarg in kwargs:
                    warnings.warn(
                        f"'{kwarg}' is ignored in favor of 'multiclass_colors' "
                        "in the multiclass case when the response method is "
                        "'decision_function' or 'predict_proba'."
                    )
                    del kwargs[kwarg]

            if self.multiclass_colors is None or isinstance(
                self.multiclass_colors, str
            ):
                if self.multiclass_colors is None:
                    cmap = "tab10" if n_responses <= 10 else "gist_rainbow"
                else:
                    cmap = self.multiclass_colors

                # Special case for the tab10 and tab20 colormaps that encode a
                # discrete set of colors that are easily distinguishable
                # contrary to other colormaps that are continuous.
                if cmap == "tab10" and n_responses <= 10:
                    colors = plt.get_cmap("tab10", 10).colors[:n_responses]
                elif cmap == "tab20" and n_responses <= 20:
                    colors = plt.get_cmap("tab20", 20).colors[:n_responses]
                else:
                    cmap = plt.get_cmap(cmap, n_responses)
                    if not hasattr(cmap, "colors"):
                        # For LinearSegmentedColormap
                        colors = cmap(np.linspace(0, 1, n_responses))
                    else:
                        colors = cmap.colors
            elif isinstance(self.multiclass_colors, list):
                colors = [mpl.colors.to_rgba(color) for color in self.multiclass_colors]
            else:
                raise ValueError("'multiclass_colors' must be a list or a str.")

            self.multiclass_colors_ = colors

            if plot_method == "contour":
                # Plot only argmax map for contour
                class_map = self.response.argmax(axis=2)
                self.surface_ = plot_func(
                    self.xx0, self.xx1, class_map, colors=colors, **kwargs
                )
            else:
                # Build a white-to-class-color colormap per class.
                multiclass_cmaps = [
                    mpl.colors.LinearSegmentedColormap.from_list(
                        f"colormap_{class_idx}", [(1.0, 1.0, 1.0, 1.0), (r, g, b, 1.0)]
                    )
                    for class_idx, (r, g, b, _) in enumerate(colors)
                ]

                self.surface_ = []
                for class_idx, cmap in enumerate(multiclass_cmaps):
                    # Mask every grid point where this class is not the argmax,
                    # so each surface only paints the region it "wins".
                    response = np.ma.array(
                        self.response[:, :, class_idx],
                        mask=~(self.response.argmax(axis=2) == class_idx),
                    )
                    self.surface_.append(
                        plot_func(self.xx0, self.xx1, response, cmap=cmap, **kwargs)
                    )

        # Only overwrite axis labels if explicitly requested or not yet set.
        if xlabel is not None or not ax.get_xlabel():
            xlabel = self.xlabel if xlabel is None else xlabel
            ax.set_xlabel(xlabel)
        if ylabel is not None or not ax.get_ylabel():
            ylabel = self.ylabel if ylabel is None else ylabel
            ax.set_ylabel(ylabel)

        self.ax_ = ax
        self.figure_ = ax.figure
        return self

    @classmethod
    def from_estimator(
        cls,
        estimator,
        X,
        *,
        grid_resolution=100,
        eps=1.0,
        plot_method="contourf",
        response_method="auto",
        class_of_interest=None,
        multiclass_colors=None,
        xlabel=None,
        ylabel=None,
        ax=None,
        **kwargs,
    ):
        """Plot decision boundary given an estimator.

        Read more in the :ref:`User Guide <visualizations>`.

        Parameters
        ----------
        estimator : object
            Trained estimator used to plot the decision boundary.

        X : {array-like, sparse matrix, dataframe} of shape (n_samples, 2)
            Input data that should be only 2-dimensional.

        grid_resolution : int, default=100
            Number of grid points to use for plotting decision boundary.
            Higher values will make the plot look nicer but be slower to
            render.

        eps : float, default=1.0
            Extends the minimum and maximum values of X for evaluating the
            response function.

        plot_method : {'contourf', 'contour', 'pcolormesh'}, default='contourf'
            Plotting method to call when plotting the response. Please refer
            to the following matplotlib documentation for details:
            :func:`contourf <matplotlib.pyplot.contourf>`,
            :func:`contour <matplotlib.pyplot.contour>`,
            :func:`pcolormesh <matplotlib.pyplot.pcolormesh>`.

        response_method : {'auto', 'decision_function', 'predict_proba', \
                'predict'}, default='auto'
            Specifies whether to use :term:`decision_function`,
            :term:`predict_proba` or :term:`predict` as the target response.
            If set to 'auto', the response method is tried in the order as
            listed above.

            .. versionchanged:: 1.6
                For multiclass problems, 'auto' no longer defaults to 'predict'.

        class_of_interest : int, float, bool or str, default=None
            The class to be plotted when `response_method` is 'predict_proba'
            or 'decision_function'. If None, `estimator.classes_[1]` is considered
            the positive class for binary classifiers. For multiclass
            classifiers, if None, all classes will be represented in the
            decision boundary plot; the class with the highest response value
            at each point is plotted. The color of each class can be set via
            `multiclass_colors`.

            .. versionadded:: 1.4

        multiclass_colors : list of str, or str, default=None
            Specifies how to color each class when plotting multiclass
            'predict_proba' or 'decision_function' and `class_of_interest` is
            None. Ignored in all other cases.

            Possible inputs are:

            * list: list of Matplotlib
              `color <https://matplotlib.org/stable/users/explain/colors/colors.html#colors-def>`_
              strings, of length `n_classes`
            * str: name of :class:`matplotlib.colors.Colormap`
            * None: 'tab10' colormap is used to sample colors if the number of
              classes is less than or equal to 10, otherwise 'gist_rainbow'
              colormap.

            Single color colormaps will be generated from the colors in the list or
            colors taken from the colormap, and passed to the `cmap` parameter of
            the `plot_method`.

            .. versionadded:: 1.7

        xlabel : str, default=None
            The label used for the x-axis. If `None`, an attempt is made to
            extract a label from `X` if it is a dataframe, otherwise an empty
            string is used.

        ylabel : str, default=None
            The label used for the y-axis. If `None`, an attempt is made to
            extract a label from `X` if it is a dataframe, otherwise an empty
            string is used.

        ax : Matplotlib axes, default=None
            Axes object to plot on. If `None`, a new figure and axes is
            created.

        **kwargs : dict
            Additional keyword arguments to be passed to the
            `plot_method`.

        Returns
        -------
        display : :class:`~sklearn.inspection.DecisionBoundaryDisplay`
            Object that stores the result.

        See Also
        --------
        DecisionBoundaryDisplay : Decision boundary visualization.
        sklearn.metrics.ConfusionMatrixDisplay.from_estimator : Plot the
            confusion matrix given an estimator, the data, and the label.
        sklearn.metrics.ConfusionMatrixDisplay.from_predictions : Plot the
            confusion matrix given the true and predicted labels.

        Examples
        --------
        >>> import matplotlib.pyplot as plt
        >>> from sklearn.datasets import load_iris
        >>> from sklearn.linear_model import LogisticRegression
        >>> from sklearn.inspection import DecisionBoundaryDisplay
        >>> iris = load_iris()
        >>> X = iris.data[:, :2]
        >>> classifier = LogisticRegression().fit(X, iris.target)
        >>> disp = DecisionBoundaryDisplay.from_estimator(
        ...     classifier, X, response_method="predict",
        ...     xlabel=iris.feature_names[0], ylabel=iris.feature_names[1],
        ...     alpha=0.5,
        ... )
        >>> disp.ax_.scatter(X[:, 0], X[:, 1], c=iris.target, edgecolor="k")
        <...>
        >>> plt.show()
        """
        check_matplotlib_support(f"{cls.__name__}.from_estimator")
        check_is_fitted(estimator)
        import matplotlib as mpl

        # --- Parameter validation -------------------------------------------
        if not grid_resolution > 1:
            raise ValueError(
                "grid_resolution must be greater than 1. Got"
                f" {grid_resolution} instead."
            )

        if not eps >= 0:
            raise ValueError(
                f"eps must be greater than or equal to 0. Got {eps} instead."
            )

        possible_plot_methods = ("contourf", "contour", "pcolormesh")
        if plot_method not in possible_plot_methods:
            available_methods = ", ".join(possible_plot_methods)
            raise ValueError(
                f"plot_method must be one of {available_methods}. "
                f"Got {plot_method} instead."
            )

        num_features = _num_features(X)
        if num_features != 2:
            raise ValueError(
                f"n_features must be equal to 2. Got {num_features} instead."
            )

        # `multiclass_colors` is only validated for genuinely multiclass
        # (> 2 classes) classifiers with a compatible response method.
        if (
            response_method in ("predict_proba", "decision_function", "auto")
            and multiclass_colors is not None
            and hasattr(estimator, "classes_")
            and (n_classes := len(estimator.classes_)) > 2
        ):
            if isinstance(multiclass_colors, list):
                if len(multiclass_colors) != n_classes:
                    raise ValueError(
                        "When 'multiclass_colors' is a list, it must be of the same "
                        f"length as 'estimator.classes_' ({n_classes}), got: "
                        f"{len(multiclass_colors)}."
                    )
                elif any(
                    not mpl.colors.is_color_like(col) for col in multiclass_colors
                ):
                    raise ValueError(
                        "When 'multiclass_colors' is a list, it can only contain valid"
                        f" Matplotlib color names. Got: {multiclass_colors}"
                    )
            if isinstance(multiclass_colors, str):
                if multiclass_colors not in mpl.pyplot.colormaps():
                    raise ValueError(
                        "When 'multiclass_colors' is a string, it must be a valid "
                        f"Matplotlib colormap. Got: {multiclass_colors}"
                    )

        # --- Build the evaluation grid --------------------------------------
        x0, x1 = _safe_indexing(X, 0, axis=1), _safe_indexing(X, 1, axis=1)

        # Pad the data range by `eps` on each side before gridding.
        x0_min, x0_max = x0.min() - eps, x0.max() + eps
        x1_min, x1_max = x1.min() - eps, x1.max() + eps

        xx0, xx1 = np.meshgrid(
            np.linspace(x0_min, x0_max, grid_resolution),
            np.linspace(x1_min, x1_max, grid_resolution),
        )

        X_grid = np.c_[xx0.ravel(), xx1.ravel()]
        if is_pandas_df(X) or is_polars_df(X):
            # Rebuild the grid as the same dataframe type with the original
            # column names so estimators fitted on dataframes don't warn/fail.
            adapter = _get_adapter_from_container(X)
            X_grid = adapter.create_container(
                X_grid,
                X_grid,
                columns=X.columns,
            )

        # --- Compute the response on the grid -------------------------------
        prediction_method = _check_boundary_response_method(estimator, response_method)
        try:
            response, _, response_method_used = _get_response_values(
                estimator,
                X_grid,
                response_method=prediction_method,
                pos_label=class_of_interest,
                return_response_method_used=True,
            )
        except ValueError as exc:
            if "is not a valid label" in str(exc):
                # re-raise a more informative error message since `pos_label` is unknown
                # to our user when interacting with
                # `DecisionBoundaryDisplay.from_estimator`
                raise ValueError(
                    f"class_of_interest={class_of_interest} is not a valid label: It "
                    f"should be one of {estimator.classes_}"
                ) from exc
            raise

        # convert classes predictions into integers
        if response_method_used == "predict" and hasattr(estimator, "classes_"):
            encoder = LabelEncoder()
            encoder.classes_ = estimator.classes_
            response = encoder.transform(response)

        # --- Reshape the response to the grid shape -------------------------
        if response.ndim == 1:
            response = response.reshape(*xx0.shape)
        else:
            if is_regressor(estimator):
                raise ValueError("Multi-output regressors are not supported")

            if class_of_interest is not None:
                # For the multiclass case, `_get_response_values` returns the response
                # as-is. Thus, we have a column per class and we need to select the
                # column corresponding to the positive class.
                col_idx = np.flatnonzero(estimator.classes_ == class_of_interest)[0]
                response = response[:, col_idx].reshape(*xx0.shape)
            else:
                response = response.reshape(*xx0.shape, response.shape[-1])

        # Default axis labels come from dataframe column names when available.
        if xlabel is None:
            xlabel = X.columns[0] if hasattr(X, "columns") else ""
        if ylabel is None:
            ylabel = X.columns[1] if hasattr(X, "columns") else ""

        display = cls(
            xx0=xx0,
            xx1=xx1,
            response=response,
            multiclass_colors=multiclass_colors,
            xlabel=xlabel,
            ylabel=ylabel,
        )
        return display.plot(ax=ax, plot_method=plot_method, **kwargs)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/inspection/_plot/partial_dependence.py | sklearn/inspection/_plot/partial_dependence.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numbers
from itertools import chain
from math import ceil
import numpy as np
from scipy import sparse
from scipy.stats.mstats import mquantiles
from sklearn.base import is_regressor
from sklearn.inspection import partial_dependence
from sklearn.inspection._pd_utils import _check_feature_names, _get_feature_index
from sklearn.utils import Bunch, _safe_indexing, check_array, check_random_state
from sklearn.utils._encode import _unique
from sklearn.utils._optional_dependencies import check_matplotlib_support
from sklearn.utils._plotting import _validate_style_kwargs
from sklearn.utils.parallel import Parallel, delayed
class PartialDependenceDisplay:
"""Partial Dependence Plot (PDP) and Individual Conditional Expectation (ICE).
It is recommended to use
:func:`~sklearn.inspection.PartialDependenceDisplay.from_estimator` to create a
:class:`~sklearn.inspection.PartialDependenceDisplay`. All parameters are stored
as attributes.
For general information regarding `scikit-learn` visualization tools, see
the :ref:`Visualization Guide <visualizations>`.
For guidance on interpreting these plots, refer to the
:ref:`Inspection Guide <partial_dependence>`.
For an example on how to use this class, see the following example:
:ref:`sphx_glr_auto_examples_miscellaneous_plot_partial_dependence_visualization_api.py`.
.. versionadded:: 0.22
Parameters
----------
pd_results : list of Bunch
Results of :func:`~sklearn.inspection.partial_dependence` for
``features``.
features : list of (int,) or list of (int, int)
Indices of features for a given plot. A tuple of one integer will plot
a partial dependence curve of one feature. A tuple of two integers will
plot a two-way partial dependence curve as a contour plot.
feature_names : list of str
Feature names corresponding to the indices in ``features``.
target_idx : int
- In a multiclass setting, specifies the class for which the PDPs
should be computed. Note that for binary classification, the
positive class (index 1) is always used.
- In a multioutput setting, specifies the task for which the PDPs
should be computed.
Ignored in binary classification or classical regression settings.
deciles : dict
Deciles for feature indices in ``features``.
kind : {'average', 'individual', 'both'} or list of such str, \
default='average'
Whether to plot the partial dependence averaged across all the samples
in the dataset or one line per sample or both.
- ``kind='average'`` results in the traditional PD plot;
- ``kind='individual'`` results in the ICE plot;
- ``kind='both'`` results in plotting both the ICE and PD on the same
plot.
A list of such strings can be provided to specify `kind` on a per-plot
basis. The length of the list should be the same as the number of
interaction requested in `features`.
.. note::
ICE ('individual' or 'both') is not a valid option for 2-ways
interactions plot. As a result, an error will be raised.
2-ways interaction plots should always be configured to
use the 'average' kind instead.
.. note::
The fast ``method='recursion'`` option is only available for
`kind='average'` and `sample_weights=None`. Computing individual
dependencies and doing weighted averages requires using the slower
`method='brute'`.
.. versionadded:: 0.24
Add `kind` parameter with `'average'`, `'individual'`, and `'both'`
options.
.. versionadded:: 1.1
Add the possibility to pass a list of string specifying `kind`
for each plot.
subsample : float, int or None, default=1000
Sampling for ICE curves when `kind` is 'individual' or 'both'.
If float, should be between 0.0 and 1.0 and represent the proportion
of the dataset to be used to plot ICE curves. If int, represents the
maximum absolute number of samples to use.
Note that the full dataset is still used to calculate partial
dependence when `kind='both'`.
.. versionadded:: 0.24
random_state : int, RandomState instance or None, default=None
Controls the randomness of the selected samples when subsamples is not
`None`. See :term:`Glossary <random_state>` for details.
.. versionadded:: 0.24
is_categorical : list of (bool,) or list of (bool, bool), default=None
Whether each target feature in `features` is categorical or not.
The list should be same size as `features`. If `None`, all features
are assumed to be continuous.
.. versionadded:: 1.2
Attributes
----------
bounding_ax_ : matplotlib Axes or None
If `ax` is an axes or None, the `bounding_ax_` is the axes where the
grid of partial dependence plots are drawn. If `ax` is a list of axes
or a numpy array of axes, `bounding_ax_` is None.
axes_ : ndarray of matplotlib Axes
If `ax` is an axes or None, `axes_[i, j]` is the axes on the i-th row
and j-th column. If `ax` is a list of axes, `axes_[i]` is the i-th item
in `ax`. Elements that are None correspond to a nonexisting axes in
that position.
lines_ : ndarray of matplotlib Artists
If `ax` is an axes or None, `lines_[i, j]` is the partial dependence
curve on the i-th row and j-th column. If `ax` is a list of axes,
`lines_[i]` is the partial dependence curve corresponding to the i-th
item in `ax`. Elements that are None correspond to a nonexisting axes
or an axes that does not include a line plot.
deciles_vlines_ : ndarray of matplotlib LineCollection
If `ax` is an axes or None, `vlines_[i, j]` is the line collection
representing the x axis deciles of the i-th row and j-th column. If
`ax` is a list of axes, `vlines_[i]` corresponds to the i-th item in
`ax`. Elements that are None correspond to a nonexisting axes or an
axes that does not include a PDP plot.
.. versionadded:: 0.23
deciles_hlines_ : ndarray of matplotlib LineCollection
If `ax` is an axes or None, `vlines_[i, j]` is the line collection
representing the y axis deciles of the i-th row and j-th column. If
`ax` is a list of axes, `vlines_[i]` corresponds to the i-th item in
`ax`. Elements that are None correspond to a nonexisting axes or an
axes that does not include a 2-way plot.
.. versionadded:: 0.23
contours_ : ndarray of matplotlib Artists
If `ax` is an axes or None, `contours_[i, j]` is the partial dependence
plot on the i-th row and j-th column. If `ax` is a list of axes,
`contours_[i]` is the partial dependence plot corresponding to the i-th
item in `ax`. Elements that are None correspond to a nonexisting axes
or an axes that does not include a contour plot.
bars_ : ndarray of matplotlib Artists
If `ax` is an axes or None, `bars_[i, j]` is the partial dependence bar
plot on the i-th row and j-th column (for a categorical feature).
If `ax` is a list of axes, `bars_[i]` is the partial dependence bar
plot corresponding to the i-th item in `ax`. Elements that are None
correspond to a nonexisting axes or an axes that does not include a
bar plot.
.. versionadded:: 1.2
heatmaps_ : ndarray of matplotlib Artists
If `ax` is an axes or None, `heatmaps_[i, j]` is the partial dependence
heatmap on the i-th row and j-th column (for a pair of categorical
features) . If `ax` is a list of axes, `heatmaps_[i]` is the partial
dependence heatmap corresponding to the i-th item in `ax`. Elements
that are None correspond to a nonexisting axes or an axes that does not
include a heatmap.
.. versionadded:: 1.2
figure_ : matplotlib Figure
Figure containing partial dependence plots.
See Also
--------
partial_dependence : Compute Partial Dependence values.
PartialDependenceDisplay.from_estimator : Plot Partial Dependence.
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> from sklearn.inspection import PartialDependenceDisplay
>>> from sklearn.inspection import partial_dependence
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> features, feature_names = [(0,)], [f"Features #{i}" for i in range(X.shape[1])]
>>> deciles = {0: np.linspace(0, 1, num=5)}
>>> pd_results = partial_dependence(
... clf, X, features=0, kind="average", grid_resolution=5)
>>> display = PartialDependenceDisplay(
... [pd_results], features=features, feature_names=feature_names,
... target_idx=0, deciles=deciles
... )
>>> display.plot(pdp_lim={1: (-1.38, 0.66)})
<...>
>>> plt.show()
"""
    def __init__(
        self,
        pd_results,
        *,
        features,
        feature_names,
        target_idx,
        deciles,
        kind="average",
        subsample=1000,
        random_state=None,
        is_categorical=None,
    ):
        # Store the precomputed partial-dependence results and display
        # options verbatim; no validation or rendering happens here.
        # All drawing is deferred to later calls (e.g. `plot`).
        self.pd_results = pd_results
        self.features = features
        self.feature_names = feature_names
        self.target_idx = target_idx
        self.deciles = deciles
        self.kind = kind
        self.subsample = subsample
        self.random_state = random_state
        self.is_categorical = is_categorical
@classmethod
def from_estimator(
cls,
estimator,
X,
features,
*,
sample_weight=None,
categorical_features=None,
feature_names=None,
target=None,
response_method="auto",
n_cols=3,
grid_resolution=100,
percentiles=(0.05, 0.95),
custom_values=None,
method="auto",
n_jobs=None,
verbose=0,
line_kw=None,
ice_lines_kw=None,
pd_line_kw=None,
contour_kw=None,
ax=None,
kind="average",
centered=False,
subsample=1000,
random_state=None,
):
"""Partial dependence (PD) and individual conditional expectation (ICE) plots.
Partial dependence plots, individual conditional expectation plots, or an
overlay of both can be plotted by setting the `kind` parameter.
This method generates one plot for each entry in `features`. The plots
are arranged in a grid with `n_cols` columns. For one-way partial
dependence plots, the deciles of the feature values are shown on the
x-axis. For two-way plots, the deciles are shown on both axes and PDPs
are contour plots.
For general information regarding `scikit-learn` visualization tools, see
the :ref:`Visualization Guide <visualizations>`.
For guidance on interpreting these plots, refer to the
:ref:`Inspection Guide <partial_dependence>`.
For an example on how to use this class method, see
:ref:`sphx_glr_auto_examples_inspection_plot_partial_dependence.py`.
.. note::
:func:`PartialDependenceDisplay.from_estimator` does not support using the
same axes with multiple calls. To plot the partial dependence for
multiple estimators, please pass the axes created by the first call to the
second call::
>>> from sklearn.inspection import PartialDependenceDisplay
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.linear_model import LinearRegression
>>> from sklearn.ensemble import RandomForestRegressor
>>> X, y = make_friedman1()
>>> est1 = LinearRegression().fit(X, y)
>>> est2 = RandomForestRegressor().fit(X, y)
>>> disp1 = PartialDependenceDisplay.from_estimator(est1, X,
... [1, 2])
>>> disp2 = PartialDependenceDisplay.from_estimator(est2, X, [1, 2],
... ax=disp1.axes_)
.. warning::
For :class:`~sklearn.ensemble.GradientBoostingClassifier` and
:class:`~sklearn.ensemble.GradientBoostingRegressor`, the
`'recursion'` method (used by default) will not account for the `init`
predictor of the boosting process. In practice, this will produce
the same values as `'brute'` up to a constant offset in the target
response, provided that `init` is a constant estimator (which is the
default). However, if `init` is not a constant estimator, the
partial dependence values are incorrect for `'recursion'` because the
offset will be sample-dependent. It is preferable to use the `'brute'`
method. Note that this only applies to
:class:`~sklearn.ensemble.GradientBoostingClassifier` and
:class:`~sklearn.ensemble.GradientBoostingRegressor`, not to
:class:`~sklearn.ensemble.HistGradientBoostingClassifier` and
:class:`~sklearn.ensemble.HistGradientBoostingRegressor`.
.. versionadded:: 1.0
Parameters
----------
estimator : BaseEstimator
A fitted estimator object implementing :term:`predict`,
:term:`predict_proba`, or :term:`decision_function`.
Multioutput-multiclass classifiers are not supported.
X : {array-like, dataframe} of shape (n_samples, n_features)
``X`` is used to generate a grid of values for the target
``features`` (where the partial dependence will be evaluated), and
also to generate values for the complement features when the
`method` is `'brute'`.
features : list of {int, str, pair of int, pair of str}
The target features for which to create the PDPs.
If `features[i]` is an integer or a string, a one-way PDP is created;
if `features[i]` is a tuple, a two-way PDP is created (only supported
with `kind='average'`). Each tuple must be of size 2.
If any entry is a string, then it must be in ``feature_names``.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights are used to calculate weighted means when averaging the
model output. If `None`, then samples are equally weighted. If
`sample_weight` is not `None`, then `method` will be set to `'brute'`.
Note that `sample_weight` is ignored for `kind='individual'`.
.. versionadded:: 1.3
categorical_features : array-like of shape (n_features,) or shape \
(n_categorical_features,), dtype={bool, int, str}, default=None
Indicates the categorical features.
- `None`: no feature will be considered categorical;
- boolean array-like: boolean mask of shape `(n_features,)`
indicating which features are categorical. Thus, this array has
the same shape has `X.shape[1]`;
- integer or string array-like: integer indices or strings
indicating categorical features.
.. versionadded:: 1.2
feature_names : array-like of shape (n_features,), dtype=str, default=None
Name of each feature; `feature_names[i]` holds the name of the feature
with index `i`.
By default, the name of the feature corresponds to their numerical
index for NumPy array and their column name for pandas dataframe.
target : int, default=None
- In a multiclass setting, specifies the class for which the PDPs
should be computed. Note that for binary classification, the
positive class (index 1) is always used.
- In a multioutput setting, specifies the task for which the PDPs
should be computed.
Ignored in binary classification or classical regression settings.
response_method : {'auto', 'predict_proba', 'decision_function'}, \
default='auto'
Specifies whether to use :term:`predict_proba` or
:term:`decision_function` as the target response. For regressors
this parameter is ignored and the response is always the output of
:term:`predict`. By default, :term:`predict_proba` is tried first
and we revert to :term:`decision_function` if it doesn't exist. If
``method`` is `'recursion'`, the response is always the output of
:term:`decision_function`.
n_cols : int, default=3
The maximum number of columns in the grid plot. Only active when `ax`
is a single axis or `None`.
grid_resolution : int, default=100
The number of equally spaced points on the axes of the plots, for each
target feature.
This parameter is overridden by `custom_values` if that parameter is set.
percentiles : tuple of float, default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the PDP axes. Must be in [0, 1].
This parameter is overridden by `custom_values` if that parameter is set.
custom_values : dict
A dictionary mapping the index of an element of `features` to an
array of values where the partial dependence should be calculated
for that feature. Setting a range of values for a feature overrides
`grid_resolution` and `percentiles`.
.. versionadded:: 1.7
method : str, default='auto'
The method used to calculate the averaged predictions:
- `'recursion'` is only supported for some tree-based estimators
(namely
:class:`~sklearn.ensemble.GradientBoostingClassifier`,
:class:`~sklearn.ensemble.GradientBoostingRegressor`,
:class:`~sklearn.ensemble.HistGradientBoostingClassifier`,
:class:`~sklearn.ensemble.HistGradientBoostingRegressor`,
:class:`~sklearn.tree.DecisionTreeRegressor`,
:class:`~sklearn.ensemble.RandomForestRegressor`
but is more efficient in terms of speed.
With this method, the target response of a
classifier is always the decision function, not the predicted
probabilities. Since the `'recursion'` method implicitly computes
the average of the ICEs by design, it is not compatible with ICE and
thus `kind` must be `'average'`.
- `'brute'` is supported for any estimator, but is more
computationally intensive.
- `'auto'`: the `'recursion'` is used for estimators that support it,
and `'brute'` is used otherwise. If `sample_weight` is not `None`,
then `'brute'` is used regardless of the estimator.
Please see :ref:`this note <pdp_method_differences>` for
differences between the `'brute'` and `'recursion'` method.
n_jobs : int, default=None
The number of CPUs to use to compute the partial dependences.
Computation is parallelized over features specified by the `features`
parameter.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : int, default=0
Verbose output during PD computations.
line_kw : dict, default=None
Dict with keywords passed to the ``matplotlib.pyplot.plot`` call.
For one-way partial dependence plots. It can be used to define common
properties for both `ice_lines_kw` and `pdp_line_kw`.
ice_lines_kw : dict, default=None
Dictionary with keywords passed to the `matplotlib.pyplot.plot` call.
For ICE lines in the one-way partial dependence plots.
The key value pairs defined in `ice_lines_kw` takes priority over
`line_kw`.
pd_line_kw : dict, default=None
Dictionary with keywords passed to the `matplotlib.pyplot.plot` call.
For partial dependence in one-way partial dependence plots.
The key value pairs defined in `pd_line_kw` takes priority over
`line_kw`.
contour_kw : dict, default=None
Dict with keywords passed to the ``matplotlib.pyplot.contourf`` call.
For two-way partial dependence plots.
ax : Matplotlib axes or array-like of Matplotlib axes, default=None
- If a single axis is passed in, it is treated as a bounding axes
and a grid of partial dependence plots will be drawn within
these bounds. The `n_cols` parameter controls the number of
columns in the grid.
- If an array-like of axes are passed in, the partial dependence
plots will be drawn directly into these axes.
- If `None`, a figure and a bounding axes is created and treated
as the single axes case.
kind : {'average', 'individual', 'both'}, default='average'
Whether to plot the partial dependence averaged across all the samples
in the dataset or one line per sample or both.
- ``kind='average'`` results in the traditional PD plot;
- ``kind='individual'`` results in the ICE plot.
Note that the fast `method='recursion'` option is only available for
`kind='average'` and `sample_weights=None`. Computing individual
dependencies and doing weighted averages requires using the slower
`method='brute'`.
centered : bool, default=False
If `True`, the ICE and PD lines will start at the origin of the
y-axis. By default, no centering is done.
.. versionadded:: 1.1
subsample : float, int or None, default=1000
Sampling for ICE curves when `kind` is 'individual' or 'both'.
If `float`, should be between 0.0 and 1.0 and represent the proportion
of the dataset to be used to plot ICE curves. If `int`, represents the
absolute number samples to use.
Note that the full dataset is still used to calculate averaged partial
dependence when `kind='both'`.
random_state : int, RandomState instance or None, default=None
Controls the randomness of the selected samples when subsamples is not
`None` and `kind` is either `'both'` or `'individual'`.
See :term:`Glossary <random_state>` for details.
Returns
-------
display : :class:`~sklearn.inspection.PartialDependenceDisplay`
See Also
--------
partial_dependence : Compute Partial Dependence values.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> from sklearn.inspection import PartialDependenceDisplay
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> PartialDependenceDisplay.from_estimator(clf, X, [0, (0, 1)])
<...>
>>> plt.show()
"""
check_matplotlib_support(f"{cls.__name__}.from_estimator")
import matplotlib.pyplot as plt
# set target_idx for multi-class estimators
if hasattr(estimator, "classes_") and np.size(estimator.classes_) > 2:
if target is None:
raise ValueError("target must be specified for multi-class")
target_idx = np.searchsorted(estimator.classes_, target)
if (
not (0 <= target_idx < len(estimator.classes_))
or estimator.classes_[target_idx] != target
):
raise ValueError("target not in est.classes_, got {}".format(target))
else:
# regression and binary classification
target_idx = 0
# Use check_array only on lists and other non-array-likes / sparse. Do not
# convert DataFrame into a NumPy array.
if not (hasattr(X, "__array__") or sparse.issparse(X)):
X = check_array(X, ensure_all_finite="allow-nan", dtype=object)
n_features = X.shape[1]
feature_names = _check_feature_names(X, feature_names)
# expand kind to always be a list of str
kind_ = [kind] * len(features) if isinstance(kind, str) else kind
if len(kind_) != len(features):
raise ValueError(
"When `kind` is provided as a list of strings, it should contain "
f"as many elements as `features`. `kind` contains {len(kind_)} "
f"element(s) and `features` contains {len(features)} element(s)."
)
# convert features into a seq of int tuples
tmp_features, ice_for_two_way_pd = [], []
for kind_plot, fxs in zip(kind_, features):
if isinstance(fxs, (numbers.Integral, str)):
fxs = (fxs,)
try:
fxs = tuple(
_get_feature_index(fx, feature_names=feature_names) for fx in fxs
)
except TypeError as e:
raise ValueError(
"Each entry in features must be either an int, "
"a string, or an iterable of size at most 2."
) from e
if not 1 <= np.size(fxs) <= 2:
raise ValueError(
"Each entry in features must be either an int, "
"a string, or an iterable of size at most 2."
)
# store the information if 2-way PD was requested with ICE to later
# raise a ValueError with an exhaustive list of problematic
# settings.
ice_for_two_way_pd.append(kind_plot != "average" and np.size(fxs) > 1)
tmp_features.append(fxs)
if any(ice_for_two_way_pd):
# raise an error and be specific regarding the parameter values
# when 1- and 2-way PD were requested
kind_ = [
"average" if forcing_average else kind_plot
for forcing_average, kind_plot in zip(ice_for_two_way_pd, kind_)
]
raise ValueError(
"ICE plot cannot be rendered for 2-way feature interactions. "
"2-way feature interactions mandates PD plots using the "
"'average' kind: "
f"features={features!r} should be configured to use "
f"kind={kind_!r} explicitly."
)
features = tmp_features
if categorical_features is None:
is_categorical = [
(False,) if len(fxs) == 1 else (False, False) for fxs in features
]
else:
# we need to create a boolean indicator of which features are
# categorical from the categorical_features list.
categorical_features = np.asarray(categorical_features)
if categorical_features.dtype.kind == "b":
# categorical features provided as a list of boolean
if categorical_features.size != n_features:
raise ValueError(
"When `categorical_features` is a boolean array-like, "
"the array should be of shape (n_features,). Got "
f"{categorical_features.size} elements while `X` contains "
f"{n_features} features."
)
is_categorical = [
tuple(categorical_features[fx] for fx in fxs) for fxs in features
]
elif categorical_features.dtype.kind in ("i", "O", "U"):
# categorical features provided as a list of indices or feature names
categorical_features_idx = [
_get_feature_index(cat, feature_names=feature_names)
for cat in categorical_features
]
is_categorical = [
tuple([idx in categorical_features_idx for idx in fxs])
for fxs in features
]
else:
raise ValueError(
"Expected `categorical_features` to be an array-like of boolean,"
f" integer, or string. Got {categorical_features.dtype} instead."
)
for cats in is_categorical:
if np.size(cats) == 2 and (cats[0] != cats[1]):
raise ValueError(
"Two-way partial dependence plots are not supported for pairs"
" of continuous and categorical features."
)
# collect the indices of the categorical features targeted by the partial
# dependence computation
categorical_features_targeted = set(
[
fx
for fxs, cats in zip(features, is_categorical)
for fx in fxs
if any(cats)
]
)
if categorical_features_targeted:
min_n_cats = min(
[
len(_unique(_safe_indexing(X, idx, axis=1)))
for idx in categorical_features_targeted
]
)
if grid_resolution < min_n_cats:
raise ValueError(
"The resolution of the computed grid is less than the "
"minimum number of categories in the targeted categorical "
"features. Expect the `grid_resolution` to be greater than "
f"{min_n_cats}. Got {grid_resolution} instead."
)
for is_cat, kind_plot in zip(is_categorical, kind_):
if any(is_cat) and kind_plot != "average":
raise ValueError(
"It is not possible to display individual effects for"
" categorical features."
)
# Early exit if the axes does not have the correct number of axes
if ax is not None and not isinstance(ax, plt.Axes):
axes = np.asarray(ax, dtype=object)
if axes.size != len(features):
raise ValueError(
"Expected ax to have {} axes, got {}".format(
len(features), axes.size
)
)
for i in chain.from_iterable(features):
if i >= len(feature_names):
raise ValueError(
"All entries of features must be less than "
"len(feature_names) = {0}, got {1}.".format(len(feature_names), i)
)
if isinstance(subsample, numbers.Integral):
if subsample <= 0:
raise ValueError(
f"When an integer, subsample={subsample} should be positive."
)
elif isinstance(subsample, numbers.Real):
if subsample <= 0 or subsample >= 1:
raise ValueError(
f"When a floating-point, subsample={subsample} should be in "
"the (0, 1) range."
)
# compute predictions and/or averaged predictions
pd_results = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(
estimator,
X,
fxs,
sample_weight=sample_weight,
feature_names=feature_names,
categorical_features=categorical_features,
response_method=response_method,
method=method,
grid_resolution=grid_resolution,
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/inspection/_plot/__init__.py | sklearn/inspection/_plot/__init__.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/inspection/_plot/tests/test_plot_partial_dependence.py | sklearn/inspection/_plot/tests/test_plot_partial_dependence.py | import numpy as np
import pytest
from numpy.testing import assert_allclose
from scipy.stats.mstats import mquantiles
from sklearn.compose import make_column_transformer
from sklearn.datasets import (
load_diabetes,
load_iris,
make_classification,
make_regression,
)
from sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor
from sklearn.inspection import PartialDependenceDisplay
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.utils._testing import _convert_container
@pytest.fixture(scope="module")
def diabetes():
    """Return the diabetes dataset truncated to its first 50 samples for speed."""
    dataset = load_diabetes()
    n_kept = 50
    dataset.data = dataset.data[:n_kept]
    dataset.target = dataset.target[:n_kept]
    return dataset
@pytest.fixture(scope="module")
def clf_diabetes(diabetes):
    """Return a small gradient-boosting regressor fitted on the diabetes data."""
    model = GradientBoostingRegressor(n_estimators=10, random_state=1)
    # `fit` returns the estimator itself, so this is equivalent to
    # fitting and returning in two statements.
    return model.fit(diabetes.data, diabetes.target)
def custom_values_helper(feature, grid_resolution):
    """Return an evenly spaced grid spanning the [5%, 95%] quantile range of *feature*."""
    lower, upper = mquantiles(feature, (0.05, 0.95), axis=0)
    return np.linspace(lower, upper, num=grid_resolution, endpoint=True)
@pytest.mark.filterwarnings("ignore:A Bunch will be returned")
@pytest.mark.parametrize("grid_resolution", [10, 20])
@pytest.mark.parametrize("use_custom_values", [True, False])
def test_plot_partial_dependence(
    use_custom_values,
    grid_resolution,
    pyplot,
    clf_diabetes,
    diabetes,
):
    """Check the artists and metadata produced for 1-way and 2-way PDPs.

    Plots features 0, 2 and the pair (0, 2), with and without explicit
    `custom_values` grids, and verifies the display's axes/lines/contours
    arrays, decile lines, stored deciles, and axis labels.
    """
    # Test partial dependence plot function.
    # Use columns 0 & 2 as 1 is not quantitative (sex)
    feature_names = diabetes.feature_names
    custom_values = None
    if use_custom_values:
        custom_values = {
            0: custom_values_helper(diabetes.data[:, 0], grid_resolution),
            2: custom_values_helper(diabetes.data[:, 2], grid_resolution),
        }
    disp = PartialDependenceDisplay.from_estimator(
        clf_diabetes,
        diabetes.data,
        [0, 2, (0, 2)],
        grid_resolution=grid_resolution,
        feature_names=feature_names,
        contour_kw={"cmap": "jet"},
        custom_values=custom_values,
    )
    # The display draws into the current figure; 1 bounding axes + 3 plot axes.
    fig = pyplot.gcf()
    axs = fig.get_axes()
    assert disp.figure_ is fig
    assert len(axs) == 4
    assert disp.bounding_ax_ is not None
    assert disp.axes_.shape == (1, 3)
    assert disp.lines_.shape == (1, 3)
    assert disp.contours_.shape == (1, 3)
    assert disp.deciles_vlines_.shape == (1, 3)
    assert disp.deciles_hlines_.shape == (1, 3)
    # Third plot is the 2-way PDP: contour instead of a line, and vice versa.
    assert disp.lines_[0, 2] is None
    assert disp.contours_[0, 0] is None
    assert disp.contours_[0, 1] is None
    # deciles lines: always show on xaxis, only show on yaxis if 2-way PDP
    for i in range(3):
        assert disp.deciles_vlines_[0, i] is not None
    assert disp.deciles_hlines_[0, 0] is None
    assert disp.deciles_hlines_[0, 1] is None
    assert disp.deciles_hlines_[0, 2] is not None
    assert disp.features == [(0,), (2,), (0, 2)]
    assert np.all(disp.feature_names == feature_names)
    # Deciles are stored only for the targeted features (0 and 2).
    assert len(disp.deciles) == 2
    for i in [0, 2]:
        assert_allclose(
            disp.deciles[i],
            mquantiles(diabetes.data[:, i], prob=np.arange(0.1, 1.0, 0.1)),
        )
    # Only the first column of the grid carries the shared y-axis label.
    single_feature_positions = [(0, (0, 0)), (2, (0, 1))]
    expected_ylabels = ["Partial dependence", ""]
    for i, (feat_col, pos) in enumerate(single_feature_positions):
        ax = disp.axes_[pos]
        assert ax.get_ylabel() == expected_ylabels[i]
        assert ax.get_xlabel() == diabetes.feature_names[feat_col]
        line = disp.lines_[pos]
        avg_preds = disp.pd_results[i]
        assert avg_preds.average.shape == (1, grid_resolution)
        target_idx = disp.target_idx
        # The drawn line must match the computed grid and averaged predictions.
        line_data = line.get_data()
        assert_allclose(line_data[0], avg_preds["grid_values"][0])
        assert_allclose(line_data[1], avg_preds.average[target_idx].ravel())
    # two feature position
    ax = disp.axes_[0, 2]
    # NOTE(review): "coutour" is a typo for "contour" (local name only, harmless).
    coutour = disp.contours_[0, 2]
    assert coutour.get_cmap().name == "jet"
    assert ax.get_xlabel() == diabetes.feature_names[0]
    assert ax.get_ylabel() == diabetes.feature_names[2]
@pytest.mark.parametrize(
    "kind, centered, subsample, shape",
    [
        ("average", False, None, (1, 3)),
        ("individual", False, None, (1, 3, 50)),
        ("both", False, None, (1, 3, 51)),
        ("individual", False, 20, (1, 3, 20)),
        ("both", False, 20, (1, 3, 21)),
        ("individual", False, 0.5, (1, 3, 25)),
        ("both", False, 0.5, (1, 3, 26)),
        ("average", True, None, (1, 3)),
        ("individual", True, None, (1, 3, 50)),
        ("both", True, None, (1, 3, 51)),
        ("individual", True, 20, (1, 3, 20)),
        ("both", True, 20, (1, 3, 21)),
    ],
)
def test_plot_partial_dependence_kind(
    pyplot,
    kind,
    centered,
    subsample,
    shape,
    clf_diabetes,
    diabetes,
):
    """Check the shape of `lines_` for each kind/centered/subsample combination."""
    disp = PartialDependenceDisplay.from_estimator(
        clf_diabetes,
        diabetes.data,
        [0, 1, 2],
        kind=kind,
        centered=centered,
        subsample=subsample,
    )
    assert disp.axes_.shape == (1, 3)
    assert disp.lines_.shape == shape
    assert disp.contours_.shape == (1, 3)
    # Only one-way PDPs were requested, so no contour artists exist.
    for col in range(3):
        assert disp.contours_[0, col] is None
    # With centering, every drawn line must start at y == 0.
    first_y_values = [
        line._y[0] for line in disp.lines_.ravel() if line is not None
    ]
    if centered:
        assert all(y == 0.0 for y in first_y_values)
    else:
        assert all(y != 0.0 for y in first_y_values)
@pytest.mark.parametrize(
    "input_type, feature_names_type",
    [
        ("dataframe", None),
        ("dataframe", "list"),
        ("list", "list"),
        ("array", "list"),
        ("dataframe", "array"),
        ("list", "array"),
        ("array", "array"),
        ("dataframe", "series"),
        ("list", "series"),
        ("array", "series"),
        ("dataframe", "index"),
        ("list", "index"),
        ("array", "index"),
    ],
)
@pytest.mark.parametrize("use_custom_values", [True, False])
def test_plot_partial_dependence_str_features(
    pyplot,
    use_custom_values,
    clf_diabetes,
    diabetes,
    input_type,
    feature_names_type,
):
    """Check string feature specs across X container / feature-name types.

    Requests a 2-way PDP ("age", "bmi") and a 1-way PDP ("bmi") in a single
    column layout, and verifies the display's artist arrays, line data, and
    axis labels.
    """
    age = diabetes.data[:, diabetes.feature_names.index("age")]
    bmi = diabetes.data[:, diabetes.feature_names.index("bmi")]
    # Convert X to the requested container type.
    if input_type == "dataframe":
        pd = pytest.importorskip("pandas")
        X = pd.DataFrame(diabetes.data, columns=diabetes.feature_names)
    elif input_type == "list":
        X = diabetes.data.tolist()
    else:
        X = diabetes.data
    if feature_names_type is None:
        feature_names = None
    else:
        feature_names = _convert_container(diabetes.feature_names, feature_names_type)
    grid_resolution = 25
    custom_values = None
    if use_custom_values:
        custom_values = {
            "age": custom_values_helper(age, grid_resolution),
            "bmi": custom_values_helper(bmi, grid_resolution),
        }
    # check with str features and array feature names and single column
    disp = PartialDependenceDisplay.from_estimator(
        clf_diabetes,
        X,
        [("age", "bmi"), "bmi"],
        grid_resolution=grid_resolution,
        feature_names=feature_names,
        n_cols=1,
        line_kw={"alpha": 0.8},
        custom_values=custom_values,
    )
    fig = pyplot.gcf()
    axs = fig.get_axes()
    # 1 bounding axes + 2 plot axes.
    assert len(axs) == 3
    assert disp.figure_ is fig
    assert disp.axes_.shape == (2, 1)
    assert disp.lines_.shape == (2, 1)
    assert disp.contours_.shape == (2, 1)
    assert disp.deciles_vlines_.shape == (2, 1)
    assert disp.deciles_hlines_.shape == (2, 1)
    # Row 0 is the 2-way plot (contour, decile lines on both axes);
    # row 1 is the 1-way plot (line, decile lines on x only).
    assert disp.lines_[0, 0] is None
    assert disp.deciles_vlines_[0, 0] is not None
    assert disp.deciles_hlines_[0, 0] is not None
    assert disp.contours_[1, 0] is None
    assert disp.deciles_hlines_[1, 0] is None
    assert disp.deciles_vlines_[1, 0] is not None
    # line
    ax = disp.axes_[1, 0]
    assert ax.get_xlabel() == "bmi"
    assert ax.get_ylabel() == "Partial dependence"
    line = disp.lines_[1, 0]
    avg_preds = disp.pd_results[1]
    target_idx = disp.target_idx
    # `line_kw` must propagate to the drawn line.
    assert line.get_alpha() == 0.8
    line_data = line.get_data()
    assert_allclose(line_data[0], avg_preds["grid_values"][0])
    assert_allclose(line_data[1], avg_preds.average[target_idx].ravel())
    # contour
    ax = disp.axes_[0, 0]
    assert ax.get_xlabel() == "age"
    assert ax.get_ylabel() == "bmi"
@pytest.mark.filterwarnings("ignore:A Bunch will be returned")
@pytest.mark.parametrize("use_custom_values", [True, False])
def test_plot_partial_dependence_custom_axes(
    use_custom_values, pyplot, clf_diabetes, diabetes
):
    """Check plotting into a user-supplied list of matplotlib axes."""
    grid_resolution = 25
    fig, (line_ax, contour_ax) = pyplot.subplots(1, 2)
    age_col = diabetes.data[:, diabetes.feature_names.index("age")]
    bmi_col = diabetes.data[:, diabetes.feature_names.index("bmi")]
    custom_values = (
        {
            "age": custom_values_helper(age_col, grid_resolution),
            "bmi": custom_values_helper(bmi_col, grid_resolution),
        }
        if use_custom_values
        else None
    )
    disp = PartialDependenceDisplay.from_estimator(
        clf_diabetes,
        diabetes.data,
        ["age", ("age", "bmi")],
        grid_resolution=grid_resolution,
        feature_names=diabetes.feature_names,
        ax=[line_ax, contour_ax],
        custom_values=custom_values,
    )
    # When axes are passed explicitly there is no bounding axes and the
    # provided axes are used as-is, in order.
    assert fig is disp.figure_
    assert disp.bounding_ax_ is None
    assert disp.axes_.shape == (2,)
    assert disp.axes_[0] is line_ax
    assert disp.axes_[1] is contour_ax
    # One-way plot: verify labels and that the line matches the PD results.
    one_way_ax = disp.axes_[0]
    assert one_way_ax.get_xlabel() == "age"
    assert one_way_ax.get_ylabel() == "Partial dependence"
    pd_line = disp.lines_[0]
    pd_result = disp.pd_results[0]
    xs, ys = pd_line.get_data()
    assert_allclose(xs, pd_result["grid_values"][0])
    assert_allclose(ys, pd_result.average[disp.target_idx].ravel())
    # Two-way plot: verify axis labels.
    two_way_ax = disp.axes_[1]
    assert two_way_ax.get_xlabel() == "age"
    assert two_way_ax.get_ylabel() == "bmi"
@pytest.mark.parametrize(
    "kind, lines", [("average", 1), ("individual", 50), ("both", 51)]
)
@pytest.mark.parametrize("use_custom_values", [True, False])
def test_plot_partial_dependence_passing_numpy_axes(
    pyplot,
    clf_diabetes,
    diabetes,
    use_custom_values,
    kind,
    lines,
):
    """Passing `ax=disp1.axes_` (ndarray of Axes) draws on the same axes.

    The second display reuses the first display's axes, so the number of
    lines per subplot doubles.
    """
    grid_resolution = 25
    feature_names = diabetes.feature_names
    age = diabetes.data[:, diabetes.feature_names.index("age")]
    bmi = diabetes.data[:, diabetes.feature_names.index("bmi")]
    custom_values = None
    if use_custom_values:
        custom_values = {
            "age": custom_values_helper(age, grid_resolution),
            "bmi": custom_values_helper(bmi, grid_resolution),
        }
    disp1 = PartialDependenceDisplay.from_estimator(
        clf_diabetes,
        diabetes.data,
        ["age", "bmi"],
        kind=kind,
        grid_resolution=grid_resolution,
        feature_names=feature_names,
        custom_values=custom_values,
    )
    assert disp1.axes_.shape == (1, 2)
    # Only the leftmost subplot carries the shared y-label.
    assert disp1.axes_[0, 0].get_ylabel() == "Partial dependence"
    assert disp1.axes_[0, 1].get_ylabel() == ""
    assert len(disp1.axes_[0, 0].get_lines()) == lines
    assert len(disp1.axes_[0, 1].get_lines()) == lines

    lr = LinearRegression()
    lr.fit(diabetes.data, diabetes.target)

    disp2 = PartialDependenceDisplay.from_estimator(
        lr,
        diabetes.data,
        ["age", "bmi"],
        kind=kind,
        grid_resolution=grid_resolution,
        feature_names=feature_names,
        ax=disp1.axes_,
    )
    # Same axes are reused, hence the line count per subplot doubles.
    assert np.all(disp1.axes_ == disp2.axes_)
    assert len(disp2.axes_[0, 0].get_lines()) == 2 * lines
    assert len(disp2.axes_[0, 1].get_lines()) == 2 * lines
@pytest.mark.parametrize("nrows, ncols", [(2, 2), (3, 1)])
@pytest.mark.parametrize("use_custom_values", [True, False])
def test_plot_partial_dependence_incorrent_num_axes(
    pyplot,
    clf_diabetes,
    diabetes,
    use_custom_values,
    nrows,
    ncols,
):
    """A ValueError is raised when the number of axes passed via `ax` does
    not match the number of requested features (always 2 here).

    Checked both for `from_estimator` and for `Display.plot`, with `ax`
    given as a list, a tuple, and an ndarray of Axes.
    """
    grid_resolution = 5
    fig, axes = pyplot.subplots(nrows, ncols)
    axes_formats = [list(axes.ravel()), tuple(axes.ravel()), axes]

    msg = "Expected ax to have 2 axes, got {}".format(nrows * ncols)

    # Fix: the age/bmi/custom_values construction was duplicated verbatim
    # in the original; build it once.
    age = diabetes.data[:, diabetes.feature_names.index("age")]
    bmi = diabetes.data[:, diabetes.feature_names.index("bmi")]
    custom_values = None
    if use_custom_values:
        custom_values = {
            "age": custom_values_helper(age, grid_resolution),
            "bmi": custom_values_helper(bmi, grid_resolution),
        }

    disp = PartialDependenceDisplay.from_estimator(
        clf_diabetes,
        diabetes.data,
        ["age", "bmi"],
        grid_resolution=grid_resolution,
        feature_names=diabetes.feature_names,
        custom_values=custom_values,
    )

    for ax_format in axes_formats:
        with pytest.raises(ValueError, match=msg):
            PartialDependenceDisplay.from_estimator(
                clf_diabetes,
                diabetes.data,
                ["age", "bmi"],
                grid_resolution=grid_resolution,
                feature_names=diabetes.feature_names,
                ax=ax_format,
                custom_values=custom_values,
            )

        # with axes object
        with pytest.raises(ValueError, match=msg):
            disp.plot(ax=ax_format)
@pytest.mark.filterwarnings("ignore:A Bunch will be returned")
@pytest.mark.parametrize("use_custom_values", [True, False])
def test_plot_partial_dependence_with_same_axes(
    use_custom_values, pyplot, clf_diabetes, diabetes
):
    """Passing an already-used raw Axes to a second call must raise."""
    # The first call to plot_partial_dependence will create two new axes to
    # place in the space of the passed in axes, which results in a total of
    # three axes in the figure.
    # Currently the API does not allow for the second call to
    # plot_partial_dependence to use the same axes again, because it will
    # create two new axes in the space resulting in five axes. To get the
    # expected behavior one needs to pass the generated axes into the second
    # call:
    # disp1 = plot_partial_dependence(...)
    # disp2 = plot_partial_dependence(..., ax=disp1.axes_)

    grid_resolution = 25
    age = diabetes.data[:, diabetes.feature_names.index("age")]
    bmi = diabetes.data[:, diabetes.feature_names.index("bmi")]
    custom_values = None
    if use_custom_values:
        custom_values = {
            "age": custom_values_helper(age, grid_resolution),
            "bmi": custom_values_helper(bmi, grid_resolution),
        }
    fig, ax = pyplot.subplots()
    PartialDependenceDisplay.from_estimator(
        clf_diabetes,
        diabetes.data,
        ["age", "bmi"],
        grid_resolution=grid_resolution,
        feature_names=diabetes.feature_names,
        ax=ax,
        custom_values=custom_values,
    )

    msg = (
        "The ax was already used in another plot function, please set "
        "ax=display.axes_ instead"
    )

    with pytest.raises(ValueError, match=msg):
        PartialDependenceDisplay.from_estimator(
            clf_diabetes,
            diabetes.data,
            ["age", "bmi"],
            grid_resolution=grid_resolution,
            feature_names=diabetes.feature_names,
            custom_values=custom_values,
            ax=ax,
        )
@pytest.mark.filterwarnings("ignore:A Bunch will be returned")
@pytest.mark.parametrize("use_custom_values", [True, False])
def test_plot_partial_dependence_feature_name_reuse(
    use_custom_values, pyplot, clf_diabetes, diabetes
):
    """A second plot on the same axes keeps the first call's feature names."""
    # second call to plot does not change the feature names from the first
    # call
    grid_resolution = 10
    custom_values = None
    if use_custom_values:
        custom_values = {
            0: custom_values_helper(diabetes.data[:, 0], grid_resolution),
            1: custom_values_helper(diabetes.data[:, 1], grid_resolution),
        }
    feature_names = diabetes.feature_names
    disp = PartialDependenceDisplay.from_estimator(
        clf_diabetes,
        diabetes.data,
        [0, 1],
        grid_resolution=grid_resolution,
        feature_names=feature_names,
        custom_values=custom_values,
    )

    # Second call deliberately omits `feature_names`.
    PartialDependenceDisplay.from_estimator(
        clf_diabetes,
        diabetes.data,
        [0, 1],
        grid_resolution=grid_resolution,
        ax=disp.axes_,
        custom_values=custom_values,
    )

    for i, ax in enumerate(disp.axes_.ravel()):
        assert ax.get_xlabel() == feature_names[i]
@pytest.mark.filterwarnings("ignore:A Bunch will be returned")
@pytest.mark.parametrize("use_custom_values", [True, False])
def test_plot_partial_dependence_multiclass(use_custom_values, pyplot):
    """Check the display on multi-class input, with int and string labels."""
    grid_resolution = 25
    clf_int = GradientBoostingClassifier(n_estimators=10, random_state=1)
    iris = load_iris()
    custom_values = None
    if use_custom_values:
        custom_values = {
            0: custom_values_helper(iris.data[:, 0], grid_resolution),
            1: custom_values_helper(iris.data[:, 1], grid_resolution),
        }

    # Test partial dependence plot function on multi-class input.
    clf_int.fit(iris.data, iris.target)
    disp_target_0 = PartialDependenceDisplay.from_estimator(
        clf_int,
        iris.data,
        [0, 1],
        target=0,
        grid_resolution=grid_resolution,
        custom_values=custom_values,
    )
    assert disp_target_0.figure_ is pyplot.gcf()
    assert disp_target_0.axes_.shape == (1, 2)
    assert disp_target_0.lines_.shape == (1, 2)
    assert disp_target_0.contours_.shape == (1, 2)
    assert disp_target_0.deciles_vlines_.shape == (1, 2)
    assert disp_target_0.deciles_hlines_.shape == (1, 2)
    # One-way plots only: no contour artists are created.
    assert all(c is None for c in disp_target_0.contours_.flat)
    assert disp_target_0.target_idx == 0

    # now with symbol labels
    target = iris.target_names[iris.target]
    clf_symbol = GradientBoostingClassifier(n_estimators=10, random_state=1)
    clf_symbol.fit(iris.data, target)
    disp_symbol = PartialDependenceDisplay.from_estimator(
        clf_symbol,
        iris.data,
        [0, 1],
        target="setosa",
        grid_resolution=grid_resolution,
        custom_values=custom_values,
    )
    assert disp_symbol.figure_ is pyplot.gcf()
    assert disp_symbol.axes_.shape == (1, 2)
    assert disp_symbol.lines_.shape == (1, 2)
    assert disp_symbol.contours_.shape == (1, 2)
    assert disp_symbol.deciles_vlines_.shape == (1, 2)
    assert disp_symbol.deciles_hlines_.shape == (1, 2)
    assert all(c is None for c in disp_symbol.contours_.flat)
    assert disp_symbol.target_idx == 0

    # Integer and string targets must give identical partial dependences.
    for int_result, symbol_result in zip(
        disp_target_0.pd_results, disp_symbol.pd_results
    ):
        assert_allclose(int_result.average, symbol_result.average)
        assert_allclose(int_result["grid_values"], symbol_result["grid_values"])

    # check that the pd plots are different for another target
    disp_target_1 = PartialDependenceDisplay.from_estimator(
        clf_int,
        iris.data,
        [0, 3],
        target=1,
        grid_resolution=grid_resolution,
        custom_values=custom_values,
    )
    target_0_data_y = disp_target_0.lines_[0, 0].get_data()[1]
    target_1_data_y = disp_target_1.lines_[0, 0].get_data()[1]
    assert any(target_0_data_y != target_1_data_y)
# Shared fixture data: a 2-target regression problem used by the
# multi-output tests and error-message parametrizations below.
multioutput_regression_data = make_regression(n_samples=50, n_targets=2, random_state=0)
@pytest.mark.parametrize("target", [0, 1])
@pytest.mark.parametrize("use_custom_values", [True, False])
def test_plot_partial_dependence_multioutput(use_custom_values, pyplot, target):
    """Partial dependence plots work on a multi-output regression problem."""
    X, y = multioutput_regression_data
    estimator = LinearRegression().fit(X, y)

    resolution = 25
    custom_values = (
        {
            0: custom_values_helper(X[:, 0], resolution),
            1: custom_values_helper(X[:, 1], resolution),
        }
        if use_custom_values
        else None
    )
    disp = PartialDependenceDisplay.from_estimator(
        estimator,
        X,
        [0, 1],
        target=target,
        grid_resolution=resolution,
        custom_values=custom_values,
    )

    # Bounding axes plus the two subplots.
    assert len(pyplot.gcf().get_axes()) == 3
    assert disp.target_idx == target
    assert disp.bounding_ax_ is not None

    # Only the leftmost subplot carries the shared y-label.
    subplot_specs = zip([(0, 0), (0, 1)], ["Partial dependence", ""])
    for i, (position, ylabel) in enumerate(subplot_specs):
        subplot = disp.axes_[position]
        assert subplot.get_ylabel() == ylabel
        assert subplot.get_xlabel() == f"x{i}"
def test_plot_partial_dependence_dataframe(pyplot, clf_diabetes, diabetes):
    """Smoke test: `from_estimator` accepts a pandas DataFrame as input."""
    pd = pytest.importorskip("pandas")
    diabetes_df = pd.DataFrame(diabetes.data, columns=diabetes.feature_names)

    # Should not raise.
    PartialDependenceDisplay.from_estimator(
        clf_diabetes,
        diabetes_df,
        ["bp", "s1"],
        grid_resolution=25,
        feature_names=diabetes_df.columns.tolist(),
    )
# Shared fixture data: a default binary classification problem used by the
# error-message parametrizations below.
dummy_classification_data = make_classification(random_state=0)
# Each case: input data, kwargs for `from_estimator`, expected error message.
@pytest.mark.parametrize(
    "data, params, err_msg",
    [
        (
            multioutput_regression_data,
            {"target": None, "features": [0]},
            "target must be specified for multi-output",
        ),
        (
            multioutput_regression_data,
            {"target": -1, "features": [0]},
            r"target must be in \[0, n_tasks\]",
        ),
        (
            multioutput_regression_data,
            {"target": 100, "features": [0]},
            r"target must be in \[0, n_tasks\]",
        ),
        (
            dummy_classification_data,
            {"features": ["foobar"], "feature_names": None},
            "Feature 'foobar' not in feature_names",
        ),
        (
            dummy_classification_data,
            {"features": ["foobar"], "feature_names": ["abcd", "def"]},
            "Feature 'foobar' not in feature_names",
        ),
        (
            dummy_classification_data,
            {"features": [(1, 2, 3)]},
            "Each entry in features must be either an int, ",
        ),
        (
            dummy_classification_data,
            {"features": [1, {}]},
            "Each entry in features must be either an int, ",
        ),
        (
            dummy_classification_data,
            {"features": [tuple()]},
            "Each entry in features must be either an int, ",
        ),
        (
            dummy_classification_data,
            {"features": [123], "feature_names": ["blahblah"]},
            "All entries of features must be less than ",
        ),
        (
            dummy_classification_data,
            {"features": [0, 1, 2], "feature_names": ["a", "b", "a"]},
            "feature_names should not contain duplicates",
        ),
        (
            dummy_classification_data,
            {"features": [1, 2], "kind": ["both"]},
            "When `kind` is provided as a list of strings, it should contain",
        ),
        (
            dummy_classification_data,
            {"features": [1], "subsample": -1},
            "When an integer, subsample=-1 should be positive.",
        ),
        (
            dummy_classification_data,
            {"features": [1], "subsample": 1.2},
            r"When a floating-point, subsample=1.2 should be in the \(0, 1\) range",
        ),
        (
            dummy_classification_data,
            {"features": [1, 2], "categorical_features": [1.0, 2.0]},
            "Expected `categorical_features` to be an array-like of boolean,",
        ),
        (
            dummy_classification_data,
            {"features": [(1, 2)], "categorical_features": [2]},
            "Two-way partial dependence plots are not supported for pairs",
        ),
        (
            dummy_classification_data,
            {"features": [1], "categorical_features": [1], "kind": "individual"},
            "It is not possible to display individual effects",
        ),
    ],
)
def test_plot_partial_dependence_error(pyplot, data, params, err_msg):
    """Check informative ValueError messages for invalid `from_estimator` args."""
    X, y = data
    estimator = LinearRegression().fit(X, y)

    with pytest.raises(ValueError, match=err_msg):
        PartialDependenceDisplay.from_estimator(estimator, X, **params)
@pytest.mark.parametrize(
    "params, err_msg",
    [
        ({"target": 4, "features": [0]}, "target not in est.classes_, got 4"),
        ({"target": None, "features": [0]}, "target must be specified for multi-class"),
        (
            {"target": 1, "features": [4.5]},
            "Each entry in features must be either an int,",
        ),
    ],
)
def test_plot_partial_dependence_multiclass_error(pyplot, params, err_msg):
    """Check error messages for invalid `target`/`features` on multi-class data."""
    iris = load_iris()
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
    clf.fit(iris.data, iris.target)

    with pytest.raises(ValueError, match=err_msg):
        PartialDependenceDisplay.from_estimator(clf, iris.data, **params)
def test_plot_partial_dependence_does_not_override_ylabel(
    pyplot, clf_diabetes, diabetes
):
    """A ylabel set before plotting must be preserved.

    Non-regression test for
    https://github.com/scikit-learn/scikit-learn/issues/15772.
    """
    _, axes = pyplot.subplots(1, 2)
    labelled_ax, unlabelled_ax = axes
    labelled_ax.set_ylabel("Hello world")

    PartialDependenceDisplay.from_estimator(
        clf_diabetes, diabetes.data, [0, 1], ax=axes
    )

    # The custom label survives; the blank axis gets the default one.
    assert labelled_ax.get_ylabel() == "Hello world"
    assert unlabelled_ax.get_ylabel() == "Partial dependence"
@pytest.mark.parametrize(
    "categorical_features, array_type",
    [
        (["col_A", "col_C"], "dataframe"),
        ([0, 2], "array"),
        ([True, False, True], "array"),
    ],
)
def test_plot_partial_dependence_with_categorical(
    pyplot, categorical_features, array_type
):
    """Categorical features never create line/contour/decile artists; a
    single categorical feature is drawn with bars.
    """
    X = [[1, 1, "A"], [2, 0, "C"], [3, 2, "B"]]
    column_name = ["col_A", "col_B", "col_C"]
    X = _convert_container(X, array_type, columns_name=column_name)
    y = np.array([1.2, 0.5, 0.45]).T

    preprocessor = make_column_transformer((OneHotEncoder(), categorical_features))
    model = make_pipeline(preprocessor, LinearRegression())
    model.fit(X, y)

    # single feature
    disp = PartialDependenceDisplay.from_estimator(
        model,
        X,
        features=["col_C"],
        feature_names=column_name,
        categorical_features=categorical_features,
    )

    assert disp.figure_ is pyplot.gcf()
    assert disp.bars_.shape == (1, 1)
    assert disp.bars_[0][0] is not None
    assert disp.lines_.shape == (1, 1)
    assert disp.lines_[0][0] is None
    assert disp.contours_.shape == (1, 1)
    assert disp.contours_[0][0] is None
    assert disp.deciles_vlines_.shape == (1, 1)
    assert disp.deciles_vlines_[0][0] is None
    assert disp.deciles_hlines_.shape == (1, 1)
    assert disp.deciles_hlines_[0][0] is None
    assert disp.axes_[0, 0].get_legend() is None

    # interaction between two features
    disp = PartialDependenceDisplay.from_estimator(
        model,
        X,
        features=[("col_A", "col_C")],
        feature_names=column_name,
        categorical_features=categorical_features,
    )

    assert disp.figure_ is pyplot.gcf()
    assert disp.bars_.shape == (1, 1)
    assert disp.bars_[0][0] is None
    assert disp.lines_.shape == (1, 1)
    assert disp.lines_[0][0] is None
    assert disp.contours_.shape == (1, 1)
    assert disp.contours_[0][0] is None
    assert disp.deciles_vlines_.shape == (1, 1)
    assert disp.deciles_vlines_[0][0] is None
    assert disp.deciles_hlines_.shape == (1, 1)
    assert disp.deciles_hlines_[0][0] is None
    assert disp.axes_[0, 0].get_legend() is None
def test_plot_partial_dependence_legend(pyplot):
    """Only the subplot drawn with kind="both" gets an "average" legend."""
    pd = pytest.importorskip("pandas")
    X = pd.DataFrame(
        {
            "col_A": ["A", "B", "C"],
            "col_B": [1.0, 0.0, 2.0],
            "col_C": ["C", "B", "A"],
        }
    )
    y = np.array([1.2, 0.5, 0.45]).T

    categorical_features = ["col_A", "col_C"]
    preprocessor = make_column_transformer((OneHotEncoder(), categorical_features))
    model = make_pipeline(preprocessor, LinearRegression())
    model.fit(X, y)

    # kind is positional: "both" applies to col_B, "average" to col_C.
    disp = PartialDependenceDisplay.from_estimator(
        model,
        X,
        features=["col_B", "col_C"],
        categorical_features=categorical_features,
        kind=["both", "average"],
    )

    legend_text = disp.axes_[0, 0].get_legend().get_texts()
    assert len(legend_text) == 1
    assert legend_text[0].get_text() == "average"
    assert disp.axes_[0, 1].get_legend() is None
@pytest.mark.parametrize(
    "kind, expected_shape",
    [("average", (1, 2)), ("individual", (1, 2, 20)), ("both", (1, 2, 21))],
)
@pytest.mark.parametrize("use_custom_values", [True, False])
def test_plot_partial_dependence_subsampling(
    pyplot,
    clf_diabetes,
    diabetes,
    use_custom_values,
    kind,
    expected_shape,
):
    """Check that ICE curves are subsampled down to `subsample` samples.

    Non-regression test for
    https://github.com/scikit-learn/scikit-learn/pull/18359.
    """
    matplotlib = pytest.importorskip("matplotlib")
    grid_resolution = 25
    feature_names = diabetes.feature_names
    age = diabetes.data[:, diabetes.feature_names.index("age")]
    bmi = diabetes.data[:, diabetes.feature_names.index("bmi")]
    custom_values = None
    if use_custom_values:
        custom_values = {
            "age": custom_values_helper(age, grid_resolution),
            "bmi": custom_values_helper(bmi, grid_resolution),
        }
    disp1 = PartialDependenceDisplay.from_estimator(
        clf_diabetes,
        diabetes.data,
        ["age", "bmi"],
        kind=kind,
        grid_resolution=grid_resolution,
        feature_names=feature_names,
        subsample=20,
        random_state=0,
        custom_values=custom_values,
    )

    assert disp1.lines_.shape == expected_shape
    # Use a generator expression: no need to materialize a list for `all`
    # (flake8-comprehensions C419).
    assert all(
        isinstance(line, matplotlib.lines.Line2D) for line in disp1.lines_.ravel()
    )
# Cases: (kind, line_kw, expected legend label). `label=None` means no legend.
@pytest.mark.parametrize(
    "kind, line_kw, label",
    [
        ("individual", {}, None),
        ("individual", {"label": "xxx"}, None),
        ("average", {}, None),
        ("average", {"label": "xxx"}, "xxx"),
        ("both", {}, "average"),
        ("both", {"label": "xxx"}, "xxx"),
    ],
)
def test_partial_dependence_overwrite_labels(
    pyplot,
    clf_diabetes,
    diabetes,
    kind,
    line_kw,
    label,
):
    """Test that make sure that we can overwrite the label of the PDP plot"""
    disp = PartialDependenceDisplay.from_estimator(
        clf_diabetes,
        diabetes.data,
        [0, 2],
        grid_resolution=25,
        feature_names=diabetes.feature_names,
        kind=kind,
        line_kw=line_kw,
    )

    for ax in disp.axes_.ravel():
        if label is None:
            assert ax.get_legend() is None
        else:
            legend_text = ax.get_legend().get_texts()
            assert len(legend_text) == 1
            assert legend_text[0].get_text() == label
@pytest.mark.parametrize(
    "categorical_features, array_type",
    [
        (["col_A", "col_C"], "dataframe"),
        ([0, 2], "array"),
        ([True, False, True], "array"),
    ],
)
def test_grid_resolution_with_categorical(pyplot, categorical_features, array_type):
    """Check that we raise a ValueError when the grid_resolution is too small
    with respect to the number of categories in the targeted categorical
    features.
    """
    X = [["A", 1, "A"], ["B", 0, "C"], ["C", 2, "B"]]
    column_name = ["col_A", "col_B", "col_C"]
    X = _convert_container(X, array_type, columns_name=column_name)
    y = np.array([1.2, 0.5, 0.45]).T

    preprocessor = make_column_transformer((OneHotEncoder(), categorical_features))
    model = make_pipeline(preprocessor, LinearRegression())
    model.fit(X, y)

    err_msg = (
        "resolution of the computed grid is less than the minimum number of categories"
    )
    # col_C has 3 categories, so grid_resolution=2 is too small.
    with pytest.raises(ValueError, match=err_msg):
        PartialDependenceDisplay.from_estimator(
            model,
            X,
            features=["col_C"],
            feature_names=column_name,
            categorical_features=categorical_features,
            grid_resolution=2,
        )
@pytest.mark.parametrize("kind", ["individual", "average", "both"])
@pytest.mark.parametrize("centered", [True, False])
def test_partial_dependence_plot_limits_one_way(
pyplot, clf_diabetes, diabetes, kind, centered
):
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/inspection/_plot/tests/__init__.py | sklearn/inspection/_plot/tests/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/inspection/_plot/tests/test_boundary_decision_display.py | sklearn/inspection/_plot/tests/test_boundary_decision_display.py | import warnings
import numpy as np
import pytest
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.datasets import (
load_diabetes,
load_iris,
make_classification,
make_multilabel_classification,
)
from sklearn.ensemble import IsolationForest
from sklearn.inspection import DecisionBoundaryDisplay
from sklearn.inspection._plot.decision_boundary import _check_boundary_response_method
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import scale
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils._testing import (
_convert_container,
assert_allclose,
assert_array_equal,
)
from sklearn.utils.fixes import parse_version
# Shared 2D binary classification dataset used throughout this test module.
X, y = make_classification(
    n_informative=1,
    n_redundant=1,
    n_clusters_per_class=1,
    n_features=2,
    random_state=42,
)
def load_iris_2d_scaled():
    """Return the iris data standardized and reduced to its first two features."""
    features, target = load_iris(return_X_y=True)
    return scale(features)[:, :2], target
@pytest.fixture(scope="module")
def fitted_clf():
    """Module-scoped logistic regression fitted on the shared (X, y) data."""
    return LogisticRegression().fit(X, y)
def test_input_data_dimension(pyplot):
    """`from_estimator` rejects input that does not have exactly 2 features."""
    data, target = make_classification(n_samples=10, n_features=4, random_state=0)
    estimator = LogisticRegression().fit(data, target)

    expected_msg = "n_features must be equal to 2. Got 4 instead."
    with pytest.raises(ValueError, match=expected_msg):
        DecisionBoundaryDisplay.from_estimator(estimator=estimator, X=data)
def test_check_boundary_response_method_error():
    """Check error raised for multi-output multi-class classifiers by
    `_check_boundary_response_method`.
    """

    class MultiLabelClassifier:
        # Two arrays in classes_ marks the estimator as multi-label /
        # multi-output multi-class.
        classes_ = [np.array([0, 1]), np.array([0, 1])]

    err_msg = "Multi-label and multi-output multi-class classifiers are not supported"
    with pytest.raises(ValueError, match=err_msg):
        _check_boundary_response_method(MultiLabelClassifier(), "predict")
# Cases: (estimator, requested response_method, resolved prediction method).
# A list result means "try these, in order, first available wins".
@pytest.mark.parametrize(
    "estimator, response_method, expected_prediction_method",
    [
        (DecisionTreeRegressor(), "predict", "predict"),
        (DecisionTreeRegressor(), "auto", "predict"),
        (LogisticRegression().fit(*load_iris_2d_scaled()), "predict", "predict"),
        (
            LogisticRegression().fit(*load_iris_2d_scaled()),
            "auto",
            ["decision_function", "predict_proba", "predict"],
        ),
        (
            LogisticRegression().fit(*load_iris_2d_scaled()),
            "predict_proba",
            "predict_proba",
        ),
        (
            LogisticRegression().fit(*load_iris_2d_scaled()),
            "decision_function",
            "decision_function",
        ),
        (
            LogisticRegression().fit(X, y),
            "auto",
            ["decision_function", "predict_proba", "predict"],
        ),
        (LogisticRegression().fit(X, y), "predict", "predict"),
        (
            LogisticRegression().fit(X, y),
            ["predict_proba", "decision_function"],
            ["predict_proba", "decision_function"],
        ),
    ],
)
def test_check_boundary_response_method(
    estimator, response_method, expected_prediction_method
):
    """Check the behaviour of `_check_boundary_response_method` for the supported
    cases.
    """
    prediction_method = _check_boundary_response_method(estimator, response_method)
    assert prediction_method == expected_prediction_method
def test_multiclass_predict(pyplot):
    """Check multiclass `response_method="predict"` gives expected results."""
    grid_resolution = 10
    eps = 1.0
    X, y = make_classification(n_classes=3, n_informative=3, random_state=0)
    X = X[:, [0, 1]]
    lr = LogisticRegression(random_state=0).fit(X, y)
    # Fix: pass the local `eps` instead of a duplicated literal so the
    # margin used to build the expected grid below stays in sync.
    disp = DecisionBoundaryDisplay.from_estimator(
        lr, X, response_method="predict", grid_resolution=grid_resolution, eps=eps
    )

    # Recompute the expected grid with the same eps margin on each side.
    x0_min, x0_max = X[:, 0].min() - eps, X[:, 0].max() + eps
    x1_min, x1_max = X[:, 1].min() - eps, X[:, 1].max() + eps
    xx0, xx1 = np.meshgrid(
        np.linspace(x0_min, x0_max, grid_resolution),
        np.linspace(x1_min, x1_max, grid_resolution),
    )

    response = lr.predict(np.c_[xx0.ravel(), xx1.ravel()])
    assert_allclose(disp.response, response.reshape(xx0.shape))
    assert_allclose(disp.xx0, xx0)
    assert_allclose(disp.xx1, xx1)
@pytest.mark.parametrize(
    "kwargs, error_msg",
    [
        (
            {"plot_method": "hello_world"},
            r"plot_method must be one of contourf, contour, pcolormesh. Got hello_world"
            r" instead.",
        ),
        (
            {"grid_resolution": 1},
            r"grid_resolution must be greater than 1. Got 1 instead",
        ),
        (
            {"grid_resolution": -1},
            r"grid_resolution must be greater than 1. Got -1 instead",
        ),
        ({"eps": -1.1}, r"eps must be greater than or equal to 0. Got -1.1 instead"),
    ],
)
def test_input_validation_errors(pyplot, kwargs, error_msg, fitted_clf):
    """Check input validation of `from_estimator` keyword arguments."""
    with pytest.raises(ValueError, match=error_msg):
        DecisionBoundaryDisplay.from_estimator(fitted_clf, X, **kwargs)
@pytest.mark.parametrize(
    "kwargs, error_msg",
    [
        (
            {"multiclass_colors": {"dict": "not_list"}},
            "'multiclass_colors' must be a list or a str.",
        ),
        ({"multiclass_colors": "not_cmap"}, "it must be a valid Matplotlib colormap"),
        ({"multiclass_colors": ["red", "green"]}, "it must be of the same length"),
        (
            {"multiclass_colors": ["red", "green", "not color"]},
            "it can only contain valid Matplotlib color names",
        ),
    ],
)
def test_input_validation_errors_multiclass_colors(pyplot, kwargs, error_msg):
    """Check input validation for `multiclass_colors` in `from_estimator`."""
    # Three iris classes, so a valid color list must have length 3.
    X, y = load_iris_2d_scaled()
    clf = LogisticRegression().fit(X, y)
    with pytest.raises(ValueError, match=error_msg):
        DecisionBoundaryDisplay.from_estimator(clf, X, **kwargs)
def test_display_plot_input_error(pyplot, fitted_clf):
    """`Display.plot` rejects unknown `plot_method` values."""
    display = DecisionBoundaryDisplay.from_estimator(fitted_clf, X, grid_resolution=5)

    with pytest.raises(ValueError, match="plot_method must be 'contourf'"):
        display.plot(plot_method="hello_world")
@pytest.mark.parametrize(
    "response_method", ["auto", "predict", "predict_proba", "decision_function"]
)
@pytest.mark.parametrize("plot_method", ["contourf", "contour"])
def test_decision_boundary_display_classifier(
    pyplot, fitted_clf, response_method, plot_method
):
    """Check that decision boundary is correct."""
    fig, ax = pyplot.subplots()
    eps = 2.0
    disp = DecisionBoundaryDisplay.from_estimator(
        fitted_clf,
        X,
        grid_resolution=5,
        response_method=response_method,
        plot_method=plot_method,
        eps=eps,
        ax=ax,
    )
    # Both contourf and contour produce a QuadContourSet surface.
    assert isinstance(disp.surface_, pyplot.matplotlib.contour.QuadContourSet)
    assert disp.ax_ == ax
    assert disp.figure_ == fig

    x0, x1 = X[:, 0], X[:, 1]

    # The grid extends `eps` beyond the data range on each side.
    x0_min, x0_max = x0.min() - eps, x0.max() + eps
    x1_min, x1_max = x1.min() - eps, x1.max() + eps

    assert disp.xx0.min() == pytest.approx(x0_min)
    assert disp.xx0.max() == pytest.approx(x0_max)
    assert disp.xx1.min() == pytest.approx(x1_min)
    assert disp.xx1.max() == pytest.approx(x1_max)

    fig2, ax2 = pyplot.subplots()
    # change plotting method for second plot
    disp.plot(plot_method="pcolormesh", ax=ax2, shading="auto")
    assert isinstance(disp.surface_, pyplot.matplotlib.collections.QuadMesh)
    assert disp.ax_ == ax2
    assert disp.figure_ == fig2
@pytest.mark.parametrize("response_method", ["auto", "predict", "decision_function"])
@pytest.mark.parametrize("plot_method", ["contourf", "contour"])
def test_decision_boundary_display_outlier_detector(
    pyplot, response_method, plot_method
):
    """Check that decision boundary is correct for outlier detector."""
    fig, ax = pyplot.subplots()
    eps = 2.0
    # IsolationForest is unsupervised: fit on X only (`y` was accepted but
    # ignored by the original call).
    outlier_detector = IsolationForest(random_state=0).fit(X)
    disp = DecisionBoundaryDisplay.from_estimator(
        outlier_detector,
        X,
        grid_resolution=5,
        response_method=response_method,
        plot_method=plot_method,
        eps=eps,
        ax=ax,
    )
    assert isinstance(disp.surface_, pyplot.matplotlib.contour.QuadContourSet)
    assert disp.ax_ == ax
    assert disp.figure_ == fig

    x0, x1 = X[:, 0], X[:, 1]

    # The grid extends `eps` beyond the data range on each side.
    x0_min, x0_max = x0.min() - eps, x0.max() + eps
    x1_min, x1_max = x1.min() - eps, x1.max() + eps

    assert disp.xx0.min() == pytest.approx(x0_min)
    assert disp.xx0.max() == pytest.approx(x0_max)
    assert disp.xx1.min() == pytest.approx(x1_min)
    assert disp.xx1.max() == pytest.approx(x1_max)
@pytest.mark.parametrize("response_method", ["auto", "predict"])
@pytest.mark.parametrize("plot_method", ["contourf", "contour"])
def test_decision_boundary_display_regressor(pyplot, response_method, plot_method):
    """Check that we can display the decision boundary for a regressor."""
    X, y = load_diabetes(return_X_y=True)
    X = X[:, :2]
    tree = DecisionTreeRegressor().fit(X, y)
    fig, ax = pyplot.subplots()
    eps = 2.0
    disp = DecisionBoundaryDisplay.from_estimator(
        tree,
        X,
        response_method=response_method,
        ax=ax,
        eps=eps,
        plot_method=plot_method,
    )
    assert isinstance(disp.surface_, pyplot.matplotlib.contour.QuadContourSet)
    assert disp.ax_ == ax
    assert disp.figure_ == fig

    x0, x1 = X[:, 0], X[:, 1]

    # The grid extends `eps` beyond the data range on each side.
    x0_min, x0_max = x0.min() - eps, x0.max() + eps
    x1_min, x1_max = x1.min() - eps, x1.max() + eps

    assert disp.xx0.min() == pytest.approx(x0_min)
    assert disp.xx0.max() == pytest.approx(x0_max)
    assert disp.xx1.min() == pytest.approx(x1_min)
    assert disp.xx1.max() == pytest.approx(x1_max)

    fig2, ax2 = pyplot.subplots()
    # change plotting method for second plot
    disp.plot(plot_method="pcolormesh", ax=ax2, shading="auto")
    assert isinstance(disp.surface_, pyplot.matplotlib.collections.QuadMesh)
    assert disp.ax_ == ax2
    assert disp.figure_ == fig2
@pytest.mark.parametrize(
    "response_method, msg",
    [
        (
            "predict_proba",
            "MyClassifier has none of the following attributes: predict_proba",
        ),
        (
            "decision_function",
            "MyClassifier has none of the following attributes: decision_function",
        ),
        (
            "auto",
            (
                "MyClassifier has none of the following attributes: decision_function, "
                "predict_proba, predict"
            ),
        ),
        (
            "bad_method",
            "MyClassifier has none of the following attributes: bad_method",
        ),
    ],
)
def test_error_bad_response(pyplot, response_method, msg):
    """Check errors for bad response."""

    class MyClassifier(ClassifierMixin, BaseEstimator):
        # Minimal classifier exposing no prediction method at all.
        def fit(self, X, y):
            self.fitted_ = True
            self.classes_ = [0, 1]
            return self

    clf = MyClassifier().fit(X, y)

    with pytest.raises(AttributeError, match=msg):
        DecisionBoundaryDisplay.from_estimator(clf, X, response_method=response_method)
@pytest.mark.parametrize("response_method", ["auto", "predict", "predict_proba"])
def test_multilabel_classifier_error(pyplot, response_method):
    """A multilabel classifier must raise an informative error."""
    data, targets = make_multilabel_classification(random_state=0)
    data = data[:, :2]
    estimator = DecisionTreeClassifier().fit(data, targets)

    expected_msg = (
        "Multi-label and multi-output multi-class classifiers are not supported"
    )
    with pytest.raises(ValueError, match=expected_msg):
        DecisionBoundaryDisplay.from_estimator(
            estimator,
            data,
            response_method=response_method,
        )
@pytest.mark.parametrize("response_method", ["auto", "predict", "predict_proba"])
def test_multi_output_multi_class_classifier_error(pyplot, response_method):
    """A multi-output multi-class classifier must raise an informative error."""
    data = np.asarray([[0, 1], [1, 2]])
    targets = np.asarray([["tree", "cat"], ["cat", "tree"]])
    estimator = DecisionTreeClassifier().fit(data, targets)

    expected_msg = (
        "Multi-label and multi-output multi-class classifiers are not supported"
    )
    with pytest.raises(ValueError, match=expected_msg):
        DecisionBoundaryDisplay.from_estimator(
            estimator,
            data,
            response_method=response_method,
        )
def test_multioutput_regressor_error(pyplot):
    """A multi-output regressor must raise an informative error."""
    data = np.asarray([[0, 1], [1, 2]])
    targets = np.asarray([[0, 1], [4, 1]])
    regressor = DecisionTreeRegressor().fit(data, targets)

    with pytest.raises(ValueError, match="Multi-output regressors are not supported"):
        DecisionBoundaryDisplay.from_estimator(
            regressor, data, response_method="predict"
        )
@pytest.mark.parametrize(
    "response_method",
    ["predict_proba", "decision_function", ["predict_proba", "predict"]],
)
def test_regressor_unsupported_response(pyplot, response_method):
    """Check the error raised for classifier-only response methods on a
    regressor.
    """
    X, y = load_diabetes(return_X_y=True)
    X = X[:, :2]
    tree = DecisionTreeRegressor().fit(X, y)
    err_msg = "should either be a classifier to be used with response_method"
    with pytest.raises(ValueError, match=err_msg):
        DecisionBoundaryDisplay.from_estimator(tree, X, response_method=response_method)
@pytest.mark.filterwarnings(
    # We expect to raise the following warning because the classifier is fit on a
    # NumPy array
    "ignore:X has feature names, but LogisticRegression was fitted without"
)
def test_dataframe_labels_used(pyplot, fitted_clf):
    """Check that column names are used for pandas and can be overridden."""
    pd = pytest.importorskip("pandas")
    df = pd.DataFrame(X, columns=["col_x", "col_y"])

    # pandas column names are used by default
    _, ax = pyplot.subplots()
    disp = DecisionBoundaryDisplay.from_estimator(fitted_clf, df, ax=ax)
    assert ax.get_xlabel() == "col_x"
    assert ax.get_ylabel() == "col_y"

    # second call to plot will have the names
    fig, ax = pyplot.subplots()
    disp.plot(ax=ax)
    assert ax.get_xlabel() == "col_x"
    assert ax.get_ylabel() == "col_y"

    # axes with a label will not get overridden
    fig, ax = pyplot.subplots()
    ax.set(xlabel="hello", ylabel="world")
    disp.plot(ax=ax)
    assert ax.get_xlabel() == "hello"
    assert ax.get_ylabel() == "world"

    # labels get overridden only if provided to the `plot` method
    disp.plot(ax=ax, xlabel="overwritten_x", ylabel="overwritten_y")
    assert ax.get_xlabel() == "overwritten_x"
    assert ax.get_ylabel() == "overwritten_y"

    # labels do not get inferred if provided to `from_estimator`
    _, ax = pyplot.subplots()
    disp = DecisionBoundaryDisplay.from_estimator(
        fitted_clf, df, ax=ax, xlabel="overwritten_x", ylabel="overwritten_y"
    )
    assert ax.get_xlabel() == "overwritten_x"
    assert ax.get_ylabel() == "overwritten_y"
def test_string_target(pyplot):
    """Check that decision boundary works with classifiers trained on string labels."""
    dataset = load_iris()
    features = dataset.data[:, [0, 1]]
    # Map integer targets to their names so the classifier is fit on strings.
    string_labels = dataset.target_names[dataset.target]
    classifier = LogisticRegression().fit(features, string_labels)
    # Smoke test: building the display must not raise.
    DecisionBoundaryDisplay.from_estimator(
        classifier, features, grid_resolution=5, response_method="predict"
    )
@pytest.mark.parametrize("constructor_name", ["pandas", "polars"])
def test_dataframe_support(pyplot, constructor_name):
    """Check that passing a dataframe at fit and to the Display does not
    raise warnings.

    Non-regression test for:
    * https://github.com/scikit-learn/scikit-learn/issues/23311
    * https://github.com/scikit-learn/scikit-learn/issues/28717
    """
    df = _convert_container(
        X, constructor_name=constructor_name, columns_name=["col_x", "col_y"]
    )
    estimator = LogisticRegression().fit(df, y)

    with warnings.catch_warnings():
        # no warnings linked to feature names validation should be raised
        warnings.simplefilter("error", UserWarning)
        DecisionBoundaryDisplay.from_estimator(estimator, df, response_method="predict")
@pytest.mark.parametrize("response_method", ["predict_proba", "decision_function"])
def test_class_of_interest_binary(pyplot, response_method):
    """Check the behaviour of passing `class_of_interest` for plotting the output of
    `predict_proba` and `decision_function` in the binary case.
    """
    iris = load_iris()
    # Restrict to the first two classes to get a binary problem.
    X = iris.data[:100, :2]
    y = iris.target[:100]
    assert_array_equal(np.unique(y), [0, 1])

    estimator = LogisticRegression().fit(X, y)
    # We will check that `class_of_interest=None` is equivalent to
    # `class_of_interest=estimator.classes_[1]`
    disp_default = DecisionBoundaryDisplay.from_estimator(
        estimator,
        X,
        response_method=response_method,
        class_of_interest=None,
    )
    disp_class_1 = DecisionBoundaryDisplay.from_estimator(
        estimator,
        X,
        response_method=response_method,
        class_of_interest=estimator.classes_[1],
    )

    assert_allclose(disp_default.response, disp_class_1.response)

    # we can check that `_get_response_values` modifies the response when targeting
    # the other class, i.e. 1 - p(y=1|x) for `predict_proba` and -decision_function
    # for `decision_function`.
    disp_class_0 = DecisionBoundaryDisplay.from_estimator(
        estimator,
        X,
        response_method=response_method,
        class_of_interest=estimator.classes_[0],
    )

    if response_method == "predict_proba":
        assert_allclose(disp_default.response, 1 - disp_class_0.response)
    else:
        assert response_method == "decision_function"
        assert_allclose(disp_default.response, -disp_class_0.response)
@pytest.mark.parametrize("response_method", ["predict_proba", "decision_function"])
def test_class_of_interest_multiclass(pyplot, response_method):
    """Check the behaviour of passing `class_of_interest` for plotting the output of
    `predict_proba` and `decision_function` in the multiclass case.
    """
    iris = load_iris()
    X = iris.data[:, :2]
    y = iris.target  # the target are numerical labels
    class_of_interest_idx = 2

    estimator = LogisticRegression().fit(X, y)
    disp = DecisionBoundaryDisplay.from_estimator(
        estimator,
        X,
        response_method=response_method,
        class_of_interest=class_of_interest_idx,
    )

    # we will check that we plot the expected values as response
    grid = np.concatenate([disp.xx0.reshape(-1, 1), disp.xx1.reshape(-1, 1)], axis=1)
    response = getattr(estimator, response_method)(grid)[:, class_of_interest_idx]
    assert_allclose(response.reshape(*disp.response.shape), disp.response)

    # make the same test but this time using target as strings
    y = iris.target_names[iris.target]
    estimator = LogisticRegression().fit(X, y)
    disp = DecisionBoundaryDisplay.from_estimator(
        estimator,
        X,
        response_method=response_method,
        class_of_interest=iris.target_names[class_of_interest_idx],
    )
    grid = np.concatenate([disp.xx0.reshape(-1, 1), disp.xx1.reshape(-1, 1)], axis=1)
    response = getattr(estimator, response_method)(grid)[:, class_of_interest_idx]
    assert_allclose(response.reshape(*disp.response.shape), disp.response)

    # check that we raise an error for unknown labels: the estimator is now fit
    # on string labels, so the integer index 2 is no longer a valid label.
    # this test should already be handled in `_get_response_values` but we can have
    # this test here as well
    err_msg = "class_of_interest=2 is not a valid label: It should be one of"
    with pytest.raises(ValueError, match=err_msg):
        DecisionBoundaryDisplay.from_estimator(
            estimator,
            X,
            response_method=response_method,
            class_of_interest=class_of_interest_idx,
        )
@pytest.mark.parametrize("response_method", ["predict_proba", "decision_function"])
def test_multiclass_plot_max_class(pyplot, response_method):
    """Check plot correct when plotting max multiclass class."""
    import matplotlib as mpl

    # In matplotlib < v3.5, default value of `pcolormesh(shading)` is 'flat', which
    # results in the last row and column being dropped. Thus older versions produce
    # a 99x99 grid, while newer versions produce a 100x100 grid.
    if parse_version(mpl.__version__) < parse_version("3.5"):
        pytest.skip("`pcolormesh` in Matplotlib >= 3.5 gives smaller grid size.")

    X, y = load_iris_2d_scaled()
    clf = LogisticRegression().fit(X, y)

    disp = DecisionBoundaryDisplay.from_estimator(
        clf,
        X,
        plot_method="pcolormesh",
        response_method=response_method,
    )

    grid = np.concatenate([disp.xx0.reshape(-1, 1), disp.xx1.reshape(-1, 1)], axis=1)
    response = getattr(clf, response_method)(grid).reshape(*disp.response.shape)
    assert_allclose(response, disp.response)

    # one surface is drawn per class
    assert len(disp.surface_) == len(clf.classes_)
    # Get which class has highest response and check it is plotted
    highest_class = np.argmax(response, axis=2)
    for idx, quadmesh in enumerate(disp.surface_):
        # Note quadmesh mask is True (i.e. masked) when `idx` is NOT the highest class
        assert_array_equal(
            highest_class != idx,
            quadmesh.get_array().mask.reshape(*highest_class.shape),
        )
@pytest.mark.parametrize(
    "multiclass_colors",
    [
        "plasma",
        "Blues",
        ["red", "green", "blue"],
    ],
)
@pytest.mark.parametrize("plot_method", ["contourf", "contour", "pcolormesh"])
def test_multiclass_colors_cmap(pyplot, plot_method, multiclass_colors):
    """Check correct cmap used for all `multiclass_colors` inputs."""
    import matplotlib as mpl

    if parse_version(mpl.__version__) < parse_version("3.5"):
        pytest.skip(
            "Matplotlib >= 3.5 is needed for `==` to check equivalence of colormaps"
        )

    X, y = load_iris_2d_scaled()
    clf = LogisticRegression().fit(X, y)

    disp = DecisionBoundaryDisplay.from_estimator(
        clf,
        X,
        plot_method=plot_method,
        multiclass_colors=multiclass_colors,
    )

    # Resolve the expected base color for each class, mirroring the three
    # accepted `multiclass_colors` formats (named cmap with `.colors`, named
    # cmap sampled over [0, 1], or an explicit list of colors).
    if multiclass_colors == "plasma":
        colors = mpl.pyplot.get_cmap(multiclass_colors, len(clf.classes_)).colors
    elif multiclass_colors == "Blues":
        cmap = mpl.pyplot.get_cmap(multiclass_colors, len(clf.classes_))
        colors = cmap(np.linspace(0, 1, len(clf.classes_)))
    else:
        colors = [mpl.colors.to_rgba(color) for color in multiclass_colors]

    if plot_method != "contour":
        # Filled plots use one white-to-base-color colormap per class.
        cmaps = [
            mpl.colors.LinearSegmentedColormap.from_list(
                f"colormap_{class_idx}", [(1.0, 1.0, 1.0, 1.0), (r, g, b, 1.0)]
            )
            for class_idx, (r, g, b, _) in enumerate(colors)
        ]
        for idx, quad in enumerate(disp.surface_):
            assert quad.cmap == cmaps[idx]
    else:
        assert_allclose(disp.surface_.colors, colors)
def test_cmap_and_colors_logic(pyplot):
    """Check the handling logic for `cmap` and `colors`."""
    X, y = load_iris_2d_scaled()
    clf = LogisticRegression().fit(X, y)

    # `multiclass_colors` takes precedence over both `cmap` and `colors`;
    # a warning tells the user which keyword argument was ignored.
    cases = [
        ({"cmap": "Blues"}, "'cmap' is ignored in favor of 'multiclass_colors'"),
        ({"colors": "blue"}, "'colors' is ignored in favor of 'multiclass_colors'"),
    ]
    for ignored_kwargs, warn_msg in cases:
        with pytest.warns(UserWarning, match=warn_msg):
            DecisionBoundaryDisplay.from_estimator(
                clf,
                X,
                multiclass_colors="plasma",
                **ignored_kwargs,
            )
def test_subclass_named_constructors_return_type_is_subclass(pyplot):
    """Check that named constructors return the correct type when subclassed.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/pull/27675
    """
    estimator = LogisticRegression().fit(X, y)

    class SubclassOfDisplay(DecisionBoundaryDisplay):
        pass

    # The `from_estimator` classmethod must instantiate the subclass, not the
    # parent display class.
    display = SubclassOfDisplay.from_estimator(estimator=estimator, X=X)
    assert isinstance(display, SubclassOfDisplay)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/inspection/tests/test_permutation_importance.py | sklearn/inspection/tests/test_permutation_importance.py | import numpy as np
import pytest
from joblib import parallel_backend
from numpy.testing import assert_allclose
from sklearn.compose import ColumnTransformer
from sklearn.datasets import (
load_diabetes,
load_iris,
make_classification,
make_regression,
)
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.impute import SimpleImputer
from sklearn.inspection import permutation_importance
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.metrics import (
get_scorer,
mean_squared_error,
r2_score,
)
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import KBinsDiscretizer, OneHotEncoder, StandardScaler, scale
from sklearn.utils._testing import _convert_container
@pytest.mark.parametrize("n_jobs", [1, 2])
@pytest.mark.parametrize("max_samples", [0.5, 1.0])
@pytest.mark.parametrize("sample_weight", [None, "ones"])
def test_permutation_importance_correlated_feature_regression(
    n_jobs, max_samples, sample_weight
):
    # Make sure that feature highly correlated to the target have a higher
    # importance
    rng = np.random.RandomState(42)
    n_repeats = 5

    X, y = load_diabetes(return_X_y=True)
    # Build a feature that is the target plus tiny noise, hence almost
    # perfectly correlated with it.
    y_with_little_noise = (y + rng.normal(scale=0.001, size=y.shape[0])).reshape(-1, 1)

    X = np.hstack([X, y_with_little_noise])

    # A vector of ones should behave the same as sample_weight=None.
    weights = np.ones_like(y) if sample_weight == "ones" else sample_weight
    clf = RandomForestRegressor(n_estimators=10, random_state=42)
    clf.fit(X, y)

    result = permutation_importance(
        clf,
        X,
        y,
        sample_weight=weights,
        n_repeats=n_repeats,
        random_state=rng,
        n_jobs=n_jobs,
        max_samples=max_samples,
    )

    assert result.importances.shape == (X.shape[1], n_repeats)

    # the correlated feature with y was added as the last column and should
    # have the highest importance
    assert np.all(result.importances_mean[-1] > result.importances_mean[:-1])
@pytest.mark.parametrize("n_jobs", [1, 2])
@pytest.mark.parametrize("max_samples", [0.5, 1.0])
def test_permutation_importance_correlated_feature_regression_pandas(
    n_jobs, max_samples
):
    pd = pytest.importorskip("pandas")

    # Make sure that feature highly correlated to the target have a higher
    # importance
    rng = np.random.RandomState(42)
    n_repeats = 5

    dataset = load_iris()
    X, y = dataset.data, dataset.target
    # Build a feature that is the target plus tiny noise, hence almost
    # perfectly correlated with it.
    y_with_little_noise = (y + rng.normal(scale=0.001, size=y.shape[0])).reshape(-1, 1)

    # Adds feature correlated with y as the last column
    X = pd.DataFrame(X, columns=dataset.feature_names)
    X["correlated_feature"] = y_with_little_noise

    clf = RandomForestClassifier(n_estimators=10, random_state=42)
    clf.fit(X, y)

    result = permutation_importance(
        clf,
        X,
        y,
        n_repeats=n_repeats,
        random_state=rng,
        n_jobs=n_jobs,
        max_samples=max_samples,
    )

    assert result.importances.shape == (X.shape[1], n_repeats)

    # the correlated feature with y was added as the last column and should
    # have the highest importance
    assert np.all(result.importances_mean[-1] > result.importances_mean[:-1])
@pytest.mark.parametrize("n_jobs", [1, 2])
@pytest.mark.parametrize("max_samples", [0.5, 1.0])
def test_robustness_to_high_cardinality_noisy_feature(n_jobs, max_samples, seed=42):
    # Permutation variable importance should not be affected by the high
    # cardinality bias of traditional feature importances, especially when
    # computed on a held-out test set:
    rng = np.random.RandomState(seed)
    n_repeats = 5
    n_samples = 1000
    n_classes = 5
    n_informative_features = 2
    n_noise_features = 1
    n_features = n_informative_features + n_noise_features

    # Generate a multiclass classification dataset and a set of informative
    # binary features that can be used to predict some classes of y exactly
    # while leaving some classes unexplained to make the problem harder.
    classes = np.arange(n_classes)
    y = rng.choice(classes, size=n_samples)
    X = np.hstack([(y == c).reshape(-1, 1) for c in classes[:n_informative_features]])
    X = X.astype(np.float32)

    # Not all target classes are explained by the binary class indicator
    # features:
    assert n_informative_features < n_classes

    # Add `n_noise_features` noisy features with high cardinality (numerical)
    # values that can be used to overfit the training data.
    X = np.concatenate([X, rng.randn(n_samples, n_noise_features)], axis=1)
    assert X.shape == (n_samples, n_features)

    # Split the dataset to be able to evaluate on a held-out test set. The
    # test size should be large enough for importance measurements to be
    # stable:
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.5, random_state=rng
    )
    clf = RandomForestClassifier(n_estimators=5, random_state=rng)
    clf.fit(X_train, y_train)

    # Variable importances computed by impurity decrease on the tree node
    # splits often use the noisy features in splits. This can give misleading
    # impression that high cardinality noisy variables are the most important:
    tree_importances = clf.feature_importances_
    informative_tree_importances = tree_importances[:n_informative_features]
    noisy_tree_importances = tree_importances[n_informative_features:]
    assert informative_tree_importances.max() < noisy_tree_importances.min()

    # Let's check that permutation-based feature importances do not have this
    # problem.
    r = permutation_importance(
        clf,
        X_test,
        y_test,
        n_repeats=n_repeats,
        random_state=rng,
        n_jobs=n_jobs,
        max_samples=max_samples,
    )

    assert r.importances.shape == (X.shape[1], n_repeats)

    # Split the importances between informative and noisy features
    informative_importances = r.importances_mean[:n_informative_features]
    noisy_importances = r.importances_mean[n_informative_features:]

    # Because we do not have a binary variable explaining each target classes,
    # the RF model will have to use the random variable to make some
    # (overfitting) splits (as max_depth is not set). Therefore the noisy
    # variables will be non-zero but with small values oscillating around
    # zero:
    assert max(np.abs(noisy_importances)) > 1e-7
    assert noisy_importances.max() < 0.05

    # The binary features correlated with y should have a higher importance
    # than the high cardinality noisy features.
    # The maximum test accuracy is 2 / 5 == 0.4, each informative feature
    # contributing approximately a bit more than 0.2 of accuracy.
    assert informative_importances.min() > 0.15
def test_permutation_importance_mixed_types():
    """Check permutation importances on a pipeline that imputes missing values."""
    rng = np.random.RandomState(42)
    n_repeats = 4

    # Last column is correlated with y
    X = np.array([[1.0, 2.0, 3.0, np.nan], [2, 1, 2, 1]]).T
    y = np.array([0, 1, 0, 1])

    clf = make_pipeline(SimpleImputer(), LogisticRegression(solver="lbfgs"))
    clf.fit(X, y)
    result = permutation_importance(clf, X, y, n_repeats=n_repeats, random_state=rng)

    assert result.importances.shape == (X.shape[1], n_repeats)

    # the correlated feature with y is the last column and should
    # have the highest importance
    assert np.all(result.importances_mean[-1] > result.importances_mean[:-1])

    # use another random state
    rng = np.random.RandomState(0)
    result2 = permutation_importance(clf, X, y, n_repeats=n_repeats, random_state=rng)
    assert result2.importances.shape == (X.shape[1], n_repeats)

    # a different seed must produce different permutations
    assert not np.allclose(result.importances, result2.importances)

    # the correlated feature with y is the last column and should
    # have the highest importance
    assert np.all(result2.importances_mean[-1] > result2.importances_mean[:-1])
def test_permutation_importance_mixed_types_pandas():
    """Check permutation importances on a dataframe with numeric and
    categorical columns preprocessed by a ColumnTransformer."""
    pd = pytest.importorskip("pandas")
    rng = np.random.RandomState(42)
    n_repeats = 5

    # Last column is correlated with y
    X = pd.DataFrame({"col1": [1.0, 2.0, 3.0, np.nan], "col2": ["a", "b", "a", "b"]})
    y = np.array([0, 1, 0, 1])

    num_preprocess = make_pipeline(SimpleImputer(), StandardScaler())
    preprocess = ColumnTransformer(
        [("num", num_preprocess, ["col1"]), ("cat", OneHotEncoder(), ["col2"])]
    )
    clf = make_pipeline(preprocess, LogisticRegression(solver="lbfgs"))
    clf.fit(X, y)

    result = permutation_importance(clf, X, y, n_repeats=n_repeats, random_state=rng)

    assert result.importances.shape == (X.shape[1], n_repeats)
    # the correlated feature with y is the last column and should
    # have the highest importance
    assert np.all(result.importances_mean[-1] > result.importances_mean[:-1])
def test_permutation_importance_linear_regression():
    """Check permutation importances of a linear model against the closed form.

    For a linear model fit on standardized data scored with
    `neg_mean_squared_error`, the expected importance of each feature is
    `2 * coef**2` (up to sampling noise).
    """
    X, y = make_regression(n_samples=500, n_features=10, random_state=0)

    X = scale(X)
    y = scale(y)

    lr = LinearRegression().fit(X, y)

    # this relationship can be computed in closed form
    expected_importances = 2 * lr.coef_**2
    results = permutation_importance(
        lr, X, y, n_repeats=50, scoring="neg_mean_squared_error"
    )
    assert_allclose(
        expected_importances, results.importances_mean, rtol=1e-1, atol=1e-6
    )
@pytest.mark.parametrize("max_samples", [500, 1.0])
def test_permutation_importance_equivalence_sequential_parallel(max_samples):
    # regression test to make sure that sequential and parallel calls will
    # output the same results.
    # Also tests that max_samples equal to number of samples is equivalent to 1.0
    X, y = make_regression(n_samples=500, n_features=10, random_state=0)
    lr = LinearRegression().fit(X, y)

    importance_sequential = permutation_importance(
        lr, X, y, n_repeats=5, random_state=0, n_jobs=1, max_samples=max_samples
    )

    # First check that the problem is structured enough and that the model is
    # complex enough to not yield trivial, constant importances:
    imp_min = importance_sequential["importances"].min()
    imp_max = importance_sequential["importances"].max()
    assert imp_max - imp_min > 0.3

    # Then actually check that parallelism does not impact the results,
    # either with shared memory (threading) or with isolated memory
    # via process-based parallelism using the default backend
    # ('loky' or 'multiprocessing') depending on the joblib version:

    # process-based parallelism (by default):
    importance_processes = permutation_importance(
        lr, X, y, n_repeats=5, random_state=0, n_jobs=2
    )
    assert_allclose(
        importance_processes["importances"], importance_sequential["importances"]
    )

    # thread-based parallelism:
    with parallel_backend("threading"):
        importance_threading = permutation_importance(
            lr, X, y, n_repeats=5, random_state=0, n_jobs=2
        )
    assert_allclose(
        importance_threading["importances"], importance_sequential["importances"]
    )
@pytest.mark.parametrize("n_jobs", [None, 1, 2])
@pytest.mark.parametrize("max_samples", [0.5, 1.0])
def test_permutation_importance_equivalence_array_dataframe(n_jobs, max_samples):
    # This test checks that the column shuffling logic has the same behavior
    # for both a dataframe and a simple numpy array.
    pd = pytest.importorskip("pandas")

    # regression test to make sure that sequential and parallel calls will
    # output the same results.
    X, y = make_regression(n_samples=100, n_features=5, random_state=0)
    X_df = pd.DataFrame(X)

    # Add a categorical feature that is statistically linked to y:
    binner = KBinsDiscretizer(
        n_bins=3,
        encode="ordinal",
        quantile_method="averaged_inverted_cdf",
    )
    cat_column = binner.fit_transform(y.reshape(-1, 1))

    # Concatenate the extra column to the numpy array: integers will be
    # cast to float values
    X = np.hstack([X, cat_column])
    assert X.dtype.kind == "f"

    # Insert extra column as a non-numpy-native dtype:
    cat_column = pd.Categorical(cat_column.ravel())
    new_col_idx = len(X_df.columns)
    X_df[new_col_idx] = cat_column
    assert X_df[new_col_idx].dtype == cat_column.dtype

    # Stitch an arbitrary index to the dataframe:
    X_df.index = np.arange(len(X_df)).astype(str)

    rf = RandomForestRegressor(n_estimators=5, max_depth=3, random_state=0)
    rf.fit(X, y)

    n_repeats = 3
    importance_array = permutation_importance(
        rf,
        X,
        y,
        n_repeats=n_repeats,
        random_state=0,
        n_jobs=n_jobs,
        max_samples=max_samples,
    )

    # First check that the problem is structured enough and that the model is
    # complex enough to not yield trivial, constant importances:
    imp_min = importance_array["importances"].min()
    imp_max = importance_array["importances"].max()
    assert imp_max - imp_min > 0.3

    # Now check that importances computed on dataframe match the values
    # of those computed on the array with the same data.
    importance_dataframe = permutation_importance(
        rf,
        X_df,
        y,
        n_repeats=n_repeats,
        random_state=0,
        n_jobs=n_jobs,
        max_samples=max_samples,
    )
    assert_allclose(
        importance_array["importances"], importance_dataframe["importances"]
    )
@pytest.mark.parametrize("input_type", ["array", "dataframe"])
def test_permutation_importance_large_memmaped_data(input_type):
    # Smoke, non-regression test for:
    # https://github.com/scikit-learn/scikit-learn/issues/15810
    n_samples, n_features = int(5e4), 4
    X, y = make_classification(
        n_samples=n_samples, n_features=n_features, random_state=0
    )
    assert X.nbytes > 1e6  # trigger joblib memmapping

    X = _convert_container(X, input_type)
    clf = DummyClassifier(strategy="prior").fit(X, y)

    # Actual smoke test: should not raise any error:
    n_repeats = 5
    r = permutation_importance(clf, X, y, n_repeats=n_repeats, n_jobs=2)

    # Auxiliary check: DummyClassifier is feature independent:
    # permuting a feature should not change the predictions
    expected_importances = np.zeros((n_features, n_repeats))
    assert_allclose(expected_importances, r.importances)
def test_permutation_importance_sample_weight():
    # Creating data with 2 features and 1000 samples, where the target
    # variable is a linear combination of the two features, such that
    # in half of the samples the impact of feature 1 is twice the impact of
    # feature 2, and vice versa on the other half of the samples.
    rng = np.random.RandomState(1)
    n_samples = 1000
    n_features = 2
    n_half_samples = n_samples // 2
    x = rng.normal(0.0, 0.001, (n_samples, n_features))
    y = np.zeros(n_samples)
    y[:n_half_samples] = 2 * x[:n_half_samples, 0] + x[:n_half_samples, 1]
    y[n_half_samples:] = x[n_half_samples:, 0] + 2 * x[n_half_samples:, 1]

    # Fitting linear regression with perfect prediction
    lr = LinearRegression(fit_intercept=False)
    lr.fit(x, y)

    # When all samples are weighted with the same weights, the ratio of
    # the two features importance should equal to 1 on expectation (when using
    # mean absolute error as the loss function).
    pi = permutation_importance(
        lr, x, y, random_state=1, scoring="neg_mean_absolute_error", n_repeats=200
    )
    x1_x2_imp_ratio_w_none = pi.importances_mean[0] / pi.importances_mean[1]
    assert x1_x2_imp_ratio_w_none == pytest.approx(1, 0.01)

    # When passing a vector of ones as the sample_weight, results should be
    # the same as in the case that sample_weight=None.
    w = np.ones(n_samples)
    pi = permutation_importance(
        lr,
        x,
        y,
        random_state=1,
        scoring="neg_mean_absolute_error",
        n_repeats=200,
        sample_weight=w,
    )
    x1_x2_imp_ratio_w_ones = pi.importances_mean[0] / pi.importances_mean[1]
    assert x1_x2_imp_ratio_w_ones == pytest.approx(x1_x2_imp_ratio_w_none, 0.01)

    # When the ratio between the weights of the first half of the samples and
    # the second half of the samples approaches to infinity, the ratio of
    # the two features importance should equal to 2 on expectation (when using
    # mean absolute error as the loss function).
    w = np.hstack([np.repeat(10.0**10, n_half_samples), np.repeat(1.0, n_half_samples)])
    lr.fit(x, y, w)
    pi = permutation_importance(
        lr,
        x,
        y,
        random_state=1,
        scoring="neg_mean_absolute_error",
        n_repeats=200,
        sample_weight=w,
    )
    x1_x2_imp_ratio_w = pi.importances_mean[0] / pi.importances_mean[1]
    assert x1_x2_imp_ratio_w / x1_x2_imp_ratio_w_none == pytest.approx(2, 0.01)
def test_permutation_importance_no_weights_scoring_function():
    """Check the behavior with a scorer that does not accept sample_weight."""
    # Creating a scorer function that does not take sample_weight
    def my_scorer(estimator, X, y):
        return 1

    # Creating some data and estimator for the permutation test
    x = np.array([[1, 2], [3, 4]])
    y = np.array([1, 2])
    w = np.array([1, 1])
    lr = LinearRegression()
    lr.fit(x, y)

    # test that permutation_importance does not return error when
    # sample_weight is None
    try:
        permutation_importance(lr, x, y, random_state=1, scoring=my_scorer, n_repeats=1)
    except TypeError:
        pytest.fail(
            "permutation_test raised an error when using a scorer "
            "function that does not accept sample_weight even though "
            "sample_weight was None"
        )

    # test that permutation_importance raise exception when sample_weight is
    # not None
    with pytest.raises(TypeError):
        permutation_importance(
            lr, x, y, random_state=1, scoring=my_scorer, n_repeats=1, sample_weight=w
        )
@pytest.mark.parametrize(
    "list_single_scorer, multi_scorer",
    [
        (["r2", "neg_mean_squared_error"], ["r2", "neg_mean_squared_error"]),
        (
            ["r2", "neg_mean_squared_error"],
            {
                "r2": get_scorer("r2"),
                "neg_mean_squared_error": get_scorer("neg_mean_squared_error"),
            },
        ),
        (
            ["r2", "neg_mean_squared_error"],
            lambda estimator, X, y: {
                "r2": r2_score(y, estimator.predict(X)),
                "neg_mean_squared_error": -mean_squared_error(y, estimator.predict(X)),
            },
        ),
    ],
)
def test_permutation_importance_multi_metric(list_single_scorer, multi_scorer):
    # Test permutation importance when scoring contains multiple scorers
    # (list of names, dict of scorers, or a callable returning a dict).

    # Creating some data and estimator for the permutation test
    x, y = make_regression(n_samples=500, n_features=10, random_state=0)
    lr = LinearRegression().fit(x, y)

    multi_importance = permutation_importance(
        lr, x, y, random_state=1, scoring=multi_scorer, n_repeats=2
    )
    assert set(multi_importance.keys()) == set(list_single_scorer)

    for scorer in list_single_scorer:
        multi_result = multi_importance[scorer]
        single_result = permutation_importance(
            lr, x, y, random_state=1, scoring=scorer, n_repeats=2
        )

        # multi-metric results must match the equivalent single-metric run
        assert_allclose(multi_result.importances, single_result.importances)
def test_permutation_importance_max_samples_error():
    """Check that a proper error message is raised when `max_samples` is not
    set to a valid input value.
    """
    features = np.array([(1.0, 2.0, 3.0, 4.0)]).T
    target = np.array([0, 1, 0, 1])
    estimator = LogisticRegression().fit(features, target)

    # 5 samples are requested while only 4 are available.
    with pytest.raises(ValueError, match=r"max_samples must be <= n_samples"):
        permutation_importance(estimator, features, target, max_samples=5)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/inspection/tests/test_partial_dependence.py | sklearn/inspection/tests/test_partial_dependence.py | """
Testing for the partial dependence module.
"""
import re
import warnings
import numpy as np
import pytest
import sklearn
from sklearn.base import BaseEstimator, ClassifierMixin, clone, is_regressor
from sklearn.cluster import KMeans
from sklearn.compose import make_column_transformer
from sklearn.datasets import load_iris, make_classification, make_regression
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import (
GradientBoostingClassifier,
GradientBoostingRegressor,
HistGradientBoostingClassifier,
HistGradientBoostingRegressor,
RandomForestRegressor,
)
from sklearn.exceptions import NotFittedError
from sklearn.impute import SimpleImputer
from sklearn.inspection import partial_dependence
from sklearn.inspection._partial_dependence import (
_grid_from_X,
_partial_dependence_brute,
_partial_dependence_recursion,
)
from sklearn.linear_model import LinearRegression, LogisticRegression, MultiTaskLasso
from sklearn.metrics import r2_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import (
OneHotEncoder,
PolynomialFeatures,
RobustScaler,
StandardScaler,
scale,
)
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree.tests.test_tree import assert_is_subtree
from sklearn.utils._testing import assert_allclose, assert_array_equal
from sklearn.utils.fixes import _IS_32BIT
from sklearn.utils.validation import check_random_state
# toy sample: two linearly separable clusters
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]

# (X, y), n_targets <-- as expected in the output of partial_dep()
binary_classification_data = (make_classification(n_samples=50, random_state=0), 1)
multiclass_classification_data = (
    make_classification(
        n_samples=50, n_classes=3, n_clusters_per_class=1, random_state=0
    ),
    3,
)
regression_data = (make_regression(n_samples=50, random_state=0), 1)
multioutput_regression_data = (
    make_regression(n_samples=50, n_targets=2, random_state=0),
    2,
)

# iris
iris = load_iris()
@pytest.mark.parametrize(
    "Estimator, method, data",
    [
        (GradientBoostingClassifier, "auto", binary_classification_data),
        (GradientBoostingClassifier, "auto", multiclass_classification_data),
        (GradientBoostingClassifier, "brute", binary_classification_data),
        (GradientBoostingClassifier, "brute", multiclass_classification_data),
        (GradientBoostingRegressor, "auto", regression_data),
        (GradientBoostingRegressor, "brute", regression_data),
        (DecisionTreeRegressor, "brute", regression_data),
        (LinearRegression, "brute", regression_data),
        (LinearRegression, "brute", multioutput_regression_data),
        (LogisticRegression, "brute", binary_classification_data),
        (LogisticRegression, "brute", multiclass_classification_data),
        (MultiTaskLasso, "brute", multioutput_regression_data),
    ],
)
@pytest.mark.parametrize("grid_resolution", (5, 10))
@pytest.mark.parametrize("features", ([1], [1, 2]))
@pytest.mark.parametrize("kind", ("average", "individual", "both"))
@pytest.mark.parametrize("use_custom_values", [True, False])
def test_output_shape(
    Estimator, method, data, grid_resolution, features, kind, use_custom_values
):
    # Check that partial_dependence has consistent output shape for different
    # kinds of estimators:
    # - classifiers with binary and multiclass settings
    # - regressors
    # - multi-task regressors
    est = Estimator()
    if hasattr(est, "n_estimators"):
        est.set_params(n_estimators=2)  # speed-up computations

    # n_target corresponds to the number of classes (1 for binary classif) or
    # the number of tasks / outputs in multi task settings. It's equal to 1 for
    # classical regression_data.
    (X, y), n_targets = data
    n_instances = X.shape[0]

    custom_values = None
    if use_custom_values:
        # Use the first `grid_resolution` values of each requested feature as
        # the custom evaluation grid so the expected shapes stay the same.
        grid_resolution = 5
        custom_values = {f: X[:grid_resolution, f] for f in features}

    est.fit(X, y)
    result = partial_dependence(
        est,
        X=X,
        features=features,
        method=method,
        kind=kind,
        grid_resolution=grid_resolution,
        custom_values=custom_values,
    )
    pdp, axes = result, result["grid_values"]

    expected_pdp_shape = (n_targets, *[grid_resolution for _ in range(len(features))])
    expected_ice_shape = (
        n_targets,
        n_instances,
        *[grid_resolution for _ in range(len(features))],
    )
    if kind == "average":
        assert pdp.average.shape == expected_pdp_shape
    elif kind == "individual":
        assert pdp.individual.shape == expected_ice_shape
    else:  # 'both'
        assert pdp.average.shape == expected_pdp_shape
        assert pdp.individual.shape == expected_ice_shape

    expected_axes_shape = (len(features), grid_resolution)
    assert axes is not None
    assert np.asarray(axes).shape == expected_axes_shape
def test_grid_from_X():
    """Sanity checks for `_grid_from_X`: cartesian-product output, shapes,
    dtypes, and the interplay between `grid_resolution`, the number of unique
    values per feature, and `custom_values`."""
    # tests for _grid_from_X: sanity check for output, and for shapes.
    # Make sure that the grid is a cartesian product of the input (it will use
    # the unique values instead of the percentiles)
    percentiles = (0.05, 0.95)
    grid_resolution = 100
    is_categorical = [False, False]
    X = np.asarray([[1, 2], [3, 4]])
    grid, axes = _grid_from_X(X, percentiles, is_categorical, grid_resolution, {})
    assert_array_equal(grid, [[1, 2], [1, 4], [3, 2], [3, 4]])
    assert_array_equal(axes, X.T)
    # test shapes of returned objects depending on the number of unique values
    # for a feature.
    rng = np.random.RandomState(0)
    grid_resolution = 15
    # n_unique_values > grid_resolution
    X = rng.normal(size=(20, 2))
    grid, axes = _grid_from_X(
        X,
        percentiles,
        is_categorical,
        grid_resolution=grid_resolution,
        custom_values={},
    )
    # Both features are sampled at grid_resolution points -> full cartesian
    # product of grid_resolution x grid_resolution rows.
    assert grid.shape == (grid_resolution * grid_resolution, X.shape[1])
    assert np.asarray(axes).shape == (2, grid_resolution)
    assert grid.dtype == X.dtype
    # n_unique_values < grid_resolution, will use actual values
    n_unique_values = 12
    # Collapse the tail of column 0 to a constant: 11 remaining normals plus
    # 12345 gives exactly n_unique_values distinct values.
    X[n_unique_values - 1 :, 0] = 12345
    rng.shuffle(X)  # just to make sure the order is irrelevant
    grid, axes = _grid_from_X(
        X,
        percentiles,
        is_categorical,
        grid_resolution=grid_resolution,
        custom_values={},
    )
    # Column 0 falls back to its unique values; column 1 keeps the resolution.
    assert grid.shape == (n_unique_values * grid_resolution, X.shape[1])
    # axes is a list of arrays of different shapes
    assert axes[0].shape == (n_unique_values,)
    assert axes[1].shape == (grid_resolution,)
    assert grid.dtype == X.dtype
    # Check that uses custom_range
    X = rng.normal(size=(20, 2))
    X[n_unique_values - 1 :, 0] = 12345
    col_1_range = [0, 2, 3]
    grid, axes = _grid_from_X(
        X,
        percentiles,
        is_categorical=is_categorical,
        grid_resolution=grid_resolution,
        custom_values={1: col_1_range},
    )
    # Column 1 now uses exactly the custom values provided.
    assert grid.shape == (n_unique_values * len(col_1_range), X.shape[1])
    # axes is a list of arrays of different shapes
    assert axes[0].shape == (n_unique_values,)
    assert axes[1].shape == (len(col_1_range),)
    assert grid.dtype == X.dtype
    # Check that grid_resolution does not impact custom_range
    X = rng.normal(size=(20, 2))
    col_0_range = [0, 2, 3, 4, 5, 6]
    grid_resolution = 5
    grid, axes = _grid_from_X(
        X,
        percentiles,
        is_categorical=is_categorical,
        grid_resolution=grid_resolution,
        custom_values={0: col_0_range},
    )
    # Column 0 uses the 6 custom values even though grid_resolution is 5.
    assert grid.shape == (grid_resolution * len(col_0_range), X.shape[1])
    # axes is a list of arrays of different shapes
    assert axes[0].shape == (len(col_0_range),)
    assert axes[1].shape == (grid_resolution,)
    # dtype is promoted to accommodate the (int) custom values and float X.
    assert grid.dtype == np.result_type(X, np.asarray(col_0_range).dtype)
    # Mixed int/str input: the resulting grid must be of object dtype.
    X = np.array([[0, "a"], [1, "b"], [2, "c"]])
    grid, axes = _grid_from_X(
        X,
        percentiles,
        is_categorical=is_categorical,
        grid_resolution=grid_resolution,
        custom_values={1: ["a", "b", "c"]},
    )
    assert grid.dtype == object
@pytest.mark.parametrize(
    "grid_resolution",
    [
        2,  # since n_categories > 2, we should not use quantiles resampling
        100,
    ],
)
def test_grid_from_X_with_categorical(grid_resolution):
    """`_grid_from_X` must enumerate all categories of a categorical column,
    regardless of `grid_resolution` and the requested percentiles.
    """
    pd = pytest.importorskip("pandas")
    df = pd.DataFrame({"cat_feature": ["A", "B", "C", "A", "B", "D", "E"]})
    grid, axes = _grid_from_X(
        df,
        (0.05, 0.95),
        [True],
        grid_resolution=grid_resolution,
        custom_values={},
    )
    n_categories = 5  # "A", "B", "C", "D", "E"
    assert grid.shape == (n_categories, df.shape[1])
    assert axes[0].shape == (n_categories,)
@pytest.mark.parametrize("grid_resolution", [3, 100])
def test_grid_from_X_heterogeneous_type(grid_resolution):
    """Check that `_grid_from_X` always samples from categories and does not
    depend on the percentiles, on a frame mixing categorical and numerical
    columns.
    """
    pd = pytest.importorskip("pandas")
    percentiles = (0.05, 0.95)
    is_categorical = [True, False]
    X = pd.DataFrame(
        {
            "cat": ["A", "B", "C", "A", "B", "D", "E", "A", "B", "D"],
            "num": [1, 1, 1, 2, 5, 6, 6, 6, 6, 8],
        }
    )
    # Both columns happen to have 5 unique values each.
    nunique = X.nunique()
    grid, axes = _grid_from_X(
        X,
        percentiles,
        is_categorical,
        grid_resolution=grid_resolution,
        custom_values={},
    )
    if grid_resolution == 3:
        # 5 categories x 3 numerical grid points.
        assert grid.shape == (15, 2)
        # NOTE(review): axes[0] is the categorical column; nunique["num"]
        # coincides with nunique["cat"] (both 5) so this passes either way —
        # possibly intended to be nunique["cat"]; confirm.
        assert axes[0].shape[0] == nunique["num"]
        assert axes[1].shape[0] == grid_resolution
    else:
        # grid_resolution > n_unique: both columns fall back to their unique
        # values, hence 5 x 5 = 25 grid points.
        assert grid.shape == (25, 2)
        assert axes[0].shape[0] == nunique["cat"]
        # NOTE(review): axes[1] is the numerical column; comparing against
        # nunique["cat"] passes only because both counts are 5 — possibly
        # intended to be nunique["num"]; confirm.
        assert axes[1].shape[0] == nunique["cat"]
@pytest.mark.parametrize(
    "grid_resolution, percentiles, err_msg",
    [
        (2, (0, 0.0001), "percentiles are too close"),
        (100, (1, 2, 3, 4), "'percentiles' must be a sequence of 2 elements"),
        (100, 12345, "'percentiles' must be a sequence of 2 elements"),
        (100, (-1, 0.95), r"'percentiles' values must be in \[0, 1\]"),
        (100, (0.05, 2), r"'percentiles' values must be in \[0, 1\]"),
        (100, (0.9, 0.1), r"percentiles\[0\] must be strictly less than"),
        (1, (0.05, 0.95), "'grid_resolution' must be strictly greater than 1"),
    ],
)
def test_grid_from_X_error(grid_resolution, percentiles, err_msg):
    """Invalid `percentiles` or `grid_resolution` raise an informative error."""
    data = np.asarray([[1, 2], [3, 4]])
    with pytest.raises(ValueError, match=err_msg):
        _grid_from_X(
            data, percentiles, [False], grid_resolution, custom_values={}
        )
@pytest.mark.parametrize("target_feature", range(5))
@pytest.mark.parametrize(
    "est, method",
    [
        (LinearRegression(), "brute"),
        (GradientBoostingRegressor(random_state=0), "brute"),
        (GradientBoostingRegressor(random_state=0), "recursion"),
        (HistGradientBoostingRegressor(random_state=0), "brute"),
        (HistGradientBoostingRegressor(random_state=0), "recursion"),
    ],
)
def test_partial_dependence_helpers(est, method, target_feature):
    """Compare the PD helpers against a manual computation of the average
    prediction with one feature clamped to a fixed value."""
    # Check that what is returned by _partial_dependence_brute or
    # _partial_dependence_recursion is equivalent to manually setting a target
    # feature to a given value, and computing the average prediction over all
    # samples.
    # This also checks that the brute and recursion methods give the same
    # output.
    # Note that even on the trainset, the brute and the recursion methods
    # aren't always strictly equivalent, in particular when the slow method
    # generates unrealistic samples that have low mass in the joint
    # distribution of the input features, and when some of the features are
    # dependent. Hence the high tolerance on the checks.
    X, y = make_regression(random_state=0, n_features=5, n_informative=5)
    # The 'init' estimator for GBDT (here the average prediction) isn't taken
    # into account with the recursion method, for technical reasons. We set
    # the mean to 0 to that this 'bug' doesn't have any effect.
    y = y - y.mean()
    # Clone is necessary to make the test thread-safe.
    est = clone(est).fit(X, y)
    # target feature will be set to .5 and then to 123
    features = np.array([target_feature], dtype=np.intp)
    grid = np.array([[0.5], [123]])
    if method == "brute":
        pdp, predictions = _partial_dependence_brute(
            est, grid, features, X, response_method="auto"
        )
    else:
        pdp = _partial_dependence_recursion(est, grid, features)
    # Reference: clamp the target feature on the full dataset and average the
    # resulting predictions, once per grid value.
    mean_predictions = []
    for val in (0.5, 123):
        X_ = X.copy()
        X_[:, target_feature] = val
        mean_predictions.append(est.predict(X_).mean())
    pdp = pdp[0]  # (shape is (1, 2) so make it (2,))
    # allow for greater margin for error with recursion method
    rtol = 1e-1 if method == "recursion" else 1e-3
    assert np.allclose(pdp, mean_predictions, rtol=rtol)
@pytest.mark.parametrize("seed", range(1))
def test_recursion_decision_tree_vs_forest_and_gbdt(seed):
    """The 'recursion' PD of a single decision tree must match that of a
    1-tree random forest and a 1-tree GBDT built with equivalent parameters."""
    # Make sure that the recursion method gives the same results on a
    # DecisionTreeRegressor and a GradientBoostingRegressor or a
    # RandomForestRegressor with 1 tree and equivalent parameters.
    rng = np.random.RandomState(seed)
    # Purely random dataset to avoid correlated features
    n_samples = 1000
    n_features = 5
    X = rng.randn(n_samples, n_features)
    y = rng.randn(n_samples) * 10
    # The 'init' estimator for GBDT (here the average prediction) isn't taken
    # into account with the recursion method, for technical reasons. We set
    # the mean to 0 to that this 'bug' doesn't have any effect.
    y = y - y.mean()
    # set max_depth not too high to avoid splits with same gain but different
    # features
    max_depth = 5
    tree_seed = 0
    forest = RandomForestRegressor(
        n_estimators=1,
        max_features=None,
        bootstrap=False,
        max_depth=max_depth,
        random_state=tree_seed,
    )
    # The forest will use ensemble.base._set_random_states to set the
    # random_state of the tree sub-estimator. We simulate this here to have
    # equivalent estimators.
    equiv_random_state = check_random_state(tree_seed).randint(np.iinfo(np.int32).max)
    gbdt = GradientBoostingRegressor(
        n_estimators=1,
        learning_rate=1,
        criterion="squared_error",
        max_depth=max_depth,
        random_state=equiv_random_state,
    )
    tree = DecisionTreeRegressor(max_depth=max_depth, random_state=equiv_random_state)
    forest.fit(X, y)
    gbdt.fit(X, y)
    tree.fit(X, y)
    # sanity check: if the trees aren't the same, the PD values won't be equal
    try:
        assert_is_subtree(tree.tree_, gbdt[0, 0].tree_)
        assert_is_subtree(tree.tree_, forest[0].tree_)
    except AssertionError:
        # For some reason the trees aren't exactly equal on 32bits, so the PDs
        # cannot be equal either. See
        # https://github.com/scikit-learn/scikit-learn/issues/8853
        assert _IS_32BIT, "this should only fail on 32 bit platforms"
        return
    # Compare the recursion PD of the three estimators feature by feature on
    # a shared random grid.
    grid = rng.randn(50).reshape(-1, 1)
    for f in range(n_features):
        features = np.array([f], dtype=np.intp)
        pdp_forest = _partial_dependence_recursion(forest, grid, features)
        pdp_gbdt = _partial_dependence_recursion(gbdt, grid, features)
        pdp_tree = _partial_dependence_recursion(tree, grid, features)
        np.testing.assert_allclose(pdp_gbdt, pdp_tree)
        np.testing.assert_allclose(pdp_forest, pdp_tree)
@pytest.mark.parametrize(
    "est",
    (
        GradientBoostingClassifier(random_state=0),
        HistGradientBoostingClassifier(random_state=0),
    ),
)
@pytest.mark.parametrize("target_feature", (0, 1, 2, 3, 4, 5))
def test_recursion_decision_function(est, target_feature):
    """The 'recursion' method (which implicitly uses decision_function) must
    agree with the 'brute' method using response_method='decision_function'.
    """
    X, y = make_classification(n_classes=2, n_clusters_per_class=1, random_state=1)
    assert np.mean(y) == 0.5  # make sure the init estimator predicts 0 anyway
    est = clone(est).fit(X, y)
    common_kwargs = dict(response_method="decision_function", kind="average")
    preds_recursion = partial_dependence(
        est, X, [target_feature], method="recursion", **common_kwargs
    )
    preds_brute = partial_dependence(
        est, X, [target_feature], method="brute", **common_kwargs
    )
    assert_allclose(preds_recursion["average"], preds_brute["average"], atol=1e-7)
@pytest.mark.parametrize(
    "est",
    (
        LinearRegression(),
        GradientBoostingRegressor(random_state=0),
        HistGradientBoostingRegressor(
            random_state=0, min_samples_leaf=1, max_leaf_nodes=None, max_iter=1
        ),
        DecisionTreeRegressor(random_state=0),
    ),
)
@pytest.mark.parametrize("power", (1, 2))
def test_partial_dependence_easy_target(est, power):
    """If y depends on a single feature in an obvious (linear or quadratic)
    way, the partial dependence of that feature should recover it.

    A (polynomial) linear model is fit on the PD curve and its R^2 must be
    near perfect.
    """
    rng = np.random.RandomState(0)
    n_samples, target_variable = 200, 2
    X = rng.normal(size=(n_samples, 5))
    y = X[:, target_variable] ** power
    fitted = clone(est).fit(X, y)
    pdp = partial_dependence(
        fitted, features=[target_variable], X=X, grid_resolution=1000, kind="average"
    )
    grid = pdp["grid_values"][0].reshape(-1, 1)
    pd_curve = pdp["average"][0]
    # Add polynomial features when the target is quadratic.
    design = PolynomialFeatures(degree=power).fit_transform(grid)
    linear_fit = LinearRegression().fit(design, pd_curve)
    r2 = r2_score(pd_curve, linear_fit.predict(design))
    assert r2 > 0.99
@pytest.mark.parametrize(
    "Estimator",
    (
        sklearn.tree.DecisionTreeClassifier,
        sklearn.tree.ExtraTreeClassifier,
        sklearn.ensemble.ExtraTreesClassifier,
        sklearn.neighbors.KNeighborsClassifier,
        sklearn.neighbors.RadiusNeighborsClassifier,
        sklearn.ensemble.RandomForestClassifier,
    ),
)
def test_multiclass_multioutput(Estimator):
    """partial_dependence must reject multiclass-multioutput classifiers."""
    # Build a multiclass target duplicated into two output columns.
    X, y = make_classification(n_classes=3, n_clusters_per_class=1, random_state=0)
    y_multi = np.column_stack([y, y])
    clf = Estimator().fit(X, y_multi)
    err_msg = "Multiclass-multioutput estimators are not supported"
    with pytest.raises(ValueError, match=err_msg):
        partial_dependence(clf, X, [0])
class NoPredictProbaNoDecisionFunction(ClassifierMixin, BaseEstimator):
    # Minimal classifier exposing neither predict_proba nor decision_function;
    # used to exercise response-method validation in partial_dependence.
    def fit(self, X, y):
        # simulate that we have some classes
        self.classes_ = [0, 1]
        return self
@pytest.mark.parametrize(
    "estimator, params, err_msg",
    [
        # Unsupported estimator type (clusterer, not classifier/regressor).
        (
            KMeans(random_state=0, n_init="auto"),
            {"features": [0]},
            "'estimator' must be a fitted regressor or classifier",
        ),
        # response_method is meaningless for regressors.
        (
            LinearRegression(),
            {"features": [0], "response_method": "predict_proba"},
            "The response_method parameter is ignored for regressors",
        ),
        # recursion requires decision_function for classifiers.
        (
            GradientBoostingClassifier(random_state=0),
            {
                "features": [0],
                "response_method": "predict_proba",
                "method": "recursion",
            },
            "'recursion' method, the response_method must be 'decision_function'",
        ),
        (
            GradientBoostingClassifier(random_state=0),
            {"features": [0], "response_method": "predict_proba", "method": "auto"},
            "'recursion' method, the response_method must be 'decision_function'",
        ),
        # recursion only produces averaged (not individual) curves.
        (
            LinearRegression(),
            {"features": [0], "method": "recursion", "kind": "individual"},
            "The 'recursion' method only applies when 'kind' is set to 'average'",
        ),
        (
            LinearRegression(),
            {"features": [0], "method": "recursion", "kind": "both"},
            "The 'recursion' method only applies when 'kind' is set to 'average'",
        ),
        # recursion is restricted to a whitelist of tree-based estimators.
        (
            LinearRegression(),
            {"features": [0], "method": "recursion"},
            "Only the following estimators support the 'recursion' method:",
        ),
        # custom grids must be one-dimensional per feature.
        (
            LinearRegression(),
            {"features": [0, 1], "custom_values": {0: [1, 2, 3], 1: np.ones((3, 3))}},
            (
                "The custom grid for some features is not a one-dimensional array. "
                "Feature 1: 2 dimensions"
            ),
        ),
    ],
)
def test_partial_dependence_error(estimator, params, err_msg):
    """Invalid estimator/parameter combinations raise informative ValueErrors."""
    X, y = make_classification(random_state=0)
    # Clone so the parametrized estimator instances are never mutated.
    estimator = clone(estimator).fit(X, y)
    with pytest.raises(ValueError, match=err_msg):
        partial_dependence(estimator, X, **params)
@pytest.mark.parametrize(
    "estimator", [LinearRegression(), GradientBoostingClassifier(random_state=0)]
)
@pytest.mark.parametrize("features", [-1, 10000])
def test_partial_dependence_unknown_feature_indices(estimator, features):
    """Out-of-range feature indices raise a ValueError."""
    X, y = make_classification(random_state=0)
    fitted = clone(estimator).fit(X, y)
    with pytest.raises(ValueError, match="all features must be in"):
        partial_dependence(fitted, X, [features])
@pytest.mark.parametrize(
    "estimator", [LinearRegression(), GradientBoostingClassifier(random_state=0)]
)
def test_partial_dependence_unknown_feature_string(estimator):
    """A feature name absent from the dataframe raises a ValueError."""
    pd = pytest.importorskip("pandas")
    X, y = make_classification(random_state=0)
    df = pd.DataFrame(X)
    fitted = clone(estimator).fit(df, y)
    with pytest.raises(
        ValueError, match="A given column is not a column of the dataframe"
    ):
        partial_dependence(fitted, df, ["random"])
@pytest.mark.parametrize(
    "estimator", [LinearRegression(), GradientBoostingClassifier(random_state=0)]
)
def test_partial_dependence_X_list(estimator):
    """partial_dependence accepts plain array-like (list of rows) input."""
    X, y = make_classification(random_state=0)
    fitted = clone(estimator).fit(X, y)
    # Should run without raising when X is a list instead of an ndarray.
    partial_dependence(fitted, list(X), [0], kind="average")
def test_warning_recursion_non_constant_init():
    """A GBDT with a non-constant init estimator must warn when its partial
    dependence is computed with the recursion method."""
    gbc = GradientBoostingClassifier(init=DummyClassifier(), random_state=0)
    gbc.fit(X, y)
    # The warning must be emitted on every call, not only the first one.
    for _ in range(2):
        with pytest.warns(
            UserWarning,
            match="Using recursion method with a non-constant init predictor",
        ):
            partial_dependence(gbc, X, [0], method="recursion", kind="average")
def test_partial_dependence_sample_weight_of_fitted_estimator():
    """Sample weights that emphasize the y = x half of the data should make
    the partial dependence nearly perfectly correlated with the diagonal.

    Non-regression test for #13193.
    TODO: extend to HistGradientBoosting once sample_weight is supported.
    """
    n_samples = 1000
    rng = np.random.RandomState(123456)
    mask = rng.randint(2, size=n_samples, dtype=bool)
    x = rng.rand(n_samples)
    # y = x where the mask is set, y = -x elsewhere.
    y = np.where(mask, x, -x)
    X = np.c_[mask, x]
    # Strongly upweight the y = x data points.
    sample_weight = np.where(mask, 1000.0, 1.0)
    reg = GradientBoostingRegressor(n_estimators=10, random_state=1)
    reg.fit(X, y, sample_weight=sample_weight)
    pdp = partial_dependence(reg, X, features=[1], kind="average")
    assert np.corrcoef(pdp["average"], pdp["grid_values"])[0, 1] > 0.99
def test_hist_gbdt_sw_not_supported():
    """HistGradientBoosting fitted with sample weights must refuse partial
    dependence.

    TODO: remove/fix when PDP supports HGBT with sample weights.
    """
    reg = HistGradientBoostingRegressor(random_state=1)
    reg.fit(X, y, sample_weight=np.ones(len(X)))
    err_msg = "does not support partial dependence"
    with pytest.raises(NotImplementedError, match=err_msg):
        partial_dependence(reg, X, features=[1])
def test_partial_dependence_pipeline():
    """Check that partial_dependence supports pipelines: the PD of the
    pipeline must equal the PD of the bare classifier on scaled data, with
    grid values mapped back through the scaler."""
    # check that the partial dependence support pipeline
    iris = load_iris()
    scaler = StandardScaler()
    clf = DummyClassifier(random_state=42)
    pipe = make_pipeline(scaler, clf)
    # Fit the standalone classifier on manually scaled data so it mirrors the
    # pipeline's internal state.
    clf.fit(scaler.fit_transform(iris.data), iris.target)
    pipe.fit(iris.data, iris.target)
    features = 0
    pdp_pipe = partial_dependence(
        pipe, iris.data, features=[features], grid_resolution=10, kind="average"
    )
    pdp_clf = partial_dependence(
        clf,
        scaler.transform(iris.data),
        features=[features],
        grid_resolution=10,
        kind="average",
    )
    assert_allclose(pdp_pipe["average"], pdp_clf["average"])
    # The pipeline's grid is in original units: un-scale the classifier's
    # grid and compare.
    assert_allclose(
        pdp_pipe["grid_values"][0],
        pdp_clf["grid_values"][0] * scaler.scale_[features] + scaler.mean_[features],
    )
@pytest.mark.parametrize(
    "features, grid_resolution, n_vals_expected",
    [
        (["a"], 10, 10),
        (["a"], 2, 2),
    ],
)
def test_partial_dependence_binary_model_grid_resolution(
    features, grid_resolution, n_vals_expected
):
    """The PD of a numeric feature has exactly `grid_resolution` values when
    the feature has at least that many unique values."""
    pd = pytest.importorskip("pandas")
    rng = np.random.RandomState(0)
    X = pd.DataFrame(
        {
            "a": rng.randint(0, 10, size=100).astype(np.float64),
            "b": rng.randint(0, 10, size=100).astype(np.float64),
        }
    )
    y = pd.Series(rng.randint(0, 2, size=100))
    clf = DummyClassifier().fit(X, y)
    part_dep = partial_dependence(
        clf,
        X,
        features=features,
        grid_resolution=grid_resolution,
        kind="average",
    )
    assert part_dep["average"].size == n_vals_expected
@pytest.mark.parametrize(
    "features, custom_values, n_vals_expected",
    [
        (["a"], {"a": [1.0, 2.0, 3.0, 4.0]}, 4),
        (["a"], {"a": [1.0, 2.0]}, 2),
        (["a"], {"a": [1.0]}, 1),
    ],
)
def test_partial_dependence_binary_model_custom_values(
    features, custom_values, n_vals_expected
):
    """A custom grid for a feature overrides `grid_resolution`: the PD has
    exactly as many values as provided in `custom_values`."""
    pd = pytest.importorskip("pandas")
    model = DummyClassifier()
    X = pd.DataFrame({"a": [1.0, 2.0, 3.0, 4.0], "b": [6.0, 7.0, 8.0, 9.0]})
    y = pd.Series([0, 1, 0, 1])
    model.fit(X, y)
    part_dep = partial_dependence(
        model,
        X,
        features=features,
        # grid_resolution is deliberately different from the number of custom
        # values to show it is ignored for features with a custom grid.
        grid_resolution=3,
        custom_values=custom_values,
        kind="average",
    )
    assert part_dep["average"].size == n_vals_expected
@pytest.mark.parametrize(
    "features, custom_values, n_vals_expected",
    [
        (["b"], {"b": ["a", "b"]}, 2),
        (["b"], {"b": ["a"]}, 1),
        # Two features: the PD grid is the cartesian product (2 x 2 = 4).
        (["a", "b"], {"a": [1.0, 2.0], "b": ["a", "b"]}, 4),
    ],
)
def test_partial_dependence_pipeline_custom_values(
    features, custom_values, n_vals_expected
):
    """Custom (including categorical) grid values work through a pipeline
    with imputation and one-hot encoding."""
    pd = pytest.importorskip("pandas")
    pl = make_pipeline(
        SimpleImputer(strategy="most_frequent"), OneHotEncoder(), DummyClassifier()
    )
    X = pd.DataFrame({"a": [1.0, 2.0, 3.0, 4.0], "b": ["a", "b", "a", "b"]})
    y = pd.Series([0, 1, 0, 1])
    pl.fit(X, y)
    # Holdout contains a missing value handled by the SimpleImputer step.
    X_holdout = pd.DataFrame({"a": [1.0, 2.0, 3.0, 4.0], "b": ["a", "b", "a", None]})
    part_dep = partial_dependence(
        pl,
        X_holdout,
        features=features,
        grid_resolution=3,
        custom_values=custom_values,
        kind="average",
    )
    assert part_dep["average"].size == n_vals_expected
@pytest.mark.parametrize(
    "estimator",
    [
        LogisticRegression(max_iter=1000, random_state=0),
        GradientBoostingClassifier(random_state=0, n_estimators=5),
    ],
    ids=["estimator-brute", "estimator-recursion"],
)
@pytest.mark.parametrize(
    "preprocessor",
    [
        None,
        make_column_transformer(
            (StandardScaler(), [iris.feature_names[i] for i in (0, 2)]),
            (RobustScaler(), [iris.feature_names[i] for i in (1, 3)]),
        ),
        make_column_transformer(
            (StandardScaler(), [iris.feature_names[i] for i in (0, 2)]),
            remainder="passthrough",
        ),
    ],
    ids=["None", "column-transformer", "column-transformer-passthrough"],
)
@pytest.mark.parametrize(
    "features",
    [[0, 2], [iris.feature_names[i] for i in (0, 2)]],
    ids=["features-integer", "features-string"],
)
def test_partial_dependence_dataframe(estimator, preprocessor, features):
    """Check that partial_dependence supports dataframes and pipelines with a
    column transformer, for integer and string feature specifications."""
    # check that the partial dependence support dataframe and pipeline
    # including a column transformer
    pd = pytest.importorskip("pandas")
    df = pd.DataFrame(scale(iris.data), columns=iris.feature_names)
    pipe = make_pipeline(preprocessor, clone(estimator))
    pipe.fit(df, iris.target)
    pdp_pipe = partial_dependence(
        pipe, df, features=features, grid_resolution=10, kind="average"
    )
    # the column transformer will reorder the column when transforming
    # we mixed the index to be sure that we are computing the partial
    # dependence of the right columns
    if preprocessor is not None:
        X_proc = clone(preprocessor).fit_transform(df)
        # After the transformer, the requested columns end up at positions 0
        # and 1 of the transformed array.
        features_clf = [0, 1]
    else:
        X_proc = df
        features_clf = [0, 2]
    clf = clone(estimator).fit(X_proc, iris.target)
    pdp_clf = partial_dependence(
        clf,
        X_proc,
        features=features_clf,
        method="brute",
        grid_resolution=10,
        kind="average",
    )
    assert_allclose(pdp_pipe["average"], pdp_clf["average"])
    if preprocessor is not None:
        # Grid values of the pipeline are in original units: un-scale the
        # classifier's grid through the StandardScaler and compare.
        scaler = preprocessor.named_transformers_["standardscaler"]
        assert_allclose(
            pdp_pipe["grid_values"][1],
            pdp_clf["grid_values"][1] * scaler.scale_[1] + scaler.mean_[1],
        )
    else:
        assert_allclose(pdp_pipe["grid_values"][1], pdp_clf["grid_values"][1])
@pytest.mark.parametrize(
    "features, custom_values, expected_pd_shape",
    [
        (0, None, (3, 10)),
        (0, {0: [1.0, 2.0, 3.0]}, (3, 3)),
        (iris.feature_names[0], None, (3, 10)),
        (iris.feature_names[0], {iris.feature_names[0]: np.array([1.0, 2.0])}, (3, 2)),
        ([0, 2], None, (3, 10, 10)),
        ([0, 2], {2: [7, 8, 9, 10]}, (3, 10, 4)),
        ([iris.feature_names[i] for i in (0, 2)], None, (3, 10, 10)),
        (
            [iris.feature_names[i] for i in (0, 2)],
            {iris.feature_names[2]: [1, 2, 3, 10]},
            (3, 10, 4),
        ),
        # custom_values keyed by an int while features are names: the custom
        # grid is not matched, so the default resolution applies.
        ([iris.feature_names[i] for i in (0, 2)], {2: [1, 2, 3, 10]}, (3, 10, 10)),
        (
            [iris.feature_names[i] for i in (0, 2, 3)],
            {iris.feature_names[2]: [1, 10]},
            (3, 10, 2, 10),
        ),
        ([True, False, True, False], None, (3, 10, 10)),
    ],
    ids=[
        "scalar-int",
        "scalar-int-custom-values",
        "scalar-str",
        "scalar-str-custom-values",
        "list-int",
        "list-int-custom-values",
        "list-str",
        "list-str-custom-values",
        "list-str-custom-values-incorrect",
        "list-str-three-features",
        "mask",
    ],
)
def test_partial_dependence_feature_type(features, custom_values, expected_pd_shape):
    """Check every supported way of specifying PDP features (int, string,
    list, boolean mask), with and without custom grid values."""
    # check all possible features type supported in PDP
    pd = pytest.importorskip("pandas")
    df = pd.DataFrame(iris.data, columns=iris.feature_names)
    preprocessor = make_column_transformer(
        (StandardScaler(), [iris.feature_names[i] for i in (0, 2)]),
        (RobustScaler(), [iris.feature_names[i] for i in (1, 3)]),
    )
    pipe = make_pipeline(
        preprocessor, LogisticRegression(max_iter=1000, random_state=0)
    )
    pipe.fit(df, iris.target)
    pdp_pipe = partial_dependence(
        pipe,
        df,
        features=features,
        grid_resolution=10,
        kind="average",
        custom_values=custom_values,
    )
    assert pdp_pipe["average"].shape == expected_pd_shape
    # One grid axis per requested feature (first axis of average is classes).
    assert len(pdp_pipe["grid_values"]) == len(pdp_pipe["average"].shape) - 1
@pytest.mark.parametrize(
"estimator",
[
LinearRegression(),
LogisticRegression(),
GradientBoostingRegressor(),
GradientBoostingClassifier(),
],
)
def test_partial_dependence_unfitted(estimator):
X = iris.data
preprocessor = make_column_transformer(
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/inspection/tests/__init__.py | sklearn/inspection/tests/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/inspection/tests/test_pd_utils.py | sklearn/inspection/tests/test_pd_utils.py | import numpy as np
import pytest
from sklearn.inspection._pd_utils import _check_feature_names, _get_feature_index
from sklearn.utils._testing import _convert_container
@pytest.mark.parametrize(
    "feature_names, array_type, expected_feature_names",
    [
        # No names given + plain array: generated x0, x1, ... names.
        (None, "array", ["x0", "x1", "x2"]),
        # No names given + dataframe: names taken from the columns.
        (None, "dataframe", ["a", "b", "c"]),
        # Explicit names are passed through (array input).
        (np.array(["a", "b", "c"]), "array", ["a", "b", "c"]),
    ],
)
def test_check_feature_names(feature_names, array_type, expected_feature_names):
    """`_check_feature_names` resolves feature names from the input container
    or the explicit argument, as appropriate."""
    X = np.random.randn(10, 3)
    column_names = ["a", "b", "c"]
    X = _convert_container(X, constructor_name=array_type, columns_name=column_names)
    feature_names_validated = _check_feature_names(X, feature_names)
    assert feature_names_validated == expected_feature_names
def test_check_feature_names_error():
    """Duplicate feature names must be rejected with a ValueError."""
    X = np.random.randn(10, 3)
    with pytest.raises(
        ValueError, match="feature_names should not contain duplicates."
    ):
        _check_feature_names(X, ["a", "b", "c", "a"])
@pytest.mark.parametrize("fx, idx", [(0, 0), (1, 1), ("a", 0), ("b", 1), ("c", 2)])
def test_get_feature_index(fx, idx):
    """Positional indices and feature names both resolve to the right index."""
    assert _get_feature_index(fx, ["a", "b", "c"]) == idx
@pytest.mark.parametrize(
    "fx, feature_names, err_msg",
    [
        # A string feature cannot be resolved without feature names.
        ("a", None, "Cannot plot partial dependence for feature 'a'"),
        # A name absent from the provided feature names is rejected.
        ("d", ["a", "b", "c"], "Feature 'd' not in feature_names"),
    ],
)
def test_get_feature_names_error(fx, feature_names, err_msg):
    """`_get_feature_index` raises informative errors for unresolvable names."""
    with pytest.raises(ValueError, match=err_msg):
        _get_feature_index(fx, feature_names)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/cluster/_dbscan.py | sklearn/cluster/_dbscan.py | """
DBSCAN: Density-Based Spatial Clustering of Applications with Noise
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from numbers import Integral, Real
import numpy as np
from scipy import sparse
from sklearn.base import BaseEstimator, ClusterMixin, _fit_context
from sklearn.cluster._dbscan_inner import dbscan_inner
from sklearn.metrics.pairwise import _VALID_METRICS
from sklearn.neighbors import NearestNeighbors
from sklearn.utils._param_validation import Interval, StrOptions, validate_params
from sklearn.utils.validation import _check_sample_weight, validate_data
@validate_params(
{
"X": ["array-like", "sparse matrix"],
"sample_weight": ["array-like", None],
},
prefer_skip_nested_validation=False,
)
def dbscan(
X,
eps=0.5,
*,
min_samples=5,
metric="minkowski",
metric_params=None,
algorithm="auto",
leaf_size=30,
p=2,
sample_weight=None,
n_jobs=None,
):
"""Perform DBSCAN clustering from vector array or distance matrix.
This function is a wrapper around :class:`~cluster.DBSCAN`, suitable for
quick, standalone clustering tasks. For estimator-based workflows, where
estimator attributes or pipeline integration is required, prefer
:class:`~cluster.DBSCAN`.
DBSCAN (Density-Based Spatial Clustering of Applications with Noise) is a
density-based clustering algorithm that groups together points that are
closely packed while marking points in low-density regions as outliers.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
X : {array-like, scipy sparse matrix} of shape (n_samples, n_features) or \
(n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``. When using precomputed distances, X must
be a square symmetric matrix.
eps : float, default=0.5
The maximum distance between two samples for one to be considered
as in the neighborhood of the other. This is not a maximum bound
on the distances of points within a cluster. This is the most
important DBSCAN parameter to choose appropriately for your data set
and distance function. Smaller values result in more clusters,
while larger values result in fewer, larger clusters.
min_samples : int, default=5
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
Higher values yield fewer, denser clusters, while lower values yield
more, sparser clusters.
metric : str or callable, default='minkowski'
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by :func:`sklearn.metrics.pairwise_distances` for
its metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square during fit.
X may be a :term:`sparse graph <sparse graph>`,
in which case only "nonzero" elements may be considered neighbors.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
.. versionadded:: 0.19
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
See :class:`~sklearn.neighbors.NearestNeighbors` documentation for
details.
leaf_size : int, default=30
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem. Generally, smaller leaf sizes
lead to faster queries but slower construction.
p : float, default=2
Power parameter for the Minkowski metric. When p = 1, this is equivalent
to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2.
For arbitrary p, minkowski_distance (l_p) is used. This parameter is expected
to be positive.
sample_weight : array-like of shape (n_samples,), default=None
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search. ``None`` means
1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means
using all processors. See :term:`Glossary <n_jobs>` for more details.
If precomputed distances are used, parallel execution is not available
and thus n_jobs will have no effect.
Returns
-------
core_samples : ndarray of shape (n_core_samples,)
Indices of core samples.
labels : ndarray of shape (n_samples,)
Cluster labels for each point. Noisy samples are given the label -1.
Non-negative integers indicate cluster membership.
See Also
--------
DBSCAN : An estimator interface for this clustering algorithm.
OPTICS : A similar estimator interface clustering at multiple values of
eps. Our implementation is optimized for memory usage.
Notes
-----
For an example, see :ref:`sphx_glr_auto_examples_cluster_plot_dbscan.py`.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n). It may attract a higher
memory complexity when querying these nearest neighborhoods, depending
on the ``algorithm``.
One way to avoid the query complexity is to pre-compute sparse
neighborhoods in chunks using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with
``mode='distance'``, then using ``metric='precomputed'`` here.
Another way to reduce memory and computation time is to remove
(near-)duplicate points and use ``sample_weight`` instead.
:class:`~sklearn.cluster.OPTICS` provides a similar clustering with lower
memory usage.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, `"A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise"
<https://www.dbs.ifi.lmu.de/Publikationen/Papers/KDD-96.final.frame.pdf>`_.
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
Schubert, E., Sander, J., Ester, M., Kriegel, H. P., & Xu, X. (2017).
:doi:`"DBSCAN revisited, revisited: why and how you should (still) use DBSCAN."
<10.1145/3068335>`
ACM Transactions on Database Systems (TODS), 42(3), 19.
Examples
--------
>>> from sklearn.cluster import dbscan
>>> X = [[1, 2], [2, 2], [2, 3], [8, 7], [8, 8], [25, 80]]
>>> core_samples, labels = dbscan(X, eps=3, min_samples=2)
>>> core_samples
array([0, 1, 2, 3, 4])
>>> labels
array([ 0, 0, 0, 1, 1, -1])
"""
est = DBSCAN(
eps=eps,
min_samples=min_samples,
metric=metric,
metric_params=metric_params,
algorithm=algorithm,
leaf_size=leaf_size,
p=p,
n_jobs=n_jobs,
)
est.fit(X, sample_weight=sample_weight)
return est.core_sample_indices_, est.labels_
class DBSCAN(ClusterMixin, BaseEstimator):
    """Perform DBSCAN clustering from vector array or distance matrix.

    DBSCAN - Density-Based Spatial Clustering of Applications with Noise.
    Finds core samples of high density and expands clusters from them.
    This algorithm is particularly good for data which contains clusters of
    similar density and can find clusters of arbitrary shape.

    Unlike K-means, DBSCAN does not require specifying the number of clusters
    in advance and can identify outliers as noise points.

    This implementation has a worst case memory complexity of :math:`O({n}^2)`,
    which can occur when the `eps` param is large and `min_samples` is low,
    while the original DBSCAN only uses linear memory.
    For further details, see the Notes below.

    Read more in the :ref:`User Guide <dbscan>`.

    Parameters
    ----------
    eps : float, default=0.5
        The maximum distance between two samples for one to be considered
        as in the neighborhood of the other. This is not a maximum bound
        on the distances of points within a cluster. This is the most
        important DBSCAN parameter to choose appropriately for your data set
        and distance function. Smaller values generally lead to more clusters.

    min_samples : int, default=5
        The number of samples (or total weight) in a neighborhood for a point to
        be considered as a core point. This includes the point itself. If
        `min_samples` is set to a higher value, DBSCAN will find denser clusters,
        whereas if it is set to a lower value, the found clusters will be more
        sparse.

    metric : str, or callable, default='euclidean'
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string or callable, it must be one of
        the options allowed by :func:`sklearn.metrics.pairwise_distances` for
        its metric parameter.
        If metric is "precomputed", X is assumed to be a distance matrix and
        must be square. X may be a :term:`sparse graph`, in which
        case only "nonzero" elements may be considered neighbors for DBSCAN.

        .. versionadded:: 0.17
           metric *precomputed* to accept precomputed sparse matrix.

    metric_params : dict, default=None
        Additional keyword arguments for the metric function.

        .. versionadded:: 0.19

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
        The algorithm to be used by the NearestNeighbors module
        to compute pointwise distances and find nearest neighbors.
        'auto' will attempt to decide the most appropriate algorithm
        based on the values passed to :meth:`fit` method.
        See :class:`~sklearn.neighbors.NearestNeighbors` documentation for
        details.

    leaf_size : int, default=30
        Leaf size passed to BallTree or cKDTree. This can affect the speed
        of the construction and query, as well as the memory required
        to store the tree. The optimal value depends
        on the nature of the problem.

    p : float, default=None
        The power of the Minkowski metric to be used to calculate distance
        between points. If None, then ``p=2`` (equivalent to the Euclidean
        distance). When p=1, this is equivalent to Manhattan distance.

    n_jobs : int, default=None
        The number of parallel jobs to run.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    Attributes
    ----------
    core_sample_indices_ : ndarray of shape (n_core_samples,)
        Indices of core samples.

    components_ : ndarray of shape (n_core_samples, n_features)
        Copy of each core sample found by training.

    labels_ : ndarray of shape (n_samples,)
        Cluster labels for each point in the dataset given to fit().
        Noisy samples are given the label -1. Non-negative integers
        indicate cluster membership.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    OPTICS : A similar clustering at multiple values of eps. Our implementation
        is optimized for memory usage.

    Notes
    -----
    This implementation bulk-computes all neighborhood queries, which increases
    the memory complexity to O(n.d) where d is the average number of neighbors,
    while original DBSCAN had memory complexity O(n). It may attract a higher
    memory complexity when querying these nearest neighborhoods, depending
    on the ``algorithm``.

    One way to avoid the query complexity is to pre-compute sparse
    neighborhoods in chunks using
    :func:`NearestNeighbors.radius_neighbors_graph
    <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with
    ``mode='distance'``, then using ``metric='precomputed'`` here.

    Another way to reduce memory and computation time is to remove
    (near-)duplicate points and use ``sample_weight`` instead.

    :class:`~sklearn.cluster.OPTICS` provides a similar clustering with lower memory
    usage.

    References
    ----------
    Ester, M., H. P. Kriegel, J. Sander, and X. Xu, `"A Density-Based
    Algorithm for Discovering Clusters in Large Spatial Databases with Noise"
    <https://www.dbs.ifi.lmu.de/Publikationen/Papers/KDD-96.final.frame.pdf>`_.
    In: Proceedings of the 2nd International Conference on Knowledge Discovery
    and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996

    Schubert, E., Sander, J., Ester, M., Kriegel, H. P., & Xu, X. (2017).
    :doi:`"DBSCAN revisited, revisited: why and how you should (still) use DBSCAN."
    <10.1145/3068335>`
    ACM Transactions on Database Systems (TODS), 42(3), 19.

    Examples
    --------
    >>> from sklearn.cluster import DBSCAN
    >>> import numpy as np
    >>> X = np.array([[1, 2], [2, 2], [2, 3],
    ...               [8, 7], [8, 8], [25, 80]])
    >>> clustering = DBSCAN(eps=3, min_samples=2).fit(X)
    >>> clustering.labels_
    array([ 0,  0,  0,  1,  1, -1])
    >>> clustering
    DBSCAN(eps=3, min_samples=2)

    For an example, see
    :ref:`sphx_glr_auto_examples_cluster_plot_dbscan.py`.

    For a comparison of DBSCAN with other clustering algorithms, see
    :ref:`sphx_glr_auto_examples_cluster_plot_cluster_comparison.py`
    """

    # Declarative constraints checked by `_fit_context` before fitting.
    _parameter_constraints: dict = {
        "eps": [Interval(Real, 0.0, None, closed="neither")],
        "min_samples": [Interval(Integral, 1, None, closed="left")],
        "metric": [
            StrOptions(set(_VALID_METRICS) | {"precomputed"}),
            callable,
        ],
        "metric_params": [dict, None],
        "algorithm": [StrOptions({"auto", "ball_tree", "kd_tree", "brute"})],
        "leaf_size": [Interval(Integral, 1, None, closed="left")],
        "p": [Interval(Real, 0.0, None, closed="left"), None],
        "n_jobs": [Integral, None],
    }

    def __init__(
        self,
        eps=0.5,
        *,
        min_samples=5,
        metric="euclidean",
        metric_params=None,
        algorithm="auto",
        leaf_size=30,
        p=None,
        n_jobs=None,
    ):
        # Store constructor parameters verbatim (scikit-learn convention:
        # no validation or transformation before `fit`).
        self.eps = eps
        self.min_samples = min_samples
        self.metric = metric
        self.metric_params = metric_params
        self.algorithm = algorithm
        self.leaf_size = leaf_size
        self.p = p
        self.n_jobs = n_jobs

    @_fit_context(
        # DBSCAN.metric is not validated yet
        prefer_skip_nested_validation=False
    )
    def fit(self, X, y=None, sample_weight=None):
        """Perform DBSCAN clustering from features, or distance matrix.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
            (n_samples, n_samples)
            Training instances to cluster, or distances between instances if
            ``metric='precomputed'``. If a sparse matrix is provided, it will
            be converted into a sparse ``csr_matrix``.

        y : Ignored
            Not used, present here for API consistency by convention.

        sample_weight : array-like of shape (n_samples,), default=None
            Weight of each sample, such that a sample with a weight of at least
            ``min_samples`` is by itself a core sample; a sample with a
            negative weight may inhibit its eps-neighbor from being core.
            Note that weights are absolute, and default to 1.

        Returns
        -------
        self : object
            Returns a fitted instance of self.
        """
        X = validate_data(self, X, accept_sparse="csr")

        if sample_weight is not None:
            sample_weight = _check_sample_weight(sample_weight, X)

        # Calculate neighborhood for all samples. This leaves the original
        # point in, which needs to be considered later (i.e. point i is in the
        # neighborhood of point i. While True, its useless information)
        if self.metric == "precomputed" and sparse.issparse(X):
            # set the diagonal to explicit values, as a point is its own
            # neighbor
            X = X.copy()  # copy to avoid in-place modification
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", sparse.SparseEfficiencyWarning)
                X.setdiag(X.diagonal())

        neighbors_model = NearestNeighbors(
            radius=self.eps,
            algorithm=self.algorithm,
            leaf_size=self.leaf_size,
            metric=self.metric,
            metric_params=self.metric_params,
            p=self.p,
            n_jobs=self.n_jobs,
        )
        neighbors_model.fit(X)
        # This has worst case O(n^2) memory complexity
        neighborhoods = neighbors_model.radius_neighbors(X, return_distance=False)

        # Neighborhood "size" is either a plain count or a weighted sum,
        # so that `min_samples` applies uniformly to both cases.
        if sample_weight is None:
            n_neighbors = np.array([len(neighbors) for neighbors in neighborhoods])
        else:
            n_neighbors = np.array(
                [np.sum(sample_weight[neighbors]) for neighbors in neighborhoods]
            )

        # Initially, all samples are noise.
        labels = np.full(X.shape[0], -1, dtype=np.intp)

        # A list of all core samples found. uint8 mask as expected by the
        # Cython `dbscan_inner`, which expands clusters in place via `labels`.
        core_samples = np.asarray(n_neighbors >= self.min_samples, dtype=np.uint8)
        dbscan_inner(core_samples, neighborhoods, labels)

        self.core_sample_indices_ = np.where(core_samples)[0]
        self.labels_ = labels

        if len(self.core_sample_indices_):
            # fix for scipy sparse indexing issue
            self.components_ = X[self.core_sample_indices_].copy()
        else:
            # no core samples
            self.components_ = np.empty((0, X.shape[1]))
        return self

    def fit_predict(self, X, y=None, sample_weight=None):
        """Compute clusters from a data or distance matrix and predict labels.

        This method fits the model and returns the cluster labels in a single step.
        It is equivalent to calling fit(X).labels_.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
            (n_samples, n_samples)
            Training instances to cluster, or distances between instances if
            ``metric='precomputed'``. If a sparse matrix is provided, it will
            be converted into a sparse ``csr_matrix``.

        y : Ignored
            Not used, present here for API consistency by convention.

        sample_weight : array-like of shape (n_samples,), default=None
            Weight of each sample, such that a sample with a weight of at least
            ``min_samples`` is by itself a core sample; a sample with a
            negative weight may inhibit its eps-neighbor from being core.
            Note that weights are absolute, and default to 1.

        Returns
        -------
        labels : ndarray of shape (n_samples,)
            Cluster labels. Noisy samples are given the label -1.
            Non-negative integers indicate cluster membership.
        """
        self.fit(X, sample_weight=sample_weight)
        return self.labels_

    def __sklearn_tags__(self):
        # With a precomputed metric, X is sample-by-sample (pairwise) data.
        tags = super().__sklearn_tags__()
        tags.input_tags.pairwise = self.metric == "precomputed"
        tags.input_tags.sparse = True
        return tags
"""Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from collections import defaultdict
from numbers import Integral, Real
import numpy as np
from sklearn._config import config_context
from sklearn.base import BaseEstimator, ClusterMixin, _fit_context
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.neighbors import NearestNeighbors
from sklearn.utils import check_array, check_random_state, gen_batches
from sklearn.utils._param_validation import Interval, validate_params
from sklearn.utils.parallel import Parallel, delayed
from sklearn.utils.validation import check_is_fitted, validate_data
@validate_params(
    {
        "X": ["array-like"],
        "quantile": [Interval(Real, 0, 1, closed="both")],
        "n_samples": [Interval(Integral, 1, None, closed="left"), None],
        "random_state": ["random_state"],
        "n_jobs": [Integral, None],
    },
    prefer_skip_nested_validation=True,
)
def estimate_bandwidth(X, *, quantile=0.3, n_samples=None, random_state=0, n_jobs=None):
    """Estimate the bandwidth to use with the mean-shift algorithm.

    This function takes time at least quadratic in `n_samples`. For large
    datasets, it is wise to subsample by setting `n_samples`. Alternatively,
    the parameter `bandwidth` can be set to a small value without estimating
    it.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Input points.

    quantile : float, default=0.3
        Should be between [0, 1]
        0.5 means that the median of all pairwise distances is used.

    n_samples : int, default=None
        The number of samples to use. If not given, all samples are used.

    random_state : int, RandomState instance, default=0
        The generator used to randomly select the samples from input points
        for bandwidth estimation. Use an int to make the randomness
        deterministic.
        See :term:`Glossary <random_state>`.

    n_jobs : int, default=None
        The number of parallel jobs to run for neighbors search.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    Returns
    -------
    bandwidth : float
        The bandwidth parameter.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.cluster import estimate_bandwidth
    >>> X = np.array([[1, 1], [2, 1], [1, 0],
    ...               [4, 7], [3, 5], [3, 6]])
    >>> estimate_bandwidth(X, quantile=0.5)
    np.float64(1.61)
    """
    X = check_array(X)
    random_state = check_random_state(random_state)
    if n_samples is not None:
        # Subsample without replacement to bound the quadratic cost.
        idx = random_state.permutation(X.shape[0])[:n_samples]
        X = X[idx]
    # Number of neighbors corresponding to the requested quantile;
    # NearestNeighbors cannot be fit with n_neighbors = 0, so floor at 1.
    n_neighbors = max(1, int(X.shape[0] * quantile))
    nbrs = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=n_jobs)
    nbrs.fit(X)

    # Average, over all points, of the distance to the n_neighbors-th
    # nearest neighbor. Query in batches of 500 to limit peak memory.
    bandwidth = 0.0
    for batch in gen_batches(len(X), 500):
        d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
        bandwidth += np.max(d, axis=1).sum()

    return bandwidth / X.shape[0]
# separate function for each seed's iterative loop
def _mean_shift_single_seed(my_mean, X, nbrs, max_iter):
    """Climb the density gradient from one seed until convergence or max_iter.

    Returns the final mean (as a tuple), the number of points inside its
    bandwidth, and the number of completed iterations.
    """
    bandwidth = nbrs.get_params()["radius"]
    # Convergence threshold on the displacement of the mean.
    stop_thresh = 1e-3 * bandwidth
    n_iter = 0
    while True:
        # Gather the points inside the current kernel window.
        neighbor_idx = nbrs.radius_neighbors([my_mean], bandwidth, return_distance=False)[0]
        points_within = X[neighbor_idx]
        if len(points_within) == 0:
            # Depending on seeding strategy this condition may occur
            break
        # Shift the kernel to the mean of its members.
        previous_mean, my_mean = my_mean, np.mean(points_within, axis=0)
        # Stop once the shift is negligible or the iteration budget is spent.
        if np.linalg.norm(my_mean - previous_mean) <= stop_thresh or n_iter == max_iter:
            break
        n_iter += 1
    return tuple(my_mean), len(points_within), n_iter
@validate_params(
    {"X": ["array-like"]},
    prefer_skip_nested_validation=False,
)
def mean_shift(
    X,
    *,
    bandwidth=None,
    seeds=None,
    bin_seeding=False,
    min_bin_freq=1,
    cluster_all=True,
    max_iter=300,
    n_jobs=None,
):
    """Perform mean shift clustering of data using a flat kernel.

    Functional interface around :class:`MeanShift`.

    Read more in the :ref:`User Guide <mean_shift>`.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Input data.

    bandwidth : float, default=None
        Kernel bandwidth. If not None, must be in the range [0, +inf).
        If None, the bandwidth is determined using a heuristic based on
        the median of all pairwise distances. This will take quadratic time in
        the number of samples. The sklearn.cluster.estimate_bandwidth function
        can be used to do this more efficiently.

    seeds : array-like of shape (n_seeds, n_features) or None
        Point used as initial kernel locations. If None and bin_seeding=False,
        each data point is used as a seed. If None and bin_seeding=True,
        see bin_seeding.

    bin_seeding : bool, default=False
        If true, initial kernel locations are not locations of all
        points, but rather the location of the discretized version of
        points, where points are binned onto a grid whose coarseness
        corresponds to the bandwidth. Setting this option to True will speed
        up the algorithm because fewer seeds will be initialized.
        Ignored if seeds argument is not None.

    min_bin_freq : int, default=1
        To speed up the algorithm, accept only those bins with at least
        min_bin_freq points as seeds.

    cluster_all : bool, default=True
        If true, then all points are clustered, even those orphans that are
        not within any kernel. Orphans are assigned to the nearest kernel.
        If false, then orphans are given cluster label -1.

    max_iter : int, default=300
        Maximum number of iterations, per seed point before the clustering
        operation terminates (for that seed point), if has not converged yet.

    n_jobs : int, default=None
        The number of jobs to use for the computation. The following tasks benefit
        from the parallelization:

        - The search of nearest neighbors for bandwidth estimation and label
          assignments. See the details in the docstring of the
          ``NearestNeighbors`` class.
        - Hill-climbing optimization for all seeds.

        See :term:`Glossary <n_jobs>` for more details.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

        .. versionadded:: 0.17
           Parallel Execution using *n_jobs*.

    Returns
    -------
    cluster_centers : ndarray of shape (n_clusters, n_features)
        Coordinates of cluster centers.

    labels : ndarray of shape (n_samples,)
        Cluster labels for each point.

    Notes
    -----
    For a usage example, see
    :ref:`sphx_glr_auto_examples_cluster_plot_mean_shift.py`.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.cluster import mean_shift
    >>> X = np.array([[1, 1], [2, 1], [1, 0],
    ...               [4, 7], [3, 5], [3, 6]])
    >>> cluster_centers, labels = mean_shift(X, bandwidth=2)
    >>> cluster_centers
    array([[3.33, 6.  ],
           [1.33, 0.66]])
    >>> labels
    array([1, 1, 1, 0, 0, 0])
    """
    # Delegate all the work to the estimator, then expose its fitted state.
    estimator = MeanShift(
        bandwidth=bandwidth,
        seeds=seeds,
        bin_seeding=bin_seeding,
        min_bin_freq=min_bin_freq,
        cluster_all=cluster_all,
        max_iter=max_iter,
        n_jobs=n_jobs,
    )
    estimator.fit(X)
    return estimator.cluster_centers_, estimator.labels_
def get_bin_seeds(X, bin_size, min_bin_freq=1):
    """Find seeds for mean_shift.

    Seeds are obtained by snapping every point onto a grid whose lines are
    spaced ``bin_size`` apart, then keeping the grid cells that received at
    least ``min_bin_freq`` points.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Input points, the same points that will be used in mean_shift.

    bin_size : float
        Controls the coarseness of the binning. Smaller values lead
        to more seeding (which is computationally more expensive). If you're
        not sure how to set this, set it to the value of the bandwidth used
        in clustering.mean_shift.

    min_bin_freq : int, default=1
        Only bins with at least min_bin_freq will be selected as seeds.
        Raising this value decreases the number of seeds found, which
        makes mean_shift computationally cheaper.

    Returns
    -------
    bin_seeds : array-like of shape (n_samples, n_features)
        Points used as initial kernel positions in clustering.mean_shift.
    """
    # A zero-width grid cannot bin anything: fall back to the raw points.
    if bin_size == 0:
        return X

    # Count how many points land in each grid cell.
    cell_counts = {}
    for point in X:
        cell = tuple(np.round(point / bin_size))
        cell_counts[cell] = cell_counts.get(cell, 0) + 1

    # Keep only the sufficiently populated cells as seed candidates.
    bin_seeds = np.array(
        [cell for cell, count in cell_counts.items() if count >= min_bin_freq],
        dtype=np.float32,
    )

    # If binning did not reduce the number of candidates it brings no
    # benefit: warn and use the data points themselves as seeds.
    if len(bin_seeds) == len(X):
        warnings.warn(
            "Binning data failed with provided bin_size=%f, using data points as seeds."
            % bin_size
        )
        return X

    # Map grid coordinates back to data space.
    return bin_seeds * bin_size
class MeanShift(ClusterMixin, BaseEstimator):
    """Mean shift clustering using a flat kernel.

    Mean shift clustering aims to discover "blobs" in a smooth density of
    samples. It is a centroid-based algorithm, which works by updating
    candidates for centroids to be the mean of the points within a given
    region. These candidates are then filtered in a post-processing stage to
    eliminate near-duplicates to form the final set of centroids.

    Seeding is performed using a binning technique for scalability.

    For an example of how to use MeanShift clustering, refer to:
    :ref:`sphx_glr_auto_examples_cluster_plot_mean_shift.py`.

    Read more in the :ref:`User Guide <mean_shift>`.

    Parameters
    ----------
    bandwidth : float, default=None
        Bandwidth used in the flat kernel.

        If not given, the bandwidth is estimated using
        sklearn.cluster.estimate_bandwidth; see the documentation for that
        function for hints on scalability (see also the Notes, below).

    seeds : array-like of shape (n_samples, n_features), default=None
        Seeds used to initialize kernels. If not set,
        the seeds are calculated by clustering.get_bin_seeds
        with bandwidth as the grid size and default values for
        other parameters.

    bin_seeding : bool, default=False
        If true, initial kernel locations are not locations of all
        points, but rather the location of the discretized version of
        points, where points are binned onto a grid whose coarseness
        corresponds to the bandwidth. Setting this option to True will speed
        up the algorithm because fewer seeds will be initialized.
        The default value is False.
        Ignored if seeds argument is not None.

    min_bin_freq : int, default=1
        To speed up the algorithm, accept only those bins with at least
        min_bin_freq points as seeds.

    cluster_all : bool, default=True
        If true, then all points are clustered, even those orphans that are
        not within any kernel. Orphans are assigned to the nearest kernel.
        If false, then orphans are given cluster label -1.

    n_jobs : int, default=None
        The number of jobs to use for the computation. The following tasks benefit
        from the parallelization:

        - The search of nearest neighbors for bandwidth estimation and label
          assignments. See the details in the docstring of the
          ``NearestNeighbors`` class.
        - Hill-climbing optimization for all seeds.

        See :term:`Glossary <n_jobs>` for more details.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    max_iter : int, default=300
        Maximum number of iterations, per seed point before the clustering
        operation terminates (for that seed point), if has not converged yet.

        .. versionadded:: 0.22

    Attributes
    ----------
    cluster_centers_ : ndarray of shape (n_clusters, n_features)
        Coordinates of cluster centers.

    labels_ : ndarray of shape (n_samples,)
        Labels of each point.

    n_iter_ : int
        Maximum number of iterations performed on each seed.

        .. versionadded:: 0.22

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    KMeans : K-Means clustering.

    Notes
    -----
    Scalability:

    Because this implementation uses a flat kernel and
    a Ball Tree to look up members of each kernel, the complexity will tend
    towards O(T*n*log(n)) in lower dimensions, with n the number of samples
    and T the number of points. In higher dimensions the complexity will
    tend towards O(T*n^2).

    Scalability can be boosted by using fewer seeds, for example by using
    a higher value of min_bin_freq in the get_bin_seeds function.

    Note that the estimate_bandwidth function is much less scalable than the
    mean shift algorithm and will be the bottleneck if it is used.

    References
    ----------
    Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
    feature space analysis". IEEE Transactions on Pattern Analysis and
    Machine Intelligence. 2002. pp. 603-619.

    Examples
    --------
    >>> from sklearn.cluster import MeanShift
    >>> import numpy as np
    >>> X = np.array([[1, 1], [2, 1], [1, 0],
    ...               [4, 7], [3, 5], [3, 6]])
    >>> clustering = MeanShift(bandwidth=2).fit(X)
    >>> clustering.labels_
    array([1, 1, 1, 0, 0, 0])
    >>> clustering.predict([[0, 0], [5, 5]])
    array([1, 0])
    >>> clustering
    MeanShift(bandwidth=2)

    For a comparison of Mean Shift clustering with other clustering algorithms, see
    :ref:`sphx_glr_auto_examples_cluster_plot_cluster_comparison.py`
    """

    # Declarative constraints checked by `_fit_context` before fitting.
    _parameter_constraints: dict = {
        "bandwidth": [Interval(Real, 0, None, closed="neither"), None],
        "seeds": ["array-like", None],
        "bin_seeding": ["boolean"],
        "min_bin_freq": [Interval(Integral, 1, None, closed="left")],
        "cluster_all": ["boolean"],
        "n_jobs": [Integral, None],
        "max_iter": [Interval(Integral, 0, None, closed="left")],
    }

    def __init__(
        self,
        *,
        bandwidth=None,
        seeds=None,
        bin_seeding=False,
        min_bin_freq=1,
        cluster_all=True,
        n_jobs=None,
        max_iter=300,
    ):
        # Store constructor parameters verbatim (scikit-learn convention:
        # no validation or transformation before `fit`).
        self.bandwidth = bandwidth
        self.seeds = seeds
        self.bin_seeding = bin_seeding
        self.cluster_all = cluster_all
        self.min_bin_freq = min_bin_freq
        self.n_jobs = n_jobs
        self.max_iter = max_iter

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """Perform clustering.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Samples to cluster.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : object
            Fitted instance.
        """
        X = validate_data(self, X)
        bandwidth = self.bandwidth
        if bandwidth is None:
            # Estimate a bandwidth from the data (quadratic in n_samples).
            bandwidth = estimate_bandwidth(X, n_jobs=self.n_jobs)

        seeds = self.seeds
        if seeds is None:
            if self.bin_seeding:
                seeds = get_bin_seeds(X, bandwidth, self.min_bin_freq)
            else:
                seeds = X
        n_samples, n_features = X.shape
        center_intensity_dict = {}

        # We use n_jobs=1 because this will be used in nested calls under
        # parallel calls to _mean_shift_single_seed so there is no need
        # for further parallelism.
        nbrs = NearestNeighbors(radius=bandwidth, n_jobs=1).fit(X)

        # execute iterations on all seeds in parallel
        all_res = Parallel(n_jobs=self.n_jobs)(
            delayed(_mean_shift_single_seed)(seed, X, nbrs, self.max_iter)
            for seed in seeds
        )
        # copy results in a dictionary: final center -> number of points within
        for i in range(len(seeds)):
            if all_res[i][1]:  # i.e. len(points_within) > 0
                center_intensity_dict[all_res[i][0]] = all_res[i][1]

        self.n_iter_ = max([x[2] for x in all_res])

        if not center_intensity_dict:
            # nothing near seeds
            raise ValueError(
                "No point was within bandwidth=%f of any seed. Try a different seeding"
                " strategy or increase the bandwidth."
                % bandwidth
            )

        # POST PROCESSING: remove near duplicate points
        # If the distance between two kernels is less than the bandwidth,
        # then we have to remove one because it is a duplicate. Remove the
        # one with fewer points.
        sorted_by_intensity = sorted(
            center_intensity_dict.items(),
            key=lambda tup: (tup[1], tup[0]),
            reverse=True,
        )
        sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
        unique = np.ones(len(sorted_centers), dtype=bool)
        nbrs = NearestNeighbors(radius=bandwidth, n_jobs=self.n_jobs).fit(
            sorted_centers
        )
        # Sweep centers in decreasing intensity; each kept center suppresses
        # all weaker centers within one bandwidth of it.
        for i, center in enumerate(sorted_centers):
            if unique[i]:
                neighbor_idxs = nbrs.radius_neighbors([center], return_distance=False)[
                    0
                ]
                unique[neighbor_idxs] = 0
                unique[i] = 1  # leave the current point as unique
        cluster_centers = sorted_centers[unique]

        # ASSIGN LABELS: a point belongs to the cluster that it is closest to
        nbrs = NearestNeighbors(n_neighbors=1, n_jobs=self.n_jobs).fit(cluster_centers)
        labels = np.zeros(n_samples, dtype=int)
        distances, idxs = nbrs.kneighbors(X)
        if self.cluster_all:
            labels = idxs.flatten()
        else:
            # Orphans (farther than bandwidth from every center) keep label -1.
            labels.fill(-1)
            bool_selector = distances.flatten() <= bandwidth
            labels[bool_selector] = idxs.flatten()[bool_selector]

        self.cluster_centers_, self.labels_ = cluster_centers, labels
        return self

    def predict(self, X):
        """Predict the closest cluster each sample in X belongs to.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            New data to predict.

        Returns
        -------
        labels : ndarray of shape (n_samples,)
            Index of the cluster each sample belongs to.
        """
        check_is_fitted(self)
        X = validate_data(self, X, reset=False)
        # Fitted centers are already validated as finite: skip re-checking.
        with config_context(assume_finite=True):
            return pairwise_distances_argmin(X, self.cluster_centers_)
"""Affinity Propagation clustering algorithm."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from numbers import Integral, Real
import numpy as np
from sklearn._config import config_context
from sklearn.base import BaseEstimator, ClusterMixin, _fit_context
from sklearn.exceptions import ConvergenceWarning
from sklearn.metrics import euclidean_distances, pairwise_distances_argmin
from sklearn.utils import check_random_state
from sklearn.utils._param_validation import Interval, StrOptions, validate_params
from sklearn.utils.validation import check_is_fitted, validate_data
def _equal_similarities_and_preferences(S, preference):
def all_equal_preferences():
return np.all(preference == preference.flat[0])
def all_equal_similarities():
# Create mask to ignore diagonal of S
mask = np.ones(S.shape, dtype=bool)
np.fill_diagonal(mask, 0)
return np.all(S[mask].flat == S[mask].flat[0])
return all_equal_preferences() and all_equal_similarities()
def _affinity_propagation(
    S,
    *,
    preference,
    convergence_iter,
    max_iter,
    damping,
    verbose,
    return_n_iter,
    random_state,
):
    """Main affinity propagation algorithm.

    Parameters
    ----------
    S : ndarray of shape (n_samples, n_samples)
        Matrix of similarities between points. Modified in place: the
        preference is written on the diagonal and a small random jitter
        is added to break degeneracies.

    preference : ndarray
        Preference value(s) to place on the diagonal of ``S``.

    convergence_iter : int
        Number of iterations with no change in the estimated exemplars
        that declares convergence.

    max_iter : int
        Maximum number of message-passing iterations.

    damping : float
        Damping factor applied to the responsibility/availability updates.

    verbose : bool
        Whether to print convergence information.

    return_n_iter : bool
        Whether to also return the number of iterations run.

    random_state : RandomState instance
        Generator used for the degeneracy-breaking noise.

    Returns
    -------
    cluster_centers_indices : ndarray or list
        Indices of the exemplars (an empty list when no cluster emerged).

    labels : ndarray of shape (n_samples,)
        Cluster label of each sample (all ``-1`` when no cluster emerged).

    n_iter : int
        Number of iterations run; only returned when ``return_n_iter``
        is True.
    """
    n_samples = S.shape[0]

    if n_samples == 1 or _equal_similarities_and_preferences(S, preference):
        # It makes no sense to run the algorithm in this case, so return 1 or
        # n_samples clusters, depending on preferences
        warnings.warn(
            "All samples have mutually equal similarities. "
            "Returning arbitrary cluster center(s)."
        )
        # S.flat[n_samples - 1] is an arbitrary off-diagonal similarity
        # (last entry of the first row); with all similarities equal any
        # off-diagonal entry is representative.
        if preference.flat[0] > S.flat[n_samples - 1]:
            return (
                (np.arange(n_samples), np.arange(n_samples), 0)
                if return_n_iter
                else (np.arange(n_samples), np.arange(n_samples))
            )
        else:
            return (
                (np.array([0]), np.array([0] * n_samples), 0)
                if return_n_iter
                else (np.array([0]), np.array([0] * n_samples))
            )

    # Place preference on the diagonal of S
    S.flat[:: (n_samples + 1)] = preference

    A = np.zeros((n_samples, n_samples))
    R = np.zeros((n_samples, n_samples))  # Initialize messages
    # Intermediate results
    tmp = np.zeros((n_samples, n_samples))

    # Remove degeneracies: tiny random jitter so that ties between
    # similarities do not produce oscillating solutions.
    S += (
        np.finfo(S.dtype).eps * S + np.finfo(S.dtype).tiny * 100
    ) * random_state.standard_normal(size=(n_samples, n_samples))

    # Execute parallel affinity propagation updates
    # e is a circular buffer of the exemplar indicator over the last
    # convergence_iter iterations.
    e = np.zeros((n_samples, convergence_iter))

    ind = np.arange(n_samples)

    for it in range(max_iter):
        # tmp = A + S; compute responsibilities
        np.add(A, S, tmp)
        I = np.argmax(tmp, axis=1)
        Y = tmp[ind, I]  # np.max(A + S, axis=1)
        tmp[ind, I] = -np.inf
        Y2 = np.max(tmp, axis=1)  # second-largest value per row

        # tmp = Rnew
        np.subtract(S, Y[:, None], tmp)
        tmp[ind, I] = S[ind, I] - Y2

        # Damping
        tmp *= 1 - damping
        R *= damping
        R += tmp

        # tmp = Rp; compute availabilities
        np.maximum(R, 0, out=tmp)
        tmp.flat[:: n_samples + 1] = R.flat[:: n_samples + 1]

        # tmp = -Anew
        tmp -= np.sum(tmp, axis=0)
        dA = np.diag(tmp).copy()
        tmp.clip(0, np.inf, tmp)
        tmp.flat[:: n_samples + 1] = dA

        # Damping
        tmp *= 1 - damping
        A *= damping
        A -= tmp

        # Check for convergence
        E = (np.diag(A) + np.diag(R)) > 0
        e[:, it % convergence_iter] = E
        K = np.sum(E, axis=0)

        if it >= convergence_iter:
            se = np.sum(e, axis=1)
            # A sample is stable when it was an exemplar in all (or none)
            # of the last convergence_iter iterations.
            unconverged = np.sum((se == convergence_iter) + (se == 0)) != n_samples
            # NOTE(review): `it == max_iter` can never be true inside
            # range(max_iter); kept as-is to match upstream behavior.
            if (not unconverged and (K > 0)) or (it == max_iter):
                never_converged = False
                if verbose:
                    print("Converged after %d iterations." % it)
                break
    else:
        # for/else: reached only when the loop exhausted max_iter
        # iterations without breaking out via the convergence test.
        never_converged = True
        if verbose:
            print("Did not converge")

    I = np.flatnonzero(E)
    K = I.size  # Identify exemplars

    if K > 0:
        if never_converged:
            warnings.warn(
                (
                    "Affinity propagation did not converge, this model "
                    "may return degenerate cluster centers and labels."
                ),
                ConvergenceWarning,
            )
        c = np.argmax(S[:, I], axis=1)
        c[I] = np.arange(K)  # Identify clusters
        # Refine the final set of exemplars and clusters and return results
        for k in range(K):
            ii = np.asarray(c == k).nonzero()[0]
            j = np.argmax(np.sum(S[ii[:, np.newaxis], ii], axis=0))
            I[k] = ii[j]

        c = np.argmax(S[:, I], axis=1)
        c[I] = np.arange(K)
        labels = I[c]
        # Reduce labels to a sorted, gapless, list
        cluster_centers_indices = np.unique(labels)
        labels = np.searchsorted(cluster_centers_indices, labels)
    else:
        warnings.warn(
            (
                "Affinity propagation did not converge and this model "
                "will not have any cluster centers."
            ),
            ConvergenceWarning,
        )
        labels = np.array([-1] * n_samples)
        cluster_centers_indices = []

    if return_n_iter:
        return cluster_centers_indices, labels, it + 1
    else:
        return cluster_centers_indices, labels
###############################################################################
# Public API
@validate_params(
    {
        "S": ["array-like"],
        "return_n_iter": ["boolean"],
    },
    prefer_skip_nested_validation=False,
)
def affinity_propagation(
    S,
    *,
    preference=None,
    convergence_iter=15,
    max_iter=200,
    damping=0.5,
    copy=True,
    verbose=False,
    return_n_iter=False,
    random_state=None,
):
    """Perform Affinity Propagation Clustering of data.

    Read more in the :ref:`User Guide <affinity_propagation>`.

    Parameters
    ----------
    S : array-like of shape (n_samples, n_samples)
        Matrix of similarities between points.

    preference : array-like of shape (n_samples,) or float, default=None
        Preferences for each point - points with larger values of
        preferences are more likely to be chosen as exemplars. The number of
        exemplars, i.e. of clusters, is influenced by the input preferences
        value. If the preferences are not passed as arguments, they will be
        set to the median of the input similarities (resulting in a moderate
        number of clusters). For a smaller amount of clusters, this can be set
        to the minimum value of the similarities.

    convergence_iter : int, default=15
        Number of iterations with no change in the number
        of estimated clusters that stops the convergence.

    max_iter : int, default=200
        Maximum number of iterations.

    damping : float, default=0.5
        Damping factor between 0.5 and 1.

    copy : bool, default=True
        If copy is False, the affinity matrix is modified inplace by the
        algorithm, for memory efficiency.

    verbose : bool, default=False
        The verbosity level.

    return_n_iter : bool, default=False
        Whether or not to return the number of iterations.

    random_state : int, RandomState instance or None, default=None
        Pseudo-random number generator to control the starting state.
        Use an int for reproducible results across function calls.
        See the :term:`Glossary <random_state>`.

        .. versionadded:: 0.23
            this parameter was previously hardcoded as 0.

    Returns
    -------
    cluster_centers_indices : ndarray of shape (n_clusters,)
        Index of clusters centers.

    labels : ndarray of shape (n_samples,)
        Cluster labels for each point.

    n_iter : int
        Number of iterations run. Returned only if `return_n_iter` is
        set to True.

    Notes
    -----
    For an example usage,
    see :ref:`sphx_glr_auto_examples_cluster_plot_affinity_propagation.py`.

    You may also check out,
    :ref:`sphx_glr_auto_examples_applications_plot_stock_market.py`

    When the algorithm does not converge, it will still return an array of
    ``cluster_center_indices`` and labels if there are any exemplars/clusters,
    however they may be degenerate and should be used with caution.

    When all training samples have equal similarities and equal preferences,
    the assignment of cluster centers and labels depends on the preference.
    If the preference is smaller than the similarities, a single cluster center
    and label ``0`` for every sample will be returned. Otherwise, every
    training sample becomes its own cluster center and is assigned a unique
    label.

    References
    ----------
    Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
    Between Data Points", Science Feb. 2007

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.cluster import affinity_propagation
    >>> from sklearn.metrics.pairwise import euclidean_distances
    >>> X = np.array([[1, 2], [1, 4], [1, 0],
    ...               [4, 2], [4, 4], [4, 0]])
    >>> S = -euclidean_distances(X, squared=True)
    >>> cluster_centers_indices, labels = affinity_propagation(S, random_state=0)
    >>> cluster_centers_indices
    array([0, 3])
    >>> labels
    array([0, 0, 0, 1, 1, 1])
    """
    # Thin functional wrapper: delegate all the work (including parameter
    # validation) to the estimator class with a precomputed affinity.
    model = AffinityPropagation(
        damping=damping,
        max_iter=max_iter,
        convergence_iter=convergence_iter,
        copy=copy,
        preference=preference,
        affinity="precomputed",
        verbose=verbose,
        random_state=random_state,
    )
    model.fit(S)
    results = (model.cluster_centers_indices_, model.labels_)
    if return_n_iter:
        results += (model.n_iter_,)
    return results
class AffinityPropagation(ClusterMixin, BaseEstimator):
    """Perform Affinity Propagation Clustering of data.

    Read more in the :ref:`User Guide <affinity_propagation>`.

    Parameters
    ----------
    damping : float, default=0.5
        Damping factor in the range `[0.5, 1.0)` is the extent to
        which the current value is maintained relative to
        incoming values (weighted 1 - damping). This in order
        to avoid numerical oscillations when updating these
        values (messages).

    max_iter : int, default=200
        Maximum number of iterations.

    convergence_iter : int, default=15
        Number of iterations with no change in the number
        of estimated clusters that stops the convergence.

    copy : bool, default=True
        Make a copy of input data.

    preference : array-like of shape (n_samples,) or float, default=None
        Preferences for each point - points with larger values of
        preferences are more likely to be chosen as exemplars. The number
        of exemplars, ie of clusters, is influenced by the input
        preferences value. If the preferences are not passed as arguments,
        they will be set to the median of the input similarities.

    affinity : {'euclidean', 'precomputed'}, default='euclidean'
        Which affinity to use. At the moment 'precomputed' and
        ``euclidean`` are supported. 'euclidean' uses the
        negative squared euclidean distance between points.

    verbose : bool, default=False
        Whether to be verbose.

    random_state : int, RandomState instance or None, default=None
        Pseudo-random number generator to control the starting state.
        Use an int for reproducible results across function calls.
        See the :term:`Glossary <random_state>`.

        .. versionadded:: 0.23
            this parameter was previously hardcoded as 0.

    Attributes
    ----------
    cluster_centers_indices_ : ndarray of shape (n_clusters,)
        Indices of cluster centers.

    cluster_centers_ : ndarray of shape (n_clusters, n_features)
        Cluster centers (if affinity != ``precomputed``).

    labels_ : ndarray of shape (n_samples,)
        Labels of each point.

    affinity_matrix_ : ndarray of shape (n_samples, n_samples)
        Stores the affinity matrix used in ``fit``.

    n_iter_ : int
        Number of iterations taken to converge.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    AgglomerativeClustering : Recursively merges the pair of
        clusters that minimally increases a given linkage distance.
    FeatureAgglomeration : Similar to AgglomerativeClustering,
        but recursively merges features instead of samples.
    KMeans : K-Means clustering.
    MiniBatchKMeans : Mini-Batch K-Means clustering.
    MeanShift : Mean shift clustering using a flat kernel.
    SpectralClustering : Apply clustering to a projection
        of the normalized Laplacian.

    Notes
    -----
    The algorithmic complexity of affinity propagation is quadratic
    in the number of points.

    When the algorithm does not converge, it will still return an array of
    ``cluster_center_indices`` and labels if there are any exemplars/clusters,
    however they may be degenerate and should be used with caution.

    When ``fit`` does not converge, ``cluster_centers_`` is still populated
    however it may be degenerate. In such a case, proceed with caution.
    If ``fit`` does not converge and fails to produce any ``cluster_centers_``
    then ``predict`` will label every sample as ``-1``.

    When all training samples have equal similarities and equal preferences,
    the assignment of cluster centers and labels depends on the preference.
    If the preference is smaller than the similarities, ``fit`` will result in
    a single cluster center and label ``0`` for every sample. Otherwise, every
    training sample becomes its own cluster center and is assigned a unique
    label.

    References
    ----------
    Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
    Between Data Points", Science Feb. 2007

    Examples
    --------
    >>> from sklearn.cluster import AffinityPropagation
    >>> import numpy as np
    >>> X = np.array([[1, 2], [1, 4], [1, 0],
    ...               [4, 2], [4, 4], [4, 0]])
    >>> clustering = AffinityPropagation(random_state=5).fit(X)
    >>> clustering
    AffinityPropagation(random_state=5)
    >>> clustering.labels_
    array([0, 0, 0, 1, 1, 1])
    >>> clustering.predict([[0, 0], [4, 4]])
    array([0, 1])
    >>> clustering.cluster_centers_
    array([[1, 2],
           [4, 2]])

    For an example usage,
    see :ref:`sphx_glr_auto_examples_cluster_plot_affinity_propagation.py`.

    For a comparison of Affinity Propagation with other clustering algorithms, see
    :ref:`sphx_glr_auto_examples_cluster_plot_cluster_comparison.py`
    """

    # Declarative parameter constraints, enforced by `_fit_context`
    # before `fit` runs.
    _parameter_constraints: dict = {
        "damping": [Interval(Real, 0.5, 1.0, closed="left")],
        "max_iter": [Interval(Integral, 1, None, closed="left")],
        "convergence_iter": [Interval(Integral, 1, None, closed="left")],
        "copy": ["boolean"],
        "preference": [
            "array-like",
            Interval(Real, None, None, closed="neither"),
            None,
        ],
        "affinity": [StrOptions({"euclidean", "precomputed"})],
        "verbose": ["verbose"],
        "random_state": ["random_state"],
    }

    def __init__(
        self,
        *,
        damping=0.5,
        max_iter=200,
        convergence_iter=15,
        copy=True,
        preference=None,
        affinity="euclidean",
        verbose=False,
        random_state=None,
    ):
        # Parameters are stored verbatim; validation is deferred to `fit`
        # (scikit-learn convention).
        self.damping = damping
        self.max_iter = max_iter
        self.convergence_iter = convergence_iter
        self.copy = copy
        self.verbose = verbose
        self.preference = preference
        self.affinity = affinity
        self.random_state = random_state

    def __sklearn_tags__(self):
        # With a precomputed affinity the input is an (n_samples, n_samples)
        # pairwise matrix; sparse input is only accepted for "euclidean".
        tags = super().__sklearn_tags__()
        tags.input_tags.pairwise = self.affinity == "precomputed"
        tags.input_tags.sparse = self.affinity != "precomputed"
        return tags

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """Fit the clustering from features, or affinity matrix.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
                array-like of shape (n_samples, n_samples)
            Training instances to cluster, or similarities / affinities between
            instances if ``affinity='precomputed'``. If a sparse feature matrix
            is provided, it will be converted into a sparse ``csr_matrix``.

        y : Ignored
            Not used, present here for API consistency by convention.

        Returns
        -------
        self
            Returns the instance itself.
        """
        if self.affinity == "precomputed":
            # force_writeable: _affinity_propagation mutates the matrix in
            # place (preference on the diagonal, degeneracy-breaking jitter).
            X = validate_data(self, X, copy=self.copy, force_writeable=True)
            self.affinity_matrix_ = X
        else:  # self.affinity == "euclidean"
            X = validate_data(self, X, accept_sparse="csr")
            self.affinity_matrix_ = -euclidean_distances(X, squared=True)

        if self.affinity_matrix_.shape[0] != self.affinity_matrix_.shape[1]:
            raise ValueError(
                "The matrix of similarities must be a square array. "
                f"Got {self.affinity_matrix_.shape} instead."
            )

        if self.preference is None:
            # Median similarity as default preference yields a moderate
            # number of clusters.
            preference = np.median(self.affinity_matrix_)
        else:
            preference = self.preference
        preference = np.asarray(preference)

        random_state = check_random_state(self.random_state)

        (
            self.cluster_centers_indices_,
            self.labels_,
            self.n_iter_,
        ) = _affinity_propagation(
            self.affinity_matrix_,
            max_iter=self.max_iter,
            convergence_iter=self.convergence_iter,
            preference=preference,
            damping=self.damping,
            verbose=self.verbose,
            return_n_iter=True,
            random_state=random_state,
        )

        if self.affinity != "precomputed":
            # cluster_centers_ only exists for feature-space input; copy so
            # it does not alias the (possibly user-owned) training array.
            self.cluster_centers_ = X[self.cluster_centers_indices_].copy()

        return self

    def predict(self, X):
        """Predict the closest cluster each sample in X belongs to.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            New data to predict. If a sparse matrix is provided, it will be
            converted into a sparse ``csr_matrix``.

        Returns
        -------
        labels : ndarray of shape (n_samples,)
            Cluster labels.
        """
        check_is_fitted(self)
        X = validate_data(self, X, reset=False, accept_sparse="csr")
        if not hasattr(self, "cluster_centers_"):
            # `fit` only sets cluster_centers_ when affinity != "precomputed".
            raise ValueError(
                "Predict method is not supported when affinity='precomputed'."
            )

        if self.cluster_centers_.shape[0] > 0:
            with config_context(assume_finite=True):
                return pairwise_distances_argmin(X, self.cluster_centers_)
        else:
            # Fit produced no exemplars: label every sample as noise (-1).
            warnings.warn(
                (
                    "This model does not have any cluster centers "
                    "because affinity propagation did not converge. "
                    "Labeling every sample as '-1'."
                ),
                ConvergenceWarning,
            )
            return np.array([-1] * X.shape[0])

    def fit_predict(self, X, y=None):
        """Fit clustering from features/affinity matrix; return cluster labels.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
                array-like of shape (n_samples, n_samples)
            Training instances to cluster, or similarities / affinities between
            instances if ``affinity='precomputed'``. If a sparse feature matrix
            is provided, it will be converted into a sparse ``csr_matrix``.

        y : Ignored
            Not used, present here for API consistency by convention.

        Returns
        -------
        labels : ndarray of shape (n_samples,)
            Cluster labels.
        """
        # Delegates to ClusterMixin.fit_predict (fit, then return labels_).
        return super().fit_predict(X, y)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/cluster/_kmeans.py | sklearn/cluster/_kmeans.py | """K-means clustering."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from abc import ABC, abstractmethod
from numbers import Integral, Real
import numpy as np
import scipy.sparse as sp
from sklearn.base import (
BaseEstimator,
ClassNamePrefixFeaturesOutMixin,
ClusterMixin,
TransformerMixin,
_fit_context,
)
from sklearn.cluster._k_means_common import (
CHUNK_SIZE,
_inertia_dense,
_inertia_sparse,
_is_same_clustering,
)
from sklearn.cluster._k_means_elkan import (
elkan_iter_chunked_dense,
elkan_iter_chunked_sparse,
init_bounds_dense,
init_bounds_sparse,
)
from sklearn.cluster._k_means_lloyd import (
lloyd_iter_chunked_dense,
lloyd_iter_chunked_sparse,
)
from sklearn.cluster._k_means_minibatch import (
_minibatch_update_dense,
_minibatch_update_sparse,
)
from sklearn.exceptions import ConvergenceWarning
from sklearn.metrics.pairwise import _euclidean_distances, euclidean_distances
from sklearn.utils import check_array, check_random_state
from sklearn.utils._openmp_helpers import _openmp_effective_n_threads
from sklearn.utils._param_validation import Interval, StrOptions, validate_params
from sklearn.utils.extmath import row_norms
from sklearn.utils.parallel import (
_get_threadpool_controller,
_threadpool_controller_decorator,
)
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.utils.sparsefuncs_fast import assign_rows_csr
from sklearn.utils.validation import (
_check_sample_weight,
_is_arraylike_not_scalar,
check_is_fitted,
validate_data,
)
###############################################################################
# Initialization heuristic
@validate_params(
    {
        "X": ["array-like", "sparse matrix"],
        "n_clusters": [Interval(Integral, 1, None, closed="left")],
        "sample_weight": ["array-like", None],
        "x_squared_norms": ["array-like", None],
        "random_state": ["random_state"],
        "n_local_trials": [Interval(Integral, 1, None, closed="left"), None],
    },
    prefer_skip_nested_validation=True,
)
def kmeans_plusplus(
    X,
    n_clusters,
    *,
    sample_weight=None,
    x_squared_norms=None,
    random_state=None,
    n_local_trials=None,
):
    """Init n_clusters seeds according to k-means++.

    .. versionadded:: 0.24

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The data to pick seeds from.

    n_clusters : int
        The number of centroids to initialize.

    sample_weight : array-like of shape (n_samples,), default=None
        The weights for each observation in `X`. If `None`, all observations
        are assigned equal weight. `sample_weight` is ignored if `init`
        is a callable or a user provided array.

        .. versionadded:: 1.3

    x_squared_norms : array-like of shape (n_samples,), default=None
        Squared Euclidean norm of each data point.

    random_state : int or RandomState instance, default=None
        Determines random number generation for centroid initialization. Pass
        an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    n_local_trials : int, default=None
        The number of seeding trials for each center (except the first),
        of which the one reducing inertia the most is greedily chosen.
        Set to None to make the number of trials depend logarithmically
        on the number of seeds (2+log(k)) which is the recommended setting.
        Setting to 1 disables the greedy cluster selection and recovers the
        vanilla k-means++ algorithm which was empirically shown to work less
        well than its greedy variant.

    Returns
    -------
    centers : ndarray of shape (n_clusters, n_features)
        The initial centers for k-means.

    indices : ndarray of shape (n_clusters,)
        The index location of the chosen centers in the data array X. For a
        given index and center, X[index] = center.

    Notes
    -----
    Selects initial cluster centers for k-mean clustering in a smart way
    to speed up convergence. see: Arthur, D. and Vassilvitskii, S.
    "k-means++: the advantages of careful seeding". ACM-SIAM symposium
    on Discrete algorithms. 2007

    Examples
    --------
    >>> from sklearn.cluster import kmeans_plusplus
    >>> import numpy as np
    >>> X = np.array([[1, 2], [1, 4], [1, 0],
    ...               [10, 2], [10, 4], [10, 0]])
    >>> centers, indices = kmeans_plusplus(X, n_clusters=2, random_state=0)
    >>> centers
    array([[10,  2],
           [ 1,  0]])
    >>> indices
    array([3, 2])
    """
    # Validate the data; the return value is deliberately discarded — the
    # call raises on invalid input and X is used as provided afterwards.
    check_array(X, accept_sparse="csr", dtype=[np.float64, np.float32])
    sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)

    n_samples = X.shape[0]
    if n_samples < n_clusters:
        raise ValueError(
            f"n_samples={n_samples} should be >= n_clusters={n_clusters}."
        )

    # Validate (or compute) the per-sample squared norms.
    if x_squared_norms is None:
        x_squared_norms = row_norms(X, squared=True)
    else:
        x_squared_norms = check_array(x_squared_norms, dtype=X.dtype, ensure_2d=False)
        if x_squared_norms.shape[0] != n_samples:
            raise ValueError(
                f"The length of x_squared_norms {x_squared_norms.shape[0]} should "
                f"be equal to the length of n_samples {n_samples}."
            )

    rng = check_random_state(random_state)

    # Delegate the actual seeding to the private k-means++ implementation.
    return _kmeans_plusplus(
        X, n_clusters, x_squared_norms, sample_weight, rng, n_local_trials
    )
def _kmeans_plusplus(
    X, n_clusters, x_squared_norms, sample_weight, random_state, n_local_trials=None
):
    """Computational component for initialization of n_clusters by
    k-means++. Prior validation of data is assumed.

    Parameters
    ----------
    X : {ndarray, sparse matrix} of shape (n_samples, n_features)
        The data to pick seeds for.

    n_clusters : int
        The number of seeds to choose.

    x_squared_norms : ndarray of shape (n_samples,)
        Squared Euclidean norm of each data point.

    sample_weight : ndarray of shape (n_samples,)
        The weights for each observation in `X`.

    random_state : RandomState instance
        The generator used to initialize the centers.
        See :term:`Glossary <random_state>`.

    n_local_trials : int, default=None
        The number of seeding trials for each center (except the first),
        of which the one reducing inertia the most is greedily chosen.
        Set to None to make the number of trials depend logarithmically
        on the number of seeds (2+log(k)); this is the default.

    Returns
    -------
    centers : ndarray of shape (n_clusters, n_features)
        The initial centers for k-means.

    indices : ndarray of shape (n_clusters,)
        The index location of the chosen centers in the data array X. For a
        given index and center, X[index] = center.
    """
    n_samples, n_features = X.shape

    centers = np.empty((n_clusters, n_features), dtype=X.dtype)

    # Set the number of local seeding trials if none is given
    if n_local_trials is None:
        # This is what Arthur/Vassilvitskii tried, but did not report
        # specific results for other than mentioning in the conclusion
        # that it helped.
        n_local_trials = 2 + int(np.log(n_clusters))

    # Pick first center randomly (weighted by sample_weight) and track
    # its index.
    center_id = random_state.choice(n_samples, p=sample_weight / sample_weight.sum())
    indices = np.full(n_clusters, -1, dtype=int)
    if sp.issparse(X):
        # [[center_id]] keeps the row 2-D so toarray() matches centers[0].
        centers[0] = X[[center_id]].toarray()
    else:
        centers[0] = X[center_id]
    indices[0] = center_id

    # Initialize list of closest distances and calculate current potential
    closest_dist_sq = _euclidean_distances(
        centers[0, np.newaxis], X, Y_norm_squared=x_squared_norms, squared=True
    )
    current_pot = closest_dist_sq @ sample_weight

    # Pick the remaining n_clusters-1 points
    for c in range(1, n_clusters):
        # Choose center candidates by sampling with probability proportional
        # to the squared distance to the closest existing center: inverse
        # transform sampling via searchsorted on the weighted cumulative sum.
        rand_vals = random_state.uniform(size=n_local_trials) * current_pot
        candidate_ids = np.searchsorted(
            np.cumsum(sample_weight * closest_dist_sq), rand_vals
        )
        # XXX: numerical imprecision can result in a candidate_id out of range
        np.clip(candidate_ids, None, closest_dist_sq.size - 1, out=candidate_ids)

        # Compute distances to center candidates
        distance_to_candidates = _euclidean_distances(
            X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True
        )

        # update closest distances squared and potential for each candidate
        np.minimum(closest_dist_sq, distance_to_candidates, out=distance_to_candidates)
        candidates_pot = distance_to_candidates @ sample_weight.reshape(-1, 1)

        # Decide which candidate is the best
        best_candidate = np.argmin(candidates_pot)
        current_pot = candidates_pot[best_candidate]
        closest_dist_sq = distance_to_candidates[best_candidate]
        best_candidate = candidate_ids[best_candidate]

        # Permanently add best center candidate found in local tries
        if sp.issparse(X):
            centers[c] = X[[best_candidate]].toarray()
        else:
            centers[c] = X[best_candidate]
        indices[c] = best_candidate

    return centers, indices
###############################################################################
# K-means batch estimation by EM (expectation maximization)
def _tolerance(X, tol):
"""Return a tolerance which is dependent on the dataset."""
if tol == 0:
return 0
if sp.issparse(X):
variances = mean_variance_axis(X, axis=0)[1]
else:
variances = np.var(X, axis=0)
return np.mean(variances) * tol
@validate_params(
    {
        "X": ["array-like", "sparse matrix"],
        "sample_weight": ["array-like", None],
        "return_n_iter": [bool],
    },
    prefer_skip_nested_validation=False,
)
def k_means(
    X,
    n_clusters,
    *,
    sample_weight=None,
    init="k-means++",
    n_init="auto",
    max_iter=300,
    verbose=False,
    tol=1e-4,
    random_state=None,
    copy_x=True,
    algorithm="lloyd",
    return_n_iter=False,
):
    """Perform K-means clustering algorithm.

    Read more in the :ref:`User Guide <k_means>`.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The observations to cluster. It must be noted that the data
        will be converted to C ordering, which will cause a memory copy
        if the given data is not C-contiguous.

    n_clusters : int
        The number of clusters to form as well as the number of
        centroids to generate.

    sample_weight : array-like of shape (n_samples,), default=None
        The weights for each observation in `X`. If `None`, all observations
        are assigned equal weight. `sample_weight` is not used during
        initialization if `init` is a callable or a user provided array.

    init : {'k-means++', 'random'}, callable or array-like of shape \
            (n_clusters, n_features), default='k-means++'
        Method for initialization:

        - `'k-means++'` : selects initial cluster centers for k-mean
          clustering in a smart way to speed up convergence. See section
          Notes in k_init for more details.
        - `'random'`: choose `n_clusters` observations (rows) at random from data
          for the initial centroids.
        - If an array is passed, it should be of shape `(n_clusters, n_features)`
          and gives the initial centers.
        - If a callable is passed, it should take arguments `X`, `n_clusters` and a
          random state and return an initialization.

    n_init : 'auto' or int, default="auto"
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.

        When `n_init='auto'`, the number of runs depends on the value of init:
        10 if using `init='random'` or `init` is a callable;
        1 if using `init='k-means++'` or `init` is an array-like.

        .. versionadded:: 1.2
           Added 'auto' option for `n_init`.

        .. versionchanged:: 1.4
           Default value for `n_init` changed to `'auto'`.

    max_iter : int, default=300
        Maximum number of iterations of the k-means algorithm to run.

    verbose : bool, default=False
        Verbosity mode.

    tol : float, default=1e-4
        Relative tolerance with regards to Frobenius norm of the difference
        in the cluster centers of two consecutive iterations to declare
        convergence.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for centroid initialization. Use
        an int to make the randomness deterministic.
        See :term:`Glossary <random_state>`.

    copy_x : bool, default=True
        When pre-computing distances it is more numerically accurate to center
        the data first. If `copy_x` is True (default), then the original data is
        not modified. If False, the original data is modified, and put back
        before the function returns, but small numerical differences may be
        introduced by subtracting and then adding the data mean. Note that if
        the original data is not C-contiguous, a copy will be made even if
        `copy_x` is False. If the original data is sparse, but not in CSR format,
        a copy will be made even if `copy_x` is False.

    algorithm : {"lloyd", "elkan"}, default="lloyd"
        K-means algorithm to use. The classical EM-style algorithm is `"lloyd"`.
        The `"elkan"` variation can be more efficient on some datasets with
        well-defined clusters, by using the triangle inequality. However it's
        more memory intensive due to the allocation of an extra array of shape
        `(n_samples, n_clusters)`.

        .. versionchanged:: 0.18
            Added Elkan algorithm

        .. versionchanged:: 1.1
            Renamed "full" to "lloyd", and deprecated "auto" and "full".
            Changed "auto" to use "lloyd" instead of "elkan".

    return_n_iter : bool, default=False
        Whether or not to return the number of iterations.

    Returns
    -------
    centroid : ndarray of shape (n_clusters, n_features)
        Centroids found at the last iteration of k-means.

    label : ndarray of shape (n_samples,)
        The `label[i]` is the code or index of the centroid the
        i'th observation is closest to.

    inertia : float
        The final value of the inertia criterion (sum of squared distances to
        the closest centroid for all observations in the training set).

    best_n_iter : int
        Number of iterations corresponding to the best results.
        Returned only if `return_n_iter` is set to True.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.cluster import k_means
    >>> X = np.array([[1, 2], [1, 4], [1, 0],
    ...               [10, 2], [10, 4], [10, 0]])
    >>> centroid, label, inertia = k_means(
    ...     X, n_clusters=2, n_init="auto", random_state=0
    ... )
    >>> centroid
    array([[10.,  2.],
           [ 1.,  2.]])
    >>> label
    array([1, 1, 1, 0, 0, 0], dtype=int32)
    >>> inertia
    16.0
    """
    # Thin functional wrapper: the KMeans estimator performs all parameter
    # validation and the actual clustering.
    estimator = KMeans(
        n_clusters=n_clusters,
        init=init,
        n_init=n_init,
        max_iter=max_iter,
        verbose=verbose,
        tol=tol,
        random_state=random_state,
        copy_x=copy_x,
        algorithm=algorithm,
    )
    estimator.fit(X, sample_weight=sample_weight)
    results = (estimator.cluster_centers_, estimator.labels_, estimator.inertia_)
    if return_n_iter:
        results += (estimator.n_iter_,)
    return results
def _kmeans_single_elkan(
    X,
    sample_weight,
    centers_init,
    max_iter=300,
    verbose=False,
    tol=1e-4,
    n_threads=1,
):
    """A single run of k-means elkan, assumes preparation completed prior.
    Parameters
    ----------
    X : {ndarray, sparse matrix} of shape (n_samples, n_features)
        The observations to cluster. If sparse matrix, must be in CSR format.
    sample_weight : array-like of shape (n_samples,)
        The weights for each observation in X.
    centers_init : ndarray of shape (n_clusters, n_features)
        The initial centers.
    max_iter : int, default=300
        Maximum number of iterations of the k-means algorithm to run.
    verbose : bool, default=False
        Verbosity mode.
    tol : float, default=1e-4
        Relative tolerance with regards to Frobenius norm of the difference
        in the cluster centers of two consecutive iterations to declare
        convergence.
        It's not advised to set `tol=0` since convergence might never be
        declared due to rounding errors. Use a very small number instead.
    n_threads : int, default=1
        The number of OpenMP threads to use for the computation. Parallelism is
        sample-wise on the main cython loop which assigns each sample to its
        closest center.
    Returns
    -------
    labels : ndarray of shape (n_samples,)
        labels[i] is the code or index of the centroid the
        i'th observation is closest to.
    inertia : float
        The final value of the inertia criterion (sum of squared distances to
        the closest centroid for all observations in the training set).
    centers : ndarray of shape (n_clusters, n_features)
        Centroids found at the last iteration of k-means.
    n_iter : int
        Number of iterations run.
    """
    n_samples = X.shape[0]
    n_clusters = centers_init.shape[0]
    # Buffers to avoid new allocations at each iteration.
    centers = centers_init
    centers_new = np.zeros_like(centers)
    weight_in_clusters = np.zeros(n_clusters, dtype=X.dtype)
    labels = np.full(n_samples, -1, dtype=np.int32)
    labels_old = labels.copy()
    # Half the pairwise distances between centers. Elkan's pruning compares
    # per-sample distance bounds against these to skip exact computations.
    center_half_distances = euclidean_distances(centers) / 2
    # For each center, half the distance to its closest other center
    # (row `kth=1` of the column-wise partial sort).
    distance_next_center = np.partition(
        np.asarray(center_half_distances), kth=1, axis=0
    )[1]
    # Per-sample upper bound on the distance to the assigned center, and
    # per-sample/per-center lower bounds, maintained by the Elkan iterations.
    upper_bounds = np.zeros(n_samples, dtype=X.dtype)
    lower_bounds = np.zeros((n_samples, n_clusters), dtype=X.dtype)
    center_shift = np.zeros(n_clusters, dtype=X.dtype)
    # Dispatch to the sparse or dense Cython implementations.
    if sp.issparse(X):
        init_bounds = init_bounds_sparse
        elkan_iter = elkan_iter_chunked_sparse
        _inertia = _inertia_sparse
    else:
        init_bounds = init_bounds_dense
        elkan_iter = elkan_iter_chunked_dense
        _inertia = _inertia_dense
    init_bounds(
        X,
        centers,
        center_half_distances,
        labels,
        upper_bounds,
        lower_bounds,
        n_threads=n_threads,
    )
    strict_convergence = False
    for i in range(max_iter):
        # One Elkan step: updates labels, bounds and center_shift, and writes
        # the recomputed centers into `centers_new`.
        elkan_iter(
            X,
            sample_weight,
            centers,
            centers_new,
            weight_in_clusters,
            center_half_distances,
            distance_next_center,
            upper_bounds,
            lower_bounds,
            labels,
            center_shift,
            n_threads,
        )
        # compute new pairwise distances between centers and closest other
        # center of each center for next iterations
        center_half_distances = euclidean_distances(centers_new) / 2
        distance_next_center = np.partition(
            np.asarray(center_half_distances), kth=1, axis=0
        )[1]
        if verbose:
            inertia = _inertia(X, sample_weight, centers, labels, n_threads)
            print(f"Iteration {i}, inertia {inertia}")
        # Swap the double buffers: `centers` now holds the fresh centers.
        centers, centers_new = centers_new, centers
        if np.array_equal(labels, labels_old):
            # First check the labels for strict convergence.
            if verbose:
                print(f"Converged at iteration {i}: strict convergence.")
            strict_convergence = True
            break
        else:
            # No strict convergence, check for tol based convergence.
            center_shift_tot = (center_shift**2).sum()
            if center_shift_tot <= tol:
                if verbose:
                    print(
                        f"Converged at iteration {i}: center shift "
                        f"{center_shift_tot} within tolerance {tol}."
                    )
                break
        labels_old[:] = labels
    if not strict_convergence:
        # rerun E-step so that predicted labels match cluster centers
        elkan_iter(
            X,
            sample_weight,
            centers,
            centers,
            weight_in_clusters,
            center_half_distances,
            distance_next_center,
            upper_bounds,
            lower_bounds,
            labels,
            center_shift,
            n_threads,
            update_centers=False,
        )
    inertia = _inertia(X, sample_weight, centers, labels, n_threads)
    # NOTE: relies on max_iter >= 1 (enforced by estimator parameter
    # constraints) so that `i` is always bound here.
    return labels, inertia, centers, i + 1
# Threadpoolctl context to limit the number of threads in second level of
# nested parallelism (i.e. BLAS) to avoid oversubscription.
@_threadpool_controller_decorator(limits=1, user_api="blas")
def _kmeans_single_lloyd(
    X,
    sample_weight,
    centers_init,
    max_iter=300,
    verbose=False,
    tol=1e-4,
    n_threads=1,
):
    """A single run of k-means lloyd, assumes preparation completed prior.
    Parameters
    ----------
    X : {ndarray, sparse matrix} of shape (n_samples, n_features)
        The observations to cluster. If sparse matrix, must be in CSR format.
    sample_weight : ndarray of shape (n_samples,)
        The weights for each observation in X.
    centers_init : ndarray of shape (n_clusters, n_features)
        The initial centers.
    max_iter : int, default=300
        Maximum number of iterations of the k-means algorithm to run.
    verbose : bool, default=False
        Verbosity mode
    tol : float, default=1e-4
        Relative tolerance with regards to Frobenius norm of the difference
        in the cluster centers of two consecutive iterations to declare
        convergence.
        It's not advised to set `tol=0` since convergence might never be
        declared due to rounding errors. Use a very small number instead.
    n_threads : int, default=1
        The number of OpenMP threads to use for the computation. Parallelism is
        sample-wise on the main cython loop which assigns each sample to its
        closest center.
    Returns
    -------
    labels : ndarray of shape (n_samples,)
        labels[i] is the code or index of the centroid the
        i'th observation is closest to.
    inertia : float
        The final value of the inertia criterion (sum of squared distances to
        the closest centroid for all observations in the training set).
    centers : ndarray of shape (n_clusters, n_features)
        Centroids found at the last iteration of k-means.
    n_iter : int
        Number of iterations run.
    """
    n_clusters = centers_init.shape[0]
    # Buffers to avoid new allocations at each iteration.
    centers = centers_init
    centers_new = np.zeros_like(centers)
    labels = np.full(X.shape[0], -1, dtype=np.int32)
    labels_old = labels.copy()
    weight_in_clusters = np.zeros(n_clusters, dtype=X.dtype)
    center_shift = np.zeros(n_clusters, dtype=X.dtype)
    # Dispatch to the sparse or dense Cython implementations.
    if sp.issparse(X):
        lloyd_iter = lloyd_iter_chunked_sparse
        _inertia = _inertia_sparse
    else:
        lloyd_iter = lloyd_iter_chunked_dense
        _inertia = _inertia_dense
    strict_convergence = False
    for i in range(max_iter):
        # One Lloyd step: assign samples to the nearest center and write the
        # recomputed centers into `centers_new`.
        lloyd_iter(
            X,
            sample_weight,
            centers,
            centers_new,
            weight_in_clusters,
            labels,
            center_shift,
            n_threads,
        )
        if verbose:
            inertia = _inertia(X, sample_weight, centers, labels, n_threads)
            print(f"Iteration {i}, inertia {inertia}.")
        # Swap the double buffers: `centers` now holds the fresh centers.
        centers, centers_new = centers_new, centers
        if np.array_equal(labels, labels_old):
            # First check the labels for strict convergence.
            if verbose:
                print(f"Converged at iteration {i}: strict convergence.")
            strict_convergence = True
            break
        else:
            # No strict convergence, check for tol based convergence.
            center_shift_tot = (center_shift**2).sum()
            if center_shift_tot <= tol:
                if verbose:
                    print(
                        f"Converged at iteration {i}: center shift "
                        f"{center_shift_tot} within tolerance {tol}."
                    )
                break
        labels_old[:] = labels
    if not strict_convergence:
        # rerun E-step so that predicted labels match cluster centers
        lloyd_iter(
            X,
            sample_weight,
            centers,
            centers,
            weight_in_clusters,
            labels,
            center_shift,
            n_threads,
            update_centers=False,
        )
    inertia = _inertia(X, sample_weight, centers, labels, n_threads)
    # NOTE: relies on max_iter >= 1 so that `i` is always bound here.
    return labels, inertia, centers, i + 1
def _labels_inertia(X, sample_weight, centers, n_threads=1, return_inertia=True):
    """E step of the K-means EM algorithm.
    Compute the labels and the inertia of the given samples and centers.
    Parameters
    ----------
    X : {ndarray, sparse matrix} of shape (n_samples, n_features)
        The input samples to assign to the labels. If sparse matrix, must
        be in CSR format.
    sample_weight : ndarray of shape (n_samples,)
        The weights for each observation in X.
    centers : ndarray of shape (n_clusters, n_features)
        The cluster centers.
    n_threads : int, default=1
        The number of OpenMP threads to use for the computation. Parallelism is
        sample-wise on the main cython loop which assigns each sample to its
        closest center.
    return_inertia : bool, default=True
        Whether to compute and return the inertia.
    Returns
    -------
    labels : ndarray of shape (n_samples,)
        The resulting assignment.
    inertia : float
        Sum of squared distances of samples to their closest cluster center.
        Inertia is only returned if return_inertia is True.
    """
    n_samples = X.shape[0]
    n_clusters = centers.shape[0]
    labels = np.full(n_samples, -1, dtype=np.int32)
    # Dummy buffer required by the Cython signature; not used since
    # `update_centers=False` below.
    center_shift = np.zeros(n_clusters, dtype=centers.dtype)
    # Dispatch to the sparse or dense Cython implementations.
    if sp.issparse(X):
        _labels = lloyd_iter_chunked_sparse
        _inertia = _inertia_sparse
    else:
        _labels = lloyd_iter_chunked_dense
        _inertia = _inertia_dense
    # Run a single assignment-only Lloyd step: labels are written in place,
    # centers are left untouched.
    _labels(
        X,
        sample_weight,
        centers,
        centers_new=None,
        weight_in_clusters=None,
        labels=labels,
        center_shift=center_shift,
        n_threads=n_threads,
    )
    if return_inertia:
        inertia = _inertia(X, sample_weight, centers, labels, n_threads)
        return labels, inertia
    return labels
# Same as _labels_inertia but in a threadpool_limits context.
# Limiting BLAS to a single thread avoids oversubscription when the caller
# already parallelizes sample-wise with OpenMP (see the comment above
# `_kmeans_single_lloyd`).
_labels_inertia_threadpool_limit = _threadpool_controller_decorator(
    limits=1, user_api="blas"
)(_labels_inertia)
class _BaseKMeans(
    ClassNamePrefixFeaturesOutMixin, TransformerMixin, ClusterMixin, BaseEstimator, ABC
):
    """Base class for KMeans and MiniBatchKMeans"""
    # Declarative validation of the shared constructor parameters; subclasses
    # extend this dict with their own entries.
    _parameter_constraints: dict = {
        "n_clusters": [Interval(Integral, 1, None, closed="left")],
        "init": [StrOptions({"k-means++", "random"}), callable, "array-like"],
        "n_init": [
            StrOptions({"auto"}),
            Interval(Integral, 1, None, closed="left"),
        ],
        "max_iter": [Interval(Integral, 1, None, closed="left")],
        "tol": [Interval(Real, 0, None, closed="left")],
        "verbose": ["verbose"],
        "random_state": ["random_state"],
    }
def __init__(
self,
n_clusters,
*,
init,
n_init,
max_iter,
tol,
verbose,
random_state,
):
self.n_clusters = n_clusters
self.init = init
self.max_iter = max_iter
self.tol = tol
self.n_init = n_init
self.verbose = verbose
self.random_state = random_state
def _check_params_vs_input(self, X, default_n_init=None):
# n_clusters
if X.shape[0] < self.n_clusters:
raise ValueError(
f"n_samples={X.shape[0]} should be >= n_clusters={self.n_clusters}."
)
# tol
self._tol = _tolerance(X, self.tol)
# n-init
if self.n_init == "auto":
if isinstance(self.init, str) and self.init == "k-means++":
self._n_init = 1
elif isinstance(self.init, str) and self.init == "random":
self._n_init = default_n_init
elif callable(self.init):
self._n_init = default_n_init
else: # array-like
self._n_init = 1
else:
self._n_init = self.n_init
if _is_arraylike_not_scalar(self.init) and self._n_init != 1:
warnings.warn(
(
"Explicit initial center position passed: performing only"
f" one init in {self.__class__.__name__} instead of "
f"n_init={self._n_init}."
),
RuntimeWarning,
stacklevel=2,
)
self._n_init = 1
    @abstractmethod
    def _warn_mkl_vcomp(self, n_active_threads):
        """Issue an estimator specific warning when vcomp and mkl are both present

        This method is called by `_check_mkl_vcomp`. Subclasses implement it
        to word the warning for their own estimator.
        """
def _check_mkl_vcomp(self, X, n_samples):
"""Check when vcomp and mkl are both present"""
# The BLAS call inside a prange in lloyd_iter_chunked_dense is known to
# cause a small memory leak when there are less chunks than the number
# of available threads. It only happens when the OpenMP library is
# vcomp (microsoft OpenMP) and the BLAS library is MKL. see #18653
if sp.issparse(X):
return
n_active_threads = int(np.ceil(n_samples / CHUNK_SIZE))
if n_active_threads < self._n_threads:
modules = _get_threadpool_controller().info()
has_vcomp = "vcomp" in [module["prefix"] for module in modules]
has_mkl = ("mkl", "intel") in [
(module["internal_api"], module.get("threading_layer", None))
for module in modules
]
if has_vcomp and has_mkl:
self._warn_mkl_vcomp(n_active_threads)
    def _validate_center_shape(self, X, centers):
        """Check if centers is compatible with X and n_clusters."""
        # First axis: exactly one center per requested cluster.
        if centers.shape[0] != self.n_clusters:
            raise ValueError(
                f"The shape of the initial centers {centers.shape} does not "
                f"match the number of clusters {self.n_clusters}."
            )
        # Second axis: centers must live in the same feature space as X.
        if centers.shape[1] != X.shape[1]:
            raise ValueError(
                f"The shape of the initial centers {centers.shape} does not "
                f"match the number of features of the data {X.shape[1]}."
            )
def _check_test_data(self, X):
X = validate_data(
self,
X,
accept_sparse="csr",
reset=False,
dtype=[np.float64, np.float32],
order="C",
accept_large_sparse=False,
)
return X
def _init_centroids(
self,
X,
x_squared_norms,
init,
random_state,
sample_weight,
init_size=None,
n_centroids=None,
):
"""Compute the initial centroids.
Parameters
----------
X : {ndarray, sparse matrix} of shape (n_samples, n_features)
The input samples.
x_squared_norms : ndarray of shape (n_samples,)
Squared euclidean norm of each data point. Pass it if you have it
at hands already to avoid it being recomputed here.
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/cluster/_bicluster.py | sklearn/cluster/_bicluster.py | """Spectral biclustering algorithms."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from abc import ABCMeta, abstractmethod
from numbers import Integral
import numpy as np
from scipy.linalg import norm
from scipy.sparse import dia_matrix, issparse
from scipy.sparse.linalg import eigsh, svds
from sklearn.base import BaseEstimator, BiclusterMixin, _fit_context
from sklearn.cluster._kmeans import KMeans, MiniBatchKMeans
from sklearn.utils import check_random_state, check_scalar
from sklearn.utils._param_validation import Interval, StrOptions
from sklearn.utils.extmath import _randomized_svd, make_nonnegative, safe_sparse_dot
from sklearn.utils.validation import assert_all_finite, validate_data
# Public API of this module.
__all__ = ["SpectralBiclustering", "SpectralCoclustering"]
def _scale_normalize(X):
    """Normalize ``X`` by scaling rows and columns independently.

    Returns the normalized matrix and the row and column scaling
    factors.
    """
    X = make_nonnegative(X)

    def _inv_sqrt_sums(axis):
        # 1 / sqrt of the marginal sums along `axis`; NaNs in the result are
        # replaced by 0 so the corresponding rows/columns are left unscaled.
        scale = np.asarray(1.0 / np.sqrt(X.sum(axis=axis))).squeeze()
        return np.where(np.isnan(scale), 0, scale)

    row_diag = _inv_sqrt_sums(1)
    col_diag = _inv_sqrt_sums(0)
    if issparse(X):
        # Apply the scalings as diagonal matrix products to preserve sparsity.
        n_rows, n_cols = X.shape
        left = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
        right = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
        normalized = left @ X @ right
    else:
        normalized = row_diag[:, np.newaxis] * X * col_diag
    return normalized, row_diag, col_diag
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
    """Normalize rows and columns of ``X`` simultaneously so that all
    rows sum to one constant and all columns sum to a different
    constant.

    Parameters
    ----------
    X : {ndarray, sparse matrix}
        Matrix to normalize; it is first made non-negative.
    max_iter : int, default=1000
        Maximum number of alternate scaling iterations.
    tol : float, default=1e-5
        Stop once the norm of the difference between two consecutive
        iterates falls below this value.
    """
    # According to paper, this can also be done more efficiently with
    # deviation reduction and balancing algorithms.
    X = make_nonnegative(X)
    X_scaled = X
    for _ in range(max_iter):
        X_new, _, _ = _scale_normalize(X_scaled)
        if issparse(X):
            # Bug fix: compare consecutive iterates (X_scaled vs X_new) as in
            # the dense branch, instead of comparing the current iterate
            # against the original matrix X, which made the convergence check
            # meaningless for sparse input. Assumes the scaling preserves the
            # sparsity pattern so the ``data`` arrays align entry-wise (the
            # previous code relied on the same assumption).
            dist = norm(X_scaled.data - X_new.data)
        else:
            dist = norm(X_scaled - X_new)
        X_scaled = X_new
        if dist < tol:
            break
    return X_scaled
def _log_normalize(X):
    """Normalize ``X`` according to Kluger's log-interactions scheme."""
    # Shift values so the smallest entry is 1, making the log well defined.
    X = make_nonnegative(X, min_value=1)
    if issparse(X):
        raise ValueError(
            "Cannot compute log of a sparse matrix,"
            " because log(x) diverges to -infinity as x"
            " goes to 0."
        )
    log_X = np.log(X)
    # Double-center the log matrix: subtract row and column means, then add
    # back the grand mean.
    grand_mean = log_X.mean()
    column_means = log_X.mean(axis=0)
    row_means = log_X.mean(axis=1)[:, np.newaxis]
    return log_X - row_means - column_means + grand_mean
class BaseSpectral(BiclusterMixin, BaseEstimator, metaclass=ABCMeta):
    """Base class for spectral biclustering."""
    # Constraints shared by both concrete estimators; subclasses extend this
    # dict with their own entries (e.g. `n_clusters`, `method`).
    _parameter_constraints: dict = {
        "svd_method": [StrOptions({"randomized", "arpack"})],
        "n_svd_vecs": [Interval(Integral, 0, None, closed="left"), None],
        "mini_batch": ["boolean"],
        "init": [StrOptions({"k-means++", "random"}), np.ndarray],
        "n_init": [Interval(Integral, 1, None, closed="left")],
        "random_state": ["random_state"],
    }
    @abstractmethod
    def __init__(
        self,
        n_clusters=3,
        svd_method="randomized",
        n_svd_vecs=None,
        mini_batch=False,
        init="k-means++",
        n_init=10,
        random_state=None,
    ):
        # Parameters are stored unmodified; validation happens at fit time.
        self.n_clusters = n_clusters
        self.svd_method = svd_method
        self.n_svd_vecs = n_svd_vecs
        self.mini_batch = mini_batch
        self.init = init
        self.n_init = n_init
        self.random_state = random_state
    @abstractmethod
    def _check_parameters(self, n_samples):
        """Validate parameters depending on the input data."""
    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """Create a biclustering for X.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data.
        y : Ignored
            Not used, present for API consistency by convention.
        Returns
        -------
        self : object
            SpectralBiclustering instance.
        """
        X = validate_data(self, X, accept_sparse="csr", dtype=np.float64)
        self._check_parameters(X.shape[0])
        # The actual algorithm is implemented by the subclass.
        self._fit(X)
        return self
    def _svd(self, array, n_components, n_discard):
        """Returns first `n_components` left and right singular
        vectors u and v, discarding the first `n_discard`.
        """
        if self.svd_method == "randomized":
            kwargs = {}
            if self.n_svd_vecs is not None:
                kwargs["n_oversamples"] = self.n_svd_vecs
            u, _, vt = _randomized_svd(
                array, n_components, random_state=self.random_state, **kwargs
            )
        elif self.svd_method == "arpack":
            u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
            if np.any(np.isnan(vt)):
                # some eigenvalues of A * A.T are negative, causing
                # sqrt() to be np.nan. This causes some vectors in vt
                # to be np.nan.
                # Fall back to computing the right singular vectors as
                # eigenvectors of A.T @ A.
                A = safe_sparse_dot(array.T, array)
                random_state = check_random_state(self.random_state)
                # initialize with [-1,1] as in ARPACK
                v0 = random_state.uniform(-1, 1, A.shape[0])
                _, v = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
                vt = v.T
            if np.any(np.isnan(u)):
                # Same fallback for the left singular vectors, as
                # eigenvectors of A @ A.T.
                A = safe_sparse_dot(array, array.T)
                random_state = check_random_state(self.random_state)
                # initialize with [-1,1] as in ARPACK
                v0 = random_state.uniform(-1, 1, A.shape[0])
                _, u = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
        assert_all_finite(u)
        assert_all_finite(vt)
        # Discard the first `n_discard` singular vectors.
        u = u[:, n_discard:]
        vt = vt[n_discard:]
        return u, vt.T
    def _k_means(self, data, n_clusters):
        # Cluster the projected data with (mini-batch) k-means, honoring the
        # estimator's init/n_init/random_state settings.
        if self.mini_batch:
            model = MiniBatchKMeans(
                n_clusters,
                init=self.init,
                n_init=self.n_init,
                random_state=self.random_state,
            )
        else:
            model = KMeans(
                n_clusters,
                init=self.init,
                n_init=self.n_init,
                random_state=self.random_state,
            )
        model.fit(data)
        centroid = model.cluster_centers_
        labels = model.labels_
        return centroid, labels
    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        # Both subclasses accept sparse input in `fit`.
        tags.input_tags.sparse = True
        return tags
class SpectralCoclustering(BaseSpectral):
    """Spectral Co-Clustering algorithm (Dhillon, 2001) [1]_.
    Clusters rows and columns of an array `X` to solve the relaxed
    normalized cut of the bipartite graph created from `X` as follows:
    the edge between row vertex `i` and column vertex `j` has weight
    `X[i, j]`.
    The resulting bicluster structure is block-diagonal, since each
    row and each column belongs to exactly one bicluster.
    Supports sparse matrices, as long as they are nonnegative.
    Read more in the :ref:`User Guide <spectral_coclustering>`.
    Parameters
    ----------
    n_clusters : int, default=3
        The number of biclusters to find.
    svd_method : {'randomized', 'arpack'}, default='randomized'
        Selects the algorithm for finding singular vectors. May be
        'randomized' or 'arpack'. If 'randomized', use
        :func:`sklearn.utils.extmath.randomized_svd`, which may be faster
        for large matrices. If 'arpack', use
        :func:`scipy.sparse.linalg.svds`, which is more accurate, but
        possibly slower in some cases.
    n_svd_vecs : int, default=None
        Number of vectors to use in calculating the SVD. Corresponds
        to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
    mini_batch : bool, default=False
        Whether to use mini-batch k-means, which is faster but may get
        different results.
    init : {'k-means++', 'random'}, or ndarray of shape \
            (n_clusters, n_features), default='k-means++'
        Method for initialization of k-means algorithm; defaults to
        'k-means++'.
    n_init : int, default=10
        Number of random initializations that are tried with the
        k-means algorithm.
        If mini-batch k-means is used, the best initialization is
        chosen and the algorithm runs once. Otherwise, the algorithm
        is run for each initialization and the best solution chosen.
    random_state : int, RandomState instance, default=None
        Used for randomizing the singular value decomposition and the k-means
        initialization. Use an int to make the randomness deterministic.
        See :term:`Glossary <random_state>`.
    Attributes
    ----------
    rows_ : array-like of shape (n_row_clusters, n_rows)
        Results of the clustering. `rows[i, r]` is True if
        cluster `i` contains row `r`. Available only after calling ``fit``.
    columns_ : array-like of shape (n_column_clusters, n_columns)
        Results of the clustering, like `rows`.
    row_labels_ : array-like of shape (n_rows,)
        The bicluster label of each row.
    column_labels_ : array-like of shape (n_cols,)
        The bicluster label of each column.
    biclusters_ : tuple of two ndarrays
        The tuple contains the `rows_` and `columns_` arrays.
    n_features_in_ : int
        Number of features seen during :term:`fit`.
        .. versionadded:: 0.24
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.
        .. versionadded:: 1.0
    See Also
    --------
    SpectralBiclustering : Partitions rows and columns under the assumption
        that the data has an underlying checkerboard structure.
    References
    ----------
    .. [1] :doi:`Dhillon, Inderjit S, 2001. Co-clustering documents and words using
        bipartite spectral graph partitioning.
        <10.1145/502512.502550>`
    Examples
    --------
    >>> from sklearn.cluster import SpectralCoclustering
    >>> import numpy as np
    >>> X = np.array([[1, 1], [2, 1], [1, 0],
    ...               [4, 7], [3, 5], [3, 6]])
    >>> clustering = SpectralCoclustering(n_clusters=2, random_state=0).fit(X)
    >>> clustering.row_labels_ #doctest: +SKIP
    array([0, 1, 1, 0, 0, 0], dtype=int32)
    >>> clustering.column_labels_ #doctest: +SKIP
    array([0, 0], dtype=int32)
    >>> clustering
    SpectralCoclustering(n_clusters=2, random_state=0)
    For a more detailed example, see the following:
    :ref:`sphx_glr_auto_examples_bicluster_plot_spectral_coclustering.py`.
    """
    _parameter_constraints: dict = {
        **BaseSpectral._parameter_constraints,
        "n_clusters": [Interval(Integral, 1, None, closed="left")],
    }
    def __init__(
        self,
        n_clusters=3,
        *,
        svd_method="randomized",
        n_svd_vecs=None,
        mini_batch=False,
        init="k-means++",
        n_init=10,
        random_state=None,
    ):
        super().__init__(
            n_clusters, svd_method, n_svd_vecs, mini_batch, init, n_init, random_state
        )
    def _check_parameters(self, n_samples):
        # A single integer number of biclusters, bounded by the number of rows.
        if self.n_clusters > n_samples:
            raise ValueError(
                f"n_clusters should be <= n_samples={n_samples}. Got"
                f" {self.n_clusters} instead."
            )
    def _fit(self, X):
        normalized_data, row_diag, col_diag = _scale_normalize(X)
        # Use 1 + ceil(log2(n_clusters)) singular vectors; the first one is
        # discarded by `_svd` below (`n_discard=1`).
        n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
        u, v = self._svd(normalized_data, n_sv, n_discard=1)
        # Rescale the singular vectors and stack the row and column
        # representations into one embedding that is clustered jointly.
        z = np.vstack((row_diag[:, np.newaxis] * u, col_diag[:, np.newaxis] * v))
        _, labels = self._k_means(z, self.n_clusters)
        # The first n_rows entries of `labels` correspond to rows of X,
        # the remainder to columns.
        n_rows = X.shape[0]
        self.row_labels_ = labels[:n_rows]
        self.column_labels_ = labels[n_rows:]
        # Boolean indicator matrices, one row per bicluster.
        self.rows_ = np.vstack([self.row_labels_ == c for c in range(self.n_clusters)])
        self.columns_ = np.vstack(
            [self.column_labels_ == c for c in range(self.n_clusters)]
        )
class SpectralBiclustering(BaseSpectral):
    """Spectral biclustering (Kluger, 2003) [1]_.
    Partitions rows and columns under the assumption that the data has
    an underlying checkerboard structure. For instance, if there are
    two row partitions and three column partitions, each row will
    belong to three biclusters, and each column will belong to two
    biclusters. The outer product of the corresponding row and column
    label vectors gives this checkerboard structure.
    Read more in the :ref:`User Guide <spectral_biclustering>`.
    Parameters
    ----------
    n_clusters : int or tuple (n_row_clusters, n_column_clusters), default=3
        The number of row and column clusters in the checkerboard
        structure.
    method : {'bistochastic', 'scale', 'log'}, default='bistochastic'
        Method of normalizing and converting singular vectors into
        biclusters. May be one of 'scale', 'bistochastic', or 'log'.
        The authors recommend using 'log'. If the data is sparse,
        however, log normalization will not work, which is why the
        default is 'bistochastic'.
        .. warning::
           if `method='log'`, the data must not be sparse.
    n_components : int, default=6
        Number of singular vectors to check.
    n_best : int, default=3
        Number of best singular vectors to which to project the data
        for clustering.
    svd_method : {'randomized', 'arpack'}, default='randomized'
        Selects the algorithm for finding singular vectors. May be
        'randomized' or 'arpack'. If 'randomized', uses
        :func:`~sklearn.utils.extmath.randomized_svd`, which may be faster
        for large matrices. If 'arpack', uses
        `scipy.sparse.linalg.svds`, which is more accurate, but
        possibly slower in some cases.
    n_svd_vecs : int, default=None
        Number of vectors to use in calculating the SVD. Corresponds
        to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
    mini_batch : bool, default=False
        Whether to use mini-batch k-means, which is faster but may get
        different results.
    init : {'k-means++', 'random'} or ndarray of shape (n_clusters, n_features), \
            default='k-means++'
        Method for initialization of k-means algorithm; defaults to
        'k-means++'.
    n_init : int, default=10
        Number of random initializations that are tried with the
        k-means algorithm.
        If mini-batch k-means is used, the best initialization is
        chosen and the algorithm runs once. Otherwise, the algorithm
        is run for each initialization and the best solution chosen.
    random_state : int, RandomState instance, default=None
        Used for randomizing the singular value decomposition and the k-means
        initialization. Use an int to make the randomness deterministic.
        See :term:`Glossary <random_state>`.
    Attributes
    ----------
    rows_ : array-like of shape (n_row_clusters, n_rows)
        Results of the clustering. `rows[i, r]` is True if
        cluster `i` contains row `r`. Available only after calling ``fit``.
    columns_ : array-like of shape (n_column_clusters, n_columns)
        Results of the clustering, like `rows`.
    row_labels_ : array-like of shape (n_rows,)
        Row partition labels.
    column_labels_ : array-like of shape (n_cols,)
        Column partition labels.
    biclusters_ : tuple of two ndarrays
        The tuple contains the `rows_` and `columns_` arrays.
    n_features_in_ : int
        Number of features seen during :term:`fit`.
        .. versionadded:: 0.24
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.
        .. versionadded:: 1.0
    See Also
    --------
    SpectralCoclustering : Clusters rows and columns of an array `X` to solve the
        relaxed normalized cut of the bipartite graph created from `X`.
    References
    ----------
    .. [1] :doi:`Kluger, Yuval, et. al., 2003. Spectral biclustering of microarray
        data: coclustering genes and conditions.
        <10.1101/gr.648603>`
    Examples
    --------
    >>> from sklearn.cluster import SpectralBiclustering
    >>> import numpy as np
    >>> X = np.array([[1, 1], [2, 1], [1, 0],
    ...               [4, 7], [3, 5], [3, 6]])
    >>> clustering = SpectralBiclustering(n_clusters=2, random_state=0).fit(X)
    >>> clustering.row_labels_
    array([1, 1, 1, 0, 0, 0], dtype=int32)
    >>> clustering.column_labels_
    array([1, 0], dtype=int32)
    >>> clustering
    SpectralBiclustering(n_clusters=2, random_state=0)
    For a more detailed example, see
    :ref:`sphx_glr_auto_examples_bicluster_plot_spectral_biclustering.py`
    """
    _parameter_constraints: dict = {
        **BaseSpectral._parameter_constraints,
        "n_clusters": [Interval(Integral, 1, None, closed="left"), tuple],
        "method": [StrOptions({"bistochastic", "scale", "log"})],
        "n_components": [Interval(Integral, 1, None, closed="left")],
        "n_best": [Interval(Integral, 1, None, closed="left")],
    }
    def __init__(
        self,
        n_clusters=3,
        *,
        method="bistochastic",
        n_components=6,
        n_best=3,
        svd_method="randomized",
        n_svd_vecs=None,
        mini_batch=False,
        init="k-means++",
        n_init=10,
        random_state=None,
    ):
        super().__init__(
            n_clusters, svd_method, n_svd_vecs, mini_batch, init, n_init, random_state
        )
        self.method = method
        self.n_components = n_components
        self.n_best = n_best
    def _check_parameters(self, n_samples):
        # `n_clusters` may be a single int (same number of row and column
        # clusters) or a (n_row_clusters, n_column_clusters) tuple.
        if isinstance(self.n_clusters, Integral):
            if self.n_clusters > n_samples:
                raise ValueError(
                    f"n_clusters should be <= n_samples={n_samples}. Got"
                    f" {self.n_clusters} instead."
                )
        else:  # tuple
            try:
                n_row_clusters, n_column_clusters = self.n_clusters
                check_scalar(
                    n_row_clusters,
                    "n_row_clusters",
                    target_type=Integral,
                    min_val=1,
                    max_val=n_samples,
                )
                check_scalar(
                    n_column_clusters,
                    "n_column_clusters",
                    target_type=Integral,
                    min_val=1,
                    max_val=n_samples,
                )
            except (ValueError, TypeError) as e:
                raise ValueError(
                    "Incorrect parameter n_clusters has value:"
                    f" {self.n_clusters}. It should either be a single integer"
                    " or an iterable with two integers:"
                    " (n_row_clusters, n_column_clusters)"
                    " And the values are should be in the"
                    " range: (1, n_samples)"
                ) from e
        if self.n_best > self.n_components:
            raise ValueError(
                f"n_best={self.n_best} must be <= n_components={self.n_components}."
            )
    def _fit(self, X):
        n_sv = self.n_components
        if self.method == "bistochastic":
            normalized_data = _bistochastic_normalize(X)
            # One extra singular vector is computed because the first one is
            # discarded below (`n_discard=1`).
            n_sv += 1
        elif self.method == "scale":
            normalized_data, _, _ = _scale_normalize(X)
            n_sv += 1
        elif self.method == "log":
            normalized_data = _log_normalize(X)
        n_discard = 0 if self.method == "log" else 1
        u, v = self._svd(normalized_data, n_sv, n_discard)
        ut = u.T
        vt = v.T
        # Scalar n_clusters means the same count for rows and columns.
        try:
            n_row_clusters, n_col_clusters = self.n_clusters
        except TypeError:
            n_row_clusters = n_col_clusters = self.n_clusters
        # Keep only the singular vectors best approximated by a piecewise
        # constant vector, then cluster the data projected onto them.
        best_ut = self._fit_best_piecewise(ut, self.n_best, n_row_clusters)
        best_vt = self._fit_best_piecewise(vt, self.n_best, n_col_clusters)
        self.row_labels_ = self._project_and_cluster(X, best_vt.T, n_row_clusters)
        self.column_labels_ = self._project_and_cluster(X.T, best_ut.T, n_col_clusters)
        # Indicator matrices for the n_row_clusters * n_col_clusters
        # checkerboard biclusters: bicluster i pairs row cluster
        # i // n_col_clusters with column cluster i % n_col_clusters.
        self.rows_ = np.vstack(
            [
                self.row_labels_ == label
                for label in range(n_row_clusters)
                for _ in range(n_col_clusters)
            ]
        )
        self.columns_ = np.vstack(
            [
                self.column_labels_ == label
                for _ in range(n_row_clusters)
                for label in range(n_col_clusters)
            ]
        )
    def _fit_best_piecewise(self, vectors, n_best, n_clusters):
        """Find the ``n_best`` vectors that are best approximated by piecewise
        constant vectors.
        The piecewise vectors are found by k-means; the best is chosen
        according to Euclidean distance.
        """
        def make_piecewise(v):
            # Approximate v by replacing each entry with its cluster centroid.
            centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
            return centroid[labels].ravel()
        piecewise_vectors = np.apply_along_axis(make_piecewise, axis=1, arr=vectors)
        dists = np.apply_along_axis(norm, axis=1, arr=(vectors - piecewise_vectors))
        # Keep the n_best vectors with the smallest approximation error.
        result = vectors[np.argsort(dists)[:n_best]]
        return result
    def _project_and_cluster(self, data, vectors, n_clusters):
        """Project ``data`` to ``vectors`` and cluster the result."""
        projected = safe_sparse_dot(data, vectors)
        _, labels = self._k_means(projected, n_clusters)
        return labels
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/cluster/_bisect_k_means.py | sklearn/cluster/_bisect_k_means.py | """Bisecting K-means clustering."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
import numpy as np
import scipy.sparse as sp
from sklearn.base import _fit_context
from sklearn.cluster._k_means_common import _inertia_dense, _inertia_sparse
from sklearn.cluster._kmeans import (
_BaseKMeans,
_kmeans_single_elkan,
_kmeans_single_lloyd,
_labels_inertia_threadpool_limit,
)
from sklearn.utils._openmp_helpers import _openmp_effective_n_threads
from sklearn.utils._param_validation import Integral, Interval, StrOptions
from sklearn.utils.extmath import row_norms
from sklearn.utils.validation import (
_check_sample_weight,
check_is_fitted,
check_random_state,
validate_data,
)
class _BisectingTree:
"""Tree structure representing the hierarchical clusters of BisectingKMeans."""
def __init__(self, center, indices, score):
"""Create a new cluster node in the tree.
The node holds the center of this cluster and the indices of the data points
that belong to it.
"""
self.center = center
self.indices = indices
self.score = score
self.left = None
self.right = None
def split(self, labels, centers, scores):
"""Split the cluster node into two subclusters."""
self.left = _BisectingTree(
indices=self.indices[labels == 0], center=centers[0], score=scores[0]
)
self.right = _BisectingTree(
indices=self.indices[labels == 1], center=centers[1], score=scores[1]
)
# reset the indices attribute to save memory
self.indices = None
def get_cluster_to_bisect(self):
"""Return the cluster node to bisect next.
It's based on the score of the cluster, which can be either the number of
data points assigned to that cluster or the inertia of that cluster
(see `bisecting_strategy` for details).
"""
max_score = None
for cluster_leaf in self.iter_leaves():
if max_score is None or cluster_leaf.score > max_score:
max_score = cluster_leaf.score
best_cluster_leaf = cluster_leaf
return best_cluster_leaf
def iter_leaves(self):
"""Iterate over all the cluster leaves in the tree."""
if self.left is None:
yield self
else:
yield from self.left.iter_leaves()
yield from self.right.iter_leaves()
class BisectingKMeans(_BaseKMeans):
    """Bisecting K-Means clustering.
    Read more in the :ref:`User Guide <bisect_k_means>`.
    .. versionadded:: 1.1
    Parameters
    ----------
    n_clusters : int, default=8
        The number of clusters to form as well as the number of
        centroids to generate.
    init : {'k-means++', 'random'} or callable, default='random'
        Method for initialization:
        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.
        'random': choose `n_clusters` observations (rows) at random from data
        for the initial centroids.
        If a callable is passed, it should take arguments X, n_clusters and a
        random state and return an initialization.
    n_init : int, default=1
        Number of time the inner k-means algorithm will be run with different
        centroid seeds in each bisection.
        That will result producing for each bisection best output of n_init
        consecutive runs in terms of inertia.
    random_state : int, RandomState instance or None, default=None
        Determines random number generation for centroid initialization
        in inner K-Means. Use an int to make the randomness deterministic.
        See :term:`Glossary <random_state>`.
    max_iter : int, default=300
        Maximum number of iterations of the inner k-means algorithm at each
        bisection.
    verbose : int, default=0
        Verbosity mode.
    tol : float, default=1e-4
        Relative tolerance with regards to Frobenius norm of the difference
        in the cluster centers of two consecutive iterations to declare
        convergence. Used in inner k-means algorithm at each bisection to pick
        best possible clusters.
    copy_x : bool, default=True
        When pre-computing distances it is more numerically accurate to center
        the data first. If copy_x is True (default), then the original data is
        not modified. If False, the original data is modified, and put back
        before the function returns, but small numerical differences may be
        introduced by subtracting and then adding the data mean. Note that if
        the original data is not C-contiguous, a copy will be made even if
        copy_x is False. If the original data is sparse, but not in CSR format,
        a copy will be made even if copy_x is False.
    algorithm : {"lloyd", "elkan"}, default="lloyd"
        Inner K-means algorithm used in bisection.
        The classical EM-style algorithm is `"lloyd"`.
        The `"elkan"` variation can be more efficient on some datasets with
        well-defined clusters, by using the triangle inequality. However it's
        more memory intensive due to the allocation of an extra array of shape
        `(n_samples, n_clusters)`.
    bisecting_strategy : {"biggest_inertia", "largest_cluster"},\
            default="biggest_inertia"
        Defines how bisection should be performed:
        - "biggest_inertia" means that BisectingKMeans will always check
          all calculated cluster for cluster with biggest SSE
          (Sum of squared errors) and bisect it. This approach concentrates on
          precision, but may be costly in terms of execution time (especially for
          larger amount of data points).
        - "largest_cluster" - BisectingKMeans will always split cluster with
          largest amount of points assigned to it from all clusters
          previously calculated. That should work faster than picking by SSE
          ('biggest_inertia') and may produce similar results in most cases.
    Attributes
    ----------
    cluster_centers_ : ndarray of shape (n_clusters, n_features)
        Coordinates of cluster centers. If the algorithm stops before fully
        converging (see ``tol`` and ``max_iter``), these will not be
        consistent with ``labels_``.
    labels_ : ndarray of shape (n_samples,)
        Labels of each point.
    inertia_ : float
        Sum of squared distances of samples to their closest cluster center,
        weighted by the sample weights if provided.
    n_features_in_ : int
        Number of features seen during :term:`fit`.
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.
    See Also
    --------
    KMeans : Original implementation of K-Means algorithm.
    Notes
    -----
    It might be inefficient when n_cluster is less than 3, due to unnecessary
    calculations for that case.
    Examples
    --------
    >>> from sklearn.cluster import BisectingKMeans
    >>> import numpy as np
    >>> X = np.array([[1, 1], [10, 1], [3, 1],
    ...               [10, 0], [2, 1], [10, 2],
    ...               [10, 8], [10, 9], [10, 10]])
    >>> bisect_means = BisectingKMeans(n_clusters=3, random_state=0).fit(X)
    >>> bisect_means.labels_
    array([0, 2, 0, 2, 0, 2, 1, 1, 1], dtype=int32)
    >>> bisect_means.predict([[0, 0], [12, 3]])
    array([0, 2], dtype=int32)
    >>> bisect_means.cluster_centers_
    array([[ 2., 1.],
           [10., 9.],
           [10., 1.]])
    For a comparison between BisectingKMeans and K-Means refer to example
    :ref:`sphx_glr_auto_examples_cluster_plot_bisect_kmeans.py`.
    """
    _parameter_constraints: dict = {
        **_BaseKMeans._parameter_constraints,
        "init": [StrOptions({"k-means++", "random"}), callable],
        "n_init": [Interval(Integral, 1, None, closed="left")],
        "copy_x": ["boolean"],
        "algorithm": [StrOptions({"lloyd", "elkan"})],
        "bisecting_strategy": [StrOptions({"biggest_inertia", "largest_cluster"})],
    }
    def __init__(
        self,
        n_clusters=8,
        *,
        init="random",
        n_init=1,
        random_state=None,
        max_iter=300,
        verbose=0,
        tol=1e-4,
        copy_x=True,
        algorithm="lloyd",
        bisecting_strategy="biggest_inertia",
    ):
        super().__init__(
            n_clusters=n_clusters,
            init=init,
            max_iter=max_iter,
            verbose=verbose,
            random_state=random_state,
            tol=tol,
            n_init=n_init,
        )
        self.copy_x = copy_x
        self.algorithm = algorithm
        self.bisecting_strategy = bisecting_strategy
    def _warn_mkl_vcomp(self, n_active_threads):
        """Warn when vcomp and mkl are both present"""
        warnings.warn(
            "BisectingKMeans is known to have a memory leak on Windows "
            "with MKL, when there are less chunks than available "
            "threads. You can avoid it by setting the environment"
            f" variable OMP_NUM_THREADS={n_active_threads}."
        )
    def _inertia_per_cluster(self, X, centers, labels, sample_weight):
        """Calculate the sum of squared errors (inertia) per cluster.
        Parameters
        ----------
        X : {ndarray, csr_matrix} of shape (n_samples, n_features)
            The input samples.
        centers : ndarray of shape (n_clusters=2, n_features)
            The cluster centers.
        labels : ndarray of shape (n_samples,)
            Index of the cluster each sample belongs to.
        sample_weight : ndarray of shape (n_samples,)
            The weights for each observation in X.
        Returns
        -------
        inertia_per_cluster : ndarray of shape (n_clusters=2,)
            Sum of squared errors (inertia) for each cluster.
        """
        n_clusters = centers.shape[0]  # = 2 since centers comes from a bisection
        # Pick the dense or sparse Cython inertia helper to match X.
        _inertia = _inertia_sparse if sp.issparse(X) else _inertia_dense
        inertia_per_cluster = np.empty(n_clusters)
        for label in range(n_clusters):
            # `single_label` restricts the sum to the samples of that cluster.
            inertia_per_cluster[label] = _inertia(
                X, sample_weight, centers, labels, self._n_threads, single_label=label
            )
        return inertia_per_cluster
    def _bisect(self, X, x_squared_norms, sample_weight, cluster_to_bisect):
        """Split a cluster into 2 subsclusters.
        Parameters
        ----------
        X : {ndarray, csr_matrix} of shape (n_samples, n_features)
            Training instances to cluster.
        x_squared_norms : ndarray of shape (n_samples,)
            Squared euclidean norm of each data point.
        sample_weight : ndarray of shape (n_samples,)
            The weights for each observation in X.
        cluster_to_bisect : _BisectingTree node object
            The cluster node to split.
        """
        # Restrict the work to the samples assigned to the cluster being split.
        X = X[cluster_to_bisect.indices]
        x_squared_norms = x_squared_norms[cluster_to_bisect.indices]
        sample_weight = sample_weight[cluster_to_bisect.indices]
        best_inertia = None
        # Split samples in X into 2 clusters.
        # Repeating `n_init` times to obtain best clusters
        for _ in range(self.n_init):
            centers_init = self._init_centroids(
                X,
                x_squared_norms=x_squared_norms,
                init=self.init,
                random_state=self._random_state,
                n_centroids=2,
                sample_weight=sample_weight,
            )
            labels, inertia, centers, _ = self._kmeans_single(
                X,
                sample_weight,
                centers_init,
                max_iter=self.max_iter,
                verbose=self.verbose,
                tol=self.tol,
                n_threads=self._n_threads,
            )
            # allow small tolerance on the inertia to accommodate for
            # non-deterministic rounding errors due to parallel computation
            if best_inertia is None or inertia < best_inertia * (1 - 1e-6):
                best_labels = labels
                best_centers = centers
                best_inertia = inertia
        if self.verbose:
            print(f"New centroids from bisection: {best_centers}")
        # Score the two subclusters so `get_cluster_to_bisect` can rank them.
        if self.bisecting_strategy == "biggest_inertia":
            scores = self._inertia_per_cluster(
                X, best_centers, best_labels, sample_weight
            )
        else:  # bisecting_strategy == "largest_cluster"
            # Using minlength to make sure that we have the counts for both labels even
            # if all samples are labelled 0.
            scores = np.bincount(best_labels, minlength=2)
        cluster_to_bisect.split(best_labels, best_centers, scores)
    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None, sample_weight=None):
        """Compute bisecting k-means clustering.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training instances to cluster.
            .. note:: The data will be converted to C ordering,
                which will cause a memory copy
                if the given data is not C-contiguous.
        y : Ignored
            Not used, present here for API consistency by convention.
        sample_weight : array-like of shape (n_samples,), default=None
            The weights for each observation in X. If None, all observations
            are assigned equal weight. `sample_weight` is not used during
            initialization if `init` is a callable.
        Returns
        -------
        self
            Fitted estimator.
        """
        X = validate_data(
            self,
            X,
            accept_sparse="csr",
            dtype=[np.float64, np.float32],
            order="C",
            copy=self.copy_x,
            accept_large_sparse=False,
        )
        self._check_params_vs_input(X)
        self._random_state = check_random_state(self.random_state)
        sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
        self._n_threads = _openmp_effective_n_threads()
        if self.algorithm == "lloyd" or self.n_clusters == 1:
            self._kmeans_single = _kmeans_single_lloyd
            self._check_mkl_vcomp(X, X.shape[0])
        else:
            self._kmeans_single = _kmeans_single_elkan
        # Subtract of mean of X for more accurate distance computations
        if not sp.issparse(X):
            self._X_mean = X.mean(axis=0)
            X -= self._X_mean
        # Initialize the hierarchical clusters tree
        self._bisecting_tree = _BisectingTree(
            indices=np.arange(X.shape[0]),
            center=X.mean(axis=0),
            score=0,
        )
        x_squared_norms = row_norms(X, squared=True)
        # n_clusters - 1 bisections turn the single root cluster into
        # n_clusters leaves.
        for _ in range(self.n_clusters - 1):
            # Chose cluster to bisect
            cluster_to_bisect = self._bisecting_tree.get_cluster_to_bisect()
            # Split this cluster into 2 subclusters
            self._bisect(X, x_squared_norms, sample_weight, cluster_to_bisect)
        # Aggregate final labels and centers from the bisecting tree
        self.labels_ = np.full(X.shape[0], -1, dtype=np.int32)
        self.cluster_centers_ = np.empty((self.n_clusters, X.shape[1]), dtype=X.dtype)
        for i, cluster_node in enumerate(self._bisecting_tree.iter_leaves()):
            self.labels_[cluster_node.indices] = i
            self.cluster_centers_[i] = cluster_node.center
            cluster_node.label = i  # label final clusters for future prediction
            cluster_node.indices = None  # release memory
        # Restore original data
        if not sp.issparse(X):
            X += self._X_mean
            self.cluster_centers_ += self._X_mean
        _inertia = _inertia_sparse if sp.issparse(X) else _inertia_dense
        self.inertia_ = _inertia(
            X, sample_weight, self.cluster_centers_, self.labels_, self._n_threads
        )
        self._n_features_out = self.cluster_centers_.shape[0]
        return self
    def predict(self, X):
        """Predict which cluster each sample in X belongs to.
        Prediction is made by going down the hierarchical tree
        in searching of closest leaf cluster.
        In the vector quantization literature, `cluster_centers_` is called
        the code book and each value returned by `predict` is the index of
        the closest code in the code book.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            New data to predict.
        Returns
        -------
        labels : ndarray of shape (n_samples,)
            Index of the cluster each sample belongs to.
        """
        check_is_fitted(self)
        X = self._check_test_data(X)
        x_squared_norms = row_norms(X, squared=True)
        # sample weights are unused but necessary in cython helpers
        sample_weight = np.ones_like(x_squared_norms)
        labels = self._predict_recursive(X, sample_weight, self._bisecting_tree)
        return labels
    def _predict_recursive(self, X, sample_weight, cluster_node):
        """Predict recursively by going down the hierarchical tree.
        Parameters
        ----------
        X : {ndarray, csr_matrix} of shape (n_samples, n_features)
            The data points, currently assigned to `cluster_node`, to predict between
            the subclusters of this node.
        sample_weight : ndarray of shape (n_samples,)
            The weights for each observation in X.
        cluster_node : _BisectingTree node object
            The cluster node of the hierarchical tree.
        Returns
        -------
        labels : ndarray of shape (n_samples,)
            Index of the cluster each sample belongs to.
        """
        if cluster_node.left is None:
            # This cluster has no subcluster. Labels are just the label of the cluster.
            return np.full(X.shape[0], cluster_node.label, dtype=np.int32)
        # Determine if data points belong to the left or right subcluster
        centers = np.vstack((cluster_node.left.center, cluster_node.right.center))
        if hasattr(self, "_X_mean"):
            # Centers were learned on mean-centered data; shift them back.
            centers += self._X_mean
        cluster_labels = _labels_inertia_threadpool_limit(
            X,
            sample_weight,
            centers,
            self._n_threads,
            return_inertia=False,
        )
        mask = cluster_labels == 0
        # Compute the labels for each subset of the data points.
        labels = np.full(X.shape[0], -1, dtype=np.int32)
        labels[mask] = self._predict_recursive(
            X[mask], sample_weight[mask], cluster_node.left
        )
        labels[~mask] = self._predict_recursive(
            X[~mask], sample_weight[~mask], cluster_node.right
        )
        return labels
    def __sklearn_tags__(self):
        # Declare sparse-input support and the dtypes preserved by transform.
        tags = super().__sklearn_tags__()
        tags.input_tags.sparse = True
        tags.transformer_tags.preserves_dtype = ["float64", "float32"]
        return tags
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/cluster/_feature_agglomeration.py | sklearn/cluster/_feature_agglomeration.py | """
Feature agglomeration. Base classes and functions for performing feature
agglomeration.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from scipy.sparse import issparse
from sklearn.base import TransformerMixin
from sklearn.utils.validation import check_is_fitted, validate_data
###############################################################################
# Mixin class for feature agglomeration.
class AgglomerationTransform(TransformerMixin):
    """
    A mixin providing the transform interface for feature agglomeration.
    """
    def transform(self, X):
        """
        Transform a new matrix using the built clustering.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features) or \
                (n_samples, n_samples)
            An M by N array of M observations in N dimensions or a length
            M array of M one-dimensional observations.
        Returns
        -------
        Y : ndarray of shape (n_samples, n_clusters) or (n_clusters,)
            The pooled values for each feature cluster.
        """
        check_is_fitted(self)
        X = validate_data(self, X, reset=False)
        if self.pooling_func == np.mean and not issparse(X):
            # Fast path for the mean: a weighted bincount per sample sums
            # the features of each cluster in a single pass.
            cluster_sizes = np.bincount(self.labels_)
            pooled = np.array(
                [
                    np.bincount(self.labels_, X[sample, :]) / cluster_sizes
                    for sample in range(X.shape[0])
                ]
            )
        else:
            # Generic path: apply the pooling function cluster by cluster.
            pooled = np.array(
                [
                    self.pooling_func(X[:, self.labels_ == label], axis=1)
                    for label in np.unique(self.labels_)
                ]
            ).T
        return pooled
    def inverse_transform(self, X):
        """
        Inverse the transformation and return a vector of size `n_features`.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_clusters) or (n_clusters,)
            The values to be assigned to each cluster of samples.
        Returns
        -------
        X_original : ndarray of shape (n_samples, n_features) or (n_features,)
            A vector of size `n_samples` with the values of `X` assigned to
            each of the cluster of samples.
        """
        check_is_fitted(self)
        # Map every original feature back to the value of its cluster.
        _, cluster_of_feature = np.unique(self.labels_, return_inverse=True)
        return X[..., cluster_of_feature]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/cluster/_birch.py | sklearn/cluster/_birch.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from math import sqrt
from numbers import Integral, Real
import numpy as np
from scipy import sparse
from sklearn._config import config_context
from sklearn.base import (
BaseEstimator,
ClassNamePrefixFeaturesOutMixin,
ClusterMixin,
TransformerMixin,
_fit_context,
)
from sklearn.cluster import AgglomerativeClustering
from sklearn.exceptions import ConvergenceWarning
from sklearn.metrics import pairwise_distances_argmin
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.utils._param_validation import Interval
from sklearn.utils.extmath import row_norms
from sklearn.utils.validation import check_is_fitted, validate_data
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
matrix, instead of constructing a sparse matrix for every row that is
expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
for i in range(n_samples):
row = np.zeros(X.shape[1])
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
def _split_node(node, threshold, branching_factor):
    """The node has to be split if there is no place for a new subcluster
    in the node.
    1. Two empty nodes and two empty subclusters are initialized.
    2. The pair of distant subclusters are found.
    3. The properties of the empty subclusters and nodes are updated
       according to the nearest distance between the subclusters to the
       pair of distant subclusters.
    4. The two nodes are set as children to the two subclusters.
    """
    new_subcluster1 = _CFSubcluster()
    new_subcluster2 = _CFSubcluster()
    new_node1 = _CFNode(
        threshold=threshold,
        branching_factor=branching_factor,
        is_leaf=node.is_leaf,
        n_features=node.n_features,
        dtype=node.init_centroids_.dtype,
    )
    new_node2 = _CFNode(
        threshold=threshold,
        branching_factor=branching_factor,
        is_leaf=node.is_leaf,
        n_features=node.n_features,
        dtype=node.init_centroids_.dtype,
    )
    new_subcluster1.child_ = new_node1
    new_subcluster2.child_ = new_node2
    # If the split node was a leaf, splice the two new nodes into the
    # doubly linked list of leaves in its place.
    if node.is_leaf:
        if node.prev_leaf_ is not None:
            node.prev_leaf_.next_leaf_ = new_node1
        new_node1.prev_leaf_ = node.prev_leaf_
        new_node1.next_leaf_ = new_node2
        new_node2.prev_leaf_ = new_node1
        new_node2.next_leaf_ = node.next_leaf_
        if node.next_leaf_ is not None:
            node.next_leaf_.prev_leaf_ = new_node2
    # Pairwise squared distances between the centroids of the node.
    dist = euclidean_distances(
        node.centroids_, Y_norm_squared=node.squared_norm_, squared=True
    )
    n_clusters = dist.shape[0]
    # Seed the two subclusters with the pair of farthest-apart centroids;
    # the selected rows hold each seed's distance to every centroid.
    farthest_idx = np.unravel_index(dist.argmax(), (n_clusters, n_clusters))
    node1_dist, node2_dist = dist[(farthest_idx,)]
    node1_closer = node1_dist < node2_dist
    # make sure node1 is closest to itself even if all distances are equal.
    # This can only happen when all node.centroids_ are duplicates leading to all
    # distances between centroids being zero.
    node1_closer[farthest_idx[0]] = True
    # Assign each existing subcluster to whichever seed is nearer.
    for idx, subcluster in enumerate(node.subclusters_):
        if node1_closer[idx]:
            new_node1.append_subcluster(subcluster)
            new_subcluster1.update(subcluster)
        else:
            new_node2.append_subcluster(subcluster)
            new_subcluster2.update(subcluster)
    return new_subcluster1, new_subcluster2
class _CFNode:
    """Each node in a CFTree is called a CFNode.
    The CFNode can have a maximum of branching_factor
    number of CFSubclusters.
    Parameters
    ----------
    threshold : float
        Threshold needed for a new subcluster to enter a CFSubcluster.
    branching_factor : int
        Maximum number of CF subclusters in each node.
    is_leaf : bool
        We need to know if the CFNode is a leaf or not, in order to
        retrieve the final subclusters.
    n_features : int
        The number of features.
    Attributes
    ----------
    subclusters_ : list
        List of subclusters for a particular CFNode.
    prev_leaf_ : _CFNode
        Useful only if is_leaf is True.
    next_leaf_ : _CFNode
        next_leaf. Useful only if is_leaf is True.
        the final subclusters.
    init_centroids_ : ndarray of shape (branching_factor + 1, n_features)
        Manipulate ``init_centroids_`` throughout rather than centroids_ since
        the centroids are just a view of the ``init_centroids_`` .
    init_sq_norm_ : ndarray of shape (branching_factor + 1,)
        manipulate init_sq_norm_ throughout. similar to ``init_centroids_``.
    centroids_ : ndarray of shape (branching_factor + 1, n_features)
        View of ``init_centroids_``.
    squared_norm_ : ndarray of shape (branching_factor + 1,)
        View of ``init_sq_norm_``.
    """
    def __init__(self, *, threshold, branching_factor, is_leaf, n_features, dtype):
        self.threshold = threshold
        self.branching_factor = branching_factor
        self.is_leaf = is_leaf
        self.n_features = n_features
        # The list of subclusters, centroids and squared norms
        # to manipulate throughout.
        self.subclusters_ = []
        self.init_centroids_ = np.zeros((branching_factor + 1, n_features), dtype=dtype)
        self.init_sq_norm_ = np.zeros((branching_factor + 1), dtype)
        self.squared_norm_ = []
        self.prev_leaf_ = None
        self.next_leaf_ = None
    def append_subcluster(self, subcluster):
        """Store ``subcluster`` in the next free slot of the node's arrays."""
        n_samples = len(self.subclusters_)
        self.subclusters_.append(subcluster)
        self.init_centroids_[n_samples] = subcluster.centroid_
        self.init_sq_norm_[n_samples] = subcluster.sq_norm_
        # Keep centroids and squared norm as views. In this way
        # if we change init_centroids and init_sq_norm_, it is
        # sufficient,
        self.centroids_ = self.init_centroids_[: n_samples + 1, :]
        self.squared_norm_ = self.init_sq_norm_[: n_samples + 1]
    def update_split_subclusters(self, subcluster, new_subcluster1, new_subcluster2):
        """Remove a subcluster from a node and update it with the
        split subclusters.
        """
        # Overwrite the old subcluster's slot with the first half of the
        # split, then append the second half.
        ind = self.subclusters_.index(subcluster)
        self.subclusters_[ind] = new_subcluster1
        self.init_centroids_[ind] = new_subcluster1.centroid_
        self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
        self.append_subcluster(new_subcluster2)
    def insert_cf_subcluster(self, subcluster):
        """Insert a new subcluster into the node.
        Returns True when the node now exceeds ``branching_factor`` and the
        caller must split it; False otherwise.
        """
        if not self.subclusters_:
            self.append_subcluster(subcluster)
            return False
        threshold = self.threshold
        branching_factor = self.branching_factor
        # We need to find the closest subcluster among all the
        # subclusters so that we can insert our new subcluster.
        # dist_matrix holds -2*<c, x> + ||c||^2 for each centroid c; the
        # omitted constant ||x||^2 does not change the argmin.
        dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
        dist_matrix *= -2.0
        dist_matrix += self.squared_norm_
        closest_index = np.argmin(dist_matrix)
        closest_subcluster = self.subclusters_[closest_index]
        # If the subcluster has a child, we need a recursive strategy.
        if closest_subcluster.child_ is not None:
            split_child = closest_subcluster.child_.insert_cf_subcluster(subcluster)
            if not split_child:
                # If it is determined that the child need not be split, we
                # can just update the closest_subcluster
                closest_subcluster.update(subcluster)
                self.init_centroids_[closest_index] = self.subclusters_[
                    closest_index
                ].centroid_
                self.init_sq_norm_[closest_index] = self.subclusters_[
                    closest_index
                ].sq_norm_
                return False
            # things not too good. we need to redistribute the subclusters in
            # our child node, and add a new subcluster in the parent
            # subcluster to accommodate the new child.
            else:
                new_subcluster1, new_subcluster2 = _split_node(
                    closest_subcluster.child_,
                    threshold,
                    branching_factor,
                )
                self.update_split_subclusters(
                    closest_subcluster, new_subcluster1, new_subcluster2
                )
                if len(self.subclusters_) > self.branching_factor:
                    return True
                return False
        # good to go!
        else:
            merged = closest_subcluster.merge_subcluster(subcluster, self.threshold)
            if merged:
                # Merge succeeded within the threshold; refresh cached stats.
                self.init_centroids_[closest_index] = closest_subcluster.centroid_
                self.init_sq_norm_[closest_index] = closest_subcluster.sq_norm_
                return False
            # not close to any other subclusters, and we still
            # have space, so add.
            elif len(self.subclusters_) < self.branching_factor:
                self.append_subcluster(subcluster)
                return False
            # We do not have enough space nor is it closer to an
            # other subcluster. We need to split.
            else:
                self.append_subcluster(subcluster)
                return True
class _CFSubcluster:
"""Each subcluster in a CFNode is called a CFSubcluster.
A CFSubcluster can have a CFNode has its child.
Parameters
----------
linear_sum : ndarray of shape (n_features,), default=None
Sample. This is kept optional to allow initialization of empty
subclusters.
Attributes
----------
n_samples_ : int
Number of samples that belong to each subcluster.
linear_sum_ : ndarray
Linear sum of all the samples in a subcluster. Prevents holding
all sample data in memory.
squared_sum_ : float
Sum of the squared l2 norms of all samples belonging to a subcluster.
centroid_ : ndarray of shape (branching_factor + 1, n_features)
Centroid of the subcluster. Prevent recomputing of centroids when
``CFNode.centroids_`` is called.
child_ : _CFNode
Child Node of the subcluster. Once a given _CFNode is set as the child
of the _CFNode, it is set to ``self.child_``.
sq_norm_ : ndarray of shape (branching_factor + 1,)
Squared norm of the subcluster. Used to prevent recomputing when
pairwise minimum distances are computed.
"""
def __init__(self, *, linear_sum=None):
if linear_sum is None:
self.n_samples_ = 0
self.squared_sum_ = 0.0
self.centroid_ = self.linear_sum_ = 0
else:
self.n_samples_ = 1
self.centroid_ = self.linear_sum_ = linear_sum
self.squared_sum_ = self.sq_norm_ = np.dot(
self.linear_sum_, self.linear_sum_
)
self.child_ = None
def update(self, subcluster):
self.n_samples_ += subcluster.n_samples_
self.linear_sum_ += subcluster.linear_sum_
self.squared_sum_ += subcluster.squared_sum_
self.centroid_ = self.linear_sum_ / self.n_samples_
self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_sq_norm = np.dot(new_centroid, new_centroid)
# The squared radius of the cluster is defined:
# r^2 = sum_i ||x_i - c||^2 / n
# with x_i the n points assigned to the cluster and c its centroid:
# c = sum_i x_i / n
# This can be expanded to:
# r^2 = sum_i ||x_i||^2 / n - 2 < sum_i x_i / n, c> + n ||c||^2 / n
# and therefore simplifies to:
# r^2 = sum_i ||x_i||^2 / n - ||c||^2
sq_radius = new_ss / new_n - new_sq_norm
if sq_radius <= threshold**2:
(
self.n_samples_,
self.linear_sum_,
self.squared_sum_,
self.centroid_,
self.sq_norm_,
) = (new_n, new_ls, new_ss, new_centroid, new_sq_norm)
return True
return False
@property
def radius(self):
"""Return radius of the subcluster"""
# Because of numerical issues, this could become negative
sq_radius = self.squared_sum_ / self.n_samples_ - self.sq_norm_
return sqrt(max(0, sq_radius))
class Birch(
ClassNamePrefixFeaturesOutMixin, ClusterMixin, TransformerMixin, BaseEstimator
):
"""Implements the BIRCH clustering algorithm.
It is a memory-efficient, online-learning algorithm provided as an
alternative to :class:`MiniBatchKMeans`. It constructs a tree
data structure with the cluster centroids being read off the leaf.
These can be either the final cluster centroids or can be provided as input
to another clustering algorithm such as :class:`AgglomerativeClustering`.
Read more in the :ref:`User Guide <birch>`.
.. versionadded:: 0.16
Parameters
----------
threshold : float, default=0.5
The radius of the subcluster obtained by merging a new sample and the
closest subcluster should be lesser than the threshold. Otherwise a new
subcluster is started. Setting this value to be very low promotes
splitting and vice-versa.
branching_factor : int, default=50
Maximum number of CF subclusters in each node. If a new samples enters
such that the number of subclusters exceed the branching_factor then
that node is split into two nodes with the subclusters redistributed
in each. The parent subcluster of that node is removed and two new
subclusters are added as parents of the 2 split nodes.
n_clusters : int, instance of sklearn.cluster model or None, default=3
Number of clusters after the final clustering step, which treats the
subclusters from the leaves as new samples.
- `None` : the final clustering step is not performed and the
subclusters are returned as they are.
- :mod:`sklearn.cluster` Estimator : If a model is provided, the model
is fit treating the subclusters as new samples and the initial data
is mapped to the label of the closest subcluster.
- `int` : the model fit is :class:`AgglomerativeClustering` with
`n_clusters` set to be equal to the int.
compute_labels : bool, default=True
Whether or not to compute labels for each fit.
Attributes
----------
root_ : _CFNode
Root of the CFTree.
dummy_leaf_ : _CFNode
Start pointer to all the leaves.
subcluster_centers_ : ndarray
Centroids of all subclusters read directly from the leaves.
subcluster_labels_ : ndarray
Labels assigned to the centroids of the subclusters after
they are clustered globally.
labels_ : ndarray of shape (n_samples,)
Array of labels assigned to the input data.
if partial_fit is used instead of fit, they are assigned to the
last batch of data.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
MiniBatchKMeans : Alternative implementation that does incremental updates
of the centers' positions using mini-batches.
Notes
-----
The tree data structure consists of nodes with each node consisting of
a number of subclusters. The maximum number of subclusters in a node
is determined by the branching factor. Each subcluster maintains a
linear sum, squared sum and the number of samples in that subcluster.
In addition, each subcluster can also have a node as its child, if the
subcluster is not a member of a leaf node.
For a new point entering the root, it is merged with the subcluster closest
to it and the linear sum, squared sum and the number of samples of that
subcluster are updated. This is done recursively till the properties of
the leaf node are updated.
See :ref:`sphx_glr_auto_examples_cluster_plot_birch_vs_minibatchkmeans.py` for a
comparison with :class:`~sklearn.cluster.MiniBatchKMeans`.
References
----------
* Tian Zhang, Raghu Ramakrishnan, Maron Livny
BIRCH: An efficient data clustering method for large databases.
https://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf
* Roberto Perdisci
JBirch - Java implementation of BIRCH clustering algorithm
https://code.google.com/archive/p/jbirch
Examples
--------
>>> from sklearn.cluster import Birch
>>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
>>> brc = Birch(n_clusters=None)
>>> brc.fit(X)
Birch(n_clusters=None)
>>> brc.predict(X)
array([0, 0, 0, 1, 1, 1])
For a comparison of the BIRCH clustering algorithm with other clustering algorithms,
see :ref:`sphx_glr_auto_examples_cluster_plot_cluster_comparison.py`
"""
_parameter_constraints: dict = {
"threshold": [Interval(Real, 0.0, None, closed="neither")],
"branching_factor": [Interval(Integral, 1, None, closed="neither")],
"n_clusters": [None, ClusterMixin, Interval(Integral, 1, None, closed="left")],
"compute_labels": ["boolean"],
}
def __init__(
self,
*,
threshold=0.5,
branching_factor=50,
n_clusters=3,
compute_labels=True,
):
self.threshold = threshold
self.branching_factor = branching_factor
self.n_clusters = n_clusters
self.compute_labels = compute_labels
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None):
"""
Build a CF Tree for the input data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self
Fitted estimator.
"""
return self._fit(X, partial=False)
    def _fit(self, X, partial):
        """Build (or, if ``partial``, extend) the CF tree and run the
        global clustering step.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Input data.
        partial : bool
            When True and a tree already exists (``root_`` is set), the new
            samples are inserted into the existing tree; otherwise a fresh
            tree is built from scratch.

        Returns
        -------
        self : object
            Fitted estimator.
        """
        has_root = getattr(self, "root_", None)
        first_call = not (partial and has_root)
        # `reset=first_call`: feature-count checks are only reset when a new
        # tree is started; subsequent partial_fit calls must match n_features.
        X = validate_data(
            self,
            X,
            accept_sparse="csr",
            reset=first_call,
            dtype=[np.float64, np.float32],
        )
        threshold = self.threshold
        branching_factor = self.branching_factor
        n_samples, n_features = X.shape
        # If partial_fit is called for the first time or fit is called, we
        # start a new tree.
        if first_call:
            # The first root is the leaf. Manipulate this object throughout.
            self.root_ = _CFNode(
                threshold=threshold,
                branching_factor=branching_factor,
                is_leaf=True,
                n_features=n_features,
                dtype=X.dtype,
            )
            # To enable getting back subclusters.
            self.dummy_leaf_ = _CFNode(
                threshold=threshold,
                branching_factor=branching_factor,
                is_leaf=True,
                n_features=n_features,
                dtype=X.dtype,
            )
            # The dummy leaf heads the linked list of leaves walked by
            # `_get_leaves`.
            self.dummy_leaf_.next_leaf_ = self.root_
            self.root_.prev_leaf_ = self.dummy_leaf_
        # Cannot vectorize. Enough to convince to use cython.
        if not sparse.issparse(X):
            iter_func = iter
        else:
            iter_func = _iterate_sparse_X
        for sample in iter_func(X):
            subcluster = _CFSubcluster(linear_sum=sample)
            split = self.root_.insert_cf_subcluster(subcluster)
            if split:
                # The root overflowed: split it and grow the tree one level
                # by installing a new, non-leaf root above the two halves.
                new_subcluster1, new_subcluster2 = _split_node(
                    self.root_, threshold, branching_factor
                )
                del self.root_
                self.root_ = _CFNode(
                    threshold=threshold,
                    branching_factor=branching_factor,
                    is_leaf=False,
                    n_features=n_features,
                    dtype=X.dtype,
                )
                self.root_.append_subcluster(new_subcluster1)
                self.root_.append_subcluster(new_subcluster2)
        centroids = np.concatenate([leaf.centroids_ for leaf in self._get_leaves()])
        self.subcluster_centers_ = centroids
        self._n_features_out = self.subcluster_centers_.shape[0]
        self._global_clustering(X)
        return self
def _get_leaves(self):
"""
Retrieve the leaves of the CF Node.
Returns
-------
leaves : list of shape (n_leaves,)
List of the leaf nodes.
"""
leaf_ptr = self.dummy_leaf_.next_leaf_
leaves = []
while leaf_ptr is not None:
leaves.append(leaf_ptr)
leaf_ptr = leaf_ptr.next_leaf_
return leaves
@_fit_context(prefer_skip_nested_validation=True)
def partial_fit(self, X=None, y=None):
"""
Online learning. Prevents rebuilding of CFTree from scratch.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features), \
default=None
Input data. If X is not provided, only the global clustering
step is done.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self
Fitted estimator.
"""
if X is None:
# Perform just the final global clustering step.
self._global_clustering()
return self
else:
return self._fit(X, partial=True)
def predict(self, X):
"""
Predict data using the ``centroids_`` of subclusters.
Avoid computation of the row norms of X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data.
Returns
-------
labels : ndarray of shape(n_samples,)
Labelled data.
"""
check_is_fitted(self)
X = validate_data(self, X, accept_sparse="csr", reset=False)
return self._predict(X)
def _predict(self, X):
"""Predict data using the ``centroids_`` of subclusters."""
kwargs = {"Y_norm_squared": self._subcluster_norms}
with config_context(assume_finite=True):
argmin = pairwise_distances_argmin(
X, self.subcluster_centers_, metric_kwargs=kwargs
)
return self.subcluster_labels_[argmin]
def transform(self, X):
"""
Transform X into subcluster centroids dimension.
Each dimension represents the distance from the sample point to each
cluster centroid.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data.
Returns
-------
X_trans : {array-like, sparse matrix} of shape (n_samples, n_clusters)
Transformed data.
"""
check_is_fitted(self)
X = validate_data(self, X, accept_sparse="csr", reset=False)
with config_context(assume_finite=True):
return euclidean_distances(X, self.subcluster_centers_)
    def _global_clustering(self, X=None):
        """Run the global clustering step on the subcluster centroids.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features), \
                default=None
            Input data. When provided and ``self.compute_labels`` is True,
            per-sample ``labels_`` are computed as well.
        """
        clusterer = self.n_clusters
        centroids = self.subcluster_centers_
        compute_labels = (X is not None) and self.compute_labels
        # Preprocessing for the global clustering.
        not_enough_centroids = False
        if isinstance(clusterer, Integral):
            clusterer = AgglomerativeClustering(n_clusters=self.n_clusters)
            # There is no need to perform the global clustering step.
            if len(centroids) < self.n_clusters:
                not_enough_centroids = True
        # To use in predict to avoid recalculation.
        self._subcluster_norms = row_norms(self.subcluster_centers_, squared=True)
        if clusterer is None or not_enough_centroids:
            # No global step: each subcluster keeps its own label.
            self.subcluster_labels_ = np.arange(len(centroids))
            if not_enough_centroids:
                warnings.warn(
                    "Number of subclusters found (%d) by BIRCH is less "
                    "than (%d). Decrease the threshold."
                    % (len(centroids), self.n_clusters),
                    ConvergenceWarning,
                )
        else:
            # The global clustering step that clusters the subclusters of
            # the leaves. It assumes the centroids of the subclusters as
            # samples and finds the final centroids.
            self.subcluster_labels_ = clusterer.fit_predict(self.subcluster_centers_)
        if compute_labels:
            self.labels_ = self._predict(X)
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.transformer_tags.preserves_dtype = ["float64", "float32"]
tags.input_tags.sparse = True
return tags
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/cluster/_optics.py | sklearn/cluster/_optics.py | """Ordering Points To Identify the Clustering Structure (OPTICS)
These routines execute the OPTICS algorithm, and implement various
cluster extraction methods of the ordered list.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from numbers import Integral, Real
import numpy as np
from scipy.sparse import SparseEfficiencyWarning, issparse
from sklearn.base import BaseEstimator, ClusterMixin, _fit_context
from sklearn.exceptions import DataConversionWarning
from sklearn.metrics import pairwise_distances
from sklearn.metrics.pairwise import _VALID_METRICS, PAIRWISE_BOOLEAN_FUNCTIONS
from sklearn.neighbors import NearestNeighbors
from sklearn.utils import gen_batches
from sklearn.utils._chunking import get_chunk_n_rows
from sklearn.utils._param_validation import (
HasMethods,
Interval,
RealNotInt,
StrOptions,
validate_params,
)
from sklearn.utils.validation import check_memory, validate_data
class OPTICS(ClusterMixin, BaseEstimator):
    """Estimate clustering structure from vector array.

    OPTICS (Ordering Points To Identify the Clustering Structure), closely
    related to DBSCAN, finds core samples of high density and expands clusters
    from them [1]_. Unlike DBSCAN, it keeps cluster hierarchy for a variable
    neighborhood radius. Better suited for usage on large datasets than the
    current scikit-learn implementation of DBSCAN.

    Clusters are then extracted from the cluster-order using a
    DBSCAN-like method (cluster_method = 'dbscan') or an automatic
    technique proposed in [1]_ (cluster_method = 'xi').

    This implementation deviates from the original OPTICS by first performing
    k-nearest-neighborhood searches on all points to identify core sizes of
    all points (instead of computing neighbors while looping through points).
    Reachability distances to only unprocessed points are then computed, to
    construct the cluster order, similar to the original OPTICS.

    Note that we do not employ a heap to manage the expansion
    candidates, so the time complexity will be O(n^2).

    Read more in the :ref:`User Guide <optics>`.

    Parameters
    ----------
    min_samples : int > 1 or float between 0 and 1, default=5
        The number of samples in a neighborhood for a point to be considered as
        a core point. Also, up and down steep regions can't have more than
        ``min_samples`` consecutive non-steep points. Expressed as an absolute
        number or a fraction of the number of samples (rounded to be at least
        2).

    max_eps : float, default=np.inf
        The maximum distance between two samples for one to be considered as
        in the neighborhood of the other. Default value of ``np.inf`` will
        identify clusters across all scales; reducing ``max_eps`` will result
        in shorter run times.

    metric : str or callable, default='minkowski'
        Metric to use for distance computation. Any metric from scikit-learn
        or :mod:`scipy.spatial.distance` can be used.

        If `metric` is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays as input and return one value indicating the
        distance between them. This works for Scipy's metrics, but is less
        efficient than passing the metric name as a string. If metric is
        "precomputed", `X` is assumed to be a distance matrix and must be
        square.

        Valid values for metric are:

        - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
          'manhattan']

        - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
          'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
          'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
          'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
          'yule']

        Sparse matrices are only supported by scikit-learn metrics.
        See :mod:`scipy.spatial.distance` for details on these metrics.

        .. note::
           `'kulsinski'` is deprecated from SciPy 1.9 and will be removed in SciPy 1.11.

    p : float, default=2
        Parameter for the Minkowski metric from
        :class:`~sklearn.metrics.pairwise_distances`. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.

    metric_params : dict, default=None
        Additional keyword arguments for the metric function.

    cluster_method : {'xi', 'dbscan'}, default='xi'
        The extraction method used to extract clusters using the calculated
        reachability and ordering.

    eps : float, default=None
        The maximum distance between two samples for one to be considered as
        in the neighborhood of the other. By default it assumes the same value
        as ``max_eps``.
        Used only when ``cluster_method='dbscan'``.

    xi : float between 0 and 1, default=0.05
        Determines the minimum steepness on the reachability plot that
        constitutes a cluster boundary. For example, an upwards point in the
        reachability plot is defined by the ratio from one point to its
        successor being at most 1-xi.
        Used only when ``cluster_method='xi'``.

    predecessor_correction : bool, default=True
        Correct clusters according to the predecessors calculated by OPTICS
        [2]_. This parameter has minimal effect on most datasets.
        Used only when ``cluster_method='xi'``.

    min_cluster_size : int > 1 or float between 0 and 1, default=None
        Minimum number of samples in an OPTICS cluster, expressed as an
        absolute number or a fraction of the number of samples (rounded to be
        at least 2). If ``None``, the value of ``min_samples`` is used instead.
        Used only when ``cluster_method='xi'``.

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
        Algorithm used to compute the nearest neighbors:

        - 'ball_tree' will use :class:`~sklearn.neighbors.BallTree`.
        - 'kd_tree' will use :class:`~sklearn.neighbors.KDTree`.
        - 'brute' will use a brute-force search.
        - 'auto' (default) will attempt to decide the most appropriate
          algorithm based on the values passed to :meth:`fit` method.

        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.

    leaf_size : int, default=30
        Leaf size passed to :class:`~sklearn.neighbors.BallTree` or
        :class:`~sklearn.neighbors.KDTree`. This can affect the speed of the
        construction and query, as well as the memory required to store the
        tree. The optimal value depends on the nature of the problem.

    memory : str or object with the joblib.Memory interface, default=None
        Used to cache the output of the computation of the tree.
        By default, no caching is done. If a string is given, it is the
        path to the caching directory.

    n_jobs : int, default=None
        The number of parallel jobs to run for neighbors search.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    Attributes
    ----------
    labels_ : ndarray of shape (n_samples,)
        Cluster labels for each point in the dataset given to fit().
        Noisy samples and points which are not included in a leaf cluster
        of ``cluster_hierarchy_`` are labeled as -1.

    reachability_ : ndarray of shape (n_samples,)
        Reachability distances per sample, indexed by object order. Use
        ``clust.reachability_[clust.ordering_]`` to access in cluster order.

    ordering_ : ndarray of shape (n_samples,)
        The cluster ordered list of sample indices.

    core_distances_ : ndarray of shape (n_samples,)
        Distance at which each sample becomes a core point, indexed by object
        order. Points which will never be core have a distance of inf. Use
        ``clust.core_distances_[clust.ordering_]`` to access in cluster order.

    predecessor_ : ndarray of shape (n_samples,)
        Point that a sample was reached from, indexed by object order.
        Seed points have a predecessor of -1.

    cluster_hierarchy_ : ndarray of shape (n_clusters, 2)
        The list of clusters in the form of ``[start, end]`` in each row, with
        all indices inclusive. The clusters are ordered according to
        ``(end, -start)`` (ascending) so that larger clusters encompassing
        smaller clusters come after those smaller ones. Since ``labels_`` does
        not reflect the hierarchy, usually
        ``len(cluster_hierarchy_) > np.unique(optics.labels_)``. Please also
        note that these indices are of the ``ordering_``, i.e.
        ``X[ordering_][start:end + 1]`` form a cluster.
        Only available when ``cluster_method='xi'``.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    DBSCAN : A similar clustering for a specified neighborhood radius (eps).
        Our implementation is optimized for runtime.

    References
    ----------
    .. [1] Ankerst, Mihael, Markus M. Breunig, Hans-Peter Kriegel,
       and Jörg Sander. "OPTICS: ordering points to identify the clustering
       structure." ACM SIGMOD Record 28, no. 2 (1999): 49-60.

    .. [2] Schubert, Erich, Michael Gertz.
       "Improving the Cluster Structure Extracted from OPTICS Plots." Proc. of
       the Conference "Lernen, Wissen, Daten, Analysen" (LWDA) (2018): 318-329.

    Examples
    --------
    >>> from sklearn.cluster import OPTICS
    >>> import numpy as np
    >>> X = np.array([[1, 2], [2, 5], [3, 6],
    ...               [8, 7], [8, 8], [7, 3]])
    >>> clustering = OPTICS(min_samples=2).fit(X)
    >>> clustering.labels_
    array([0, 0, 0, 1, 1, 1])

    For a more detailed example see
    :ref:`sphx_glr_auto_examples_cluster_plot_optics.py`.

    For a comparison of OPTICS with other clustering algorithms, see
    :ref:`sphx_glr_auto_examples_cluster_plot_cluster_comparison.py`
    """

    # Declarative constraints checked by `_fit_context` before `fit` runs.
    _parameter_constraints: dict = {
        "min_samples": [
            Interval(Integral, 2, None, closed="left"),
            Interval(RealNotInt, 0, 1, closed="both"),
        ],
        "max_eps": [Interval(Real, 0, None, closed="both")],
        "metric": [StrOptions(set(_VALID_METRICS) | {"precomputed"}), callable],
        "p": [Interval(Real, 1, None, closed="left")],
        "metric_params": [dict, None],
        "cluster_method": [StrOptions({"dbscan", "xi"})],
        "eps": [Interval(Real, 0, None, closed="both"), None],
        "xi": [Interval(Real, 0, 1, closed="both")],
        "predecessor_correction": ["boolean"],
        "min_cluster_size": [
            Interval(Integral, 2, None, closed="left"),
            Interval(RealNotInt, 0, 1, closed="right"),
            None,
        ],
        "algorithm": [StrOptions({"auto", "brute", "ball_tree", "kd_tree"})],
        "leaf_size": [Interval(Integral, 1, None, closed="left")],
        "memory": [str, HasMethods("cache"), None],
        "n_jobs": [Integral, None],
    }

    def __init__(
        self,
        *,
        min_samples=5,
        max_eps=np.inf,
        metric="minkowski",
        p=2,
        metric_params=None,
        cluster_method="xi",
        eps=None,
        xi=0.05,
        predecessor_correction=True,
        min_cluster_size=None,
        algorithm="auto",
        leaf_size=30,
        memory=None,
        n_jobs=None,
    ):
        # Hyperparameters are stored as-is; validation happens in `fit`.
        self.max_eps = max_eps
        self.min_samples = min_samples
        self.min_cluster_size = min_cluster_size
        self.algorithm = algorithm
        self.metric = metric
        self.metric_params = metric_params
        self.p = p
        self.leaf_size = leaf_size
        self.cluster_method = cluster_method
        self.eps = eps
        self.xi = xi
        self.predecessor_correction = predecessor_correction
        self.memory = memory
        self.n_jobs = n_jobs

    @_fit_context(
        # Optics.metric is not validated yet
        prefer_skip_nested_validation=False
    )
    def fit(self, X, y=None):
        """Perform OPTICS clustering.

        Extracts an ordered list of points and reachability distances, and
        performs initial clustering using ``max_eps`` distance specified at
        OPTICS object instantiation.

        Parameters
        ----------
        X : {ndarray, sparse matrix} of shape (n_samples, n_features), or \
                (n_samples, n_samples) if metric='precomputed'
            A feature array, or array of distances between samples if
            metric='precomputed'. If a sparse matrix is provided, it will be
            converted into CSR format.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : object
            Returns a fitted instance of self.
        """
        # Boolean metrics expect boolean data; warn before the implicit cast
        # performed by validate_data below.
        dtype = bool if self.metric in PAIRWISE_BOOLEAN_FUNCTIONS else float
        if dtype is bool and X.dtype != bool:
            msg = (
                "Data will be converted to boolean for"
                f" metric {self.metric}, to avoid this warning,"
                " you may convert the data prior to calling fit."
            )
            warnings.warn(msg, DataConversionWarning)
        X = validate_data(self, X, dtype=dtype, accept_sparse="csr")
        if self.metric == "precomputed" and issparse(X):
            X = X.copy()  # copy to avoid in-place modification
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", SparseEfficiencyWarning)
                # Set each diagonal to an explicit value so each point is its
                # own neighbor
                X.setdiag(X.diagonal())
        memory = check_memory(self.memory)
        # Graph construction is the expensive part; it is cached through
        # joblib when a `memory` argument was given.
        (
            self.ordering_,
            self.core_distances_,
            self.reachability_,
            self.predecessor_,
        ) = memory.cache(compute_optics_graph)(
            X=X,
            min_samples=self.min_samples,
            algorithm=self.algorithm,
            leaf_size=self.leaf_size,
            metric=self.metric,
            metric_params=self.metric_params,
            p=self.p,
            n_jobs=self.n_jobs,
            max_eps=self.max_eps,
        )
        # Extract clusters from the calculated orders and reachability
        if self.cluster_method == "xi":
            labels_, clusters_ = cluster_optics_xi(
                reachability=self.reachability_,
                predecessor=self.predecessor_,
                ordering=self.ordering_,
                min_samples=self.min_samples,
                min_cluster_size=self.min_cluster_size,
                xi=self.xi,
                predecessor_correction=self.predecessor_correction,
            )
            self.cluster_hierarchy_ = clusters_
        elif self.cluster_method == "dbscan":
            # `eps` defaults to `max_eps` and may never exceed it, since the
            # graph only contains reachabilities up to `max_eps`.
            if self.eps is None:
                eps = self.max_eps
            else:
                eps = self.eps
            if eps > self.max_eps:
                raise ValueError(
                    "Specify an epsilon smaller than %s. Got %s." % (self.max_eps, eps)
                )
            labels_ = cluster_optics_dbscan(
                reachability=self.reachability_,
                core_distances=self.core_distances_,
                ordering=self.ordering_,
                eps=eps,
            )
        self.labels_ = labels_
        return self
def _validate_size(size, n_samples, param_name):
if size > n_samples:
raise ValueError(
"%s must be no greater than the number of samples (%d). Got %d"
% (param_name, n_samples, size)
)
# OPTICS helper functions
def _compute_core_distances_(X, neighbors, min_samples, working_memory):
    """Compute each sample's distance to its ``min_samples``-th neighbor.

    Memory-frugal equivalent of
    ``neighbors.kneighbors(X, min_samples)[0][:, -1]``: queries are issued
    in row chunks so that each temporary distance matrix stays within the
    working-memory budget.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        The data.

    neighbors : NearestNeighbors instance
        The fitted nearest neighbors estimator.

    min_samples : int
        Rank of the neighbor whose distance defines the core distance.

    working_memory : int, default=None
        The sought maximum memory for temporary distance matrix chunks.
        When None (default), the value of
        ``sklearn.get_config()['working_memory']`` is used.

    Returns
    -------
    core_distances : ndarray of shape (n_samples,)
        Distance of each sample to its ``min_samples``-th nearest neighbor.
    """
    n_samples = X.shape[0]
    # Start from NaN; every entry is overwritten chunk by chunk below.
    core_distances = np.full(n_samples, np.nan)
    chunk_n_rows = get_chunk_n_rows(
        row_bytes=16 * min_samples, max_n_rows=n_samples, working_memory=working_memory
    )
    for batch in gen_batches(n_samples, chunk_n_rows):
        # The last column of the kneighbors distances is the
        # ``min_samples``-th nearest-neighbor distance.
        core_distances[batch] = neighbors.kneighbors(X[batch], min_samples)[0][:, -1]
    return core_distances
@validate_params(
    {
        "X": [np.ndarray, "sparse matrix"],
        "min_samples": [
            Interval(Integral, 2, None, closed="left"),
            Interval(RealNotInt, 0, 1, closed="both"),
        ],
        "max_eps": [Interval(Real, 0, None, closed="both")],
        "metric": [StrOptions(set(_VALID_METRICS) | {"precomputed"}), callable],
        "p": [Interval(Real, 0, None, closed="right"), None],
        "metric_params": [dict, None],
        "algorithm": [StrOptions({"auto", "brute", "ball_tree", "kd_tree"})],
        "leaf_size": [Interval(Integral, 1, None, closed="left")],
        "n_jobs": [Integral, None],
    },
    prefer_skip_nested_validation=False,  # metric is not validated yet
)
def compute_optics_graph(
    X, *, min_samples, max_eps, metric, p, metric_params, algorithm, leaf_size, n_jobs
):
    """Compute the OPTICS reachability graph.

    Read more in the :ref:`User Guide <optics>`.

    Parameters
    ----------
    X : {ndarray, sparse matrix} of shape (n_samples, n_features), or \
            (n_samples, n_samples) if metric='precomputed'
        A feature array, or array of distances between samples if
        metric='precomputed'.

    min_samples : int > 1 or float between 0 and 1
        The number of samples in a neighborhood for a point to be considered
        as a core point. Expressed as an absolute number or a fraction of the
        number of samples (rounded to be at least 2).

    max_eps : float, default=np.inf
        The maximum distance between two samples for one to be considered as
        in the neighborhood of the other. Default value of ``np.inf`` will
        identify clusters across all scales; reducing ``max_eps`` will result
        in shorter run times.

    metric : str or callable, default='minkowski'
        Metric to use for distance computation. Any metric from scikit-learn
        or scipy.spatial.distance can be used.

        If metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays as input and return one value indicating the
        distance between them. This works for Scipy's metrics, but is less
        efficient than passing the metric name as a string. If metric is
        "precomputed", X is assumed to be a distance matrix and must be square.

        Valid values for metric are:

        - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
          'manhattan']

        - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
          'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
          'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
          'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
          'yule']

        See the documentation for scipy.spatial.distance for details on these
        metrics.

        .. note::
           `'kulsinski'` is deprecated from SciPy 1.9 and will be removed in SciPy 1.11.

    p : float, default=2
        Parameter for the Minkowski metric from
        :class:`~sklearn.metrics.pairwise_distances`. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.

    metric_params : dict, default=None
        Additional keyword arguments for the metric function.

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
        Algorithm used to compute the nearest neighbors:

        - 'ball_tree' will use :class:`~sklearn.neighbors.BallTree`.
        - 'kd_tree' will use :class:`~sklearn.neighbors.KDTree`.
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to `fit` method. (default)

        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.

    leaf_size : int, default=30
        Leaf size passed to :class:`~sklearn.neighbors.BallTree` or
        :class:`~sklearn.neighbors.KDTree`. This can affect the speed of the
        construction and query, as well as the memory required to store the
        tree. The optimal value depends on the nature of the problem.

    n_jobs : int, default=None
        The number of parallel jobs to run for neighbors search.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    Returns
    -------
    ordering_ : array of shape (n_samples,)
        The cluster ordered list of sample indices.

    core_distances_ : array of shape (n_samples,)
        Distance at which each sample becomes a core point, indexed by object
        order. Points which will never be core have a distance of inf. Use
        ``clust.core_distances_[clust.ordering_]`` to access in cluster order.

    reachability_ : array of shape (n_samples,)
        Reachability distances per sample, indexed by object order. Use
        ``clust.reachability_[clust.ordering_]`` to access in cluster order.

    predecessor_ : array of shape (n_samples,)
        Point that a sample was reached from, indexed by object order.
        Seed points have a predecessor of -1.

    References
    ----------
    .. [1] Ankerst, Mihael, Markus M. Breunig, Hans-Peter Kriegel,
       and Jörg Sander. "OPTICS: ordering points to identify the clustering
       structure." ACM SIGMOD Record 28, no. 2 (1999): 49-60.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.cluster import compute_optics_graph
    >>> X = np.array([[1, 2], [2, 5], [3, 6],
    ...               [8, 7], [8, 8], [7, 3]])
    >>> ordering, core_distances, reachability, predecessor = compute_optics_graph(
    ...     X,
    ...     min_samples=2,
    ...     max_eps=np.inf,
    ...     metric="minkowski",
    ...     p=2,
    ...     metric_params=None,
    ...     algorithm="auto",
    ...     leaf_size=30,
    ...     n_jobs=None,
    ... )
    >>> ordering
    array([0, 1, 2, 5, 3, 4])
    >>> core_distances
    array([3.16, 1.41, 1.41, 1.  , 1.  ,
           4.12])
    >>> reachability
    array([ inf, 3.16, 1.41, 4.12, 1.  ,
           5.  ])
    >>> predecessor
    array([-1,  0,  1,  5,  3,  2])
    """
    n_samples = X.shape[0]
    _validate_size(min_samples, n_samples, "min_samples")
    if min_samples <= 1:
        # `min_samples` was given as a fraction of n_samples: convert it to
        # an absolute count, rounded to be at least 2.
        min_samples = max(2, int(min_samples * n_samples))
    # Start all points as 'unprocessed' ##
    reachability_ = np.empty(n_samples)
    reachability_.fill(np.inf)
    predecessor_ = np.empty(n_samples, dtype=int)
    predecessor_.fill(-1)
    nbrs = NearestNeighbors(
        n_neighbors=min_samples,
        algorithm=algorithm,
        leaf_size=leaf_size,
        metric=metric,
        metric_params=metric_params,
        p=p,
        n_jobs=n_jobs,
    )
    nbrs.fit(X)
    # Here we first do a kNN query for each point, this differs from
    # the original OPTICS that only used epsilon range queries.
    # TODO: handle working_memory somehow?
    core_distances_ = _compute_core_distances_(
        X=X, neighbors=nbrs, min_samples=min_samples, working_memory=None
    )
    # OPTICS puts an upper limit on these, use inf for undefined.
    core_distances_[core_distances_ > max_eps] = np.inf
    # Round at the dtype's decimal precision to trim tiny floating-point
    # noise before the comparisons in the main loop.
    np.around(
        core_distances_,
        decimals=np.finfo(core_distances_.dtype).precision,
        out=core_distances_,
    )
    # Main OPTICS loop. Not parallelizable. The order that entries are
    # written to the 'ordering_' list is important!
    # Note that this implementation is O(n^2) theoretically, but
    # supposedly with very low constant factors.
    processed = np.zeros(X.shape[0], dtype=bool)
    ordering = np.zeros(X.shape[0], dtype=int)
    for ordering_idx in range(X.shape[0]):
        # Choose next based on smallest reachability distance
        # (And prefer smaller ids on ties, possibly np.inf!)
        index = np.where(processed == 0)[0]
        point = index[np.argmin(reachability_[index])]
        processed[point] = True
        ordering[ordering_idx] = point
        if core_distances_[point] != np.inf:
            # Only core points can improve the reachability of their
            # unprocessed neighbors.
            _set_reach_dist(
                core_distances_=core_distances_,
                reachability_=reachability_,
                predecessor_=predecessor_,
                point_index=point,
                processed=processed,
                X=X,
                nbrs=nbrs,
                metric=metric,
                metric_params=metric_params,
                p=p,
                max_eps=max_eps,
            )
    if np.all(np.isinf(reachability_)):
        warnings.warn(
            (
                "All reachability values are inf. Set a larger"
                " max_eps or all data will be considered outliers."
            ),
            UserWarning,
        )
    return ordering, core_distances_, reachability_, predecessor_
def _set_reach_dist(
core_distances_,
reachability_,
predecessor_,
point_index,
processed,
X,
nbrs,
metric,
metric_params,
p,
max_eps,
):
P = X[point_index : point_index + 1]
# Assume that radius_neighbors is faster without distances
# and we don't need all distances, nevertheless, this means
# we may be doing some work twice.
indices = nbrs.radius_neighbors(P, radius=max_eps, return_distance=False)[0]
# Getting indices of neighbors that have not been processed
unproc = np.compress(~np.take(processed, indices), indices)
# Neighbors of current point are already processed.
if not unproc.size:
return
# Only compute distances to unprocessed neighbors:
if metric == "precomputed":
dists = X[[point_index], unproc]
if isinstance(dists, np.matrix):
dists = np.asarray(dists)
dists = dists.ravel()
else:
_params = dict() if metric_params is None else metric_params.copy()
if metric == "minkowski" and "p" not in _params:
# the same logic as neighbors, p is ignored if explicitly set
# in the dict params
_params["p"] = p
dists = pairwise_distances(P, X[unproc], metric, n_jobs=None, **_params).ravel()
rdists = np.maximum(dists, core_distances_[point_index])
np.around(rdists, decimals=np.finfo(rdists.dtype).precision, out=rdists)
improved = np.where(rdists < np.take(reachability_, unproc))
reachability_[unproc[improved]] = rdists[improved]
predecessor_[unproc[improved]] = point_index
@validate_params(
    {
        "reachability": [np.ndarray],
        "core_distances": [np.ndarray],
        "ordering": [np.ndarray],
        "eps": [Interval(Real, 0, None, closed="both")],
    },
    prefer_skip_nested_validation=True,
)
def cluster_optics_dbscan(*, reachability, core_distances, ordering, eps):
    """Perform DBSCAN extraction for an arbitrary epsilon.

    The extraction is a single linear pass over the cluster ordering. Note
    that the resulting ``labels_`` are close to those of a
    :class:`~sklearn.cluster.DBSCAN` with similar settings and ``eps``, only
    if ``eps`` is close to the ``max_eps`` used to build the OPTICS graph.

    Parameters
    ----------
    reachability : ndarray of shape (n_samples,)
        Reachability distances calculated by OPTICS (``reachability_``).

    core_distances : ndarray of shape (n_samples,)
        Distances at which points become core (``core_distances_``).

    ordering : ndarray of shape (n_samples,)
        OPTICS ordered point indices (``ordering_``).

    eps : float
        DBSCAN ``eps`` parameter. Must be set to < ``max_eps``. Results
        will be close to DBSCAN algorithm if ``eps`` and ``max_eps`` are close
        to one another.

    Returns
    -------
    labels_ : array of shape (n_samples,)
        The estimated labels. Noise points are labelled -1.

    Examples
    --------
    See :func:`~sklearn.cluster.compute_optics_graph` for how to obtain the
    ``reachability``, ``core_distances`` and ``ordering`` inputs.
    """
    labels = np.zeros(len(core_distances), dtype=int)
    unreachable = reachability > eps  # not density-reachable within eps
    is_core = core_distances <= eps  # would be a DBSCAN core point
    # Following the ordering, a core point that is not reachable from its
    # predecessor opens a new cluster; the cumulative sum of these "start"
    # flags yields consecutive, zero-based cluster ids.
    labels[ordering] = np.cumsum(unreachable[ordering] & is_core[ordering]) - 1
    # Points that are neither reachable nor core are noise.
    labels[unreachable & ~is_core] = -1
    return labels
@validate_params(
{
"reachability": [np.ndarray],
"predecessor": [np.ndarray],
"ordering": [np.ndarray],
"min_samples": [
Interval(Integral, 2, None, closed="left"),
Interval(RealNotInt, 0, 1, closed="both"),
],
"min_cluster_size": [
Interval(Integral, 2, None, closed="left"),
Interval(RealNotInt, 0, 1, closed="both"),
None,
],
"xi": [Interval(Real, 0, 1, closed="both")],
"predecessor_correction": ["boolean"],
},
prefer_skip_nested_validation=True,
)
def cluster_optics_xi(
*,
reachability,
predecessor,
ordering,
min_samples,
min_cluster_size=None,
xi=0.05,
predecessor_correction=True,
):
"""Automatically extract clusters according to the Xi-steep method.
Parameters
----------
reachability : ndarray of shape (n_samples,)
Reachability distances calculated by OPTICS (`reachability_`).
predecessor : ndarray of shape (n_samples,)
Predecessors calculated by OPTICS.
ordering : ndarray of shape (n_samples,)
OPTICS ordered point indices (`ordering_`).
min_samples : int > 1 or float between 0 and 1
The same as the min_samples given to OPTICS. Up and down steep regions
        can't have more than ``min_samples`` consecutive non-steep points.
Expressed as an absolute number or a fraction of the number of samples
(rounded to be at least 2).
min_cluster_size : int > 1 or float between 0 and 1, default=None
Minimum number of samples in an OPTICS cluster, expressed as an
absolute number or a fraction of the number of samples (rounded to be
at least 2). If ``None``, the value of ``min_samples`` is used instead.
xi : float between 0 and 1, default=0.05
Determines the minimum steepness on the reachability plot that
constitutes a cluster boundary. For example, an upwards point in the
reachability plot is defined by the ratio from one point to its
successor being at most 1-xi.
predecessor_correction : bool, default=True
Correct clusters based on the calculated predecessors.
Returns
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/cluster/__init__.py | sklearn/cluster/__init__.py | """Popular unsupervised clustering algorithms."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from sklearn.cluster._affinity_propagation import (
AffinityPropagation,
affinity_propagation,
)
from sklearn.cluster._agglomerative import (
AgglomerativeClustering,
FeatureAgglomeration,
linkage_tree,
ward_tree,
)
from sklearn.cluster._bicluster import SpectralBiclustering, SpectralCoclustering
from sklearn.cluster._birch import Birch
from sklearn.cluster._bisect_k_means import BisectingKMeans
from sklearn.cluster._dbscan import DBSCAN, dbscan
from sklearn.cluster._hdbscan.hdbscan import HDBSCAN
from sklearn.cluster._kmeans import KMeans, MiniBatchKMeans, k_means, kmeans_plusplus
from sklearn.cluster._mean_shift import (
MeanShift,
estimate_bandwidth,
get_bin_seeds,
mean_shift,
)
from sklearn.cluster._optics import (
OPTICS,
cluster_optics_dbscan,
cluster_optics_xi,
compute_optics_graph,
)
from sklearn.cluster._spectral import SpectralClustering, spectral_clustering
# Names exported by ``from sklearn.cluster import *``; re-exported from the
# private implementation submodules imported above.
__all__ = [
    "DBSCAN",
    "HDBSCAN",
    "OPTICS",
    "AffinityPropagation",
    "AgglomerativeClustering",
    "Birch",
    "BisectingKMeans",
    "FeatureAgglomeration",
    "KMeans",
    "MeanShift",
    "MiniBatchKMeans",
    "SpectralBiclustering",
    "SpectralClustering",
    "SpectralCoclustering",
    "affinity_propagation",
    "cluster_optics_dbscan",
    "cluster_optics_xi",
    "compute_optics_graph",
    "dbscan",
    "estimate_bandwidth",
    "get_bin_seeds",
    "k_means",
    "kmeans_plusplus",
    "linkage_tree",
    "mean_shift",
    "spectral_clustering",
    "ward_tree",
]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/cluster/_agglomerative.py | sklearn/cluster/_agglomerative.py | """Hierarchical Agglomerative Clustering
These routines perform hierarchical agglomerative clustering of
input data.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from heapq import heapify, heappop, heappush, heappushpop
from numbers import Integral, Real
import numpy as np
from scipy import sparse
from scipy.sparse.csgraph import connected_components
from sklearn.base import (
BaseEstimator,
ClassNamePrefixFeaturesOutMixin,
ClusterMixin,
_fit_context,
)
# mypy error: Module 'sklearn.cluster' has no attribute '_hierarchical_fast'
from sklearn.cluster import ( # type: ignore[attr-defined]
_hierarchical_fast as _hierarchical,
)
from sklearn.cluster._feature_agglomeration import AgglomerationTransform
from sklearn.metrics import DistanceMetric
from sklearn.metrics._dist_metrics import METRIC_MAPPING64
from sklearn.metrics.pairwise import _VALID_METRICS, paired_distances
from sklearn.utils import check_array
from sklearn.utils._fast_dict import IntFloatDict
from sklearn.utils._param_validation import (
HasMethods,
Interval,
StrOptions,
validate_params,
)
from sklearn.utils.graph import _fix_connected_components
from sklearn.utils.validation import check_memory, validate_data
###############################################################################
# For non fully-connected graphs
def _fix_connectivity(X, connectivity, affinity):
"""
Fixes the connectivity matrix.
The different steps are:
- copies it
- makes it symmetric
- converts it to LIL if necessary
- completes it if necessary.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Feature matrix representing `n_samples` samples to be clustered.
connectivity : sparse matrix, default=None
Connectivity matrix. Defines for each sample the neighboring samples
following a given structure of the data. The matrix is assumed to
be symmetric and only the upper triangular half is used.
Default is `None`, i.e, the Ward algorithm is unstructured.
affinity : {"euclidean", "precomputed"}, default="euclidean"
Which affinity to use. At the moment `precomputed` and
``euclidean`` are supported. `euclidean` uses the
negative squared Euclidean distance between points.
Returns
-------
connectivity : sparse matrix
The fixed connectivity matrix.
n_connected_components : int
The number of connected components in the graph.
"""
n_samples = X.shape[0]
if connectivity.shape[0] != n_samples or connectivity.shape[1] != n_samples:
raise ValueError(
"Wrong shape for connectivity matrix: %s when X is %s"
% (connectivity.shape, X.shape)
)
# Make the connectivity matrix symmetric:
connectivity = connectivity + connectivity.T
# Convert connectivity matrix to LIL
if not sparse.issparse(connectivity):
connectivity = sparse.lil_matrix(connectivity)
# `connectivity` is a sparse matrix at this point
if connectivity.format != "lil":
connectivity = connectivity.tolil()
# Compute the number of nodes
n_connected_components, labels = connected_components(connectivity)
if n_connected_components > 1:
warnings.warn(
"the number of connected components of the "
"connectivity matrix is %d > 1. Completing it to avoid "
"stopping the tree early." % n_connected_components,
stacklevel=2,
)
# XXX: Can we do without completing the matrix?
connectivity = _fix_connected_components(
X=X,
graph=connectivity,
n_connected_components=n_connected_components,
component_labels=labels,
metric=affinity,
mode="connectivity",
)
return connectivity, n_connected_components
def _single_linkage_tree(
    connectivity,
    n_samples,
    n_nodes,
    n_clusters,
    n_connected_components,
    return_distance,
):
    """Build a single-linkage tree from a sparse connectivity graph.

    The minimum spanning tree of the weighted graph is computed with
    :func:`scipy.sparse.csgraph.minimum_spanning_tree`, its edges are sorted
    by weight, and union-find labelling turns them into the standard
    hierarchical clustering ``children`` array. The ``parent`` array is then
    derived by replaying the merges in order.
    """
    from scipy.sparse.csgraph import minimum_spanning_tree

    # Explicit float64 cast so the epsilon trick below is well defined.
    connectivity = connectivity.astype(np.float64, copy=False)

    # An explicit weight of 0 would be ignored by the sparse MST routine, so
    # temporarily bump zero distances to the smallest representable positive
    # value.
    epsilon = np.finfo(dtype=connectivity.data.dtype).eps
    connectivity.data[connectivity.data == 0] = epsilon

    mst = minimum_spanning_tree(connectivity.tocsr()).tocoo()

    # Undo the epsilon substitution on the surviving edges.
    mst.data[mst.data == epsilon] = 0

    # Edge list as (row, col, weight) rows, sorted by weight; mergesort keeps
    # the ordering stable across equal weights.
    edges = np.column_stack([mst.row, mst.col, mst.data])
    edges = edges[edges[:, 2].argsort(kind="mergesort")]

    # Union-find labelling into the standard hierarchical clustering format.
    tree = _hierarchical._single_linkage_label(edges)
    children_ = tree[:, :2].astype(int)

    # Replay the merges in order to fill in each node's parent.
    parent = np.arange(n_nodes, dtype=np.intp)
    for offset, (left, right) in enumerate(children_):
        merged = n_samples + offset
        if n_clusters is not None and merged >= n_nodes:
            # Early-stopped tree: later merges fall outside the node range.
            break
        for child in (left, right):
            if child < n_nodes:
                parent[child] = merged

    if not return_distance:
        return children_, n_connected_components, n_samples, parent
    return children_, n_connected_components, n_samples, parent, tree[:, 2]
###############################################################################
# Hierarchical tree building functions
@validate_params(
    {
        "X": ["array-like"],
        "connectivity": ["array-like", "sparse matrix", None],
        "n_clusters": [Interval(Integral, 1, None, closed="left"), None],
        "return_distance": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def ward_tree(X, *, connectivity=None, n_clusters=None, return_distance=False):
    """Ward clustering based on a Feature matrix.

    Recursively merges the pair of clusters that minimally increases
    within-cluster variance.

    The inertia matrix uses a Heapq-based representation.

    This is the structured version, that takes into account some topological
    structure between samples.

    Read more in the :ref:`User Guide <hierarchical_clustering>`.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Feature matrix representing `n_samples` samples to be clustered.

    connectivity : {array-like, sparse matrix}, default=None
        Connectivity matrix. Defines for each sample the neighboring samples
        following a given structure of the data. The matrix is assumed to
        be symmetric and only the upper triangular half is used.
        Default is None, i.e, the Ward algorithm is unstructured.

    n_clusters : int, default=None
        `n_clusters` should be less than `n_samples`. Stop early the
        construction of the tree at `n_clusters.` This is useful to decrease
        computation time if the number of clusters is not small compared to the
        number of samples. In this case, the complete tree is not computed, thus
        the 'children' output is of limited use, and the 'parents' output should
        rather be used. This option is valid only when specifying a connectivity
        matrix.

    return_distance : bool, default=False
        If `True`, return the distance between the clusters.

    Returns
    -------
    children : ndarray of shape (n_nodes-1, 2)
        The children of each non-leaf node. Values less than `n_samples`
        correspond to leaves of the tree which are the original samples.
        A node `i` greater than or equal to `n_samples` is a non-leaf
        node and has children `children_[i - n_samples]`. Alternatively
        at the i-th iteration, children[i][0] and children[i][1]
        are merged to form node `n_samples + i`.

    n_connected_components : int
        The number of connected components in the graph.

    n_leaves : int
        The number of leaves in the tree.

    parents : ndarray of shape (n_nodes,) or None
        The parent of each node. Only returned when a connectivity matrix
        is specified, elsewhere 'None' is returned.

    distances : ndarray of shape (n_nodes-1,)
        Only returned if `return_distance` is set to `True` (for compatibility).
        The distances between the centers of the nodes. `distances[i]`
        corresponds to a weighted Euclidean distance between
        the nodes `children[i, 1]` and `children[i, 2]`. If the nodes refer to
        leaves of the tree, then `distances[i]` is their unweighted Euclidean
        distance. Distances are updated in the following way
        (from scipy.hierarchy.linkage):

        The new entry :math:`d(u,v)` is computed as follows,

        .. math::

           d(u,v) = \\sqrt{\\frac{|v|+|s|}
                    {T}d(v,s)^2
                 + \\frac{|v|+|t|}
                    {T}d(v,t)^2
                 - \\frac{|v|}
                    {T}d(s,t)^2}

        where :math:`u` is the newly joined cluster consisting of
        clusters :math:`s` and :math:`t`, :math:`v` is an unused
        cluster in the forest, :math:`T=|v|+|s|+|t|`, and
        :math:`|*|` is the cardinality of its argument. This is also
        known as the incremental algorithm.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.cluster import ward_tree
    >>> X = np.array([[1, 2], [1, 4], [1, 0],
    ...               [4, 2], [4, 4], [4, 0]])
    >>> children, n_connected_components, n_leaves, parents = ward_tree(X)
    >>> children
    array([[0, 1],
           [3, 5],
           [2, 6],
           [4, 7],
           [8, 9]])
    >>> n_connected_components
    1
    >>> n_leaves
    6
    """
    X = np.asarray(X)
    if X.ndim == 1:
        # A 1D input is interpreted as a single feature column.
        X = np.reshape(X, (-1, 1))
    n_samples, n_features = X.shape

    if connectivity is None:
        # Unstructured case: delegate the full tree to scipy.
        from scipy.cluster import hierarchy  # imports PIL

        if n_clusters is not None:
            warnings.warn(
                (
                    "Partial build of the tree is implemented "
                    "only for structured clustering (i.e. with "
                    "explicit connectivity). The algorithm "
                    "will build the full tree and only "
                    "retain the lower branches required "
                    "for the specified number of clusters"
                ),
                stacklevel=2,
            )
        # scipy requires a writable array.
        X = np.require(X, requirements="W")
        out = hierarchy.ward(X)
        children_ = out[:, :2].astype(np.intp)

        if return_distance:
            distances = out[:, 2]
            return children_, 1, n_samples, None, distances
        else:
            return children_, 1, n_samples, None

    connectivity, n_connected_components = _fix_connectivity(
        X, connectivity, affinity="euclidean"
    )
    if n_clusters is None:
        # Full tree: n_samples leaves plus n_samples - 1 merge nodes.
        n_nodes = 2 * n_samples - 1
    else:
        if n_clusters > n_samples:
            raise ValueError(
                "Cannot provide more clusters than samples. "
                "%i n_clusters was asked, and there are %i "
                "samples." % (n_clusters, n_samples)
            )
        # Early-stopped tree: only n_samples - n_clusters merges are done.
        n_nodes = 2 * n_samples - n_clusters

    # create inertia matrix
    coord_row = []
    coord_col = []
    A = []
    for ind, row in enumerate(connectivity.rows):
        A.append(row)
        # We keep only the upper triangular for the moments
        # Generator expressions are faster than arrays on the following
        row = [i for i in row if i < ind]
        coord_row.extend(
            len(row)
            * [
                ind,
            ]
        )
        coord_col.extend(row)

    coord_row = np.array(coord_row, dtype=np.intp, order="C")
    coord_col = np.array(coord_col, dtype=np.intp, order="C")

    # build moments as a list
    # moments_1[c] is the number of samples merged into node c;
    # moments_2[c] is the per-feature coordinate sum of node c.
    moments_1 = np.zeros(n_nodes, order="C")
    moments_1[:n_samples] = 1
    moments_2 = np.zeros((n_nodes, n_features), order="C")
    moments_2[:n_samples] = X
    inertia = np.empty(len(coord_row), dtype=np.float64, order="C")
    _hierarchical.compute_ward_dist(moments_1, moments_2, coord_row, coord_col, inertia)
    inertia = list(zip(inertia, coord_row, coord_col))
    heapify(inertia)

    # prepare the main fields
    parent = np.arange(n_nodes, dtype=np.intp)
    used_node = np.ones(n_nodes, dtype=bool)
    children = []
    if return_distance:
        distances = np.empty(n_nodes - n_samples)

    not_visited = np.empty(n_nodes, dtype=bool, order="C")

    # recursive merge loop
    for k in range(n_samples, n_nodes):
        # identify the merge: lazy deletion — heap entries that mention an
        # already-merged node are stale and simply skipped.
        while True:
            inert, i, j = heappop(inertia)
            if used_node[i] and used_node[j]:
                break
        parent[i], parent[j] = k, k
        children.append((i, j))
        used_node[i] = used_node[j] = False
        if return_distance:  # store inertia value
            distances[k - n_samples] = inert

        # update the moments
        moments_1[k] = moments_1[i] + moments_1[j]
        moments_2[k] = moments_2[i] + moments_2[j]

        # update the structure matrix A and the inertia matrix
        # NOTE(review): _get_parents appears to collect, into coord_col, the
        # still-active clusters adjacent to i and j — confirm against
        # _hierarchical_fast.pyx.
        coord_col = []
        not_visited.fill(1)
        not_visited[k] = 0
        _hierarchical._get_parents(A[i], coord_col, parent, not_visited)
        _hierarchical._get_parents(A[j], coord_col, parent, not_visited)
        # List comprehension is faster than a for loop
        [A[col].append(k) for col in coord_col]
        A.append(coord_col)
        coord_col = np.array(coord_col, dtype=np.intp, order="C")
        coord_row = np.empty(coord_col.shape, dtype=np.intp, order="C")
        coord_row.fill(k)
        n_additions = len(coord_row)
        ini = np.empty(n_additions, dtype=np.float64, order="C")
        _hierarchical.compute_ward_dist(moments_1, moments_2, coord_row, coord_col, ini)

        # List comprehension is faster than a for loop
        [heappush(inertia, (ini[idx], k, coord_col[idx])) for idx in range(n_additions)]

    # Separate leaves in children (empty lists up to now)
    n_leaves = n_samples
    # sort children to get consistent output with unstructured version
    children = [c[::-1] for c in children]
    children = np.array(children)  # return numpy array for efficient caching

    if return_distance:
        # 2 is scaling factor to compare w/ unstructured version
        distances = np.sqrt(2.0 * distances)
        return children, n_connected_components, n_leaves, parent, distances
    else:
        return children, n_connected_components, n_leaves, parent
# single average and complete linkage
def linkage_tree(
    X,
    connectivity=None,
    n_clusters=None,
    linkage="complete",
    affinity="euclidean",
    return_distance=False,
):
    """Linkage agglomerative clustering based on a Feature matrix.

    The inertia matrix uses a Heapq-based representation.

    This is the structured version, that takes into account some topological
    structure between samples.

    Read more in the :ref:`User Guide <hierarchical_clustering>`.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Feature matrix representing `n_samples` samples to be clustered.

    connectivity : sparse matrix, default=None
        Connectivity matrix. Defines for each sample the neighboring samples
        following a given structure of the data. The matrix is assumed to
        be symmetric and only the upper triangular half is used.
        Default is `None`, i.e, the Ward algorithm is unstructured.

    n_clusters : int, default=None
        Stop early the construction of the tree at `n_clusters`. This is
        useful to decrease computation time if the number of clusters is
        not small compared to the number of samples. In this case, the
        complete tree is not computed, thus the 'children' output is of
        limited use, and the 'parents' output should rather be used.
        This option is valid only when specifying a connectivity matrix.

    linkage : {"average", "complete", "single"}, default="complete"
        Which linkage criteria to use. The linkage criterion determines which
        distance to use between sets of observation.

        - "average" uses the average of the distances of each observation of
          the two sets.
        - "complete" or maximum linkage uses the maximum distances between
          all observations of the two sets.
        - "single" uses the minimum of the distances between all
          observations of the two sets.

    affinity : str or callable, default='euclidean'
        Which metric to use. Can be 'euclidean', 'manhattan', or any
        distance known to paired distance (see metric.pairwise).

    return_distance : bool, default=False
        Whether or not to return the distances between the clusters.

    Returns
    -------
    children : ndarray of shape (n_nodes-1, 2)
        The children of each non-leaf node. Values less than `n_samples`
        correspond to leaves of the tree which are the original samples.
        A node `i` greater than or equal to `n_samples` is a non-leaf
        node and has children `children_[i - n_samples]`. Alternatively
        at the i-th iteration, children[i][0] and children[i][1]
        are merged to form node `n_samples + i`.

    n_connected_components : int
        The number of connected components in the graph.

    n_leaves : int
        The number of leaves in the tree.

    parents : ndarray of shape (n_nodes, ) or None
        The parent of each node. Only returned when a connectivity matrix
        is specified, elsewhere 'None' is returned.

    distances : ndarray of shape (n_nodes-1,)
        Returned when `return_distance` is set to `True`.

        distances[i] refers to the distance between children[i][0] and
        children[i][1] when they are merged.

    See Also
    --------
    ward_tree : Hierarchical clustering with ward linkage.
    """
    X = np.asarray(X)
    if X.ndim == 1:
        # A 1D input is interpreted as a single feature column.
        X = np.reshape(X, (-1, 1))
    n_samples, n_features = X.shape

    linkage_choices = {
        "complete": _hierarchical.max_merge,
        "average": _hierarchical.average_merge,
        "single": None,
    }  # Single linkage is handled differently
    try:
        join_func = linkage_choices[linkage]
    except KeyError as e:
        raise ValueError(
            "Unknown linkage option, linkage should be one of %s, but %s was given"
            % (linkage_choices.keys(), linkage)
        ) from e

    if affinity == "cosine" and np.any(~np.any(X, axis=1)):
        raise ValueError("Cosine affinity cannot be used when X contains zero vectors")

    if connectivity is None:
        # Unstructured case: delegate the full tree to scipy.
        from scipy.cluster import hierarchy  # imports PIL

        if n_clusters is not None:
            warnings.warn(
                (
                    "Partial build of the tree is implemented "
                    "only for structured clustering (i.e. with "
                    "explicit connectivity). The algorithm "
                    "will build the full tree and only "
                    "retain the lower branches required "
                    "for the specified number of clusters"
                ),
                stacklevel=2,
            )

        if affinity == "precomputed":
            # for the linkage function of hierarchy to work on precomputed
            # data, provide as first argument an ndarray of the shape returned
            # by sklearn.metrics.pairwise_distances.
            if X.shape[0] != X.shape[1]:
                raise ValueError(
                    f"Distance matrix should be square, got matrix of shape {X.shape}"
                )
            i, j = np.triu_indices(X.shape[0], k=1)
            X = X[i, j]
        elif affinity == "l2":
            # Translate to something understood by scipy
            affinity = "euclidean"
        elif affinity in ("l1", "manhattan"):
            affinity = "cityblock"
        elif callable(affinity):
            X = affinity(X)
            i, j = np.triu_indices(X.shape[0], k=1)
            X = X[i, j]
        if (
            linkage == "single"
            and affinity != "precomputed"
            and not callable(affinity)
            and affinity in METRIC_MAPPING64
        ):
            # We need the fast cythonized metric from neighbors
            dist_metric = DistanceMetric.get_metric(affinity)
            # The Cython routines used require contiguous arrays
            X = np.ascontiguousarray(X, dtype=np.double)

            mst = _hierarchical.mst_linkage_core(X, dist_metric)
            # Sort edges of the min_spanning_tree by weight
            mst = mst[np.argsort(mst.T[2], kind="mergesort"), :]

            # Convert edge list into standard hierarchical clustering format
            out = _hierarchical.single_linkage_label(mst)
        else:
            out = hierarchy.linkage(X, method=linkage, metric=affinity)
        children_ = out[:, :2].astype(int, copy=False)

        if return_distance:
            distances = out[:, 2]
            return children_, 1, n_samples, None, distances
        return children_, 1, n_samples, None

    connectivity, n_connected_components = _fix_connectivity(
        X, connectivity, affinity=affinity
    )
    connectivity = connectivity.tocoo()
    # Put the diagonal to zero
    diag_mask = connectivity.row != connectivity.col
    connectivity.row = connectivity.row[diag_mask]
    connectivity.col = connectivity.col[diag_mask]
    connectivity.data = connectivity.data[diag_mask]
    del diag_mask

    if affinity == "precomputed":
        distances = X[connectivity.row, connectivity.col].astype(np.float64, copy=False)
    else:
        # FIXME We compute all the distances, while we could have only computed
        # the "interesting" distances
        distances = paired_distances(
            X[connectivity.row], X[connectivity.col], metric=affinity
        )
    connectivity.data = distances

    if n_clusters is None:
        n_nodes = 2 * n_samples - 1
    else:
        assert n_clusters <= n_samples
        n_nodes = 2 * n_samples - n_clusters

    if linkage == "single":
        return _single_linkage_tree(
            connectivity,
            n_samples,
            n_nodes,
            n_clusters,
            n_connected_components,
            return_distance,
        )

    if return_distance:
        distances = np.empty(n_nodes - n_samples)
    # create inertia heap and connection matrix
    A = np.empty(n_nodes, dtype=object)
    inertia = list()

    # LIL seems to be the best format to access the rows quickly,
    # without the numpy overhead of slicing CSR indices and data.
    connectivity = connectivity.tolil()
    # We are storing the graph in a list of IntFloatDict
    for ind, (data, row) in enumerate(zip(connectivity.data, connectivity.rows)):
        A[ind] = IntFloatDict(
            np.asarray(row, dtype=np.intp), np.asarray(data, dtype=np.float64)
        )
        # We keep only the upper triangular for the heap
        # Generator expressions are faster than arrays on the following
        inertia.extend(
            _hierarchical.WeightedEdge(d, ind, r) for r, d in zip(row, data) if r < ind
        )
    del connectivity

    heapify(inertia)

    # prepare the main fields
    parent = np.arange(n_nodes, dtype=np.intp)
    # used_node doubles as the cluster-size counter: 0 means retired.
    used_node = np.ones(n_nodes, dtype=np.intp)
    children = []

    # recursive merge loop
    for k in range(n_samples, n_nodes):
        # identify the merge: lazy deletion — heap entries that mention an
        # already-merged node are stale and simply skipped.
        while True:
            edge = heappop(inertia)
            if used_node[edge.a] and used_node[edge.b]:
                break
        i = edge.a
        j = edge.b

        if return_distance:
            # store distances
            distances[k - n_samples] = edge.weight

        parent[i] = parent[j] = k
        children.append((i, j))
        # Keep track of the number of elements per cluster
        n_i = used_node[i]
        n_j = used_node[j]
        used_node[k] = n_i + n_j
        used_node[i] = used_node[j] = False

        # update the structure matrix A and the inertia matrix
        # a clever 'min', or 'max' operation between A[i] and A[j]
        coord_col = join_func(A[i], A[j], used_node, n_i, n_j)
        for col, d in coord_col:
            A[col].append(k, d)
            # Here we use the information from coord_col (containing the
            # distances) to update the heap
            heappush(inertia, _hierarchical.WeightedEdge(d, k, col))
        A[k] = coord_col

        # Clear A[i] and A[j] to save memory
        A[i] = A[j] = 0

    # Separate leaves in children (empty lists up to now)
    n_leaves = n_samples

    # return numpy array for efficient caching
    children = np.array(children)[:, ::-1]

    if return_distance:
        return children, n_connected_components, n_leaves, parent, distances
    return children, n_connected_components, n_leaves, parent
# Matching names to tree-building strategies
def _complete_linkage(*args, **kwargs):
    """Call :func:`linkage_tree` with complete linkage forced on."""
    return linkage_tree(*args, **{**kwargs, "linkage": "complete"})
def _average_linkage(*args, **kwargs):
    """Call :func:`linkage_tree` with average linkage forced on."""
    return linkage_tree(*args, **{**kwargs, "linkage": "average"})
def _single_linkage(*args, **kwargs):
    """Call :func:`linkage_tree` with single linkage forced on."""
    return linkage_tree(*args, **{**kwargs, "linkage": "single"})
# Map each supported linkage name to its tree-building routine.
_TREE_BUILDERS = {
    "ward": ward_tree,
    "complete": _complete_linkage,
    "average": _average_linkage,
    "single": _single_linkage,
}
###############################################################################
# Functions for cutting hierarchical clustering tree
def _hc_cut(n_clusters, children, n_leaves):
    """Cut a hierarchical clustering tree into ``n_clusters`` flat clusters.

    Parameters
    ----------
    n_clusters : int or ndarray
        The number of clusters to form.

    children : ndarray of shape (n_nodes-1, 2)
        The children of each non-leaf node. Values less than `n_samples`
        correspond to leaves of the tree which are the original samples.
        A node `i` greater than or equal to `n_samples` is a non-leaf
        node and has children `children_[i - n_samples]`.

    n_leaves : int
        Number of leaves of the tree.

    Returns
    -------
    labels : array [n_samples]
        Cluster labels for each point.
    """
    if n_clusters > n_leaves:
        raise ValueError(
            "Cannot extract more clusters than samples: "
            f"{n_clusters} clusters were given for a tree with {n_leaves} leaves."
        )
    # The most recently formed node must be split first. heapq is a min-heap,
    # so node indices are stored negated: the heap head is always the largest
    # remaining node, found without rescanning. children[-1] is the final
    # merge, hence max(children[-1]) + 1 is the root of the tree.
    open_nodes = [-(max(children[-1]) + 1)]
    for _ in range(n_clusters - 1):
        # Split the largest open node: push both of its children, then
        # remove the node itself from the heap.
        left, right = children[-open_nodes[0] - n_leaves]
        heappush(open_nodes, -left)
        heappushpop(open_nodes, -right)
    labels = np.zeros(n_leaves, dtype=np.intp)
    for cluster_id, neg_node in enumerate(open_nodes):
        leaf_idx = _hierarchical._hc_get_descendent(-neg_node, children, n_leaves)
        labels[leaf_idx] = cluster_id
    return labels
###############################################################################
class AgglomerativeClustering(ClusterMixin, BaseEstimator):
"""
Agglomerative Clustering.
Recursively merges pair of clusters of sample data; uses linkage distance.
Read more in the :ref:`User Guide <hierarchical_clustering>`.
Parameters
----------
n_clusters : int or None, default=2
The number of clusters to find. It must be ``None`` if
``distance_threshold`` is not ``None``.
metric : str or callable, default="euclidean"
Metric used to compute the linkage. Can be "euclidean", "l1", "l2",
"manhattan", "cosine", or "precomputed". If linkage is "ward", only
"euclidean" is accepted. If "precomputed", a distance matrix is needed
as input for the fit method. If connectivity is None, linkage is
"single" and affinity is not "precomputed" any valid pairwise distance
metric can be assigned.
For an example of agglomerative clustering with different metrics, see
:ref:`sphx_glr_auto_examples_cluster_plot_agglomerative_clustering_metrics.py`.
.. versionadded:: 1.2
memory : str or object with the joblib.Memory interface, default=None
Used to cache the output of the computation of the tree.
By default, no caching is done. If a string is given, it is the
path to the caching directory.
connectivity : array-like, sparse matrix, or callable, default=None
Connectivity matrix. Defines for each sample the neighboring
samples following a given structure of the data.
This can be a connectivity matrix itself or a callable that transforms
the data into a connectivity matrix, such as derived from
`kneighbors_graph`. Default is ``None``, i.e, the
hierarchical clustering algorithm is unstructured.
For an example of connectivity matrix using
:class:`~sklearn.neighbors.kneighbors_graph`, see
:ref:`sphx_glr_auto_examples_cluster_plot_ward_structured_vs_unstructured.py`.
compute_full_tree : 'auto' or bool, default='auto'
Stop early the construction of the tree at ``n_clusters``. This is
useful to decrease computation time if the number of clusters is not
small compared to the number of samples. This option is useful only
when specifying a connectivity matrix. Note also that when varying the
number of clusters and using caching, it may be advantageous to compute
the full tree. It must be ``True`` if ``distance_threshold`` is not
``None``. By default `compute_full_tree` is "auto", which is equivalent
to `True` when `distance_threshold` is not `None` or that `n_clusters`
is inferior to the maximum between 100 or `0.02 * n_samples`.
Otherwise, "auto" is equivalent to `False`.
linkage : {'ward', 'complete', 'average', 'single'}, default='ward'
Which linkage criterion to use. The linkage criterion determines which
distance to use between sets of observation. The algorithm will merge
the pairs of cluster that minimize this criterion.
- 'ward' minimizes the variance of the clusters being merged.
- 'average' uses the average of the distances of each observation of
the two sets.
- 'complete' or 'maximum' linkage uses the maximum distances between
all observations of the two sets.
- 'single' uses the minimum of the distances between all observations
of the two sets.
.. versionadded:: 0.20
Added the 'single' option
For examples comparing different `linkage` criteria, see
:ref:`sphx_glr_auto_examples_cluster_plot_linkage_comparison.py`.
distance_threshold : float, default=None
The linkage distance threshold at or above which clusters will not be
merged. If not ``None``, ``n_clusters`` must be ``None`` and
``compute_full_tree`` must be ``True``.
.. versionadded:: 0.21
compute_distances : bool, default=False
Computes distances between clusters even if `distance_threshold` is not
used. This can be used to make dendrogram visualization, but introduces
a computational and memory overhead.
.. versionadded:: 0.24
For an example of dendrogram visualization, see
:ref:`sphx_glr_auto_examples_cluster_plot_agglomerative_dendrogram.py`.
Attributes
----------
n_clusters_ : int
The number of clusters found by the algorithm. If
``distance_threshold=None``, it will be equal to the given
``n_clusters``.
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/cluster/_spectral.py | sklearn/cluster/_spectral.py | """Algorithms for spectral clustering"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from numbers import Integral, Real
import numpy as np
from scipy.linalg import LinAlgError, qr, svd
from scipy.sparse import csc_matrix
from sklearn.base import BaseEstimator, ClusterMixin, _fit_context
from sklearn.cluster._kmeans import k_means
from sklearn.manifold._spectral_embedding import _spectral_embedding
from sklearn.metrics.pairwise import KERNEL_PARAMS, pairwise_kernels
from sklearn.neighbors import NearestNeighbors, kneighbors_graph
from sklearn.utils import as_float_array, check_random_state
from sklearn.utils._param_validation import Interval, StrOptions, validate_params
from sklearn.utils.validation import validate_data
def cluster_qr(vectors):
    """Find the discrete partition closest to the eigenvector embedding.
    This implementation was proposed in [1]_.
    .. versionadded:: 1.1
    Parameters
    ----------
    vectors : array-like, shape: (n_samples, n_clusters)
        The embedding space of the samples.
    Returns
    -------
    labels : array of integers, shape: n_samples
        The cluster labels of vectors.
    References
    ----------
    .. [1] :doi:`Simple, direct, and efficient multi-way spectral clustering, 2019
        Anil Damle, Victor Minden, Lexing Ying
        <10.1093/imaiai/iay008>`
    """
    n_clusters = vectors.shape[1]
    # Column-pivoted QR on the transposed embedding selects n_clusters
    # representative samples with well-separated embedding rows.
    _, _, pivot = qr(vectors.T, pivoting=True)
    # The SVD of the representative rows yields the orthogonal rotation that
    # best aligns the embedding with the coordinate axes.
    left, _, right = svd(vectors[pivot[:n_clusters], :].T)
    aligned = np.abs(vectors @ np.dot(left, right.conj()))
    # Each sample is assigned to the axis (cluster) it aligns with most.
    return aligned.argmax(axis=1)
def discretize(
    vectors, *, copy=True, max_svd_restarts=30, n_iter_max=20, random_state=None
):
    """Search for a partition matrix which is closest to the eigenvector embedding.
    This implementation was proposed in [1]_.
    Parameters
    ----------
    vectors : array-like of shape (n_samples, n_clusters)
        The embedding space of the samples.
    copy : bool, default=True
        Whether to copy vectors, or perform in-place normalization.
    max_svd_restarts : int, default=30
        Maximum number of attempts to restart SVD if convergence fails.
    n_iter_max : int, default=20
        Maximum number of iterations to attempt in rotation and partition
        matrix search if machine precision convergence is not reached.
    random_state : int, RandomState instance, default=None
        Determines random number generation for rotation matrix initialization.
        Use an int to make the randomness deterministic.
        See :term:`Glossary <random_state>`.
    Returns
    -------
    labels : array of integers, shape: n_samples
        The labels of the clusters.
    References
    ----------
    .. [1] `Multiclass spectral clustering, 2003
        Stella X. Yu, Jianbo Shi
        <https://people.eecs.berkeley.edu/~jordan/courses/281B-spring04/readings/yu-shi.pdf>`_
    Notes
    -----
    The eigenvector embedding is used to iteratively search for the
    closest discrete partition. First, the eigenvector embedding is
    normalized to the space of partition matrices. An optimal discrete
    partition matrix closest to this normalized embedding multiplied by
    an initial rotation is calculated. Fixing this discrete partition
    matrix, an optimal rotation matrix is calculated. These two
    calculations are performed until convergence. The discrete partition
    matrix is returned as the clustering solution. Used in spectral
    clustering, this method tends to be faster and more robust to random
    initialization than k-means.
    """
    random_state = check_random_state(random_state)
    vectors = as_float_array(vectors, copy=copy)
    # Machine precision: convergence threshold for the objective value.
    eps = np.finfo(float).eps
    n_samples, n_components = vectors.shape
    # Normalize the eigenvectors to an equal length of a vector of ones.
    # Reorient the eigenvectors to point in the negative direction with respect
    # to the first element. This may have to do with constraining the
    # eigenvectors to lie in a specific quadrant to make the discretization
    # search easier.
    norm_ones = np.sqrt(n_samples)
    for i in range(vectors.shape[1]):
        vectors[:, i] = (vectors[:, i] / np.linalg.norm(vectors[:, i])) * norm_ones
        if vectors[0, i] != 0:
            vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
    # Normalize the rows of the eigenvectors. Samples should lie on the unit
    # hypersphere centered at the origin. This transforms the samples in the
    # embedding space to the space of partition matrices.
    vectors = vectors / np.sqrt((vectors**2).sum(axis=1))[:, np.newaxis]
    svd_restarts = 0
    has_converged = False
    # If there is an exception we try to randomize and rerun SVD again
    # do this max_svd_restarts times.
    while (svd_restarts < max_svd_restarts) and not has_converged:
        # Initialize first column of rotation matrix with a row of the
        # eigenvectors
        rotation = np.zeros((n_components, n_components))
        rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
        # To initialize the rest of the rotation matrix, find the rows
        # of the eigenvectors that are as orthogonal to each other as
        # possible
        c = np.zeros(n_samples)
        for j in range(1, n_components):
            # Accumulate c to ensure row is as orthogonal as possible to
            # previous picks as well as current one
            c += np.abs(np.dot(vectors, rotation[:, j - 1]))
            rotation[:, j] = vectors[c.argmin(), :].T
        last_objective_value = 0.0
        n_iter = 0
        # Alternate between assigning the discrete partition closest to the
        # rotated embedding and re-fitting the rotation to that partition.
        while not has_converged:
            n_iter += 1
            t_discrete = np.dot(vectors, rotation)
            labels = t_discrete.argmax(axis=1)
            # One-hot (indicator) encoding of the current discrete partition.
            vectors_discrete = csc_matrix(
                (np.ones(len(labels)), (np.arange(0, n_samples), labels)),
                shape=(n_samples, n_components),
            )
            t_svd = vectors_discrete.T @ vectors
            try:
                U, S, Vh = np.linalg.svd(t_svd)
            except LinAlgError:
                svd_restarts += 1
                print("SVD did not converge, randomizing and trying again")
                break
            # Objective derived from the normalized-cut relaxation; smaller
            # singular-value sum means a worse fit between partition and
            # embedding.
            ncut_value = 2.0 * (n_samples - S.sum())
            if (abs(ncut_value - last_objective_value) < eps) or (n_iter > n_iter_max):
                has_converged = True
            else:
                # otherwise calculate rotation and continue
                last_objective_value = ncut_value
                rotation = np.dot(Vh.T, U.T)
    if not has_converged:
        raise LinAlgError("SVD did not converge")
    return labels
@validate_params(
    {"affinity": ["array-like", "sparse matrix"]},
    prefer_skip_nested_validation=False,
)
def spectral_clustering(
    affinity,
    *,
    n_clusters=8,
    n_components=None,
    eigen_solver=None,
    random_state=None,
    n_init=10,
    eigen_tol="auto",
    assign_labels="kmeans",
    verbose=False,
):
    """Apply clustering to a projection of the normalized Laplacian.
    In practice Spectral Clustering is very useful when the structure of
    the individual clusters is highly non-convex or more generally when
    a measure of the center and spread of the cluster is not a suitable
    description of the complete cluster. For instance, when clusters are
    nested circles on the 2D plane.
    If affinity is the adjacency matrix of a graph, this method can be
    used to find normalized graph cuts [1]_, [2]_.
    Read more in the :ref:`User Guide <spectral_clustering>`.
    Parameters
    ----------
    affinity : {array-like, sparse matrix} of shape (n_samples, n_samples)
        The affinity matrix describing the relationship of the samples to
        embed. **Must be symmetric**.
        Possible examples:
        - adjacency matrix of a graph,
        - heat kernel of the pairwise distance matrix of the samples,
        - symmetric k-nearest neighbours connectivity matrix of the samples.
    n_clusters : int, default=8
        Number of clusters to extract.
    n_components : int, default=n_clusters
        Number of eigenvectors to use for the spectral embedding.
    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
        The eigenvalue decomposition method. If None then ``'arpack'`` is used.
        See [4]_ for more details regarding ``'lobpcg'``.
        Eigensolver ``'amg'`` runs ``'lobpcg'`` with optional
        Algebraic MultiGrid preconditioning and requires pyamg to be installed.
        It can be faster on very large sparse problems [6]_ and [7]_.
    random_state : int, RandomState instance, default=None
        A pseudo random number generator used for the initialization
        of the lobpcg eigenvectors decomposition when `eigen_solver ==
        'amg'`, and for the K-Means initialization. Use an int to make
        the results deterministic across calls (See
        :term:`Glossary <random_state>`).
        .. note::
            When using `eigen_solver == 'amg'`,
            it is necessary to also fix the global numpy seed with
            `np.random.seed(int)` to get deterministic results. See
            https://github.com/pyamg/pyamg/issues/139 for further
            information.
    n_init : int, default=10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of n_init
        consecutive runs in terms of inertia. Only used if
        ``assign_labels='kmeans'``.
    eigen_tol : float, default="auto"
        Stopping criterion for eigendecomposition of the Laplacian matrix.
        If `eigen_tol="auto"` then the passed tolerance will depend on the
        `eigen_solver`:
        - If `eigen_solver="arpack"`, then `eigen_tol=0.0`;
        - If `eigen_solver="lobpcg"` or `eigen_solver="amg"`, then
          `eigen_tol=None` which configures the underlying `lobpcg` solver to
          automatically resolve the value according to their heuristics. See,
          :func:`scipy.sparse.linalg.lobpcg` for details.
        Note that when using `eigen_solver="lobpcg"` or `eigen_solver="amg"`
        values of `tol<1e-5` may lead to convergence issues and should be
        avoided.
        .. versionadded:: 1.2
           Added 'auto' option.
    assign_labels : {'kmeans', 'discretize', 'cluster_qr'}, default='kmeans'
        The strategy to use to assign labels in the embedding
        space. There are three ways to assign labels after the Laplacian
        embedding. k-means can be applied and is a popular choice. But it can
        also be sensitive to initialization. Discretization is another
        approach which is less sensitive to random initialization [3]_.
        The cluster_qr method [5]_ directly extracts clusters from eigenvectors
        in spectral clustering. In contrast to k-means and discretization, cluster_qr
        has no tuning parameters and is not an iterative method, yet may outperform
        k-means and discretization in terms of both quality and speed. For a detailed
        comparison of clustering strategies, refer to the following example:
        :ref:`sphx_glr_auto_examples_cluster_plot_coin_segmentation.py`.
        .. versionchanged:: 1.1
           Added new labeling method 'cluster_qr'.
    verbose : bool, default=False
        Verbosity mode.
        .. versionadded:: 0.24
    Returns
    -------
    labels : array of integers, shape: n_samples
        The labels of the clusters.
    Notes
    -----
    The graph should contain only one connected component, elsewhere
    the results make little sense.
    This algorithm solves the normalized cut for `k=2`: it is a
    normalized spectral clustering.
    References
    ----------
    .. [1] :doi:`Normalized cuts and image segmentation, 2000
        Jianbo Shi, Jitendra Malik
        <10.1109/34.868688>`
    .. [2] :doi:`A Tutorial on Spectral Clustering, 2007
        Ulrike von Luxburg
        <10.1007/s11222-007-9033-z>`
    .. [3] `Multiclass spectral clustering, 2003
        Stella X. Yu, Jianbo Shi
        <https://people.eecs.berkeley.edu/~jordan/courses/281B-spring04/readings/yu-shi.pdf>`_
    .. [4] :doi:`Toward the Optimal Preconditioned Eigensolver:
        Locally Optimal Block Preconditioned Conjugate Gradient Method, 2001
        A. V. Knyazev
        SIAM Journal on Scientific Computing 23, no. 2, pp. 517-541.
        <10.1137/S1064827500366124>`
    .. [5] :doi:`Simple, direct, and efficient multi-way spectral clustering, 2019
        Anil Damle, Victor Minden, Lexing Ying
        <10.1093/imaiai/iay008>`
    .. [6] :doi:`Multiscale Spectral Image Segmentation Multiscale preconditioning
        for computing eigenvalues of graph Laplacians in image segmentation, 2006
        Andrew Knyazev
        <10.13140/RG.2.2.35280.02565>`
    .. [7] :doi:`Preconditioned spectral clustering for stochastic block partition
        streaming graph challenge (Preliminary version at arXiv.)
        David Zhuzhunashvili, Andrew Knyazev
        <10.1109/HPEC.2017.8091045>`
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics.pairwise import pairwise_kernels
    >>> from sklearn.cluster import spectral_clustering
    >>> X = np.array([[1, 1], [2, 1], [1, 0],
    ...               [4, 7], [3, 5], [3, 6]])
    >>> affinity = pairwise_kernels(X, metric='rbf')
    >>> spectral_clustering(
    ...     affinity=affinity, n_clusters=2, assign_labels="discretize", random_state=0
    ... )
    array([1, 1, 1, 0, 0, 0])
    """
    # Thin functional wrapper: delegate to the estimator with a precomputed
    # affinity so both APIs share a single implementation.
    clusterer = SpectralClustering(
        n_clusters=n_clusters,
        n_components=n_components,
        eigen_solver=eigen_solver,
        random_state=random_state,
        n_init=n_init,
        affinity="precomputed",
        eigen_tol=eigen_tol,
        assign_labels=assign_labels,
        verbose=verbose,
    ).fit(affinity)
    return clusterer.labels_
class SpectralClustering(ClusterMixin, BaseEstimator):
    """Apply clustering to a projection of the normalized Laplacian.
    In practice Spectral Clustering is very useful when the structure of
    the individual clusters is highly non-convex, or more generally when
    a measure of the center and spread of the cluster is not a suitable
    description of the complete cluster, such as when clusters are
    nested circles on the 2D plane.
    If the affinity matrix is the adjacency matrix of a graph, this method
    can be used to find normalized graph cuts [1]_, [2]_.
    When calling ``fit``, an affinity matrix is constructed using either
    a kernel function such the Gaussian (aka RBF) kernel with Euclidean
    distance ``d(X, X)``::
            np.exp(-gamma * d(X,X) ** 2)
    or a k-nearest neighbors connectivity matrix.
    Alternatively, a user-provided affinity matrix can be specified by
    setting ``affinity='precomputed'``.
    Read more in the :ref:`User Guide <spectral_clustering>`.
    Parameters
    ----------
    n_clusters : int, default=8
        The dimension of the projection subspace.
    eigen_solver : {'arpack', 'lobpcg', 'amg'}, default=None
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities. If None, then ``'arpack'`` is
        used. See [4]_ for more details regarding `'lobpcg'`.
    n_components : int, default=None
        Number of eigenvectors to use for the spectral embedding. If None,
        defaults to `n_clusters`.
    random_state : int, RandomState instance, default=None
        A pseudo random number generator used for the initialization
        of the lobpcg eigenvectors decomposition when `eigen_solver ==
        'amg'`, and for the K-Means initialization. Use an int to make
        the results deterministic across calls (See
        :term:`Glossary <random_state>`).
        .. note::
            When using `eigen_solver == 'amg'`,
            it is necessary to also fix the global numpy seed with
            `np.random.seed(int)` to get deterministic results. See
            https://github.com/pyamg/pyamg/issues/139 for further
            information.
    n_init : int, default=10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of n_init
        consecutive runs in terms of inertia. Only used if
        ``assign_labels='kmeans'``.
    gamma : float, default=1.0
        Kernel coefficient for rbf, poly, sigmoid, laplacian and chi2 kernels.
        Ignored for ``affinity='nearest_neighbors'``, ``affinity='precomputed'``
        or ``affinity='precomputed_nearest_neighbors'``.
    affinity : str or callable, default='rbf'
        How to construct the affinity matrix.
        - 'nearest_neighbors': construct the affinity matrix by computing a
          graph of nearest neighbors.
        - 'rbf': construct the affinity matrix using a radial basis function
          (RBF) kernel.
        - 'precomputed': interpret ``X`` as a precomputed affinity matrix,
          where larger values indicate greater similarity between instances.
        - 'precomputed_nearest_neighbors': interpret ``X`` as a sparse graph
          of precomputed distances, and construct a binary affinity matrix
          from the ``n_neighbors`` nearest neighbors of each instance.
        - one of the kernels supported by
          :func:`~sklearn.metrics.pairwise.pairwise_kernels`.
        Only kernels that produce similarity scores (non-negative values that
        increase with similarity) should be used. This property is not checked
        by the clustering algorithm.
    n_neighbors : int, default=10
        Number of neighbors to use when constructing the affinity matrix using
        the nearest neighbors method. Ignored for ``affinity='rbf'``.
    eigen_tol : float, default="auto"
        Stopping criterion for eigen decomposition of the Laplacian matrix.
        If `eigen_tol="auto"` then the passed tolerance will depend on the
        `eigen_solver`:
        - If `eigen_solver="arpack"`, then `eigen_tol=0.0`;
        - If `eigen_solver="lobpcg"` or `eigen_solver="amg"`, then
          `eigen_tol=None` which configures the underlying `lobpcg` solver to
          automatically resolve the value according to their heuristics. See,
          :func:`scipy.sparse.linalg.lobpcg` for details.
        Note that when using `eigen_solver="lobpcg"` or `eigen_solver="amg"`
        values of `tol<1e-5` may lead to convergence issues and should be
        avoided.
        .. versionadded:: 1.2
           Added 'auto' option.
    assign_labels : {'kmeans', 'discretize', 'cluster_qr'}, default='kmeans'
        The strategy for assigning labels in the embedding space. There are two
        ways to assign labels after the Laplacian embedding. k-means is a
        popular choice, but it can be sensitive to initialization.
        Discretization is another approach which is less sensitive to random
        initialization [3]_.
        The cluster_qr method [5]_ directly extracts clusters from eigenvectors
        in spectral clustering. In contrast to k-means and discretization, cluster_qr
        has no tuning parameters and runs no iterations, yet may outperform
        k-means and discretization in terms of both quality and speed.
        .. versionchanged:: 1.1
           Added new labeling method 'cluster_qr'.
    degree : float, default=3
        Degree of the polynomial kernel. Ignored by other kernels.
    coef0 : float, default=1
        Zero coefficient for polynomial and sigmoid kernels.
        Ignored by other kernels.
    kernel_params : dict of str to any, default=None
        Parameters (keyword arguments) and values for kernel passed as
        callable object. Ignored by other kernels.
    n_jobs : int, default=None
        The number of parallel jobs to run when `affinity='nearest_neighbors'`
        or `affinity='precomputed_nearest_neighbors'`. The neighbors search
        will be done in parallel.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    verbose : bool, default=False
        Verbosity mode.
        .. versionadded:: 0.24
    Attributes
    ----------
    affinity_matrix_ : array-like of shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only after calling
        ``fit``.
    labels_ : ndarray of shape (n_samples,)
        Labels of each point
    n_features_in_ : int
        Number of features seen during :term:`fit`.
        .. versionadded:: 0.24
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.
        .. versionadded:: 1.0
    See Also
    --------
    sklearn.cluster.KMeans : K-Means clustering.
    sklearn.cluster.DBSCAN : Density-Based Spatial Clustering of
        Applications with Noise.
    Notes
    -----
    A distance matrix for which 0 indicates identical elements and high values
    indicate very dissimilar elements can be transformed into an affinity /
    similarity matrix that is well-suited for the algorithm by
    applying the Gaussian (aka RBF, heat) kernel::
        np.exp(- dist_matrix ** 2 / (2. * delta ** 2))
    where ``delta`` is a free parameter representing the width of the Gaussian
    kernel.
    An alternative is to take a symmetric version of the k-nearest neighbors
    connectivity matrix of the points.
    If the pyamg package is installed, it is used: this greatly
    speeds up computation.
    References
    ----------
    .. [1] :doi:`Normalized cuts and image segmentation, 2000
        Jianbo Shi, Jitendra Malik
        <10.1109/34.868688>`
    .. [2] :doi:`A Tutorial on Spectral Clustering, 2007
        Ulrike von Luxburg
        <10.1007/s11222-007-9033-z>`
    .. [3] `Multiclass spectral clustering, 2003
        Stella X. Yu, Jianbo Shi
        <https://people.eecs.berkeley.edu/~jordan/courses/281B-spring04/readings/yu-shi.pdf>`_
    .. [4] :doi:`Toward the Optimal Preconditioned Eigensolver:
        Locally Optimal Block Preconditioned Conjugate Gradient Method, 2001
        A. V. Knyazev
        SIAM Journal on Scientific Computing 23, no. 2, pp. 517-541.
        <10.1137/S1064827500366124>`
    .. [5] :doi:`Simple, direct, and efficient multi-way spectral clustering, 2019
        Anil Damle, Victor Minden, Lexing Ying
        <10.1093/imaiai/iay008>`
    Examples
    --------
    >>> from sklearn.cluster import SpectralClustering
    >>> import numpy as np
    >>> X = np.array([[1, 1], [2, 1], [1, 0],
    ...               [4, 7], [3, 5], [3, 6]])
    >>> clustering = SpectralClustering(n_clusters=2,
    ...         assign_labels='discretize',
    ...         random_state=0).fit(X)
    >>> clustering.labels_
    array([1, 1, 1, 0, 0, 0])
    >>> clustering
    SpectralClustering(assign_labels='discretize', n_clusters=2,
        random_state=0)
    For a comparison of Spectral clustering with other clustering algorithms, see
    :ref:`sphx_glr_auto_examples_cluster_plot_cluster_comparison.py`
    """

    _parameter_constraints: dict = {
        "n_clusters": [Interval(Integral, 1, None, closed="left")],
        "eigen_solver": [StrOptions({"arpack", "lobpcg", "amg"}), None],
        "n_components": [Interval(Integral, 1, None, closed="left"), None],
        "random_state": ["random_state"],
        "n_init": [Interval(Integral, 1, None, closed="left")],
        "gamma": [Interval(Real, 0, None, closed="left")],
        "affinity": [
            callable,
            StrOptions(
                set(KERNEL_PARAMS)
                | {"nearest_neighbors", "precomputed", "precomputed_nearest_neighbors"}
            ),
        ],
        "n_neighbors": [Interval(Integral, 1, None, closed="left")],
        "eigen_tol": [
            Interval(Real, 0.0, None, closed="left"),
            StrOptions({"auto"}),
        ],
        "assign_labels": [StrOptions({"kmeans", "discretize", "cluster_qr"})],
        "degree": [Interval(Real, 0, None, closed="left")],
        "coef0": [Interval(Real, None, None, closed="neither")],
        "kernel_params": [dict, None],
        "n_jobs": [Integral, None],
        "verbose": ["verbose"],
    }

    def __init__(
        self,
        n_clusters=8,
        *,
        eigen_solver=None,
        n_components=None,
        random_state=None,
        n_init=10,
        gamma=1.0,
        affinity="rbf",
        n_neighbors=10,
        eigen_tol="auto",
        assign_labels="kmeans",
        degree=3,
        coef0=1,
        kernel_params=None,
        n_jobs=None,
        verbose=False,
    ):
        self.n_clusters = n_clusters
        self.eigen_solver = eigen_solver
        self.n_components = n_components
        self.random_state = random_state
        self.n_init = n_init
        self.gamma = gamma
        self.affinity = affinity
        self.n_neighbors = n_neighbors
        self.eigen_tol = eigen_tol
        self.assign_labels = assign_labels
        self.degree = degree
        self.coef0 = coef0
        self.kernel_params = kernel_params
        self.n_jobs = n_jobs
        self.verbose = verbose

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """Perform spectral clustering from features, or affinity matrix.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
                (n_samples, n_samples)
            Training instances to cluster, similarities / affinities between
            instances if ``affinity='precomputed'``, or distances between
            instances if ``affinity='precomputed_nearest_neighbors``. If a
            sparse matrix is provided in a format other than ``csr_matrix``,
            ``csc_matrix``, or ``coo_matrix``, it will be converted into a
            sparse ``csr_matrix``.
        y : Ignored
            Not used, present here for API consistency by convention.
        Returns
        -------
        self : object
            A fitted instance of the estimator.
        """
        X = validate_data(
            self,
            X,
            accept_sparse=["csr", "csc", "coo"],
            dtype=np.float64,
            ensure_min_samples=2,
        )
        allow_squared = self.affinity in [
            "precomputed",
            "precomputed_nearest_neighbors",
        ]
        # A square feature matrix may be a precomputed affinity passed without
        # the matching `affinity` option; warn rather than guess.
        if X.shape[0] == X.shape[1] and not allow_squared:
            warnings.warn(
                "The spectral clustering API has changed. ``fit`` "
                "now constructs an affinity matrix from data. To use"
                " a custom affinity matrix, "
                "set ``affinity=precomputed``."
            )
        # Build the affinity matrix according to the chosen strategy.
        if self.affinity == "nearest_neighbors":
            connectivity = kneighbors_graph(
                X, n_neighbors=self.n_neighbors, include_self=True, n_jobs=self.n_jobs
            )
            # Symmetrize the k-NN graph since the embedding requires a
            # symmetric affinity.
            self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
        elif self.affinity == "precomputed_nearest_neighbors":
            estimator = NearestNeighbors(
                n_neighbors=self.n_neighbors, n_jobs=self.n_jobs, metric="precomputed"
            ).fit(X)
            connectivity = estimator.kneighbors_graph(X=X, mode="connectivity")
            self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
        elif self.affinity == "precomputed":
            self.affinity_matrix_ = X
        else:
            params = self.kernel_params
            if params is None:
                params = {}
            if not callable(self.affinity):
                params["gamma"] = self.gamma
                params["degree"] = self.degree
                params["coef0"] = self.coef0
            self.affinity_matrix_ = pairwise_kernels(
                X, metric=self.affinity, filter_params=True, **params
            )
        random_state = check_random_state(self.random_state)
        n_components = (
            self.n_clusters if self.n_components is None else self.n_components
        )
        # We now obtain the real valued solution matrix to the
        # relaxed Ncut problem, solving the eigenvalue problem
        # L_sym x = lambda x and recovering u = D^-1/2 x.
        # The first eigenvector is constant only for fully connected graphs
        # and should be kept for spectral clustering (drop_first = False)
        # See spectral_embedding documentation.
        maps = _spectral_embedding(
            self.affinity_matrix_,
            n_components=n_components,
            eigen_solver=self.eigen_solver,
            random_state=random_state,
            eigen_tol=self.eigen_tol,
            drop_first=False,
        )
        if self.verbose:
            print(f"Computing label assignment using {self.assign_labels}")
        if self.assign_labels == "kmeans":
            _, self.labels_, _ = k_means(
                maps,
                self.n_clusters,
                random_state=random_state,
                n_init=self.n_init,
                verbose=self.verbose,
            )
        elif self.assign_labels == "cluster_qr":
            self.labels_ = cluster_qr(maps)
        else:
            self.labels_ = discretize(maps, random_state=random_state)
        return self

    def fit_predict(self, X, y=None):
        """Perform spectral clustering on `X` and return cluster labels.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
                (n_samples, n_samples)
            Training instances to cluster, similarities / affinities between
            instances if ``affinity='precomputed'``, or distances between
            instances if ``affinity='precomputed_nearest_neighbors``. If a
            sparse matrix is provided in a format other than ``csr_matrix``,
            ``csc_matrix``, or ``coo_matrix``, it will be converted into a
            sparse ``csr_matrix``.
        y : Ignored
            Not used, present here for API consistency by convention.
        Returns
        -------
        labels : ndarray of shape (n_samples,)
            Cluster labels.
        """
        return super().fit_predict(X, y)

    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        tags.input_tags.sparse = True
        # With a precomputed affinity/distance, X is sample-by-sample
        # (pairwise) rather than sample-by-feature.
        tags.input_tags.pairwise = self.affinity in [
            "precomputed",
            "precomputed_nearest_neighbors",
        ]
        return tags
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/cluster/_hdbscan/hdbscan.py | sklearn/cluster/_hdbscan/hdbscan.py | """
HDBSCAN: Hierarchical Density-Based Spatial Clustering
of Applications with Noise
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from numbers import Integral, Real
from warnings import warn
import numpy as np
from scipy.sparse import csgraph, issparse
from sklearn.base import BaseEstimator, ClusterMixin, _fit_context
from sklearn.cluster._hdbscan._linkage import (
MST_edge_dtype,
make_single_linkage,
mst_from_data_matrix,
mst_from_mutual_reachability,
)
from sklearn.cluster._hdbscan._reachability import mutual_reachability_graph
from sklearn.cluster._hdbscan._tree import (
HIERARCHY_dtype,
labelling_at_cut,
tree_to_labels,
)
from sklearn.metrics import pairwise_distances
from sklearn.metrics._dist_metrics import DistanceMetric
from sklearn.metrics.pairwise import _VALID_METRICS
from sklearn.neighbors import BallTree, KDTree, NearestNeighbors
from sklearn.utils._param_validation import Hidden, Interval, StrOptions
from sklearn.utils.validation import (
_allclose_dense_sparse,
_assert_all_finite,
validate_data,
)
# Metrics for which a fast tree-based neighbor search is available in either
# KDTree or BallTree.
FAST_METRICS = set(KDTree.valid_metrics + BallTree.valid_metrics)
# Encodings are arbitrary but must be strictly negative.
# The current encodings are chosen as extensions to the -1 noise label.
# Avoided enums so that the end user only deals with simple labels.
_OUTLIER_ENCODING: dict = {
    "infinite": {
        "label": -2,
        # The probability could also be 1, since infinite points are certainly
        # infinite outliers, however 0 is convention from the HDBSCAN library
        # implementation.
        "prob": 0,
    },
    "missing": {
        "label": -3,
        # A nan probability is chosen to emphasize the fact that the
        # corresponding data was not considered in the clustering problem.
        "prob": np.nan,
    },
}
def _brute_mst(mutual_reachability, min_samples):
    """
    Builds a minimum spanning tree (MST) from the provided mutual-reachability
    values. This function dispatches to a custom Cython implementation for
    dense arrays, and `scipy.sparse.csgraph.minimum_spanning_tree` for sparse
    arrays/matrices.
    Parameters
    ----------
    mutual_reachability : {ndarray, sparse matrix} of shape \
            (n_samples, n_samples)
        Weighted adjacency matrix of the mutual reachability graph.
    min_samples : int, default=None
        The number of samples in a neighborhood for a point
        to be considered as a core point. This includes the point itself.
    Returns
    -------
    mst : ndarray of shape (n_samples - 1,), dtype=MST_edge_dtype
        The MST representation of the mutual-reachability graph. The MST is
        represented as a collection of edges.
    Raises
    ------
    ValueError
        If the sparse graph has a row with fewer than `min_samples` non-zero
        entries, or is not fully connected.
    """
    if not issparse(mutual_reachability):
        return mst_from_mutual_reachability(mutual_reachability)
    # Check if the mutual reachability matrix has any rows which have
    # less than `min_samples` non-zero elements.
    indptr = mutual_reachability.indptr
    num_points = mutual_reachability.shape[0]
    if any((indptr[i + 1] - indptr[i]) < min_samples for i in range(num_points)):
        raise ValueError(
            f"There exists points with fewer than {min_samples} neighbors. Ensure"
            " your distance matrix has non-zero values for at least"
            f" `min_sample`={min_samples} neighbors for each points (i.e. K-nn"
            " graph), or specify a `max_distance` in `metric_params` to use when"
            " distances are missing."
        )
    # Check connected component on mutual reachability.
    # If more than one connected component is present,
    # it means that the graph is disconnected.
    n_components = csgraph.connected_components(
        mutual_reachability, directed=False, return_labels=False
    )
    if n_components > 1:
        raise ValueError(
            f"Sparse mutual reachability matrix has {n_components} connected"
            " components. HDBSCAN cannot be performed on a disconnected graph. Ensure"
            " that the sparse distance matrix has only one connected component."
        )
    # Compute the minimum spanning tree for the sparse graph
    sparse_min_spanning_tree = csgraph.minimum_spanning_tree(mutual_reachability)
    rows, cols = sparse_min_spanning_tree.nonzero()
    # Repack scipy's sparse MST into the structured edge-list format the
    # Cython linkage routines expect.
    mst = np.rec.fromarrays(
        [rows, cols, sparse_min_spanning_tree.data],
        dtype=MST_edge_dtype,
    )
    return mst
def _process_mst(min_spanning_tree):
    """
    Builds a single-linkage tree (SLT) from the provided minimum spanning tree
    (MST). The MST is first sorted then processed by a custom Cython routine.
    Parameters
    ----------
    min_spanning_tree : ndarray of shape (n_samples - 1,), dtype=MST_edge_dtype
        The MST representation of the mutual-reachability graph. The MST is
        represented as a collection of edges.
    Returns
    -------
    single_linkage : ndarray of shape (n_samples - 1,), dtype=HIERARCHY_dtype
        The single-linkage tree (dendrogram) built from the MST.
    """
    # Sort edges of the min_spanning_tree by weight
    row_order = np.argsort(min_spanning_tree["distance"])
    min_spanning_tree = min_spanning_tree[row_order]
    # Convert edge list into standard hierarchical clustering format
    return make_single_linkage(min_spanning_tree)
def _hdbscan_brute(
    X,
    min_samples=5,
    alpha=None,
    metric="euclidean",
    n_jobs=None,
    copy=False,
    **metric_params,
):
    """
    Builds a single-linkage tree (SLT) from the input data `X`. If
    `metric="precomputed"` then `X` must be a symmetric array of distances.
    Otherwise, the pairwise distances are calculated directly and passed to
    `mutual_reachability_graph`.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features) or (n_samples, n_samples)
        Either the raw data from which to compute the pairwise distances,
        or the precomputed distances.

    min_samples : int, default=5
        The number of samples in a neighborhood for a point
        to be considered as a core point. This includes the point itself.

    alpha : float, default=None
        A distance scaling parameter as used in robust single linkage; all
        distances are divided by `alpha` before computing mutual
        reachability. NOTE(review): the default of `None` would raise on the
        division below -- callers appear to always pass a numeric value;
        confirm at the call sites.

    metric : str or callable, default='euclidean'
        The metric to use when calculating distance between instances in a
        feature array.

        - If metric is a string or callable, it must be one of
          the options allowed by :func:`~sklearn.metrics.pairwise_distances`
          for its metric parameter.

        - If metric is "precomputed", X is assumed to be a distance matrix and
          must be square.

    n_jobs : int, default=None
        The number of jobs to use for computing the pairwise distances. This
        works by breaking down the pairwise matrix into n_jobs even slices and
        computing them in parallel. This parameter is passed directly to
        :func:`~sklearn.metrics.pairwise_distances`.

        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    copy : bool, default=False
        If `copy=True` then any time an in-place modifications would be made
        that would overwrite `X`, a copy will first be made, guaranteeing that
        the original data will be unchanged. Currently, it only applies when
        `metric="precomputed"`, when passing a dense array or a CSR sparse
        array/matrix.

    **metric_params : dict
        Additional keyword arguments passed to the distance metric. The
        `"max_distance"` key, if present, is also read here and forwarded to
        `mutual_reachability_graph` as the fill value for missing distances.

    Returns
    -------
    single_linkage : ndarray of shape (n_samples - 1,), dtype=HIERARCHY_dtype
        The single-linkage tree (dendrogram) built from the MST.
    """
    if metric == "precomputed":
        # A precomputed distance matrix must be square...
        if X.shape[0] != X.shape[1]:
            raise ValueError(
                "The precomputed distance matrix is expected to be symmetric, however"
                f" it has shape {X.shape}. Please verify that the"
                " distance matrix was constructed correctly."
            )
        # ...and symmetric (within numerical tolerance).
        if not _allclose_dense_sparse(X, X.T):
            raise ValueError(
                "The precomputed distance matrix is expected to be symmetric, however"
                " its values appear to be asymmetric. Please verify that the distance"
                " matrix was constructed correctly."
            )
        distance_matrix = X.copy() if copy else X
    else:
        distance_matrix = pairwise_distances(
            X, metric=metric, n_jobs=n_jobs, **metric_params
        )
    # Rescale all distances (robust single linkage `alpha`). With
    # `copy=False` and a precomputed matrix this mutates the caller's `X`
    # in place -- the reason the `copy` parameter exists.
    distance_matrix /= alpha
    # `max_distance` is only meaningful for sparse inputs with missing
    # entries; it is read (not popped) from the metric kwargs.
    max_distance = metric_params.get("max_distance", 0.0)
    if issparse(distance_matrix) and distance_matrix.format != "csr":
        # we need CSR format to avoid a conversion in `_brute_mst` when calling
        # `csgraph.connected_components`
        distance_matrix = distance_matrix.tocsr()
    # Note that `distance_matrix` is manipulated in-place, however we do not
    # need it for anything else past this point, hence the operation is safe.
    mutual_reachability_ = mutual_reachability_graph(
        distance_matrix, min_samples=min_samples, max_distance=max_distance
    )
    min_spanning_tree = _brute_mst(mutual_reachability_, min_samples=min_samples)
    # Warn if the MST couldn't be constructed around the missing distances
    if np.isinf(min_spanning_tree["distance"]).any():
        warn(
            (
                "The minimum spanning tree contains edge weights with value "
                "infinity. Potentially, you are missing too many distances "
                "in the initial distance matrix for the given neighborhood "
                "size."
            ),
            UserWarning,
        )
    return _process_mst(min_spanning_tree)
def _hdbscan_prims(
    X,
    algo,
    min_samples=5,
    alpha=1.0,
    metric="euclidean",
    leaf_size=40,
    n_jobs=None,
    **metric_params,
):
    """
    Builds a single-linkage tree (SLT) from the input data `X` via a
    Prim's-style MST routine (`mst_from_data_matrix`), using core distances
    obtained from a space-partitioning tree (KDTree or BallTree). The mutual
    reachability distances are never materialized as a full matrix.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        The raw data.

    algo : {"kd_tree", "ball_tree"}
        The nearest-neighbors algorithm used to compute core distances;
        forwarded to :class:`~sklearn.neighbors.NearestNeighbors` as
        `algorithm`.

    min_samples : int, default=5
        The number of samples in a neighborhood for a point
        to be considered as a core point. This includes the point itself.

    alpha : float, default=1.0
        A distance scaling parameter as used in robust single linkage.

    metric : str or callable, default='euclidean'
        The metric to use when calculating distance between instances in a
        feature array. `metric` must be one of the options allowed by
        :func:`~sklearn.metrics.pairwise_distances` for its metric
        parameter.

    leaf_size : int, default=40
        Leaf size for the underlying KDTree/BallTree; affects speed and
        memory usage of the neighbor queries, not the results.

    n_jobs : int, default=None
        The number of jobs to use for computing the pairwise distances. This
        works by breaking down the pairwise matrix into n_jobs even slices and
        computing them in parallel. This parameter is passed directly to
        :func:`~sklearn.metrics.pairwise_distances`.

        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    **metric_params : dict
        Additional keyword arguments passed to the distance metric.

    Returns
    -------
    single_linkage : ndarray of shape (n_samples - 1,), dtype=HIERARCHY_dtype
        The single-linkage tree (dendrogram) built from the MST.
    """
    # The Cython routines used require contiguous arrays
    X = np.asarray(X, order="C")
    # Get distance to kth nearest neighbour
    nbrs = NearestNeighbors(
        n_neighbors=min_samples,
        algorithm=algo,
        leaf_size=leaf_size,
        metric=metric,
        metric_params=metric_params,
        n_jobs=n_jobs,
        p=None,
    ).fit(X)
    neighbors_distances, _ = nbrs.kneighbors(X, min_samples, return_distance=True)
    # The core distance of a point is its distance to the `min_samples`-th
    # nearest neighbor (itself included), i.e. the last column of the sorted
    # neighbor distances.
    core_distances = np.ascontiguousarray(neighbors_distances[:, -1])
    dist_metric = DistanceMetric.get_metric(metric, **metric_params)
    # Mutual reachability distance is implicit in mst_from_data_matrix
    min_spanning_tree = mst_from_data_matrix(X, core_distances, dist_metric, alpha)
    return _process_mst(min_spanning_tree)
def remap_single_linkage_tree(tree, internal_to_raw, non_finite):
    """
    Takes an internal single_linkage_tree structure and adds back in a set of points
    that were initially detected as non-finite and returns that new tree.

    These points will all be merged into the final node at np.inf distance and
    considered noise points.

    Parameters
    ----------
    tree : ndarray of shape (n_samples - 1,), dtype=HIERARCHY_dtype
        The single-linkage tree (dendrogram) built from the MST.
    internal_to_raw: dict
        A mapping from internal integer index to the raw integer index
    non_finite : ndarray
        Boolean array of which entries in the raw data are non-finite
    """
    n_finite = len(internal_to_raw)
    n_outliers = len(non_finite)

    # Translate node ids in place: leaf (finite-point) nodes map back to
    # their raw indices, while cluster nodes are shifted up to make room
    # for the outlier points that will be appended.
    for idx in range(len(tree)):
        for side in ("left_node", "right_node"):
            node = tree[idx][side]
            if node < n_finite:
                tree[idx][side] = internal_to_raw[node]
            else:
                tree[idx][side] = node + n_outliers

    # Append one merge per non-finite point, each at infinite distance,
    # growing the final cluster one point at a time.
    outlier_tree = np.zeros(n_outliers, dtype=HIERARCHY_dtype)
    last_merge = tree[-1]
    next_cluster_id = max(last_merge["left_node"], last_merge["right_node"]) + 1
    next_cluster_size = last_merge["cluster_size"] + 1
    for pos, outlier in enumerate(non_finite):
        outlier_tree[pos] = (outlier, next_cluster_id, np.inf, next_cluster_size)
        next_cluster_id += 1
        next_cluster_size += 1
    return np.concatenate([tree, outlier_tree])
def _get_finite_row_indices(matrix):
"""
Returns the indices of the purely finite rows of a
sparse matrix or dense ndarray
"""
if issparse(matrix):
row_indices = np.array(
[i for i, row in enumerate(matrix.tolil().data) if np.all(np.isfinite(row))]
)
else:
(row_indices,) = np.isfinite(matrix.sum(axis=1)).nonzero()
return row_indices
class HDBSCAN(ClusterMixin, BaseEstimator):
"""Cluster data using hierarchical density-based clustering.
HDBSCAN - Hierarchical Density-Based Spatial Clustering of Applications
with Noise. Performs :class:`~sklearn.cluster.DBSCAN` over varying epsilon
values and integrates the result to find a clustering that gives the best
stability over epsilon.
This allows HDBSCAN to find clusters of varying densities (unlike
:class:`~sklearn.cluster.DBSCAN`), and be more robust to parameter selection.
Read more in the :ref:`User Guide <hdbscan>`.
.. versionadded:: 1.3
Parameters
----------
min_cluster_size : int, default=5
The minimum number of samples in a group for that group to be
considered a cluster; groupings smaller than this size will be left
as noise.
min_samples : int, default=None
The parameter `k` used to calculate the distance between a point
`x_p` and its k-th nearest neighbor.
When `None`, defaults to `min_cluster_size`.
cluster_selection_epsilon : float, default=0.0
A distance threshold. Clusters below this value will be merged.
See [5]_ for more information.
max_cluster_size : int, default=None
A limit to the size of clusters returned by the `"eom"` cluster
selection algorithm. There is no limit when `max_cluster_size=None`.
Has no effect if `cluster_selection_method="leaf"`.
metric : str or callable, default='euclidean'
The metric to use when calculating distance between instances in a
feature array.
- If metric is a string or callable, it must be one of
the options allowed by :func:`~sklearn.metrics.pairwise_distances`
for its metric parameter.
- If metric is "precomputed", X is assumed to be a distance matrix and
must be square.
metric_params : dict, default=None
Arguments passed to the distance metric.
alpha : float, default=1.0
A distance scaling parameter as used in robust single linkage.
See [3]_ for more information.
algorithm : {"auto", "brute", "kd_tree", "ball_tree"}, default="auto"
Exactly which algorithm to use for computing core distances; By default
this is set to `"auto"` which attempts to use a
:class:`~sklearn.neighbors.KDTree` tree if possible, otherwise it uses
a :class:`~sklearn.neighbors.BallTree` tree. Both `"kd_tree"` and
`"ball_tree"` algorithms use the
:class:`~sklearn.neighbors.NearestNeighbors` estimator.
If the `X` passed during `fit` is sparse or `metric` is invalid for
both :class:`~sklearn.neighbors.KDTree` and
:class:`~sklearn.neighbors.BallTree`, then it resolves to use the
`"brute"` algorithm.
leaf_size : int, default=40
Leaf size for trees responsible for fast nearest neighbour queries when
a KDTree or a BallTree are used as core-distance algorithms. A large
dataset size and small `leaf_size` may induce excessive memory usage.
If you are running out of memory consider increasing the `leaf_size`
parameter. Ignored for `algorithm="brute"`.
n_jobs : int, default=None
Number of jobs to run in parallel to calculate distances.
`None` means 1 unless in a :obj:`joblib.parallel_backend` context.
`-1` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
cluster_selection_method : {"eom", "leaf"}, default="eom"
The method used to select clusters from the condensed tree. The
standard approach for HDBSCAN* is to use an Excess of Mass (`"eom"`)
algorithm to find the most persistent clusters. Alternatively you can
instead select the clusters at the leaves of the tree -- this provides
the most fine grained and homogeneous clusters.
allow_single_cluster : bool, default=False
By default HDBSCAN* will not produce a single cluster, setting this
to True will override this and allow single cluster results in
the case that you feel this is a valid result for your dataset.
store_centers : str, default=None
Which, if any, cluster centers to compute and store. The options are:
- `None` which does not compute nor store any centers.
- `"centroid"` which calculates the center by taking the weighted
average of their positions. Note that the algorithm uses the
euclidean metric and does not guarantee that the output will be
an observed data point.
- `"medoid"` which calculates the center by taking the point in the
fitted data which minimizes the distance to all other points in
the cluster. This is slower than "centroid" since it requires
computing additional pairwise distances between points of the
same cluster but guarantees the output is an observed data point.
The medoid is also well-defined for arbitrary metrics, and does not
depend on a euclidean metric.
- `"both"` which computes and stores both forms of centers.
copy : bool, default=False
If `copy=True` then any time an in-place modifications would be made
that would overwrite data passed to :term:`fit`, a copy will first be
made, guaranteeing that the original data will be unchanged.
Currently, it only applies when `metric="precomputed"`, when passing
a dense array or a CSR sparse matrix and when `algorithm="brute"`.
.. versionchanged:: 1.10
The default value for `copy` will change from `False` to `True`
in version 1.10.
Attributes
----------
labels_ : ndarray of shape (n_samples,)
Cluster labels for each point in the dataset given to :term:`fit`.
Outliers are labeled as follows:
- Noisy samples are given the label -1.
- Samples with infinite elements (+/- np.inf) are given the label -2.
- Samples with missing data are given the label -3, even if they
also have infinite elements.
probabilities_ : ndarray of shape (n_samples,)
The strength with which each sample is a member of its assigned
cluster.
- Clustered samples have probabilities proportional to the degree that
they persist as part of the cluster.
- Noisy samples have probability zero.
- Samples with infinite elements (+/- np.inf) have probability 0.
- Samples with missing data have probability `np.nan`.
n_features_in_ : int
Number of features seen during :term:`fit`.
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
centroids_ : ndarray of shape (n_clusters, n_features)
A collection containing the centroid of each cluster calculated under
the standard euclidean metric. The centroids may fall "outside" their
respective clusters if the clusters themselves are non-convex.
Note that `n_clusters` only counts non-outlier clusters. That is to
say, the `-1, -2, -3` labels for the outlier clusters are excluded.
medoids_ : ndarray of shape (n_clusters, n_features)
A collection containing the medoid of each cluster calculated under
        whichever metric was passed to the `metric` parameter. The
medoids are points in the original cluster which minimize the average
distance to all other points in that cluster under the chosen metric.
These can be thought of as the result of projecting the `metric`-based
centroid back onto the cluster.
Note that `n_clusters` only counts non-outlier clusters. That is to
say, the `-1, -2, -3` labels for the outlier clusters are excluded.
See Also
--------
DBSCAN : Density-Based Spatial Clustering of Applications
with Noise.
OPTICS : Ordering Points To Identify the Clustering Structure.
Birch : Memory-efficient, online-learning algorithm.
Notes
-----
The `min_samples` parameter includes the point itself, whereas the implementation in
`scikit-learn-contrib/hdbscan <https://github.com/scikit-learn-contrib/hdbscan>`_
does not. To get the same results in both versions, the value of `min_samples` here
must be 1 greater than the value used in `scikit-learn-contrib/hdbscan
<https://github.com/scikit-learn-contrib/hdbscan>`_.
References
----------
.. [1] :doi:`Campello, R. J., Moulavi, D., & Sander, J. Density-based clustering
based on hierarchical density estimates.
<10.1007/978-3-642-37456-2_14>`
.. [2] :doi:`Campello, R. J., Moulavi, D., Zimek, A., & Sander, J.
Hierarchical density estimates for data clustering, visualization,
and outlier detection.<10.1145/2733381>`
.. [3] `Chaudhuri, K., & Dasgupta, S. Rates of convergence for the
cluster tree.
<https://papers.nips.cc/paper/2010/hash/
b534ba68236ba543ae44b22bd110a1d6-Abstract.html>`_
.. [4] `Moulavi, D., Jaskowiak, P.A., Campello, R.J., Zimek, A. and
Sander, J. Density-Based Clustering Validation.
<https://epubs.siam.org/doi/pdf/10.1137/1.9781611973440.96>`_
.. [5] :arxiv:`Malzer, C., & Baum, M. "A Hybrid Approach To Hierarchical
Density-based Cluster Selection."<1911.02282>`.
Examples
--------
>>> import numpy as np
>>> from sklearn.cluster import HDBSCAN
>>> from sklearn.datasets import load_digits
>>> X, _ = load_digits(return_X_y=True)
>>> hdb = HDBSCAN(copy=True, min_cluster_size=20)
>>> hdb.fit(X)
HDBSCAN(copy=True, min_cluster_size=20)
>>> hdb.labels_.shape == (X.shape[0],)
True
>>> np.unique(hdb.labels_).tolist()
[-1, 0, 1, 2, 3, 4, 5, 6, 7]
"""
_parameter_constraints = {
"min_cluster_size": [Interval(Integral, left=2, right=None, closed="left")],
"min_samples": [Interval(Integral, left=1, right=None, closed="left"), None],
"cluster_selection_epsilon": [
Interval(Real, left=0, right=None, closed="left")
],
"max_cluster_size": [
None,
Interval(Integral, left=1, right=None, closed="left"),
],
"metric": [
StrOptions(FAST_METRICS | set(_VALID_METRICS) | {"precomputed"}),
callable,
],
"metric_params": [dict, None],
"alpha": [Interval(Real, left=0, right=None, closed="neither")],
"algorithm": [StrOptions({"auto", "brute", "kd_tree", "ball_tree"})],
"leaf_size": [Interval(Integral, left=1, right=None, closed="left")],
"n_jobs": [Integral, None],
"cluster_selection_method": [StrOptions({"eom", "leaf"})],
"allow_single_cluster": ["boolean"],
"store_centers": [None, StrOptions({"centroid", "medoid", "both"})],
"copy": ["boolean", Hidden(StrOptions({"warn"}))],
}
    def __init__(
        self,
        min_cluster_size=5,
        min_samples=None,
        cluster_selection_epsilon=0.0,
        max_cluster_size=None,
        metric="euclidean",
        metric_params=None,
        alpha=1.0,
        algorithm="auto",
        leaf_size=40,
        n_jobs=None,
        cluster_selection_method="eom",
        allow_single_cluster=False,
        store_centers=None,
        copy="warn",
    ):
        # Per scikit-learn convention, hyperparameters are stored untouched;
        # validation and any processing happen in `fit` (via `_fit_context`
        # and `_parameter_constraints`).
        self.min_cluster_size = min_cluster_size
        self.min_samples = min_samples
        self.alpha = alpha
        self.max_cluster_size = max_cluster_size
        self.cluster_selection_epsilon = cluster_selection_epsilon
        self.metric = metric
        self.metric_params = metric_params
        self.algorithm = algorithm
        self.leaf_size = leaf_size
        self.n_jobs = n_jobs
        self.cluster_selection_method = cluster_selection_method
        self.allow_single_cluster = allow_single_cluster
        self.store_centers = store_centers
        self.copy = copy
@_fit_context(
# HDBSCAN.metric is not validated yet
prefer_skip_nested_validation=False
)
def fit(self, X, y=None):
"""Find clusters based on hierarchical density-based clustering.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
ndarray of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
`metric='precomputed'`.
y : None
Ignored.
Returns
-------
self : object
Returns self.
"""
# TODO(1.10): remove "warn" option
# and leave copy to its default value where applicable in examples and doctests.
if self.copy == "warn":
warn(
"The default value of `copy` will change from False to True in 1.10."
" Explicitly set a value for `copy` to silence this warning.",
FutureWarning,
)
_copy = False
else:
_copy = self.copy
if self.metric == "precomputed" and self.store_centers is not None:
raise ValueError(
"Cannot store centers when using a precomputed distance matrix."
)
self._metric_params = self.metric_params or {}
if self.metric != "precomputed":
# Non-precomputed matrices may contain non-finite values.
X = validate_data(
self,
X,
accept_sparse=["csr", "lil"],
ensure_all_finite=False,
dtype=np.float64,
)
self._raw_data = X
all_finite = True
try:
_assert_all_finite(X.data if issparse(X) else X)
except ValueError:
all_finite = False
if not all_finite:
# Pass only the purely finite indices into hdbscan
# We will later assign all non-finite points their
# corresponding labels, as specified in `_OUTLIER_ENCODING`
# Reduce X to make the checks for missing/outlier samples more
# convenient.
reduced_X = X.sum(axis=1)
# Samples with missing data are denoted by the presence of
# `np.nan`
missing_index = np.isnan(reduced_X).nonzero()[0]
# Outlier samples are denoted by the presence of `np.inf`
infinite_index = np.isinf(reduced_X).nonzero()[0]
# Continue with only finite samples
finite_index = _get_finite_row_indices(X)
internal_to_raw = {x: y for x, y in enumerate(finite_index)}
X = X[finite_index]
elif issparse(X):
# Handle sparse precomputed distance matrices separately
X = validate_data(
self,
X,
accept_sparse=["csr", "lil"],
dtype=np.float64,
force_writeable=True,
)
else:
# Only non-sparse, precomputed distance matrices are handled here
# and thereby allowed to contain numpy.inf for missing distances
# Perform data validation after removing infinite values (numpy.inf)
# from the given distance matrix.
X = validate_data(
self, X, ensure_all_finite=False, dtype=np.float64, force_writeable=True
)
if np.isnan(X).any():
# TODO: Support np.nan in Cython implementation for precomputed
# dense HDBSCAN
raise ValueError("np.nan values found in precomputed-dense")
if X.shape[0] == 1:
raise ValueError("n_samples=1 while HDBSCAN requires more than one sample")
self._min_samples = (
self.min_cluster_size if self.min_samples is None else self.min_samples
)
if self._min_samples > X.shape[0]:
raise ValueError(
f"min_samples ({self._min_samples}) must be at most the number of"
f" samples in X ({X.shape[0]})"
)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/cluster/_hdbscan/__init__.py | sklearn/cluster/_hdbscan/__init__.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/cluster/_hdbscan/tests/__init__.py | sklearn/cluster/_hdbscan/tests/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/cluster/_hdbscan/tests/test_reachibility.py | sklearn/cluster/_hdbscan/tests/test_reachibility.py | import numpy as np
import pytest
from sklearn.cluster._hdbscan._reachability import mutual_reachability_graph
from sklearn.utils._testing import (
_convert_container,
assert_allclose,
)
def test_mutual_reachability_graph_error_sparse_format():
    """Check that we raise an error if the sparse format is not CSR."""
    rng = np.random.RandomState(0)
    X = rng.randn(10, 10)
    # Symmetrize and zero the diagonal so `X` looks like a distance matrix.
    X = X.T @ X
    np.fill_diagonal(X, 0.0)
    X = _convert_container(X, "sparse_csc")
    err_msg = "Only sparse CSR matrices are supported"
    with pytest.raises(ValueError, match=err_msg):
        mutual_reachability_graph(X)
@pytest.mark.parametrize("array_type", ["array", "sparse_csr"])
def test_mutual_reachability_graph_inplace(array_type):
    """Check that the operation is happening inplace."""
    rng = np.random.RandomState(0)
    X = rng.randn(10, 10)
    # Symmetrize and zero the diagonal so `X` looks like a distance matrix.
    X = X.T @ X
    np.fill_diagonal(X, 0.0)
    X = _convert_container(X, array_type)
    mr_graph = mutual_reachability_graph(X)
    # Same object identity => the graph was computed in-place on `X`.
    assert id(mr_graph) == id(X)
def test_mutual_reachability_graph_equivalence_dense_sparse():
    """Check that we get the same results for dense and sparse implementation."""
    rng = np.random.RandomState(0)
    X = rng.randn(5, 5)
    # Symmetric matrix used as a precomputed distance matrix in both formats.
    X_dense = X.T @ X
    X_sparse = _convert_container(X_dense, "sparse_csr")
    mr_graph_dense = mutual_reachability_graph(X_dense, min_samples=3)
    mr_graph_sparse = mutual_reachability_graph(X_sparse, min_samples=3)
    assert_allclose(mr_graph_dense, mr_graph_sparse.toarray())
@pytest.mark.parametrize("array_type", ["array", "sparse_csr"])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_mutual_reachability_graph_preserves_dtype(array_type, dtype):
    """Check that the computation preserve dtype thanks to fused types."""
    rng = np.random.RandomState(0)
    X = rng.randn(10, 10)
    # Symmetrize, cast to the requested dtype and zero the diagonal so `X`
    # looks like a precomputed distance matrix.
    X = (X.T @ X).astype(dtype)
    np.fill_diagonal(X, 0.0)
    X = _convert_container(X, array_type)
    assert X.dtype == dtype
    mr_graph = mutual_reachability_graph(X)
    assert mr_graph.dtype == dtype
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/cluster/tests/test_bicluster.py | sklearn/cluster/tests/test_bicluster.py | """Testing for Spectral Biclustering methods"""
import numpy as np
import pytest
from scipy.sparse import issparse
from sklearn.base import BaseEstimator, BiclusterMixin, clone
from sklearn.cluster import SpectralBiclustering, SpectralCoclustering
from sklearn.cluster._bicluster import (
_bistochastic_normalize,
_log_normalize,
_scale_normalize,
)
from sklearn.datasets import make_biclusters, make_checkerboard
from sklearn.metrics import consensus_score, v_measure_score
from sklearn.model_selection import ParameterGrid
from sklearn.utils._testing import (
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
)
from sklearn.utils.fixes import CSR_CONTAINERS
class MockBiclustering(BiclusterMixin, BaseEstimator):
    # Mock object for testing get_submatrix.
    def __init__(self):
        pass

    def get_indices(self, i):
        # Overridden to reproduce old get_submatrix test.
        # Always returns rows (0, 1, 4) and columns (2, 3), regardless of the
        # requested bicluster index `i`.
        return (
            np.where([True, True, False, False, True])[0],
            np.where([False, False, True, True])[0],
        )
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_get_submatrix(csr_container):
    # `get_submatrix` must extract the rows/columns reported by `get_indices`
    # from dense arrays, sparse containers and nested lists alike.
    data = np.arange(20).reshape(5, 4)
    model = MockBiclustering()
    for X in (data, csr_container(data), data.tolist()):
        submatrix = model.get_submatrix(0, X)
        if issparse(submatrix):
            submatrix = submatrix.toarray()
        assert_array_equal(submatrix, [[2, 3], [6, 7], [18, 19]])
        # Mutating the returned submatrix must not write through to the input.
        submatrix[:] = -1
        if issparse(X):
            X = X.toarray()
        assert np.all(X != -1)
def _test_shape_indices(model):
# Test get_shape and get_indices on fitted model.
for i in range(model.n_clusters):
m, n = model.get_shape(i)
i_ind, j_ind = model.get_indices(i)
assert len(i_ind) == m
assert len(j_ind) == n
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_spectral_coclustering(global_random_seed, csr_container):
    # Test Dhillon's Spectral CoClustering on a simple problem.
    param_grid = {
        "svd_method": ["randomized", "arpack"],
        "n_svd_vecs": [None, 20],
        "mini_batch": [False, True],
        "init": ["k-means++"],
        "n_init": [10],
    }
    S, rows, cols = make_biclusters(
        (30, 30), 3, noise=0.1, random_state=global_random_seed
    )
    S -= S.min()  # needs to be nonnegative before making it sparse
    S = np.where(S < 1, 0, S)  # threshold some values
    for mat in (S, csr_container(S)):
        for kwargs in ParameterGrid(param_grid):
            model = SpectralCoclustering(
                n_clusters=3, random_state=global_random_seed, **kwargs
            )
            model.fit(mat)
            assert model.rows_.shape == (3, 30)
            # Each sample and each feature must belong to exactly one bicluster.
            assert_array_equal(model.rows_.sum(axis=0), np.ones(30))
            assert_array_equal(model.columns_.sum(axis=0), np.ones(30))
            # Perfect agreement with the ground-truth biclusters.
            assert consensus_score(model.biclusters_, (rows, cols)) == 1
            _test_shape_indices(model)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_spectral_biclustering(global_random_seed, csr_container):
    # Test Kluger methods on a checkerboard dataset.
    S, rows, cols = make_checkerboard(
        (30, 30), 3, noise=0.5, random_state=global_random_seed
    )
    # Each non-default value is exercised one at a time on top of the default
    # configuration.
    non_default_params = {
        "method": ["scale", "log"],
        "svd_method": ["arpack"],
        "n_svd_vecs": [20],
        "mini_batch": [True],
    }
    for mat in (S, csr_container(S)):
        for param_name, param_values in non_default_params.items():
            for param_value in param_values:
                model = SpectralBiclustering(
                    n_clusters=3,
                    n_init=3,
                    init="k-means++",
                    random_state=global_random_seed,
                )
                model.set_params(**dict([(param_name, param_value)]))
                if issparse(mat) and model.get_params().get("method") == "log":
                    # cannot take log of sparse matrix
                    with pytest.raises(ValueError):
                        model.fit(mat)
                    continue
                else:
                    model.fit(mat)
                # A 3x3 checkerboard yields 9 biclusters over 30 rows/columns.
                assert model.rows_.shape == (9, 30)
                assert model.columns_.shape == (9, 30)
                assert_array_equal(model.rows_.sum(axis=0), np.repeat(3, 30))
                assert_array_equal(model.columns_.sum(axis=0), np.repeat(3, 30))
                assert consensus_score(model.biclusters_, (rows, cols)) == 1
                _test_shape_indices(model)
def _do_scale_test(scaled):
"""Check that rows sum to one constant, and columns to another."""
row_sum = scaled.sum(axis=1)
col_sum = scaled.sum(axis=0)
if issparse(scaled):
row_sum = np.asarray(row_sum).squeeze()
col_sum = np.asarray(col_sum).squeeze()
assert_array_almost_equal(row_sum, np.tile(row_sum.mean(), 100), decimal=1)
assert_array_almost_equal(col_sum, np.tile(col_sum.mean(), 100), decimal=1)
def _do_bistochastic_test(scaled):
    """Check that rows and columns sum to the same constant."""
    _do_scale_test(scaled)
    row_level = scaled.sum(axis=1).mean()
    col_level = scaled.sum(axis=0).mean()
    assert_almost_equal(row_level, col_level, decimal=1)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_scale_normalize(global_random_seed, csr_container):
    # `_scale_normalize` should equalize row sums and column sums, and keep
    # sparse inputs sparse.
    generator = np.random.RandomState(global_random_seed)
    X = generator.rand(100, 100)
    for mat in (X, csr_container(X)):
        scaled, _, _ = _scale_normalize(mat)
        _do_scale_test(scaled)
        if issparse(mat):
            assert issparse(scaled)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_bistochastic_normalize(global_random_seed, csr_container):
    # `_bistochastic_normalize` should make row and column sums equal, and
    # keep sparse inputs sparse.
    generator = np.random.RandomState(global_random_seed)
    X = generator.rand(100, 100)
    for mat in (X, csr_container(X)):
        scaled = _bistochastic_normalize(mat)
        _do_bistochastic_test(scaled)
        if issparse(mat):
            assert issparse(scaled)
def test_log_normalize(global_random_seed):
    # adding any constant to a log-scaled matrix should make it
    # bistochastic
    # NOTE(review): relies on `_log_normalize` centering the rows and columns
    # of log(X) -- confirm against the implementation in `_bicluster.py`.
    generator = np.random.RandomState(global_random_seed)
    mat = generator.rand(100, 100)
    scaled = _log_normalize(mat) + 1
    _do_bistochastic_test(scaled)
def test_fit_best_piecewise(global_random_seed):
    # The first two vectors are perfectly piecewise-constant with two levels,
    # so they should be selected over the third (a linear ramp).
    model = SpectralBiclustering(random_state=global_random_seed)
    vectors = np.array([[0, 0, 0, 1, 1, 1], [2, 2, 2, 3, 3, 3], [0, 1, 2, 3, 4, 5]])
    best = model._fit_best_piecewise(vectors, n_best=2, n_clusters=2)
    assert_array_equal(best, vectors[:2])
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_project_and_cluster(global_random_seed, csr_container):
    # Projecting onto `vectors` separates the first two (identical) rows from
    # the last two, so clustering the projection recovers two groups.
    model = SpectralBiclustering(random_state=global_random_seed)
    data = np.array([[1, 1, 1], [1, 1, 1], [3, 6, 3], [3, 6, 3]])
    vectors = np.array([[1, 0], [0, 1], [0, 0]])
    for mat in (data, csr_container(data)):
        labels = model._project_and_cluster(mat, vectors, n_clusters=2)
        # v_measure is permutation-invariant, so exact label ids don't matter.
        assert_almost_equal(v_measure_score(labels, [0, 0, 1, 1]), 1.0)
def test_perfect_checkerboard(global_random_seed):
    # XXX Previously failed on build bot (not reproducible)
    model = SpectralBiclustering(
        3, svd_method="arpack", random_state=global_random_seed
    )
    # Noise-free checkerboards of several shapes must be recovered exactly;
    # the same estimator instance is refit for each shape.
    for shape in ((30, 30), (40, 30), (30, 40)):
        S, rows, cols = make_checkerboard(
            shape, 3, noise=0, random_state=global_random_seed
        )
        model.fit(S)
        assert consensus_score(model.biclusters_, (rows, cols)) == 1
@pytest.mark.parametrize(
    "params, type_err, err_msg",
    [
        (
            {"n_clusters": 6},
            ValueError,
            "n_clusters should be <= n_samples=5",
        ),
        (
            {"n_clusters": (3, 3, 3)},
            ValueError,
            "Incorrect parameter n_clusters",
        ),
        (
            {"n_clusters": (3, 6)},
            ValueError,
            "Incorrect parameter n_clusters",
        ),
        (
            {"n_components": 3, "n_best": 4},
            ValueError,
            "n_best=4 must be <= n_components=3",
        ),
    ],
)
def test_spectralbiclustering_parameter_validation(params, type_err, err_msg):
    """Check parameters validation in `SpectralBiClustering`"""
    # Each invalid configuration must raise at fit time with an informative
    # message (construction itself does not validate).
    data = np.arange(25).reshape((5, 5))
    model = SpectralBiclustering(**params)
    with pytest.raises(type_err, match=err_msg):
        model.fit(data)
@pytest.mark.parametrize("est", (SpectralBiclustering(), SpectralCoclustering()))
def test_n_features_in_(est):
    # `n_features_in_` must be absent before fitting and equal the data
    # width afterwards, for both bicluster estimators.
    X, _, _ = make_biclusters((3, 3), 3, random_state=0)
    estimator = clone(est)
    assert not hasattr(estimator, "n_features_in_")
    estimator.fit(X)
    assert estimator.n_features_in_ == 3
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/cluster/tests/test_hierarchical.py | sklearn/cluster/tests/test_hierarchical.py | """
Several basic tests for hierarchical clustering procedures
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import itertools
import shutil
from functools import partial
from tempfile import mkdtemp
import numpy as np
import pytest
from scipy.cluster import hierarchy
from scipy.sparse.csgraph import connected_components
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration, ward_tree
from sklearn.cluster._agglomerative import (
_TREE_BUILDERS,
_fix_connectivity,
_hc_cut,
linkage_tree,
)
from sklearn.cluster._hierarchical_fast import (
average_merge,
max_merge,
mst_linkage_core,
)
from sklearn.datasets import make_circles, make_moons
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics import DistanceMetric
from sklearn.metrics.cluster import adjusted_rand_score, normalized_mutual_info_score
from sklearn.metrics.pairwise import (
PAIRED_DISTANCES,
cosine_distances,
manhattan_distances,
pairwise_distances,
)
from sklearn.metrics.tests.test_dist_metrics import METRICS_DEFAULT_PARAMS
from sklearn.neighbors import kneighbors_graph
from sklearn.utils._fast_dict import IntFloatDict
from sklearn.utils._testing import (
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
create_memmap_backed_data,
ignore_warnings,
)
from sklearn.utils.fixes import LIL_CONTAINERS
def test_linkage_misc():
    # Assorted error handling and consistency checks for linkage_tree.
    rng = np.random.RandomState(42)
    X = rng.normal(size=(5, 5))

    # Unknown linkage names and mis-shaped connectivity must be rejected.
    with pytest.raises(ValueError):
        linkage_tree(X, linkage="foo")
    with pytest.raises(ValueError):
        linkage_tree(X, connectivity=np.ones((4, 4)))

    # Smoke test FeatureAgglomeration
    FeatureAgglomeration().fit(X)

    # A precomputed distance matrix must give the same tree as letting
    # linkage_tree compute the same metric internally.
    precomputed = cosine_distances(X)
    tree_precomputed = linkage_tree(precomputed, affinity="precomputed")
    assert_array_equal(tree_precomputed[0], linkage_tree(X, affinity="cosine")[0])

    # A callable affinity must match the equivalent named metric.
    tree_callable = linkage_tree(X, affinity=manhattan_distances)
    assert_array_equal(tree_callable[0], linkage_tree(X, affinity="manhattan")[0])
def test_structured_linkage_tree():
    # Check that we obtain the correct solution for structured linkage trees.
    rng = np.random.RandomState(0)
    mask = np.ones([10, 10], dtype=bool)
    # Avoiding a mask with only 'True' entries
    mask[4:7, 4:7] = 0
    X = rng.randn(50, 100)
    connectivity = grid_to_graph(*mask.shape)
    expected_n_nodes = 2 * X.shape[1] - 1
    for builder in _TREE_BUILDERS.values():
        children, _, n_leaves, _ = builder(X.T, connectivity=connectivity)
        # A full binary tree over n leaves has 2n - 1 nodes in total.
        assert len(children) + n_leaves == expected_n_nodes

        # A connectivity matrix whose shape does not match the data must be
        # rejected.
        with pytest.raises(ValueError):
            builder(X.T, connectivity=np.ones((4, 4)))
        # Fitting with no samples must raise an error as well.
        with pytest.raises(ValueError):
            builder(X.T[:0], connectivity=connectivity)
def test_unstructured_linkage_tree():
    # Check that we obtain the correct solution for unstructured linkage trees.
    rng = np.random.RandomState(0)
    X = rng.randn(50, 100)
    expected_n_nodes = 2 * X.shape[1] - 1

    for this_X in (X, X[0]):
        # Passing n_clusters without connectivity only triggers a warning:
        # the full tree is still built.
        with ignore_warnings():
            with pytest.warns(UserWarning):
                children, _, n_leaves, _ = ward_tree(this_X.T, n_clusters=10)
        assert len(children) + n_leaves == expected_n_nodes

    for builder in _TREE_BUILDERS.values():
        for this_X in (X, X[0]):
            with ignore_warnings():
                with pytest.warns(UserWarning):
                    children, _, n_leaves, _ = builder(this_X.T, n_clusters=10)
            assert len(children) + n_leaves == expected_n_nodes
def test_height_linkage_tree():
    # Check that the height of the results of linkage tree is sorted.
    # NOTE(review): only the node-count invariant is actually asserted here;
    # no sortedness of merge heights is checked in this body.
    rng = np.random.RandomState(0)
    mask = np.ones([10, 10], dtype=bool)
    X = rng.randn(50, 100)
    connectivity = grid_to_graph(*mask.shape)
    expected_n_nodes = 2 * X.shape[1] - 1
    for builder in _TREE_BUILDERS.values():
        children, _, n_leaves, _ = builder(X.T, connectivity=connectivity)
        assert len(children) + n_leaves == expected_n_nodes
def test_zero_cosine_linkage_tree():
    # Cosine affinity is undefined for zero vectors, so linkage_tree must
    # reject input containing one.
    X = np.array([[0, 1], [0, 0]])
    expected_msg = "Cosine affinity cannot be used when X contains zero vectors"
    with pytest.raises(ValueError, match=expected_msg):
        linkage_tree(X, affinity="cosine")
@pytest.mark.parametrize("n_clusters, distance_threshold", [(None, 0.5), (10, None)])
@pytest.mark.parametrize("compute_distances", [True, False])
@pytest.mark.parametrize("linkage", ["ward", "complete", "average", "single"])
def test_agglomerative_clustering_distances(
    n_clusters, compute_distances, distance_threshold, linkage
):
    # `distances_` must be exposed iff `compute_distances=True` or a
    # `distance_threshold` was given, and then have one entry per merge.
    rng = np.random.RandomState(0)
    mask = np.ones([10, 10], dtype=bool)
    n_samples = 100
    X = rng.randn(n_samples, 50)
    connectivity = grid_to_graph(*mask.shape)
    clustering = AgglomerativeClustering(
        n_clusters=n_clusters,
        connectivity=connectivity,
        linkage=linkage,
        distance_threshold=distance_threshold,
        compute_distances=compute_distances,
    )
    clustering.fit(X)

    should_have_distances = compute_distances or distance_threshold is not None
    if not should_have_distances:
        assert not hasattr(clustering, "distances_")
    else:
        assert hasattr(clustering, "distances_")
        # One distance per merge, i.e. per row of `children_`.
        n_merges = clustering.children_.shape[0]
        assert clustering.distances_.shape == (n_merges,)
@pytest.mark.parametrize("lil_container", LIL_CONTAINERS)
def test_agglomerative_clustering(global_random_seed, lil_container):
    # Check that we obtain the correct number of clusters with
    # agglomerative clustering.
    rng = np.random.RandomState(global_random_seed)
    mask = np.ones([10, 10], dtype=bool)
    n_samples = 100
    X = rng.randn(n_samples, 50)
    connectivity = grid_to_graph(*mask.shape)
    for linkage in ("ward", "complete", "average", "single"):
        clustering = AgglomerativeClustering(
            n_clusters=10, connectivity=connectivity, linkage=linkage
        )
        clustering.fit(X)
        # test caching: fitting with a joblib memory directory must give the
        # same number of clusters; `labels` is reused further down to compare
        # against the early-stopped fit.
        try:
            tempdir = mkdtemp()
            clustering = AgglomerativeClustering(
                n_clusters=10,
                connectivity=connectivity,
                memory=tempdir,
                linkage=linkage,
            )
            clustering.fit(X)
            labels = clustering.labels_
            assert np.size(np.unique(labels)) == 10
        finally:
            shutil.rmtree(tempdir)
        # Turn caching off now
        clustering = AgglomerativeClustering(
            n_clusters=10, connectivity=connectivity, linkage=linkage
        )
        # Check that we obtain the same solution with early-stopping of the
        # tree building
        clustering.compute_full_tree = False
        clustering.fit(X)
        assert_almost_equal(normalized_mutual_info_score(clustering.labels_, labels), 1)
        clustering.connectivity = None
        clustering.fit(X)
        assert np.size(np.unique(clustering.labels_)) == 10
        # A connectivity matrix whose shape (10x10) does not match the 100
        # samples must raise a ValueError.
        clustering = AgglomerativeClustering(
            n_clusters=10,
            connectivity=lil_container(connectivity.toarray()[:10, :10]),
            linkage=linkage,
        )
        with pytest.raises(ValueError):
            clustering.fit(X)
    # Test that using ward with another metric than euclidean raises an
    # exception
    clustering = AgglomerativeClustering(
        n_clusters=10,
        connectivity=connectivity.toarray(),
        metric="manhattan",
        linkage="ward",
    )
    with pytest.raises(ValueError):
        clustering.fit(X)
    # Test using another metric than euclidean works with linkage complete
    for metric in PAIRED_DISTANCES.keys():
        # Compare our (structured) implementation to scipy
        clustering = AgglomerativeClustering(
            n_clusters=10,
            connectivity=np.ones((n_samples, n_samples)),
            metric=metric,
            linkage="complete",
        )
        clustering.fit(X)
        clustering2 = AgglomerativeClustering(
            n_clusters=10, connectivity=None, metric=metric, linkage="complete"
        )
        clustering2.fit(X)
        # A fully-connected connectivity matrix must be equivalent to no
        # connectivity constraint at all.
        assert_almost_equal(
            normalized_mutual_info_score(clustering2.labels_, clustering.labels_), 1
        )
    # Test that using a distance matrix (affinity = 'precomputed') has same
    # results (with connectivity constraints)
    clustering = AgglomerativeClustering(
        n_clusters=10, connectivity=connectivity, linkage="complete"
    )
    clustering.fit(X)
    X_dist = pairwise_distances(X)
    clustering2 = AgglomerativeClustering(
        n_clusters=10,
        connectivity=connectivity,
        metric="precomputed",
        linkage="complete",
    )
    clustering2.fit(X_dist)
    assert_array_equal(clustering.labels_, clustering2.labels_)
def test_agglomerative_clustering_memory_mapped():
    """AgglomerativeClustering must work on mem-mapped dataset.

    Non-regression test for issue #19875.
    """
    rng = np.random.RandomState(0)
    X_memmap = create_memmap_backed_data(rng.randn(50, 100))
    # Fitting must not raise on a read-only memory-mapped array.
    AgglomerativeClustering(metric="euclidean", linkage="single").fit(X_memmap)
def test_ward_agglomeration(global_random_seed):
    # Check that we obtain the correct solution in a simplistic case
    rng = np.random.RandomState(global_random_seed)
    mask = np.ones([10, 10], dtype=bool)
    X = rng.randn(50, 100)
    connectivity = grid_to_graph(*mask.shape)
    agglomerator = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
    agglomerator.fit(X)
    assert np.size(np.unique(agglomerator.labels_)) == 5

    # transform reduces to 5 pooled features; inverse_transform broadcasts
    # each pooled value back over its member features.
    X_reduced = agglomerator.transform(X)
    assert X_reduced.shape[1] == 5
    X_restored = agglomerator.inverse_transform(X_reduced)
    assert np.unique(X_restored[0]).size == 5
    assert_array_almost_equal(agglomerator.transform(X_restored), X_reduced)

    # Fitting with no samples must raise a ValueError.
    with pytest.raises(ValueError):
        agglomerator.fit(X[:0])
def test_single_linkage_clustering():
    # Check that we get the correct result in two emblematic cases
    # (two moons and two concentric circles), where single linkage is
    # known to recover the ground-truth partition perfectly.
    datasets = [
        make_moons(noise=0.05, random_state=42),
        make_circles(factor=0.5, noise=0.025, random_state=42),
    ]
    for points, true_labels in datasets:
        model = AgglomerativeClustering(n_clusters=2, linkage="single")
        model.fit(points)
        assert_almost_equal(
            normalized_mutual_info_score(model.labels_, true_labels), 1
        )
def assess_same_labelling(cut1, cut2):
    """Assert that two label vectors define the same partition of the samples.

    The comparison is permutation-invariant: each labelling is converted to
    its n x n co-membership matrix (entry (i, j) is 1 when samples i and j
    share a label), and the two matrices must be identical.
    """

    def _co_membership(cut):
        n_samples = len(cut)
        one_hot = np.zeros((n_samples, cut.max() + 1))
        one_hot[np.arange(n_samples), cut] = 1
        return np.dot(one_hot, one_hot.T)

    assert (_co_membership(cut1) == _co_membership(cut2)).all()
def test_sparse_scikit_vs_scipy(global_random_seed):
    # Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
    n, p, k = 10, 5, 3
    rng = np.random.RandomState(global_random_seed)

    # Not using a lil_matrix here, just to check that non sparse
    # matrices are well handled
    connectivity = np.ones((n, n))
    for linkage in _TREE_BUILDERS.keys():
        for _ in range(5):
            X = 0.1 * rng.normal(size=(n, p))
            X -= 4.0 * np.arange(n)[:, np.newaxis]
            X -= X.mean(axis=1)[:, np.newaxis]

            scipy_children = hierarchy.linkage(X, method=linkage)[:, :2].astype(
                int, copy=False
            )
            children, _, n_leaves, _ = _TREE_BUILDERS[linkage](
                X, connectivity=connectivity
            )
            # Sort the order of child nodes per row for consistency
            children.sort(axis=1)
            assert_array_equal(
                children,
                scipy_children,
                "linkage tree differs from scipy impl for linkage: " + linkage,
            )

            # The k-cluster cuts must induce the same partition.
            assess_same_labelling(
                _hc_cut(k, children, n_leaves), _hc_cut(k, scipy_children, n_leaves)
            )

    # Test error management in _hc_cut: more clusters than leaves is invalid.
    with pytest.raises(ValueError):
        _hc_cut(n_leaves + 1, children, n_leaves)
# Make sure our custom mst_linkage_core gives
# the same results as scipy's builtin
def test_vector_scikit_single_vs_scipy_single(global_random_seed):
    n_samples, n_features, n_clusters = 10, 5, 3
    rng = np.random.RandomState(global_random_seed)
    X = 0.1 * rng.normal(size=(n_samples, n_features))
    X -= 4.0 * np.arange(n_samples)[:, np.newaxis]
    X -= X.mean(axis=1)[:, np.newaxis]

    scipy_children = hierarchy.linkage(X, method="single")[:, :2].astype(int)
    children, _, n_leaves, _ = _TREE_BUILDERS["single"](X)
    # Sort the order of child nodes per row for consistency
    children.sort(axis=1)
    assert_array_equal(
        children,
        scipy_children,
        "linkage tree differs from scipy impl for single linkage.",
    )

    # Cutting both trees at n_clusters must give the same partition.
    assess_same_labelling(
        _hc_cut(n_clusters, children, n_leaves),
        _hc_cut(n_clusters, scipy_children, n_leaves),
    )
@pytest.mark.parametrize("metric_param_grid", METRICS_DEFAULT_PARAMS)
def test_mst_linkage_core_memory_mapped(metric_param_grid):
    """The MST-LINKAGE-CORE algorithm must work on mem-mapped dataset.

    Non-regression test for issue #19875.
    """
    rng = np.random.RandomState(seed=1)
    X = rng.normal(size=(20, 4))
    X_memmap = create_memmap_backed_data(X)
    metric, param_grid = metric_param_grid
    param_names = list(param_grid.keys())
    for combination in itertools.product(*param_grid.values()):
        dist_metric = DistanceMetric.get_metric(
            metric, **dict(zip(param_names, combination))
        )
        # In-memory and mem-mapped input must yield identical MSTs.
        np.testing.assert_equal(
            mst_linkage_core(X, dist_metric), mst_linkage_core(X_memmap, dist_metric)
        )
def test_identical_points():
    # Ensure identical points are handled correctly when using mst with
    # a sparse connectivity matrix
    X = np.array([[0, 0, 0], [0, 0, 0], [1, 1, 1], [1, 1, 1], [2, 2, 2], [2, 2, 2]])
    true_labels = np.array([0, 0, 1, 1, 2, 2])
    connectivity = kneighbors_graph(X, n_neighbors=3, include_self=False)
    # Symmetrize the k-NN graph before fixing its connected components.
    connectivity = 0.5 * (connectivity + connectivity.T)
    connectivity, n_components = _fix_connectivity(X, connectivity, "euclidean")

    # Bug fix: the original tuple listed "average" twice and never exercised
    # "complete"; cover each supported linkage exactly once.
    for linkage in ("single", "complete", "average", "ward"):
        clustering = AgglomerativeClustering(
            n_clusters=3, linkage=linkage, connectivity=connectivity
        )
        clustering.fit(X)
        assert_almost_equal(
            normalized_mutual_info_score(clustering.labels_, true_labels), 1
        )
def test_connectivity_propagation():
    # Check that connectivity in the ward tree is propagated correctly during
    # merging.  If changes are not propagated, fit crashes with an IndexError.
    X = np.array(
        [
            (0.014, 0.120),
            (0.014, 0.099),
            (0.014, 0.097),
            (0.017, 0.153),
            (0.017, 0.153),
            (0.018, 0.153),
            (0.018, 0.153),
            (0.018, 0.153),
            (0.018, 0.153),
            (0.018, 0.153),
            (0.018, 0.153),
            (0.018, 0.153),
            (0.018, 0.152),
            (0.018, 0.149),
            (0.018, 0.144),
        ]
    )
    connectivity = kneighbors_graph(X, 10, include_self=False)
    model = AgglomerativeClustering(
        n_clusters=4, connectivity=connectivity, linkage="ward"
    )
    model.fit(X)
def test_ward_tree_children_order(global_random_seed):
    # Children must be ordered identically for the structured and
    # unstructured versions of ward_tree; checked on five random datasets.
    n, p = 10, 5
    rng = np.random.RandomState(global_random_seed)
    connectivity = np.ones((n, n))
    for _ in range(5):
        X = 0.1 * rng.normal(size=(n, p))
        X -= 4.0 * np.arange(n)[:, np.newaxis]
        X -= X.mean(axis=1)[:, np.newaxis]

        unstructured_children = ward_tree(X)[0]
        structured_children = ward_tree(X, connectivity=connectivity)[0]
        assert_array_equal(unstructured_children, structured_children)
def test_ward_linkage_tree_return_distance(global_random_seed):
    # Test return_distance option on linkage and ward trees
    # test that return_distance when set true, gives same
    # output on both structured and unstructured clustering.
    n, p = 10, 5
    rng = np.random.RandomState(global_random_seed)
    connectivity = np.ones((n, n))
    for i in range(5):
        X = 0.1 * rng.normal(size=(n, p))
        X -= 4.0 * np.arange(n)[:, np.newaxis]
        X -= X.mean(axis=1)[:, np.newaxis]
        out_unstructured = ward_tree(X, return_distance=True)
        out_structured = ward_tree(X, connectivity=connectivity, return_distance=True)
        # get children
        children_unstructured = out_unstructured[0]
        children_structured = out_structured[0]
        # check if we got the same clusters
        assert_array_equal(children_unstructured, children_structured)
        # check if the distances are the same (distances are the last element
        # of the returned tuple when return_distance=True)
        dist_unstructured = out_unstructured[-1]
        dist_structured = out_structured[-1]
        assert_array_almost_equal(dist_unstructured, dist_structured)
        for linkage in ["average", "complete", "single"]:
            structured_items = linkage_tree(
                X, connectivity=connectivity, linkage=linkage, return_distance=True
            )[-1]
            unstructured_items = linkage_tree(X, linkage=linkage, return_distance=True)[
                -1
            ]
            structured_dist = structured_items[-1]
            unstructured_dist = unstructured_items[-1]
            structured_children = structured_items[0]
            unstructured_children = unstructured_items[0]
            assert_array_almost_equal(structured_dist, unstructured_dist)
            assert_array_almost_equal(structured_children, unstructured_children)
    # test on the following dataset where we know the truth
    # taken from scipy/cluster/tests/hierarchy_test_data.py
    X = np.array(
        [
            [1.43054825, -7.5693489],
            [6.95887839, 6.82293382],
            [2.87137846, -9.68248579],
            [7.87974764, -6.05485803],
            [8.24018364, -6.09495602],
            [7.39020262, 8.54004355],
        ]
    )
    # truth: each row is (child_a, child_b, merge distance, cluster size),
    # in scipy's linkage-matrix layout.
    linkage_X_ward = np.array(
        [
            [3.0, 4.0, 0.36265956, 2.0],
            [1.0, 5.0, 1.77045373, 2.0],
            [0.0, 2.0, 2.55760419, 2.0],
            [6.0, 8.0, 9.10208346, 4.0],
            [7.0, 9.0, 24.7784379, 6.0],
        ]
    )
    linkage_X_complete = np.array(
        [
            [3.0, 4.0, 0.36265956, 2.0],
            [1.0, 5.0, 1.77045373, 2.0],
            [0.0, 2.0, 2.55760419, 2.0],
            [6.0, 8.0, 6.96742194, 4.0],
            [7.0, 9.0, 18.77445997, 6.0],
        ]
    )
    linkage_X_average = np.array(
        [
            [3.0, 4.0, 0.36265956, 2.0],
            [1.0, 5.0, 1.77045373, 2.0],
            [0.0, 2.0, 2.55760419, 2.0],
            [6.0, 8.0, 6.55832839, 4.0],
            [7.0, 9.0, 15.44089605, 6.0],
        ]
    )
    n_samples, n_features = np.shape(X)
    connectivity_X = np.ones((n_samples, n_samples))
    out_X_unstructured = ward_tree(X, return_distance=True)
    out_X_structured = ward_tree(X, connectivity=connectivity_X, return_distance=True)
    # check that the labels are the same (columns 0-1 of the truth matrix
    # are the merged children)
    assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
    assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])
    # check that the distances are correct (index 4 of the returned tuple
    # holds the merge distances)
    assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
    assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])
    # NOTE(review): "single" is listed but X_linkage_truth has only two
    # entries, so zip() silently skips single linkage here — confirm intent.
    linkage_options = ["complete", "average", "single"]
    X_linkage_truth = [linkage_X_complete, linkage_X_average]
    for linkage, X_truth in zip(linkage_options, X_linkage_truth):
        out_X_unstructured = linkage_tree(X, return_distance=True, linkage=linkage)
        out_X_structured = linkage_tree(
            X, connectivity=connectivity_X, linkage=linkage, return_distance=True
        )
        # check that the labels are the same
        assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
        assert_array_equal(X_truth[:, :2], out_X_structured[0])
        # check that the distances are correct
        assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
        assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
    # Non-regression check: a connectivity matrix that does not support item
    # assignment, with more than one component, must still be fixable.
    points = np.array([[0, 0], [1, 1]])
    # A mask with several components forces the connectivity-fixing path.
    component_mask = np.array([[True, False], [False, True]])
    graph = grid_to_graph(n_x=2, n_y=2, mask=component_mask)
    model = AgglomerativeClustering(connectivity=graph, linkage="ward")
    with pytest.warns(UserWarning):
        model.fit(points)
def test_int_float_dict():
    # Basic lookup behaviour of the Cython IntFloatDict, plus merge smoke tests.
    rng = np.random.RandomState(0)
    keys = np.unique(rng.randint(100, size=10).astype(np.intp, copy=False))
    values = rng.rand(len(keys))
    mapping = IntFloatDict(keys, values)
    for key, value in zip(keys, values):
        assert mapping[key] == value

    other_keys = np.arange(50, dtype=np.intp)[::2]
    other_values = np.full(50, 0.5)[::2]
    other_mapping = IntFloatDict(other_keys, other_values)

    # Complete smoke test
    full_mask = np.ones(100, dtype=np.intp)
    max_merge(mapping, other_mapping, mask=full_mask, n_a=1, n_b=1)
    average_merge(mapping, other_mapping, mask=full_mask, n_a=1, n_b=1)
def test_connectivity_callable():
    # Passing a callable that builds the k-NN graph must be equivalent to
    # passing the precomputed graph itself.
    rng = np.random.RandomState(0)
    X = rng.rand(20, 5)
    precomputed_graph = kneighbors_graph(X, 3, include_self=False)
    model_precomputed = AgglomerativeClustering(connectivity=precomputed_graph)
    model_callable = AgglomerativeClustering(
        connectivity=partial(kneighbors_graph, n_neighbors=3, include_self=False)
    )
    model_precomputed.fit(X)
    model_callable.fit(X)
    assert_array_equal(model_precomputed.labels_, model_callable.labels_)
def test_connectivity_ignores_diagonal():
    # Self-edges (diagonal entries) in the connectivity graph must not
    # change the clustering result.
    rng = np.random.RandomState(0)
    X = rng.rand(20, 5)
    graph_no_self = kneighbors_graph(X, 3, include_self=False)
    graph_with_self = kneighbors_graph(X, 3, include_self=True)
    model_no_self = AgglomerativeClustering(connectivity=graph_no_self).fit(X)
    model_with_self = AgglomerativeClustering(connectivity=graph_with_self).fit(X)
    assert_array_equal(model_no_self.labels_, model_with_self.labels_)
def test_compute_full_tree():
    # Test that the full tree is computed if n_clusters is small
    rng = np.random.RandomState(0)

    # Few clusters relative to n_samples: the whole tree should be built,
    # i.e. the number of merges is n_samples - 1.
    X = rng.randn(10, 2)
    connectivity = kneighbors_graph(X, 5, include_self=False)
    model = AgglomerativeClustering(n_clusters=2, connectivity=connectivity)
    model.fit(X)
    assert model.children_.shape[0] == X.shape[0] - 1

    # When n_clusters is large, greater than max of 100 and 0.02 * n_samples,
    # tree building should stop once n_clusters clusters remain.
    n_clusters = 101
    X = rng.randn(200, 2)
    connectivity = kneighbors_graph(X, 10, include_self=False)
    model = AgglomerativeClustering(n_clusters=n_clusters, connectivity=connectivity)
    model.fit(X)
    assert model.children_.shape[0] == X.shape[0] - n_clusters
def test_n_components():
    # Every tree builder must report the number of connected components of
    # the connectivity graph; an identity matrix has five singletons.
    rng = np.random.RandomState(0)
    X = rng.rand(5, 5)
    connectivity = np.eye(5)
    for builder in _TREE_BUILDERS.values():
        assert ignore_warnings(builder)(X, connectivity=connectivity)[1] == 5
def test_affinity_passed_to_fix_connectivity():
    # The user-supplied affinity callable must be the one actually invoked
    # when the connectivity graph has to be completed.
    size = 2
    rng = np.random.RandomState(0)
    X = rng.randn(size, size)
    mask = np.array([True, False, False, True])
    connectivity = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)

    class CountingAffinity:
        # Records how many times its `increment` method is called.
        def __init__(self):
            self.counter = 0

        def increment(self, *args, **kwargs):
            self.counter += 1
            return self.counter

    counting = CountingAffinity()
    linkage_tree(X, connectivity=connectivity, affinity=counting.increment)
    assert counting.counter == 3
@pytest.mark.parametrize("linkage", ["ward", "complete", "average"])
def test_agglomerative_clustering_with_distance_threshold(linkage, global_random_seed):
    # Check that we obtain the correct number of clusters with
    # agglomerative clustering with distance_threshold.
    rng = np.random.RandomState(global_random_seed)
    mask = np.ones([10, 10], dtype=bool)
    n_samples = 100
    X = rng.randn(n_samples, 50)
    connectivity = grid_to_graph(*mask.shape)
    # test when distance threshold is set to 10
    distance_threshold = 10
    # Run both without and with a connectivity constraint.
    for conn in [None, connectivity]:
        clustering = AgglomerativeClustering(
            n_clusters=None,
            distance_threshold=distance_threshold,
            connectivity=conn,
            linkage=linkage,
        )
        clustering.fit(X)
        clusters_produced = clustering.labels_
        num_clusters_produced = len(np.unique(clustering.labels_))
        # test if the clusters produced match the point in the linkage tree
        # where the distance exceeds the threshold
        tree_builder = _TREE_BUILDERS[linkage]
        children, n_components, n_leaves, parent, distances = tree_builder(
            X, connectivity=conn, n_clusters=None, return_distance=True
        )
        # Each merge at distance >= threshold is refused, leaving one extra
        # cluster per refused merge (plus the single base cluster).
        num_clusters_at_threshold = (
            np.count_nonzero(distances >= distance_threshold) + 1
        )
        # test number of clusters produced
        assert num_clusters_at_threshold == num_clusters_produced
        # test clusters produced: cutting the tree at that count must give
        # the same partition as the fitted labels.
        clusters_at_threshold = _hc_cut(
            n_clusters=num_clusters_produced, children=children, n_leaves=n_leaves
        )
        assert np.array_equiv(clusters_produced, clusters_at_threshold)
def test_small_distance_threshold(global_random_seed):
    # With a threshold below every pairwise distance, each sample should end
    # up in its own cluster.
    rng = np.random.RandomState(global_random_seed)
    n_samples = 10
    X = rng.randint(-300, 300, size=(n_samples, 3))
    # NOTE: with a different random seed the pairwise distances might not
    # all exceed the threshold, hence the explicit check below.
    clustering = AgglomerativeClustering(
        n_clusters=None, distance_threshold=1.0, linkage="single"
    ).fit(X)

    # Confirm the premise: all pairwise distances are indeed larger than .1.
    all_distances = pairwise_distances(X, metric="minkowski", p=2)
    np.fill_diagonal(all_distances, np.inf)
    assert np.all(all_distances > 0.1)
    assert clustering.n_clusters_ == n_samples
def test_cluster_distances_with_distance_threshold(global_random_seed):
    # For single linkage with a distance threshold, within-cluster nearest
    # neighbour chains stay below the threshold while other clusters stay at
    # or beyond it.
    rng = np.random.RandomState(global_random_seed)
    n_samples = 100
    X = rng.randint(-10, 10, size=(n_samples, 3))
    distance_threshold = 4
    labels = (
        AgglomerativeClustering(
            n_clusters=None, distance_threshold=distance_threshold, linkage="single"
        )
        .fit(X)
        .labels_
    )
    distances = pairwise_distances(X, metric="minkowski", p=2)
    # to avoid taking the 0 diagonal in min()
    np.fill_diagonal(distances, np.inf)
    for label in np.unique(labels):
        members = labels == label
        max_within = distances[members][:, members].min(axis=0).max()
        min_between = distances[members][:, ~members].min(axis=0).min()
        # single data point clusters only have that inf diagonal here
        if members.sum() > 1:
            assert max_within < distance_threshold
        assert min_between >= distance_threshold
@pytest.mark.parametrize("linkage", ["ward", "complete", "average"])
@pytest.mark.parametrize(
    ("threshold", "y_true"), [(0.5, [1, 0]), (1.0, [1, 0]), (1.5, [0, 0])]
)
def test_agglomerative_clustering_with_distance_threshold_edge_case(
    linkage, threshold, y_true
):
    # Boundary behaviour when distance_threshold exactly equals the only
    # pairwise distance (1.0): the merge must be refused at equality.
    X = [[0], [1]]
    model = AgglomerativeClustering(
        n_clusters=None, distance_threshold=threshold, linkage=linkage
    )
    predicted = model.fit_predict(X)
    assert adjusted_rand_score(y_true, predicted) == 1
def test_dist_threshold_invalid_parameters():
    # Exactly one of n_clusters / distance_threshold must be set, and using a
    # threshold requires building the full tree.
    X = [[0], [1]]
    with pytest.raises(ValueError, match="Exactly one of "):
        AgglomerativeClustering(n_clusters=None, distance_threshold=None).fit(X)
    with pytest.raises(ValueError, match="Exactly one of "):
        AgglomerativeClustering(n_clusters=2, distance_threshold=1).fit(X)
    with pytest.raises(ValueError, match="compute_full_tree must be True if"):
        AgglomerativeClustering(
            n_clusters=None, distance_threshold=1, compute_full_tree=False
        ).fit(X)
def test_invalid_shape_precomputed_dist_matrix():
    # With metric='precomputed', a non-square matrix must be rejected
    # (non-regression for PR #16257).
    rng = np.random.RandomState(0)
    X = rng.rand(5, 3)
    expected_msg = (
        r"Distance matrix should be square, got matrix of shape \(5, 3\)"
    )
    with pytest.raises(ValueError, match=expected_msg):
        AgglomerativeClustering(metric="precomputed", linkage="complete").fit(X)
def test_precomputed_connectivity_metric_with_2_connected_components():
    """Check that connecting components works when connectivity and
    affinity are both precomputed and the number of connected components is
    greater than 1. Non-regression test for #16151.
    """
    connectivity_matrix = np.array(
        [
            [0, 1, 1, 0, 0],
            [0, 0, 1, 0, 0],
            [0, 0, 0, 0, 0],
            [0, 0, 0, 0, 1],
            [0, 0, 0, 0, 0],
        ]
    )
    # Sanity check: the graph really has two connected components.
    assert connected_components(connectivity_matrix)[0] == 2

    rng = np.random.RandomState(0)
    X = rng.randn(5, 10)
    X_dist = pairwise_distances(X)

    model_precomputed = AgglomerativeClustering(
        metric="precomputed", connectivity=connectivity_matrix, linkage="complete"
    )
    expected_warning = "Completing it to avoid stopping the tree early"
    with pytest.warns(UserWarning, match=expected_warning):
        model_precomputed.fit(X_dist)

    model = AgglomerativeClustering(
        connectivity=connectivity_matrix, linkage="complete"
    )
    with pytest.warns(UserWarning, match=expected_warning):
        model.fit(X)

    # Both fits must agree on labels and tree structure.
    assert_array_equal(model.labels_, model_precomputed.labels_)
    assert_array_equal(model.children_, model_precomputed.children_)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/cluster/tests/test_affinity_propagation.py | sklearn/cluster/tests/test_affinity_propagation.py | """
Testing for Clustering methods
"""
import warnings
import numpy as np
import pytest
from sklearn.cluster import AffinityPropagation, affinity_propagation
from sklearn.cluster._affinity_propagation import _equal_similarities_and_preferences
from sklearn.datasets import make_blobs
from sklearn.exceptions import ConvergenceWarning, NotFittedError
from sklearn.metrics import euclidean_distances
from sklearn.utils._testing import assert_allclose, assert_array_equal
from sklearn.utils.fixes import CSR_CONTAINERS
# Shared fixture for the affinity-propagation tests: three well-separated
# Gaussian blobs (60 samples, 2 features) with a fixed random_state so every
# test sees the same data.
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(
    n_samples=60,
    n_features=2,
    centers=centers,
    cluster_std=0.4,
    shuffle=True,
    random_state=0,
)
# TODO: AffinityPropagation must preserve dtype for its fitted attributes
# and test must be created accordingly to this new behavior.
# For more details, see: https://github.com/scikit-learn/scikit-learn/issues/11000
def test_affinity_propagation(global_random_seed, global_dtype):
    """Test consistency of the affinity propagations."""
    similarity = -euclidean_distances(X.astype(global_dtype, copy=False), squared=True)
    preference = np.median(similarity) * 10
    exemplar_indices, labels = affinity_propagation(
        similarity, preference=preference, random_state=global_random_seed
    )
    # The three planted blobs must be recovered as three exemplars.
    assert n_clusters == len(exemplar_indices)
def test_affinity_propagation_precomputed():
    """Check equality of precomputed affinity matrix to internally computed affinity
    matrix.
    """
    S = -euclidean_distances(X, squared=True)
    preference = np.median(S) * 10

    model_precomputed = AffinityPropagation(
        preference=preference, affinity="precomputed", random_state=28
    )
    labels_precomputed = model_precomputed.fit(S).labels_

    model = AffinityPropagation(preference=preference, verbose=True, random_state=37)
    labels = model.fit(X).labels_
    assert_array_equal(labels, labels_precomputed)

    # The number of exemplars must match the number of distinct labels and
    # the number of planted blobs.
    n_exemplars = len(model.cluster_centers_indices_)
    assert np.unique(labels).size == n_exemplars
    assert n_clusters == n_exemplars
def test_affinity_propagation_no_copy():
    """Check behaviour of not copying the input data."""
    S = -euclidean_distances(X, squared=True)
    S_original = S.copy()
    preference = np.median(S) * 10
    # Precondition: the diagonal does not already equal the preference.
    assert not np.allclose(S.diagonal(), preference)
    # with copy=True S should not be modified
    affinity_propagation(S, preference=preference, copy=True, random_state=0)
    assert_allclose(S, S_original)
    assert not np.allclose(S.diagonal(), preference)
    assert_allclose(S.diagonal(), np.zeros(S.shape[0]))
    # with copy=False S will be modified inplace: the preference is written
    # onto the diagonal of the similarity matrix.
    affinity_propagation(S, preference=preference, copy=False, random_state=0)
    assert_allclose(S.diagonal(), preference)
    # test that copy=True and copy=False lead to the same result
    # (S must be restored first since the previous call mutated it).
    S = S_original.copy()
    af = AffinityPropagation(preference=preference, verbose=True, random_state=0)
    labels = af.fit(X).labels_
    _, labels_no_copy = affinity_propagation(
        S, preference=preference, copy=False, random_state=74
    )
    assert_array_equal(labels, labels_no_copy)
def test_affinity_propagation_affinity_shape():
    """A non-square similarity matrix must be rejected by `affinity_propagation`."""
    S = -euclidean_distances(X, squared=True)
    expected_msg = "The matrix of similarities must be a square array"
    with pytest.raises(ValueError, match=expected_msg):
        affinity_propagation(S[:, :-1])
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_affinity_propagation_precomputed_with_sparse_input(csr_container):
    # A sparse precomputed affinity matrix must be rejected with a TypeError.
    expected_msg = "Sparse data was passed for X, but dense data is required"
    with pytest.raises(TypeError, match=expected_msg):
        AffinityPropagation(affinity="precomputed").fit(csr_container((3, 3)))
def test_affinity_propagation_predict(global_random_seed, global_dtype):
# Test AffinityPropagation.predict
af = AffinityPropagation(affinity="euclidean", random_state=global_random_seed)
X_ = X.astype(global_dtype, copy=False)
labels = af.fit_predict(X_)
labels2 = af.predict(X_)
assert_array_equal(labels, labels2)
def test_affinity_propagation_predict_error():
# Test exception in AffinityPropagation.predict
# Not fitted.
af = AffinityPropagation(affinity="euclidean")
with pytest.raises(NotFittedError):
af.predict(X)
# Predict not supported when affinity="precomputed".
S = np.dot(X, X.T)
af = AffinityPropagation(affinity="precomputed", random_state=57)
af.fit(S)
with pytest.raises(ValueError, match="expecting 60 features as input"):
af.predict(X)
def test_affinity_propagation_fit_non_convergence(global_dtype):
# In case of non-convergence of affinity_propagation(), the cluster
# centers should be an empty array and training samples should be labelled
# as noise (-1)
X = np.array([[0, 0], [1, 1], [-2, -2]], dtype=global_dtype)
# Force non-convergence by allowing only a single iteration
af = AffinityPropagation(preference=-10, max_iter=1, random_state=82)
with pytest.warns(ConvergenceWarning):
af.fit(X)
assert_allclose(np.empty((0, 2)), af.cluster_centers_)
assert_array_equal(np.array([-1, -1, -1]), af.labels_)
def test_affinity_propagation_equal_mutual_similarities(global_dtype):
X = np.array([[-1, 1], [1, -1]], dtype=global_dtype)
S = -euclidean_distances(X, squared=True)
# setting preference > similarity
with pytest.warns(UserWarning, match="mutually equal"):
cluster_center_indices, labels = affinity_propagation(S, preference=0)
# expect every sample to become an exemplar
assert_array_equal([0, 1], cluster_center_indices)
assert_array_equal([0, 1], labels)
# setting preference < similarity
with pytest.warns(UserWarning, match="mutually equal"):
cluster_center_indices, labels = affinity_propagation(S, preference=-10)
# expect one cluster, with arbitrary (first) sample as exemplar
assert_array_equal([0], cluster_center_indices)
assert_array_equal([0, 0], labels)
# setting different preferences
with warnings.catch_warnings():
warnings.simplefilter("error", UserWarning)
cluster_center_indices, labels = affinity_propagation(
S, preference=[-20, -10], random_state=37
)
# expect one cluster, with highest-preference sample as exemplar
assert_array_equal([1], cluster_center_indices)
assert_array_equal([0, 0], labels)
def test_affinity_propagation_predict_non_convergence(global_dtype):
# In case of non-convergence of affinity_propagation(), the cluster
# centers should be an empty array
X = np.array([[0, 0], [1, 1], [-2, -2]], dtype=global_dtype)
# Force non-convergence by allowing only a single iteration
with pytest.warns(ConvergenceWarning):
af = AffinityPropagation(preference=-10, max_iter=1, random_state=75).fit(X)
# At prediction time, consider new samples as noise since there are no
# clusters
to_predict = np.array([[2, 2], [3, 3], [4, 4]])
with pytest.warns(ConvergenceWarning):
y = af.predict(to_predict)
assert_array_equal(np.array([-1, -1, -1]), y)
def test_affinity_propagation_non_convergence_regressiontest(global_dtype):
X = np.array(
[[1, 0, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0], [0, 0, 1, 0, 0, 1]], dtype=global_dtype
)
af = AffinityPropagation(affinity="euclidean", max_iter=2, random_state=34)
msg = (
"Affinity propagation did not converge, this model may return degenerate"
" cluster centers and labels."
)
with pytest.warns(ConvergenceWarning, match=msg):
af.fit(X)
assert_array_equal(np.array([0, 0, 0]), af.labels_)
def test_equal_similarities_and_preferences(global_dtype):
    """Exercise `_equal_similarities_and_preferences` on degenerate and
    non-degenerate similarity/preference combinations."""
    # Three points at unequal pairwise distances: never degenerate,
    # whatever the preference.
    points = np.array([[0, 0], [1, 1], [-2, -2]], dtype=global_dtype)
    similarities = -euclidean_distances(points, squared=True)
    for preference in (np.array(0), np.array([0, 0]), np.array([0, 1])):
        assert not _equal_similarities_and_preferences(similarities, preference)
    # Two points: the single pairwise distance is trivially "all equal".
    points = np.array([[0, 0], [1, 1]], dtype=global_dtype)
    similarities = -euclidean_distances(points, squared=True)
    # Distinct preferences break the degeneracy...
    assert not _equal_similarities_and_preferences(similarities, np.array([0, 1]))
    # ...while identical (or scalar) preferences preserve it.
    assert _equal_similarities_and_preferences(similarities, np.array([0, 0]))
    assert _equal_similarities_and_preferences(similarities, np.array(0))
def test_affinity_propagation_random_state():
"""Check that different random states lead to different initialisations
by looking at the center locations after two iterations.
"""
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(
n_samples=300, centers=centers, cluster_std=0.5, random_state=0
)
# random_state = 0
ap = AffinityPropagation(convergence_iter=1, max_iter=2, random_state=0)
ap.fit(X)
centers0 = ap.cluster_centers_
# random_state = 76
ap = AffinityPropagation(convergence_iter=1, max_iter=2, random_state=76)
ap.fit(X)
centers76 = ap.cluster_centers_
# check that the centers have not yet converged to the same solution
assert np.mean((centers0 - centers76) ** 2) > 1
@pytest.mark.parametrize("container", CSR_CONTAINERS + [np.array])
def test_affinity_propagation_convergence_warning_dense_sparse(container, global_dtype):
"""
Check that having sparse or dense `centers` format should not
influence the convergence.
Non-regression test for gh-13334.
"""
centers = container(np.zeros((1, 10)))
rng = np.random.RandomState(42)
X = rng.rand(40, 10).astype(global_dtype, copy=False)
y = (4 * rng.rand(40)).astype(int)
ap = AffinityPropagation(random_state=46)
ap.fit(X, y)
ap.cluster_centers_ = centers
with warnings.catch_warnings():
warnings.simplefilter("error", ConvergenceWarning)
assert_array_equal(ap.predict(X), np.zeros(X.shape[0], dtype=int))
# FIXME: this test is broken with different random states; it needs to be revisited
def test_correct_clusters(global_dtype):
# Test to fix incorrect clusters due to dtype change
# (non-regression test for issue #10832)
X = np.array(
[[1, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 1]], dtype=global_dtype
)
afp = AffinityPropagation(preference=1, affinity="precomputed", random_state=0).fit(
X
)
expected = np.array([0, 1, 1, 2])
assert_array_equal(afp.labels_, expected)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_sparse_input_for_predict(csr_container):
# Test to make sure sparse inputs are accepted for predict
# (non-regression test for issue #20049)
af = AffinityPropagation(affinity="euclidean", random_state=42)
af.fit(X)
labels = af.predict(csr_container((2, 2)))
assert_array_equal(labels, (2, 2))
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_sparse_input_for_fit_predict(csr_container):
# Test to make sure sparse inputs are accepted for fit_predict
# (non-regression test for issue #20049)
af = AffinityPropagation(affinity="euclidean", random_state=42)
rng = np.random.RandomState(42)
X = csr_container(rng.randint(0, 2, size=(5, 5)))
labels = af.fit_predict(X)
assert_array_equal(labels, (0, 1, 1, 2, 3))
def test_affinity_propagation_equal_points():
"""Make sure we do not assign multiple clusters to equal points.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/pull/20043
"""
X = np.zeros((8, 1))
af = AffinityPropagation(affinity="euclidean", damping=0.5, random_state=42).fit(X)
assert np.all(af.labels_ == 0)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/cluster/tests/test_dbscan.py | sklearn/cluster/tests/test_dbscan.py | """
Tests for DBSCAN clustering algorithm
"""
import pickle
import warnings
import numpy as np
import pytest
from scipy.spatial import distance
from sklearn.cluster import DBSCAN, dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import NearestNeighbors
from sklearn.utils._testing import assert_array_equal
from sklearn.utils.fixes import CSR_CONTAINERS, LIL_CONTAINERS
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
    """Run DBSCAN on a precomputed, max-normalised distance matrix."""
    # Parameters chosen specifically for this task.
    eps = 0.15
    min_samples = 10
    # Pairwise distances rescaled to [0, 1].
    dist_matrix = distance.squareform(distance.pdist(X))
    dist_matrix /= np.max(dist_matrix)

    # Functional interface.
    core_samples, labels = dbscan(
        dist_matrix, metric="precomputed", eps=eps, min_samples=min_samples
    )
    # Number of clusters, ignoring noise (-1) if present.
    found_clusters = len(set(labels)) - int(-1 in labels)
    assert found_clusters == n_clusters

    # The estimator interface must agree with the functional one.
    estimator = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
    labels = estimator.fit(dist_matrix).labels_
    found_clusters = len(set(labels)) - int(-1 in labels)
    assert found_clusters == n_clusters
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = "euclidean"
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps, min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert n_clusters_1 == n_clusters
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert n_clusters_2 == n_clusters
@pytest.mark.parametrize("lil_container", LIL_CONTAINERS)
def test_dbscan_sparse(lil_container):
core_sparse, labels_sparse = dbscan(lil_container(X), eps=0.8, min_samples=10)
core_dense, labels_dense = dbscan(X, eps=0.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
@pytest.mark.parametrize("include_self", [False, True])
def test_dbscan_sparse_precomputed(include_self):
D = pairwise_distances(X)
nn = NearestNeighbors(radius=0.9).fit(X)
X_ = X if include_self else None
D_sparse = nn.radius_neighbors_graph(X=X_, mode="distance")
# Ensure it is sparse not merely on diagonals:
assert D_sparse.nnz < D.shape[0] * (D.shape[0] - 1)
core_sparse, labels_sparse = dbscan(
D_sparse, eps=0.8, min_samples=10, metric="precomputed"
)
core_dense, labels_dense = dbscan(D, eps=0.8, min_samples=10, metric="precomputed")
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_sparse_precomputed_different_eps():
# test that precomputed neighbors graph is filtered if computed with
# a radius larger than DBSCAN's eps.
lower_eps = 0.2
nn = NearestNeighbors(radius=lower_eps).fit(X)
D_sparse = nn.radius_neighbors_graph(X, mode="distance")
dbscan_lower = dbscan(D_sparse, eps=lower_eps, metric="precomputed")
higher_eps = lower_eps + 0.7
nn = NearestNeighbors(radius=higher_eps).fit(X)
D_sparse = nn.radius_neighbors_graph(X, mode="distance")
dbscan_higher = dbscan(D_sparse, eps=lower_eps, metric="precomputed")
assert_array_equal(dbscan_lower[0], dbscan_higher[0])
assert_array_equal(dbscan_lower[1], dbscan_higher[1])
@pytest.mark.parametrize("metric", ["precomputed", "minkowski"])
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS + [None])
def test_dbscan_input_not_modified(metric, csr_container):
# test that the input is not modified by dbscan
X = np.random.RandomState(0).rand(10, 10)
X = csr_container(X) if csr_container is not None else X
X_copy = X.copy()
dbscan(X, metric=metric)
if csr_container is not None:
assert_array_equal(X.toarray(), X_copy.toarray())
else:
assert_array_equal(X, X_copy)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_dbscan_input_not_modified_precomputed_sparse_nodiag(csr_container):
"""Check that we don't modify in-place the pre-computed sparse matrix.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/27508
"""
X = np.random.RandomState(0).rand(10, 10)
# Add zeros on the diagonal that will be implicit when creating
# the sparse matrix. If `X` is modified in-place, the zeros from
# the diagonal will be made explicit.
np.fill_diagonal(X, 0)
X = csr_container(X)
assert all(row != col for row, col in zip(*X.nonzero()))
X_copy = X.copy()
dbscan(X, metric="precomputed")
# Make sure that we did not modify `X` in-place even by creating
# explicit 0s values.
assert X.nnz == X_copy.nnz
assert_array_equal(X.toarray(), X_copy.toarray())
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_dbscan_no_core_samples(csr_container):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < 0.8] = 0
for X_ in [X, csr_container(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert db.core_sample_indices_.shape == (0,)
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(
X, metric=metric, eps=eps, min_samples=min_samples, algorithm="ball_tree"
)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert n_clusters_1 == n_clusters
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples, algorithm="ball_tree")
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert n_clusters_2 == n_clusters
def test_dbscan_metric_params():
# Tests that DBSCAN works with the metrics_params argument.
eps = 0.8
min_samples = 10
p = 1
# Compute DBSCAN with metric_params arg
with warnings.catch_warnings(record=True) as warns:
db = DBSCAN(
metric="minkowski",
metric_params={"p": p},
eps=eps,
p=None,
min_samples=min_samples,
algorithm="ball_tree",
).fit(X)
assert not warns, warns[0].message
core_sample_1, labels_1 = db.core_sample_indices_, db.labels_
# Test that sample labels are the same as passing Minkowski 'p' directly
db = DBSCAN(
metric="minkowski", eps=eps, min_samples=min_samples, algorithm="ball_tree", p=p
).fit(X)
core_sample_2, labels_2 = db.core_sample_indices_, db.labels_
assert_array_equal(core_sample_1, core_sample_2)
assert_array_equal(labels_1, labels_2)
# Minkowski with p=1 should be equivalent to Manhattan distance
db = DBSCAN(
metric="manhattan", eps=eps, min_samples=min_samples, algorithm="ball_tree"
).fit(X)
core_sample_3, labels_3 = db.core_sample_indices_, db.labels_
assert_array_equal(core_sample_1, core_sample_3)
assert_array_equal(labels_1, labels_3)
with pytest.warns(
SyntaxWarning,
match=(
"Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored."
),
):
# Test that checks p is ignored in favor of metric_params={'p': <val>}
db = DBSCAN(
metric="minkowski",
metric_params={"p": p},
eps=eps,
p=p + 1,
min_samples=min_samples,
algorithm="ball_tree",
).fit(X)
core_sample_4, labels_4 = db.core_sample_indices_, db.labels_
assert_array_equal(core_sample_1, core_sample_4)
assert_array_equal(labels_1, labels_4)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(
D, metric="precomputed", eps=eps, min_samples=min_samples
)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert n_clusters_1 == n_clusters
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm="ball_tree")
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert n_clusters_2 == n_clusters
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm="kd_tree")
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert n_clusters_3 == n_clusters
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm="ball_tree")
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert n_clusters_4 == n_clusters
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples, algorithm="ball_tree")
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert n_clusters_5 == n_clusters
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1.0, 2.0], [3.0, 4.0]]
DBSCAN().fit(X) # must not raise exception
def test_pickle():
    """A default-constructed estimator must survive a pickle round-trip."""
    estimator = DBSCAN()
    restored = pickle.loads(pickle.dumps(estimator))
    assert type(restored) is estimator.__class__
def test_boundaries():
    """Check that the `min_samples` and `eps` thresholds are inclusive."""
    # ensure min_samples is inclusive of core point
    core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
    assert 0 in core
    # ensure eps is inclusive of circumference
    core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
    assert 0 in core
    # eps just below the neighbour distance: point 0 is no longer core
    core, _ = dbscan([[0], [1], [1]], eps=0.99, min_samples=2)
    assert 0 not in core
def test_weighted_dbscan(global_random_seed):
# ensure sample_weight is validated
with pytest.raises(ValueError):
dbscan([[0], [1]], sample_weight=[2])
with pytest.raises(ValueError):
dbscan([[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5], min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5], min_samples=6)[0])
assert_array_equal(
[0, 1], dbscan([[0], [1]], sample_weight=[6, 6], min_samples=6)[0]
)
# points within eps of each other:
assert_array_equal(
[0, 1], dbscan([[0], [1]], eps=1.5, sample_weight=[5, 1], min_samples=6)[0]
)
# and effect of non-positive and non-integer sample_weight:
assert_array_equal(
[], dbscan([[0], [1]], sample_weight=[5, 0], eps=1.5, min_samples=6)[0]
)
assert_array_equal(
[0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1], eps=1.5, min_samples=6)[0]
)
assert_array_equal(
[0, 1], dbscan([[0], [1]], sample_weight=[6, 0], eps=1.5, min_samples=6)[0]
)
assert_array_equal(
[], dbscan([[0], [1]], sample_weight=[6, -1], eps=1.5, min_samples=6)[0]
)
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(global_random_seed)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert len(label1) == len(X)
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight, metric="precomputed")
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
@pytest.mark.parametrize("algorithm", ["brute", "kd_tree", "ball_tree"])
def test_dbscan_core_samples_toy(algorithm):
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1, min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1, min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1, min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1, min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, np.full(n_samples, -1.0))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.eye(10)
labels = DBSCAN(eps=0.5, metric="precomputed").fit(X).labels_
assert len(set(labels)) == 1
X = np.zeros((10, 10))
labels = DBSCAN(eps=0.5, metric="precomputed").fit(X).labels_
assert len(set(labels)) == 1
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_dbscan_precomputed_metric_with_initial_rows_zero(csr_container):
# sample matrix with initial two row all zero
ar = np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0],
[0.0, 0.0, 0.1, 0.1, 0.0, 0.0, 0.3],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1],
[0.0, 0.0, 0.0, 0.0, 0.3, 0.1, 0.0],
]
)
matrix = csr_container(ar)
labels = DBSCAN(eps=0.2, metric="precomputed", min_samples=2).fit(matrix).labels_
assert_array_equal(labels, [-1, -1, 0, 0, 0, 1, 1])
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/cluster/tests/common.py | sklearn/cluster/tests/common.py | """
Common utilities for testing clustering.
"""
import numpy as np
###############################################################################
# Generate sample data
def generate_clustered_data(
    seed=0, n_clusters=3, n_features=2, n_samples_per_cluster=20, std=0.4
):
    """Draw Gaussian blobs around fixed, off-center cluster means.

    Returns an array of shape ``(n_clusters * n_samples_per_cluster,
    n_features)`` where the samples of cluster ``i`` occupy a contiguous
    row slice. At most 4 clusters and 4 features are supported (one row /
    column per entry of the hard-coded means table).
    """
    rng = np.random.RandomState(seed)
    # The means are deliberately shifted away from the origin to check
    # clustering-algorithm robustness with regards to non-centered data.
    shifted_means = (
        np.array(
            [
                [1, 1, 1, 0],
                [-1, -1, 0, 1],
                [1, -1, 1, 1],
                [-1, 1, 1, 0],
            ]
        )
        + 10
    )
    # One Gaussian blob per cluster; draw order matches the original
    # implementation so results are reproducible for a given seed.
    blobs = [
        shifted_means[idx][:n_features]
        + std * rng.randn(n_samples_per_cluster, n_features)
        for idx in range(n_clusters)
    ]
    # The empty seed array keeps the n_clusters == 0 case well-defined.
    return np.concatenate([np.empty((0, n_features))] + blobs, axis=0)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/cluster/tests/test_hdbscan.py | sklearn/cluster/tests/test_hdbscan.py | """
Tests for HDBSCAN clustering algorithm
Based on the DBSCAN test code
"""
import numpy as np
import pytest
from scipy import stats
from scipy.spatial import distance
from sklearn.cluster import HDBSCAN
from sklearn.cluster._hdbscan._tree import (
CONDENSED_dtype,
_condense_tree,
_do_labelling,
)
from sklearn.cluster._hdbscan.hdbscan import _OUTLIER_ENCODING
from sklearn.datasets import make_blobs
from sklearn.metrics import fowlkes_mallows_score
from sklearn.metrics.pairwise import _VALID_METRICS, euclidean_distances
from sklearn.neighbors import BallTree, KDTree
from sklearn.preprocessing import StandardScaler
from sklearn.utils import shuffle
from sklearn.utils._testing import assert_allclose, assert_array_equal
from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS
X, y = make_blobs(n_samples=200, random_state=10)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
ALGORITHMS = [
"kd_tree",
"ball_tree",
"brute",
"auto",
]
OUTLIER_SET = {-1} | {out["label"] for _, out in _OUTLIER_ENCODING.items()}
def check_label_quality(labels, threshold=0.99, n_clusters=3):
    """Assert that a labelling recovers the expected blob structure.

    Parameters
    ----------
    labels : array-like
        Cluster labels produced for the module-level dataset ``X``.
    threshold : float, default=0.99
        Minimum acceptable Fowlkes-Mallows score against the true labels ``y``.
    n_clusters : int, default=3
        Expected number of non-outlier clusters (previously hard-coded to 3;
        now a parameter so the helper can be reused for other datasets).
    """
    # Outlier encodings (and -1 noise) do not count as clusters.
    found_clusters = len(set(labels) - OUTLIER_SET)
    assert found_clusters == n_clusters
    assert fowlkes_mallows_score(labels, y) > threshold
@pytest.mark.parametrize("outlier_type", _OUTLIER_ENCODING)
def test_outlier_data(outlier_type):
"""
Tests if np.inf and np.nan data are each treated as special outliers.
"""
outlier = {
"infinite": np.inf,
"missing": np.nan,
}[outlier_type]
prob_check = {
"infinite": lambda x, y: x == y,
"missing": lambda x, y: np.isnan(x),
}[outlier_type]
label = _OUTLIER_ENCODING[outlier_type]["label"]
prob = _OUTLIER_ENCODING[outlier_type]["prob"]
X_outlier = X.copy()
X_outlier[0] = [outlier, 1]
X_outlier[5] = [outlier, outlier]
model = HDBSCAN(copy=False).fit(X_outlier)
(missing_labels_idx,) = (model.labels_ == label).nonzero()
assert_array_equal(missing_labels_idx, [0, 5])
(missing_probs_idx,) = (prob_check(model.probabilities_, prob)).nonzero()
assert_array_equal(missing_probs_idx, [0, 5])
clean_indices = list(range(1, 5)) + list(range(6, 200))
clean_model = HDBSCAN(copy=False).fit(X_outlier[clean_indices])
assert_array_equal(clean_model.labels_, model.labels_[clean_indices])
def test_hdbscan_distance_matrix():
"""
Tests that HDBSCAN works with precomputed distance matrices, and throws the
appropriate errors when needed.
"""
D = euclidean_distances(X)
D_original = D.copy()
labels = HDBSCAN(metric="precomputed", copy=True).fit_predict(D)
assert_allclose(D, D_original)
check_label_quality(labels)
msg = r"The precomputed distance matrix.*has shape"
with pytest.raises(ValueError, match=msg):
HDBSCAN(metric="precomputed", copy=True).fit_predict(X)
msg = r"The precomputed distance matrix.*values"
# Ensure the matrix is not symmetric
D[0, 1] = 10
D[1, 0] = 1
with pytest.raises(ValueError, match=msg):
HDBSCAN(metric="precomputed", copy=False).fit_predict(D)
@pytest.mark.parametrize("sparse_constructor", [*CSR_CONTAINERS, *CSC_CONTAINERS])
def test_hdbscan_sparse_distance_matrix(sparse_constructor):
"""
Tests that HDBSCAN works with sparse distance matrices.
"""
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
threshold = stats.scoreatpercentile(D.flatten(), 50)
D[D >= threshold] = 0.0
D = sparse_constructor(D)
D.eliminate_zeros()
labels = HDBSCAN(metric="precomputed", copy=False).fit_predict(D)
check_label_quality(labels)
def test_hdbscan_feature_array():
"""
Tests that HDBSCAN works with feature array, including an arbitrary
goodness of fit check. Note that the check is a simple heuristic.
"""
labels = HDBSCAN(copy=False).fit_predict(X)
# Check that clustering is arbitrarily good
# This is a heuristic to guard against regression
check_label_quality(labels)
@pytest.mark.parametrize("algo", ALGORITHMS)
@pytest.mark.parametrize("metric", _VALID_METRICS)
def test_hdbscan_algorithms(algo, metric):
"""
Tests that HDBSCAN works with the expected combinations of algorithms and
metrics, or raises the expected errors.
"""
labels = HDBSCAN(algorithm=algo, copy=False).fit_predict(X)
check_label_quality(labels)
# Validation for brute is handled by `pairwise_distances`
if algo in ("brute", "auto"):
return
ALGOS_TREES = {
"kd_tree": KDTree,
"ball_tree": BallTree,
}
metric_params = {
"mahalanobis": {"V": np.eye(X.shape[1])},
"seuclidean": {"V": np.ones(X.shape[1])},
"minkowski": {"p": 2},
"wminkowski": {"p": 2, "w": np.ones(X.shape[1])},
}.get(metric, None)
hdb = HDBSCAN(
algorithm=algo,
metric=metric,
metric_params=metric_params,
copy=False,
)
if metric not in ALGOS_TREES[algo].valid_metrics:
with pytest.raises(ValueError):
hdb.fit(X)
elif metric == "wminkowski":
with pytest.warns(FutureWarning):
hdb.fit(X)
else:
hdb.fit(X)
def test_dbscan_clustering():
"""
Tests that HDBSCAN can generate a sufficiently accurate dbscan clustering.
This test is more of a sanity check than a rigorous evaluation.
"""
clusterer = HDBSCAN(copy=False).fit(X)
labels = clusterer.dbscan_clustering(0.3)
# We use a looser threshold due to dbscan producing a more constrained
# clustering representation
check_label_quality(labels, threshold=0.92)
@pytest.mark.parametrize("cut_distance", (0.1, 0.5, 1))
def test_dbscan_clustering_outlier_data(cut_distance):
    """
    Tests if np.inf and np.nan data are each treated as special outliers.
    """
    missing_label = _OUTLIER_ENCODING["missing"]["label"]
    infinite_label = _OUTLIER_ENCODING["infinite"]["label"]
    X_outlier = X.copy()
    X_outlier[0] = [np.inf, 1]
    X_outlier[2] = [1, np.nan]
    X_outlier[5] = [np.inf, np.nan]
    model = HDBSCAN(copy=False).fit(X_outlier)
    labels = model.dbscan_clustering(cut_distance=cut_distance)

    missing_labels_idx = np.flatnonzero(labels == missing_label)
    assert_array_equal(missing_labels_idx, [2, 5])
    infinite_labels_idx = np.flatnonzero(labels == infinite_label)
    assert_array_equal(infinite_labels_idx, [0])

    # Refitting on the clean samples only must reproduce the labels those
    # samples received in the full fit.
    # BUGFIX: `missing_labels_idx + infinite_labels_idx` element-wise adds the
    # two index arrays via broadcasting ((2,) + (1,) -> [2, 5]) instead of
    # concatenating them, which silently left the infinite sample (index 0)
    # inside the "clean" set. Concatenate explicitly instead.
    outlier_idx = np.concatenate([missing_labels_idx, infinite_labels_idx])
    clean_idx = list(set(range(len(X_outlier))) - set(outlier_idx))
    clean_model = HDBSCAN(copy=False).fit(X_outlier[clean_idx])
    clean_labels = clean_model.dbscan_clustering(cut_distance=cut_distance)
    assert_array_equal(clean_labels, labels[clean_idx])
def test_hdbscan_best_balltree_metric():
"""
Tests that HDBSCAN using `BallTree` works.
"""
labels = HDBSCAN(
metric="seuclidean", metric_params={"V": np.ones(X.shape[1])}, copy=False
).fit_predict(X)
check_label_quality(labels)
def test_hdbscan_no_clusters():
"""
Tests that HDBSCAN correctly does not generate a valid cluster when the
`min_cluster_size` is too large for the data.
"""
labels = HDBSCAN(min_cluster_size=len(X) - 1, copy=False).fit_predict(X)
assert set(labels).issubset(OUTLIER_SET)
def test_hdbscan_min_cluster_size():
    """
    Test that the smallest non-noise cluster has at least `min_cluster_size`
    many points
    """
    # Sweep every feasible value of `min_cluster_size` for this dataset.
    for min_cluster_size in range(2, len(X), 1):
        labels = HDBSCAN(min_cluster_size=min_cluster_size, copy=False).fit_predict(X)
        # Drop noise points (label -1) before counting cluster sizes.
        true_labels = [label for label in labels if label != -1]
        if len(true_labels) != 0:
            assert np.min(np.bincount(true_labels)) >= min_cluster_size
def test_hdbscan_callable_metric():
"""
Tests that HDBSCAN works when passed a callable metric.
"""
metric = distance.euclidean
labels = HDBSCAN(metric=metric, copy=False).fit_predict(X)
check_label_quality(labels)
@pytest.mark.parametrize("tree", ["kd_tree", "ball_tree"])
def test_hdbscan_precomputed_non_brute(tree):
    """Tree-based algorithms must reject precomputed distances at fit time."""
    expected_msg = "precomputed is not a valid metric for"
    estimator = HDBSCAN(metric="precomputed", algorithm=tree, copy=False)
    with pytest.raises(ValueError, match=expected_msg):
        estimator.fit(X)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_hdbscan_sparse(csr_container):
"""
Tests that HDBSCAN works correctly when passing sparse feature data.
Evaluates correctness by comparing against the same data passed as a dense
array.
"""
dense_labels = HDBSCAN(copy=False).fit(X).labels_
check_label_quality(dense_labels)
_X_sparse = csr_container(X)
X_sparse = _X_sparse.copy()
sparse_labels = HDBSCAN(copy=False).fit(X_sparse).labels_
assert_array_equal(dense_labels, sparse_labels)
# Compare that the sparse and dense non-precomputed routines return the same labels
# where the 0th observation contains the outlier.
for outlier_val, outlier_type in ((np.inf, "infinite"), (np.nan, "missing")):
X_dense = X.copy()
X_dense[0, 0] = outlier_val
dense_labels = HDBSCAN(copy=False).fit(X_dense).labels_
check_label_quality(dense_labels)
assert dense_labels[0] == _OUTLIER_ENCODING[outlier_type]["label"]
X_sparse = _X_sparse.copy()
X_sparse[0, 0] = outlier_val
sparse_labels = HDBSCAN(copy=False).fit(X_sparse).labels_
assert_array_equal(dense_labels, sparse_labels)
msg = "Sparse data matrices only support algorithm `brute`."
with pytest.raises(ValueError, match=msg):
HDBSCAN(metric="euclidean", algorithm="ball_tree", copy=False).fit(X_sparse)
@pytest.mark.parametrize("algorithm", ALGORITHMS)
def test_hdbscan_centers(algorithm):
    """
    Tests that HDBSCAN centers are calculated and stored properly, and are
    accurate to the data.
    """
    true_centers = [(0.0, 0.0), (3.0, 3.0)]
    H, _ = make_blobs(
        n_samples=2000, random_state=0, centers=true_centers, cluster_std=0.5
    )
    hdb = HDBSCAN(store_centers="both", copy=False).fit(H)
    # Both kinds of centers must land close to the generating blob centers.
    for expected, centroid, medoid in zip(true_centers, hdb.centroids_, hdb.medoids_):
        assert_allclose(expected, centroid, rtol=1, atol=0.05)
        assert_allclose(expected, medoid, rtol=1, atol=0.05)
    # When every point is noise, no centers should be produced at all.
    hdb = HDBSCAN(
        algorithm=algorithm,
        store_centers="both",
        min_cluster_size=X.shape[0],
        copy=False,
    ).fit(X)
    assert hdb.centroids_.shape[0] == 0
    assert hdb.medoids_.shape[0] == 0
def test_hdbscan_allow_single_cluster_with_epsilon():
    """
    Tests that HDBSCAN single-cluster selection with epsilon works correctly.
    """
    rng = np.random.RandomState(0)
    no_structure = rng.rand(150, 2)
    # without epsilon we should see many noise points as children of root.
    labels = HDBSCAN(
        min_cluster_size=5,
        cluster_selection_epsilon=0.0,
        cluster_selection_method="eom",
        allow_single_cluster=True,
        copy=False,
    ).fit_predict(no_structure)
    unique_labels, counts = np.unique(labels, return_counts=True)
    # Exactly one cluster plus the noise label (-1).
    assert len(unique_labels) == 2
    # Arbitrary heuristic. Would prefer something more precise.
    assert counts[unique_labels == -1] > 30
    # for this random seed an epsilon of 0.18 will produce exactly 2 noise
    # points at that cut in single linkage.
    labels = HDBSCAN(
        min_cluster_size=5,
        cluster_selection_epsilon=0.18,
        cluster_selection_method="eom",
        allow_single_cluster=True,
        algorithm="kd_tree",
        copy=False,
    ).fit_predict(no_structure)
    unique_labels, counts = np.unique(labels, return_counts=True)
    assert len(unique_labels) == 2
    assert counts[unique_labels == -1] == 2
def test_hdbscan_better_than_dbscan():
    """
    Validate that HDBSCAN can properly cluster this difficult synthetic
    dataset. Note that DBSCAN fails on this (see HDBSCAN plotting
    example)
    """
    centers = [[-0.85, -0.85], [-0.85, 0.85], [3, 3], [3, -3]]
    X, y = make_blobs(
        n_samples=750,
        centers=centers,
        cluster_std=[0.2, 0.35, 1.35, 1.35],
        random_state=0,
    )
    labels = HDBSCAN(copy=False).fit(X).labels_
    # The four generating blobs should be recovered (noise label excluded).
    n_clusters = len(set(labels)) - int(-1 in labels)
    assert n_clusters == 4
    # Bug fix: this comparison was previously computed but never asserted,
    # making the agreement check a silent no-op.
    assert fowlkes_mallows_score(labels, y) > 0.99
@pytest.mark.parametrize(
    "kwargs, X",
    [
        ({"metric": "precomputed"}, np.array([[1, np.inf], [np.inf, 1]])),
        ({"metric": "precomputed"}, [[1, 2], [2, 1]]),
        ({}, [[1, 2], [3, 4]]),
    ],
)
def test_hdbscan_usable_inputs(X, kwargs):
    """
    Tests that HDBSCAN works correctly for array-likes and precomputed inputs
    with non-finite points.
    """
    # Fitting only needs to complete without raising.
    estimator = HDBSCAN(min_samples=1, copy=False, **kwargs)
    estimator.fit(X)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_hdbscan_sparse_distances_too_few_nonzero(csr_container):
    """
    Tests that HDBSCAN raises the correct error when there are too few
    non-zero distances.
    """
    all_zero_distances = csr_container(np.zeros((10, 10)))
    estimator = HDBSCAN(metric="precomputed", copy=False)
    with pytest.raises(ValueError, match="There exists points with fewer than"):
        estimator.fit(all_zero_distances)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_hdbscan_sparse_distances_disconnected_graph(csr_container):
    """
    Tests that HDBSCAN raises the correct error when the distance matrix
    has multiple connected components.
    """
    # Build a symmetric sparse distance matrix with two connected components.
    distances = np.zeros((20, 20))
    distances[:5, :5] = 1
    distances[5:, 15:] = 1
    distances = csr_container(distances + distances.T)
    estimator = HDBSCAN(metric="precomputed", copy=False)
    with pytest.raises(
        ValueError, match="HDBSCAN cannot be performed on a disconnected graph"
    ):
        estimator.fit(distances)
def test_hdbscan_tree_invalid_metric():
    """
    Tests that HDBSCAN correctly raises an error for invalid metric choices.
    """

    def metric_callable(x):
        return x

    msg = (
        ".* is not a valid metric for a .*-based algorithm\\. Please select a different"
        " metric\\."
    )
    # Neither tree-based algorithm accepts a callable metric.
    for tree_algorithm in ("kd_tree", "ball_tree"):
        with pytest.raises(ValueError, match=msg):
            HDBSCAN(algorithm=tree_algorithm, metric=metric_callable, copy=False).fit(X)
    # KDTree's valid metrics are a strict subset of BallTree's, so any
    # BallTree-only metric must be rejected by the kd_tree algorithm.
    metrics_not_kd = list(set(BallTree.valid_metrics) - set(KDTree.valid_metrics))
    if metrics_not_kd:
        with pytest.raises(ValueError, match=msg):
            HDBSCAN(algorithm="kd_tree", metric=metrics_not_kd[0], copy=False).fit(X)
def test_hdbscan_too_many_min_samples():
    """
    Tests that HDBSCAN correctly raises an error when setting `min_samples`
    larger than the number of samples.
    """
    estimator = HDBSCAN(min_samples=len(X) + 1, copy=False)
    with pytest.raises(ValueError, match=r"min_samples (.*) must be at most"):
        estimator.fit(X)
def test_hdbscan_precomputed_dense_nan():
    """
    Tests that HDBSCAN correctly raises an error when providing precomputed
    distances with `np.nan` values.
    """
    X_with_nan = X.copy()
    X_with_nan[0, 0] = np.nan
    estimator = HDBSCAN(metric="precomputed", copy=False)
    with pytest.raises(ValueError, match="np.nan values found in precomputed-dense"):
        estimator.fit(X_with_nan)
@pytest.mark.parametrize("allow_single_cluster", [True, False])
@pytest.mark.parametrize("epsilon", [0, 0.1])
def test_labelling_distinct(global_random_seed, allow_single_cluster, epsilon):
    """
    Tests that the `_do_labelling` helper function correctly assigns labels.
    """
    n_samples = 48
    X, y = make_blobs(
        n_samples,
        random_state=global_random_seed,
        # Ensure the clusters are distinct with no overlap
        centers=[
            [0, 0],
            [10, 0],
            [0, 10],
        ],
    )
    est = HDBSCAN(copy=False).fit(X)
    condensed_tree = _condense_tree(
        est._single_linkage_tree_, min_cluster_size=est.min_cluster_size
    )
    # The three blob clusters correspond to condensed-tree node ids
    # n_samples+2..n_samples+4 for this dataset; map them to labels 0..2.
    clusters = {n_samples + 2, n_samples + 3, n_samples + 4}
    cluster_label_map = {n_samples + 2: 0, n_samples + 3: 1, n_samples + 4: 2}
    labels = _do_labelling(
        condensed_tree=condensed_tree,
        clusters=clusters,
        cluster_label_map=cluster_label_map,
        allow_single_cluster=allow_single_cluster,
        cluster_selection_epsilon=epsilon,
    )
    # Align the arbitrary cluster label ids with `y` via the first sample of
    # each ground-truth class, then compare the full assignment.
    first_with_label = {_y: np.where(y == _y)[0][0] for _y in list(set(y))}
    y_to_labels = {_y: labels[first_with_label[_y]] for _y in list(set(y))}
    aligned_target = np.vectorize(y_to_labels.get)(y)
    assert_array_equal(labels, aligned_target)
def test_labelling_thresholding():
    """
    Tests that the `_do_labelling` helper function correctly thresholds the
    incoming lambda values given various `cluster_selection_epsilon` values.
    """
    n_samples = 5
    MAX_LAMBDA = 1.5
    # Rows are (parent, child, lambda value, cluster size).
    condensed_tree = np.array(
        [
            (5, 2, MAX_LAMBDA, 1),
            (5, 1, 0.1, 1),
            (5, 0, MAX_LAMBDA, 1),
            (5, 3, 0.2, 1),
            (5, 4, 0.3, 1),
        ],
        dtype=CONDENSED_dtype,
    )
    # With epsilon=1, points whose lambda value is below 1 are expected to be
    # labelled as noise (-1).
    labels = _do_labelling(
        condensed_tree=condensed_tree,
        clusters={n_samples},
        cluster_label_map={n_samples: 0, n_samples + 1: 1},
        allow_single_cluster=True,
        cluster_selection_epsilon=1,
    )
    num_noise = condensed_tree["value"] < 1
    assert sum(num_noise) == sum(labels == -1)
    labels = _do_labelling(
        condensed_tree=condensed_tree,
        clusters={n_samples},
        cluster_label_map={n_samples: 0, n_samples + 1: 1},
        allow_single_cluster=True,
        cluster_selection_epsilon=0,
    )
    # The threshold should be calculated per-sample based on the largest
    # lambda of any sibling node. In this case, all points are siblings
    # and the largest value is exactly MAX_LAMBDA.
    num_noise = condensed_tree["value"] < MAX_LAMBDA
    assert sum(num_noise) == sum(labels == -1)
@pytest.mark.parametrize("store_centers", ["centroid", "medoid"])
def test_hdbscan_error_precomputed_and_store_centers(store_centers):
    """Check that we raise an error if the centers are requested together with
    a precomputed input matrix.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/27893
    """
    rng = np.random.RandomState(0)
    distance_matrix = euclidean_distances(rng.random((100, 2)))
    estimator = HDBSCAN(
        metric="precomputed",
        store_centers=store_centers,
        copy=False,
    )
    expected = "Cannot store centers when using a precomputed distance matrix."
    with pytest.raises(ValueError, match=expected):
        estimator.fit(distance_matrix)
@pytest.mark.parametrize("valid_algo", ["auto", "brute"])
def test_hdbscan_cosine_metric_valid_algorithm(valid_algo):
    """Test that HDBSCAN works with the "cosine" metric when the algorithm is set
    to "brute" or "auto".

    Non-regression test for issue #28631
    """
    estimator = HDBSCAN(metric="cosine", algorithm=valid_algo, copy=False)
    estimator.fit_predict(X)
@pytest.mark.parametrize("invalid_algo", ["kd_tree", "ball_tree"])
def test_hdbscan_cosine_metric_invalid_algorithm(invalid_algo):
    """Test that an informative error is raised when an unsupported
    algorithm is used with the "cosine" metric.
    """
    hdbscan = HDBSCAN(metric="cosine", algorithm=invalid_algo, copy=False)
    with pytest.raises(ValueError, match="cosine is not a valid metric"):
        hdbscan.fit_predict(X)
# TODO(1.10): remove this test
def test_hdbscan_default_copy_warning():
    """
    Test that HDBSCAN raises a FutureWarning when the `copy`
    parameter is not set.
    """
    rng = np.random.RandomState(0)
    data = rng.random((100, 2))
    expected_msg = r"The default value of `copy` will change from False to True in 1.10."
    with pytest.warns(FutureWarning, match=expected_msg):
        HDBSCAN(min_cluster_size=20).fit(data)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/cluster/tests/test_mean_shift.py | sklearn/cluster/tests/test_mean_shift.py | """
Testing for mean shift clustering methods
"""
import warnings
import numpy as np
import pytest
from sklearn.cluster import MeanShift, estimate_bandwidth, get_bin_seeds, mean_shift
from sklearn.datasets import make_blobs
from sklearn.metrics import v_measure_score
from sklearn.utils._testing import assert_allclose, assert_array_equal
# Shared fixture for the tests below: three well-separated Gaussian blobs
# shifted away from the origin.
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(
    n_samples=300,
    n_features=2,
    centers=centers,
    cluster_std=0.4,
    shuffle=True,
    random_state=11,
)
def test_convergence_of_1d_constant_data():
    # Non-regression test for:
    # https://github.com/scikit-learn/scikit-learn/issues/28926
    # Fitting constant 1D data must converge before exhausting max_iter.
    model = MeanShift()
    constant_column = np.ones(10).reshape(-1, 1)
    model.fit(constant_column)
    assert model.n_iter_ < model.max_iter
def test_estimate_bandwidth():
    # The bandwidth estimated on the shared blob data must fall in a
    # plausible range.
    assert 0.9 <= estimate_bandwidth(X, n_samples=200) <= 1.5
def test_estimate_bandwidth_1sample(global_dtype):
    # With n_samples=1 and quantile<1, n_neighbors is forced to 1 and the
    # estimated bandwidth must collapse to ~0 while preserving dtype.
    X_cast = X.astype(global_dtype, copy=False)
    bandwidth = estimate_bandwidth(X_cast, n_samples=1, quantile=0.3)
    assert bandwidth.dtype == X.dtype
    assert bandwidth == pytest.approx(0.0, abs=1e-5)
@pytest.mark.parametrize(
    "bandwidth, cluster_all, expected, first_cluster_label",
    [(1.2, True, 3, 0), (1.2, False, 4, -1)],
)
def test_mean_shift(
    global_dtype, bandwidth, cluster_all, expected, first_cluster_label
):
    # The MeanShift estimator and the mean_shift function must agree on the
    # number of clusters and on how orphans are labelled.
    X_cast = X.astype(global_dtype, copy=False)

    estimator = MeanShift(bandwidth=bandwidth, cluster_all=cluster_all)
    estimator_labels = estimator.fit(X_cast).labels_
    unique_estimator_labels = np.unique(estimator_labels)
    assert len(unique_estimator_labels) == expected
    assert unique_estimator_labels[0] == first_cluster_label
    # Input dtype must be preserved in the fitted centers.
    assert estimator.cluster_centers_.dtype == global_dtype

    func_centers, func_labels = mean_shift(X_cast, cluster_all=cluster_all)
    unique_func_labels = np.unique(func_labels)
    assert len(unique_func_labels) == expected
    assert unique_func_labels[0] == first_cluster_label
    assert func_centers.dtype == global_dtype
# TODO: remove mark once loky bug is fixed:
# https://github.com/joblib/loky/issues/458
@pytest.mark.thread_unsafe
def test_parallel(global_dtype, global_random_seed):
    # Fitting with n_jobs=2 must reproduce the serial result exactly.
    blob_centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
    data, _ = make_blobs(
        n_samples=50,
        n_features=2,
        centers=blob_centers,
        cluster_std=0.4,
        shuffle=True,
        random_state=global_random_seed,
    )
    data = data.astype(global_dtype, copy=False)

    parallel = MeanShift(n_jobs=2).fit(data)
    serial = MeanShift().fit(data)

    assert_allclose(parallel.cluster_centers_, serial.cluster_centers_)
    assert parallel.cluster_centers_.dtype == serial.cluster_centers_.dtype
    assert_array_equal(parallel.labels_, serial.labels_)
def test_meanshift_predict(global_dtype):
    # predict must agree with the labels produced by fit_predict.
    ms = MeanShift(bandwidth=1.2)
    X_cast = X.astype(global_dtype, copy=False)
    fit_labels = ms.fit_predict(X_cast)
    predict_labels = ms.predict(X_cast)
    assert_array_equal(fit_labels, predict_labels)
def test_meanshift_all_orphans():
    # Seeds placed far away from the data make every point an orphan; a
    # sensible error must be raised.
    ms = MeanShift(bandwidth=0.1, seeds=[[-9, -9], [-10, -10]])
    with pytest.raises(ValueError, match="No point was within bandwidth=0.1"):
        ms.fit(X)
def test_unfitted():
    # Before fit, no fitted attributes should exist on the estimator.
    ms = MeanShift()
    for attr in ("cluster_centers_", "labels_"):
        assert not hasattr(ms, attr)
def test_cluster_intensity_tie(global_dtype):
    # Tie-breaking between clusters must depend on intensity, not on the
    # order the samples are presented in.
    points_a = np.array(
        [[1, 1], [2, 1], [1, 0], [4, 7], [3, 5], [3, 6]], dtype=global_dtype
    )
    c1 = MeanShift(bandwidth=2).fit(points_a)

    points_b = np.array(
        [[4, 7], [3, 5], [3, 6], [1, 1], [2, 1], [1, 0]], dtype=global_dtype
    )
    c2 = MeanShift(bandwidth=2).fit(points_b)

    assert_array_equal(c1.labels_, [1, 1, 1, 0, 0, 0])
    assert_array_equal(c2.labels_, [0, 0, 0, 1, 1, 1])
def test_bin_seeds(global_dtype):
    """Check `get_bin_seeds` for several bin sizes and frequency cut-offs."""
    # Test the bin seeding technique which can be used in the mean shift
    # algorithm
    # Data is just 6 points in the plane
    X = np.array(
        [[1.0, 1.0], [1.4, 1.4], [1.8, 1.2], [2.0, 1.0], [2.1, 1.1], [0.0, 0.0]],
        dtype=global_dtype,
    )
    # With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be
    # found
    ground_truth = {(1.0, 1.0), (2.0, 1.0), (0.0, 0.0)}
    test_bins = get_bin_seeds(X, 1, 1)
    test_result = set(tuple(p) for p in test_bins)
    assert len(ground_truth.symmetric_difference(test_result)) == 0
    # With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be
    # found
    ground_truth = {(1.0, 1.0), (2.0, 1.0)}
    test_bins = get_bin_seeds(X, 1, 2)
    test_result = set(tuple(p) for p in test_bins)
    assert len(ground_truth.symmetric_difference(test_result)) == 0
    # With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found
    # we bail and use the whole data here.
    with warnings.catch_warnings(record=True):
        test_bins = get_bin_seeds(X, 0.01, 1)
    assert_allclose(test_bins, X)
    # tight clusters around [0, 0] and [1, 1], only get two bins
    X, _ = make_blobs(
        n_samples=100,
        n_features=2,
        centers=[[0, 0], [1, 1]],
        cluster_std=0.1,
        random_state=0,
    )
    X = X.astype(global_dtype, copy=False)
    test_bins = get_bin_seeds(X, 1)
    assert_array_equal(test_bins, [[0, 0], [1, 1]])
@pytest.mark.parametrize("max_iter", [1, 100])
def test_max_iter(max_iter):
    # The functional and estimator interfaces must agree for a given
    # iteration budget, and n_iter_ must respect that budget.
    centers_func, _ = mean_shift(X, max_iter=max_iter)
    estimator = MeanShift(max_iter=max_iter).fit(X)
    centers_class = estimator.cluster_centers_

    assert estimator.n_iter_ <= estimator.max_iter
    assert len(centers_func) == len(centers_class)
    for expected, actual in zip(centers_func, centers_class):
        assert np.allclose(expected, actual)
def test_mean_shift_zero_bandwidth(global_dtype):
    # Check that mean shift works when the estimated bandwidth is 0.
    X = np.array([1, 1, 1, 2, 2, 2, 3, 3], dtype=global_dtype).reshape(-1, 1)
    # estimate_bandwidth with default args returns 0 on this dataset
    bandwidth = estimate_bandwidth(X)
    assert bandwidth == 0
    # get_bin_seeds with a 0 bin_size should return the dataset itself
    assert get_bin_seeds(X, bin_size=bandwidth) is X
    # MeanShift with binning and a 0 estimated bandwidth should be equivalent
    # to no binning.
    ms_binning = MeanShift(bin_seeding=True, bandwidth=None).fit(X)
    ms_nobinning = MeanShift(bin_seeding=False).fit(X)
    # Both variants must recover the three value groups {1}, {2}, {3}.
    expected_labels = np.array([0, 0, 0, 1, 1, 1, 2, 2])
    assert v_measure_score(ms_binning.labels_, expected_labels) == pytest.approx(1)
    assert v_measure_score(ms_nobinning.labels_, expected_labels) == pytest.approx(1)
    assert_allclose(ms_binning.cluster_centers_, ms_nobinning.cluster_centers_)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/cluster/tests/test_spectral.py | sklearn/cluster/tests/test_spectral.py | """Testing for Spectral Clustering methods"""
import pickle
import re
import numpy as np
import pytest
from scipy.linalg import LinAlgError
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster._spectral import cluster_qr, discretize
from sklearn.datasets import make_blobs
from sklearn.feature_extraction import img_to_graph
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
from sklearn.neighbors import NearestNeighbors
from sklearn.utils import check_random_state
from sklearn.utils._testing import assert_array_equal
from sklearn.utils.fixes import COO_CONTAINERS, CSR_CONTAINERS
# pyamg is an optional dependency providing the "amg" eigen solver; the
# tests below branch on its availability.
try:
    from pyamg import smoothed_aggregation_solver  # noqa: F401
    amg_loaded = True
except ImportError:
    amg_loaded = False
# Shared fixture for the tests below: three well-separated Gaussian blobs
# shifted away from the origin.
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(
    n_samples=60,
    n_features=2,
    centers=centers,
    cluster_std=0.4,
    shuffle=True,
    random_state=0,
)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
@pytest.mark.parametrize("eigen_solver", ("arpack", "lobpcg"))
@pytest.mark.parametrize("assign_labels", ("kmeans", "discretize", "cluster_qr"))
def test_spectral_clustering(
    eigen_solver, assign_labels, csr_container, global_random_seed
):
    # Hand-crafted affinity matrix with two obvious groups (samples 0-2 and
    # 3-6), weakly linked through sample 3.
    S = np.array(
        [
            [1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
            [1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
            [1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
            [0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
            [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
            [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
            [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
        ]
    )
    # Both dense and sparse representations must yield a perfect split.
    for mat in (S, csr_container(S)):
        model = SpectralClustering(
            random_state=global_random_seed,
            n_clusters=2,
            affinity="precomputed",
            eigen_solver=eigen_solver,
            assign_labels=assign_labels,
        ).fit(mat)
        labels = model.labels_
        # Normalize the arbitrary binary labelling before comparing.
        if labels[0] == 0:
            labels = 1 - labels
        assert adjusted_rand_score(labels, [1, 1, 1, 0, 0, 0, 0]) == 1
        # The fitted model must survive a pickle round-trip intact.
        model_copy = pickle.loads(pickle.dumps(model))
        assert model_copy.n_clusters == model.n_clusters
        assert model_copy.eigen_solver == model.eigen_solver
        assert_array_equal(model_copy.labels_, model.labels_)
@pytest.mark.parametrize("coo_container", COO_CONTAINERS)
@pytest.mark.parametrize("assign_labels", ("kmeans", "discretize", "cluster_qr"))
def test_spectral_clustering_sparse(assign_labels, coo_container, global_random_seed):
    # Two tight blobs turned into a sparse, thresholded RBF affinity matrix
    # must still be perfectly separated by every label-assignment strategy.
    X, y = make_blobs(
        n_samples=20,
        random_state=global_random_seed,
        centers=[[1, 1], [-1, -1]],
        cluster_std=0.01,
    )

    affinity = rbf_kernel(X, gamma=1)
    affinity = np.maximum(affinity - 1e-4, 0)
    affinity = coo_container(affinity)

    model = SpectralClustering(
        random_state=global_random_seed,
        n_clusters=2,
        affinity="precomputed",
        assign_labels=assign_labels,
    )
    labels = model.fit(affinity).labels_
    assert adjusted_rand_score(y, labels) == 1
def test_precomputed_nearest_neighbors_filtering(global_random_seed):
    # A precomputed graph containing surplus neighbors must be filtered down
    # to n_neighbors, leaving the clustering unchanged.
    X, y = make_blobs(
        n_samples=250,
        random_state=global_random_seed,
        centers=[[1, 1, 1], [-1, -1, -1]],
        cluster_std=0.01,
    )

    n_neighbors = 2
    results = []
    for extra_neighbors in (0, 10):
        nn = NearestNeighbors(n_neighbors=n_neighbors + extra_neighbors).fit(X)
        graph = nn.kneighbors_graph(X, mode="distance")
        model = SpectralClustering(
            random_state=global_random_seed,
            n_clusters=2,
            affinity="precomputed_nearest_neighbors",
            n_neighbors=n_neighbors,
        )
        results.append(model.fit(graph).labels_)

    assert_array_equal(results[0], results[1])
def test_affinities(global_random_seed):
    # Note: in the following, random_state has been selected to have
    # a dataset that yields a stable eigen decomposition both when built
    # on OSX and Linux
    X, y = make_blobs(
        n_samples=20, random_state=0, centers=[[1, 1], [-1, -1]], cluster_std=0.01
    )
    # nearest neighbors affinity
    sp = SpectralClustering(n_clusters=2, affinity="nearest_neighbors", random_state=0)
    # The kNN graph of this tiny dataset is not fully connected, which is
    # expected to warn.
    with pytest.warns(UserWarning, match="not fully connected"):
        sp.fit(X)
    assert adjusted_rand_score(y, sp.labels_) == 1
    # Default (RBF) affinity with a custom gamma.
    sp = SpectralClustering(n_clusters=2, gamma=2, random_state=global_random_seed)
    labels = sp.fit(X).labels_
    assert adjusted_rand_score(y, labels) == 1
    # Every named kernel should produce one label per sample.
    X = check_random_state(10).rand(10, 5) * 10
    kernels_available = kernel_metrics()
    for kern in kernels_available:
        # Additive chi^2 gives a negative similarity matrix which
        # doesn't make sense for spectral clustering
        if kern != "additive_chi2":
            sp = SpectralClustering(n_clusters=2, affinity=kern, random_state=0)
            labels = sp.fit(X).labels_
            assert (X.shape[0],) == labels.shape
    # A callable affinity is accepted as well.
    sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1, random_state=0)
    labels = sp.fit(X).labels_
    assert (X.shape[0],) == labels.shape
    def histogram(x, y, **kwargs):
        # Histogram kernel implemented as a callable.
        assert kwargs == {}  # no kernel_params that we didn't ask for
        return np.minimum(x, y).sum()
    sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0)
    labels = sp.fit(X).labels_
    assert (X.shape[0],) == labels.shape
def test_cluster_qr(global_random_seed):
    # cluster_qr by itself should not be used for clustering generic data
    # other than the rows of the eigenvectors within spectral clustering,
    # but cluster_qr must still preserve the labels for different dtypes
    # of the generic fixed input even if the labels may be meaningless.
    rng = np.random.RandomState(seed=global_random_seed)
    n_samples, n_components = 10, 5
    data = rng.randn(n_samples, n_components)

    labels_double = cluster_qr(data.astype(np.float64))
    # One cluster identifier per sample...
    assert labels_double.shape == (n_samples,)
    # ...and every component is used in the assignment.
    assert np.array_equal(np.unique(labels_double), np.arange(n_components))

    # Single-precision input must yield identical assignments.
    labels_single = cluster_qr(data.astype(np.float32))
    assert np.array_equal(labels_double, labels_single)
def test_cluster_qr_permutation_invariance(global_random_seed):
    # Permuting the samples must permute the labels in exactly the same way.
    rng = np.random.RandomState(seed=global_random_seed)
    n_samples, n_components = 100, 5
    data = rng.randn(n_samples, n_components)
    perm = rng.permutation(n_samples)
    assert np.array_equal(cluster_qr(data)[perm], cluster_qr(data[perm]))
@pytest.mark.parametrize("coo_container", COO_CONTAINERS)
@pytest.mark.parametrize("n_samples", [50, 100, 150, 500])
def test_discretize(n_samples, coo_container, global_random_seed):
    # Test the discretize using a noise assignment matrix
    random_state = np.random.RandomState(seed=global_random_seed)
    for n_class in range(2, 10):
        # random class labels
        y_true = random_state.randint(0, n_class + 1, n_samples)
        y_true = np.array(y_true, float)
        # noise class assignment matrix
        y_indicator = coo_container(
            (np.ones(n_samples), (np.arange(n_samples), y_true)),
            shape=(n_samples, n_class + 1),
        )
        # Perturb the one-hot indicator with Gaussian noise before discretizing.
        y_true_noisy = y_indicator.toarray() + 0.1 * random_state.randn(
            n_samples, n_class + 1
        )
        y_pred = discretize(y_true_noisy, random_state=random_state)
        # Recovered labels must agree strongly with the originals.
        assert adjusted_rand_score(y_true, y_pred) > 0.8
def test_spectral_clustering_with_arpack_amg_solvers(global_random_seed):
    # Test that spectral_clustering is the same for arpack and amg solver
    # Based on toy example from plot_segmentation_toy.py
    # a small two coin image
    x, y = np.indices((40, 40))
    center1, center2 = (14, 12), (20, 25)
    radius1, radius2 = 8, 7
    circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1**2
    circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2**2
    circles = circle1 | circle2
    mask = circles.copy()
    img = circles.astype(float)
    # Graph of pixel-to-pixel gradient connections, weighted by similarity.
    graph = img_to_graph(img, mask=mask)
    graph.data = np.exp(-graph.data / graph.data.std())
    labels_arpack = spectral_clustering(
        graph, n_clusters=2, eigen_solver="arpack", random_state=global_random_seed
    )
    assert len(np.unique(labels_arpack)) == 2
    if amg_loaded:
        labels_amg = spectral_clustering(
            graph, n_clusters=2, eigen_solver="amg", random_state=global_random_seed
        )
        assert adjusted_rand_score(labels_arpack, labels_amg) == 1
    else:
        # Without pyamg installed, requesting the amg solver must raise.
        with pytest.raises(ValueError):
            spectral_clustering(graph, n_clusters=2, eigen_solver="amg", random_state=0)
def test_n_components(global_random_seed):
    """Check that `n_components` defaults to `n_clusters` and affects results."""
    # Test that after adding n_components, result is different and
    # n_components = n_clusters by default
    X, y = make_blobs(
        n_samples=20,
        random_state=global_random_seed,
        centers=[[1, 1], [-1, -1]],
        cluster_std=0.01,
    )
    sp = SpectralClustering(n_clusters=2, random_state=global_random_seed)
    labels = sp.fit(X).labels_
    # set n_components = n_cluster and test if result is the same
    labels_same_ncomp = (
        SpectralClustering(
            n_clusters=2, n_components=2, random_state=global_random_seed
        )
        .fit(X)
        .labels_
    )
    # test that n_components=n_clusters by default
    assert_array_equal(labels, labels_same_ncomp)
    # test that n_components affect result
    # n_clusters=8 by default, and set n_components=2
    labels_diff_ncomp = (
        SpectralClustering(n_components=2, random_state=global_random_seed)
        .fit(X)
        .labels_
    )
    assert not np.array_equal(labels, labels_diff_ncomp)
@pytest.mark.parametrize("assign_labels", ("kmeans", "discretize", "cluster_qr"))
def test_verbose(assign_labels, capsys):
    # Check verbose mode of SpectralClustering for better coverage.
    X, y = make_blobs(
        n_samples=20, random_state=0, centers=[[1, 1], [-1, -1]], cluster_std=0.01
    )
    # Bug fix: `assign_labels` was parametrized but never forwarded to the
    # estimator, so only the default ("kmeans") strategy was ever exercised.
    SpectralClustering(
        n_clusters=2, random_state=42, verbose=1, assign_labels=assign_labels
    ).fit(X)
    captured = capsys.readouterr()
    assert re.search(r"Computing label assignment using", captured.out)
    # The KMeans-specific progress lines only appear for that strategy.
    if assign_labels == "kmeans":
        assert re.search(r"Initialization complete", captured.out)
        assert re.search(r"Iteration [0-9]+, inertia", captured.out)
def test_spectral_clustering_np_matrix_raises():
    """Check that spectral_clustering raises an informative error when passed
    an np.matrix. See #10993"""
    affinity = np.matrix([[0.0, 2.0], [2.0, 0.0]])
    expected = r"np\.matrix is not supported. Please convert to a numpy array"
    with pytest.raises(TypeError, match=expected):
        spectral_clustering(affinity)
def test_spectral_clustering_not_infinite_loop(capsys, monkeypatch):
    """Check that discretize raises LinAlgError when svd never converges.

    Non-regression test for #21380
    """

    def failing_svd(*args, **kwargs):
        raise LinAlgError()

    # Force every SVD attempt inside discretize to fail.
    monkeypatch.setattr(np.linalg, "svd", failing_svd)
    vectors = np.ones((10, 4))
    with pytest.raises(LinAlgError, match="SVD did not converge"):
        discretize(vectors)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/cluster/tests/test_birch.py | sklearn/cluster/tests/test_birch.py | """
Tests for the birch clustering algorithm.
"""
import numpy as np
import pytest
from sklearn.cluster import AgglomerativeClustering, Birch
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.datasets import make_blobs
from sklearn.exceptions import ConvergenceWarning
from sklearn.metrics import pairwise_distances_argmin, v_measure_score
from sklearn.utils._testing import assert_allclose, assert_array_equal
from sklearn.utils.fixes import CSR_CONTAINERS
def test_n_samples_leaves_roots(global_random_seed, global_dtype):
    # Sanity check: root and leaf subclusters must account for every sample.
    X, y = make_blobs(n_samples=10, random_state=global_random_seed)
    X = X.astype(global_dtype, copy=False)
    brc = Birch()
    brc.fit(X)
    n_samples_root = sum(sc.n_samples_ for sc in brc.root_.subclusters_)
    n_samples_leaves = sum(
        sc.n_samples_ for leaf in brc._get_leaves() for sc in leaf.subclusters_
    )
    assert n_samples_leaves == X.shape[0]
    assert n_samples_root == X.shape[0]
def test_partial_fit(global_random_seed, global_dtype):
    # Test that fit is equivalent to calling partial_fit multiple times
    X, y = make_blobs(n_samples=100, random_state=global_random_seed)
    X = X.astype(global_dtype, copy=False)
    brc = Birch(n_clusters=3)
    brc.fit(X)
    # Feed the same data in two halves through partial_fit.
    brc_partial = Birch(n_clusters=None)
    brc_partial.partial_fit(X[:50])
    brc_partial.partial_fit(X[50:])
    assert_allclose(brc_partial.subcluster_centers_, brc.subcluster_centers_)
    # Test that same global labels are obtained after calling partial_fit
    # with None
    brc_partial.set_params(n_clusters=3)
    # NOTE(review): partial_fit(None) appears to redo only the global
    # clustering step — confirmed by the label comparison below.
    brc_partial.partial_fit(None)
    assert_array_equal(brc_partial.subcluster_labels_, brc.subcluster_labels_)
def test_birch_predict(global_random_seed, global_dtype):
    # Test the predict method predicts the nearest centroid.
    rng = np.random.RandomState(global_random_seed)
    X = generate_clustered_data(n_clusters=3, n_features=3, n_samples_per_cluster=10)
    X = X.astype(global_dtype, copy=False)
    # n_samples * n_samples_per_cluster
    shuffle_indices = np.arange(30)
    rng.shuffle(shuffle_indices)
    X_shuffle = X[shuffle_indices, :]
    brc = Birch(n_clusters=4, threshold=1.0)
    brc.fit(X_shuffle)
    # Birch must preserve inputs' dtype
    assert brc.subcluster_centers_.dtype == global_dtype
    assert_array_equal(brc.labels_, brc.predict(X_shuffle))
    centroids = brc.subcluster_centers_
    # Labels from fit must match a nearest-centroid assignment.
    nearest_centroid = brc.subcluster_labels_[
        pairwise_distances_argmin(X_shuffle, centroids)
    ]
    assert_allclose(v_measure_score(nearest_centroid, brc.labels_), 1.0)
def test_n_clusters(global_random_seed, global_dtype):
    # Check the behaviour of the n_clusters parameter.
    X, y = make_blobs(n_samples=100, centers=10, random_state=global_random_seed)
    X = X.astype(global_dtype, copy=False)

    brc1 = Birch(n_clusters=10)
    brc1.fit(X)
    assert len(brc1.subcluster_centers_) > 10
    assert len(np.unique(brc1.labels_)) == 10

    # Passing an AgglomerativeClustering estimator with the same number of
    # clusters must give identical results.
    gc = AgglomerativeClustering(n_clusters=10)
    brc2 = Birch(n_clusters=gc)
    brc2.fit(X)
    assert_array_equal(brc1.subcluster_labels_, brc2.subcluster_labels_)
    assert_array_equal(brc1.labels_, brc2.labels_)

    # Too few subclusters must trigger a convergence warning.
    brc4 = Birch(threshold=10000.0)
    with pytest.warns(ConvergenceWarning):
        brc4.fit(X)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_sparse_X(global_random_seed, global_dtype, csr_container):
    # Fitting on sparse input must reproduce the dense result exactly.
    X, y = make_blobs(n_samples=100, centers=10, random_state=global_random_seed)
    X = X.astype(global_dtype, copy=False)

    brc_dense = Birch(n_clusters=10)
    brc_dense.fit(X)

    brc_sparse = Birch(n_clusters=10)
    brc_sparse.fit(csr_container(X))

    # Birch must preserve inputs' dtype
    assert brc_sparse.subcluster_centers_.dtype == global_dtype
    assert_array_equal(brc_dense.labels_, brc_sparse.labels_)
    assert_allclose(brc_dense.subcluster_centers_, brc_sparse.subcluster_centers_)
def test_partial_fit_second_call_error_checks():
    # A second partial_fit with an inconsistent n_features must fail.
    X, y = make_blobs(n_samples=100)
    brc = Birch(n_clusters=3)
    brc.partial_fit(X, y)
    expected = "X has 1 features, but Birch is expecting 2 features"
    with pytest.raises(ValueError, match=expected):
        brc.partial_fit(X[:, [0]], y)
def check_branching_factor(node, branching_factor):
    # Recursively assert that no node holds more than `branching_factor`
    # subclusters anywhere in the tree rooted at `node`.
    children = node.subclusters_
    assert len(children) <= branching_factor
    for subcluster in children:
        child_node = subcluster.child_
        if child_node:
            check_branching_factor(child_node, branching_factor)
def test_branching_factor(global_random_seed, global_dtype):
    # No node may hold more than `branching_factor` subclusters.
    X, y = make_blobs(random_state=global_random_seed)
    X = X.astype(global_dtype, copy=False)
    branching_factor = 9
    # A deliberately low threshold maximizes the number of subclusters.
    for n_clusters in (None, 3):
        brc = Birch(
            n_clusters=n_clusters,
            branching_factor=branching_factor,
            threshold=0.01,
        )
        brc.fit(X)
        check_branching_factor(brc.root_, branching_factor)
def check_threshold(birch_instance, threshold):
    """Walk the linked list of leaves and assert every subcluster radius is
    bounded by `threshold`."""
    leaf = birch_instance.dummy_leaf_.next_leaf_
    while leaf:
        for subcluster in leaf.subclusters_:
            assert subcluster.radius <= threshold
        leaf = leaf.next_leaf_
def test_threshold(global_random_seed, global_dtype):
    """Leaf subclusters must have a radius bounded by the chosen threshold."""
    X, y = make_blobs(n_samples=80, centers=4, random_state=global_random_seed)
    X = X.astype(global_dtype, copy=False)

    for threshold in (0.5, 5.0):
        brc = Birch(threshold=threshold, n_clusters=None).fit(X)
        check_threshold(brc, threshold)
def test_birch_n_clusters_long_int():
    """Birch must accept ``n_clusters`` given as a NumPy integer (#16484)."""
    X, _ = make_blobs(random_state=0)
    # e.g. a value coming out of ``np.arange``
    Birch(n_clusters=np.int64(5)).fit(X)
def test_feature_names_out():
    """``get_feature_names_out`` must name one output per subcluster center."""
    X, _ = make_blobs(n_samples=80, n_features=4, random_state=0)
    brc = Birch(n_clusters=4).fit(X)

    n_centers = brc.subcluster_centers_.shape[0]
    expected_names = [f"birch{i}" for i in range(n_centers)]
    assert_array_equal(expected_names, brc.get_feature_names_out())
def test_transform_match_across_dtypes(global_random_seed):
    """float64 and float32 inputs must transform to (nearly) the same output."""
    X, _ = make_blobs(n_samples=80, n_features=4, random_state=global_random_seed)
    brc = Birch(n_clusters=4, threshold=1.1)

    Xt_double = brc.fit_transform(X)
    Xt_single = brc.fit_transform(X.astype(np.float32))

    assert_allclose(Xt_double, Xt_single, atol=1e-6)
def test_subcluster_dtype(global_dtype):
    """Subcluster centers must keep the dtype of the training data."""
    X, _ = make_blobs(n_samples=80, n_features=4, random_state=0)
    X = X.astype(global_dtype, copy=False)

    brc = Birch(n_clusters=4).fit(X)
    assert brc.subcluster_centers_.dtype == global_dtype
def test_both_subclusters_updated():
    """Check that both subclusters are updated when a node is split, even when
    there are duplicated data points. Non-regression test for #23269.
    """
    # Small float32 dataset containing many exact duplicates; combined with a
    # tiny threshold and low branching factor this forces node splits on
    # duplicated points, which used to crash (see #23269).
    X = np.array(
        [
            [-2.6192791, -1.5053215],
            [-2.9993038, -1.6863596],
            [-2.3724914, -1.3438171],
            [-2.336792, -1.3417323],
            [-2.4089134, -1.3290224],
            [-2.3724914, -1.3438171],
            [-3.364009, -1.8846745],
            [-2.3724914, -1.3438171],
            [-2.617677, -1.5003285],
            [-2.2960556, -1.3260119],
            [-2.3724914, -1.3438171],
            [-2.5459878, -1.4533926],
            [-2.25979, -1.3003055],
            [-2.4089134, -1.3290224],
            [-2.3724914, -1.3438171],
            [-2.4089134, -1.3290224],
            [-2.5459878, -1.4533926],
            [-2.3724914, -1.3438171],
            [-2.9720619, -1.7058647],
            [-2.336792, -1.3417323],
            [-2.3724914, -1.3438171],
        ],
        dtype=np.float32,
    )
    # The fit must simply complete without error.
    Birch(branching_factor=5, threshold=1e-5, n_clusters=None).fit(X)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/cluster/tests/test_k_means.py | sklearn/cluster/tests/test_k_means.py | """Testing for K-means"""
import re
import sys
from io import StringIO
import numpy as np
import pytest
from scipy import sparse as sp
from threadpoolctl import threadpool_info
from sklearn.base import clone
from sklearn.cluster import KMeans, MiniBatchKMeans, k_means, kmeans_plusplus
from sklearn.cluster._k_means_common import (
_euclidean_dense_dense_wrapper,
_euclidean_sparse_dense_wrapper,
_inertia_dense,
_inertia_sparse,
_is_same_clustering,
_relocate_empty_clusters_dense,
_relocate_empty_clusters_sparse,
)
from sklearn.cluster._kmeans import _labels_inertia, _mini_batch_step
from sklearn.datasets import make_blobs
from sklearn.exceptions import ConvergenceWarning
from sklearn.metrics import pairwise_distances, pairwise_distances_argmin
from sklearn.metrics.cluster import v_measure_score
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.utils._testing import (
assert_allclose,
assert_array_equal,
create_memmap_backed_data,
)
from sklearn.utils.extmath import row_norms
from sklearn.utils.fixes import CSR_CONTAINERS
from sklearn.utils.parallel import _get_threadpool_controller
# Non-centered, sparse centers (many exact zeros) used to generate the shared
# test dataset; the sparsity exercises the sparse code paths.
centers = np.array(
    [
        [0.0, 5.0, 0.0, 0.0, 0.0],
        [1.0, 1.0, 4.0, 0.0, 0.0],
        [1.0, 0.0, 0.0, 5.0, 1.0],
    ]
)
n_samples = 100
n_clusters, n_features = centers.shape
# Shared blobs dataset drawn around ``centers``; reused by most tests below.
X, true_labels = make_blobs(
    n_samples=n_samples, centers=centers, cluster_std=1.0, random_state=42
)
# The same data wrapped in every available CSR container (sparse matrix and,
# for recent scipy versions, sparse array).
X_as_any_csr = [container(X) for container in CSR_CONTAINERS]
data_containers = [np.array] + CSR_CONTAINERS
# Human-readable ids for parametrized tests; scipy may provide one or two CSR
# containers, hence the conditional.
data_containers_ids = (
    ["dense", "sparse_matrix", "sparse_array"]
    if len(X_as_any_csr) == 2
    else ["dense", "sparse_matrix"]
)
@pytest.mark.parametrize("array_constr", data_containers, ids=data_containers_ids)
@pytest.mark.parametrize("algo", ["lloyd", "elkan"])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_kmeans_results(array_constr, algo, dtype):
    """Compare KMeans on a tiny weighted dataset with hand-computed results."""
    X = array_constr([[0, 0], [0.5, 0], [0.5, 1], [1, 1]], dtype=dtype)
    sample_weight = [3, 1, 1, 3]
    init_centers = np.array([[0, 0], [1, 1]], dtype=dtype)

    km = KMeans(n_clusters=2, n_init=1, init=init_centers, algorithm=algo)
    km.fit(X, sample_weight=sample_weight)

    # Expected values computed by hand for this weighted toy problem.
    assert_array_equal(km.labels_, [0, 0, 1, 1])
    assert_allclose(km.inertia_, 0.375)
    assert_allclose(
        km.cluster_centers_, np.array([[0.125, 0], [0.875, 1]], dtype=dtype)
    )
    assert km.n_iter_ == 2
@pytest.mark.parametrize("array_constr", data_containers, ids=data_containers_ids)
@pytest.mark.parametrize("algo", ["lloyd", "elkan"])
def test_kmeans_relocated_clusters(array_constr, algo):
    """Empty clusters must be relocated onto far-away points."""
    X = array_constr([[0, 0], [0.5, 0], [0.5, 1], [1, 1]])
    # The second initial center is so far from the data that its cluster is
    # empty after the first iteration.
    init_centers = np.array([[0.5, 0.5], [3, 3]])

    kmeans = KMeans(n_clusters=2, n_init=1, init=init_centers, algorithm=algo)
    kmeans.fit(X)

    assert_allclose(kmeans.inertia_, 0.25)
    assert kmeans.n_iter_ == 3

    # Two relocations are acceptable here; which one is produced depends on
    # how the argpartition strategy breaks ties, so both outputs are accepted.
    try:
        assert_array_equal(kmeans.labels_, [0, 0, 1, 1])
        assert_allclose(kmeans.cluster_centers_, [[0.25, 0], [0.75, 1]])
    except AssertionError:
        assert_array_equal(kmeans.labels_, [1, 1, 0, 0])
        assert_allclose(kmeans.cluster_centers_, [[0.75, 1.0], [0.25, 0.0]])
@pytest.mark.parametrize("array_constr", data_containers, ids=data_containers_ids)
def test_relocate_empty_clusters(array_constr):
    """Exercise the ``_relocate_empty_clusters_(dense|sparse)`` helpers."""
    # Synthetic 1d dataset with 3 obvious clusters of different sizes.
    X = array_constr(
        np.array([-10.0, -9.5, -9, -8.5, -8, -1, 1, 9, 9.5, 10]).reshape(-1, 1)
    )
    sample_weight = np.ones(10)

    # All centers start on the first point, so every sample gets assigned to
    # the first center and the two other clusters are empty.
    centers_old = np.array([-10.0, -10, -10]).reshape(-1, 1)
    # centers_new holds the weighted sum of the assigned points when a cluster
    # is non-empty, and the previous center otherwise.
    centers_new = np.array([-16.5, -10, -10]).reshape(-1, 1)
    weight_in_clusters = np.array([10.0, 0, 0])
    labels = np.zeros(10, dtype=np.int32)

    if array_constr is np.array:
        _relocate_empty_clusters_dense(
            X, sample_weight, centers_old, centers_new, weight_in_clusters, labels
        )
    else:
        _relocate_empty_clusters_sparse(
            X.data,
            X.indices,
            X.indptr,
            sample_weight,
            centers_old,
            centers_new,
            weight_in_clusters,
            labels,
        )

    # The two points farthest from the first center (10 and 9.5) are moved to
    # the empty clusters; the first center keeps the remaining 8 points.
    assert_array_equal(weight_in_clusters, [8, 1, 1])
    assert_allclose(centers_new, [[-36], [10], [9.5]])
@pytest.mark.parametrize("distribution", ["normal", "blobs"])
@pytest.mark.parametrize("array_constr", data_containers, ids=data_containers_ids)
@pytest.mark.parametrize("tol", [1e-2, 1e-8, 1e-100, 0])
def test_kmeans_elkan_results(distribution, array_constr, tol, global_random_seed):
    """The Elkan and Lloyd algorithms must produce identical models."""
    rng = np.random.RandomState(global_random_seed)
    if distribution == "normal":
        X = rng.normal(size=(5000, 10))
    else:
        X, _ = make_blobs(random_state=rng)
    # Clip negative values so the data can live in a sparse container.
    X[X < 0] = 0
    X = array_constr(X)

    shared_params = dict(
        n_clusters=5, random_state=global_random_seed, n_init=1, tol=tol
    )
    km_lloyd = KMeans(**shared_params)
    km_elkan = KMeans(algorithm="elkan", **shared_params)

    km_lloyd.fit(X)
    km_elkan.fit(X)

    assert_allclose(km_elkan.cluster_centers_, km_lloyd.cluster_centers_)
    assert_array_equal(km_elkan.labels_, km_lloyd.labels_)
    assert km_elkan.n_iter_ == km_lloyd.n_iter_
    assert km_elkan.inertia_ == pytest.approx(km_lloyd.inertia_, rel=1e-6)
@pytest.mark.parametrize("algorithm", ["lloyd", "elkan"])
def test_kmeans_convergence(algorithm, global_random_seed):
    """With tol=0, KMeans must stop once strict convergence is reached (#16075)."""
    rng = np.random.RandomState(global_random_seed)
    X = rng.normal(size=(5000, 10))
    max_iter = 300

    km = KMeans(
        algorithm=algorithm,
        n_clusters=5,
        random_state=global_random_seed,
        n_init=1,
        tol=0,
        max_iter=max_iter,
    ).fit(X)

    # Convergence must be detected well before the iteration budget runs out.
    assert km.n_iter_ < max_iter
@pytest.mark.parametrize("X_csr", X_as_any_csr)
def test_minibatch_update_consistency(X_csr, global_random_seed):
    """Dense and sparse minibatch updates must give identical results.

    Runs one ``_mini_batch_step`` on the same small batch, once with dense
    data and once with its CSR counterpart, and checks that labels, updated
    centers and inertia all agree — and that the step reduced the batch
    inertia in both cases.
    """
    rng = np.random.RandomState(global_random_seed)
    # Perturbed starting centers, shared (via a copy) by both code paths.
    centers_old = centers + rng.normal(size=centers.shape)
    centers_old_csr = centers_old.copy()
    centers_new = np.zeros_like(centers_old)
    centers_new_csr = np.zeros_like(centers_old_csr)
    weight_sums = np.zeros(centers_old.shape[0], dtype=X.dtype)
    weight_sums_csr = np.zeros(centers_old.shape[0], dtype=X.dtype)
    sample_weight = np.ones(X.shape[0], dtype=X.dtype)
    # extract a small minibatch
    X_mb = X[:10]
    X_mb_csr = X_csr[:10]
    sample_weight_mb = sample_weight[:10]
    # step 1: compute the dense minibatch update
    old_inertia = _mini_batch_step(
        X_mb,
        sample_weight_mb,
        centers_old,
        centers_new,
        weight_sums,
        # fresh identically-seeded RNG so both paths see the same randomness
        np.random.RandomState(global_random_seed),
        random_reassign=False,
    )
    assert old_inertia > 0.0
    # compute the new inertia on the same batch to check that it decreased
    labels, new_inertia = _labels_inertia(X_mb, sample_weight_mb, centers_new)
    assert new_inertia > 0.0
    assert new_inertia < old_inertia
    # step 2: compute the sparse minibatch update
    old_inertia_csr = _mini_batch_step(
        X_mb_csr,
        sample_weight_mb,
        centers_old_csr,
        centers_new_csr,
        weight_sums_csr,
        np.random.RandomState(global_random_seed),
        random_reassign=False,
    )
    assert old_inertia_csr > 0.0
    # compute the new inertia on the same batch to check that it decreased
    labels_csr, new_inertia_csr = _labels_inertia(
        X_mb_csr, sample_weight_mb, centers_new_csr
    )
    assert new_inertia_csr > 0.0
    assert new_inertia_csr < old_inertia_csr
    # step 3: check that sparse and dense updates lead to the same results
    assert_array_equal(labels, labels_csr)
    assert_allclose(centers_new, centers_new_csr)
    assert_allclose(old_inertia, old_inertia_csr)
    assert_allclose(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
    """Assert that ``km`` was correctly fit on the shared blobs dataset."""
    # The expected numbers of centers and of distinct labels must be found.
    assert km.cluster_centers_.shape == (n_clusters, n_features)
    assert np.unique(km.labels_).shape[0] == n_clusters

    # The label assignment must be perfect up to a permutation.
    assert_allclose(v_measure_score(true_labels, km.labels_), 1.0)
    assert km.inertia_ > 0.0
@pytest.mark.parametrize(
    "input_data",
    [X] + X_as_any_csr,
    ids=data_containers_ids,
)
@pytest.mark.parametrize(
    "init",
    ["random", "k-means++", centers.copy(), lambda X, k, random_state: centers.copy()],
    ids=["random", "k-means++", "ndarray", "callable"],
)
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_all_init(Estimator, input_data, init):
    """KMeans and MiniBatchKMeans must work with every supported init."""
    # Array-like and callable inits only allow a single initialization.
    n_init = 1 if not isinstance(init, str) else 10
    est = Estimator(
        init=init, n_clusters=n_clusters, random_state=42, n_init=n_init
    ).fit(input_data)
    _check_fitted_model(est)
@pytest.mark.parametrize(
    "init",
    ["random", "k-means++", centers, lambda X, k, random_state: centers.copy()],
    ids=["random", "k-means++", "ndarray", "callable"],
)
def test_minibatch_kmeans_partial_fit_init(init):
    """``partial_fit`` must converge with every supported init strategy."""
    if hasattr(init, "copy"):
        # Work on a copy so the shared module-level array is never mutated.
        init = init.copy()

    n_init = 10 if isinstance(init, str) else 1
    km = MiniBatchKMeans(
        init=init, n_clusters=n_clusters, random_state=0, n_init=n_init
    )
    # "random" init needs many batches before recovering the true labels.
    for _ in range(100):
        km.partial_fit(X)
    _check_fitted_model(km)
@pytest.mark.parametrize(
    "init, expected_n_init",
    [
        ("k-means++", 1),
        ("random", "default"),
        (
            lambda X, n_clusters, random_state: random_state.uniform(
                size=(n_clusters, X.shape[1])
            ),
            "default",
        ),
        ("array-like", 1),
    ],
)
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_kmeans_init_auto_with_initial_centroids(Estimator, init, expected_n_init):
    """Check that `n_init="auto"` chooses the right number of initializations.

    Non-regression test for #26657:
    https://github.com/scikit-learn/scikit-learn/pull/26657
    """
    n_sample, n_features, n_clusters = 100, 10, 5
    X = np.random.randn(n_sample, n_features)

    # "default" stands for the estimator-specific default number of inits.
    if expected_n_init == "default":
        expected_n_init = 3 if Estimator is MiniBatchKMeans else 10

    if init == "array-like":
        init = np.random.randn(n_clusters, n_features)

    kmeans = Estimator(n_clusters=n_clusters, init=init, n_init="auto").fit(X)
    assert kmeans._n_init == expected_n_init
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_fortran_aligned_data(Estimator, global_random_seed):
    """Fortran-ordered data must give the same model as C-ordered data."""
    X_fortran = np.asfortranarray(X)
    centers_fortran = np.asfortranarray(centers)

    km_c_order = Estimator(
        n_clusters=n_clusters, init=centers, n_init=1, random_state=global_random_seed
    ).fit(X)
    km_f_order = Estimator(
        n_clusters=n_clusters,
        init=centers_fortran,
        n_init=1,
        random_state=global_random_seed,
    ).fit(X_fortran)

    assert_allclose(km_c_order.cluster_centers_, km_f_order.cluster_centers_)
    assert_array_equal(km_c_order.labels_, km_f_order.labels_)
def test_minibatch_kmeans_verbose():
    """Smoke-test MiniBatchKMeans in verbose mode for coverage.

    Capture stdout so the verbose output does not pollute the test log. Using
    ``contextlib.redirect_stdout`` instead of manually swapping ``sys.stdout``
    guarantees stdout is restored even if ``fit`` raises (the manual version
    needed an explicit try/finally to be exception-safe).
    """
    from contextlib import redirect_stdout  # stdlib; only needed by this test

    km = MiniBatchKMeans(n_clusters=n_clusters, random_state=42, verbose=1)
    with redirect_stdout(StringIO()):
        km.fit(X)
@pytest.mark.parametrize("algorithm", ["lloyd", "elkan"])
@pytest.mark.parametrize("tol", [1e-2, 0])
def test_kmeans_verbose(algorithm, tol, capsys):
    """Verbose mode of KMeans must report the expected progress messages."""
    X = np.random.RandomState(0).normal(size=(5000, 10))

    KMeans(
        algorithm=algorithm,
        n_clusters=n_clusters,
        random_state=42,
        init="random",
        n_init=1,
        tol=tol,
        verbose=1,
    ).fit(X)

    out = capsys.readouterr().out
    assert re.search(r"Initialization complete", out)
    assert re.search(r"Iteration [0-9]+, inertia", out)
    # tol=0 triggers the strict (labels-based) convergence criterion.
    if tol == 0:
        assert re.search(r"strict convergence", out)
    else:
        assert re.search(r"center shift .* within tolerance", out)
def test_minibatch_kmeans_warning_init_size():
    """A too-small ``init_size`` must trigger a RuntimeWarning."""
    expected = r"init_size.* should be larger than n_clusters"
    with pytest.warns(RuntimeWarning, match=expected):
        MiniBatchKMeans(init_size=10, n_clusters=20).fit(X)
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_warning_n_init_precomputed_centers(Estimator):
    """Passing explicit centers with n_init > 1 must warn about a single init."""
    expected = "Explicit initial center position passed: performing only one init"
    with pytest.warns(RuntimeWarning, match=expected):
        Estimator(init=centers, n_clusters=n_clusters, n_init=10).fit(X)
def test_minibatch_sensible_reassign(global_random_seed):
    """Identical (zeroed) initial clusters must be reassigned.

    Also a regression test for the case where more reassignments are desired
    than there are samples.
    """
    zeroed_X, true_labels = make_blobs(
        n_samples=100, centers=5, random_state=global_random_seed
    )
    zeroed_X[::2, :] = 0

    def assert_few_zero_centers(km):
        # There should not remain many exact-zero cluster centers.
        num_non_zero_clusters = km.cluster_centers_.any(axis=1).sum()
        assert num_non_zero_clusters > 9, f"{num_non_zero_clusters=} is too small"

    # batch_size smaller than n_samples
    km = MiniBatchKMeans(
        n_clusters=20, batch_size=10, random_state=global_random_seed, init="random"
    ).fit(zeroed_X)
    assert_few_zero_centers(km)

    # batch_size larger than n_samples (regression test)
    km = MiniBatchKMeans(
        n_clusters=20, batch_size=200, random_state=global_random_seed, init="random"
    ).fit(zeroed_X)
    assert_few_zero_centers(km)

    # same check through the partial_fit API
    km = MiniBatchKMeans(n_clusters=20, random_state=global_random_seed, init="random")
    for _ in range(100):
        km.partial_fit(zeroed_X)
    assert_few_zero_centers(km)
@pytest.mark.parametrize(
    "input_data",
    [X] + X_as_any_csr,
    ids=data_containers_ids,
)
def test_minibatch_reassign(input_data, global_random_seed):
    """Check the reassignment part of ``_mini_batch_step``.

    Starting from perfect centers, a very high ``reassignment_ratio`` must
    move many centers (degrading the model) while a very low ratio must move
    none.
    """
    # Perfect centers: per-class means of the shared dataset.
    perfect_centers = np.empty((n_clusters, n_features))
    for i in range(n_clusters):
        perfect_centers[i] = X[true_labels == i].mean(axis=0)
    sample_weight = np.ones(n_samples)
    centers_new = np.empty_like(perfect_centers)
    # Give a perfect initialization, but a large reassignment_ratio, as a
    # result many centers should be reassigned and the model should no longer
    # be good
    score_before = -_labels_inertia(input_data, sample_weight, perfect_centers, 1)[1]
    _mini_batch_step(
        input_data,
        sample_weight,
        perfect_centers,
        centers_new,
        np.zeros(n_clusters),
        np.random.RandomState(global_random_seed),
        random_reassign=True,
        reassignment_ratio=1,
    )
    score_after = -_labels_inertia(input_data, sample_weight, centers_new, 1)[1]
    assert score_before > score_after
    # Give a perfect initialization, with a small reassignment_ratio,
    # no center should be reassigned.
    _mini_batch_step(
        input_data,
        sample_weight,
        perfect_centers,
        centers_new,
        np.zeros(n_clusters),
        np.random.RandomState(global_random_seed),
        random_reassign=True,
        reassignment_ratio=1e-15,
    )
    assert_allclose(centers_new, perfect_centers)
def test_minibatch_with_many_reassignments():
    """Reassignment must not fail when the number of clusters to reassign is
    bigger than the batch size.

    With 100 clusters and ``batch_size=10`` the number of clusters to
    reassign is always larger than the batch size, which used to be
    problematic.
    """
    MiniBatchKMeans(
        n_clusters=100,
        batch_size=10,
        init_size=n_samples,
        random_state=42,
        verbose=True,
    ).fit(X)
def test_minibatch_kmeans_init_size():
    """Check how MiniBatchKMeans resolves its internal ``_init_size``."""
    # By default: 3 * batch_size.
    km = MiniBatchKMeans(n_clusters=10, batch_size=5, n_init=1).fit(X)
    assert km._init_size == 15

    # When 3 * batch_size < n_clusters: 3 * n_clusters instead.
    km = MiniBatchKMeans(n_clusters=10, batch_size=1, n_init=1).fit(X)
    assert km._init_size == 30

    # And it is capped at n_samples.
    km = MiniBatchKMeans(
        n_clusters=10, batch_size=5, n_init=1, init_size=n_samples + 1
    ).fit(X)
    assert km._init_size == n_samples
@pytest.mark.parametrize("tol, max_no_improvement", [(1e-4, None), (0, 10)])
def test_minibatch_declared_convergence(capsys, tol, max_no_improvement):
    """Convergence must be detected from either small center change or from a
    lack of improvement of the EWA batch inertia."""
    X, _, centers = make_blobs(centers=3, random_state=0, return_centers=True)

    km = MiniBatchKMeans(
        n_clusters=3,
        init=centers,
        batch_size=20,
        tol=tol,
        random_state=0,
        max_iter=10,
        n_init=1,
        verbose=1,
        max_no_improvement=max_no_improvement,
    )
    km.fit(X)
    # Convergence must be declared strictly between the first and last epoch.
    assert 1 < km.n_iter_ < 10

    out = capsys.readouterr().out
    if max_no_improvement is None:
        assert "Converged (small centers change)" in out
    if tol == 0:
        assert "Converged (lack of improvement in inertia)" in out
def test_minibatch_iter_steps():
    """``n_iter_`` and ``n_steps_`` attributes must be mutually consistent."""
    batch_size = 30
    n_samples = X.shape[0]

    km = MiniBatchKMeans(n_clusters=3, batch_size=batch_size, random_state=0).fit(X)
    # n_iter_ counts the number of started epochs.
    assert km.n_iter_ == np.ceil((km.n_steps_ * batch_size) / n_samples)
    assert isinstance(km.n_iter_, int)

    # With every stopping condition disabled, max_iter must be exhausted.
    km = MiniBatchKMeans(
        n_clusters=3,
        batch_size=batch_size,
        random_state=0,
        tol=0,
        max_no_improvement=None,
        max_iter=10,
    ).fit(X)
    assert km.n_iter_ == 10
    assert km.n_steps_ == (10 * n_samples) // batch_size
    assert isinstance(km.n_steps_, int)
def test_kmeans_copyx():
    """With copy_x=False the input must be de-centered back after fit."""
    X_work = X.copy()
    km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
    km.fit(X_work)
    _check_fitted_model(km)

    # fit centered X_work in place; it must be (nearly) restored afterwards.
    assert_allclose(X_work, X)
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_score_max_iter(Estimator, global_random_seed):
    """Fitting with more iterations must yield a better (higher) score."""
    X = np.random.RandomState(global_random_seed).randn(100, 10)

    score_one_iter = (
        Estimator(n_init=1, random_state=global_random_seed, max_iter=1).fit(X).score(X)
    )
    score_ten_iter = (
        Estimator(n_init=1, random_state=global_random_seed, max_iter=10)
        .fit(X)
        .score(X)
    )
    assert score_ten_iter > score_one_iter
@pytest.mark.parametrize("array_constr", data_containers, ids=data_containers_ids)
@pytest.mark.parametrize(
    "Estimator, algorithm",
    [(KMeans, "lloyd"), (KMeans, "elkan"), (MiniBatchKMeans, None)],
)
@pytest.mark.parametrize("max_iter", [2, 100])
def test_kmeans_predict(
    Estimator, algorithm, array_constr, max_iter, global_dtype, global_random_seed
):
    """``predict`` must agree with ``labels_`` and with ``fit_predict``."""
    X, _ = make_blobs(
        n_samples=200, n_features=10, centers=10, random_state=global_random_seed
    )
    X = array_constr(X, dtype=global_dtype)

    km = Estimator(
        n_clusters=10,
        init="random",
        n_init=10,
        max_iter=max_iter,
        random_state=global_random_seed,
    )
    if algorithm is not None:
        km.set_params(algorithm=algorithm)
    km.fit(X)
    labels = km.labels_

    # Predicting on the training set must reproduce the training labels.
    assert_array_equal(km.predict(X), labels)

    # fit_predict must match fit followed by predict (same data, same seed).
    assert_array_equal(km.fit_predict(X), labels)

    # Each centroid must be predicted to its own label.
    assert_array_equal(km.predict(km.cluster_centers_), np.arange(10))
@pytest.mark.parametrize("X_csr", X_as_any_csr)
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_dense_sparse(Estimator, X_csr, global_random_seed):
    """Dense and sparse inputs must produce the same model."""
    sample_weight = np.random.RandomState(global_random_seed).random_sample(
        (n_samples,)
    )

    km_dense = Estimator(
        n_clusters=n_clusters, random_state=global_random_seed, n_init=1
    ).fit(X, sample_weight=sample_weight)
    km_sparse = Estimator(
        n_clusters=n_clusters, random_state=global_random_seed, n_init=1
    ).fit(X_csr, sample_weight=sample_weight)

    assert_array_equal(km_dense.labels_, km_sparse.labels_)
    assert_allclose(km_dense.cluster_centers_, km_sparse.cluster_centers_)
@pytest.mark.parametrize("X_csr", X_as_any_csr)
@pytest.mark.parametrize(
    "init", ["random", "k-means++", centers], ids=["random", "k-means++", "ndarray"]
)
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_predict_dense_sparse(Estimator, init, X_csr):
    """Models trained on sparse data must predict dense data and vice versa."""
    n_init = 10 if isinstance(init, str) else 1
    km = Estimator(n_clusters=n_clusters, init=init, n_init=n_init, random_state=0)

    # Fit sparse, predict dense.
    km.fit(X_csr)
    assert_array_equal(km.predict(X), km.labels_)

    # Fit dense, predict sparse.
    km.fit(X)
    assert_array_equal(km.predict(X_csr), km.labels_)
@pytest.mark.parametrize("array_constr", data_containers, ids=data_containers_ids)
@pytest.mark.parametrize("dtype", [np.int32, np.int64])
@pytest.mark.parametrize("init", ["k-means++", "ndarray"])
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_integer_input(Estimator, array_constr, dtype, init, global_random_seed):
    """Integer input must be accepted and converted internally to float64."""
    X_dense = np.array([[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]])
    X = array_constr(X_dense, dtype=dtype)

    n_init = 1 if init == "ndarray" else 10
    init = X_dense[:2] if init == "ndarray" else init

    km = Estimator(
        n_clusters=2, init=init, n_init=n_init, random_state=global_random_seed
    )
    if Estimator is MiniBatchKMeans:
        km.set_params(batch_size=2)
    km.fit(X)

    # Integer input must have been converted to float64 internally.
    assert km.cluster_centers_.dtype == np.float64

    # The clustering itself must still be correct (up to a permutation).
    expected_labels = [0, 1, 1, 0, 0, 1]
    assert_allclose(v_measure_score(km.labels_, expected_labels), 1.0)

    # Same dtype guarantee through partial_fit (#14314).
    if Estimator is MiniBatchKMeans:
        km = clone(km).partial_fit(X)
        assert km.cluster_centers_.dtype == np.float64
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_transform(Estimator, global_random_seed):
    """``transform`` must return the distances to the cluster centers."""
    km = Estimator(n_clusters=n_clusters, random_state=global_random_seed).fit(X)

    # Transforming the centers themselves gives the pairwise center
    # distances, with zeros on the diagonal.
    Xt_centers = km.transform(km.cluster_centers_)
    assert_allclose(Xt_centers, pairwise_distances(km.cluster_centers_))
    assert_array_equal(Xt_centers.diagonal(), np.zeros(n_clusters))

    # Transforming X gives the distances between X and the centers.
    assert_allclose(km.transform(X), pairwise_distances(X, km.cluster_centers_))
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_fit_transform(Estimator, global_random_seed):
    """``fit_transform`` must be equivalent to ``fit`` then ``transform``."""
    Xt_two_steps = (
        Estimator(random_state=global_random_seed, n_init=1).fit(X).transform(X)
    )
    Xt_one_step = Estimator(random_state=global_random_seed, n_init=1).fit_transform(X)
    assert_allclose(Xt_two_steps, Xt_one_step)
def test_n_init(global_random_seed):
    """More initializations must never degrade the retained inertia.

    ``max_iter=1`` prevents each run from reaching the global optimum, so
    keeping the best of more random inits can only keep or lower the final
    inertia.
    """
    previous_inertia = np.inf
    for n_init in [1, 5, 10]:
        # set max_iter=1 to avoid finding the global minimum and get the same
        # inertia each time
        km = KMeans(
            n_clusters=n_clusters,
            init="random",
            n_init=n_init,
            random_state=global_random_seed,
            max_iter=1,
        ).fit(X)
        assert km.inertia_ <= previous_inertia
        # Bug fix: track the last inertia. Without this update the assertion
        # above always compared against np.inf and was vacuous.
        previous_inertia = km.inertia_
def test_k_means_function(global_random_seed):
    """The functional ``k_means`` interface must behave like the estimator."""
    cluster_centers, labels, inertia = k_means(
        X, n_clusters=n_clusters, sample_weight=None, random_state=global_random_seed
    )

    assert cluster_centers.shape == (n_clusters, n_features)
    assert np.unique(labels).shape[0] == n_clusters
    # The label assignment must be perfect up to a permutation.
    assert_allclose(v_measure_score(true_labels, labels), 1.0)
    assert inertia > 0.0
@pytest.mark.parametrize(
    "input_data",
    [X] + X_as_any_csr,
    ids=data_containers_ids,
)
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
@pytest.mark.skipif(
    not any(i for i in threadpool_info() if i["user_api"] == "blas"),
    reason=(
        "Fails for some global_random_seed on Atlas which cannot be detected by "
        "threadpoolctl."
    ),
)
def test_float_precision(Estimator, input_data, global_random_seed):
    """Single- and double-precision fits must agree up to rounding error."""
    km = Estimator(n_init=1, random_state=global_random_seed)
    # Per-dtype results, collected in the loop and compared afterwards.
    inertia = {}
    Xt = {}
    centers = {}
    labels = {}
    for dtype in [np.float64, np.float32]:
        X = input_data.astype(dtype, copy=False)
        km.fit(X)
        inertia[dtype] = km.inertia_
        Xt[dtype] = km.transform(X)
        centers[dtype] = km.cluster_centers_
        labels[dtype] = km.labels_
        # dtype of cluster centers has to be the dtype of the input data
        assert km.cluster_centers_.dtype == dtype
        # same with partial_fit
        if Estimator is MiniBatchKMeans:
            km.partial_fit(X[0:3])
            assert km.cluster_centers_.dtype == dtype
    # compare arrays with low precision since the difference between 32 and
    # 64 bit comes from an accumulation of rounding errors.
    rtol = 1e-4
    assert_allclose(inertia[np.float32], inertia[np.float64], rtol=rtol)
    assert_allclose(Xt[np.float32], Xt[np.float64], atol=Xt[np.float64].max() * rtol)
    assert_allclose(
        centers[np.float32], centers[np.float64], atol=centers[np.float64].max() * rtol
    )
    assert_array_equal(labels[np.float32], labels[np.float64])
@pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32, np.float64])
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_centers_not_mutated(Estimator, dtype):
    """User-provided init centers must never be mutated in place.

    This must hold even when data and init centers share the same dtype, a
    case where the estimator could be tempted to skip the defensive copy.
    """
    X_cast = X.astype(dtype, copy=False)
    centers_cast = centers.astype(dtype, copy=False)

    km = Estimator(init=centers_cast, n_clusters=n_clusters, n_init=1)
    km.fit(X_cast)

    assert not np.may_share_memory(km.cluster_centers_, centers_cast)
@pytest.mark.parametrize(
    "input_data",
    [X] + X_as_any_csr,
    ids=data_containers_ids,
)
def test_kmeans_init_fitted_centers(input_data):
    """Refitting from an already-fitted solution must not move the centers.

    The first fit's centers are a local optimum, so a second fit starting
    there must leave them unchanged.
    """
    km_first = KMeans(n_clusters=n_clusters).fit(input_data)
    km_second = KMeans(
        n_clusters=n_clusters, init=km_first.cluster_centers_, n_init=1
    ).fit(input_data)
    assert_allclose(km_first.cluster_centers_, km_second.cluster_centers_)
def test_kmeans_warns_less_centers_than_unique_points(global_random_seed):
    """Fewer distinct clusters than requested must raise a ConvergenceWarning."""
    # Only 3 distinct points although 4 clusters are requested.
    X = np.asarray([[0, 0], [0, 1], [1, 0], [1, 0]])  # last point is duplicated
    km = KMeans(n_clusters=4, random_state=global_random_seed)

    msg = (
        r"Number of distinct clusters \(3\) found smaller than "
        r"n_clusters \(4\). Possibly due to duplicate points in X."
    )
    with pytest.warns(ConvergenceWarning, match=msg):
        km.fit(X)

    # Only the three distinct points can receive cluster assignments.
    assert set(km.labels_) == set(range(3))
def _sort_centers(center_array):
    """Sort each coordinate of the centers independently (column-wise).

    Used to compare sets of centers irrespective of the cluster ordering.
    """
    return np.sort(center_array, axis=0)
def test_weighted_vs_repeated(global_random_seed):
    """A weight of N on a sample must equal repeating that sample N times.

    Only valid with a precomputed init (otherwise the rng draws differ) and
    not for MiniBatchKMeans, whose minibatch extraction also consumes the rng.
    """
    sample_weight = np.random.RandomState(global_random_seed).randint(
        1, 5, size=n_samples
    )
    X_repeat = np.repeat(X, sample_weight, axis=0)

    base_km = KMeans(
        init=centers, n_init=1, n_clusters=n_clusters, random_state=global_random_seed
    )
    km_weighted = clone(base_km).fit(X, sample_weight=sample_weight)
    km_repeated = clone(base_km).fit(X_repeat)

    # Repeating the weighted labels must reproduce the repeated-data labels.
    assert_array_equal(
        km_repeated.labels_, np.repeat(km_weighted.labels_, sample_weight)
    )
    assert_allclose(km_weighted.inertia_, km_repeated.inertia_)
    assert_allclose(
        _sort_centers(km_weighted.cluster_centers_),
        _sort_centers(km_repeated.cluster_centers_),
    )
@pytest.mark.parametrize(
"input_data",
[X] + X_as_any_csr,
ids=data_containers_ids,
)
@pytest.mark.parametrize("Estimator", [KMeans, MiniBatchKMeans])
def test_unit_weights_vs_no_weights(Estimator, input_data, global_random_seed):
# Check that not passing sample weights should be equivalent to passing
# sample weights all equal to one.
sample_weight = np.ones(n_samples)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/cluster/tests/test_feature_agglomeration.py | sklearn/cluster/tests/test_feature_agglomeration.py | """
Tests for sklearn.cluster._feature_agglomeration
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.cluster import FeatureAgglomeration
from sklearn.datasets import make_blobs
from sklearn.utils._testing import assert_array_almost_equal
def test_feature_agglomeration():
    n_clusters = 1
    X = np.array([0, 0, 1]).reshape(1, 3)  # (n_samples, n_features)

    agglo_mean = FeatureAgglomeration(n_clusters=n_clusters, pooling_func=np.mean)
    agglo_median = FeatureAgglomeration(n_clusters=n_clusters, pooling_func=np.median)

    for agglo in (agglo_mean, agglo_median):
        agglo.fit(X)
        # One label per input feature, spanning exactly n_clusters values.
        assert np.size(np.unique(agglo.labels_)) == n_clusters
        assert np.size(agglo.labels_) == X.shape[1]

    # Test transform
    Xt_mean = agglo_mean.transform(X)
    Xt_median = agglo_median.transform(X)
    assert Xt_mean.shape[1] == n_clusters
    assert Xt_median.shape[1] == n_clusters
    assert Xt_mean == np.array([1 / 3.0])
    assert Xt_median == np.array([0.0])

    # Test inverse transform
    X_full_mean = agglo_mean.inverse_transform(Xt_mean)
    X_full_median = agglo_median.inverse_transform(Xt_median)
    assert np.unique(X_full_mean[0]).size == n_clusters
    assert np.unique(X_full_median[0]).size == n_clusters

    # Round-tripping the reconstructed data reproduces the pooled values.
    assert_array_almost_equal(agglo_mean.transform(X_full_mean), Xt_mean)
    assert_array_almost_equal(agglo_median.transform(X_full_median), Xt_median)
def test_feature_agglomeration_feature_names_out():
    """Check `get_feature_names_out` for `FeatureAgglomeration`."""
    X, _ = make_blobs(n_features=6, random_state=0)
    agglo = FeatureAgglomeration(n_clusters=3).fit(X)
    expected = [f"featureagglomeration{i}" for i in range(agglo.n_clusters_)]
    assert_array_equal(expected, agglo.get_feature_names_out())
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/cluster/tests/__init__.py | sklearn/cluster/tests/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/cluster/tests/test_optics.py | sklearn/cluster/tests/test_optics.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
import numpy as np
import pytest
from sklearn.cluster import DBSCAN, OPTICS
from sklearn.cluster._optics import _extend_region, _extract_xi_labels
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.datasets import make_blobs
from sklearn.exceptions import DataConversionWarning, EfficiencyWarning
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.utils import shuffle
from sklearn.utils._testing import assert_allclose, assert_array_equal
from sklearn.utils.fixes import CSR_CONTAINERS
# Module-level fixture shared by several tests below: six 2D Gaussian blobs
# with different spreads, drawn from a fixed seed so that hard-coded
# expectations (e.g. in test_compare_to_ELKI) stay valid. Do not reorder the
# draws below — each depends on the rng state left by the previous one.
rng = np.random.RandomState(0)
n_points_per_cluster = 10
C1 = [-5, -2] + 0.8 * rng.randn(n_points_per_cluster, 2)
C2 = [4, -1] + 0.1 * rng.randn(n_points_per_cluster, 2)
C3 = [1, -2] + 0.2 * rng.randn(n_points_per_cluster, 2)
C4 = [-2, 3] + 0.3 * rng.randn(n_points_per_cluster, 2)
C5 = [3, -2] + 1.6 * rng.randn(n_points_per_cluster, 2)
C6 = [5, 6] + 2 * rng.randn(n_points_per_cluster, 2)
# Stacked dataset of 60 samples used throughout this file.
X = np.vstack((C1, C2, C3, C4, C5, C6))
@pytest.mark.parametrize(
    ("r_plot", "end"),
    [
        [[10, 8.9, 8.8, 8.7, 7, 10], 3],
        [[10, 8.9, 8.8, 8.7, 8.6, 7, 10], 0],
        [[10, 8.9, 8.8, 8.7, 7, 6, np.inf], 4],
        # NOTE(review): this case duplicates the previous one — possibly one
        # of the two was meant to use different data; confirm upstream.
        [[10, 8.9, 8.8, 8.7, 7, 6, np.inf], 4],
    ],
)
def test_extend_downward(r_plot, end):
    # Ratios of consecutive reachability values determine the steep-down /
    # upward flags fed to _extend_region.
    reachability = np.array(r_plot)
    ratios = reachability[:-1] / reachability[1:]
    steep_downward = ratios >= 1 / 0.9
    upward = ratios < 1
    assert _extend_region(steep_downward, upward, 0, 2) == end
@pytest.mark.parametrize(
    ("r_plot", "end"),
    [
        [[1, 2, 2.1, 2.2, 4, 8, 8, np.inf], 6],
        [[1, 2, 2.1, 2.2, 2.3, 4, 8, 8, np.inf], 0],
        [[1, 2, 2.1, 2, np.inf], 0],
        [[1, 2, 2.1, np.inf], 2],
    ],
)
def test_extend_upward(r_plot, end):
    # Mirror of test_extend_downward for the steep-up / downward direction.
    reachability = np.array(r_plot)
    ratios = reachability[:-1] / reachability[1:]
    steep_upward = ratios <= 0.9
    downward = ratios > 1
    assert _extend_region(steep_upward, downward, 0, 2) == end
@pytest.mark.parametrize(
    ("ordering", "clusters", "expected"),
    [
        [[0, 1, 2, 3], [[0, 1], [2, 3]], [0, 0, 1, 1]],
        [[0, 1, 2, 3], [[0, 1], [3, 3]], [0, 0, -1, 1]],
        [[0, 1, 2, 3], [[0, 1], [3, 3], [0, 3]], [0, 0, -1, 1]],
        [[3, 1, 2, 0], [[0, 1], [3, 3], [0, 3]], [1, 0, -1, 0]],
    ],
)
def test_the_extract_xi_labels(ordering, clusters, expected):
    # Labels derived from cluster boundary indices on the ordering;
    # points in no cluster get -1 (noise).
    assert_array_equal(_extract_xi_labels(ordering, clusters), expected)
def test_extract_xi(global_dtype):
    # small and easy test (no clusters around other clusters)
    # but with a clear noise data.
    # global_random_seed is not used here since the expected labels
    # are hardcoded for these specific data.
    rng = np.random.RandomState(0)
    n_points_per_cluster = 5

    # Six blobs of varying spread. Note that C3 and C5 share label 1 in
    # `expected_labels` below (both carry [1] * 5).
    C1 = [-5, -2] + 0.8 * rng.randn(n_points_per_cluster, 2)
    C2 = [4, -1] + 0.1 * rng.randn(n_points_per_cluster, 2)
    C3 = [1, -2] + 0.2 * rng.randn(n_points_per_cluster, 2)
    C4 = [-2, 3] + 0.3 * rng.randn(n_points_per_cluster, 2)
    C5 = [3, -2] + 0.6 * rng.randn(n_points_per_cluster, 2)
    C6 = [5, 6] + 0.2 * rng.randn(n_points_per_cluster, 2)

    # The [100, 100] point is an isolated outlier expected to be noise (-1).
    X = np.vstack((C1, C2, C3, C4, C5, np.array([[100, 100]]), C6)).astype(
        global_dtype, copy=False
    )
    expected_labels = np.r_[[2] * 5, [0] * 5, [1] * 5, [3] * 5, [1] * 5, -1, [4] * 5]
    X, expected_labels = shuffle(X, expected_labels, random_state=rng)

    clust = OPTICS(
        min_samples=3, min_cluster_size=2, max_eps=20, cluster_method="xi", xi=0.4
    ).fit(X)
    assert_array_equal(clust.labels_, expected_labels)

    # check float min_samples and min_cluster_size
    clust = OPTICS(
        min_samples=0.1, min_cluster_size=0.08, max_eps=20, cluster_method="xi", xi=0.4
    ).fit(X)
    assert_array_equal(clust.labels_, expected_labels)

    # Same data with the outlier duplicated: both copies expected to be noise.
    X = np.vstack((C1, C2, C3, C4, C5, np.array([[100, 100]] * 2), C6)).astype(
        global_dtype, copy=False
    )
    expected_labels = np.r_[
        [1] * 5, [3] * 5, [2] * 5, [0] * 5, [2] * 5, -1, -1, [4] * 5
    ]
    X, expected_labels = shuffle(X, expected_labels, random_state=rng)

    clust = OPTICS(
        min_samples=3, min_cluster_size=3, max_eps=20, cluster_method="xi", xi=0.3
    ).fit(X)
    # this may fail if the predecessor correction is not at work!
    assert_array_equal(clust.labels_, expected_labels)

    # Three well-separated clusters of four points each, no noise expected.
    C1 = [[0, 0], [0, 0.1], [0, -0.1], [0.1, 0]]
    C2 = [[10, 10], [10, 9], [10, 11], [9, 10]]
    C3 = [[100, 100], [100, 90], [100, 110], [90, 100]]
    X = np.vstack((C1, C2, C3)).astype(global_dtype, copy=False)
    expected_labels = np.r_[[0] * 4, [1] * 4, [2] * 4]
    X, expected_labels = shuffle(X, expected_labels, random_state=rng)

    clust = OPTICS(
        min_samples=2, min_cluster_size=2, max_eps=np.inf, cluster_method="xi", xi=0.04
    ).fit(X)
    assert_array_equal(clust.labels_, expected_labels)
def test_cluster_hierarchy(global_dtype, global_random_seed):
    # A tight inner blob nested inside a much wider one: the hierarchy must
    # expose both the inner cluster and the all-encompassing outer one.
    rng = np.random.RandomState(global_random_seed)
    n_points_per_cluster = 100
    inner = [0, 0] + 2 * rng.randn(n_points_per_cluster, 2).astype(
        global_dtype, copy=False
    )
    outer = [0, 0] + 50 * rng.randn(n_points_per_cluster, 2).astype(
        global_dtype, copy=False
    )
    X = shuffle(np.vstack((inner, outer)), random_state=rng)

    clusters = OPTICS(min_samples=20, xi=0.2).fit(X).cluster_hierarchy_
    assert clusters.shape == (2, 2)
    # The first cluster should contain all points from the inner blob, but a
    # few points from the wide blob may land close enough to be included too.
    assert 100 <= np.diff(clusters[0]) + 1 <= 115
    # The second cluster should contain every sample.
    assert np.diff(clusters[-1]) + 1 == 200
@pytest.mark.parametrize(
    "csr_container, metric",
    [(None, "minkowski")] + [(container, "euclidean") for container in CSR_CONTAINERS],
)
def test_correct_number_of_clusters(metric, csr_container):
    # in 'auto' mode
    n_clusters = 3
    X = generate_clustered_data(n_clusters=n_clusters)

    # Parameters chosen specifically for this task.
    clust = OPTICS(max_eps=5.0 * 6.0, min_samples=4, xi=0.1, metric=metric)
    clust.fit(X if csr_container is None else csr_container(X))

    # number of clusters, ignoring noise if present
    found = len(set(clust.labels_)) - int(-1 in clust.labels_)
    assert found == n_clusters

    # check attribute types and sizes
    n = len(X)
    for attribute, kind in (
        (clust.labels_, "i"),
        (clust.reachability_, "f"),
        (clust.core_distances_, "f"),
        (clust.ordering_, "i"),
    ):
        assert attribute.shape == (n,)
        assert attribute.dtype.kind == kind

    # the ordering is a permutation of all sample indices
    assert set(clust.ordering_) == set(range(n))
def test_minimum_number_of_sample_check():
    # min_samples may not exceed the number of samples in X.
    clust = OPTICS(max_eps=5.0 * 0.3, min_samples=10, min_cluster_size=1.0)
    with pytest.raises(ValueError, match="min_samples must be no greater than"):
        clust.fit([[1, 1]])
def test_bad_extract():
    # The dbscan extraction eps must be smaller than the scaled max_eps.
    X, _ = make_blobs(
        n_samples=750,
        centers=[[1, 1], [-1, -1], [1, -1]],
        cluster_std=0.4,
        random_state=0,
    )
    clust = OPTICS(max_eps=5.0 * 0.03, cluster_method="dbscan", eps=0.3, min_samples=10)
    with pytest.raises(
        ValueError, match="Specify an epsilon smaller than 0.15. Got 0.3."
    ):
        clust.fit(X)
def test_bad_reachability():
    # With a max_eps this small no point reaches any other: a warning is due.
    X, _ = make_blobs(
        n_samples=750,
        centers=[[1, 1], [-1, -1], [1, -1]],
        cluster_std=0.4,
        random_state=0,
    )
    with pytest.warns(
        UserWarning, match="All reachability values are inf. Set a larger max_eps."
    ):
        OPTICS(max_eps=5.0 * 0.003, min_samples=10, eps=0.015).fit(X)
def test_nowarn_if_metric_bool_data_bool():
    # A boolean metric applied to boolean data must not trigger a conversion
    # warning.
    # non-regression test for
    # https://github.com/scikit-learn/scikit-learn/issues/18996
    X_bool = np.random.randint(2, size=(5, 2), dtype=bool)

    with warnings.catch_warnings():
        # escalate DataConversionWarning to an error so any emission fails
        warnings.simplefilter("error", DataConversionWarning)
        OPTICS(metric="rogerstanimoto").fit(X_bool)
def test_warn_if_metric_bool_data_no_bool():
    # A boolean metric on non-boolean data must raise exactly one
    # DataConversionWarning.
    # non-regression test for
    # https://github.com/scikit-learn/scikit-learn/issues/18996
    metric = "rogerstanimoto"
    X_int = np.random.randint(2, size=(5, 2), dtype=np.int32)
    expected = f"Data will be converted to boolean for metric {metric}"

    with pytest.warns(DataConversionWarning, match=expected) as records:
        # Silence a DeprecationWarning from joblib <= 1.5.1 in Python 3.14+.
        warnings.filterwarnings(
            "ignore",
            message="'asyncio.iscoroutinefunction' is deprecated",
            category=DeprecationWarning,
        )
        OPTICS(metric=metric).fit(X_int)
    assert len(records) == 1
def test_nowarn_if_metric_no_bool():
    # A non-boolean metric never emits a conversion warning, whatever the
    # input dtype.
    X_bool = np.random.randint(2, size=(5, 2), dtype=bool)
    X_num = np.random.randint(2, size=(5, 2), dtype=np.int32)

    with warnings.catch_warnings():
        warnings.simplefilter("error", DataConversionWarning)
        for data in (X_bool, X_num):
            OPTICS(metric="minkowski").fit(data)
def test_close_extract():
    # Extraction with eps close to the scaled max_eps should still succeed.
    X, _ = make_blobs(
        n_samples=750,
        centers=[[1, 1], [-1, -1], [1, -1]],
        cluster_std=0.4,
        random_state=0,
    )
    clust = OPTICS(max_eps=1.0, cluster_method="dbscan", eps=0.3, min_samples=10).fit(X)
    # Labels start at 0, so a maximum label of 2 means three clusters.
    assert max(clust.labels_) == 2
@pytest.mark.parametrize("eps", [0.1, 0.3, 0.5])
@pytest.mark.parametrize("min_samples", [3, 10, 20])
@pytest.mark.parametrize(
    "csr_container, metric",
    [(None, "minkowski"), (None, "euclidean")]
    + [(container, "euclidean") for container in CSR_CONTAINERS],
)
def test_dbscan_optics_parity(eps, min_samples, metric, global_dtype, csr_container):
    # OPTICS with dbscan extraction must disagree with plain DBSCAN on at
    # most 5% of the samples.
    X, _ = make_blobs(
        n_samples=150,
        centers=[[1, 1], [-1, -1], [1, -1]],
        cluster_std=0.4,
        random_state=0,
    )
    if csr_container is not None:
        X = csr_container(X)
    X = X.astype(global_dtype, copy=False)

    op = OPTICS(
        min_samples=min_samples, cluster_method="dbscan", eps=eps, metric=metric
    ).fit(X)
    db = DBSCAN(eps=eps, min_samples=min_samples).fit(X)

    # Best-case label agreement, read off the contingency matrix.
    contingency = contingency_matrix(db.labels_, op.labels_)
    agree = min(
        np.sum(np.max(contingency, axis=0)), np.sum(np.max(contingency, axis=1))
    )
    disagree = X.shape[0] - agree
    percent_mismatch = np.round((disagree - 1) / X.shape[0], 2)
    assert percent_mismatch <= 0.05
def test_min_samples_edge_case(global_dtype):
    # Three small groups at increasing internal spread; shrinking max_eps and
    # growing min_samples progressively turns them into noise.
    group_a = [[0, 0], [0, 0.1], [0, -0.1]]
    group_b = [[10, 10], [10, 9], [10, 11]]
    group_c = [[100, 100], [100, 96], [100, 106]]
    X = np.vstack((group_a, group_b, group_c)).astype(global_dtype, copy=False)

    clust = OPTICS(min_samples=3, max_eps=7, cluster_method="xi", xi=0.04).fit(X)
    assert_array_equal(clust.labels_, np.r_[[0] * 3, [1] * 3, [2] * 3])

    clust = OPTICS(min_samples=3, max_eps=3, cluster_method="xi", xi=0.04).fit(X)
    assert_array_equal(clust.labels_, np.r_[[0] * 3, [1] * 3, [-1] * 3])

    with pytest.warns(UserWarning, match="All reachability values"):
        clust = OPTICS(min_samples=4, max_eps=3, cluster_method="xi", xi=0.04).fit(X)
    assert_array_equal(clust.labels_, np.r_[[-1] * 9])
# try arbitrary minimum sizes
@pytest.mark.parametrize("min_cluster_size", range(2, X.shape[0] // 10, 23))
def test_min_cluster_size(min_cluster_size, global_dtype):
    redX = X[::2].astype(global_dtype, copy=False)  # reduce for speed
    model = OPTICS(min_samples=9, min_cluster_size=min_cluster_size).fit(redX)
    sizes = np.bincount(model.labels_[model.labels_ != -1])
    if sizes.size:
        # every extracted cluster honours the minimum size
        assert min(sizes) >= min_cluster_size
    # an equivalent fractional min_cluster_size must behave identically
    model_frac = OPTICS(
        min_samples=9,
        min_cluster_size=min_cluster_size / redX.shape[0],
    )
    model_frac.fit(redX)
    assert_array_equal(model.labels_, model_frac.labels_)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_min_cluster_size_invalid2(csr_container):
    # min_cluster_size larger than the sample count is rejected, for dense
    # and for sparse inputs alike.
    too_large = len(X) + 1
    with pytest.raises(ValueError, match="must be no greater than the "):
        OPTICS(min_cluster_size=too_large).fit(X)
    with pytest.raises(ValueError, match="must be no greater than the "):
        OPTICS(min_cluster_size=too_large, metric="euclidean").fit(csr_container(X))
def test_processing_order():
    # The next point to expand must be picked among *all* unprocessed points,
    # not only among direct neighbors of the current one.
    Y = [[0], [10], [-10], [25]]
    clust = OPTICS(min_samples=3, max_eps=15).fit(Y)
    assert_array_equal(clust.reachability_, [np.inf, 10, 10, 15])
    assert_array_equal(clust.core_distances_, [10, 15, np.inf, np.inf])
    assert_array_equal(clust.ordering_, [0, 1, 2, 3])
def test_compare_to_ELKI():
    # Expected values, computed with (future) ELKI 0.7.5 using:
    # java -jar elki.jar cli -dbc.in csv -dbc.filter FixedDBIDsFilter
    # -algorithm clustering.optics.OPTICSHeap -optics.minpts 5
    # where the FixedDBIDsFilter gives 0-indexed ids.
    r1 = [
        np.inf, 1.0574896366427478, 0.7587934993548423, 0.7290174038973836,
        0.7290174038973836, 0.7290174038973836, 0.6861627576116127,
        0.7587934993548423, 0.9280118450166668, 1.1748022534146194,
        3.3355455741292257, 0.49618389254482587, 0.2552805046961355,
        0.2552805046961355, 0.24944622248445714, 0.24944622248445714,
        0.24944622248445714, 0.2552805046961355, 0.2552805046961355,
        0.3086779122185853, 4.163024452756142, 1.623152630340929,
        0.45315840475822655, 0.25468325192031926, 0.2254004358159971,
        0.18765711877083036, 0.1821471333893275, 0.1821471333893275,
        0.18765711877083036, 0.18765711877083036, 0.2240202988740153,
        1.154337614548715, 1.342604473837069, 1.323308536402633,
        0.8607514948648837, 0.27219111215810565, 0.13260875220533205,
        0.13260875220533205, 0.09890587675958984, 0.09890587675958984,
        0.13548790801634494, 0.1575483940837384, 0.17515137170530226,
        0.17575920159442388, 0.27219111215810565, 0.6101447895405373,
        1.3189208094864302, 1.323308536402633, 2.2509184159764577,
        2.4517810628594527, 3.675977064404973, 3.8264795626020365,
        2.9130735341510614, 2.9130735341510614, 2.9130735341510614,
        2.9130735341510614, 2.8459300127258036, 2.8459300127258036,
        2.8459300127258036, 3.0321982337972537,
    ]
    o1 = [
        0, 3, 6, 4, 7, 8, 2, 9, 5, 1, 31, 30, 32, 34, 33, 38, 39, 35, 37, 36,
        44, 21, 23, 24, 22, 25, 27, 29, 26, 28, 20, 40, 45, 46, 10, 15, 11, 13,
        17, 19, 18, 12, 16, 14, 47, 49, 43, 48, 42, 41, 53, 57, 51, 52, 56, 59,
        54, 55, 58, 50,
    ]
    p1 = [
        -1, 0, 3, 6, 6, 6, 8, 3, 7, 5, 1, 31, 30, 30, 34, 34, 34, 32, 32, 37,
        36, 44, 21, 23, 24, 22, 25, 25, 22, 22, 22, 21, 40, 45, 46, 10, 15, 15,
        13, 13, 15, 11, 19, 15, 10, 47, 12, 45, 14, 43, 42, 53, 57, 57, 57, 57,
        59, 59, 59, 58,
    ]

    # Tests against known extraction array
    # Does NOT work with metric='euclidean', because sklearn euclidean has
    # worse numeric precision. 'minkowski' is slower but more accurate.
    clust1 = OPTICS(min_samples=5).fit(X)

    assert_array_equal(clust1.ordering_, np.array(o1))
    assert_array_equal(clust1.predecessor_[clust1.ordering_], np.array(p1))
    assert_allclose(clust1.reachability_[clust1.ordering_], np.array(r1))
    # ELKI currently does not print the core distances (which are not used much
    # in literature, but we can at least ensure to have this consistency:
    for point in clust1.ordering_[1:]:
        predecessor = clust1.predecessor_[point]
        assert clust1.reachability_[point] >= clust1.core_distances_[predecessor]

    # Expected values, computed with (future) ELKI 0.7.5 using
    inf = np.inf
    r2 = [
        inf, inf, inf, inf, inf, inf, inf, inf, inf, inf, inf,
        0.27219111215810565, 0.13260875220533205, 0.13260875220533205,
        0.09890587675958984, 0.09890587675958984, 0.13548790801634494,
        0.1575483940837384, 0.17515137170530226, 0.17575920159442388,
        0.27219111215810565, 0.4928068613197889, inf, 0.2666183922512113,
        0.18765711877083036, 0.1821471333893275, 0.1821471333893275,
        0.1821471333893275, 0.18715928772277457, 0.18765711877083036,
        0.18765711877083036, 0.25468325192031926, inf, 0.2552805046961355,
        0.2552805046961355, 0.24944622248445714, 0.24944622248445714,
        0.24944622248445714, 0.2552805046961355, 0.2552805046961355,
        0.3086779122185853, 0.34466409325984865,
    ] + [inf] * 18
    o2 = [
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 11, 13, 17, 19, 18, 12, 16, 14,
        47, 46, 20, 22, 25, 23, 27, 29, 24, 26, 28, 21, 30, 32, 34, 33, 38, 39,
        35, 37, 36, 31, 40, 41, 42, 43, 44, 45, 48, 49, 50, 51, 52, 53, 54, 55,
        56, 57, 58, 59,
    ]
    p2 = [
        -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 10, 15, 15, 13, 13, 15, 11,
        19, 15, 10, 47, -1, 20, 22, 25, 25, 25, 25, 22, 22, 23, -1, 30, 30, 34,
        34, 34, 32, 32, 37, 38,
    ] + [-1] * 18

    clust2 = OPTICS(min_samples=5, max_eps=0.5).fit(X)

    assert_array_equal(clust2.ordering_, np.array(o2))
    assert_array_equal(clust2.predecessor_[clust2.ordering_], np.array(p2))
    assert_allclose(clust2.reachability_[clust2.ordering_], np.array(r2))

    # Core distances below the 0.5 cutoff must agree between both runs.
    index = np.where(clust1.core_distances_ <= 0.5)[0]
    assert_allclose(clust1.core_distances_[index], clust2.core_distances_[index])
def test_extract_dbscan(global_dtype, global_random_seed):
    # Easy dbscan case: four blobs of identical density, no nesting.
    rng = np.random.RandomState(global_random_seed)
    n_points_per_cluster = 20
    blobs = [
        center + 0.2 * rng.randn(n_points_per_cluster, 2)
        for center in ([-5, -2], [4, -1], [1, 2], [-2, 3])
    ]
    X = np.vstack(blobs).astype(global_dtype, copy=False)

    clust = OPTICS(cluster_method="dbscan", eps=0.5).fit(X)
    # exactly four clusters, labelled 0..3 (noise excluded)
    assert_array_equal(
        np.sort(np.unique(clust.labels_[clust.labels_ != -1])), [0, 1, 2, 3]
    )
@pytest.mark.parametrize("csr_container", [None] + CSR_CONTAINERS)
def test_precomputed_dists(global_dtype, csr_container):
    # A precomputed euclidean distance matrix must yield the same result as
    # fitting on the raw coordinates with metric="euclidean".
    redX = X[::2].astype(global_dtype, copy=False)
    dists = pairwise_distances(redX, metric="euclidean")
    if csr_container is not None:
        dists = csr_container(dists)

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", EfficiencyWarning)
        clust_pre = OPTICS(min_samples=10, algorithm="brute", metric="precomputed").fit(
            dists
        )
    clust_eucl = OPTICS(min_samples=10, algorithm="brute", metric="euclidean").fit(redX)

    assert_allclose(clust_pre.reachability_, clust_eucl.reachability_)
    assert_array_equal(clust_pre.labels_, clust_eucl.labels_)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_optics_input_not_modified_precomputed_sparse_nodiag(
    csr_container, global_random_seed
):
    """Check that we don't modify in-place the pre-computed sparse matrix.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/27508
    """
    dense = np.random.RandomState(global_random_seed).rand(6, 6)
    # Zero the diagonal so those entries stay *implicit* in the sparse
    # matrix; an in-place modification would make them explicit.
    np.fill_diagonal(dense, 0)
    X = csr_container(dense)
    assert all(row != col for row, col in zip(*X.nonzero()))
    X_before = X.copy()

    OPTICS(metric="precomputed").fit(X)

    # No explicit zeros were added, and the stored values are unchanged.
    assert X.nnz == X_before.nnz
    assert_array_equal(X.toarray(), X_before.toarray())
def test_optics_predecessor_correction_ordering():
    """Check that cluster correction using predecessor is working as expected.

    The predecessor correction previously used the wrong indices, so
    reordering the samples could change the clustering. Fitting the same
    data in two different orders must produce matching labels.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/26324
    """
    values = np.array([1, 2, 3, 1, 8, 8, 7, 100]).reshape(-1, 1)
    reorder = [0, 1, 2, 4, 5, 6, 7, 3]

    labels_orig = OPTICS(min_samples=3, metric="euclidean").fit(values).labels_
    labels_perm = OPTICS(min_samples=3, metric="euclidean").fit(values[reorder]).labels_
    assert_array_equal(labels_orig[reorder], labels_perm)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/cluster/tests/test_bisect_k_means.py | sklearn/cluster/tests/test_bisect_k_means.py | import numpy as np
import pytest
from sklearn.cluster import BisectingKMeans
from sklearn.metrics import v_measure_score
from sklearn.utils._testing import assert_allclose, assert_array_equal
from sklearn.utils.fixes import CSR_CONTAINERS
@pytest.mark.parametrize("bisecting_strategy", ["biggest_inertia", "largest_cluster"])
@pytest.mark.parametrize("init", ["k-means++", "random"])
def test_three_clusters(bisecting_strategy, init):
    """Tries to perform bisect k-means for three clusters to check
    if splitting data is performed correctly.
    """
    X = np.array(
        [[1, 1], [10, 1], [3, 1], [10, 0], [2, 1], [10, 2], [10, 8], [10, 9], [10, 10]]
    )
    model = BisectingKMeans(
        n_clusters=3,
        random_state=0,
        bisecting_strategy=bisecting_strategy,
        init=init,
    ).fit(X)

    expected_centers = [[2, 1], [10, 1], [10, 9]]
    expected_labels = [0, 1, 0, 1, 0, 1, 2, 2, 2]

    # Compare centers up to ordering; compare labels up to permutation.
    assert_allclose(sorted(expected_centers), sorted(model.cluster_centers_.tolist()))
    assert_allclose(v_measure_score(expected_labels, model.labels_), 1.0)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_sparse(csr_container):
    """Test Bisecting K-Means with sparse data.

    Checks if labels and centers are the same between dense and sparse.
    """
    rng = np.random.RandomState(0)
    X = rng.rand(20, 2)
    X[X < 0.8] = 0

    model = BisectingKMeans(n_clusters=3, random_state=0)
    sparse_centers = model.fit(csr_container(X)).cluster_centers_
    dense_centers = model.fit(X).cluster_centers_

    # Check if results is the same for dense and sparse data
    assert_allclose(dense_centers, sparse_centers, atol=1e-8)
@pytest.mark.parametrize("n_clusters", [4, 5])
def test_n_clusters(n_clusters):
    """Test if resulting labels are in range [0, n_clusters - 1]."""
    X = np.random.RandomState(0).rand(10, 2)
    labels = BisectingKMeans(n_clusters=n_clusters, random_state=0).fit(X).labels_
    assert_array_equal(np.unique(labels), np.arange(n_clusters))
def test_one_cluster():
    """Test single cluster."""
    X = np.array([[1, 2], [10, 2], [10, 8]])
    model = BisectingKMeans(n_clusters=1, random_state=0).fit(X)

    # every sample belongs to cluster 0, both from fit and from predict
    assert all(model.labels_ == 0)
    assert all(model.predict(X) == 0)
    # the single center is the mean of all samples
    assert_allclose(model.cluster_centers_, X.mean(axis=0).reshape(1, -1))
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS + [None])
def test_fit_predict(csr_container):
    """Check if labels from fit(X) method are same as from fit(X).predict(X)."""
    rng = np.random.RandomState(0)
    X = rng.rand(10, 2)
    if csr_container is not None:
        X[X < 0.8] = 0
        X = csr_container(X)

    model = BisectingKMeans(n_clusters=3, random_state=0).fit(X)
    assert_array_equal(model.labels_, model.predict(X))
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS + [None])
def test_dtype_preserved(csr_container, global_dtype):
    """Check that centers dtype is the same as input data dtype."""
    rng = np.random.RandomState(0)
    X = rng.rand(10, 2).astype(global_dtype, copy=False)
    if csr_container is not None:
        X[X < 0.8] = 0
        X = csr_container(X)

    model = BisectingKMeans(n_clusters=3, random_state=0).fit(X)
    assert model.cluster_centers_.dtype == global_dtype
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS + [None])
def test_float32_float64_equivalence(csr_container):
    """Check that the results are the same between float32 and float64."""
    rng = np.random.RandomState(0)
    X = rng.rand(10, 2)
    if csr_container is not None:
        X[X < 0.8] = 0
        X = csr_container(X)

    fitted = {
        np.dtype(dtype): BisectingKMeans(n_clusters=3, random_state=0).fit(
            X if dtype == np.float64 else X.astype(dtype)
        )
        for dtype in (np.float64, np.float32)
    }
    km64, km32 = fitted[np.dtype(np.float64)], fitted[np.dtype(np.float32)]
    assert_allclose(km32.cluster_centers_, km64.cluster_centers_)
    assert_array_equal(km32.labels_, km64.labels_)
@pytest.mark.parametrize("algorithm", ("lloyd", "elkan"))
def test_no_crash_on_empty_bisections(algorithm):
    # Non-regression test for:
    # https://github.com/scikit-learn/scikit-learn/issues/27081
    rng = np.random.RandomState(0)
    bkm = BisectingKMeans(n_clusters=10, algorithm=algorithm).fit(rng.rand(3000, 10))

    # Predicting on data scaled far outside the training range triggers the
    # pathological case where the inner mask leads to empty bisections.
    labels = bkm.predict(50 * rng.rand(100, 10))  # should not crash with idiv by 0
    assert np.isin(np.unique(labels), np.arange(10)).all()
def test_one_feature():
    # Fitting on single-feature data must not raise.
    # Non-regression test for:
    # https://github.com/scikit-learn/scikit-learn/issues/27236
    data = np.random.normal(size=(128, 1))
    BisectingKMeans(bisecting_strategy="biggest_inertia", random_state=0).fit(data)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/decomposition/_base.py | sklearn/decomposition/_base.py | """Principal Component Analysis Base Classes"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg
from sklearn.base import (
BaseEstimator,
ClassNamePrefixFeaturesOutMixin,
TransformerMixin,
)
from sklearn.utils._array_api import _add_to_diagonal, device, get_namespace
from sklearn.utils.validation import check_array, check_is_fitted, validate_data
class _BasePCA(
ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator, metaclass=ABCMeta
):
"""Base class for PCA methods.
Warning: This class should not be used directly.
Use derived classes instead.
"""
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances, and sigma2 contains the
noise variances.
Returns
-------
cov : array of shape=(n_features, n_features)
Estimated covariance of data.
"""
xp, _ = get_namespace(self.components_)
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * xp.sqrt(exp_var[:, np.newaxis])
exp_var_diff = exp_var - self.noise_variance_
exp_var_diff = xp.where(
exp_var > self.noise_variance_,
exp_var_diff,
xp.asarray(0.0, device=device(exp_var), dtype=exp_var.dtype),
)
cov = (components_.T * exp_var_diff) @ components_
_add_to_diagonal(cov, self.noise_variance_, xp)
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
xp, is_array_api_compliant = get_namespace(self.components_)
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return xp.eye(n_features) / self.noise_variance_
if is_array_api_compliant:
linalg_inv = xp.linalg.inv
else:
linalg_inv = linalg.inv
if self.noise_variance_ == 0.0:
return linalg_inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * xp.sqrt(exp_var[:, np.newaxis])
exp_var_diff = exp_var - self.noise_variance_
exp_var_diff = xp.where(
exp_var > self.noise_variance_,
exp_var_diff,
xp.asarray(0.0, device=device(exp_var)),
)
precision = components_ @ components_.T / self.noise_variance_
_add_to_diagonal(precision, 1.0 / exp_var_diff, xp)
precision = components_.T @ linalg_inv(precision) @ components_
precision /= -(self.noise_variance_**2)
_add_to_diagonal(precision, 1.0 / self.noise_variance_, xp)
return precision
@abstractmethod
def fit(self, X, y=None):
"""Placeholder for fit. Subclasses should implement this method!
Fit the model with X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
def transform(self, X):
"""Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
New data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Returns
-------
X_new : array-like of shape (n_samples, n_components)
Projection of X in the first principal components, where `n_samples`
is the number of samples and `n_components` is the number of the components.
"""
xp, _ = get_namespace(X, self.components_, self.explained_variance_)
check_is_fitted(self)
X = validate_data(
self,
X,
dtype=[xp.float64, xp.float32],
accept_sparse=("csr", "csc"),
reset=False,
)
return self._transform(X, xp=xp, x_is_centered=False)
    def _transform(self, X, xp, x_is_centered=False):
        """Project `X` onto the principal components (array-API helper).
        Parameters
        ----------
        X : array of shape (n_samples, n_features)
            Data to project (dense or sparse).
        xp : array namespace module
            Array API namespace used for the computations.
        x_is_centered : bool, default=False
            If True, `X` already had `mean_` subtracted and no centering
            correction is applied here.
        """
        X_transformed = X @ self.components_.T
        if not x_is_centered:
            # Apply the centering after the projection.
            # For dense X this avoids copying or mutating the data passed by
            # the caller.
            # For sparse X it keeps sparsity and avoids having to wrap X into
            # a linear operator.
            X_transformed -= xp.reshape(self.mean_, (1, -1)) @ self.components_.T
        if self.whiten:
            # For some solvers (such as "arpack" and "covariance_eigh"), on
            # rank deficient data, some components can have a variance
            # arbitrarily close to zero, leading to non-finite results when
            # whitening. To avoid this problem we clip the scale below to
            # machine epsilon for the working dtype.
            scale = xp.sqrt(self.explained_variance_)
            min_scale = xp.finfo(scale.dtype).eps
            scale[scale < min_scale] = min_scale
            X_transformed /= scale
        return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space.
In other words, return an input `X_original` whose transform would be X.
Parameters
----------
X : array-like of shape (n_samples, n_components)
New data, where `n_samples` is the number of samples
and `n_components` is the number of components.
Returns
-------
X_original : array-like of shape (n_samples, n_features)
Original data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Notes
-----
If whitening is enabled, inverse_transform will compute the
exact inverse operation, which includes reversing whitening.
"""
xp, _ = get_namespace(X, self.components_, self.explained_variance_)
check_is_fitted(self)
X = check_array(X, input_name="X", dtype=[xp.float64, xp.float32])
if self.whiten:
scaled_components = (
xp.sqrt(self.explained_variance_[:, np.newaxis]) * self.components_
)
return X @ scaled_components + self.mean_
else:
return X @ self.components_ + self.mean_
@property
def _n_features_out(self):
"""Number of transformed output features."""
return self.components_.shape[0]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/decomposition/_kernel_pca.py | sklearn/decomposition/_kernel_pca.py | """Kernel Principal Components Analysis."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from numbers import Integral, Real
import numpy as np
from scipy import linalg
from scipy.linalg import eigh
from scipy.sparse.linalg import eigsh
from sklearn.base import (
BaseEstimator,
ClassNamePrefixFeaturesOutMixin,
TransformerMixin,
_fit_context,
)
from sklearn.exceptions import NotFittedError
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.preprocessing import KernelCenterer
from sklearn.utils._arpack import _init_arpack_v0
from sklearn.utils._param_validation import Interval, StrOptions
from sklearn.utils.extmath import _randomized_eigsh, svd_flip
from sklearn.utils.validation import (
_check_psd_eigenvalues,
check_is_fitted,
validate_data,
)
class KernelPCA(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
"""Kernel Principal component analysis (KPCA).
Non-linear dimensionality reduction through the use of kernels [1]_, see also
:ref:`metrics`.
It uses the :func:`scipy.linalg.eigh` LAPACK implementation of the full SVD
or the :func:`scipy.sparse.linalg.eigsh` ARPACK implementation of the
truncated SVD, depending on the shape of the input data and the number of
components to extract. It can also use a randomized truncated SVD by the
method proposed in [3]_, see `eigen_solver`.
For a usage example and comparison between
Principal Components Analysis (PCA) and its kernelized version (KPCA), see
:ref:`sphx_glr_auto_examples_decomposition_plot_kernel_pca.py`.
For a usage example in denoising images using KPCA, see
:ref:`sphx_glr_auto_examples_applications_plot_digits_denoising.py`.
Read more in the :ref:`User Guide <kernel_PCA>`.
Parameters
----------
n_components : int, default=None
Number of components. If None, all non-zero components are kept.
kernel : {'linear', 'poly', 'rbf', 'sigmoid', 'cosine', 'precomputed'} \
or callable, default='linear'
Kernel used for PCA.
gamma : float, default=None
Kernel coefficient for rbf, poly and sigmoid kernels. Ignored by other
kernels. If ``gamma`` is ``None``, then it is set to ``1/n_features``.
degree : float, default=3
Degree for poly kernels. Ignored by other kernels.
coef0 : float, default=1
Independent term in poly and sigmoid kernels.
Ignored by other kernels.
kernel_params : dict, default=None
Parameters (keyword arguments) and
values for kernel passed as callable object.
Ignored by other kernels.
alpha : float, default=1.0
Hyperparameter of the ridge regression that learns the
inverse transform (when fit_inverse_transform=True).
fit_inverse_transform : bool, default=False
Learn the inverse transform for non-precomputed kernels
(i.e. learn to find the pre-image of a point). This method is based
on [2]_.
eigen_solver : {'auto', 'dense', 'arpack', 'randomized'}, \
default='auto'
Select eigensolver to use. If `n_components` is much
less than the number of training samples, randomized (or arpack to a
smaller extent) may be more efficient than the dense eigensolver.
Randomized SVD is performed according to the method of Halko et al
[3]_.
auto :
the solver is selected by a default policy based on n_samples
(the number of training samples) and `n_components`:
if the number of components to extract is less than 10 (strict) and
the number of samples is more than 200 (strict), the 'arpack'
method is enabled. Otherwise the exact full eigenvalue
decomposition is computed and optionally truncated afterwards
('dense' method).
dense :
run exact full eigenvalue decomposition calling the standard
LAPACK solver via `scipy.linalg.eigh`, and select the components
by postprocessing
arpack :
run SVD truncated to n_components calling ARPACK solver using
`scipy.sparse.linalg.eigsh`. It requires strictly
0 < n_components < n_samples
randomized :
run randomized SVD by the method of Halko et al. [3]_. The current
implementation selects eigenvalues based on their module; therefore
using this method can lead to unexpected results if the kernel is
not positive semi-definite. See also [4]_.
.. versionchanged:: 1.0
`'randomized'` was added.
tol : float, default=0
Convergence tolerance for arpack.
If 0, optimal value will be chosen by arpack.
max_iter : int, default=None
Maximum number of iterations for arpack.
If None, optimal value will be chosen by arpack.
iterated_power : int >= 0, or 'auto', default='auto'
Number of iterations for the power method computed by
svd_solver == 'randomized'. When 'auto', it is set to 7 when
        `n_components < 0.1 * min(X.shape)`, otherwise it is set to 4.
.. versionadded:: 1.0
remove_zero_eig : bool, default=False
If True, then all components with zero eigenvalues are removed, so
that the number of components in the output may be < n_components
(and sometimes even zero due to numerical instability).
When n_components is None, this parameter is ignored and components
with zero eigenvalues are removed regardless.
random_state : int, RandomState instance or None, default=None
Used when ``eigen_solver`` == 'arpack' or 'randomized'. Pass an int
for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
.. versionadded:: 0.18
copy_X : bool, default=True
If True, input X is copied and stored by the model in the `X_fit_`
attribute. If no further changes will be done to X, setting
`copy_X=False` saves memory by storing a reference.
.. versionadded:: 0.18
n_jobs : int, default=None
The number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionadded:: 0.18
Attributes
----------
eigenvalues_ : ndarray of shape (n_components,)
Eigenvalues of the centered kernel matrix in decreasing order.
If `n_components` and `remove_zero_eig` are not set,
then all values are stored.
eigenvectors_ : ndarray of shape (n_samples, n_components)
Eigenvectors of the centered kernel matrix. If `n_components` and
`remove_zero_eig` are not set, then all components are stored.
dual_coef_ : ndarray of shape (n_samples, n_features)
Inverse transform matrix. Only available when
``fit_inverse_transform`` is True.
X_transformed_fit_ : ndarray of shape (n_samples, n_components)
Projection of the fitted data on the kernel principal components.
Only available when ``fit_inverse_transform`` is True.
X_fit_ : ndarray of shape (n_samples, n_features)
The data used to fit the model. If `copy_X=False`, then `X_fit_` is
a reference. This attribute is used for the calls to transform.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
gamma_ : float
Kernel coefficient for rbf, poly and sigmoid kernels. When `gamma`
is explicitly provided, this is just the same as `gamma`. When `gamma`
is `None`, this is the actual value of kernel coefficient.
.. versionadded:: 1.3
See Also
--------
FastICA : A fast algorithm for Independent Component Analysis.
IncrementalPCA : Incremental Principal Component Analysis.
NMF : Non-Negative Matrix Factorization.
PCA : Principal Component Analysis.
SparsePCA : Sparse Principal Component Analysis.
TruncatedSVD : Dimensionality reduction using truncated SVD.
References
----------
.. [1] `Schölkopf, Bernhard, Alexander Smola, and Klaus-Robert Müller.
"Kernel principal component analysis."
International conference on artificial neural networks.
Springer, Berlin, Heidelberg, 1997.
<https://graphics.stanford.edu/courses/cs233-25-spring/ReferencedPapers/scholkopf_kernel.pdf>`_
.. [2] `Bakır, Gökhan H., Jason Weston, and Bernhard Schölkopf.
"Learning to find pre-images."
Advances in neural information processing systems 16 (2004): 449-456.
<https://papers.nips.cc/paper/2003/file/ac1ad983e08ad3304a97e147f522747e-Paper.pdf>`_
.. [3] :arxiv:`Halko, Nathan, Per-Gunnar Martinsson, and Joel A. Tropp.
"Finding structure with randomness: Probabilistic algorithms for
constructing approximate matrix decompositions."
SIAM review 53.2 (2011): 217-288. <0909.4061>`
.. [4] `Martinsson, Per-Gunnar, Vladimir Rokhlin, and Mark Tygert.
"A randomized algorithm for the decomposition of matrices."
Applied and Computational Harmonic Analysis 30.1 (2011): 47-68.
<https://www.sciencedirect.com/science/article/pii/S1063520310000242>`_
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.decomposition import KernelPCA
>>> X, _ = load_digits(return_X_y=True)
>>> transformer = KernelPCA(n_components=7, kernel='linear')
>>> X_transformed = transformer.fit_transform(X)
>>> X_transformed.shape
(1797, 7)
"""
_parameter_constraints: dict = {
"n_components": [
Interval(Integral, 1, None, closed="left"),
None,
],
"kernel": [
StrOptions({"linear", "poly", "rbf", "sigmoid", "cosine", "precomputed"}),
callable,
],
"gamma": [
Interval(Real, 0, None, closed="left"),
None,
],
"degree": [Interval(Real, 0, None, closed="left")],
"coef0": [Interval(Real, None, None, closed="neither")],
"kernel_params": [dict, None],
"alpha": [Interval(Real, 0, None, closed="left")],
"fit_inverse_transform": ["boolean"],
"eigen_solver": [StrOptions({"auto", "dense", "arpack", "randomized"})],
"tol": [Interval(Real, 0, None, closed="left")],
"max_iter": [
Interval(Integral, 1, None, closed="left"),
None,
],
"iterated_power": [
Interval(Integral, 0, None, closed="left"),
StrOptions({"auto"}),
],
"remove_zero_eig": ["boolean"],
"random_state": ["random_state"],
"copy_X": ["boolean"],
"n_jobs": [None, Integral],
}
def __init__(
self,
n_components=None,
*,
kernel="linear",
gamma=None,
degree=3,
coef0=1,
kernel_params=None,
alpha=1.0,
fit_inverse_transform=False,
eigen_solver="auto",
tol=0,
max_iter=None,
iterated_power="auto",
remove_zero_eig=False,
random_state=None,
copy_X=True,
n_jobs=None,
):
self.n_components = n_components
self.kernel = kernel
self.kernel_params = kernel_params
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.alpha = alpha
self.fit_inverse_transform = fit_inverse_transform
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.iterated_power = iterated_power
self.remove_zero_eig = remove_zero_eig
self.random_state = random_state
self.n_jobs = n_jobs
self.copy_X = copy_X
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma_, "degree": self.degree, "coef0": self.coef0}
return pairwise_kernels(
X, Y, metric=self.kernel, filter_params=True, n_jobs=self.n_jobs, **params
)
    def _fit_transform_in_place(self, K):
        """Solve the eigenproblem of the centered kernel matrix `K`, in place.
        Sets `eigenvalues_` and `eigenvectors_` (sorted in decreasing
        eigenvalue order). `K` is centered in place and returned.
        Parameters
        ----------
        K : ndarray of shape (n_samples, n_samples)
            Kernel matrix; mutated by the in-place centering.
        Returns
        -------
        K : ndarray of shape (n_samples, n_samples)
            The centered kernel matrix.
        """
        # center kernel in place
        K = self._centerer.fit(K).transform(K, copy=False)
        # adjust n_components according to user inputs
        if self.n_components is None:
            n_components = K.shape[0] # use all dimensions
        else:
            n_components = min(K.shape[0], self.n_components)
        # compute eigenvectors; "auto" picks arpack only for the
        # few-components / many-samples regime where it pays off
        if self.eigen_solver == "auto":
            if K.shape[0] > 200 and n_components < 10:
                eigen_solver = "arpack"
            else:
                eigen_solver = "dense"
        else:
            eigen_solver = self.eigen_solver
        if eigen_solver == "dense":
            # Note: subset_by_index specifies the indices of smallest/largest to return
            self.eigenvalues_, self.eigenvectors_ = eigh(
                K, subset_by_index=(K.shape[0] - n_components, K.shape[0] - 1)
            )
        elif eigen_solver == "arpack":
            # deterministic starting vector for reproducibility
            v0 = _init_arpack_v0(K.shape[0], self.random_state)
            self.eigenvalues_, self.eigenvectors_ = eigsh(
                K, n_components, which="LA", tol=self.tol, maxiter=self.max_iter, v0=v0
            )
        elif eigen_solver == "randomized":
            self.eigenvalues_, self.eigenvectors_ = _randomized_eigsh(
                K,
                n_components=n_components,
                n_iter=self.iterated_power,
                random_state=self.random_state,
                selection="module",
            )
        # make sure that the eigenvalues are ok and fix numerical issues
        self.eigenvalues_ = _check_psd_eigenvalues(
            self.eigenvalues_, enable_warnings=False
        )
        # flip eigenvectors' sign to enforce deterministic output
        self.eigenvectors_, _ = svd_flip(u=self.eigenvectors_, v=None)
        # sort eigenvectors in descending order
        indices = self.eigenvalues_.argsort()[::-1]
        self.eigenvalues_ = self.eigenvalues_[indices]
        self.eigenvectors_ = self.eigenvectors_[:, indices]
        # remove eigenvectors with a zero eigenvalue (null space) if required
        if self.remove_zero_eig or self.n_components is None:
            self.eigenvectors_ = self.eigenvectors_[:, self.eigenvalues_ > 0]
            self.eigenvalues_ = self.eigenvalues_[self.eigenvalues_ > 0]
        # Maintenance note on Eigenvectors normalization
        # ----------------------------------------------
        # there is a link between
        # the eigenvectors of K=Phi(X)'Phi(X) and the ones of Phi(X)Phi(X)'
        # if v is an eigenvector of K
        # then Phi(X)v is an eigenvector of Phi(X)Phi(X)'
        # if u is an eigenvector of Phi(X)Phi(X)'
        # then Phi(X)'u is an eigenvector of Phi(X)'Phi(X)
        #
        # At this stage our self.eigenvectors_ (the v) have norm 1, we need to scale
        # them so that eigenvectors in kernel feature space (the u) have norm=1
        # instead
        #
        # We COULD scale them here:
        # self.eigenvectors_ = self.eigenvectors_ / np.sqrt(self.eigenvalues_)
        #
        # But choose to perform that LATER when needed, in `fit()` and in
        # `transform()`.
        return K
def _fit_inverse_transform(self, X_transformed, X):
if hasattr(X, "tocsr"):
raise NotImplementedError(
"Inverse transform not implemented for sparse matrices!"
)
n_samples = X_transformed.shape[0]
K = self._get_kernel(X_transformed)
K.flat[:: n_samples + 1] += self.alpha
self.dual_coef_ = linalg.solve(K, X, assume_a="pos", overwrite_a=True)
self.X_transformed_fit_ = X_transformed
    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """Fit the model from data in X.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training vector, where `n_samples` is the number of samples
            and `n_features` is the number of features.
        y : Ignored
            Not used, present for API consistency by convention.
        Returns
        -------
        self : object
            Returns the instance itself.
        Raises
        ------
        ValueError
            If `fit_inverse_transform` is requested with a precomputed kernel.
        """
        if self.fit_inverse_transform and self.kernel == "precomputed":
            raise ValueError("Cannot fit_inverse_transform with a precomputed kernel.")
        X = validate_data(self, X, accept_sparse="csr", copy=self.copy_X)
        # Default kernel coefficient: 1 / n_features when gamma is None.
        self.gamma_ = 1 / X.shape[1] if self.gamma is None else self.gamma
        self._centerer = KernelCenterer().set_output(transform="default")
        K = self._get_kernel(X)
        # When kernel="precomputed", K is X but it's safe to perform in place operations
        # on K because a copy was made before if requested by copy_X.
        self._fit_transform_in_place(K)
        if self.fit_inverse_transform:
            # no need to use the kernel to transform X, use shortcut expression
            X_transformed = self.eigenvectors_ * np.sqrt(self.eigenvalues_)
            self._fit_inverse_transform(X_transformed, X)
        self.X_fit_ = X
        return self
    def fit_transform(self, X, y=None, **params):
        """Fit the model from data in X and transform X.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training vector, where `n_samples` is the number of samples
            and `n_features` is the number of features.
        y : Ignored
            Not used, present for API consistency by convention.
        **params : kwargs
            Parameters (keyword arguments) and values passed to
            the fit_transform instance.
        Returns
        -------
        X_new : ndarray of shape (n_samples, n_components)
            Transformed values.
        """
        self.fit(X, **params)
        # no need to use the kernel to transform X, use shortcut expression
        X_transformed = self.eigenvectors_ * np.sqrt(self.eigenvalues_)
        if self.fit_inverse_transform:
            # NOTE(review): `fit` above already called `_fit_inverse_transform`
            # with the *validated* X; this second call redoes the work with the
            # raw input. Looks redundant -- confirm before removing, since the
            # resulting `dual_coef_` could differ if validation changes X.
            self._fit_inverse_transform(X_transformed, X)
        return X_transformed
    def transform(self, X):
        """Transform X.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training vector, where `n_samples` is the number of samples
            and `n_features` is the number of features.
        Returns
        -------
        X_new : ndarray of shape (n_samples, n_components)
            Projection of X in the first principal components, where `n_samples`
            is the number of samples and `n_components` is the number of the components.
        """
        check_is_fitted(self)
        X = validate_data(self, X, accept_sparse="csr", reset=False)
        # Compute centered gram matrix between X and training data X_fit_
        K = self._centerer.transform(self._get_kernel(X, self.X_fit_))
        # scale eigenvectors (properly account for null-space for dot product);
        # dividing by sqrt(eigenvalue) gives the feature-space eigenvectors
        # unit norm (see the maintenance note in `_fit_transform_in_place`),
        # while zero-eigenvalue columns are left at zero instead of dividing
        # by zero.
        non_zeros = np.flatnonzero(self.eigenvalues_)
        scaled_alphas = np.zeros_like(self.eigenvectors_)
        scaled_alphas[:, non_zeros] = self.eigenvectors_[:, non_zeros] / np.sqrt(
            self.eigenvalues_[non_zeros]
        )
        # Project with a scalar product between K and the scaled eigenvectors
        return np.dot(K, scaled_alphas)
    def inverse_transform(self, X):
        """Transform X back to original space.
        ``inverse_transform`` approximates the inverse transformation using
        a learned pre-image. The pre-image is learned by kernel ridge
        regression of the original data on their low-dimensional representation
        vectors.
        .. note::
            :meth:`~sklearn.decomposition.KernelPCA.fit` internally uses a
            centered kernel. As the centered kernel no longer contains the
            information of the mean of kernel features, such information is
            not taken into account in reconstruction.
        .. note::
            When users want to compute inverse transformation for 'linear'
            kernel, it is recommended that they use
            :class:`~sklearn.decomposition.PCA` instead. Unlike
            :class:`~sklearn.decomposition.PCA`,
            :class:`~sklearn.decomposition.KernelPCA`'s ``inverse_transform``
            does not reconstruct the mean of data when 'linear' kernel is used
            due to the use of centered kernel.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_components)
            Training vector, where `n_samples` is the number of samples
            and `n_features` is the number of features.
        Returns
        -------
        X_original : ndarray of shape (n_samples, n_features)
            Original data, where `n_samples` is the number of samples
            and `n_features` is the number of features.
        References
        ----------
        `Bakır, Gökhan H., Jason Weston, and Bernhard Schölkopf.
        "Learning to find pre-images."
        Advances in neural information processing systems 16 (2004): 449-456.
        <https://papers.nips.cc/paper/2003/file/ac1ad983e08ad3304a97e147f522747e-Paper.pdf>`_
        """
        if not self.fit_inverse_transform:
            raise NotFittedError(
                "The fit_inverse_transform parameter was not"
                " set to True when instantiating and hence "
                "the inverse transform is not available."
            )
        # Apply the ridge coefficients learned in `_fit_inverse_transform` to
        # the kernel between the new codes and the stored training codes.
        K = self._get_kernel(X, self.X_transformed_fit_)
        return np.dot(K, self.dual_coef_)
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.sparse = True
tags.transformer_tags.preserves_dtype = ["float64", "float32"]
tags.input_tags.pairwise = self.kernel == "precomputed"
return tags
@property
def _n_features_out(self):
"""Number of transformed output features."""
return self.eigenvalues_.shape[0]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/decomposition/_sparse_pca.py | sklearn/decomposition/_sparse_pca.py | """Matrix factorization with Sparse PCA."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from numbers import Integral, Real
import numpy as np
from sklearn.base import (
BaseEstimator,
ClassNamePrefixFeaturesOutMixin,
TransformerMixin,
_fit_context,
)
from sklearn.decomposition._dict_learning import (
MiniBatchDictionaryLearning,
dict_learning,
)
from sklearn.linear_model import ridge_regression
from sklearn.utils import check_random_state
from sklearn.utils._param_validation import Interval, StrOptions
from sklearn.utils.extmath import svd_flip
from sklearn.utils.validation import check_array, check_is_fitted, validate_data
class _BaseSparsePCA(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
    """Base class for SparsePCA and MiniBatchSparsePCA.
    Provides the shared fit/transform/inverse_transform machinery; concrete
    subclasses implement `_fit` with the actual decomposition solver.
    """
    # Shared constructor-parameter constraints, checked by `_fit_context`
    # when `fit` is called.
    _parameter_constraints: dict = {
        "n_components": [None, Interval(Integral, 1, None, closed="left")],
        "alpha": [Interval(Real, 0.0, None, closed="left")],
        "ridge_alpha": [Interval(Real, 0.0, None, closed="left")],
        "max_iter": [Interval(Integral, 0, None, closed="left")],
        "tol": [Interval(Real, 0.0, None, closed="left")],
        "method": [StrOptions({"lars", "cd"})],
        "n_jobs": [Integral, None],
        "verbose": ["verbose"],
        "random_state": ["random_state"],
    }
    def __init__(
        self,
        n_components=None,
        *,
        alpha=1,
        ridge_alpha=0.01,
        max_iter=1000,
        tol=1e-8,
        method="lars",
        n_jobs=None,
        verbose=False,
        random_state=None,
    ):
        # Hyperparameters are stored as-is; validation happens at fit time
        # via `_fit_context` against `_parameter_constraints`.
        self.n_components = n_components
        self.alpha = alpha
        self.ridge_alpha = ridge_alpha
        self.max_iter = max_iter
        self.tol = tol
        self.method = method
        self.n_jobs = n_jobs
        self.verbose = verbose
        self.random_state = random_state
    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """Fit the model from data in X.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training vector, where `n_samples` is the number of samples
            and `n_features` is the number of features.
        y : Ignored
            Not used, present here for API consistency by convention.
        Returns
        -------
        self : object
            Returns the instance itself.
        """
        random_state = check_random_state(self.random_state)
        X = validate_data(self, X)
        # Center once; `mean_` is reused by transform/inverse_transform.
        self.mean_ = X.mean(axis=0)
        X = X - self.mean_
        if self.n_components is None:
            n_components = X.shape[1]
        else:
            n_components = self.n_components
        # Delegate the actual decomposition to the subclass-specific `_fit`.
        return self._fit(X, n_components, random_state)
    def transform(self, X):
        """Least Squares projection of the data onto the sparse components.
        To avoid instability issues in case the system is under-determined,
        regularization can be applied (Ridge regression) via the
        `ridge_alpha` parameter.
        Note that Sparse PCA components orthogonality is not enforced as in PCA
        hence one cannot use a simple linear projection.
        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            Test data to be transformed, must have the same number of
            features as the data used to train the model.
        Returns
        -------
        X_new : ndarray of shape (n_samples, n_components)
            Transformed data.
        """
        check_is_fitted(self)
        X = validate_data(self, X, reset=False)
        # Center with the training-set mean before projecting.
        X = X - self.mean_
        # Ridge-regularized least squares: solve for U in X ~= U @ components_.
        U = ridge_regression(
            self.components_.T, X.T, self.ridge_alpha, solver="cholesky"
        )
        return U
def inverse_transform(self, X):
"""Transform data from the latent space to the original space.
This inversion is an approximation due to the loss of information
induced by the forward decomposition.
.. versionadded:: 1.2
Parameters
----------
X : ndarray of shape (n_samples, n_components)
Data in the latent space.
Returns
-------
X_original : ndarray of shape (n_samples, n_features)
Reconstructed data in the original space.
"""
check_is_fitted(self)
X = check_array(X)
return (X @ self.components_) + self.mean_
@property
def _n_features_out(self):
"""Number of transformed output features."""
return self.components_.shape[0]
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.transformer_tags.preserves_dtype = ["float64", "float32"]
return tags
class SparsePCA(_BaseSparsePCA):
"""Sparse Principal Components Analysis (SparsePCA).
Finds the set of sparse components that can optimally reconstruct
the data. The amount of sparseness is controllable by the coefficient
of the L1 penalty, given by the parameter alpha.
Read more in the :ref:`User Guide <SparsePCA>`.
Parameters
----------
n_components : int, default=None
Number of sparse atoms to extract. If None, then ``n_components``
is set to ``n_features``.
alpha : float, default=1
Sparsity controlling parameter. Higher values lead to sparser
components.
ridge_alpha : float, default=0.01
Amount of ridge shrinkage to apply in order to improve
conditioning when calling the transform method.
max_iter : int, default=1000
Maximum number of iterations to perform.
tol : float, default=1e-8
Tolerance for the stopping condition.
method : {'lars', 'cd'}, default='lars'
Method to be used for optimization.
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs : int, default=None
Number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
U_init : ndarray of shape (n_samples, n_components), default=None
Initial values for the loadings for warm restart scenarios. Only used
if `U_init` and `V_init` are not None.
V_init : ndarray of shape (n_components, n_features), default=None
Initial values for the components for warm restart scenarios. Only used
if `U_init` and `V_init` are not None.
verbose : int or bool, default=False
Controls the verbosity; the higher, the more messages. Defaults to 0.
random_state : int, RandomState instance or None, default=None
Used during dictionary learning. Pass an int for reproducible results
across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
components_ : ndarray of shape (n_components, n_features)
Sparse components extracted from the data.
error_ : ndarray
Vector of errors at each iteration.
n_components_ : int
Estimated number of components.
.. versionadded:: 0.23
n_iter_ : int
Number of iterations run.
mean_ : ndarray of shape (n_features,)
Per-feature empirical mean, estimated from the training set.
Equal to ``X.mean(axis=0)``.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
PCA : Principal Component Analysis implementation.
MiniBatchSparsePCA : Mini batch variant of `SparsePCA` that is faster but less
accurate.
DictionaryLearning : Generic dictionary learning problem using a sparse code.
Examples
--------
>>> import numpy as np
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.decomposition import SparsePCA
>>> X, _ = make_friedman1(n_samples=200, n_features=30, random_state=0)
>>> transformer = SparsePCA(n_components=5, random_state=0)
>>> transformer.fit(X)
SparsePCA(...)
>>> X_transformed = transformer.transform(X)
>>> X_transformed.shape
(200, 5)
>>> # most values in the components_ are zero (sparsity)
>>> np.mean(transformer.components_ == 0)
np.float64(0.9666)
"""
_parameter_constraints: dict = {
**_BaseSparsePCA._parameter_constraints,
"U_init": [None, np.ndarray],
"V_init": [None, np.ndarray],
}
    def __init__(
        self,
        n_components=None,
        *,
        alpha=1,
        ridge_alpha=0.01,
        max_iter=1000,
        tol=1e-8,
        method="lars",
        n_jobs=None,
        U_init=None,
        V_init=None,
        verbose=False,
        random_state=None,
    ):
        super().__init__(
            n_components=n_components,
            alpha=alpha,
            ridge_alpha=ridge_alpha,
            max_iter=max_iter,
            tol=tol,
            method=method,
            n_jobs=n_jobs,
            verbose=verbose,
            random_state=random_state,
        )
        # Optional warm-start initializations consumed by `_fit`.
        self.U_init = U_init
        self.V_init = V_init
    def _fit(self, X, n_components, random_state):
        """Specialized `fit` for SparsePCA.
        Runs dictionary learning on `X.T`, so the sparse "code" of the
        transposed problem becomes the components of X.
        """
        # In the transposed problem the roles of code/dictionary are swapped:
        # V_init seeds the code, U_init seeds the dictionary.
        code_init = self.V_init.T if self.V_init is not None else None
        dict_init = self.U_init.T if self.U_init is not None else None
        code, dictionary, E, self.n_iter_ = dict_learning(
            X.T,
            n_components,
            alpha=self.alpha,
            tol=self.tol,
            max_iter=self.max_iter,
            method=self.method,
            n_jobs=self.n_jobs,
            verbose=self.verbose,
            random_state=random_state,
            code_init=code_init,
            dict_init=dict_init,
            return_n_iter=True,
        )
        # flip eigenvectors' sign to enforce deterministic output
        code, dictionary = svd_flip(code, dictionary, u_based_decision=True)
        self.components_ = code.T
        # Normalize each component to unit L2 norm; zero-norm rows are left
        # untouched (divide by 1) to avoid division by zero.
        components_norm = np.linalg.norm(self.components_, axis=1)[:, np.newaxis]
        components_norm[components_norm == 0] = 1
        self.components_ /= components_norm
        self.n_components_ = len(self.components_)
        self.error_ = E
        return self
class MiniBatchSparsePCA(_BaseSparsePCA):
    """Mini-batch Sparse Principal Components Analysis.

    Finds the set of sparse components that can optimally reconstruct
    the data. The amount of sparseness is controllable by the coefficient
    of the L1 penalty, given by the parameter alpha.

    For an example comparing sparse PCA to PCA, see
    :ref:`sphx_glr_auto_examples_decomposition_plot_faces_decomposition.py`

    Read more in the :ref:`User Guide <SparsePCA>`.

    Parameters
    ----------
    n_components : int, default=None
        Number of sparse atoms to extract. If None, then ``n_components``
        is set to ``n_features``.

    alpha : int, default=1
        Sparsity controlling parameter. Higher values lead to sparser
        components.

    ridge_alpha : float, default=0.01
        Amount of ridge shrinkage to apply in order to improve
        conditioning when calling the transform method.

    max_iter : int, default=1_000
        Maximum number of iterations over the complete dataset before
        stopping independently of any early stopping criterion heuristics.

        .. versionadded:: 1.2

    callback : callable, default=None
        Callable that gets invoked every five iterations.

    batch_size : int, default=3
        The number of features to take in each mini batch.

    verbose : int or bool, default=False
        Controls the verbosity; the higher, the more messages. Defaults to 0.

    shuffle : bool, default=True
        Whether to shuffle the data before splitting it in batches.

    n_jobs : int, default=None
        Number of parallel jobs to run.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    method : {'lars', 'cd'}, default='lars'
        Method to be used for optimization.
        lars: uses the least angle regression method to solve the lasso problem
        (linear_model.lars_path)
        cd: uses the coordinate descent method to compute the
        Lasso solution (linear_model.Lasso). Lars will be faster if
        the estimated components are sparse.

    random_state : int, RandomState instance or None, default=None
        Used for random shuffling when ``shuffle`` is set to ``True``,
        during online dictionary learning. Pass an int for reproducible results
        across multiple function calls.
        See :term:`Glossary <random_state>`.

    tol : float, default=1e-3
        Control early stopping based on the norm of the differences in the
        dictionary between 2 steps.

        To disable early stopping based on changes in the dictionary, set
        `tol` to 0.0.

        .. versionadded:: 1.1

    max_no_improvement : int or None, default=10
        Control early stopping based on the consecutive number of mini batches
        that does not yield an improvement on the smoothed cost function.

        To disable convergence detection based on cost function, set
        `max_no_improvement` to `None`.

        .. versionadded:: 1.1

    Attributes
    ----------
    components_ : ndarray of shape (n_components, n_features)
        Sparse components extracted from the data.

    n_components_ : int
        Estimated number of components.

        .. versionadded:: 0.23

    n_iter_ : int
        Number of iterations run.

    mean_ : ndarray of shape (n_features,)
        Per-feature empirical mean, estimated from the training set.
        Equal to ``X.mean(axis=0)``.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    DictionaryLearning : Find a dictionary that sparsely encodes data.
    IncrementalPCA : Incremental principal components analysis.
    PCA : Principal component analysis.
    SparsePCA : Sparse Principal Components Analysis.
    TruncatedSVD : Dimensionality reduction using truncated SVD.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.datasets import make_friedman1
    >>> from sklearn.decomposition import MiniBatchSparsePCA
    >>> X, _ = make_friedman1(n_samples=200, n_features=30, random_state=0)
    >>> transformer = MiniBatchSparsePCA(n_components=5, batch_size=50,
    ...                                  max_iter=10, random_state=0)
    >>> transformer.fit(X)
    MiniBatchSparsePCA(...)
    >>> X_transformed = transformer.transform(X)
    >>> X_transformed.shape
    (200, 5)
    >>> # most values in the components_ are zero (sparsity)
    >>> np.mean(transformer.components_ == 0)
    np.float64(0.9)
    """

    # Validation table: base sparse-PCA constraints plus the mini-batch
    # specific knobs (callback, batch_size, shuffle, early stopping).
    _parameter_constraints: dict = {
        **_BaseSparsePCA._parameter_constraints,
        "max_iter": [Interval(Integral, 0, None, closed="left")],
        "callback": [None, callable],
        "batch_size": [Interval(Integral, 1, None, closed="left")],
        "shuffle": ["boolean"],
        "max_no_improvement": [Interval(Integral, 0, None, closed="left"), None],
    }

    def __init__(
        self,
        n_components=None,
        *,
        alpha=1,
        ridge_alpha=0.01,
        max_iter=1_000,
        callback=None,
        batch_size=3,
        verbose=False,
        shuffle=True,
        n_jobs=None,
        method="lars",
        random_state=None,
        tol=1e-3,
        max_no_improvement=10,
    ):
        # Shared hyperparameters go to the base class; the mini-batch
        # specific ones are stored here unmodified (no validation).
        super().__init__(
            n_components=n_components,
            alpha=alpha,
            ridge_alpha=ridge_alpha,
            max_iter=max_iter,
            tol=tol,
            method=method,
            n_jobs=n_jobs,
            verbose=verbose,
            random_state=random_state,
        )
        self.callback = callback
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.max_no_improvement = max_no_improvement

    def _fit(self, X, n_components, random_state):
        """Specialized `fit` for MiniBatchSparsePCA."""
        # The sparse codes are obtained with the lasso variant matching the
        # chosen fit algorithm: "lasso_lars" or "lasso_cd".
        transform_algorithm = "lasso_" + self.method
        est = MiniBatchDictionaryLearning(
            n_components=n_components,
            alpha=self.alpha,
            max_iter=self.max_iter,
            dict_init=None,
            batch_size=self.batch_size,
            shuffle=self.shuffle,
            n_jobs=self.n_jobs,
            fit_algorithm=self.method,
            random_state=random_state,
            transform_algorithm=transform_algorithm,
            transform_alpha=self.alpha,
            verbose=self.verbose,
            callback=self.callback,
            tol=self.tol,
            max_no_improvement=self.max_no_improvement,
        )
        # Force ndarray output regardless of any global set_output config,
        # since the transformed codes become `components_` below.
        est.set_output(transform="default")
        # The dictionary-learning problem is solved on the transposed data
        # so the codes of X.T play the role of the sparse components.
        est.fit(X.T)
        self.components_, self.n_iter_ = est.transform(X.T).T, est.n_iter_

        # Normalize each component to unit L2 norm; all-zero rows are
        # divided by 1 instead, leaving them unchanged.
        components_norm = np.linalg.norm(self.components_, axis=1)[:, np.newaxis]
        components_norm[components_norm == 0] = 1
        self.components_ /= components_norm
        self.n_components_ = len(self.components_)

        return self
"""Incremental Principal Components Analysis."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from numbers import Integral
import numpy as np
from scipy import linalg, sparse
from sklearn.base import _fit_context
from sklearn.decomposition._base import _BasePCA
from sklearn.utils import gen_batches, metadata_routing
from sklearn.utils._param_validation import Interval
from sklearn.utils.extmath import _incremental_mean_and_var, svd_flip
from sklearn.utils.validation import validate_data
class IncrementalPCA(_BasePCA):
    """Incremental principal components analysis (IPCA).

    Linear dimensionality reduction using Singular Value Decomposition of
    the data, keeping only the most significant singular vectors to
    project the data to a lower dimensional space. The input data is centered
    but not scaled for each feature before applying the SVD.

    Depending on the size of the input data, this algorithm can be much more
    memory efficient than a PCA, and allows sparse input.

    This algorithm has constant memory complexity, on the order
    of ``batch_size * n_features``, enabling use of np.memmap files without
    loading the entire file into memory. For sparse matrices, the input
    is converted to dense in batches (in order to be able to subtract the
    mean) which avoids storing the entire dense matrix at any one time.

    The computational overhead of each SVD is
    ``O(batch_size * n_features ** 2)``, but only 2 * batch_size samples
    remain in memory at a time. There will be ``n_samples / batch_size`` SVD
    computations to get the principal components, versus 1 large SVD of
    complexity ``O(n_samples * n_features ** 2)`` for PCA.

    For a usage example, see
    :ref:`sphx_glr_auto_examples_decomposition_plot_incremental_pca.py`.

    Read more in the :ref:`User Guide <IncrementalPCA>`.

    .. versionadded:: 0.16

    Parameters
    ----------
    n_components : int, default=None
        Number of components to keep. If ``n_components`` is ``None``,
        then ``n_components`` is set to ``min(n_samples, n_features)``.

    whiten : bool, default=False
        When True (False by default) the ``components_`` vectors are divided
        by ``n_samples`` times ``components_`` to ensure uncorrelated outputs
        with unit component-wise variances.

        Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
        improve the predictive accuracy of the downstream estimators by
        making data respect some hard-wired assumptions.

    copy : bool, default=True
        If False, X will be overwritten. ``copy=False`` can be used to
        save memory but is unsafe for general use.

    batch_size : int, default=None
        The number of samples to use for each batch. Only used when calling
        ``fit``. If ``batch_size`` is ``None``, then ``batch_size``
        is inferred from the data and set to ``5 * n_features``, to provide a
        balance between approximation accuracy and memory consumption.

    Attributes
    ----------
    components_ : ndarray of shape (n_components, n_features)
        Principal axes in feature space, representing the directions of
        maximum variance in the data. Equivalently, the right singular
        vectors of the centered input data, parallel to its eigenvectors.
        The components are sorted by decreasing ``explained_variance_``.

    explained_variance_ : ndarray of shape (n_components,)
        Variance explained by each of the selected components.

    explained_variance_ratio_ : ndarray of shape (n_components,)
        Percentage of variance explained by each of the selected components.
        If all components are stored, the sum of explained variances is equal
        to 1.0.

    singular_values_ : ndarray of shape (n_components,)
        The singular values corresponding to each of the selected components.
        The singular values are equal to the 2-norms of the ``n_components``
        variables in the lower-dimensional space.

    mean_ : ndarray of shape (n_features,)
        Per-feature empirical mean, aggregate over calls to ``partial_fit``.

    var_ : ndarray of shape (n_features,)
        Per-feature empirical variance, aggregate over calls to
        ``partial_fit``.

    noise_variance_ : float
        The estimated noise covariance following the Probabilistic PCA model
        from Tipping and Bishop 1999. See "Pattern Recognition and
        Machine Learning" by C. Bishop, 12.2.1 p. 574 or
        http://www.miketipping.com/papers/met-mppca.pdf.

    n_components_ : int
        The estimated number of components. Relevant when
        ``n_components=None``.

    n_samples_seen_ : int
        The number of samples processed by the estimator. Will be reset on
        new calls to fit, but increments across ``partial_fit`` calls.

    batch_size_ : int
        Inferred batch size from ``batch_size``.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    PCA : Principal component analysis (PCA).
    KernelPCA : Kernel Principal component analysis (KPCA).
    SparsePCA : Sparse Principal Components Analysis (SparsePCA).
    TruncatedSVD : Dimensionality reduction using truncated SVD.

    Notes
    -----
    Implements the incremental PCA model from Ross et al. (2008) [1]_.
    This model is an extension of the Sequential Karhunen-Loeve Transform
    from Levy and Lindenbaum (2000) [2]_.

    We have specifically abstained from an optimization used by authors of both
    papers, a QR decomposition used in specific situations to reduce the
    algorithmic complexity of the SVD. The source for this technique is
    *Matrix Computations* (Golub and Van Loan 1997 [3]_).

    This technique has been omitted because it is
    advantageous only when decomposing a matrix with ``n_samples`` (rows)
    >= 5/3 * ``n_features`` (columns), and hurts the readability of the
    implemented algorithm. This would be a good opportunity for future
    optimization, if it is deemed necessary.

    References
    ----------
    .. [1] D. Ross, J. Lim, R. Lin, M. Yang. Incremental Learning for Robust
        Visual Tracking, International Journal of Computer Vision, Volume 77,
        Issue 1-3, pp. 125-141, May 2008.
        https://www.cs.toronto.edu/~dross/ivt/RossLimLinYang_ijcv.pdf

    .. [2] :doi:`A. Levy and M. Lindenbaum, Sequential Karhunen-Loeve
        Basis Extraction and its Application to Images,
        IEEE Transactions on Image Processing, Volume 9,
        Number 8, pp. 1371-1374, August 2000. <10.1109/83.855432>`

    .. [3] G. Golub and C. Van Loan. Matrix Computations, Third Edition,
        Chapter 5, Section 5.4.4, pp. 252-253, 1997.

    Examples
    --------
    >>> from sklearn.datasets import load_digits
    >>> from sklearn.decomposition import IncrementalPCA
    >>> from scipy import sparse
    >>> X, _ = load_digits(return_X_y=True)
    >>> transformer = IncrementalPCA(n_components=7, batch_size=200)
    >>> # either partially fit on smaller batches of data
    >>> transformer.partial_fit(X[:100, :])
    IncrementalPCA(batch_size=200, n_components=7)
    >>> # or let the fit function itself divide the data into batches
    >>> X_sparse = sparse.csr_matrix(X)
    >>> X_transformed = transformer.fit_transform(X_sparse)
    >>> X_transformed.shape
    (1797, 7)
    """

    # `check_input` is an implementation detail of partial_fit, not a
    # routable metadata parameter.
    __metadata_request__partial_fit = {"check_input": metadata_routing.UNUSED}

    _parameter_constraints: dict = {
        "n_components": [Interval(Integral, 1, None, closed="left"), None],
        "whiten": ["boolean"],
        "copy": ["boolean"],
        "batch_size": [Interval(Integral, 1, None, closed="left"), None],
    }

    def __init__(self, n_components=None, *, whiten=False, copy=True, batch_size=None):
        # Hyperparameters are stored as-is; validation happens at fit time.
        self.n_components = n_components
        self.whiten = whiten
        self.copy = copy
        self.batch_size = batch_size

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """Fit the model with X, using minibatches of size batch_size.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples and
            `n_features` is the number of features.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        # Reset all incremental state so that `fit` starts from scratch
        # even if `fit`/`partial_fit` was called before.
        self.components_ = None
        self.n_samples_seen_ = 0
        self.mean_ = 0.0
        self.var_ = 0.0
        self.singular_values_ = None
        self.explained_variance_ = None
        self.explained_variance_ratio_ = None
        self.noise_variance_ = None

        X = validate_data(
            self,
            X,
            accept_sparse=["csr", "csc", "lil"],
            copy=self.copy,
            dtype=[np.float64, np.float32],
            force_writeable=True,
        )
        n_samples, n_features = X.shape

        if self.batch_size is None:
            self.batch_size_ = 5 * n_features
        else:
            self.batch_size_ = self.batch_size

        # `min_batch_size` guarantees each batch holds at least
        # `n_components` samples, which `partial_fit` requires.
        for batch in gen_batches(
            n_samples, self.batch_size_, min_batch_size=self.n_components or 0
        ):
            X_batch = X[batch]
            # Densify one batch at a time so the full dense matrix never
            # needs to exist in memory.
            if sparse.issparse(X_batch):
                X_batch = X_batch.toarray()
            self.partial_fit(X_batch, check_input=False)

        return self

    @_fit_context(prefer_skip_nested_validation=True)
    def partial_fit(self, X, y=None, check_input=True):
        """Incremental fit with X. All of X is processed as a single batch.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples and
            `n_features` is the number of features.

        y : Ignored
            Not used, present for API consistency by convention.

        check_input : bool, default=True
            Run check_array on X.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        first_pass = not hasattr(self, "components_")

        if check_input:
            if sparse.issparse(X):
                raise TypeError(
                    "IncrementalPCA.partial_fit does not support "
                    "sparse input. Either convert data to dense "
                    "or use IncrementalPCA.fit to do so in batches."
                )
            X = validate_data(
                self,
                X,
                copy=self.copy,
                dtype=[np.float64, np.float32],
                force_writeable=True,
                reset=first_pass,
            )
        n_samples, n_features = X.shape
        if first_pass:
            self.components_ = None

        # Resolve the effective number of components for this batch and
        # validate it against the batch shape and any previous state.
        if self.n_components is None:
            if self.components_ is None:
                self.n_components_ = min(n_samples, n_features)
            else:
                self.n_components_ = self.components_.shape[0]
        elif not self.n_components <= n_features:
            raise ValueError(
                "n_components=%r invalid for n_features=%d, need "
                "more rows than columns for IncrementalPCA "
                "processing" % (self.n_components, n_features)
            )
        elif self.n_components > n_samples and first_pass:
            raise ValueError(
                f"n_components={self.n_components} must be less or equal to "
                f"the batch number of samples {n_samples} for the first "
                "partial_fit call."
            )
        else:
            self.n_components_ = self.n_components

        if (self.components_ is not None) and (
            self.components_.shape[0] != self.n_components_
        ):
            raise ValueError(
                "Number of input features has changed from %i "
                "to %i between calls to partial_fit! Try "
                "setting n_components to a fixed value."
                % (self.components_.shape[0], self.n_components_)
            )
        # This is the first partial_fit
        if not hasattr(self, "n_samples_seen_"):
            self.n_samples_seen_ = 0
            self.mean_ = 0.0
            self.var_ = 0.0

        # Update stats - they are 0 if this is the first step
        col_mean, col_var, n_total_samples = _incremental_mean_and_var(
            X,
            last_mean=self.mean_,
            last_variance=self.var_,
            last_sample_count=np.repeat(self.n_samples_seen_, X.shape[1]),
        )
        n_total_samples = n_total_samples[0]

        # Whitening
        if self.n_samples_seen_ == 0:
            # If it is the first step, simply whiten X
            X -= col_mean
        else:
            col_batch_mean = np.mean(X, axis=0)
            X -= col_batch_mean
            # Build matrix of combined previous basis and new data.
            # The stacked matrix of (scaled previous singular vectors, the
            # centered batch, one mean-correction row) implements the
            # incremental SVD update described in Ross et al. (2008),
            # referenced in the class docstring.
            mean_correction = np.sqrt(
                (self.n_samples_seen_ / n_total_samples) * n_samples
            ) * (self.mean_ - col_batch_mean)
            X = np.vstack(
                (
                    self.singular_values_.reshape((-1, 1)) * self.components_,
                    X,
                    mean_correction,
                )
            )

        U, S, Vt = linalg.svd(X, full_matrices=False, check_finite=False)
        # Sign-flip for deterministic output across runs.
        U, Vt = svd_flip(U, Vt, u_based_decision=False)
        explained_variance = S**2 / (n_total_samples - 1)
        explained_variance_ratio = S**2 / np.sum(col_var * n_total_samples)

        self.n_samples_seen_ = n_total_samples
        self.components_ = Vt[: self.n_components_]
        self.singular_values_ = S[: self.n_components_]
        self.mean_ = col_mean
        self.var_ = col_var
        self.explained_variance_ = explained_variance[: self.n_components_]
        self.explained_variance_ratio_ = explained_variance_ratio[: self.n_components_]
        # we already checked `self.n_components <= n_samples` above
        if self.n_components_ not in (n_samples, n_features):
            self.noise_variance_ = explained_variance[self.n_components_ :].mean()
        else:
            self.noise_variance_ = 0.0
        return self

    def transform(self, X):
        """Apply dimensionality reduction to X.

        X is projected on the first principal components previously extracted
        from a training set, using minibatches of size batch_size if X is
        sparse.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            New data, where `n_samples` is the number of samples
            and `n_features` is the number of features.

        Returns
        -------
        X_new : ndarray of shape (n_samples, n_components)
            Projection of X in the first principal components.

        Examples
        --------
        >>> import numpy as np
        >>> from sklearn.decomposition import IncrementalPCA
        >>> X = np.array([[-1, -1], [-2, -1], [-3, -2],
        ...               [1, 1], [2, 1], [3, 2]])
        >>> ipca = IncrementalPCA(n_components=2, batch_size=3)
        >>> ipca.fit(X)
        IncrementalPCA(batch_size=3, n_components=2)
        >>> ipca.transform(X) # doctest: +SKIP
        """
        if sparse.issparse(X):
            # Densify and project one batch at a time to keep memory usage
            # bounded, mirroring the batching strategy of `fit`.
            n_samples = X.shape[0]
            output = []
            for batch in gen_batches(
                n_samples, self.batch_size_, min_batch_size=self.n_components or 0
            ):
                output.append(super().transform(X[batch].toarray()))
            return np.vstack(output)
        else:
            return super().transform(X)

    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        # Beware that fit accepts sparse data but partial_fit doesn't
        tags.input_tags.sparse = True
        return tags
"""
=============================================================
Online Latent Dirichlet Allocation with variational inference
=============================================================
This implementation is modified from Matthew D. Hoffman's onlineldavb code
Link: https://github.com/blei-lab/onlineldavb
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from numbers import Integral, Real
import numpy as np
import scipy.sparse as sp
from joblib import effective_n_jobs
from scipy.special import gammaln, logsumexp
from sklearn.base import (
BaseEstimator,
ClassNamePrefixFeaturesOutMixin,
TransformerMixin,
_fit_context,
)
from sklearn.decomposition._online_lda_fast import (
_dirichlet_expectation_1d as cy_dirichlet_expectation_1d,
)
from sklearn.decomposition._online_lda_fast import _dirichlet_expectation_2d
from sklearn.decomposition._online_lda_fast import mean_change as cy_mean_change
from sklearn.utils import check_random_state, gen_batches, gen_even_slices
from sklearn.utils._param_validation import Interval, StrOptions
from sklearn.utils.parallel import Parallel, delayed
from sklearn.utils.validation import check_is_fitted, check_non_negative, validate_data
EPS = np.finfo(float).eps
def _update_doc_distribution(
    X,
    exp_topic_word_distr,
    doc_topic_prior,
    max_doc_update_iter,
    mean_change_tol,
    cal_sstats,
    random_state,
):
    """E-step: update document-topic distribution.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Document word matrix.

    exp_topic_word_distr : ndarray of shape (n_topics, n_features)
        Exponential value of expectation of log topic word distribution.
        In the literature, this is `exp(E[log(beta)])`.

    doc_topic_prior : float
        Prior of document topic distribution `theta`.

    max_doc_update_iter : int
        Max number of iterations for updating document topic distribution in
        the E-step.

    mean_change_tol : float
        Stopping tolerance for updating document topic distribution in E-step.

    cal_sstats : bool
        Parameter that indicate to calculate sufficient statistics or not.
        Set `cal_sstats` to `True` when we need to run M-step.

    random_state : RandomState instance or None
        Parameter that indicate how to initialize document topic distribution.
        Set `random_state` to None will initialize document topic distribution
        to a constant number.

    Returns
    -------
    (doc_topic_distr, suff_stats) :
        `doc_topic_distr` is unnormalized topic distribution for each document.
        In the literature, this is `gamma`. we can calculate `E[log(theta)]`
        from it.
        `suff_stats` is expected sufficient statistics for the M-step.
        When `cal_sstats == False`, this will be None.
    """
    is_sparse_x = sp.issparse(X)
    n_samples, n_features = X.shape
    n_topics = exp_topic_word_distr.shape[0]

    # Initialize gamma either randomly (Gamma(100, 0.01) draws) or to a
    # constant, depending on whether a random_state was supplied.
    if random_state:
        doc_topic_distr = random_state.gamma(100.0, 0.01, (n_samples, n_topics)).astype(
            X.dtype, copy=False
        )
    else:
        doc_topic_distr = np.ones((n_samples, n_topics), dtype=X.dtype)

    # In the literature, this is `exp(E[log(theta)])`
    exp_doc_topic = np.exp(_dirichlet_expectation_2d(doc_topic_distr))

    # diff on `component_` (only calculate it when `cal_diff` is True)
    suff_stats = (
        np.zeros(exp_topic_word_distr.shape, dtype=X.dtype) if cal_sstats else None
    )

    if is_sparse_x:
        X_data = X.data
        X_indices = X.indices
        X_indptr = X.indptr

    # These cython functions are called in a nested loop on usually very small arrays
    # (length=n_topics). In that case, finding the appropriate signature of the
    # fused-typed function can be more costly than its execution, hence the dispatch
    # is done outside of the loop.
    ctype = "float" if X.dtype == np.float32 else "double"
    mean_change = cy_mean_change[ctype]
    dirichlet_expectation_1d = cy_dirichlet_expectation_1d[ctype]
    eps = np.finfo(X.dtype).eps

    # Per-document fixed-point iteration on gamma_d, using only each
    # document's non-zero word counts.
    for idx_d in range(n_samples):
        if is_sparse_x:
            ids = X_indices[X_indptr[idx_d] : X_indptr[idx_d + 1]]
            cnts = X_data[X_indptr[idx_d] : X_indptr[idx_d + 1]]
        else:
            ids = np.nonzero(X[idx_d, :])[0]
            cnts = X[idx_d, ids]

        doc_topic_d = doc_topic_distr[idx_d, :]
        # The next one is a copy, since the inner loop overwrites it.
        exp_doc_topic_d = exp_doc_topic[idx_d, :].copy()
        exp_topic_word_d = exp_topic_word_distr[:, ids]

        # Iterate between `doc_topic_d` and `norm_phi` until convergence
        for _ in range(0, max_doc_update_iter):
            last_d = doc_topic_d
            # The optimal phi_{dwk} is proportional to
            # exp(E[log(theta_{dk})]) * exp(E[log(beta_{dw})]).
            norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + eps
            doc_topic_d = exp_doc_topic_d * np.dot(cnts / norm_phi, exp_topic_word_d.T)
            # Note: adds doc_topic_prior to doc_topic_d, in-place.
            dirichlet_expectation_1d(doc_topic_d, doc_topic_prior, exp_doc_topic_d)

            if mean_change(last_d, doc_topic_d) < mean_change_tol:
                break
        doc_topic_distr[idx_d, :] = doc_topic_d

        # Contribution of document d to the expected sufficient
        # statistics for the M step.
        if cal_sstats:
            norm_phi = np.dot(exp_doc_topic_d, exp_topic_word_d) + eps
            suff_stats[:, ids] += np.outer(exp_doc_topic_d, cnts / norm_phi)

    return (doc_topic_distr, suff_stats)
class LatentDirichletAllocation(
ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator
):
"""Latent Dirichlet Allocation with online variational Bayes algorithm.
The implementation is based on [1]_ and [2]_.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <LatentDirichletAllocation>`.
Parameters
----------
n_components : int, default=10
Number of topics.
.. versionchanged:: 0.19
``n_topics`` was renamed to ``n_components``
doc_topic_prior : float, default=None
Prior of document topic distribution `theta`. If the value is None,
defaults to `1 / n_components`.
In [1]_, this is called `alpha`.
topic_word_prior : float, default=None
Prior of topic word distribution `beta`. If the value is None, defaults
to `1 / n_components`.
In [1]_, this is called `eta`.
learning_method : {'batch', 'online'}, default='batch'
Method used to update `_component`. Only used in :meth:`fit` method.
In general, if the data size is large, the online update will be much
faster than the batch update.
Valid options:
- 'batch': Batch variational Bayes method. Use all training data in each EM
update. Old `components_` will be overwritten in each iteration.
- 'online': Online variational Bayes method. In each EM update, use mini-batch
of training data to update the ``components_`` variable incrementally. The
learning rate is controlled by the ``learning_decay`` and the
``learning_offset`` parameters.
.. versionchanged:: 0.20
The default learning method is now ``"batch"``.
learning_decay : float, default=0.7
It is a parameter that control learning rate in the online learning
method. The value should be set between (0.5, 1.0] to guarantee
asymptotic convergence. When the value is 0.0 and batch_size is
``n_samples``, the update method is same as batch learning. In the
literature, this is called kappa.
learning_offset : float, default=10.0
A (positive) parameter that downweights early iterations in online
learning. It should be greater than 1.0. In the literature, this is
called tau_0.
max_iter : int, default=10
The maximum number of passes over the training data (aka epochs).
It only impacts the behavior in the :meth:`fit` method, and not the
:meth:`partial_fit` method.
batch_size : int, default=128
Number of documents to use in each EM iteration. Only used in online
learning.
evaluate_every : int, default=-1
How often to evaluate perplexity. Only used in `fit` method.
set it to 0 or negative number to not evaluate perplexity in
training at all. Evaluating perplexity can help you check convergence
in training process, but it will also increase total training time.
Evaluating perplexity in every iteration might increase training time
up to two-fold.
total_samples : int, default=1e6
Total number of documents. Only used in the :meth:`partial_fit` method.
perp_tol : float, default=1e-1
Perplexity tolerance. Only used when ``evaluate_every`` is greater than 0.
mean_change_tol : float, default=1e-3
Stopping tolerance for updating document topic distribution in E-step.
max_doc_update_iter : int, default=100
Max number of iterations for updating document topic distribution in
the E-step.
n_jobs : int, default=None
The number of jobs to use in the E-step.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : int, default=0
Verbosity level.
random_state : int, RandomState instance or None, default=None
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
components_ : ndarray of shape (n_components, n_features)
Variational parameters for topic word distribution. Since the complete
conditional for topic word distribution is a Dirichlet,
``components_[i, j]`` can be viewed as pseudocount that represents the
number of times word `j` was assigned to topic `i`.
It can also be viewed as distribution over the words for each topic
after normalization:
``model.components_ / model.components_.sum(axis=1)[:, np.newaxis]``.
exp_dirichlet_component_ : ndarray of shape (n_components, n_features)
Exponential value of expectation of log topic word distribution.
In the literature, this is `exp(E[log(beta)])`.
n_batch_iter_ : int
Number of iterations of the EM step.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_iter_ : int
Number of passes over the dataset.
bound_ : float
Final perplexity score on training set.
doc_topic_prior_ : float
Prior of document topic distribution `theta`. If the value is None,
it is `1 / n_components`.
random_state_ : RandomState instance
RandomState instance that is generated either from a seed, the random
number generator or by `np.random`.
topic_word_prior_ : float
Prior of topic word distribution `beta`. If the value is None, it is
`1 / n_components`.
See Also
--------
sklearn.discriminant_analysis.LinearDiscriminantAnalysis:
A classifier with a linear decision boundary, generated by fitting
class conditional densities to the data and using Bayes' rule.
References
----------
.. [1] "Online Learning for Latent Dirichlet Allocation", Matthew D.
Hoffman, David M. Blei, Francis Bach, 2010.
https://github.com/blei-lab/onlineldavb
.. [2] "Stochastic Variational Inference", Matthew D. Hoffman,
David M. Blei, Chong Wang, John Paisley, 2013.
https://jmlr.org/papers/volume14/hoffman13a/hoffman13a.pdf
Examples
--------
>>> from sklearn.decomposition import LatentDirichletAllocation
>>> from sklearn.datasets import make_multilabel_classification
>>> # This produces a feature matrix of token counts, similar to what
>>> # CountVectorizer would produce on text.
>>> X, _ = make_multilabel_classification(random_state=0)
>>> lda = LatentDirichletAllocation(n_components=5,
... random_state=0)
>>> lda.fit(X)
LatentDirichletAllocation(...)
>>> # get topics for some given samples:
>>> lda.transform(X[-2:])
array([[0.00360392, 0.25499205, 0.0036211 , 0.64236448, 0.09541846],
[0.15297572, 0.00362644, 0.44412786, 0.39568399, 0.003586 ]])
"""
_parameter_constraints: dict = {
"n_components": [Interval(Integral, 0, None, closed="neither")],
"doc_topic_prior": [None, Interval(Real, 0, 1, closed="both")],
"topic_word_prior": [None, Interval(Real, 0, 1, closed="both")],
"learning_method": [StrOptions({"batch", "online"})],
"learning_decay": [Interval(Real, 0, 1, closed="both")],
"learning_offset": [Interval(Real, 1.0, None, closed="left")],
"max_iter": [Interval(Integral, 0, None, closed="left")],
"batch_size": [Interval(Integral, 0, None, closed="neither")],
"evaluate_every": [Interval(Integral, None, None, closed="neither")],
"total_samples": [Interval(Real, 0, None, closed="neither")],
"perp_tol": [Interval(Real, 0, None, closed="left")],
"mean_change_tol": [Interval(Real, 0, None, closed="left")],
"max_doc_update_iter": [Interval(Integral, 0, None, closed="left")],
"n_jobs": [None, Integral],
"verbose": ["verbose"],
"random_state": ["random_state"],
}
    def __init__(
        self,
        n_components=10,
        *,
        doc_topic_prior=None,
        topic_word_prior=None,
        learning_method="batch",
        learning_decay=0.7,
        learning_offset=10.0,
        max_iter=10,
        batch_size=128,
        evaluate_every=-1,
        total_samples=1e6,
        perp_tol=1e-1,
        mean_change_tol=1e-3,
        max_doc_update_iter=100,
        n_jobs=None,
        verbose=0,
        random_state=None,
    ):
        # Per scikit-learn convention, __init__ only stores the constructor
        # arguments verbatim; validation (via `_parameter_constraints`) and
        # derived state happen at fit time.
        self.n_components = n_components
        self.doc_topic_prior = doc_topic_prior
        self.topic_word_prior = topic_word_prior
        self.learning_method = learning_method
        self.learning_decay = learning_decay
        self.learning_offset = learning_offset
        self.max_iter = max_iter
        self.batch_size = batch_size
        self.evaluate_every = evaluate_every
        self.total_samples = total_samples
        self.perp_tol = perp_tol
        self.mean_change_tol = mean_change_tol
        self.max_doc_update_iter = max_doc_update_iter
        self.n_jobs = n_jobs
        self.verbose = verbose
        self.random_state = random_state
def _init_latent_vars(self, n_features, dtype=np.float64):
"""Initialize latent variables."""
self.random_state_ = check_random_state(self.random_state)
self.n_batch_iter_ = 1
self.n_iter_ = 0
if self.doc_topic_prior is None:
self.doc_topic_prior_ = 1.0 / self.n_components
else:
self.doc_topic_prior_ = self.doc_topic_prior
if self.topic_word_prior is None:
self.topic_word_prior_ = 1.0 / self.n_components
else:
self.topic_word_prior_ = self.topic_word_prior
init_gamma = 100.0
init_var = 1.0 / init_gamma
# In the literature, this is called `lambda`
self.components_ = self.random_state_.gamma(
init_gamma, init_var, (self.n_components, n_features)
).astype(dtype, copy=False)
# In the literature, this is `exp(E[log(beta)])`
self.exp_dirichlet_component_ = np.exp(
_dirichlet_expectation_2d(self.components_)
)
    def _e_step(self, X, cal_sstats, random_init, parallel=None):
        """E-step in EM update.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Document word matrix.
        cal_sstats : bool
            Whether to calculate sufficient statistics. Set ``cal_sstats``
            to True when an M-step will follow.
        random_init : bool
            Whether to initialize the document topic distribution randomly
            in the E-step. Set it to True in training steps.
        parallel : joblib.Parallel, default=None
            Pre-initialized instance of joblib.Parallel.
        Returns
        -------
        (doc_topic_distr, suff_stats) :
            `doc_topic_distr` is unnormalized topic distribution for each
            document. In the literature, this is called `gamma`.
            `suff_stats` is expected sufficient statistics for the M-step.
            When `cal_sstats == False`, it will be None.
        """
        # Run e-step in parallel. Only training passes (random_init=True) use
        # the fitted random state, keeping transform/score deterministic.
        random_state = self.random_state_ if random_init else None
        # TODO: make Parallel._effective_n_jobs public instead?
        n_jobs = effective_n_jobs(self.n_jobs)
        if parallel is None:
            parallel = Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1))
        # Each worker updates `gamma` for one even slice of the documents.
        results = parallel(
            delayed(_update_doc_distribution)(
                X[idx_slice, :],
                self.exp_dirichlet_component_,
                self.doc_topic_prior_,
                self.max_doc_update_iter,
                self.mean_change_tol,
                cal_sstats,
                random_state,
            )
            for idx_slice in gen_even_slices(X.shape[0], n_jobs)
        )
        # merge result
        doc_topics, sstats_list = zip(*results)
        doc_topic_distr = np.vstack(doc_topics)
        if cal_sstats:
            # This step finishes computing the sufficient statistics for the
            # M-step.
            suff_stats = np.zeros(self.components_.shape, dtype=self.components_.dtype)
            for sstats in sstats_list:
                suff_stats += sstats
            suff_stats *= self.exp_dirichlet_component_
        else:
            suff_stats = None
        return (doc_topic_distr, suff_stats)
    def _em_step(self, X, total_samples, batch_update, parallel=None):
        """EM update for 1 iteration.
        Updates ``components_`` in place, by batch VB or online VB.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Document word matrix.
        total_samples : int
            Total number of documents. It is only used when
            batch_update is `False`.
        batch_update : bool
            Parameter that controls updating method.
            `True` for batch learning, `False` for online learning.
        parallel : joblib.Parallel, default=None
            Pre-initialized instance of joblib.Parallel
        Returns
        -------
        None
            This method only mutates estimator state (``components_``,
            ``exp_dirichlet_component_`` and ``n_batch_iter_``).
        """
        # E-step
        _, suff_stats = self._e_step(
            X, cal_sstats=True, random_init=True, parallel=parallel
        )
        # M-step
        if batch_update:
            self.components_ = self.topic_word_prior_ + suff_stats
        else:
            # online update
            # In the literature, the weight is `rho`
            weight = np.power(
                self.learning_offset + self.n_batch_iter_, -self.learning_decay
            )
            # Interpolate between the current topics and the mini-batch
            # estimate, rescaled to the full corpus size.
            doc_ratio = float(total_samples) / X.shape[0]
            self.components_ *= 1 - weight
            self.components_ += weight * (
                self.topic_word_prior_ + doc_ratio * suff_stats
            )
        # update `components_` related variables
        self.exp_dirichlet_component_ = np.exp(
            _dirichlet_expectation_2d(self.components_)
        )
        self.n_batch_iter_ += 1
        return
    def __sklearn_tags__(self):
        # Advertise estimator capabilities to scikit-learn's common checks:
        # input must be non-negative (word counts), sparse input is accepted,
        # and float32/float64 dtypes are preserved by transform.
        tags = super().__sklearn_tags__()
        tags.input_tags.positive_only = True
        tags.input_tags.sparse = True
        tags.transformer_tags.preserves_dtype = ["float32", "float64"]
        return tags
    def _check_non_neg_array(self, X, reset_n_features, whom):
        """Validate X and make sure it contains no negative values.
        Parameters
        ----------
        X : array-like or sparse matrix
            Input to validate.
        reset_n_features : bool
            Whether to (re)set ``n_features_in_``. Pass True on a first
            fit, False afterwards.
        whom : str
            Caller name used in the non-negativity error message.
        Returns
        -------
        X : {ndarray, sparse matrix}
            The validated input.
        """
        # After fitting, coerce X to the dtype of the fitted `components_`
        # so the E-step does not mix precisions.
        dtype = [np.float64, np.float32] if reset_n_features else self.components_.dtype
        X = validate_data(
            self,
            X,
            reset=reset_n_features,
            accept_sparse="csr",
            dtype=dtype,
        )
        check_non_negative(X, whom)
        return X
    @_fit_context(prefer_skip_nested_validation=True)
    def partial_fit(self, X, y=None):
        """Online VB with Mini-Batch update.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Document word matrix.
        y : Ignored
            Not used, present here for API consistency by convention.
        Returns
        -------
        self
            Partially fitted estimator.
        """
        # Latent variables exist only after the first call.
        first_time = not hasattr(self, "components_")
        X = self._check_non_neg_array(
            X, reset_n_features=first_time, whom="LatentDirichletAllocation.partial_fit"
        )
        n_samples, n_features = X.shape
        batch_size = self.batch_size
        # initialize parameters or check
        if first_time:
            self._init_latent_vars(n_features, dtype=X.dtype)
        if n_features != self.components_.shape[1]:
            raise ValueError(
                "The provided data has %d dimensions while "
                "the model was trained with feature size %d."
                % (n_features, self.components_.shape[1])
            )
        n_jobs = effective_n_jobs(self.n_jobs)
        # Reuse one worker pool for all mini-batches; each batch performs one
        # online EM step scaled by `total_samples` (assumed corpus size).
        with Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) as parallel:
            for idx_slice in gen_batches(n_samples, batch_size):
                self._em_step(
                    X[idx_slice, :],
                    total_samples=self.total_samples,
                    batch_update=False,
                    parallel=parallel,
                )
        return self
    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """Learn model for the data X with variational Bayes method.
        When `learning_method` is 'online', use mini-batch update.
        Otherwise, use batch update.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Document word matrix.
        y : Ignored
            Not used, present here for API consistency by convention.
        Returns
        -------
        self
            Fitted estimator.
        """
        X = self._check_non_neg_array(
            X, reset_n_features=True, whom="LatentDirichletAllocation.fit"
        )
        n_samples, n_features = X.shape
        max_iter = self.max_iter
        evaluate_every = self.evaluate_every
        learning_method = self.learning_method
        batch_size = self.batch_size
        # initialize parameters
        self._init_latent_vars(n_features, dtype=X.dtype)
        # change to perplexity later
        last_bound = None
        n_jobs = effective_n_jobs(self.n_jobs)
        # Reuse one Parallel pool across all EM iterations.
        with Parallel(n_jobs=n_jobs, verbose=max(0, self.verbose - 1)) as parallel:
            for i in range(max_iter):
                if learning_method == "online":
                    # One online EM step per mini-batch, scaled to n_samples.
                    for idx_slice in gen_batches(n_samples, batch_size):
                        self._em_step(
                            X[idx_slice, :],
                            total_samples=n_samples,
                            batch_update=False,
                            parallel=parallel,
                        )
                else:
                    # batch update
                    self._em_step(
                        X, total_samples=n_samples, batch_update=True, parallel=parallel
                    )
                # check perplexity
                if evaluate_every > 0 and (i + 1) % evaluate_every == 0:
                    doc_topics_distr, _ = self._e_step(
                        X, cal_sstats=False, random_init=False, parallel=parallel
                    )
                    bound = self._perplexity_precomp_distr(
                        X, doc_topics_distr, sub_sampling=False
                    )
                    if self.verbose:
                        print(
                            "iteration: %d of max_iter: %d, perplexity: %.4f"
                            % (i + 1, max_iter, bound)
                        )
                    # Early stop once perplexity change drops below perp_tol.
                    if last_bound and abs(last_bound - bound) < self.perp_tol:
                        break
                    last_bound = bound
                elif self.verbose:
                    print("iteration: %d of max_iter: %d" % (i + 1, max_iter))
                self.n_iter_ += 1
            # calculate final perplexity value on train set
            doc_topics_distr, _ = self._e_step(
                X, cal_sstats=False, random_init=False, parallel=parallel
            )
            self.bound_ = self._perplexity_precomp_distr(
                X, doc_topics_distr, sub_sampling=False
            )
        return self
def _unnormalized_transform(self, X):
"""Transform data X according to fitted model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
Returns
-------
doc_topic_distr : ndarray of shape (n_samples, n_components)
Document topic distribution for X.
"""
doc_topic_distr, _ = self._e_step(X, cal_sstats=False, random_init=False)
return doc_topic_distr
def transform(self, X, *, normalize=True):
"""Transform data X according to the fitted model.
.. versionchanged:: 0.18
`doc_topic_distr` is now normalized.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
normalize : bool, default=True
Whether to normalize the document topic distribution.
Returns
-------
doc_topic_distr : ndarray of shape (n_samples, n_components)
Document topic distribution for X.
"""
check_is_fitted(self)
X = self._check_non_neg_array(
X, reset_n_features=False, whom="LatentDirichletAllocation.transform"
)
doc_topic_distr = self._unnormalized_transform(X)
if normalize:
doc_topic_distr /= doc_topic_distr.sum(axis=1)[:, np.newaxis]
return doc_topic_distr
def fit_transform(self, X, y=None, *, normalize=True):
"""
Fit to data, then transform it.
Fits transformer to `X` and `y` and returns a transformed version of `X`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input samples.
y : array-like of shape (n_samples,) or (n_samples, n_outputs), \
default=None
Target values (None for unsupervised transformations).
normalize : bool, default=True
Whether to normalize the document topic distribution in `transform`.
Returns
-------
X_new : ndarray array of shape (n_samples, n_components)
Transformed array.
"""
return self.fit(X, y).transform(X, normalize=normalize)
    def _approx_bound(self, X, doc_topic_distr, sub_sampling):
        """Estimate the variational bound.
        Estimate the variational bound over "all documents" using only the
        documents passed in as X. Since log-likelihood of each word cannot
        be computed directly, we use this bound to estimate it.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Document word matrix.
        doc_topic_distr : ndarray of shape (n_samples, n_components)
            Document topic distribution. In the literature, this is called
            gamma.
        sub_sampling : bool, default=False
            Compensate for subsampling of documents.
            It is used when calculating the bound in online learning.
        Returns
        -------
        score : float
            Variational lower bound (ELBO) estimate.
        """
        def _loglikelihood(prior, distr, dirichlet_distr, size):
            # E[log p(. | prior) - log q(. | distr)] for one Dirichlet
            # family; reused for both the gamma and the lambda terms.
            score = np.sum((prior - distr) * dirichlet_distr)
            score += np.sum(gammaln(distr) - gammaln(prior))
            score += np.sum(gammaln(prior * size) - gammaln(np.sum(distr, 1)))
            return score
        is_sparse_x = sp.issparse(X)
        n_samples, n_components = doc_topic_distr.shape
        n_features = self.components_.shape[1]
        score = 0
        dirichlet_doc_topic = _dirichlet_expectation_2d(doc_topic_distr)
        dirichlet_component_ = _dirichlet_expectation_2d(self.components_)
        doc_topic_prior = self.doc_topic_prior_
        topic_word_prior = self.topic_word_prior_
        if is_sparse_x:
            X_data = X.data
            X_indices = X.indices
            X_indptr = X.indptr
        # E[log p(docs | theta, beta)]
        for idx_d in range(0, n_samples):
            # Gather the non-zero word ids and counts of document idx_d.
            if is_sparse_x:
                ids = X_indices[X_indptr[idx_d] : X_indptr[idx_d + 1]]
                cnts = X_data[X_indptr[idx_d] : X_indptr[idx_d + 1]]
            else:
                ids = np.nonzero(X[idx_d, :])[0]
                cnts = X[idx_d, ids]
            temp = (
                dirichlet_doc_topic[idx_d, :, np.newaxis] + dirichlet_component_[:, ids]
            )
            norm_phi = logsumexp(temp, axis=0)
            score += np.dot(cnts, norm_phi)
        # compute E[log p(theta | alpha) - log q(theta | gamma)]
        score += _loglikelihood(
            doc_topic_prior, doc_topic_distr, dirichlet_doc_topic, self.n_components
        )
        # Compensate for the subsampling of the population of documents
        if sub_sampling:
            doc_ratio = float(self.total_samples) / n_samples
            score *= doc_ratio
        # E[log p(beta | eta) - log q (beta | lambda)]
        score += _loglikelihood(
            topic_word_prior, self.components_, dirichlet_component_, n_features
        )
        return score
def score(self, X, y=None):
"""Calculate approximate log-likelihood as score.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
score : float
Use approximate bound as score.
"""
check_is_fitted(self)
X = self._check_non_neg_array(
X, reset_n_features=False, whom="LatentDirichletAllocation.score"
)
doc_topic_distr = self._unnormalized_transform(X)
score = self._approx_bound(X, doc_topic_distr, sub_sampling=False)
return score
def _perplexity_precomp_distr(self, X, doc_topic_distr=None, sub_sampling=False):
"""Calculate approximate perplexity for data X with ability to accept
precomputed doc_topic_distr
Perplexity is defined as exp(-1. * log-likelihood per word)
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Document word matrix.
doc_topic_distr : ndarray of shape (n_samples, n_components), \
default=None
Document topic distribution.
If it is None, it will be generated by applying transform on X.
Returns
-------
score : float
Perplexity score.
"""
if doc_topic_distr is None:
doc_topic_distr = self._unnormalized_transform(X)
else:
n_samples, n_components = doc_topic_distr.shape
if n_samples != X.shape[0]:
raise ValueError(
"Number of samples in X and doc_topic_distr do not match."
)
if n_components != self.n_components:
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/decomposition/_fastica.py | sklearn/decomposition/_fastica.py | """
Python implementation of the fast ICA algorithms.
Reference: Tables 8.3 and 8.4 page 196 in the book:
Independent Component Analysis, by Hyvarinen et al.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from numbers import Integral, Real
import numpy as np
from scipy import linalg
from sklearn.base import (
BaseEstimator,
ClassNamePrefixFeaturesOutMixin,
TransformerMixin,
_fit_context,
)
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils import as_float_array, check_array, check_random_state
from sklearn.utils._param_validation import (
Interval,
Options,
StrOptions,
validate_params,
)
from sklearn.utils.validation import check_is_fitted, validate_data
# Public API of this module.
__all__ = ["FastICA", "fastica"]
def _gs_decorrelation(w, W, j):
"""
Orthonormalize w wrt the first j rows of W.
Parameters
----------
w : ndarray of shape (n,)
Array to be orthogonalized
W : ndarray of shape (p, n)
Null space definition
j : int < p
The no of (from the first) rows of Null space W wrt which w is
orthogonalized.
Notes
-----
Assumes that W is orthogonal
w changed in place
"""
w -= np.linalg.multi_dot([w, W[:j].T, W[:j]])
return w
def _sym_decorrelation(W):
"""Symmetric decorrelation
i.e. W <- (W * W.T) ^{-1/2} * W
"""
s, u = linalg.eigh(np.dot(W, W.T))
# Avoid sqrt of negative values because of rounding errors. Note that
# np.sqrt(tiny) is larger than tiny and therefore this clipping also
# prevents division by zero in the next step.
s = np.clip(s, a_min=np.finfo(W.dtype).tiny, a_max=None)
# u (resp. s) contains the eigenvectors (resp. square roots of
# the eigenvalues) of W * W.T
return np.linalg.multi_dot([u * (1.0 / np.sqrt(s)), u.T, W])
def _ica_def(X, tol, g, fun_args, max_iter, w_init):
    """Deflationary FastICA: estimate one component at a time.

    Each row of the unmixing matrix is found by a fixed-point iteration
    and then orthogonalized (Gram-Schmidt) against the rows already
    extracted. Used internally by FastICA.
    """
    n_components = w_init.shape[0]
    W = np.zeros((n_components, n_components), dtype=X.dtype)
    iter_counts = []
    # `comp` indexes the component currently being extracted.
    for comp in range(n_components):
        w = w_init[comp, :].copy()
        w /= np.sqrt((w**2).sum())
        for it in range(max_iter):
            gwtx, g_wtx = g(np.dot(w.T, X), fun_args)
            w_new = (X * gwtx).mean(axis=1) - g_wtx.mean() * w
            _gs_decorrelation(w_new, W, comp)
            w_new /= np.sqrt((w_new**2).sum())
            # Converged when the new direction is (anti-)parallel to the old.
            lim = np.abs(np.abs((w_new * w).sum()) - 1)
            w = w_new
            if lim < tol:
                break
        iter_counts.append(it + 1)
        W[comp, :] = w
    return W, max(iter_counts)
def _ica_par(X, tol, g, fun_args, max_iter, w_init):
    """Parallel (symmetric) FastICA main loop.

    All rows of the unmixing matrix are updated simultaneously and then
    symmetrically decorrelated. Used internally by FastICA.
    """
    W = _sym_decorrelation(w_init)
    del w_init
    n_samples = float(X.shape[1])
    for n_iter in range(max_iter):
        gwtx, g_wtx = g(np.dot(W, X), fun_args)
        W_next = _sym_decorrelation(
            np.dot(gwtx, X.T) / n_samples - g_wtx[:, np.newaxis] * W
        )
        del gwtx, g_wtx
        # Convergence test: |diag(W_next W.T)| -> 1, i.e. every row stays
        # (anti-)parallel to its previous value. einsum extracts that
        # diagonal without forming the full product, and the builtin
        # max/abs are faster than their numpy counterparts here.
        lim = max(abs(abs(np.einsum("ij,ij->i", W_next, W)) - 1))
        W = W_next
        if lim < tol:
            break
    else:
        warnings.warn(
            (
                "FastICA did not converge. Consider increasing "
                "tolerance or the maximum number of iterations."
            ),
            ConvergenceWarning,
        )
    return W, n_iter + 1
# Some standard non-linear functions.
# XXX: these should be optimized, as they can be a bottleneck.
def _logcosh(x, fun_args=None):
alpha = fun_args.get("alpha", 1.0) # comment it out?
x *= alpha
gx = np.tanh(x, x) # apply the tanh inplace
g_x = np.empty(x.shape[0], dtype=x.dtype)
# XXX compute in chunks to avoid extra allocation
for i, gx_i in enumerate(gx): # please don't vectorize.
g_x[i] = (alpha * (1 - gx_i**2)).mean()
return gx, g_x
def _exp(x, fun_args):
exp = np.exp(-(x**2) / 2)
gx = x * exp
g_x = (1 - x**2) * exp
return gx, g_x.mean(axis=-1)
def _cube(x, fun_args):
return x**3, (3 * x**2).mean(axis=-1)
@validate_params(
    {
        "X": ["array-like"],
        "return_X_mean": ["boolean"],
        "compute_sources": ["boolean"],
        "return_n_iter": ["boolean"],
    },
    prefer_skip_nested_validation=False,
)
def fastica(
    X,
    n_components=None,
    *,
    algorithm="parallel",
    whiten="unit-variance",
    fun="logcosh",
    fun_args=None,
    max_iter=200,
    tol=1e-04,
    w_init=None,
    whiten_solver="svd",
    random_state=None,
    return_X_mean=False,
    compute_sources=True,
    return_n_iter=False,
):
    """Perform Fast Independent Component Analysis.
    The implementation is based on [1]_.
    Read more in the :ref:`User Guide <ICA>`.
    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Training vector, where `n_samples` is the number of samples and
        `n_features` is the number of features.
    n_components : int, default=None
        Number of components to use. If None is passed, all are used.
    algorithm : {'parallel', 'deflation'}, default='parallel'
        Specify which algorithm to use for FastICA.
    whiten : str or bool, default='unit-variance'
        Specify the whitening strategy to use.
        - If 'arbitrary-variance', a whitening with variance
          arbitrary is used.
        - If 'unit-variance', the whitening matrix is rescaled to ensure that
          each recovered source has unit variance.
        - If False, the data is already considered to be whitened, and no
          whitening is performed.
        .. versionchanged:: 1.3
            The default value of `whiten` changed to 'unit-variance' in 1.3.
    fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh'
        The functional form of the G function used in the
        approximation to neg-entropy. Could be either 'logcosh', 'exp',
        or 'cube'.
        You can also provide your own function. It should return a tuple
        containing the value of the function, and of its derivative, in the
        point. The derivative should be averaged along its last dimension.
        Example::
            def my_g(x):
                return x ** 3, (3 * x ** 2).mean(axis=-1)
    fun_args : dict, default=None
        Arguments to send to the functional form.
        If empty or None and if fun='logcosh', fun_args will take value
        {'alpha' : 1.0}.
    max_iter : int, default=200
        Maximum number of iterations to perform.
    tol : float, default=1e-4
        A positive scalar giving the tolerance at which the
        un-mixing matrix is considered to have converged.
    w_init : ndarray of shape (n_components, n_components), default=None
        Initial un-mixing array. If `w_init=None`, then an array of values
        drawn from a normal distribution is used.
    whiten_solver : {"eigh", "svd"}, default="svd"
        The solver to use for whitening.
        - "svd" is more stable numerically if the problem is degenerate, and
          often faster when `n_samples <= n_features`.
        - "eigh" is generally more memory efficient when
          `n_samples >= n_features`, and can be faster when
          `n_samples >= 50 * n_features`.
        .. versionadded:: 1.2
    random_state : int, RandomState instance or None, default=None
        Used to initialize ``w_init`` when not specified, with a
        normal distribution. Pass an int, for reproducible results
        across multiple function calls.
        See :term:`Glossary <random_state>`.
    return_X_mean : bool, default=False
        If True, X_mean is returned too.
    compute_sources : bool, default=True
        If False, sources are not computed, but only the rotation matrix.
        This can save memory when working with big data. Defaults to True.
    return_n_iter : bool, default=False
        Whether or not to return the number of iterations.
    Returns
    -------
    K : ndarray of shape (n_components, n_features) or None
        If whiten is 'True', K is the pre-whitening matrix that projects data
        onto the first n_components principal components. If whiten is 'False',
        K is 'None'.
    W : ndarray of shape (n_components, n_components)
        The square matrix that unmixes the data after whitening.
        The mixing matrix is the pseudo-inverse of matrix ``W K``
        if K is not None, else it is the inverse of W.
    S : ndarray of shape (n_samples, n_components) or None
        Estimated source matrix.
    X_mean : ndarray of shape (n_features,)
        The mean over features. Returned only if return_X_mean is True.
    n_iter : int
        If the algorithm is "deflation", n_iter is the
        maximum number of iterations run across all components. Else
        they are just the number of iterations taken to converge. This is
        returned only when return_n_iter is set to `True`.
    Notes
    -----
    The data matrix X is considered to be a linear combination of
    non-Gaussian (independent) components i.e. X = AS where columns of S
    contain the independent components and A is a linear mixing
    matrix. In short ICA attempts to "un-mix" the data by estimating an
    un-mixing matrix W where ``S = W K X.``
    While FastICA was proposed to estimate as many sources
    as features, it is possible to estimate less by setting
    n_components < n_features. In this case K is not a square matrix
    and the estimated A is the pseudo-inverse of ``W K``.
    This implementation was originally made for data of shape
    [n_features, n_samples]. Now the input is transposed
    before the algorithm is applied. This makes it slightly
    faster for Fortran-ordered input.
    References
    ----------
    .. [1] A. Hyvarinen and E. Oja, "Fast Independent Component Analysis",
        Algorithms and Applications, Neural Networks, 13(4-5), 2000,
        pp. 411-430.
    Examples
    --------
    >>> from sklearn.datasets import load_digits
    >>> from sklearn.decomposition import fastica
    >>> X, _ = load_digits(return_X_y=True)
    >>> K, W, S = fastica(X, n_components=7, random_state=0, whiten='unit-variance')
    >>> K.shape
    (7, 64)
    >>> W.shape
    (7, 7)
    >>> S.shape
    (1797, 7)
    """
    est = FastICA(
        n_components=n_components,
        algorithm=algorithm,
        whiten=whiten,
        fun=fun,
        fun_args=fun_args,
        max_iter=max_iter,
        tol=tol,
        w_init=w_init,
        whiten_solver=whiten_solver,
        random_state=random_state,
    )
    # `fit` is bypassed so source computation can be skipped
    # (`compute_sources=False`); validate the parameters explicitly since
    # the `_fit_context` decorator on `fit`/`fit_transform` is not used.
    est._validate_params()
    S = est._fit_transform(X, compute_sources=compute_sources)
    if est.whiten in ["unit-variance", "arbitrary-variance"]:
        K = est.whitening_
        X_mean = est.mean_
    else:
        K = None
        X_mean = None
    returned_values = [K, est._unmixing, S]
    if return_X_mean:
        returned_values.append(X_mean)
    if return_n_iter:
        returned_values.append(est.n_iter_)
    return returned_values
class FastICA(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
"""FastICA: a fast algorithm for Independent Component Analysis.
The implementation is based on [1]_.
Read more in the :ref:`User Guide <ICA>`.
Parameters
----------
n_components : int, default=None
Number of components to use. If None is passed, all are used.
algorithm : {'parallel', 'deflation'}, default='parallel'
Specify which algorithm to use for FastICA.
whiten : str or bool, default='unit-variance'
Specify the whitening strategy to use.
- If 'arbitrary-variance', a whitening with variance
arbitrary is used.
- If 'unit-variance', the whitening matrix is rescaled to ensure that
each recovered source has unit variance.
- If False, the data is already considered to be whitened, and no
whitening is performed.
.. versionchanged:: 1.3
The default value of `whiten` changed to 'unit-variance' in 1.3.
fun : {'logcosh', 'exp', 'cube'} or callable, default='logcosh'
The functional form of the G function used in the
approximation to neg-entropy. Could be either 'logcosh', 'exp',
or 'cube'.
You can also provide your own function. It should return a tuple
containing the value of the function, and of its derivative, in the
point. The derivative should be averaged along its last dimension.
Example::
def my_g(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
fun_args : dict, default=None
Arguments to send to the functional form.
If empty or None and if fun='logcosh', fun_args will take value
{'alpha' : 1.0}.
max_iter : int, default=200
Maximum number of iterations during fit.
tol : float, default=1e-4
A positive scalar giving the tolerance at which the
un-mixing matrix is considered to have converged.
w_init : array-like of shape (n_components, n_components), default=None
Initial un-mixing array. If `w_init=None`, then an array of values
drawn from a normal distribution is used.
whiten_solver : {"eigh", "svd"}, default="svd"
The solver to use for whitening.
- "svd" is more stable numerically if the problem is degenerate, and
often faster when `n_samples <= n_features`.
- "eigh" is generally more memory efficient when
`n_samples >= n_features`, and can be faster when
`n_samples >= 50 * n_features`.
.. versionadded:: 1.2
random_state : int, RandomState instance or None, default=None
Used to initialize ``w_init`` when not specified, with a
normal distribution. Pass an int, for reproducible results
across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
components_ : ndarray of shape (n_components, n_features)
The linear operator to apply to the data to get the independent
sources. This is equal to the unmixing matrix when ``whiten`` is
False, and equal to ``np.dot(unmixing_matrix, self.whitening_)`` when
``whiten`` is True.
mixing_ : ndarray of shape (n_features, n_components)
The pseudo-inverse of ``components_``. It is the linear operator
that maps independent sources to the data.
mean_ : ndarray of shape(n_features,)
The mean over features. Only set if `self.whiten` is True.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_iter_ : int
If the algorithm is "deflation", n_iter is the
maximum number of iterations run across all components. Else
they are just the number of iterations taken to converge.
whitening_ : ndarray of shape (n_components, n_features)
Only set if whiten is 'True'. This is the pre-whitening matrix
that projects data onto the first `n_components` principal components.
See Also
--------
PCA : Principal component analysis (PCA).
IncrementalPCA : Incremental principal components analysis (IPCA).
KernelPCA : Kernel Principal component analysis (KPCA).
MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
SparsePCA : Sparse Principal Components Analysis (SparsePCA).
References
----------
.. [1] A. Hyvarinen and E. Oja, Independent Component Analysis:
Algorithms and Applications, Neural Networks, 13(4-5), 2000,
pp. 411-430.
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.decomposition import FastICA
>>> X, _ = load_digits(return_X_y=True)
>>> transformer = FastICA(n_components=7,
... random_state=0,
... whiten='unit-variance')
>>> X_transformed = transformer.fit_transform(X)
>>> X_transformed.shape
(1797, 7)
"""
_parameter_constraints: dict = {
"n_components": [Interval(Integral, 1, None, closed="left"), None],
"algorithm": [StrOptions({"parallel", "deflation"})],
"whiten": [
StrOptions({"arbitrary-variance", "unit-variance"}),
Options(bool, {False}),
],
"fun": [StrOptions({"logcosh", "exp", "cube"}), callable],
"fun_args": [dict, None],
"max_iter": [Interval(Integral, 1, None, closed="left")],
"tol": [Interval(Real, 0.0, None, closed="left")],
"w_init": ["array-like", None],
"whiten_solver": [StrOptions({"eigh", "svd"})],
"random_state": ["random_state"],
}
    def __init__(
        self,
        n_components=None,
        *,
        algorithm="parallel",
        whiten="unit-variance",
        fun="logcosh",
        fun_args=None,
        max_iter=200,
        tol=1e-4,
        w_init=None,
        whiten_solver="svd",
        random_state=None,
    ):
        super().__init__()
        # Per scikit-learn convention, store the constructor arguments
        # verbatim; validation is deferred to fit time.
        self.n_components = n_components
        self.algorithm = algorithm
        self.whiten = whiten
        self.fun = fun
        self.fun_args = fun_args
        self.max_iter = max_iter
        self.tol = tol
        self.w_init = w_init
        self.whiten_solver = whiten_solver
        self.random_state = random_state
    def _fit_transform(self, X, compute_sources=False):
        """Fit the model.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples
            and `n_features` is the number of features.
        compute_sources : bool, default=False
            If False, sources are not computed but only the rotation matrix.
            This can save memory when working with big data. Defaults to False.
        Returns
        -------
        S : ndarray of shape (n_samples, n_components) or None
            Sources matrix. `None` if `compute_sources` is `False`.
        """
        # Work on the transposed data (n_features, n_samples). A copy is
        # requested only when whitening will later center it in place.
        XT = validate_data(
            self,
            X,
            copy=self.whiten,
            dtype=[np.float64, np.float32],
            ensure_min_samples=2,
        ).T
        fun_args = {} if self.fun_args is None else self.fun_args
        random_state = check_random_state(self.random_state)
        alpha = fun_args.get("alpha", 1.0)
        if not 1 <= alpha <= 2:
            raise ValueError("alpha must be in [1,2]")
        # Resolve the contrast (nonlinearity) function.
        if self.fun == "logcosh":
            g = _logcosh
        elif self.fun == "exp":
            g = _exp
        elif self.fun == "cube":
            g = _cube
        elif callable(self.fun):
            def g(x, fun_args):
                return self.fun(x, **fun_args)
        n_features, n_samples = XT.shape
        n_components = self.n_components
        if not self.whiten and n_components is not None:
            n_components = None
            warnings.warn("Ignoring n_components with whiten=False.")
        if n_components is None:
            n_components = min(n_samples, n_features)
        if n_components > min(n_samples, n_features):
            n_components = min(n_samples, n_features)
            warnings.warn(
                "n_components is too large: it will be set to %s" % n_components
            )
        if self.whiten:
            # Centering the features of X
            X_mean = XT.mean(axis=-1)
            XT -= X_mean[:, np.newaxis]
            # Whitening and preprocessing by PCA
            if self.whiten_solver == "eigh":
                # Faster when num_samples >> n_features
                # NOTE(review): `X` is the caller's (uncentered) array while
                # `XT` was centered in place above; the product still equals
                # the scatter matrix XT @ XT.T because each centered row of
                # XT sums to zero, cancelling the mean cross-term.
                d, u = linalg.eigh(XT.dot(X))
                sort_indices = np.argsort(d)[::-1]
                eps = np.finfo(d.dtype).eps * 10
                degenerate_idx = d < eps
                if np.any(degenerate_idx):
                    warnings.warn(
                        "There are some small singular values, using "
                        "whiten_solver = 'svd' might lead to more "
                        "accurate results."
                    )
                d[degenerate_idx] = eps  # For numerical issues
                np.sqrt(d, out=d)
                d, u = d[sort_indices], u[:, sort_indices]
            elif self.whiten_solver == "svd":
                u, d = linalg.svd(XT, full_matrices=False, check_finite=False)[:2]
            # Give consistent eigenvectors for both svd solvers
            u *= np.sign(u[0])
            K = (u / d).T[:n_components]  # see (6.33) p.140
            del u, d
            X1 = np.dot(K, XT)
            # see (13.6) p.267 Here X1 is white and data
            # in X has been projected onto a subspace by PCA
            X1 *= np.sqrt(n_samples)
        else:
            # X must be casted to floats to avoid typing issues with numpy
            # 2.0 and the line below
            X1 = as_float_array(XT, copy=False)  # copy has been taken care of
        w_init = self.w_init
        if w_init is None:
            w_init = np.asarray(
                random_state.normal(size=(n_components, n_components)), dtype=X1.dtype
            )
        else:
            w_init = np.asarray(w_init)
            if w_init.shape != (n_components, n_components):
                raise ValueError(
                    "w_init has invalid shape -- should be %(shape)s"
                    % {"shape": (n_components, n_components)}
                )
        kwargs = {
            "tol": self.tol,
            "g": g,
            "fun_args": fun_args,
            "max_iter": self.max_iter,
            "w_init": w_init,
        }
        if self.algorithm == "parallel":
            W, n_iter = _ica_par(X1, **kwargs)
        elif self.algorithm == "deflation":
            W, n_iter = _ica_def(X1, **kwargs)
        del X1
        self.n_iter_ = n_iter
        if compute_sources:
            if self.whiten:
                S = np.linalg.multi_dot([W, K, XT]).T
            else:
                S = np.dot(W, XT).T
        else:
            S = None
        if self.whiten:
            if self.whiten == "unit-variance":
                # Rescale so every recovered source has unit variance; the
                # unmixing matrix W is rescaled accordingly.
                if not compute_sources:
                    S = np.linalg.multi_dot([W, K, XT]).T
                S_std = np.std(S, axis=0, keepdims=True)
                S /= S_std
                W /= S_std.T
            self.components_ = np.dot(W, K)
            self.mean_ = X_mean
            self.whitening_ = K
        else:
            self.components_ = W
        self.mixing_ = linalg.pinv(self.components_, check_finite=False)
        self._unmixing = W
        return S
@_fit_context(prefer_skip_nested_validation=True)
def fit_transform(self, X, y=None):
    """Fit the model to X and return the estimated sources.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Training data, where `n_samples` is the number of samples
        and `n_features` is the number of features.

    y : Ignored
        Not used, present for API consistency by convention.

    Returns
    -------
    X_new : ndarray of shape (n_samples, n_components)
        Estimated sources obtained by transforming the data with the
        estimated unmixing matrix.
    """
    # `compute_sources=True` makes the single fitting pass also return
    # the estimated sources.
    sources = self._fit_transform(X, compute_sources=True)
    return sources
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None):
    """Estimate the unmixing matrix from the training data.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Training data, where `n_samples` is the number of samples
        and `n_features` is the number of features.

    y : Ignored
        Not used, present for API consistency by convention.

    Returns
    -------
    self : object
        Returns the instance itself.
    """
    # The sources themselves are not needed when only fitting, so their
    # computation is skipped.
    self._fit_transform(X, compute_sources=False)
    return self
def transform(self, X, copy=True):
    """Apply the learned unmixing matrix to recover the sources from X.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Data to transform, where `n_samples` is the number of samples
        and `n_features` is the number of features.

    copy : bool, default=True
        If False, data passed to fit can be overwritten. Defaults to True.

    Returns
    -------
    X_new : ndarray of shape (n_samples, n_components)
        Estimated sources obtained by transforming the data with the
        estimated unmixing matrix.
    """
    check_is_fitted(self)

    # A copy is only needed when whitening, because that is the only
    # path below that mutates the validated array (mean subtraction).
    data = validate_data(
        self,
        X,
        copy=(copy and self.whiten),
        dtype=[np.float64, np.float32],
        reset=False,
    )
    if self.whiten:
        data -= self.mean_
    return data @ self.components_.T
def inverse_transform(self, X, copy=True):
    """Map the sources back to the data space (apply the mixing matrix).

    Parameters
    ----------
    X : array-like of shape (n_samples, n_components)
        Sources, where `n_samples` is the number of samples
        and `n_components` is the number of components.

    copy : bool, default=True
        If False, data passed to fit are overwritten. Defaults to True.

    Returns
    -------
    X_original : ndarray of shape (n_samples, n_features)
        Reconstructed data obtained with the mixing matrix.
    """
    check_is_fitted(self)

    sources = check_array(
        X, copy=(copy and self.whiten), dtype=[np.float64, np.float32]
    )
    reconstructed = sources @ self.mixing_.T
    if self.whiten:
        # Add back the per-feature mean that `transform` removed.
        reconstructed += self.mean_
    return reconstructed
@property
def _n_features_out(self):
    """Number of transformed output features."""
    # One output feature per row of the unmixing matrix:
    # ``components_`` has shape (n_components, n_features).
    return self.components_.shape[0]
def __sklearn_tags__(self):
    # Advertise that transform preserves float32/float64 input dtypes
    # instead of always upcasting to float64.
    tags = super().__sklearn_tags__()
    tags.transformer_tags.preserves_dtype = ["float64", "float32"]
    return tags
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/decomposition/__init__.py | sklearn/decomposition/__init__.py | """Matrix decomposition algorithms.
These include PCA, NMF, ICA, and more. Most of the algorithms of this module can be
regarded as dimensionality reduction techniques.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from sklearn.decomposition._dict_learning import (
DictionaryLearning,
MiniBatchDictionaryLearning,
SparseCoder,
dict_learning,
dict_learning_online,
sparse_encode,
)
from sklearn.decomposition._factor_analysis import FactorAnalysis
from sklearn.decomposition._fastica import FastICA, fastica
from sklearn.decomposition._incremental_pca import IncrementalPCA
from sklearn.decomposition._kernel_pca import KernelPCA
from sklearn.decomposition._lda import LatentDirichletAllocation
from sklearn.decomposition._nmf import NMF, MiniBatchNMF, non_negative_factorization
from sklearn.decomposition._pca import PCA
from sklearn.decomposition._sparse_pca import MiniBatchSparsePCA, SparsePCA
from sklearn.decomposition._truncated_svd import TruncatedSVD
from sklearn.utils.extmath import randomized_svd
# Names re-exported as the public API of the decomposition subpackage.
__all__ = [
    "NMF",
    "PCA",
    "DictionaryLearning",
    "FactorAnalysis",
    "FastICA",
    "IncrementalPCA",
    "KernelPCA",
    "LatentDirichletAllocation",
    "MiniBatchDictionaryLearning",
    "MiniBatchNMF",
    "MiniBatchSparsePCA",
    "SparseCoder",
    "SparsePCA",
    "TruncatedSVD",
    "dict_learning",
    "dict_learning_online",
    "fastica",
    "non_negative_factorization",
    "randomized_svd",
    "sparse_encode",
]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/decomposition/_pca.py | sklearn/decomposition/_pca.py | """Principal Component Analysis."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from math import lgamma, log, sqrt
from numbers import Integral, Real
import numpy as np
from scipy import linalg
from scipy.sparse import issparse
from scipy.sparse.linalg import svds
from sklearn.base import _fit_context
from sklearn.decomposition._base import _BasePCA
from sklearn.utils import check_random_state
from sklearn.utils._arpack import _init_arpack_v0
from sklearn.utils._array_api import device, get_namespace
from sklearn.utils._param_validation import Interval, RealNotInt, StrOptions
from sklearn.utils.extmath import _randomized_svd, fast_logdet, svd_flip
from sklearn.utils.sparsefuncs import _implicit_column_offset, mean_variance_axis
from sklearn.utils.validation import check_is_fitted, validate_data
def _assess_dimension(spectrum, rank, n_samples):
    """Compute the log-likelihood of a rank ``rank`` dataset.

    The dataset is assumed to be embedded in Gaussian noise of shape
    (n, dimf) having spectrum ``spectrum``. This implements the method of
    T. P. Minka.

    Parameters
    ----------
    spectrum : ndarray of shape (n_features,)
        Data spectrum (eigenvalues sorted in decreasing order; the
        pairwise ``spectrum[i] - spectrum[j]`` differences below must be
        positive for the logarithms to be defined).

    rank : int
        Tested rank value. It should be strictly lower than n_features,
        otherwise the method isn't specified (division by zero in equation
        (31) from the paper).

    n_samples : int
        Number of samples.

    Returns
    -------
    ll : float
        The log-likelihood.

    References
    ----------
    This implements the method of `Thomas P. Minka:
    Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
    <https://proceedings.neurips.cc/paper/2000/file/7503cfacd12053d309b6bed5c89de212-Paper.pdf>`_
    """
    xp, _ = get_namespace(spectrum)

    n_features = spectrum.shape[0]
    if not 1 <= rank < n_features:
        raise ValueError("the tested rank should be in [1, n_features - 1]")

    eps = 1e-15

    if spectrum[rank - 1] < eps:
        # When the tested rank is associated with a small eigenvalue, there's
        # no point in computing the log-likelihood: it's going to be very
        # small and won't be the max anyway. Also, it can lead to numerical
        # issues below when computing pa, in particular in log((spectrum[i] -
        # spectrum[j]) because this will take the log of something very small.
        return -xp.inf

    # pu: log-prior term over the eigenvector basis (per Minka, 2000).
    pu = -rank * log(2.0)
    for i in range(1, rank + 1):
        pu += (
            lgamma((n_features - i + 1) / 2.0) - log(xp.pi) * (n_features - i + 1) / 2.0
        )

    # pl: likelihood contribution of the `rank` retained eigenvalues.
    pl = xp.sum(xp.log(spectrum[:rank]))
    pl = -pl * n_samples / 2.0

    # v: noise variance estimate, i.e. the mean of the discarded
    # eigenvalues (floored at eps for numerical safety).
    v = max(eps, xp.sum(spectrum[rank:]) / (n_features - rank))
    pv = -log(v) * n_samples * (n_features - rank) / 2.0

    # m: number of free parameters of the rank-`rank` model.
    m = n_features * rank - rank * (rank + 1.0) / 2.0
    pp = log(2.0 * xp.pi) * (m + rank) / 2.0

    # pa: Laplace-approximation (Hessian) term accumulated over eigenvalue
    # pairs; discarded eigenvalues are replaced by the noise estimate v.
    pa = 0.0
    spectrum_ = xp.asarray(spectrum, copy=True)
    spectrum_[rank:n_features] = v
    for i in range(rank):
        for j in range(i + 1, spectrum.shape[0]):
            pa += log(
                (spectrum[i] - spectrum[j]) * (1.0 / spectrum_[j] - 1.0 / spectrum_[i])
            ) + log(n_samples)

    ll = pu + pl + pv + pp - pa / 2.0 - rank * log(n_samples) / 2.0

    return ll
def _infer_dimension(spectrum, n_samples):
    """Infer the dimensionality of a dataset from its spectrum.

    Evaluates Minka's log-likelihood for every candidate rank and returns
    the maximizer. The returned value will be in [1, n_features - 1].
    """
    xp, _ = get_namespace(spectrum)
    n_features = spectrum.shape[0]
    log_likelihoods = xp.empty_like(spectrum)
    # Rank 0 is excluded from the search: never return n_components == 0.
    log_likelihoods[0] = -xp.inf
    for candidate_rank in range(1, n_features):
        log_likelihoods[candidate_rank] = _assess_dimension(
            spectrum, candidate_rank, n_samples
        )
    return xp.argmax(log_likelihoods)
class PCA(_BasePCA):
"""Principal component analysis (PCA).
Linear dimensionality reduction using Singular Value Decomposition of the
data to project it to a lower dimensional space. The input data is centered
but not scaled for each feature before applying the SVD.
It uses the LAPACK implementation of the full SVD or a randomized truncated
SVD by the method of Halko et al. 2009, depending on the shape of the input
data and the number of components to extract.
With sparse inputs, the ARPACK implementation of the truncated SVD can be
used (i.e. through :func:`scipy.sparse.linalg.svds`). Alternatively, one
may consider :class:`TruncatedSVD` where the data are not centered.
Notice that this class only supports sparse inputs for some solvers such as
"arpack" and "covariance_eigh". See :class:`TruncatedSVD` for an
alternative with sparse data.
For a usage example, see
:ref:`sphx_glr_auto_examples_decomposition_plot_pca_iris.py`
Read more in the :ref:`User Guide <PCA>`.
Parameters
----------
n_components : int, float or 'mle', default=None
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
If ``n_components == 'mle'`` and ``svd_solver == 'full'``, Minka's
MLE is used to guess the dimension. Use of ``n_components == 'mle'``
will interpret ``svd_solver == 'auto'`` as ``svd_solver == 'full'``.
If ``0 < n_components < 1`` and ``svd_solver == 'full'``, select the
number of components such that the amount of variance that needs to be
explained is greater than the percentage specified by n_components.
If ``svd_solver == 'arpack'``, the number of components must be
strictly less than the minimum of n_features and n_samples.
Hence, the None case results in::
n_components == min(n_samples, n_features) - 1
copy : bool, default=True
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, default=False
When True (False by default) the `components_` vectors are multiplied
by the square root of n_samples and then divided by the singular values
to ensure uncorrelated outputs with unit component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
svd_solver : {'auto', 'full', 'covariance_eigh', 'arpack', 'randomized'},\
default='auto'
"auto" :
The solver is selected by a default 'auto' policy based on `X.shape` and
`n_components`: if the input data has fewer than 1000 features and
more than 10 times as many samples, then the "covariance_eigh"
solver is used. Otherwise, if the input data is larger than 500x500
and the number of components to extract is lower than 80% of the
smallest dimension of the data, then the more efficient
"randomized" method is selected. Otherwise the exact "full" SVD is
computed and optionally truncated afterwards.
"full" :
Run exact full SVD calling the standard LAPACK solver via
`scipy.linalg.svd` and select the components by postprocessing
"covariance_eigh" :
Precompute the covariance matrix (on centered data), run a
classical eigenvalue decomposition on the covariance matrix
typically using LAPACK and select the components by postprocessing.
This solver is very efficient for n_samples >> n_features and small
n_features. It is, however, not tractable otherwise for large
n_features (large memory footprint required to materialize the
covariance matrix). Also note that compared to the "full" solver,
this solver effectively doubles the condition number and is
therefore less numerically stable (e.g. on input data with a large
range of singular values).
"arpack" :
Run SVD truncated to `n_components` calling ARPACK solver via
`scipy.sparse.linalg.svds`. It requires strictly
`0 < n_components < min(X.shape)`
"randomized" :
Run randomized SVD by the method of Halko et al.
.. versionadded:: 0.18.0
.. versionchanged:: 1.5
Added the 'covariance_eigh' solver.
tol : float, default=0.0
Tolerance for singular values computed by svd_solver == 'arpack'.
Must be of range [0.0, infinity).
.. versionadded:: 0.18.0
iterated_power : int or 'auto', default='auto'
Number of iterations for the power method computed by
svd_solver == 'randomized'.
Must be of range [0, infinity).
.. versionadded:: 0.18.0
n_oversamples : int, default=10
This parameter is only relevant when `svd_solver="randomized"`.
It corresponds to the additional number of random vectors to sample the
range of `X` so as to ensure proper conditioning. See
:func:`~sklearn.utils.extmath.randomized_svd` for more details.
.. versionadded:: 1.1
power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto'
Power iteration normalizer for randomized SVD solver.
Not used by ARPACK. See :func:`~sklearn.utils.extmath.randomized_svd`
for more details.
.. versionadded:: 1.1
random_state : int, RandomState instance or None, default=None
Used when the 'arpack' or 'randomized' solvers are used. Pass an int
for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
.. versionadded:: 0.18.0
Attributes
----------
components_ : ndarray of shape (n_components, n_features)
Principal axes in feature space, representing the directions of
maximum variance in the data. Equivalently, the right singular
vectors of the centered input data, parallel to its eigenvectors.
The components are sorted by decreasing ``explained_variance_``.
explained_variance_ : ndarray of shape (n_components,)
The amount of variance explained by each of the selected components.
The variance estimation uses `n_samples - 1` degrees of freedom.
Equal to n_components largest eigenvalues
of the covariance matrix of X.
.. versionadded:: 0.18
explained_variance_ratio_ : ndarray of shape (n_components,)
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of the ratios is equal to 1.0.
singular_values_ : ndarray of shape (n_components,)
The singular values corresponding to each of the selected components.
The singular values are equal to the 2-norms of the ``n_components``
variables in the lower-dimensional space.
.. versionadded:: 0.19
mean_ : ndarray of shape (n_features,)
Per-feature empirical mean, estimated from the training set.
Equal to `X.mean(axis=0)`.
n_components_ : int
The estimated number of components. When n_components is set
to 'mle' or a number between 0 and 1 (with svd_solver == 'full') this
number is estimated from input data. Otherwise it equals the parameter
n_components, or the lesser value of n_features and n_samples
if n_components is None.
n_samples_ : int
Number of samples in the training data.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf. It is required to
compute the estimated data covariance and score samples.
Equal to the average of (min(n_features, n_samples) - n_components)
smallest eigenvalues of the covariance matrix of X.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
KernelPCA : Kernel Principal Component Analysis.
SparsePCA : Sparse Principal Component Analysis.
TruncatedSVD : Dimensionality reduction using truncated SVD.
IncrementalPCA : Incremental Principal Component Analysis.
References
----------
For n_components == 'mle', this class uses the method from:
`Minka, T. P.. "Automatic choice of dimensionality for PCA".
In NIPS, pp. 598-604 <https://tminka.github.io/papers/pca/minka-pca.pdf>`_
Implements the probabilistic PCA model from:
`Tipping, M. E., and Bishop, C. M. (1999). "Probabilistic principal
component analysis". Journal of the Royal Statistical Society:
Series B (Statistical Methodology), 61(3), 611-622.
<http://www.miketipping.com/papers/met-mppca.pdf>`_
via the score and score_samples methods.
For svd_solver == 'arpack', refer to `scipy.sparse.linalg.svds`.
For svd_solver == 'randomized', see:
:doi:`Halko, N., Martinsson, P. G., and Tropp, J. A. (2011).
"Finding structure with randomness: Probabilistic algorithms for
constructing approximate matrix decompositions".
SIAM review, 53(2), 217-288.
<10.1137/090771806>`
and also
:doi:`Martinsson, P. G., Rokhlin, V., and Tygert, M. (2011).
"A randomized algorithm for the decomposition of matrices".
Applied and Computational Harmonic Analysis, 30(1), 47-68.
<10.1016/j.acha.2010.02.003>`
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(n_components=2)
>>> print(pca.explained_variance_ratio_)
[0.9924 0.0075]
>>> print(pca.singular_values_)
[6.30061 0.54980]
>>> pca = PCA(n_components=2, svd_solver='full')
>>> pca.fit(X)
PCA(n_components=2, svd_solver='full')
>>> print(pca.explained_variance_ratio_)
[0.9924 0.00755]
>>> print(pca.singular_values_)
[6.30061 0.54980]
>>> pca = PCA(n_components=1, svd_solver='arpack')
>>> pca.fit(X)
PCA(n_components=1, svd_solver='arpack')
>>> print(pca.explained_variance_ratio_)
[0.99244]
>>> print(pca.singular_values_)
[6.30061]
"""
# Declarative constraints on the constructor parameters; checked by
# scikit-learn's parameter-validation machinery when `fit` runs.
_parameter_constraints: dict = {
    "n_components": [
        Interval(Integral, 0, None, closed="left"),
        Interval(RealNotInt, 0, 1, closed="neither"),
        StrOptions({"mle"}),
        None,
    ],
    "copy": ["boolean"],
    "whiten": ["boolean"],
    "svd_solver": [
        StrOptions({"auto", "full", "covariance_eigh", "arpack", "randomized"})
    ],
    "tol": [Interval(Real, 0, None, closed="left")],
    "iterated_power": [
        StrOptions({"auto"}),
        Interval(Integral, 0, None, closed="left"),
    ],
    "n_oversamples": [Interval(Integral, 1, None, closed="left")],
    "power_iteration_normalizer": [StrOptions({"auto", "QR", "LU", "none"})],
    "random_state": ["random_state"],
}
def __init__(
    self,
    n_components=None,
    *,
    copy=True,
    whiten=False,
    svd_solver="auto",
    tol=0.0,
    iterated_power="auto",
    n_oversamples=10,
    power_iteration_normalizer="auto",
    random_state=None,
):
    # Store the hyper-parameters unmodified; per scikit-learn convention,
    # validation only happens when `fit` is called.
    self.n_components = n_components
    self.copy = copy
    self.whiten = whiten
    self.svd_solver = svd_solver
    self.tol = tol
    self.iterated_power = iterated_power
    self.n_oversamples = n_oversamples
    self.power_iteration_normalizer = power_iteration_normalizer
    self.random_state = random_state
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None):
    """Fit the PCA model on the training data X.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training data, where `n_samples` is the number of samples
        and `n_features` is the number of features.

    y : Ignored
        Ignored.

    Returns
    -------
    self : object
        Returns the instance itself.
    """
    # `_fit` sets all fitted attributes as a side effect; its return
    # value (the SVD factors) is only needed by `fit_transform`.
    self._fit(X)
    return self
@_fit_context(prefer_skip_nested_validation=True)
def fit_transform(self, X, y=None):
    """Fit the model with X and apply the dimensionality reduction on X.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training data, where `n_samples` is the number of samples
        and `n_features` is the number of features.

    y : Ignored
        Ignored.

    Returns
    -------
    X_new : ndarray of shape (n_samples, n_components)
        Transformed values.

    Notes
    -----
    This method returns a Fortran-ordered array. To convert it to a
    C-ordered array, use 'np.ascontiguousarray'.
    """
    U, S, _, X, x_is_centered, xp = self._fit(X)

    if U is None:
        # The "covariance_eigh" solver does not compute U at fit time, so
        # the reduction is obtained by projecting X onto the fitted
        # components instead.
        return self._transform(X, xp, x_is_centered=x_is_centered)

    U = U[:, : self.n_components_]
    if self.whiten:
        # X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
        U *= sqrt(X.shape[0] - 1)
    else:
        # X_new = X * V = U * S * Vt * V = U * S
        U *= S[: self.n_components_]
    return U
def _fit(self, X):
    """Dispatch to the right submethod depending on the chosen solver.

    Resolves `svd_solver="auto"` to a concrete solver (stored in
    `self._fit_svd_solver`), validates `X`, and delegates to either
    `_fit_full` (full/covariance-based decomposition) or
    `_fit_truncated` (ARPACK/randomized truncated SVD).

    Returns
    -------
    Tuple ``(U, S, Vt, X, x_is_centered, xp)`` as produced by the
    delegated fit method; ``U`` may be None for the "covariance_eigh"
    solver.
    """
    xp, is_array_api_compliant = get_namespace(X)

    # Raise an error for sparse input and unsupported svd_solver
    if issparse(X) and self.svd_solver not in ["auto", "arpack", "covariance_eigh"]:
        raise TypeError(
            'PCA only support sparse inputs with the "arpack" and'
            f' "covariance_eigh" solvers, while "{self.svd_solver}" was passed. See'
            " TruncatedSVD for a possible alternative."
        )
    if self.svd_solver == "arpack" and is_array_api_compliant:
        raise ValueError(
            "PCA with svd_solver='arpack' is not supported for Array API inputs."
        )

    # Validate the data, without ever forcing a copy as any solver that
    # supports sparse input data and the `covariance_eigh` solver are
    # written in a way to avoid the need for any inplace modification of
    # the input data contrary to the other solvers.
    # The copy will happen later, only if needed, once the solver
    # negotiation below is done.
    X = validate_data(
        self,
        X,
        dtype=[xp.float64, xp.float32],
        force_writeable=True,
        accept_sparse=("csr", "csc"),
        ensure_2d=True,
        copy=False,
    )
    self._fit_svd_solver = self.svd_solver
    # For sparse input, "auto" resolves to "arpack" directly.
    if self._fit_svd_solver == "auto" and issparse(X):
        self._fit_svd_solver = "arpack"

    # Resolve n_components=None to the solver-dependent maximum
    # (arpack requires n_components strictly below min(X.shape)).
    if self.n_components is None:
        if self._fit_svd_solver != "arpack":
            n_components = min(X.shape)
        else:
            n_components = min(X.shape) - 1
    else:
        n_components = self.n_components

    if self._fit_svd_solver == "auto":
        # Tall and skinny problems are best handled by precomputing the
        # covariance matrix.
        if X.shape[1] <= 1_000 and X.shape[0] >= 10 * X.shape[1]:
            self._fit_svd_solver = "covariance_eigh"
        # Small problem or n_components == 'mle', just call full PCA
        elif max(X.shape) <= 500 or n_components == "mle":
            self._fit_svd_solver = "full"
        elif 1 <= n_components < 0.8 * min(X.shape):
            self._fit_svd_solver = "randomized"
        # This is also the case of n_components in (0, 1)
        else:
            self._fit_svd_solver = "full"

    # Call different fits for either full or truncated SVD
    if self._fit_svd_solver in ("full", "covariance_eigh"):
        return self._fit_full(X, n_components, xp, is_array_api_compliant)
    elif self._fit_svd_solver in ["arpack", "randomized"]:
        return self._fit_truncated(X, n_components, xp)
def _fit_full(self, X, n_components, xp, is_array_api_compliant):
    """Fit the model by computing full SVD on X.

    Covers both the "full" solver (explicit SVD of the centered data)
    and the "covariance_eigh" solver (eigendecomposition of the
    covariance matrix, converted to equivalent SVD factors).

    Parameters
    ----------
    X : array of shape (n_samples, n_features)
        Training data, already validated by `_fit`.

    n_components : int, float in (0, 1) or 'mle'
        Requested number of components; resolved to an int below.

    xp : module
        Array-namespace module used for the computations.

    is_array_api_compliant : bool
        Whether `X` comes from a non-NumPy Array API namespace, in which
        case `xp.linalg.svd` is used instead of `scipy.linalg.svd`.

    Returns
    -------
    Tuple ``(U, S, Vt, X, x_is_centered, xp)``; ``U`` is None on the
    "covariance_eigh" path.
    """
    n_samples, n_features = X.shape

    if n_components == "mle":
        if n_samples < n_features:
            raise ValueError(
                "n_components='mle' is only supported if n_samples >= n_features"
            )
    elif not 0 <= n_components <= min(n_samples, n_features):
        raise ValueError(
            f"n_components={n_components} must be between 0 and "
            f"min(n_samples, n_features)={min(n_samples, n_features)} with "
            f"svd_solver={self._fit_svd_solver!r}"
        )

    self.mean_ = xp.mean(X, axis=0)
    # When X is a scipy sparse matrix, self.mean_ is a numpy matrix, so we need
    # to transform it to a 1D array. Note that this is not the case when X
    # is a scipy sparse array.
    # TODO: remove the following two lines when scikit-learn only depends
    # on scipy versions that no longer support scipy.sparse matrices.
    self.mean_ = xp.reshape(xp.asarray(self.mean_), (-1,))

    if self._fit_svd_solver == "full":
        X_centered = xp.asarray(X, copy=True) if self.copy else X
        X_centered -= self.mean_
        x_is_centered = not self.copy

        if not is_array_api_compliant:
            # Use scipy.linalg with NumPy/SciPy inputs for the sake of not
            # introducing unanticipated behavior changes. In the long run we
            # could instead decide to always use xp.linalg.svd for all inputs,
            # but that would make this code rely on numpy's SVD instead of
            # scipy's. It's not 100% clear whether they use the same LAPACK
            # solver by default though (assuming both are built against the
            # same BLAS).
            U, S, Vt = linalg.svd(X_centered, full_matrices=False)
        else:
            U, S, Vt = xp.linalg.svd(X_centered, full_matrices=False)
        # Sample variances along each principal axis (ddof=1).
        explained_variance_ = (S**2) / (n_samples - 1)

    else:
        assert self._fit_svd_solver == "covariance_eigh"
        # In the following, we center the covariance matrix C afterwards
        # (without centering the data X first) to avoid an unnecessary copy
        # of X. Note that the mean_ attribute is still needed to center
        # test data in the transform method.
        #
        # Note: at the time of writing, `xp.cov` does not exist in the
        # Array API standard:
        # https://github.com/data-apis/array-api/issues/43
        #
        # Besides, using `numpy.cov`, as of numpy 1.26.0, would not be
        # memory efficient for our use case when `n_samples >> n_features`:
        # `numpy.cov` centers a copy of the data before computing the
        # matrix product instead of subtracting a small `(n_features,
        # n_features)` square matrix from the gram matrix X.T @ X, as we do
        # below.
        x_is_centered = False
        C = X.T @ X
        C -= (
            n_samples
            * xp.reshape(self.mean_, (-1, 1))
            * xp.reshape(self.mean_, (1, -1))
        )
        C /= n_samples - 1
        eigenvals, eigenvecs = xp.linalg.eigh(C)

        # When X is a scipy sparse matrix, the following two datastructures
        # are returned as instances of the soft-deprecated numpy.matrix
        # class. Note that this problem does not occur when X is a scipy
        # sparse array (or another other kind of supported array).
        # TODO: remove the following two lines when scikit-learn only
        # depends on scipy versions that no longer support scipy.sparse
        # matrices.
        eigenvals = xp.reshape(xp.asarray(eigenvals), (-1,))
        eigenvecs = xp.asarray(eigenvecs)

        # Flip to decreasing eigenvalue order so the result matches the
        # SVD convention used by the other solvers.
        eigenvals = xp.flip(eigenvals, axis=0)
        eigenvecs = xp.flip(eigenvecs, axis=1)

        # The covariance matrix C is positive semi-definite by
        # construction. However, the eigenvalues returned by xp.linalg.eigh
        # can be slightly negative due to numerical errors. This would be
        # an issue for the subsequent sqrt, hence the manual clipping.
        eigenvals[eigenvals < 0.0] = 0.0
        explained_variance_ = eigenvals

        # Re-construct SVD of centered X indirectly and make it consistent
        # with the other solvers.
        S = xp.sqrt(eigenvals * (n_samples - 1))
        Vt = eigenvecs.T
        U = None

    # flip eigenvectors' sign to enforce deterministic output
    U, Vt = svd_flip(U, Vt, u_based_decision=False)
    components_ = Vt

    # Get variance explained by singular values
    total_var = xp.sum(explained_variance_)
    explained_variance_ratio_ = explained_variance_ / total_var
    singular_values_ = xp.asarray(S, copy=True)  # Store the singular values.

    # Postprocess the number of components required
    if n_components == "mle":
        n_components = _infer_dimension(explained_variance_, n_samples)
    elif 0 < n_components < 1.0:
        # Number of components for which the cumulated explained variance
        # percentage exceeds the desired threshold. side='right' ensures
        # that the selected components always explain strictly more
        # variance than the float `n_components` passed.
        # More discussion in issue: #15669
        ratio_cumsum = xp.cumulative_sum(explained_variance_ratio_)
        n_components = (
            xp.searchsorted(
                ratio_cumsum,
                xp.asarray(n_components, device=device(ratio_cumsum)),
                side="right",
            )
            + 1
        )

    # Compute noise covariance using Probabilistic PCA model
    # The sigma2 maximum likelihood (cf. eq. 12.46)
    if n_components < min(n_features, n_samples):
        self.noise_variance_ = xp.mean(explained_variance_[n_components:])
    else:
        self.noise_variance_ = 0.0

    self.n_samples_ = n_samples
    self.n_components_ = n_components

    # Assign a copy of the result of the truncation of the components in
    # order to:
    # - release the memory used by the discarded components,
    # - ensure that the kept components are allocated contiguously in
    #   memory to make the transform method faster by leveraging cache
    #   locality.
    self.components_ = xp.asarray(components_[:n_components, :], copy=True)

    # We do the same for the other arrays for the sake of consistency.
    self.explained_variance_ = xp.asarray(
        explained_variance_[:n_components], copy=True
    )
    self.explained_variance_ratio_ = xp.asarray(
        explained_variance_ratio_[:n_components], copy=True
    )
    self.singular_values_ = xp.asarray(singular_values_[:n_components], copy=True)

    return U, S, Vt, X, x_is_centered, xp
def _fit_truncated(self, X, n_components, xp):
    """Fit the model by computing truncated SVD (by ARPACK or randomized)
    on X.

    Parameters
    ----------
    X : {array, sparse matrix} of shape (n_samples, n_features)
        Training data, already validated by `_fit`.

    n_components : int
        Number of components to extract; must satisfy
        ``1 <= n_components <= min(n_samples, n_features)`` (strict
        inequality on the right for the "arpack" solver).

    xp : module
        Array-namespace module used for the computations.

    Returns
    -------
    Tuple ``(U, S, Vt, X, x_is_centered, xp)`` holding the truncated SVD
    factors of the centered data.
    """
    n_samples, n_features = X.shape
    svd_solver = self._fit_svd_solver

    if isinstance(n_components, str):
        raise ValueError(
            "n_components=%r cannot be a string with svd_solver='%s'"
            % (n_components, svd_solver)
        )
    elif not 1 <= n_components <= min(n_samples, n_features):
        raise ValueError(
            "n_components=%r must be between 1 and "
            "min(n_samples, n_features)=%r with "
            "svd_solver='%s'"
            % (n_components, min(n_samples, n_features), svd_solver)
        )
    elif svd_solver == "arpack" and n_components == min(n_samples, n_features):
        raise ValueError(
            "n_components=%r must be strictly less than "
            "min(n_samples, n_features)=%r with "
            "svd_solver='%s'"
            % (n_components, min(n_samples, n_features), svd_solver)
        )

    random_state = check_random_state(self.random_state)

    # Center data
    total_var = None
    if issparse(X):
        # Sparse input: never densify; center implicitly through a
        # linear operator so the sparsity structure is preserved.
        self.mean_, var = mean_variance_axis(X, axis=0)
        total_var = var.sum() * n_samples / (n_samples - 1)  # ddof=1
        X_centered = _implicit_column_offset(X, self.mean_)
        x_is_centered = False
    else:
        self.mean_ = xp.mean(X, axis=0)
        X_centered = xp.asarray(X, copy=True) if self.copy else X
        X_centered -= self.mean_
        x_is_centered = not self.copy

    if svd_solver == "arpack":
        v0 = _init_arpack_v0(min(X.shape), random_state)
        U, S, Vt = svds(X_centered, k=n_components, tol=self.tol, v0=v0)
        # svds doesn't abide by scipy.linalg.svd/randomized_svd
        # conventions, so reverse its outputs.
        S = S[::-1]
        # flip eigenvectors' sign to enforce deterministic output
        U, Vt = svd_flip(U[:, ::-1], Vt[::-1], u_based_decision=False)

    elif svd_solver == "randomized":
        # flip_sign=False: the sign flipping is done explicitly below via
        # svd_flip with u_based_decision=False, for consistency with the
        # other solvers.
        U, S, Vt = _randomized_svd(
            X_centered,
            n_components=n_components,
            n_oversamples=self.n_oversamples,
            n_iter=self.iterated_power,
            power_iteration_normalizer=self.power_iteration_normalizer,
            flip_sign=False,
            random_state=random_state,
        )
        U, Vt = svd_flip(U, Vt, u_based_decision=False)

    self.n_samples_ = n_samples
    self.components_ = Vt
    self.n_components_ = n_components

    # Get variance explained by singular values
    self.explained_variance_ = (S**2) / (n_samples - 1)

    # Workaround in-place variance calculation since at the time numpy
    # did not have a way to calculate variance in-place.
    #
    # TODO: update this code to either:
    # * Use the array-api variance calculation, unless memory usage suffers
    # * Update sklearn.utils.extmath._incremental_mean_and_var to support array-api
    # See: https://github.com/scikit-learn/scikit-learn/pull/18689#discussion_r1335540991
    if total_var is None:
        N = X.shape[0] - 1
        X_centered **= 2
        total_var = xp.sum(X_centered) / N

    self.explained_variance_ratio_ = self.explained_variance_ / total_var
    self.singular_values_ = xp.asarray(S, copy=True)  # Store the singular values.

    if self.n_components_ < min(n_features, n_samples):
        # Probabilistic PCA noise estimate: the variance not captured by
        # the retained components, averaged over the discarded dimensions.
        self.noise_variance_ = total_var - xp.sum(self.explained_variance_)
        self.noise_variance_ /= min(n_features, n_samples) - n_components
    else:
        self.noise_variance_ = 0.0
    return U, S, Vt, X, x_is_centered, xp
def score_samples(self, X):
"""Return the log-likelihood of each sample.
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data.
Returns
-------
ll : ndarray of shape (n_samples,)
Log-likelihood of each sample under the current model.
"""
check_is_fitted(self)
xp, _ = get_namespace(X)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/decomposition/_factor_analysis.py | sklearn/decomposition/_factor_analysis.py | """Factor Analysis.
A latent linear variable model.
FactorAnalysis is similar to probabilistic PCA implemented by PCA.score
While PCA assumes Gaussian noise with the same variance for each
feature, the FactorAnalysis model assumes different variances for
each of them.
This implementation is based on David Barber's Book,
Bayesian Reasoning and Machine Learning,
http://www.cs.ucl.ac.uk/staff/d.barber/brml,
Algorithm 21.1
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from math import log, sqrt
from numbers import Integral, Real
import numpy as np
from scipy import linalg
from sklearn.base import (
BaseEstimator,
ClassNamePrefixFeaturesOutMixin,
TransformerMixin,
_fit_context,
)
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils import check_random_state
from sklearn.utils._param_validation import Interval, StrOptions
from sklearn.utils.extmath import _randomized_svd, fast_logdet, squared_norm
from sklearn.utils.validation import check_is_fitted, validate_data
class FactorAnalysis(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
    """Factor Analysis (FA).

    A simple linear generative model with Gaussian latent variables.

    The observations are assumed to be caused by a linear transformation of
    lower dimensional latent factors and added Gaussian noise.
    Without loss of generality the factors are distributed according to a
    Gaussian with zero mean and unit covariance. The noise is also zero mean
    and has an arbitrary diagonal covariance matrix.

    If we would restrict the model further, by assuming that the Gaussian
    noise is even isotropic (all diagonal entries are the same) we would obtain
    :class:`PCA`.

    FactorAnalysis performs a maximum likelihood estimate of the so-called
    `loading` matrix, the transformation of the latent variables to the
    observed ones, using SVD based approach.

    Read more in the :ref:`User Guide <FA>`.

    .. versionadded:: 0.13

    Parameters
    ----------
    n_components : int, default=None
        Dimensionality of latent space, the number of components
        of ``X`` that are obtained after ``transform``.
        If None, n_components is set to the number of features.

    tol : float, default=1e-2
        Stopping tolerance for log-likelihood increase.

    copy : bool, default=True
        Whether to make a copy of X. If ``False``, the input X gets overwritten
        during fitting.

    max_iter : int, default=1000
        Maximum number of iterations.

    noise_variance_init : array-like of shape (n_features,), default=None
        The initial guess of the noise variance for each feature.
        If None, it defaults to np.ones(n_features).

    svd_method : {'lapack', 'randomized'}, default='randomized'
        Which SVD method to use. If 'lapack' use standard SVD from
        scipy.linalg, if 'randomized' use fast ``randomized_svd`` function.
        Defaults to 'randomized'. For most applications 'randomized' will
        be sufficiently precise while providing significant speed gains.
        Accuracy can also be improved by setting higher values for
        `iterated_power`. If this is not sufficient, for maximum precision
        you should choose 'lapack'.

    iterated_power : int, default=3
        Number of iterations for the power method. 3 by default. Only used
        if ``svd_method`` equals 'randomized'.

    rotation : {'varimax', 'quartimax'}, default=None
        If not None, apply the indicated rotation. Currently, varimax and
        quartimax are implemented. See
        `"The varimax criterion for analytic rotation in factor analysis"
        <https://link.springer.com/article/10.1007%2FBF02289233>`_
        H. F. Kaiser, 1958.

        .. versionadded:: 0.24

    random_state : int or RandomState instance, default=0
        Only used when ``svd_method`` equals 'randomized'. Pass an int for
        reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.

    Attributes
    ----------
    components_ : ndarray of shape (n_components, n_features)
        Components with maximum variance.

    loglike_ : list of shape (n_iterations,)
        The log likelihood at each iteration.

    noise_variance_ : ndarray of shape (n_features,)
        The estimated noise variance for each feature.

    n_iter_ : int
        Number of iterations run.

    mean_ : ndarray of shape (n_features,)
        Per-feature empirical mean, estimated from the training set.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    PCA: Principal component analysis is also a latent linear variable model
        which however assumes equal noise variance for each feature.
        This extra assumption makes probabilistic PCA faster as it can be
        computed in closed form.
    FastICA: Independent component analysis, a latent variable model with
        non-Gaussian latent variables.

    References
    ----------
    - David Barber, Bayesian Reasoning and Machine Learning,
      Algorithm 21.1.

    - Christopher M. Bishop: Pattern Recognition and Machine Learning,
      Chapter 12.2.4.

    Examples
    --------
    >>> from sklearn.datasets import load_digits
    >>> from sklearn.decomposition import FactorAnalysis
    >>> X, _ = load_digits(return_X_y=True)
    >>> transformer = FactorAnalysis(n_components=7, random_state=0)
    >>> X_transformed = transformer.fit_transform(X)
    >>> X_transformed.shape
    (1797, 7)
    """

    _parameter_constraints: dict = {
        "n_components": [Interval(Integral, 0, None, closed="left"), None],
        "tol": [Interval(Real, 0.0, None, closed="left")],
        "copy": ["boolean"],
        "max_iter": [Interval(Integral, 1, None, closed="left")],
        "noise_variance_init": ["array-like", None],
        "svd_method": [StrOptions({"randomized", "lapack"})],
        "iterated_power": [Interval(Integral, 0, None, closed="left")],
        "rotation": [StrOptions({"varimax", "quartimax"}), None],
        "random_state": ["random_state"],
    }

    def __init__(
        self,
        n_components=None,
        *,
        tol=1e-2,
        copy=True,
        max_iter=1000,
        noise_variance_init=None,
        svd_method="randomized",
        iterated_power=3,
        rotation=None,
        random_state=0,
    ):
        self.n_components = n_components
        self.copy = copy
        self.tol = tol
        self.max_iter = max_iter
        self.svd_method = svd_method

        self.noise_variance_init = noise_variance_init
        self.iterated_power = iterated_power
        self.random_state = random_state
        self.rotation = rotation

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """Fit the FactorAnalysis model to X using SVD based approach.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data.

        y : Ignored
            Ignored parameter.

        Returns
        -------
        self : object
            FactorAnalysis class instance.
        """
        X = validate_data(
            self, X, copy=self.copy, dtype=np.float64, force_writeable=True
        )

        n_samples, n_features = X.shape
        n_components = self.n_components
        if n_components is None:
            n_components = n_features

        self.mean_ = np.mean(X, axis=0)
        X -= self.mean_

        # some constant terms
        nsqrt = sqrt(n_samples)
        llconst = n_features * log(2.0 * np.pi) + n_components
        var = np.var(X, axis=0)

        if self.noise_variance_init is None:
            psi = np.ones(n_features, dtype=X.dtype)
        else:
            if len(self.noise_variance_init) != n_features:
                # Fixed message: previously read "does not with number of
                # features", missing the verb.
                raise ValueError(
                    "noise_variance_init dimension does not agree "
                    "with number of features : %d != %d"
                    % (len(self.noise_variance_init), n_features)
                )
            psi = np.array(self.noise_variance_init)

        loglike = []
        old_ll = -np.inf
        SMALL = 1e-12

        # we'll modify svd outputs to return unexplained variance
        # to allow for unified computation of loglikelihood
        if self.svd_method == "lapack":

            def my_svd(X):
                _, s, Vt = linalg.svd(X, full_matrices=False, check_finite=False)
                return (
                    s[:n_components],
                    Vt[:n_components],
                    squared_norm(s[n_components:]),
                )

        else:  # svd_method == "randomized"
            random_state = check_random_state(self.random_state)

            def my_svd(X):
                _, s, Vt = _randomized_svd(
                    X,
                    n_components,
                    random_state=random_state,
                    n_iter=self.iterated_power,
                )
                # The truncated SVD does not return the discarded spectrum;
                # recover the unexplained variance from the norm difference.
                return s, Vt, squared_norm(X) - squared_norm(s)

        for i in range(self.max_iter):
            # SMALL helps numerics
            sqrt_psi = np.sqrt(psi) + SMALL
            s, Vt, unexp_var = my_svd(X / (sqrt_psi * nsqrt))
            s **= 2

            # Use 'maximum' here to avoid sqrt problems.
            W = np.sqrt(np.maximum(s - 1.0, 0.0))[:, np.newaxis] * Vt
            del Vt
            W *= sqrt_psi

            # loglikelihood
            ll = llconst + np.sum(np.log(s))
            ll += unexp_var + np.sum(np.log(psi))
            ll *= -n_samples / 2.0
            loglike.append(ll)
            if (ll - old_ll) < self.tol:
                break
            old_ll = ll

            psi = np.maximum(var - np.sum(W**2, axis=0), SMALL)
        else:
            warnings.warn(
                "FactorAnalysis did not converge."
                " You might want"
                " to increase the number of iterations.",
                ConvergenceWarning,
            )

        self.components_ = W
        if self.rotation is not None:
            self.components_ = self._rotate(W)
        self.noise_variance_ = psi
        self.loglike_ = loglike
        self.n_iter_ = i + 1
        return self

    def transform(self, X):
        """Apply dimensionality reduction to X using the model.

        Compute the expected mean of the latent variables.
        See Barber, 21.2.33 (or Bishop, 12.66).

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data.

        Returns
        -------
        X_new : ndarray of shape (n_samples, n_components)
            The latent variables of X.
        """
        check_is_fitted(self)

        X = validate_data(self, X, reset=False)
        Ih = np.eye(len(self.components_))

        X_transformed = X - self.mean_

        # Posterior covariance of the latent variables (Bishop 12.66).
        Wpsi = self.components_ / self.noise_variance_
        cov_z = linalg.inv(Ih + np.dot(Wpsi, self.components_.T))
        tmp = np.dot(X_transformed, Wpsi.T)
        X_transformed = np.dot(tmp, cov_z)

        return X_transformed

    def get_covariance(self):
        """Compute data covariance with the FactorAnalysis model.

        ``cov = components_.T * components_ + diag(noise_variance)``

        Returns
        -------
        cov : ndarray of shape (n_features, n_features)
            Estimated covariance of data.
        """
        check_is_fitted(self)

        cov = np.dot(self.components_.T, self.components_)
        cov.flat[:: len(cov) + 1] += self.noise_variance_  # modify diag inplace
        return cov

    def get_precision(self):
        """Compute data precision matrix with the FactorAnalysis model.

        Returns
        -------
        precision : ndarray of shape (n_features, n_features)
            Estimated precision of data.
        """
        check_is_fitted(self)

        n_features = self.components_.shape[1]

        # handle corner cases first
        if self.n_components == 0:
            return np.diag(1.0 / self.noise_variance_)
        if self.n_components == n_features:
            return linalg.inv(self.get_covariance())

        # Get precision using matrix inversion lemma
        components_ = self.components_
        precision = np.dot(components_ / self.noise_variance_, components_.T)
        precision.flat[:: len(precision) + 1] += 1.0
        precision = np.dot(components_.T, np.dot(linalg.inv(precision), components_))
        precision /= self.noise_variance_[:, np.newaxis]
        precision /= -self.noise_variance_[np.newaxis, :]
        precision.flat[:: len(precision) + 1] += 1.0 / self.noise_variance_
        return precision

    def score_samples(self, X):
        """Compute the log-likelihood of each sample.

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            The data.

        Returns
        -------
        ll : ndarray of shape (n_samples,)
            Log-likelihood of each sample under the current model.
        """
        check_is_fitted(self)
        X = validate_data(self, X, reset=False)
        Xr = X - self.mean_
        precision = self.get_precision()
        n_features = X.shape[1]
        log_like = -0.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
        log_like -= 0.5 * (n_features * log(2.0 * np.pi) - fast_logdet(precision))
        return log_like

    def score(self, X, y=None):
        """Compute the average log-likelihood of the samples.

        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features)
            The data.

        y : Ignored
            Ignored parameter.

        Returns
        -------
        ll : float
            Average log-likelihood of the samples under the current model.
        """
        return np.mean(self.score_samples(X))

    def _rotate(self, components, n_components=None, tol=1e-6):
        "Rotate the factor analysis solution."
        # note that tol is not exposed
        return _ortho_rotation(components.T, method=self.rotation, tol=tol)[
            : self.n_components
        ]

    @property
    def _n_features_out(self):
        """Number of transformed output features."""
        return self.components_.shape[0]
def _ortho_rotation(components, method="varimax", tol=1e-6, max_iter=100):
"""Return rotated components."""
nrow, ncol = components.shape
rotation_matrix = np.eye(ncol)
var = 0
for _ in range(max_iter):
comp_rot = np.dot(components, rotation_matrix)
if method == "varimax":
tmp = comp_rot * np.transpose((comp_rot**2).sum(axis=0) / nrow)
elif method == "quartimax":
tmp = 0
u, s, v = np.linalg.svd(np.dot(components.T, comp_rot**3 - tmp))
rotation_matrix = np.dot(u, v)
var_new = np.sum(s)
if var != 0 and var_new < var * (1 + tol):
break
var = var_new
return np.dot(components, rotation_matrix).T
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/decomposition/_truncated_svd.py | sklearn/decomposition/_truncated_svd.py | """Truncated SVD for sparse matrices, aka latent semantic analysis (LSA)."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from numbers import Integral, Real
import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import svds
from sklearn.base import (
BaseEstimator,
ClassNamePrefixFeaturesOutMixin,
TransformerMixin,
_fit_context,
)
from sklearn.utils import check_array, check_random_state
from sklearn.utils._arpack import _init_arpack_v0
from sklearn.utils._param_validation import Interval, StrOptions
from sklearn.utils.extmath import _randomized_svd, safe_sparse_dot, svd_flip
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.utils.validation import check_is_fitted, validate_data
__all__ = ["TruncatedSVD"]
class TruncatedSVD(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
    """Dimensionality reduction using truncated SVD (aka LSA).

    This transformer performs linear dimensionality reduction by means of
    truncated singular value decomposition (SVD). Contrary to PCA, this
    estimator does not center the data before computing the singular value
    decomposition. This means it can work with sparse matrices
    efficiently.

    In particular, truncated SVD works on term count/tf-idf matrices as
    returned by the vectorizers in :mod:`sklearn.feature_extraction.text`. In
    that context, it is known as latent semantic analysis (LSA).

    This estimator supports two algorithms: a fast randomized SVD solver, and
    a "naive" algorithm that uses ARPACK as an eigensolver on `X * X.T` or
    `X.T * X`, whichever is more efficient.

    Read more in the :ref:`User Guide <LSA>`.

    Parameters
    ----------
    n_components : int, default=2
        Desired dimensionality of output data.
        If algorithm='arpack', must be strictly less than the number of features.
        If algorithm='randomized', must be less than or equal to the number of features.
        The default value is useful for visualisation. For LSA, a value of
        100 is recommended.

    algorithm : {'arpack', 'randomized'}, default='randomized'
        SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
        (scipy.sparse.linalg.svds), or "randomized" for the randomized
        algorithm due to Halko (2009).

    n_iter : int, default=5
        Number of iterations for randomized SVD solver. Not used by ARPACK. The
        default is larger than the default in
        :func:`~sklearn.utils.extmath.randomized_svd` to handle sparse
        matrices that may have large slowly decaying spectrum.

    n_oversamples : int, default=10
        Number of oversamples for randomized SVD solver. Not used by ARPACK.
        See :func:`~sklearn.utils.extmath.randomized_svd` for a complete
        description.

        .. versionadded:: 1.1

    power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto'
        Power iteration normalizer for randomized SVD solver.
        Not used by ARPACK. See :func:`~sklearn.utils.extmath.randomized_svd`
        for more details.

        .. versionadded:: 1.1

    random_state : int, RandomState instance or None, default=None
        Used during randomized svd. Pass an int for reproducible results across
        multiple function calls.
        See :term:`Glossary <random_state>`.

    tol : float, default=0.0
        Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
        SVD solver.

    Attributes
    ----------
    components_ : ndarray of shape (n_components, n_features)
        The right singular vectors of the input data.

    explained_variance_ : ndarray of shape (n_components,)
        The variance of the training samples transformed by a projection to
        each component.

    explained_variance_ratio_ : ndarray of shape (n_components,)
        Percentage of variance explained by each of the selected components.

    singular_values_ : ndarray of shape (n_components,)
        The singular values corresponding to each of the selected components.
        The singular values are equal to the 2-norms of the ``n_components``
        variables in the lower-dimensional space.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    DictionaryLearning : Find a dictionary that sparsely encodes data.
    FactorAnalysis : A simple linear generative model with
        Gaussian latent variables.
    IncrementalPCA : Incremental principal components analysis.
    KernelPCA : Kernel Principal component analysis.
    NMF : Non-Negative Matrix Factorization.
    PCA : Principal component analysis.

    Notes
    -----
    SVD suffers from a problem called "sign indeterminacy", which means the
    sign of the ``components_`` and the output from transform depend on the
    algorithm and random state. To work around this, fit instances of this
    class to data once, then keep the instance around to do transformations.

    References
    ----------
    :arxiv:`Halko, et al. (2009). "Finding structure with randomness:
    Stochastic algorithms for constructing approximate matrix decompositions"
    <0909.4061>`

    Examples
    --------
    >>> from sklearn.decomposition import TruncatedSVD
    >>> from scipy.sparse import csr_matrix
    >>> import numpy as np
    >>> np.random.seed(0)
    >>> X_dense = np.random.rand(100, 100)
    >>> X_dense[:, 2 * np.arange(50)] = 0
    >>> X = csr_matrix(X_dense)
    >>> svd = TruncatedSVD(n_components=5, n_iter=7, random_state=42)
    >>> svd.fit(X)
    TruncatedSVD(n_components=5, n_iter=7, random_state=42)
    >>> print(svd.explained_variance_ratio_)
    [0.0157 0.0512 0.0499 0.0479 0.0453]
    >>> print(svd.explained_variance_ratio_.sum())
    0.2102
    >>> print(svd.singular_values_)
    [35.2410 4.5981 4.5420 4.4486 4.3288]
    """

    _parameter_constraints: dict = {
        "n_components": [Interval(Integral, 1, None, closed="left")],
        "algorithm": [StrOptions({"arpack", "randomized"})],
        "n_iter": [Interval(Integral, 0, None, closed="left")],
        "n_oversamples": [Interval(Integral, 1, None, closed="left")],
        # Fixed typo: the valid option is "QR" (per the docstring and
        # randomized_svd), not "OR".
        "power_iteration_normalizer": [StrOptions({"auto", "QR", "LU", "none"})],
        "random_state": ["random_state"],
        "tol": [Interval(Real, 0, None, closed="left")],
    }

    def __init__(
        self,
        n_components=2,
        *,
        algorithm="randomized",
        n_iter=5,
        n_oversamples=10,
        power_iteration_normalizer="auto",
        random_state=None,
        tol=0.0,
    ):
        self.algorithm = algorithm
        self.n_components = n_components
        self.n_iter = n_iter
        self.n_oversamples = n_oversamples
        self.power_iteration_normalizer = power_iteration_normalizer
        self.random_state = random_state
        self.tol = tol

    def fit(self, X, y=None):
        """Fit model on training data X.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training data.

        y : Ignored
            Not used, present here for API consistency by convention.

        Returns
        -------
        self : object
            Returns the transformer object.
        """
        self.fit_transform(X)
        return self

    @_fit_context(prefer_skip_nested_validation=True)
    def fit_transform(self, X, y=None):
        """Fit model to X and perform dimensionality reduction on X.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training data.

        y : Ignored
            Not used, present here for API consistency by convention.

        Returns
        -------
        X_new : ndarray of shape (n_samples, n_components)
            Reduced version of X. This will always be a dense array.
        """
        X = validate_data(self, X, accept_sparse=["csr", "csc"], ensure_min_features=2)
        random_state = check_random_state(self.random_state)

        if self.algorithm == "arpack":
            v0 = _init_arpack_v0(min(X.shape), random_state)
            U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol, v0=v0)
            # svds doesn't abide by scipy.linalg.svd/randomized_svd
            # conventions, so reverse its outputs.
            Sigma = Sigma[::-1]
            # u_based_decision=False is needed to be consistent with PCA.
            U, VT = svd_flip(U[:, ::-1], VT[::-1], u_based_decision=False)

        elif self.algorithm == "randomized":
            if self.n_components > X.shape[1]:
                raise ValueError(
                    f"n_components({self.n_components}) must be <="
                    f" n_features({X.shape[1]})."
                )
            U, Sigma, VT = _randomized_svd(
                X,
                self.n_components,
                n_iter=self.n_iter,
                n_oversamples=self.n_oversamples,
                power_iteration_normalizer=self.power_iteration_normalizer,
                random_state=random_state,
                flip_sign=False,
            )
            U, VT = svd_flip(U, VT, u_based_decision=False)

        self.components_ = VT

        # As a result of the SVD approximation error on X ~ U @ Sigma @ V.T,
        # X @ V is not the same as U @ Sigma
        if self.algorithm == "randomized" or (
            self.algorithm == "arpack" and self.tol > 0
        ):
            X_transformed = safe_sparse_dot(X, self.components_.T)
        else:
            X_transformed = U * Sigma

        # Calculate explained variance & explained variance ratio
        self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
        if sp.issparse(X):
            _, full_var = mean_variance_axis(X, axis=0)
            full_var = full_var.sum()
        else:
            full_var = np.var(X, axis=0).sum()
        self.explained_variance_ratio_ = exp_var / full_var
        self.singular_values_ = Sigma  # Store the singular values.

        return X_transformed

    def transform(self, X):
        """Perform dimensionality reduction on X.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            New data.

        Returns
        -------
        X_new : ndarray of shape (n_samples, n_components)
            Reduced version of X. This will always be a dense array.
        """
        check_is_fitted(self)
        X = validate_data(self, X, accept_sparse=["csr", "csc"], reset=False)
        return safe_sparse_dot(X, self.components_.T)

    def inverse_transform(self, X):
        """Transform X back to its original space.

        Returns an array X_original whose transform would be X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_components)
            New data.

        Returns
        -------
        X_original : ndarray of shape (n_samples, n_features)
            Note that this is always a dense array.
        """
        X = check_array(X)
        return np.dot(X, self.components_)

    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        tags.input_tags.sparse = True
        tags.transformer_tags.preserves_dtype = ["float64", "float32"]
        return tags

    @property
    def _n_features_out(self):
        """Number of transformed output features."""
        return self.components_.shape[0]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/decomposition/_nmf.py | sklearn/decomposition/_nmf.py | """Non-negative matrix factorization."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import itertools
import time
import warnings
from abc import ABC
from math import sqrt
from numbers import Integral, Real
import numpy as np
import scipy.sparse as sp
from scipy import linalg
from sklearn._config import config_context
from sklearn.base import (
BaseEstimator,
ClassNamePrefixFeaturesOutMixin,
TransformerMixin,
_fit_context,
)
from sklearn.decomposition._cdnmf_fast import _update_cdnmf_fast
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils import check_array, check_random_state, gen_batches
from sklearn.utils._param_validation import Interval, StrOptions, validate_params
from sklearn.utils.extmath import _randomized_svd, safe_sparse_dot, squared_norm
from sklearn.utils.validation import check_is_fitted, check_non_negative, validate_data
EPSILON = np.finfo(np.float32).eps
def norm(x):
    """Dot product-based Euclidean norm implementation.

    See: http://fa.bianp.net/blog/2011/computing-the-vector-norm/

    Parameters
    ----------
    x : array-like
        Vector for which to compute the norm.
    """
    # `squared_norm` (from sklearn.utils.extmath) returns the sum of squared
    # entries of the raveled input; taking its square root yields ||x||_2.
    return sqrt(squared_norm(x))
def trace_dot(X, Y):
    """Trace of np.dot(X, Y.T).

    Parameters
    ----------
    X : array-like
        First matrix.

    Y : array-like
        Second matrix.
    """
    # tr(X @ Y.T) equals the sum of the elementwise product of X and Y,
    # i.e. the inner product of the two flattened matrices.
    x_flat = X.ravel()
    y_flat = Y.ravel()
    return x_flat @ y_flat
def _check_init(A, shape, whom):
    """Validate a user-provided NMF initialization matrix.

    Checks that ``A`` has the expected ``shape`` (either entry of ``shape``
    may be the string "auto" to skip that dimension), is non-negative, and
    is not entirely zero. Raises ValueError on any violation.
    """
    A = check_array(A)

    n_expected_rows, n_expected_cols = shape[0], shape[1]
    if n_expected_rows != "auto" and A.shape[0] != n_expected_rows:
        raise ValueError(
            f"Array with wrong first dimension passed to {whom}. "
            f"Expected {n_expected_rows}, but got {A.shape[0]}."
        )
    if n_expected_cols != "auto" and A.shape[1] != n_expected_cols:
        raise ValueError(
            f"Array with wrong second dimension passed to {whom}. "
            f"Expected {n_expected_cols}, but got {A.shape[1]}."
        )

    check_non_negative(A, whom)

    # A non-negative matrix whose maximum is zero is identically zero.
    if np.max(A) == 0:
        raise ValueError(f"Array passed to {whom} is full of zeros.")
def _beta_divergence(X, W, H, beta, square_root=False):
    """Compute the beta-divergence of X and dot(W, H).

    Parameters
    ----------
    X : float or array-like of shape (n_samples, n_features)

    W : float or array-like of shape (n_samples, n_components)

    H : float or array-like of shape (n_components, n_features)

    beta : float or {'frobenius', 'kullback-leibler', 'itakura-saito'}
        Parameter of the beta-divergence.
        If beta == 2, this is half the Frobenius *squared* norm.
        If beta == 1, this is the generalized Kullback-Leibler divergence.
        If beta == 0, this is the Itakura-Saito divergence.
        Else, this is the general beta-divergence.

    square_root : bool, default=False
        If True, return np.sqrt(2 * res)
        For beta == 2, it corresponds to the Frobenius norm.

    Returns
    -------
        res : float
            Beta divergence of X and np.dot(X, H).
    """
    # Map named losses ('frobenius', ...) to their numeric beta value.
    beta = _beta_loss_to_float(beta)

    # The method can be called with scalars
    if not sp.issparse(X):
        X = np.atleast_2d(X)
        W = np.atleast_2d(W)
        H = np.atleast_2d(H)

    # Frobenius norm
    if beta == 2:
        # Avoid the creation of the dense np.dot(W, H) if X is sparse.
        if sp.issparse(X):
            # ||X - WH||_F^2 = ||X||^2 + ||WH||^2 - 2 <X, WH>, each term
            # computable without densifying WH.
            norm_X = np.dot(X.data, X.data)
            norm_WH = trace_dot(np.linalg.multi_dot([W.T, W, H]), H)
            cross_prod = trace_dot((X @ H.T), W)
            res = (norm_X + norm_WH - 2.0 * cross_prod) / 2.0
        else:
            res = squared_norm(X - np.dot(W, H)) / 2.0

        if square_root:
            return np.sqrt(res * 2)
        else:
            return res

    if sp.issparse(X):
        # compute np.dot(W, H) only where X is nonzero
        WH_data = _special_sparse_dot(W, H, X).data
        X_data = X.data
    else:
        WH = np.dot(W, H)
        WH_data = WH.ravel()
        X_data = X.ravel()

    # do not affect the zeros: here 0 ** (-1) = 0 and not infinity
    indices = X_data > EPSILON
    WH_data = WH_data[indices]
    X_data = X_data[indices]

    # used to avoid division by zero
    WH_data[WH_data < EPSILON] = EPSILON

    # generalized Kullback-Leibler divergence
    if beta == 1:
        # fast and memory efficient computation of np.sum(np.dot(W, H))
        sum_WH = np.dot(np.sum(W, axis=0), np.sum(H, axis=1))
        # computes np.sum(X * log(X / WH)) only where X is nonzero
        div = X_data / WH_data
        res = np.dot(X_data, np.log(div))
        # add full np.sum(np.dot(W, H)) - np.sum(X)
        res += sum_WH - X_data.sum()

    # Itakura-Saito divergence
    elif beta == 0:
        div = X_data / WH_data
        res = np.sum(div) - np.prod(X.shape) - np.sum(np.log(div))

    # beta-divergence, beta not in (0, 1, 2)
    else:
        if sp.issparse(X):
            # slow loop, but memory efficient computation of :
            # np.sum(np.dot(W, H) ** beta)
            sum_WH_beta = 0
            for i in range(X.shape[1]):
                sum_WH_beta += np.sum(np.dot(W, H[:, i]) ** beta)
        else:
            sum_WH_beta = np.sum(WH**beta)

        sum_X_WH = np.dot(X_data, WH_data ** (beta - 1))
        res = (X_data**beta).sum() - beta * sum_X_WH
        res += sum_WH_beta * (beta - 1)
        res /= beta * (beta - 1)

    if square_root:
        res = max(res, 0)  # avoid negative number due to rounding errors
        return np.sqrt(2 * res)
    else:
        return res
def _special_sparse_dot(W, H, X):
"""Computes np.dot(W, H), only where X is non zero."""
if sp.issparse(X):
ii, jj = X.nonzero()
n_vals = ii.shape[0]
dot_vals = np.empty(n_vals)
n_components = W.shape[1]
batch_size = max(n_components, n_vals // n_components)
for start in range(0, n_vals, batch_size):
batch = slice(start, start + batch_size)
dot_vals[batch] = np.multiply(W[ii[batch], :], H.T[jj[batch], :]).sum(
axis=1
)
WH = sp.coo_matrix((dot_vals, (ii, jj)), shape=X.shape)
return WH.tocsr()
else:
return np.dot(W, H)
def _beta_loss_to_float(beta_loss):
"""Convert string beta_loss to float."""
beta_loss_map = {"frobenius": 2, "kullback-leibler": 1, "itakura-saito": 0}
if isinstance(beta_loss, str):
beta_loss = beta_loss_map[beta_loss]
return beta_loss
def _initialize_nmf(X, n_components, init=None, eps=1e-6, random_state=None):
    """Algorithms for NMF initialization.

    Computes an initial guess for the non-negative
    rank k matrix approximation for X: X = WH.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        The data matrix to be decomposed.

    n_components : int
        The number of components desired in the approximation.

    init : {'random', 'nndsvd', 'nndsvda', 'nndsvdar'}, default=None
        Method used to initialize the procedure.
        Valid options:

        - None: 'nndsvda' if n_components <= min(n_samples, n_features),
          otherwise 'random'.

        - 'random': non-negative random matrices, scaled with:
          sqrt(X.mean() / n_components)

        - 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
          initialization (better for sparseness)

        - 'nndsvda': NNDSVD with zeros filled with the average of X
          (better when sparsity is not desired)

        - 'nndsvdar': NNDSVD with zeros filled with small random values
          (generally faster, less accurate alternative to NNDSVDa
          for when sparsity is not desired)

        - 'custom': use custom matrices W and H

        .. versionchanged:: 1.1
            When `init=None` and n_components is less than n_samples and n_features
            defaults to `nndsvda` instead of `nndsvd`.

    eps : float, default=1e-6
        Truncate all values less then this in output to zero.

    random_state : int, RandomState instance or None, default=None
        Used when ``init`` == 'nndsvdar' or 'random'. Pass an int for
        reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    W : array-like of shape (n_samples, n_components)
        Initial guesses for solving X ~= WH.

    H : array-like of shape (n_components, n_features)
        Initial guesses for solving X ~= WH.

    References
    ----------
    C. Boutsidis, E. Gallopoulos: SVD based initialization: A head start for
    nonnegative matrix factorization - Pattern Recognition, 2008
    http://tinyurl.com/nndsvd
    """
    check_non_negative(X, "NMF initialization")
    n_samples, n_features = X.shape

    # The NNDSVD-family inits need at least n_components singular vectors.
    if (
        init is not None
        and init != "random"
        and n_components > min(n_samples, n_features)
    ):
        raise ValueError(
            "init = '{}' can only be used when "
            "n_components <= min(n_samples, n_features)".format(init)
        )

    if init is None:
        if n_components <= min(n_samples, n_features):
            init = "nndsvda"
        else:
            init = "random"

    # Random initialization
    if init == "random":
        # Scale draws so that the entries of W @ H have roughly the same
        # mean as X.
        avg = np.sqrt(X.mean() / n_components)
        rng = check_random_state(random_state)
        H = avg * rng.standard_normal(size=(n_components, n_features)).astype(
            X.dtype, copy=False
        )
        W = avg * rng.standard_normal(size=(n_samples, n_components)).astype(
            X.dtype, copy=False
        )
        # Fold negative draws to keep the factors non-negative.
        np.abs(H, out=H)
        np.abs(W, out=W)
        return W, H

    # NNDSVD initialization
    U, S, V = _randomized_svd(X, n_components, random_state=random_state)
    W = np.zeros_like(U)
    H = np.zeros_like(V)

    # The leading singular triplet is non-negative
    # so it can be used as is for initialization.
    W[:, 0] = np.sqrt(S[0]) * np.abs(U[:, 0])
    H[0, :] = np.sqrt(S[0]) * np.abs(V[0, :])

    for j in range(1, n_components):
        x, y = U[:, j], V[j, :]

        # extract positive and negative parts of column vectors
        x_p, y_p = np.maximum(x, 0), np.maximum(y, 0)
        x_n, y_n = np.abs(np.minimum(x, 0)), np.abs(np.minimum(y, 0))

        # and their norms
        x_p_nrm, y_p_nrm = norm(x_p), norm(y_p)
        x_n_nrm, y_n_nrm = norm(x_n), norm(y_n)

        m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm

        # choose update: keep whichever signed part carries more energy.
        if m_p > m_n:
            u = x_p / x_p_nrm
            v = y_p / y_p_nrm
            sigma = m_p
        else:
            u = x_n / x_n_nrm
            v = y_n / y_n_nrm
            sigma = m_n

        lbd = np.sqrt(S[j] * sigma)
        W[:, j] = lbd * u
        H[j, :] = lbd * v

    # Truncate tiny entries to exact zeros (see `eps`).
    W[W < eps] = 0
    H[H < eps] = 0

    if init == "nndsvd":
        pass
    elif init == "nndsvda":
        # Fill zeros with the mean of X (denser factors).
        avg = X.mean()
        W[W == 0] = avg
        H[H == 0] = avg
    elif init == "nndsvdar":
        # Fill zeros with small random positive values.
        rng = check_random_state(random_state)
        avg = X.mean()
        W[W == 0] = abs(avg * rng.standard_normal(size=len(W[W == 0])) / 100)
        H[H == 0] = abs(avg * rng.standard_normal(size=len(H[H == 0])) / 100)
    else:
        raise ValueError(
            "Invalid init parameter: got %r instead of one of %r"
            % (init, (None, "random", "nndsvd", "nndsvda", "nndsvdar"))
        )

    return W, H
def _update_coordinate_descent(X, W, Ht, l1_reg, l2_reg, shuffle, random_state):
    """Run one pass of coordinate descent over W for _fit_coordinate_descent.

    W is updated in place, visiting every coordinate once, and the total
    violation of the optimality conditions is returned. By symmetry, H can
    be updated through _update_coordinate_descent(X.T, Ht, W, ...).
    """
    n_components = Ht.shape[1]

    # Precompute the Gram matrix Ht' Ht and the correlations X Ht once per pass.
    gram = Ht.T @ Ht
    corr = safe_sparse_dot(X, Ht)

    # The L2 penalty amounts to shifting the diagonal of the Gram matrix.
    if l2_reg != 0.0:
        gram.flat[:: n_components + 1] += l2_reg
    # The L1 penalty amounts to subtracting a constant from every correlation.
    if l1_reg != 0.0:
        corr -= l1_reg

    order = (
        random_state.permutation(n_components)
        if shuffle
        else np.arange(n_components)
    )
    # Explicit intp dtype is required on some platforms (64-bit Windows).
    order = np.asarray(order, dtype=np.intp)

    return _update_cdnmf_fast(W, gram, corr, order)
def _fit_coordinate_descent(
    X,
    W,
    H,
    tol=1e-4,
    max_iter=200,
    l1_reg_W=0,
    l1_reg_H=0,
    l2_reg_W=0,
    l2_reg_H=0,
    update_H=True,
    verbose=0,
    shuffle=False,
    random_state=None,
):
    """Compute Non-negative Matrix Factorization (NMF) with Coordinate Descent

    The objective function is minimized with an alternating minimization of W
    and H. Each minimization is done with a cyclic (up to a permutation of the
    features) Coordinate Descent.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Constant matrix.

    W : array-like of shape (n_samples, n_components)
        Initial guess for the solution.

    H : array-like of shape (n_components, n_features)
        Initial guess for the solution.

    tol : float, default=1e-4
        Tolerance of the stopping condition.

    max_iter : int, default=200
        Maximum number of iterations before timing out.

    l1_reg_W : float, default=0.
        L1 regularization parameter for W.

    l1_reg_H : float, default=0.
        L1 regularization parameter for H.

    l2_reg_W : float, default=0.
        L2 regularization parameter for W.

    l2_reg_H : float, default=0.
        L2 regularization parameter for H.

    update_H : bool, default=True
        Set to True, both W and H will be estimated from initial guesses.
        Set to False, only W will be estimated.

    verbose : int, default=0
        The verbosity level.

    shuffle : bool, default=False
        If true, randomize the order of coordinates in the CD solver.

    random_state : int, RandomState instance or None, default=None
        Used to randomize the coordinates in the CD solver, when
        ``shuffle`` is set to ``True``. Pass an int for reproducible
        results across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    W : ndarray of shape (n_samples, n_components)
        Solution to the non-negative least squares problem.

    H : ndarray of shape (n_components, n_features)
        Solution to the non-negative least squares problem.

    n_iter : int
        The number of iterations done by the algorithm.

    References
    ----------
    .. [1] :doi:`"Fast local algorithms for large scale nonnegative matrix and tensor
       factorizations" <10.1587/transfun.E92.A.708>`
       Cichocki, Andrzej, and P. H. A. N. Anh-Huy. IEICE transactions on fundamentals
       of electronics, communications and computer sciences 92.3: 708-721, 2009.
    """
    # so W and Ht are both in C order in memory
    Ht = check_array(H.T, order="C")
    X = check_array(X, accept_sparse="csr")

    rng = check_random_state(random_state)

    # If max_iter is 0 the loop below never runs: report zero iterations
    # instead of raising NameError on the unbound loop variable.
    n_iter = 0

    for n_iter in range(1, max_iter + 1):
        violation = 0.0

        # Update W
        violation += _update_coordinate_descent(
            X, W, Ht, l1_reg_W, l2_reg_W, shuffle, rng
        )
        # Update H
        if update_H:
            violation += _update_coordinate_descent(
                X.T, Ht, W, l1_reg_H, l2_reg_H, shuffle, rng
            )

        if n_iter == 1:
            violation_init = violation

        # Already at a stationary point from the start: nothing to do.
        if violation_init == 0:
            break

        if verbose:
            print("violation:", violation / violation_init)

        # Stop when the relative decrease of the optimality violation is small.
        if violation / violation_init <= tol:
            if verbose:
                print("Converged at iteration", n_iter + 1)
            break

    return W, Ht.T, n_iter
def _multiplicative_update_w(
    X,
    W,
    H,
    beta_loss,
    l1_reg_W,
    l2_reg_W,
    gamma,
    H_sum=None,
    HHt=None,
    XHt=None,
    update_H=True,
):
    """Update W in Multiplicative Update NMF.

    W is multiplied in place by ``(numerator / denominator) ** gamma``, the
    multiplicative factor of the MM scheme for the beta-divergence (see the
    Fevotte & Idier reference in ``_fit_multiplicative_update``).

    ``H_sum``, ``HHt`` and ``XHt`` are auxiliary quantities that depend only
    on H (and X); they are computed on demand and returned so the caller can
    pass them back in while H is unchanged (``update_H=False``) to avoid
    recomputing them.
    """
    if beta_loss == 2:
        # Numerator
        if XHt is None:
            XHt = safe_sparse_dot(X, H.T)
        if update_H:
            # avoid a copy of XHt, which will be re-computed (update_H=True)
            numerator = XHt
        else:
            # preserve the XHt, which is not re-computed (update_H=False)
            numerator = XHt.copy()

        # Denominator
        if HHt is None:
            HHt = np.dot(H, H.T)
        denominator = np.dot(W, HHt)

    else:
        # Numerator
        # if X is sparse, compute WH only where X is non zero
        WH_safe_X = _special_sparse_dot(W, H, X)
        if sp.issparse(X):
            WH_safe_X_data = WH_safe_X.data
            X_data = X.data
        else:
            WH_safe_X_data = WH_safe_X
            X_data = X
            # copy used in the Denominator
            WH = WH_safe_X.copy()
            if beta_loss - 1.0 < 0:
                # clip tiny entries so the fractional power below stays finite
                WH[WH < EPSILON] = EPSILON

        # to avoid taking a negative power of zero
        if beta_loss - 2.0 < 0:
            WH_safe_X_data[WH_safe_X_data < EPSILON] = EPSILON

        if beta_loss == 1:
            # KL divergence: X / WH, computed in place
            np.divide(X_data, WH_safe_X_data, out=WH_safe_X_data)
        elif beta_loss == 0:
            # speeds up computation time
            # refer to /numpy/numpy/issues/9363
            WH_safe_X_data **= -1
            WH_safe_X_data **= 2
            # element-wise multiplication
            WH_safe_X_data *= X_data
        else:
            WH_safe_X_data **= beta_loss - 2
            # element-wise multiplication
            WH_safe_X_data *= X_data

        # here numerator = dot(X * (dot(W, H) ** (beta_loss - 2)), H.T)
        numerator = safe_sparse_dot(WH_safe_X, H.T)

        # Denominator
        if beta_loss == 1:
            if H_sum is None:
                H_sum = np.sum(H, axis=1)  # shape(n_components, )
            denominator = H_sum[np.newaxis, :]

        else:
            # computation of WHHt = dot(dot(W, H) ** beta_loss - 1, H.T)
            if sp.issparse(X):
                # memory efficient computation
                # (compute row by row, avoiding the dense matrix WH)
                WHHt = np.empty(W.shape)
                for i in range(X.shape[0]):
                    WHi = np.dot(W[i, :], H)
                    if beta_loss - 1 < 0:
                        WHi[WHi < EPSILON] = EPSILON
                    WHi **= beta_loss - 1
                    WHHt[i, :] = np.dot(WHi, H.T)
            else:
                WH **= beta_loss - 1
                WHHt = np.dot(WH, H.T)
            denominator = WHHt

    # Add L1 and L2 regularization
    if l1_reg_W > 0:
        denominator += l1_reg_W
    if l2_reg_W > 0:
        denominator = denominator + l2_reg_W * W
    # guard against division by zero in the ratio below
    denominator[denominator == 0] = EPSILON

    numerator /= denominator
    delta_W = numerator

    # gamma is in ]0, 1]
    if gamma != 1:
        delta_W **= gamma

    W *= delta_W

    return W, H_sum, HHt, XHt
def _multiplicative_update_h(
    X, W, H, beta_loss, l1_reg_H, l2_reg_H, gamma, A=None, B=None, rho=None
):
    """Update H in Multiplicative Update NMF.

    In the batch case (``A`` and ``B`` are None), H is multiplied in place by
    ``(numerator / denominator) ** gamma``. When ``A`` and ``B`` are given
    (online/minibatch NMF), they accumulate the discounted numerators and
    denominators with forgetting factor ``rho`` and H is recomputed as their
    ratio.
    """
    if beta_loss == 2:
        numerator = safe_sparse_dot(W.T, X)
        denominator = np.linalg.multi_dot([W.T, W, H])

    else:
        # Numerator
        WH_safe_X = _special_sparse_dot(W, H, X)
        if sp.issparse(X):
            WH_safe_X_data = WH_safe_X.data
            X_data = X.data
        else:
            WH_safe_X_data = WH_safe_X
            X_data = X
            # copy used in the Denominator
            WH = WH_safe_X.copy()
            if beta_loss - 1.0 < 0:
                # clip tiny entries so the fractional power below stays finite
                WH[WH < EPSILON] = EPSILON

        # to avoid division by zero
        if beta_loss - 2.0 < 0:
            WH_safe_X_data[WH_safe_X_data < EPSILON] = EPSILON

        if beta_loss == 1:
            # KL divergence: X / WH, computed in place
            np.divide(X_data, WH_safe_X_data, out=WH_safe_X_data)
        elif beta_loss == 0:
            # speeds up computation time
            # refer to /numpy/numpy/issues/9363
            WH_safe_X_data **= -1
            WH_safe_X_data **= 2
            # element-wise multiplication
            WH_safe_X_data *= X_data
        else:
            WH_safe_X_data **= beta_loss - 2
            # element-wise multiplication
            WH_safe_X_data *= X_data

        # here numerator = dot(W.T, (dot(W, H) ** (beta_loss - 2)) * X)
        numerator = safe_sparse_dot(W.T, WH_safe_X)

        # Denominator
        if beta_loss == 1:
            W_sum = np.sum(W, axis=0)  # shape(n_components, )
            W_sum[W_sum == 0] = 1.0
            denominator = W_sum[:, np.newaxis]

        # beta_loss not in (1, 2)
        else:
            # computation of WtWH = dot(W.T, dot(W, H) ** beta_loss - 1)
            if sp.issparse(X):
                # memory efficient computation
                # (compute column by column, avoiding the dense matrix WH)
                WtWH = np.empty(H.shape)
                for i in range(X.shape[1]):
                    WHi = np.dot(W, H[:, i])
                    if beta_loss - 1 < 0:
                        WHi[WHi < EPSILON] = EPSILON
                    WHi **= beta_loss - 1
                    WtWH[:, i] = np.dot(W.T, WHi)
            else:
                WH **= beta_loss - 1
                WtWH = np.dot(W.T, WH)
            denominator = WtWH

    # Add L1 and L2 regularization
    if l1_reg_H > 0:
        denominator += l1_reg_H
    if l2_reg_H > 0:
        denominator = denominator + l2_reg_H * H
    # guard against division by zero in the ratio below
    denominator[denominator == 0] = EPSILON

    if A is not None and B is not None:
        # Updates for the online nmf
        if gamma != 1:
            # undo the gamma exponent before accumulating the statistics
            H **= 1 / gamma
        numerator *= H
        A *= rho
        B *= rho
        A += numerator
        B += denominator
        H = A / B

        if gamma != 1:
            H **= gamma
    else:
        delta_H = numerator
        delta_H /= denominator
        if gamma != 1:
            delta_H **= gamma
        H *= delta_H

    return H
def _fit_multiplicative_update(
    X,
    W,
    H,
    beta_loss="frobenius",
    max_iter=200,
    tol=1e-4,
    l1_reg_W=0,
    l1_reg_H=0,
    l2_reg_W=0,
    l2_reg_H=0,
    update_H=True,
    verbose=0,
):
    """Compute Non-negative Matrix Factorization with Multiplicative Update.

    The objective function is _beta_divergence(X, WH) and is minimized with an
    alternating minimization of W and H. Each minimization is done with a
    Multiplicative Update.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Constant input matrix.

    W : array-like of shape (n_samples, n_components)
        Initial guess for the solution.

    H : array-like of shape (n_components, n_features)
        Initial guess for the solution.

    beta_loss : float or {'frobenius', 'kullback-leibler', \
            'itakura-saito'}, default='frobenius'
        String must be in {'frobenius', 'kullback-leibler', 'itakura-saito'}.
        Beta divergence to be minimized, measuring the distance between X
        and the dot product WH. Note that values different from 'frobenius'
        (or 2) and 'kullback-leibler' (or 1) lead to significantly slower
        fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input
        matrix X cannot contain zeros.

    max_iter : int, default=200
        Number of iterations.

    tol : float, default=1e-4
        Tolerance of the stopping condition.

    l1_reg_W : float, default=0.
        L1 regularization parameter for W.

    l1_reg_H : float, default=0.
        L1 regularization parameter for H.

    l2_reg_W : float, default=0.
        L2 regularization parameter for W.

    l2_reg_H : float, default=0.
        L2 regularization parameter for H.

    update_H : bool, default=True
        Set to True, both W and H will be estimated from initial guesses.
        Set to False, only W will be estimated.

    verbose : int, default=0
        The verbosity level.

    Returns
    -------
    W : ndarray of shape (n_samples, n_components)
        Solution to the non-negative least squares problem.

    H : ndarray of shape (n_components, n_features)
        Solution to the non-negative least squares problem.

    n_iter : int
        The number of iterations done by the algorithm.

    References
    ----------
    Lee, D. D., & Seung, H., S. (2001). Algorithms for Non-negative Matrix
    Factorization. Adv. Neural Inform. Process. Syst.. 13.
    Fevotte, C., & Idier, J. (2011). Algorithms for nonnegative matrix
    factorization with the beta-divergence. Neural Computation, 23(9).
    """
    start_time = time.time()

    beta_loss = _beta_loss_to_float(beta_loss)

    # gamma for Maximization-Minimization (MM) algorithm [Fevotte 2011]
    if beta_loss < 1:
        gamma = 1.0 / (2.0 - beta_loss)
    elif beta_loss > 2:
        gamma = 1.0 / (beta_loss - 1.0)
    else:
        gamma = 1.0

    # used for the convergence criterion
    error_at_init = _beta_divergence(X, W, H, beta_loss, square_root=True)
    previous_error = error_at_init

    H_sum, HHt, XHt = None, None, None
    for n_iter in range(1, max_iter + 1):
        # update W
        # H_sum, HHt and XHt are saved and reused if not update_H
        W, H_sum, HHt, XHt = _multiplicative_update_w(
            X,
            W,
            H,
            beta_loss=beta_loss,
            l1_reg_W=l1_reg_W,
            l2_reg_W=l2_reg_W,
            gamma=gamma,
            H_sum=H_sum,
            HHt=HHt,
            XHt=XHt,
            update_H=update_H,
        )

        # necessary for stability with beta_loss < 1
        if beta_loss < 1:
            W[W < np.finfo(np.float64).eps] = 0.0

        # update H (only at fit or fit_transform)
        if update_H:
            H = _multiplicative_update_h(
                X,
                W,
                H,
                beta_loss=beta_loss,
                l1_reg_H=l1_reg_H,
                l2_reg_H=l2_reg_H,
                gamma=gamma,
            )

            # These values will be recomputed since H changed
            H_sum, HHt, XHt = None, None, None

            # necessary for stability with beta_loss < 1
            if beta_loss <= 1:
                H[H < np.finfo(np.float64).eps] = 0.0

        # test convergence criterion every 10 iterations
        if tol > 0 and n_iter % 10 == 0:
            error = _beta_divergence(X, W, H, beta_loss, square_root=True)

            if verbose:
                iter_time = time.time()
                print(
                    "Epoch %02d reached after %.3f seconds, error: %f"
                    % (n_iter, iter_time - start_time, error)
                )

            # stop on small relative decrease of the error since initialization
            if (previous_error - error) / error_at_init < tol:
                break
            previous_error = error

    # do not print if we have already printed in the convergence test
    if verbose and (tol == 0 or n_iter % 10 != 0):
        end_time = time.time()
        print(
            "Epoch %02d reached after %.3f seconds." % (n_iter, end_time - start_time)
        )

    return W, H, n_iter
@validate_params(
{
"X": ["array-like", "sparse matrix"],
"W": ["array-like", None],
"H": ["array-like", None],
"update_H": ["boolean"],
},
prefer_skip_nested_validation=False,
)
def non_negative_factorization(
X,
W=None,
H=None,
n_components="auto",
*,
init=None,
update_H=True,
solver="cd",
beta_loss="frobenius",
tol=1e-4,
max_iter=200,
alpha_W=0.0,
alpha_H="same",
l1_ratio=0.0,
random_state=None,
verbose=0,
shuffle=False,
):
"""Compute Non-negative Matrix Factorization (NMF).
Find two non-negative matrices (W, H) whose product approximates the non-
negative matrix X. This factorization can be used for example for
dimensionality reduction, source separation or topic extraction.
The objective function is:
.. math::
L(W, H) &= 0.5 * ||X - WH||_{loss}^2
&+ alpha\\_W * l1\\_ratio * n\\_features * ||vec(W)||_1
&+ alpha\\_H * l1\\_ratio * n\\_samples * ||vec(H)||_1
&+ 0.5 * alpha\\_W * (1 - l1\\_ratio) * n\\_features * ||W||_{Fro}^2
&+ 0.5 * alpha\\_H * (1 - l1\\_ratio) * n\\_samples * ||H||_{Fro}^2,
where :math:`||A||_{Fro}^2 = \\sum_{i,j} A_{ij}^2` (Frobenius norm) and
:math:`||vec(A)||_1 = \\sum_{i,j} abs(A_{ij})` (Elementwise L1 norm)
The generic norm :math:`||X - WH||_{loss}^2` may represent
the Frobenius norm or another supported beta-divergence loss.
The choice between options is controlled by the `beta_loss` parameter.
The regularization terms are scaled by `n_features` for `W` and by `n_samples` for
`H` to keep their impact balanced with respect to one another and to the data fit
term as independent as possible of the size `n_samples` of the training set.
The objective function is minimized with an alternating minimization of W
and H. If H is given and update_H=False, it solves for W only.
Note that the transformed data is named W and the components matrix is named H. In
the NMF literature, the naming convention is usually the opposite since the data
matrix X is transposed.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Constant matrix.
W : array-like of shape (n_samples, n_components), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `update_H=False`, it is initialised as an array of zeros, unless
`solver='mu'`, then it is filled with values calculated by
`np.sqrt(X.mean() / self._n_components)`.
If `None`, uses the initialisation method specified in `init`.
H : array-like of shape (n_components, n_features), default=None
If `init='custom'`, it is used as initial guess for the solution.
If `update_H=False`, it is used as a constant, to solve for W only.
If `None`, uses the initialisation method specified in `init`.
n_components : int or {'auto'} or None, default='auto'
Number of components. If `None`, all features are kept.
If `n_components='auto'`, the number of components is automatically inferred
from `W` or `H` shapes.
.. versionchanged:: 1.4
Added `'auto'` value.
.. versionchanged:: 1.6
Default value changed from `None` to `'auto'`.
init : {'random', 'nndsvd', 'nndsvda', 'nndsvdar', 'custom'}, default=None
Method used to initialize the procedure.
Valid options:
- None: 'nndsvda' if n_components < n_features, otherwise 'random'.
- 'random': non-negative random matrices, scaled with:
`sqrt(X.mean() / n_components)`
- 'nndsvd': Nonnegative Double Singular Value Decomposition (NNDSVD)
initialization (better for sparseness)
- 'nndsvda': NNDSVD with zeros filled with the average of X
(better when sparsity is not desired)
- 'nndsvdar': NNDSVD with zeros filled with small random values
(generally faster, less accurate alternative to NNDSVDa
for when sparsity is not desired)
- 'custom': If `update_H=True`, use custom matrices W and H which must both
be provided. If `update_H=False`, then only custom matrix H is used.
.. versionchanged:: 0.23
The default value of `init` changed from 'random' to None in 0.23.
.. versionchanged:: 1.1
When `init=None` and n_components is less than n_samples and n_features
defaults to `nndsvda` instead of `nndsvd`.
update_H : bool, default=True
Set to True, both W and H will be estimated from initial guesses.
Set to False, only W will be estimated.
solver : {'cd', 'mu'}, default='cd'
Numerical solver to use:
- 'cd' is a Coordinate Descent solver that uses Fast Hierarchical
Alternating Least Squares (Fast HALS).
- 'mu' is a Multiplicative Update solver.
.. versionadded:: 0.17
Coordinate Descent solver.
.. versionadded:: 0.19
Multiplicative Update solver.
beta_loss : float or {'frobenius', 'kullback-leibler', \
'itakura-saito'}, default='frobenius'
Beta divergence to be minimized, measuring the distance between X
and the dot product WH. Note that values different from 'frobenius'
(or 2) and 'kullback-leibler' (or 1) lead to significantly slower
fits. Note that for beta_loss <= 0 (or 'itakura-saito'), the input
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/decomposition/_dict_learning.py | sklearn/decomposition/_dict_learning.py | """Dictionary learning."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import itertools
import sys
import time
from numbers import Integral, Real
import numpy as np
from joblib import effective_n_jobs
from scipy import linalg
from sklearn.base import (
BaseEstimator,
ClassNamePrefixFeaturesOutMixin,
TransformerMixin,
_fit_context,
)
from sklearn.linear_model import Lars, Lasso, LassoLars, orthogonal_mp_gram
from sklearn.utils import check_array, check_random_state, gen_batches, gen_even_slices
from sklearn.utils._param_validation import Interval, StrOptions, validate_params
from sklearn.utils.extmath import _randomized_svd, row_norms, svd_flip
from sklearn.utils.parallel import Parallel, delayed
from sklearn.utils.validation import check_is_fitted, validate_data
def _check_positive_coding(method, positive):
if positive and method in ["omp", "lars"]:
raise ValueError(
"Positive constraint not supported for '{}' coding method.".format(method)
)
def _sparse_encode_precomputed(
    X,
    dictionary,
    *,
    gram=None,
    cov=None,
    algorithm="lasso_lars",
    regularization=None,
    copy_cov=True,
    init=None,
    max_iter=1000,
    verbose=0,
    positive=False,
):
    """Generic sparse coding with precomputed Gram and/or covariance matrices.

    Each row of the result is the solution to a Lasso problem.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Data matrix.

    dictionary : ndarray of shape (n_components, n_features)
        The dictionary matrix against which to solve the sparse coding of
        the data. Some of the algorithms assume normalized rows.

    gram : ndarray of shape (n_components, n_components), default=None
        Precomputed Gram matrix, `dictionary * dictionary'`
        gram can be `None` if method is 'threshold'.

    cov : ndarray of shape (n_components, n_samples), default=None
        Precomputed covariance, `dictionary * X'`.

    algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}, \
            default='lasso_lars'
        The algorithm used:

        * `'lars'`: uses the least angle regression method
          (`linear_model.lars_path`);
        * `'lasso_lars'`: uses Lars to compute the Lasso solution;
        * `'lasso_cd'`: uses the coordinate descent method to compute the
          Lasso solution (`linear_model.Lasso`). lasso_lars will be faster if
          the estimated components are sparse;
        * `'omp'`: uses orthogonal matching pursuit to estimate the sparse
          solution;
        * `'threshold'`: squashes to zero all coefficients less than
          regularization from the projection `dictionary * data'`.

    regularization : int or float, default=None
        The regularization parameter. It corresponds to alpha when
        algorithm is `'lasso_lars'`, `'lasso_cd'` or `'threshold'`.
        Otherwise it corresponds to `n_nonzero_coefs`.

    init : ndarray of shape (n_samples, n_components), default=None
        Initialization value of the sparse code. Only used if
        `algorithm='lasso_cd'`.

    max_iter : int, default=1000
        Maximum number of iterations to perform if `algorithm='lasso_cd'` or
        `'lasso_lars'`.

    copy_cov : bool, default=True
        Whether to copy the precomputed covariance matrix; if `False`, it may
        be overwritten.

    verbose : int, default=0
        Controls the verbosity; the higher, the more messages.

    positive : bool, default=False
        Whether to enforce a positivity constraint on the sparse code.

        .. versionadded:: 0.20

    Returns
    -------
    code : ndarray of shape (n_samples, n_components)
        The sparse codes.
    """
    n_samples, n_features = X.shape
    n_components = dictionary.shape[0]

    if algorithm == "lasso_lars":
        alpha = float(regularization) / n_features  # account for scaling
        try:
            # silence numpy warnings raised internally by the LARS solver;
            # the previous error state is restored in the finally clause
            err_mgt = np.seterr(all="ignore")

            # Not passing in verbose=max(0, verbose-1) because Lars.fit already
            # corrects the verbosity level.
            lasso_lars = LassoLars(
                alpha=alpha,
                fit_intercept=False,
                verbose=verbose,
                precompute=gram,
                fit_path=False,
                positive=positive,
                max_iter=max_iter,
            )
            lasso_lars.fit(dictionary.T, X.T, Xy=cov)
            new_code = lasso_lars.coef_
        finally:
            np.seterr(**err_mgt)

    elif algorithm == "lasso_cd":
        alpha = float(regularization) / n_features  # account for scaling

        # TODO: Make verbosity argument for Lasso?
        # sklearn.linear_model.coordinate_descent.enet_path has a verbosity
        # argument that we could pass in from Lasso.
        clf = Lasso(
            alpha=alpha,
            fit_intercept=False,
            precompute=gram,
            tol=1e-8,  # TODO: This parameter should be exposed.
            max_iter=max_iter,
            warm_start=True,
            positive=positive,
        )

        if init is not None:
            # In some workflows using coordinate descent algorithms:
            # - users might provide NumPy arrays with read-only buffers
            # - `joblib` might memmap arrays making their buffer read-only
            # TODO: move this handling (which is currently too broad)
            # closer to the actual private function which need buffers to be writable.
            if not init.flags["WRITEABLE"]:
                init = np.array(init)
            # warm-start the coordinate descent from the provided codes
            clf.coef_ = init

        clf.fit(dictionary.T, X.T, check_input=False)
        new_code = clf.coef_

    elif algorithm == "lars":
        try:
            # silence numpy warnings raised internally by the LARS solver;
            # the previous error state is restored in the finally clause
            err_mgt = np.seterr(all="ignore")

            # Not passing in verbose=max(0, verbose-1) because Lars.fit already
            # corrects the verbosity level.
            lars = Lars(
                fit_intercept=False,
                verbose=verbose,
                precompute=gram,
                n_nonzero_coefs=int(regularization),
                fit_path=False,
            )
            lars.fit(dictionary.T, X.T, Xy=cov)
            new_code = lars.coef_
        finally:
            np.seterr(**err_mgt)

    elif algorithm == "threshold":
        # soft-thresholding of the projections: sign(cov) * max(|cov| - reg, 0)
        new_code = (np.sign(cov) * np.maximum(np.abs(cov) - regularization, 0)).T
        if positive:
            np.clip(new_code, 0, None, out=new_code)

    elif algorithm == "omp":
        new_code = orthogonal_mp_gram(
            Gram=gram,
            Xy=cov,
            n_nonzero_coefs=int(regularization),
            tol=None,
            norms_squared=row_norms(X, squared=True),
            copy_Xy=copy_cov,
        ).T

    return new_code.reshape(n_samples, n_components)
@validate_params(
    {
        "X": ["array-like"],
        "dictionary": ["array-like"],
        "gram": ["array-like", None],
        "cov": ["array-like", None],
        "algorithm": [
            StrOptions({"lasso_lars", "lasso_cd", "lars", "omp", "threshold"})
        ],
        "n_nonzero_coefs": [Interval(Integral, 1, None, closed="left"), None],
        "alpha": [Interval(Real, 0, None, closed="left"), None],
        "copy_cov": ["boolean"],
        "init": ["array-like", None],
        "max_iter": [Interval(Integral, 0, None, closed="left")],
        "n_jobs": [Integral, None],
        "check_input": ["boolean"],
        "verbose": ["verbose"],
        "positive": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
# XXX : could be moved to the linear_model module
def sparse_encode(
    X,
    dictionary,
    *,
    gram=None,
    cov=None,
    algorithm="lasso_lars",
    n_nonzero_coefs=None,
    alpha=None,
    copy_cov=True,
    init=None,
    max_iter=1000,
    n_jobs=None,
    check_input=True,
    verbose=0,
    positive=False,
):
    """Sparse coding.

    Each row of the result is the solution to a sparse coding problem.
    The goal is to find a sparse array `code` such that::

        X ~= code * dictionary

    Read more in the :ref:`User Guide <SparseCoder>`.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Data matrix.

    dictionary : array-like of shape (n_components, n_features)
        The dictionary matrix against which to solve the sparse coding of
        the data. Some of the algorithms assume normalized rows for meaningful
        output.

    gram : array-like of shape (n_components, n_components), default=None
        Precomputed Gram matrix, `dictionary * dictionary'`.

    cov : array-like of shape (n_components, n_samples), default=None
        Precomputed covariance, `dictionary * X'`.

    algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}, \
            default='lasso_lars'
        The algorithm used:

        * `'lars'`: uses the least angle regression method
          (`linear_model.lars_path`);
        * `'lasso_lars'`: uses Lars to compute the Lasso solution;
        * `'lasso_cd'`: uses the coordinate descent method to compute the
          Lasso solution (`linear_model.Lasso`). lasso_lars will be faster if
          the estimated components are sparse;
        * `'omp'`: uses orthogonal matching pursuit to estimate the sparse
          solution;
        * `'threshold'`: squashes to zero all coefficients less than
          regularization from the projection `dictionary * data'`.

    n_nonzero_coefs : int, default=None
        Number of nonzero coefficients to target in each column of the
        solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
        and is overridden by `alpha` in the `omp` case. If `None`, then
        `n_nonzero_coefs=int(n_features / 10)`.

    alpha : float, default=None
        If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
        penalty applied to the L1 norm.
        If `algorithm='threshold'`, `alpha` is the absolute value of the
        threshold below which coefficients will be squashed to zero.
        If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
        the reconstruction error targeted. In this case, it overrides
        `n_nonzero_coefs`.
        If `None`, default to 1.

    copy_cov : bool, default=True
        Whether to copy the precomputed covariance matrix; if `False`, it may
        be overwritten.

    init : ndarray of shape (n_samples, n_components), default=None
        Initialization value of the sparse codes. Only used if
        `algorithm='lasso_cd'`.

    max_iter : int, default=1000
        Maximum number of iterations to perform if `algorithm='lasso_cd'` or
        `'lasso_lars'`.

    n_jobs : int, default=None
        Number of parallel jobs to run.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    check_input : bool, default=True
        If `False`, the input arrays X and dictionary will not be checked.

    verbose : int, default=0
        Controls the verbosity; the higher, the more messages.

    positive : bool, default=False
        Whether to enforce positivity when finding the encoding.

        .. versionadded:: 0.20

    Returns
    -------
    code : ndarray of shape (n_samples, n_components)
        The sparse codes.

    See Also
    --------
    sklearn.linear_model.lars_path : Compute Least Angle Regression or Lasso
        path using LARS algorithm.
    sklearn.linear_model.orthogonal_mp : Solves Orthogonal Matching Pursuit problems.
    sklearn.linear_model.Lasso : Train Linear Model with L1 prior as regularizer.
    SparseCoder : Find a sparse representation of data from a fixed precomputed
        dictionary.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.decomposition import sparse_encode
    >>> X = np.array([[-1, -1, -1], [0, 0, 3]])
    >>> dictionary = np.array(
    ...     [[0, 1, 0],
    ...      [-1, -1, 2],
    ...      [1, 1, 1],
    ...      [0, 1, 1],
    ...      [0, 2, 1]],
    ...     dtype=np.float64
    ... )
    >>> sparse_encode(X, dictionary, alpha=1e-10)
    array([[ 0.,  0., -1.,  0.,  0.],
           [ 0.,  1.,  1.,  0.,  0.]])
    """
    if check_input:
        # lasso_cd needs C-contiguous arrays for its Cython solver
        order = "C" if algorithm == "lasso_cd" else None
        dictionary = check_array(
            dictionary, order=order, dtype=[np.float64, np.float32]
        )
        X = check_array(X, order=order, dtype=[np.float64, np.float32])

    if dictionary.shape[1] != X.shape[1]:
        raise ValueError(
            "Dictionary and X have different numbers of features: "
            "dictionary.shape: {} X.shape: {}".format(dictionary.shape, X.shape)
        )

    _check_positive_coding(algorithm, positive)

    return _sparse_encode(
        X,
        dictionary,
        gram=gram,
        cov=cov,
        algorithm=algorithm,
        n_nonzero_coefs=n_nonzero_coefs,
        alpha=alpha,
        copy_cov=copy_cov,
        init=init,
        max_iter=max_iter,
        n_jobs=n_jobs,
        verbose=verbose,
        positive=positive,
    )
def _sparse_encode(
    X,
    dictionary,
    *,
    gram=None,
    cov=None,
    algorithm="lasso_lars",
    n_nonzero_coefs=None,
    alpha=None,
    copy_cov=True,
    init=None,
    max_iter=1000,
    n_jobs=None,
    verbose=0,
    positive=False,
):
    """Sparse coding without input/parameter validation."""
    n_samples, n_features = X.shape
    n_components = dictionary.shape[0]

    # Pick the regularization value the selected algorithm expects: a target
    # number of non-zero coefficients for the greedy methods, an L1 penalty
    # (or threshold) for the others.
    if algorithm in ("lars", "omp"):
        if n_nonzero_coefs is not None:
            regularization = n_nonzero_coefs
        else:
            regularization = min(max(n_features / 10, 1), n_components)
    else:
        regularization = 1.0 if alpha is None else alpha

    if algorithm != "threshold" and gram is None:
        gram = np.dot(dictionary, dictionary.T).astype(X.dtype, copy=False)

    if algorithm != "lasso_cd" and cov is None:
        copy_cov = False
        cov = np.dot(dictionary, X.T)

    # Sequential path: a single worker, or thresholding (cheap enough anyway).
    if algorithm == "threshold" or effective_n_jobs(n_jobs) == 1:
        return _sparse_encode_precomputed(
            X,
            dictionary,
            gram=gram,
            cov=cov,
            algorithm=algorithm,
            regularization=regularization,
            copy_cov=copy_cov,
            init=init,
            max_iter=max_iter,
            verbose=verbose,
            positive=positive,
        )

    # Parallel path: encode even slices of the samples independently and
    # scatter the partial codes back into a single output array.
    code = np.empty((n_samples, n_components))
    batches = list(gen_even_slices(n_samples, effective_n_jobs(n_jobs)))
    partial_codes = Parallel(n_jobs=n_jobs, verbose=verbose)(
        delayed(_sparse_encode_precomputed)(
            X[batch],
            dictionary,
            gram=gram,
            cov=None if cov is None else cov[:, batch],
            algorithm=algorithm,
            regularization=regularization,
            copy_cov=copy_cov,
            init=None if init is None else init[batch],
            max_iter=max_iter,
            verbose=verbose,
            positive=positive,
        )
        for batch in batches
    )
    for batch, partial_code in zip(batches, partial_codes):
        code[batch] = partial_code
    return code
def _update_dict(
    dictionary,
    Y,
    code,
    A=None,
    B=None,
    verbose=False,
    random_state=None,
    positive=False,
):
    """Update the dense dictionary factor in place.

    Each atom (row of `dictionary`) is updated in turn from the sufficient
    statistics `A = code' code` and `B = Y' code`; since `A[k] @ dictionary`
    uses the current dictionary, atoms updated earlier in the loop are seen
    by later updates. Atoms that are (almost) never used are resampled from
    the data, and every atom is projected onto the unit ball.

    Parameters
    ----------
    dictionary : ndarray of shape (n_components, n_features)
        Value of the dictionary at the previous iteration.

    Y : ndarray of shape (n_samples, n_features)
        Data matrix.

    code : ndarray of shape (n_samples, n_components)
        Sparse coding of the data against which to optimize the dictionary.

    A : ndarray of shape (n_components, n_components), default=None
        Together with `B`, sufficient stats of the online model to update the
        dictionary.

    B : ndarray of shape (n_features, n_components), default=None
        Together with `A`, sufficient stats of the online model to update the
        dictionary.

    verbose : bool, default=False
        Degree of output the procedure will print.

    random_state : int, RandomState instance or None, default=None
        Used for randomly initializing the dictionary. Pass an int for
        reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.

    positive : bool, default=False
        Whether to enforce positivity when finding the dictionary.

        .. versionadded:: 0.20
    """
    n_samples, n_components = code.shape
    random_state = check_random_state(random_state)

    if A is None:
        A = code.T @ code
    if B is None:
        B = Y.T @ code

    n_unused = 0

    for k in range(n_components):
        if A[k, k] > 1e-6:
            # 1e-6 is arbitrary but consistent with the spams implementation
            dictionary[k] += (B[:, k] - A[k] @ dictionary) / A[k, k]
        else:
            # kth atom is almost never used -> sample a new one from the data
            newd = Y[random_state.choice(n_samples)]

            # add small noise to avoid making the sparse coding ill conditioned
            noise_level = 0.01 * (newd.std() or 1)  # avoid 0 std
            noise = random_state.normal(0, noise_level, size=len(newd))

            dictionary[k] = newd + noise
            # invalidate the codes of the resampled atom
            code[:, k] = 0
            n_unused += 1

        if positive:
            np.clip(dictionary[k], 0, None, out=dictionary[k])

        # Projection on the constraint set ||V_k|| <= 1
        dictionary[k] /= max(linalg.norm(dictionary[k]), 1)

    if verbose and n_unused > 0:
        print(f"{n_unused} unused atoms resampled.")
def _dict_learning(
    X,
    n_components,
    *,
    alpha,
    max_iter,
    tol,
    method,
    n_jobs,
    dict_init,
    code_init,
    callback,
    verbose,
    random_state,
    return_n_iter,
    positive_dict,
    positive_code,
    method_max_iter,
):
    """Main dictionary learning algorithm.

    Alternates a sparse-coding step (`sparse_encode`) with an in-place
    dictionary update (`_update_dict`) until the decrease of the cost
    ``0.5 * ||X - code @ dictionary||_F^2 + alpha * ||code||_1`` between two
    iterations falls below ``tol * current_cost``, or `max_iter` iterations
    are reached.

    Returns ``(code, dictionary, errors)`` where `errors` is the list of
    cost values at each iteration; when `return_n_iter` is true the number
    of iterations performed is appended as a fourth element.
    """
    t0 = time.time()
    # Init the code and the dictionary with an SVD of X
    if code_init is not None and dict_init is not None:
        code = np.array(code_init, order="F")
        # Don't copy V, it will happen below
        dictionary = dict_init
    else:
        code, S, dictionary = linalg.svd(X, full_matrices=False)
        # flip the initial code's sign to enforce deterministic output
        code, dictionary = svd_flip(code, dictionary)
        # fold the singular values into the dictionary so that
        # code @ dictionary reproduces the truncated SVD of X
        dictionary = S[:, np.newaxis] * dictionary
    r = len(dictionary)
    # NOTE(review): n_components must be an int here -- comparing None
    # against r would raise a TypeError on Python 3.
    if n_components <= r:
        code = code[:, :n_components]
        dictionary = dictionary[:n_components, :]
    else:
        # pad with zero atoms/codes when more components than the rank of X
        code = np.c_[code, np.zeros((len(code), n_components - r))]
        dictionary = np.r_[
            dictionary, np.zeros((n_components - r, dictionary.shape[1]))
        ]
    # Fortran-order dict better suited for the sparse coding which is the
    # bottleneck of this algorithm.
    dictionary = np.asfortranarray(dictionary)
    errors = []
    current_cost = np.nan
    if verbose == 1:
        print("[dict_learning]", end=" ")
    # If max_iter is 0, number of iterations returned should be zero
    ii = -1
    for ii in range(max_iter):
        dt = time.time() - t0
        if verbose == 1:
            sys.stdout.write(".")
            sys.stdout.flush()
        elif verbose:
            print(
                "Iteration % 3i (elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
                % (ii, dt, dt / 60, current_cost)
            )
        # Update code
        code = sparse_encode(
            X,
            dictionary,
            algorithm=method,
            alpha=alpha,
            init=code,
            n_jobs=n_jobs,
            positive=positive_code,
            max_iter=method_max_iter,
            verbose=verbose,
        )
        # Update dictionary in place
        _update_dict(
            dictionary,
            X,
            code,
            verbose=verbose,
            random_state=random_state,
            positive=positive_dict,
        )
        # Cost function: reconstruction error + l1 penalty on the code
        current_cost = 0.5 * np.sum((X - code @ dictionary) ** 2) + alpha * np.sum(
            np.abs(code)
        )
        errors.append(current_cost)
        if ii > 0:
            dE = errors[-2] - errors[-1]
            # assert(dE >= -tol * errors[-1])
            # stop when the relative decrease of the cost is below tol
            if dE < tol * errors[-1]:
                if verbose == 1:
                    # A line return
                    print("")
                elif verbose:
                    print("--- Convergence reached after %d iterations" % ii)
                break
        # invoke the user callback every five iterations
        if ii % 5 == 0 and callback is not None:
            callback(locals())
    if return_n_iter:
        return code, dictionary, errors, ii + 1
    else:
        return code, dictionary, errors
@validate_params(
    {
        "X": ["array-like"],
        "return_code": ["boolean"],
        "method": [StrOptions({"cd", "lars"})],
        "method_max_iter": [Interval(Integral, 0, None, closed="left")],
    },
    prefer_skip_nested_validation=False,
)
def dict_learning_online(
    X,
    n_components=2,
    *,
    alpha=1,
    max_iter=100,
    return_code=True,
    dict_init=None,
    callback=None,
    batch_size=256,
    verbose=False,
    shuffle=True,
    n_jobs=None,
    method="lars",
    random_state=None,
    positive_dict=False,
    positive_code=False,
    method_max_iter=1000,
    tol=1e-3,
    max_no_improvement=10,
):
    """Solve a dictionary learning matrix factorization problem online.
    Finds the best dictionary and the corresponding sparse code for
    approximating the data matrix X by solving::
        (U^*, V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1
                     (U,V)
                     with || V_k ||_2 = 1 for all  0 <= k < n_components
    where V is the dictionary and U is the sparse code. ||.||_Fro stands for
    the Frobenius norm and ||.||_1,1 stands for the entry-wise matrix norm
    which is the sum of the absolute values of all the entries in the matrix.
    This is accomplished by repeatedly iterating over mini-batches by slicing
    the input data.
    Read more in the :ref:`User Guide <DictionaryLearning>`.
    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Data matrix.
    n_components : int or None, default=2
        Number of dictionary atoms to extract. If None, then ``n_components``
        is set to ``n_features``.
    alpha : float, default=1
        Sparsity controlling parameter.
    max_iter : int, default=100
        Maximum number of iterations over the complete dataset before
        stopping independently of any early stopping criterion heuristics.
        .. versionadded:: 1.1
    return_code : bool, default=True
        Whether to also return the code U or just the dictionary `V`.
    dict_init : ndarray of shape (n_components, n_features), default=None
        Initial values for the dictionary for warm restart scenarios.
        If `None`, the initial values for the dictionary are created
        with an SVD decomposition of the data via
        :func:`~sklearn.utils.extmath.randomized_svd`.
    callback : callable, default=None
        A callable that gets invoked at the end of each iteration.
    batch_size : int, default=256
        The number of samples to take in each batch.
        .. versionchanged:: 1.3
            The default value of `batch_size` changed from 3 to 256 in version 1.3.
    verbose : bool, default=False
        To control the verbosity of the procedure.
    shuffle : bool, default=True
        Whether to shuffle the data before splitting it in batches.
    n_jobs : int, default=None
        Number of parallel jobs to run.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    method : {'lars', 'cd'}, default='lars'
        * `'lars'`: uses the least angle regression method to solve the lasso
          problem (`linear_model.lars_path`);
        * `'cd'`: uses the coordinate descent method to compute the
          Lasso solution (`linear_model.Lasso`). Lars will be faster if
          the estimated components are sparse.
    random_state : int, RandomState instance or None, default=None
        Used for initializing the dictionary when ``dict_init`` is not
        specified, randomly shuffling the data when ``shuffle`` is set to
        ``True``, and updating the dictionary. Pass an int for reproducible
        results across multiple function calls.
        See :term:`Glossary <random_state>`.
    positive_dict : bool, default=False
        Whether to enforce positivity when finding the dictionary.
        .. versionadded:: 0.20
    positive_code : bool, default=False
        Whether to enforce positivity when finding the code.
        .. versionadded:: 0.20
    method_max_iter : int, default=1000
        Maximum number of iterations to perform when solving the lasso problem.
        .. versionadded:: 0.22
    tol : float, default=1e-3
        Control early stopping based on the norm of the differences in the
        dictionary between 2 steps.
        To disable early stopping based on changes in the dictionary, set
        `tol` to 0.0.
        .. versionadded:: 1.1
    max_no_improvement : int, default=10
        Control early stopping based on the consecutive number of mini batches
        that does not yield an improvement on the smoothed cost function.
        To disable convergence detection based on cost function, set
        `max_no_improvement` to None.
        .. versionadded:: 1.1
    Returns
    -------
    code : ndarray of shape (n_samples, n_components),
        The sparse code (only returned if `return_code=True`).
    dictionary : ndarray of shape (n_components, n_features),
        The solutions to the dictionary learning problem.
    See Also
    --------
    dict_learning : Solve a dictionary learning matrix factorization problem.
    DictionaryLearning : Find a dictionary that sparsely encodes data.
    MiniBatchDictionaryLearning : A faster, less accurate, version of the dictionary
        learning algorithm.
    SparsePCA : Sparse Principal Components Analysis.
    MiniBatchSparsePCA : Mini-batch Sparse Principal Components Analysis.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.datasets import make_sparse_coded_signal
    >>> from sklearn.decomposition import dict_learning_online
    >>> X, _, _ = make_sparse_coded_signal(
    ...     n_samples=30, n_components=15, n_features=20, n_nonzero_coefs=10,
    ...     random_state=42,
    ... )
    >>> U, V = dict_learning_online(
    ...     X, n_components=15, alpha=0.2, max_iter=20, batch_size=3, random_state=42
    ... )
    We can check the level of sparsity of `U`:
    >>> np.mean(U == 0)
    np.float64(0.53)
    We can compare the average squared euclidean norm of the reconstruction
    error of the sparse coded signal relative to the squared euclidean norm of
    the original signal:
    >>> X_hat = U @ V
    >>> np.mean(np.sum((X_hat - X) ** 2, axis=1) / np.sum(X ** 2, axis=1))
    np.float64(0.053)
    """
    transform_algorithm = "lasso_" + method
    # This function is a thin wrapper: fit a MiniBatchDictionaryLearning
    # estimator and expose its components (and optionally the codes).
    est = MiniBatchDictionaryLearning(
        n_components=n_components,
        alpha=alpha,
        max_iter=max_iter,
        n_jobs=n_jobs,
        fit_algorithm=method,
        batch_size=batch_size,
        shuffle=shuffle,
        dict_init=dict_init,
        random_state=random_state,
        transform_algorithm=transform_algorithm,
        transform_alpha=alpha,
        positive_code=positive_code,
        positive_dict=positive_dict,
        transform_max_iter=method_max_iter,
        verbose=verbose,
        callback=callback,
        tol=tol,
        max_no_improvement=max_no_improvement,
    ).fit(X)
    if not return_code:
        return est.components_
    else:
        code = est.transform(X)
        return code, est.components_
@validate_params(
{
"X": ["array-like"],
"method": [StrOptions({"lars", "cd"})],
"return_n_iter": ["boolean"],
"method_max_iter": [Interval(Integral, 0, None, closed="left")],
},
prefer_skip_nested_validation=False,
)
def dict_learning(
X,
n_components,
*,
alpha,
max_iter=100,
tol=1e-8,
method="lars",
n_jobs=None,
dict_init=None,
code_init=None,
callback=None,
verbose=False,
random_state=None,
return_n_iter=False,
positive_dict=False,
positive_code=False,
method_max_iter=1000,
):
"""Solve a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_Fro^2 + alpha * || U ||_1,1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. ||.||_Fro stands for
the Frobenius norm and ||.||_1,1 stands for the entry-wise matrix norm
which is the sum of the absolute values of all the entries in the matrix.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data matrix.
n_components : int
Number of dictionary atoms to extract.
alpha : int or float
Sparsity controlling parameter.
max_iter : int, default=100
Maximum number of iterations to perform.
tol : float, default=1e-8
Tolerance for the stopping condition.
method : {'lars', 'cd'}, default='lars'
The method used:
* `'lars'`: uses the least angle regression method to solve the lasso
problem (`linear_model.lars_path`);
* `'cd'`: uses the coordinate descent method to compute the
Lasso solution (`linear_model.Lasso`). Lars will be faster if
the estimated components are sparse.
n_jobs : int, default=None
Number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
dict_init : ndarray of shape (n_components, n_features), default=None
Initial value for the dictionary for warm restart scenarios. Only used
if `code_init` and `dict_init` are not None.
code_init : ndarray of shape (n_samples, n_components), default=None
Initial value for the sparse code for warm restart scenarios. Only used
if `code_init` and `dict_init` are not None.
callback : callable, default=None
Callable that gets invoked every five iterations.
verbose : bool, default=False
To control the verbosity of the procedure.
random_state : int, RandomState instance or None, default=None
Used for randomly initializing the dictionary. Pass an int for
reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
return_n_iter : bool, default=False
Whether or not to return the number of iterations.
positive_dict : bool, default=False
Whether to enforce positivity when finding the dictionary.
.. versionadded:: 0.20
positive_code : bool, default=False
Whether to enforce positivity when finding the code.
.. versionadded:: 0.20
method_max_iter : int, default=1000
Maximum number of iterations to perform.
.. versionadded:: 0.22
Returns
-------
code : ndarray of shape (n_samples, n_components)
The sparse code factor in the matrix factorization.
dictionary : ndarray of shape (n_components, n_features),
The dictionary factor in the matrix factorization.
errors : array
Vector of errors at each iteration.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
See Also
--------
dict_learning_online : Solve a dictionary learning matrix factorization
problem online.
DictionaryLearning : Find a dictionary that sparsely encodes data.
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/decomposition/tests/test_sparse_pca.py | sklearn/decomposition/tests/test_sparse_pca.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from sklearn.datasets import make_low_rank_matrix
from sklearn.decomposition import PCA, MiniBatchSparsePCA, SparsePCA
from sklearn.utils import check_random_state
from sklearn.utils._testing import (
assert_allclose,
assert_array_almost_equal,
if_safe_multiprocessing_with_blas,
)
from sklearn.utils.extmath import svd_flip
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
    """Build a noisy low-rank dataset whose dictionary atoms are square blobs.

    Returns ``(Y, U, V)`` with ``Y = U @ V + noise``, where ``U`` is the code
    and ``V`` the dictionary of flattened blob images.
    """
    n_features = image_size[0] * image_size[1]
    random_gen = check_random_state(random_state)
    # Draw the code first, then the dictionary placeholder (RNG order matters
    # for reproducibility with a shared RandomState).
    U = random_gen.randn(n_samples, n_components)
    V = random_gen.randn(n_components, n_features)
    blob_centers = [(3, 3), (6, 7), (8, 1)]
    blob_half_sizes = [1, 2, 1]
    for comp in range(n_components):
        row, col = blob_centers[comp]
        half = blob_half_sizes[comp]
        blob = np.zeros(image_size)
        blob[row - half : row + half, col - half : col + half] = 1.0
        V[comp, :] = blob.ravel()
    # Y = U V plus small Gaussian noise.
    Y = U @ V
    Y += 0.1 * random_gen.randn(*Y.shape)
    return Y, U, V
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
    """`SparsePCA` output shapes for under- and over-complete settings."""
    rng = np.random.RandomState(0)
    X = rng.randn(12, 10)
    # First fewer components than features, then an overcomplete decomposition.
    for n_components in (8, 13):
        model = SparsePCA(n_components=n_components, random_state=rng)
        codes = model.fit_transform(X)
        assert model.components_.shape == (n_components, 10)
        assert codes.shape == (12, n_components)
def test_fit_transform(global_random_seed):
    """The LARS and coordinate-descent solvers find similar components."""
    alpha = 1
    rng = np.random.RandomState(global_random_seed)
    Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)  # wide array
    spca_lars = SparsePCA(
        n_components=3, method="lars", alpha=alpha, random_state=global_random_seed
    )
    spca_lars.fit(Y)
    # Test that CD gives similar results
    spca_lasso = SparsePCA(
        n_components=3, method="cd", random_state=global_random_seed, alpha=alpha
    )
    spca_lasso.fit(Y)
    assert_allclose(spca_lasso.components_, spca_lars.components_, rtol=5e-4, atol=2e-4)
# TODO: remove mark once loky bug is fixed:
# https://github.com/joblib/loky/issues/458
@pytest.mark.thread_unsafe
@if_safe_multiprocessing_with_blas
def test_fit_transform_parallel(global_random_seed):
    """Fitting with `n_jobs=2` yields the same transform as a single job."""
    alpha = 1
    rng = np.random.RandomState(global_random_seed)
    Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)  # wide array
    spca_lars = SparsePCA(
        n_components=3, method="lars", alpha=alpha, random_state=global_random_seed
    )
    spca_lars.fit(Y)
    U1 = spca_lars.transform(Y)
    # Test multiple CPUs
    spca = SparsePCA(
        n_components=3,
        n_jobs=2,
        method="lars",
        alpha=alpha,
        random_state=global_random_seed,
    ).fit(Y)
    U2 = spca.transform(Y)
    # sanity check: the model learned something non-trivial
    assert not np.all(spca_lars.components_ == 0)
    assert_array_almost_equal(U1, U2)
def test_transform_nan(global_random_seed):
    """A feature that is zero in every sample must not produce NaN codes."""
    rng = np.random.RandomState(global_random_seed)
    Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)  # wide array
    # Zero-out the first feature across all samples.
    Y[:, 0] = 0
    model = SparsePCA(n_components=8, random_state=global_random_seed)
    codes = model.fit_transform(Y)
    assert not np.isnan(codes).any()
def test_fit_transform_tall(global_random_seed):
    """LARS and CD solvers agree on a tall (n_samples > n_features) array."""
    rng = np.random.RandomState(global_random_seed)
    Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng)  # tall array
    spca_lars = SparsePCA(n_components=3, method="lars", random_state=rng)
    U1 = spca_lars.fit_transform(Y)
    spca_lasso = SparsePCA(n_components=3, method="cd", random_state=rng)
    U2 = spca_lasso.fit(Y).transform(Y)
    assert_allclose(U1, U2, rtol=1e-4, atol=2e-5)
def test_initialization(global_random_seed):
    """With `max_iter=0` the components equal the (normalized) `V_init`."""
    rng = np.random.RandomState(global_random_seed)
    U_init = rng.randn(5, 3)
    V_init = rng.randn(3, 4)
    model = SparsePCA(
        n_components=3, U_init=U_init, V_init=V_init, max_iter=0, random_state=rng
    )
    model.fit(rng.randn(5, 4))
    # Expected: each row of V_init scaled to unit norm, with deterministic
    # signs applied via svd_flip.
    expected_components = V_init / np.linalg.norm(V_init, axis=1, keepdims=True)
    expected_components = svd_flip(u=expected_components.T, v=None)[0].T
    assert_allclose(model.components_, expected_components)
def test_mini_batch_correct_shapes():
    """`MiniBatchSparsePCA` output shapes for under- and over-complete settings."""
    rng = np.random.RandomState(0)
    X = rng.randn(12, 10)
    # First fewer components than features, then an overcomplete decomposition.
    for n_components in (8, 13):
        model = MiniBatchSparsePCA(
            n_components=n_components, max_iter=1, random_state=rng
        )
        codes = model.fit_transform(X)
        assert model.components_.shape == (n_components, 10)
        assert codes.shape == (12, n_components)
def test_scaling_fit_transform(global_random_seed):
    """`transform` on a training subset matches the `fit_transform` rows."""
    alpha = 1
    rng = np.random.RandomState(global_random_seed)
    Y, _, _ = generate_toy_data(3, 1000, (8, 8), random_state=rng)
    spca_lars = SparsePCA(n_components=3, method="lars", alpha=alpha, random_state=rng)
    results_train = spca_lars.fit_transform(Y)
    results_test = spca_lars.transform(Y[:10])
    assert_allclose(results_train[0], results_test[0])
def test_pca_vs_spca(global_random_seed):
    """With zero penalties, SparsePCA spans the same subspace as PCA."""
    rng = np.random.RandomState(global_random_seed)
    Y, _, _ = generate_toy_data(3, 1000, (8, 8), random_state=rng)
    Z, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng)
    spca = SparsePCA(alpha=0, ridge_alpha=0, n_components=2, random_state=rng)
    pca = PCA(n_components=2, random_state=rng)
    pca.fit(Y)
    spca.fit(Y)
    results_test_pca = pca.transform(Z)
    results_test_spca = spca.transform(Z)
    # The two bases agree up to sign: their cross-products form an identity.
    assert_allclose(
        np.abs(spca.components_.dot(pca.components_.T)), np.eye(2), atol=1e-4
    )
    # Fix the signs before comparing the transformed test data directly.
    results_test_pca *= np.sign(results_test_pca[0, :])
    results_test_spca *= np.sign(results_test_spca[0, :])
    assert_allclose(results_test_pca, results_test_spca, atol=1e-4)
@pytest.mark.parametrize("SPCA", [SparsePCA, MiniBatchSparsePCA])
@pytest.mark.parametrize("n_components", [None, 3])
def test_spca_n_components_(SPCA, n_components):
    """`n_components_` is the requested value, or `n_features` when None."""
    rng = np.random.RandomState(0)
    n_samples, n_features = 12, 10
    X = rng.randn(n_samples, n_features)
    model = SPCA(n_components=n_components).fit(X)
    expected = n_features if n_components is None else n_components
    assert model.n_components_ == expected
@pytest.mark.parametrize("SPCA", (SparsePCA, MiniBatchSparsePCA))
@pytest.mark.parametrize("method", ("lars", "cd"))
@pytest.mark.parametrize(
    "data_type, expected_type",
    (
        (np.float32, np.float32),
        (np.float64, np.float64),
        (np.int32, np.float64),
        (np.int64, np.float64),
    ),
)
def test_sparse_pca_dtype_match(SPCA, method, data_type, expected_type):
    """Float input dtype is preserved; integer input is promoted to float64."""
    # Verify output matrix dtype
    n_samples, n_features, n_components = 12, 10, 3
    rng = np.random.RandomState(0)
    input_array = rng.randn(n_samples, n_features).astype(data_type)
    model = SPCA(n_components=n_components, method=method)
    transformed = model.fit_transform(input_array)
    assert transformed.dtype == expected_type
    assert model.components_.dtype == expected_type
@pytest.mark.parametrize("SPCA", (SparsePCA, MiniBatchSparsePCA))
@pytest.mark.parametrize("method", ("lars", "cd"))
def test_sparse_pca_numerical_consistency(SPCA, method, global_random_seed):
    """Results are numerically consistent between float32 and float64 input."""
    # Verify numerical consistency between np.float32 and np.float64
    n_samples, n_features, n_components = 20, 20, 5
    input_array = make_low_rank_matrix(
        n_samples=n_samples,
        n_features=n_features,
        effective_rank=n_components,
        random_state=global_random_seed,
    )
    model_32 = SPCA(
        n_components=n_components,
        method=method,
        random_state=global_random_seed,
    )
    transformed_32 = model_32.fit_transform(input_array.astype(np.float32))
    model_64 = SPCA(
        n_components=n_components,
        method=method,
        random_state=global_random_seed,
    )
    transformed_64 = model_64.fit_transform(input_array.astype(np.float64))
    assert_allclose(transformed_64, transformed_32)
    assert_allclose(model_64.components_, model_32.components_)
@pytest.mark.parametrize("SPCA", [SparsePCA, MiniBatchSparsePCA])
def test_spca_feature_names_out(SPCA):
    """Output feature names follow the `<estimatorname><index>` pattern."""
    rng = np.random.RandomState(0)
    n_samples, n_features = 12, 10
    X = rng.randn(n_samples, n_features)
    model = SPCA(n_components=4).fit(X)
    prefix = SPCA.__name__.lower()
    expected = [prefix + str(i) for i in range(4)]
    assert_array_equal(expected, model.get_feature_names_out())
def test_spca_early_stopping(global_random_seed):
    """Check that `tol` and `max_no_improvement` act as early stopping."""
    rng = np.random.RandomState(global_random_seed)
    n_samples, n_features = 50, 10
    X = rng.randn(n_samples, n_features)
    # A large tolerance forces early stopping of the first model only.
    model_early_stopped = MiniBatchSparsePCA(
        max_iter=100, tol=0.5, random_state=global_random_seed
    ).fit(X)
    model_not_early_stopped = MiniBatchSparsePCA(
        max_iter=100, tol=1e-3, random_state=global_random_seed
    ).fit(X)
    assert model_early_stopped.n_iter_ < model_not_early_stopped.n_iter_
    # With a tiny `tol`, a small `max_no_improvement` should trigger early
    # stopping before a large one does.
    model_early_stopped = MiniBatchSparsePCA(
        max_iter=100, tol=1e-6, max_no_improvement=2, random_state=global_random_seed
    ).fit(X)
    model_not_early_stopped = MiniBatchSparsePCA(
        max_iter=100, tol=1e-6, max_no_improvement=100, random_state=global_random_seed
    ).fit(X)
    assert model_early_stopped.n_iter_ < model_not_early_stopped.n_iter_
def test_equivalence_components_pca_spca(global_random_seed):
    """Check the equivalence of the components found by PCA and SparsePCA.
    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/23932
    """
    rng = np.random.RandomState(global_random_seed)
    X = rng.randn(50, 4)
    n_components = 2
    pca = PCA(
        n_components=n_components,
        svd_solver="randomized",
        random_state=0,
    ).fit(X)
    # alpha=0 and ridge_alpha=0 disable the regularization, so SparsePCA is
    # expected to recover the same components as PCA.
    spca = SparsePCA(
        n_components=n_components,
        method="lars",
        ridge_alpha=0,
        alpha=0,
        random_state=0,
    ).fit(X)
    assert_allclose(pca.components_, spca.components_)
def test_sparse_pca_inverse_transform(global_random_seed):
    """Check that `inverse_transform` in `SparsePCA` and `PCA` are similar."""
    rng = np.random.RandomState(global_random_seed)
    n_samples, n_features = 10, 5
    X = rng.randn(n_samples, n_features)
    n_components = 2
    # Near-zero penalties make SparsePCA behave like plain PCA.
    spca = SparsePCA(
        n_components=n_components,
        alpha=1e-12,
        ridge_alpha=1e-12,
        random_state=global_random_seed,
    )
    pca = PCA(n_components=n_components, random_state=global_random_seed)
    X_trans_spca = spca.fit_transform(X)
    X_trans_pca = pca.fit_transform(X)
    assert_allclose(
        spca.inverse_transform(X_trans_spca), pca.inverse_transform(X_trans_pca)
    )
@pytest.mark.parametrize("SPCA", [SparsePCA, MiniBatchSparsePCA])
def test_transform_inverse_transform_round_trip(SPCA, global_random_seed):
    """Check the `transform` and `inverse_transform` round trip with no loss of
    information.
    """
    rng = np.random.RandomState(global_random_seed)
    n_samples, n_features = 10, 5
    X = rng.randn(n_samples, n_features)
    # With as many components as features and near-zero penalties, the
    # transform should be exactly invertible.
    n_components = n_features
    spca = SPCA(
        n_components=n_components,
        alpha=1e-12,
        ridge_alpha=1e-12,
        random_state=global_random_seed,
    )
    X_trans_spca = spca.fit_transform(X)
    assert_allclose(spca.inverse_transform(X_trans_spca), X)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/decomposition/tests/test_truncated_svd.py | sklearn/decomposition/tests/test_truncated_svd.py | """Test truncated SVD transformer."""
import numpy as np
import pytest
import scipy.sparse as sp
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils._testing import assert_allclose, assert_array_less
SVD_SOLVERS = ["arpack", "randomized"]
@pytest.fixture(scope="module")
def X_sparse():
    """Module-wide sparse fixture resembling a small tf-idf matrix."""
    rng = check_random_state(42)
    mat = sp.random(60, 55, density=0.2, format="csr", random_state=rng)
    # Log-scale the nonzero entries, tf-idf style.
    mat.data[:] = 1 + np.log(mat.data)
    return mat
@pytest.mark.parametrize("solver", ["randomized"])
@pytest.mark.parametrize("kind", ("dense", "sparse"))
def test_solvers(X_sparse, solver, kind):
    """Non-arpack solvers closely match the arpack reference solution."""
    X = X_sparse if kind == "sparse" else X_sparse.toarray()
    svd_a = TruncatedSVD(30, algorithm="arpack")
    svd = TruncatedSVD(30, algorithm=solver, random_state=42, n_oversamples=100)
    # Only compare the leading columns, where the solution is best conditioned.
    Xa = svd_a.fit_transform(X)[:, :6]
    Xr = svd.fit_transform(X)[:, :6]
    assert_allclose(Xa, Xr, rtol=2e-3)
    comp_a = np.abs(svd_a.components_)
    comp = np.abs(svd.components_)
    # All elements are equal, but some elements are more equal than others:
    # leading components match tightly (relative tolerance), trailing ones
    # only within an absolute tolerance.
    assert_allclose(comp_a[:9], comp[:9], rtol=1e-3)
    assert_allclose(comp_a[9:], comp[9:], atol=1e-2)
@pytest.mark.parametrize("n_components", (10, 25, 41, 55))
def test_attributes(n_components, X_sparse):
    """Fitted attributes reflect the requested number of components."""
    model = TruncatedSVD(n_components).fit(X_sparse)
    assert model.n_components == n_components
    assert model.components_.shape == (n_components, X_sparse.shape[1])
@pytest.mark.parametrize(
    "algorithm, n_components",
    [("arpack", 55), ("arpack", 56), ("randomized", 56)],
)
def test_too_many_components(X_sparse, algorithm, n_components):
    """Component counts the solver cannot satisfy must raise ValueError."""
    model = TruncatedSVD(n_components=n_components, algorithm=algorithm)
    with pytest.raises(ValueError):
        model.fit(X_sparse)
@pytest.mark.parametrize("fmt", ("array", "csr", "csc", "coo", "lil"))
def test_sparse_formats(fmt, X_sparse):
    """TruncatedSVD accepts dense arrays and common sparse formats.

    Both `fit_transform` and `transform` must return an array of shape
    (n_samples, n_components).
    """
    n_samples = X_sparse.shape[0]
    # Fix: the previous condition tested `fmt == "dense"`, which never matched
    # any parametrized value; "array" only worked because "to" + "array"
    # happens to spell `toarray`. Make the dense case explicit.
    Xfmt = X_sparse.toarray() if fmt == "array" else getattr(X_sparse, "to" + fmt)()
    tsvd = TruncatedSVD(n_components=11)
    Xtrans = tsvd.fit_transform(Xfmt)
    assert Xtrans.shape == (n_samples, 11)
    Xtrans = tsvd.transform(Xfmt)
    assert Xtrans.shape == (n_samples, 11)
@pytest.mark.parametrize("algo", SVD_SOLVERS)
def test_inverse_transform(algo, X_sparse):
    """`inverse_transform` approximately reconstructs the original matrix."""
    # We need a lot of components for the reconstruction to be "almost
    # equal" in all positions. XXX Test means or sums instead?
    tsvd = TruncatedSVD(n_components=52, random_state=42, algorithm=algo)
    Xt = tsvd.fit_transform(X_sparse)
    Xinv = tsvd.inverse_transform(Xt)
    assert_allclose(Xinv, X_sparse.toarray(), rtol=1e-1, atol=2e-1)
def test_integers(X_sparse):
    """Integer input is accepted and produces the expected output shape."""
    X_int = X_sparse.astype(np.int64)
    model = TruncatedSVD(n_components=6)
    transformed = model.fit_transform(X_int)
    assert transformed.shape == (X_sparse.shape[0], model.n_components)
@pytest.mark.parametrize("kind", ("dense", "sparse"))
@pytest.mark.parametrize("n_components", [10, 20])
@pytest.mark.parametrize("solver", SVD_SOLVERS)
def test_explained_variance(X_sparse, kind, n_components, solver):
    """Explained-variance ratios lie in (0, 1) and match a direct computation."""
    X = X_sparse if kind == "sparse" else X_sparse.toarray()
    svd = TruncatedSVD(n_components, algorithm=solver)
    X_tr = svd.fit_transform(X)
    # Assert that all the values are greater than 0
    assert_array_less(0.0, svd.explained_variance_ratio_)
    # Assert that total explained variance is less than 1
    assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
    # Test that explained_variance is correct: ratio of per-component variance
    # of the transformed data to the total variance of the input
    total_variance = np.var(X_sparse.toarray(), axis=0).sum()
    variances = np.var(X_tr, axis=0)
    true_explained_variance_ratio = variances / total_variance
    assert_allclose(
        svd.explained_variance_ratio_,
        true_explained_variance_ratio,
    )
@pytest.mark.parametrize("kind", ("dense", "sparse"))
@pytest.mark.parametrize("solver", SVD_SOLVERS)
def test_explained_variance_components_10_20(X_sparse, kind, solver):
    """More components explain more variance; shared leading ratios agree."""
    X = X_sparse if kind == "sparse" else X_sparse.toarray()
    svd_10 = TruncatedSVD(10, algorithm=solver, n_iter=10).fit(X)
    svd_20 = TruncatedSVD(20, algorithm=solver, n_iter=10).fit(X)
    # Assert the 1st component is equal
    assert_allclose(
        svd_10.explained_variance_ratio_,
        svd_20.explained_variance_ratio_[:10],
        rtol=5e-3,
    )
    # Assert that 20 components has higher explained variance than 10
    assert (
        svd_20.explained_variance_ratio_.sum() > svd_10.explained_variance_ratio_.sum()
    )
@pytest.mark.parametrize("solver", SVD_SOLVERS)
def test_singular_values_consistency(solver, global_random_seed):
    """Singular values agree with the norms of the transformed data."""
    # Check that the TruncatedSVD output has the correct singular values
    rng = np.random.RandomState(global_random_seed)
    n_samples, n_features = 100, 80
    X = rng.randn(n_samples, n_features)
    pca = TruncatedSVD(n_components=2, algorithm=solver, random_state=rng).fit(X)
    # Compare to the Frobenius norm
    X_pca = pca.transform(X)
    assert_allclose(
        np.sum(pca.singular_values_**2.0),
        np.linalg.norm(X_pca, "fro") ** 2.0,
        rtol=1e-2,
    )
    # Compare to the 2-norms of the score vectors
    assert_allclose(
        pca.singular_values_, np.sqrt(np.sum(X_pca**2.0, axis=0)), rtol=1e-2
    )
@pytest.mark.parametrize("solver", SVD_SOLVERS)
def test_singular_values_expected(solver, global_random_seed):
    """Refitting on data with prescribed singular values recovers them."""
    # Set the singular values and see what we get back
    rng = np.random.RandomState(global_random_seed)
    n_samples = 100
    n_features = 110
    X = rng.randn(n_samples, n_features)
    pca = TruncatedSVD(n_components=3, algorithm=solver, random_state=rng)
    X_pca = pca.fit_transform(X)
    # Normalize the scores, then rescale two of them to known singular values.
    X_pca /= np.sqrt(np.sum(X_pca**2.0, axis=0))
    X_pca[:, 0] *= 3.142
    X_pca[:, 1] *= 2.718
    X_hat_pca = np.dot(X_pca, pca.components_)
    pca.fit(X_hat_pca)
    assert_allclose(pca.singular_values_, [3.142, 2.718, 1.0], rtol=1e-14)
def test_truncated_svd_eq_pca(X_sparse):
    """TruncatedSVD should be equal to PCA on centered data."""
    X_dense = X_sparse.toarray()
    # Center the data so the two decompositions coincide.
    X_c = X_dense - X_dense.mean(axis=0)
    params = dict(n_components=10, random_state=42)
    svd = TruncatedSVD(algorithm="arpack", **params)
    pca = PCA(svd_solver="arpack", **params)
    Xt_svd = svd.fit_transform(X_c)
    Xt_pca = pca.fit_transform(X_c)
    assert_allclose(Xt_svd, Xt_pca, rtol=1e-9)
    assert_allclose(pca.mean_, 0, atol=1e-9)
    assert_allclose(svd.components_, pca.components_)
@pytest.mark.parametrize(
    "algorithm, tol", [("randomized", 0.0), ("arpack", 1e-6), ("arpack", 0.0)]
)
@pytest.mark.parametrize("kind", ("dense", "sparse"))
def test_fit_transform(X_sparse, algorithm, tol, kind):
    """`fit_transform(X)` should equal `fit(X).transform(X)` for all solvers."""
    X = X_sparse if kind == "sparse" else X_sparse.toarray()
    svd = TruncatedSVD(
        n_components=5, n_iter=7, random_state=42, algorithm=algorithm, tol=tol
    )
    X_transformed_1 = svd.fit_transform(X)
    X_transformed_2 = svd.fit(X).transform(X)
    assert_allclose(X_transformed_1, X_transformed_2)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/decomposition/tests/test_kernel_pca.py | sklearn/decomposition/tests/test_kernel_pca.py | import warnings
import numpy as np
import pytest
import sklearn
from sklearn.datasets import load_iris, make_blobs, make_circles
from sklearn.decomposition import PCA, KernelPCA
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import Perceptron
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.utils._testing import (
assert_allclose,
assert_array_almost_equal,
assert_array_equal,
)
from sklearn.utils.fixes import CSR_CONTAINERS
from sklearn.utils.validation import _check_psd_eigenvalues
def test_kernel_pca(global_random_seed):
    """Nominal test for all solvers and all known kernels + a custom one
    It tests
     - that fit_transform is equivalent to fit+transform
     - that the shapes of transforms and inverse transforms are correct
    """
    rng = np.random.RandomState(global_random_seed)
    X_fit = rng.random_sample((5, 4))
    X_pred = rng.random_sample((2, 4))
    def histogram(x, y, **kwargs):
        # Histogram kernel implemented as a callable.
        assert kwargs == {}  # no kernel_params that we didn't ask for
        return np.minimum(x, y).sum()
    for eigen_solver in ("auto", "dense", "arpack", "randomized"):
        for kernel in ("linear", "rbf", "poly", histogram):
            # histogram kernel produces singular matrix inside linalg.solve
            # XXX use a least-squares approximation?
            inv = not callable(kernel)
            # transform fit data
            kpca = KernelPCA(
                4, kernel=kernel, eigen_solver=eigen_solver, fit_inverse_transform=inv
            )
            X_fit_transformed = kpca.fit_transform(X_fit)
            X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
            # compare absolute values: component signs may differ per solver
            assert_array_almost_equal(
                np.abs(X_fit_transformed), np.abs(X_fit_transformed2)
            )
            # non-regression test: previously, gamma would be 0 by default,
            # forcing all eigenvalues to 0 under the poly kernel
            assert X_fit_transformed.size != 0
            # transform new data
            X_pred_transformed = kpca.transform(X_pred)
            assert X_pred_transformed.shape[1] == X_fit_transformed.shape[1]
            # inverse transform (only fit for the built-in kernels, see above)
            if inv:
                X_pred2 = kpca.inverse_transform(X_pred_transformed)
                assert X_pred2.shape == X_pred.shape
def test_kernel_pca_invalid_parameters():
    """fit_inverse_transform combined with a precomputed kernel is invalid.

    Fitting such a configuration must raise a ValueError.
    """
    expected_msg = "Cannot fit_inverse_transform with a precomputed kernel"
    kpca = KernelPCA(
        n_components=10, fit_inverse_transform=True, kernel="precomputed"
    )
    with pytest.raises(ValueError, match=expected_msg):
        kpca.fit(np.random.randn(10, 10))
def test_kernel_pca_consistent_transform(global_random_seed):
    """A fitted kPCA model must not be affected by later mutation of X.

    The estimator retains an internal copy of the training data, so
    transforming after the caller mutates the original array must give the
    same result as before the mutation.
    """
    rng = np.random.RandomState(global_random_seed)
    X = rng.rand(10, 10)
    model = KernelPCA(random_state=rng).fit(X)
    before_mutation = model.transform(X)

    pristine = X.copy()
    X[:, 0] = 666  # clobber the caller-side array in place
    after_mutation = model.transform(pristine)
    assert_array_almost_equal(before_mutation, after_mutation)
def test_kernel_pca_deterministic_output(global_random_seed):
    """Repeated fits on the same inputs must produce identical embeddings."""
    rng = np.random.RandomState(global_random_seed)
    X = rng.rand(10, 10)

    for solver in ("arpack", "dense"):
        rows = []
        for _ in range(20):
            model = KernelPCA(n_components=2, eigen_solver=solver, random_state=rng)
            rows.append(model.fit_transform(X)[0])
        embeddings = np.asarray(rows)
        # every repetition must match the first one exactly
        assert_allclose(embeddings, np.tile(embeddings[0, :], 20).reshape(20, 2))
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_kernel_pca_sparse(csr_container, global_random_seed):
    """Test that kPCA works on a sparse data input.

    Same test as ``test_kernel_pca except inverse_transform`` since it's not
    implemented for sparse matrices.
    """
    rng = np.random.RandomState(global_random_seed)
    X_fit = csr_container(rng.random_sample((5, 4)))
    X_pred = csr_container(rng.random_sample((2, 4)))

    for eigen_solver in ("auto", "arpack", "randomized"):
        for kernel in ("linear", "rbf", "poly"):
            # transform fit data
            kpca = KernelPCA(
                4,
                kernel=kernel,
                eigen_solver=eigen_solver,
                fit_inverse_transform=False,
                random_state=0,
            )
            X_fit_transformed = kpca.fit_transform(X_fit)
            X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
            # compare absolute values: component signs may legitimately flip
            assert_array_almost_equal(
                np.abs(X_fit_transformed), np.abs(X_fit_transformed2)
            )

            # transform new data
            X_pred_transformed = kpca.transform(X_pred)
            assert X_pred_transformed.shape[1] == X_fit_transformed.shape[1]

            # inverse transform: not available for sparse matrices
            # XXX: should we raise another exception type here? For instance:
            # NotImplementedError.
            with pytest.raises(NotFittedError):
                kpca.inverse_transform(X_pred_transformed)
@pytest.mark.parametrize("solver", ["auto", "dense", "arpack", "randomized"])
@pytest.mark.parametrize("n_features", [4, 10])
def test_kernel_pca_linear_kernel(solver, n_features, global_random_seed):
    """kPCA with a linear kernel must match plain PCA, for every solver.

    The projections can only differ by the sign of each component, so the
    comparison is done on absolute values.
    """
    rng = np.random.RandomState(global_random_seed)
    X_fit = rng.random_sample((5, n_features))
    X_pred = rng.random_sample((2, n_features))

    # The fifth component has a near-zero eigenvalue and can be trimmed due
    # to roundoff error, so only the first components are compared; arpack
    # additionally cannot estimate the last component.
    if solver == "arpack":
        n_comps = 3
    else:
        n_comps = 4
    pca_solver = "full" if solver == "dense" else solver

    kpca_proj = KernelPCA(n_comps, eigen_solver=solver).fit(X_fit).transform(X_pred)
    pca_proj = PCA(n_comps, svd_solver=pca_solver).fit(X_fit).transform(X_pred)
    assert_array_almost_equal(np.abs(kpca_proj), np.abs(pca_proj))
def test_kernel_pca_n_components():
    """Projections must have exactly ``n_components`` columns, for all solvers."""
    rng = np.random.RandomState(0)
    X_fit = rng.random_sample((5, 4))
    X_pred = rng.random_sample((2, 4))

    for eigen_solver in ("dense", "arpack", "randomized"):
        for n_comp in [1, 2, 4]:
            model = KernelPCA(n_components=n_comp, eigen_solver=eigen_solver)
            projected = model.fit(X_fit).transform(X_pred)
            assert projected.shape == (2, n_comp)
def test_remove_zero_eig():
    """``remove_zero_eig`` drops null-space components.

    With n_components=None (the default) zero eigenvalues are always
    removed; with an explicit n_components they are kept unless
    remove_zero_eig=True is passed.
    """
    X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]])

    # n_components=None (default) => zero eigenvalues removed
    assert KernelPCA().fit_transform(X).shape == (3, 0)

    # explicit n_components => zero eigenvalues kept
    assert KernelPCA(n_components=2).fit_transform(X).shape == (3, 2)

    # explicit n_components + remove_zero_eig=True => removed again
    trimmed = KernelPCA(n_components=2, remove_zero_eig=True).fit_transform(X)
    assert trimmed.shape == (3, 0)
def test_leave_zero_eig():
    """Non-regression test for issue #12141 (PR #12143)

    This test checks that fit().transform() returns the same result as
    fit_transform() in case of non-removed zero eigenvalue.
    """
    X_fit = np.array([[1, 1], [0, 0]])

    # Assert that even with all np warnings on, there is no div by zero warning
    with warnings.catch_warnings():
        # There might be warnings about the kernel being badly conditioned,
        # but there should not be warnings about division by zero.
        # (Numpy division by zero warning can have many message variants, but
        # at least we know that it is a RuntimeWarning so let's check only this)
        warnings.simplefilter("error", RuntimeWarning)
        with np.errstate(all="warn"):
            k = KernelPCA(n_components=2, remove_zero_eig=False, eigen_solver="dense")
            # Fit, then transform
            A = k.fit(X_fit).transform(X_fit)
            # Do both at once
            B = k.fit_transform(X_fit)
            # Compare (absolute values: component signs may differ)
            assert_array_almost_equal(np.abs(A), np.abs(B))
def test_kernel_pca_precomputed(global_random_seed):
    """Test that kPCA works with a precomputed kernel, for all solvers"""
    rng = np.random.RandomState(global_random_seed)
    X_fit = rng.random_sample((5, 4))
    X_pred = rng.random_sample((2, 4))

    for eigen_solver in ("dense", "arpack", "randomized"):
        # reference: kPCA computing the kernel internally
        X_kpca = (
            KernelPCA(4, eigen_solver=eigen_solver, random_state=0)
            .fit(X_fit)
            .transform(X_pred)
        )
        # same projection, but feeding the explicit gram matrices
        # (np.dot(A, B.T) is the linear kernel between A and B)
        X_kpca2 = (
            KernelPCA(
                4, eigen_solver=eigen_solver, kernel="precomputed", random_state=0
            )
            .fit(np.dot(X_fit, X_fit.T))
            .transform(np.dot(X_pred, X_fit.T))
        )

        # fit_transform on the precomputed training kernel ...
        X_kpca_train = KernelPCA(
            4, eigen_solver=eigen_solver, kernel="precomputed", random_state=0
        ).fit_transform(np.dot(X_fit, X_fit.T))

        # ... must match fit followed by transform on the same kernel
        X_kpca_train2 = (
            KernelPCA(
                4, eigen_solver=eigen_solver, kernel="precomputed", random_state=0
            )
            .fit(np.dot(X_fit, X_fit.T))
            .transform(np.dot(X_fit, X_fit.T))
        )

        # compare absolute values: component signs may legitimately flip
        assert_array_almost_equal(np.abs(X_kpca), np.abs(X_kpca2))

        assert_array_almost_equal(np.abs(X_kpca_train), np.abs(X_kpca_train2))
@pytest.mark.parametrize("solver", ["auto", "dense", "arpack", "randomized"])
def test_kernel_pca_precomputed_non_symmetric(solver):
    """A non-symmetric precomputed kernel is accepted thanks to centering.

    Fitting the raw (non-symmetric) gram matrix and its centered
    counterpart must yield identical spectra.
    """

    def fit_precomputed(gram):
        # one-component kPCA on a precomputed gram matrix
        return KernelPCA(
            kernel="precomputed", eigen_solver=solver, n_components=1, random_state=0
        ).fit(gram)

    raw = fit_precomputed([[1, 2], [3, 40]])  # non-symmetric gram: must not error
    centered = fit_precomputed([[9, -9], [-9, 9]])  # same kernel, already centered

    # centering inside the estimator makes both fits equivalent
    assert_array_equal(raw.eigenvectors_, centered.eigenvectors_)
    assert_array_equal(raw.eigenvalues_, centered.eigenvalues_)
def test_gridsearch_pipeline():
    """Grid-searching kPCA's gamma inside a pipeline separates the circles.

    A perceptron preceded by an RBF kernel PCA must reach a perfect score
    once gamma is tuned by grid search.
    """
    X, y = make_circles(n_samples=400, factor=0.3, noise=0.05, random_state=0)
    pipeline = Pipeline(
        [
            ("kernel_pca", KernelPCA(kernel="rbf", n_components=2)),
            ("Perceptron", Perceptron(max_iter=5)),
        ]
    )
    gammas = 2.0 ** np.arange(-2, 2)
    search = GridSearchCV(pipeline, cv=3, param_grid={"kernel_pca__gamma": gammas})
    search.fit(X, y)
    assert search.best_score_ == 1
def test_gridsearch_pipeline_precomputed():
    """Grid search over a pipeline fed with a precomputed RBF kernel.

    Same setting as test_gridsearch_pipeline, except the kernel matrix is
    computed up front and passed as the input.
    """
    X, y = make_circles(n_samples=400, factor=0.3, noise=0.05, random_state=0)
    pipeline = Pipeline(
        [
            ("kernel_pca", KernelPCA(kernel="precomputed", n_components=2)),
            ("Perceptron", Perceptron(max_iter=5)),
        ]
    )
    search = GridSearchCV(
        pipeline, cv=3, param_grid={"Perceptron__max_iter": np.arange(1, 5)}
    )
    search.fit(rbf_kernel(X, gamma=2.0), y)
    assert search.best_score_ == 1
def test_nested_circles():
    """RBF kPCA maps nested circles into a linearly separable space.

    A raw perceptron cannot separate 2D nested circles, but after projection
    onto the first two RBF kernel PCA components the classes become
    perfectly separable.
    """
    X, y = make_circles(n_samples=400, factor=0.3, noise=0.05, random_state=0)

    # 2D nested circles are not linearly separable
    raw_score = Perceptron(max_iter=5).fit(X, y).score(X, y)
    assert raw_score < 0.8

    # Project the circles data into the first 2 components of a RBF Kernel
    # PCA model. Note that the gamma value is data dependent. If this test
    # breaks and the gamma value has to be updated, the Kernel PCA example
    # will have to be updated too.
    embedding = KernelPCA(
        kernel="rbf", n_components=2, fit_inverse_transform=True, gamma=2.0
    ).fit_transform(X)

    # The data is perfectly linearly separable in that space
    kpca_score = Perceptron(max_iter=5).fit(embedding, y).score(embedding, y)
    assert kpca_score == 1.0
def test_kernel_conditioning():
    """``_check_psd_eigenvalues`` must be applied to the kPCA spectrum.

    Non-regression test for issue #12140 (PR #12145).
    """
    # pathological X leading to a small non-zero eigenvalue
    ill_conditioned = [[5, 1], [5 + 1e-8, 1e-8], [5 + 1e-8, 0]]
    model = KernelPCA(kernel="linear", n_components=2, fit_inverse_transform=True)
    model.fit(ill_conditioned)

    # the tiny spurious eigenvalue must have been zeroed out
    eigenvalues = model.eigenvalues_
    assert eigenvalues.min() == 0
    assert np.all(eigenvalues == _check_psd_eigenvalues(eigenvalues))
@pytest.mark.parametrize("solver", ["auto", "dense", "arpack", "randomized"])
def test_precomputed_kernel_not_psd(solver):
    """Check how KernelPCA works with non-PSD kernels depending on n_components

    Tests for all methods what happens with a non PSD gram matrix (this
    can happen in an isomap scenario, or with custom kernel functions, or
    maybe with ill-posed datasets).

    When ``n_component`` is large enough to capture a negative eigenvalue, an
    error should be raised. Otherwise, KernelPCA should run without error
    since the negative eigenvalues are not selected.
    """

    # a non PSD kernel with large eigenvalues, already centered
    # it was captured from an isomap call and multiplied by 100 for compacity
    K = [
        [4.48, -1.0, 8.07, 2.33, 2.33, 2.33, -5.76, -12.78],
        [-1.0, -6.48, 4.5, -1.24, -1.24, -1.24, -0.81, 7.49],
        [8.07, 4.5, 15.48, 2.09, 2.09, 2.09, -11.1, -23.23],
        [2.33, -1.24, 2.09, 4.0, -3.65, -3.65, 1.02, -0.9],
        [2.33, -1.24, 2.09, -3.65, 4.0, -3.65, 1.02, -0.9],
        [2.33, -1.24, 2.09, -3.65, -3.65, 4.0, 1.02, -0.9],
        [-5.76, -0.81, -11.1, 1.02, 1.02, 1.02, 4.86, 9.75],
        [-12.78, 7.49, -23.23, -0.9, -0.9, -0.9, 9.75, 21.46],
    ]
    # this gram matrix has 5 positive eigenvalues and 3 negative ones
    # [ 52.72,   7.65,   7.65,   5.02,   0. ,  -0. ,  -6.13, -15.11]

    # 1. ask for enough components to get a significant negative one
    kpca = KernelPCA(kernel="precomputed", eigen_solver=solver, n_components=7)
    # make sure that the appropriate error is raised
    with pytest.raises(ValueError, match="There are significant negative eigenvalues"):
        kpca.fit(K)

    # 2. ask for a small enough n_components to get only positive ones
    kpca = KernelPCA(kernel="precomputed", eigen_solver=solver, n_components=2)
    if solver == "randomized":
        # the randomized method is still inconsistent with the others on this
        # since it selects the eigenvalues based on the largest 2 modules
        # (i.e. absolute values), not on the largest 2 values.
        #
        # At least we can ensure that we return an error instead of returning
        # the wrong eigenvalues
        with pytest.raises(
            ValueError, match="There are significant negative eigenvalues"
        ):
            kpca.fit(K)
    else:
        # general case: make sure that it works
        kpca.fit(K)
@pytest.mark.parametrize("n_components", [4, 10, 20])
def test_kernel_pca_solvers_equivalence(n_components):
    """'dense', 'arpack' and 'randomized' solvers must agree on circle data."""
    n_train, n_test = 1_000, 100
    X, _ = make_circles(
        n_samples=(n_train + n_test), factor=0.3, noise=0.05, random_state=0
    )
    X_fit, X_pred = X[:n_train, :], X[n_train:, :]

    def project(solver):
        # fit on the training chunk, then project the held-out chunk
        model = KernelPCA(n_components, eigen_solver=solver, random_state=0)
        return model.fit(X_fit).transform(X_pred)

    # reference: exact (full) decomposition
    reference = project("dense")

    # both approximate solvers must recover the same projection (up to sign)
    assert_array_almost_equal(np.abs(project("arpack")), np.abs(reference))
    assert_array_almost_equal(np.abs(project("randomized")), np.abs(reference))
def test_kernel_pca_inverse_transform_reconstruction():
    """inverse_transform must approximately reconstruct the input.

    Kernel centering loses some information about the original data, so
    only an approximate (10% relative error) reconstruction is expected.
    """
    X, *_ = make_blobs(n_samples=100, n_features=4, random_state=0)

    model = KernelPCA(
        n_components=20, kernel="rbf", fit_inverse_transform=True, alpha=1e-3
    )
    reconstruction = model.inverse_transform(model.fit_transform(X))
    relative_error = np.linalg.norm(X - reconstruction) / np.linalg.norm(X)
    assert relative_error < 1e-1
def test_kernel_pca_raise_not_fitted_error():
    """inverse_transform on this default configuration raises NotFittedError."""
    data = np.random.randn(15).reshape(5, 3)
    model = KernelPCA()
    model.fit(data)
    with pytest.raises(NotFittedError):
        model.inverse_transform(data)
def test_32_64_decomposition_shape():
    """float32 and float64 inputs must keep the same number of components.

    Non regression test for
    https://github.com/scikit-learn/scikit-learn/issues/18146
    """
    X, y = make_blobs(
        n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, cluster_std=0.1
    )
    X = StandardScaler().fit_transform(X)
    X -= X.min()

    # the shapes correspond to the number of non-zero eigenvalues retained
    kpca = KernelPCA()
    shape_64 = kpca.fit_transform(X).shape
    shape_32 = kpca.fit_transform(X.astype(np.float32)).shape
    assert shape_64 == shape_32
def test_kernel_pca_feature_names_out():
    """get_feature_names_out must return 'kernelpca<i>' per component."""
    X, *_ = make_blobs(n_samples=100, n_features=4, random_state=0)
    fitted = KernelPCA(n_components=2).fit(X)
    expected = [f"kernelpca{i}" for i in range(2)]
    assert_array_equal(expected, fitted.get_feature_names_out())
def test_kernel_pca_inverse_correct_gamma(global_random_seed):
    """Check that gamma is set correctly when not provided.

    Non-regression test for #26280
    """
    rng = np.random.RandomState(global_random_seed)
    X = rng.random_sample((5, 4))

    kwargs = {
        "n_components": 2,
        "random_state": rng,
        "fit_inverse_transform": True,
        "kernel": "rbf",
    }

    # gamma=None must resolve to 1 / n_features
    expected_gamma = 1 / X.shape[1]
    kpca1 = KernelPCA(gamma=None, **kwargs).fit(X)
    kpca2 = KernelPCA(gamma=expected_gamma, **kwargs).fit(X)

    assert kpca1.gamma_ == expected_gamma
    assert kpca2.gamma_ == expected_gamma

    # NOTE(review): both reconstructions are fed kpca1.transform(X); if the
    # intent was to compare full round-trips, the second call would use
    # kpca2.transform(X) — confirm before changing.
    X1_recon = kpca1.inverse_transform(kpca1.transform(X))
    X2_recon = kpca2.inverse_transform(kpca1.transform(X))
    assert_allclose(X1_recon, X2_recon)
def test_kernel_pca_pandas_output():
    """KernelPCA with the arpack solver must support pandas output.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/27579
    """
    pytest.importorskip("pandas")
    X, _ = load_iris(as_frame=True, return_X_y=True)
    with sklearn.config_context(transform_output="pandas"):
        # must complete without raising
        KernelPCA(n_components=2, eigen_solver="arpack").fit_transform(X)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/decomposition/tests/test_factor_analysis.py | sklearn/decomposition/tests/test_factor_analysis.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from itertools import combinations
import numpy as np
import pytest
from sklearn.decomposition import FactorAnalysis
from sklearn.decomposition._factor_analysis import _ortho_rotation
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils._testing import assert_almost_equal, assert_array_almost_equal
def test_factor_analysis(global_random_seed):
    """Test FactorAnalysis' ability to recover the data covariance structure.

    Covers the 'randomized' and 'lapack' SVD methods, log-likelihood
    monotonicity during EM, covariance/precision consistency, validation of
    ``noise_variance_init``, and the varimax/quartimax rotations (the
    rotation itself is checked against values produced with R's
    psych::principal).
    """
    rng = np.random.RandomState(global_random_seed)
    n_samples, n_features, n_components = 20, 5, 3

    # Some random settings for the generative model
    W = rng.randn(n_components, n_features)
    # latent variable of dim 3, 20 of it
    h = rng.randn(n_samples, n_components)
    # using gamma to model different noise variance
    # per component
    noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)

    # generate observations
    # wlog, mean is 0
    X = np.dot(h, W) + noise

    fas = []
    for method in ["randomized", "lapack"]:
        fa = FactorAnalysis(n_components=n_components, svd_method=method)
        fa.fit(X)
        fas.append(fa)

        X_t = fa.transform(X)
        assert X_t.shape == (n_samples, n_components)

        assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())
        assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))

        # EM must monotonically increase the log-likelihood. The comparison
        # has to be on the diffs themselves: ``np.all(np.diff(...))`` (the
        # previous form) only checked that consecutive values differ, not
        # that they increase.
        assert np.all(np.diff(fa.loglike_) > 0.0), "Log likelihood did not increase"

        # Sample Covariance
        scov = np.cov(X, rowvar=False, bias=True)

        # Model Covariance
        mcov = fa.get_covariance()
        diff = np.sum(np.abs(scov - mcov)) / W.size
        assert diff < 0.2, "Mean absolute difference is %f" % diff

    # a noise_variance_init of the wrong size must be rejected
    fa = FactorAnalysis(
        n_components=n_components, noise_variance_init=np.ones(n_features)
    )
    with pytest.raises(ValueError):
        fa.fit(X[:, :2])

    def f(x, y):
        return np.abs(getattr(x, y))  # sign will not be equal

    # both SVD methods must agree (up to sign)
    fa1, fa2 = fas
    for attr in ["loglike_", "components_", "noise_variance_"]:
        assert_almost_equal(f(fa1, attr), f(fa2, attr))

    # a too-small iteration budget must trigger a convergence warning
    fa1.max_iter = 1
    fa1.verbose = True
    with pytest.warns(ConvergenceWarning):
        fa1.fit(X)

    # Test get_covariance and get_precision with n_components == n_features
    # with n_components < n_features and with n_components == 0
    for n_components in [0, 2, X.shape[1]]:
        fa.n_components = n_components
        fa.fit(X)
        cov = fa.get_covariance()
        precision = fa.get_precision()
        assert_array_almost_equal(np.dot(cov, precision), np.eye(X.shape[1]), 12)

    # test rotation
    n_components = 2

    results, projections = {}, {}
    for method in (None, "varimax", "quartimax"):
        fa_var = FactorAnalysis(n_components=n_components, rotation=method)
        results[method] = fa_var.fit_transform(X)
        projections[method] = fa_var.get_covariance()
    for rot1, rot2 in combinations([None, "varimax", "quartimax"], 2):
        assert not np.allclose(results[rot1], results[rot2])
        assert np.allclose(projections[rot1], projections[rot2], atol=3)

    # test against R's psych::principal with rotate="varimax"
    # (i.e., the values below stem from rotating the components in R)
    # R's factor analysis returns quite different values; therefore, we only
    # test the rotation itself
    factors = np.array(
        [
            [0.89421016, -0.35854928, -0.27770122, 0.03773647],
            [-0.45081822, -0.89132754, 0.0932195, -0.01787973],
            [0.99500666, -0.02031465, 0.05426497, -0.11539407],
            [0.96822861, -0.06299656, 0.24411001, 0.07540887],
        ]
    )
    r_solution = np.array(
        [[0.962, 0.052], [-0.141, 0.989], [0.949, -0.300], [0.937, -0.251]]
    )
    rotated = _ortho_rotation(factors[:, :n_components], method="varimax").T
    assert_array_almost_equal(np.abs(rotated), np.abs(r_solution), decimal=3)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/decomposition/tests/test_pca.py | sklearn/decomposition/tests/test_pca.py | import os
import re
import warnings
import numpy as np
import pytest
import scipy as sp
from numpy.testing import assert_array_equal
from sklearn import config_context, datasets
from sklearn.base import clone
from sklearn.datasets import load_iris, make_classification, make_low_rank_matrix
from sklearn.decomposition import PCA
from sklearn.decomposition._pca import _assess_dimension, _infer_dimension
from sklearn.utils._array_api import (
_atol_for_type,
_convert_to_numpy,
_get_namespace_device_dtype_ids,
yield_namespace_device_dtype_combinations,
)
from sklearn.utils._array_api import device as array_device
from sklearn.utils._test_common.instance_generator import _get_check_estimator_ids
from sklearn.utils._testing import _array_api_for_tests, assert_allclose
from sklearn.utils.estimator_checks import (
check_array_api_input_and_values,
)
from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS
# Module-level fixtures shared by the PCA tests below.
iris = datasets.load_iris()
# All svd_solver values accepted by PCA; several tests parametrize over this.
PCA_SOLVERS = ["full", "covariance_eigh", "arpack", "randomized", "auto"]

# `SPARSE_M` and `SPARSE_N` could be larger, but be aware:
# * SciPy's generation of random sparse matrix can be costly
# * A (SPARSE_M, SPARSE_N) dense array is allocated to compare against
SPARSE_M, SPARSE_N = 1000, 300  # arbitrary
# Upper bound on the number of components for a (SPARSE_M, SPARSE_N) matrix.
SPARSE_MAX_COMPONENTS = min(SPARSE_M, SPARSE_N)
def _check_fitted_pca_close(pca1, pca2, rtol=1e-7, atol=1e-12):
    """Assert that two fitted PCA models learned numerically the same model."""
    # array-valued fitted attributes must agree within tolerance
    for attr in (
        "components_",
        "explained_variance_",
        "singular_values_",
        "mean_",
        "noise_variance_",
    ):
        assert_allclose(getattr(pca1, attr), getattr(pca2, attr), rtol=rtol, atol=atol)

    # integer-valued fitted attributes must agree exactly
    for attr in ("n_components_", "n_samples_", "n_features_in_"):
        assert getattr(pca1, attr) == getattr(pca2, attr)
@pytest.mark.parametrize("svd_solver", PCA_SOLVERS)
@pytest.mark.parametrize("n_components", range(1, iris.data.shape[1]))
def test_pca(svd_solver, n_components):
    """Basic PCA API checks on the iris data, for every solver."""
    data = iris.data
    estimator = PCA(n_components=n_components, svd_solver=svd_solver)

    # fit(...).transform(...) yields the requested number of components
    projected = estimator.fit(data).transform(data)
    assert projected.shape[1] == n_components

    # fit_transform is equivalent to fit followed by transform
    projected_direct = estimator.fit_transform(data)
    assert_allclose(projected, projected_direct)
    assert_allclose(estimator.transform(data), projected_direct)

    # covariance and precision matrices must be mutual inverses
    covariance = estimator.get_covariance()
    precision = estimator.get_precision()
    assert_allclose(np.dot(covariance, precision), np.eye(data.shape[1]), atol=1e-12)
@pytest.mark.parametrize("density", [0.01, 0.1, 0.30])
@pytest.mark.parametrize("n_components", [1, 2, 10])
@pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + CSC_CONTAINERS)
@pytest.mark.parametrize("svd_solver", ["arpack", "covariance_eigh"])
@pytest.mark.parametrize("scale", [1, 10, 100])
def test_pca_sparse(
    global_random_seed, svd_solver, sparse_container, n_components, density, scale
):
    """Check that the results are the same for sparse and dense input."""

    # Set atol in addition of the default rtol to account for the very wide range of
    # result values (1e-8 to 1e0).
    atol = 1e-12
    transform_atol = 1e-10

    random_state = np.random.default_rng(global_random_seed)
    X = sparse_container(
        sp.sparse.random(
            SPARSE_M,
            SPARSE_N,
            random_state=random_state,
            density=density,
        )
    )
    # Scale the data + vary the column means
    scale_vector = random_state.random(X.shape[1]) * scale
    X = X.multiply(scale_vector)

    # fit on the sparse input
    pca = PCA(
        n_components=n_components,
        svd_solver=svd_solver,
        random_state=global_random_seed,
    )
    pca.fit(X)

    # reference fit on the equivalent dense array
    Xd = X.toarray()
    pcad = PCA(
        n_components=n_components,
        svd_solver=svd_solver,
        random_state=global_random_seed,
    )
    pcad.fit(Xd)

    # Fitted attributes equality
    _check_fitted_pca_close(pca, pcad, atol=atol)

    # Test transform on fresh data, again sparse vs. dense
    X2 = sparse_container(
        sp.sparse.random(
            SPARSE_M,
            SPARSE_N,
            random_state=random_state,
            density=density,
        )
    )
    X2d = X2.toarray()

    assert_allclose(pca.transform(X2), pca.transform(X2d), atol=transform_atol)
    assert_allclose(pca.transform(X2), pcad.transform(X2d), atol=transform_atol)
@pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + CSC_CONTAINERS)
def test_pca_sparse_fit_transform(global_random_seed, sparse_container):
    """fit_transform on sparse input must agree with fit followed by transform."""
    rng = np.random.default_rng(global_random_seed)

    def make_sparse():
        # draw a fresh sparse matrix from the shared generator
        return sparse_container(
            sp.sparse.random(SPARSE_M, SPARSE_N, random_state=rng, density=0.01)
        )

    X = make_sparse()
    X2 = make_sparse()

    pca_fit = PCA(n_components=10, svd_solver="arpack", random_state=global_random_seed)
    pca_fit_transform = PCA(
        n_components=10, svd_solver="arpack", random_state=global_random_seed
    )

    pca_fit.fit(X)
    transformed_X = pca_fit_transform.fit_transform(X)

    # both estimators must have learned the same model ...
    _check_fitted_pca_close(pca_fit, pca_fit_transform)
    # ... and agree on the training data as well as on fresh data
    assert_allclose(transformed_X, pca_fit_transform.transform(X))
    assert_allclose(transformed_X, pca_fit.transform(X))
    assert_allclose(pca_fit.transform(X2), pca_fit_transform.transform(X2))
@pytest.mark.parametrize("svd_solver", ["randomized", "full"])
@pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + CSC_CONTAINERS)
def test_sparse_pca_solver_error(global_random_seed, svd_solver, sparse_container):
    """Solvers without sparse support must raise a helpful TypeError."""
    rng = np.random.RandomState(global_random_seed)
    X = sparse_container(sp.sparse.random(SPARSE_M, SPARSE_N, random_state=rng))
    expected_message = (
        'PCA only support sparse inputs with the "arpack" and "covariance_eigh"'
        f' solvers, while "{svd_solver}" was passed'
    )
    with pytest.raises(TypeError, match=expected_message):
        PCA(n_components=30, svd_solver=svd_solver).fit(X)
@pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + CSC_CONTAINERS)
def test_sparse_pca_auto_arpack_singluar_values_consistency(
    global_random_seed, sparse_container
):
    """The "auto" and "arpack" solvers must agree on sparse inputs."""
    # NOTE(review): "singluar" is a typo in the test name; renaming would
    # change the collected test id, so it is kept as-is.
    rng = np.random.RandomState(global_random_seed)
    X = sparse_container(sp.sparse.random(SPARSE_M, SPARSE_N, random_state=rng))

    singular_values = {
        solver: PCA(n_components=10, svd_solver=solver).fit(X).singular_values_
        for solver in ("arpack", "auto")
    }
    assert_allclose(singular_values["arpack"], singular_values["auto"], rtol=5e-3)
def test_no_empty_slice_warning():
    """Fitting must not trigger numpy warnings over empty arrays.

    Any n_features greater than n_components triggered such a warning in 0.16.
    """
    n_components = 10
    X = np.random.uniform(-1, 1, size=(n_components, n_components + 2))
    with warnings.catch_warnings():
        # escalate RuntimeWarning (the category numpy uses) to an error
        warnings.simplefilter("error", RuntimeWarning)
        PCA(n_components=n_components).fit(X)
@pytest.mark.parametrize("copy", [True, False])
@pytest.mark.parametrize("solver", PCA_SOLVERS)
def test_whitening(solver, copy):
    """whiten=True must yield unit-variance, zero-mean components."""
    # Check that PCA output has unit-variance
    rng = np.random.RandomState(0)
    n_samples = 100
    n_features = 80
    n_components = 30
    rank = 50

    # some low rank data with correlated features
    X = np.dot(
        rng.randn(n_samples, rank),
        np.dot(np.diag(np.linspace(10.0, 1.0, rank)), rng.randn(rank, n_features)),
    )
    # the component-wise variance of the first 50 features is 3 times the
    # mean component-wise variance of the remaining 30 features
    X[:, :50] *= 3

    assert X.shape == (n_samples, n_features)

    # the component-wise variance is thus highly varying:
    assert X.std(axis=0).std() > 43.8

    # whiten the data while projecting to the lower dim subspace
    X_ = X.copy()  # make sure we keep an original across iterations.
    pca = PCA(
        n_components=n_components,
        whiten=True,
        copy=copy,
        svd_solver=solver,
        random_state=0,
        iterated_power=7,
    )
    # test fit_transform
    X_whitened = pca.fit_transform(X_.copy())
    assert X_whitened.shape == (n_samples, n_components)
    X_whitened2 = pca.transform(X_)
    assert_allclose(X_whitened, X_whitened2, rtol=5e-4)

    # whitened output: unit variance and zero mean per component
    assert_allclose(X_whitened.std(ddof=1, axis=0), np.ones(n_components))
    assert_allclose(X_whitened.mean(axis=0), np.zeros(n_components), atol=1e-12)

    # same projection without whitening, for contrast
    X_ = X.copy()
    pca = PCA(
        n_components=n_components, whiten=False, copy=copy, svd_solver=solver
    ).fit(X_.copy())
    X_unwhitened = pca.transform(X_)
    assert X_unwhitened.shape == (n_samples, n_components)

    # in that case the output components still have varying variances
    assert X_unwhitened.std(axis=0).std() == pytest.approx(74.1, rel=1e-1)
    # we always center, so no test for non-centering.
@pytest.mark.parametrize(
    "other_svd_solver", sorted(list(set(PCA_SOLVERS) - {"full", "auto"}))
)
@pytest.mark.parametrize("data_shape", ["tall", "wide"])
@pytest.mark.parametrize("rank_deficient", [False, True])
@pytest.mark.parametrize("whiten", [False, True])
def test_pca_solver_equivalence(
    other_svd_solver,
    data_shape,
    rank_deficient,
    whiten,
    global_random_seed,
    global_dtype,
):
    """Check that every truncated solver agrees with the exact "full" solver.

    Compares explained variance, components, transform and inverse_transform
    outputs on both training and held-out data, across tall/wide and
    full-rank/rank-deficient inputs, in float32 and float64.
    """
    if data_shape == "tall":
        n_samples, n_features = 100, 30
    else:
        n_samples, n_features = 30, 100
    n_samples_test = 10

    if rank_deficient:
        # product of two thin random factors => rank-deficient data
        rng = np.random.default_rng(global_random_seed)
        rank = min(n_samples, n_features) // 2
        X = rng.standard_normal(
            size=(n_samples + n_samples_test, rank)
        ) @ rng.standard_normal(size=(rank, n_features))
    else:
        X = make_low_rank_matrix(
            n_samples=n_samples + n_samples_test,
            n_features=n_features,
            tail_strength=0.5,
            random_state=global_random_seed,
        )
        # With a non-zero tail strength, the data is actually full-rank.
        rank = min(n_samples, n_features)

    X = X.astype(global_dtype, copy=False)
    X_train, X_test = X[:n_samples], X[n_samples:]

    # float32 needs much looser tolerances than float64
    if global_dtype == np.float32:
        tols = dict(atol=3e-2, rtol=1e-5)
        variance_threshold = 1e-5
    else:
        tols = dict(atol=1e-10, rtol=1e-12)
        variance_threshold = 1e-12

    extra_other_kwargs = {}
    if other_svd_solver == "randomized":
        # Only check for a truncated result with a large number of iterations
        # to make sure that we can recover precise results.
        n_components = 10
        extra_other_kwargs = {"iterated_power": 50}
    elif other_svd_solver == "arpack":
        # Test all components except the last one which cannot be estimated by
        # arpack.
        n_components = np.minimum(n_samples, n_features) - 1
    else:
        # Test all components to high precision.
        n_components = None

    pca_full = PCA(n_components=n_components, svd_solver="full", whiten=whiten)
    pca_other = PCA(
        n_components=n_components,
        svd_solver=other_svd_solver,
        whiten=whiten,
        random_state=global_random_seed,
        **extra_other_kwargs,
    )

    X_trans_full_train = pca_full.fit_transform(X_train)
    assert np.isfinite(X_trans_full_train).all()
    assert X_trans_full_train.dtype == global_dtype
    X_trans_other_train = pca_other.fit_transform(X_train)
    assert np.isfinite(X_trans_other_train).all()
    assert X_trans_other_train.dtype == global_dtype

    assert (pca_full.explained_variance_ >= 0).all()
    assert_allclose(pca_full.explained_variance_, pca_other.explained_variance_, **tols)
    assert_allclose(
        pca_full.explained_variance_ratio_,
        pca_other.explained_variance_ratio_,
        **tols,
    )

    reference_components = pca_full.components_
    assert np.isfinite(reference_components).all()
    other_components = pca_other.components_
    assert np.isfinite(other_components).all()

    # For some choice of n_components and data distribution, some components
    # might be pure noise, let's ignore them in the comparison:
    stable = pca_full.explained_variance_ > variance_threshold
    assert stable.sum() > 1
    assert_allclose(reference_components[stable], other_components[stable], **tols)

    # As a result the output of fit_transform should be the same:
    assert_allclose(
        X_trans_other_train[:, stable], X_trans_full_train[:, stable], **tols
    )

    # And similarly for the output of transform on new data (except for the
    # last component that can be underdetermined):
    X_trans_full_test = pca_full.transform(X_test)
    assert np.isfinite(X_trans_full_test).all()
    assert X_trans_full_test.dtype == global_dtype
    X_trans_other_test = pca_other.transform(X_test)
    assert np.isfinite(X_trans_other_test).all()
    assert X_trans_other_test.dtype == global_dtype
    assert_allclose(X_trans_other_test[:, stable], X_trans_full_test[:, stable], **tols)

    # Check that inverse transform reconstructions for both solvers are
    # compatible.
    X_recons_full_test = pca_full.inverse_transform(X_trans_full_test)
    assert np.isfinite(X_recons_full_test).all()
    assert X_recons_full_test.dtype == global_dtype
    X_recons_other_test = pca_other.inverse_transform(X_trans_other_test)
    assert np.isfinite(X_recons_other_test).all()
    assert X_recons_other_test.dtype == global_dtype

    if pca_full.components_.shape[0] == pca_full.components_.shape[1]:
        # In this case, the models should have learned the same invertible
        # transform. They should therefore both be able to reconstruct the test
        # data.
        assert_allclose(X_recons_full_test, X_test, **tols)
        assert_allclose(X_recons_other_test, X_test, **tols)
    elif pca_full.components_.shape[0] < rank:
        # In the absence of noisy components, both models should be able to
        # reconstruct the same low-rank approximation of the original data.
        assert pca_full.explained_variance_.min() > variance_threshold
        assert_allclose(X_recons_full_test, X_recons_other_test, **tols)
    else:
        # When n_features > n_samples and n_components is larger than the rank
        # of the training set, the output of the `inverse_transform` function
        # is ill-defined. We can only check that we reach the same fixed point
        # after another round of transform:
        assert_allclose(
            pca_full.transform(X_recons_full_test)[:, stable],
            pca_other.transform(X_recons_other_test)[:, stable],
            **tols,
        )
@pytest.mark.parametrize(
    "X",
    [
        np.random.RandomState(0).randn(100, 80),
        datasets.make_classification(100, 80, n_informative=78, random_state=0)[0],
        np.random.RandomState(0).randn(10, 100),
    ],
    ids=["random-tall", "correlated-tall", "random-wide"],
)
@pytest.mark.parametrize("svd_solver", PCA_SOLVERS)
def test_pca_explained_variance_empirical(X, svd_solver):
    """explained_variance_ matches both the empirical (ddof=1) variance of the
    projected data and the two largest eigenvalues of the sample covariance."""
    pca = PCA(n_components=2, svd_solver=svd_solver, random_state=0)
    X_pca = pca.fit_transform(X)
    assert_allclose(pca.explained_variance_, np.var(X_pca, ddof=1, axis=0))
    # Cross-check against the eigenvalues of the covariance matrix.
    expected_result = np.linalg.eig(np.cov(X, rowvar=False))[0]
    expected_result = sorted(expected_result, reverse=True)[:2]
    assert_allclose(pca.explained_variance_, expected_result, rtol=5e-3)
@pytest.mark.parametrize("svd_solver", ["arpack", "randomized"])
def test_pca_singular_values_consistency(svd_solver):
    """Singular values of the approximate solvers match the full SVD."""
    rng = np.random.RandomState(0)
    data = rng.randn(100, 80)
    reference = PCA(n_components=2, svd_solver="full", random_state=rng)
    candidate = PCA(n_components=2, svd_solver=svd_solver, random_state=rng)
    reference.fit(data)
    candidate.fit(data)
    assert_allclose(
        reference.singular_values_, candidate.singular_values_, rtol=5e-3
    )
@pytest.mark.parametrize("svd_solver", PCA_SOLVERS)
def test_pca_singular_values(svd_solver):
    """singular_values_ satisfies the Frobenius/2-norm identities and recovers
    prescribed values on a synthetic matrix built with known singular values."""
    rng = np.random.RandomState(0)
    n_samples, n_features = 100, 80
    X = rng.randn(n_samples, n_features)
    pca = PCA(n_components=2, svd_solver=svd_solver, random_state=rng)
    X_trans = pca.fit_transform(X)
    # compare to the Frobenius norm
    assert_allclose(
        np.sum(pca.singular_values_**2), np.linalg.norm(X_trans, "fro") ** 2
    )
    # Compare to the 2-norms of the score vectors
    assert_allclose(pca.singular_values_, np.sqrt(np.sum(X_trans**2, axis=0)))
    # set the singular values and see what we get back
    n_samples, n_features = 100, 110
    X = rng.randn(n_samples, n_features)
    pca = PCA(n_components=3, svd_solver=svd_solver, random_state=rng)
    X_trans = pca.fit_transform(X)
    X_trans /= np.sqrt(np.sum(X_trans**2, axis=0))
    X_trans[:, 0] *= 3.142
    X_trans[:, 1] *= 2.718
    X_hat = np.dot(X_trans, pca.components_)
    pca.fit(X_hat)
    assert_allclose(pca.singular_values_, [3.142, 2.718, 1.0])
@pytest.mark.parametrize("svd_solver", PCA_SOLVERS)
def test_pca_check_projection(svd_solver):
    """Project a held-out point from the shifted cluster and check that it
    aligns (up to sign) with the first principal component."""
    # Test that the projection of data is correct
    rng = np.random.RandomState(0)
    n, p = 100, 3
    X = rng.randn(n, p) * 0.1
    X[:10] += np.array([3, 4, 5])
    Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
    Yt = PCA(n_components=2, svd_solver=svd_solver).fit(X).transform(Xt)
    # After normalization, (almost) all energy is in the first coordinate.
    Yt /= np.sqrt((Yt**2).sum())
    assert_allclose(np.abs(Yt[0][0]), 1.0, rtol=5e-3)
@pytest.mark.parametrize("svd_solver", PCA_SOLVERS)
def test_pca_check_projection_list(svd_solver):
    """Check PCA projection statistics when the input is a plain Python list.

    The transformed shape is (2, 1), the scores are centered, and their std
    is ~1/sqrt(2) for this two-point input.
    """
    # Test that the projection of data is correct
    X = [[1.0, 0.0], [0.0, 1.0]]
    pca = PCA(n_components=1, svd_solver=svd_solver, random_state=0)
    X_trans = pca.fit_transform(X)
    # Bug fix: the previous ``assert X_trans.shape, (2, 1)`` used the tuple as
    # the assert *message*, so the check always passed. Compare explicitly.
    assert X_trans.shape == (2, 1)
    assert_allclose(X_trans.mean(), 0.00, atol=1e-12)
    assert_allclose(X_trans.std(), 0.71, rtol=5e-3)
@pytest.mark.parametrize("svd_solver", ["full", "arpack", "randomized"])
def test_pca_inverse(svd_solver, whiten):
    """inverse_transform recovers the data when it is almost of rank 2."""
    # Test that the projection of data can be inverted
    rng = np.random.RandomState(0)
    n, p = 50, 3
    X = rng.randn(n, p)  # spherical data
    X[:, 1] *= 0.00001  # make middle component relatively small
    X += [5, 4, 3]  # make a large mean
    # same check that we can find the original data from the transformed
    # signal (since the data is almost of rank n_components)
    pca = PCA(n_components=2, svd_solver=svd_solver, whiten=whiten).fit(X)
    Y = pca.transform(X)
    Y_inverse = pca.inverse_transform(Y)
    assert_allclose(X, Y_inverse, rtol=5e-6)
@pytest.mark.parametrize(
    "data", [np.array([[0, 1, 0], [1, 0, 0]]), np.array([[0, 1, 0], [1, 0, 0]]).T]
)
@pytest.mark.parametrize(
    "svd_solver, n_components, err_msg",
    [
        ("arpack", 0, r"must be between 1 and min\(n_samples, n_features\)"),
        ("randomized", 0, r"must be between 1 and min\(n_samples, n_features\)"),
        ("arpack", 2, r"must be strictly less than min"),
        (
            "auto",
            3,
            (
                r"n_components=3 must be between 0 and min\(n_samples, "
                r"n_features\)=2 with svd_solver='full'"
            ),
        ),
    ],
)
def test_pca_validation(svd_solver, data, n_components, err_msg):
    """Out-of-range n_components values raise solver-specific ValueErrors."""
    # Ensures that solver-specific extreme inputs for the n_components
    # parameter raise errors
    smallest_d = 2  # The smallest dimension
    pca_fitted = PCA(n_components, svd_solver=svd_solver)
    with pytest.raises(ValueError, match=err_msg):
        pca_fitted.fit(data)
    # Additional case for arpack
    if svd_solver == "arpack":
        n_components = smallest_d
        err_msg = (
            "n_components={}L? must be strictly less than "
            r"min\(n_samples, n_features\)={}L? with "
            "svd_solver='arpack'".format(n_components, smallest_d)
        )
        with pytest.raises(ValueError, match=err_msg):
            PCA(n_components, svd_solver=svd_solver).fit(data)
@pytest.mark.parametrize(
    "solver, n_components_",
    [
        ("full", min(iris.data.shape)),
        ("arpack", min(iris.data.shape) - 1),
        ("randomized", min(iris.data.shape)),
    ],
)
@pytest.mark.parametrize("data", [iris.data, iris.data.T])
def test_n_components_none(data, solver, n_components_):
    """With n_components left unset, n_components_ gets a solver-specific
    default derived from min(n_samples, n_features)."""
    pca = PCA(svd_solver=solver)
    pca.fit(data)
    assert pca.n_components_ == n_components_
@pytest.mark.parametrize("svd_solver", ["auto", "full"])
def test_n_components_mle(svd_solver):
    """n_components='mle' works with auto/full solvers; rank 1 is inferred
    here since the data is isotropic Gaussian noise."""
    # Ensure that n_components == 'mle' doesn't raise error for auto/full
    rng = np.random.RandomState(0)
    n_samples, n_features = 600, 10
    X = rng.randn(n_samples, n_features)
    pca = PCA(n_components="mle", svd_solver=svd_solver)
    pca.fit(X)
    assert pca.n_components_ == 1
@pytest.mark.parametrize("svd_solver", ["arpack", "randomized"])
def test_n_components_mle_error(svd_solver):
    """n_components='mle' is rejected by solvers that do not support it."""
    rng = np.random.RandomState(0)
    X = rng.randn(600, 10)
    estimator = PCA(n_components="mle", svd_solver=svd_solver)
    expected = f"n_components='mle' cannot be a string with svd_solver='{svd_solver}'"
    with pytest.raises(ValueError, match=expected):
        estimator.fit(X)
def test_pca_dim():
    """'mle' leaves the n_components parameter untouched while setting the
    fitted n_components_ attribute (1 for this rank-1-plus-noise data)."""
    # Check automated dimensionality setting
    rng = np.random.RandomState(0)
    n, p = 100, 5
    X = rng.randn(n, p) * 0.1
    X[:10] += np.array([3, 4, 5, 1, 2])
    pca = PCA(n_components="mle", svd_solver="full").fit(X)
    assert pca.n_components == "mle"
    assert pca.n_components_ == 1
def test_infer_dim_1():
    """_assess_dimension gives a near-maximal log-likelihood to a low rank on
    data with a single dominant direction.

    NOTE(review): X is isotropic noise plus a rank-1 signal along
    [3, 4, 5, 1, 2]; ``ll[1]`` corresponds to rank k=2 — presumably the test
    only requires a low rank to be within 0.01*n of the maximum. Confirm the
    intended rank against ``_assess_dimension``.
    """
    n, p = 1000, 5
    rng = np.random.RandomState(0)
    X = (
        rng.randn(n, p) * 0.1
        + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2])
        + np.array([1, 0, 7, 4, 6])
    )
    pca = PCA(n_components=p, svd_solver="full")
    pca.fit(X)
    spect = pca.explained_variance_
    # Log-likelihood of each candidate rank k in [1, p).
    ll = np.array([_assess_dimension(spect, k, n) for k in range(1, p)])
    assert ll[1] > ll.max() - 0.01 * n
def test_infer_dim_2():
    """_infer_dimension finds more than one informative direction when two
    groups of samples are shifted along different directions."""
    n, p = 1000, 5
    rng = np.random.RandomState(0)
    X = rng.randn(n, p) * 0.1
    X[:10] += np.array([3, 4, 5, 1, 2])
    X[10:20] += np.array([6, 0, 7, 2, -1])
    pca = PCA(n_components=p, svd_solver="full")
    pca.fit(X)
    spect = pca.explained_variance_
    assert _infer_dimension(spect, n) > 1
def test_infer_dim_3():
    """_infer_dimension finds more than two informative directions when three
    groups of samples are shifted along different directions."""
    n, p = 100, 5
    rng = np.random.RandomState(0)
    X = rng.randn(n, p) * 0.1
    X[:10] += np.array([3, 4, 5, 1, 2])
    X[10:20] += np.array([6, 0, 7, 2, -1])
    X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
    pca = PCA(n_components=p, svd_solver="full")
    pca.fit(X)
    spect = pca.explained_variance_
    assert _infer_dimension(spect, n) > 2
@pytest.mark.parametrize(
    "X, n_components, n_components_validated",
    [
        (iris.data, 0.95, 2),  # row > col
        (iris.data, 0.01, 1),  # row > col
        (np.random.RandomState(0).rand(5, 20), 0.5, 2),
    ],  # row < col
)
def test_infer_dim_by_explained_variance(X, n_components, n_components_validated):
    """A float n_components in (0, 1) selects the number of components needed
    to reach that fraction of explained variance; the parameter itself stays
    a float while n_components_ holds the validated integer."""
    pca = PCA(n_components=n_components, svd_solver="full")
    pca.fit(X)
    assert pca.n_components == pytest.approx(n_components)
    assert pca.n_components_ == n_components_validated
@pytest.mark.parametrize("svd_solver", PCA_SOLVERS)
def test_pca_score(svd_solver):
    """Probabilistic PCA log-likelihood is close to the analytical value for
    the generating Gaussian and drops for mismatched data or whitening."""
    # Test that probabilistic PCA scoring yields a reasonable score
    n, p = 1000, 3
    rng = np.random.RandomState(0)
    X = rng.randn(n, p) * 0.1 + np.array([3, 4, 5])
    pca = PCA(n_components=2, svd_solver=svd_solver)
    pca.fit(X)
    ll1 = pca.score(X)
    # -p/2 * log(2*pi*e*sigma^2): differential entropy of an isotropic
    # Gaussian with sigma = 0.1 (negated expected log-likelihood per sample).
    h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1**2) * p
    assert_allclose(ll1 / h, 1, rtol=5e-2)
    # Data drawn with a larger sigma scores lower under the fitted model.
    ll2 = pca.score(rng.randn(n, p) * 0.2 + np.array([3, 4, 5]))
    assert ll1 > ll2
    pca = PCA(n_components=2, whiten=True, svd_solver=svd_solver)
    pca.fit(X)
    ll2 = pca.score(X)
    assert ll1 > ll2
def test_pca_score3():
    """Held-out log-likelihood is maximized at the true latent rank (1)."""
    # Check that probabilistic PCA selects the right model
    n, p = 200, 3
    rng = np.random.RandomState(0)
    Xl = rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) + np.array([1, 0, 7])
    Xt = rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) + np.array([1, 0, 7])
    ll = np.zeros(p)
    for k in range(p):
        pca = PCA(n_components=k, svd_solver="full")
        pca.fit(Xl)
        ll[k] = pca.score(Xt)
    assert ll.argmax() == 1
@pytest.mark.parametrize("svd_solver", PCA_SOLVERS)
def test_pca_sanity_noise_variance(svd_solver):
    """noise_variance_ never exceeds any retained component's variance.

    For more details see
    https://github.com/scikit-learn/scikit-learn/issues/7568
    https://github.com/scikit-learn/scikit-learn/issues/8541
    https://github.com/scikit-learn/scikit-learn/issues/8544
    """
    digits, _ = datasets.load_digits(return_X_y=True)
    estimator = PCA(n_components=30, svd_solver=svd_solver, random_state=0)
    estimator.fit(digits)
    assert (estimator.explained_variance_ >= estimator.noise_variance_).all()
@pytest.mark.parametrize("svd_solver", ["arpack", "randomized"])
def test_pca_score_consistency_solvers(svd_solver):
    """Log-likelihood scores agree between the full solver and `svd_solver`."""
    digits, _ = datasets.load_digits(return_X_y=True)
    reference = PCA(n_components=30, svd_solver="full", random_state=0).fit(digits)
    candidate = PCA(n_components=30, svd_solver=svd_solver, random_state=0).fit(
        digits
    )
    assert_allclose(reference.score(digits), candidate.score(digits), rtol=5e-6)
# arpack raises ValueError for n_components == min(n_samples, n_features)
@pytest.mark.parametrize("svd_solver", ["full", "randomized"])
def test_pca_zero_noise_variance_edge_cases(svd_solver):
    """noise_variance_ is exactly 0 when all possible components are kept,
    and score() does not divide by zero in that case."""
    # ensure that noise_variance_ is 0 in edge cases
    # when n_components == min(n_samples, n_features)
    n, p = 100, 3
    rng = np.random.RandomState(0)
    X = rng.randn(n, p) * 0.1 + np.array([3, 4, 5])
    pca = PCA(n_components=p, svd_solver=svd_solver)
    pca.fit(X)
    assert pca.noise_variance_ == 0
    # Non-regression test for gh-12489
    # ensure no divide-by-zero error for n_components == n_features < n_samples
    pca.score(X)
    pca.fit(X.T)
    assert pca.noise_variance_ == 0
    # Non-regression test for gh-12489
    # ensure no divide-by-zero error for n_components == n_samples < n_features
    pca.score(X.T)
@pytest.mark.parametrize(
    "n_samples, n_features, n_components, expected_solver",
    [
        # case: n_samples < 10 * n_features and max(X.shape) <= 500 => 'full'
        (10, 50, 5, "full"),
        # case: n_samples > 10 * n_features and n_features < 500 => 'covariance_eigh'
        (1000, 50, 50, "covariance_eigh"),
        # case: n_components >= .8 * min(X.shape) => 'full'
        (1000, 500, 400, "full"),
        # n_components >= 1 and n_components < .8*min(X.shape) => 'randomized'
        (1000, 500, 10, "randomized"),
        # case: n_components in (0,1) => 'full'
        (1000, 500, 0.5, "full"),
    ],
)
def test_pca_svd_solver_auto(n_samples, n_features, n_components, expected_solver):
    """svd_solver='auto' dispatches to the expected concrete solver and
    produces the same components as fitting that solver directly."""
    data = np.random.RandomState(0).uniform(size=(n_samples, n_features))
    pca_auto = PCA(n_components=n_components, random_state=0)
    pca_test = PCA(
        n_components=n_components, svd_solver=expected_solver, random_state=0
    )
    pca_auto.fit(data)
    assert pca_auto._fit_svd_solver == expected_solver
    pca_test.fit(data)
    assert_allclose(pca_auto.components_, pca_test.components_)
@pytest.mark.parametrize("svd_solver", PCA_SOLVERS)
def test_pca_deterministic_output(svd_solver):
    """Repeated fits on the same data yield identical projections, even
    though the shared RandomState advances between fits."""
    rng = np.random.RandomState(0)
    X = rng.rand(10, 10)
    transformed_X = np.zeros((20, 2))
    for i in range(20):
        pca = PCA(n_components=2, svd_solver=svd_solver, random_state=rng)
        transformed_X[i, :] = pca.fit_transform(X)[0]
    # All 20 rows must equal the first one.
    assert_allclose(transformed_X, np.tile(transformed_X[0, :], 20).reshape(20, 2))
@pytest.mark.parametrize("svd_solver", PCA_SOLVERS)
def test_pca_dtype_preservation(svd_solver, global_random_seed):
    """Run both dtype checks (float preservation, int upcast) per solver."""
    check_pca_float_dtype_preservation(svd_solver, global_random_seed)
    check_pca_int_dtype_upcast_to_double(svd_solver)
def check_pca_float_dtype_preservation(svd_solver, seed):
    """Fitting on float32/float64 keeps that dtype in components_ and in the
    transform output, with near-identical numerical results."""
    # Ensure that PCA does not upscale the dtype when input is float32
    X = np.random.RandomState(seed).rand(1000, 4)
    X_float64 = X.astype(np.float64, copy=False)
    X_float32 = X.astype(np.float32)
    pca_64 = PCA(n_components=3, svd_solver=svd_solver, random_state=seed).fit(
        X_float64
    )
    pca_32 = PCA(n_components=3, svd_solver=svd_solver, random_state=seed).fit(
        X_float32
    )
    assert pca_64.components_.dtype == np.float64
    assert pca_32.components_.dtype == np.float32
    assert pca_64.transform(X_float64).dtype == np.float64
    assert pca_32.transform(X_float32).dtype == np.float32
    # The atol and rtol are set such that the test passes for all random seeds
    # on all supported platforms on our CI and conda-forge with the default
    # random seed.
    assert_allclose(pca_64.components_, pca_32.components_, rtol=1e-3, atol=1e-3)
def check_pca_int_dtype_upcast_to_double(svd_solver):
    """Integer inputs (int32/int64) are upcast to float64 throughout."""
    # Ensure that all int types will be upcast to float64
    X_i64 = np.random.RandomState(0).randint(0, 1000, (1000, 4))
    X_i64 = X_i64.astype(np.int64, copy=False)
    X_i32 = X_i64.astype(np.int32, copy=False)
    pca_64 = PCA(n_components=3, svd_solver=svd_solver, random_state=0).fit(X_i64)
    pca_32 = PCA(n_components=3, svd_solver=svd_solver, random_state=0).fit(X_i32)
    assert pca_64.components_.dtype == np.float64
    assert pca_32.components_.dtype == np.float64
    assert pca_64.transform(X_i64).dtype == np.float64
    assert pca_32.transform(X_i32).dtype == np.float64
    assert_allclose(pca_64.components_, pca_32.components_, rtol=1e-4)
def test_pca_n_components_mostly_explained_variance_ratio():
    """Non-regression test for issue #15669."""
    # when n_components is the second highest cumulative sum of the
    # explained_variance_ratio_, then n_components_ should equal the
    # number of features in the dataset #15669
    X, y = load_iris(return_X_y=True)
    pca1 = PCA().fit(X, y)
    n_components = pca1.explained_variance_ratio_.cumsum()[-2]
    pca2 = PCA(n_components=n_components).fit(X, y)
    assert pca2.n_components_ == X.shape[1]
def test_assess_dimension_bad_rank():
    """_assess_dimension rejects ranks outside [1, n_features - 1]."""
    spectrum = np.array([1, 1e-30, 1e-30, 1e-30])
    n_samples = 10
    expected = r"should be in \[1, n_features - 1\]"
    for bad_rank in (0, 5):
        with pytest.raises(ValueError, match=expected):
            _assess_dimension(spectrum, bad_rank, n_samples)
def test_small_eigenvalues_mle():
    """Ranks tied to tiny eigenvalues get -inf log-likelihood, so the
    inferred dimension is 1."""
    spectrum = np.array([1, 1e-30, 1e-30, 1e-30])
    assert _assess_dimension(spectrum, rank=1, n_samples=10) > -np.inf
    assert _assess_dimension(spectrum, 2, 10) == -np.inf
    assert _assess_dimension(spectrum, 3, 10) == -np.inf
    assert _infer_dimension(spectrum, 10) == 1
def test_mle_redundant_data():
    """'mle' infers rank 1 when only a single feature is informative."""
    # Test 'mle' with pathological X: only one relevant feature should give a
    # rank of 1
    X, _ = datasets.make_classification(
        n_features=20,
        n_informative=1,
        n_repeated=18,
        n_redundant=1,
        n_clusters_per_class=1,
        random_state=42,
    )
    pca = PCA(n_components="mle").fit(X)
    assert pca.n_components_ == 1
def test_fit_mle_too_few_samples():
    """An 'mle' fit raises when n_samples < n_features."""
    X, _ = datasets.make_classification(n_samples=20, n_features=21, random_state=42)
    estimator = PCA(n_components="mle", svd_solver="full")
    expected_msg = "n_components='mle' is only supported if n_samples >= n_features"
    with pytest.raises(ValueError, match=expected_msg):
        estimator.fit(X)
def test_mle_simple_case():
# non-regression test for issue
# https://github.com/scikit-learn/scikit-learn/issues/16730
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/decomposition/tests/test_incremental_pca.py | sklearn/decomposition/tests/test_incremental_pca.py | """Tests for Incremental PCA."""
import itertools
import warnings
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
from sklearn.utils._testing import (
assert_allclose_dense_sparse,
assert_almost_equal,
assert_array_almost_equal,
)
from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS, LIL_CONTAINERS
iris = datasets.load_iris()
def test_incremental_pca():
    """IncrementalPCA on dense arrays matches PCA's total explained variance
    and yields a consistent covariance/precision pair."""
    # Incremental PCA on dense arrays.
    X = iris.data
    batch_size = X.shape[0] // 3
    ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
    pca = PCA(n_components=2)
    pca.fit_transform(X)
    X_transformed = ipca.fit_transform(X)
    assert X_transformed.shape == (X.shape[0], 2)
    np.testing.assert_allclose(
        ipca.explained_variance_ratio_.sum(),
        pca.explained_variance_ratio_.sum(),
        rtol=1e-3,
    )
    for n_components in [1, 2, X.shape[1]]:
        ipca = IncrementalPCA(n_components, batch_size=batch_size)
        ipca.fit(X)
        cov = ipca.get_covariance()
        precision = ipca.get_precision()
        # Covariance times its precision must be (numerically) the identity.
        np.testing.assert_allclose(
            np.dot(cov, precision), np.eye(X.shape[1]), atol=1e-13
        )
@pytest.mark.parametrize(
    "sparse_container", CSC_CONTAINERS + CSR_CONTAINERS + LIL_CONTAINERS
)
def test_incremental_pca_sparse(sparse_container):
    """IncrementalPCA accepts sparse input in fit/fit_transform, while
    partial_fit rejects it with a TypeError."""
    # Incremental PCA on sparse arrays.
    X = iris.data
    pca = PCA(n_components=2)
    pca.fit_transform(X)
    X_sparse = sparse_container(X)
    batch_size = X_sparse.shape[0] // 3
    ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
    X_transformed = ipca.fit_transform(X_sparse)
    assert X_transformed.shape == (X_sparse.shape[0], 2)
    np.testing.assert_allclose(
        ipca.explained_variance_ratio_.sum(),
        pca.explained_variance_ratio_.sum(),
        rtol=1e-3,
    )
    for n_components in [1, 2, X.shape[1]]:
        ipca = IncrementalPCA(n_components, batch_size=batch_size)
        ipca.fit(X_sparse)
        cov = ipca.get_covariance()
        precision = ipca.get_precision()
        np.testing.assert_allclose(
            np.dot(cov, precision), np.eye(X_sparse.shape[1]), atol=1e-13
        )
    with pytest.raises(
        TypeError,
        match=(
            "IncrementalPCA.partial_fit does not support "
            "sparse input. Either convert data to dense "
            "or use IncrementalPCA.fit to do so in batches."
        ),
    ):
        ipca.partial_fit(X_sparse)
def test_incremental_pca_check_projection(global_random_seed):
    """A point from the shifted cluster projects (up to sign) onto the first
    component."""
    # Test that the projection of data is correct.
    rng = np.random.RandomState(global_random_seed)
    n, p = 100, 3
    X = rng.randn(n, p) * 0.1
    X[:10] += np.array([3, 4, 5])
    Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
    # Get the reconstruction of the generated data X
    # Note that Xt has the same "components" as X, just separated
    # This is what we want to ensure is recreated correctly
    Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
    # Normalize
    Yt /= np.sqrt((Yt**2).sum())
    # Make sure that the first element of Yt is ~1, this means
    # the reconstruction worked as expected
    assert_almost_equal(np.abs(Yt[0][0]), 1.0, 1)
def test_incremental_pca_inverse(global_random_seed):
    """inverse_transform recovers the data when it is almost of rank 2."""
    # Test that the projection of data can be inverted.
    rng = np.random.RandomState(global_random_seed)
    n, p = 50, 3
    X = rng.randn(n, p)  # spherical data
    X[:, 1] *= 0.00001  # make middle component relatively small
    X += [5, 4, 3]  # make a large mean
    # same check that we can find the original data from the transformed
    # signal (since the data is almost of rank n_components)
    ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
    Y = ipca.transform(X)
    Y_inverse = ipca.inverse_transform(Y)
    assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
    """Invalid n_components values raise informative ValueErrors."""
    # Test that n_components is <= n_features.
    X = np.array([[0, 1, 0], [1, 0, 0]])
    n_samples, n_features = X.shape
    n_components = 4
    with pytest.raises(
        ValueError,
        match=(
            "n_components={} invalid"
            " for n_features={}, need more rows than"
            " columns for IncrementalPCA"
            " processing".format(n_components, n_features)
        ),
    ):
        IncrementalPCA(n_components, batch_size=10).fit(X)
    # Test that n_components is also <= n_samples in first call to partial fit.
    n_components = 3
    with pytest.raises(
        ValueError,
        match=(
            f"n_components={n_components} must be less or equal to the batch "
            f"number of samples {n_samples} for the first partial_fit call."
        ),
    ):
        IncrementalPCA(n_components=n_components).partial_fit(X)
def test_n_samples_equal_n_components():
    """No RuntimeWarning when n_samples == n_components (gh-19050)."""
    # Ensures no warning is raised when n_samples==n_components
    # Non-regression test for gh-19050
    ipca = IncrementalPCA(n_components=5)
    # Any RuntimeWarning is escalated to an error inside these contexts.
    with warnings.catch_warnings():
        warnings.simplefilter("error", RuntimeWarning)
        ipca.partial_fit(np.random.randn(5, 7))
    with warnings.catch_warnings():
        warnings.simplefilter("error", RuntimeWarning)
        ipca.fit(np.random.randn(5, 7))
def test_n_components_none():
    """n_components=None is inferred from the first batch, then from the
    previously computed components_."""
    # Ensures that n_components == None is handled correctly
    rng = np.random.RandomState(1999)
    for n_samples, n_features in [(50, 10), (10, 50)]:
        X = rng.rand(n_samples, n_features)
        ipca = IncrementalPCA(n_components=None)
        # First partial_fit call, ipca.n_components_ is inferred from
        # min(X.shape)
        ipca.partial_fit(X)
        assert ipca.n_components_ == min(X.shape)
        # Second partial_fit call, ipca.n_components_ is inferred from
        # ipca.components_ computed from the first partial_fit call
        ipca.partial_fit(X)
        assert ipca.n_components_ == ipca.components_.shape[0]
def test_incremental_pca_set_params():
    """Changing n_components via set_params after fitting makes partial_fit
    raise, until the original value is restored."""
    # Test that set_params(n_components=...) mismatches are rejected.
    rng = np.random.RandomState(1999)
    n_samples = 100
    n_features = 20
    X = rng.randn(n_samples, n_features)
    X2 = rng.randn(n_samples, n_features)
    X3 = rng.randn(n_samples, n_features)
    ipca = IncrementalPCA(n_components=20)
    ipca.fit(X)
    # Decreasing number of components
    ipca.set_params(n_components=10)
    with pytest.raises(ValueError):
        ipca.partial_fit(X2)
    # Increasing number of components
    ipca.set_params(n_components=15)
    with pytest.raises(ValueError):
        ipca.partial_fit(X3)
    # Returning to original setting
    ipca.set_params(n_components=20)
    ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
    """Changing the number of features between fit and partial_fit raises."""
    # Test that changing the number of features between calls raises an error.
    rng = np.random.RandomState(1999)
    n_samples = 100
    X = rng.randn(n_samples, 20)
    X2 = rng.randn(n_samples, 50)
    ipca = IncrementalPCA(n_components=None)
    ipca.fit(X)
    with pytest.raises(ValueError):
        ipca.partial_fit(X2)
def test_incremental_pca_batch_signs(global_random_seed):
    """components_ signs are stable across batch sizes."""
    # Test that components_ sign is stable over batch sizes.
    rng = np.random.RandomState(global_random_seed)
    n_samples = 100
    n_features = 3
    X = rng.randn(n_samples, n_features)
    all_components = []
    batch_sizes = np.arange(10, 20)
    for batch_size in batch_sizes:
        ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
        all_components.append(ipca.components_)
    # Compare each consecutive pair of fitted component matrices.
    for i, j in itertools.pairwise(all_components):
        assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_partial_fit_small_batch():
    """After the first partial_fit, later batches may be arbitrarily small."""
    # Test that there is no minimum batch size after the first partial_fit
    # Non-regression test
    rng = np.random.RandomState(1999)
    n, p = 50, 3
    X = rng.randn(n, p)  # spherical data
    X[:, 1] *= 0.00001  # make middle component relatively small
    X += [5, 4, 3]  # make a large mean
    n_components = p
    pipca = IncrementalPCA(n_components=n_components)
    pipca.partial_fit(X[:n_components])
    # Feed the remaining rows one at a time.
    for idx in range(n_components, n):
        pipca.partial_fit(X[idx : idx + 1])
    pca = PCA(n_components=n_components)
    pca.fit(X)
    assert_allclose(pca.components_, pipca.components_, atol=1e-3)
def test_incremental_pca_batch_values(global_random_seed):
    """components_ values are (coarsely) stable across batch sizes."""
    # Test that components_ values are stable over batch sizes.
    rng = np.random.RandomState(global_random_seed)
    n_samples = 100
    n_features = 3
    X = rng.randn(n_samples, n_features)
    all_components = []
    batch_sizes = np.arange(20, 40, 3)
    for batch_size in batch_sizes:
        ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
        all_components.append(ipca.components_)
    for i, j in itertools.pairwise(all_components):
        assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_batch_rank():
    """components_ agree closely when every batch has at least n_components
    samples."""
    # Test sample size in each batch is always larger or equal to n_components
    rng = np.random.RandomState(1999)
    n_samples = 100
    n_features = 20
    X = rng.randn(n_samples, n_features)
    all_components = []
    batch_sizes = np.arange(20, 90, 3)
    for batch_size in batch_sizes:
        ipca = IncrementalPCA(n_components=20, batch_size=batch_size).fit(X)
        all_components.append(ipca.components_)
    for components_i, components_j in itertools.pairwise(all_components):
        assert_allclose_dense_sparse(components_i, components_j)
def test_incremental_pca_partial_fit(global_random_seed):
    """fit and an equivalent sequence of partial_fit calls agree."""
    # Test that fit and partial_fit get equivalent results.
    rng = np.random.RandomState(global_random_seed)
    n, p = 50, 3
    X = rng.randn(n, p)  # spherical data
    X[:, 1] *= 0.00001  # make middle component relatively small
    X += [5, 4, 3]  # make a large mean
    # same check that we can find the original data from the transformed
    # signal (since the data is almost of rank n_components)
    batch_size = 10
    ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
    pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
    # Add one to make sure endpoint is included
    batch_itr = np.arange(0, n + 1, batch_size)
    for i, j in itertools.pairwise(batch_itr):
        pipca.partial_fit(X[i:j, :])
    assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
    """IncrementalPCA matches PCA on iris up to sign flips."""
    data = iris.data
    scores_pca = PCA(n_components=2).fit_transform(data)
    scores_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(data)
    assert_almost_equal(np.abs(scores_pca), np.abs(scores_ipca), 1)
def test_incremental_pca_against_pca_random_data(global_random_seed):
    """IncrementalPCA matches PCA on random data up to sign flips."""
    # Test that IncrementalPCA and PCA are approximate (to a sign flip).
    rng = np.random.RandomState(global_random_seed)
    n_samples = 100
    n_features = 3
    X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
    Y_pca = PCA(n_components=3).fit_transform(X)
    Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
    assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
    """PCA and IncrementalPCA agree on explained and noise variances."""
    # Test that PCA and IncrementalPCA calculations match
    X = datasets.make_low_rank_matrix(
        1000, 100, tail_strength=0.0, effective_rank=10, random_state=1999
    )
    prec = 3  # decimal places required to match
    n_samples, n_features = X.shape
    for nc in [None, 99]:
        pca = PCA(n_components=nc).fit(X)
        ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
        assert_almost_equal(
            pca.explained_variance_, ipca.explained_variance_, decimal=prec
        )
        assert_almost_equal(
            pca.explained_variance_ratio_, ipca.explained_variance_ratio_, decimal=prec
        )
        assert_almost_equal(pca.noise_variance_, ipca.noise_variance_, decimal=prec)
def test_singular_values(global_random_seed):
    """IncrementalPCA singular values match PCA's, satisfy the norm
    identities, and recover prescribed values on a synthetic matrix."""
    # Check that the IncrementalPCA output has the correct singular values
    rng = np.random.RandomState(global_random_seed)
    n_samples = 1000
    n_features = 100
    X = datasets.make_low_rank_matrix(
        n_samples, n_features, tail_strength=0.0, effective_rank=10, random_state=rng
    )
    pca = PCA(n_components=10, svd_solver="full", random_state=rng).fit(X)
    ipca = IncrementalPCA(n_components=10, batch_size=150).fit(X)
    assert_array_almost_equal(pca.singular_values_, ipca.singular_values_, 2)
    # Compare to the Frobenius norm
    X_pca = pca.transform(X)
    X_ipca = ipca.transform(X)
    assert_array_almost_equal(
        np.sum(pca.singular_values_**2.0), np.linalg.norm(X_pca, "fro") ** 2.0, 12
    )
    assert_array_almost_equal(
        np.sum(ipca.singular_values_**2.0), np.linalg.norm(X_ipca, "fro") ** 2.0, 2
    )
    # Compare to the 2-norms of the score vectors
    assert_array_almost_equal(
        pca.singular_values_, np.sqrt(np.sum(X_pca**2.0, axis=0)), 12
    )
    assert_array_almost_equal(
        ipca.singular_values_, np.sqrt(np.sum(X_ipca**2.0, axis=0)), 2
    )
    # Set the singular values and see what we get back
    rng = np.random.RandomState(global_random_seed)
    n_samples = 100
    n_features = 110
    X = datasets.make_low_rank_matrix(
        n_samples, n_features, tail_strength=0.0, effective_rank=3, random_state=rng
    )
    pca = PCA(n_components=3, svd_solver="full", random_state=rng)
    ipca = IncrementalPCA(n_components=3, batch_size=100)
    X_pca = pca.fit_transform(X)
    X_pca /= np.sqrt(np.sum(X_pca**2.0, axis=0))
    X_pca[:, 0] *= 3.142
    X_pca[:, 1] *= 2.718
    X_hat = np.dot(X_pca, pca.components_)
    pca.fit(X_hat)
    ipca.fit(X_hat)
    assert_array_almost_equal(pca.singular_values_, [3.142, 2.718, 1.0], 14)
    assert_array_almost_equal(ipca.singular_values_, [3.142, 2.718, 1.0], 14)
def test_whitening(global_random_seed):
    """Whitened PCA and IncrementalPCA transforms and reconstructions agree
    (up to sign) on the stable, non-noise components."""
    # Test that PCA and IncrementalPCA transforms match to sign flip.
    X = datasets.make_low_rank_matrix(
        1000, 10, tail_strength=0.0, effective_rank=2, random_state=global_random_seed
    )
    atol = 1e-3
    for nc in [None, 9]:
        pca = PCA(whiten=True, n_components=nc).fit(X)
        ipca = IncrementalPCA(whiten=True, n_components=nc, batch_size=250).fit(X)
        # Since the data is rank deficient, some components are pure noise. We
        # should not expect those dimensions to carry any signal and their
        # values might be arbitrarily changed by implementation details of the
        # internal SVD solver. We therefore filter them out before comparison.
        stable_mask = pca.explained_variance_ratio_ > 1e-12
        Xt_pca = pca.transform(X)
        Xt_ipca = ipca.transform(X)
        assert_allclose(
            np.abs(Xt_pca)[:, stable_mask],
            np.abs(Xt_ipca)[:, stable_mask],
            atol=atol,
        )
        # The noisy dimensions are in the null space of the inverse transform,
        # so they are not influencing the reconstruction. We therefore don't
        # need to apply the mask here.
        Xinv_ipca = ipca.inverse_transform(Xt_ipca)
        Xinv_pca = pca.inverse_transform(Xt_pca)
        assert_allclose(X, Xinv_ipca, atol=atol)
        assert_allclose(X, Xinv_pca, atol=atol)
        assert_allclose(Xinv_pca, Xinv_ipca, atol=atol)
def test_incremental_pca_partial_fit_float_division():
    """A float n_samples_seen_ yields the same result as an int (issue #9489)."""
    # Test to ensure float division is used in all versions of Python
    # (non-regression test for issue #9489)
    rng = np.random.RandomState(0)
    A = rng.randn(5, 3) + 2
    B = rng.randn(7, 3) + 5
    pca = IncrementalPCA(n_components=2)
    pca.partial_fit(A)
    # Set n_samples_seen_ to be a floating point number instead of an int
    pca.n_samples_seen_ = float(pca.n_samples_seen_)
    pca.partial_fit(B)
    singular_vals_float_samples_seen = pca.singular_values_
    pca2 = IncrementalPCA(n_components=2)
    pca2.partial_fit(A)
    pca2.partial_fit(B)
    singular_vals_int_samples_seen = pca2.singular_values_
    np.testing.assert_allclose(
        singular_vals_float_samples_seen, singular_vals_int_samples_seen
    )
def test_incremental_pca_fit_overflow_error():
    """No overflow with large sample counts on Windows (issue #17693)."""
    rng = np.random.RandomState(0)
    data = rng.rand(500000, 2)
    incremental = IncrementalPCA(n_components=2, batch_size=10000)
    incremental.fit(data)
    reference = PCA(n_components=2)
    reference.fit(data)
    np.testing.assert_allclose(
        incremental.singular_values_, reference.singular_values_
    )
def test_incremental_pca_feature_names_out():
    """get_feature_names_out yields 'incrementalpca<i>' for each component."""
    estimator = IncrementalPCA(n_components=2).fit(iris.data)
    expected = [f"incrementalpca{i}" for i in range(2)]
    assert_array_equal(expected, estimator.get_feature_names_out())
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/decomposition/tests/test_fastica.py | sklearn/decomposition/tests/test_fastica.py | """
Test the fastica algorithm.
"""
import itertools
import os
import warnings
import numpy as np
import pytest
from scipy import stats
from sklearn.decomposition import PCA, FastICA, fastica
from sklearn.decomposition._fastica import _gs_decorrelation
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils._testing import assert_allclose, ignore_warnings
def center_and_norm(x, axis=-1):
    """Centers and norms x **in place**

    Parameters
    -----------
    x: ndarray
        Array with an axis of observations (statistical units) measured on
        random variables.
    axis: int, optional
        Axis along which the mean and variance are calculated.
    """
    # keepdims=True lets the reductions broadcast back over `axis` without
    # rolling it to the front; the subtraction/division mutate x in place.
    x -= x.mean(axis=axis, keepdims=True)
    x /= x.std(axis=axis, keepdims=True)
def test_gs(global_random_seed):
    # Gram-Schmidt decorrelation against a random orthogonal basis.
    rng = np.random.RandomState(global_random_seed)
    basis, _, _ = np.linalg.svd(rng.randn(10, 10))

    # Decorrelating against the full basis annihilates the vector (in place).
    vec = rng.randn(10)
    _gs_decorrelation(vec, basis, 10)
    assert np.sum(vec**2) < 1.0e-10

    # Decorrelating against only the first 5 basis vectors removes exactly
    # the components along those vectors.
    vec = rng.randn(10)
    out = _gs_decorrelation(vec, basis, 5)
    proj = np.dot(out, basis.T)
    assert np.sum(proj[:5] ** 2) < 1.0e-10
def test_fastica_attributes_dtypes(global_dtype):
    """Fitted attribute dtypes must follow the input dtype."""
    rng = np.random.RandomState(0)
    X = rng.random_sample((100, 10)).astype(global_dtype, copy=False)
    model = FastICA(
        n_components=5, max_iter=1000, whiten="unit-variance", random_state=0
    ).fit(X)
    for attr in ("components_", "mixing_", "mean_", "whitening_"):
        assert getattr(model, attr).dtype == global_dtype
def test_fastica_return_dtypes(global_dtype):
    """Arrays returned by the fastica function must follow the input dtype."""
    rng = np.random.RandomState(0)
    X = rng.random_sample((100, 10)).astype(global_dtype, copy=False)
    outputs = fastica(X, max_iter=1000, whiten="unit-variance", random_state=rng)
    # fastica returns (K, mixing, sources) here; all must share X's dtype.
    for arr in outputs:
        assert arr.dtype == global_dtype
@pytest.mark.parametrize("add_noise", [True, False])
def test_fastica_simple(add_noise, global_random_seed, global_dtype):
    """Test the FastICA algorithm on very simple data.

    Two known non-Gaussian sources (a square wave and a heavy-tailed
    t-distributed signal) are mixed through a 2x2 rotation; FastICA must
    recover them for every combination of algorithm, contrast function and
    whitening strategy.
    """
    if (
        global_random_seed == 20
        and global_dtype == np.float32
        and not add_noise
        and os.getenv("DISTRIB") == "ubuntu"
    ):
        pytest.xfail(
            "FastICA instability with Ubuntu Atlas build with float32 "
            "global_dtype. For more details, see "
            "https://github.com/scikit-learn/scikit-learn/issues/24131#issuecomment-1208091119"
        )
    rng = np.random.RandomState(global_random_seed)
    n_samples = 1000
    # Generate two sources:
    s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
    s2 = stats.t.rvs(1, size=n_samples, random_state=global_random_seed)
    s = np.c_[s1, s2].T
    center_and_norm(s)
    s = s.astype(global_dtype)
    s1, s2 = s

    # Mixing angle
    phi = 0.6
    mixing = np.array([[np.cos(phi), np.sin(phi)], [np.sin(phi), -np.cos(phi)]])
    mixing = mixing.astype(global_dtype)
    m = np.dot(mixing, s)

    if add_noise:
        m += 0.1 * rng.randn(2, 1000)

    center_and_norm(m)

    # function as fun arg: custom contrast must return (g(x), mean of g'(x)).
    def g_test(x):
        return x**3, (3 * x**2).mean(axis=-1)

    algos = ["parallel", "deflation"]
    nls = ["logcosh", "exp", "cube", g_test]
    whitening = ["arbitrary-variance", "unit-variance", False]
    for algo, nl, whiten in itertools.product(algos, nls, whitening):
        if whiten:
            k_, mixing_, s_ = fastica(
                m.T, fun=nl, whiten=whiten, algorithm=algo, random_state=rng
            )
            # An unsupported `fun` value (plain ufunc) must be rejected.
            with pytest.raises(ValueError):
                fastica(m.T, fun=np.tanh, whiten=whiten, algorithm=algo)
        else:
            # whiten=False: pre-whiten manually with PCA before calling fastica.
            pca = PCA(n_components=2, whiten=True, random_state=rng)
            X = pca.fit_transform(m.T)
            k_, mixing_, s_ = fastica(
                X, fun=nl, algorithm=algo, whiten=False, random_state=rng
            )
            with pytest.raises(ValueError):
                fastica(X, fun=np.tanh, algorithm=algo)
        s_ = s_.T
        # Check that the mixing model described in the docstring holds:
        if whiten:
            # XXX: exact reconstruction to standard relative tolerance is not
            # possible. This is probably expected when add_noise is True but we
            # also need a non-trivial atol in float32 when add_noise is False.
            #
            # Note that the 2 sources are non-Gaussian in this test.
            atol = 1e-5 if global_dtype == np.float32 else 0
            assert_allclose(np.dot(np.dot(mixing_, k_), m), s_, atol=atol)

        center_and_norm(s_)
        s1_, s2_ = s_
        # Check to see if the sources have been estimated
        # in the wrong order
        if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
            s2_, s1_ = s_
        # ICA recovers sources up to sign; align each with its reference.
        s1_ *= np.sign(np.dot(s1_, s1))
        s2_ *= np.sign(np.dot(s2_, s2))

        # Check that we have estimated the original sources
        if not add_noise:
            assert_allclose(np.dot(s1_, s1) / n_samples, 1, atol=1e-2)
            assert_allclose(np.dot(s2_, s2) / n_samples, 1, atol=1e-2)
        else:
            assert_allclose(np.dot(s1_, s1) / n_samples, 1, atol=1e-1)
            assert_allclose(np.dot(s2_, s2) / n_samples, 1, atol=1e-1)

    # Test FastICA class (uses the last `nl`/`algo` from the loop above).
    _, _, sources_fun = fastica(
        m.T, fun=nl, algorithm=algo, random_state=global_random_seed
    )
    ica = FastICA(fun=nl, algorithm=algo, random_state=global_random_seed)
    sources = ica.fit_transform(m.T)
    assert ica.components_.shape == (2, 2)
    assert sources.shape == (1000, 2)

    assert_allclose(sources_fun, sources)
    # Set atol to account for the different magnitudes of the elements in sources
    # (from 1e-4 to 1e1).
    atol = np.max(np.abs(sources)) * (1e-5 if global_dtype == np.float32 else 1e-7)
    assert_allclose(sources, ica.transform(m.T), atol=atol)

    assert ica.mixing_.shape == (2, 2)

    ica = FastICA(fun=np.tanh, algorithm=algo)
    with pytest.raises(ValueError):
        ica.fit(m.T)
def test_fastica_nowhiten():
    # Non-regression test for issue #697: when whitening is disabled,
    # n_components is ignored with a warning, and fitting still succeeds.
    data = [[0, 1], [1, 0]]
    ica = FastICA(n_components=1, whiten=False, random_state=0)
    with pytest.warns(UserWarning, match="Ignoring n_components with whiten=False."):
        ica.fit(data)
    assert hasattr(ica, "mixing_")
def test_fastica_convergence_fail(global_random_seed):
    # A ConvergenceWarning must be raised when max_iter is too small and the
    # tolerance is zero (same setup as test_non_square_fastica).
    rng = np.random.RandomState(global_random_seed)
    n_samples = 1000

    # Two deterministic sources ...
    t = np.linspace(0, 100, n_samples)
    sources = np.c_[np.sin(t), np.ceil(np.sin(np.pi * t))].T
    center_and_norm(sources)

    # ... mixed into six channels through a random matrix.
    mixed = np.dot(rng.randn(6, 2), sources)

    expected_msg = (
        "FastICA did not converge. Consider increasing tolerance "
        "or the maximum number of iterations."
    )
    with pytest.warns(ConvergenceWarning, match=expected_msg):
        ica = FastICA(
            algorithm="parallel", n_components=2, random_state=rng, max_iter=2, tol=0.0
        )
        ica.fit(mixed.T)
@pytest.mark.parametrize("add_noise", [True, False])
def test_non_square_fastica(global_random_seed, add_noise):
    # Test the FastICA algorithm on very simple data.
    # Two sources are mixed into six observed channels (non-square mixing);
    # FastICA with n_components=2 must still recover both sources.
    rng = np.random.RandomState(global_random_seed)
    n_samples = 1000
    # Generate two sources:
    t = np.linspace(0, 100, n_samples)
    s1 = np.sin(t)
    s2 = np.ceil(np.sin(np.pi * t))
    s = np.c_[s1, s2].T
    center_and_norm(s)
    s1, s2 = s

    # Mixing matrix
    mixing = rng.randn(6, 2)
    m = np.dot(mixing, s)

    if add_noise:
        m += 0.1 * rng.randn(6, n_samples)

    center_and_norm(m)

    k_, mixing_, s_ = fastica(
        m.T, n_components=2, whiten="unit-variance", random_state=rng
    )
    s_ = s_.T

    # Check that the mixing model described in the docstring holds:
    assert_allclose(s_, np.dot(np.dot(mixing_, k_), m))

    center_and_norm(s_)
    s1_, s2_ = s_
    # Check to see if the sources have been estimated
    # in the wrong order
    if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
        s2_, s1_ = s_
    # ICA recovers sources up to sign; align each with its reference.
    s1_ *= np.sign(np.dot(s1_, s1))
    s2_ *= np.sign(np.dot(s2_, s2))

    # Check that we have estimated the original sources
    if not add_noise:
        assert_allclose(np.dot(s1_, s1) / n_samples, 1, atol=1e-3)
        assert_allclose(np.dot(s2_, s2) / n_samples, 1, atol=1e-3)
def test_fit_transform(global_random_seed, global_dtype):
    """Test unit variance of transformed data using FastICA algorithm.

    Check that `fit_transform` gives the same result as applying
    `fit` and then `transform`.

    Bug #13056
    """
    # multivariate uniform data in [0, 1]
    rng = np.random.RandomState(global_random_seed)
    X = rng.random_sample((100, 10)).astype(global_dtype)
    max_iter = 300
    for whiten, n_components in [["unit-variance", 5], [False, None]]:
        n_components_ = n_components if n_components is not None else X.shape[1]

        ica = FastICA(
            n_components=n_components, max_iter=max_iter, whiten=whiten, random_state=0
        )
        with warnings.catch_warnings():
            # make sure that numerical errors do not cause sqrt of negative
            # values
            warnings.simplefilter("error", RuntimeWarning)
            # XXX: for some seeds, the model does not converge.
            # However this is not what we test here.
            warnings.simplefilter("ignore", ConvergenceWarning)
            Xt = ica.fit_transform(X)
        assert ica.components_.shape == (n_components_, 10)
        assert Xt.shape == (X.shape[0], n_components_)

        ica2 = FastICA(
            n_components=n_components, max_iter=max_iter, whiten=whiten, random_state=0
        )
        with warnings.catch_warnings():
            # make sure that numerical errors do not cause sqrt of negative
            # values
            warnings.simplefilter("error", RuntimeWarning)
            warnings.simplefilter("ignore", ConvergenceWarning)
            ica2.fit(X)
        assert ica2.components_.shape == (n_components_, 10)
        Xt2 = ica2.transform(X)

        # XXX: we have to set atol for this test to pass for all seeds when
        # fitting with float32 data. Is this revealing a bug?
        if global_dtype == np.float32:
            # Bug fix: the previous `if global_dtype:` was always truthy
            # (numpy dtype classes are truthy objects), so the float32-only
            # tolerance was silently applied to float64 runs as well,
            # weakening the float64 check.
            atol = np.abs(Xt2).mean() / 1e6
        else:
            atol = 0.0  # the default rtol is enough for float64 data
        assert_allclose(Xt, Xt2, atol=atol)
@pytest.mark.filterwarnings("ignore:Ignoring n_components with whiten=False.")
@pytest.mark.parametrize(
    "whiten, n_components, expected_mixing_shape",
    [
        ("arbitrary-variance", 5, (10, 5)),
        ("arbitrary-variance", 10, (10, 10)),
        ("unit-variance", 5, (10, 5)),
        ("unit-variance", 10, (10, 10)),
        (False, 5, (10, 10)),
        (False, 10, (10, 10)),
    ],
)
def test_inverse_transform(
    whiten, n_components, expected_mixing_shape, global_random_seed, global_dtype
):
    """Check FastICA.inverse_transform shapes and round-trip accuracy."""
    n_samples = 100
    rng = np.random.RandomState(global_random_seed)
    X = rng.random_sample((n_samples, 10)).astype(global_dtype)
    ica = FastICA(n_components=n_components, random_state=rng, whiten=whiten)
    with warnings.catch_warnings():
        # For some dataset (depending on the value of global_dtype) the model
        # can fail to converge but this should not impact the definition of
        # a valid inverse transform.
        warnings.simplefilter("ignore", ConvergenceWarning)
        Xt = ica.fit_transform(X)
    assert ica.mixing_.shape == expected_mixing_shape
    X2 = ica.inverse_transform(Xt)
    assert X.shape == X2.shape

    # reversibility test in non-reduction case
    if n_components == X.shape[1]:
        # XXX: we have to set atol for this test to pass for all seeds when
        # fitting with float32 data. Is this revealing a bug?
        if global_dtype == np.float32:
            # Bug fix: the previous `if global_dtype:` was always truthy
            # (numpy dtype classes are truthy objects), so this float32-only
            # tolerance was also applied to float64 runs, weakening the
            # float64 check.
            # XXX: dividing by a smaller number makes
            # tests fail for some seeds.
            atol = np.abs(X2).mean() / 1e5
        else:
            atol = 0.0  # the default rtol is enough for float64 data
        assert_allclose(X, X2, atol=atol)
def test_fastica_errors():
    # Invalid hyper-parameters must raise informative ValueErrors.
    rng = np.random.RandomState(0)
    X = rng.random_sample((10, 3))

    # alpha in fun_args must lie in [1, 2].
    with pytest.raises(ValueError, match=r"alpha must be in \[1,2\]"):
        fastica(X, fun_args={"alpha": 0})

    # w_init must have shape (n_components, n_components) == (3, 3).
    bad_w_init = rng.randn(4, 4)
    with pytest.raises(
        ValueError, match=r"w_init has invalid shape.+should be \(3L?, 3L?\)"
    ):
        fastica(X, w_init=bad_w_init)
def test_fastica_whiten_unit_variance(global_random_seed):
    """Transformed data must have unit variance with whiten='unit-variance'.

    Non-regression test for bug #13056.
    """
    rng = np.random.RandomState(global_random_seed)
    X = rng.random_sample((100, 10))
    ica = FastICA(n_components=X.shape[1], whiten="unit-variance", random_state=0)
    transformed = ica.fit_transform(X)
    assert np.var(transformed) == pytest.approx(1.0)
@pytest.mark.parametrize("whiten", ["arbitrary-variance", "unit-variance", False])
@pytest.mark.parametrize("return_X_mean", [True, False])
@pytest.mark.parametrize("return_n_iter", [True, False])
def test_fastica_output_shape(whiten, return_X_mean, return_n_iter):
    # The number of returned arrays grows with each return_* flag, and the
    # whitening matrix slot is None when whitening is disabled.
    rng = np.random.RandomState(0)
    X = rng.random_sample((10, 3))

    out = fastica(
        X, whiten=whiten, return_n_iter=return_n_iter, return_X_mean=return_X_mean
    )

    assert len(out) == 3 + return_X_mean + return_n_iter
    if not whiten:
        assert out[0] is None
@pytest.mark.parametrize("add_noise", [True, False])
def test_fastica_simple_different_solvers(add_noise, global_random_seed):
    """The 'svd' and 'eigh' whiten solvers must give matching sources."""
    rng = np.random.RandomState(global_random_seed)
    n_samples = 1000

    # Two sources: a square wave and a heavy-tailed t-distributed signal.
    square_wave = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
    heavy_tail = stats.t.rvs(1, size=n_samples, random_state=rng)
    sources = np.c_[square_wave, heavy_tail].T
    center_and_norm(sources)
    square_wave, heavy_tail = sources

    # Mix through a random rotation/reflection.
    phi = rng.rand() * 2 * np.pi
    rotation = np.array([[np.cos(phi), np.sin(phi)], [np.sin(phi), -np.cos(phi)]])
    observed = np.dot(rotation, sources)

    if add_noise:
        observed += 0.1 * rng.randn(2, 1000)

    center_and_norm(observed)

    recovered = {}
    for solver in ("svd", "eigh"):
        ica = FastICA(random_state=0, whiten="unit-variance", whiten_solver=solver)
        estimated = ica.fit_transform(observed.T)
        recovered[solver] = estimated
        assert ica.components_.shape == (2, 2)
        assert estimated.shape == (1000, 2)

    # compared numbers are not all on the same magnitude. Using a small atol to
    # make the test less brittle
    assert_allclose(recovered["eigh"], recovered["svd"], atol=1e-12)
def test_fastica_eigh_low_rank_warning(global_random_seed):
    """The 'eigh' whiten solver must warn on (numerically) low-rank data."""
    rng = np.random.RandomState(global_random_seed)
    # Rank-2 Gram matrix of shape (10, 10): degenerate for the eigh solver.
    factor = rng.randn(10, 2)
    X = factor @ factor.T
    ica = FastICA(random_state=0, whiten="unit-variance", whiten_solver="eigh")
    with pytest.warns(UserWarning, match="There are some small singular values"):
        with ignore_warnings(category=ConvergenceWarning):
            # The FastICA solver may not converge for some data with specific
            # random seeds, but convergence happens after the whitening step,
            # which is not what we want to test here.
            ica.fit(X)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/decomposition/tests/__init__.py | sklearn/decomposition/tests/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/decomposition/tests/test_nmf.py | sklearn/decomposition/tests/test_nmf.py | import re
import sys
from io import StringIO
import numpy as np
import pytest
from scipy import linalg
from sklearn.base import clone
from sklearn.decomposition import NMF, MiniBatchNMF, non_negative_factorization
from sklearn.decomposition import _nmf as nmf # For testing internals
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils._testing import (
assert_allclose,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
)
from sklearn.utils.extmath import squared_norm
from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS
@pytest.mark.parametrize(
    ["Estimator", "solver"],
    [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
)
def test_convergence_warning(Estimator, solver):
    # A single iteration cannot converge, so every solver must warn.
    expected_msg = (
        "Maximum number of iterations 1 reached. Increase it to improve convergence."
    )
    X = np.ones((2, 2))
    with pytest.warns(ConvergenceWarning, match=expected_msg):
        Estimator(max_iter=1, n_components="auto", **solver).fit(X)
def test_initialize_nn_output():
    # Every initialization scheme must return non-negative factors.
    rng = np.random.mtrand.RandomState(42)
    data = np.abs(rng.randn(10, 10))
    for init in ("random", "nndsvd", "nndsvda", "nndsvdar"):
        W, H = nmf._initialize_nmf(data, 10, init=init, random_state=0)
        assert W.min() >= 0
        assert H.min() >= 0
@pytest.mark.filterwarnings(
    r"ignore:The multiplicative update \('mu'\) solver cannot update zeros present in"
    r" the initialization",
)
def test_parameter_checking():
    # Here we only check for invalid parameter values that are not already
    # automatically tested in the common tests.
    X = np.ones((2, 2))

    # beta_loss != 2 is only supported by the 'mu' solver.
    msg = "Invalid beta_loss parameter: solver 'cd' does not handle beta_loss = 1.0"
    with pytest.raises(ValueError, match=msg):
        NMF(solver="cd", beta_loss=1.0).fit(X)

    # Any negative entry in the data is rejected at fit, transform and init.
    msg = "Negative values in data passed to"
    with pytest.raises(ValueError, match=msg):
        NMF().fit(-X)
    clf = NMF(2, tol=0.1).fit(X)
    with pytest.raises(ValueError, match=msg):
        clf.transform(-X)
    with pytest.raises(ValueError, match=msg):
        nmf._initialize_nmf(-X, 2, "nndsvd")

    # NNDSVD-based inits require n_components <= min(n_samples, n_features).
    for init in ["nndsvd", "nndsvda", "nndsvdar"]:
        msg = re.escape(
            "init = '{}' can only be used when "
            "n_components <= min(n_samples, n_features)".format(init)
        )
        with pytest.raises(ValueError, match=msg):
            NMF(3, init=init).fit(X)
        with pytest.raises(ValueError, match=msg):
            MiniBatchNMF(3, init=init).fit(X)
        with pytest.raises(ValueError, match=msg):
            nmf._initialize_nmf(X, 3, init)
def test_initialize_close():
    # NNDSVD initialization: the reconstruction error of the initial (W, H)
    # must not exceed the standard deviation of the data entries.
    rng = np.random.mtrand.RandomState(42)
    data = np.abs(rng.randn(10, 10))
    W, H = nmf._initialize_nmf(data, 10, init="nndsvd")
    reconstruction_error = linalg.norm(np.dot(W, H) - data)
    spread = linalg.norm(data - data.mean())
    assert reconstruction_error <= spread
def test_initialize_variants():
    # The 'nndsvda' and 'nndsvdar' variants may only differ from plain
    # 'nndsvd' at entries where the plain version produced zeros.
    rng = np.random.mtrand.RandomState(42)
    data = np.abs(rng.randn(10, 10))
    W0, H0 = nmf._initialize_nmf(data, 10, init="nndsvd")
    Wa, Ha = nmf._initialize_nmf(data, 10, init="nndsvda")
    War, Har = nmf._initialize_nmf(data, 10, init="nndsvdar", random_state=0)

    for reference, variant in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
        nonzero = reference != 0
        assert_almost_equal(variant[nonzero], reference[nonzero])
# ignore UserWarning raised when both solver='mu' and init='nndsvd'
@pytest.mark.filterwarnings(
    r"ignore:The multiplicative update \('mu'\) solver cannot update zeros present in"
    r" the initialization"
)
@pytest.mark.parametrize(
    ["Estimator", "solver"],
    [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
)
@pytest.mark.parametrize("init", (None, "nndsvd", "nndsvda", "nndsvdar", "random"))
@pytest.mark.parametrize("alpha_W", (0.0, 1.0))
@pytest.mark.parametrize("alpha_H", (0.0, 1.0, "same"))
def test_nmf_fit_nn_output(Estimator, solver, init, alpha_W, alpha_H):
    # The fitted factorization must never contain negative values.
    X = np.c_[5.0 - np.arange(1, 6), 5.0 + np.arange(1, 6)]
    model = Estimator(
        n_components=2,
        init=init,
        alpha_W=alpha_W,
        alpha_H=alpha_H,
        random_state=0,
        **solver,
    )
    W = model.fit_transform(X)
    assert W.min() >= 0
    assert model.components_.min() >= 0
@pytest.mark.parametrize(
    ["Estimator", "solver"],
    [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
)
def test_nmf_fit_close(Estimator, solver):
    # The fitted model must reach a small reconstruction error.
    rng = np.random.mtrand.RandomState(42)
    X = np.abs(rng.randn(6, 5))
    model = Estimator(
        5,
        init="nndsvdar",
        random_state=0,
        max_iter=600,
        **solver,
    )
    assert model.fit(X).reconstruction_err_ < 0.1
def test_nmf_true_reconstruction():
    # Test that the fit is not too far away from an exact solution
    # (by construction)
    n_samples = 15
    n_features = 10
    n_components = 5
    beta_loss = 1
    batch_size = 3
    max_iter = 1000

    rng = np.random.mtrand.RandomState(42)
    # Build W_true and H_true with a single nonzero per column so that
    # X = W_true @ H_true admits an exact rank-n_components factorization.
    W_true = np.zeros([n_samples, n_components])
    W_array = np.abs(rng.randn(n_samples))
    for j in range(n_components):
        W_true[j % n_samples, j] = W_array[j % n_samples]
    H_true = np.zeros([n_components, n_features])
    H_array = np.abs(rng.randn(n_components))
    for j in range(n_features):
        H_true[j % n_components, j] = H_array[j % n_components]
    X = np.dot(W_true, H_true)

    # Full-batch NMF must recover X almost exactly.
    model = NMF(
        n_components=n_components,
        solver="mu",
        beta_loss=beta_loss,
        max_iter=max_iter,
        random_state=0,
    )
    transf = model.fit_transform(X)
    X_calc = np.dot(transf, model.components_)

    assert model.reconstruction_err_ < 0.1
    assert_allclose(X, X_calc)

    # MiniBatchNMF is noisier, hence the looser atol below.
    mbmodel = MiniBatchNMF(
        n_components=n_components,
        beta_loss=beta_loss,
        batch_size=batch_size,
        random_state=0,
        max_iter=max_iter,
    )
    transf = mbmodel.fit_transform(X)
    X_calc = np.dot(transf, mbmodel.components_)

    assert mbmodel.reconstruction_err_ < 0.1
    assert_allclose(X, X_calc, atol=1)
@pytest.mark.parametrize("solver", ["cd", "mu"])
def test_nmf_transform(solver):
    # fit_transform and fit().transform() must return close values for NMF.
    rng = np.random.mtrand.RandomState(42)
    X = np.abs(rng.randn(6, 5))
    model = NMF(
        solver=solver,
        n_components=3,
        init="random",
        random_state=0,
        tol=1e-6,
    )
    W_fit_transform = model.fit_transform(X)
    W_transform = model.transform(X)
    assert_allclose(W_fit_transform, W_transform, atol=1e-1)
def test_minibatch_nmf_transform():
    # fit_transform and fit().transform() must match for MiniBatchNMF.
    # Only guaranteed with fresh restarts.
    rng = np.random.mtrand.RandomState(42)
    X = np.abs(rng.randn(6, 5))
    model = MiniBatchNMF(
        n_components=3,
        random_state=0,
        tol=1e-3,
        fresh_restarts=True,
    )
    W_fit_transform = model.fit_transform(X)
    W_transform = model.transform(X)
    assert_allclose(W_fit_transform, W_transform)
@pytest.mark.parametrize(
    ["Estimator", "solver"],
    [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
)
def test_nmf_transform_custom_init(Estimator, solver):
    # Smoke test: transform works after fitting with a custom (W, H) init.
    rng = np.random.RandomState(0)
    X = np.abs(rng.randn(6, 5))
    n_components = 4
    scale = np.sqrt(X.mean() / n_components)
    H_init = np.abs(scale * rng.randn(n_components, 5))
    W_init = np.abs(scale * rng.randn(6, n_components))

    model = Estimator(
        n_components=n_components, init="custom", random_state=0, tol=1e-3, **solver
    )
    model.fit_transform(X, W=W_init, H=H_init)
    model.transform(X)
@pytest.mark.parametrize("solver", ("cd", "mu"))
def test_nmf_inverse_transform(solver):
    # inverse_transform(fit_transform(X)) must approximately reconstruct X.
    rng = np.random.RandomState(0)
    X = np.abs(rng.randn(6, 4))
    model = NMF(
        solver=solver,
        n_components=4,
        init="random",
        random_state=0,
        max_iter=1000,
    )
    W = model.fit_transform(X)
    X_reconstructed = model.inverse_transform(W)
    assert_array_almost_equal(X, X_reconstructed, decimal=2)
def test_mbnmf_inverse_transform():
    # MiniBatchNMF.transform followed by inverse_transform must be close to
    # the identity. (Local renamed from `nmf` to avoid shadowing the module.)
    rng = np.random.RandomState(0)
    X = np.abs(rng.randn(6, 4))
    model = MiniBatchNMF(
        random_state=rng,
        max_iter=500,
        init="nndsvdar",
        fresh_restarts=True,
    )
    W = model.fit_transform(X)
    X_reconstructed = model.inverse_transform(W)
    assert_allclose(X, X_reconstructed, rtol=1e-3, atol=1e-2)
@pytest.mark.parametrize("Estimator", [NMF, MiniBatchNMF])
def test_n_components_greater_n_features(Estimator):
    # Smoke test: fitting with more components than features must not fail.
    rng = np.random.mtrand.RandomState(42)
    X = np.abs(rng.randn(30, 10))
    Estimator(n_components=15, random_state=0, tol=1e-2).fit(X)
@pytest.mark.parametrize(
    ["Estimator", "solver"],
    [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
)
@pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS)
@pytest.mark.parametrize("alpha_W", (0.0, 1.0))
@pytest.mark.parametrize("alpha_H", (0.0, 1.0, "same"))
def test_nmf_sparse_input(Estimator, solver, sparse_container, alpha_W, alpha_H):
    # Fitting on a sparse matrix must give the same factorization as fitting
    # on the equivalent dense array.
    rng = np.random.mtrand.RandomState(42)
    X_dense = np.abs(rng.randn(10, 10))
    X_dense[:, 2 * np.arange(5)] = 0  # introduce explicit zero columns
    X_sparse = sparse_container(X_dense)

    est_dense = Estimator(
        n_components=5,
        init="random",
        alpha_W=alpha_W,
        alpha_H=alpha_H,
        random_state=0,
        tol=0,
        max_iter=100,
        **solver,
    )
    est_sparse = clone(est_dense)

    W_dense = est_dense.fit_transform(X_dense)
    W_sparse = est_sparse.fit_transform(X_sparse)

    assert_allclose(W_dense, W_sparse)
    assert_allclose(est_dense.components_, est_sparse.components_)
@pytest.mark.parametrize(
    ["Estimator", "solver"],
    [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
)
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
def test_nmf_sparse_transform(Estimator, solver, csc_container):
    # Non-regression test for issue #2124: transform works on sparse data.
    rng = np.random.mtrand.RandomState(42)
    dense = np.abs(rng.randn(3, 2))
    dense[1, 1] = 0
    X = csc_container(dense)
    model = Estimator(random_state=0, n_components=2, max_iter=400, **solver)
    W_fit_transform = model.fit_transform(X)
    W_transform = model.transform(X)
    assert_allclose(W_fit_transform, W_transform, atol=1e-1)
@pytest.mark.parametrize("init", ["random", "nndsvd"])
@pytest.mark.parametrize("solver", ("cd", "mu"))
@pytest.mark.parametrize("alpha_W", (0.0, 1.0))
@pytest.mark.parametrize("alpha_H", (0.0, 1.0, "same"))
def test_non_negative_factorization_consistency(init, solver, alpha_W, alpha_H):
    # Test that the function is called in the same way, either directly
    # or through the NMF class
    max_iter = 500
    rng = np.random.mtrand.RandomState(42)
    A = np.abs(rng.randn(10, 10))
    A[:, 2 * np.arange(5)] = 0

    # Function API: a full factorization (corresponds to NMF.fit_transform) ...
    W_nmf, H, _ = non_negative_factorization(
        A,
        init=init,
        solver=solver,
        max_iter=max_iter,
        alpha_W=alpha_W,
        alpha_H=alpha_H,
        random_state=1,
        tol=1e-2,
    )
    # ... then a transform-only call with H fixed (update_H=False), which
    # corresponds to NMF.transform.
    W_nmf_2, H, _ = non_negative_factorization(
        A,
        H=H,
        update_H=False,
        init=init,
        solver=solver,
        max_iter=max_iter,
        alpha_W=alpha_W,
        alpha_H=alpha_H,
        random_state=1,
        tol=1e-2,
    )

    # Class API with the same hyper-parameters must match both calls above.
    model_class = NMF(
        init=init,
        solver=solver,
        max_iter=max_iter,
        alpha_W=alpha_W,
        alpha_H=alpha_H,
        random_state=1,
        tol=1e-2,
    )
    W_cls = model_class.fit_transform(A)
    W_cls_2 = model_class.transform(A)

    assert_allclose(W_nmf, W_cls)
    assert_allclose(W_nmf_2, W_cls_2)
def test_non_negative_factorization_checking():
    # Note that the validity of parameter types and range of possible values
    # for scalar numerical or str parameters is already checked in the common
    # tests. Here we only check for problems that cannot be captured by simple
    # declarative constraints on the valid parameter values.
    A = np.ones((2, 2))

    # Negative or all-zero custom initializations must be rejected.
    with pytest.raises(
        ValueError,
        match=re.escape("Negative values in data passed to NMF (input H)"),
    ):
        non_negative_factorization(A, A, -A, 2, init="custom")
    with pytest.raises(
        ValueError,
        match=re.escape("Negative values in data passed to NMF (input W)"),
    ):
        non_negative_factorization(A, -A, A, 2, init="custom")
    with pytest.raises(
        ValueError,
        match=re.escape("Array passed to NMF (input H) is full of zeros"),
    ):
        non_negative_factorization(A, A, 0 * A, 2, init="custom")
def _beta_divergence_dense(X, W, H, beta):
    """Compute the beta-divergence of X and W.H for dense array only.

    Used as a reference for testing nmf._beta_divergence.
    """
    WH = np.dot(W, H)

    if beta == 2:
        # Frobenius case: 0.5 * ||X - WH||_F^2 (squared_norm inlined).
        diff = (X - WH).ravel()
        return np.dot(diff, diff) / 2

    # Restrict to the nonzero entries of X; clip WH away from zero so the
    # logarithms and negative powers below stay finite.
    mask = X != 0
    X_nz = X[mask]
    WH_nz = WH[mask]
    np.maximum(WH_nz, 1e-9, out=WH_nz)

    if beta == 1:
        # Generalized Kullback-Leibler divergence.
        res = np.sum(X_nz * np.log(X_nz / WH_nz)) + WH.sum() - X.sum()
    elif beta == 0:
        # Itakura-Saito divergence.
        ratio = X_nz / WH_nz
        res = ratio.sum() - X.size - np.log(ratio).sum()
    else:
        # General beta-divergence.
        res = (
            (X_nz**beta).sum()
            + (beta - 1) * (WH**beta).sum()
            - beta * (X_nz * WH_nz ** (beta - 1)).sum()
        ) / (beta * (beta - 1))
    return res
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_beta_divergence(csr_container):
    # nmf._beta_divergence must agree with the dense reference implementation
    # for both dense and sparse X, over a range of beta losses.
    n_samples, n_features, n_components = 20, 10, 5

    rng = np.random.mtrand.RandomState(42)
    X = rng.randn(n_samples, n_features)
    np.clip(X, 0, None, out=X)
    X_csr = csr_container(X)
    W, H = nmf._initialize_nmf(X, n_components, init="random", random_state=42)

    for beta in (0.0, 0.5, 1.0, 1.5, 2.0, 3.0):
        expected = _beta_divergence_dense(X, W, H, beta)
        assert_almost_equal(expected, nmf._beta_divergence(X, W, H, beta), decimal=7)
        assert_almost_equal(
            expected, nmf._beta_divergence(X_csr, W, H, beta), decimal=7
        )
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_special_sparse_dot(csr_container):
    # _special_sparse_dot computes np.dot(W, H) restricted to the nonzero
    # entries of X when X is sparse.
    rng = np.random.mtrand.RandomState(42)
    X = rng.randn(10, 5)
    np.clip(X, 0, None, out=X)
    X_csr = csr_container(X)

    W = np.abs(rng.randn(10, 3))
    H = np.abs(rng.randn(3, 5))

    WH_sparse = nmf._special_sparse_dot(W, H, X_csr)
    WH_dense = nmf._special_sparse_dot(W, H, X)

    # Values agree on the nonzero entries of X_csr ...
    rows, cols = X_csr.nonzero()
    assert_array_almost_equal(
        np.asarray(WH_sparse[rows, cols]).ravel(), WH_dense[rows, cols], decimal=10
    )

    # ... and the sparse result shares X_csr's exact sparsity structure.
    assert_array_equal(WH_sparse.indices, X_csr.indices)
    assert_array_equal(WH_sparse.indptr, X_csr.indptr)
    assert_array_equal(WH_sparse.shape, X_csr.shape)
@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning")
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_nmf_multiplicative_update_sparse(csr_container):
    # Compare sparse and dense input in multiplicative update NMF
    # Also test continuity of the results with respect to beta_loss parameter
    n_samples = 20
    n_features = 10
    n_components = 5
    alpha = 0.1
    l1_ratio = 0.5
    n_iter = 20

    # initialization (shared (W0, H0) so all runs start from the same point)
    rng = np.random.mtrand.RandomState(1337)
    X = rng.randn(n_samples, n_features)
    X = np.abs(X)
    X_csr = csr_container(X)
    W0, H0 = nmf._initialize_nmf(X, n_components, init="random", random_state=42)

    for beta_loss in (-1.2, 0, 0.2, 1.0, 2.0, 2.5):
        # Reference with dense array X
        W, H = W0.copy(), H0.copy()
        W1, H1, _ = non_negative_factorization(
            X,
            W,
            H,
            n_components,
            init="custom",
            update_H=True,
            solver="mu",
            beta_loss=beta_loss,
            max_iter=n_iter,
            alpha_W=alpha,
            l1_ratio=l1_ratio,
            random_state=42,
        )

        # Compare with sparse X
        W, H = W0.copy(), H0.copy()
        W2, H2, _ = non_negative_factorization(
            X_csr,
            W,
            H,
            n_components,
            init="custom",
            update_H=True,
            solver="mu",
            beta_loss=beta_loss,
            max_iter=n_iter,
            alpha_W=alpha,
            l1_ratio=l1_ratio,
            random_state=42,
        )

        assert_allclose(W1, W2, atol=1e-7)
        assert_allclose(H1, H2, atol=1e-7)

        # Compare with almost same beta_loss, since some values have a specific
        # behavior, but the results should be continuous w.r.t beta_loss
        beta_loss -= 1.0e-5
        W, H = W0.copy(), H0.copy()
        W3, H3, _ = non_negative_factorization(
            X_csr,
            W,
            H,
            n_components,
            init="custom",
            update_H=True,
            solver="mu",
            beta_loss=beta_loss,
            max_iter=n_iter,
            alpha_W=alpha,
            l1_ratio=l1_ratio,
            random_state=42,
        )

        # Looser atol: a perturbed beta_loss only gives nearby results.
        assert_allclose(W1, W3, atol=1e-4)
        assert_allclose(H1, H3, atol=1e-4)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_nmf_negative_beta_loss(csr_container):
    # Test that an error is raised if beta_loss <= 0 and X contains zeros.
    # Test that the output has not NaN values when the input contains zeros.
    n_samples = 6
    n_features = 5
    n_components = 3

    rng = np.random.mtrand.RandomState(42)
    X = rng.randn(n_samples, n_features)
    np.clip(X, 0, None, out=X)  # keep data non-negative; creates zeros
    X_csr = csr_container(X)

    def _assert_nmf_no_nan(X, beta_loss):
        # Helper: run the 'mu' solver and check the factors are NaN-free.
        W, H, _ = non_negative_factorization(
            X,
            init="random",
            n_components=n_components,
            solver="mu",
            beta_loss=beta_loss,
            random_state=0,
            max_iter=1000,
        )
        assert not np.any(np.isnan(W))
        assert not np.any(np.isnan(H))

    msg = "When beta_loss <= 0 and X contains zeros, the solver may diverge."
    for beta_loss in (-0.6, 0.0):
        # Zeros in X are rejected for beta_loss <= 0 ...
        with pytest.raises(ValueError, match=msg):
            _assert_nmf_no_nan(X, beta_loss)
        # ... while strictly positive data is accepted for any beta_loss.
        _assert_nmf_no_nan(X + 1e-9, beta_loss)

    for beta_loss in (0.2, 1.0, 1.2, 2.0, 2.5):
        _assert_nmf_no_nan(X, beta_loss)
        _assert_nmf_no_nan(X_csr, beta_loss)
@pytest.mark.parametrize("beta_loss", [-0.5, 0.0])
def test_minibatch_nmf_negative_beta_loss(beta_loss):
    """Check that an error is raised if beta_loss <= 0 and X contains zeros."""
    rng = np.random.RandomState(0)
    X = rng.normal(size=(6, 5))
    X[X < 0] = 0  # keep data non-negative; creates zeros

    model = MiniBatchNMF(beta_loss=beta_loss, random_state=0)
    with pytest.raises(
        ValueError,
        match="When beta_loss <= 0 and X contains zeros, the solver may diverge.",
    ):
        model.fit(X)
@pytest.mark.parametrize(
    ["Estimator", "solver"],
    [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
)
def test_nmf_regularization(Estimator, solver):
    # Test the effect of L1 and L2 regularizations
    n_samples = 6
    n_features = 5
    n_components = 3
    rng = np.random.mtrand.RandomState(42)
    X = np.abs(rng.randn(n_samples, n_features))

    # L1 regularization should increase the number of zeros
    l1_ratio = 1.0
    # `regul` is the regularized model, `model` the unregularized baseline.
    regul = Estimator(
        n_components=n_components,
        alpha_W=0.5,
        l1_ratio=l1_ratio,
        random_state=42,
        **solver,
    )
    model = Estimator(
        n_components=n_components,
        alpha_W=0.0,
        l1_ratio=l1_ratio,
        random_state=42,
        **solver,
    )

    W_regul = regul.fit_transform(X)
    W_model = model.fit_transform(X)

    H_regul = regul.components_
    H_model = model.components_

    eps = np.finfo(np.float64).eps
    W_regul_n_zeros = W_regul[W_regul <= eps].size
    W_model_n_zeros = W_model[W_model <= eps].size
    H_regul_n_zeros = H_regul[H_regul <= eps].size
    H_model_n_zeros = H_model[H_model <= eps].size

    assert W_regul_n_zeros > W_model_n_zeros
    assert H_regul_n_zeros > H_model_n_zeros

    # L2 regularization should decrease the sum of the squared norm
    # of the matrices W and H
    l1_ratio = 0.0
    regul = Estimator(
        n_components=n_components,
        alpha_W=0.5,
        l1_ratio=l1_ratio,
        random_state=42,
        **solver,
    )
    model = Estimator(
        n_components=n_components,
        alpha_W=0.0,
        l1_ratio=l1_ratio,
        random_state=42,
        **solver,
    )

    W_regul = regul.fit_transform(X)
    W_model = model.fit_transform(X)

    H_regul = regul.components_
    H_model = model.components_

    assert (linalg.norm(W_model)) ** 2.0 + (linalg.norm(H_model)) ** 2.0 > (
        linalg.norm(W_regul)
    ) ** 2.0 + (linalg.norm(H_regul)) ** 2.0
@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning")
@pytest.mark.parametrize("solver", ("cd", "mu"))
def test_nmf_decreasing(solver):
# test that the objective function is decreasing at each iteration
n_samples = 20
n_features = 15
n_components = 10
alpha = 0.1
l1_ratio = 0.5
tol = 0.0
# initialization
rng = np.random.mtrand.RandomState(42)
X = rng.randn(n_samples, n_features)
np.abs(X, X)
W0, H0 = nmf._initialize_nmf(X, n_components, init="random", random_state=42)
for beta_loss in (-1.2, 0, 0.2, 1.0, 2.0, 2.5):
if solver != "mu" and beta_loss != 2:
# not implemented
continue
W, H = W0.copy(), H0.copy()
previous_loss = None
for _ in range(30):
# one more iteration starting from the previous results
W, H, _ = non_negative_factorization(
X,
W,
H,
beta_loss=beta_loss,
init="custom",
n_components=n_components,
max_iter=1,
alpha_W=alpha,
solver=solver,
tol=tol,
l1_ratio=l1_ratio,
verbose=0,
random_state=0,
update_H=True,
)
loss = (
nmf._beta_divergence(X, W, H, beta_loss)
+ alpha * l1_ratio * n_features * W.sum()
+ alpha * l1_ratio * n_samples * H.sum()
+ alpha * (1 - l1_ratio) * n_features * (W**2).sum()
+ alpha * (1 - l1_ratio) * n_samples * (H**2).sum()
)
if previous_loss is not None:
assert previous_loss > loss
previous_loss = loss
def test_nmf_underflow():
    # Regression test for an underflow issue in _beta_divergence: a denormal
    # entry in X must give (almost) the same divergence as an exact zero.
    random_state = np.random.RandomState(0)
    n_samples, n_features, n_components = 10, 2, 2
    data = np.abs(random_state.randn(n_samples, n_features)) * 10
    W = np.abs(random_state.randn(n_samples, n_components)) * 10
    H = np.abs(random_state.randn(n_components, n_features))

    data[0, 0] = 0
    expected = nmf._beta_divergence(data, W, H, beta=1.0)
    data[0, 0] = 1e-323  # denormal double, at the edge of underflow
    actual = nmf._beta_divergence(data, W, H, beta=1.0)
    assert_almost_equal(actual, expected)
@pytest.mark.parametrize(
"dtype_in, dtype_out",
[
(np.float32, np.float32),
(np.float64, np.float64),
(np.int32, np.float64),
(np.int64, np.float64),
],
)
@pytest.mark.parametrize(
["Estimator", "solver"],
[[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
)
def test_nmf_dtype_match(Estimator, solver, dtype_in, dtype_out):
# Check that NMF preserves dtype (float32 and float64)
X = np.random.RandomState(0).randn(20, 15).astype(dtype_in, copy=False)
np.abs(X, out=X)
nmf = Estimator(
alpha_W=1.0,
alpha_H=1.0,
tol=1e-2,
random_state=0,
**solver,
)
assert nmf.fit(X).transform(X).dtype == dtype_out
assert nmf.fit_transform(X).dtype == dtype_out
assert nmf.components_.dtype == dtype_out
@pytest.mark.parametrize(
    ["Estimator", "solver"],
    [[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
)
def test_nmf_float32_float64_consistency(Estimator, solver):
    # The factorization computed in float32 must closely match the one
    # computed in float64.
    data = np.abs(np.random.RandomState(0).randn(50, 7))
    W_single = Estimator(random_state=0, tol=1e-3, **solver).fit_transform(
        data.astype(np.float32)
    )
    W_double = Estimator(random_state=0, tol=1e-3, **solver).fit_transform(data)
    assert_allclose(W_single, W_double, atol=1e-5)
@pytest.mark.parametrize("Estimator", [NMF, MiniBatchNMF])
def test_nmf_custom_init_dtype_error(Estimator):
    # Check that an error is raised if custom H and/or W don't have the same
    # dtype as X.
    rng = np.random.RandomState(0)
    X = rng.random_sample((20, 15))
    # H is deliberately float32 while X is float64 to trigger the error.
    H = rng.random_sample((15, 15)).astype(np.float32)
    W = rng.random_sample((20, 15))
    with pytest.raises(TypeError, match="should have the same dtype as X"):
        Estimator(init="custom").fit(X, H=H, W=W)
    # The function-level API must raise the same error.
    with pytest.raises(TypeError, match="should have the same dtype as X"):
        non_negative_factorization(X, H=H, update_H=False)
@pytest.mark.parametrize("beta_loss", [-0.5, 0, 0.5, 1, 1.5, 2, 2.5])
def test_nmf_minibatchnmf_equivalence(beta_loss):
# Test that MiniBatchNMF is equivalent to NMF when batch_size = n_samples and
# forget_factor 0.0 (stopping criterion put aside)
rng = np.random.mtrand.RandomState(42)
X = np.abs(rng.randn(48, 5))
nmf = NMF(
n_components=5,
beta_loss=beta_loss,
solver="mu",
random_state=0,
tol=0,
)
mbnmf = MiniBatchNMF(
n_components=5,
beta_loss=beta_loss,
random_state=0,
tol=0,
max_no_improvement=None,
batch_size=X.shape[0],
forget_factor=0.0,
)
W = nmf.fit_transform(X)
mbW = mbnmf.fit_transform(X)
assert_allclose(W, mbW)
def test_minibatch_nmf_partial_fit():
    # Check fit / partial_fit equivalence. Applicable only with fresh restarts.
    rng = np.random.mtrand.RandomState(42)
    X = np.abs(rng.randn(100, 5))
    n_components = 5
    batch_size = 10
    max_iter = 2
    mbnmf1 = MiniBatchNMF(
        n_components=n_components,
        init="custom",
        random_state=0,
        max_iter=max_iter,
        batch_size=batch_size,
        tol=0,
        max_no_improvement=None,
        fresh_restarts=False,
    )
    mbnmf2 = MiniBatchNMF(n_components=n_components, init="custom", random_state=0)
    # Force the same init of H (W is recomputed anyway) to be able to compare results.
    W, H = nmf._initialize_nmf(
        X, n_components=n_components, init="random", random_state=0
    )
    mbnmf1.fit(X, W=W, H=H)
    for i in range(max_iter):
        for j in range(batch_size):
            # NOTE(review): X[j : j + batch_size] for j in range(batch_size)
            # yields overlapping windows (0:10, 1:11, ...) rather than the
            # disjoint batches `fit` uses — confirm this is intended.
            mbnmf2.partial_fit(X[j : j + batch_size], W=W[:batch_size], H=H)
    assert mbnmf1.n_steps_ == mbnmf2.n_steps_
    assert_allclose(mbnmf1.components_, mbnmf2.components_)
def test_feature_names_out():
    """Check feature names out for NMF."""
    rng = np.random.RandomState(0)
    data = np.abs(rng.randn(10, 4))
    model = NMF(n_components=3).fit(data)
    # one output name per component, following the "nmf<i>" pattern
    assert_array_equal([f"nmf{i}" for i in range(3)], model.get_feature_names_out())
def test_minibatch_nmf_verbose():
    # Exercise the verbose code path of MiniBatchNMF for coverage purposes.
    data = np.random.RandomState(0).random_sample((100, 10))
    model = MiniBatchNMF(tol=1e-2, random_state=0, verbose=1)
    saved_stdout = sys.stdout
    sys.stdout = StringIO()  # swallow the verbose output
    try:
        model.fit(data)
    finally:
        sys.stdout = saved_stdout
@pytest.mark.parametrize("Estimator", [NMF, MiniBatchNMF])
def test_nmf_n_components_auto(Estimator):
    # Check that n_components is correctly inferred
    # from the provided custom initialization.
    rng = np.random.RandomState(0)
    X = rng.random_sample((6, 5))
    W = rng.random_sample((6, 2))
    H = rng.random_sample((2, 5))
    est = Estimator(
        n_components="auto",
        init="custom",
        random_state=0,
        tol=1e-6,
    )
    est.fit_transform(X, W=W, H=H)
    # the inferred rank must match the number of rows of the custom H
    assert est._n_components == H.shape[0]
def test_nmf_non_negative_factorization_n_components_auto():
    # With n_components="auto", the rank must be inferred from the shapes of
    # the custom initialization arrays.
    rng = np.random.RandomState(0)
    X = rng.random_sample((6, 5))
    W_init = rng.random_sample((6, 2))
    H_init = rng.random_sample((2, 5))
    W, H, _ = non_negative_factorization(
        X, W=W_init, H=H_init, init="custom", n_components="auto"
    )
    assert W.shape == W_init.shape
    assert H.shape == H_init.shape
def test_nmf_n_components_auto_no_h_update():
    # Tests that non_negative_factorization does not fail when setting
    # n_components="auto" with update_H=False, and that the inferred
    # n_components value is the right one.
    rng = np.random.RandomState(0)
    X = rng.random_sample((6, 5))
    H_true = rng.random_sample((2, 5))
    W, H, _ = non_negative_factorization(
        X, H=H_true, n_components="auto", update_H=False
    )  # should not fail
    # H must be returned unchanged since update_H=False
    assert_allclose(H, H_true)
    assert W.shape == (X.shape[0], H_true.shape[0])
def test_nmf_w_h_not_used_warning():
# Check that warnings are raised if user provided W and H are not used
# and initialization overrides value of W or H
rng = np.random.RandomState(0)
X = rng.random_sample((6, 5))
W_init = rng.random_sample((6, 2))
H_init = rng.random_sample((2, 5))
with pytest.warns(
RuntimeWarning,
match="When init!='custom', provided W or H are ignored",
):
non_negative_factorization(X, H=H_init, update_H=True, n_components="auto")
with pytest.warns(
RuntimeWarning,
match="When init!='custom', provided W or H are ignored",
):
non_negative_factorization(
X, W=W_init, H=H_init, update_H=True, n_components="auto"
)
with pytest.warns(
RuntimeWarning, match="When update_H=False, the provided initial W is not used."
):
# When update_H is False, W is ignored regardless of init
# TODO: use the provided W when init="custom".
non_negative_factorization(
X, W=W_init, H=H_init, update_H=False, n_components="auto"
)
def test_nmf_custom_init_shape_error():
    # A custom initialization whose shape is inconsistent with X must raise
    # an informative error.
    rng = np.random.RandomState(0)
    X = rng.random_sample((6, 5))
    H = rng.random_sample((2, 5))
    model = NMF(n_components=2, init="custom", random_state=0)

    # wrong number of rows in W
    with pytest.raises(ValueError, match="Array with wrong first dimension passed"):
        model.fit(X, H=H, W=rng.random_sample((5, 2)))

    # wrong number of columns in W
    with pytest.raises(ValueError, match="Array with wrong second dimension passed"):
        model.fit(X, H=H, W=rng.random_sample((6, 3)))
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/decomposition/tests/test_online_lda.py | sklearn/decomposition/tests/test_online_lda.py | import sys
from io import StringIO
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from scipy.linalg import block_diag
from scipy.special import psi
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition._online_lda_fast import (
_dirichlet_expectation_1d,
_dirichlet_expectation_2d,
)
from sklearn.exceptions import NotFittedError
from sklearn.utils._testing import (
assert_allclose,
assert_almost_equal,
assert_array_almost_equal,
if_safe_multiprocessing_with_blas,
)
from sklearn.utils.fixes import CSR_CONTAINERS
def _build_sparse_array(csr_container):
# Create 3 topics and each topic has 3 distinct words.
# (Each word only belongs to a single topic.)
n_components = 3
block = np.full((3, 3), n_components, dtype=int)
blocks = [block] * n_components
X = block_diag(*blocks)
X = csr_container(X)
return (n_components, X)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_lda_default_prior_params(csr_container):
    # default prior parameter should be `1 / topics`
    # and verbose params should not affect result
    n_components, X = _build_sparse_array(csr_container)
    prior = 1.0 / n_components
    # explicit priors equal to the documented defaults...
    lda_1 = LatentDirichletAllocation(
        n_components=n_components,
        doc_topic_prior=prior,
        topic_word_prior=prior,
        random_state=0,
    )
    # ...must give the same result as leaving the priors unset
    lda_2 = LatentDirichletAllocation(n_components=n_components, random_state=0)
    topic_distr_1 = lda_1.fit_transform(X)
    topic_distr_2 = lda_2.fit_transform(X)
    assert_almost_equal(topic_distr_1, topic_distr_2)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_lda_fit_batch(csr_container):
# Test LDA batch learning_offset (`fit` method with 'batch' learning)
rng = np.random.RandomState(0)
n_components, X = _build_sparse_array(csr_container)
lda = LatentDirichletAllocation(
n_components=n_components,
evaluate_every=1,
learning_method="batch",
random_state=rng,
)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert tuple(sorted(top_idx)) in correct_idx_grps
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_lda_fit_online(csr_container):
    # LDA with the 'online' learning method should recover the three
    # block-diagonal word groups of the toy corpus.
    random_state = np.random.RandomState(0)
    n_components, X = _build_sparse_array(csr_container)
    model = LatentDirichletAllocation(
        n_components=n_components,
        learning_offset=10.0,
        evaluate_every=1,
        learning_method="online",
        random_state=random_state,
    )
    model.fit(X)

    expected_word_groups = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
    for topic in model.components_:
        # the three heaviest words of each topic must form one word group
        top_words = tuple(sorted(topic.argsort()[-3:]))
        assert top_words in expected_word_groups
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_lda_partial_fit(csr_container):
# Test LDA online learning (`partial_fit` method)
# (same as test_lda_batch)
rng = np.random.RandomState(0)
n_components, X = _build_sparse_array(csr_container)
lda = LatentDirichletAllocation(
n_components=n_components,
learning_offset=10.0,
total_samples=100,
random_state=rng,
)
for i in range(3):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert tuple(sorted(top_idx)) in correct_idx_grps
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_lda_dense_input(csr_container):
# Test LDA with dense input.
rng = np.random.RandomState(0)
n_components, X = _build_sparse_array(csr_container)
lda = LatentDirichletAllocation(
n_components=n_components, learning_method="batch", random_state=rng
)
lda.fit(X.toarray())
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert tuple(sorted(top_idx)) in correct_idx_grps
def test_lda_transform():
# Test LDA transform.
# Transform result cannot be negative and should be normalized by default
rng = np.random.RandomState(0)
X = rng.randint(5, size=(20, 10))
n_components = 3
lda = LatentDirichletAllocation(n_components=n_components, random_state=rng)
X_trans = lda.fit_transform(X)
assert (X_trans > 0.0).any()
assert_array_almost_equal(np.sum(X_trans, axis=1), np.ones(X_trans.shape[0]))
X_trans_unnormalized = lda.transform(X, normalize=False)
assert_array_almost_equal(
X_trans, X_trans_unnormalized / X_trans_unnormalized.sum(axis=1)[:, np.newaxis]
)
@pytest.mark.parametrize("method", ("online", "batch"))
def test_lda_fit_transform(method):
# Test LDA fit_transform & transform
# fit_transform and transform result should be the same
rng = np.random.RandomState(0)
X = rng.randint(10, size=(50, 20))
lda = LatentDirichletAllocation(
n_components=5, learning_method=method, random_state=rng
)
X_fit = lda.fit_transform(X)
X_trans = lda.transform(X)
assert_array_almost_equal(X_fit, X_trans, 4)
def test_lda_negative_input():
    # Fitting on a dense matrix that contains negative values must raise.
    X = np.full((5, 10), -1.0)
    lda = LatentDirichletAllocation()
    regex = r"^Negative values in data passed"
    with pytest.raises(ValueError, match=regex):
        lda.fit(X)
def test_lda_no_component_error():
    # Calling `perplexity` before `fit` must raise NotFittedError.
    rng = np.random.RandomState(0)
    X = rng.randint(4, size=(20, 10))
    lda = LatentDirichletAllocation()
    regex = (
        "This LatentDirichletAllocation instance is not fitted yet. "
        "Call 'fit' with appropriate arguments before using this "
        "estimator."
    )
    with pytest.raises(NotFittedError, match=regex):
        lda.perplexity(X)
# TODO: remove mark once loky bug is fixed:
# https://github.com/joblib/loky/issues/458
@pytest.mark.thread_unsafe
@if_safe_multiprocessing_with_blas
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
@pytest.mark.parametrize("method", ("online", "batch"))
def test_lda_multi_jobs(method, csr_container):
n_components, X = _build_sparse_array(csr_container)
# Test LDA batch training with multi CPU
rng = np.random.RandomState(0)
lda = LatentDirichletAllocation(
n_components=n_components,
n_jobs=2,
learning_method=method,
evaluate_every=1,
random_state=rng,
)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert tuple(sorted(top_idx)) in correct_idx_grps
# TODO: remove mark once loky bug is fixed:
# https://github.com/joblib/loky/issues/458
@pytest.mark.thread_unsafe
@if_safe_multiprocessing_with_blas
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_lda_partial_fit_multi_jobs(csr_container):
# Test LDA online training with multi CPU
rng = np.random.RandomState(0)
n_components, X = _build_sparse_array(csr_container)
lda = LatentDirichletAllocation(
n_components=n_components,
n_jobs=2,
learning_offset=5.0,
total_samples=30,
random_state=rng,
)
for i in range(2):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert tuple(sorted(top_idx)) in correct_idx_grps
def test_lda_preplexity_mismatch():
    # Test dimension mismatch in the `perplexity` method.
    # NOTE(review): the function name misspells "perplexity"; renaming would
    # change the public test id, so it is kept as-is.
    rng = np.random.RandomState(0)
    n_components = rng.randint(3, 6)
    n_samples = rng.randint(6, 10)
    # Use the seeded `rng` (not the unseeded global np.random) so the test
    # data is deterministic across runs.
    X = rng.randint(4, size=(n_samples, 10))
    lda = LatentDirichletAllocation(
        n_components=n_components,
        learning_offset=5.0,
        total_samples=20,
        random_state=rng,
    )
    lda.fit(X)
    # invalid number of samples in the precomputed distribution
    invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_components))
    with pytest.raises(ValueError, match=r"Number of samples"):
        lda._perplexity_precomp_distr(X, invalid_n_samples)
    # invalid number of topics in the precomputed distribution
    invalid_n_components = rng.randint(4, size=(n_samples, n_components + 1))
    with pytest.raises(ValueError, match=r"Number of topics"):
        lda._perplexity_precomp_distr(X, invalid_n_components)
@pytest.mark.parametrize("method", ("online", "batch"))
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_lda_perplexity(method, csr_container):
# Test LDA perplexity for batch training
# perplexity should be lower after each iteration
n_components, X = _build_sparse_array(csr_container)
lda_1 = LatentDirichletAllocation(
n_components=n_components,
max_iter=1,
learning_method=method,
total_samples=100,
random_state=0,
)
lda_2 = LatentDirichletAllocation(
n_components=n_components,
max_iter=10,
learning_method=method,
total_samples=100,
random_state=0,
)
lda_1.fit(X)
perp_1 = lda_1.perplexity(X, sub_sampling=False)
lda_2.fit(X)
perp_2 = lda_2.perplexity(X, sub_sampling=False)
assert perp_1 >= perp_2
perp_1_subsampling = lda_1.perplexity(X, sub_sampling=True)
perp_2_subsampling = lda_2.perplexity(X, sub_sampling=True)
assert perp_1_subsampling >= perp_2_subsampling
@pytest.mark.parametrize("method", ("online", "batch"))
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_lda_score(method, csr_container):
# Test LDA score for batch training
# score should be higher after each iteration
n_components, X = _build_sparse_array(csr_container)
lda_1 = LatentDirichletAllocation(
n_components=n_components,
max_iter=1,
learning_method=method,
total_samples=100,
random_state=0,
)
lda_2 = LatentDirichletAllocation(
n_components=n_components,
max_iter=10,
learning_method=method,
total_samples=100,
random_state=0,
)
lda_1.fit_transform(X)
score_1 = lda_1.score(X)
lda_2.fit_transform(X)
score_2 = lda_2.score(X)
assert score_2 >= score_1
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_perplexity_input_format(csr_container):
# Test LDA perplexity for sparse and dense input
# score should be the same for both dense and sparse input
n_components, X = _build_sparse_array(csr_container)
lda = LatentDirichletAllocation(
n_components=n_components,
max_iter=1,
learning_method="batch",
total_samples=100,
random_state=0,
)
lda.fit(X)
perp_1 = lda.perplexity(X)
perp_2 = lda.perplexity(X.toarray())
assert_almost_equal(perp_1, perp_2)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_lda_score_perplexity(csr_container):
    # Test the relationship between LDA score and perplexity
    n_components, X = _build_sparse_array(csr_container)
    lda = LatentDirichletAllocation(
        n_components=n_components, max_iter=10, random_state=0
    )
    lda.fit(X)
    perplexity_1 = lda.perplexity(X, sub_sampling=False)
    score = lda.score(X)
    # perplexity == exp(-score / total word count)
    perplexity_2 = np.exp(-1.0 * (score / np.sum(X.data)))
    assert_almost_equal(perplexity_1, perplexity_2)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_lda_fit_perplexity(csr_container):
# Test that the perplexity computed during fit is consistent with what is
# returned by the perplexity method
n_components, X = _build_sparse_array(csr_container)
lda = LatentDirichletAllocation(
n_components=n_components,
max_iter=1,
learning_method="batch",
random_state=0,
evaluate_every=1,
)
lda.fit(X)
# Perplexity computed at end of fit method
perplexity1 = lda.bound_
# Result of perplexity method on the train set
perplexity2 = lda.perplexity(X)
assert_almost_equal(perplexity1, perplexity2)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_lda_empty_docs(csr_container):
    """Test LDA on empty document (all-zero rows)."""
    Z = np.zeros((5, 4))
    # check both the dense and the sparse representation of the same matrix
    for X in [Z, csr_container(Z)]:
        lda = LatentDirichletAllocation(max_iter=750).fit(X)
        # each word's total weight across topics must converge to 1
        assert_almost_equal(
            lda.components_.sum(axis=0), np.ones(lda.components_.shape[1])
        )
def test_dirichlet_expectation():
"""Test Cython version of Dirichlet expectation calculation."""
x = np.logspace(-100, 10, 10000)
expectation = np.empty_like(x)
_dirichlet_expectation_1d(x, 0, expectation)
assert_allclose(expectation, np.exp(psi(x) - psi(np.sum(x))), atol=1e-19)
x = x.reshape(100, 100)
assert_allclose(
_dirichlet_expectation_2d(x),
psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]),
rtol=1e-11,
atol=3e-9,
)
def check_verbosity(
verbose, evaluate_every, expected_lines, expected_perplexities, csr_container
):
n_components, X = _build_sparse_array(csr_container)
lda = LatentDirichletAllocation(
n_components=n_components,
max_iter=3,
learning_method="batch",
verbose=verbose,
evaluate_every=evaluate_every,
random_state=0,
)
out = StringIO()
old_out, sys.stdout = sys.stdout, out
try:
lda.fit(X)
finally:
sys.stdout = old_out
n_lines = out.getvalue().count("\n")
n_perplexity = out.getvalue().count("perplexity")
assert expected_lines == n_lines
assert expected_perplexities == n_perplexity
@pytest.mark.parametrize(
"verbose,evaluate_every,expected_lines,expected_perplexities",
[
(False, 1, 0, 0),
(False, 0, 0, 0),
(True, 0, 3, 0),
(True, 1, 3, 3),
(True, 2, 3, 1),
],
)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
@pytest.mark.thread_unsafe # manually captured stdout
def test_verbosity(
verbose, evaluate_every, expected_lines, expected_perplexities, csr_container
):
check_verbosity(
verbose, evaluate_every, expected_lines, expected_perplexities, csr_container
)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_lda_feature_names_out(csr_container):
    """Check feature names out for LatentDirichletAllocation."""
    n_components, X = _build_sparse_array(csr_container)
    model = LatentDirichletAllocation(n_components=n_components).fit(X)
    # one output name per component, following the lowercase class-name pattern
    expected = [f"latentdirichletallocation{i}" for i in range(n_components)]
    assert_array_equal(expected, model.get_feature_names_out())
@pytest.mark.parametrize("learning_method", ("batch", "online"))
def test_lda_dtype_match(learning_method, global_dtype):
"""Check data type preservation of fitted attributes."""
rng = np.random.RandomState(0)
X = rng.uniform(size=(20, 10)).astype(global_dtype, copy=False)
lda = LatentDirichletAllocation(
n_components=5, random_state=0, learning_method=learning_method
)
lda.fit(X)
assert lda.components_.dtype == global_dtype
assert lda.exp_dirichlet_component_.dtype == global_dtype
@pytest.mark.parametrize("learning_method", ("batch", "online"))
def test_lda_numerical_consistency(learning_method, global_random_seed):
"""Check numerical consistency between np.float32 and np.float64."""
rng = np.random.RandomState(global_random_seed)
X64 = rng.uniform(size=(20, 10))
X32 = X64.astype(np.float32)
lda_64 = LatentDirichletAllocation(
n_components=5, random_state=global_random_seed, learning_method=learning_method
).fit(X64)
lda_32 = LatentDirichletAllocation(
n_components=5, random_state=global_random_seed, learning_method=learning_method
).fit(X32)
assert_allclose(lda_32.components_, lda_64.components_)
assert_allclose(lda_32.transform(X32), lda_64.transform(X64))
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/decomposition/tests/test_dict_learning.py | sklearn/decomposition/tests/test_dict_learning.py | import itertools
import warnings
from functools import partial
import numpy as np
import pytest
import sklearn
from sklearn.base import clone
from sklearn.decomposition import (
DictionaryLearning,
MiniBatchDictionaryLearning,
SparseCoder,
dict_learning,
dict_learning_online,
sparse_encode,
)
from sklearn.decomposition._dict_learning import _update_dict
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils import check_array
from sklearn.utils._testing import (
TempMemmap,
assert_allclose,
assert_array_almost_equal,
assert_array_equal,
ignore_warnings,
)
from sklearn.utils.estimator_checks import (
check_transformer_data_not_an_array,
check_transformer_general,
check_transformers_unfitted,
)
from sklearn.utils.parallel import Parallel
# Shared toy data reused by most tests in this module.
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
# TODO: remove mark once loky bug is fixed:
# https://github.com/joblib/loky/issues/458
@pytest.mark.thread_unsafe
def test_sparse_encode_shapes_omp():
rng = np.random.RandomState(0)
algorithms = ["omp", "lasso_lars", "lasso_cd", "lars", "threshold"]
for n_components, n_samples in itertools.product([1, 5], [1, 9]):
X_ = rng.randn(n_samples, n_features)
dictionary = rng.randn(n_components, n_features)
for algorithm, n_jobs in itertools.product(algorithms, [1, 2]):
code = sparse_encode(X_, dictionary, algorithm=algorithm, n_jobs=n_jobs)
assert code.shape == (n_samples, n_components)
def test_dict_learning_shapes():
    # The learned dictionary and the transformed codes must have shapes
    # consistent with n_components.
    for n_components in (5, 1):
        dico = DictionaryLearning(n_components, random_state=0).fit(X)
        assert dico.components_.shape == (n_components, n_features)
    # transform is only checked for the last (single-atom) dictionary
    assert dico.transform(X).shape == (X.shape[0], n_components)
def test_dict_learning_overcomplete():
    # An overcomplete dictionary (more atoms than features) can be learned.
    n_atoms = 12
    model = DictionaryLearning(n_atoms, random_state=0).fit(X)
    assert model.components_.shape == (n_atoms, n_features)
def test_max_iter():
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
x = (
(2 / (np.sqrt(3 * width) * np.pi**0.25))
* (1 - (x - center) ** 2 / width**2)
* np.exp(-((x - center) ** 2) / (2 * width**2))
)
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D**2, axis=1))[:, np.newaxis]
return D
transform_algorithm = "lasso_cd"
resolution = 256
subsampling = 3 # subsampling factor
n_components = resolution // subsampling
# Compute a wavelet dictionary
D_multi = np.r_[
tuple(
ricker_matrix(
width=w, resolution=resolution, n_components=n_components // 5
)
for w in (10, 50, 100, 500)
)
]
X = np.linspace(0, resolution - 1, resolution)
first_quarter = X < resolution / 4
X[first_quarter] = 3.0
X[np.logical_not(first_quarter)] = -1.0
X = X.reshape(1, -1)
# check that the underlying model fails to converge
with pytest.warns(ConvergenceWarning):
model = SparseCoder(
D_multi, transform_algorithm=transform_algorithm, transform_max_iter=1
)
model.fit_transform(X)
# check that the underlying model converges w/o warnings
with warnings.catch_warnings():
warnings.simplefilter("error", ConvergenceWarning)
model = SparseCoder(
D_multi, transform_algorithm=transform_algorithm, transform_max_iter=500
)
model.fit_transform(X)
def test_dict_learning_lars_positive_parameter():
    # positive_code is not supported by the 'lars' method and must raise.
    n_components = 5
    err_msg = "Positive constraint not supported for 'lars' coding method."
    with pytest.raises(ValueError, match=err_msg):
        dict_learning(X, n_components, alpha=1, positive_code=True)
@pytest.mark.parametrize(
"transform_algorithm",
[
"lasso_lars",
"lasso_cd",
"threshold",
],
)
@pytest.mark.parametrize("positive_code", [False, True])
@pytest.mark.parametrize("positive_dict", [False, True])
def test_dict_learning_positivity(transform_algorithm, positive_code, positive_dict):
n_components = 5
dico = DictionaryLearning(
n_components,
transform_algorithm=transform_algorithm,
random_state=0,
positive_code=positive_code,
positive_dict=positive_dict,
fit_algorithm="cd",
).fit(X)
code = dico.transform(X)
if positive_dict:
assert (dico.components_ >= 0).all()
else:
assert (dico.components_ < 0).any()
if positive_code:
assert (code >= 0).all()
else:
assert (code < 0).any()
@pytest.mark.parametrize("positive_dict", [False, True])
def test_dict_learning_lars_dict_positivity(positive_dict):
n_components = 5
dico = DictionaryLearning(
n_components,
transform_algorithm="lars",
random_state=0,
positive_dict=positive_dict,
fit_algorithm="cd",
).fit(X)
if positive_dict:
assert (dico.components_ >= 0).all()
else:
assert (dico.components_ < 0).any()
def test_dict_learning_lars_code_positivity():
n_components = 5
dico = DictionaryLearning(
n_components,
transform_algorithm="lars",
random_state=0,
positive_code=True,
fit_algorithm="cd",
).fit(X)
err_msg = "Positive constraint not supported for '{}' coding method."
err_msg = err_msg.format("lars")
with pytest.raises(ValueError, match=err_msg):
dico.transform(X)
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(
n_components, transform_algorithm="omp", transform_alpha=0.001, random_state=0
)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
assert_array_almost_equal(dico.inverse_transform(code), X)
dico.set_params(transform_algorithm="lasso_lars")
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
assert_array_almost_equal(dico.inverse_transform(code), X, decimal=2)
# test error raised for wrong code size
with pytest.raises(ValueError, match="Expected 12, got 11."):
dico.inverse_transform(code[:, :-1])
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
# TODO: remove mark once loky bug is fixed:
# https://github.com/joblib/loky/issues/458
@pytest.mark.thread_unsafe
def test_dict_learning_reconstruction_parallel():
# regression test that parallel reconstruction works with n_jobs>1
n_components = 12
dico = DictionaryLearning(
n_components,
transform_algorithm="omp",
transform_alpha=0.001,
random_state=0,
n_jobs=4,
)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm="lasso_lars")
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# TODO: remove mark once loky bug is fixed:
# https://github.com/joblib/loky/issues/458
@pytest.mark.thread_unsafe
def test_dict_learning_lassocd_readonly_data():
n_components = 12
with TempMemmap(X) as X_read_only:
dico = DictionaryLearning(
n_components,
transform_algorithm="lasso_cd",
transform_alpha=0.001,
random_state=0,
n_jobs=4,
)
with ignore_warnings(category=ConvergenceWarning):
code = dico.fit(X_read_only).transform(X_read_only)
assert_array_almost_equal(
np.dot(code, dico.components_), X_read_only, decimal=2
)
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(
n_components,
transform_algorithm="lars",
transform_n_nonzero_coefs=3,
random_state=0,
)
code = dico.fit(X).transform(X[np.newaxis, 1])
assert len(np.flatnonzero(code)) == 3
dico.set_params(transform_algorithm="omp")
code = dico.transform(X[np.newaxis, 1])
assert len(np.flatnonzero(code)) == 3
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(
n_components, transform_algorithm="threshold", random_state=0
)
code = dico.fit(X).transform(X)
Xr = dico.inverse_transform(code)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_almost_equal(
split_code[:, :n_components] - split_code[:, n_components:], code
)
Xr2 = dico.inverse_transform(split_code)
assert_array_almost_equal(Xr, Xr2)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(
X,
n_components=n_components,
batch_size=4,
max_iter=10,
method="cd",
random_state=rng,
return_code=True,
)
assert code.shape == (n_samples, n_components)
assert dictionary.shape == (n_components, n_features)
assert np.dot(code, dictionary).shape == X.shape
dictionary = dict_learning_online(
X,
n_components=n_components,
batch_size=4,
max_iter=10,
method="cd",
random_state=rng,
return_code=False,
)
assert dictionary.shape == (n_components, n_features)
def test_dict_learning_online_lars_positive_parameter():
err_msg = "Positive constraint not supported for 'lars' coding method."
with pytest.raises(ValueError, match=err_msg):
dict_learning_online(X, batch_size=4, max_iter=10, positive_code=True)
@pytest.mark.parametrize(
"transform_algorithm",
[
"lasso_lars",
"lasso_cd",
"threshold",
],
)
@pytest.mark.parametrize("positive_code", [False, True])
@pytest.mark.parametrize("positive_dict", [False, True])
def test_minibatch_dictionary_learning_positivity(
transform_algorithm, positive_code, positive_dict
):
n_components = 8
dico = MiniBatchDictionaryLearning(
n_components,
batch_size=4,
max_iter=10,
transform_algorithm=transform_algorithm,
random_state=0,
positive_code=positive_code,
positive_dict=positive_dict,
fit_algorithm="cd",
).fit(X)
code = dico.transform(X)
if positive_dict:
assert (dico.components_ >= 0).all()
else:
assert (dico.components_ < 0).any()
if positive_code:
assert (code >= 0).all()
else:
assert (code < 0).any()
@pytest.mark.parametrize("positive_dict", [False, True])
def test_minibatch_dictionary_learning_lars(positive_dict):
n_components = 8
dico = MiniBatchDictionaryLearning(
n_components,
batch_size=4,
max_iter=10,
transform_algorithm="lars",
random_state=0,
positive_dict=positive_dict,
fit_algorithm="cd",
).fit(X)
if positive_dict:
assert (dico.components_ >= 0).all()
else:
assert (dico.components_ < 0).any()
@pytest.mark.parametrize("positive_code", [False, True])
@pytest.mark.parametrize("positive_dict", [False, True])
def test_dict_learning_online_positivity(positive_code, positive_dict):
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(
X,
n_components=n_components,
batch_size=4,
method="cd",
alpha=1,
random_state=rng,
positive_dict=positive_dict,
positive_code=positive_code,
)
if positive_dict:
assert (dictionary >= 0).all()
else:
assert (dictionary < 0).any()
if positive_code:
assert (code >= 0).all()
else:
assert (code < 0).any()
def test_dict_learning_online_verbosity():
# test verbosity for better coverage
n_components = 5
import sys
from io import StringIO
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
# convergence monitoring verbosity
dico = MiniBatchDictionaryLearning(
n_components, batch_size=4, max_iter=5, verbose=1, tol=0.1, random_state=0
)
dico.fit(X)
dico = MiniBatchDictionaryLearning(
n_components,
batch_size=4,
max_iter=5,
verbose=1,
max_no_improvement=2,
random_state=0,
)
dico.fit(X)
# higher verbosity level
dico = MiniBatchDictionaryLearning(
n_components, batch_size=4, max_iter=5, verbose=2, random_state=0
)
dico.fit(X)
# function API verbosity
dict_learning_online(
X,
n_components=n_components,
batch_size=4,
alpha=1,
verbose=1,
random_state=0,
)
dict_learning_online(
X,
n_components=n_components,
batch_size=4,
alpha=1,
verbose=2,
random_state=0,
)
finally:
sys.stdout = old_stdout
assert dico.components_.shape == (n_components, n_features)
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(
n_components, batch_size=4, max_iter=5, random_state=0
)
dico.fit(X)
assert dico.components_.shape == (n_components, n_features)
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(
n_components, batch_size=4, max_iter=5, random_state=0
).fit(X)
assert dico.components_.shape == (n_components, n_features)
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(
n_components, batch_size=4, max_iter=0, dict_init=V, random_state=0
).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_readonly_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
V.setflags(write=False)
MiniBatchDictionaryLearning(
n_components,
batch_size=4,
max_iter=1,
dict_init=V,
random_state=0,
shuffle=False,
).fit(X)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V**2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(
n_components,
max_iter=10,
batch_size=1,
alpha=1,
shuffle=False,
dict_init=V,
max_no_improvement=None,
tol=0.0,
random_state=0,
).fit(X)
dict2 = MiniBatchDictionaryLearning(
n_components, alpha=1, dict_init=V, random_state=0
)
for i in range(10):
for sample in X:
dict2.partial_fit(sample[np.newaxis, :])
assert not np.all(sparse_encode(X, dict1.components_, alpha=1) == 0)
assert_array_almost_equal(dict1.components_, dict2.components_, decimal=2)
# partial_fit should ignore max_iter (#17433)
assert dict1.n_steps_ == dict2.n_steps_ == 100
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V**2, axis=1)[:, np.newaxis]
for algo in ("lasso_lars", "lasso_cd", "lars", "omp", "threshold"):
code = sparse_encode(X, V, algorithm=algo)
assert code.shape == (n_samples, n_components)
@pytest.mark.parametrize("algo", ["lasso_lars", "lasso_cd", "threshold"])
@pytest.mark.parametrize("positive", [False, True])
def test_sparse_encode_positivity(algo, positive):
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V**2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, algorithm=algo, positive=positive)
if positive:
assert (code >= 0).all()
else:
assert (code < 0).any()
@pytest.mark.parametrize("algo", ["lars", "omp"])
def test_sparse_encode_unavailable_positivity(algo):
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V**2, axis=1)[:, np.newaxis]
err_msg = "Positive constraint not supported for '{}' coding method."
err_msg = err_msg.format(algo)
with pytest.raises(ValueError, match=err_msg):
sparse_encode(X, V, algorithm=algo, positive=True)
def test_sparse_encode_input():
n_components = 100
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V**2, axis=1)[:, np.newaxis]
Xf = check_array(X, order="F")
for algo in ("lasso_lars", "lasso_cd", "lars", "omp", "threshold"):
a = sparse_encode(X, V, algorithm=algo)
b = sparse_encode(Xf, V, algorithm=algo)
assert_array_almost_equal(a, b)
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V**2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert not np.all(code == 0)
assert np.sqrt(np.sum((np.dot(code, V) - X) ** 2)) < 0.1
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm="omp", n_nonzero_coefs=None)
assert code.shape == (100, 2)
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V**2, axis=1)[:, np.newaxis]
coder = SparseCoder(
dictionary=V, transform_algorithm="lasso_lars", transform_alpha=0.001
)
code = coder.fit_transform(X)
Xr = coder.inverse_transform(code)
assert not np.all(code == 0)
assert np.sqrt(np.sum((np.dot(code, V) - X) ** 2)) < 0.1
np.testing.assert_allclose(Xr, np.dot(code, V))
def test_sparse_coder_estimator_clone():
n_components = 12
rng = np.random.RandomState(0)
V = rng.normal(size=(n_components, n_features)) # random init
V /= np.sum(V**2, axis=1)[:, np.newaxis]
coder = SparseCoder(
dictionary=V, transform_algorithm="lasso_lars", transform_alpha=0.001
)
cloned = clone(coder)
assert id(cloned) != id(coder)
np.testing.assert_allclose(cloned.dictionary, coder.dictionary)
assert id(cloned.dictionary) != id(coder.dictionary)
data = np.random.rand(n_samples, n_features).astype(np.float32)
np.testing.assert_allclose(cloned.transform(data), coder.transform(data))
# TODO: remove mark once loky bug is fixed:
# https://github.com/joblib/loky/issues/458
@pytest.mark.thread_unsafe
def test_sparse_coder_parallel_mmap():
# Non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/5956
# Test that SparseCoder does not error by passing reading only
# arrays to child processes
rng = np.random.RandomState(777)
n_components, n_features = 40, 64
init_dict = rng.rand(n_components, n_features)
# Ensure that `data` is >2M. Joblib memory maps arrays
# if they are larger than 1MB. The 4 accounts for float32
# data type
n_samples = int(2e6) // (4 * n_features)
data = np.random.rand(n_samples, n_features).astype(np.float32)
sc = SparseCoder(init_dict, transform_algorithm="omp", n_jobs=2)
sc.fit_transform(data)
def test_sparse_coder_common_transformer():
rng = np.random.RandomState(777)
n_components, n_features = 40, 3
init_dict = rng.rand(n_components, n_features)
sc = SparseCoder(init_dict)
check_transformer_data_not_an_array(sc.__class__.__name__, sc)
check_transformer_general(sc.__class__.__name__, sc)
check_transformer_general_memmap = partial(
check_transformer_general, readonly_memmap=True
)
check_transformer_general_memmap(sc.__class__.__name__, sc)
check_transformers_unfitted(sc.__class__.__name__, sc)
def test_sparse_coder_n_features_in():
d = np.array([[1, 2, 3], [1, 2, 3]])
X = np.array([[1, 2, 3]])
sc = SparseCoder(d)
sc.fit(X)
assert sc.n_features_in_ == d.shape[1]
def test_sparse_encoder_feature_number_error():
n_components = 10
rng = np.random.RandomState(0)
D = rng.uniform(size=(n_components, n_features))
X = rng.uniform(size=(n_samples, n_features + 1))
coder = SparseCoder(D)
with pytest.raises(
ValueError, match="Dictionary and X have different numbers of features"
):
coder.fit(X)
def test_update_dict():
# Check the dict update in batch mode vs online mode
# Non-regression test for #4866
rng = np.random.RandomState(0)
code = np.array([[0.5, -0.5], [0.1, 0.9]])
dictionary = np.array([[1.0, 0.0], [0.6, 0.8]])
X = np.dot(code, dictionary) + rng.randn(2, 2)
# full batch update
newd_batch = dictionary.copy()
_update_dict(newd_batch, X, code)
# online update
A = np.dot(code.T, code)
B = np.dot(X.T, code)
newd_online = dictionary.copy()
_update_dict(newd_online, X, code, A, B)
assert_allclose(newd_batch, newd_online)
@pytest.mark.parametrize(
"algorithm", ("lasso_lars", "lasso_cd", "lars", "threshold", "omp")
)
@pytest.mark.parametrize("data_type", (np.float32, np.float64))
# Note: do not check integer input because `lasso_lars` and `lars` fail with
# `ValueError` in `_lars_path_solver`
def test_sparse_encode_dtype_match(data_type, algorithm):
n_components = 6
rng = np.random.RandomState(0)
dictionary = rng.randn(n_components, n_features)
code = sparse_encode(
X.astype(data_type), dictionary.astype(data_type), algorithm=algorithm
)
assert code.dtype == data_type
@pytest.mark.parametrize(
"algorithm", ("lasso_lars", "lasso_cd", "lars", "threshold", "omp")
)
def test_sparse_encode_numerical_consistency(algorithm):
# verify numerical consistency among np.float32 and np.float64
rtol = 1e-4
n_components = 6
rng = np.random.RandomState(0)
dictionary = rng.randn(n_components, n_features)
code_32 = sparse_encode(
X.astype(np.float32), dictionary.astype(np.float32), algorithm=algorithm
)
code_64 = sparse_encode(
X.astype(np.float64), dictionary.astype(np.float64), algorithm=algorithm
)
assert_allclose(code_32, code_64, rtol=rtol)
@pytest.mark.parametrize(
"transform_algorithm", ("lasso_lars", "lasso_cd", "lars", "threshold", "omp")
)
@pytest.mark.parametrize("data_type", (np.float32, np.float64))
# Note: do not check integer input because `lasso_lars` and `lars` fail with
# `ValueError` in `_lars_path_solver`
def test_sparse_coder_dtype_match(data_type, transform_algorithm):
# Verify preserving dtype for transform in sparse coder
n_components = 6
rng = np.random.RandomState(0)
dictionary = rng.randn(n_components, n_features)
coder = SparseCoder(
dictionary.astype(data_type), transform_algorithm=transform_algorithm
)
code = coder.transform(X.astype(data_type))
assert code.dtype == data_type
@pytest.mark.parametrize("fit_algorithm", ("lars", "cd"))
@pytest.mark.parametrize(
"transform_algorithm", ("lasso_lars", "lasso_cd", "lars", "threshold", "omp")
)
@pytest.mark.parametrize(
"data_type, expected_type",
(
(np.float32, np.float32),
(np.float64, np.float64),
(np.int32, np.float64),
(np.int64, np.float64),
),
)
def test_dictionary_learning_dtype_match(
data_type,
expected_type,
fit_algorithm,
transform_algorithm,
):
# Verify preserving dtype for fit and transform in dictionary learning class
dict_learner = DictionaryLearning(
n_components=8,
fit_algorithm=fit_algorithm,
transform_algorithm=transform_algorithm,
random_state=0,
)
dict_learner.fit(X.astype(data_type))
assert dict_learner.components_.dtype == expected_type
assert dict_learner.transform(X.astype(data_type)).dtype == expected_type
@pytest.mark.parametrize("fit_algorithm", ("lars", "cd"))
@pytest.mark.parametrize(
"transform_algorithm", ("lasso_lars", "lasso_cd", "lars", "threshold", "omp")
)
@pytest.mark.parametrize(
"data_type, expected_type",
(
(np.float32, np.float32),
(np.float64, np.float64),
(np.int32, np.float64),
(np.int64, np.float64),
),
)
def test_minibatch_dictionary_learning_dtype_match(
data_type,
expected_type,
fit_algorithm,
transform_algorithm,
):
# Verify preserving dtype for fit and transform in minibatch dictionary learning
dict_learner = MiniBatchDictionaryLearning(
n_components=8,
batch_size=10,
fit_algorithm=fit_algorithm,
transform_algorithm=transform_algorithm,
max_iter=100,
tol=1e-1,
random_state=0,
)
dict_learner.fit(X.astype(data_type))
assert dict_learner.components_.dtype == expected_type
assert dict_learner.transform(X.astype(data_type)).dtype == expected_type
assert dict_learner._A.dtype == expected_type
assert dict_learner._B.dtype == expected_type
@pytest.mark.parametrize("method", ("lars", "cd"))
@pytest.mark.parametrize(
"data_type, expected_type",
(
(np.float32, np.float32),
(np.float64, np.float64),
(np.int32, np.float64),
(np.int64, np.float64),
),
)
def test_dict_learning_dtype_match(data_type, expected_type, method):
# Verify output matrix dtype
rng = np.random.RandomState(0)
n_components = 8
code, dictionary, _ = dict_learning(
X.astype(data_type),
n_components=n_components,
alpha=1,
random_state=rng,
method=method,
)
assert code.dtype == expected_type
assert dictionary.dtype == expected_type
@pytest.mark.parametrize("method", ("lars", "cd"))
def test_dict_learning_numerical_consistency(method):
# verify numerically consistent among np.float32 and np.float64
rtol = 1e-4
n_components = 4
alpha = 2
U_64, V_64, _ = dict_learning(
X.astype(np.float64),
n_components=n_components,
alpha=alpha,
random_state=0,
method=method,
)
U_32, V_32, _ = dict_learning(
X.astype(np.float32),
n_components=n_components,
alpha=alpha,
random_state=0,
method=method,
)
# Optimal solution (U*, V*) is not unique.
# If (U*, V*) is optimal solution, (-U*,-V*) is also optimal,
# and (column permutated U*, row permutated V*) are also optional
# as long as holding UV.
# So here UV, ||U||_1,1 and sum(||V_k||_2^2) are verified
# instead of comparing directly U and V.
assert_allclose(np.matmul(U_64, V_64), np.matmul(U_32, V_32), rtol=rtol)
assert_allclose(np.sum(np.abs(U_64)), np.sum(np.abs(U_32)), rtol=rtol)
assert_allclose(np.sum(V_64**2), np.sum(V_32**2), rtol=rtol)
# verify an obtained solution is not degenerate
assert np.mean(U_64 != 0.0) > 0.05
assert np.count_nonzero(U_64 != 0.0) == np.count_nonzero(U_32 != 0.0)
@pytest.mark.parametrize("method", ("lars", "cd"))
@pytest.mark.parametrize(
"data_type, expected_type",
(
(np.float32, np.float32),
(np.float64, np.float64),
(np.int32, np.float64),
(np.int64, np.float64),
),
)
def test_dict_learning_online_dtype_match(data_type, expected_type, method):
# Verify output matrix dtype
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(
X.astype(data_type),
n_components=n_components,
alpha=1,
batch_size=10,
random_state=rng,
method=method,
)
assert code.dtype == expected_type
assert dictionary.dtype == expected_type
@pytest.mark.parametrize("method", ("lars", "cd"))
def test_dict_learning_online_numerical_consistency(method):
# verify numerically consistent among np.float32 and np.float64
rtol = 1e-4
n_components = 4
alpha = 1
U_64, V_64 = dict_learning_online(
X.astype(np.float64),
n_components=n_components,
max_iter=1_000,
alpha=alpha,
batch_size=10,
random_state=0,
method=method,
tol=0.0,
max_no_improvement=None,
)
U_32, V_32 = dict_learning_online(
X.astype(np.float32),
n_components=n_components,
max_iter=1_000,
alpha=alpha,
batch_size=10,
random_state=0,
method=method,
tol=0.0,
max_no_improvement=None,
)
# Optimal solution (U*, V*) is not unique.
# If (U*, V*) is optimal solution, (-U*,-V*) is also optimal,
# and (column permutated U*, row permutated V*) are also optional
# as long as holding UV.
# So here UV, ||U||_1,1 and sum(||V_k||_2) are verified
# instead of comparing directly U and V.
assert_allclose(np.matmul(U_64, V_64), np.matmul(U_32, V_32), rtol=rtol)
assert_allclose(np.sum(np.abs(U_64)), np.sum(np.abs(U_32)), rtol=rtol)
assert_allclose(np.sum(V_64**2), np.sum(V_32**2), rtol=rtol)
# verify an obtained solution is not degenerate
assert np.mean(U_64 != 0.0) > 0.05
assert np.count_nonzero(U_64 != 0.0) == np.count_nonzero(U_32 != 0.0)
@pytest.mark.parametrize(
"estimator",
[
SparseCoder(rng_global.uniform(size=(n_features, n_features))),
DictionaryLearning(),
MiniBatchDictionaryLearning(batch_size=4, max_iter=10),
],
ids=lambda x: x.__class__.__name__,
)
def test_get_feature_names_out(estimator):
"""Check feature names for dict learning estimators."""
estimator.fit(X)
n_components = X.shape[1]
feature_names_out = estimator.get_feature_names_out()
estimator_name = estimator.__class__.__name__.lower()
assert_array_equal(
feature_names_out,
[f"{estimator_name}{i}" for i in range(n_components)],
)
# TODO: remove mark once loky bug is fixed:
# https://github.com/joblib/loky/issues/458
@pytest.mark.thread_unsafe
def test_cd_work_on_joblib_memmapped_data(monkeypatch):
monkeypatch.setattr(
sklearn.decomposition._dict_learning,
"Parallel",
partial(Parallel, max_nbytes=100),
)
rng = np.random.RandomState(0)
X_train = rng.randn(10, 10)
dict_learner = DictionaryLearning(
n_components=5,
random_state=0,
n_jobs=2,
fit_algorithm="cd",
max_iter=50,
verbose=True,
)
# This must run and complete without error.
dict_learner.fit(X_train)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/experimental/enable_halving_search_cv.py | sklearn/experimental/enable_halving_search_cv.py | """Enables Successive Halving search-estimators
The API and results of these estimators might change without any deprecation
cycle.
Importing this file dynamically sets the
:class:`~sklearn.model_selection.HalvingRandomSearchCV` and
:class:`~sklearn.model_selection.HalvingGridSearchCV` as attributes of the
`model_selection` module::
>>> # explicitly require this experimental feature
>>> from sklearn.experimental import enable_halving_search_cv # noqa
>>> # now you can import normally from model_selection
>>> from sklearn.model_selection import HalvingRandomSearchCV
>>> from sklearn.model_selection import HalvingGridSearchCV
The ``# noqa`` comment comment can be removed: it just tells linters like
flake8 to ignore the import, which appears as unused.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from sklearn import model_selection
from sklearn.model_selection._search_successive_halving import (
HalvingGridSearchCV,
HalvingRandomSearchCV,
)
# use settattr to avoid mypy errors when monkeypatching
setattr(model_selection, "HalvingRandomSearchCV", HalvingRandomSearchCV)
setattr(model_selection, "HalvingGridSearchCV", HalvingGridSearchCV)
model_selection.__all__ += ["HalvingRandomSearchCV", "HalvingGridSearchCV"]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/experimental/enable_iterative_imputer.py | sklearn/experimental/enable_iterative_imputer.py | """Enables IterativeImputer
The API and results of this estimator might change without any deprecation
cycle.
Importing this file dynamically sets :class:`~sklearn.impute.IterativeImputer`
as an attribute of the impute module::
>>> # explicitly require this experimental feature
>>> from sklearn.experimental import enable_iterative_imputer # noqa
>>> # now you can import normally from impute
>>> from sklearn.impute import IterativeImputer
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from sklearn import impute
from sklearn.impute._iterative import IterativeImputer
# use settattr to avoid mypy errors when monkeypatching
setattr(impute, "IterativeImputer", IterativeImputer)
impute.__all__ += ["IterativeImputer"]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/experimental/enable_hist_gradient_boosting.py | sklearn/experimental/enable_hist_gradient_boosting.py | """This is now a no-op and can be safely removed from your code.
It used to enable the use of
:class:`~sklearn.ensemble.HistGradientBoostingClassifier` and
:class:`~sklearn.ensemble.HistGradientBoostingRegressor` when they were still
:term:`experimental`, but these estimators are now stable and can be imported
normally from `sklearn.ensemble`.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# Don't remove this file, we don't want to break users code just because the
# feature isn't experimental anymore.
import warnings
warnings.warn(
"Since version 1.0, "
"it is not needed to import enable_hist_gradient_boosting anymore. "
"HistGradientBoostingClassifier and HistGradientBoostingRegressor are now "
"stable and can be normally imported from sklearn.ensemble."
)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/experimental/__init__.py | sklearn/experimental/__init__.py | """Importable modules that enable the use of experimental features or estimators.
.. warning::
The features and estimators that are experimental aren't subject to
deprecation cycles. Use them at your own risks!
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/experimental/tests/test_enable_successive_halving.py | sklearn/experimental/tests/test_enable_successive_halving.py | """Tests for making sure experimental imports work as expected."""
import textwrap
import pytest
from sklearn.utils._testing import assert_run_python_script_without_output
from sklearn.utils.fixes import _IS_WASM
@pytest.mark.xfail(_IS_WASM, reason="cannot start subprocess")
def test_imports_strategies():
# Make sure different import strategies work or fail as expected.
# Since Python caches the imported modules, we need to run a child process
# for every test case. Else, the tests would not be independent
# (manually removing the imports from the cache (sys.modules) is not
# recommended and can lead to many complications).
pattern = "Halving(Grid|Random)SearchCV is experimental"
good_import = """
from sklearn.experimental import enable_halving_search_cv
from sklearn.model_selection import HalvingGridSearchCV
from sklearn.model_selection import HalvingRandomSearchCV
"""
assert_run_python_script_without_output(
textwrap.dedent(good_import), pattern=pattern
)
good_import_with_model_selection_first = """
import sklearn.model_selection
from sklearn.experimental import enable_halving_search_cv
from sklearn.model_selection import HalvingGridSearchCV
from sklearn.model_selection import HalvingRandomSearchCV
"""
assert_run_python_script_without_output(
textwrap.dedent(good_import_with_model_selection_first),
pattern=pattern,
)
bad_imports = f"""
import pytest
with pytest.raises(ImportError, match={pattern!r}):
from sklearn.model_selection import HalvingGridSearchCV
import sklearn.experimental
with pytest.raises(ImportError, match={pattern!r}):
from sklearn.model_selection import HalvingRandomSearchCV
"""
assert_run_python_script_without_output(
textwrap.dedent(bad_imports),
pattern=pattern,
)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/experimental/tests/test_enable_hist_gradient_boosting.py | sklearn/experimental/tests/test_enable_hist_gradient_boosting.py | """Tests for making sure experimental imports work as expected."""
import textwrap
import pytest
from sklearn.utils._testing import assert_run_python_script_without_output
from sklearn.utils.fixes import _IS_WASM
@pytest.mark.xfail(_IS_WASM, reason="cannot start subprocess")
def test_import_raises_warning():
code = """
import pytest
with pytest.warns(UserWarning, match="it is not needed to import"):
from sklearn.experimental import enable_hist_gradient_boosting # noqa
"""
pattern = "it is not needed to import enable_hist_gradient_boosting anymore"
assert_run_python_script_without_output(textwrap.dedent(code), pattern=pattern)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/experimental/tests/test_enable_iterative_imputer.py | sklearn/experimental/tests/test_enable_iterative_imputer.py | """Tests for making sure experimental imports work as expected."""
import textwrap
import pytest
from sklearn.utils._testing import assert_run_python_script_without_output
from sklearn.utils.fixes import _IS_WASM
@pytest.mark.xfail(_IS_WASM, reason="cannot start subprocess")
def test_imports_strategies():
# Make sure different import strategies work or fail as expected.
# Since Python caches the imported modules, we need to run a child process
# for every test case. Else, the tests would not be independent
# (manually removing the imports from the cache (sys.modules) is not
# recommended and can lead to many complications).
pattern = "IterativeImputer is experimental"
good_import = """
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
"""
assert_run_python_script_without_output(
textwrap.dedent(good_import), pattern=pattern
)
good_import_with_ensemble_first = """
import sklearn.ensemble
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
"""
assert_run_python_script_without_output(
textwrap.dedent(good_import_with_ensemble_first),
pattern=pattern,
)
bad_imports = f"""
import pytest
with pytest.raises(ImportError, match={pattern!r}):
from sklearn.impute import IterativeImputer
import sklearn.experimental
with pytest.raises(ImportError, match={pattern!r}):
from sklearn.impute import IterativeImputer
"""
assert_run_python_script_without_output(
textwrap.dedent(bad_imports),
pattern=pattern,
)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/experimental/tests/__init__.py | sklearn/experimental/tests/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/manifold/_mds.py | sklearn/manifold/_mds.py | """
Multi-dimensional Scaling (MDS).
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from numbers import Integral, Real
import numpy as np
from joblib import effective_n_jobs
from sklearn.base import BaseEstimator, _fit_context
from sklearn.isotonic import IsotonicRegression
from sklearn.manifold import ClassicalMDS
from sklearn.metrics import euclidean_distances, pairwise_distances
from sklearn.utils import check_array, check_random_state, check_symmetric
from sklearn.utils._param_validation import (
Hidden,
Interval,
StrOptions,
validate_params,
)
from sklearn.utils.parallel import Parallel, delayed
from sklearn.utils.validation import validate_data
def _smacof_single(
dissimilarities,
metric=True,
n_components=2,
init=None,
max_iter=300,
verbose=0,
eps=1e-6,
random_state=None,
normalized_stress=False,
):
"""Computes multidimensional scaling using SMACOF algorithm.
Parameters
----------
dissimilarities : ndarray of shape (n_samples, n_samples)
Pairwise dissimilarities between the points. Must be symmetric.
metric : bool, default=True
Compute metric or nonmetric SMACOF algorithm.
When ``False`` (i.e. non-metric MDS), dissimilarities with 0 are considered as
missing values.
n_components : int, default=2
Number of dimensions in which to immerse the dissimilarities. If an
``init`` array is provided, this option is overridden and the shape of
``init`` is used to determine the dimensionality of the embedding
space.
init : ndarray of shape (n_samples, n_components), default=None
Starting configuration of the embedding to initialize the algorithm. By
default, the algorithm is initialized with a randomly chosen array.
max_iter : int, default=300
Maximum number of iterations of the SMACOF algorithm for a single run.
verbose : int, default=0
Level of verbosity.
eps : float, default=1e-6
The tolerance with respect to stress (normalized by the sum of squared
embedding distances) at which to declare convergence.
.. versionchanged:: 1.7
The default value for `eps` has changed from 1e-3 to 1e-6, as a result
of a bugfix in the computation of the convergence criterion.
random_state : int, RandomState instance or None, default=None
Determines the random number generator used to initialize the centers.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
normalized_stress : bool, default=False
Whether to return normalized stress value (Stress-1) instead of raw
stress.
.. versionadded:: 1.2
.. versionchanged:: 1.7
Normalized stress is now supported for metric MDS as well.
Returns
-------
X : ndarray of shape (n_samples, n_components)
Coordinates of the points in a ``n_components``-space.
stress : float
The final value of the stress (sum of squared distance of the
disparities and the distances for all constrained points).
If `normalized_stress=True`, returns Stress-1.
A value of 0 indicates "perfect" fit, 0.025 excellent, 0.05 good,
0.1 fair, and 0.2 poor [1]_.
n_iter : int
The number of iterations corresponding to the best stress.
References
----------
.. [1] "Nonmetric multidimensional scaling: a numerical method" Kruskal, J.
Psychometrika, 29 (1964)
.. [2] "Multidimensional scaling by optimizing goodness of fit to a nonmetric
hypothesis" Kruskal, J. Psychometrika, 29, (1964)
.. [3] "Modern Multidimensional Scaling - Theory and Applications" Borg, I.;
Groenen P. Springer Series in Statistics (1997)
"""
dissimilarities = check_symmetric(dissimilarities, raise_exception=True)
n_samples = dissimilarities.shape[0]
random_state = check_random_state(random_state)
dissimilarities_flat = ((1 - np.tri(n_samples)) * dissimilarities).ravel()
dissimilarities_flat_w = dissimilarities_flat[dissimilarities_flat != 0]
if init is None:
# Randomly choose initial configuration
X = random_state.uniform(size=n_samples * n_components)
X = X.reshape((n_samples, n_components))
else:
# overrides the parameter p
n_components = init.shape[1]
if n_samples != init.shape[0]:
raise ValueError(
"init matrix should be of shape (%d, %d)" % (n_samples, n_components)
)
X = init
distances = euclidean_distances(X)
# Out of bounds condition cannot happen because we are transforming
# the training set here, but does sometimes get triggered in
# practice due to machine precision issues. Hence "clip".
ir = IsotonicRegression(out_of_bounds="clip")
old_stress = None
for it in range(max_iter):
# Compute distance and monotonic regression
if metric:
disparities = dissimilarities
else:
distances_flat = distances.ravel()
# dissimilarities with 0 are considered as missing values
distances_flat_w = distances_flat[dissimilarities_flat != 0]
# Compute the disparities using isotonic regression.
# For the first SMACOF iteration, use scaled original dissimilarities.
# (This choice follows the R implementation described in this paper:
# https://www.jstatsoft.org/article/view/v102i10)
if it < 1:
disparities_flat = dissimilarities_flat_w
else:
disparities_flat = ir.fit_transform(
dissimilarities_flat_w, distances_flat_w
)
disparities = np.zeros_like(distances_flat)
disparities[dissimilarities_flat != 0] = disparities_flat
disparities = disparities.reshape((n_samples, n_samples))
disparities *= np.sqrt(
(n_samples * (n_samples - 1) / 2) / (disparities**2).sum()
)
disparities = disparities + disparities.T
# Update X using the Guttman transform
distances[distances == 0] = 1e-5
ratio = disparities / distances
B = -ratio
B[np.arange(len(B)), np.arange(len(B))] += ratio.sum(axis=1)
X = 1.0 / n_samples * np.dot(B, X)
# Compute stress
distances = euclidean_distances(X)
stress = ((distances.ravel() - disparities.ravel()) ** 2).sum() / 2
if verbose >= 2: # pragma: no cover
print(f"Iteration {it}, stress {stress:.4f}")
if old_stress is not None:
sum_squared_distances = (distances.ravel() ** 2).sum()
if ((old_stress - stress) / (sum_squared_distances / 2)) < eps:
if verbose: # pragma: no cover
print(f"Convergence criterion reached (iteration {it}).")
break
old_stress = stress
if normalized_stress:
sum_squared_distances = (distances.ravel() ** 2).sum()
stress = np.sqrt(stress / (sum_squared_distances / 2))
return X, stress, it + 1
# TODO(1.9): change default `n_init` to 1, see PR #31117
@validate_params(
    {
        "dissimilarities": ["array-like"],
        "metric": ["boolean"],
        "n_components": [Interval(Integral, 1, None, closed="left")],
        "init": ["array-like", None],
        "n_init": [Interval(Integral, 1, None, closed="left"), StrOptions({"warn"})],
        "n_jobs": [Integral, None],
        "max_iter": [Interval(Integral, 1, None, closed="left")],
        "verbose": ["verbose"],
        "eps": [Interval(Real, 0, None, closed="left")],
        "random_state": ["random_state"],
        "return_n_iter": ["boolean"],
        "normalized_stress": ["boolean", StrOptions({"auto"})],
    },
    prefer_skip_nested_validation=True,
)
def smacof(
    dissimilarities,
    *,
    metric=True,
    n_components=2,
    init=None,
    n_init="warn",
    n_jobs=None,
    max_iter=300,
    verbose=0,
    eps=1e-6,
    random_state=None,
    return_n_iter=False,
    normalized_stress="auto",
):
    """Compute multidimensional scaling using the SMACOF algorithm.

    The SMACOF (Scaling by MAjorizing a COmplicated Function) algorithm is a
    multidimensional scaling algorithm which minimizes an objective function
    (the *stress*) using a majorization technique. Stress majorization, also
    known as the Guttman Transform, guarantees a monotone convergence of
    stress, and is more powerful than traditional techniques such as gradient
    descent.

    The SMACOF algorithm for metric MDS can be summarized by the following
    steps:

    1. Set an initial start configuration, randomly or not.
    2. Compute the stress
    3. Compute the Guttman Transform
    4. Iterate 2 and 3 until convergence.

    The nonmetric algorithm adds a monotonic regression step before computing
    the stress.

    Parameters
    ----------
    dissimilarities : array-like of shape (n_samples, n_samples)
        Pairwise dissimilarities between the points. Must be symmetric.

    metric : bool, default=True
        Compute metric or nonmetric SMACOF algorithm.
        When ``False`` (i.e. non-metric MDS), dissimilarities with 0 are
        considered as missing values.

    n_components : int, default=2
        Number of dimensions in which to immerse the dissimilarities. If an
        ``init`` array is provided, this option is overridden and the shape of
        ``init`` is used to determine the dimensionality of the embedding
        space.

    init : array-like of shape (n_samples, n_components), default=None
        Starting configuration of the embedding to initialize the algorithm. By
        default, the algorithm is initialized with a randomly chosen array.

    n_init : int, default=8
        Number of times the SMACOF algorithm will be run with different
        initializations. The final results will be the best output of the runs,
        determined by the run with the smallest final stress. If ``init`` is
        provided, this option is overridden and a single run is performed.

        .. versionchanged:: 1.9
            The default value for `n_init` will change from 8 to 1 in version 1.9.

    n_jobs : int, default=None
        The number of jobs to use for the computation. If multiple
        initializations are used (``n_init``), each run of the algorithm is
        computed in parallel.

        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    max_iter : int, default=300
        Maximum number of iterations of the SMACOF algorithm for a single run.

    verbose : int, default=0
        Level of verbosity.

    eps : float, default=1e-6
        The tolerance with respect to stress (normalized by the sum of squared
        embedding distances) at which to declare convergence.

        .. versionchanged:: 1.7
            The default value for `eps` has changed from 1e-3 to 1e-6, as a result
            of a bugfix in the computation of the convergence criterion.

    random_state : int, RandomState instance or None, default=None
        Determines the random number generator used to initialize the centers.
        Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.

    return_n_iter : bool, default=False
        Whether or not to return the number of iterations.

    normalized_stress : bool or "auto", default="auto"
        Whether to return normalized stress value (Stress-1) instead of raw
        stress. By default, metric MDS returns raw stress while non-metric MDS
        returns normalized stress.

        .. versionadded:: 1.2

        .. versionchanged:: 1.4
            The default value changed from `False` to `"auto"` in version 1.4.

        .. versionchanged:: 1.7
            Normalized stress is now supported for metric MDS as well.

    Returns
    -------
    X : ndarray of shape (n_samples, n_components)
        Coordinates of the points in a ``n_components``-space.

    stress : float
        The final value of the stress (sum of squared distance of the
        disparities and the distances for all constrained points).
        If `normalized_stress=True`, returns Stress-1.
        A value of 0 indicates "perfect" fit, 0.025 excellent, 0.05 good,
        0.1 fair, and 0.2 poor [1]_.

    n_iter : int
        The number of iterations corresponding to the best stress. Returned
        only if ``return_n_iter`` is set to ``True``.

    References
    ----------
    .. [1] "Nonmetric multidimensional scaling: a numerical method" Kruskal, J.
           Psychometrika, 29 (1964)

    .. [2] "Multidimensional scaling by optimizing goodness of fit to a nonmetric
           hypothesis" Kruskal, J. Psychometrika, 29, (1964)

    .. [3] "Modern Multidimensional Scaling - Theory and Applications" Borg, I.;
           Groenen P. Springer Series in Statistics (1997)

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.manifold import smacof
    >>> from sklearn.metrics import euclidean_distances
    >>> X = np.array([[0, 1, 2], [1, 0, 3], [2, 3, 0]])
    >>> dissimilarities = euclidean_distances(X)
    >>> Z, stress = smacof(
    ...     dissimilarities, n_components=2, n_init=1, eps=1e-6, random_state=42
    ... )
    >>> Z.shape
    (3, 2)
    >>> np.round(stress, 6).item()
    3.2e-05
    """
    # TODO(1.9): remove this warning branch once `n_init=1` becomes the default.
    if n_init == "warn":
        warnings.warn(
            "The default value of `n_init` will change from 8 to 1 in 1.9.",
            FutureWarning,
        )
        n_init = 8
    dissimilarities = check_array(dissimilarities)
    random_state = check_random_state(random_state)

    # Convention: metric MDS reports raw stress, non-metric MDS reports
    # Stress-1 (see `normalized_stress` in the docstring).
    if normalized_stress == "auto":
        normalized_stress = not metric

    if hasattr(init, "__array__"):
        # Copy so the caller's array is never mutated by the solver.
        init = np.asarray(init).copy()
        # With a fixed starting configuration every restart would be
        # identical, so force a single run.
        if not n_init == 1:
            warnings.warn(
                "Explicit initial positions passed: "
                "performing only one init of the MDS instead of %d" % n_init
            )
            n_init = 1

    best_pos, best_stress = None, None
    if effective_n_jobs(n_jobs) == 1:
        # Sequential path: the shared RandomState is consumed across runs,
        # so each restart gets a different random initialization.
        for it in range(n_init):
            pos, stress, n_iter_ = _smacof_single(
                dissimilarities,
                metric=metric,
                n_components=n_components,
                init=init,
                max_iter=max_iter,
                verbose=verbose,
                eps=eps,
                random_state=random_state,
                normalized_stress=normalized_stress,
            )
            # Keep the run with the smallest final stress.
            if best_stress is None or stress < best_stress:
                best_stress = stress
                best_pos = pos.copy()
                best_iter = n_iter_
    else:
        # Parallel path: draw one integer seed per run up front so results
        # do not depend on worker scheduling.
        seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
        results = Parallel(n_jobs=n_jobs, verbose=max(verbose - 1, 0))(
            delayed(_smacof_single)(
                dissimilarities,
                metric=metric,
                n_components=n_components,
                init=init,
                max_iter=max_iter,
                verbose=verbose,
                eps=eps,
                random_state=seed,
                normalized_stress=normalized_stress,
            )
            for seed in seeds
        )
        positions, stress, n_iters = zip(*results)
        best = np.argmin(stress)
        best_stress = stress[best]
        best_pos = positions[best]
        best_iter = n_iters[best]

    if return_n_iter:
        return best_pos, best_stress, best_iter
    else:
        return best_pos, best_stress
# TODO(1.9): change default `n_init` to 1, see PR #31117
# TODO(1.10): change default `init` to "classical_mds", see PR #32229
# TODO(1.10): drop support for boolean `metric`, see PR #32229
# TODO(1.10): drop support for `dissimilarity`, see PR #32229
class MDS(BaseEstimator):
    """Multidimensional scaling.

    Read more in the :ref:`User Guide <multidimensional_scaling>`.

    Parameters
    ----------
    n_components : int, default=2
        Number of dimensions in which to immerse the dissimilarities.

    metric_mds : bool, default=True
        If ``True``, perform metric MDS; otherwise, perform nonmetric MDS.
        When ``False`` (i.e. non-metric MDS), dissimilarities with 0 are
        considered as missing values.

        .. versionchanged:: 1.8
            The parameter `metric` was renamed into `metric_mds`.

    n_init : int, default=4
        Number of times the SMACOF algorithm will be run with different
        initializations. The final results will be the best output of the runs,
        determined by the run with the smallest final stress.

        .. versionchanged:: 1.9
            The default value for `n_init` will change from 4 to 1 in version 1.9.

    init : {'random', 'classical_mds'}, default='random'
        The initialization approach. If `random`, random initialization is used.
        If `classical_mds`, then classical MDS is run and used as initialization
        for MDS (in this case, the value of `n_init` is ignored).

        .. versionadded:: 1.8

        .. versionchanged:: 1.10
            The default value for `init` will change to `classical_mds`.

    max_iter : int, default=300
        Maximum number of iterations of the SMACOF algorithm for a single run.

    verbose : int, default=0
        Level of verbosity.

    eps : float, default=1e-6
        The tolerance with respect to stress (normalized by the sum of squared
        embedding distances) at which to declare convergence.

        .. versionchanged:: 1.7
            The default value for `eps` has changed from 1e-3 to 1e-6, as a result
            of a bugfix in the computation of the convergence criterion.

    n_jobs : int, default=None
        The number of jobs to use for the computation. If multiple
        initializations are used (``n_init``), each run of the algorithm is
        computed in parallel.

        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    random_state : int, RandomState instance or None, default=None
        Determines the random number generator used to initialize the centers.
        Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.

    dissimilarity : {'euclidean', 'precomputed'}
        Dissimilarity measure to use:

        - 'euclidean':
            Pairwise Euclidean distances between points in the dataset.

        - 'precomputed':
            Pre-computed dissimilarities are passed directly to ``fit`` and
            ``fit_transform``.

        .. deprecated:: 1.8
            `dissimilarity` was renamed `metric` in 1.8 and will be removed in 1.10.

    metric : str or callable, default='euclidean'
        Metric to use for dissimilarity computation. Default is "euclidean".

        If metric is a string, it must be one of the options allowed by
        `scipy.spatial.distance.pdist` for its metric parameter, or a metric
        listed in :func:`sklearn.metrics.pairwise.distance_metrics`

        If metric is "precomputed", X is assumed to be a distance matrix and
        must be square during fit.

        If metric is a callable function, it takes two arrays representing 1D
        vectors as inputs and must return one value indicating the distance
        between those vectors. This works for Scipy's metrics, but is less
        efficient than passing the metric name as a string.

        .. versionchanged:: 1.8
            Prior to 1.8, `metric=True/False` was used to select metric/non-metric
            MDS, which is now the role of `metric_mds`. The support for ``True``
            and ``False`` will be dropped in version 1.10, use `metric_mds` instead.

    metric_params : dict, default=None
        Additional keyword arguments for the dissimilarity computation.

        .. versionadded:: 1.8

    normalized_stress : bool or "auto", default="auto"
        Whether to return normalized stress value (Stress-1) instead of raw
        stress. By default, metric MDS returns raw stress while non-metric MDS
        returns normalized stress.

        .. versionadded:: 1.2

        .. versionchanged:: 1.4
            The default value changed from `False` to `"auto"` in version 1.4.

        .. versionchanged:: 1.7
            Normalized stress is now supported for metric MDS as well.

    Attributes
    ----------
    embedding_ : ndarray of shape (n_samples, n_components)
        Stores the position of the dataset in the embedding space.

    stress_ : float
        The final value of the stress (sum of squared distance of the
        disparities and the distances for all constrained points).
        If `normalized_stress=True`, returns Stress-1.
        A value of 0 indicates "perfect" fit, 0.025 excellent, 0.05 good,
        0.1 fair, and 0.2 poor [1]_.

    dissimilarity_matrix_ : ndarray of shape (n_samples, n_samples)
        Pairwise dissimilarities between the points. Symmetric matrix that:

        - either uses a custom dissimilarity matrix by setting `dissimilarity`
          to 'precomputed';
        - or constructs a dissimilarity matrix from data using
          Euclidean distances.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    n_iter_ : int
        The number of iterations corresponding to the best stress.

    See Also
    --------
    sklearn.decomposition.PCA : Principal component analysis that is a linear
        dimensionality reduction method.
    sklearn.decomposition.KernelPCA : Non-linear dimensionality reduction using
        kernels and PCA.
    TSNE : T-distributed Stochastic Neighbor Embedding.
    Isomap : Manifold learning based on Isometric Mapping.
    LocallyLinearEmbedding : Manifold learning using Locally Linear Embedding.
    SpectralEmbedding : Spectral embedding for non-linear dimensionality.

    References
    ----------
    .. [1] "Nonmetric multidimensional scaling: a numerical method" Kruskal, J.
           Psychometrika, 29 (1964)

    .. [2] "Multidimensional scaling by optimizing goodness of fit to a nonmetric
           hypothesis" Kruskal, J. Psychometrika, 29, (1964)

    .. [3] "Modern Multidimensional Scaling - Theory and Applications" Borg, I.;
           Groenen P. Springer Series in Statistics (1997)

    Examples
    --------
    >>> from sklearn.datasets import load_digits
    >>> from sklearn.manifold import MDS
    >>> X, _ = load_digits(return_X_y=True)
    >>> X.shape
    (1797, 64)
    >>> embedding = MDS(n_components=2, n_init=1, init="random")
    >>> X_transformed = embedding.fit_transform(X[:100])
    >>> X_transformed.shape
    (100, 2)

    For a more detailed example of usage, see
    :ref:`sphx_glr_auto_examples_manifold_plot_mds.py`.

    For a comparison of manifold learning techniques, see
    :ref:`sphx_glr_auto_examples_manifold_plot_compare_methods.py`.
    """

    _parameter_constraints: dict = {
        "n_components": [Interval(Integral, 1, None, closed="left")],
        "metric_mds": ["boolean"],
        "n_init": [
            Interval(Integral, 1, None, closed="left"),
            Hidden(StrOptions({"warn"})),
        ],
        "init": [StrOptions({"random", "classical_mds"}), Hidden(StrOptions({"warn"}))],
        "max_iter": [Interval(Integral, 1, None, closed="left")],
        "verbose": ["verbose"],
        "eps": [Interval(Real, 0.0, None, closed="left")],
        "n_jobs": [None, Integral],
        "random_state": ["random_state"],
        "dissimilarity": [
            StrOptions({"euclidean", "precomputed"}),
            Hidden(StrOptions({"deprecated"})),
        ],
        # Booleans are still accepted (deprecated spelling of `metric_mds`).
        "metric": [str, callable, Hidden("boolean")],
        "metric_params": [dict, None],
        "normalized_stress": ["boolean", StrOptions({"auto"})],
    }

    def __init__(
        self,
        n_components=2,
        *,
        metric_mds=True,
        n_init="warn",
        init="warn",
        max_iter=300,
        verbose=0,
        eps=1e-6,
        n_jobs=None,
        random_state=None,
        dissimilarity="deprecated",
        metric="euclidean",
        metric_params=None,
        normalized_stress="auto",
    ):
        # Hyperparameters are stored as given (scikit-learn convention);
        # deprecation handling and validation happen in `fit_transform`.
        self.n_components = n_components
        self.dissimilarity = dissimilarity
        self.metric = metric
        self.metric_params = metric_params
        self.metric_mds = metric_mds
        self.n_init = n_init
        self.init = init
        self.max_iter = max_iter
        self.eps = eps
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.normalized_stress = normalized_stress

    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        # "precomputed" may be requested through either the deprecated
        # `dissimilarity` parameter or the new `metric` parameter.
        tags.input_tags.pairwise = (self.dissimilarity == "precomputed") | (
            self.metric == "precomputed"
        )
        return tags

    def fit(self, X, y=None, init=None):
        """
        Compute the position of the points in the embedding space.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features) or \
                (n_samples, n_samples)
            Input data. If ``metric=='precomputed'``, the input should
            be the dissimilarity matrix.

        y : Ignored
            Not used, present for API consistency by convention.

        init : ndarray of shape (n_samples, n_components), default=None
            Starting configuration of the embedding to initialize the SMACOF
            algorithm. By default, the algorithm is initialized with a randomly
            chosen array.

        Returns
        -------
        self : object
            Fitted estimator.
        """
        self.fit_transform(X, init=init)
        return self

    @_fit_context(prefer_skip_nested_validation=True)
    def fit_transform(self, X, y=None, init=None):
        """
        Fit the data from `X`, and returns the embedded coordinates.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features) or \
                (n_samples, n_samples)
            Input data. If ``metric=='precomputed'``, the input should
            be the dissimilarity matrix.

        y : Ignored
            Not used, present for API consistency by convention.

        init : ndarray of shape (n_samples, n_components), default=None
            Starting configuration of the embedding to initialize the SMACOF
            algorithm. By default, the algorithm is initialized with a randomly
            chosen array.

        Returns
        -------
        X_new : ndarray of shape (n_samples, n_components)
            X transformed in the new space.
        """
        # TODO(1.9): remove once the default `n_init` becomes 1.
        if self.n_init == "warn":
            warnings.warn(
                "The default value of `n_init` will change from 4 to 1 in 1.9. "
                "To suppress this warning, provide some value of `n_init`.",
                FutureWarning,
            )
            self._n_init = 4
        else:
            self._n_init = self.n_init

        # TODO(1.10): remove once the default `init` becomes "classical_mds".
        if self.init == "warn":
            warnings.warn(
                "The default value of `init` will change from 'random' to "
                "'classical_mds' in 1.10. To suppress this warning, provide "
                "some value of `init`.",
                FutureWarning,
            )
            self._init = "random"
        else:
            self._init = self.init

        # TODO(1.10): remove the deprecated `dissimilarity` parameter. Until
        # then, translate it into `self._metric` and reject ambiguous input
        # where both `dissimilarity` and a non-default string `metric` are set
        # (a boolean `metric` is the deprecated spelling of `metric_mds`, so
        # it does not conflict).
        if self.dissimilarity != "deprecated":
            if not isinstance(self.metric, bool) and self.metric != "euclidean":
                raise ValueError(
                    "You provided both `dissimilarity` and `metric`. Please use "
                    "only `metric`."
                )
            else:
                warnings.warn(
                    "The `dissimilarity` parameter is deprecated and will be "
                    "removed in 1.10. Use `metric` instead.",
                    FutureWarning,
                )
                self._metric = self.dissimilarity

        # TODO(1.10): drop boolean `metric` support. A boolean selects
        # metric/non-metric MDS (the old meaning), not a distance metric.
        if isinstance(self.metric, bool):
            warnings.warn(
                f"Use metric_mds={self.metric} instead of metric={self.metric}. The "
                "support for metric={True/False} will be dropped in 1.10.",
                FutureWarning,
            )
            if self.dissimilarity == "deprecated":
                self._metric = "euclidean"
            self._metric_mds = self.metric
        else:
            if self.dissimilarity == "deprecated":
                self._metric = self.metric
            self._metric_mds = self.metric_mds

        X = validate_data(self, X)
        # A square input with a non-precomputed metric is likely a mistake
        # (user probably meant to pass a distance matrix); warn but proceed.
        if X.shape[0] == X.shape[1] and self._metric != "precomputed":
            warnings.warn(
                "The provided input is a square matrix. Note that ``fit`` constructs "
                "a dissimilarity matrix from data and will treat rows as samples "
                "and columns as features. To use a pre-computed dissimilarity matrix, "
                "set ``metric='precomputed'``."
            )

        if self._metric == "precomputed":
            self.dissimilarity_matrix_ = X
            self.dissimilarity_matrix_ = check_symmetric(
                self.dissimilarity_matrix_, raise_exception=True
            )
        else:
            self.dissimilarity_matrix_ = pairwise_distances(
                X,
                metric=self._metric,
                **(self.metric_params if self.metric_params is not None else {}),
            )

        # An `init` argument passed to fit/fit_transform takes precedence
        # over the `init` strategy chosen at construction time.
        if init is not None:
            init_array = init
        elif self._init == "classical_mds":
            cmds = ClassicalMDS(metric="precomputed")
            init_array = cmds.fit_transform(self.dissimilarity_matrix_)
        else:
            init_array = None

        self.embedding_, self.stress_, self.n_iter_ = smacof(
            self.dissimilarity_matrix_,
            metric=self._metric_mds,
            n_components=self.n_components,
            init=init_array,
            n_init=self._n_init,
            n_jobs=self.n_jobs,
            max_iter=self.max_iter,
            verbose=self.verbose,
            eps=self.eps,
            random_state=self.random_state,
            return_n_iter=True,
            normalized_stress=self.normalized_stress,
        )

        return self.embedding_
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/manifold/_classical_mds.py | sklearn/manifold/_classical_mds.py | """
Classical multi-dimensional scaling (classical MDS).
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from numbers import Integral
import numpy as np
from scipy import linalg
from sklearn.base import BaseEstimator, _fit_context
from sklearn.metrics import pairwise_distances
from sklearn.utils import check_symmetric
from sklearn.utils._param_validation import Interval
from sklearn.utils.extmath import svd_flip
from sklearn.utils.validation import validate_data
class ClassicalMDS(BaseEstimator):
    """Classical multidimensional scaling (MDS).

    Also known as principal coordinates analysis (PCoA) or Torgerson's
    scaling, this variant of MDS admits an exact solution via an
    eigendecomposition of the double-centered squared dissimilarity matrix.
    When the dissimilarities are pairwise Euclidean distances between some
    vectors, classical MDS coincides with PCA applied to those vectors.

    Read more in the :ref:`User Guide <multidimensional_scaling>`.

    Parameters
    ----------
    n_components : int, default=2
        Number of embedding dimensions.

    metric : str or callable, default='euclidean'
        Metric used to build the dissimilarity matrix. If a string, it must
        be one of the options allowed by `scipy.spatial.distance.pdist` or a
        metric listed in :func:`sklearn.metrics.pairwise.distance_metrics`.
        If "precomputed", X is assumed to be a distance matrix and must be
        square during fit. If a callable, it takes two arrays representing
        1D vectors and returns one value indicating the distance between
        them; this works for Scipy's metrics but is less efficient than
        passing the metric name as a string.

    metric_params : dict, default=None
        Additional keyword arguments for the dissimilarity computation.

    Attributes
    ----------
    embedding_ : ndarray of shape (n_samples, n_components)
        Position of the dataset in the embedding space.

    dissimilarity_matrix_ : ndarray of shape (n_samples, n_samples)
        Pairwise dissimilarities between the points.

    eigenvalues_ : ndarray of shape (n_components,)
        Eigenvalues of the double-centered dissimilarity matrix associated
        with the retained components. They equal the squared 2-norms of the
        `n_components` variables in the embedding space.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

    See Also
    --------
    sklearn.decomposition.PCA : Principal component analysis.
    MDS : Metric and non-metric MDS.

    References
    ----------
    .. [1] "Modern Multidimensional Scaling - Theory and Applications" Borg, I.;
           Groenen P. Springer Series in Statistics (1997)

    Examples
    --------
    >>> from sklearn.datasets import load_digits
    >>> from sklearn.manifold import ClassicalMDS
    >>> X, _ = load_digits(return_X_y=True)
    >>> X.shape
    (1797, 64)
    >>> cmds = ClassicalMDS(n_components=2)
    >>> X_emb = cmds.fit_transform(X[:100])
    >>> X_emb.shape
    (100, 2)
    """

    _parameter_constraints: dict = {
        "n_components": [Interval(Integral, 1, None, closed="left")],
        "metric": [str, callable],
        "metric_params": [dict, None],
    }

    def __init__(
        self,
        n_components=2,
        *,
        metric="euclidean",
        metric_params=None,
    ):
        self.n_components = n_components
        self.metric = metric
        self.metric_params = metric_params

    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        tags.input_tags.pairwise = self.metric == "precomputed"
        return tags

    def fit(self, X, y=None):
        """Compute the embedding positions.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features) or \
                (n_samples, n_samples)
            Input data, or the dissimilarity matrix when
            ``metric=='precomputed'``.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : object
            Fitted estimator.
        """
        self.fit_transform(X)
        return self

    @_fit_context(prefer_skip_nested_validation=True)
    def fit_transform(self, X, y=None):
        """Compute and return the embedding positions.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features) or \
                (n_samples, n_samples)
            Input data, or the dissimilarity matrix when
            ``metric=='precomputed'``.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        X_new : ndarray of shape (n_samples, n_components)
            The embedding coordinates.
        """
        X = validate_data(self, X)

        if self.metric == "precomputed":
            # A precomputed matrix must be symmetric; raise otherwise.
            self.dissimilarity_matrix_ = check_symmetric(X, raise_exception=True)
        else:
            extra_params = {} if self.metric_params is None else self.metric_params
            self.dissimilarity_matrix_ = pairwise_distances(
                X, metric=self.metric, **extra_params
            )

        # Double-center the squared dissimilarities: B = -0.5 * J * D**2 * J.
        gram = self.dissimilarity_matrix_**2
        gram = gram.astype(np.float64)
        gram -= np.mean(gram, axis=0)
        gram -= np.mean(gram, axis=1, keepdims=True)
        gram *= -0.5

        # `eigh` returns eigenvalues in ascending order; flip to descending
        # order and keep only the leading `n_components`.
        eigenvalues, eigenvectors = linalg.eigh(gram)
        eigenvalues = eigenvalues[::-1][: self.n_components]
        eigenvectors = eigenvectors[:, ::-1][:, : self.n_components]

        # Fix eigenvector signs to make the output deterministic.
        eigenvectors, _ = svd_flip(eigenvectors, None)

        self.embedding_ = np.sqrt(eigenvalues) * eigenvectors
        self.eigenvalues_ = eigenvalues
        return self.embedding_
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/manifold/_isomap.py | sklearn/manifold/_isomap.py | """Isomap for manifold learning"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from numbers import Integral, Real
import numpy as np
from scipy.sparse import issparse
from scipy.sparse.csgraph import connected_components, shortest_path
from sklearn.base import (
BaseEstimator,
ClassNamePrefixFeaturesOutMixin,
TransformerMixin,
_fit_context,
)
from sklearn.decomposition import KernelPCA
from sklearn.metrics.pairwise import _VALID_METRICS
from sklearn.neighbors import NearestNeighbors, kneighbors_graph, radius_neighbors_graph
from sklearn.preprocessing import KernelCenterer
from sklearn.utils._param_validation import Interval, StrOptions
from sklearn.utils.graph import _fix_connected_components
from sklearn.utils.validation import check_is_fitted
class Isomap(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
"""Isomap Embedding.
Non-linear dimensionality reduction through Isometric Mapping
Read more in the :ref:`User Guide <isomap>`.
Parameters
----------
n_neighbors : int or None, default=5
Number of neighbors to consider for each point. If `n_neighbors` is an int,
then `radius` must be `None`.
radius : float or None, default=None
Limiting distance of neighbors to return. If `radius` is a float,
then `n_neighbors` must be set to `None`.
.. versionadded:: 1.1
n_components : int, default=2
Number of coordinates for the manifold.
eigen_solver : {'auto', 'arpack', 'dense'}, default='auto'
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float, default=0
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : int, default=None
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : {'auto', 'FW', 'D'}, default='auto'
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : {'auto', 'brute', 'kd_tree', 'ball_tree'}, \
default='auto'
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
n_jobs : int or None, default=None
The number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
metric : str, or callable, default="minkowski"
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by :func:`sklearn.metrics.pairwise_distances` for
its metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a :term:`Glossary <sparse graph>`.
.. versionadded:: 0.22
p : float, default=2
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
.. versionadded:: 0.22
metric_params : dict, default=None
Additional keyword arguments for the metric function.
.. versionadded:: 0.22
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
:class:`~sklearn.decomposition.KernelPCA` object used to implement the
embedding.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
sklearn.decomposition.PCA : Principal component analysis that is a linear
dimensionality reduction method.
sklearn.decomposition.KernelPCA : Non-linear dimensionality reduction using
kernels and PCA.
MDS : Manifold learning using multidimensional scaling.
TSNE : T-distributed Stochastic Neighbor Embedding.
LocallyLinearEmbedding : Manifold learning using Locally Linear Embedding.
SpectralEmbedding : Spectral embedding for non-linear dimensionality.
References
----------
.. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.manifold import Isomap
>>> X, _ = load_digits(return_X_y=True)
>>> X.shape
(1797, 64)
>>> embedding = Isomap(n_components=2)
>>> X_transformed = embedding.fit_transform(X[:100])
>>> X_transformed.shape
(100, 2)
"""
_parameter_constraints: dict = {
"n_neighbors": [Interval(Integral, 1, None, closed="left"), None],
"radius": [Interval(Real, 0, None, closed="both"), None],
"n_components": [Interval(Integral, 1, None, closed="left")],
"eigen_solver": [StrOptions({"auto", "arpack", "dense"})],
"tol": [Interval(Real, 0, None, closed="left")],
"max_iter": [Interval(Integral, 1, None, closed="left"), None],
"path_method": [StrOptions({"auto", "FW", "D"})],
"neighbors_algorithm": [StrOptions({"auto", "brute", "kd_tree", "ball_tree"})],
"n_jobs": [Integral, None],
"p": [Interval(Real, 1, None, closed="left")],
"metric": [StrOptions(set(_VALID_METRICS) | {"precomputed"}), callable],
"metric_params": [dict, None],
}
    def __init__(
        self,
        *,
        n_neighbors=5,
        radius=None,
        n_components=2,
        eigen_solver="auto",
        tol=0,
        max_iter=None,
        path_method="auto",
        neighbors_algorithm="auto",
        n_jobs=None,
        metric="minkowski",
        p=2,
        metric_params=None,
    ):
        # Hyperparameters are stored verbatim (scikit-learn convention):
        # no validation or computation here; parameters are checked against
        # `_parameter_constraints` and consumed when `fit` is called.
        self.n_neighbors = n_neighbors
        self.radius = radius
        self.n_components = n_components
        self.eigen_solver = eigen_solver
        self.tol = tol
        self.max_iter = max_iter
        self.path_method = path_method
        self.neighbors_algorithm = neighbors_algorithm
        self.n_jobs = n_jobs
        self.metric = metric
        self.p = p
        self.metric_params = metric_params
def _fit_transform(self, X):
    """Fit the neighborhood graph, geodesic distances and kernel PCA on X.

    Populates ``nbrs_``, ``kernel_pca_``, ``dist_matrix_``, ``embedding_``
    and the ``n_features_in_`` / ``feature_names_in_`` bookkeeping
    attributes.

    Parameters
    ----------
    X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
        Training data.

    Raises
    ------
    ValueError
        If both `n_neighbors` and `radius` are provided.
    RuntimeError
        If the neighbors graph is disconnected and cannot be completed
        (``metric='precomputed'`` with sparse input).
    """
    # n_neighbors and radius select mutually exclusive neighborhood modes.
    if self.n_neighbors is not None and self.radius is not None:
        raise ValueError(
            "Both n_neighbors and radius are provided. Use"
            f" Isomap(radius={self.radius}, n_neighbors=None) if intended to use"
            " radius-based neighbors"
        )

    self.nbrs_ = NearestNeighbors(
        n_neighbors=self.n_neighbors,
        radius=self.radius,
        algorithm=self.neighbors_algorithm,
        metric=self.metric,
        p=self.p,
        metric_params=self.metric_params,
        n_jobs=self.n_jobs,
    )
    self.nbrs_.fit(X)
    # Mirror the input-validation bookkeeping done by the inner estimator.
    self.n_features_in_ = self.nbrs_.n_features_in_
    if hasattr(self.nbrs_, "feature_names_in_"):
        self.feature_names_in_ = self.nbrs_.feature_names_in_

    # The isomap kernel is handed to KernelPCA as a precomputed kernel;
    # transform="default" keeps the embedding a plain ndarray regardless of
    # the global set_output configuration.
    self.kernel_pca_ = KernelPCA(
        n_components=self.n_components,
        kernel="precomputed",
        eigen_solver=self.eigen_solver,
        tol=self.tol,
        max_iter=self.max_iter,
        n_jobs=self.n_jobs,
    ).set_output(transform="default")

    if self.n_neighbors is not None:
        nbg = kneighbors_graph(
            self.nbrs_,
            self.n_neighbors,
            metric=self.metric,
            p=self.p,
            metric_params=self.metric_params,
            mode="distance",
            n_jobs=self.n_jobs,
        )
    else:
        nbg = radius_neighbors_graph(
            self.nbrs_,
            radius=self.radius,
            metric=self.metric,
            p=self.p,
            metric_params=self.metric_params,
            mode="distance",
            n_jobs=self.n_jobs,
        )

    # Compute the number of connected components, and connect the different
    # components to be able to compute a shortest path between all pairs
    # of samples in the graph.
    # Similar fix to cluster._agglomerative._fix_connectivity.
    n_connected_components, labels = connected_components(nbg)
    if n_connected_components > 1:
        if self.metric == "precomputed" and issparse(X):
            # BUGFIX: trailing space added after "cannot be" — the adjacent
            # string literals previously rendered as "cannot befitted."
            raise RuntimeError(
                "The number of connected components of the neighbors graph"
                f" is {n_connected_components} > 1. The graph cannot be "
                "completed with metric='precomputed', and Isomap cannot be "
                "fitted. Increase the number of neighbors to avoid this "
                "issue, or precompute the full distance matrix instead "
                "of passing a sparse neighbors graph."
            )
        warnings.warn(
            (
                "The number of connected components of the neighbors graph "
                f"is {n_connected_components} > 1. Completing the graph to fit"
                " Isomap might be slow. Increase the number of neighbors to "
                "avoid this issue."
            ),
            stacklevel=2,
        )

        # use array validated by NearestNeighbors
        nbg = _fix_connected_components(
            X=self.nbrs_._fit_X,
            graph=nbg,
            n_connected_components=n_connected_components,
            component_labels=labels,
            mode="distance",
            metric=self.nbrs_.effective_metric_,
            **self.nbrs_.effective_metric_params_,
        )

    self.dist_matrix_ = shortest_path(nbg, method=self.path_method, directed=False)

    # shortest_path always returns float64; cast back so that float32
    # inputs keep a float32 pipeline end-to-end.
    if self.nbrs_._fit_X.dtype == np.float32:
        self.dist_matrix_ = self.dist_matrix_.astype(
            self.nbrs_._fit_X.dtype, copy=False
        )

    # Isomap kernel: K = -0.5 * D**2, applied in place to save memory.
    G = self.dist_matrix_**2
    G *= -0.5

    self.embedding_ = self.kernel_pca_.fit_transform(G)
    self._n_features_out = self.embedding_.shape[1]
def reconstruction_error(self):
    """Compute the reconstruction error for the embedding.

    Returns
    -------
    reconstruction_error : float
        Reconstruction error.

    Notes
    -----
    The cost function of an isomap embedding is

    ``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``

    Where D is the matrix of distances for the input data X,
    D_fit is the matrix of distances for the output embedding X_fit,
    and K is the isomap kernel:

    ``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
    """
    # Isomap kernel of the geodesic distances, then double-centering.
    kernel = self.dist_matrix_**2
    kernel *= -0.5
    centered = KernelCenterer().fit_transform(kernel)
    eigenvalues = self.kernel_pca_.eigenvalues_
    # Residual energy = total centered kernel energy minus the part
    # captured by the retained eigenvalues, normalized by n_samples.
    residual = np.sum(centered**2) - np.sum(eigenvalues**2)
    return np.sqrt(residual) / kernel.shape[0]
@_fit_context(
    # Isomap.metric is not validated yet
    prefer_skip_nested_validation=False
)
def fit(self, X, y=None):
    """Compute the embedding vectors for data X.

    Parameters
    ----------
    X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
        Sample data, shape = (n_samples, n_features), in the form of a
        numpy array, sparse matrix, precomputed tree, or NearestNeighbors
        object.

    y : Ignored
        Not used, present for API consistency by convention.

    Returns
    -------
    self : object
        Returns a fitted instance of self.
    """
    # All of the work happens in _fit_transform; fit simply discards the
    # computed embedding and returns the estimator for chaining.
    self._fit_transform(X)
    return self
@_fit_context(
    # Isomap.metric is not validated yet
    prefer_skip_nested_validation=False
)
def fit_transform(self, X, y=None):
    """Fit the model from data in X and transform X.

    Parameters
    ----------
    X : {array-like, sparse matrix, BallTree, KDTree}
        Training vector, where `n_samples` is the number of samples
        and `n_features` is the number of features.

    y : Ignored
        Not used, present for API consistency by convention.

    Returns
    -------
    X_new : array-like, shape (n_samples, n_components)
        X transformed in the new space.
    """
    # Fitting already computes the training embedding, so return it
    # directly instead of re-running transform.
    self._fit_transform(X)
    return self.embedding_
def transform(self, X):
    """Transform X.

    This is implemented by linking the points X into the graph of geodesic
    distances of the training data. First the `n_neighbors` nearest
    neighbors of X are found in the training data, and from these the
    shortest geodesic distances from each point in X to each point in
    the training data are computed in order to construct the kernel.
    The embedding of X is the projection of this kernel onto the
    embedding vectors of the training set.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_queries, n_features)
        If neighbors_algorithm='precomputed', X is assumed to be a
        distance matrix or a sparse graph of shape
        (n_queries, n_samples_fit).

    Returns
    -------
    X_new : array-like, shape (n_queries, n_components)
        X transformed in the new space.
    """
    check_is_fitted(self)

    # Locate each query's training-set neighbors under the fitted mode.
    if self.n_neighbors is not None:
        distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
    else:
        distances, indices = self.nbrs_.radius_neighbors(X, return_distance=True)

    n_samples_fit = self.nbrs_.n_samples_fit_
    n_queries = distances.shape[0]

    # Preserve float32 inputs end-to-end; everything else runs in float64.
    if hasattr(X, "dtype") and X.dtype == np.float32:
        dtype = np.float32
    else:
        dtype = np.float64

    # Shortest distance from each query to every training sample, routed
    # through the query's neighbors. Filling row by row (rather than one
    # big broadcast) keeps peak memory bounded.
    G_X = np.zeros((n_queries, n_samples_fit), dtype)
    for row, (dist_row, ind_row) in enumerate(zip(distances, indices)):
        G_X[row] = np.min(self.dist_matrix_[ind_row] + dist_row[:, None], 0)

    # Apply the isomap kernel in place: K = -0.5 * D**2.
    G_X **= 2
    G_X *= -0.5

    return self.kernel_pca_.transform(G_X)
def __sklearn_tags__(self):
    # Advertise sparse-input support and float32/float64 dtype
    # preservation on top of the tags inherited from the parent estimator.
    tags = super().__sklearn_tags__()
    tags.input_tags.sparse = True
    tags.transformer_tags.preserves_dtype = ["float64", "float32"]
    return tags
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
# NOTE(review): the lines below are dataset-viewer page residue ("Subsets
# and Splits" UI text), not Python source; commented out so the module
# stays importable. Confirm and delete.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.