repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/gaussian_process/tests/test_gpr.py | sklearn/gaussian_process/tests/test_gpr.py | """Testing for Gaussian process regression"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import re
import sys
import warnings
import numpy as np
import pytest
from scipy.optimize import approx_fprime
from sklearn.exceptions import ConvergenceWarning
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import (
RBF,
DotProduct,
ExpSineSquared,
WhiteKernel,
)
from sklearn.gaussian_process.kernels import (
ConstantKernel as C,
)
from sklearn.gaussian_process.tests._mini_sequence_kernel import MiniSeqKernel
from sklearn.utils._testing import (
assert_allclose,
assert_almost_equal,
assert_array_almost_equal,
assert_array_less,
)
def f(x):
return x * np.sin(x)
# Training inputs (column vector) and noise-free targets y = f(X).
X = np.atleast_2d([1.0, 3.0, 5.0, 6.0, 7.0, 8.0]).T
# Held-out inputs used by the prediction tests.
X2 = np.atleast_2d([2.0, 4.0, 5.5, 6.5, 7.5]).T
y = f(X).ravel()
# Kernel whose length-scale is excluded from hyperparameter optimization.
fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
# Kernels exercised by the parametrized tests below.
kernels = [
    RBF(length_scale=1.0),
    fixed_kernel,
    RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
    C(1.0, (1e-2, 1e2)) * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
    C(1.0, (1e-2, 1e2)) * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3))
    + C(1e-5, (1e-5, 1e2)),
    C(0.1, (1e-2, 1e2)) * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3))
    + C(1e-5, (1e-5, 1e2)),
]
# Kernels with tunable hyperparameters.  NOTE(review): this uses kernel
# (in)equality, not identity, so any kernel comparing equal to fixed_kernel
# would also be excluded — confirm that is intended.
non_fixed_kernels = [kernel for kernel in kernels if kernel != fixed_kernel]
@pytest.mark.parametrize("kernel", kernels)
def test_gpr_interpolation(kernel):
    if sys.maxsize <= 2**32:
        pytest.xfail("This test may fail on 32 bit Python")
    # A noise-free GP must reproduce its training targets exactly, with
    # vanishing posterior variance at the training points.
    model = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    pred_mean, pred_cov = model.predict(X, return_cov=True)
    assert_almost_equal(pred_mean, y)
    assert_almost_equal(np.diag(pred_cov), 0.0)
def test_gpr_interpolation_structured():
    # Interpolation should also hold for non-vectorial (string) inputs.
    seq_kernel = MiniSeqKernel(baseline_similarity_bounds="fixed")
    X_seq = ["A", "B", "C"]
    y_seq = np.array([1, 2, 3])
    model = GaussianProcessRegressor(kernel=seq_kernel).fit(X_seq, y_seq)
    pred_mean, pred_cov = model.predict(X_seq, return_cov=True)
    # Gradient of the kernel over distinct string pairs matches 1 - identity.
    expected_grad = (1 - np.eye(len(X_seq))).ravel()
    assert_almost_equal(
        seq_kernel(X_seq, eval_gradient=True)[1].ravel(), expected_grad
    )
    assert_almost_equal(pred_mean, y_seq)
    assert_almost_equal(np.diag(pred_cov), 0.0)
@pytest.mark.parametrize("kernel", non_fixed_kernels)
def test_lml_improving(kernel):
    if sys.maxsize <= 2**32:
        pytest.xfail("This test may fail on 32 bit Python")
    # Optimizing the hyperparameters must improve the log-marginal
    # likelihood over the initial kernel parameters.
    model = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    lml_optimized = model.log_marginal_likelihood(model.kernel_.theta)
    lml_initial = model.log_marginal_likelihood(kernel.theta)
    assert lml_optimized > lml_initial
@pytest.mark.parametrize("kernel", kernels)
def test_lml_precomputed(kernel):
    # The LML stored during fit must equal an explicit recomputation at
    # the optimized hyperparameters.
    model = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    recomputed = model.log_marginal_likelihood(model.kernel_.theta)
    assert recomputed == pytest.approx(model.log_marginal_likelihood())
@pytest.mark.parametrize("kernel", kernels)
def test_lml_without_cloning_kernel(kernel):
    # With clone_kernel=False the evaluation writes theta into kernel_
    # in place rather than working on a copy.
    model = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    theta_in = np.ones(model.kernel_.theta.shape, dtype=np.float64)
    model.log_marginal_likelihood(theta_in, clone_kernel=False)
    assert_almost_equal(model.kernel_.theta, theta_in, 7)
@pytest.mark.parametrize("kernel", non_fixed_kernels)
def test_converged_to_local_maximum(kernel):
    # After optimization every component is either at a stationary point
    # (near-zero gradient) or pinned at one of its bounds.
    model = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    _, gradient = model.log_marginal_likelihood(model.kernel_.theta, True)
    theta = model.kernel_.theta
    lower = model.kernel_.bounds[:, 0]
    upper = model.kernel_.bounds[:, 1]
    at_boundary = (theta == lower) | (theta == upper)
    assert np.all((np.abs(gradient) < 1e-4) | at_boundary)
@pytest.mark.parametrize("kernel", non_fixed_kernels)
def test_solution_inside_bounds(kernel):
    # The optimized hyperparameters must remain inside the search bounds.
    model = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    bounds = model.kernel_.bounds
    finite_max = np.finfo(model.kernel_.theta.dtype).max
    eps = 1e-10
    # Replace infinite upper bounds by the largest representable float so
    # the strict comparisons below remain well-defined.
    bounds[~np.isfinite(bounds[:, 1]), 1] = finite_max
    assert_array_less(bounds[:, 0], model.kernel_.theta + eps)
    assert_array_less(model.kernel_.theta, bounds[:, 1] + eps)
@pytest.mark.parametrize("kernel", kernels)
def test_lml_gradient(kernel):
    # The analytic LML gradient must agree with a finite-difference estimate.
    model = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    _, analytic_grad = model.log_marginal_likelihood(kernel.theta, True)
    numeric_grad = approx_fprime(
        kernel.theta,
        lambda theta: model.log_marginal_likelihood(theta, False),
        1e-10,
    )
    assert_almost_equal(analytic_grad, numeric_grad, 3)
@pytest.mark.parametrize("kernel", kernels)
def test_prior(kernel):
    # Before fitting, the GP prior has zero mean and identical variances.
    model = GaussianProcessRegressor(kernel=kernel)
    prior_mean, prior_cov = model.predict(X, return_cov=True)
    assert_almost_equal(prior_mean, 0, 5)
    if len(model.kernel.theta) > 1:
        # XXX: quite hacky, works only for current kernels
        assert_almost_equal(np.diag(prior_cov), np.exp(kernel.theta[0]), 5)
    else:
        assert_almost_equal(np.diag(prior_cov), 1, 5)
@pytest.mark.parametrize("kernel", kernels)
def test_sample_statistics(kernel):
    # Empirical moments of drawn samples should match the predicted moments.
    model = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    pred_mean, pred_cov = model.predict(X2, return_cov=True)
    draws = model.sample_y(X2, 300000)
    # More digits of accuracy would require many more samples.
    assert_almost_equal(pred_mean, np.mean(draws, 1), 1)
    scale = np.diag(pred_cov).max()
    assert_almost_equal(np.diag(pred_cov) / scale, np.var(draws, 1) / scale, 1)
def test_no_optimizer():
    # With optimizer=None the kernel hyperparameters are left untouched by fit.
    model = GaussianProcessRegressor(kernel=RBF(1.0), optimizer=None).fit(X, y)
    assert np.exp(model.kernel_.theta) == 1.0
@pytest.mark.parametrize("kernel", kernels)
@pytest.mark.parametrize("target", [y, np.ones(X.shape[0], dtype=np.float64)])
def test_predict_cov_vs_std(kernel, target):
    """Check that the predicted std.-dev. equals the square root of the
    diagonal of the predicted covariance.

    The ``target`` parametrization was previously unused — the model was
    always fit on the module-level ``y`` — so the constant-target case was
    never actually exercised.  Fit on ``target`` instead.
    """
    if sys.maxsize <= 2**32:
        pytest.xfail("This test may fail on 32 bit Python")
    gpr = GaussianProcessRegressor(kernel=kernel).fit(X, target)
    y_mean, y_cov = gpr.predict(X2, return_cov=True)
    y_mean, y_std = gpr.predict(X2, return_std=True)
    assert_almost_equal(np.sqrt(np.diag(y_cov)), y_std)
def test_anisotropic_kernel():
    # A target varying ten times more slowly along the second axis should
    # produce per-dimension length-scales separated by at least a factor 5.
    rng = np.random.RandomState(0)
    X_train = rng.uniform(-1, 1, (50, 2))
    y_train = X_train[:, 0] + 0.1 * X_train[:, 1]
    model = GaussianProcessRegressor(kernel=RBF([1.0, 1.0])).fit(X_train, y_train)
    length_scales = np.exp(model.kernel_.theta)
    assert length_scales[1] > length_scales[0] * 5
def test_random_starts():
    # Increasing the number of random restarts can only improve (or keep)
    # the best log-marginal likelihood found.
    n_samples, n_features = 25, 2
    rng = np.random.RandomState(0)
    X_train = rng.randn(n_samples, n_features) * 2 - 1
    y_train = (
        np.sin(X_train).sum(axis=1)
        + np.sin(3 * X_train).sum(axis=1)
        + rng.normal(scale=0.1, size=n_samples)
    )
    kernel = C(1.0, (1e-2, 1e2)) * RBF(
        length_scale=[1.0] * n_features, length_scale_bounds=[(1e-4, 1e2)] * n_features
    ) + WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-5, 1e1))
    best_lml = -np.inf
    for n_restarts in range(5):
        model = GaussianProcessRegressor(
            kernel=kernel,
            n_restarts_optimizer=n_restarts,
            random_state=0,
        ).fit(X_train, y_train)
        lml = model.log_marginal_likelihood(model.kernel_.theta)
        # Allow for float32-level rounding noise between runs.
        assert lml > best_lml - np.finfo(np.float32).eps
        best_lml = lml
@pytest.mark.parametrize("kernel", kernels)
def test_y_normalization(kernel):
    """Fitting a plain GP on manually standardized targets must match a
    normalize_y=True GP fit on the raw targets, once the predictions are
    mapped back to the original scale ('normalized' here means zero mean
    and unit variance)."""
    mu = np.mean(y)
    sigma = np.std(y)
    y_standardized = (y - mu) / sigma
    # Non-normalizing GP on standardized targets.
    gpr_raw = GaussianProcessRegressor(kernel=kernel)
    gpr_raw.fit(X, y_standardized)
    # Normalizing GP on raw targets.
    gpr_norm = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
    gpr_norm.fit(X, y)
    # Means and std.-devs. must agree after undoing the standardization.
    pred, pred_std = gpr_raw.predict(X2, return_std=True)
    pred = pred * sigma + mu
    pred_std = pred_std * sigma
    pred_norm, pred_std_norm = gpr_norm.predict(X2, return_std=True)
    assert_almost_equal(pred, pred_norm)
    assert_almost_equal(pred_std, pred_std_norm)
    # Covariances must agree after rescaling by the variance.
    _, cov = gpr_raw.predict(X2, return_cov=True)
    cov = cov * sigma**2
    _, cov_norm = gpr_norm.predict(X2, return_cov=True)
    assert_almost_equal(cov, cov_norm)
def test_large_variance_y():
    """
    Here we test that, when normalize_y=True, our GP can produce a
    sensible fit to training data whose variance is significantly
    larger than unity. This test was made in response to issue #15612.
    GP predictions are verified against predictions that were made
    using GPy which, here, is treated as the 'gold standard'. Note that we
    only investigate the RBF kernel here, as that is what was used in the
    GPy implementation.
    The following code can be used to recreate the GPy data:
    --------------------------------------------------------------------------
    import GPy
    kernel_gpy = GPy.kern.RBF(input_dim=1, lengthscale=1.)
    gpy = GPy.models.GPRegression(X, np.vstack(y_large), kernel_gpy)
    gpy.optimize()
    y_pred_gpy, y_var_gpy = gpy.predict(X2)
    y_pred_std_gpy = np.sqrt(y_var_gpy)
    --------------------------------------------------------------------------
    """
    # Here we utilise a larger variance version of the training data
    y_large = 10 * y
    # Standard GP with normalize_y=True
    RBF_params = {"length_scale": 1.0}
    kernel = RBF(**RBF_params)
    gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
    gpr.fit(X, y_large)
    y_pred, y_pred_std = gpr.predict(X2, return_std=True)
    # 'Gold standard' mean predictions from GPy
    y_pred_gpy = np.array(
        [15.16918303, -27.98707845, -39.31636019, 14.52605515, 69.18503589]
    )
    # 'Gold standard' std predictions from GPy
    y_pred_std_gpy = np.array(
        [7.78860962, 3.83179178, 0.63149951, 0.52745188, 0.86170042]
    )
    # Based on numerical experiments, it's reasonable to expect our
    # GP's mean predictions to get within 7% of predictions of those
    # made by GPy.
    assert_allclose(y_pred, y_pred_gpy, rtol=0.07, atol=0)
    # Based on numerical experiments, it's reasonable to expect our
    # GP's std predictions to get within 15% of predictions of those
    # made by GPy.
    assert_allclose(y_pred_std, y_pred_std_gpy, rtol=0.15, atol=0)
def test_y_multioutput():
    # Test that GPR can deal with multi-dimensional target values
    y_2d = np.vstack((y, y * 2)).T
    # Test for fixed kernel that first dimension of 2d GP equals the output
    # of 1d GP and that second dimension is twice as large
    kernel = RBF(length_scale=1.0)
    gpr = GaussianProcessRegressor(kernel=kernel, optimizer=None, normalize_y=False)
    gpr.fit(X, y)
    gpr_2d = GaussianProcessRegressor(kernel=kernel, optimizer=None, normalize_y=False)
    gpr_2d.fit(X, y_2d)
    y_pred_1d, y_std_1d = gpr.predict(X2, return_std=True)
    y_pred_2d, y_std_2d = gpr_2d.predict(X2, return_std=True)
    _, y_cov_1d = gpr.predict(X2, return_cov=True)
    _, y_cov_2d = gpr_2d.predict(X2, return_cov=True)
    # Second target is twice the first, so predictions scale accordingly.
    assert_almost_equal(y_pred_1d, y_pred_2d[:, 0])
    assert_almost_equal(y_pred_1d, y_pred_2d[:, 1] / 2)
    # Standard deviation and covariance do not depend on output
    for target in range(y_2d.shape[1]):
        assert_almost_equal(y_std_1d, y_std_2d[..., target])
        assert_almost_equal(y_cov_1d, y_cov_2d[..., target])
    y_sample_1d = gpr.sample_y(X2, n_samples=10)
    y_sample_2d = gpr_2d.sample_y(X2, n_samples=10)
    # Samples are (n_points, [n_targets,] n_samples).
    assert y_sample_1d.shape == (5, 10)
    assert y_sample_2d.shape == (5, 2, 10)
    # Only the first target will be equal
    assert_almost_equal(y_sample_1d, y_sample_2d[:, 0, :])
    # Test hyperparameter optimization: duplicated targets should yield the
    # same optimized hyperparameters as the single-target fit.
    for kernel in kernels:
        gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
        gpr.fit(X, y)
        gpr_2d = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
        gpr_2d.fit(X, np.vstack((y, y)).T)
        assert_almost_equal(gpr.kernel_.theta, gpr_2d.kernel_.theta, 4)
@pytest.mark.parametrize("kernel", non_fixed_kernels)
def test_custom_optimizer(kernel):
    # GPR must accept a user-supplied optimizer callable.
    def random_search(obj_func, initial_theta, bounds):
        # Dummy optimizer: evaluate 50 random hyperparameter draws and keep
        # the best one found.
        rng = np.random.RandomState(0)
        best_theta = initial_theta
        best_value = obj_func(initial_theta, eval_gradient=False)
        for _ in range(50):
            candidate = np.atleast_1d(
                rng.uniform(np.maximum(-2, bounds[:, 0]), np.minimum(1, bounds[:, 1]))
            )
            value = obj_func(candidate, eval_gradient=False)
            if value < best_value:
                best_theta, best_value = candidate, value
        return best_theta, best_value

    model = GaussianProcessRegressor(kernel=kernel, optimizer=random_search)
    model.fit(X, y)
    # The custom optimizer must not decrease the marginal likelihood.
    lml_optimized = model.log_marginal_likelihood(model.kernel_.theta)
    lml_initial = model.log_marginal_likelihood(model.kernel.theta)
    assert lml_optimized > lml_initial
def test_gpr_correct_error_message():
    # A singular kernel matrix with alpha=0.0 must raise a helpful
    # LinAlgError pointing the user at the alpha parameter.
    X_local = np.arange(12).reshape(6, -1)
    y_local = np.ones(6)
    kernel = DotProduct()
    gpr = GaussianProcessRegressor(kernel=kernel, alpha=0.0)
    expected = (
        f"The kernel, {kernel}, is not returning a positive definite matrix. "
        "Try gradually increasing the 'alpha' parameter of your "
        "GaussianProcessRegressor estimator."
    )
    with pytest.raises(np.linalg.LinAlgError, match=re.escape(expected)):
        gpr.fit(X_local, y_local)
@pytest.mark.parametrize("kernel", kernels)
def test_duplicate_input(kernel):
    # Exactly duplicated and nearly duplicated inputs (with conflicting
    # targets) should lead to practically identical predictions.
    model_equal = GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
    model_close = GaussianProcessRegressor(kernel=kernel, alpha=1e-2)
    model_equal.fit(np.vstack((X, X[0])), np.hstack((y, y[0] + 1)))
    model_close.fit(np.vstack((X, X[0] + 1e-15)), np.hstack((y, y[0] + 1)))
    X_test = np.linspace(0, 10, 100)[:, None]
    pred_equal, std_equal = model_equal.predict(X_test, return_std=True)
    pred_close, std_close = model_close.predict(X_test, return_std=True)
    assert_almost_equal(pred_equal, pred_close)
    assert_almost_equal(std_equal, std_close)
def test_no_fit_default_predict():
    # Predicting from an unfitted GPR must behave like the default prior
    # kernel C(1.0, fixed) * RBF(1.0, fixed).
    prior_kernel = C(1.0, constant_value_bounds="fixed") * RBF(
        1.0, length_scale_bounds="fixed"
    )
    default_model = GaussianProcessRegressor()
    _, std_default = default_model.predict(X, return_std=True)
    _, cov_default = default_model.predict(X, return_cov=True)
    explicit_model = GaussianProcessRegressor(kernel=prior_kernel)
    _, std_explicit = explicit_model.predict(X, return_std=True)
    _, cov_explicit = explicit_model.predict(X, return_cov=True)
    assert_array_almost_equal(std_default, std_explicit)
    assert_array_almost_equal(cov_default, cov_explicit)
def test_warning_bounds():
    # A ConvergenceWarning is raised when the optimized hyperparameter ends
    # up at (or near) one of its bounds.  Three scenarios are checked:
    # a single kernel, a sum kernel (both terms at a bound), and a single
    # anisotropic kernel (both dimensions at a bound).
    kernel = RBF(length_scale_bounds=[1e-5, 1e-3])
    gpr = GaussianProcessRegressor(kernel=kernel)
    warning_message = (
        "The optimal value found for dimension 0 of parameter "
        "length_scale is close to the specified upper bound "
        "0.001. Increasing the bound and calling fit again may "
        "find a better value."
    )
    with pytest.warns(ConvergenceWarning, match=warning_message):
        gpr.fit(X, y)
    # Sum kernel: one warning per clamped sub-kernel parameter
    # (k1__noise_level at its upper bound, k2__length_scale at its lower).
    kernel_sum = WhiteKernel(noise_level_bounds=[1e-5, 1e-3]) + RBF(
        length_scale_bounds=[1e3, 1e5]
    )
    gpr_sum = GaussianProcessRegressor(kernel=kernel_sum)
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter("always")
        gpr_sum.fit(X, y)
        assert len(record) == 2
        assert issubclass(record[0].category, ConvergenceWarning)
        assert (
            record[0].message.args[0] == "The optimal value found for "
            "dimension 0 of parameter "
            "k1__noise_level is close to the "
            "specified upper bound 0.001. "
            "Increasing the bound and calling "
            "fit again may find a better value."
        )
        assert issubclass(record[1].category, ConvergenceWarning)
        assert (
            record[1].message.args[0] == "The optimal value found for "
            "dimension 0 of parameter "
            "k2__length_scale is close to the "
            "specified lower bound 1000.0. "
            "Decreasing the bound and calling "
            "fit again may find a better value."
        )
    # Anisotropic kernel: one warning per clamped dimension of length_scale.
    X_tile = np.tile(X, 2)
    kernel_dims = RBF(length_scale=[1.0, 2.0], length_scale_bounds=[1e1, 1e2])
    gpr_dims = GaussianProcessRegressor(kernel=kernel_dims)
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter("always")
        gpr_dims.fit(X_tile, y)
        assert len(record) == 2
        assert issubclass(record[0].category, ConvergenceWarning)
        assert (
            record[0].message.args[0] == "The optimal value found for "
            "dimension 0 of parameter "
            "length_scale is close to the "
            "specified lower bound 10.0. "
            "Decreasing the bound and calling "
            "fit again may find a better value."
        )
        assert issubclass(record[1].category, ConvergenceWarning)
        assert (
            record[1].message.args[0] == "The optimal value found for "
            "dimension 1 of parameter "
            "length_scale is close to the "
            "specified lower bound 10.0. "
            "Decreasing the bound and calling "
            "fit again may find a better value."
        )
def test_bound_check_fixed_hyperparameter():
    # Regression test for issue #17943: fitting must not fail when one of
    # the kernel hyperparameters has "fixed" bounds.
    trend = 50.0**2 * RBF(length_scale=50.0)  # long term smooth rising trend
    seasonal = ExpSineSquared(
        length_scale=1.0, periodicity=1.0, periodicity_bounds="fixed"
    )  # seasonal component
    GaussianProcessRegressor(kernel=trend + seasonal).fit(X, y)
@pytest.mark.parametrize("kernel", kernels)
def test_constant_target(kernel):
    """Check that the std. dev. used for target normalization falls back to
    1 for a constant target.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/18318
    A null std. dev. of a constant target used to inject NaN into the
    scaled targets.
    """
    y_flat = np.ones(X.shape[0], dtype=np.float64)
    model = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
    model.fit(X, y_flat)
    assert model._y_train_std == pytest.approx(1.0)
    pred, cov = model.predict(X, return_cov=True)
    assert_allclose(pred, y_flat)
    # atol is needed because the expected value is exactly zero.
    assert_allclose(np.diag(cov), 0.0, atol=1e-9)
    # Multi-target data with one constant column.
    n_samples, n_targets = X.shape[0], 2
    rng = np.random.RandomState(0)
    targets = np.concatenate(
        [
            rng.normal(size=(n_samples, 1)),  # non-constant target
            np.full(shape=(n_samples, 1), fill_value=2),  # constant target
        ],
        axis=1,
    )
    model.fit(X, targets)
    pred_2d, cov_2d = model.predict(X, return_cov=True)
    assert_allclose(pred_2d[:, 1], 2)
    assert_allclose(np.diag(cov_2d[..., 1]), 0.0, atol=1e-9)
    assert pred_2d.shape == (n_samples, n_targets)
    assert cov_2d.shape == (n_samples, n_samples, n_targets)
def test_gpr_consistency_std_cov_non_invertible_kernel():
    """Check the consistency between the returned std. dev. and the covariance.
    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/19936
    Inconsistencies were observed when the kernel cannot be inverted (or
    numerically stable).
    """
    # Deliberately extreme amplitude/length-scales to make the kernel matrix
    # close to singular; optimizer=None keeps these values during fit.
    kernel = C(8.98576054e05, (1e-12, 1e12)) * RBF(
        [5.91326520e02, 1.32584051e03], (1e-12, 1e12)
    ) + WhiteKernel(noise_level=1e-5)
    gpr = GaussianProcessRegressor(kernel=kernel, alpha=0, optimizer=None)
    X_train = np.array(
        [
            [0.0, 0.0],
            [1.54919334, -0.77459667],
            [-1.54919334, 0.0],
            [0.0, -1.54919334],
            [0.77459667, 0.77459667],
            [-0.77459667, 1.54919334],
        ]
    )
    y_train = np.array(
        [
            [-2.14882017e-10],
            [-4.66975823e00],
            [4.01823986e00],
            [-1.30303674e00],
            [-1.35760156e00],
            [3.31215668e00],
        ]
    )
    gpr.fit(X_train, y_train)
    X_test = np.array(
        [
            [-1.93649167, -1.93649167],
            [1.93649167, -1.93649167],
            [-1.93649167, 1.93649167],
            [1.93649167, 1.93649167],
        ]
    )
    # return_std and return_cov take different code paths; they must agree.
    pred1, std = gpr.predict(X_test, return_std=True)
    pred2, cov = gpr.predict(X_test, return_cov=True)
    assert_allclose(std, np.sqrt(np.diagonal(cov)), rtol=1e-5)
@pytest.mark.parametrize(
    "params, err_type, err_msg",
    [
        (
            {"alpha": np.zeros(100)},
            ValueError,
            "alpha must be a scalar or an array with same number of entries as y",
        ),
        (
            {
                "kernel": WhiteKernel(noise_level_bounds=(-np.inf, np.inf)),
                "n_restarts_optimizer": 2,
            },
            ValueError,
            "requires that all bounds are finite",
        ),
    ],
)
def test_gpr_fit_error(params, err_type, err_msg):
    """Check that the expected errors are raised during fit.

    The parameter previously named ``TypeError`` shadowed the builtin
    exception of the same name; it is renamed to ``err_type``.
    """
    gpr = GaussianProcessRegressor(**params)
    with pytest.raises(err_type, match=err_msg):
        gpr.fit(X, y)
def test_gpr_lml_error():
    """Requesting the LML gradient without providing theta must raise."""
    model = GaussianProcessRegressor(kernel=RBF()).fit(X, y)
    expected = "Gradient can only be evaluated for theta!=None"
    with pytest.raises(ValueError, match=expected):
        model.log_marginal_likelihood(eval_gradient=True)
def test_gpr_predict_error():
    """Requesting both return_std and return_cov at once must raise."""
    model = GaussianProcessRegressor(kernel=RBF()).fit(X, y)
    expected = "At most one of return_std or return_cov can be requested."
    with pytest.raises(RuntimeError, match=expected):
        model.predict(X, return_cov=True, return_std=True)
@pytest.mark.parametrize("normalize_y", [True, False])
@pytest.mark.parametrize("n_targets", [None, 1, 10])
def test_predict_shapes(normalize_y, n_targets):
    """Check the shapes of y_mean, y_std, and y_cov in single-output
    (n_targets=None) and multi-output settings, including the n_targets=1
    edge case where the sklearn convention is to squeeze the predictions.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/17394
    https://github.com/scikit-learn/scikit-learn/issues/18065
    https://github.com/scikit-learn/scikit-learn/issues/22174
    """
    rng = np.random.RandomState(1234)
    n_features, n_train, n_test = 6, 9, 7
    train_shape = (n_train,) if n_targets is None else (n_train, n_targets)
    # By convention single-output predictions are squeezed.
    if n_targets is not None and n_targets > 1:
        test_shape = (n_test, n_targets)
    else:
        test_shape = (n_test,)
    X_train = rng.randn(n_train, n_features)
    X_test = rng.randn(n_test, n_features)
    y_train = rng.randn(*train_shape)
    model = GaussianProcessRegressor(normalize_y=normalize_y)
    model.fit(X_train, y_train)
    pred, std = model.predict(X_test, return_std=True)
    _, cov = model.predict(X_test, return_cov=True)
    assert pred.shape == test_shape
    assert std.shape == test_shape
    assert cov.shape == (n_test,) + test_shape
@pytest.mark.parametrize("normalize_y", [True, False])
@pytest.mark.parametrize("n_targets", [None, 1, 10])
def test_sample_y_shapes(normalize_y, n_targets):
    """Check the shapes of y_samples in single-output (n_targets=None) and
    multi-output settings, including the n_targets=1 edge case where the
    sklearn convention is to squeeze the predictions.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/22175
    """
    rng = np.random.RandomState(1234)
    n_features, n_train = 6, 9
    n_test_points = 7  # spatial locations to predict at
    n_draws = 5  # sample predictions per test point
    train_shape = (n_train,) if n_targets is None else (n_train, n_targets)
    # By convention single-output data is squeezed upon prediction.
    if n_targets is not None and n_targets > 1:
        expected_shape = (n_test_points, n_targets, n_draws)
    else:
        expected_shape = (n_test_points, n_draws)
    X_train = rng.randn(n_train, n_features)
    X_test = rng.randn(n_test_points, n_features)
    y_train = rng.randn(*train_shape)
    model = GaussianProcessRegressor(normalize_y=normalize_y)
    # FIXME: before fitting, the estimator has no information regarding the
    # number of targets and defaults to 1, which is inconsistent with the
    # post-fit shape. Enable this assert once the following issue is fixed:
    # https://github.com/scikit-learn/scikit-learn/issues/22430
    # y_samples = model.sample_y(X_test, n_samples=n_draws)
    # assert y_samples.shape == expected_shape
    model.fit(X_train, y_train)
    y_samples = model.sample_y(X_test, n_samples=n_draws)
    assert y_samples.shape == expected_shape
@pytest.mark.parametrize("n_targets", [None, 1, 2, 3])
@pytest.mark.parametrize("n_samples", [1, 5])
def test_sample_y_shape_with_prior(n_targets, n_samples):
    """`sample_y` must return the same shape before and after `fit`."""
    rng = np.random.RandomState(1024)
    X_local = rng.randn(10, 3)
    y_local = rng.randn(10, 1 if n_targets is None else n_targets)
    model = GaussianProcessRegressor(n_targets=n_targets)
    prior_shape = model.sample_y(X_local, n_samples=n_samples).shape
    model.fit(X_local, y_local)
    posterior_shape = model.sample_y(X_local, n_samples=n_samples).shape
    assert prior_shape == posterior_shape
@pytest.mark.parametrize("n_targets", [None, 1, 2, 3])
def test_predict_shape_with_prior(n_targets):
    """`predict` must return the same shapes before and after `fit`."""
    rng = np.random.RandomState(1024)
    n_sample = 10
    X_local = rng.randn(n_sample, 3)
    y_local = rng.randn(n_sample, 1 if n_targets is None else n_targets)
    model = GaussianProcessRegressor(n_targets=n_targets)
    mean_prior, cov_prior = model.predict(X_local, return_cov=True)
    _, std_prior = model.predict(X_local, return_std=True)
    model.fit(X_local, y_local)
    mean_post, cov_post = model.predict(X_local, return_cov=True)
    _, std_post = model.predict(X_local, return_std=True)
    assert mean_prior.shape == mean_post.shape
    assert cov_prior.shape == cov_post.shape
    assert std_prior.shape == std_post.shape
def test_n_targets_error():
    """Fitting must fail when the target count in `y` contradicts the
    declared `n_targets`."""
    rng = np.random.RandomState(0)
    X_local = rng.randn(10, 3)
    y_local = rng.randn(10, 2)
    model = GaussianProcessRegressor(n_targets=1)
    with pytest.raises(ValueError, match="The number of targets seen in `y`"):
        model.fit(X_local, y_local)
class CustomKernel(C):
    """
    A custom kernel that has a diag method that returns the first column of the
    input matrix X. This is a helper for the test to check that the input
    matrix X is not mutated.
    """
    def diag(self, X):
        # Returns a view into X so any in-place modification of the result
        # would also mutate the caller's array and be caught by the test.
        return X[:, 0]
def test_gpr_predict_input_not_modified():
    """
    Ensure predict(return_std=True) does not mutate its input X.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/24340
    """
    model = GaussianProcessRegressor(kernel=CustomKernel()).fit(X, y)
    X2_before = np.copy(X2)
    model.predict(X2, return_std=True)
    assert_allclose(X2, X2_before)
@pytest.mark.parametrize("kernel", kernels)
def test_gpr_predict_no_cov_no_std_return(kernel):
    """
    With both return_cov=False and return_std=False, predict returns only
    the posterior mean.
    """
    model = GaussianProcessRegressor(kernel=kernel).fit(X, y)
    pred = model.predict(X, return_cov=False, return_std=False)
    assert_allclose(pred, y)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/model_selection/_plot.py | sklearn/model_selection/_plot.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from sklearn.model_selection._validation import learning_curve, validation_curve
from sklearn.utils._optional_dependencies import check_matplotlib_support
from sklearn.utils._plotting import _interval_max_min_ratio, _validate_score_name
class _BaseCurveDisplay:
    """Shared plotting machinery for learning/validation curve displays.

    Subclasses provide `train_scores`, `test_scores`, and `score_name`
    attributes; `_plot_curve` renders mean scores against `x_data` with an
    optional standard-deviation band or error bars.
    """
    def _plot_curve(
        self,
        x_data,
        *,
        ax=None,
        negate_score=False,
        score_name=None,
        score_type="test",
        std_display_style="fill_between",
        line_kw=None,
        fill_between_kw=None,
        errorbar_kw=None,
    ):
        # Draw the curve(s) on `ax` (a new figure/axes if None) and record
        # the created artists on self.lines_ / self.errorbar_ /
        # self.fill_between_; exactly one style's attributes are non-None.
        check_matplotlib_support(f"{self.__class__.__name__}.plot")
        import matplotlib.pyplot as plt
        if ax is None:
            _, ax = plt.subplots()
        if negate_score:
            # Flip sign, e.g. to display `neg_*` scorers as positive errors.
            train_scores, test_scores = -self.train_scores, -self.test_scores
        else:
            train_scores, test_scores = self.train_scores, self.test_scores
        if std_display_style not in ("errorbar", "fill_between", None):
            raise ValueError(
                f"Unknown std_display_style: {std_display_style}. Should be one of"
                " 'errorbar', 'fill_between', or None."
            )
        if score_type not in ("test", "train", "both"):
            raise ValueError(
                f"Unknown score_type: {score_type}. Should be one of 'test', "
                "'train', or 'both'."
            )
        # Map legend label -> (n_ticks, n_cv_folds) score array to plot.
        if score_type == "train":
            scores = {"Train": train_scores}
        elif score_type == "test":
            scores = {"Test": test_scores}
        else:  # score_type == "both"
            scores = {"Train": train_scores, "Test": test_scores}
        if std_display_style in ("fill_between", None):
            # plot the mean score
            if line_kw is None:
                line_kw = {}
            self.lines_ = []
            for line_label, score in scores.items():
                self.lines_.append(
                    *ax.plot(
                        x_data,
                        score.mean(axis=1),
                        label=line_label,
                        **line_kw,
                    )
                )
            self.errorbar_ = None
            self.fill_between_ = None  # overwritten below by fill_between
        if std_display_style == "errorbar":
            if errorbar_kw is None:
                errorbar_kw = {}
            self.errorbar_ = []
            for line_label, score in scores.items():
                self.errorbar_.append(
                    ax.errorbar(
                        x_data,
                        score.mean(axis=1),
                        score.std(axis=1),
                        label=line_label,
                        **errorbar_kw,
                    )
                )
            self.lines_, self.fill_between_ = None, None
        elif std_display_style == "fill_between":
            if fill_between_kw is None:
                fill_between_kw = {}
            # Caller-provided kwargs override the default transparency.
            default_fill_between_kw = {"alpha": 0.5}
            fill_between_kw = {**default_fill_between_kw, **fill_between_kw}
            self.fill_between_ = []
            for line_label, score in scores.items():
                self.fill_between_.append(
                    ax.fill_between(
                        x_data,
                        score.mean(axis=1) - score.std(axis=1),
                        score.mean(axis=1) + score.std(axis=1),
                        **fill_between_kw,
                    )
                )
        score_name = self.score_name if score_name is None else score_name
        ax.legend()
        # We found that a ratio, smaller or bigger than 5, between the largest and
        # smallest gap of the x values is a good indicator to choose between linear
        # and log scale.
        if _interval_max_min_ratio(x_data) > 5:
            # symlog handles non-positive x values that plain log cannot.
            xscale = "symlog" if x_data.min() <= 0 else "log"
        else:
            xscale = "linear"
        ax.set_xscale(xscale)
        ax.set_ylabel(f"{score_name}")
        self.ax_ = ax
        self.figure_ = ax.figure
class LearningCurveDisplay(_BaseCurveDisplay):
"""Learning Curve visualization.
It is recommended to use
:meth:`~sklearn.model_selection.LearningCurveDisplay.from_estimator` to
create a :class:`~sklearn.model_selection.LearningCurveDisplay` instance.
All parameters are stored as attributes.
Read more in the :ref:`User Guide <visualizations>` for general information
about the visualization API and
:ref:`detailed documentation <learning_curve>` regarding the learning
curve visualization.
.. versionadded:: 1.2
Parameters
----------
train_sizes : ndarray of shape (n_unique_ticks,)
Numbers of training examples that has been used to generate the
learning curve.
train_scores : ndarray of shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : ndarray of shape (n_ticks, n_cv_folds)
Scores on test set.
score_name : str, default=None
The name of the score used in `learning_curve`. It will override the name
inferred from the `scoring` parameter. If `score` is `None`, we use `"Score"` if
`negate_score` is `False` and `"Negative score"` otherwise. If `scoring` is a
string or a callable, we infer the name. We replace `_` by spaces and capitalize
the first letter. We remove `neg_` and replace it by `"Negative"` if
`negate_score` is `False` or just remove it otherwise.
Attributes
----------
ax_ : matplotlib Axes
Axes with the learning curve.
figure_ : matplotlib Figure
Figure containing the learning curve.
errorbar_ : list of matplotlib Artist or None
When the `std_display_style` is `"errorbar"`, this is a list of
`matplotlib.container.ErrorbarContainer` objects. If another style is
used, `errorbar_` is `None`.
lines_ : list of matplotlib Artist or None
When the `std_display_style` is `"fill_between"`, this is a list of
`matplotlib.lines.Line2D` objects corresponding to the mean train and
test scores. If another style is used, `line_` is `None`.
fill_between_ : list of matplotlib Artist or None
When the `std_display_style` is `"fill_between"`, this is a list of
`matplotlib.collections.PolyCollection` objects. If another style is
used, `fill_between_` is `None`.
See Also
--------
sklearn.model_selection.learning_curve : Compute the learning curve.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import LearningCurveDisplay, learning_curve
>>> from sklearn.tree import DecisionTreeClassifier
>>> X, y = load_iris(return_X_y=True)
>>> tree = DecisionTreeClassifier(random_state=0)
>>> train_sizes, train_scores, test_scores = learning_curve(
... tree, X, y)
>>> display = LearningCurveDisplay(train_sizes=train_sizes,
... train_scores=train_scores, test_scores=test_scores, score_name="Score")
>>> display.plot()
<...>
>>> plt.show()
"""
    def __init__(self, *, train_sizes, train_scores, test_scores, score_name=None):
        # Store the precomputed learning-curve data as-is; parameters are
        # documented in the class docstring. No validation or plotting happens
        # here -- rendering is deferred to `plot`.
        self.train_sizes = train_sizes
        self.train_scores = train_scores
        self.test_scores = test_scores
        self.score_name = score_name
def plot(
self,
ax=None,
*,
negate_score=False,
score_name=None,
score_type="both",
std_display_style="fill_between",
line_kw=None,
fill_between_kw=None,
errorbar_kw=None,
):
"""Plot visualization.
Parameters
----------
ax : matplotlib Axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
negate_score : bool, default=False
Whether or not to negate the scores obtained through
:func:`~sklearn.model_selection.learning_curve`. This is
particularly useful when using the error denoted by `neg_*` in
`scikit-learn`.
score_name : str, default=None
The name of the score used to decorate the y-axis of the plot. It will
override the name inferred from the `scoring` parameter. If `score` is
`None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"`
otherwise. If `scoring` is a string or a callable, we infer the name. We
replace `_` by spaces and capitalize the first letter. We remove `neg_` and
replace it by `"Negative"` if `negate_score` is
`False` or just remove it otherwise.
score_type : {"test", "train", "both"}, default="both"
The type of score to plot. Can be one of `"test"`, `"train"`, or
`"both"`.
std_display_style : {"errorbar", "fill_between"} or None, default="fill_between"
The style used to display the score standard deviation around the
mean score. If None, no standard deviation representation is
displayed.
line_kw : dict, default=None
Additional keyword arguments passed to the `plt.plot` used to draw
the mean score.
fill_between_kw : dict, default=None
Additional keyword arguments passed to the `plt.fill_between` used
to draw the score standard deviation.
errorbar_kw : dict, default=None
Additional keyword arguments passed to the `plt.errorbar` used to
draw mean score and standard deviation score.
Returns
-------
display : :class:`~sklearn.model_selection.LearningCurveDisplay`
Object that stores computed values.
"""
self._plot_curve(
self.train_sizes,
ax=ax,
negate_score=negate_score,
score_name=score_name,
score_type=score_type,
std_display_style=std_display_style,
line_kw=line_kw,
fill_between_kw=fill_between_kw,
errorbar_kw=errorbar_kw,
)
self.ax_.set_xlabel("Number of samples in the training set")
return self
    @classmethod
    def from_estimator(
        cls,
        estimator,
        X,
        y,
        *,
        groups=None,
        train_sizes=np.linspace(0.1, 1.0, 5),
        cv=None,
        scoring=None,
        exploit_incremental_learning=False,
        n_jobs=None,
        pre_dispatch="all",
        verbose=0,
        shuffle=False,
        random_state=None,
        error_score=np.nan,
        fit_params=None,
        ax=None,
        negate_score=False,
        score_name=None,
        score_type="both",
        std_display_style="fill_between",
        line_kw=None,
        fill_between_kw=None,
        errorbar_kw=None,
    ):
        """Create a learning curve display from an estimator.

        Read more in the :ref:`User Guide <visualizations>` for general
        information about the visualization API and :ref:`detailed
        documentation <learning_curve>` regarding the learning curve
        visualization.

        Parameters
        ----------
        estimator : object type that implements the "fit" and "predict" methods
            An object of that type which is cloned for each validation.

        X : array-like of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples and
            `n_features` is the number of features.

        y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
            Target relative to X for classification or regression;
            None for unsupervised learning.

        groups : array-like of shape (n_samples,), default=None
            Group labels for the samples used while splitting the dataset into
            train/test set. Only used in conjunction with a "Group" :term:`cv`
            instance (e.g., :class:`GroupKFold`).

        train_sizes : array-like of shape (n_ticks,), \
                default=np.linspace(0.1, 1.0, 5)
            Relative or absolute numbers of training examples that will be used
            to generate the learning curve. If the dtype is float, it is
            regarded as a fraction of the maximum size of the training set
            (that is determined by the selected validation method), i.e. it has
            to be within (0, 1]. Otherwise it is interpreted as absolute sizes
            of the training sets. Note that for classification the number of
            samples usually have to be big enough to contain at least one
            sample from each class.

        cv : int, cross-validation generator or an iterable, default=None
            Determines the cross-validation splitting strategy.
            Possible inputs for cv are:

            - None, to use the default 5-fold cross validation,
            - int, to specify the number of folds in a `(Stratified)KFold`,
            - :term:`CV splitter`,
            - An iterable yielding (train, test) splits as arrays of indices.

            For int/None inputs, if the estimator is a classifier and `y` is
            either binary or multiclass,
            :class:`~sklearn.model_selection.StratifiedKFold` is used. In all
            other cases, :class:`~sklearn.model_selection.KFold` is used. These
            splitters are instantiated with `shuffle=False` so the splits will
            be the same across calls.

            Refer :ref:`User Guide <cross_validation>` for the various
            cross-validation strategies that can be used here.

        scoring : str or callable, default=None
            The scoring method to use when calculating the learning curve. Options:

            - str: see :ref:`scoring_string_names` for options.
            - callable: a scorer callable object (e.g., function) with signature
              ``scorer(estimator, X, y)``. See :ref:`scoring_callable` for details.
            - `None`: the `estimator`'s
              :ref:`default evaluation criterion <scoring_api_overview>` is used.

        exploit_incremental_learning : bool, default=False
            If the estimator supports incremental learning, this will be
            used to speed up fitting for different training set sizes.

        n_jobs : int, default=None
            Number of jobs to run in parallel. Training the estimator and
            computing the score are parallelized over the different training
            and test sets. `None` means 1 unless in a
            :obj:`joblib.parallel_backend` context. `-1` means using all
            processors. See :term:`Glossary <n_jobs>` for more details.

        pre_dispatch : int or str, default='all'
            Number of predispatched jobs for parallel execution (default is
            all). The option can reduce the allocated memory. The str can
            be an expression like '2*n_jobs'.

        verbose : int, default=0
            Controls the verbosity: the higher, the more messages.

        shuffle : bool, default=False
            Whether to shuffle training data before taking prefixes of it
            based on `train_sizes`.

        random_state : int, RandomState instance or None, default=None
            Used when `shuffle` is True. Pass an int for reproducible
            output across multiple function calls.
            See :term:`Glossary <random_state>`.

        error_score : 'raise' or numeric, default=np.nan
            Value to assign to the score if an error occurs in estimator
            fitting. If set to 'raise', the error is raised. If a numeric value
            is given, FitFailedWarning is raised.

        fit_params : dict, default=None
            Parameters to pass to the fit method of the estimator.

        ax : matplotlib Axes, default=None
            Axes object to plot on. If `None`, a new figure and axes is
            created.

        negate_score : bool, default=False
            Whether or not to negate the scores obtained through
            :func:`~sklearn.model_selection.learning_curve`. This is
            particularly useful when using the error denoted by `neg_*` in
            `scikit-learn`.

        score_name : str, default=None
            The name of the score used to decorate the y-axis of the plot. It will
            override the name inferred from the `scoring` parameter. If `scoring` is
            `None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"`
            otherwise. If `scoring` is a string or a callable, we infer the name. We
            replace `_` by spaces and capitalize the first letter. We remove `neg_` and
            replace it by `"Negative"` if `negate_score` is
            `False` or just remove it otherwise.

        score_type : {"test", "train", "both"}, default="both"
            The type of score to plot. Can be one of `"test"`, `"train"`, or
            `"both"`.

        std_display_style : {"errorbar", "fill_between"} or None, default="fill_between"
            The style used to display the score standard deviation around the
            mean score. If `None`, no representation of the standard deviation
            is displayed.

        line_kw : dict, default=None
            Additional keyword arguments passed to the `plt.plot` used to draw
            the mean score.

        fill_between_kw : dict, default=None
            Additional keyword arguments passed to the `plt.fill_between` used
            to draw the score standard deviation.

        errorbar_kw : dict, default=None
            Additional keyword arguments passed to the `plt.errorbar` used to
            draw mean score and standard deviation score.

        Returns
        -------
        display : :class:`~sklearn.model_selection.LearningCurveDisplay`
            Object that stores computed values.

        Examples
        --------
        >>> import matplotlib.pyplot as plt
        >>> from sklearn.datasets import load_iris
        >>> from sklearn.model_selection import LearningCurveDisplay
        >>> from sklearn.tree import DecisionTreeClassifier
        >>> X, y = load_iris(return_X_y=True)
        >>> tree = DecisionTreeClassifier(random_state=0)
        >>> LearningCurveDisplay.from_estimator(tree, X, y)
        <...>
        >>> plt.show()
        """
        # Fail early with an informative error if matplotlib is not installed.
        check_matplotlib_support(f"{cls.__name__}.from_estimator")

        # Resolve the y-axis label up front so the display object stores the
        # final score name rather than the raw `scoring` argument.
        score_name = _validate_score_name(score_name, scoring, negate_score)

        train_sizes, train_scores, test_scores = learning_curve(
            estimator,
            X,
            y,
            groups=groups,
            train_sizes=train_sizes,
            cv=cv,
            scoring=scoring,
            exploit_incremental_learning=exploit_incremental_learning,
            n_jobs=n_jobs,
            pre_dispatch=pre_dispatch,
            verbose=verbose,
            shuffle=shuffle,
            random_state=random_state,
            error_score=error_score,
            return_times=False,
            params=fit_params,
        )

        viz = cls(
            train_sizes=train_sizes,
            train_scores=train_scores,
            test_scores=test_scores,
            score_name=score_name,
        )
        # Delegate rendering to `plot`; `score_name` is already stored on the
        # instance and therefore not forwarded here.
        return viz.plot(
            ax=ax,
            negate_score=negate_score,
            score_type=score_type,
            std_display_style=std_display_style,
            line_kw=line_kw,
            fill_between_kw=fill_between_kw,
            errorbar_kw=errorbar_kw,
        )
class ValidationCurveDisplay(_BaseCurveDisplay):
"""Validation Curve visualization.
It is recommended to use
:meth:`~sklearn.model_selection.ValidationCurveDisplay.from_estimator` to
create a :class:`~sklearn.model_selection.ValidationCurveDisplay` instance.
All parameters are stored as attributes.
Read more in the :ref:`User Guide <visualizations>` for general information
about the visualization API and :ref:`detailed documentation
<validation_curve>` regarding the validation curve visualization.
.. versionadded:: 1.3
Parameters
----------
param_name : str
Name of the parameter that has been varied.
param_range : array-like of shape (n_ticks,)
The values of the parameter that have been evaluated.
train_scores : ndarray of shape (n_ticks, n_cv_folds)
Scores on training sets.
test_scores : ndarray of shape (n_ticks, n_cv_folds)
Scores on test set.
score_name : str, default=None
The name of the score used in `validation_curve`. It will override the name
inferred from the `scoring` parameter. If `score` is `None`, we use `"Score"` if
`negate_score` is `False` and `"Negative score"` otherwise. If `scoring` is a
string or a callable, we infer the name. We replace `_` by spaces and capitalize
the first letter. We remove `neg_` and replace it by `"Negative"` if
`negate_score` is `False` or just remove it otherwise.
Attributes
----------
ax_ : matplotlib Axes
Axes with the validation curve.
figure_ : matplotlib Figure
Figure containing the validation curve.
errorbar_ : list of matplotlib Artist or None
When the `std_display_style` is `"errorbar"`, this is a list of
`matplotlib.container.ErrorbarContainer` objects. If another style is
used, `errorbar_` is `None`.
lines_ : list of matplotlib Artist or None
When the `std_display_style` is `"fill_between"`, this is a list of
`matplotlib.lines.Line2D` objects corresponding to the mean train and
    test scores. If another style is used, `lines_` is `None`.
fill_between_ : list of matplotlib Artist or None
When the `std_display_style` is `"fill_between"`, this is a list of
`matplotlib.collections.PolyCollection` objects. If another style is
used, `fill_between_` is `None`.
See Also
--------
sklearn.model_selection.validation_curve : Compute the validation curve.
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from sklearn.datasets import make_classification
>>> from sklearn.model_selection import ValidationCurveDisplay, validation_curve
>>> from sklearn.linear_model import LogisticRegression
>>> X, y = make_classification(n_samples=1_000, random_state=0)
>>> logistic_regression = LogisticRegression()
>>> param_name, param_range = "C", np.logspace(-8, 3, 10)
>>> train_scores, test_scores = validation_curve(
... logistic_regression, X, y, param_name=param_name, param_range=param_range
... )
>>> display = ValidationCurveDisplay(
... param_name=param_name, param_range=param_range,
... train_scores=train_scores, test_scores=test_scores, score_name="Score"
... )
>>> display.plot()
<...>
>>> plt.show()
"""
    def __init__(
        self, *, param_name, param_range, train_scores, test_scores, score_name=None
    ):
        # Store the precomputed validation-curve data as-is; parameters are
        # documented in the class docstring. No validation or plotting happens
        # here -- rendering is deferred to `plot`.
        self.param_name = param_name
        self.param_range = param_range
        self.train_scores = train_scores
        self.test_scores = test_scores
        self.score_name = score_name
def plot(
self,
ax=None,
*,
negate_score=False,
score_name=None,
score_type="both",
std_display_style="fill_between",
line_kw=None,
fill_between_kw=None,
errorbar_kw=None,
):
"""Plot visualization.
Parameters
----------
ax : matplotlib Axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
negate_score : bool, default=False
Whether or not to negate the scores obtained through
:func:`~sklearn.model_selection.validation_curve`. This is
particularly useful when using the error denoted by `neg_*` in
`scikit-learn`.
score_name : str, default=None
The name of the score used to decorate the y-axis of the plot. It will
override the name inferred from the `scoring` parameter. If `score` is
`None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"`
otherwise. If `scoring` is a string or a callable, we infer the name. We
replace `_` by spaces and capitalize the first letter. We remove `neg_` and
replace it by `"Negative"` if `negate_score` is
`False` or just remove it otherwise.
score_type : {"test", "train", "both"}, default="both"
The type of score to plot. Can be one of `"test"`, `"train"`, or
`"both"`.
std_display_style : {"errorbar", "fill_between"} or None, default="fill_between"
The style used to display the score standard deviation around the
mean score. If None, no standard deviation representation is
displayed.
line_kw : dict, default=None
Additional keyword arguments passed to the `plt.plot` used to draw
the mean score.
fill_between_kw : dict, default=None
Additional keyword arguments passed to the `plt.fill_between` used
to draw the score standard deviation.
errorbar_kw : dict, default=None
Additional keyword arguments passed to the `plt.errorbar` used to
draw mean score and standard deviation score.
Returns
-------
display : :class:`~sklearn.model_selection.ValidationCurveDisplay`
Object that stores computed values.
"""
self._plot_curve(
self.param_range,
ax=ax,
negate_score=negate_score,
score_name=score_name,
score_type=score_type,
std_display_style=std_display_style,
line_kw=line_kw,
fill_between_kw=fill_between_kw,
errorbar_kw=errorbar_kw,
)
self.ax_.set_xlabel(f"{self.param_name}")
return self
@classmethod
def from_estimator(
cls,
estimator,
X,
y,
*,
param_name,
param_range,
groups=None,
cv=None,
scoring=None,
n_jobs=None,
pre_dispatch="all",
verbose=0,
error_score=np.nan,
fit_params=None,
ax=None,
negate_score=False,
score_name=None,
score_type="both",
std_display_style="fill_between",
line_kw=None,
fill_between_kw=None,
errorbar_kw=None,
):
"""Create a validation curve display from an estimator.
Read more in the :ref:`User Guide <visualizations>` for general
information about the visualization API and :ref:`detailed
documentation <validation_curve>` regarding the validation curve
visualization.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
Target relative to X for classification or regression;
None for unsupervised learning.
param_name : str
Name of the parameter that will be varied.
param_range : array-like of shape (n_values,)
The values of the parameter that will be evaluated.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`GroupKFold`).
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- int, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For int/None inputs, if the estimator is a classifier and `y` is
either binary or multiclass,
:class:`~sklearn.model_selection.StratifiedKFold` is used. In all
other cases, :class:`~sklearn.model_selection.KFold` is used. These
splitters are instantiated with `shuffle=False` so the splits will
be the same across calls.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : str or callable, default=None
Scoring method to use when computing the validation curve. Options:
- str: see :ref:`scoring_string_names` for options.
- callable: a scorer callable object (e.g., function) with signature
``scorer(estimator, X, y)``. See :ref:`scoring_callable` for details.
- `None`: the `estimator`'s
:ref:`default evaluation criterion <scoring_api_overview>` is used.
n_jobs : int, default=None
Number of jobs to run in parallel. Training the estimator and
computing the score are parallelized over the different training
and test sets. `None` means 1 unless in a
:obj:`joblib.parallel_backend` context. `-1` means using all
processors. See :term:`Glossary <n_jobs>` for more details.
pre_dispatch : int or str, default='all'
Number of predispatched jobs for parallel execution (default is
all). The option can reduce the allocated memory. The str can
be an expression like '2*n_jobs'.
verbose : int, default=0
Controls the verbosity: the higher, the more messages.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator
fitting. If set to 'raise', the error is raised. If a numeric value
is given, FitFailedWarning is raised.
fit_params : dict, default=None
Parameters to pass to the fit method of the estimator.
ax : matplotlib Axes, default=None
Axes object to plot on. If `None`, a new figure and axes is
created.
negate_score : bool, default=False
Whether or not to negate the scores obtained through
:func:`~sklearn.model_selection.validation_curve`. This is
particularly useful when using the error denoted by `neg_*` in
`scikit-learn`.
score_name : str, default=None
The name of the score used to decorate the y-axis of the plot. It will
override the name inferred from the `scoring` parameter. If `score` is
`None`, we use `"Score"` if `negate_score` is `False` and `"Negative score"`
otherwise. If `scoring` is a string or a callable, we infer the name. We
replace `_` by spaces and capitalize the first letter. We remove `neg_` and
replace it by `"Negative"` if `negate_score` is
`False` or just remove it otherwise.
score_type : {"test", "train", "both"}, default="both"
The type of score to plot. Can be one of `"test"`, `"train"`, or
`"both"`.
std_display_style : {"errorbar", "fill_between"} or None, default="fill_between"
The style used to display the score standard deviation around the
mean score. If `None`, no representation of the standard deviation
is displayed.
line_kw : dict, default=None
Additional keyword arguments passed to the `plt.plot` used to draw
the mean score.
fill_between_kw : dict, default=None
Additional keyword arguments passed to the `plt.fill_between` used
to draw the score standard deviation.
errorbar_kw : dict, default=None
Additional keyword arguments passed to the `plt.errorbar` used to
draw mean score and standard deviation score.
Returns
-------
display : :class:`~sklearn.model_selection.ValidationCurveDisplay`
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/model_selection/_split.py | sklearn/model_selection/_split.py | """
The :mod:`sklearn.model_selection._split` module includes classes and
functions to split the data based on a preset strategy.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numbers
import warnings
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from collections.abc import Iterable
from inspect import signature
from itertools import chain, combinations
from math import ceil, floor
import numpy as np
from scipy.special import comb
from sklearn.utils import (
_safe_indexing,
check_random_state,
indexable,
metadata_routing,
)
from sklearn.utils._array_api import (
_convert_to_numpy,
get_namespace,
get_namespace_and_device,
move_to,
)
from sklearn.utils._param_validation import Interval, RealNotInt, validate_params
from sklearn.utils.extmath import _approximate_mode
from sklearn.utils.metadata_routing import _MetadataRequester
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import _num_samples, check_array, column_or_1d
__all__ = [
"BaseCrossValidator",
"GroupKFold",
"GroupShuffleSplit",
"KFold",
"LeaveOneGroupOut",
"LeaveOneOut",
"LeavePGroupsOut",
"LeavePOut",
"PredefinedSplit",
"RepeatedKFold",
"RepeatedStratifiedKFold",
"ShuffleSplit",
"StratifiedGroupKFold",
"StratifiedKFold",
"StratifiedShuffleSplit",
"check_cv",
"train_test_split",
]
class _UnsupportedGroupCVMixin:
    """Mixin for splitters whose strategy does not use ``groups``."""

    def split(self, X, y=None, groups=None):
        """Generate indices to split data into training and test set.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples
            and `n_features` is the number of features.

        y : array-like of shape (n_samples,), default=None
            The target variable for supervised learning problems.

        groups : array-like of shape (n_samples,), default=None
            Always ignored, exists for API compatibility.

        Yields
        ------
        train : ndarray
            The training set indices for that split.

        test : ndarray
            The testing set indices for that split.
        """
        # ``groups`` is accepted for API compatibility only; warn the caller
        # that it plays no role in how this splitter partitions the data.
        if groups is not None:
            message = f"The groups parameter is ignored by {self.__class__.__name__}"
            warnings.warn(message, UserWarning)
        return super().split(X, y, groups=groups)
class GroupsConsumerMixin(_MetadataRequester):
    """A mixin that makes a splitter request ``groups`` by default.

    This mixin sets the metadata request for ``groups`` in ``split`` to
    ``True``, so group-aware splitters receive ``groups`` through metadata
    routing without requiring an explicit ``set_split_request`` call.

    .. versionadded:: 1.3
    """

    __metadata_request__split = {"groups": True}
class BaseCrossValidator(_MetadataRequester, metaclass=ABCMeta):
    """Base class for all cross-validators.

    Implementations must define `_iter_test_masks` or `_iter_test_indices`.
    """

    # By default, CV splitters do not accept a ``groups`` argument; splitters
    # that do should inherit from ``GroupsConsumerMixin`` instead. Marking the
    # key as UNUSED also prevents a ``set_split_request`` method from being
    # generated for splitters that do not support ``groups``.
    __metadata_request__split = {"groups": metadata_routing.UNUSED}

    def split(self, X, y=None, groups=None):
        """Generate indices to split data into training and test set.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples
            and `n_features` is the number of features.

        y : array-like of shape (n_samples,)
            The target variable for supervised learning problems.

        groups : array-like of shape (n_samples,), default=None
            Group labels for the samples used while splitting the dataset into
            train/test set.

        Yields
        ------
        train : ndarray
            The training set indices for that split.

        test : ndarray
            The testing set indices for that split.
        """
        X, y, groups = indexable(X, y, groups)
        sample_indices = np.arange(_num_samples(X))
        # Each boolean test mask is converted into a pair of complementary
        # integer index arrays.
        for test_mask in self._iter_test_masks(X, y, groups):
            yield sample_indices[~test_mask], sample_indices[test_mask]

    # Subclasses must implement either ``_iter_test_masks`` or
    # ``_iter_test_indices``; consequently, neither can be abstract here.
    def _iter_test_masks(self, X=None, y=None, groups=None):
        """Generate boolean masks corresponding to test sets.

        By default, delegates to ``_iter_test_indices(X, y, groups)``.
        """
        n_samples = _num_samples(X)
        for test_index in self._iter_test_indices(X, y, groups):
            mask = np.zeros(n_samples, dtype=bool)
            mask[test_index] = True
            yield mask

    def _iter_test_indices(self, X=None, y=None, groups=None):
        """Generate integer indices corresponding to test sets."""
        raise NotImplementedError

    @abstractmethod
    def get_n_splits(self, X=None, y=None, groups=None):
        """Returns the number of splitting iterations in the cross-validator."""

    def __repr__(self):
        return _build_repr(self)
class LeaveOneOut(_UnsupportedGroupCVMixin, BaseCrossValidator):
    """Leave-One-Out cross-validator.

    Provides train/test indices to split data in train/test sets. Each
    sample is used exactly once as a singleton test set, with all the
    remaining samples forming the training set.

    Note: ``LeaveOneOut()`` is equivalent to ``KFold(n_splits=n)`` and
    ``LeavePOut(p=1)`` where ``n`` is the number of samples.

    Because the number of test sets equals the number of samples, this
    cross-validation method can be very costly. For large datasets one
    should favor :class:`KFold`, :class:`ShuffleSplit` or
    :class:`StratifiedKFold`.

    Read more in the :ref:`User Guide <leave_one_out>`.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.model_selection import LeaveOneOut
    >>> X = np.array([[1, 2], [3, 4]])
    >>> y = np.array([1, 2])
    >>> loo = LeaveOneOut()
    >>> loo.get_n_splits(X)
    2
    >>> print(loo)
    LeaveOneOut()
    >>> for i, (train_index, test_index) in enumerate(loo.split(X)):
    ...     print(f"Fold {i}:")
    ...     print(f"  Train: index={train_index}")
    ...     print(f"  Test:  index={test_index}")
    Fold 0:
      Train: index=[1]
      Test:  index=[0]
    Fold 1:
      Train: index=[0]
      Test:  index=[1]

    See Also
    --------
    LeaveOneGroupOut : For splitting the data according to explicit,
        domain-specific stratification of the dataset.

    GroupKFold : K-fold iterator variant with non-overlapping groups.
    """

    def _iter_test_indices(self, X, y=None, groups=None):
        # At least two samples are needed to form a non-empty training set.
        n_samples = _num_samples(X)
        if n_samples <= 1:
            msg = "Cannot perform LeaveOneOut with n_samples={}.".format(n_samples)
            raise ValueError(msg)
        # Every sample index is, in turn, the whole test set.
        return range(n_samples)

    def get_n_splits(self, X, y=None, groups=None):
        """Returns the number of splitting iterations in the cross-validator.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples
            and `n_features` is the number of features.

        y : array-like of shape (n_samples,), default=None
            Always ignored, exists for API compatibility.

        groups : array-like of shape (n_samples,), default=None
            Always ignored, exists for API compatibility.

        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        if X is None:
            raise ValueError("The 'X' parameter should not be None.")
        # One split per sample.
        return _num_samples(X)
class LeavePOut(_UnsupportedGroupCVMixin, BaseCrossValidator):
    """Leave-P-Out cross-validator.

    Provides train/test indices to split data in train/test sets. Every
    distinct subset of size p is used once as the test set, with the
    remaining n - p samples forming the training set.

    Note: ``LeavePOut(p)`` is NOT equivalent to
    ``KFold(n_splits=n_samples // p)`` which creates non-overlapping test sets.

    Because the number of iterations grows combinatorically with the number
    of samples, this cross-validation method can be very costly. For large
    datasets one should favor :class:`KFold`, :class:`StratifiedKFold` or
    :class:`ShuffleSplit`.

    Read more in the :ref:`User Guide <leave_p_out>`.

    Parameters
    ----------
    p : int
        Size of the test sets. Must be strictly less than the number of
        samples.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.model_selection import LeavePOut
    >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    >>> y = np.array([1, 2, 3, 4])
    >>> lpo = LeavePOut(2)
    >>> lpo.get_n_splits(X)
    6
    >>> print(lpo)
    LeavePOut(p=2)
    >>> for i, (train_index, test_index) in enumerate(lpo.split(X)):
    ...     print(f"Fold {i}:")
    ...     print(f"  Train: index={train_index}")
    ...     print(f"  Test:  index={test_index}")
    Fold 0:
      Train: index=[2 3]
      Test:  index=[0 1]
    Fold 1:
      Train: index=[1 3]
      Test:  index=[0 2]
    Fold 2:
      Train: index=[1 2]
      Test:  index=[0 3]
    Fold 3:
      Train: index=[0 3]
      Test:  index=[1 2]
    Fold 4:
      Train: index=[0 2]
      Test:  index=[1 3]
    Fold 5:
      Train: index=[0 1]
      Test:  index=[2 3]
    """

    def __init__(self, p):
        # ``p`` is validated lazily against the data size in
        # ``_iter_test_indices``.
        self.p = p

    def _iter_test_indices(self, X, y=None, groups=None):
        n_samples = _num_samples(X)
        if n_samples <= self.p:
            raise ValueError(
                "p={} must be strictly less than the number of samples={}".format(
                    self.p, n_samples
                )
            )
        # Enumerate every subset of ``p`` sample indices; each subset is one
        # test set.
        for subset in combinations(range(n_samples), self.p):
            yield np.array(subset)

    def get_n_splits(self, X, y=None, groups=None):
        """Returns the number of splitting iterations in the cross-validator.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples
            and `n_features` is the number of features.

        y : array-like of shape (n_samples,), default=None
            Always ignored, exists for API compatibility.

        groups : array-like of shape (n_samples,), default=None
            Always ignored, exists for API compatibility.
        """
        if X is None:
            raise ValueError("The 'X' parameter should not be None.")
        # Number of splits is C(n_samples, p).
        return int(comb(_num_samples(X), self.p, exact=True))
class _BaseKFold(BaseCrossValidator, metaclass=ABCMeta):
    """Base class for K-Fold cross-validators and TimeSeriesSplit."""

    @abstractmethod
    def __init__(self, n_splits, *, shuffle, random_state):
        # ``n_splits`` must be an integral value of at least 2.
        if not isinstance(n_splits, numbers.Integral):
            raise ValueError(
                "The number of folds must be of Integral type. "
                "%s of type %s was passed." % (n_splits, type(n_splits))
            )
        n_splits = int(n_splits)

        if n_splits <= 1:
            raise ValueError(
                "k-fold cross-validation requires at least one"
                " train/test split by setting n_splits=2 or more,"
                " got n_splits={0}.".format(n_splits)
            )

        if not isinstance(shuffle, bool):
            raise TypeError("shuffle must be True or False; got {0}".format(shuffle))

        # ``random_state`` only matters when shuffling; reject a combination
        # that would silently have no effect (None is the default).
        if not shuffle and random_state is not None:
            raise ValueError(
                "Setting a random_state has no effect since shuffle is "
                "False. You should leave "
                "random_state to its default (None), or set shuffle=True.",
            )

        self.n_splits = n_splits
        self.shuffle = shuffle
        self.random_state = random_state

    def split(self, X, y=None, groups=None):
        """Generate indices to split data into training and test set.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples
            and `n_features` is the number of features.

        y : array-like of shape (n_samples,), default=None
            The target variable for supervised learning problems.

        groups : array-like of shape (n_samples,), default=None
            Group labels for the samples used while splitting the dataset into
            train/test set.

        Yields
        ------
        train : ndarray
            The training set indices for that split.

        test : ndarray
            The testing set indices for that split.
        """
        X, y, groups = indexable(X, y, groups)
        # Each fold must contain at least one sample.
        n_samples = _num_samples(X)
        if self.n_splits > n_samples:
            raise ValueError(
                (
                    "Cannot have number of splits n_splits={0} greater"
                    " than the number of samples: n_samples={1}."
                ).format(self.n_splits, n_samples)
            )

        yield from super().split(X, y, groups)

    def get_n_splits(self, X=None, y=None, groups=None):
        """Returns the number of splitting iterations as set with the `n_splits` param
        when instantiating the cross-validator.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features), default=None
            Always ignored, exists for API compatibility.

        y : array-like of shape (n_samples,), default=None
            Always ignored, exists for API compatibility.

        groups : array-like of shape (n_samples,), default=None
            Always ignored, exists for API compatibility.

        Returns
        -------
        n_splits : int
            Returns the number of splitting iterations in the cross-validator.
        """
        return self.n_splits
class KFold(_UnsupportedGroupCVMixin, _BaseKFold):
    """K-Fold cross-validator.

    Provides train/test indices to split data in train/test sets. Split
    dataset into k consecutive folds (without shuffling by default).

    Each fold is then used once as a validation while the k - 1 remaining
    folds form the training set.

    Read more in the :ref:`User Guide <k_fold>`.

    For visualisation of cross-validation behaviour and
    comparison between common scikit-learn split methods
    refer to :ref:`sphx_glr_auto_examples_model_selection_plot_cv_indices.py`

    Parameters
    ----------
    n_splits : int, default=5
        Number of folds. Must be at least 2.

        .. versionchanged:: 0.22
            ``n_splits`` default value changed from 3 to 5.

    shuffle : bool, default=False
        Whether to shuffle the data before splitting into batches.
        Note that the samples within each split will not be shuffled.

    random_state : int, RandomState instance or None, default=None
        When `shuffle` is True, `random_state` affects the ordering of the
        indices, which controls the randomness of each fold. Otherwise, this
        parameter has no effect.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.model_selection import KFold
    >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
    >>> y = np.array([1, 2, 3, 4])
    >>> kf = KFold(n_splits=2)
    >>> kf.get_n_splits()
    2
    >>> print(kf)
    KFold(n_splits=2, random_state=None, shuffle=False)
    >>> for i, (train_index, test_index) in enumerate(kf.split(X)):
    ...     print(f"Fold {i}:")
    ...     print(f"  Train: index={train_index}")
    ...     print(f"  Test:  index={test_index}")
    Fold 0:
      Train: index=[2 3]
      Test:  index=[0 1]
    Fold 1:
      Train: index=[0 1]
      Test:  index=[2 3]

    Notes
    -----
    The first ``n_samples % n_splits`` folds have size
    ``n_samples // n_splits + 1``, other folds have size
    ``n_samples // n_splits``, where ``n_samples`` is the number of samples.

    Randomized CV splitters may return different results for each call of
    split. You can make the results identical by setting `random_state`
    to an integer.

    See Also
    --------
    StratifiedKFold : Takes class information into account to avoid building
        folds with imbalanced class distributions (for binary or multiclass
        classification tasks).

    GroupKFold : K-fold iterator variant with non-overlapping groups.

    RepeatedKFold : Repeats K-Fold n times.
    """

    def __init__(self, n_splits=5, *, shuffle=False, random_state=None):
        super().__init__(n_splits=n_splits, shuffle=shuffle, random_state=random_state)

    def _iter_test_indices(self, X, y=None, groups=None):
        sample_indices = np.arange(_num_samples(X))
        if self.shuffle:
            check_random_state(self.random_state).shuffle(sample_indices)
        # ``np.array_split`` gives the first ``n_samples % n_splits`` folds one
        # extra sample each, matching the fold sizes documented above.
        yield from np.array_split(sample_indices, self.n_splits)
class GroupKFold(GroupsConsumerMixin, _BaseKFold):
    """K-fold iterator variant with non-overlapping groups.

    Each group will appear exactly once in the test set across all folds (the
    number of distinct groups has to be at least equal to the number of folds).

    The folds are approximately balanced in the sense that the number of
    samples is approximately the same in each test fold when `shuffle` is True.

    Read more in the :ref:`User Guide <group_k_fold>`.

    For visualisation of cross-validation behaviour and
    comparison between common scikit-learn split methods
    refer to :ref:`sphx_glr_auto_examples_model_selection_plot_cv_indices.py`

    Parameters
    ----------
    n_splits : int, default=5
        Number of folds. Must be at least 2.

        .. versionchanged:: 0.22
            ``n_splits`` default value changed from 3 to 5.

    shuffle : bool, default=False
        Whether to shuffle the groups before splitting into batches.
        Note that the samples within each split will not be shuffled.

        .. versionadded:: 1.6

    random_state : int, RandomState instance or None, default=None
        When `shuffle` is True, `random_state` affects the ordering of the
        indices, which controls the randomness of each fold. Otherwise, this
        parameter has no effect.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

        .. versionadded:: 1.6

    Notes
    -----
    Groups appear in an arbitrary order throughout the folds.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.model_selection import GroupKFold
    >>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]])
    >>> y = np.array([1, 2, 3, 4, 5, 6])
    >>> groups = np.array([0, 0, 2, 2, 3, 3])
    >>> group_kfold = GroupKFold(n_splits=2)
    >>> group_kfold.get_n_splits()
    2
    >>> print(group_kfold)
    GroupKFold(n_splits=2, random_state=None, shuffle=False)
    >>> for i, (train_index, test_index) in enumerate(group_kfold.split(X, y, groups)):
    ...     print(f"Fold {i}:")
    ...     print(f"  Train: index={train_index}, group={groups[train_index]}")
    ...     print(f"  Test:  index={test_index}, group={groups[test_index]}")
    Fold 0:
      Train: index=[2 3], group=[2 2]
      Test:  index=[0 1 4 5], group=[0 0 3 3]
    Fold 1:
      Train: index=[0 1 4 5], group=[0 0 3 3]
      Test:  index=[2 3], group=[2 2]

    See Also
    --------
    LeaveOneGroupOut : For splitting the data according to explicit
        domain-specific stratification of the dataset.

    StratifiedKFold : Takes class information into account to avoid building
        folds with imbalanced class proportions (for binary or multiclass
        classification tasks).
    """

    def __init__(self, n_splits=5, *, shuffle=False, random_state=None):
        super().__init__(n_splits, shuffle=shuffle, random_state=random_state)

    def _iter_test_indices(self, X, y, groups):
        if groups is None:
            raise ValueError("The 'groups' parameter should not be None.")
        groups = check_array(groups, input_name="groups", ensure_2d=False, dtype=None)

        unique_groups, group_idx = np.unique(groups, return_inverse=True)
        n_groups = len(unique_groups)
        if self.n_splits > n_groups:
            raise ValueError(
                "Cannot have number of splits n_splits=%d greater"
                " than the number of groups: %d." % (self.n_splits, n_groups)
            )

        if self.shuffle:
            # Permute the distinct groups once, then hand out contiguous
            # chunks of groups to the folds.
            rng = check_random_state(self.random_state)
            shuffled_groups = rng.permutation(unique_groups)
            for test_group_ids in np.array_split(shuffled_groups, self.n_splits):
                yield np.flatnonzero(np.isin(groups, test_group_ids))
        else:
            # Greedy bin packing: repeatedly place the largest remaining group
            # into the fold that currently holds the fewest samples, so the
            # folds end up with roughly equal sample counts.
            group_sizes = np.bincount(group_idx)
            order = np.argsort(group_sizes)[::-1]
            fold_sizes = np.zeros(self.n_splits)
            group_to_fold = np.zeros(len(unique_groups))
            for group, size in zip(order, group_sizes[order]):
                lightest_fold = np.argmin(fold_sizes)
                fold_sizes[lightest_fold] += size
                group_to_fold[group] = lightest_fold
            sample_fold = group_to_fold[group_idx]
            for fold in range(self.n_splits):
                yield np.flatnonzero(sample_fold == fold)

    def split(self, X, y=None, groups=None):
        """Generate indices to split data into training and test set.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples
            and `n_features` is the number of features.

        y : array-like of shape (n_samples,), default=None
            The target variable for supervised learning problems.

        groups : array-like of shape (n_samples,)
            Group labels for the samples used while splitting the dataset into
            train/test set.

        Yields
        ------
        train : ndarray
            The training set indices for that split.

        test : ndarray
            The testing set indices for that split.
        """
        return super().split(X, y, groups)
class StratifiedKFold(_BaseKFold):
    """Class-wise stratified K-Fold cross-validator.
    Provides train/test indices to split data in train/test sets.
    This cross-validation object is a variation of KFold that returns
    stratified folds. The folds are made by preserving the percentage of
    samples for each class in `y` in a binary or multiclass classification
    setting.
    Read more in the :ref:`User Guide <stratified_k_fold>`.
    For visualisation of cross-validation behaviour and
    comparison between common scikit-learn split methods
    refer to :ref:`sphx_glr_auto_examples_model_selection_plot_cv_indices.py`
    .. note::
        Stratification on the class label solves an engineering problem rather
        than a statistical one. See :ref:`stratification` for more details.
    Parameters
    ----------
    n_splits : int, default=5
        Number of folds. Must be at least 2.
        .. versionchanged:: 0.22
            ``n_splits`` default value changed from 3 to 5.
    shuffle : bool, default=False
        Whether to shuffle each class's samples before splitting into batches.
        Note that the samples within each split will not be shuffled.
    random_state : int, RandomState instance or None, default=None
        When `shuffle` is True, `random_state` affects the ordering of the
        indices, which controls the randomness of each fold for each class.
        Otherwise, leave `random_state` as `None`.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.model_selection import StratifiedKFold
    >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
    >>> y = np.array([0, 0, 1, 1])
    >>> skf = StratifiedKFold(n_splits=2)
    >>> skf.get_n_splits()
    2
    >>> print(skf)
    StratifiedKFold(n_splits=2, random_state=None, shuffle=False)
    >>> for i, (train_index, test_index) in enumerate(skf.split(X, y)):
    ...     print(f"Fold {i}:")
    ...     print(f"  Train: index={train_index}")
    ...     print(f"  Test:  index={test_index}")
    Fold 0:
      Train: index=[1 3]
      Test:  index=[0 2]
    Fold 1:
      Train: index=[0 2]
      Test:  index=[1 3]
    Notes
    -----
    The implementation is designed to:
    * Generate test sets such that all contain the same distribution of
      classes, or as close as possible.
    * Be invariant to class label: relabelling ``y = ["Happy", "Sad"]`` to
      ``y = [1, 0]`` should not change the indices generated.
    * Preserve order dependencies in the dataset ordering, when
      ``shuffle=False``: all samples from class k in some test set were
      contiguous in y, or separated in y by samples from classes other than k.
    * Generate test sets where the smallest and largest differ by at most one
      sample.
    .. versionchanged:: 0.22
        The previous implementation did not follow the last constraint.
    See Also
    --------
    RepeatedStratifiedKFold : Repeats Stratified K-Fold n times.
    """
    def __init__(self, n_splits=5, *, shuffle=False, random_state=None):
        super().__init__(n_splits=n_splits, shuffle=shuffle, random_state=random_state)
    def _make_test_folds(self, X, y=None):
        """Assign every sample to a test fold, preserving class proportions.

        Returns an integer array of shape (n_samples,) where entry ``i`` is
        the index of the test fold that sample ``i`` belongs to.
        """
        rng = check_random_state(self.random_state)
        # XXX: as of now, cross-validation splitters only operate in NumPy-land
        # without attempting to leverage array API namespace features. However
        # they might be fed by array API inputs, e.g. in CV-enabled estimators so
        # we need the following explicit conversion:
        xp, is_array_api = get_namespace(y)
        if is_array_api:
            y = _convert_to_numpy(y, xp)
        else:
            y = np.asarray(y)
        # Stratification is only defined for classification targets.
        type_of_target_y = type_of_target(y)
        allowed_target_types = ("binary", "multiclass")
        if type_of_target_y not in allowed_target_types:
            raise ValueError(
                "Supported target types are: {}. Got {!r} instead.".format(
                    allowed_target_types, type_of_target_y
                )
            )
        y = column_or_1d(y)
        _, y_idx, y_inv = np.unique(y, return_index=True, return_inverse=True)
        # y_inv encodes y according to lexicographic order. We invert y_idx to
        # map the classes so that they are encoded by order of appearance:
        # 0 represents the first label appearing in y, 1 the second, etc.
        _, class_perm = np.unique(y_idx, return_inverse=True)
        y_encoded = class_perm[y_inv]
        n_classes = len(y_idx)
        y_counts = np.bincount(y_encoded)
        min_groups = np.min(y_counts)
        # Error only when *every* class has fewer members than n_splits; a
        # partially under-populated y merely triggers the warning below.
        if np.all(self.n_splits > y_counts):
            raise ValueError(
                "n_splits=%d cannot be greater than the"
                " number of members in each class." % (self.n_splits)
            )
        if self.n_splits > min_groups:
            warnings.warn(
                "The least populated class in y has only %d"
                " members, which is less than n_splits=%d."
                % (min_groups, self.n_splits),
                UserWarning,
            )
        # Determine the optimal number of samples from each class in each fold,
        # using round robin over the sorted y. (This can be done direct from
        # counts, but that code is unreadable.)
        y_order = np.sort(y_encoded)
        # allocation[i, k] = number of samples of class k in test fold i.
        allocation = np.asarray(
            [
                np.bincount(y_order[i :: self.n_splits], minlength=n_classes)
                for i in range(self.n_splits)
            ]
        )
        # To maintain the data order dependencies as best as possible within
        # the stratification constraint, we assign samples from each class in
        # blocks (and then mess that up when shuffle=True).
        test_folds = np.empty(len(y), dtype="i")
        for k in range(n_classes):
            # since the kth column of allocation stores the number of samples
            # of class k in each test set, this generates blocks of fold
            # indices corresponding to the allocation for class k.
            folds_for_class = np.arange(self.n_splits).repeat(allocation[:, k])
            if self.shuffle:
                rng.shuffle(folds_for_class)
            test_folds[y_encoded == k] = folds_for_class
        return test_folds
    def _iter_test_masks(self, X, y=None, groups=None):
        """Yield one boolean test mask per fold, derived from the fold labels."""
        test_folds = self._make_test_folds(X, y)
        for i in range(self.n_splits):
            yield test_folds == i
    def split(self, X, y, groups=None):
        """Generate indices to split data into training and test set.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples
            and `n_features` is the number of features.
            Note that providing ``y`` is sufficient to generate the splits and
            hence ``np.zeros(n_samples)`` may be used as a placeholder for
            ``X`` instead of actual training data.
        y : array-like of shape (n_samples,)
            The target variable for supervised learning problems.
            Stratification is done based on the y labels.
        groups : array-like of shape (n_samples,), default=None
            Always ignored, exists for API compatibility.
        Yields
        ------
        train : ndarray
            The training set indices for that split.
        test : ndarray
            The testing set indices for that split.
        Notes
        -----
        Randomized CV splitters may return different results for each call of
        split. You can make the results identical by setting `random_state`
        to an integer.
        """
        if groups is not None:
            warnings.warn(
                f"The groups parameter is ignored by {self.__class__.__name__}",
                UserWarning,
            )
        y = check_array(y, input_name="y", ensure_2d=False, dtype=None)
        return super().split(X, y, groups)
class StratifiedGroupKFold(GroupsConsumerMixin, _BaseKFold):
"""Class-wise stratified K-Fold iterator variant with non-overlapping groups.
This cross-validation object is a variation of :class:`StratifiedKFold` that
attempts to return stratified folds with non-overlapping groups. The folds are made
by preserving the percentage of samples for each class in `y` in a binary or
multiclass classification setting.
Each group will appear exactly once in the test set across all folds (the
number of distinct groups has to be at least equal to the number of folds).
The difference between :class:`GroupKFold`
and `StratifiedGroupKFold` is that
the former attempts to create balanced folds such that the number of
distinct groups is approximately the same in each fold, whereas
`StratifiedGroupKFold` attempts to create folds which preserve the
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/model_selection/_classification_threshold.py | sklearn/model_selection/_classification_threshold.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from collections.abc import MutableMapping
from numbers import Integral, Real
import numpy as np
from sklearn.base import (
BaseEstimator,
ClassifierMixin,
MetaEstimatorMixin,
_fit_context,
clone,
)
from sklearn.exceptions import NotFittedError
from sklearn.metrics import check_scoring, get_scorer_names
from sklearn.metrics._scorer import _CurveScorer, _threshold_scores_to_class_labels
from sklearn.model_selection._split import StratifiedShuffleSplit, check_cv
from sklearn.utils import _safe_indexing, get_tags
from sklearn.utils._param_validation import HasMethods, Interval, RealNotInt, StrOptions
from sklearn.utils._response import _get_response_values_binary
from sklearn.utils.metadata_routing import (
MetadataRouter,
MethodMapping,
_raise_for_params,
process_routing,
)
from sklearn.utils.metaestimators import available_if
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.parallel import Parallel, delayed
from sklearn.utils.validation import (
_check_method_params,
_estimator_has,
_num_samples,
check_is_fitted,
indexable,
)
def _check_is_fitted(estimator):
    """Raise ``NotFittedError`` unless the threshold classifier is usable.

    A threshold meta-estimator is usable for prediction either because its
    wrapped ``estimator.estimator`` was fitted beforehand (prefit use), or
    because the meta-estimator's own ``fit`` ran and set ``estimator_``.
    """
    try:
        check_is_fitted(estimator.estimator)
    except NotFittedError:
        # Wrapped estimator is unfitted: require the meta-estimator itself
        # to have been fitted instead.
        check_is_fitted(estimator, "estimator_")
class BaseThresholdClassifier(ClassifierMixin, MetaEstimatorMixin, BaseEstimator):
    """Base class for binary classifiers that set a non-default decision threshold.
    In this base class, we define the following interface:
    - the validation of common parameters in `fit`;
    - the different prediction methods that can be used with the classifier.
    .. versionadded:: 1.5
    Parameters
    ----------
    estimator : estimator instance
        The binary classifier, fitted or not, for which we want to optimize
        the decision threshold used during `predict`.
    response_method : {"auto", "decision_function", "predict_proba"}, default="auto"
        Methods by the classifier `estimator` corresponding to the
        decision function for which we want to find a threshold. It can be:
        * if `"auto"`, it will try to invoke, for each classifier,
          `"predict_proba"` or `"decision_function"` in that order.
        * otherwise, one of `"predict_proba"` or `"decision_function"`.
        If the method is not implemented by the classifier, it will raise an
        error.
    """
    # The wrapped estimator must expose at least one usable scoring method:
    # either `predict_proba` or `decision_function` (in addition to `fit`).
    _parameter_constraints: dict = {
        "estimator": [
            HasMethods(["fit", "predict_proba"]),
            HasMethods(["fit", "decision_function"]),
        ],
        "response_method": [StrOptions({"auto", "predict_proba", "decision_function"})],
    }
    def __init__(self, estimator, *, response_method="auto"):
        self.estimator = estimator
        self.response_method = response_method
    def _get_response_method(self):
        """Define the response method.

        Returns
        -------
        response_method : str or list of str
            A priority-ordered list of method names when `response_method`
            is `"auto"`, otherwise the single configured method name.
        """
        if self.response_method == "auto":
            response_method = ["predict_proba", "decision_function"]
        else:
            response_method = self.response_method
        return response_method
    @_fit_context(
        # *ThresholdClassifier*.estimator is not validated yet
        prefer_skip_nested_validation=False
    )
    def fit(self, X, y, **params):
        """Fit the classifier.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training data.
        y : array-like of shape (n_samples,)
            Target values.
        **params : dict
            Parameters to pass to the `fit` method of the underlying
            classifier.
        Returns
        -------
        self : object
            Returns an instance of self.
        """
        _raise_for_params(params, self, None)
        X, y = indexable(X, y)
        # Decision-threshold tuning is only defined for binary targets; fail
        # early for any other target type.
        y_type = type_of_target(y, input_name="y")
        if y_type != "binary":
            raise ValueError(
                f"Only binary classification is supported. Unknown label type: {y_type}"
            )
        self._fit(X, y, **params)
        # Mirror the fitted sub-estimator's feature metadata so this
        # meta-estimator participates in feature-name/shape checks.
        if hasattr(self.estimator_, "n_features_in_"):
            self.n_features_in_ = self.estimator_.n_features_in_
        if hasattr(self.estimator_, "feature_names_in_"):
            self.feature_names_in_ = self.estimator_.feature_names_in_
        return self
    @property
    def classes_(self):
        """Classes labels."""
        return self.estimator_.classes_
    @available_if(_estimator_has("predict_proba"))
    def predict_proba(self, X):
        """Predict class probabilities for `X` using the fitted estimator.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training vectors, where `n_samples` is the number of samples and
            `n_features` is the number of features.
        Returns
        -------
        probabilities : ndarray of shape (n_samples, n_classes)
            The class probabilities of the input samples.
        """
        _check_is_fitted(self)
        # Fall back to the prefit wrapped estimator when this meta-estimator's
        # own `fit` was never called.
        estimator = getattr(self, "estimator_", self.estimator)
        return estimator.predict_proba(X)
    @available_if(_estimator_has("predict_log_proba"))
    def predict_log_proba(self, X):
        """Predict logarithm class probabilities for `X` using the fitted estimator.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training vectors, where `n_samples` is the number of samples and
            `n_features` is the number of features.
        Returns
        -------
        log_probabilities : ndarray of shape (n_samples, n_classes)
            The logarithm class probabilities of the input samples.
        """
        _check_is_fitted(self)
        # Same prefit fallback as in `predict_proba`.
        estimator = getattr(self, "estimator_", self.estimator)
        return estimator.predict_log_proba(X)
    @available_if(_estimator_has("decision_function"))
    def decision_function(self, X):
        """Decision function for samples in `X` using the fitted estimator.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training vectors, where `n_samples` is the number of samples and
            `n_features` is the number of features.
        Returns
        -------
        decisions : ndarray of shape (n_samples,)
            The decision function computed the fitted estimator.
        """
        _check_is_fitted(self)
        # Same prefit fallback as in `predict_proba`.
        estimator = getattr(self, "estimator_", self.estimator)
        return estimator.decision_function(X)
    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        # Threshold classifiers are strictly binary; sparse-input support is
        # inherited from the wrapped estimator.
        tags.classifier_tags.multi_class = False
        tags.input_tags.sparse = get_tags(self.estimator).input_tags.sparse
        return tags
class FixedThresholdClassifier(BaseThresholdClassifier):
    """Binary classifier that manually sets the decision threshold.

    This classifier allows to change the default decision threshold used for
    converting posterior probability estimates (i.e. output of `predict_proba`) or
    decision scores (i.e. output of `decision_function`) into a class label.

    Here, the threshold is not optimized and is set to a constant value.

    Read more in the :ref:`User Guide <FixedThresholdClassifier>`.

    .. versionadded:: 1.5

    Parameters
    ----------
    estimator : estimator instance
        The binary classifier, fitted or not, for which we want to optimize
        the decision threshold used during `predict`.

    threshold : {"auto"} or float, default="auto"
        The decision threshold to use when converting posterior probability estimates
        (i.e. output of `predict_proba`) or decision scores (i.e. output of
        `decision_function`) into a class label. When `"auto"`, the threshold is set
        to 0.5 if `predict_proba` is used as `response_method`, otherwise it is set to
        0 (i.e. the default threshold for `decision_function`).

    pos_label : int, float, bool or str, default=None
        The label of the positive class. Used to process the output of the
        `response_method` method. When `pos_label=None`, if `y_true` is in `{-1, 1}` or
        `{0, 1}`, `pos_label` is set to 1, otherwise an error will be raised.

    response_method : {"auto", "decision_function", "predict_proba"}, default="auto"
        Methods by the classifier `estimator` corresponding to the
        decision function for which we want to find a threshold. It can be:

        * if `"auto"`, it will try to invoke `"predict_proba"` or `"decision_function"`
          in that order.
        * otherwise, one of `"predict_proba"` or `"decision_function"`.
          If the method is not implemented by the classifier, it will raise an
          error.

    Attributes
    ----------
    estimator_ : estimator instance
        The fitted classifier used when predicting.

    classes_ : ndarray of shape (n_classes,)
        The class labels.

    n_features_in_ : int
        Number of features seen during :term:`fit`. Only defined if the
        underlying estimator exposes such an attribute when fit.

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Only defined if the
        underlying estimator exposes such an attribute when fit.

    See Also
    --------
    sklearn.model_selection.TunedThresholdClassifierCV : Classifier that post-tunes
        the decision threshold based on some metrics and using cross-validation.
    sklearn.calibration.CalibratedClassifierCV : Estimator that calibrates
        probabilities.

    Examples
    --------
    >>> from sklearn.datasets import make_classification
    >>> from sklearn.linear_model import LogisticRegression
    >>> from sklearn.metrics import confusion_matrix
    >>> from sklearn.model_selection import FixedThresholdClassifier, train_test_split
    >>> X, y = make_classification(
    ...     n_samples=1_000, weights=[0.9, 0.1], class_sep=0.8, random_state=42
    ... )
    >>> X_train, X_test, y_train, y_test = train_test_split(
    ...     X, y, stratify=y, random_state=42
    ... )
    >>> classifier = LogisticRegression(random_state=0).fit(X_train, y_train)
    >>> print(confusion_matrix(y_test, classifier.predict(X_test)))
    [[217   7]
     [ 19   7]]
    >>> classifier_other_threshold = FixedThresholdClassifier(
    ...     classifier, threshold=0.1, response_method="predict_proba"
    ... ).fit(X_train, y_train)
    >>> print(confusion_matrix(y_test, classifier_other_threshold.predict(X_test)))
    [[184  40]
     [  6  20]]
    """

    _parameter_constraints: dict = {
        **BaseThresholdClassifier._parameter_constraints,
        "threshold": [StrOptions({"auto"}), Real],
        "pos_label": [Real, str, "boolean", None],
    }

    def __init__(
        self,
        estimator,
        *,
        threshold="auto",
        pos_label=None,
        response_method="auto",
    ):
        super().__init__(estimator=estimator, response_method=response_method)
        self.threshold = threshold
        self.pos_label = pos_label

    @property
    def classes_(self):
        """Class labels, from the fitted or prefit wrapped estimator."""
        fitted = getattr(self, "estimator_", None)
        if fitted:
            return fitted.classes_
        # No `estimator_` yet: only usable if the wrapped estimator is prefit.
        try:
            check_is_fitted(self.estimator)
            return self.estimator.classes_
        except NotFittedError:
            raise AttributeError(
                "The underlying estimator is not fitted yet."
            ) from NotFittedError

    def _fit(self, X, y, **params):
        """Fit the classifier.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training data.

        y : array-like of shape (n_samples,)
            Target values.

        **params : dict
            Parameters to pass to the `fit` method of the underlying
            classifier.

        Returns
        -------
        self : object
            Returns an instance of self.
        """
        routed_params = process_routing(self, "fit", **params)
        # A clone is fitted so the user-supplied estimator is left untouched.
        estimator = clone(self.estimator)
        self.estimator_ = estimator.fit(X, y, **routed_params.estimator.fit)
        return self

    def predict(self, X):
        """Predict the target of new samples.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The samples, as accepted by `estimator.predict`.

        Returns
        -------
        class_labels : ndarray of shape (n_samples,)
            The predicted class.
        """
        _check_is_fitted(self)
        estimator = getattr(self, "estimator_", self.estimator)
        y_score, _, response_method_used = _get_response_values_binary(
            estimator,
            X,
            self._get_response_method(),
            pos_label=self.pos_label,
            return_response_method_used=True,
        )
        # Resolve "auto" to the natural cut-off of the method actually used:
        # 0.5 for probabilities, 0.0 for decision scores.
        decision_threshold = self.threshold
        if decision_threshold == "auto":
            decision_threshold = 0.5 if response_method_used == "predict_proba" else 0.0
        return _threshold_scores_to_class_labels(
            y_score, decision_threshold, self.classes_, self.pos_label
        )

    def get_metadata_routing(self):
        """Get metadata routing of this object.

        Please check :ref:`User Guide <metadata_routing>` on how the routing
        mechanism works.

        Returns
        -------
        routing : MetadataRouter
            A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
            routing information.
        """
        return MetadataRouter(owner=self).add(
            estimator=self.estimator,
            method_mapping=MethodMapping().add(callee="fit", caller="fit"),
        )
def _fit_and_score_over_thresholds(
    classifier,
    X,
    y,
    *,
    fit_params,
    train_idx,
    val_idx,
    curve_scorer,
    score_params,
):
    """Fit a classifier and compute the scores for different decision thresholds.

    Parameters
    ----------
    classifier : estimator instance
        The classifier to fit and use for scoring. If `classifier` is already fitted,
        it will be used as is.

    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The entire dataset.

    y : array-like of shape (n_samples,)
        The entire target vector.

    fit_params : dict
        Parameters to pass to the `fit` method of the underlying classifier.

    train_idx : ndarray of shape (n_train_samples,) or None
        The indices of the training set. If `None`, `classifier` is expected to be
        already fitted.

    val_idx : ndarray of shape (n_val_samples,)
        The indices of the validation set used to score `classifier`. If `train_idx`,
        the entire set will be used.

    curve_scorer : scorer instance
        The scorer taking `classifier` and the validation set as input and outputting
        decision thresholds and scores as a curve. Note that this is different from
        the usual scorer that outputs a single score value as `curve_scorer`
        outputs a single score value for each threshold.

    score_params : dict
        Parameters to pass to the `score` method of the underlying scorer.

    Returns
    -------
    scores : ndarray of shape (thresholds,) or tuple of such arrays
        The scores computed for each decision threshold. When TPR/TNR or precision/
        recall are computed, `scores` is a tuple of two arrays.

    potential_thresholds : ndarray of shape (thresholds,)
        The decision thresholds used to compute the scores. They are returned in
        ascending order.
    """
    if train_idx is None:
        # Prefit estimator: only a validation set is provided, score it as-is.
        return curve_scorer(classifier, X, y, **score_params)

    # Cross-validation path: slice out the train/validation subsets, restrict
    # the routed fit/score parameters accordingly, then fit before scoring.
    X_train = _safe_indexing(X, train_idx)
    y_train = _safe_indexing(y, train_idx)
    X_val = _safe_indexing(X, val_idx)
    y_val = _safe_indexing(y, val_idx)
    fit_params_train = _check_method_params(X, fit_params, indices=train_idx)
    score_params_val = _check_method_params(X, score_params, indices=val_idx)
    classifier.fit(X_train, y_train, **fit_params_train)
    return curve_scorer(classifier, X_val, y_val, **score_params_val)
def _mean_interpolated_score(target_thresholds, cv_thresholds, cv_scores):
"""Compute the mean interpolated score across folds by defining common thresholds.
Parameters
----------
target_thresholds : ndarray of shape (thresholds,)
The thresholds to use to compute the mean score.
cv_thresholds : ndarray of shape (n_folds, thresholds_fold)
The thresholds used to compute the scores for each fold.
cv_scores : ndarray of shape (n_folds, thresholds_fold)
The scores computed for each threshold for each fold.
Returns
-------
mean_score : ndarray of shape (thresholds,)
The mean score across all folds for each target threshold.
"""
return np.mean(
[
np.interp(target_thresholds, split_thresholds, split_score)
for split_thresholds, split_score in zip(cv_thresholds, cv_scores)
],
axis=0,
)
class TunedThresholdClassifierCV(BaseThresholdClassifier):
    """Classifier that post-tunes the decision threshold using cross-validation.

    This estimator post-tunes the decision threshold (cut-off point) that is
    used for converting posterior probability estimates (i.e. output of
    `predict_proba`) or decision scores (i.e. output of `decision_function`)
    into a class label. The tuning is done by optimizing a binary metric,
    potentially constrained by another metric.

    Read more in the :ref:`User Guide <TunedThresholdClassifierCV>`.

    .. versionadded:: 1.5

    Parameters
    ----------
    estimator : estimator instance
        The classifier, fitted or not, for which we want to optimize
        the decision threshold used during `predict`.

    scoring : str or callable, default="balanced_accuracy"
        The objective metric to be optimized. Can be one of:

        - str: string associated to a scoring function for binary classification,
          see :ref:`scoring_string_names` for options.
        - callable: a scorer callable object (e.g., function) with signature
          ``scorer(estimator, X, y)``. See :ref:`scoring_callable` for details.

    response_method : {"auto", "decision_function", "predict_proba"}, default="auto"
        Methods by the classifier `estimator` corresponding to the
        decision function for which we want to find a threshold. It can be:

        * if `"auto"`, it will try to invoke, for each classifier,
          `"predict_proba"` or `"decision_function"` in that order.
        * otherwise, one of `"predict_proba"` or `"decision_function"`.
          If the method is not implemented by the classifier, it will raise an
          error.

    thresholds : int or array-like, default=100
        The number of decision threshold to use when discretizing the output of the
        classifier `method`. Pass an array-like to manually specify the thresholds
        to use.

    cv : int, float, cross-validation generator, iterable or "prefit", default=None
        Determines the cross-validation splitting strategy to train classifier.
        Possible inputs for cv are:

        * `None`, to use the default 5-fold stratified K-fold cross validation;
        * An integer number, to specify the number of folds in a stratified k-fold;
        * A float number, to specify a single shuffle split. The floating number should
          be in (0, 1) and represent the size of the validation set;
        * An object to be used as a cross-validation generator;
        * An iterable yielding train, test splits;
        * `"prefit"`, to bypass the cross-validation.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

        .. warning::
            Using `cv="prefit"` and passing the same dataset for fitting `estimator`
            and tuning the cut-off point is subject to undesired overfitting. You can
            refer to :ref:`TunedThresholdClassifierCV_no_cv` for an example.

            This option should only be used when the set used to fit `estimator` is
            different from the one used to tune the cut-off point (by calling
            :meth:`TunedThresholdClassifierCV.fit`).

    refit : bool, default=True
        Whether or not to refit the classifier on the entire training set once
        the decision threshold has been found.
        Note that forcing `refit=False` on cross-validation having more
        than a single split will raise an error. Similarly, `refit=True` in
        conjunction with `cv="prefit"` will raise an error.

    n_jobs : int, default=None
        The number of jobs to run in parallel. When `cv` represents a
        cross-validation strategy, the fitting and scoring on each data split
        is done in parallel. ``None`` means 1 unless in a
        :obj:`joblib.parallel_backend` context. ``-1`` means using all
        processors. See :term:`Glossary <n_jobs>` for more details.

    random_state : int, RandomState instance or None, default=None
        Controls the randomness of cross-validation when `cv` is a float.
        See :term:`Glossary <random_state>`.

    store_cv_results : bool, default=False
        Whether to store all scores and thresholds computed during the cross-validation
        process.

    Attributes
    ----------
    estimator_ : estimator instance
        The fitted classifier used when predicting.

    best_threshold_ : float
        The new decision threshold.

    best_score_ : float or None
        The optimal score of the objective metric, evaluated at `best_threshold_`.

    cv_results_ : dict or None
        A dictionary containing the scores and thresholds computed during the
        cross-validation process. Only exist if `store_cv_results=True`. The
        keys are `"thresholds"` and `"scores"`.

    classes_ : ndarray of shape (n_classes,)
        The class labels.

    n_features_in_ : int
        Number of features seen during :term:`fit`. Only defined if the
        underlying estimator exposes such an attribute when fit.

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Only defined if the
        underlying estimator exposes such an attribute when fit.

    See Also
    --------
    sklearn.model_selection.FixedThresholdClassifier : Classifier that uses a
        constant threshold.
    sklearn.calibration.CalibratedClassifierCV : Estimator that calibrates
        probabilities.

    Examples
    --------
    >>> from sklearn.datasets import make_classification
    >>> from sklearn.ensemble import RandomForestClassifier
    >>> from sklearn.metrics import classification_report
    >>> from sklearn.model_selection import TunedThresholdClassifierCV, train_test_split
    >>> X, y = make_classification(
    ...     n_samples=1_000, weights=[0.9, 0.1], class_sep=0.8, random_state=42
    ... )
    >>> X_train, X_test, y_train, y_test = train_test_split(
    ...     X, y, stratify=y, random_state=42
    ... )
    >>> classifier = RandomForestClassifier(random_state=0).fit(X_train, y_train)
    >>> print(classification_report(y_test, classifier.predict(X_test)))
                  precision    recall  f1-score   support
    <BLANKLINE>
               0       0.94      0.99      0.96       224
               1       0.80      0.46      0.59        26
    <BLANKLINE>
        accuracy                           0.93       250
       macro avg       0.87      0.72      0.77       250
    weighted avg       0.93      0.93      0.92       250
    <BLANKLINE>
    >>> classifier_tuned = TunedThresholdClassifierCV(
    ...     classifier, scoring="balanced_accuracy"
    ... ).fit(X_train, y_train)
    >>> print(
    ...     f"Cut-off point found at {classifier_tuned.best_threshold_:.3f}"
    ... )
    Cut-off point found at 0.342
    >>> print(classification_report(y_test, classifier_tuned.predict(X_test)))
                  precision    recall  f1-score   support
    <BLANKLINE>
               0       0.96      0.95      0.96       224
               1       0.61      0.65      0.63        26
    <BLANKLINE>
        accuracy                           0.92       250
       macro avg       0.78      0.80      0.79       250
    weighted avg       0.92      0.92      0.92       250
    <BLANKLINE>
    """

    # Constructor-parameter validation spec consumed by scikit-learn's
    # parameter-validation machinery; extends the parent class's constraints.
    _parameter_constraints: dict = {
        **BaseThresholdClassifier._parameter_constraints,
        "scoring": [
            StrOptions(set(get_scorer_names())),
            callable,
            MutableMapping,
        ],
        "thresholds": [Interval(Integral, 1, None, closed="left"), "array-like"],
        "cv": [
            "cv_object",
            StrOptions({"prefit"}),
            Interval(RealNotInt, 0.0, 1.0, closed="neither"),
        ],
        "refit": ["boolean"],
        "n_jobs": [Integral, None],
        "random_state": ["random_state"],
        "store_cv_results": ["boolean"],
    }

    def __init__(
        self,
        estimator,
        *,
        scoring="balanced_accuracy",
        response_method="auto",
        thresholds=100,
        cv=None,
        refit=True,
        n_jobs=None,
        random_state=None,
        store_cv_results=False,
    ):
        super().__init__(estimator=estimator, response_method=response_method)
        self.scoring = scoring
        self.thresholds = thresholds
        self.cv = cv
        self.refit = refit
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.store_cv_results = store_cv_results

    def _fit(self, X, y, **params):
        """Fit the classifier and post-tune the decision threshold.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training data.

        y : array-like of shape (n_samples,)
            Target values.

        **params : dict
            Parameters to pass to the `fit` method of the underlying
            classifier and to the `scoring` scorer.

        Returns
        -------
        self : object
            Returns an instance of self.
        """
        # Resolve `cv` into a concrete strategy: a float in (0, 1) means a
        # single stratified shuffle split with that validation size; "prefit"
        # means no splitting (estimator must already be fitted); anything else
        # goes through the regular `check_cv` machinery.
        if isinstance(self.cv, Real) and 0 < self.cv < 1:
            cv = StratifiedShuffleSplit(
                n_splits=1, test_size=self.cv, random_state=self.random_state
            )
        elif self.cv == "prefit":
            if self.refit is True:
                raise ValueError("When cv='prefit', refit cannot be True.")
            try:
                check_is_fitted(self.estimator, "classes_")
            except NotFittedError as exc:
                raise NotFittedError(
                    """When cv='prefit', `estimator` must be fitted."""
                ) from exc
            cv = self.cv
        else:
            cv = check_cv(self.cv, y=y, classifier=True)
            if self.refit is False and cv.get_n_splits() > 1:
                raise ValueError("When cv has several folds, refit cannot be False.")
        routed_params = process_routing(self, "fit", **params)
        self._curve_scorer = self._get_curve_scorer()
        # in the following block, we:
        # - define the final classifier `self.estimator_` and train it if necessary
        # - define `classifier` to be used to post-tune the decision threshold
        # - define `split` to be used to fit/score `classifier`
        if cv == "prefit":
            self.estimator_ = self.estimator
            classifier = self.estimator_
            # A single pseudo-split: no training indices, validate on all of X.
            splits = [(None, range(_num_samples(X)))]
        else:
            self.estimator_ = clone(self.estimator)
            classifier = clone(self.estimator)
            splits = cv.split(X, y, **routed_params.splitter.split)
            if self.refit:
                # train on the whole dataset
                X_train, y_train, fit_params_train = X, y, routed_params.estimator.fit
            else:
                # single split cross-validation
                train_idx, _ = next(cv.split(X, y, **routed_params.splitter.split))
                X_train = _safe_indexing(X, train_idx)
                y_train = _safe_indexing(y, train_idx)
                fit_params_train = _check_method_params(
                    X, routed_params.estimator.fit, indices=train_idx
                )
            self.estimator_.fit(X_train, y_train, **fit_params_train)
        # Fit and score one classifier per split in parallel; each call yields
        # the per-threshold scores and the thresholds used for that fold.
        cv_scores, cv_thresholds = zip(
            *Parallel(n_jobs=self.n_jobs)(
                delayed(_fit_and_score_over_thresholds)(
                    clone(classifier) if cv != "prefit" else classifier,
                    X,
                    y,
                    fit_params=routed_params.estimator.fit,
                    train_idx=train_idx,
                    val_idx=val_idx,
                    curve_scorer=self._curve_scorer,
                    score_params=routed_params.scorer.score,
                )
                for train_idx, val_idx in splits
            )
        )
        # Identical first/last thresholds on a fold imply constant predictions:
        # there is no curve to optimize over.
        if any(np.isclose(th[0], th[-1]) for th in cv_thresholds):
            raise ValueError(
                "The provided estimator makes constant predictions. Therefore, it is "
                "impossible to optimize the decision threshold."
            )
        # find the global min and max thresholds across all folds
        min_threshold = min(
            split_thresholds.min() for split_thresholds in cv_thresholds
        )
        max_threshold = max(
            split_thresholds.max() for split_thresholds in cv_thresholds
        )
        if isinstance(self.thresholds, Integral):
            decision_thresholds = np.linspace(
                min_threshold, max_threshold, num=self.thresholds
            )
        else:
            decision_thresholds = np.asarray(self.thresholds)
        # Average the per-fold score curves on the common threshold grid and
        # pick the threshold maximizing the mean objective score.
        objective_scores = _mean_interpolated_score(
            decision_thresholds, cv_thresholds, cv_scores
        )
        best_idx = objective_scores.argmax()
        self.best_score_ = objective_scores[best_idx]
        self.best_threshold_ = decision_thresholds[best_idx]
        if self.store_cv_results:
            self.cv_results_ = {
                "thresholds": decision_thresholds,
                "scores": objective_scores,
            }
        return self

    def predict(self, X):
        """Predict the target of new samples.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The samples, as accepted by `estimator.predict`.

        Returns
        -------
        class_labels : ndarray of shape (n_samples,)
            The predicted class.
        """
        check_is_fitted(self, "estimator_")
        pos_label = self._curve_scorer._get_pos_label()
        y_score, _ = _get_response_values_binary(
            self.estimator_,
            X,
            self._get_response_method(),
            pos_label=pos_label,
        )
        # Threshold the continuous output at the tuned cut-off to obtain the
        # final class labels.
        return _threshold_scores_to_class_labels(
            y_score, self.best_threshold_, self.classes_, pos_label
        )

    def get_metadata_routing(self):
        """Get metadata routing of this object.

        Please check :ref:`User Guide <metadata_routing>` on how the routing
        mechanism works.

        Returns
        -------
        routing : MetadataRouter
            A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
            routing information.
        """
        # Route fit-time metadata to the estimator's `fit`, the CV splitter's
        # `split`, and the curve scorer's `score`.
        router = (
            MetadataRouter(owner=self)
            .add(
                estimator=self.estimator,
                method_mapping=MethodMapping().add(callee="fit", caller="fit"),
            )
            .add(
                splitter=self.cv,
                method_mapping=MethodMapping().add(callee="split", caller="fit"),
            )
            .add(
                scorer=self._get_curve_scorer(),
                method_mapping=MethodMapping().add(callee="score", caller="fit"),
            )
        )
        return router

    def _get_curve_scorer(self):
        """Get the curve scorer based on the objective metric used."""
        scoring = check_scoring(self.estimator, scoring=self.scoring)
        curve_scorer = _CurveScorer.from_scorer(
            scoring, self._get_response_method(), self.thresholds
        )
        return curve_scorer
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/model_selection/_search_successive_halving.py | sklearn/model_selection/_search_successive_halving.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from abc import abstractmethod
from math import ceil, floor, log
from numbers import Integral, Real
import numpy as np
from sklearn.base import _fit_context, is_classifier
from sklearn.metrics._scorer import get_scorer_names
from sklearn.model_selection import ParameterGrid, ParameterSampler
from sklearn.model_selection._search import BaseSearchCV
from sklearn.model_selection._split import _yields_constant_splits, check_cv
from sklearn.utils import resample
from sklearn.utils._param_validation import Interval, StrOptions
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.validation import _num_samples, validate_data
__all__ = ["HalvingGridSearchCV", "HalvingRandomSearchCV"]
class _SubsampleMetaSplitter:
    """Meta-splitter yielding only a fraction of each split of a base CV.

    Wraps ``base_cv`` and, for every (train, test) split it produces,
    subsamples ``fraction`` of the train indices without replacement; the
    test indices are subsampled too when ``subsample_test`` is true.
    """

    def __init__(self, *, base_cv, fraction, subsample_test, random_state):
        self.base_cv = base_cv
        self.fraction = fraction
        self.subsample_test = subsample_test
        self.random_state = random_state

    def _draw(self, indices):
        # Keep `fraction` of `indices`, sampled without replacement.
        return resample(
            indices,
            replace=False,
            random_state=self.random_state,
            n_samples=int(self.fraction * len(indices)),
        )

    def split(self, X, y, **kwargs):
        for train_idx, test_idx in self.base_cv.split(X, y, **kwargs):
            # Subsample the train side first so that, when `random_state` is
            # a RandomState instance, the RNG is consumed in the same order
            # as sequential direct calls to `resample`.
            train_idx = self._draw(train_idx)
            if self.subsample_test:
                test_idx = self._draw(test_idx)
            yield train_idx, test_idx
def _top_k(results, k, itr):
# Return the best candidates of a given iteration
iteration, mean_test_score, params = (
np.asarray(a)
for a in (results["iter"], results["mean_test_score"], results["params"])
)
iter_indices = np.flatnonzero(iteration == itr)
scores = mean_test_score[iter_indices]
# argsort() places NaNs at the end of the array so we move NaNs to the
# front of the array so the last `k` items are the those with the
# highest scores.
sorted_indices = np.roll(np.argsort(scores), np.count_nonzero(np.isnan(scores)))
return np.array(params[iter_indices][sorted_indices[-k:]])
class BaseSuccessiveHalving(BaseSearchCV):
    """Implements successive halving.

    Ref:
    Almost optimal exploration in multi-armed bandits, ICML 13
    Zohar Karnin, Tomer Koren, Oren Somekh
    """

    _parameter_constraints: dict = {
        **BaseSearchCV._parameter_constraints,
        # overwrite `scoring` since multi-metrics are not supported
        "scoring": [StrOptions(set(get_scorer_names())), callable, None],
        "random_state": ["random_state"],
        "max_resources": [
            Interval(Integral, 0, None, closed="neither"),
            StrOptions({"auto"}),
        ],
        "min_resources": [
            Interval(Integral, 0, None, closed="neither"),
            StrOptions({"exhaust", "smallest"}),
        ],
        "resource": [str],
        "factor": [Interval(Real, 0, None, closed="neither")],
        "aggressive_elimination": ["boolean"],
    }
    _parameter_constraints.pop("pre_dispatch") # not used in this class

    def __init__(
        self,
        estimator,
        *,
        scoring=None,
        n_jobs=None,
        refit=True,
        cv=5,
        verbose=0,
        random_state=None,
        error_score=np.nan,
        return_train_score=True,
        max_resources="auto",
        min_resources="exhaust",
        resource="n_samples",
        factor=3,
        aggressive_elimination=False,
    ):
        super().__init__(
            estimator,
            scoring=scoring,
            n_jobs=n_jobs,
            refit=refit,
            cv=cv,
            verbose=verbose,
            error_score=error_score,
            return_train_score=return_train_score,
        )
        self.random_state = random_state
        self.max_resources = max_resources
        self.resource = resource
        self.factor = factor
        self.min_resources = min_resources
        self.aggressive_elimination = aggressive_elimination

    def _check_input_parameters(self, X, y, split_params):
        """Validate parameters and resolve `min_resources_`/`max_resources_`."""
        # We need to enforce that successive calls to cv.split() yield the same
        # splits: see https://github.com/scikit-learn/scikit-learn/issues/15149
        if not _yields_constant_splits(self._checked_cv_orig):
            raise ValueError(
                "The cv parameter must yield consistent folds across "
                "calls to split(). Set its random_state to an int, or set "
                "shuffle=False."
            )
        if (
            self.resource != "n_samples"
            and self.resource not in self.estimator.get_params()
        ):
            raise ValueError(
                f"Cannot use resource={self.resource} which is not supported "
                f"by estimator {self.estimator.__class__.__name__}"
            )
        if isinstance(self, HalvingRandomSearchCV):
            if self.min_resources == self.n_candidates == "exhaust":
                # for n_candidates=exhaust to work, we need to know what
                # min_resources is. Similarly min_resources=exhaust needs to
                # know the actual number of candidates.
                raise ValueError(
                    "n_candidates and min_resources cannot be both set to 'exhaust'."
                )
        # Resolve the 'smallest'/'exhaust' string heuristics into an integer
        # amount of resources allocated at the first iteration.
        self.min_resources_ = self.min_resources
        if self.min_resources_ in ("smallest", "exhaust"):
            if self.resource == "n_samples":
                n_splits = self._checked_cv_orig.get_n_splits(X, y, **split_params)
                # please see https://gph.is/1KjihQe for a justification
                magic_factor = 2
                self.min_resources_ = n_splits * magic_factor
                if is_classifier(self.estimator):
                    y = validate_data(self, X="no_validation", y=y)
                    check_classification_targets(y)
                    n_classes = np.unique(y).shape[0]
                    self.min_resources_ *= n_classes
            else:
                self.min_resources_ = 1
            # if 'exhaust', min_resources_ might be set to a higher value later
            # in _run_search
        self.max_resources_ = self.max_resources
        if self.max_resources_ == "auto":
            if not self.resource == "n_samples":
                raise ValueError(
                    "resource can only be 'n_samples' when max_resources='auto'"
                )
            self.max_resources_ = _num_samples(X)
        if self.min_resources_ > self.max_resources_:
            raise ValueError(
                f"min_resources_={self.min_resources_} is greater "
                f"than max_resources_={self.max_resources_}."
            )
        if self.min_resources_ == 0:
            raise ValueError(
                f"min_resources_={self.min_resources_}: you might have passed "
                "an empty dataset X."
            )

    @staticmethod
    def _select_best_index(refit, refit_metric, results):
        """Custom refit callable to return the index of the best candidate.

        We want the best candidate out of the last iteration. By default
        BaseSearchCV would return the best candidate out of all iterations.

        Currently, we only support for a single metric thus `refit` and
        `refit_metric` are not required.
        """
        last_iter = np.max(results["iter"])
        last_iter_indices = np.flatnonzero(results["iter"] == last_iter)
        test_scores = results["mean_test_score"][last_iter_indices]
        # If all scores are NaNs there is no way to pick between them,
        # so we (arbitrarily) declare the zero'th entry the best one
        if np.isnan(test_scores).all():
            best_idx = 0
        else:
            best_idx = np.nanargmax(test_scores)
        return last_iter_indices[best_idx]

    @_fit_context(
        # Halving*SearchCV.estimator is not validated yet
        prefer_skip_nested_validation=False
    )
    def fit(self, X, y=None, **params):
        """Run fit with all sets of parameters.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where `n_samples` is the number of samples and
            `n_features` is the number of features.

        y : array-like, shape (n_samples,) or (n_samples, n_output), optional
            Target relative to X for classification or regression;
            None for unsupervised learning.

        **params : dict of string -> object
            Parameters passed to the ``fit`` method of the estimator.

        Returns
        -------
        self : object
            Instance of fitted estimator.
        """
        self._checked_cv_orig = check_cv(
            self.cv, y, classifier=is_classifier(self.estimator)
        )
        routed_params = self._get_routed_params_for_fit(params)
        self._check_input_parameters(
            X=X, y=y, split_params=routed_params.splitter.split
        )
        self._n_samples_orig = _num_samples(X)
        super().fit(X, y=y, **params)
        # Set best_score_: BaseSearchCV does not set it, as refit is a callable
        self.best_score_ = self.cv_results_["mean_test_score"][self.best_index_]
        return self

    def _run_search(self, evaluate_candidates):
        """Evaluate candidates over successive iterations with growing resources."""
        candidate_params = self._generate_candidate_params()
        if self.resource != "n_samples" and any(
            self.resource in candidate for candidate in candidate_params
        ):
            # Can only check this now since we need the candidates list
            raise ValueError(
                f"Cannot use parameter {self.resource} as the resource since "
                "it is part of the searched parameters."
            )
        # n_required_iterations is the number of iterations needed so that the
        # last iterations evaluates less than `factor` candidates.
        n_required_iterations = 1 + floor(log(len(candidate_params), self.factor))
        if self.min_resources == "exhaust":
            # To exhaust the resources, we want to start with the biggest
            # min_resources possible so that the last (required) iteration
            # uses as many resources as possible
            last_iteration = n_required_iterations - 1
            self.min_resources_ = max(
                self.min_resources_,
                self.max_resources_ // self.factor**last_iteration,
            )
        # n_possible_iterations is the number of iterations that we can
        # actually do starting from min_resources and without exceeding
        # max_resources. Depending on max_resources and the number of
        # candidates, this may be higher or smaller than
        # n_required_iterations.
        n_possible_iterations = 1 + floor(
            log(self.max_resources_ // self.min_resources_, self.factor)
        )
        if self.aggressive_elimination:
            n_iterations = n_required_iterations
        else:
            n_iterations = min(n_possible_iterations, n_required_iterations)
        if self.verbose:
            print(f"n_iterations: {n_iterations}")
            print(f"n_required_iterations: {n_required_iterations}")
            print(f"n_possible_iterations: {n_possible_iterations}")
            print(f"min_resources_: {self.min_resources_}")
            print(f"max_resources_: {self.max_resources_}")
            print(f"aggressive_elimination: {self.aggressive_elimination}")
            print(f"factor: {self.factor}")
        self.n_resources_ = []
        self.n_candidates_ = []
        # Main successive-halving loop: each iteration multiplies the amount
        # of resources by `factor` and keeps roughly the top 1/factor of the
        # remaining candidates.
        for itr in range(n_iterations):
            power = itr # default
            if self.aggressive_elimination:
                # this will set n_resources to the initial value (i.e. the
                # value of n_resources at the first iteration) for as many
                # iterations as needed (while candidates are being
                # eliminated), and then go on as usual.
                power = max(0, itr - n_required_iterations + n_possible_iterations)
            n_resources = int(self.factor**power * self.min_resources_)
            # guard, probably not needed
            n_resources = min(n_resources, self.max_resources_)
            self.n_resources_.append(n_resources)
            n_candidates = len(candidate_params)
            self.n_candidates_.append(n_candidates)
            if self.verbose:
                print("-" * 10)
                print(f"iter: {itr}")
                print(f"n_candidates: {n_candidates}")
                print(f"n_resources: {n_resources}")
            if self.resource == "n_samples":
                # subsampling will be done in cv.split()
                cv = _SubsampleMetaSplitter(
                    base_cv=self._checked_cv_orig,
                    fraction=n_resources / self._n_samples_orig,
                    subsample_test=True,
                    random_state=self.random_state,
                )
            else:
                # Need copy so that the n_resources of next iteration does
                # not overwrite
                candidate_params = [c.copy() for c in candidate_params]
                for candidate in candidate_params:
                    candidate[self.resource] = n_resources
                cv = self._checked_cv_orig
            more_results = {
                "iter": [itr] * n_candidates,
                "n_resources": [n_resources] * n_candidates,
            }
            results = evaluate_candidates(
                candidate_params, cv, more_results=more_results
            )
            n_candidates_to_keep = ceil(n_candidates / self.factor)
            candidate_params = _top_k(results, n_candidates_to_keep, itr)
        self.n_remaining_candidates_ = len(candidate_params)
        self.n_required_iterations_ = n_required_iterations
        self.n_possible_iterations_ = n_possible_iterations
        self.n_iterations_ = n_iterations

    @abstractmethod
    def _generate_candidate_params(self):
        # Subclasses return the iterable of candidate parameter dicts to
        # evaluate at the first iteration.
        pass
class HalvingGridSearchCV(BaseSuccessiveHalving):
    """Search over specified parameter values with successive halving.

    The search strategy starts evaluating all the candidates with a small
    amount of resources and iteratively selects the best candidates, using
    more and more resources.

    Read more in the :ref:`User guide <successive_halving_user_guide>`.

    .. note::

      This estimator is still **experimental** for now: the predictions
      and the API might change without any deprecation cycle. To use it,
      you need to explicitly import ``enable_halving_search_cv``::

        >>> # explicitly require this experimental feature
        >>> from sklearn.experimental import enable_halving_search_cv  # noqa
        >>> # now you can import normally from model_selection
        >>> from sklearn.model_selection import HalvingGridSearchCV

    Parameters
    ----------
    estimator : estimator object
        This is assumed to implement the scikit-learn estimator interface.
        Either estimator needs to provide a ``score`` function,
        or ``scoring`` must be passed.

    param_grid : dict or list of dictionaries
        Dictionary with parameters names (string) as keys and lists of
        parameter settings to try as values, or a list of such
        dictionaries, in which case the grids spanned by each dictionary
        in the list are explored. This enables searching over any sequence
        of parameter settings.

    factor : int or float, default=3
        The 'halving' parameter, which determines the proportion of candidates
        that are selected for each subsequent iteration. For example,
        ``factor=3`` means that only one third of the candidates are selected.

    resource : ``'n_samples'`` or str, default='n_samples'
        Defines the resource that increases with each iteration. By default,
        the resource is the number of samples. It can also be set to any
        parameter of the base estimator that accepts positive integer
        values, e.g. 'n_iterations' or 'n_estimators' for a gradient
        boosting estimator. In this case ``max_resources`` cannot be 'auto'
        and must be set explicitly.

    max_resources : int, default='auto'
        The maximum amount of resource that any candidate is allowed to use
        for a given iteration. By default, this is set to ``n_samples`` when
        ``resource='n_samples'`` (default), else an error is raised.

    min_resources : {'exhaust', 'smallest'} or int, default='exhaust'
        The minimum amount of resource that any candidate is allowed to use
        for a given iteration. Equivalently, this defines the amount of
        resources `r0` that are allocated for each candidate at the first
        iteration.

        - 'smallest' is a heuristic that sets `r0` to a small value:

          - ``n_splits * 2`` when ``resource='n_samples'`` for a regression problem
          - ``n_classes * n_splits * 2`` when ``resource='n_samples'`` for a
            classification problem
          - ``1`` when ``resource != 'n_samples'``

        - 'exhaust' will set `r0` such that the **last** iteration uses as
          much resources as possible. Namely, the last iteration will use the
          highest value smaller than ``max_resources`` that is a multiple of
          both ``min_resources`` and ``factor``. In general, using 'exhaust'
          leads to a more accurate estimator, but is slightly more time
          consuming.

        Note that the amount of resources used at each iteration is always a
        multiple of ``min_resources``.

    aggressive_elimination : bool, default=False
        This is only relevant in cases where there isn't enough resources to
        reduce the remaining candidates to at most `factor` after the last
        iteration. If ``True``, then the search process will 'replay' the
        first iteration for as long as needed until the number of candidates
        is small enough. This is ``False`` by default, which means that the
        last iteration may evaluate more than ``factor`` candidates. See
        :ref:`aggressive_elimination` for more details.

    cv : int, cross-validation generator or iterable, default=5
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - integer, to specify the number of folds in a `(Stratified)KFold`,
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.

        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
        other cases, :class:`KFold` is used. These splitters are instantiated
        with `shuffle=False` so the splits will be the same across calls.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

        .. note::
            Due to implementation details, the folds produced by `cv` must be
            the same across multiple calls to `cv.split()`. For
            built-in `scikit-learn` iterators, this can be achieved by
            deactivating shuffling (`shuffle=False`), or by setting the
            `cv`'s `random_state` parameter to an integer.

    scoring : str or callable, default=None
        Scoring method to use to evaluate the predictions on the test set.

        - str: see :ref:`scoring_string_names` for options.
        - callable: a scorer callable object (e.g., function) with signature
          ``scorer(estimator, X, y)``. See :ref:`scoring_callable` for details.
        - `None`: the `estimator`'s
          :ref:`default evaluation criterion <scoring_api_overview>` is used.

    refit : bool or callable, default=True
        Refit an estimator using the best found parameters on the whole
        dataset.

        Where there are considerations other than maximum score in
        choosing a best estimator, ``refit`` can be set to a function which
        returns the selected ``best_index_`` given ``cv_results_``. In that
        case, the ``best_estimator_`` and ``best_params_`` will be set
        according to the returned ``best_index_`` while the ``best_score_``
        attribute will not be available.

        The refitted estimator is made available at the ``best_estimator_``
        attribute and permits using ``predict`` directly on this
        ``HalvingGridSearchCV`` instance.

        See :ref:`this example
        <sphx_glr_auto_examples_model_selection_plot_grid_search_refit_callable.py>`
        for an example of how to use ``refit=callable`` to balance model
        complexity and cross-validated score.

    error_score : 'raise' or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error. Default is ``np.nan``.

    return_train_score : bool, default=False
        If ``False``, the ``cv_results_`` attribute will not include training
        scores.
        Computing training scores is used to get insights on how different
        parameter settings impact the overfitting/underfitting trade-off.
        However computing the scores on the training set can be computationally
        expensive and is not strictly required to select the parameters that
        yield the best generalization performance.

    random_state : int, RandomState instance or None, default=None
        Pseudo random number generator state used for subsampling the dataset
        when `resources != 'n_samples'`. Ignored otherwise.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    n_jobs : int or None, default=None
        Number of jobs to run in parallel.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    verbose : int
        Controls the verbosity: the higher, the more messages.

    Attributes
    ----------
    n_resources_ : list of int
        The amount of resources used at each iteration.

    n_candidates_ : list of int
        The number of candidate parameters that were evaluated at each
        iteration.

    n_remaining_candidates_ : int
        The number of candidate parameters that are left after the last
        iteration. It corresponds to `ceil(n_candidates[-1] / factor)`

    max_resources_ : int
        The maximum number of resources that any candidate is allowed to use
        for a given iteration. Note that since the number of resources used
        at each iteration must be a multiple of ``min_resources_``, the
        actual number of resources used at the last iteration may be smaller
        than ``max_resources_``.

    min_resources_ : int
        The amount of resources that are allocated for each candidate at the
        first iteration.

    n_iterations_ : int
        The actual number of iterations that were run. This is equal to
        ``n_required_iterations_`` if ``aggressive_elimination`` is ``True``.
        Else, this is equal to ``min(n_possible_iterations_,
        n_required_iterations_)``.

    n_possible_iterations_ : int
        The number of iterations that are possible starting with
        ``min_resources_`` resources and without exceeding
        ``max_resources_``.

    n_required_iterations_ : int
        The number of iterations that are required to end up with less than
        ``factor`` candidates at the last iteration, starting with
        ``min_resources_`` resources. This will be smaller than
        ``n_possible_iterations_`` when there isn't enough resources.

    cv_results_ : dict of numpy (masked) ndarrays
        A dict with keys as column headers and values as columns, that can be
        imported into a pandas ``DataFrame``. It contains lots of information
        for analysing the results of a search.
        Please refer to the :ref:`User guide<successive_halving_cv_results>`
        for details.
        For an example of analysing ``cv_results_``,
        see :ref:`sphx_glr_auto_examples_model_selection_plot_grid_search_stats.py`.

    best_estimator_ : estimator or dict
        Estimator that was chosen by the search, i.e. estimator
        which gave highest score (or smallest loss if specified)
        on the left out data. Not available if ``refit=False``.

    best_score_ : float
        Mean cross-validated score of the best_estimator.

    best_params_ : dict
        Parameter setting that gave the best results on the hold out data.

    best_index_ : int
        The index (of the ``cv_results_`` arrays) which corresponds to the best
        candidate parameter setting.
        The dict at ``search.cv_results_['params'][search.best_index_]`` gives
        the parameter setting for the best model, that gives the highest
        mean score (``search.best_score_``).

    scorer_ : function or a dict
        Scorer function used on the held out data to choose the best
        parameters for the model.

    n_splits_ : int
        The number of cross-validation splits (folds/iterations).

    refit_time_ : float
        Seconds used for refitting the best model on the whole dataset.
        This is present only if ``refit`` is not False.

    multimetric_ : bool
        Whether or not the scorers compute several metrics.

    classes_ : ndarray of shape (n_classes,)
        The classes labels. This is present only if ``refit`` is specified and
        the underlying estimator is a classifier.

    n_features_in_ : int
        Number of features seen during :term:`fit`. Only defined if
        `best_estimator_` is defined (see the documentation for the `refit`
        parameter for more details) and that `best_estimator_` exposes
        `n_features_in_` when fit.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Only defined if
        `best_estimator_` is defined (see the documentation for the `refit`
        parameter for more details) and that `best_estimator_` exposes
        `feature_names_in_` when fit.

        .. versionadded:: 1.0

    See Also
    --------
    :class:`HalvingRandomSearchCV`:
        Random search over a set of parameters using successive halving.

    Notes
    -----
    The parameters selected are those that maximize the score of the held-out
    data, according to the scoring parameter.

    All parameter combinations scored with a NaN will share the lowest rank.

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.ensemble import RandomForestClassifier
    >>> from sklearn.experimental import enable_halving_search_cv  # noqa
    >>> from sklearn.model_selection import HalvingGridSearchCV
    ...
    >>> X, y = load_iris(return_X_y=True)
    >>> clf = RandomForestClassifier(random_state=0)
    ...
    >>> param_grid = {"max_depth": [3, None],
    ...               "min_samples_split": [5, 10]}
    >>> search = HalvingGridSearchCV(clf, param_grid, resource='n_estimators',
    ...                              max_resources=10,
    ...                              random_state=0).fit(X, y)
    >>> search.best_params_  # doctest: +SKIP
    {'max_depth': None, 'min_samples_split': 10, 'n_estimators': 9}
    """

    # Extend the base-class constraints with the grid-specific parameter.
    _parameter_constraints: dict = {
        **BaseSuccessiveHalving._parameter_constraints,
        "param_grid": [dict, list],
    }

    def __init__(
        self,
        estimator,
        param_grid,
        *,
        factor=3,
        resource="n_samples",
        max_resources="auto",
        min_resources="exhaust",
        aggressive_elimination=False,
        cv=5,
        scoring=None,
        refit=True,
        error_score=np.nan,
        return_train_score=True,
        random_state=None,
        n_jobs=None,
        verbose=0,
    ):
        super().__init__(
            estimator,
            scoring=scoring,
            n_jobs=n_jobs,
            refit=refit,
            verbose=verbose,
            cv=cv,
            random_state=random_state,
            error_score=error_score,
            return_train_score=return_train_score,
            max_resources=max_resources,
            resource=resource,
            factor=factor,
            min_resources=min_resources,
            aggressive_elimination=aggressive_elimination,
        )
        self.param_grid = param_grid

    def _generate_candidate_params(self):
        """Materialize the exhaustive grid of parameter candidates."""
        return ParameterGrid(self.param_grid)
class HalvingRandomSearchCV(BaseSuccessiveHalving):
"""Randomized search on hyper parameters.
The search strategy starts evaluating all the candidates with a small
amount of resources and iteratively selects the best candidates, using more
and more resources.
The candidates are sampled at random from the parameter space and the
number of sampled candidates is determined by ``n_candidates``.
    Read more in the :ref:`User guide <successive_halving_user_guide>`.
.. note::
This estimator is still **experimental** for now: the predictions
and the API might change without any deprecation cycle. To use it,
you need to explicitly import ``enable_halving_search_cv``::
>>> # explicitly require this experimental feature
>>> from sklearn.experimental import enable_halving_search_cv # noqa
>>> # now you can import normally from model_selection
>>> from sklearn.model_selection import HalvingRandomSearchCV
Parameters
----------
estimator : estimator object
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_distributions : dict or list of dicts
Dictionary with parameters names (`str`) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
If a list of dicts is given, first a dict is sampled uniformly, and
then a parameter is sampled using that dict as above.
n_candidates : "exhaust" or int, default="exhaust"
The number of candidate parameters to sample, at the first
iteration. Using 'exhaust' will sample enough candidates so that the
last iteration uses as many resources as possible, based on
`min_resources`, `max_resources` and `factor`. In this case,
`min_resources` cannot be 'exhaust'.
factor : int or float, default=3
The 'halving' parameter, which determines the proportion of candidates
that are selected for each subsequent iteration. For example,
``factor=3`` means that only one third of the candidates are selected.
resource : ``'n_samples'`` or str, default='n_samples'
Defines the resource that increases with each iteration. By default,
the resource is the number of samples. It can also be set to any
parameter of the base estimator that accepts positive integer
values, e.g. 'n_iterations' or 'n_estimators' for a gradient
boosting estimator. In this case ``max_resources`` cannot be 'auto'
and must be set explicitly.
max_resources : int, default='auto'
The maximum number of resources that any candidate is allowed to use
for a given iteration. By default, this is set ``n_samples`` when
``resource='n_samples'`` (default), else an error is raised.
min_resources : {'exhaust', 'smallest'} or int, default='smallest'
The minimum amount of resource that any candidate is allowed to use
for a given iteration. Equivalently, this defines the amount of
resources `r0` that are allocated for each candidate at the first
iteration.
- 'smallest' is a heuristic that sets `r0` to a small value:
- ``n_splits * 2`` when ``resource='n_samples'`` for a regression problem
- ``n_classes * n_splits * 2`` when ``resource='n_samples'`` for a
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/model_selection/_search.py | sklearn/model_selection/_search.py | """
The :mod:`sklearn.model_selection._search` includes utilities to fine-tune the
parameters of an estimator.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numbers
import operator
import time
import warnings
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from collections.abc import Iterable, Mapping, Sequence
from copy import deepcopy
from functools import partial, reduce
from inspect import signature
from itertools import product
import numpy as np
from numpy.ma import MaskedArray
from scipy.stats import rankdata
from sklearn.base import (
BaseEstimator,
MetaEstimatorMixin,
_fit_context,
clone,
is_classifier,
)
from sklearn.exceptions import NotFittedError
from sklearn.metrics import check_scoring
from sklearn.metrics._scorer import (
_check_multimetric_scoring,
_MultimetricScorer,
get_scorer_names,
)
from sklearn.model_selection._split import check_cv
from sklearn.model_selection._validation import (
_aggregate_score_dicts,
_fit_and_score,
_insert_error_scores,
_normalize_score_results,
_warn_or_raise_about_fit_failures,
)
from sklearn.utils import Bunch, check_random_state
from sklearn.utils._array_api import xpx
from sklearn.utils._param_validation import HasMethods, Interval, StrOptions
from sklearn.utils._repr_html.estimator import _VisualBlock
from sklearn.utils._tags import get_tags
from sklearn.utils.metadata_routing import (
MetadataRouter,
MethodMapping,
_raise_for_params,
_routing_enabled,
process_routing,
)
from sklearn.utils.metaestimators import available_if
from sklearn.utils.parallel import Parallel, delayed
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.validation import _check_method_params, check_is_fitted, indexable
__all__ = ["GridSearchCV", "ParameterGrid", "ParameterSampler", "RandomizedSearchCV"]
class ParameterGrid:
    """Grid of parameters with a discrete number of values for each.

    Iterating over a :class:`ParameterGrid` yields every combination of
    allowed parameter values, in a deterministic order.

    Read more in the :ref:`User Guide <grid_search>`.

    Parameters
    ----------
    param_grid : dict of str to sequence, or sequence of such
        The parameter grid to explore, as a dictionary mapping estimator
        parameters to sequences of allowed values.

        An empty dict signifies default parameters.

        A sequence of dicts signifies a sequence of grids to search, and is
        useful to avoid exploring parameter combinations that make no sense
        or have no effect. See the examples below.

    Examples
    --------
    >>> from sklearn.model_selection import ParameterGrid
    >>> param_grid = {'a': [1, 2], 'b': [True, False]}
    >>> list(ParameterGrid(param_grid)) == (
    ...    [{'a': 1, 'b': True}, {'a': 1, 'b': False},
    ...     {'a': 2, 'b': True}, {'a': 2, 'b': False}])
    True

    >>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
    >>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
    ...                               {'kernel': 'rbf', 'gamma': 1},
    ...                               {'kernel': 'rbf', 'gamma': 10}]
    True
    >>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
    True

    See Also
    --------
    GridSearchCV : Uses :class:`ParameterGrid` to perform a full parallelized
        parameter search.
    """

    def __init__(self, param_grid):
        # Validate the overall container type first.
        if not isinstance(param_grid, (Mapping, Iterable)):
            raise TypeError(
                f"Parameter grid should be a dict or a list, got: {param_grid!r} of"
                f" type {type(param_grid).__name__}"
            )

        # Normalize: a single mapping becomes a one-element list of grids.
        if isinstance(param_grid, Mapping):
            param_grid = [param_grid]

        # Validate every sub-grid and each of its value sequences.
        for grid in param_grid:
            if not isinstance(grid, dict):
                raise TypeError(f"Parameter grid is not a dict ({grid!r})")
            for key, value in grid.items():
                if isinstance(value, np.ndarray) and value.ndim > 1:
                    raise ValueError(
                        f"Parameter array for {key!r} should be one-dimensional, got:"
                        f" {value!r} with shape {value.shape}"
                    )
                if isinstance(value, str) or not isinstance(
                    value, (np.ndarray, Sequence)
                ):
                    raise TypeError(
                        f"Parameter grid for parameter {key!r} needs to be a list or a"
                        f" numpy array, but got {value!r} (of type "
                        f"{type(value).__name__}) instead. Single values "
                        "need to be wrapped in a list with one element."
                    )
                if len(value) == 0:
                    raise ValueError(
                        f"Parameter grid for parameter {key!r} need "
                        f"to be a non-empty sequence, got: {value!r}"
                    )

        self.param_grid = param_grid

    def __iter__(self):
        """Iterate over the points in the grid.

        Returns
        -------
        params : iterator over dict of str to any
            Yields dictionaries mapping each estimator parameter to one of
            its allowed values.
        """
        for sub_grid in self.param_grid:
            # Sort the keys for a reproducible iteration order.
            sorted_items = sorted(sub_grid.items())
            if not sorted_items:
                # An empty grid stands for the estimator's defaults.
                yield {}
                continue
            names, value_lists = zip(*sorted_items)
            for combination in product(*value_lists):
                yield dict(zip(names, combination))

    def __len__(self):
        """Number of points on the grid."""
        # reduce-based product handles plain iterables (np.prod cannot);
        # the initial value 1 makes an empty sub-grid count as one point.
        return sum(
            reduce(operator.mul, (len(values) for values in sub_grid.values()), 1)
            for sub_grid in self.param_grid
        )

    def __getitem__(self, ind):
        """Get the parameters that would be ``ind``th in iteration.

        Parameters
        ----------
        ind : int
            The iteration index.

        Returns
        -------
        params : dict of str to any
            Equal to ``list(self)[ind]``.
        """
        # Walk the sub-grids, subtracting each one's size from the index
        # until it falls inside the current sub-grid.  This keeps discrete
        # sampling without replacement memory efficient.
        remaining = ind
        for sub_grid in self.param_grid:
            # XXX: could memoize information used here
            if not sub_grid:
                if remaining == 0:
                    return {}
                remaining -= 1
                continue

            # Reverse so the most frequently cycling parameter comes first,
            # matching the order produced by ``__iter__``.
            names, value_lists = zip(*sorted(sub_grid.items())[::-1])
            sizes = [len(values) for values in value_lists]
            n_points = np.prod(sizes)

            if remaining >= n_points:
                # The index belongs to a later sub-grid.
                remaining -= n_points
                continue

            params = {}
            for name, values, size in zip(names, value_lists, sizes):
                remaining, position = divmod(remaining, size)
                params[name] = values[position]
            return params

        raise IndexError("ParameterGrid index out of range")
class ParameterSampler:
    """Generator on parameters sampled from given distributions.

    Non-deterministic iterable over random candidate combinations for hyper-
    parameter search. If all parameters are presented as a list, sampling
    without replacement is performed. If at least one parameter is given as
    a distribution, sampling with replacement is used. It is highly
    recommended to use continuous distributions for continuous parameters.

    Read more in the :ref:`User Guide <grid_search>`.

    Parameters
    ----------
    param_distributions : dict
        Dictionary with parameters names (`str`) as keys and distributions
        or lists of parameters to try. Distributions must provide a ``rvs``
        method for sampling (such as those from scipy.stats.distributions).
        If a list is given, it is sampled uniformly.
        If a list of dicts is given, first a dict is sampled uniformly, and
        then a parameter is sampled using that dict as above.

    n_iter : int
        Number of parameter settings that are produced.

    random_state : int, RandomState instance or None, default=None
        Pseudo random number generator state used for random uniform
        sampling from lists of possible values instead of scipy.stats
        distributions. Pass an int for reproducible output across multiple
        function calls. See :term:`Glossary <random_state>`.

    Returns
    -------
    params : dict of str to any
        **Yields** dictionaries mapping each estimator parameter to a
        sampled value.

    Examples
    --------
    >>> from sklearn.model_selection import ParameterSampler
    >>> from scipy.stats.distributions import expon
    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> param_grid = {'a':[1, 2], 'b': expon()}
    >>> param_list = list(ParameterSampler(param_grid, n_iter=4,
    ...                                    random_state=rng))
    >>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
    ...                 for d in param_list]
    >>> rounded_list == [{'b': 0.89856, 'a': 1},
    ...                  {'b': 0.923223, 'a': 1},
    ...                  {'b': 1.878964, 'a': 2},
    ...                  {'b': 1.038159, 'a': 2}]
    True
    """

    def __init__(self, param_distributions, n_iter, *, random_state=None):
        # Validate the container type before anything else.
        if not isinstance(param_distributions, (Mapping, Iterable)):
            raise TypeError(
                "Parameter distribution is not a dict or a list,"
                f" got: {param_distributions!r} of type "
                f"{type(param_distributions).__name__}"
            )

        # Normalize: a single mapping becomes a one-element list of dicts.
        if isinstance(param_distributions, Mapping):
            param_distributions = [param_distributions]

        # Every entry must be a dict whose values are either iterable or
        # expose an ``rvs`` sampling method (scipy-style distribution).
        for dist in param_distributions:
            if not isinstance(dist, dict):
                raise TypeError(
                    f"Parameter distribution is not a dict ({dist!r})"
                )
            for key, spec in dist.items():
                if not hasattr(spec, "rvs") and not isinstance(spec, Iterable):
                    raise TypeError(
                        f"Parameter grid for parameter {key!r} is not iterable "
                        f"or a distribution (value={spec})"
                    )

        self.n_iter = n_iter
        self.random_state = random_state
        self.param_distributions = param_distributions

    def _is_all_lists(self):
        # True when no value in any distribution dict exposes ``rvs``,
        # i.e. every parameter is given as a finite list of options.
        return all(
            all(not hasattr(spec, "rvs") for spec in dist.values())
            for dist in self.param_distributions
        )

    def __iter__(self):
        rng = check_random_state(self.random_state)

        if self._is_all_lists():
            # Every parameter is a finite list: enumerate the full grid and
            # sample candidate indices without replacement.
            param_grid = ParameterGrid(self.param_distributions)
            grid_size = len(param_grid)
            n_iter = self.n_iter

            if grid_size < n_iter:
                warnings.warn(
                    "The total space of parameters %d is smaller "
                    "than n_iter=%d. Running %d iterations. For exhaustive "
                    "searches, use GridSearchCV." % (grid_size, self.n_iter, grid_size),
                    UserWarning,
                )
                n_iter = grid_size

            for idx in sample_without_replacement(grid_size, n_iter, random_state=rng):
                yield param_grid[idx]
            return

        # At least one real distribution: sample with replacement.
        for _ in range(self.n_iter):
            dist = rng.choice(self.param_distributions)
            # Sort the keys of the dict for reproducibility.
            sample = dict()
            for name, spec in sorted(dist.items()):
                if hasattr(spec, "rvs"):
                    sample[name] = spec.rvs(random_state=rng)
                else:
                    sample[name] = spec[rng.randint(len(spec))]
            yield sample

    def __len__(self):
        """Number of points that will be sampled."""
        if not self._is_all_lists():
            return self.n_iter
        # Finite grid: never more samples than there are grid points.
        return min(self.n_iter, len(ParameterGrid(self.param_distributions)))
def _check_refit(search_cv, attr):
if not search_cv.refit:
raise AttributeError(
f"This {type(search_cv).__name__} instance was initialized with "
f"`refit=False`. {attr} is available only after refitting on the best "
"parameters. You can refit an estimator manually using the "
"`best_params_` attribute"
)
def _search_estimator_has(attr):
    """Check if we can delegate a method to the underlying estimator.

    Calling a prediction method will only be available if `refit=True`. In
    such case, we check first the fitted best estimator. If it is not
    fitted, we check the unfitted estimator.

    Checking the unfitted estimator allows to use `hasattr` on the
    `SearchCV` instance even before calling `fit`.
    """

    def check(self):
        _check_refit(self, attr)
        # Prefer the refit best estimator when it exists; otherwise probe
        # the unfitted estimator so ``hasattr`` works before ``fit``.
        target = getattr(self, "best_estimator_", self.estimator)
        # Raises AttributeError when ``attr`` is missing on ``target``.
        getattr(target, attr)
        return True

    return check
def _yield_masked_array_for_each_param(candidate_params):
"""
Yield a masked array for each candidate param.
`candidate_params` is a sequence of params which were used in
a `GridSearchCV`. We use masked arrays for the results, as not
all params are necessarily present in each element of
`candidate_params`. For example, if using `GridSearchCV` with
a `SVC` model, then one might search over params like:
- kernel=["rbf"], gamma=[0.1, 1]
- kernel=["poly"], degree=[1, 2]
and then param `'gamma'` would not be present in entries of
`candidate_params` corresponding to `kernel='poly'`.
"""
n_candidates = len(candidate_params)
param_results = defaultdict(dict)
for cand_idx, params in enumerate(candidate_params):
for name, value in params.items():
param_results["param_%s" % name][cand_idx] = value
for key, param_result in param_results.items():
param_list = list(param_result.values())
try:
arr = np.array(param_list)
except ValueError:
# This can happen when param_list contains lists of different
# lengths, for example:
# param_list=[[1], [2, 3]]
arr_dtype = np.dtype(object)
else:
# There are two cases when we don't use the automatically inferred
# dtype when creating the array and we use object instead:
# - string dtype
# - when array.ndim > 1, that means that param_list was something
# like a list of same-size sequences, which gets turned into a
# multi-dimensional array but we want a 1d array
arr_dtype = arr.dtype if arr.dtype.kind != "U" and arr.ndim == 1 else object
# Use one MaskedArray and mask all the places where the param is not
# applicable for that candidate (which may not contain all the params).
ma = MaskedArray(np.empty(n_candidates, dtype=arr_dtype), mask=True)
for index, value in param_result.items():
# Setting the value at an index unmasks that index
ma[index] = value
yield (key, ma)
class BaseSearchCV(MetaEstimatorMixin, BaseEstimator, metaclass=ABCMeta):
"""Abstract base class for hyper parameter search with cross-validation."""
_parameter_constraints: dict = {
"estimator": [HasMethods(["fit"])],
"scoring": [
StrOptions(set(get_scorer_names())),
callable,
list,
tuple,
dict,
None,
],
"n_jobs": [numbers.Integral, None],
"refit": ["boolean", str, callable],
"cv": ["cv_object"],
"verbose": ["verbose"],
"pre_dispatch": [numbers.Integral, str],
"error_score": [StrOptions({"raise"}), numbers.Real],
"return_train_score": ["boolean"],
}
@abstractmethod
def __init__(
self,
estimator,
*,
scoring=None,
n_jobs=None,
refit=True,
cv=None,
verbose=0,
pre_dispatch="2*n_jobs",
error_score=np.nan,
return_train_score=True,
):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
self.return_train_score = return_train_score
    def __sklearn_tags__(self):
        # Mirror the wrapped estimator's tags so that meta-estimator
        # machinery sees the search object as its sub-estimator
        # (classifier vs regressor, sparse input, array API support).
        tags = super().__sklearn_tags__()
        sub_estimator_tags = get_tags(self.estimator)
        tags.estimator_type = sub_estimator_tags.estimator_type
        tags.classifier_tags = deepcopy(sub_estimator_tags.classifier_tags)
        tags.regressor_tags = deepcopy(sub_estimator_tags.regressor_tags)
        # allows cross-validation to see 'precomputed' metrics
        tags.input_tags.pairwise = sub_estimator_tags.input_tags.pairwise
        tags.input_tags.sparse = sub_estimator_tags.input_tags.sparse
        tags.array_api_support = sub_estimator_tags.array_api_support
        return tags
    def score(self, X, y=None, **params):
        """Return the score on the given data, if the estimator has been refit.

        This uses the score defined by ``scoring`` where provided, and the
        ``best_estimator_.score`` method otherwise.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Input data, where `n_samples` is the number of samples and
            `n_features` is the number of features.

        y : array-like of shape (n_samples, n_output) \
            or (n_samples,), default=None
            Target relative to X for classification or regression;
            None for unsupervised learning.

        **params : dict
            Parameters to be passed to the underlying scorer(s).

            .. versionadded:: 1.4
                Only available if `enable_metadata_routing=True`. See
                :ref:`Metadata Routing User Guide <metadata_routing>` for more
                details.

        Returns
        -------
        score : float
            The score defined by ``scoring`` if provided, and the
            ``best_estimator_.score`` method otherwise.
        """
        # Scoring is only meaningful on the refit best estimator.
        _check_refit(self, "score")
        check_is_fitted(self)
        _raise_for_params(params, self, "score")
        if _routing_enabled():
            # Extract the metadata routed to the scorer's ``score`` method.
            score_params = process_routing(self, "score", **params).scorer["score"]
        else:
            score_params = dict()
        if self.scorer_ is None:
            raise ValueError(
                "No score function explicitly defined, "
                "and the estimator doesn't provide one %s" % self.best_estimator_
            )
        if isinstance(self.scorer_, dict):
            # Multi-metric scoring: score with the scorer named by ``refit``.
            if self.multimetric_:
                scorer = self.scorer_[self.refit]
            else:
                scorer = self.scorer_
            return scorer(self.best_estimator_, X, y, **score_params)

        # callable
        score = self.scorer_(self.best_estimator_, X, y, **score_params)
        if self.multimetric_:
            # A callable multimetric scorer returns a dict; keep the refit
            # metric only.
            score = score[self.refit]
        return score
@available_if(_search_estimator_has("score_samples"))
def score_samples(self, X):
"""Call score_samples on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``score_samples``.
.. versionadded:: 0.24
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements
of the underlying estimator.
Returns
-------
y_score : ndarray of shape (n_samples,)
The ``best_estimator_.score_samples`` method.
"""
check_is_fitted(self)
return self.best_estimator_.score_samples(X)
@available_if(_search_estimator_has("predict"))
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
Returns
-------
y_pred : ndarray of shape (n_samples,)
The predicted labels or values for `X` based on the estimator with
the best found parameters.
"""
check_is_fitted(self)
return self.best_estimator_.predict(X)
@available_if(_search_estimator_has("predict_proba"))
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
Returns
-------
y_pred : ndarray of shape (n_samples,) or (n_samples, n_classes)
Predicted class probabilities for `X` based on the estimator with
the best found parameters. The order of the classes corresponds
to that in the fitted attribute :term:`classes_`.
"""
check_is_fitted(self)
return self.best_estimator_.predict_proba(X)
@available_if(_search_estimator_has("predict_log_proba"))
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
Returns
-------
y_pred : ndarray of shape (n_samples,) or (n_samples, n_classes)
Predicted class log-probabilities for `X` based on the estimator
with the best found parameters. The order of the classes
corresponds to that in the fitted attribute :term:`classes_`.
"""
check_is_fitted(self)
return self.best_estimator_.predict_log_proba(X)
@available_if(_search_estimator_has("decision_function"))
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
Returns
-------
y_score : ndarray of shape (n_samples,) or (n_samples, n_classes) \
or (n_samples, n_classes * (n_classes-1) / 2)
Result of the decision function for `X` based on the estimator with
the best found parameters.
"""
check_is_fitted(self)
return self.best_estimator_.decision_function(X)
@available_if(_search_estimator_has("transform"))
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
Returns
-------
Xt : {ndarray, sparse matrix} of shape (n_samples, n_features)
`X` transformed in the new space based on the estimator with
the best found parameters.
"""
check_is_fitted(self)
return self.best_estimator_.transform(X)
@available_if(_search_estimator_has("inverse_transform"))
def inverse_transform(self, X):
"""Call inverse_transform on the estimator with the best found params.
Only available if the underlying estimator implements
``inverse_transform`` and ``refit=True``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
Returns
-------
X_original : {ndarray, sparse matrix} of shape (n_samples, n_features)
Result of the `inverse_transform` function for `X` based on the
estimator with the best found parameters.
"""
check_is_fitted(self)
return self.best_estimator_.inverse_transform(X)
@property
def n_features_in_(self):
"""Number of features seen during :term:`fit`.
Only available when `refit=True`.
"""
# For consistency with other estimators we raise an AttributeError so
# that hasattr() fails if the search estimator isn't fitted.
try:
check_is_fitted(self)
except NotFittedError as nfe:
raise AttributeError(
"{} object has no n_features_in_ attribute.".format(
self.__class__.__name__
)
) from nfe
return self.best_estimator_.n_features_in_
    @property
    def classes_(self):
        """Class labels.

        Only available when `refit=True` and the estimator is a classifier.
        """
        # Reuse the delegation check so the usual refit / missing-attribute
        # errors are raised before touching ``best_estimator_``.
        _search_estimator_has("classes_")(self)
        return self.best_estimator_.classes_
def _run_search(self, evaluate_candidates):
"""Repeatedly calls `evaluate_candidates` to conduct a search.
This method, implemented in sub-classes, makes it possible to
customize the scheduling of evaluations: GridSearchCV and
RandomizedSearchCV schedule evaluations for their whole parameter
search space at once but other more sequential approaches are also
possible: for instance is possible to iteratively schedule evaluations
for new regions of the parameter search space based on previously
collected evaluation results. This makes it possible to implement
Bayesian optimization or more generally sequential model-based
optimization by deriving from the BaseSearchCV abstract base class.
For example, Successive Halving is implemented by calling
`evaluate_candidates` multiples times (once per iteration of the SH
process), each time passing a different set of candidates with `X`
and `y` of increasing sizes.
Parameters
----------
evaluate_candidates : callable
This callback accepts:
- a list of candidates, where each candidate is a dict of
parameter settings.
- an optional `cv` parameter which can be used to e.g.
evaluate candidates on different dataset splits, or
evaluate candidates on subsampled data (as done in the
Successive Halving estimators). By default, the original
`cv` parameter is used, and it is available as a private
`_checked_cv_orig` attribute.
- an optional `more_results` dict. Each key will be added to
the `cv_results_` attribute. Values should be lists of
length `n_candidates`
It returns a dict of all results so far, formatted like
``cv_results_``.
Important note (relevant whether the default cv is used or not):
in randomized splitters, and unless the random_state parameter of
cv was set to an int, calling cv.split() multiple times will
yield different splits. Since cv.split() is called in
evaluate_candidates, this means that candidates will be evaluated
on different splits each time evaluate_candidates is called. This
might be a methodological issue depending on the search strategy
that you're implementing. To prevent randomized splitters from
being used, you may use _split._yields_constant_splits()
Examples
--------
::
def _run_search(self, evaluate_candidates):
'Try C=0.1 only if C=1 is better than C=10'
all_results = evaluate_candidates([{'C': 1}, {'C': 10}])
score = all_results['mean_test_score']
if score[0] < score[1]:
evaluate_candidates([{'C': 0.1}])
"""
raise NotImplementedError("_run_search not implemented.")
def _check_refit_for_multimetric(self, scores):
"""Check `refit` is compatible with `scores` is valid"""
multimetric_refit_msg = (
"For multi-metric scoring, the parameter refit must be set to a "
"scorer key or a callable to refit an estimator with the best "
"parameter setting on the whole data and make the best_* "
"attributes available for that metric. If this is not needed, "
f"refit should be set to False explicitly. {self.refit!r} was "
"passed."
)
valid_refit_dict = isinstance(self.refit, str) and self.refit in scores
if (
self.refit is not False
and not valid_refit_dict
and not callable(self.refit)
):
raise ValueError(multimetric_refit_msg)
@staticmethod
def _select_best_index(refit, refit_metric, results):
"""Select index of the best combination of hyperparemeters."""
if callable(refit):
# If callable, refit is expected to return the index of the best
# parameter set.
best_index = refit(results)
if not isinstance(best_index, numbers.Integral):
raise TypeError("best_index_ returned is not an integer")
if best_index < 0 or best_index >= len(results["params"]):
raise IndexError("best_index_ index out of range")
else:
best_index = results[f"rank_test_{refit_metric}"].argmin()
return best_index
def _get_scorers(self):
"""Get the scorer(s) to be used.
This is used in ``fit`` and ``get_metadata_routing``.
Returns
-------
scorers, refit_metric
"""
refit_metric = "score"
if callable(self.scoring):
scorers = self.scoring
elif self.scoring is None or isinstance(self.scoring, str):
scorers = check_scoring(self.estimator, self.scoring)
else:
scorers = _check_multimetric_scoring(self.estimator, self.scoring)
self._check_refit_for_multimetric(scorers)
refit_metric = self.refit
scorers = _MultimetricScorer(
scorers=scorers, raise_exc=(self.error_score == "raise")
)
return scorers, refit_metric
def _check_scorers_accept_sample_weight(self):
# TODO(slep006): remove when metadata routing is the only way
scorers, _ = self._get_scorers()
# In the multimetric case, warn the user for each scorer separately
if isinstance(scorers, _MultimetricScorer):
for name, scorer in scorers._scorers.items():
if not scorer._accept_sample_weight():
warnings.warn(
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/model_selection/_validation.py | sklearn/model_selection/_validation.py | """
The :mod:`sklearn.model_selection._validation` module includes classes and
functions to validate the model.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numbers
import time
import warnings
from collections import Counter
from contextlib import suppress
from functools import partial
from numbers import Real
from traceback import format_exc
import numpy as np
import scipy.sparse as sp
from joblib import logger
from sklearn.base import clone, is_classifier
from sklearn.exceptions import FitFailedWarning, UnsetMetadataPassedError
from sklearn.metrics import check_scoring, get_scorer_names
from sklearn.metrics._scorer import _MultimetricScorer
from sklearn.model_selection._split import check_cv
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import Bunch, _safe_indexing, check_random_state, indexable
from sklearn.utils._array_api import (
_convert_to_numpy,
device,
get_namespace,
get_namespace_and_device,
move_to,
)
from sklearn.utils._param_validation import (
HasMethods,
Integral,
Interval,
StrOptions,
validate_params,
)
from sklearn.utils.metadata_routing import (
MetadataRouter,
MethodMapping,
_routing_enabled,
process_routing,
)
from sklearn.utils.metaestimators import _safe_split
from sklearn.utils.parallel import Parallel, delayed
from sklearn.utils.validation import _check_method_params, _num_samples
__all__ = [
"cross_val_predict",
"cross_val_score",
"cross_validate",
"learning_curve",
"permutation_test_score",
"validation_curve",
]
# TODO(SLEP6): To be removed when set_config(enable_metadata_routing=False) is not
# possible.
def _check_groups_routing_disabled(groups):
if groups is not None and _routing_enabled():
raise ValueError(
"`groups` can only be passed if metadata routing is not enabled via"
" `sklearn.set_config(enable_metadata_routing=True)`. When routing is"
" enabled, pass `groups` alongside other metadata via the `params` argument"
" instead."
)
@validate_params(
    {
        "estimator": [HasMethods("fit")],
        "X": ["array-like", "sparse matrix"],
        "y": ["array-like", None],
        "groups": ["array-like", None],
        "scoring": [
            StrOptions(set(get_scorer_names())),
            callable,
            list,
            tuple,
            dict,
            None,
        ],
        "cv": ["cv_object"],
        "n_jobs": [Integral, None],
        "verbose": ["verbose"],
        "params": [dict, None],
        "pre_dispatch": [Integral, str],
        "return_train_score": ["boolean"],
        "return_estimator": ["boolean"],
        "return_indices": ["boolean"],
        "error_score": [StrOptions({"raise"}), Real],
    },
    prefer_skip_nested_validation=False,  # estimator is not validated yet
)
def cross_validate(
    estimator,
    X,
    y=None,
    *,
    groups=None,
    scoring=None,
    cv=None,
    n_jobs=None,
    verbose=0,
    params=None,
    pre_dispatch="2*n_jobs",
    return_train_score=False,
    return_estimator=False,
    return_indices=False,
    error_score=np.nan,
):
    """Evaluate metric(s) by cross-validation and also record fit/score times.
    Read more in the :ref:`User Guide <multimetric_cross_validation>`.
    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The data to fit. Can be for example a list, or an array.
    y : array-like of shape (n_samples,) or (n_samples, n_outputs), default=None
        The target variable to try to predict in the case of
        supervised learning.
    groups : array-like of shape (n_samples,), default=None
        Group labels for the samples used while splitting the dataset into
        train/test set. Only used in conjunction with a "Group" :term:`cv`
        instance (e.g., :class:`GroupKFold`).
        .. versionchanged:: 1.4
            ``groups`` can only be passed if metadata routing is not enabled
            via ``sklearn.set_config(enable_metadata_routing=True)``. When routing
            is enabled, pass ``groups`` alongside other metadata via the ``params``
            argument instead. E.g.:
            ``cross_validate(..., params={'groups': groups})``.
    scoring : str, callable, list, tuple, or dict, default=None
        Strategy to evaluate the performance of the `estimator` across cross-validation
        splits.
        If `scoring` represents a single score, one can use:
        - a single string (see :ref:`scoring_string_names`);
        - a callable (see :ref:`scoring_callable`) that returns a single value.
        - `None`, the `estimator`'s
          :ref:`default evaluation criterion <scoring_api_overview>` is used.
        If `scoring` represents multiple scores, one can use:
        - a list or tuple of unique strings;
        - a callable returning a dictionary where the keys are the metric
          names and the values are the metric scores;
        - a dictionary with metric names as keys and callables a values.
        See :ref:`multimetric_grid_search` for an example.
    cv : int, cross-validation generator or an iterable, default=None
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 5-fold cross validation,
        - int, to specify the number of folds in a `(Stratified)KFold`,
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.
        For int/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
        other cases, :class:`KFold` is used. These splitters are instantiated
        with `shuffle=False` so the splits will be the same across calls.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
        .. versionchanged:: 0.22
            ``cv`` default value if None changed from 3-fold to 5-fold.
    n_jobs : int, default=None
        Number of jobs to run in parallel. Training the estimator and computing
        the score are parallelized over the cross-validation splits.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    verbose : int, default=0
        The verbosity level.
    params : dict, default=None
        Parameters to pass to the underlying estimator's ``fit``, the scorer,
        and the CV splitter.
        .. versionadded:: 1.4
    pre_dispatch : int or str, default='2*n_jobs'
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:
        - An int, giving the exact number of total jobs that are spawned
        - A str, giving an expression as a function of n_jobs, as in '2*n_jobs'
    return_train_score : bool, default=False
        Whether to include train scores.
        Computing training scores is used to get insights on how different
        parameter settings impact the overfitting/underfitting trade-off.
        However computing the scores on the training set can be computationally
        expensive and is not strictly required to select the parameters that
        yield the best generalization performance.
        .. versionadded:: 0.19
        .. versionchanged:: 0.21
            Default value was changed from ``True`` to ``False``
    return_estimator : bool, default=False
        Whether to return the estimators fitted on each split.
        .. versionadded:: 0.20
    return_indices : bool, default=False
        Whether to return the train-test indices selected for each split.
        .. versionadded:: 1.3
    error_score : 'raise' or numeric, default=np.nan
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised.
        If a numeric value is given, FitFailedWarning is raised.
        .. versionadded:: 0.20
    Returns
    -------
    scores : dict of float arrays of shape (n_splits,)
        Array of scores of the estimator for each run of the cross validation.
        A dict of arrays containing the score/time arrays for each scorer is
        returned. The possible keys for this ``dict`` are:
            ``test_score``
                The score array for test scores on each cv split.
                Suffix ``_score`` in ``test_score`` changes to a specific
                metric like ``test_r2`` or ``test_auc`` if there are
                multiple scoring metrics in the scoring parameter.
            ``train_score``
                The score array for train scores on each cv split.
                Suffix ``_score`` in ``train_score`` changes to a specific
                metric like ``train_r2`` or ``train_auc`` if there are
                multiple scoring metrics in the scoring parameter.
                This is available only if ``return_train_score`` parameter
                is ``True``.
            ``fit_time``
                The time for fitting the estimator on the train
                set for each cv split.
            ``score_time``
                The time for scoring the estimator on the test set for each
                cv split. (Note: time for scoring on the train set is not
                included even if ``return_train_score`` is set to ``True``).
            ``estimator``
                The estimator objects for each cv split.
                This is available only if ``return_estimator`` parameter
                is set to ``True``.
            ``indices``
                The train/test positional indices for each cv split. A dictionary
                is returned where the keys are either `"train"` or `"test"`
                and the associated values are a list of integer-dtyped NumPy
                arrays with the indices. Available only if `return_indices=True`.
    See Also
    --------
    cross_val_score : Run cross-validation for single metric evaluation.
    cross_val_predict : Get predictions from each split of cross-validation for
        diagnostic purposes.
    sklearn.metrics.make_scorer : Make a scorer from a performance metric or
        loss function.
    Examples
    --------
    >>> from sklearn import datasets, linear_model
    >>> from sklearn.model_selection import cross_validate
    >>> diabetes = datasets.load_diabetes()
    >>> X = diabetes.data[:150]
    >>> y = diabetes.target[:150]
    >>> lasso = linear_model.Lasso()
    Single metric evaluation using ``cross_validate``
    >>> cv_results = cross_validate(lasso, X, y, cv=3)
    >>> sorted(cv_results.keys())
    ['fit_time', 'score_time', 'test_score']
    >>> cv_results['test_score']
    array([0.3315057 , 0.08022103, 0.03531816])
    Multiple metric evaluation using ``cross_validate``
    (please refer the ``scoring`` parameter doc for more information)
    >>> scores = cross_validate(lasso, X, y, cv=3,
    ...                         scoring=('r2', 'neg_mean_squared_error'),
    ...                         return_train_score=True)
    >>> print(scores['test_neg_mean_squared_error'])
    [-3635.5 -3573.3 -6114.7]
    >>> print(scores['train_r2'])
    [0.28009951 0.3908844  0.22784907]
    """
    _check_groups_routing_disabled(groups)
    # Make X and y indexable by the integer arrays the splitter will produce.
    X, y = indexable(X, y)
    params = {} if params is None else params
    # Resolve `cv` into a concrete splitter object (classifier flag controls
    # the stratified default for int/None input).
    cv = check_cv(cv, y, classifier=is_classifier(estimator))
    # Tie scorer exception behavior to `error_score`: exceptions propagate
    # out of scoring only when the user asked for error_score="raise".
    scorers = check_scoring(
        estimator, scoring=scoring, raise_exc=(error_score == "raise")
    )
    if _routing_enabled():
        # For estimators, a MetadataRouter is created in get_metadata_routing
        # methods. For these router methods, we create the router to use
        # `process_routing` on it.
        router = (
            MetadataRouter(owner="cross_validate")
            .add(
                splitter=cv,
                method_mapping=MethodMapping().add(caller="fit", callee="split"),
            )
            .add(
                estimator=estimator,
                # TODO(SLEP6): also pass metadata to the predict method for
                # scoring?
                method_mapping=MethodMapping().add(caller="fit", callee="fit"),
            )
            .add(
                scorer=scorers,
                method_mapping=MethodMapping().add(caller="fit", callee="score"),
            )
        )
        try:
            routed_params = process_routing(router, "fit", **params)
        except UnsetMetadataPassedError as e:
            # The default exception would mention `fit` since in the above
            # `process_routing` code, we pass `fit` as the caller. However,
            # the user is not calling `fit` directly, so we change the message
            # to make it more suitable for this case.
            raise UnsetMetadataPassedError(
                message=str(e).replace("cross_validate.fit", "cross_validate"),
                unrequested_params=e.unrequested_params,
                routed_params=e.routed_params,
            )
    else:
        # Routing disabled: `groups` goes to the splitter and the whole of
        # `params` goes to the estimator's fit; the scorer gets no metadata.
        routed_params = Bunch()
        routed_params.splitter = Bunch(split={"groups": groups})
        routed_params.estimator = Bunch(fit=params)
        routed_params.scorer = Bunch(score={})
    indices = cv.split(X, y, **routed_params.splitter.split)
    if return_indices:
        # materialize the indices since we need to store them in the returned dict
        indices = list(indices)
    # We clone the estimator to make sure that all the folds are
    # independent, and that it is pickle-able.
    parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch)
    results = parallel(
        delayed(_fit_and_score)(
            clone(estimator),
            X,
            y,
            scorer=scorers,
            train=train,
            test=test,
            verbose=verbose,
            parameters=None,
            fit_params=routed_params.estimator.fit,
            score_params=routed_params.scorer.score,
            return_train_score=return_train_score,
            return_times=True,
            return_estimator=return_estimator,
            error_score=error_score,
        )
        for train, test in indices
    )
    # Raise if every fit failed; otherwise warn and keep the error scores.
    _warn_or_raise_about_fit_failures(results, error_score)
    # For callable scoring, the return type is only known after calling. If the
    # return type is a dictionary, the error scores can now be inserted with
    # the correct key.
    if callable(scoring):
        _insert_error_scores(results, error_score)
    # Turn the per-split list of result dicts into a dict of per-split lists.
    results = _aggregate_score_dicts(results)
    ret = {}
    ret["fit_time"] = results["fit_time"]
    ret["score_time"] = results["score_time"]
    if return_estimator:
        ret["estimator"] = results["estimator"]
    if return_indices:
        # `indices` is a list of (train, test) pairs; unzip into two tuples.
        ret["indices"] = {}
        ret["indices"]["train"], ret["indices"]["test"] = zip(*indices)
    test_scores_dict = _normalize_score_results(results["test_scores"])
    if return_train_score:
        train_scores_dict = _normalize_score_results(results["train_scores"])
    # One "test_<metric>" (and optionally "train_<metric>") key per scorer;
    # single-metric results are normalized under the name "score".
    for name in test_scores_dict:
        ret["test_%s" % name] = test_scores_dict[name]
        if return_train_score:
            key = "train_%s" % name
            ret[key] = train_scores_dict[name]
    return ret
def _insert_error_scores(results, error_score):
"""Insert error in `results` by replacing them inplace with `error_score`.
This only applies to multimetric scores because `_fit_and_score` will
handle the single metric case.
"""
successful_score = None
failed_indices = []
for i, result in enumerate(results):
if result["fit_error"] is not None:
failed_indices.append(i)
elif successful_score is None:
successful_score = result["test_scores"]
if isinstance(successful_score, dict):
formatted_error = {name: error_score for name in successful_score}
for i in failed_indices:
results[i]["test_scores"] = formatted_error.copy()
if "train_scores" in results[i]:
results[i]["train_scores"] = formatted_error.copy()
def _normalize_score_results(scores, scaler_score_key="score"):
"""Creates a scoring dictionary based on the type of `scores`"""
if isinstance(scores[0], dict):
# multimetric scoring
return _aggregate_score_dicts(scores)
# scaler
return {scaler_score_key: scores}
def _warn_or_raise_about_fit_failures(results, error_score):
fit_errors = [
result["fit_error"] for result in results if result["fit_error"] is not None
]
if fit_errors:
num_failed_fits = len(fit_errors)
num_fits = len(results)
fit_errors_counter = Counter(fit_errors)
delimiter = "-" * 80 + "\n"
fit_errors_summary = "\n".join(
f"{delimiter}{n} fits failed with the following error:\n{error}"
for error, n in fit_errors_counter.items()
)
if num_failed_fits == num_fits:
all_fits_failed_message = (
f"\nAll the {num_fits} fits failed.\n"
"It is very likely that your model is misconfigured.\n"
"You can try to debug the error by setting error_score='raise'.\n\n"
f"Below are more details about the failures:\n{fit_errors_summary}"
)
raise ValueError(all_fits_failed_message)
else:
some_fits_failed_message = (
f"\n{num_failed_fits} fits failed out of a total of {num_fits}.\n"
"The score on these train-test partitions for these parameters"
f" will be set to {error_score}.\n"
"If these failures are not expected, you can try to debug them "
"by setting error_score='raise'.\n\n"
f"Below are more details about the failures:\n{fit_errors_summary}"
)
warnings.warn(some_fits_failed_message, FitFailedWarning)
@validate_params(
    {
        "estimator": [HasMethods("fit")],
        "X": ["array-like", "sparse matrix"],
        "y": ["array-like", None],
        "groups": ["array-like", None],
        "scoring": [StrOptions(set(get_scorer_names())), callable, None],
        "cv": ["cv_object"],
        "n_jobs": [Integral, None],
        "verbose": ["verbose"],
        "params": [dict, None],
        "pre_dispatch": [Integral, str, None],
        "error_score": [StrOptions({"raise"}), Real],
    },
    prefer_skip_nested_validation=False,  # estimator is not validated yet
)
def cross_val_score(
    estimator,
    X,
    y=None,
    *,
    groups=None,
    scoring=None,
    cv=None,
    n_jobs=None,
    verbose=0,
    params=None,
    pre_dispatch="2*n_jobs",
    error_score=np.nan,
):
    """Evaluate a score by cross-validation.

    Single-metric convenience wrapper around :func:`cross_validate` that
    returns one score per cross-validation split.

    Read more in the :ref:`User Guide <cross_validation>`.

    Parameters
    ----------
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.

    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The data to fit. Can be for example a list, or an array.

    y : array-like of shape (n_samples,) or (n_samples, n_outputs), \
            default=None
        The target variable to try to predict in the case of
        supervised learning.

    groups : array-like of shape (n_samples,), default=None
        Group labels for the samples used while splitting the dataset into
        train/test set. Only used in conjunction with a "Group" :term:`cv`
        instance (e.g., :class:`GroupKFold`).

        .. versionchanged:: 1.4
            ``groups`` can only be passed if metadata routing is not enabled
            via ``sklearn.set_config(enable_metadata_routing=True)``. When
            routing is enabled, pass ``groups`` alongside other metadata via
            the ``params`` argument instead. E.g.:
            ``cross_val_score(..., params={'groups': groups})``.

    scoring : str or callable, default=None
        A single-metric scoring strategy: a string (see
        :ref:`scoring_string_names`), a callable with signature
        ``scorer(estimator, X, y)`` returning a single value (see
        :ref:`scoring_callable`), or `None` to use the `estimator`'s
        :ref:`default evaluation criterion <scoring_api_overview>`.
        Similar to `scoring` in :func:`cross_validate`, but only a single
        metric is permitted.

    cv : int, cross-validation generator or an iterable, default=None
        Determines the cross-validation splitting strategy: `None` for the
        default 5-fold cross validation, an int for the number of folds in a
        `(Stratified)KFold`, a :term:`CV splitter`, or an iterable generating
        (train, test) splits as arrays of indices. For `int`/`None` inputs,
        classifiers with binary or multiclass `y` use
        :class:`StratifiedKFold`; otherwise :class:`KFold`. These splitters
        are instantiated with `shuffle=False` so the splits will be the same
        across calls. Refer to the :ref:`User Guide <cross_validation>` for
        the various strategies that can be used here.

        .. versionchanged:: 0.22
            `cv` default value if `None` changed from 3-fold to 5-fold.

    n_jobs : int, default=None
        Number of jobs to run in parallel over the cross-validation splits.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context;
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`.

    verbose : int, default=0
        The verbosity level.

    params : dict, default=None
        Parameters to pass to the underlying estimator's ``fit``, the scorer,
        and the CV splitter.

        .. versionadded:: 1.4

    pre_dispatch : int or str, default='2*n_jobs'
        Controls the number of jobs that get dispatched during parallel
        execution, limiting memory consumption: ``None`` spawns all jobs
        immediately (use for lightweight, fast-running jobs), an int gives
        the exact number of total jobs spawned, and a str gives an expression
        as a function of n_jobs, as in '2*n_jobs'.

    error_score : 'raise' or numeric, default=np.nan
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised.

        .. versionadded:: 0.20

    Returns
    -------
    scores : ndarray of float of shape=(len(list(cv)),)
        Array of scores of the estimator for each run of the cross validation.

    See Also
    --------
    cross_validate : To run cross-validation on multiple metrics and also to
        return train scores, fit times and score times.
    cross_val_predict : Get predictions from each split of cross-validation for
        diagnostic purposes.
    sklearn.metrics.make_scorer : Make a scorer from a performance metric or
        loss function.

    Examples
    --------
    >>> from sklearn import datasets, linear_model
    >>> from sklearn.model_selection import cross_val_score
    >>> diabetes = datasets.load_diabetes()
    >>> X = diabetes.data[:150]
    >>> y = diabetes.target[:150]
    >>> lasso = linear_model.Lasso()
    >>> print(cross_val_score(lasso, X, y, cv=3))
    [0.3315057 0.08022103 0.03531816]
    """
    # `check_scoring` with a str/callable/None always yields one scorer;
    # handing it to `cross_validate` under the fixed key "score" rules out
    # any multimetric interpretation of `scoring`.
    single_metric_scoring = {"score": check_scoring(estimator, scoring=scoring)}
    results = cross_validate(
        estimator,
        X,
        y=y,
        groups=groups,
        scoring=single_metric_scoring,
        cv=cv,
        n_jobs=n_jobs,
        verbose=verbose,
        params=params,
        pre_dispatch=pre_dispatch,
        error_score=error_score,
    )
    return results["test_score"]
def _fit_and_score(
estimator,
X,
y,
*,
scorer,
train,
test,
verbose,
parameters,
fit_params,
score_params,
return_train_score=False,
return_parameters=False,
return_n_test_samples=False,
return_times=False,
return_estimator=False,
split_progress=None,
candidate_progress=None,
error_score=np.nan,
):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape (n_samples, n_features)
The data to fit.
y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
The target variable to try to predict in the case of
supervised learning.
scorer : A single callable or dict mapping scorer name to the callable
If it is a single callable, the return value for ``train_scores`` and
``test_scores`` is a single float.
For a dict, it should be one mapping the scorer name to the scorer
callable object / function.
The callable object / fn should have signature
``scorer(estimator, X, y)``.
train : array-like of shape (n_train_samples,)
Indices of training samples.
test : array-like of shape (n_test_samples,)
Indices of test samples.
verbose : int
The verbosity level.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, FitFailedWarning is raised.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
score_params : dict or None
Parameters that will be passed to the scorer.
return_train_score : bool, default=False
Compute and return score on training set.
return_parameters : bool, default=False
Return parameters that has been used for the estimator.
split_progress : {list, tuple} of int, default=None
A list or tuple of format (<current_split_id>, <total_num_of_splits>).
candidate_progress : {list, tuple} of int, default=None
A list or tuple of format
(<current_candidate_id>, <total_number_of_candidates>).
return_n_test_samples : bool, default=False
Whether to return the ``n_test_samples``.
return_times : bool, default=False
Whether to return the fit/score times.
return_estimator : bool, default=False
Whether to return the fitted estimator.
Returns
-------
result : dict with the following attributes
train_scores : dict of scorer name -> float
Score on training set (for all the scorers),
returned only if `return_train_score` is `True`.
test_scores : dict of scorer name -> float
Score on testing set (for all the scorers).
n_test_samples : int
Number of test samples.
fit_time : float
Time spent for fitting in seconds.
score_time : float
Time spent for scoring in seconds.
parameters : dict or None
The parameters that have been evaluated.
estimator : estimator object
The fitted estimator.
fit_error : str or None
Traceback str if the fit failed, None if the fit succeeded.
"""
xp, _ = get_namespace(X)
X_device = device(X)
# Make sure that we can fancy index X even if train and test are provided
# as NumPy arrays by NumPy only cross-validation splitters.
train, test = xp.asarray(train, device=X_device), xp.asarray(test, device=X_device)
if not isinstance(error_score, numbers.Number) and error_score != "raise":
raise ValueError(
"error_score must be the string 'raise' or a numeric value. "
"(Hint: if using 'raise', please make sure that it has been "
"spelled correctly.)"
)
progress_msg = ""
if verbose > 2:
if split_progress is not None:
progress_msg = f" {split_progress[0] + 1}/{split_progress[1]}"
if candidate_progress and verbose > 9:
progress_msg += f"; {candidate_progress[0] + 1}/{candidate_progress[1]}"
if verbose > 1:
if parameters is None:
params_msg = ""
else:
sorted_keys = sorted(parameters) # Ensure deterministic o/p
params_msg = ", ".join(f"{k}={parameters[k]}" for k in sorted_keys)
if verbose > 9:
start_msg = f"[CV{progress_msg}] START {params_msg}"
print(f"{start_msg}{(80 - len(start_msg)) * '.'}")
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = _check_method_params(X, params=fit_params, indices=train)
score_params = score_params if score_params is not None else {}
score_params_train = _check_method_params(X, params=score_params, indices=train)
score_params_test = _check_method_params(X, params=score_params, indices=test)
if parameters is not None:
# here we clone the parameters, since sometimes the parameters
# themselves might be estimators, e.g. when we search over different
# estimators in a pipeline.
# ref: https://github.com/scikit-learn/scikit-learn/pull/26786
estimator = estimator.set_params(**clone(parameters, safe=False))
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
result = {}
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception:
# Note fit time as time until error
fit_time = time.time() - start_time
score_time = 0.0
if error_score == "raise":
raise
elif isinstance(error_score, numbers.Number):
if isinstance(scorer, _MultimetricScorer):
test_scores = {name: error_score for name in scorer._scorers}
if return_train_score:
train_scores = test_scores.copy()
else:
test_scores = error_score
if return_train_score:
train_scores = error_score
result["fit_error"] = format_exc()
else:
result["fit_error"] = None
fit_time = time.time() - start_time
test_scores = _score(
estimator, X_test, y_test, scorer, score_params_test, error_score
)
score_time = time.time() - start_time - fit_time
if return_train_score:
train_scores = _score(
estimator, X_train, y_train, scorer, score_params_train, error_score
)
if verbose > 1:
total_time = score_time + fit_time
end_msg = f"[CV{progress_msg}] END "
result_msg = params_msg + (";" if params_msg else "")
if verbose > 2:
if isinstance(test_scores, dict):
for scorer_name in sorted(test_scores):
result_msg += f" {scorer_name}: ("
if return_train_score:
scorer_scores = train_scores[scorer_name]
result_msg += f"train={scorer_scores:.3f}, "
result_msg += f"test={test_scores[scorer_name]:.3f})"
else:
result_msg += ", score="
if return_train_score:
result_msg += f"(train={train_scores:.3f}, test={test_scores:.3f})"
else:
result_msg += f"{test_scores:.3f}"
result_msg += f" total time={logger.short_format_time(total_time)}"
# Right align the result_msg
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/model_selection/__init__.py | sklearn/model_selection/__init__.py | """Tools for model selection, such as cross validation and hyper-parameter tuning."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import typing
from sklearn.model_selection._classification_threshold import (
FixedThresholdClassifier,
TunedThresholdClassifierCV,
)
from sklearn.model_selection._plot import LearningCurveDisplay, ValidationCurveDisplay
from sklearn.model_selection._search import (
GridSearchCV,
ParameterGrid,
ParameterSampler,
RandomizedSearchCV,
)
from sklearn.model_selection._split import (
BaseCrossValidator,
BaseShuffleSplit,
GroupKFold,
GroupShuffleSplit,
KFold,
LeaveOneGroupOut,
LeaveOneOut,
LeavePGroupsOut,
LeavePOut,
PredefinedSplit,
RepeatedKFold,
RepeatedStratifiedKFold,
ShuffleSplit,
StratifiedGroupKFold,
StratifiedKFold,
StratifiedShuffleSplit,
TimeSeriesSplit,
check_cv,
train_test_split,
)
from sklearn.model_selection._validation import (
cross_val_predict,
cross_val_score,
cross_validate,
learning_curve,
permutation_test_score,
validation_curve,
)
if typing.TYPE_CHECKING:
# Avoid errors in type checkers (e.g. mypy) for experimental estimators.
# TODO: remove this check once the estimator is no longer experimental.
from sklearn.model_selection._search_successive_halving import (
HalvingGridSearchCV,
HalvingRandomSearchCV,
)
__all__ = [
"BaseCrossValidator",
"BaseShuffleSplit",
"FixedThresholdClassifier",
"GridSearchCV",
"GroupKFold",
"GroupShuffleSplit",
"HalvingGridSearchCV",
"HalvingRandomSearchCV",
"KFold",
"LearningCurveDisplay",
"LeaveOneGroupOut",
"LeaveOneOut",
"LeavePGroupsOut",
"LeavePOut",
"ParameterGrid",
"ParameterSampler",
"PredefinedSplit",
"RandomizedSearchCV",
"RepeatedKFold",
"RepeatedStratifiedKFold",
"ShuffleSplit",
"StratifiedGroupKFold",
"StratifiedKFold",
"StratifiedShuffleSplit",
"TimeSeriesSplit",
"TunedThresholdClassifierCV",
"ValidationCurveDisplay",
"check_cv",
"cross_val_predict",
"cross_val_score",
"cross_validate",
"learning_curve",
"permutation_test_score",
"train_test_split",
"validation_curve",
]
# TODO: remove this check once the estimator is no longer experimental.
def __getattr__(name):
    """Module-level attribute hook (PEP 562).

    Turns access to the experimental halving search estimators into an
    explanatory ImportError instead of a plain AttributeError.
    """
    experimental = ("HalvingGridSearchCV", "HalvingRandomSearchCV")
    if name not in experimental:
        raise AttributeError(f"module {__name__} has no attribute {name}")
    raise ImportError(
        f"{name} is experimental and the API might change without any "
        "deprecation cycle. To use it, you need to explicitly import "
        "enable_halving_search_cv:\n"
        "from sklearn.experimental import enable_halving_search_cv"
    )
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/model_selection/tests/test_classification_threshold.py | sklearn/model_selection/tests/test_classification_threshold.py | import numpy as np
import pytest
from sklearn import config_context
from sklearn.base import clone
from sklearn.datasets import (
load_breast_cancer,
load_iris,
make_classification,
make_multilabel_classification,
)
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (
balanced_accuracy_score,
f1_score,
fbeta_score,
make_scorer,
)
from sklearn.metrics._scorer import _CurveScorer
from sklearn.model_selection import (
FixedThresholdClassifier,
StratifiedShuffleSplit,
TunedThresholdClassifierCV,
)
from sklearn.model_selection._classification_threshold import (
_fit_and_score_over_thresholds,
)
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils._mocking import CheckingClassifier
from sklearn.utils._testing import (
_convert_container,
assert_allclose,
assert_array_equal,
)
def test_fit_and_score_over_thresholds_curve_scorers():
    """Check that `_fit_and_score_over_thresholds` returns thresholds in ascending order
    for the different accepted curve scorers."""
    X, y = make_classification(n_samples=100, random_state=0)
    train_idx, val_idx = np.arange(50), np.arange(50, 100)
    estimator = LogisticRegression()
    scorer = _CurveScorer(
        score_func=balanced_accuracy_score,
        sign=1,
        response_method="predict_proba",
        thresholds=10,
        kwargs={},
    )
    scores, thresholds = _fit_and_score_over_thresholds(
        estimator,
        X,
        y,
        fit_params={},
        train_idx=train_idx,
        val_idx=val_idx,
        curve_scorer=scorer,
        score_params={},
    )
    # thresholds must come back in non-decreasing order
    assert np.all(np.diff(thresholds) >= 0)
    assert isinstance(scores, np.ndarray)
    # balanced accuracy is bounded in [0, 1]
    assert ((scores >= 0) & (scores <= 1)).all()
def test_fit_and_score_over_thresholds_prefit():
    """Check the behaviour with a prefit classifier."""
    X, y = make_classification(n_samples=100, random_state=0)
    # `train_idx is None` signals that the classifier is prefit
    train_idx = None
    val_idx = np.arange(50, 100)
    model = DecisionTreeClassifier(random_state=0).fit(X, y)
    # the tree memorized the full dataset, so the validation predictions are
    # perfect and the expected scores below hold
    assert model.score(X[val_idx], y[val_idx]) == pytest.approx(1.0)
    scorer = _CurveScorer(
        score_func=balanced_accuracy_score,
        sign=1,
        response_method="predict_proba",
        thresholds=2,
        kwargs={},
    )
    scores, thresholds = _fit_and_score_over_thresholds(
        model,
        X,
        y,
        fit_params={},
        train_idx=train_idx,
        val_idx=val_idx,
        curve_scorer=scorer,
        score_params={},
    )
    assert np.all(np.diff(thresholds) >= 0)
    assert_allclose(scores, [0.5, 1.0])
@config_context(enable_metadata_routing=True)
def test_fit_and_score_over_thresholds_sample_weight():
    """Check that we dispatch the sample-weight to fit and score the classifier."""
    X, y = load_iris(return_X_y=True)
    X, y = X[:100], y[:100]  # restrict to the first two classes
    # duplicate every class-0 sample once ...
    X_repeated = np.vstack([X, X[y == 0]])
    y_repeated = np.hstack([y, y[y == 0]])
    # ... and build a weight vector equivalent to that repetition
    sample_weight = np.ones_like(y)
    sample_weight[:50] *= 2
    model = LogisticRegression()
    scorer = _CurveScorer(
        score_func=balanced_accuracy_score,
        sign=1,
        response_method="predict_proba",
        thresholds=10,
        kwargs={},
    )
    n_repeated = X_repeated.shape[0]
    scores_repeated, thresholds_repeated = _fit_and_score_over_thresholds(
        model,
        X_repeated,
        y_repeated,
        fit_params={},
        train_idx=np.arange(n_repeated),
        val_idx=np.arange(n_repeated),
        curve_scorer=scorer,
        score_params={},
    )
    n_samples = X.shape[0]
    scores, thresholds = _fit_and_score_over_thresholds(
        model.set_fit_request(sample_weight=True),
        X,
        y,
        fit_params={"sample_weight": sample_weight},
        train_idx=np.arange(n_samples),
        val_idx=np.arange(n_samples),
        curve_scorer=scorer.set_score_request(sample_weight=True),
        score_params={"sample_weight": sample_weight},
    )
    # weighting must be indistinguishable from physically repeating samples
    assert_allclose(thresholds_repeated, thresholds)
    assert_allclose(scores_repeated, scores)
@pytest.mark.parametrize("fit_params_type", ["list", "array"])
@config_context(enable_metadata_routing=True)
def test_fit_and_score_over_thresholds_fit_params(fit_params_type):
"""Check that we pass `fit_params` to the classifier when calling `fit`."""
X, y = make_classification(n_samples=100, random_state=0)
fit_params = {
"a": _convert_container(y, fit_params_type),
"b": _convert_container(y, fit_params_type),
}
classifier = CheckingClassifier(expected_fit_params=["a", "b"], random_state=0)
classifier.set_fit_request(a=True, b=True)
train_idx, val_idx = np.arange(50), np.arange(50, 100)
curve_scorer = _CurveScorer(
score_func=balanced_accuracy_score,
sign=1,
response_method="predict_proba",
thresholds=10,
kwargs={},
)
_fit_and_score_over_thresholds(
classifier,
X,
y,
fit_params=fit_params,
train_idx=train_idx,
val_idx=val_idx,
curve_scorer=curve_scorer,
score_params={},
)
@pytest.mark.parametrize(
    "data",
    [
        make_classification(n_classes=3, n_clusters_per_class=1, random_state=0),
        make_multilabel_classification(random_state=0),
    ],
)
def test_tuned_threshold_classifier_no_binary(data):
    """Check that we raise an informative error message for non-binary problem."""
    X, y = data
    model = TunedThresholdClassifierCV(LogisticRegression())
    # Multiclass and multilabel targets must both be rejected at fit time.
    with pytest.raises(ValueError, match="Only binary classification is supported."):
        model.fit(X, y)
@pytest.mark.parametrize(
    "params, err_type, err_msg",
    [
        (
            {"cv": "prefit", "refit": True},
            ValueError,
            "When cv='prefit', refit cannot be True.",
        ),
        (
            {"cv": 10, "refit": False},
            ValueError,
            "When cv has several folds, refit cannot be False.",
        ),
        (
            {"cv": "prefit", "refit": False},
            NotFittedError,
            "`estimator` must be fitted.",
        ),
    ],
)
def test_tuned_threshold_classifier_conflict_cv_refit(params, err_type, err_msg):
    """Check that we raise an informative error message when `cv` and `refit`
    cannot be used together.
    """
    X, y = make_classification(n_samples=100, random_state=0)
    # The third case also checks that `cv="prefit"` requires a fitted estimator.
    with pytest.raises(err_type, match=err_msg):
        TunedThresholdClassifierCV(LogisticRegression(), **params).fit(X, y)
@pytest.mark.parametrize(
    "estimator",
    [LogisticRegression(), SVC(), GradientBoostingClassifier(n_estimators=4)],
)
@pytest.mark.parametrize(
    "response_method", ["predict_proba", "predict_log_proba", "decision_function"]
)
@pytest.mark.parametrize(
    "ThresholdClassifier", [FixedThresholdClassifier, TunedThresholdClassifierCV]
)
def test_threshold_classifier_estimator_response_methods(
    ThresholdClassifier, estimator, response_method
):
    """Check that `TunedThresholdClassifierCV` exposes the same response methods as the
    underlying estimator.
    """
    X, y = make_classification(n_samples=100, random_state=0)
    model = ThresholdClassifier(estimator=estimator)
    # Availability must match both before and after fitting the wrapper.
    assert hasattr(model, response_method) == hasattr(estimator, response_method)
    model.fit(X, y)
    assert hasattr(model, response_method) == hasattr(estimator, response_method)
    if hasattr(model, response_method):
        # The wrapper must delegate verbatim to the fitted inner estimator.
        y_pred_cutoff = getattr(model, response_method)(X)
        y_pred_underlying_estimator = getattr(model.estimator_, response_method)(X)
        assert_allclose(y_pred_cutoff, y_pred_underlying_estimator)
@pytest.mark.parametrize(
    "response_method", ["auto", "decision_function", "predict_proba"]
)
def test_tuned_threshold_classifier_without_constraint_value(response_method):
    """Check that `TunedThresholdClassifierCV` is optimizing a given objective
    metric."""
    X, y = load_breast_cancer(return_X_y=True)
    # remove feature to degrade performances
    X = X[:, :5]
    # make the problem completely imbalanced such that the balanced accuracy is low
    indices_pos = np.flatnonzero(y == 1)
    indices_pos = indices_pos[: indices_pos.size // 50]
    indices_neg = np.flatnonzero(y == 0)
    X = np.vstack([X[indices_neg], X[indices_pos]])
    y = np.hstack([y[indices_neg], y[indices_pos]])
    lr = make_pipeline(StandardScaler(), LogisticRegression()).fit(X, y)
    thresholds = 100
    model = TunedThresholdClassifierCV(
        estimator=lr,
        scoring="balanced_accuracy",
        response_method=response_method,
        thresholds=thresholds,
        store_cv_results=True,
    )
    # Tuning the threshold must strictly improve balanced accuracy over the
    # default decision rule of the pipeline.
    score_optimized = balanced_accuracy_score(y, model.fit(X, y).predict(X))
    score_baseline = balanced_accuracy_score(y, lr.predict(X))
    assert score_optimized > score_baseline
    # One score per candidate threshold is stored.
    assert model.cv_results_["thresholds"].shape == (thresholds,)
    assert model.cv_results_["scores"].shape == (thresholds,)
def test_tuned_threshold_classifier_metric_with_parameter():
    """Check that we can pass a metric with a parameter in addition check that
    `f_beta` with `beta=1` is equivalent to `f1` and different from `f_beta` with
    `beta=2`.
    """
    X, y = load_breast_cancer(return_X_y=True)
    lr = make_pipeline(StandardScaler(), LogisticRegression()).fit(X, y)
    model_fbeta_1 = TunedThresholdClassifierCV(
        estimator=lr, scoring=make_scorer(fbeta_score, beta=1)
    ).fit(X, y)
    model_fbeta_2 = TunedThresholdClassifierCV(
        estimator=lr, scoring=make_scorer(fbeta_score, beta=2)
    ).fit(X, y)
    model_f1 = TunedThresholdClassifierCV(
        estimator=lr, scoring=make_scorer(f1_score)
    ).fit(X, y)
    # F-beta with beta=1 is F1 by definition, so the tuned thresholds coincide;
    # beta=2 weights recall differently and must land on another threshold.
    assert model_fbeta_1.best_threshold_ == pytest.approx(model_f1.best_threshold_)
    assert model_fbeta_1.best_threshold_ != pytest.approx(model_fbeta_2.best_threshold_)
@pytest.mark.parametrize(
    "response_method", ["auto", "decision_function", "predict_proba"]
)
@pytest.mark.parametrize(
    "metric",
    [
        make_scorer(balanced_accuracy_score),
        make_scorer(f1_score, pos_label="cancer"),
    ],
)
def test_tuned_threshold_classifier_with_string_targets(response_method, metric):
    """Check that targets represented by str are properly managed.
    Also, check with several metrics to be sure that `pos_label` is properly
    dispatched.
    """
    X, y = load_breast_cancer(return_X_y=True)
    # Encode numeric targets by meaningful strings. We purposely designed the class
    # names such that the `pos_label` is the first alphabetically sorted class and thus
    # encoded as 0.
    classes = np.array(["cancer", "healthy"], dtype=object)
    y = classes[y]
    model = TunedThresholdClassifierCV(
        estimator=make_pipeline(StandardScaler(), LogisticRegression()),
        scoring=metric,
        response_method=response_method,
        thresholds=100,
    ).fit(X, y)
    # `classes_` is sorted alphabetically, and predictions use the string labels.
    assert_array_equal(model.classes_, np.sort(classes))
    y_pred = model.predict(X)
    assert_array_equal(np.unique(y_pred), np.sort(classes))
@pytest.mark.parametrize("with_sample_weight", [True, False])
@config_context(enable_metadata_routing=True)
def test_tuned_threshold_classifier_refit(with_sample_weight, global_random_seed):
"""Check the behaviour of the `refit` parameter."""
rng = np.random.RandomState(global_random_seed)
X, y = make_classification(n_samples=100, random_state=0)
if with_sample_weight:
sample_weight = rng.randn(X.shape[0])
sample_weight = np.abs(sample_weight, out=sample_weight)
else:
sample_weight = None
# check that `estimator_` if fitted on the full dataset when `refit=True`
estimator = LogisticRegression().set_fit_request(sample_weight=True)
model = TunedThresholdClassifierCV(estimator, refit=True).fit(
X, y, sample_weight=sample_weight
)
assert model.estimator_ is not estimator
estimator.fit(X, y, sample_weight=sample_weight)
assert_allclose(model.estimator_.coef_, estimator.coef_)
assert_allclose(model.estimator_.intercept_, estimator.intercept_)
# check that `estimator_` was not altered when `refit=False` and `cv="prefit"`
estimator = LogisticRegression().set_fit_request(sample_weight=True)
estimator.fit(X, y, sample_weight=sample_weight)
coef = estimator.coef_.copy()
model = TunedThresholdClassifierCV(estimator, cv="prefit", refit=False).fit(
X, y, sample_weight=sample_weight
)
assert model.estimator_ is estimator
assert_allclose(model.estimator_.coef_, coef)
# check that we train `estimator_` on the training split of a given cross-validation
estimator = LogisticRegression().set_fit_request(sample_weight=True)
cv = [
(np.arange(50), np.arange(50, 100)),
] # single split
model = TunedThresholdClassifierCV(estimator, cv=cv, refit=False).fit(
X, y, sample_weight=sample_weight
)
assert model.estimator_ is not estimator
if with_sample_weight:
sw_train = sample_weight[cv[0][0]]
else:
sw_train = None
estimator.fit(X[cv[0][0]], y[cv[0][0]], sample_weight=sw_train)
assert_allclose(model.estimator_.coef_, estimator.coef_)
@pytest.mark.parametrize("fit_params_type", ["list", "array"])
@config_context(enable_metadata_routing=True)
def test_tuned_threshold_classifier_fit_params(fit_params_type):
"""Check that we pass `fit_params` to the classifier when calling `fit`."""
X, y = make_classification(n_samples=100, random_state=0)
fit_params = {
"a": _convert_container(y, fit_params_type),
"b": _convert_container(y, fit_params_type),
}
classifier = CheckingClassifier(expected_fit_params=["a", "b"], random_state=0)
classifier.set_fit_request(a=True, b=True)
model = TunedThresholdClassifierCV(classifier)
model.fit(X, y, **fit_params)
@config_context(enable_metadata_routing=True)
def test_tuned_threshold_classifier_cv_zeros_sample_weights_equivalence():
    """Check that passing removing some sample from the dataset `X` is
    equivalent to passing a `sample_weight` with a factor 0."""
    X, y = load_iris(return_X_y=True)
    # Scale the data to avoid any convergence issue
    X = StandardScaler().fit_transform(X)
    # Only use 2 classes and select samples such that 2-fold cross-validation
    # split will lead to an equivalence with a `sample_weight` of 0
    X = np.vstack((X[:40], X[50:90]))
    y = np.hstack((y[:40], y[50:90]))
    # Zero out every other sample's weight.
    sample_weight = np.zeros_like(y)
    sample_weight[::2] = 1
    estimator = LogisticRegression().set_fit_request(sample_weight=True)
    model_without_weights = TunedThresholdClassifierCV(estimator, cv=2)
    model_with_weights = clone(model_without_weights)
    model_with_weights.fit(X, y, sample_weight=sample_weight)
    # Fitting on the weight-1 subset must be equivalent to zero weights on the
    # rest of the data.
    model_without_weights.fit(X[::2], y[::2])
    assert_allclose(
        model_with_weights.estimator_.coef_, model_without_weights.estimator_.coef_
    )
    y_pred_with_weights = model_with_weights.predict_proba(X)
    y_pred_without_weights = model_without_weights.predict_proba(X)
    assert_allclose(y_pred_with_weights, y_pred_without_weights)
def test_tuned_threshold_classifier_thresholds_array():
    """Check that we can pass an array to `thresholds` and it is used as candidate
    threshold internally."""
    X, y = make_classification(random_state=0)
    candidate_thresholds = np.linspace(0, 1, 11)
    model = TunedThresholdClassifierCV(
        LogisticRegression(),
        thresholds=candidate_thresholds,
        response_method="predict_proba",
        store_cv_results=True,
    ).fit(X, y)
    # The user-provided candidates must be used verbatim.
    assert_allclose(model.cv_results_["thresholds"], candidate_thresholds)
@pytest.mark.parametrize("store_cv_results", [True, False])
def test_tuned_threshold_classifier_store_cv_results(store_cv_results):
"""Check that if `cv_results_` exists depending on `store_cv_results`."""
X, y = make_classification(random_state=0)
estimator = LogisticRegression()
tuned_model = TunedThresholdClassifierCV(
estimator, store_cv_results=store_cv_results
).fit(X, y)
if store_cv_results:
assert hasattr(tuned_model, "cv_results_")
else:
assert not hasattr(tuned_model, "cv_results_")
def test_tuned_threshold_classifier_cv_float():
    """Check the behaviour when `cv` is set to a float."""
    X, y = make_classification(random_state=0)
    # case where `refit=False` and cv is a float: the underlying estimator will be fit
    # on the training set given by a ShuffleSplit. We check that we get the same model
    # coefficients.
    test_size = 0.3
    estimator = LogisticRegression()
    tuned_model = TunedThresholdClassifierCV(
        estimator, cv=test_size, refit=False, random_state=0
    ).fit(X, y)
    # NOTE: a redundant second `tuned_model.fit(X, y)` call was removed here;
    # the chained `.fit` above already fitted the model and `random_state=0`
    # makes the internal split reproducible below.
    cv = StratifiedShuffleSplit(n_splits=1, test_size=test_size, random_state=0)
    train_idx, val_idx = next(cv.split(X, y))
    cloned_estimator = clone(estimator).fit(X[train_idx], y[train_idx])
    assert_allclose(tuned_model.estimator_.coef_, cloned_estimator.coef_)
    # case where `refit=True`, then the underlying estimator is fitted on the full
    # dataset.
    tuned_model.set_params(refit=True).fit(X, y)
    cloned_estimator = clone(estimator).fit(X, y)
    assert_allclose(tuned_model.estimator_.coef_, cloned_estimator.coef_)
def test_tuned_threshold_classifier_error_constant_predictor():
    """Check that we raise a ValueError if the underlying classifier returns constant
    probabilities such that we cannot find any threshold.
    """
    X, y = make_classification(random_state=0)
    constant_clf = DummyClassifier(strategy="constant", constant=1)
    model = TunedThresholdClassifierCV(constant_clf, response_method="predict_proba")
    with pytest.raises(
        ValueError, match="The provided estimator makes constant predictions"
    ):
        model.fit(X, y)
@pytest.mark.parametrize(
    "response_method", ["auto", "predict_proba", "decision_function"]
)
def test_fixed_threshold_classifier_equivalence_default(response_method):
    """Check that `FixedThresholdClassifier` has the same behaviour as the vanilla
    classifier.
    """
    X, y = make_classification(random_state=0)
    classifier = LogisticRegression().fit(X, y)
    classifier_default_threshold = FixedThresholdClassifier(
        estimator=clone(classifier), response_method=response_method
    )
    classifier_default_threshold.fit(X, y)
    # emulate the response method that should take into account the `pos_label`
    if response_method in ("auto", "predict_proba"):
        # Default cut-off on probabilities is 0.5.
        y_score = classifier_default_threshold.predict_proba(X)[:, 1]
        threshold = 0.5
    else:  # response_method == "decision_function"
        # Default cut-off on the decision function is 0.
        y_score = classifier_default_threshold.decision_function(X)
        threshold = 0.0
    y_pred_lr = (y_score >= threshold).astype(int)
    assert_allclose(classifier_default_threshold.predict(X), y_pred_lr)
@pytest.mark.parametrize(
    "response_method, threshold", [("predict_proba", 0.7), ("decision_function", 2.0)]
)
@pytest.mark.parametrize("pos_label", [0, 1])
def test_fixed_threshold_classifier(response_method, threshold, pos_label):
    """Check that applying `predict` lead to the same prediction as applying the
    threshold to the output of the response method.
    """
    X, y = make_classification(n_samples=50, random_state=0)
    logistic_regression = LogisticRegression().fit(X, y)
    model = FixedThresholdClassifier(
        estimator=clone(logistic_regression),
        threshold=threshold,
        response_method=response_method,
        pos_label=pos_label,
    ).fit(X, y)
    # check that the underlying estimator is the same
    assert_allclose(model.estimator_.coef_, logistic_regression.coef_)
    # emulate the response method that should take into account the `pos_label`
    if response_method == "predict_proba":
        y_score = model.predict_proba(X)[:, pos_label]
    else:  # response_method == "decision_function"
        # The decision function is oriented towards class 1; flip its sign
        # when class 0 is the positive label.
        y_score = model.decision_function(X)
        y_score = y_score if pos_label == 1 else -y_score
    # create a mapping from boolean values to class labels
    map_to_label = np.array([0, 1]) if pos_label == 1 else np.array([1, 0])
    y_pred_lr = map_to_label[(y_score >= threshold).astype(int)]
    assert_allclose(model.predict(X), y_pred_lr)
    # All other response methods are delegated untouched to the inner estimator.
    for method in ("predict_proba", "predict_log_proba", "decision_function"):
        assert_allclose(
            getattr(model, method)(X), getattr(logistic_regression, method)(X)
        )
        assert_allclose(
            getattr(model.estimator_, method)(X),
            getattr(logistic_regression, method)(X),
        )
@config_context(enable_metadata_routing=True)
def test_fixed_threshold_classifier_metadata_routing():
    """Check that everything works with metadata routing."""
    X, y = make_classification(random_state=0)
    sample_weight = np.ones_like(y)
    sample_weight[::2] = 2
    reference_clf = LogisticRegression().set_fit_request(sample_weight=True)
    reference_clf.fit(X, y, sample_weight=sample_weight)
    # The wrapper must forward `sample_weight` to the inner estimator's `fit`,
    # yielding the same coefficients as fitting the estimator directly.
    model = FixedThresholdClassifier(estimator=clone(reference_clf))
    model.fit(X, y, sample_weight=sample_weight)
    assert_allclose(model.estimator_.coef_, reference_clf.coef_)
@pytest.mark.parametrize(
    "method", ["predict_proba", "decision_function", "predict", "predict_log_proba"]
)
def test_fixed_threshold_classifier_fitted_estimator(method):
    """Check that if the underlying estimator is already fitted, no fit is required."""
    X, y = make_classification(random_state=0)
    prefit_estimator = LogisticRegression().fit(X, y)
    model = FixedThresholdClassifier(estimator=prefit_estimator)
    # Calling a response method on the never-fitted wrapper must not raise.
    getattr(model, method)(X)
def test_fixed_threshold_classifier_classes_():
    """Check that the classes_ attribute is properly set."""
    X, y = make_classification(random_state=0)
    # Accessing `classes_` before the inner estimator is fitted must raise.
    unfitted_model = FixedThresholdClassifier(estimator=LogisticRegression())
    with pytest.raises(
        AttributeError, match="The underlying estimator is not fitted yet."
    ):
        unfitted_model.classes_
    # With a prefit estimator, `classes_` is delegated to the inner estimator.
    prefit_estimator = LogisticRegression().fit(X, y)
    fitted_model = FixedThresholdClassifier(estimator=prefit_estimator)
    assert_array_equal(fitted_model.classes_, prefit_estimator.classes_)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/model_selection/tests/test_split.py | sklearn/model_selection/tests/test_split.py | """Test the split module"""
import re
import warnings
from itertools import combinations, combinations_with_replacement, permutations
import numpy as np
import pytest
from scipy import stats
from scipy.sparse import issparse
from scipy.special import comb
from sklearn import config_context
from sklearn.datasets import load_digits, make_classification
from sklearn.dummy import DummyClassifier
from sklearn.model_selection import (
GridSearchCV,
GroupKFold,
GroupShuffleSplit,
KFold,
LeaveOneGroupOut,
LeaveOneOut,
LeavePGroupsOut,
LeavePOut,
PredefinedSplit,
RepeatedKFold,
RepeatedStratifiedKFold,
ShuffleSplit,
StratifiedGroupKFold,
StratifiedKFold,
StratifiedShuffleSplit,
TimeSeriesSplit,
check_cv,
cross_val_score,
train_test_split,
)
from sklearn.model_selection._split import (
_build_repr,
_validate_shuffle_split,
_yields_constant_splits,
)
from sklearn.svm import SVC
from sklearn.tests.metadata_routing_common import assert_request_is_empty
from sklearn.utils._array_api import (
_convert_to_numpy,
_get_namespace_device_dtype_ids,
get_namespace,
yield_namespace_device_dtype_combinations,
)
from sklearn.utils._array_api import (
device as array_api_device,
)
from sklearn.utils._mocking import MockDataFrame
from sklearn.utils._testing import (
assert_allclose,
assert_array_almost_equal,
assert_array_equal,
ignore_warnings,
)
from sklearn.utils.estimator_checks import (
_array_api_for_tests,
)
from sklearn.utils.fixes import COO_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS
from sklearn.utils.validation import _num_samples
# Splitters whose `split` is called without a `groups` argument.
NO_GROUP_SPLITTERS = [
    KFold(),
    StratifiedKFold(),
    TimeSeriesSplit(),
    LeaveOneOut(),
    LeavePOut(p=2),
    ShuffleSplit(),
    StratifiedShuffleSplit(test_size=0.5),
    PredefinedSplit([1, 1, 2, 2]),
    RepeatedKFold(),
    RepeatedStratifiedKFold(),
]
# Splitters that consume `groups` to keep samples of a group together.
GROUP_SPLITTERS = [
    GroupKFold(),
    LeavePGroupsOut(n_groups=1),
    StratifiedGroupKFold(),
    LeaveOneGroupOut(),
    GroupShuffleSplit(),
]
# Class names of the group-aware splitters; used by `_split` below to decide
# whether to forward the `groups` argument.
GROUP_SPLITTER_NAMES = set(splitter.__class__.__name__ for splitter in GROUP_SPLITTERS)
ALL_SPLITTERS = NO_GROUP_SPLITTERS + GROUP_SPLITTERS  # type: ignore[list-item]
# Splitters that need `y` at split time for stratification.
SPLITTERS_REQUIRING_TARGET = [
    StratifiedKFold(),
    StratifiedShuffleSplit(),
    RepeatedStratifiedKFold(),
]
# Default toy data: 10 samples spread over 5 balanced classes.
X = np.ones(10)
y = np.arange(10) // 2
# Assorted group labelings (int arrays, lists, strings) used by group splitters.
test_groups = (
    np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
    np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
    np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
    np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
    [1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3],
    ["1", "1", "1", "1", "2", "2", "2", "3", "3", "3", "3", "3"],
)
digits = load_digits()
# Turn "The groups parameter ..." UserWarnings from sklearn into errors so that
# tests fail loudly if `groups` is passed to a splitter that ignores it.
pytestmark = pytest.mark.filterwarnings(
    "error:The groups parameter:UserWarning:sklearn.*"
)
def _split(splitter, X, y, groups):
    """Call ``splitter.split``, forwarding `groups` only when supported.

    Passing `groups` to a non-group splitter would trigger the UserWarning
    turned into an error by the module-level `pytestmark`.
    """
    needs_groups = splitter.__class__.__name__ in GROUP_SPLITTER_NAMES
    split_kwargs = {"groups": groups} if needs_groups else {}
    return splitter.split(X, y, **split_kwargs)
def test_cross_validator_with_default_params():
    """Smoke-test `get_n_splits`, `split` and `repr` of the default splitters."""
    n_samples = 4
    n_unique_groups = 4
    n_splits = 2
    p = 2
    n_shuffle_splits = 10  # (the default value)
    X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    X_1d = np.array([1, 2, 3, 4])
    y = np.array([1, 1, 2, 2])
    groups = np.array([1, 2, 3, 4])
    loo = LeaveOneOut()
    lpo = LeavePOut(p)
    kf = KFold(n_splits)
    skf = StratifiedKFold(n_splits)
    lolo = LeaveOneGroupOut()
    lopo = LeavePGroupsOut(p)
    ss = ShuffleSplit(random_state=0)
    ps = PredefinedSplit([1, 1, 2, 2])  # n_splits = np of unique folds = 2
    sgkf = StratifiedGroupKFold(n_splits)
    # Expected reprs: defaults must be rendered explicitly and in sorted order.
    loo_repr = "LeaveOneOut()"
    lpo_repr = "LeavePOut(p=2)"
    kf_repr = "KFold(n_splits=2, random_state=None, shuffle=False)"
    skf_repr = "StratifiedKFold(n_splits=2, random_state=None, shuffle=False)"
    lolo_repr = "LeaveOneGroupOut()"
    lopo_repr = "LeavePGroupsOut(n_groups=2)"
    ss_repr = (
        "ShuffleSplit(n_splits=10, random_state=0, test_size=None, train_size=None)"
    )
    ps_repr = "PredefinedSplit(test_fold=array([1, 1, 2, 2]))"
    sgkf_repr = "StratifiedGroupKFold(n_splits=2, random_state=None, shuffle=False)"
    # One expected split count per splitter, in the same order as below.
    n_splits_expected = [
        n_samples,
        comb(n_samples, p),
        n_splits,
        n_splits,
        n_unique_groups,
        comb(n_unique_groups, p),
        n_shuffle_splits,
        2,
        n_splits,
    ]
    for i, (cv, cv_repr) in enumerate(
        zip(
            [loo, lpo, kf, skf, lolo, lopo, ss, ps, sgkf],
            [
                loo_repr,
                lpo_repr,
                kf_repr,
                skf_repr,
                lolo_repr,
                lopo_repr,
                ss_repr,
                ps_repr,
                sgkf_repr,
            ],
        )
    ):
        # Test if get_n_splits works correctly
        assert n_splits_expected[i] == cv.get_n_splits(X, y, groups)
        # Test if the cross-validator works as expected even if
        # the data is 1d
        np.testing.assert_equal(
            list(_split(cv, X, y, groups)), list(_split(cv, X_1d, y, groups))
        )
        # Test that train, test indices returned are integers
        for train, test in _split(cv, X, y, groups):
            assert np.asarray(train).dtype.kind == "i"
            assert np.asarray(test).dtype.kind == "i"
        # Test if the repr works without any errors
        assert cv_repr == repr(cv)
    # ValueError for get_n_splits methods
    msg = "The 'X' parameter should not be None."
    with pytest.raises(ValueError, match=msg):
        loo.get_n_splits(None, y, groups)
    with pytest.raises(ValueError, match=msg):
        lpo.get_n_splits(None, y, groups)
def test_2d_y():
    # smoke test for 2d y and multi-label
    n_samples = 30
    rng = np.random.RandomState(1)
    X = rng.randint(0, 3, size=(n_samples, 2))
    y = rng.randint(0, 3, size=(n_samples,))
    y_2d = y.reshape(-1, 1)
    y_multilabel = rng.randint(0, 2, size=(n_samples, 3))
    groups = rng.randint(0, 3, size=(n_samples,))
    splitters = [
        LeaveOneOut(),
        LeavePOut(p=2),
        KFold(),
        StratifiedKFold(),
        RepeatedKFold(),
        RepeatedStratifiedKFold(),
        StratifiedGroupKFold(),
        ShuffleSplit(),
        StratifiedShuffleSplit(test_size=0.5),
        GroupShuffleSplit(),
        LeaveOneGroupOut(),
        LeavePGroupsOut(n_groups=2),
        GroupKFold(n_splits=3),
        TimeSeriesSplit(),
        PredefinedSplit(test_fold=groups),
    ]
    for splitter in splitters:
        # 1d and column-vector targets must both be accepted.
        list(_split(splitter, X, y, groups=groups))
        list(_split(splitter, X, y_2d, groups=groups))
        try:
            # Multilabel targets are only rejected by stratified splitters;
            # when they are, the error message must name the supported types.
            list(_split(splitter, X, y_multilabel, groups=groups))
        except ValueError as e:
            allowed_target_types = ("binary", "multiclass")
            msg = "Supported target types are: {}. Got 'multilabel".format(
                allowed_target_types
            )
            assert msg in str(e)
def check_valid_split(train, test, n_samples=None):
    """Assert that *train* and *test* form a disjoint split.

    When *n_samples* is given, additionally assert that the two sides together
    cover every sample index. Sets are used so that a failing assertion prints
    the offending indices.
    """
    train_set, test_set = set(train), set(test)
    # Disjointness: no index may appear on both sides of the split.
    assert train_set.intersection(test_set) == set()
    if n_samples is not None:
        # Exhaustiveness: the union covers all sample indices exactly.
        assert train_set.union(test_set) == set(range(n_samples))
def check_cv_coverage(cv, X, y, groups, expected_n_splits):
    """Check that *cv* yields *expected_n_splits* valid splits whose test folds
    jointly cover every sample."""
    n_samples = _num_samples(X)
    # `get_n_splits` must agree with the number of splits actually yielded.
    assert cv.get_n_splits(X, y, groups) == expected_n_splits
    seen_test_indices = set()
    n_iterations = 0
    for train, test in cv.split(X, y, groups):
        check_valid_split(train, test, n_samples=n_samples)
        n_iterations += 1
        seen_test_indices.update(test)
    assert n_iterations == expected_n_splits
    if n_samples is not None:
        # Accumulated test indices must cover the whole dataset.
        assert seen_test_indices == set(range(n_samples))
def test_kfold_valueerrors():
    """Check the error and warning conditions of the k-fold splitters."""
    X1 = np.array([[1, 2], [3, 4], [5, 6]])
    X2 = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]])
    # Check that errors are raised if there is not enough samples.
    # BUGFIX: this was previously the no-op expression
    # `(ValueError, next, KFold(4).split(X1))` -- a leftover from the old
    # `assert_raises(ValueError, next, ...)` idiom that asserted nothing.
    with pytest.raises(ValueError):
        next(KFold(4).split(X1))
    # Check that a warning is raised if the least populated class has too few
    # members.
    y = np.array([3, 3, -1, -1, 3])
    skf_3 = StratifiedKFold(3)
    with pytest.warns(Warning, match="The least populated class"):
        next(skf_3.split(X2, y))
    sgkf_3 = StratifiedGroupKFold(3)
    naive_groups = np.arange(len(y))
    with pytest.warns(Warning, match="The least populated class"):
        next(sgkf_3.split(X2, y, naive_groups))
    # Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented at on each
    # side of the split at each split
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        check_cv_coverage(skf_3, X2, y, groups=None, expected_n_splits=3)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        check_cv_coverage(sgkf_3, X2, y, groups=naive_groups, expected_n_splits=3)
    # Check that errors are raised if all n_groups for individual
    # classes are less than n_splits.
    y = np.array([3, 3, -1, -1, 2])
    with pytest.raises(ValueError):
        next(skf_3.split(X2, y))
    with pytest.raises(ValueError):
        next(sgkf_3.split(X2, y))
    # Error when number of folds is <= 1
    with pytest.raises(ValueError):
        KFold(0)
    with pytest.raises(ValueError):
        KFold(1)
    error_string = "k-fold cross-validation requires at least one train/test split"
    with pytest.raises(ValueError, match=error_string):
        StratifiedKFold(0)
    with pytest.raises(ValueError, match=error_string):
        StratifiedKFold(1)
    with pytest.raises(ValueError, match=error_string):
        StratifiedGroupKFold(0)
    with pytest.raises(ValueError, match=error_string):
        StratifiedGroupKFold(1)
    # When n_splits is not integer:
    with pytest.raises(ValueError):
        KFold(1.5)
    with pytest.raises(ValueError):
        KFold(2.0)
    with pytest.raises(ValueError):
        StratifiedKFold(1.5)
    with pytest.raises(ValueError):
        StratifiedKFold(2.0)
    with pytest.raises(ValueError):
        StratifiedGroupKFold(1.5)
    with pytest.raises(ValueError):
        StratifiedGroupKFold(2.0)
    # When shuffle is not a bool:
    with pytest.raises(TypeError):
        KFold(n_splits=4, shuffle=None)
def test_kfold_indices():
    # Every sample index must appear in exactly one test fold, whether or not
    # the number of samples divides evenly into the number of folds.
    for n_samples in (18, 17):
        check_cv_coverage(
            KFold(3), np.ones(n_samples), y=None, groups=None, expected_n_splits=3
        )
    # `get_n_splits` simply reports the requested number of folds.
    assert KFold(5).get_n_splits(np.ones(17)) == 5
def test_kfold_no_shuffle():
    # Manually check that KFold preserves the data ordering on toy datasets
    X2 = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
    # 4 samples, 2 folds: two equal contiguous halves, in order.
    expected_even = [([2, 3], [0, 1]), ([0, 1], [2, 3])]
    for (exp_train, exp_test), (train, test) in zip(
        expected_even, KFold(2).split(X2[:-1])
    ):
        assert_array_equal(test, exp_test)
        assert_array_equal(train, exp_train)
    # 5 samples, 2 folds: the first fold absorbs the extra sample.
    expected_odd = [([3, 4], [0, 1, 2]), ([0, 1, 2], [3, 4])]
    for (exp_train, exp_test), (train, test) in zip(expected_odd, KFold(2).split(X2)):
        assert_array_equal(test, exp_test)
        assert_array_equal(train, exp_train)
def test_stratified_kfold_no_shuffle():
    # Manually check that StratifiedKFold preserves the data ordering as much
    # as possible on toy datasets in order to avoid hiding sample dependencies
    # when possible
    X, y = np.ones(4), [1, 1, 0, 0]
    splits = StratifiedKFold(2).split(X, y)
    train, test = next(splits)
    assert_array_equal(test, [0, 2])
    assert_array_equal(train, [1, 3])
    train, test = next(splits)
    assert_array_equal(test, [1, 3])
    assert_array_equal(train, [0, 2])
    X, y = np.ones(7), [1, 1, 1, 0, 0, 0, 0]
    splits = StratifiedKFold(2).split(X, y)
    train, test = next(splits)
    assert_array_equal(test, [0, 1, 3, 4])
    assert_array_equal(train, [2, 5, 6])
    train, test = next(splits)
    assert_array_equal(test, [2, 5, 6])
    assert_array_equal(train, [0, 1, 3, 4])
    # Check if get_n_splits returns the number of folds
    assert 5 == StratifiedKFold(5).get_n_splits(X, y)
    # Make sure string labels are also supported
    X = np.ones(7)
    y1 = ["1", "1", "1", "0", "0", "0", "0"]
    y2 = [1, 1, 1, 0, 0, 0, 0]
    # String and integer encodings of the same stratification must give
    # identical splits.
    np.testing.assert_equal(
        list(StratifiedKFold(2).split(X, y1)), list(StratifiedKFold(2).split(X, y2))
    )
    # Check equivalence to KFold
    y = [0, 1, 0, 1, 0, 1, 0, 1]
    X = np.ones_like(y)
    np.testing.assert_equal(
        list(StratifiedKFold(3).split(X, y)), list(KFold(3).split(X, y))
    )
@pytest.mark.parametrize("shuffle", [False, True])
@pytest.mark.parametrize("k", [4, 5, 6, 7, 8, 9, 10])
@pytest.mark.parametrize("kfold", [StratifiedKFold, StratifiedGroupKFold])
def test_stratified_kfold_ratios(k, shuffle, kfold):
# Check that stratified kfold preserves class ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
X = np.ones(n_samples)
y = np.array(
[4] * int(0.10 * n_samples)
+ [0] * int(0.89 * n_samples)
+ [1] * int(0.01 * n_samples)
)
# ensure perfect stratification with StratifiedGroupKFold
groups = np.arange(len(y))
distr = np.bincount(y) / len(y)
test_sizes = []
random_state = None if not shuffle else 0
skf = kfold(k, random_state=random_state, shuffle=shuffle)
for train, test in _split(skf, X, y, groups=groups):
assert_allclose(np.bincount(y[train]) / len(train), distr, atol=0.02)
assert_allclose(np.bincount(y[test]) / len(test), distr, atol=0.02)
test_sizes.append(len(test))
assert np.ptp(test_sizes) <= 1
@pytest.mark.parametrize("shuffle", [False, True])
@pytest.mark.parametrize("k", [4, 6, 7])
@pytest.mark.parametrize("kfold", [StratifiedKFold, StratifiedGroupKFold])
def test_stratified_kfold_label_invariance(k, shuffle, kfold):
# Check that stratified kfold gives the same indices regardless of labels
n_samples = 100
y = np.array(
[2] * int(0.10 * n_samples)
+ [0] * int(0.89 * n_samples)
+ [1] * int(0.01 * n_samples)
)
X = np.ones(len(y))
# ensure perfect stratification with StratifiedGroupKFold
groups = np.arange(len(y))
def get_splits(y):
random_state = None if not shuffle else 0
return [
(list(train), list(test))
for train, test in _split(
kfold(k, random_state=random_state, shuffle=shuffle),
X,
y,
groups=groups,
)
]
splits_base = get_splits(y)
for perm in permutations([0, 1, 2]):
y_perm = np.take(perm, y)
splits_perm = get_splits(y_perm)
assert splits_perm == splits_base
def test_kfold_balance():
    # KFold must produce test folds whose sizes differ by at most one sample
    # and that together account for every sample.
    for n_samples in range(11, 17):
        fold_sizes = [
            len(test) for _, test in KFold(5).split(X=np.ones(n_samples))
        ]
        assert max(fold_sizes) - min(fold_sizes) <= 1
        assert sum(fold_sizes) == n_samples
@pytest.mark.parametrize("kfold", [StratifiedKFold, StratifiedGroupKFold])
def test_stratifiedkfold_balance(kfold):
    """Stratified k-fold variants must yield near-equal fold sizes.

    Only checked where stratification is possible; exercised with
    shuffling both enabled and disabled.
    """
    X = np.ones(17)
    y = [0] * 3 + [1] * 14
    # one sample per group => StratifiedGroupKFold can stratify perfectly
    groups = np.arange(len(y))
    for shuffle in (True, False):
        cv = kfold(3, shuffle=shuffle)
        for n in range(11, 17):
            fold_sizes = [
                len(test) for _, test in _split(cv, X[:n], y[:n], groups[:n])
            ]
            assert np.max(fold_sizes) - np.min(fold_sizes) <= 1
            assert np.sum(fold_sizes) == n
def test_shuffle_kfold():
    """Shuffled KFold must mix indices while still covering every sample."""
    kf_plain = KFold(3)
    kf_seed0 = KFold(3, shuffle=True, random_state=0)
    kf_seed1 = KFold(3, shuffle=True, random_state=1)
    X = np.ones(300)

    covered = np.zeros(300)
    splits = zip(kf_plain.split(X), kf_seed0.split(X), kf_seed1.split(X))
    for (tr1, te1), (tr2, te2), (tr3, te3) in splits:
        for tr_a, tr_b in combinations((tr1, tr2, tr3), 2):
            # no pair of train sets may coincide entirely
            assert len(np.intersect1d(tr_a, tr_b)) != len(tr1)
        # mark the test indices of the seed-0 splitter as seen
        covered[te2] = 1
    # every index must appear in exactly one of kf_seed0's test folds
    assert sum(covered) == 300
@pytest.mark.parametrize("kfold", [KFold, StratifiedKFold, StratifiedGroupKFold])
def test_shuffle_kfold_stratifiedkfold_reproducibility(kfold):
    """Check reproducibility of shuffled splits w.r.t. the random_state type.

    An int random_state makes repeated split() calls identical; a
    RandomState instance (or None) makes them (usually) differ per call.
    """
    X = np.ones(15)  # Divisible by 3
    y = [0] * 7 + [1] * 8
    groups_1 = np.arange(len(y))
    X2 = np.ones(16)  # Not divisible by 3
    y2 = [0] * 8 + [1] * 8
    groups_2 = np.arange(len(y2))
    # Check that when the shuffle is True, multiple split calls produce the
    # same split when random_state is int
    kf = kfold(3, shuffle=True, random_state=0)
    np.testing.assert_equal(
        list(_split(kf, X, y, groups_1)), list(_split(kf, X, y, groups_1))
    )
    # Check that when the shuffle is True, multiple split calls often
    # (not always) produce different splits when random_state is
    # RandomState instance or None
    kf = kfold(3, shuffle=True, random_state=np.random.RandomState(0))
    for data in zip((X, X2), (y, y2), (groups_1, groups_2)):
        # Test if the two splits are different cv
        for (_, test_a), (_, test_b) in zip(_split(kf, *data), _split(kf, *data)):
            # cv.split(...) returns an array of tuples, each tuple
            # consisting of an array with train indices and test indices
            # Ensure that the splits for data are not same
            # when random state is not set
            with pytest.raises(AssertionError):
                np.testing.assert_array_equal(test_a, test_b)
def test_shuffle_stratifiedkfold():
    """StratifiedKFold with different seeds must shuffle and cover all samples."""
    X_40 = np.ones(40)
    y = [0] * 20 + [1] * 20
    kf0 = StratifiedKFold(5, shuffle=True, random_state=0)
    kf1 = StratifiedKFold(5, shuffle=True, random_state=1)
    for (_, test0), (_, test1) in zip(kf0.split(X_40, y), kf1.split(X_40, y)):
        assert set(test0) != set(test1)
    check_cv_coverage(kf0, X_40, y, groups=None, expected_n_splits=5)

    # Each class's samples must be shuffled with a seed-dependent stream
    # (see https://github.com/scikit-learn/scikit-learn/pull/13124)
    X = np.arange(10)
    y = [0] * 5 + [1] * 5
    kf1 = StratifiedKFold(5, shuffle=True, random_state=0)
    kf2 = StratifiedKFold(5, shuffle=True, random_state=1)
    test_set1 = sorted(tuple(split[1]) for split in kf1.split(X, y))
    test_set2 = sorted(tuple(split[1]) for split in kf2.split(X, y))
    assert test_set1 != test_set2
def test_shuffle_groupkfold():
    """GroupKFold with shuffling must reorder groups yet keep full coverage."""
    X = np.ones(40)
    y = [0] * 20 + [1] * 20
    groups = np.arange(40) // 3
    gkf0 = GroupKFold(4, shuffle=True, random_state=0)
    gkf1 = GroupKFold(4, shuffle=True, random_state=1)

    def group_sets(splitter):
        # the set of groups landing in each test fold
        return [set(groups[test_idx]) for _, test_idx in splitter.split(X, None, groups)]

    for g0, g1 in zip(group_sets(gkf0), group_sets(gkf1)):
        assert g0 != g1, "Test groups should differ with different random states"

    # both seeded splitters must still cover every sample exactly once
    check_cv_coverage(gkf0, X, y, groups, expected_n_splits=4)
    check_cv_coverage(gkf1, X, y, groups, expected_n_splits=4)
def test_kfold_can_detect_dependent_samples_on_digits():  # see #2372
    """Non-shuffled (Stratified)KFold must expose sample dependency in digits.

    The digits samples are dependent: they are apparently grouped by authors
    although we don't have any information on the groups segment locations
    for this data. We can highlight this fact by computing k-fold cross-
    validation with and without shuffling: we observe that the shuffling case
    wrongly makes the IID assumption and is therefore too optimistic: it
    estimates a much higher accuracy (around 0.93) than that the non
    shuffling variant (around 0.81).
    """
    X, y = digits.data[:600], digits.target[:600]
    model = SVC(C=10, gamma=0.005)
    n_splits = 3

    cv = KFold(n_splits=n_splits, shuffle=False)
    mean_score = cross_val_score(model, X, y, cv=cv).mean()
    assert 0.92 > mean_score
    assert mean_score > 0.80

    # Shuffling the data artificially breaks the dependency and hides the
    # overfitting of the model with regards to the writing style of the authors
    # by yielding a seriously overestimated score:
    cv = KFold(n_splits, shuffle=True, random_state=0)
    mean_score = cross_val_score(model, X, y, cv=cv).mean()
    assert mean_score > 0.92

    cv = KFold(n_splits, shuffle=True, random_state=1)
    mean_score = cross_val_score(model, X, y, cv=cv).mean()
    assert mean_score > 0.92

    # Similarly, StratifiedKFold should try to shuffle the data as little
    # as possible (while respecting the balanced class constraints)
    # and thus be able to detect the dependency by not overestimating
    # the CV score either. As the digits dataset is approximately balanced
    # the estimated mean score is close to the score measured with
    # non-shuffled KFold
    cv = StratifiedKFold(n_splits)
    mean_score = cross_val_score(model, X, y, cv=cv).mean()
    assert 0.94 > mean_score
    assert mean_score > 0.80
def test_stratified_group_kfold_trivial():
    """StratifiedGroupKFold on groups sharing one class mix: perfect splits."""
    sgkf = StratifiedGroupKFold(n_splits=3)
    # every group carries the same 1:2 class mix
    y = np.array([1] * 6 + [0] * 12)
    X = np.ones_like(y).reshape(-1, 1)
    groups = np.asarray((1, 2, 3, 4, 5, 6, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6))
    overall_distr = np.bincount(y) / len(y)
    fold_sizes = []
    for train, test in sgkf.split(X, y, groups):
        # no group may straddle the train/test boundary
        assert np.intersect1d(groups[train], groups[test]).size == 0
        # class proportions must match the overall distribution
        assert_allclose(np.bincount(y[train]) / len(train), overall_distr, atol=0.02)
        assert_allclose(np.bincount(y[test]) / len(test), overall_distr, atol=0.02)
        fold_sizes.append(len(test))
    assert np.ptp(fold_sizes) <= 1
def test_stratified_group_kfold_approximate():
    """Check fold class distributions when stratification is only approximate.

    Not perfect stratification (even though it is possible) because of
    iteration over groups.
    """
    sgkf = StratifiedGroupKFold(n_splits=3)
    y = np.array([1] * 6 + [0] * 12)
    X = np.ones_like(y).reshape(-1, 1)
    groups = np.array([1, 2, 3, 3, 4, 4, 1, 1, 2, 2, 3, 4, 5, 5, 5, 6, 6, 6])
    # per-fold expected class proportions in the test split
    expected = np.asarray([[0.833, 0.166], [0.666, 0.333], [0.5, 0.5]])
    test_sizes = []
    for (train, test), expect_dist in zip(sgkf.split(X, y, groups), expected):
        # check group constraint
        assert np.intersect1d(groups[train], groups[test]).size == 0
        split_dist = np.bincount(y[test]) / len(test)
        assert_allclose(split_dist, expect_dist, atol=0.001)
        test_sizes.append(len(test))
    assert np.ptp(test_sizes) <= 1
@pytest.mark.parametrize(
"y, groups, expected",
[
(
np.array([0] * 6 + [1] * 6),
np.array([1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6]),
np.asarray([[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]]),
),
(
np.array([0] * 9 + [1] * 3),
np.array([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 5, 6]),
np.asarray([[0.75, 0.25], [0.75, 0.25], [0.75, 0.25]]),
),
],
)
def test_stratified_group_kfold_homogeneous_groups(y, groups, expected):
    """Folds must hit the expected class distribution for homogeneous groups."""
    sgkf = StratifiedGroupKFold(n_splits=3)
    X = np.ones_like(y).reshape(-1, 1)
    for (train, test), expect_dist in zip(sgkf.split(X, y, groups), expected):
        # check group constraint
        assert np.intersect1d(groups[train], groups[test]).size == 0
        split_dist = np.bincount(y[test]) / len(test)
        assert_allclose(split_dist, expect_dist, atol=0.001)
def test_stratified_group_kfold_shuffle_preserves_stratification():
    """Check StratifiedGroupKFold with shuffle=True preserves stratification.

    Shuffling only affects tie-breaking among groups with identical
    standard deviation of class distribution (see #32478).
    """
    y = np.array([0] * 12 + [1] * 6)
    X = np.ones((len(y), 1))
    # Groups are arranged so perfect stratification across 3 folds is
    # achievable
    groups = np.array([1, 1, 3, 3, 3, 4, 5, 5, 5, 5, 7, 7, 2, 2, 6, 6, 8, 8])
    expected_class_ratios = np.asarray([2.0 / 3, 1.0 / 3])
    # Run multiple seeds to ensure the property holds regardless of the
    # tie-breaking order among groups with identical std of class distribution
    n_iters = 100
    for seed in range(n_iters):
        sgkf = StratifiedGroupKFold(n_splits=3, shuffle=True, random_state=seed)
        test_sizes = []
        for train, test in sgkf.split(X, y, groups):
            # check group constraint
            assert np.intersect1d(groups[train], groups[test]).size == 0
            # check y distribution
            assert_allclose(
                np.bincount(y[train]) / len(train), expected_class_ratios, atol=1e-8
            )
            assert_allclose(
                np.bincount(y[test]) / len(test), expected_class_ratios, atol=1e-8
            )
            test_sizes.append(len(test))
        assert np.ptp(test_sizes) <= 1
@pytest.mark.parametrize("cls_distr", [(0.4, 0.6), (0.3, 0.7), (0.2, 0.8), (0.8, 0.2)])
@pytest.mark.parametrize("n_groups", [5, 30, 70])
def test_stratified_group_kfold_against_group_kfold(cls_distr, n_groups):
    """StratifiedGroupKFold should stratify better than plain GroupKFold.

    Given sufficient samples, the average KL divergence between each test
    fold's class distribution and the generating distribution must be no
    larger for StratifiedGroupKFold than for GroupKFold.
    """
    n_splits = 5
    sgkf = StratifiedGroupKFold(n_splits=n_splits)
    gkf = GroupKFold(n_splits=n_splits)
    rng = np.random.RandomState(0)
    n_points = 1000
    y = rng.choice(2, size=n_points, p=cls_distr)
    X = np.ones_like(y).reshape(-1, 1)
    g = rng.choice(n_groups, n_points)
    sgkf_folds = sgkf.split(X, y, groups=g)
    gkf_folds = gkf.split(X, y, groups=g)
    sgkf_entr = 0
    gkf_entr = 0
    for (sgkf_train, sgkf_test), (_, gkf_test) in zip(sgkf_folds, gkf_folds):
        # check group constraint
        assert np.intersect1d(g[sgkf_train], g[sgkf_test]).size == 0
        sgkf_distr = np.bincount(y[sgkf_test]) / len(sgkf_test)
        gkf_distr = np.bincount(y[gkf_test]) / len(gkf_test)
        # accumulate KL divergence of each fold from the true distribution
        sgkf_entr += stats.entropy(sgkf_distr, qk=cls_distr)
        gkf_entr += stats.entropy(gkf_distr, qk=cls_distr)
    sgkf_entr /= n_splits
    gkf_entr /= n_splits
    assert sgkf_entr <= gkf_entr
def test_shuffle_split():
    """Equivalent test_size specs (float, int, np int) give identical splits."""
    splitters = [
        ShuffleSplit(test_size=0.2, random_state=0).split(X),
        ShuffleSplit(test_size=2, random_state=0).split(X),
        ShuffleSplit(test_size=np.int32(2), random_state=0).split(X),
        ShuffleSplit(test_size=2, random_state=0).split(X),
    ]
    for t1, t2, t3, t4 in zip(*splitters):
        # train halves agree pairwise ...
        assert_array_equal(t1[0], t2[0])
        assert_array_equal(t2[0], t3[0])
        assert_array_equal(t3[0], t4[0])
        # ... and so do the test halves
        assert_array_equal(t1[1], t2[1])
        assert_array_equal(t2[1], t3[1])
        assert_array_equal(t3[1], t4[1])
@pytest.mark.parametrize("split_class", [ShuffleSplit, StratifiedShuffleSplit])
@pytest.mark.parametrize(
"train_size, exp_train, exp_test", [(None, 9, 1), (8, 8, 2), (0.8, 8, 2)]
)
def test_shuffle_split_default_test_size(split_class, train_size, exp_train, exp_test):
    """Default test_size is 0.1, or the complement of train_size when given."""
    X, y = np.ones(10), np.ones(10)
    train_idx, test_idx = next(split_class(train_size=train_size).split(X, y))
    assert len(train_idx) == exp_train
    assert len(test_idx) == exp_test
@pytest.mark.parametrize(
"train_size, exp_train, exp_test", [(None, 8, 2), (7, 7, 3), (0.7, 7, 3)]
)
def test_group_shuffle_split_default_test_size(train_size, exp_train, exp_test):
    """GroupShuffleSplit defaults test_size to 0.2, or complements train_size."""
    X, y = np.ones(10), np.ones(10)
    groups = range(10)
    train_idx, test_idx = next(
        GroupShuffleSplit(train_size=train_size).split(X, y, groups)
    )
    assert len(train_idx) == exp_train
    assert len(test_idx) == exp_test
def test_stratified_shuffle_split_init():
    """Invalid StratifiedShuffleSplit configurations must raise ValueError."""
    X = np.arange(7)
    y = np.asarray([0, 1, 1, 1, 2, 2, 2])
    # a class with a single sample cannot be stratified
    with pytest.raises(ValueError):
        next(StratifiedShuffleSplit(3, test_size=0.2).split(X, y))
    # test set smaller than the number of classes
    with pytest.raises(ValueError):
        next(StratifiedShuffleSplit(3, test_size=2).split(X, y))
    # train set smaller than the number of classes
    with pytest.raises(ValueError):
        next(StratifiedShuffleSplit(3, test_size=3, train_size=2).split(X, y))

    X = np.arange(9)
    y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
    # train or test size too small relative to the number of classes
    with pytest.raises(ValueError):
        next(StratifiedShuffleSplit(train_size=2).split(X, y))
    with pytest.raises(ValueError):
        next(StratifiedShuffleSplit(test_size=2).split(X, y))
def test_stratified_shuffle_split_respects_test_size():
    """Explicit integer train_size/test_size must be honored exactly."""
    y = np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2])
    test_size, train_size = 5, 10
    splitter = StratifiedShuffleSplit(
        6, test_size=test_size, train_size=train_size, random_state=0
    )
    for train_idx, test_idx in splitter.split(np.ones(len(y)), y):
        assert len(train_idx) == train_size
        assert len(test_idx) == test_size
def test_stratified_shuffle_split_iter():
    """Folds must keep sizes and class proportions for many label encodings.

    Labels are exercised as int arrays, negative ints, plain lists and
    strings; each split must be disjoint and class-proportional.
    """
    ys = [
        np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
        np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
        np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2),
        np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
        np.array([-1] * 800 + [1] * 50),
        np.concatenate([[i] * (100 + i) for i in range(11)]),
        [1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3],
        ["1", "1", "1", "1", "2", "2", "2", "3", "3", "3", "3", "3"],
    ]

    for y in ys:
        sss = StratifiedShuffleSplit(6, test_size=0.33, random_state=0).split(
            np.ones(len(y)), y
        )
        y = np.asanyarray(y)  # To make it indexable for y[train]
        # this is how test-size is computed internally
        # in _validate_shuffle_split
        test_size = np.ceil(0.33 * len(y))
        train_size = len(y) - test_size
        for train, test in sss:
            assert_array_equal(np.unique(y[train]), np.unique(y[test]))
            # Checks if folds keep classes proportions
            p_train = np.bincount(np.unique(y[train], return_inverse=True)[1]) / float(
                len(y[train])
            )
            p_test = np.bincount(np.unique(y[test], return_inverse=True)[1]) / float(
                len(y[test])
            )
            assert_array_almost_equal(p_train, p_test, 1)
            assert len(train) + len(test) == y.size
            assert len(train) == train_size
            assert len(test) == test_size
            assert_array_equal(np.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
# Test the StratifiedShuffleSplit, indices are drawn with a
# equal chance
n_folds = 5
n_splits = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
prob = bf.pmf(count)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/model_selection/tests/common.py | sklearn/model_selection/tests/common.py | """
Common utilities for testing model selection.
"""
import numpy as np
from sklearn.model_selection import KFold
class OneTimeSplitter:
    """A CV splitter wrapping KFold whose split() can be consumed only once."""

    def __init__(self, n_splits=4, n_samples=99):
        self.n_splits = n_splits
        self.n_samples = n_samples
        # exhausting this iterator makes subsequent split() calls yield nothing
        self.indices = iter(KFold(n_splits=n_splits).split(np.ones(n_samples)))

    def split(self, X=None, y=None, groups=None):
        """Yield (train, test) index pairs; only the first call produces splits."""
        yield from self.indices

    def get_n_splits(self, X=None, y=None, groups=None):
        """Return the configured number of splits."""
        return self.n_splits
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/model_selection/tests/test_successive_halving.py | sklearn/model_selection/tests/test_successive_halving.py | from math import ceil
import numpy as np
import pytest
from scipy.stats import expon, norm, randint
from sklearn.datasets import make_classification
from sklearn.dummy import DummyClassifier
from sklearn.experimental import enable_halving_search_cv # noqa: F401
from sklearn.model_selection import (
GroupKFold,
GroupShuffleSplit,
HalvingGridSearchCV,
HalvingRandomSearchCV,
KFold,
LeaveOneGroupOut,
LeavePGroupsOut,
ShuffleSplit,
StratifiedKFold,
StratifiedShuffleSplit,
)
from sklearn.model_selection._search_successive_halving import (
_SubsampleMetaSplitter,
_top_k,
)
from sklearn.model_selection.tests.test_search import (
check_cv_results_array_types,
check_cv_results_keys,
)
from sklearn.svm import SVC, LinearSVC
class FastClassifier(DummyClassifier):
    """Dummy classifier accepting inert parameters ``a`` through ``z``.

    The extra parameters never influence predictions; they only exist so
    that grid searches over many candidates stay fast."""

    # accept every single-letter parameter from a to z during validation
    _parameter_constraints: dict = {
        **DummyClassifier._parameter_constraints,
        **{chr(key): "no_validation" for key in range(ord("a"), ord("z") + 1)},
    }

    def __init__(
        self, strategy="stratified", random_state=None, constant=None, **kwargs
    ):
        super().__init__(
            strategy=strategy, random_state=random_state, constant=constant
        )

    def get_params(self, deep=False):
        """Return the dummy params plus one entry per letter a..z."""
        params = super().get_params(deep=deep)
        for letter in map(chr, range(ord("a"), ord("z") + 1)):
            params[letter] = "whatever"
        return params
class SometimesFailClassifier(DummyClassifier):
    """Dummy classifier that raises in fit and/or predict when configured.

    Used to test NaN-score handling in the successive-halving searches.
    """

    def __init__(
        self,
        strategy="stratified",
        random_state=None,
        constant=None,
        n_estimators=10,
        fail_fit=False,
        fail_predict=False,
        a=0,
    ):
        # flags selecting which stage raises
        self.fail_fit = fail_fit
        self.fail_predict = fail_predict
        self.n_estimators = n_estimators
        self.a = a
        super().__init__(
            strategy=strategy, random_state=random_state, constant=constant
        )

    def fit(self, X, y):
        """Fit the dummy model; raise when ``fail_fit`` is set."""
        if self.fail_fit:
            raise Exception("fitting failed")
        return super().fit(X, y)

    def predict(self, X):
        """Predict with the dummy model; raise when ``fail_predict`` is set."""
        if self.fail_predict:
            raise Exception("predict failed")
        return super().predict(X)
@pytest.mark.filterwarnings("ignore::sklearn.exceptions.FitFailedWarning")
@pytest.mark.filterwarnings("ignore:Scoring failed:UserWarning")
@pytest.mark.filterwarnings("ignore:One or more of the:UserWarning")
@pytest.mark.parametrize("HalvingSearch", (HalvingGridSearchCV, HalvingRandomSearchCV))
@pytest.mark.parametrize("fail_at", ("fit", "predict"))
def test_nan_handling(HalvingSearch, fail_at):
    """Check the selection of the best scores in presence of failure represented by
    NaN values.

    Candidates that fail (in fit or predict) get NaN scores; they must all
    share the same, lowest rank and never be selected as best.
    """
    n_samples = 1_000
    X, y = make_classification(n_samples=n_samples, random_state=0)

    search = HalvingSearch(
        SometimesFailClassifier(),
        {f"fail_{fail_at}": [False, True], "a": range(3)},
        resource="n_estimators",
        max_resources=6,
        min_resources=1,
        factor=2,
    )
    search.fit(X, y)

    # estimators that failed during fit/predict should always rank lower
    # than ones where the fit/predict succeeded
    assert not search.best_params_[f"fail_{fail_at}"]
    scores = search.cv_results_["mean_test_score"]
    ranks = search.cv_results_["rank_test_score"]

    # some scores should be NaN
    assert np.isnan(scores).any()

    unique_nan_ranks = np.unique(ranks[np.isnan(scores)])
    # all NaN scores should have the same rank
    assert unique_nan_ranks.shape[0] == 1
    # NaNs should have the lowest rank
    assert (unique_nan_ranks[0] >= ranks).all()
@pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV))
@pytest.mark.parametrize(
(
"aggressive_elimination,"
"max_resources,"
"expected_n_iterations,"
"expected_n_required_iterations,"
"expected_n_possible_iterations,"
"expected_n_remaining_candidates,"
"expected_n_candidates,"
"expected_n_resources,"
),
[
# notice how it loops at the beginning
# also, the number of candidates evaluated at the last iteration is
# <= factor
(True, "limited", 4, 4, 3, 1, [60, 20, 7, 3], [20, 20, 60, 180]),
# no aggressive elimination: we end up with less iterations, and
# the number of candidates at the last iter is > factor, which isn't
# ideal
(False, "limited", 3, 4, 3, 3, [60, 20, 7], [20, 60, 180]),
# # When the amount of resource isn't limited, aggressive_elimination
# # has no effect. Here the default min_resources='exhaust' will take
# # over.
(True, "unlimited", 4, 4, 4, 1, [60, 20, 7, 3], [37, 111, 333, 999]),
(False, "unlimited", 4, 4, 4, 1, [60, 20, 7, 3], [37, 111, 333, 999]),
],
)
def test_aggressive_elimination(
    Est,
    aggressive_elimination,
    max_resources,
    expected_n_iterations,
    expected_n_required_iterations,
    expected_n_possible_iterations,
    expected_n_remaining_candidates,
    expected_n_candidates,
    expected_n_resources,
):
    """Test the aggressive_elimination parameter of the halving searches."""
    n_samples = 1000
    X, y = make_classification(n_samples=n_samples, random_state=0)
    param_grid = {"a": ("l1", "l2"), "b": list(range(30))}
    base_estimator = FastClassifier()

    if max_resources == "limited":
        max_resources = 180
    else:
        max_resources = n_samples

    sh = Est(
        base_estimator,
        param_grid,
        aggressive_elimination=aggressive_elimination,
        max_resources=max_resources,
        factor=3,
    )
    sh.set_params(verbose=True)  # just for test coverage

    if Est is HalvingRandomSearchCV:
        # same number of candidates as with the grid
        sh.set_params(n_candidates=2 * 30, min_resources="exhaust")

    sh.fit(X, y)

    assert sh.n_iterations_ == expected_n_iterations
    assert sh.n_required_iterations_ == expected_n_required_iterations
    assert sh.n_possible_iterations_ == expected_n_possible_iterations
    assert sh.n_resources_ == expected_n_resources
    assert sh.n_candidates_ == expected_n_candidates
    assert sh.n_remaining_candidates_ == expected_n_remaining_candidates
    # the final iteration keeps at most ceil(n_candidates / factor) candidates
    assert ceil(sh.n_candidates_[-1] / sh.factor) == sh.n_remaining_candidates_
@pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV))
@pytest.mark.parametrize(
(
"min_resources,"
"max_resources,"
"expected_n_iterations,"
"expected_n_possible_iterations,"
"expected_n_resources,"
),
[
# with enough resources
("smallest", "auto", 2, 4, [20, 60]),
# with enough resources but min_resources set manually
(50, "auto", 2, 3, [50, 150]),
# without enough resources, only one iteration can be done
("smallest", 30, 1, 1, [20]),
# with exhaust: use as much resources as possible at the last iter
("exhaust", "auto", 2, 2, [333, 999]),
("exhaust", 1000, 2, 2, [333, 999]),
("exhaust", 999, 2, 2, [333, 999]),
("exhaust", 600, 2, 2, [200, 600]),
("exhaust", 599, 2, 2, [199, 597]),
("exhaust", 300, 2, 2, [100, 300]),
("exhaust", 60, 2, 2, [20, 60]),
("exhaust", 50, 1, 1, [20]),
("exhaust", 20, 1, 1, [20]),
],
)
def test_min_max_resources(
    Est,
    min_resources,
    max_resources,
    expected_n_iterations,
    expected_n_possible_iterations,
    expected_n_resources,
):
    """Test the min_resources and max_resources parameters, and how they affect
    the number of resources used at each iteration."""
    n_samples = 1000
    X, y = make_classification(n_samples=n_samples, random_state=0)
    param_grid = {"a": [1, 2], "b": [1, 2, 3]}
    base_estimator = FastClassifier()

    sh = Est(
        base_estimator,
        param_grid,
        factor=3,
        min_resources=min_resources,
        max_resources=max_resources,
    )
    if Est is HalvingRandomSearchCV:
        sh.set_params(n_candidates=6)  # same number as with the grid

    sh.fit(X, y)

    expected_n_required_iterations = 2  # given 6 combinations and factor = 3
    assert sh.n_iterations_ == expected_n_iterations
    assert sh.n_required_iterations_ == expected_n_required_iterations
    assert sh.n_possible_iterations_ == expected_n_possible_iterations
    assert sh.n_resources_ == expected_n_resources
    if min_resources == "exhaust":
        # with 'exhaust', every possible iteration is actually run
        assert sh.n_possible_iterations_ == sh.n_iterations_ == len(sh.n_resources_)
@pytest.mark.parametrize("Est", (HalvingRandomSearchCV, HalvingGridSearchCV))
@pytest.mark.parametrize(
"max_resources, n_iterations, n_possible_iterations",
[
("auto", 5, 9), # all resources are used
(1024, 5, 9),
(700, 5, 8),
(512, 5, 8),
(511, 5, 7),
(32, 4, 4),
(31, 3, 3),
(16, 3, 3),
(4, 1, 1), # max_resources == min_resources, only one iteration is
# possible
],
)
def test_n_iterations(Est, max_resources, n_iterations, n_possible_iterations):
    """Check the number of iterations actually run for a given max_resources."""
    n_samples = 1024
    X, y = make_classification(n_samples=n_samples, random_state=1)
    param_grid = {"a": [1, 2], "b": list(range(10))}
    base_estimator = FastClassifier()
    factor = 2

    sh = Est(
        base_estimator,
        param_grid,
        cv=2,
        factor=factor,
        max_resources=max_resources,
        min_resources=4,
    )
    if Est is HalvingRandomSearchCV:
        sh.set_params(n_candidates=20)  # same as for HalvingGridSearchCV
    sh.fit(X, y)
    assert sh.n_required_iterations_ == 5
    assert sh.n_iterations_ == n_iterations
    assert sh.n_possible_iterations_ == n_possible_iterations
@pytest.mark.parametrize("Est", (HalvingRandomSearchCV, HalvingGridSearchCV))
def test_resource_parameter(Est):
    """Test the resource parameter (budget spent on an estimator parameter)."""
    n_samples = 1000
    X, y = make_classification(n_samples=n_samples, random_state=0)
    param_grid = {"a": [1, 2], "b": list(range(10))}
    base_estimator = FastClassifier()
    sh = Est(base_estimator, param_grid, cv=2, resource="c", max_resources=10, factor=3)
    sh.fit(X, y)

    assert set(sh.n_resources_) == set([1, 3, 9])
    for r_i, params, param_c in zip(
        sh.cv_results_["n_resources"],
        sh.cv_results_["params"],
        sh.cv_results_["param_c"],
    ):
        # the resources column, the candidate params and the dedicated
        # param_c column must all agree
        assert r_i == params["c"] == param_c

    with pytest.raises(
        ValueError, match="Cannot use resource=1234 which is not supported "
    ):
        sh = HalvingGridSearchCV(
            base_estimator, param_grid, cv=2, resource="1234", max_resources=10
        )
        sh.fit(X, y)

    with pytest.raises(
        ValueError,
        match=(
            "Cannot use parameter c as the resource since it is part "
            "of the searched parameters."
        ),
    ):
        param_grid = {"a": [1, 2], "b": [1, 2], "c": [1, 3]}
        sh = HalvingGridSearchCV(
            base_estimator, param_grid, cv=2, resource="c", max_resources=10
        )
        sh.fit(X, y)
@pytest.mark.parametrize(
"max_resources, n_candidates, expected_n_candidates",
[
(512, "exhaust", 128), # generate exactly as much as needed
(32, "exhaust", 8),
(32, 8, 8),
(32, 7, 7), # ask for less than what we could
(32, 9, 9), # ask for more than 'reasonable'
],
)
def test_random_search(max_resources, n_candidates, expected_n_candidates):
    """Check the number of candidates generated by HalvingRandomSearchCV."""
    n_samples = 1024
    X, y = make_classification(n_samples=n_samples, random_state=0)
    param_grid = {"a": norm, "b": norm}
    base_estimator = FastClassifier()
    sh = HalvingRandomSearchCV(
        base_estimator,
        param_grid,
        n_candidates=n_candidates,
        cv=2,
        max_resources=max_resources,
        factor=2,
        min_resources=4,
    )
    sh.fit(X, y)
    assert sh.n_candidates_[0] == expected_n_candidates
    if n_candidates == "exhaust":
        # Make sure 'exhaust' makes the last iteration use as much resources as
        # we can
        assert sh.n_resources_[-1] == max_resources
@pytest.mark.parametrize(
"param_distributions, expected_n_candidates",
[
({"a": [1, 2]}, 2), # all lists, sample less than n_candidates
({"a": randint(1, 3)}, 10), # not all list, respect n_candidates
],
)
def test_random_search_discrete_distributions(
    param_distributions, expected_n_candidates
):
    """Check candidate counts for all-list vs mixed parameter distributions.

    Make sure random search samples the appropriate number of candidates when
    we ask for more than what's possible. How many parameters are sampled
    depends whether the distributions are 'all lists' or not (see
    ParameterSampler for details). This is somewhat redundant with the checks
    in ParameterSampler but interaction bugs were discovered during
    development of SH.
    """
    n_samples = 1024
    X, y = make_classification(n_samples=n_samples, random_state=0)
    base_estimator = FastClassifier()
    sh = HalvingRandomSearchCV(base_estimator, param_distributions, n_candidates=10)
    sh.fit(X, y)
    assert sh.n_candidates_[0] == expected_n_candidates
@pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV))
@pytest.mark.parametrize(
"params, expected_error_message",
[
(
{"resource": "not_a_parameter"},
"Cannot use resource=not_a_parameter which is not supported",
),
(
{"resource": "a", "max_resources": 100},
"Cannot use parameter a as the resource since it is part of",
),
(
{"max_resources": "auto", "resource": "b"},
"resource can only be 'n_samples' when max_resources='auto'",
),
(
{"min_resources": 15, "max_resources": 14},
"min_resources_=15 is greater than max_resources_=14",
),
({"cv": KFold(shuffle=True)}, "must yield consistent folds"),
({"cv": ShuffleSplit()}, "must yield consistent folds"),
],
)
def test_input_errors(Est, params, expected_error_message):
    """Invalid halving-search settings must raise ValueError at fit time."""
    base_estimator = FastClassifier()
    param_grid = {"a": [1]}
    X, y = make_classification(100)

    sh = Est(base_estimator, param_grid, **params)

    with pytest.raises(ValueError, match=expected_error_message):
        sh.fit(X, y)
@pytest.mark.parametrize(
"params, expected_error_message",
[
(
{"n_candidates": "exhaust", "min_resources": "exhaust"},
"cannot be both set to 'exhaust'",
),
],
)
def test_input_errors_randomized(params, expected_error_message):
    """HalvingRandomSearchCV-specific invalid settings must raise ValueError."""
    X, y = make_classification(100)
    search = HalvingRandomSearchCV(FastClassifier(), {"a": [1]}, **params)
    with pytest.raises(ValueError, match=expected_error_message):
        search.fit(X, y)
@pytest.mark.parametrize(
"fraction, subsample_test, expected_train_size, expected_test_size",
[
(0.5, True, 40, 10),
(0.5, False, 40, 20),
(0.2, True, 16, 4),
(0.2, False, 16, 20),
],
)
def test_subsample_splitter_shapes(
    fraction, subsample_test, expected_train_size, expected_test_size
):
    """Splits produced by _SubsampleMetaSplitter must have the expected sizes."""
    n_samples = 100
    X, y = make_classification(n_samples)
    cv = _SubsampleMetaSplitter(
        base_cv=KFold(5),
        fraction=fraction,
        subsample_test=subsample_test,
        random_state=None,
    )

    for train, test in cv.split(X, y):
        assert train.shape[0] == expected_train_size
        assert test.shape[0] == expected_test_size
        if subsample_test:
            # both halves were subsampled by `fraction`
            assert train.shape[0] + test.shape[0] == int(n_samples * fraction)
        else:
            # the test fold keeps its full base-CV size
            assert test.shape[0] == n_samples // cv.base_cv.get_n_splits()
@pytest.mark.parametrize("subsample_test", (True, False))
def test_subsample_splitter_determinism(subsample_test):
# Make sure _SubsampleMetaSplitter is consistent across calls to split():
# - we're OK having training sets differ (they're always sampled with a
# different fraction anyway)
# - when we don't subsample the test set, we want it to be always the same.
# This check is the most important. This is ensured by the determinism
# of the base_cv.
# Note: we could force both train and test splits to be always the same if
# we drew an int seed in _SubsampleMetaSplitter.__init__
n_samples = 100
X, y = make_classification(n_samples)
cv = _SubsampleMetaSplitter(
base_cv=KFold(5), fraction=0.5, subsample_test=subsample_test, random_state=None
)
folds_a = list(cv.split(X, y, groups=None))
folds_b = list(cv.split(X, y, groups=None))
for (train_a, test_a), (train_b, test_b) in zip(folds_a, folds_b):
assert not np.all(train_a == train_b)
if subsample_test:
assert not np.all(test_a == test_b)
else:
assert np.all(test_a == test_b)
assert np.all(X[test_a] == X[test_b])
@pytest.mark.parametrize(
"k, itr, expected",
[
(1, 0, ["c"]),
(2, 0, ["a", "c"]),
(4, 0, ["d", "b", "a", "c"]),
(10, 0, ["d", "b", "a", "c"]),
(1, 1, ["e"]),
(2, 1, ["f", "e"]),
(10, 1, ["f", "e"]),
(1, 2, ["i"]),
(10, 2, ["g", "h", "i"]),
],
)
def test_top_k(k, itr, expected):
    """_top_k must return the k best-scoring params of iteration ``itr``."""
    results = {  # this isn't a 'real world' result dict
        "iter": [0, 0, 0, 0, 1, 1, 2, 2, 2],
        "mean_test_score": [4, 3, 5, 1, 11, 10, 5, 6, 9],
        "params": ["a", "b", "c", "d", "e", "f", "g", "h", "i"],
    }
    selected = _top_k(results, k=k, itr=itr)
    assert np.all(selected == expected)
@pytest.mark.parametrize("Est", (HalvingRandomSearchCV, HalvingGridSearchCV))
def test_cv_results(Est):
    """Check that ``cv_results_`` reflects the tournament logic of successive
    halving: only the top candidates of one iteration are evaluated in the
    next, and ``best_params_`` comes from the last iteration only."""
    # test that the cv_results_ matches correctly the logic of the
    # tournament: in particular that the candidates continued in each
    # successive iteration are those that were best in the previous iteration
    pd = pytest.importorskip("pandas")
    rng = np.random.RandomState(0)
    n_samples = 1000
    X, y = make_classification(n_samples=n_samples, random_state=0)
    param_grid = {"a": ("l1", "l2"), "b": list(range(30))}
    base_estimator = FastClassifier()
    # generate random scores: we want to avoid ties, which would otherwise
    # mess with the ordering and make testing harder
    def scorer(est, X, y):
        return rng.rand()
    sh = Est(base_estimator, param_grid, factor=2, scoring=scorer)
    if Est is HalvingRandomSearchCV:
        # same number of candidates as with the grid
        sh.set_params(n_candidates=2 * 30, min_resources="exhaust")
    sh.fit(X, y)
    # non-regression check for
    # https://github.com/scikit-learn/scikit-learn/issues/19203
    assert isinstance(sh.cv_results_["iter"], np.ndarray)
    assert isinstance(sh.cv_results_["n_resources"], np.ndarray)
    cv_results_df = pd.DataFrame(sh.cv_results_)
    # just make sure we don't have ties
    assert len(cv_results_df["mean_test_score"].unique()) == len(cv_results_df)
    # One row per (candidate, iteration); pivot into a candidates-by-iteration
    # table of scores so survivorship can be read column by column.
    cv_results_df["params_str"] = cv_results_df["params"].apply(str)
    table = cv_results_df.pivot(
        index="params_str", columns="iter", values="mean_test_score"
    )
    # table looks like something like this:
    # iter                    0      1      2       3    4    5
    # params_str
    # {'a': 'l2', 'b': 23}  0.75   NaN    NaN     NaN   NaN  NaN
    # {'a': 'l1', 'b': 30}  0.90  0.875   NaN     NaN   NaN  NaN
    # {'a': 'l1', 'b': 0}   0.75   NaN    NaN     NaN   NaN  NaN
    # {'a': 'l2', 'b': 3}   0.85  0.925  0.9125  0.90625 NaN NaN
    # {'a': 'l1', 'b': 5}   0.80   NaN    NaN     NaN   NaN  NaN
    # ...
    # where a NaN indicates that the candidate wasn't evaluated at a given
    # iteration, because it wasn't part of the top-K at some previous
    # iteration. We here make sure that candidates that aren't in the top-k at
    # any given iteration are indeed not evaluated at the subsequent
    # iterations.
    nan_mask = pd.isna(table)
    n_iter = sh.n_iterations_
    for it in range(n_iter - 1):
        already_discarded_mask = nan_mask[it]
        # make sure that if a candidate is already discarded, we don't evaluate
        # it later
        assert (
            already_discarded_mask & nan_mask[it + 1] == already_discarded_mask
        ).all()
        # make sure that the number of discarded candidate is correct
        discarded_now_mask = ~already_discarded_mask & nan_mask[it + 1]
        kept_mask = ~already_discarded_mask & ~discarded_now_mask
        assert kept_mask.sum() == sh.n_candidates_[it + 1]
        # make sure that all discarded candidates have a lower score than the
        # kept candidates
        discarded_max_score = table[it].where(discarded_now_mask).max()
        kept_min_score = table[it].where(kept_mask).min()
        assert discarded_max_score < kept_min_score
    # We now make sure that the best candidate is chosen only from the last
    # iteration.
    # We also make sure this is true even if there were higher scores in
    # earlier rounds (this isn't generally the case, but worth ensuring it's
    # possible).
    last_iter = cv_results_df["iter"].max()
    idx_best_last_iter = cv_results_df[cv_results_df["iter"] == last_iter][
        "mean_test_score"
    ].idxmax()
    idx_best_all_iters = cv_results_df["mean_test_score"].idxmax()
    assert sh.best_params_ == cv_results_df.iloc[idx_best_last_iter]["params"]
    assert (
        cv_results_df.iloc[idx_best_last_iter]["mean_test_score"]
        < cv_results_df.iloc[idx_best_all_iters]["mean_test_score"]
    )
    assert (
        cv_results_df.iloc[idx_best_last_iter]["params"]
        != cv_results_df.iloc[idx_best_all_iters]["params"]
    )
@pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV))
def test_base_estimator_inputs(Est):
    """Record every fit/predict/set_params call made by the halving search and
    check the sample counts and parameters against ``cv_results_``."""
    # make sure that the base estimators are passed the correct parameters and
    # number of samples at each iteration.
    pd = pytest.importorskip("pandas")
    # Closed-over lists that the bookkeeping subclass below appends to.
    passed_n_samples_fit = []
    passed_n_samples_predict = []
    passed_params = []
    class FastClassifierBookKeeping(FastClassifier):
        # Each override records its input before delegating to the parent.
        def fit(self, X, y):
            passed_n_samples_fit.append(X.shape[0])
            return super().fit(X, y)
        def predict(self, X):
            passed_n_samples_predict.append(X.shape[0])
            return super().predict(X)
        def set_params(self, **params):
            passed_params.append(params)
            return super().set_params(**params)
    n_samples = 1024
    n_splits = 2
    X, y = make_classification(n_samples=n_samples, random_state=0)
    param_grid = {"a": ("l1", "l2"), "b": list(range(30))}
    base_estimator = FastClassifierBookKeeping()
    sh = Est(
        base_estimator,
        param_grid,
        factor=2,
        cv=n_splits,
        return_train_score=False,
        refit=False,
    )
    if Est is HalvingRandomSearchCV:
        # same number of candidates as with the grid
        sh.set_params(n_candidates=2 * 30, min_resources="exhaust")
    sh.fit(X, y)
    # One predict (scoring) call per fit call.
    assert len(passed_n_samples_fit) == len(passed_n_samples_predict)
    # Train + test sizes add up to the resources used for that candidate/fold.
    passed_n_samples = [
        x + y for (x, y) in zip(passed_n_samples_fit, passed_n_samples_predict)
    ]
    # Lists are of length n_splits * n_iter * n_candidates_at_i.
    # Each chunk of size n_splits corresponds to the n_splits folds for the
    # same candidate at the same iteration, so they contain equal values. We
    # subsample such that the lists are of length n_iter * n_candidates_at_it
    passed_n_samples = passed_n_samples[::n_splits]
    passed_params = passed_params[::n_splits]
    cv_results_df = pd.DataFrame(sh.cv_results_)
    assert len(passed_params) == len(passed_n_samples) == len(cv_results_df)
    # Distinct resource levels and their multiplicities must match the
    # search's reported schedule.
    uniques, counts = np.unique(passed_n_samples, return_counts=True)
    assert (sh.n_resources_ == uniques).all()
    assert (sh.n_candidates_ == counts).all()
    assert (cv_results_df["params"] == passed_params).all()
    assert (cv_results_df["n_resources"] == passed_n_samples).all()
@pytest.mark.parametrize("Est", (HalvingGridSearchCV, HalvingRandomSearchCV))
def test_groups_support(Est):
    """Group-aware splitters must require ``groups`` in the halving searches,
    and the provided groups must be forwarded to the underlying CV object."""
    rng = np.random.RandomState(0)
    X, y = make_classification(n_samples=50, n_classes=2, random_state=0)
    groups = rng.randint(0, 3, 50)
    clf = LinearSVC(random_state=0)
    grid = {"C": [1]}
    error_msg = "The 'groups' parameter should not be None."
    group_splitters = (
        LeaveOneGroupOut(),
        LeavePGroupsOut(2),
        GroupKFold(n_splits=3),
        GroupShuffleSplit(random_state=0),
    )
    for splitter in group_splitters:
        search = Est(clf, grid, cv=splitter, random_state=0)
        # Omitting groups must propagate the splitter's ValueError ...
        with pytest.raises(ValueError, match=error_msg):
            search.fit(X, y)
        # ... while passing groups must succeed.
        search.fit(X, y, groups=groups)
    # Non-group splitters must not require groups at all.
    for splitter in (StratifiedKFold(), StratifiedShuffleSplit(random_state=0)):
        Est(clf, grid, cv=splitter).fit(X, y)
@pytest.mark.parametrize("SearchCV", [HalvingRandomSearchCV, HalvingGridSearchCV])
def test_min_resources_null(SearchCV):
    """Fitting on an empty dataset must raise the dedicated
    ``min_resources_=0`` error."""
    search = SearchCV(FastClassifier(), {"a": [1]}, min_resources="smallest")
    empty_X = np.empty(0).reshape(0, 3)
    expected_msg = "min_resources_=0: you might have passed an empty dataset X."
    with pytest.raises(ValueError, match=expected_msg):
        search.fit(empty_X, [])
@pytest.mark.parametrize("SearchCV", [HalvingGridSearchCV, HalvingRandomSearchCV])
def test_select_best_index(SearchCV):
    """The best candidate must be picked from the last iteration only."""
    # Synthetic results (not a real cv_results_ dict): the last iteration (2)
    # holds candidates g, h, i with scores 5, 6, 9 -> 'i', global index 8.
    iterations = np.array([0, 0, 0, 0, 1, 1, 2, 2, 2])
    mean_scores = np.array([4, 3, 5, 1, 11, 10, 5, 6, 9])
    candidates = np.array(["a", "b", "c", "d", "e", "f", "g", "h", "i"])
    results = {
        "iter": iterations,
        "mean_test_score": mean_scores,
        "params": candidates,
    }
    best_index = SearchCV._select_best_index(None, None, results)
    assert best_index == 8
def test_halving_random_search_list_of_dicts():
    """Check the behaviour of the `HalvingRandomSearchCV` with `param_distribution`
    being a list of dictionary.

    Each sampled candidate must come from exactly one sub-grid, and the
    parameters of the other sub-grid must be masked in ``cv_results_``.
    """
    X, y = make_classification(n_samples=150, n_features=4, random_state=42)
    # Two disjoint sub-distributions: rbf kernels sample C/gamma, poly kernels
    # sample degree.
    params = [
        {"kernel": ["rbf"], "C": expon(scale=10), "gamma": expon(scale=0.1)},
        {"kernel": ["poly"], "degree": [2, 3]},
    ]
    param_keys = (
        "param_C",
        "param_degree",
        "param_gamma",
        "param_kernel",
    )
    score_keys = (
        "mean_test_score",
        "mean_train_score",
        "rank_test_score",
        "split0_test_score",
        "split1_test_score",
        "split2_test_score",
        "split0_train_score",
        "split1_train_score",
        "split2_train_score",
        "std_test_score",
        "std_train_score",
        "mean_fit_time",
        "std_fit_time",
        "mean_score_time",
        "std_score_time",
    )
    extra_keys = ("n_resources", "iter")
    search = HalvingRandomSearchCV(
        SVC(), cv=3, param_distributions=params, return_train_score=True, random_state=0
    )
    search.fit(X, y)
    # Total rows in cv_results_: one per candidate per iteration it survived.
    n_candidates = sum(search.n_candidates_)
    cv_results = search.cv_results_
    # Check results structure
    check_cv_results_keys(cv_results, param_keys, score_keys, n_candidates, extra_keys)
    expected_cv_results_kinds = {
        "param_C": "f",
        "param_degree": "i",
        "param_gamma": "f",
        "param_kernel": "O",
    }
    check_cv_results_array_types(
        search, param_keys, score_keys, expected_cv_results_kinds
    )
    # poly candidates: C and gamma masked, degree present.
    assert all(
        (
            cv_results["param_C"].mask[i]
            and cv_results["param_gamma"].mask[i]
            and not cv_results["param_degree"].mask[i]
        )
        for i in range(n_candidates)
        if cv_results["param_kernel"][i] == "poly"
    )
    # rbf candidates: C and gamma present, degree masked.
    assert all(
        (
            not cv_results["param_C"].mask[i]
            and not cv_results["param_gamma"].mask[i]
            and cv_results["param_degree"].mask[i]
        )
        for i in range(n_candidates)
        if cv_results["param_kernel"][i] == "rbf"
    )
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/model_selection/tests/__init__.py | sklearn/model_selection/tests/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/model_selection/tests/test_validation.py | sklearn/model_selection/tests/test_validation.py | """Test the validation module"""
import os
import re
import tempfile
import warnings
from functools import partial
from time import sleep
import numpy as np
import pytest
from scipy.sparse import issparse
from sklearn import config_context
from sklearn.base import BaseEstimator, ClassifierMixin, clone, is_classifier
from sklearn.cluster import KMeans
from sklearn.datasets import (
load_diabetes,
load_digits,
load_iris,
make_classification,
make_multilabel_classification,
make_regression,
)
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import RandomForestClassifier
from sklearn.exceptions import FitFailedWarning, UnsetMetadataPassedError
from sklearn.impute import SimpleImputer
from sklearn.linear_model import (
LogisticRegression,
Ridge,
RidgeClassifier,
SGDClassifier,
)
from sklearn.metrics import (
accuracy_score,
check_scoring,
confusion_matrix,
explained_variance_score,
make_scorer,
mean_squared_error,
precision_recall_fscore_support,
precision_score,
r2_score,
)
from sklearn.metrics._scorer import _MultimetricScorer
from sklearn.model_selection import (
GridSearchCV,
GroupKFold,
GroupShuffleSplit,
KFold,
LeaveOneGroupOut,
LeaveOneOut,
LeavePGroupsOut,
ShuffleSplit,
StratifiedKFold,
cross_val_predict,
cross_val_score,
cross_validate,
learning_curve,
permutation_test_score,
validation_curve,
)
from sklearn.model_selection._validation import (
_check_is_permutation,
_fit_and_score,
_score,
)
from sklearn.model_selection.tests.common import OneTimeSplitter
from sklearn.model_selection.tests.test_search import FailingClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder, scale
from sklearn.svm import SVC, LinearSVC
from sklearn.tests.metadata_routing_common import (
ConsumingClassifier,
ConsumingScorer,
ConsumingSplitter,
_Registry,
check_recorded_metadata,
)
from sklearn.utils import shuffle
from sklearn.utils._array_api import (
_atol_for_type,
_convert_to_numpy,
_get_namespace_device_dtype_ids,
yield_namespace_device_dtype_combinations,
)
from sklearn.utils._mocking import CheckingClassifier, MockDataFrame
from sklearn.utils._testing import (
_array_api_for_tests,
assert_allclose,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
)
from sklearn.utils.fixes import COO_CONTAINERS, CSR_CONTAINERS
from sklearn.utils.validation import _num_samples
class MockImprovingEstimator(BaseEstimator):
    """Dummy classifier to test the learning curve.

    The score improves with the number of training samples: the training
    score decays from 2 toward 1 while the test score grows from 0 toward 1.
    """

    def __init__(self, n_max_train_sizes):
        self.n_max_train_sizes = n_max_train_sizes
        self.train_sizes = 0
        self.X_subset = None

    def fit(self, X_subset, y_subset=None):
        """Remember the training subset and its size; no real fitting."""
        self.X_subset = X_subset
        self.train_sizes = X_subset.shape[0]
        return self

    def predict(self, X):
        raise NotImplementedError

    def score(self, X=None, Y=None):
        # training score becomes worse (2 -> 1), test error better (0 -> 1)
        fraction_seen = float(self.train_sizes) / self.n_max_train_sizes
        if self._is_training_data(X):
            return 2.0 - fraction_seen
        return fraction_seen

    def _is_training_data(self, X):
        # Identity check: only the exact object passed to fit() counts.
        return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
    """Dummy classifier that provides partial_fit.

    Optionally verifies that every call to ``partial_fit`` receives the
    expected fit parameters, each aligned with the batch size.
    """

    def __init__(self, n_max_train_sizes, expected_fit_params=None):
        super().__init__(n_max_train_sizes)
        self.x = None
        self.expected_fit_params = expected_fit_params

    def _is_training_data(self, X):
        # Membership check: the first sample of the last batch marks training
        # data (fit is incremental, so identity of X cannot be used).
        return self.x in X

    def partial_fit(self, X, y=None, **params):
        """Accumulate the batch size and validate the passed fit params."""
        self.train_sizes += X.shape[0]
        self.x = X[0]
        if not self.expected_fit_params:
            return
        missing = set(self.expected_fit_params) - set(params)
        if missing:
            raise AssertionError(
                f"Expected fit parameter(s) {list(missing)} not seen."
            )
        for key, value in params.items():
            if key not in self.expected_fit_params:
                continue
            # Every expected fit param must be sliced to the batch length.
            if _num_samples(value) != _num_samples(X):
                raise AssertionError(
                    f"Fit parameter {key} has length {_num_samples(value)}"
                    f"; expected {_num_samples(X)}."
                )
class MockEstimatorWithParameter(BaseEstimator):
    """Dummy classifier to test the validation curve.

    ``score`` returns ``param`` on training data and ``1 - param`` otherwise,
    so the train/validation gap is fully controlled by ``param``.
    """

    def __init__(self, param=0.5):
        self.X_subset = None
        self.param = param

    def fit(self, X_subset, y_subset):
        """Remember the training subset and its size; no real fitting."""
        self.X_subset = X_subset
        self.train_sizes = X_subset.shape[0]
        return self

    def predict(self, X):
        raise NotImplementedError

    def score(self, X=None, y=None):
        if self._is_training_data(X):
            return self.param
        return 1 - self.param

    def _is_training_data(self, X):
        # Identity check: only the exact object passed to fit() counts.
        return X is self.X_subset
class MockEstimatorWithSingleFitCallAllowed(MockEstimatorWithParameter):
    """Dummy classifier that disallows repeated calls of fit method"""

    def fit(self, X_subset, y_subset):
        # The flag attribute is only ever created here, so its mere presence
        # means fit was already called once on this instance.
        assert not hasattr(self, "fit_called_"), "fit is called the second time"
        self.fit_called_ = True
        return super().fit(X_subset, y_subset)

    def predict(self, X):
        raise NotImplementedError
class MockClassifier(ClassifierMixin, BaseEstimator):
    """Dummy classifier to test the cross-validation"""

    def __init__(self, a=0, allow_nd=False):
        # a: arbitrary knob; score() is 1 / (1 + |a|), so it only affects the
        # reported score, never the predictions.
        # allow_nd: when True, fit/predict flatten n-d inputs instead of
        # rejecting them.
        self.a = a
        self.allow_nd = allow_nd

    def fit(
        self,
        X,
        Y=None,
        sample_weight=None,
        class_prior=None,
        sparse_sample_weight=None,
        sparse_param=None,
        dummy_int=None,
        dummy_str=None,
        dummy_obj=None,
        callback=None,
    ):
        """The dummy arguments are to test that this fit function can
        accept non-array arguments through cross-validation, such as:
            - int
            - str (this is actually array-like)
            - object
            - function
        """
        self.dummy_int = dummy_int
        self.dummy_str = dummy_str
        self.dummy_obj = dummy_obj
        if callback is not None:
            callback(self)
        if self.allow_nd:
            X = X.reshape(len(X), -1)
        if X.ndim >= 3 and not self.allow_nd:
            raise ValueError("X cannot be d")
        # Array-like fit params must have been sliced to the fold's length.
        if sample_weight is not None:
            assert sample_weight.shape[0] == X.shape[0], (
                "MockClassifier extra fit_param "
                "sample_weight.shape[0] is {0}, should be {1}".format(
                    sample_weight.shape[0], X.shape[0]
                )
            )
        # NOTE(review): the checks below (and classes_) reference the
        # module-level globals `y` and `P`, not the `Y` argument -- the
        # fit-params tests build their expectations from those same globals,
        # so this looks deliberate; confirm before "fixing" it.
        if class_prior is not None:
            assert class_prior.shape[0] == len(np.unique(y)), (
                "MockClassifier extra fit_param class_prior.shape[0]"
                " is {0}, should be {1}".format(class_prior.shape[0], len(np.unique(y)))
            )
        if sparse_sample_weight is not None:
            fmt = (
                "MockClassifier extra fit_param sparse_sample_weight"
                ".shape[0] is {0}, should be {1}"
            )
            assert sparse_sample_weight.shape[0] == X.shape[0], fmt.format(
                sparse_sample_weight.shape[0], X.shape[0]
            )
        if sparse_param is not None:
            fmt = (
                "MockClassifier extra fit_param sparse_param.shape "
                "is ({0}, {1}), should be ({2}, {3})"
            )
            assert sparse_param.shape == P.shape, fmt.format(
                sparse_param.shape[0],
                sparse_param.shape[1],
                P.shape[0],
                P.shape[1],
            )
        self.classes_ = np.unique(y)
        return self

    def predict(self, T):
        # "Prediction" is simply the first feature column.
        if self.allow_nd:
            T = T.reshape(len(T), -1)
        return T[:, 0]

    def predict_proba(self, T):
        return T

    def score(self, X=None, Y=None):
        # Deterministic score in (0, 1], controlled entirely by `a`.
        return 1.0 / (1 + np.abs(self.a))

    def get_params(self, deep=False):
        return {"a": self.a, "allow_nd": self.allow_nd}
# XXX: use 2D array, since 1D X is being detected as a single sample in
# check_consistent_length
X = np.ones((15, 2))
# 7 classes; several MockClassifier fit-param checks compare against this
# module-level `y`.
y = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 6])
# The number of samples per class needs to be > n_splits,
# for StratifiedKFold(n_splits=3)
y2 = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
# 5x5 identity; MockClassifier.fit validates `sparse_param` against P.shape.
P = np.eye(5)
@pytest.mark.parametrize("coo_container", COO_CONTAINERS)
def test_cross_val_score(coo_container):
    """Smoke-test cross_val_score with dense and sparse X, multi-output y,
    plain-list inputs and 3d arrays."""
    clf = MockClassifier()
    X_sparse = coo_container(X)
    multioutput_y = np.column_stack([y2, y2[::-1]])
    for offset in range(-10, 10):
        clf.a = offset
        # Dense smoke test: every fold reports the mock's constant score.
        fold_scores = cross_val_score(clf, X, y2)
        assert_array_equal(fold_scores, clf.score(X, y2))
        # Sparse X with multi-output y.
        fold_scores = cross_val_score(clf, X_sparse, multioutput_y)
        assert_array_equal(fold_scores, clf.score(X_sparse, multioutput_y))
        # Sparse X with single-output y.
        fold_scores = cross_val_score(clf, X_sparse, y2)
        assert_array_equal(fold_scores, clf.score(X_sparse, y2))
        # Multi-output again, after an intervening single-output run.
        fold_scores = cross_val_score(clf, X_sparse, multioutput_y)
        assert_array_equal(fold_scores, clf.score(X_sparse, multioutput_y))
    # X and y given as plain Python lists.
    def is_list(value):
        return isinstance(value, list)
    clf = CheckingClassifier(check_X=is_list)
    cross_val_score(clf, X.tolist(), y2.tolist(), cv=3)
    clf = CheckingClassifier(check_y=is_list)
    cross_val_score(clf, X, y2.tolist(), cv=3)
    # 3d X is accepted only when the estimator opts in via allow_nd.
    X_3d = X[:, :, np.newaxis]
    cross_val_score(MockClassifier(allow_nd=True), X_3d, y2)
    with pytest.raises(ValueError):
        cross_val_score(MockClassifier(allow_nd=False), X_3d, y2, error_score="raise")
def test_cross_validate_many_jobs():
    # regression test for #12154: cv='warn' with n_jobs>1 trigger a copy of
    # the parameters leading to a failure in check_cv due to cv is 'warn'
    # instead of cv == 'warn'.
    X, y = load_iris(return_X_y=True)
    grid_search = GridSearchCV(SVC(gamma="auto"), param_grid={"C": [1, 10]})
    # Must run without error when parallelized.
    cross_validate(grid_search, X, y, n_jobs=2)
def test_cross_validate_invalid_scoring_param():
    """Invalid multi-metric ``scoring`` specifications must raise informative
    errors; multi-valued scorers must warn and yield NaN scores."""
    X, y = make_classification(random_state=0)
    estimator = MockClassifier()
    # Test the errors
    error_message_regexp = ".*must be unique strings.*"
    # List/tuple of callables should raise a message advising users to use
    # dict of names to callables mapping
    with pytest.raises(ValueError, match=error_message_regexp):
        cross_validate(
            estimator,
            X,
            y,
            scoring=(make_scorer(precision_score), make_scorer(accuracy_score)),
        )
    with pytest.raises(ValueError, match=error_message_regexp):
        cross_validate(estimator, X, y, scoring=(make_scorer(precision_score),))
    # So should empty lists/tuples
    with pytest.raises(ValueError, match=error_message_regexp + "Empty list.*"):
        cross_validate(estimator, X, y, scoring=())
    # So should duplicated entries
    with pytest.raises(ValueError, match=error_message_regexp + "Duplicate.*"):
        cross_validate(estimator, X, y, scoring=("f1_micro", "f1_micro"))
    # Nested Lists should raise a generic error message
    with pytest.raises(ValueError, match=error_message_regexp):
        cross_validate(estimator, X, y, scoring=[[make_scorer(precision_score)]])
    # Empty dict should raise invalid scoring error
    with pytest.raises(ValueError, match="An empty dict"):
        cross_validate(estimator, X, y, scoring=(dict()))
    # A scorer returning a tuple of values (not a single number).
    multiclass_scorer = make_scorer(precision_recall_fscore_support)
    # Multiclass Scorers that return multiple values are not supported yet
    # the warning message we're expecting to see
    warning_message = (
        "Scoring failed. The score on this train-test "
        f"partition for these parameters will be set to {np.nan}. "
        "Details: \n"
    )
    with pytest.warns(UserWarning, match=warning_message):
        cross_validate(estimator, X, y, scoring=multiclass_scorer)
    with pytest.warns(UserWarning, match=warning_message):
        cross_validate(estimator, X, y, scoring={"foo": multiclass_scorer})
def test_cross_validate_nested_estimator():
    # Non-regression test: fitted nested estimators must come back as a plain
    # Python list, one per fold.
    # https://github.com/scikit-learn/scikit-learn/pull/17745
    X, y = load_iris(return_X_y=True)
    pipeline = Pipeline(
        [
            ("imputer", SimpleImputer()),
            ("classifier", MockClassifier()),
        ]
    )
    cv_output = cross_validate(pipeline, X, y, return_estimator=True)
    fitted_pipelines = cv_output["estimator"]
    assert isinstance(fitted_pipelines, list)
    for fitted in fitted_pipelines:
        assert isinstance(fitted, Pipeline)
@pytest.mark.parametrize("use_sparse", [False, True])
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_cross_validate(use_sparse: bool, csr_container):
    """Compute reference train/test mse/r2 scores by manual cross-validation,
    then delegate the actual ``cross_validate`` checks to the two helpers."""
    # Compute train and test mse/r2 scores
    cv = KFold()
    # Regression
    X_reg, y_reg = make_regression(n_samples=30, random_state=0)
    reg = Ridge(random_state=0)
    # Classification
    X_clf, y_clf = make_classification(n_samples=30, random_state=0)
    clf = SVC(kernel="linear", random_state=0)
    if use_sparse:
        X_reg = csr_container(X_reg)
        X_clf = csr_container(X_clf)
    for X, y, est in ((X_reg, y_reg, reg), (X_clf, y_clf, clf)):
        # It's okay to evaluate regression metrics on classification too
        mse_scorer = check_scoring(est, scoring="neg_mean_squared_error")
        r2_scorer = check_scoring(est, scoring="r2")
        train_mse_scores = []
        test_mse_scores = []
        train_r2_scores = []
        test_r2_scores = []
        fitted_estimators = []
        for train, test in cv.split(X, y):
            # Note: `est` is rebound to the fitted clone, so the next
            # iteration clones that fitted estimator (clone resets state).
            est = clone(est).fit(X[train], y[train])
            train_mse_scores.append(mse_scorer(est, X[train], y[train]))
            train_r2_scores.append(r2_scorer(est, X[train], y[train]))
            test_mse_scores.append(mse_scorer(est, X[test], y[test]))
            test_r2_scores.append(r2_scorer(est, X[test], y[test]))
            fitted_estimators.append(est)
        train_mse_scores = np.array(train_mse_scores)
        test_mse_scores = np.array(test_mse_scores)
        train_r2_scores = np.array(train_r2_scores)
        test_r2_scores = np.array(test_r2_scores)
        fitted_estimators = np.array(fitted_estimators)
        scores = (
            train_mse_scores,
            test_mse_scores,
            train_r2_scores,
            test_r2_scores,
            fitted_estimators,
        )
        # To ensure that the test does not suffer from
        # large statistical fluctuations due to slicing small datasets,
        # we pass the cross-validation instance
        check_cross_validate_single_metric(est, X, y, scores, cv)
        check_cross_validate_multi_metric(est, X, y, scores, cv)
def check_cross_validate_single_metric(clf, X, y, scores, cv):
    """Check ``cross_validate`` output when a single metric is requested.

    Parameters
    ----------
    clf : estimator to cross-validate.
    X, y : data used to compute the reference scores.
    scores : tuple of reference arrays (train_mse, test_mse, train_r2,
        test_r2, fitted_estimators) from manual cross-validation.
    cv : the cross-validation instance used for the references.
    """
    (
        train_mse_scores,
        test_mse_scores,
        train_r2_scores,
        test_r2_scores,
        fitted_estimators,
    ) = scores
    # Test single metric evaluation when scoring is string or singleton list
    for return_train_score, dict_len in ((True, 4), (False, 3)):
        # Single metric passed as a string
        if return_train_score:
            mse_scores_dict = cross_validate(
                clf,
                X,
                y,
                scoring="neg_mean_squared_error",
                return_train_score=True,
                cv=cv,
            )
            assert_array_almost_equal(mse_scores_dict["train_score"], train_mse_scores)
        else:
            mse_scores_dict = cross_validate(
                clf,
                X,
                y,
                scoring="neg_mean_squared_error",
                return_train_score=False,
                cv=cv,
            )
        assert isinstance(mse_scores_dict, dict)
        assert len(mse_scores_dict) == dict_len
        assert_array_almost_equal(mse_scores_dict["test_score"], test_mse_scores)
        # Single metric passed as a list
        if return_train_score:
            # It must be True by default - deprecated
            r2_scores_dict = cross_validate(
                clf, X, y, scoring=["r2"], return_train_score=True, cv=cv
            )
            # Bug fix: a stray ``True`` used to be passed as the third
            # positional argument (``decimal``) of assert_array_almost_equal,
            # silently loosening the comparison to 1 decimal place. Use the
            # default precision, consistent with every other check here.
            assert_array_almost_equal(r2_scores_dict["train_r2"], train_r2_scores)
        else:
            r2_scores_dict = cross_validate(
                clf, X, y, scoring=["r2"], return_train_score=False, cv=cv
            )
        assert isinstance(r2_scores_dict, dict)
        assert len(r2_scores_dict) == dict_len
        assert_array_almost_equal(r2_scores_dict["test_r2"], test_r2_scores)
    # Test return_estimator option
    mse_scores_dict = cross_validate(
        clf, X, y, scoring="neg_mean_squared_error", return_estimator=True, cv=cv
    )
    # Each returned estimator must match the corresponding manually fitted one.
    for k, est in enumerate(mse_scores_dict["estimator"]):
        est_coef = est.coef_.copy()
        if issparse(est_coef):
            est_coef = est_coef.toarray()
        fitted_est_coef = fitted_estimators[k].coef_.copy()
        if issparse(fitted_est_coef):
            fitted_est_coef = fitted_est_coef.toarray()
        assert_almost_equal(est_coef, fitted_est_coef)
        assert_almost_equal(est.intercept_, fitted_estimators[k].intercept_)
def check_cross_validate_multi_metric(clf, X, y, scores, cv):
    """Check ``cross_validate`` output when several metrics are requested,
    via a tuple of names, a dict of scorers, or a dict-returning callable."""
    # Test multimetric evaluation when scoring is a list / dict
    (
        train_mse_scores,
        test_mse_scores,
        train_r2_scores,
        test_r2_scores,
        fitted_estimators,
    ) = scores
    # Callable scorer returning a dict of metric name -> value.
    def custom_scorer(clf, X, y):
        y_pred = clf.predict(X)
        return {
            "r2": r2_score(y, y_pred),
            "neg_mean_squared_error": -mean_squared_error(y, y_pred),
        }
    # The three supported multi-metric specifications, all equivalent here.
    all_scoring = (
        ("r2", "neg_mean_squared_error"),
        {
            "r2": make_scorer(r2_score),
            "neg_mean_squared_error": "neg_mean_squared_error",
        },
        custom_scorer,
    )
    keys_sans_train = {
        "test_r2",
        "test_neg_mean_squared_error",
        "fit_time",
        "score_time",
    }
    keys_with_train = keys_sans_train.union(
        {"train_r2", "train_neg_mean_squared_error"}
    )
    for return_train_score in (True, False):
        for scoring in all_scoring:
            if return_train_score:
                # return_train_score must be True by default - deprecated
                cv_results = cross_validate(
                    clf, X, y, scoring=scoring, return_train_score=True, cv=cv
                )
                assert_array_almost_equal(cv_results["train_r2"], train_r2_scores)
                assert_array_almost_equal(
                    cv_results["train_neg_mean_squared_error"], train_mse_scores
                )
            else:
                cv_results = cross_validate(
                    clf, X, y, scoring=scoring, return_train_score=False, cv=cv
                )
            assert isinstance(cv_results, dict)
            # The exact set of keys depends on return_train_score only.
            assert set(cv_results.keys()) == (
                keys_with_train if return_train_score else keys_sans_train
            )
            assert_array_almost_equal(cv_results["test_r2"], test_r2_scores)
            assert_array_almost_equal(
                cv_results["test_neg_mean_squared_error"], test_mse_scores
            )
            # Make sure all the arrays are of np.ndarray type
            assert isinstance(cv_results["test_r2"], np.ndarray)
            assert isinstance(cv_results["test_neg_mean_squared_error"], np.ndarray)
            assert isinstance(cv_results["fit_time"], np.ndarray)
            assert isinstance(cv_results["score_time"], np.ndarray)
            # Ensure all the times are within sane limits
            assert np.all(cv_results["fit_time"] >= 0)
            assert np.all(cv_results["fit_time"] < 10)
            assert np.all(cv_results["score_time"] >= 0)
            assert np.all(cv_results["score_time"] < 10)
def test_cross_val_score_predict_groups():
    """cross_val_score and cross_val_predict must raise a ValueError when a
    group-aware CV splitter is used without providing ``groups``."""
    X, y = make_classification(n_samples=20, n_classes=2, random_state=0)
    clf = SVC(kernel="linear")
    expected_msg = "The 'groups' parameter should not be None."
    group_splitters = (
        LeaveOneGroupOut(),
        LeavePGroupsOut(2),
        GroupKFold(),
        GroupShuffleSplit(),
    )
    for splitter in group_splitters:
        # Both helpers must propagate the splitter's error.
        for cv_func in (cross_val_score, cross_val_predict):
            with pytest.raises(ValueError, match=expected_msg):
                cv_func(estimator=clf, X=X, y=y, cv=splitter)
def test_cross_val_score_pandas():
    """cross_val_score must pass dataframe/series containers through to the
    estimator untouched (no conversion to ndarray)."""
    # Always exercise the MockDataFrame pair; add real pandas when available.
    container_types = [(MockDataFrame, MockDataFrame)]
    try:
        from pandas import DataFrame, Series
        container_types.append((Series, DataFrame))
    except ImportError:
        pass
    for TargetType, InputFeatureType in container_types:
        # X dataframe, y series
        # 3 fold cross val is used so we need at least 3 samples per class
        X_df = InputFeatureType(X)
        y_ser = TargetType(y2)
        def is_input_type(value):
            return isinstance(value, InputFeatureType)
        def is_target_type(value):
            return isinstance(value, TargetType)
        clf = CheckingClassifier(check_X=is_input_type, check_y=is_target_type)
        cross_val_score(clf, X_df, y_ser, cv=3)
def test_cross_val_score_mask():
    """cross_val_score must accept boolean masks as train/test splits and
    produce the same scores as the equivalent index-based splits."""
    svm = SVC(kernel="linear")
    iris = load_iris()
    X, y = iris.data, iris.target
    kfold = KFold(5)
    scores_indices = cross_val_score(svm, X, y, cv=kfold)
    kfold = KFold(5)
    cv_masks = []
    for train, test in kfold.split(X, y):
        # Convert each index-based split into an equivalent boolean-mask split.
        mask_train = np.zeros(len(y), dtype=bool)
        mask_test = np.zeros(len(y), dtype=bool)
        mask_train[train] = 1
        mask_test[test] = 1
        # Bug fix: the masks were built but (train, test) indices were
        # appended, so boolean-mask support was never actually exercised.
        cv_masks.append((mask_train, mask_test))
    scores_masks = cross_val_score(svm, X, y, cv=cv_masks)
    assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
    """A precomputed linear Gram matrix, the built-in linear kernel and a
    callable kernel must all yield identical cross-validation scores."""
    iris = load_iris()
    X, y = iris.data, iris.target
    gram = np.dot(X, X.T)
    precomputed_svm = SVC(kernel="precomputed")
    score_precomputed = cross_val_score(precomputed_svm, gram, y)
    linear_svm = SVC(kernel="linear")
    score_linear = cross_val_score(linear_svm, X, y)
    assert_array_almost_equal(score_precomputed, score_linear)
    # A callable kernel computing the same Gram matrix must also agree.
    def linear_kernel_callable(a, b):
        return np.dot(a, b.T)
    callable_svm = SVC(kernel=linear_kernel_callable)
    score_callable = cross_val_score(callable_svm, X, y)
    assert_array_almost_equal(score_precomputed, score_callable)
    # With kernel="precomputed", a non-square X must be rejected ...
    precomputed_svm = SVC(kernel="precomputed")
    with pytest.raises(ValueError):
        cross_val_score(precomputed_svm, X, y)
    # ... and so must a kernel that is neither array-like nor sparse.
    with pytest.raises(ValueError):
        cross_val_score(precomputed_svm, gram.tolist(), y)
@pytest.mark.parametrize("coo_container", COO_CONTAINERS)
def test_cross_val_score_fit_params(coo_container):
    """Check that arbitrary fit params (arrays, sparse matrices, scalars,
    objects, callables) are forwarded to the estimator's ``fit``.

    MockClassifier.fit performs the array-length assertions; the callback
    checks the non-array values. Uses the module-level `X`, `y`, `y2`.
    """
    clf = MockClassifier()
    n_samples = X.shape[0]
    n_classes = len(np.unique(y))
    # Sparse sample weights with one nonzero entry; shape must match X rows.
    W_sparse = coo_container(
        (np.array([1]), (np.array([1]), np.array([0]))), shape=(15, 1)
    )
    # Must match the module-level P (5x5 identity) checked in fit.
    P_sparse = coo_container(np.eye(5))
    DUMMY_INT = 42
    DUMMY_STR = "42"
    DUMMY_OBJ = object()
    def assert_fit_params(clf):
        # Function to test that the values are passed correctly to the
        # classifier arguments for non-array type
        assert clf.dummy_int == DUMMY_INT
        assert clf.dummy_str == DUMMY_STR
        assert clf.dummy_obj == DUMMY_OBJ
    fit_params = {
        "sample_weight": np.ones(n_samples),
        "class_prior": np.full(n_classes, 1.0 / n_classes),
        "sparse_sample_weight": W_sparse,
        "sparse_param": P_sparse,
        "dummy_int": DUMMY_INT,
        "dummy_str": DUMMY_STR,
        "dummy_obj": DUMMY_OBJ,
        "callback": assert_fit_params,
    }
    cross_val_score(clf, X, y2, params=fit_params)
def test_cross_val_score_score_func():
    """A custom scorer built with make_scorer must be invoked once per fold
    and its return value reported unchanged."""
    clf = MockClassifier()
    recorded_calls = []
    def score_func(y_test, y_predict):
        # Record every invocation so the fold count can be verified below.
        recorded_calls.append((y_test, y_predict))
        return 1.0
    with warnings.catch_warnings(record=True):
        scoring = make_scorer(score_func)
        score = cross_val_score(clf, X, y, scoring=scoring, cv=3)
    assert_array_equal(score, [1.0, 1.0, 1.0])
    # Exactly one scorer call per fold (cv=3).
    assert len(recorded_calls) == 3
def test_cross_val_score_with_score_func_classification():
    """Default score, explicit accuracy and weighted F1 must coincide for a
    linear SVC on the balanced iris dataset."""
    iris = load_iris()
    clf = SVC(kernel="linear")
    expected = [0.97, 1.0, 0.97, 0.97, 1.0]
    # Default score (should be the accuracy score).
    default_scores = cross_val_score(clf, iris.data, iris.target)
    assert_array_almost_equal(default_scores, expected, 2)
    # Explicit accuracy scoring matches the default estimator score.
    zo_scores = cross_val_score(clf, iris.data, iris.target, scoring="accuracy")
    assert_array_almost_equal(zo_scores, expected, 2)
    # Classes are balanced, so weighted F1 equals accuracy here.
    f1_scores = cross_val_score(clf, iris.data, iris.target, scoring="f1_weighted")
    assert_array_almost_equal(f1_scores, expected, 2)
def test_cross_val_score_with_score_func_regression():
    """Default Ridge score, explicit R^2 and explained variance must agree;
    neg MSE must report negated losses."""
    X, y = make_regression(n_samples=30, n_features=20, n_informative=5, random_state=0)
    reg = Ridge()
    expected_r2 = [0.94, 0.97, 0.97, 0.99, 0.92]
    # Default Ridge score is R^2.
    default_scores = cross_val_score(reg, X, y)
    assert_array_almost_equal(default_scores, expected_r2, 2)
    # Explicit R^2 scoring matches the default estimator score.
    r2_scores = cross_val_score(reg, X, y, scoring="r2")
    assert_array_almost_equal(r2_scores, expected_r2, 2)
    # MSE is a loss, so the reported "scores" are negated.
    neg_mse_scores = cross_val_score(reg, X, y, scoring="neg_mean_squared_error")
    expected_neg_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
    assert_array_almost_equal(neg_mse_scores, expected_neg_mse, 2)
    # Explained variance coincides with R^2 for these folds.
    ev_scores = cross_val_score(reg, X, y, scoring=make_scorer(explained_variance_score))
    assert_array_almost_equal(ev_scores, expected_r2, 2)
@pytest.mark.parametrize("coo_container", COO_CONTAINERS)
def test_permutation_score(coo_container):
    """Check permutation_test_score: significant score on real labels, same
    results with trivial groups and sparse X, custom scorer support, and a
    non-significant p-value on label-independent targets."""
    iris = load_iris()
    X = iris.data
    X_sparse = coo_container(X)
    y = iris.target
    svm = SVC(kernel="linear")
    cv = StratifiedKFold(2)
    score, scores, pvalue = permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy"
    )
    # Real labels: high score, p-value near the minimum.
    assert score > 0.9
    assert_almost_equal(pvalue, 0.0, 1)
    # A single all-ones group must not change the outcome.
    score_group, _, pvalue_group = permutation_test_score(
        svm,
        X,
        y,
        n_permutations=30,
        cv=cv,
        scoring="accuracy",
        groups=np.ones(y.size),
        random_state=0,
    )
    assert score_group == score
    assert pvalue_group == pvalue
    # check that we obtain the same results with a sparse representation
    svm_sparse = SVC(kernel="linear")
    cv_sparse = StratifiedKFold(2)
    score_group, _, pvalue_group = permutation_test_score(
        svm_sparse,
        X_sparse,
        y,
        n_permutations=30,
        cv=cv_sparse,
        scoring="accuracy",
        groups=np.ones(y.size),
        random_state=0,
    )
    assert score_group == score
    assert pvalue_group == pvalue
    # test with custom scoring object
    def custom_score(y_true, y_pred):
        # Accuracy rescaled to [-1, 1]: (correct - incorrect) / n.
        return ((y_true == y_pred).sum() - (y_true != y_pred).sum()) / y_true.shape[0]
    scorer = make_scorer(custom_score)
    score, _, pvalue = permutation_test_score(
        svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0
    )
    assert_almost_equal(score, 0.93, 2)
    assert_almost_equal(pvalue, 0.01, 3)
    # set random y
    y = np.mod(np.arange(len(y)), 3)
    score, scores, pvalue = permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy"
    )
    # Labels unrelated to X: low score, non-significant p-value.
    assert score < 0.5
    assert pvalue > 0.2
def test_permutation_test_score_allow_nans():
    """Smoke test: permutation_test_score accepts NaN features as long as
    the pipeline starts with an imputer."""
    n_samples = 10
    X = np.arange(200, dtype=np.float64).reshape(n_samples, -1)
    X[2, :] = np.nan  # one fully-missing row
    y = np.repeat([0, 1], n_samples // 2)
    pipeline = Pipeline(
        steps=[
            ("imputer", SimpleImputer(strategy="mean", missing_values=np.nan)),
            ("classifier", MockClassifier()),
        ]
    )
    permutation_test_score(pipeline, X, y)
def test_permutation_test_score_params():
    """Check routing of ``sample_weight`` through the ``params`` argument."""
    X = np.arange(100).reshape(10, 10)
    y = np.array([0] * 5 + [1] * 5)
    clf = CheckingClassifier(expected_sample_weight=True)
    # Not passing the expected sample_weight fails inside the mock classifier
    err_msg = r"Expected sample_weight to be passed"
    with pytest.raises(AssertionError, match=err_msg):
        permutation_test_score(clf, X, y)
    # sample_weight of the wrong length raises a ValueError
    err_msg = r"sample_weight.shape == \(1,\), expected \(8,\)!"
    with pytest.raises(ValueError, match=err_msg):
        permutation_test_score(clf, X, y, params={"sample_weight": np.ones(1)})
    permutation_test_score(clf, X, y, params={"sample_weight": np.ones(10)})
def test_cross_val_score_allow_nans():
    """Smoke test: cross_val_score accepts NaN features as long as the
    pipeline starts with an imputer."""
    n_samples = 10
    X = np.arange(200, dtype=np.float64).reshape(n_samples, -1)
    X[2, :] = np.nan  # one fully-missing row
    y = np.repeat([0, 1], n_samples // 2)
    pipeline = Pipeline(
        steps=[
            ("imputer", SimpleImputer(strategy="mean", missing_values=np.nan)),
            ("classifier", MockClassifier()),
        ]
    )
    cross_val_score(pipeline, X, y)
def test_cross_val_score_multilabel():
    """Check cross_val_score with a multilabel target and the different
    precision averaging strategies."""
    X = np.array(
        [
            [-3, 4],
            [2, 4],
            [3, 3],
            [0, 2],
            [-3, 1],
            [-2, 1],
            [0, 0],
            [-2, -1],
            [-1, -2],
            [1, -2],
        ]
    )
    # Two binary labels per sample
    y = np.array(
        [[1, 1], [0, 1], [0, 1], [0, 1], [1, 1], [0, 1], [1, 0], [1, 1], [1, 0], [0, 0]]
    )
    clf = KNeighborsClassifier(n_neighbors=1)
    scoring_micro = make_scorer(precision_score, average="micro")
    scoring_macro = make_scorer(precision_score, average="macro")
    scoring_samples = make_scorer(precision_score, average="samples")
    score_micro = cross_val_score(clf, X, y, scoring=scoring_micro)
    score_macro = cross_val_score(clf, X, y, scoring=scoring_macro)
    score_samples = cross_val_score(clf, X, y, scoring=scoring_samples)
    assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
    assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
    assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
@pytest.mark.parametrize("coo_container", COO_CONTAINERS)
def test_cross_val_predict(coo_container):
    """Check cross_val_predict against a manual CV loop, on dense and sparse
    inputs, with several CV strategies, and with an invalid CV object."""
    X, y = load_diabetes(return_X_y=True)
    cv = KFold()
    est = Ridge()
    # Naive loop (should be same as cross_val_predict):
    preds2 = np.zeros_like(y)
    for train, test in cv.split(X, y):
        est.fit(X[train], y[train])
        preds2[test] = est.predict(X[test])
    preds = cross_val_predict(est, X, y, cv=cv)
    assert_array_almost_equal(preds, preds2)
    preds = cross_val_predict(est, X, y)
    assert len(preds) == len(y)
    cv = LeaveOneOut()
    preds = cross_val_predict(est, X, y, cv=cv)
    assert len(preds) == len(y)
    # Sparsify the input: zero out everything below the median
    Xsp = X.copy()
    Xsp *= Xsp > np.median(Xsp)
    Xsp = coo_container(Xsp)
    preds = cross_val_predict(est, Xsp, y)
    assert_array_almost_equal(len(preds), len(y))
    # Unsupervised estimators are supported too (no y required)
    preds = cross_val_predict(KMeans(n_init="auto"), X)
    assert len(preds) == len(y)
    class BadCV:
        # A "splitter" whose train/test folds do not cover the data
        def split(self, X, y=None, groups=None):
            for i in range(4):
                yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
    with pytest.raises(ValueError):
        cross_val_predict(est, X, y, cv=BadCV())
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/model_selection/tests/test_plot.py | sklearn/model_selection/tests/test_plot.py | import numpy as np
import pytest
from sklearn.datasets import load_iris
from sklearn.model_selection import (
LearningCurveDisplay,
ValidationCurveDisplay,
learning_curve,
validation_curve,
)
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import shuffle
from sklearn.utils._testing import assert_allclose, assert_array_equal
@pytest.fixture
def data():
    """Return the iris dataset, shuffled with a fixed seed."""
    features, target = load_iris(return_X_y=True)
    return shuffle(features, target, random_state=0)
@pytest.mark.parametrize(
    "params, err_type, err_msg",
    [
        ({"std_display_style": "invalid"}, ValueError, "Unknown std_display_style:"),
        ({"score_type": "invalid"}, ValueError, "Unknown score_type:"),
    ],
)
@pytest.mark.parametrize(
    "CurveDisplay, specific_params",
    [
        (ValidationCurveDisplay, {"param_name": "max_depth", "param_range": [1, 3, 5]}),
        (LearningCurveDisplay, {"train_sizes": [0.3, 0.6, 0.9]}),
    ],
)
def test_curve_display_parameters_validation(
    pyplot, data, params, err_type, err_msg, CurveDisplay, specific_params
):
    """Check that we raise a proper error when passing invalid parameters."""
    X, y = data
    estimator = DecisionTreeClassifier(random_state=0)
    # The invalid keyword must be rejected at display-creation time
    with pytest.raises(err_type, match=err_msg):
        CurveDisplay.from_estimator(estimator, X, y, **specific_params, **params)
def test_learning_curve_display_default_usage(pyplot, data):
    """Check the default usage of the LearningCurveDisplay class."""
    X, y = data
    estimator = DecisionTreeClassifier(random_state=0)
    train_sizes = [0.3, 0.6, 0.9]
    display = LearningCurveDisplay.from_estimator(
        estimator, X, y, train_sizes=train_sizes
    )
    import matplotlib as mpl
    # By default the std is drawn with fill_between, not errorbars
    assert display.errorbar_ is None
    assert isinstance(display.lines_, list)
    for line in display.lines_:
        assert isinstance(line, mpl.lines.Line2D)
    assert isinstance(display.fill_between_, list)
    for fill in display.fill_between_:
        assert isinstance(fill, mpl.collections.PolyCollection)
        assert fill.get_alpha() == 0.5
    # Default axis labels and legend entries
    assert display.score_name == "Score"
    assert display.ax_.get_xlabel() == "Number of samples in the training set"
    assert display.ax_.get_ylabel() == "Score"
    _, legend_labels = display.ax_.get_legend_handles_labels()
    assert legend_labels == ["Train", "Test"]
    # The data stored on the display must match a direct learning_curve call
    train_sizes_abs, train_scores, test_scores = learning_curve(
        estimator, X, y, train_sizes=train_sizes
    )
    assert_array_equal(display.train_sizes, train_sizes_abs)
    assert_allclose(display.train_scores, train_scores)
    assert_allclose(display.test_scores, test_scores)
def test_validation_curve_display_default_usage(pyplot, data):
    """Check the default usage of the ValidationCurveDisplay class."""
    X, y = data
    estimator = DecisionTreeClassifier(random_state=0)
    param_name, param_range = "max_depth", [1, 3, 5]
    display = ValidationCurveDisplay.from_estimator(
        estimator, X, y, param_name=param_name, param_range=param_range
    )
    import matplotlib as mpl
    # By default the std is drawn with fill_between, not errorbars
    assert display.errorbar_ is None
    assert isinstance(display.lines_, list)
    for line in display.lines_:
        assert isinstance(line, mpl.lines.Line2D)
    assert isinstance(display.fill_between_, list)
    for fill in display.fill_between_:
        assert isinstance(fill, mpl.collections.PolyCollection)
        assert fill.get_alpha() == 0.5
    # Default axis labels and legend entries; x-axis is the varied parameter
    assert display.score_name == "Score"
    assert display.ax_.get_xlabel() == f"{param_name}"
    assert display.ax_.get_ylabel() == "Score"
    _, legend_labels = display.ax_.get_legend_handles_labels()
    assert legend_labels == ["Train", "Test"]
    # The data stored on the display must match a direct validation_curve call
    train_scores, test_scores = validation_curve(
        estimator, X, y, param_name=param_name, param_range=param_range
    )
    assert_array_equal(display.param_range, param_range)
    assert_allclose(display.train_scores, train_scores)
    assert_allclose(display.test_scores, test_scores)
@pytest.mark.parametrize(
    "CurveDisplay, specific_params",
    [
        (ValidationCurveDisplay, {"param_name": "max_depth", "param_range": [1, 3, 5]}),
        (LearningCurveDisplay, {"train_sizes": [0.3, 0.6, 0.9]}),
    ],
)
def test_curve_display_negate_score(pyplot, data, CurveDisplay, specific_params):
    """Check the behaviour of the `negate_score` parameter calling `from_estimator` and
    `plot`.
    """
    X, y = data
    estimator = DecisionTreeClassifier(max_depth=1, random_state=0)
    # Without negation, scores are plotted as-is
    negate_score = False
    display = CurveDisplay.from_estimator(
        estimator, X, y, **specific_params, negate_score=negate_score
    )
    positive_scores = display.lines_[0].get_data()[1]
    assert (positive_scores >= 0).all()
    assert display.ax_.get_ylabel() == "Score"
    # With negation, the same values are flipped in sign and relabelled
    negate_score = True
    display = CurveDisplay.from_estimator(
        estimator, X, y, **specific_params, negate_score=negate_score
    )
    negative_scores = display.lines_[0].get_data()[1]
    assert (negative_scores <= 0).all()
    assert_allclose(negative_scores, -positive_scores)
    assert display.ax_.get_ylabel() == "Negative score"
    # Negation passed later to `plot` flips the data, while the y-label set
    # at `from_estimator` time is kept
    negate_score = False
    display = CurveDisplay.from_estimator(
        estimator, X, y, **specific_params, negate_score=negate_score
    )
    assert display.ax_.get_ylabel() == "Score"
    display.plot(negate_score=not negate_score)
    assert display.ax_.get_ylabel() == "Score"
    assert (display.lines_[0].get_data()[1] < 0).all()
@pytest.mark.parametrize(
    "score_name, ylabel", [(None, "Score"), ("Accuracy", "Accuracy")]
)
@pytest.mark.parametrize(
    "CurveDisplay, specific_params",
    [
        (ValidationCurveDisplay, {"param_name": "max_depth", "param_range": [1, 3, 5]}),
        (LearningCurveDisplay, {"train_sizes": [0.3, 0.6, 0.9]}),
    ],
)
def test_curve_display_score_name(
    pyplot, data, score_name, ylabel, CurveDisplay, specific_params
):
    """Check that we can overwrite the default score name shown on the y-axis."""
    X, y = data
    estimator = DecisionTreeClassifier(random_state=0)
    display = CurveDisplay.from_estimator(
        estimator, X, y, **specific_params, score_name=score_name
    )
    assert display.ax_.get_ylabel() == ylabel
    # The resolved name is also stored on the display object itself
    X, y = data
    estimator = DecisionTreeClassifier(max_depth=1, random_state=0)
    display = CurveDisplay.from_estimator(
        estimator, X, y, **specific_params, score_name=score_name
    )
    assert display.score_name == ylabel
@pytest.mark.parametrize("std_display_style", (None, "errorbar"))
def test_learning_curve_display_score_type(pyplot, data, std_display_style):
    """Check the behaviour of setting the `score_type` parameter."""
    X, y = data
    estimator = DecisionTreeClassifier(random_state=0)
    train_sizes = [0.3, 0.6, 0.9]
    # Reference values computed directly with learning_curve
    train_sizes_abs, train_scores, test_scores = learning_curve(
        estimator, X, y, train_sizes=train_sizes
    )
    # Only the train curve
    score_type = "train"
    display = LearningCurveDisplay.from_estimator(
        estimator,
        X,
        y,
        train_sizes=train_sizes,
        score_type=score_type,
        std_display_style=std_display_style,
    )
    _, legend_label = display.ax_.get_legend_handles_labels()
    assert legend_label == ["Train"]
    if std_display_style is None:
        assert len(display.lines_) == 1
        assert display.errorbar_ is None
        x_data, y_data = display.lines_[0].get_data()
    else:
        assert display.lines_ is None
        assert len(display.errorbar_) == 1
        x_data, y_data = display.errorbar_[0].lines[0].get_data()
    assert_array_equal(x_data, train_sizes_abs)
    assert_allclose(y_data, train_scores.mean(axis=1))
    # Only the test curve
    score_type = "test"
    display = LearningCurveDisplay.from_estimator(
        estimator,
        X,
        y,
        train_sizes=train_sizes,
        score_type=score_type,
        std_display_style=std_display_style,
    )
    _, legend_label = display.ax_.get_legend_handles_labels()
    assert legend_label == ["Test"]
    if std_display_style is None:
        assert len(display.lines_) == 1
        assert display.errorbar_ is None
        x_data, y_data = display.lines_[0].get_data()
    else:
        assert display.lines_ is None
        assert len(display.errorbar_) == 1
        x_data, y_data = display.errorbar_[0].lines[0].get_data()
    assert_array_equal(x_data, train_sizes_abs)
    assert_allclose(y_data, test_scores.mean(axis=1))
    # Both curves: train is drawn first, test second
    score_type = "both"
    display = LearningCurveDisplay.from_estimator(
        estimator,
        X,
        y,
        train_sizes=train_sizes,
        score_type=score_type,
        std_display_style=std_display_style,
    )
    _, legend_label = display.ax_.get_legend_handles_labels()
    assert legend_label == ["Train", "Test"]
    if std_display_style is None:
        assert len(display.lines_) == 2
        assert display.errorbar_ is None
        x_data_train, y_data_train = display.lines_[0].get_data()
        x_data_test, y_data_test = display.lines_[1].get_data()
    else:
        assert display.lines_ is None
        assert len(display.errorbar_) == 2
        x_data_train, y_data_train = display.errorbar_[0].lines[0].get_data()
        x_data_test, y_data_test = display.errorbar_[1].lines[0].get_data()
    assert_array_equal(x_data_train, train_sizes_abs)
    assert_allclose(y_data_train, train_scores.mean(axis=1))
    assert_array_equal(x_data_test, train_sizes_abs)
    assert_allclose(y_data_test, test_scores.mean(axis=1))
@pytest.mark.parametrize("std_display_style", (None, "errorbar"))
def test_validation_curve_display_score_type(pyplot, data, std_display_style):
    """Check the behaviour of setting the `score_type` parameter."""
    X, y = data
    estimator = DecisionTreeClassifier(random_state=0)
    param_name, param_range = "max_depth", [1, 3, 5]
    # Reference values computed directly with validation_curve
    train_scores, test_scores = validation_curve(
        estimator, X, y, param_name=param_name, param_range=param_range
    )
    # Only the train curve
    score_type = "train"
    display = ValidationCurveDisplay.from_estimator(
        estimator,
        X,
        y,
        param_name=param_name,
        param_range=param_range,
        score_type=score_type,
        std_display_style=std_display_style,
    )
    _, legend_label = display.ax_.get_legend_handles_labels()
    assert legend_label == ["Train"]
    if std_display_style is None:
        assert len(display.lines_) == 1
        assert display.errorbar_ is None
        x_data, y_data = display.lines_[0].get_data()
    else:
        assert display.lines_ is None
        assert len(display.errorbar_) == 1
        x_data, y_data = display.errorbar_[0].lines[0].get_data()
    assert_array_equal(x_data, param_range)
    assert_allclose(y_data, train_scores.mean(axis=1))
    # Only the test curve
    score_type = "test"
    display = ValidationCurveDisplay.from_estimator(
        estimator,
        X,
        y,
        param_name=param_name,
        param_range=param_range,
        score_type=score_type,
        std_display_style=std_display_style,
    )
    _, legend_label = display.ax_.get_legend_handles_labels()
    assert legend_label == ["Test"]
    if std_display_style is None:
        assert len(display.lines_) == 1
        assert display.errorbar_ is None
        x_data, y_data = display.lines_[0].get_data()
    else:
        assert display.lines_ is None
        assert len(display.errorbar_) == 1
        x_data, y_data = display.errorbar_[0].lines[0].get_data()
    assert_array_equal(x_data, param_range)
    assert_allclose(y_data, test_scores.mean(axis=1))
    # Both curves: train is drawn first, test second
    score_type = "both"
    display = ValidationCurveDisplay.from_estimator(
        estimator,
        X,
        y,
        param_name=param_name,
        param_range=param_range,
        score_type=score_type,
        std_display_style=std_display_style,
    )
    _, legend_label = display.ax_.get_legend_handles_labels()
    assert legend_label == ["Train", "Test"]
    if std_display_style is None:
        assert len(display.lines_) == 2
        assert display.errorbar_ is None
        x_data_train, y_data_train = display.lines_[0].get_data()
        x_data_test, y_data_test = display.lines_[1].get_data()
    else:
        assert display.lines_ is None
        assert len(display.errorbar_) == 2
        x_data_train, y_data_train = display.errorbar_[0].lines[0].get_data()
        x_data_test, y_data_test = display.errorbar_[1].lines[0].get_data()
    assert_array_equal(x_data_train, param_range)
    assert_allclose(y_data_train, train_scores.mean(axis=1))
    assert_array_equal(x_data_test, param_range)
    assert_allclose(y_data_test, test_scores.mean(axis=1))
@pytest.mark.parametrize(
    "CurveDisplay, specific_params, expected_xscale",
    [
        (
            ValidationCurveDisplay,
            {"param_name": "max_depth", "param_range": np.arange(1, 5)},
            "linear",
        ),
        (LearningCurveDisplay, {"train_sizes": np.linspace(0.1, 0.9, num=5)}, "linear"),
        (
            ValidationCurveDisplay,
            {
                "param_name": "max_depth",
                "param_range": np.round(np.logspace(0, 2, num=5)).astype(np.int64),
            },
            "log",
        ),
        (LearningCurveDisplay, {"train_sizes": np.logspace(-1, 0, num=5)}, "log"),
    ],
)
def test_curve_display_xscale_auto(
    pyplot, data, CurveDisplay, specific_params, expected_xscale
):
    """Check the behaviour of the x-axis scaling depending on the data provided."""
    X, y = data
    estimator = DecisionTreeClassifier(random_state=0)
    # Evenly-spaced values yield a linear axis; log-spaced values a log axis
    display = CurveDisplay.from_estimator(estimator, X, y, **specific_params)
    assert display.ax_.get_xscale() == expected_xscale
@pytest.mark.parametrize(
    "CurveDisplay, specific_params",
    [
        (ValidationCurveDisplay, {"param_name": "max_depth", "param_range": [1, 3, 5]}),
        (LearningCurveDisplay, {"train_sizes": [0.3, 0.6, 0.9]}),
    ],
)
def test_curve_display_std_display_style(pyplot, data, CurveDisplay, specific_params):
    """Check the behaviour of the parameter `std_display_style`."""
    X, y = data
    estimator = DecisionTreeClassifier(random_state=0)
    import matplotlib as mpl
    # None: only the mean lines, no std representation
    std_display_style = None
    display = CurveDisplay.from_estimator(
        estimator,
        X,
        y,
        **specific_params,
        std_display_style=std_display_style,
    )
    assert len(display.lines_) == 2
    for line in display.lines_:
        assert isinstance(line, mpl.lines.Line2D)
    assert display.errorbar_ is None
    assert display.fill_between_ is None
    _, legend_label = display.ax_.get_legend_handles_labels()
    assert len(legend_label) == 2
    # "fill_between": mean lines plus one shaded band per curve
    std_display_style = "fill_between"
    display = CurveDisplay.from_estimator(
        estimator,
        X,
        y,
        **specific_params,
        std_display_style=std_display_style,
    )
    assert len(display.lines_) == 2
    for line in display.lines_:
        assert isinstance(line, mpl.lines.Line2D)
    assert display.errorbar_ is None
    assert len(display.fill_between_) == 2
    for fill_between in display.fill_between_:
        assert isinstance(fill_between, mpl.collections.PolyCollection)
    _, legend_label = display.ax_.get_legend_handles_labels()
    assert len(legend_label) == 2
    # "errorbar": errorbar containers replace the plain lines entirely
    std_display_style = "errorbar"
    display = CurveDisplay.from_estimator(
        estimator,
        X,
        y,
        **specific_params,
        std_display_style=std_display_style,
    )
    assert display.lines_ is None
    assert len(display.errorbar_) == 2
    for errorbar in display.errorbar_:
        assert isinstance(errorbar, mpl.container.ErrorbarContainer)
    assert display.fill_between_ is None
    _, legend_label = display.ax_.get_legend_handles_labels()
    assert len(legend_label) == 2
@pytest.mark.parametrize(
    "CurveDisplay, specific_params",
    [
        (ValidationCurveDisplay, {"param_name": "max_depth", "param_range": [1, 3, 5]}),
        (LearningCurveDisplay, {"train_sizes": [0.3, 0.6, 0.9]}),
    ],
)
def test_curve_display_plot_kwargs(pyplot, data, CurveDisplay, specific_params):
    """Check the behaviour of the different plotting keyword arguments: `line_kw`,
    `fill_between_kw`, and `errorbar_kw`."""
    X, y = data
    estimator = DecisionTreeClassifier(random_state=0)
    # line_kw / fill_between_kw are forwarded to the matching artists
    std_display_style = "fill_between"
    line_kw = {"color": "red"}
    fill_between_kw = {"color": "red", "alpha": 1.0}
    display = CurveDisplay.from_estimator(
        estimator,
        X,
        y,
        **specific_params,
        std_display_style=std_display_style,
        line_kw=line_kw,
        fill_between_kw=fill_between_kw,
    )
    assert display.lines_[0].get_color() == "red"
    assert_allclose(
        display.fill_between_[0].get_facecolor(),
        [[1.0, 0.0, 0.0, 1.0]],  # trust me, it's red
    )
    # errorbar_kw is forwarded to the errorbar containers
    std_display_style = "errorbar"
    errorbar_kw = {"color": "red"}
    display = CurveDisplay.from_estimator(
        estimator,
        X,
        y,
        **specific_params,
        std_display_style=std_display_style,
        errorbar_kw=errorbar_kw,
    )
    assert display.errorbar_[0].lines[0].get_color() == "red"
@pytest.mark.parametrize(
    "param_range, xscale",
    [([5, 10, 15], "linear"), ([-50, 5, 50, 500], "symlog"), ([5, 50, 500], "log")],
)
def test_validation_curve_xscale_from_param_range_provided_as_a_list(
    pyplot, data, param_range, xscale
):
    """Check the induced xscale from the provided param_range values."""
    X, y = data
    estimator = DecisionTreeClassifier(random_state=0)
    param_name = "max_depth"
    # Log-spaced positive values -> "log"; values spanning negative and
    # positive -> "symlog"; otherwise -> "linear"
    display = ValidationCurveDisplay.from_estimator(
        estimator,
        X,
        y,
        param_name=param_name,
        param_range=param_range,
    )
    assert display.ax_.get_xscale() == xscale
@pytest.mark.parametrize(
    "Display, params",
    [
        (LearningCurveDisplay, {}),
        (ValidationCurveDisplay, {"param_name": "max_depth", "param_range": [1, 3, 5]}),
    ],
)
def test_subclassing_displays(pyplot, data, Display, params):
    """Check that named constructors return the correct type when subclassed.
    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/pull/27675
    """
    X, y = data
    estimator = DecisionTreeClassifier(random_state=0)
    class SubclassOfDisplay(Display):
        # Trivial subclass: from_estimator must instantiate this type
        pass
    display = SubclassOfDisplay.from_estimator(estimator, X, y, **params)
    assert isinstance(display, SubclassOfDisplay)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/model_selection/tests/test_search.py | sklearn/model_selection/tests/test_search.py | """Test the search module"""
import pickle
import re
import sys
import warnings
from collections.abc import Iterable, Sized
from functools import partial
from io import StringIO
from itertools import chain, product
from types import GeneratorType
import numpy as np
import pytest
from scipy.stats import bernoulli, expon, uniform
from sklearn import config_context
from sklearn.base import BaseEstimator, ClassifierMixin, clone, is_classifier
from sklearn.cluster import KMeans
from sklearn.compose import ColumnTransformer
from sklearn.datasets import (
make_blobs,
make_classification,
make_multilabel_classification,
)
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.exceptions import FitFailedWarning
from sklearn.experimental import enable_halving_search_cv # noqa: F401
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.impute import SimpleImputer
from sklearn.linear_model import (
LinearRegression,
LogisticRegression,
Ridge,
SGDClassifier,
)
from sklearn.metrics import (
accuracy_score,
confusion_matrix,
f1_score,
make_scorer,
r2_score,
recall_score,
roc_auc_score,
)
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.model_selection import (
GridSearchCV,
GroupKFold,
GroupShuffleSplit,
HalvingGridSearchCV,
KFold,
LeaveOneGroupOut,
LeavePGroupsOut,
ParameterGrid,
ParameterSampler,
RandomizedSearchCV,
StratifiedKFold,
StratifiedShuffleSplit,
train_test_split,
)
from sklearn.model_selection._search import (
BaseSearchCV,
_yield_masked_array_for_each_param,
)
from sklearn.model_selection.tests.common import OneTimeSplitter
from sklearn.naive_bayes import ComplementNB
from sklearn.neighbors import KernelDensity, KNeighborsClassifier, LocalOutlierFactor
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.preprocessing import (
OneHotEncoder,
OrdinalEncoder,
SplineTransformer,
StandardScaler,
)
from sklearn.svm import SVC, LinearSVC
from sklearn.tests.metadata_routing_common import (
ConsumingScorer,
_Registry,
check_recorded_metadata,
)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils._array_api import (
_get_namespace_device_dtype_ids,
yield_namespace_device_dtype_combinations,
)
from sklearn.utils._mocking import CheckingClassifier, MockDataFrame
from sklearn.utils._testing import (
MinimalClassifier,
MinimalRegressor,
MinimalTransformer,
_array_api_for_tests,
assert_allclose,
assert_allclose_dense_sparse,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
set_random_state,
)
from sklearn.utils.estimator_checks import _enforce_estimator_tags_y
from sklearn.utils.fixes import CSR_CONTAINERS
from sklearn.utils.validation import _num_samples
# The following estimators expose minimal, hand-written scikit-learn APIs,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(ClassifierMixin, BaseEstimator):
    """Dummy classifier to test the parameter search algorithms"""
    def __init__(self, foo_param=0):
        self.foo_param = foo_param
    def fit(self, X, Y):
        # Only record the classes; no real learning happens.
        assert len(X) == len(Y)
        self.classes_ = np.unique(Y)
        return self
    def predict(self, T):
        # Returns the number of test samples, not real predictions.
        return T.shape[0]
    def transform(self, X):
        return X + self.foo_param
    def inverse_transform(self, X):
        return X - self.foo_param
    # All prediction-like methods share the same trivial implementation.
    predict_proba = predict
    predict_log_proba = predict
    decision_function = predict
    def score(self, X=None, Y=None):
        # Deterministic score that depends only on foo_param, so the search
        # has known best candidates (every foo_param > 1 ties at 1.0).
        if self.foo_param > 1:
            score = 1.0
        else:
            score = 0.0
        return score
    def get_params(self, deep=False):
        return {"foo_param": self.foo_param}
    def set_params(self, **params):
        self.foo_param = params["foo_param"]
        return self
class LinearSVCNoScore(LinearSVC):
    """A LinearSVC classifier that has no score method."""
    @property
    def score(self):
        # Make attribute access fail so hasattr(estimator, "score") is False.
        raise AttributeError
# Tiny, linearly separable toy dataset shared by many tests below.
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
    """Assert that iterating *grid* yields the same items, in the same
    order, as indexing it from ``0`` to ``len(grid) - 1``."""
    indexed_items = []
    for position in range(len(grid)):
        indexed_items.append(grid[position])
    assert list(grid) == indexed_items
@pytest.mark.parametrize("klass", [ParameterGrid, partial(ParameterSampler, n_iter=10)])
@pytest.mark.parametrize(
    "input, error_type, error_message",
    [
        (0, TypeError, r"Parameter .* a dict or a list, got: 0 of type int"),
        ([{"foo": [0]}, 0], TypeError, r"Parameter .* is not a dict \(0\)"),
        (
            {"foo": 0},
            TypeError,
            r"Parameter (grid|distribution) for parameter 'foo' (is not|needs to be) "
            r"(a list or a numpy array|iterable or a distribution).*",
        ),
    ],
)
def test_validate_parameter_input(klass, input, error_type, error_message):
    """Check that invalid parameter specifications raise informative errors."""
    with pytest.raises(error_type, match=error_message):
        klass(input)
def test_parameter_grid():
    """Check iteration, length, indexing and the empty-grid special case of
    ParameterGrid."""
    # Test basic properties of ParameterGrid.
    params1 = {"foo": [1, 2, 3]}
    grid1 = ParameterGrid(params1)
    assert isinstance(grid1, Iterable)
    assert isinstance(grid1, Sized)
    assert len(grid1) == 3
    assert_grid_iter_equals_getitem(grid1)
    params2 = {"foo": [4, 2], "bar": ["ham", "spam", "eggs"]}
    grid2 = ParameterGrid(params2)
    assert len(grid2) == 6
    # loop to assert we can iterate over the grid multiple times
    for i in range(2):
        # tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
        points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
        assert points == set(
            ("bar", x, "foo", y) for x, y in product(params2["bar"], params2["foo"])
        )
    assert_grid_iter_equals_getitem(grid2)
    # Special case: empty grid (useful to get default estimator settings)
    empty = ParameterGrid({})
    assert len(empty) == 1
    assert list(empty) == [{}]
    assert_grid_iter_equals_getitem(empty)
    with pytest.raises(IndexError):
        empty[1]
    # A list of grids may include an empty one
    has_empty = ParameterGrid([{"C": [1, 10]}, {}, {"C": [0.5]}])
    assert len(has_empty) == 4
    assert list(has_empty) == [{"C": 1}, {"C": 10}, {}, {"C": 0.5}]
    assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
    """Test that the best estimator contains the right value for foo_param.

    Also smoke-tests score/predict_proba/decision_function/transform on the
    fitted search and checks that an invalid ``scoring`` string raises at
    fit time.
    """
    clf = MockClassifier()
    grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]}, cv=2, verbose=3)
    # make sure it selects the smallest parameter in case of ties
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        # Swallow the verbose output; restore stdout even if fit raises so a
        # failure here does not silence the rest of the test session.
        grid_search.fit(X, y)
    finally:
        sys.stdout = old_stdout
    assert grid_search.best_estimator_.foo_param == 2
    assert_array_equal(grid_search.cv_results_["param_foo_param"].data, [1, 2, 3])
    # Smoke test the score etc:
    grid_search.score(X, y)
    grid_search.predict_proba(X)
    grid_search.decision_function(X)
    grid_search.transform(X)
    # Test exception handling on scoring
    grid_search.scoring = "sklearn"
    with pytest.raises(ValueError):
        grid_search.fit(X, y)
def test_grid_search_pipeline_steps():
    """Check that parameters that are estimators are cloned before fitting."""
    # check that parameters that are estimators are cloned before fitting
    pipe = Pipeline([("regressor", LinearRegression())])
    param_grid = {"regressor": [LinearRegression(), Ridge()]}
    grid_search = GridSearchCV(pipe, param_grid, cv=2)
    grid_search.fit(X, y)
    regressor_results = grid_search.cv_results_["param_regressor"]
    assert isinstance(regressor_results[0], LinearRegression)
    assert isinstance(regressor_results[1], Ridge)
    # The estimators recorded in cv_results_ must be unfitted clones
    assert not hasattr(regressor_results[0], "coef_")
    assert not hasattr(regressor_results[1], "coef_")
    assert regressor_results[0] is not grid_search.best_estimator_
    assert regressor_results[1] is not grid_search.best_estimator_
    # check that we didn't modify the parameter grid that was passed
    assert not hasattr(param_grid["regressor"][0], "coef_")
    assert not hasattr(param_grid["regressor"][1], "coef_")
@pytest.mark.parametrize("SearchCV", [GridSearchCV, RandomizedSearchCV])
def test_SearchCV_with_fit_params(SearchCV):
    """Check that fit parameters are validated and forwarded to the estimator."""
    X = np.arange(100).reshape(10, 10)
    y = np.array([0] * 5 + [1] * 5)
    clf = CheckingClassifier(expected_fit_params=["spam", "eggs"])
    searcher = SearchCV(clf, {"foo_param": [1, 2, 3]}, cv=2, error_score="raise")
    # The CheckingClassifier generates an assertion error if
    # a parameter is missing or has length != len(X).
    err_msg = r"Expected fit parameter\(s\) \['eggs'\] not seen."
    with pytest.raises(AssertionError, match=err_msg):
        searcher.fit(X, y, spam=np.ones(10))
    err_msg = "Fit parameter spam has length 1; expected"
    with pytest.raises(AssertionError, match=err_msg):
        searcher.fit(X, y, spam=np.ones(1), eggs=np.zeros(10))
    searcher.fit(X, y, spam=np.ones(10), eggs=np.zeros(10))
def test_grid_search_no_score():
    """Test grid-search on a classifier that has no score function."""
    # Test grid-search on classifier that has no score function.
    clf = LinearSVC(random_state=0)
    X, y = make_blobs(random_state=0, centers=2)
    Cs = [0.1, 1, 10]
    clf_no_score = LinearSVCNoScore(random_state=0)
    grid_search = GridSearchCV(clf, {"C": Cs}, scoring="accuracy")
    grid_search.fit(X, y)
    grid_search_no_score = GridSearchCV(clf_no_score, {"C": Cs}, scoring="accuracy")
    # smoketest grid search
    grid_search_no_score.fit(X, y)
    # check that best params are equal
    assert grid_search_no_score.best_params_ == grid_search.best_params_
    # check that we can call score and that it gives the correct result
    assert grid_search.score(X, y) == grid_search_no_score.score(X, y)
    # giving no scoring function raises an error
    grid_search_no_score = GridSearchCV(clf_no_score, {"C": Cs})
    with pytest.raises(TypeError, match="no scoring"):
        grid_search_no_score.fit([[1]])
def test_grid_search_score_method():
    """Check that GridSearchCV.score honours the ``scoring`` parameter over
    the estimator's own score method."""
    X, y = make_classification(n_samples=100, n_classes=2, flip_y=0.2, random_state=0)
    clf = LinearSVC(random_state=0)
    grid = {"C": [0.1]}
    search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
    search_accuracy = GridSearchCV(clf, grid, scoring="accuracy").fit(X, y)
    search_no_score_method_auc = GridSearchCV(
        LinearSVCNoScore(), grid, scoring="roc_auc"
    ).fit(X, y)
    search_auc = GridSearchCV(clf, grid, scoring="roc_auc").fit(X, y)
    # Check warning only occurs in situation where behavior changed:
    # estimator requires score method to compete with scoring parameter
    score_no_scoring = search_no_scoring.score(X, y)
    score_accuracy = search_accuracy.score(X, y)
    score_no_score_auc = search_no_score_method_auc.score(X, y)
    score_auc = search_auc.score(X, y)
    # ensure the test is sane
    assert score_auc < 1.0
    assert score_accuracy < 1.0
    assert score_auc != score_accuracy
    assert_almost_equal(score_accuracy, score_no_scoring)
    assert_almost_equal(score_auc, score_no_score_auc)
def test_grid_search_groups():
    """Check group-aware CV splitters require ``groups`` in GridSearchCV."""
    # Check if ValueError (when groups is None) propagates to GridSearchCV
    # And also check if groups is correctly passed to the cv object
    rng = np.random.RandomState(0)
    X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
    groups = rng.randint(0, 3, 15)
    clf = LinearSVC(random_state=0)
    grid = {"C": [1]}
    group_cvs = [
        LeaveOneGroupOut(),
        LeavePGroupsOut(2),
        GroupKFold(n_splits=3),
        GroupShuffleSplit(),
    ]
    error_msg = "The 'groups' parameter should not be None."
    for cv in group_cvs:
        gs = GridSearchCV(clf, grid, cv=cv)
        with pytest.raises(ValueError, match=error_msg):
            gs.fit(X, y)
        gs.fit(X, y, groups=groups)
    non_group_cvs = [StratifiedKFold(), StratifiedShuffleSplit()]
    for cv in non_group_cvs:
        gs = GridSearchCV(clf, grid, cv=cv)
        # Should not raise an error
        gs.fit(X, y)
def test_classes__property():
    """Check availability and content of the `classes_` attribute."""
    # Test that classes_ property matches best_estimator_.classes_
    X = np.arange(100).reshape(10, 10)
    y = np.array([0] * 5 + [1] * 5)
    Cs = [0.1, 1, 10]
    grid_search = GridSearchCV(LinearSVC(random_state=0), {"C": Cs})
    grid_search.fit(X, y)
    assert_array_equal(grid_search.best_estimator_.classes_, grid_search.classes_)
    # Test that regressors do not have a classes_ attribute
    grid_search = GridSearchCV(Ridge(), {"alpha": [1.0, 2.0]})
    grid_search.fit(X, y)
    assert not hasattr(grid_search, "classes_")
    # Test that the grid searcher has no classes_ attribute before it's fit
    grid_search = GridSearchCV(LinearSVC(random_state=0), {"C": Cs})
    assert not hasattr(grid_search, "classes_")
    # Test that the grid searcher has no classes_ attribute without a refit
    grid_search = GridSearchCV(LinearSVC(random_state=0), {"C": Cs}, refit=False)
    grid_search.fit(X, y)
    assert not hasattr(grid_search, "classes_")
def test_trivial_cv_results_attr():
    """`cv_results_` must exist even for a degenerate one-point search."""
    # Test search over a "grid" with only one point.
    clf = MockClassifier()
    grid_search = GridSearchCV(clf, {"foo_param": [1]}, cv=2)
    grid_search.fit(X, y)
    assert hasattr(grid_search, "cv_results_")
    random_search = RandomizedSearchCV(clf, {"foo_param": [0]}, n_iter=1, cv=2)
    random_search.fit(X, y)
    assert hasattr(random_search, "cv_results_")
def test_no_refit():
    # Test that GSCV can be used for model selection alone without refitting
    clf = MockClassifier()
    # NOTE(review): `scoring` is never forwarded to GridSearchCV below, so
    # both loop iterations exercise the same single-metric configuration —
    # confirm whether `scoring=scoring` was intended here.
    for scoring in [None, ["accuracy", "precision"]]:
        grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]}, refit=False, cv=2)
        grid_search.fit(X, y)
        # With refit=False only the search results are available, not a model.
        assert (
            not hasattr(grid_search, "best_estimator_")
            and hasattr(grid_search, "best_index_")
            and hasattr(grid_search, "best_params_")
        )
        # Make sure the functions predict/transform etc. raise meaningful
        # error messages
        for fn_name in (
            "predict",
            "predict_proba",
            "predict_log_proba",
            "transform",
            "inverse_transform",
        ):
            outer_msg = f"has no attribute '{fn_name}'"
            inner_msg = (
                f"`refit=False`. {fn_name} is available only after "
                "refitting on the best parameters"
            )
            with pytest.raises(AttributeError, match=outer_msg) as exec_info:
                getattr(grid_search, fn_name)(X)
            # The informative message is chained as the __cause__.
            assert isinstance(exec_info.value.__cause__, AttributeError)
            assert inner_msg in str(exec_info.value.__cause__)
    # Test that an invalid refit param raises appropriate error messages
    error_msg = (
        "For multi-metric scoring, the parameter refit must be set to a scorer key"
    )
    for refit in [True, "recall", "accuracy"]:
        with pytest.raises(ValueError, match=error_msg):
            GridSearchCV(
                clf, {}, refit=refit, scoring={"acc": "accuracy", "prec": "precision"}
            ).fit(X, y)
def test_grid_search_error():
    """Inconsistent X/y lengths must raise at fit time."""
    # Test that grid search will capture errors on data with different length
    X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
    clf = LinearSVC()
    cv = GridSearchCV(clf, {"C": [0.1, 1.0]})
    with pytest.raises(ValueError):
        cv.fit(X_[:180], y_)
def test_grid_search_one_grid_point():
    """A one-point grid search must be equivalent to a direct fit."""
    X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
    param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
    clf = SVC(gamma="auto")
    cv = GridSearchCV(clf, param_dict)
    cv.fit(X_, y_)
    # Fit the same configuration directly and compare the learned model.
    clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
    clf.fit(X_, y_)
    assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_when_param_grid_includes_range():
    """Check that a `range` object is a valid parameter-grid value.

    The best estimator must contain the right value for `foo_param`.
    """
    clf = MockClassifier()
    # `range(1, 4)` enumerates the candidates [1, 2, 3].
    # (The previous dead `grid_search = None` assignment, immediately
    # overwritten below, has been removed.)
    grid_search = GridSearchCV(clf, {"foo_param": range(1, 4)}, cv=2)
    grid_search.fit(X, y)
    assert grid_search.best_estimator_.foo_param == 2
def test_grid_search_bad_param_grid():
    """Invalid parameter-grid values must raise informative errors."""
    X, y = make_classification(n_samples=10, n_features=5, random_state=0)
    # A bare scalar is rejected: candidates must be wrapped in a sequence.
    param_dict = {"C": 1}
    clf = SVC(gamma="auto")
    error_msg = re.escape(
        "Parameter grid for parameter 'C' needs to be a list or "
        "a numpy array, but got 1 (of type int) instead. Single "
        "values need to be wrapped in a list with one element."
    )
    search = GridSearchCV(clf, param_dict)
    with pytest.raises(TypeError, match=error_msg):
        search.fit(X, y)
    # An empty sequence of candidates is rejected.
    param_dict = {"C": []}
    clf = SVC()
    error_msg = re.escape(
        "Parameter grid for parameter 'C' need to be a non-empty sequence, got: []"
    )
    search = GridSearchCV(clf, param_dict)
    with pytest.raises(ValueError, match=error_msg):
        search.fit(X, y)
    # A string is a sequence but is explicitly disallowed as a grid value.
    param_dict = {"C": "1,2,3"}
    clf = SVC(gamma="auto")
    error_msg = re.escape(
        "Parameter grid for parameter 'C' needs to be a list or a numpy array, "
        "but got '1,2,3' (of type str) instead. Single values need to be "
        "wrapped in a list with one element."
    )
    search = GridSearchCV(clf, param_dict)
    with pytest.raises(TypeError, match=error_msg):
        search.fit(X, y)
    # A 2d array of candidates is rejected as well.
    param_dict = {"C": np.ones((3, 2))}
    clf = SVC()
    search = GridSearchCV(clf, param_dict)
    with pytest.raises(ValueError):
        search.fit(X, y)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_grid_search_sparse(csr_container):
    # Test that grid search works with both dense and sparse matrices
    X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
    # Dense fit on the first 180 samples.
    clf = LinearSVC()
    cv = GridSearchCV(clf, {"C": [0.1, 1.0]})
    cv.fit(X_[:180], y_[:180])
    y_pred = cv.predict(X_[180:])
    C = cv.best_estimator_.C
    # Sparse fit on the same data (converted to COO) must select the same C
    # and give mostly identical predictions.
    X_ = csr_container(X_)
    clf = LinearSVC()
    cv = GridSearchCV(clf, {"C": [0.1, 1.0]})
    cv.fit(X_[:180].tocoo(), y_[:180])
    y_pred2 = cv.predict(X_[180:])
    C2 = cv.best_estimator_.C
    assert np.mean(y_pred == y_pred2) >= 0.9
    assert C == C2
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_grid_search_sparse_scoring(csr_container):
    """Check a custom scorer behaves identically on dense and sparse input."""
    X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
    clf = LinearSVC()
    cv = GridSearchCV(clf, {"C": [0.1, 1.0]}, scoring="f1")
    cv.fit(X_[:180], y_[:180])
    y_pred = cv.predict(X_[180:])
    C = cv.best_estimator_.C
    X_ = csr_container(X_)
    clf = LinearSVC()
    cv = GridSearchCV(clf, {"C": [0.1, 1.0]}, scoring="f1")
    cv.fit(X_[:180], y_[:180])
    y_pred2 = cv.predict(X_[180:])
    C2 = cv.best_estimator_.C
    assert_array_equal(y_pred, y_pred2)
    assert C == C2
    # Smoke test the score
    # np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
    #                            cv.score(X_[:180], y[:180]))
    # test loss where greater is worse
    def f1_loss(y_true_, y_pred_):
        # Negated f1: lower is better, hence greater_is_better=False below.
        return -f1_score(y_true_, y_pred_)

    F1Loss = make_scorer(f1_loss, greater_is_better=False)
    cv = GridSearchCV(clf, {"C": [0.1, 1.0]}, scoring=F1Loss)
    cv.fit(X_[:180], y_[:180])
    y_pred3 = cv.predict(X_[180:])
    C3 = cv.best_estimator_.C
    # A sign-flipped scorer must select the same model.
    assert C == C3
    assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
    # Test that grid search works when the input features are given in the
    # form of a precomputed kernel matrix
    X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
    # compute the training kernel matrix corresponding to the linear kernel
    K_train = np.dot(X_[:180], X_[:180].T)
    y_train = y_[:180]
    clf = SVC(kernel="precomputed")
    cv = GridSearchCV(clf, {"C": [0.1, 1.0]})
    cv.fit(K_train, y_train)
    # NOTE(review): `>= 0` is vacuous for a mean accuracy/score — this is a
    # pure smoke test; confirm whether a stricter threshold was intended.
    assert cv.best_score_ >= 0
    # compute the test kernel matrix
    K_test = np.dot(X_[180:], X_[:180].T)
    y_test = y_[180:]
    y_pred = cv.predict(K_test)
    # NOTE(review): always true (mean of booleans >= 0); smoke test only.
    assert np.mean(y_pred == y_test) >= 0
    # test error is raised when the precomputed kernel is not array-like
    # or sparse
    with pytest.raises(ValueError):
        cv.fit(K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
    # Test that grid search returns an error with a non-square precomputed
    # training kernel matrix
    K_train = np.zeros((10, 20))  # 10 samples but 20 columns: invalid kernel
    y_train = np.ones((10,))
    clf = SVC(kernel="precomputed")
    cv = GridSearchCV(clf, {"C": [0.1, 1.0]})
    with pytest.raises(ValueError):
        cv.fit(K_train, y_train)
class BrokenClassifier(BaseEstimator):
    """Broken classifier that cannot be fit twice"""

    def __init__(self, parameter=None):
        self.parameter = parameter

    def fit(self, X, y):
        # Refuse a second fit: the marker attribute exists after the first
        # call, so the assert trips on any re-fit of the same instance.
        assert not hasattr(self, "has_been_fit_")
        self.has_been_fit_ = True

    def predict(self, X):
        # Constant prediction; only the shape of X is used.
        return np.zeros(X.shape[0])
@pytest.mark.filterwarnings("ignore::sklearn.exceptions.UndefinedMetricWarning")
def test_refit():
    # Regression test for bug in refitting
    # Simulates re-fitting a broken estimator; this used to break with
    # sparse SVMs.
    # BrokenClassifier asserts it is never fit twice on the same instance, so
    # this passes only if the refit uses a clone.
    X = np.arange(100).reshape(10, 10)
    y = np.array([0] * 5 + [1] * 5)
    clf = GridSearchCV(
        BrokenClassifier(), [{"parameter": [0, 1]}], scoring="precision", refit=True
    )
    clf.fit(X, y)
def test_refit_callable():
    """
    Test refit=callable, which adds flexibility in identifying the
    "best" estimator.
    """

    def refit_callable(cv_results):
        """
        A dummy function tests `refit=callable` interface.
        Return the index of a model that has the least
        `mean_test_score`.
        """
        # Fit a dummy clf with `refit=True` to get a list of keys in
        # clf.cv_results_.
        X, y = make_classification(n_samples=100, n_features=4, random_state=42)
        clf = GridSearchCV(
            LinearSVC(random_state=42),
            {"C": [0.01, 0.1, 1]},
            scoring="precision",
            refit=True,
        )
        clf.fit(X, y)
        # Ensure that `best_index_ != 0` for this dummy clf
        assert clf.best_index_ != 0
        # Assert every key matches those in `cv_results`
        for key in clf.cv_results_.keys():
            assert key in cv_results
        # Deliberately pick the WORST candidate so the outer assertion below
        # can distinguish the callable from the default argmax behavior.
        return cv_results["mean_test_score"].argmin()

    X, y = make_classification(n_samples=100, n_features=4, random_state=42)
    clf = GridSearchCV(
        LinearSVC(random_state=42),
        {"C": [0.01, 0.1, 1]},
        scoring="precision",
        refit=refit_callable,
    )
    clf.fit(X, y)
    assert clf.best_index_ == 0
    # Ensure `best_score_` is disabled when using `refit=callable`
    assert not hasattr(clf, "best_score_")
def test_refit_callable_invalid_type():
    """
    Test implementation catches the errors when 'best_index_' returns an
    invalid result.
    """

    def refit_callable_invalid_type(cv_results):
        """
        A dummy function tests when returned 'best_index_' is not integer.
        """
        return None

    X, y = make_classification(n_samples=100, n_features=4, random_state=42)
    clf = GridSearchCV(
        LinearSVC(random_state=42),
        {"C": [0.1, 1]},
        scoring="precision",
        refit=refit_callable_invalid_type,
    )
    with pytest.raises(TypeError, match="best_index_ returned is not an integer"):
        clf.fit(X, y)
@pytest.mark.parametrize("out_bound_value", [-1, 2])
@pytest.mark.parametrize("search_cv", [RandomizedSearchCV, GridSearchCV])
def test_refit_callable_out_bound(out_bound_value, search_cv):
    """
    Test implementation catches the errors when 'best_index_' returns an
    out of bound result.
    """

    def refit_callable_out_bound(cv_results):
        """
        A dummy function tests when returned 'best_index_' is out of bounds.
        """
        # Only 2 candidates exist, so -1 and 2 are both out of range here
        # (negative indices are rejected as well).
        return out_bound_value

    X, y = make_classification(n_samples=100, n_features=4, random_state=42)
    clf = search_cv(
        LinearSVC(random_state=42),
        {"C": [0.1, 1]},
        scoring="precision",
        refit=refit_callable_out_bound,
    )
    with pytest.raises(IndexError, match="best_index_ index out of range"):
        clf.fit(X, y)
def test_refit_callable_multi_metric():
    """
    Test refit=callable in multiple metric evaluation setting
    """

    def refit_callable(cv_results):
        """
        A dummy function tests `refit=callable` interface.
        Return the index of a model that has the least
        `mean_test_prec`.
        """
        # Multi-metric results expose one mean_test_<name> column per scorer.
        assert "mean_test_prec" in cv_results
        return cv_results["mean_test_prec"].argmin()

    X, y = make_classification(n_samples=100, n_features=4, random_state=42)
    scoring = {"Accuracy": make_scorer(accuracy_score), "prec": "precision"}
    clf = GridSearchCV(
        LinearSVC(random_state=42),
        {"C": [0.01, 0.1, 1]},
        scoring=scoring,
        refit=refit_callable,
    )
    clf.fit(X, y)
    assert clf.best_index_ == 0
    # Ensure `best_score_` is disabled when using `refit=callable`
    assert not hasattr(clf, "best_score_")
def test_gridsearch_nd():
    """Check GridSearchCV passes >2d X and y through to the estimator."""
    # Pass X as list in GridSearchCV
    X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
    y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)

    def check_X(x):
        # Per-sample shape of the 4d input must survive the CV splitting.
        return x.shape[1:] == (5, 3, 2)

    def check_y(x):
        return x.shape[1:] == (7, 11)

    clf = CheckingClassifier(
        check_X=check_X,
        check_y=check_y,
        methods_to_check=["fit"],
    )
    grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]})
    # `score` is called on the module-level X, y fixtures.
    grid_search.fit(X_4d, y_3d).score(X, y)
    assert hasattr(grid_search, "cv_results_")
def test_X_as_list():
    # Pass X as list in GridSearchCV
    X = np.arange(100).reshape(10, 10)
    y = np.array([0] * 5 + [1] * 5)
    # CheckingClassifier verifies X is still a list when fit receives it.
    clf = CheckingClassifier(
        check_X=lambda x: isinstance(x, list),
        methods_to_check=["fit"],
    )
    cv = KFold(n_splits=3)
    grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]}, cv=cv)
    grid_search.fit(X.tolist(), y).score(X, y)
    assert hasattr(grid_search, "cv_results_")
def test_y_as_list():
    # Pass y as list in GridSearchCV
    X = np.arange(100).reshape(10, 10)
    y = np.array([0] * 5 + [1] * 5)
    # CheckingClassifier verifies y is still a list when fit receives it.
    clf = CheckingClassifier(
        check_y=lambda x: isinstance(x, list),
        methods_to_check=["fit"],
    )
    cv = KFold(n_splits=3)
    grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]}, cv=cv)
    grid_search.fit(X, y.tolist()).score(X, y)
    assert hasattr(grid_search, "cv_results_")
def test_pandas_input():
    # check cross_val_score doesn't destroy pandas dataframe
    types = [(MockDataFrame, MockDataFrame)]
    # Also exercise real pandas types when pandas is installed.
    try:
        from pandas import DataFrame, Series

        types.append((DataFrame, Series))
    except ImportError:
        pass
    X = np.arange(100).reshape(10, 10)
    y = np.array([0] * 5 + [1] * 5)
    for InputFeatureType, TargetType in types:
        # X dataframe, y series
        X_df, y_ser = InputFeatureType(X), TargetType(y)

        def check_df(x):
            # The dataframe type must be preserved through the CV machinery.
            return isinstance(x, InputFeatureType)

        def check_series(x):
            return isinstance(x, TargetType)

        clf = CheckingClassifier(check_X=check_df, check_y=check_series)
        grid_search = GridSearchCV(clf, {"foo_param": [1, 2, 3]})
        grid_search.fit(X_df, y_ser).score(X_df, y_ser)
        grid_search.predict(X_df)
        assert hasattr(grid_search, "cv_results_")
def test_unsupervised_grid_search():
    # test grid-search with unsupervised estimator
    X, y = make_blobs(n_samples=50, random_state=0)
    km = KMeans(random_state=0, init="random", n_init=1)
    # Multi-metric evaluation unsupervised
    scoring = ["adjusted_rand_score", "fowlkes_mallows_score"]
    for refit in ["adjusted_rand_score", "fowlkes_mallows_score"]:
        grid_search = GridSearchCV(
            km, param_grid=dict(n_clusters=[2, 3, 4]), scoring=scoring, refit=refit
        )
        grid_search.fit(X, y)
        # Both ARI and FMS can find the right number :)
        assert grid_search.best_params_["n_clusters"] == 3
    # Single metric evaluation unsupervised
    grid_search = GridSearchCV(
        km, param_grid=dict(n_clusters=[2, 3, 4]), scoring="fowlkes_mallows_score"
    )
    grid_search.fit(X, y)
    assert grid_search.best_params_["n_clusters"] == 3
    # Now without a score, and without y
    # KMeans's own score (negative inertia) keeps improving with more
    # clusters, hence the largest candidate wins here.
    grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
    grid_search.fit(X)
    assert grid_search.best_params_["n_clusters"] == 4
def test_gridsearch_no_predict():
    # test grid-search with an estimator without predict.
    # slight duplication of a test from KDE
    def custom_scoring(estimator, X):
        # Deterministic scorer that singles out bandwidth == 0.1.
        return 42 if estimator.bandwidth == 0.1 else 0

    X, _ = make_blobs(cluster_std=0.1, random_state=1, centers=[[0, 1], [1, 0], [0, 0]])
    search = GridSearchCV(
        KernelDensity(),
        param_grid=dict(bandwidth=[0.01, 0.1, 1]),
        scoring=custom_scoring,
    )
    search.fit(X)
    assert search.best_params_["bandwidth"] == 0.1
    assert search.best_score_ == 42
def test_param_sampler():
    # test basic properties of param sampler
    param_distributions = {"kernel": ["rbf", "linear"], "C": uniform(0, 1)}
    sampler = ParameterSampler(
        param_distributions=param_distributions, n_iter=10, random_state=0
    )
    samples = [x for x in sampler]
    assert len(samples) == 10
    for sample in samples:
        assert sample["kernel"] in ["rbf", "linear"]
        assert 0 <= sample["C"] <= 1
    # test that repeated calls yield identical parameters
    # ... both for a finite, discrete distribution (sampled without
    # replacement) ...
    param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
    sampler = ParameterSampler(
        param_distributions=param_distributions, n_iter=3, random_state=0
    )
    assert [x for x in sampler] == [x for x in sampler]
    # ... and for a continuous scipy distribution.
    param_distributions = {"C": uniform(0, 1)}
    sampler = ParameterSampler(
        param_distributions=param_distributions, n_iter=10, random_state=0
    )
    assert [x for x in sampler] == [x for x in sampler]
def check_cv_results_array_types(
    search, param_keys, score_keys, expected_cv_results_kinds
):
    # Check if the search `cv_results`'s array are of correct types
    # `expected_cv_results_kinds` maps each param key to its expected numpy
    # dtype kind character (e.g. "i", "f", "O").
    cv_results = search.cv_results_
    # Parameter columns are masked arrays (masked where a candidate does not
    # define the parameter); score columns must be plain float arrays.
    assert all(isinstance(cv_results[param], np.ma.MaskedArray) for param in param_keys)
    assert {
        key: cv_results[key].dtype.kind for key in param_keys
    } == expected_cv_results_kinds
    assert not any(isinstance(cv_results[key], np.ma.MaskedArray) for key in score_keys)
    assert all(
        cv_results[key].dtype == np.float64
        for key in score_keys
        if not key.startswith("rank")
    )
    # Rank columns are int32, one per scorer in the multi-metric case.
    scorer_keys = search.scorer_.keys() if search.multimetric_ else ["score"]
    for key in scorer_keys:
        assert cv_results["rank_test_%s" % key].dtype == np.int32
def check_cv_results_keys(cv_results, param_keys, score_keys, n_cand, extra_keys=()):
    # Test the search.cv_results_ contains all the required results
    all_keys = param_keys + score_keys + extra_keys
    # "params" (the list of candidate dicts) is always present in addition.
    assert_array_equal(sorted(cv_results.keys()), sorted(all_keys + ("params",)))
    # Every per-key array has one entry per candidate.
    assert all(cv_results[key].shape == (n_cand,) for key in param_keys + score_keys)
def test_grid_search_cv_results():
X, y = make_classification(n_samples=50, n_features=4, random_state=42)
n_grid_points = 6
params = [
dict(
kernel=[
"rbf",
],
C=[1, 10],
gamma=[0.1, 1],
),
dict(
kernel=[
"poly",
],
degree=[1, 2],
),
]
param_keys = ("param_C", "param_degree", "param_gamma", "param_kernel")
score_keys = (
"mean_test_score",
"mean_train_score",
"rank_test_score",
"split0_test_score",
"split1_test_score",
"split2_test_score",
"split0_train_score",
"split1_train_score",
"split2_train_score",
"std_test_score",
"std_train_score",
"mean_fit_time",
"std_fit_time",
"mean_score_time",
"std_score_time",
)
n_candidates = n_grid_points
search = GridSearchCV(SVC(), cv=3, param_grid=params, return_train_score=True)
search.fit(X, y)
cv_results = search.cv_results_
# Check if score and timing are reasonable
assert all(cv_results["rank_test_score"] >= 1)
assert (all(cv_results[k] >= 0) for k in score_keys if k != "rank_test_score")
assert (
all(cv_results[k] <= 1)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
# --- sklearn/_loss/link.py ---
"""
Module contains classes for invertible (and differentiable) link functions.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from abc import ABC, abstractmethod
from dataclasses import dataclass
import numpy as np
from scipy.special import expit, logit
from scipy.stats import gmean
from sklearn.utils.extmath import softmax
@dataclass
class Interval:
    """A real interval with optionally inclusive endpoints.

    Attributes
    ----------
    low, high : float
        Lower and upper bound of the interval; must satisfy low <= high.
    low_inclusive, high_inclusive : bool
        Whether the corresponding bound belongs to the interval.
    """

    low: float
    high: float
    low_inclusive: bool
    high_inclusive: bool

    def __post_init__(self):
        """Check that low <= high"""
        if self.low > self.high:
            raise ValueError(
                f"One must have low <= high; got low={self.low}, high={self.high}."
            )

    def includes(self, x):
        """Test whether all values of x are in interval range.

        Parameters
        ----------
        x : ndarray
            Array whose elements are tested to be in interval range.

        Returns
        -------
        result : bool
        """
        lower_ok = (
            np.greater_equal(x, self.low)
            if self.low_inclusive
            else np.greater(x, self.low)
        )
        if not np.all(lower_ok):
            return False
        upper_ok = (
            np.less_equal(x, self.high)
            if self.high_inclusive
            else np.less(x, self.high)
        )
        # np.all returns numpy.bool_; coerce to a builtin bool.
        return bool(np.all(upper_ok))
def _inclusive_low_high(interval, dtype=np.float64):
"""Generate values low and high to be within the interval range.
This is used in tests only.
Returns
-------
low, high : tuple
The returned values low and high lie within the interval.
"""
eps = 10 * np.finfo(dtype).eps
if interval.low == -np.inf:
low = -1e10
elif interval.low < 0:
low = interval.low * (1 - eps) + eps
else:
low = interval.low * (1 + eps) + eps
if interval.high == np.inf:
high = 1e10
elif interval.high < 0:
high = interval.high * (1 + eps) - eps
else:
high = interval.high * (1 - eps) - eps
return low, high
class BaseLink(ABC):
    """Abstract base class for differentiable, invertible link functions.

    Convention:
        - link function g: raw_prediction = g(y_pred)
        - inverse link h: y_pred = h(raw_prediction)

    For (generalized) linear models, `raw_prediction = X @ coef` is the so
    called linear predictor, and `y_pred = h(raw_prediction)` is the predicted
    conditional (on X) expected value of the target `y_true`.

    The methods are not implemented as staticmethods in case a link function needs
    parameters.
    """

    is_multiclass = False  # used for testing only

    # Usually, raw_prediction may be any real number and y_pred is an open
    # interval.
    # interval_raw_prediction = Interval(-np.inf, np.inf, False, False)
    # Subclasses narrow this to the range their inverse link can produce.
    interval_y_pred = Interval(-np.inf, np.inf, False, False)

    @abstractmethod
    def link(self, y_pred, out=None):
        """Compute the link function g(y_pred).

        The link function maps (predicted) target values to raw predictions,
        i.e. `g(y_pred) = raw_prediction`.

        Parameters
        ----------
        y_pred : array
            Predicted target values.
        out : array
            A location into which the result is stored. If provided, it must
            have a shape that the inputs broadcast to. If not provided or None,
            a freshly-allocated array is returned.

        Returns
        -------
        out : array
            Output array, element-wise link function.
        """

    @abstractmethod
    def inverse(self, raw_prediction, out=None):
        """Compute the inverse link function h(raw_prediction).

        The inverse link function maps raw predictions to predicted target
        values, i.e. `h(raw_prediction) = y_pred`.

        Parameters
        ----------
        raw_prediction : array
            Raw prediction values (in link space).
        out : array
            A location into which the result is stored. If provided, it must
            have a shape that the inputs broadcast to. If not provided or None,
            a freshly-allocated array is returned.

        Returns
        -------
        out : array
            Output array, element-wise inverse link function.
        """
class IdentityLink(BaseLink):
    """The identity link function g(x)=x."""

    def link(self, y_pred, out=None):
        # Without an output buffer, hand back the input unchanged; otherwise
        # copy into the caller-provided buffer.
        if out is None:
            return y_pred
        np.copyto(out, y_pred)
        return out

    # The identity is its own inverse.
    inverse = link
class LogLink(BaseLink):
    """The log link function g(x)=log(x)."""

    # log is only defined for strictly positive predictions.
    interval_y_pred = Interval(0, np.inf, False, False)

    def link(self, y_pred, out=None):
        return np.log(y_pred, out=out)

    def inverse(self, raw_prediction, out=None):
        # h(x) = exp(x) is the inverse of the natural log.
        return np.exp(raw_prediction, out=out)
class LogitLink(BaseLink):
    """The logit link function g(x)=logit(x)."""

    # logit maps the open unit interval onto the real line.
    interval_y_pred = Interval(0, 1, False, False)

    def link(self, y_pred, out=None):
        return logit(y_pred, out=out)

    def inverse(self, raw_prediction, out=None):
        # expit (the logistic sigmoid) inverts logit.
        return expit(raw_prediction, out=out)
class HalfLogitLink(BaseLink):
    """Half the logit link function g(x)=1/2 * logit(x).

    Used for the exponential loss.
    """

    interval_y_pred = Interval(0, 1, False, False)

    def link(self, y_pred, out=None):
        # Compute the logit (into `out` when given), then halve in place.
        result = logit(y_pred, out=out)
        result *= 0.5
        return result

    def inverse(self, raw_prediction, out=None):
        # h(x) = expit(2x) inverts g(x) = logit(x) / 2.
        return expit(2 * raw_prediction, out)
class MultinomialLogit(BaseLink):
    """The symmetric multinomial logit function.

    Convention:
        - y_pred.shape = raw_prediction.shape = (n_samples, n_classes)

    Notes:
        - The inverse link h is the softmax function.
        - The sum is over the second axis, i.e. axis=1 (n_classes).

    We have to choose additional constraints in order to make

        y_pred[k] = exp(raw_pred[k]) / sum(exp(raw_pred[k]), k=0..n_classes-1)

    for n_classes classes identifiable and invertible.
    We choose the symmetric side constraint where the geometric mean response
    is set as reference category, see [2]:

    The symmetric multinomial logit link function for a single data point is
    then defined as

        raw_prediction[k] = g(y_pred[k]) = log(y_pred[k]/gmean(y_pred))
        = log(y_pred[k]) - mean(log(y_pred)).

    Note that this is equivalent to the definition in [1] and implies mean
    centered raw predictions:

        sum(raw_prediction[k], k=0..n_classes-1) = 0.

    For linear models with raw_prediction = X @ coef, this corresponds to
    sum(coef[k], k=0..n_classes-1) = 0, i.e. the sum over classes for every
    feature is zero.

    Reference
    ---------
    .. [1] Friedman, Jerome; Hastie, Trevor; Tibshirani, Robert. "Additive
        logistic regression: a statistical view of boosting" Ann. Statist.
        28 (2000), no. 2, 337--407. doi:10.1214/aos/1016218223.
        https://projecteuclid.org/euclid.aos/1016218223

    .. [2] Zahid, Faisal Maqbool and Gerhard Tutz. "Ridge estimation for
        multinomial logit models with symmetric side constraints."
        Computational Statistics 28 (2013): 1017-1034.
        http://epub.ub.uni-muenchen.de/11001/1/tr067.pdf
    """

    is_multiclass = True
    interval_y_pred = Interval(0, 1, False, False)

    def symmetrize_raw_prediction(self, raw_prediction):
        # Mean-center each row so raw predictions sum to zero over classes.
        return raw_prediction - np.mean(raw_prediction, axis=1)[:, np.newaxis]

    def link(self, y_pred, out=None):
        # Per-row geometric mean serves as the reference category.
        reference = gmean(y_pred, axis=1)
        return np.log(y_pred / reference[:, np.newaxis], out=out)

    def inverse(self, raw_prediction, out=None):
        # The inverse link is the softmax over classes.
        if out is not None:
            np.copyto(out, raw_prediction)
            softmax(out, copy=False)
            return out
        return softmax(raw_prediction, copy=True)
# Registry mapping a link's string name to its implementing class.
_LINKS = {
    "identity": IdentityLink,
    "log": LogLink,
    "logit": LogitLink,
    "half_logit": HalfLogitLink,
    "multinomial_logit": MultinomialLogit,
}
# --- sklearn/_loss/loss.py ---
"""
This module contains loss classes suitable for fitting.
It is not part of the public API.
Specific losses are used for regression, binary classification or multiclass
classification.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# Goals:
# - Provide a common private module for loss functions/classes.
# - To be used in:
# - LogisticRegression
# - PoissonRegressor, GammaRegressor, TweedieRegressor
# - HistGradientBoostingRegressor, HistGradientBoostingClassifier
# - GradientBoostingRegressor, GradientBoostingClassifier
# - SGDRegressor, SGDClassifier
# - Replace link module of GLMs.
import numbers
import numpy as np
from scipy.special import xlogy
from sklearn._loss._loss import (
CyAbsoluteError,
CyExponentialLoss,
CyHalfBinomialLoss,
CyHalfGammaLoss,
CyHalfMultinomialLoss,
CyHalfPoissonLoss,
CyHalfSquaredError,
CyHalfTweedieLoss,
CyHalfTweedieLossIdentity,
CyHuberLoss,
CyPinballLoss,
)
from sklearn._loss.link import (
HalfLogitLink,
IdentityLink,
Interval,
LogitLink,
LogLink,
MultinomialLogit,
)
from sklearn.utils import check_scalar
from sklearn.utils.stats import _weighted_percentile
# Note: The shape of raw_prediction for multiclass classifications are
# - GradientBoostingClassifier: (n_samples, n_classes)
# - HistGradientBoostingClassifier: (n_classes, n_samples)
#
# Note: Instead of inheritance like
#
# class BaseLoss(BaseLink, CyLossFunction):
# ...
#
# # Note: Naturally, we would inherit in the following order
# # class HalfSquaredError(IdentityLink, CyHalfSquaredError, BaseLoss)
# # But because of https://github.com/cython/cython/issues/4350 we set BaseLoss as
# # the last one. This, of course, changes the MRO.
# class HalfSquaredError(IdentityLink, CyHalfSquaredError, BaseLoss):
#
# we use composition. This way we improve maintainability by avoiding the above
# mentioned Cython edge case and have easier to understand code (which method calls
# which code).
class BaseLoss:
"""Base class for a loss function of 1-dimensional targets.
Conventions:
- y_true.shape = sample_weight.shape = (n_samples,)
- y_pred.shape = raw_prediction.shape = (n_samples,)
- If is_multiclass is true (multiclass classification), then
y_pred.shape = raw_prediction.shape = (n_samples, n_classes)
Note that this corresponds to the return value of decision_function.
y_true, y_pred, sample_weight and raw_prediction must either be all float64
or all float32.
gradient and hessian must be either both float64 or both float32.
Note that y_pred = link.inverse(raw_prediction).
Specific loss classes can inherit specific link classes to satisfy
BaseLink's abstractmethods.
Parameters
----------
sample_weight : {None, ndarray}
If sample_weight is None, the hessian might be constant.
n_classes : {None, int}
The number of classes for classification, else None.
Attributes
----------
closs: CyLossFunction
link : BaseLink
interval_y_true : Interval
Valid interval for y_true
interval_y_pred : Interval
Valid Interval for y_pred
differentiable : bool
Indicates whether or not loss function is differentiable in
raw_prediction everywhere.
need_update_leaves_values : bool
Indicates whether decision trees in gradient boosting need to uptade
leave values after having been fit to the (negative) gradients.
approx_hessian : bool
Indicates whether the hessian is approximated or exact. If,
approximated, it should be larger or equal to the exact one.
constant_hessian : bool
Indicates whether the hessian is one for this loss.
is_multiclass : bool
Indicates whether n_classes > 2 is allowed.
"""
# For gradient boosted decision trees:
# This variable indicates whether the loss requires the leaves values to
# be updated once the tree has been trained. The trees are trained to
# predict a Newton-Raphson step (see grower._finalize_leaf()). But for
# some losses (e.g. least absolute deviation) we need to adjust the tree
# values to account for the "line search" of the gradient descent
# procedure. See the original paper Greedy Function Approximation: A
# Gradient Boosting Machine by Friedman
# (https://statweb.stanford.edu/~jhf/ftp/trebst.pdf) for the theory.
differentiable = True
need_update_leaves_values = False
is_multiclass = False
    def __init__(self, closs, link, n_classes=None):
        """Store the Cython loss, the link function and bookkeeping flags.

        Parameters
        ----------
        closs : CyLossFunction
            Cython implementation of the pointwise loss computations.
        link : BaseLink
            Link function mapping y_pred to raw_prediction and back.
        n_classes : {None, int}
            The number of classes for classification, else None.
        """
        self.closs = closs
        self.link = link
        # Subclasses may overwrite these flags; see the class docstring.
        self.approx_hessian = False
        self.constant_hessian = False
        self.n_classes = n_classes
        self.interval_y_true = Interval(-np.inf, np.inf, False, False)
        # The valid y_pred range is dictated by the link function.
        self.interval_y_pred = self.link.interval_y_pred

    def in_y_true_range(self, y):
        """Return True if y is in the valid range of y_true.

        Parameters
        ----------
        y : ndarray
        """
        return self.interval_y_true.includes(y)

    def in_y_pred_range(self, y):
        """Return True if y is in the valid range of y_pred.

        Parameters
        ----------
        y : ndarray
        """
        return self.interval_y_pred.includes(y)

    def loss(
        self,
        y_true,
        raw_prediction,
        sample_weight=None,
        loss_out=None,
        n_threads=1,
    ):
        """Compute the pointwise loss value for each input.

        Parameters
        ----------
        y_true : C-contiguous array of shape (n_samples,)
            Observed, true target values.
        raw_prediction : C-contiguous array of shape (n_samples,) or array of \
            shape (n_samples, n_classes)
            Raw prediction values (in link space).
        sample_weight : None or C-contiguous array of shape (n_samples,)
            Sample weights.
        loss_out : None or C-contiguous array of shape (n_samples,)
            A location into which the result is stored. If None, a new array
            might be created.
        n_threads : int, default=1
            Might use openmp thread parallelism.

        Returns
        -------
        loss : array of shape (n_samples,)
            Element-wise loss function.
        """
        if loss_out is None:
            loss_out = np.empty_like(y_true)
        # Be graceful to shape (n_samples, 1) -> (n_samples,)
        if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:
            raw_prediction = raw_prediction.squeeze(1)
        # Delegate the actual computation to the Cython implementation,
        # writing results into loss_out.
        self.closs.loss(
            y_true=y_true,
            raw_prediction=raw_prediction,
            sample_weight=sample_weight,
            loss_out=loss_out,
            n_threads=n_threads,
        )
        return loss_out

    def loss_gradient(
        self,
        y_true,
        raw_prediction,
        sample_weight=None,
        loss_out=None,
        gradient_out=None,
        n_threads=1,
    ):
        """Compute loss and gradient w.r.t. raw_prediction for each input.

        Parameters
        ----------
        y_true : C-contiguous array of shape (n_samples,)
            Observed, true target values.
        raw_prediction : C-contiguous array of shape (n_samples,) or array of \
            shape (n_samples, n_classes)
            Raw prediction values (in link space).
        sample_weight : None or C-contiguous array of shape (n_samples,)
            Sample weights.
        loss_out : None or C-contiguous array of shape (n_samples,)
            A location into which the loss is stored. If None, a new array
            might be created.
        gradient_out : None or C-contiguous array of shape (n_samples,) or array \
            of shape (n_samples, n_classes)
            A location into which the gradient is stored. If None, a new array
            might be created.
        n_threads : int, default=1
            Might use openmp thread parallelism.

        Returns
        -------
        loss : array of shape (n_samples,)
            Element-wise loss function.

        gradient : array of shape (n_samples,) or (n_samples, n_classes)
            Element-wise gradients.
        """
        # Allocate missing output buffers, matching the dtype of whichever
        # buffer the caller did provide (loss and gradient must agree).
        if loss_out is None:
            if gradient_out is None:
                loss_out = np.empty_like(y_true)
                gradient_out = np.empty_like(raw_prediction)
            else:
                loss_out = np.empty_like(y_true, dtype=gradient_out.dtype)
        elif gradient_out is None:
            gradient_out = np.empty_like(raw_prediction, dtype=loss_out.dtype)
        # Be graceful to shape (n_samples, 1) -> (n_samples,)
        if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:
            raw_prediction = raw_prediction.squeeze(1)
        if gradient_out.ndim == 2 and gradient_out.shape[1] == 1:
            gradient_out = gradient_out.squeeze(1)
        self.closs.loss_gradient(
            y_true=y_true,
            raw_prediction=raw_prediction,
            sample_weight=sample_weight,
            loss_out=loss_out,
            gradient_out=gradient_out,
            n_threads=n_threads,
        )
        return loss_out, gradient_out

    def gradient(
        self,
        y_true,
        raw_prediction,
        sample_weight=None,
        gradient_out=None,
        n_threads=1,
    ):
        """Compute gradient of loss w.r.t raw_prediction for each input.

        Parameters
        ----------
        y_true : C-contiguous array of shape (n_samples,)
            Observed, true target values.
        raw_prediction : C-contiguous array of shape (n_samples,) or array of \
            shape (n_samples, n_classes)
            Raw prediction values (in link space).
        sample_weight : None or C-contiguous array of shape (n_samples,)
            Sample weights.
        gradient_out : None or C-contiguous array of shape (n_samples,) or array \
            of shape (n_samples, n_classes)
            A location into which the result is stored. If None, a new array
            might be created.
        n_threads : int, default=1
            Might use openmp thread parallelism.

        Returns
        -------
        gradient : array of shape (n_samples,) or (n_samples, n_classes)
            Element-wise gradients.
        """
        if gradient_out is None:
            gradient_out = np.empty_like(raw_prediction)
        # Be graceful to shape (n_samples, 1) -> (n_samples,)
        if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:
            raw_prediction = raw_prediction.squeeze(1)
        if gradient_out.ndim == 2 and gradient_out.shape[1] == 1:
            gradient_out = gradient_out.squeeze(1)
        self.closs.gradient(
            y_true=y_true,
            raw_prediction=raw_prediction,
            sample_weight=sample_weight,
            gradient_out=gradient_out,
            n_threads=n_threads,
        )
        return gradient_out
    def gradient_hessian(
        self,
        y_true,
        raw_prediction,
        sample_weight=None,
        gradient_out=None,
        hessian_out=None,
        n_threads=1,
    ):
        """Compute gradient and hessian of loss w.r.t raw_prediction.

        Parameters
        ----------
        y_true : C-contiguous array of shape (n_samples,)
            Observed, true target values.
        raw_prediction : C-contiguous array of shape (n_samples,) or array of \
            shape (n_samples, n_classes)
            Raw prediction values (in link space).
        sample_weight : None or C-contiguous array of shape (n_samples,)
            Sample weights.
        gradient_out : None or C-contiguous array of shape (n_samples,) or array \
            of shape (n_samples, n_classes)
            A location into which the gradient is stored. If None, a new array
            might be created.
        hessian_out : None or C-contiguous array of shape (n_samples,) or array \
            of shape (n_samples, n_classes)
            A location into which the hessian is stored. If None, a new array
            might be created.
        n_threads : int, default=1
            Might use openmp thread parallelism.

        Returns
        -------
        gradient : arrays of shape (n_samples,) or (n_samples, n_classes)
            Element-wise gradients.
        hessian : arrays of shape (n_samples,) or (n_samples, n_classes)
            Element-wise hessians.
        """
        # Allocate missing output buffers; when exactly one was supplied, the
        # other mirrors its shape and dtype.
        if gradient_out is None:
            if hessian_out is None:
                gradient_out = np.empty_like(raw_prediction)
                hessian_out = np.empty_like(raw_prediction)
            else:
                gradient_out = np.empty_like(hessian_out)
        elif hessian_out is None:
            hessian_out = np.empty_like(gradient_out)

        # Be graceful to shape (n_samples, 1) -> (n_samples,)
        if raw_prediction.ndim == 2 and raw_prediction.shape[1] == 1:
            raw_prediction = raw_prediction.squeeze(1)
        if gradient_out.ndim == 2 and gradient_out.shape[1] == 1:
            gradient_out = gradient_out.squeeze(1)
        if hessian_out.ndim == 2 and hessian_out.shape[1] == 1:
            hessian_out = hessian_out.squeeze(1)

        # The Cython kernel fills gradient_out and hessian_out in place.
        self.closs.gradient_hessian(
            y_true=y_true,
            raw_prediction=raw_prediction,
            sample_weight=sample_weight,
            gradient_out=gradient_out,
            hessian_out=hessian_out,
            n_threads=n_threads,
        )
        return gradient_out, hessian_out
def __call__(self, y_true, raw_prediction, sample_weight=None, n_threads=1):
"""Compute the weighted average loss.
Parameters
----------
y_true : C-contiguous array of shape (n_samples,)
Observed, true target values.
raw_prediction : C-contiguous array of shape (n_samples,) or array of \
shape (n_samples, n_classes)
Raw prediction values (in link space).
sample_weight : None or C-contiguous array of shape (n_samples,)
Sample weights.
n_threads : int, default=1
Might use openmp thread parallelism.
Returns
-------
loss : float
Mean or averaged loss function.
"""
return np.average(
self.loss(
y_true=y_true,
raw_prediction=raw_prediction,
sample_weight=None,
loss_out=None,
n_threads=n_threads,
),
weights=sample_weight,
)
def fit_intercept_only(self, y_true, sample_weight=None):
"""Compute raw_prediction of an intercept-only model.
This can be used as initial estimates of predictions, i.e. before the
first iteration in fit.
Parameters
----------
y_true : array-like of shape (n_samples,)
Observed, true target values.
sample_weight : None or array of shape (n_samples,)
Sample weights.
Returns
-------
raw_prediction : numpy scalar or array of shape (n_classes,)
Raw predictions of an intercept-only model.
"""
# As default, take weighted average of the target over the samples
# axis=0 and then transform into link-scale (raw_prediction).
y_pred = np.average(y_true, weights=sample_weight, axis=0)
eps = 10 * np.finfo(y_pred.dtype).eps
if self.interval_y_pred.low == -np.inf:
a_min = None
elif self.interval_y_pred.low_inclusive:
a_min = self.interval_y_pred.low
else:
a_min = self.interval_y_pred.low + eps
if self.interval_y_pred.high == np.inf:
a_max = None
elif self.interval_y_pred.high_inclusive:
a_max = self.interval_y_pred.high
else:
a_max = self.interval_y_pred.high - eps
if a_min is None and a_max is None:
return self.link.link(y_pred)
else:
return self.link.link(np.clip(y_pred, a_min, a_max))
def constant_to_optimal_zero(self, y_true, sample_weight=None):
"""Calculate term dropped in loss.
With this term added, the loss of perfect predictions is zero.
Parameters
----------
y_true : array-like of shape (n_samples,)
Observed, true target values.
sample_weight : None or array of shape (n_samples,), default=None
Sample weights.
Returns
-------
constant : ndarray of shape (n_samples,)
Constant value to be added to raw predictions so that the loss
of perfect predictions becomes zero.
"""
return np.zeros_like(y_true)
def init_gradient_and_hessian(self, n_samples, dtype=np.float64, order="F"):
"""Initialize arrays for gradients and hessians.
Unless hessians are constant, arrays are initialized with undefined values.
Parameters
----------
n_samples : int
The number of samples, usually passed to `fit()`.
dtype : {np.float64, np.float32}, default=np.float64
The dtype of the arrays gradient and hessian.
order : {'C', 'F'}, default='F'
Order of the arrays gradient and hessian. The default 'F' makes the arrays
contiguous along samples.
Returns
-------
gradient : C-contiguous array of shape (n_samples,) or array of shape \
(n_samples, n_classes)
Empty array (allocated but not initialized) to be used as argument
gradient_out.
hessian : C-contiguous array of shape (n_samples,), array of shape
(n_samples, n_classes) or shape (1,)
Empty (allocated but not initialized) array to be used as argument
hessian_out.
If constant_hessian is True (e.g. `HalfSquaredError`), the array is
initialized to ``1``.
"""
if dtype not in (np.float32, np.float64):
raise ValueError(
"Valid options for 'dtype' are np.float32 and np.float64. "
f"Got dtype={dtype} instead."
)
if self.is_multiclass:
shape = (n_samples, self.n_classes)
else:
shape = (n_samples,)
gradient = np.empty(shape=shape, dtype=dtype, order=order)
if self.constant_hessian:
# If the hessians are constant, we consider them equal to 1.
# - This is correct for HalfSquaredError
# - For AbsoluteError, hessians are actually 0, but they are
# always ignored anyway.
hessian = np.ones(shape=(1,), dtype=dtype)
else:
hessian = np.empty(shape=shape, dtype=dtype, order=order)
return gradient, hessian
# Note: Naturally, we would inherit in the following order
# class HalfSquaredError(IdentityLink, CyHalfSquaredError, BaseLoss)
# But because of https://github.com/cython/cython/issues/4350 we
# set BaseLoss as the last one. This, of course, changes the MRO.
class HalfSquaredError(BaseLoss):
    """Half squared error with identity link, for regression.

    Domain: y_true and y_pred are arbitrary real numbers.
    Link: y_pred = raw_prediction.

    For a sample x_i, the half squared error reads::

        loss(x_i) = 0.5 * (y_true_i - raw_prediction_i)**2

    The 0.5 factor keeps gradients simple and yields a unit hessian
    (matching LightGBM's convention); it is also half the Normal
    distribution deviance.
    """

    def __init__(self, sample_weight=None):
        super().__init__(closs=CyHalfSquaredError(), link=IdentityLink())
        # Without sample weights, every hessian entry equals 1, so a single
        # constant can stand in for the full hessian array.
        self.constant_hessian = sample_weight is None
class AbsoluteError(BaseLoss):
    """Absolute error with identity link, for regression.

    Domain: y_true and y_pred are arbitrary real numbers.
    Link: y_pred = raw_prediction.

    For a sample x_i::

        loss(x_i) = |y_true_i - raw_prediction_i|

    The true hessian is 0 almost everywhere (and undefined at the kink),
    hence differentiable=False. Optimization routines like in HGBT need a
    hessian > 0, so the proxy value 1 is used instead.
    """

    differentiable = False
    need_update_leaves_values = True

    def __init__(self, sample_weight=None):
        super().__init__(closs=CyAbsoluteError(), link=IdentityLink())
        self.approx_hessian = True
        self.constant_hessian = sample_weight is None

    def fit_intercept_only(self, y_true, sample_weight=None):
        """Compute raw_prediction of an intercept-only model.

        The optimal intercept for the absolute error is the (weighted)
        median of the target over the sample axis.
        """
        if sample_weight is None:
            return np.median(y_true, axis=0)
        return _weighted_percentile(y_true, sample_weight, 50)
class PinballLoss(BaseLoss):
    """Quantile loss aka pinball loss, for regression.

    Domain: y_true and y_pred are arbitrary real numbers; quantile in (0, 1).
    Link: y_pred = raw_prediction.

    For a sample x_i::

        loss(x_i) = rho_{quantile}(y_true_i - raw_prediction_i)

        rho_{quantile}(u) = u * (quantile - 1_{u<0})
                          = -u * (1 - quantile)  if u < 0
                             u * quantile        if u >= 0

    Note: 2 * PinballLoss(quantile=0.5) equals AbsoluteError().

    The true hessian is 0 almost everywhere (and undefined at the kink),
    hence differentiable=False. Optimization routines like in HGBT need a
    hessian > 0, so the proxy value 1 is used instead.

    Additional Attributes
    ---------------------
    quantile : float
        The quantile level of the quantile to be estimated. Must be in range
        (0, 1).
    """

    differentiable = False
    need_update_leaves_values = True

    def __init__(self, sample_weight=None, quantile=0.5):
        check_scalar(
            quantile,
            "quantile",
            target_type=numbers.Real,
            min_val=0,
            max_val=1,
            include_boundaries="neither",
        )
        super().__init__(
            closs=CyPinballLoss(quantile=float(quantile)),
            link=IdentityLink(),
        )
        self.approx_hessian = True
        self.constant_hessian = sample_weight is None

    def fit_intercept_only(self, y_true, sample_weight=None):
        """Compute raw_prediction of an intercept-only model.

        The optimal intercept is the (weighted) target quantile at the
        configured level, taken over the sample axis.
        """
        level = 100 * self.closs.quantile
        if sample_weight is None:
            return np.percentile(y_true, level, axis=0)
        return _weighted_percentile(y_true, sample_weight, level)
class HuberLoss(BaseLoss):
    """Huber loss, for regression.

    Domain: y_true and y_pred are arbitrary real numbers; quantile in (0, 1).
    Link: y_pred = raw_prediction.

    For a sample x_i::

        loss(x_i) = 1/2 * abserr**2            if abserr <= delta
                    delta * (abserr - delta/2) if abserr > delta

        abserr = |y_true_i - raw_prediction_i|
        delta = quantile(abserr, self.quantile)

    Note: HuberLoss(quantile=1) equals HalfSquaredError and
    HuberLoss(quantile=0) equals delta * (AbsoluteError() - delta/2).

    Additional Attributes
    ---------------------
    quantile : float
        The quantile level which defines the breaking point `delta` to
        distinguish between absolute error and squared error. Must be in
        range (0, 1).

    Reference
    ---------
    .. [1] Friedman, J.H. (2001). :doi:`Greedy function approximation: A gradient
      boosting machine <10.1214/aos/1013203451>`.
      Annals of Statistics, 29, 1189-1232.
    """

    differentiable = False
    need_update_leaves_values = True

    def __init__(self, sample_weight=None, quantile=0.9, delta=0.5):
        check_scalar(
            quantile,
            "quantile",
            target_type=numbers.Real,
            min_val=0,
            max_val=1,
            include_boundaries="neither",
        )
        self.quantile = quantile  # This is better stored outside of Cython.
        super().__init__(
            closs=CyHuberLoss(delta=float(delta)),
            link=IdentityLink(),
        )
        self.approx_hessian = True
        self.constant_hessian = False

    def fit_intercept_only(self, y_true, sample_weight=None):
        """Compute raw_prediction of an intercept-only model.

        Robust location estimate: the (weighted) median plus the average of
        the delta-clipped residuals, following the formula before algo 4 in
        Friedman (2001). It is applied to y_true here; an estimator like
        HistGradientBoostingRegressor may call it on residuals
        (y_true - raw_prediction) instead.
        """
        if sample_weight is None:
            median = np.percentile(y_true, 50, axis=0)
        else:
            median = _weighted_percentile(y_true, sample_weight, 50)
        residual = y_true - median
        clipped = np.sign(residual) * np.minimum(self.closs.delta, np.abs(residual))
        return median + np.average(clipped, weights=sample_weight)
class HalfPoissonLoss(BaseLoss):
    """Half Poisson deviance loss with log-link, for regression.

    Domain: y_true in non-negative real numbers; y_pred in positive real
    numbers.
    Link: y_pred = exp(raw_prediction).

    For a sample x_i, half the Poisson deviance reads::

        loss(x_i) = y_true_i * log(y_true_i/exp(raw_prediction_i))
                    - y_true_i + exp(raw_prediction_i)

    Half the Poisson deviance equals the negative log-likelihood up to
    terms constant in raw_prediction and simplifies the gradients. The
    constant term `y_true_i * log(y_true_i) - y_true_i` is skipped as well.
    """

    def __init__(self, sample_weight=None):
        super().__init__(closs=CyHalfPoissonLoss(), link=LogLink())
        # y_true = 0 is valid (inclusive lower bound), unlike y_pred.
        self.interval_y_true = Interval(0, np.inf, True, False)

    def constant_to_optimal_zero(self, y_true, sample_weight=None):
        # The dropped Poisson term; xlogy returns 0 for y_true == 0.
        correction = xlogy(y_true, y_true) - y_true
        if sample_weight is None:
            return correction
        return correction * sample_weight
class HalfGammaLoss(BaseLoss):
    """Half Gamma deviance loss with log-link, for regression.

    Domain: y_true and y_pred in positive real numbers.
    Link: y_pred = exp(raw_prediction).

    For a sample x_i, half the Gamma deviance reads::

        loss(x_i) = log(exp(raw_prediction_i)/y_true_i)
                    + y_true/exp(raw_prediction_i) - 1

    Half the Gamma deviance is proportional to the negative log-likelihood
    up to terms constant in raw_prediction and simplifies the gradients.
    The constant term `-log(y_true_i) - 1` is skipped as well.
    """

    def __init__(self, sample_weight=None):
        super().__init__(closs=CyHalfGammaLoss(), link=LogLink())
        # y_true must be strictly positive (open lower bound).
        self.interval_y_true = Interval(0, np.inf, False, False)

    def constant_to_optimal_zero(self, y_true, sample_weight=None):
        # The dropped Gamma term.
        correction = -np.log(y_true) - 1
        if sample_weight is None:
            return correction
        return correction * sample_weight
class HalfTweedieLoss(BaseLoss):
    """Half Tweedie deviance loss with log-link, for regression.

    Domain:
    y_true in real numbers for power <= 0
    y_true in non-negative real numbers for 0 < power < 2
    y_true in positive real numbers for 2 <= power
    y_pred in positive real numbers
    power in real numbers

    Link:
    y_pred = exp(raw_prediction)

    For a given sample x_i, half Tweedie deviance loss with p=power is defined
    as::

        loss(x_i) = max(y_true_i, 0)**(2-p) / (1-p) / (2-p)
                    - y_true_i * exp(raw_prediction_i)**(1-p) / (1-p)
                    + exp(raw_prediction_i)**(2-p) / (2-p)

    Taking the limits for p=0, 1, 2 gives HalfSquaredError with a log link,
    HalfPoissonLoss and HalfGammaLoss.

    We also skip constant terms, but those are different for p=0, 1, 2.
    Therefore, the loss is not continuous in `power`.

    Note furthermore that although no Tweedie distribution exists for
    0 < power < 1, it still gives a strictly consistent scoring function for
    the expectation.
    """

    def __init__(self, sample_weight=None, power=1.5):
        super().__init__(
            closs=CyHalfTweedieLoss(power=float(power)),
            link=LogLink(),
        )
        # The valid y_true range depends on the power parameter, see the
        # class docstring ("Domain").
        if self.closs.power <= 0:
            self.interval_y_true = Interval(-np.inf, np.inf, False, False)
        elif self.closs.power < 2:
            self.interval_y_true = Interval(0, np.inf, True, False)
        else:
            self.interval_y_true = Interval(0, np.inf, False, False)

    def constant_to_optimal_zero(self, y_true, sample_weight=None):
        # For the special powers 0, 1 and 2, the dropped constant equals the
        # one of the corresponding limiting loss (the generic formula below
        # has poles at p=1 and p=2).
        if self.closs.power == 0:
            return HalfSquaredError().constant_to_optimal_zero(
                y_true=y_true, sample_weight=sample_weight
            )
        elif self.closs.power == 1:
            return HalfPoissonLoss().constant_to_optimal_zero(
                y_true=y_true, sample_weight=sample_weight
            )
        elif self.closs.power == 2:
            return HalfGammaLoss().constant_to_optimal_zero(
                y_true=y_true, sample_weight=sample_weight
            )
        else:
            p = self.closs.power
            term = np.power(np.maximum(y_true, 0), 2 - p) / (1 - p) / (2 - p)
            if sample_weight is not None:
                term *= sample_weight
            return term
class HalfTweedieLossIdentity(BaseLoss):
    """Half Tweedie deviance loss with identity link, for regression.

    Domain:
    y_true in real numbers for power <= 0
    y_true in non-negative real numbers for 0 < power < 2
    y_true in positive real numbers for 2 <= power
    y_pred in positive real numbers for power != 0
    y_pred in real numbers for power = 0
    power in real numbers

    Link:
    y_pred = raw_prediction

    For a given sample x_i, half Tweedie deviance loss with p=power is defined
    as::

        loss(x_i) = max(y_true_i, 0)**(2-p) / (1-p) / (2-p)
                    - y_true_i * raw_prediction_i**(1-p) / (1-p)
                    + raw_prediction_i**(2-p) / (2-p)

    Note that the minimum value of this loss is 0.

    Note furthermore that although no Tweedie distribution exists for
    0 < power < 1, it still gives a strictly consistent scoring function for
    the expectation.
    """

    def __init__(self, sample_weight=None, power=1.5):
        super().__init__(
            closs=CyHalfTweedieLossIdentity(power=float(power)),
            link=IdentityLink(),
        )
        # Valid y_true range depends on the power parameter, see "Domain".
        if self.closs.power <= 0:
            self.interval_y_true = Interval(-np.inf, np.inf, False, False)
        elif self.closs.power < 2:
            self.interval_y_true = Interval(0, np.inf, True, False)
        else:
            self.interval_y_true = Interval(0, np.inf, False, False)

        # With the identity link, y_pred is only unrestricted for power=0
        # (the squared-error limit); otherwise it must be strictly positive.
        if self.closs.power == 0:
            self.interval_y_pred = Interval(-np.inf, np.inf, False, False)
        else:
            self.interval_y_pred = Interval(0, np.inf, False, False)
class HalfBinomialLoss(BaseLoss):
"""Half Binomial deviance loss with logit link, for binary classification.
This is also know as binary cross entropy, log-loss and logistic loss.
Domain:
y_true in [0, 1], i.e. regression on the unit interval
y_pred in (0, 1), i.e. boundaries excluded
Link:
y_pred = expit(raw_prediction)
For a given sample x_i, half Binomial deviance is defined as the negative
log-likelihood of the Binomial/Bernoulli distribution and can be expressed
as::
loss(x_i) = log(1 + exp(raw_pred_i)) - y_true_i * raw_pred_i
See The Elements of Statistical Learning, by Hastie, Tibshirani, Friedman,
section 4.4.1 (about logistic regression).
Note that the formulation works for classification, y = {0, 1}, as well as
logistic regression, y = [0, 1].
If you add `constant_to_optimal_zero` to the loss, you get half the
Bernoulli/binomial deviance.
More details: Inserting the predicted probability y_pred = expit(raw_prediction)
in the loss gives the well known::
loss(x_i) = - y_true_i * log(y_pred_i) - (1 - y_true_i) * log(1 - y_pred_i)
"""
def __init__(self, sample_weight=None):
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/_loss/__init__.py | sklearn/_loss/__init__.py | """
The :mod:`sklearn._loss` module includes loss function classes suitable for
fitting classification and regression tasks.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from sklearn._loss.loss import (
AbsoluteError,
HalfBinomialLoss,
HalfGammaLoss,
HalfMultinomialLoss,
HalfPoissonLoss,
HalfSquaredError,
HalfTweedieLoss,
HalfTweedieLossIdentity,
HuberLoss,
PinballLoss,
)
__all__ = [
"AbsoluteError",
"HalfBinomialLoss",
"HalfGammaLoss",
"HalfMultinomialLoss",
"HalfPoissonLoss",
"HalfSquaredError",
"HalfTweedieLoss",
"HalfTweedieLossIdentity",
"HuberLoss",
"PinballLoss",
]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/_loss/tests/test_loss.py | sklearn/_loss/tests/test_loss.py | import pickle
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from pytest import approx
from scipy.optimize import (
LinearConstraint,
minimize,
minimize_scalar,
newton,
)
from scipy.special import logsumexp
from sklearn._loss.link import IdentityLink, _inclusive_low_high
from sklearn._loss.loss import (
_LOSSES,
AbsoluteError,
BaseLoss,
HalfBinomialLoss,
HalfGammaLoss,
HalfMultinomialLoss,
HalfPoissonLoss,
HalfSquaredError,
HalfTweedieLoss,
HalfTweedieLossIdentity,
HuberLoss,
PinballLoss,
)
from sklearn.utils import assert_all_finite
from sklearn.utils._testing import create_memmap_backed_data, skip_if_32bit
# One default-constructed instance of every registered loss class, extended
# below with additional non-default parametrizations.
ALL_LOSSES = list(_LOSSES.values())

LOSS_INSTANCES = [loss() for loss in ALL_LOSSES]
# HalfTweedieLoss(power=1.5) is already there as default
LOSS_INSTANCES += [
    PinballLoss(quantile=0.25),
    HuberLoss(quantile=0.75),
    HalfTweedieLoss(power=-1.5),
    HalfTweedieLoss(power=0),
    HalfTweedieLoss(power=1),
    HalfTweedieLoss(power=2),
    HalfTweedieLoss(power=3.0),
    HalfTweedieLossIdentity(power=0),
    HalfTweedieLossIdentity(power=1),
    HalfTweedieLossIdentity(power=2),
    HalfTweedieLossIdentity(power=3.0),
]
def loss_instance_name(param):
    """Return a readable pytest id for a parametrized loss instance.

    Loss instances are rendered as e.g. ``PinballLoss(quantile=0.25)``;
    any other parameter falls back to ``str(param)``.
    """
    if isinstance(param, BaseLoss):
        loss = param
        name = loss.__class__.__name__
        if isinstance(loss, PinballLoss):
            # PinballLoss keeps its quantile on the Cython loss object.
            name += f"(quantile={loss.closs.quantile})"
        elif isinstance(loss, HuberLoss):
            # HuberLoss stores the quantile on the Python object. Fixed: the
            # closing parenthesis was missing, yielding unbalanced test ids
            # like "HuberLoss(quantile=0.75".
            name += f"(quantile={loss.quantile})"
        elif hasattr(loss, "closs") and hasattr(loss.closs, "power"):
            name += f"(power={loss.closs.power})"
        return name
    else:
        return str(param)
def random_y_true_raw_prediction(
    loss, n_samples, y_bound=(-100, 100), raw_bound=(-5, 5), seed=42
):
    """Random generate y_true and raw_prediction in valid range.

    Returns a pair (y_true, raw_prediction) drawn uniformly within the
    intersection of the requested bounds and the loss's valid intervals.
    """
    rng = np.random.RandomState(seed)
    if loss.is_multiclass:
        # Multiclass: raw predictions are (n_samples, n_classes); y_true
        # cycles deterministically through the class labels.
        raw_prediction = np.empty((n_samples, loss.n_classes))
        raw_prediction.flat[:] = rng.uniform(
            low=raw_bound[0],
            high=raw_bound[1],
            size=n_samples * loss.n_classes,
        )
        y_true = np.arange(n_samples).astype(float) % loss.n_classes
    else:
        # If link is identity, we must respect the interval of y_pred:
        if isinstance(loss.link, IdentityLink):
            low, high = _inclusive_low_high(loss.interval_y_pred)
            low = np.amax([low, raw_bound[0]])
            high = np.amin([high, raw_bound[1]])
            raw_bound = (low, high)
        raw_prediction = rng.uniform(
            low=raw_bound[0], high=raw_bound[1], size=n_samples
        )
        # generate a y_true in valid range
        low, high = _inclusive_low_high(loss.interval_y_true)
        low = max(low, y_bound[0])
        high = min(high, y_bound[1])
        y_true = rng.uniform(low, high, size=n_samples)
        # set some values at special boundaries
        if loss.interval_y_true.low == 0 and loss.interval_y_true.low_inclusive:
            y_true[:: (n_samples // 3)] = 0
        if loss.interval_y_true.high == 1 and loss.interval_y_true.high_inclusive:
            y_true[1 :: (n_samples // 3)] = 1
    return y_true, raw_prediction
def numerical_derivative(func, x, eps):
    """Approximate the first derivative of ``func`` at ``x``.

    Uses the five-point central finite-difference stencil of accuracy
    order 4, see
    https://en.wikipedia.org/wiki/Numerical_differentiation and
    https://en.wikipedia.org/wiki/Finite_difference_coefficient
    """
    step = np.full_like(x, fill_value=eps)
    # Sample func at offsets -2h, -h, +h, +2h around x.
    f_m2, f_m1, f_p1, f_p2 = (func(x + k * step) for k in (-2, -1, 1, 2))
    return (f_m2 - 8 * f_m1 + 8 * f_p1 - f_p2) / (12.0 * eps)
@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
def test_loss_boundary(loss):
    """Test interval ranges of y_true and y_pred in losses."""
    # make sure low and high are always within the interval, used for linspace
    if loss.is_multiclass:
        n_classes = 3  # default value
        y_true = np.tile(np.linspace(0, n_classes - 1, num=n_classes), 3)
    else:
        low, high = _inclusive_low_high(loss.interval_y_true)
        y_true = np.linspace(low, high, num=10)

    # add boundaries if they are included
    if loss.interval_y_true.low_inclusive:
        y_true = np.r_[y_true, loss.interval_y_true.low]
    if loss.interval_y_true.high_inclusive:
        y_true = np.r_[y_true, loss.interval_y_true.high]

    assert loss.in_y_true_range(y_true)

    n = y_true.shape[0]
    low, high = _inclusive_low_high(loss.interval_y_pred)
    if loss.is_multiclass:
        # Build valid probability rows: the remaining mass is split evenly
        # over the other two classes.
        y_pred = np.empty((n, n_classes))
        y_pred[:, 0] = np.linspace(low, high, num=n)
        y_pred[:, 1] = 0.5 * (1 - y_pred[:, 0])
        y_pred[:, 2] = 0.5 * (1 - y_pred[:, 0])
    else:
        y_pred = np.linspace(low, high, num=n)

    assert loss.in_y_pred_range(y_pred)

    # calculating losses should not fail
    raw_prediction = loss.link.link(y_pred)
    loss.loss(y_true=y_true, raw_prediction=raw_prediction)
# Fixture to test valid value ranges.
# Each entry reads: (loss instance, values accepted by the range check,
# values that must be rejected).
Y_COMMON_PARAMS = [
    # (loss, [y success], [y fail])
    (HalfSquaredError(), [-100, 0, 0.1, 100], [-np.inf, np.inf]),
    (AbsoluteError(), [-100, 0, 0.1, 100], [-np.inf, np.inf]),
    (PinballLoss(), [-100, 0, 0.1, 100], [-np.inf, np.inf]),
    (HuberLoss(), [-100, 0, 0.1, 100], [-np.inf, np.inf]),
    (HalfPoissonLoss(), [0.1, 100], [-np.inf, -3, -0.1, np.inf]),
    (HalfGammaLoss(), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]),
    (HalfTweedieLoss(power=-3), [0.1, 100], [-np.inf, np.inf]),
    (HalfTweedieLoss(power=0), [0.1, 100], [-np.inf, np.inf]),
    (HalfTweedieLoss(power=1.5), [0.1, 100], [-np.inf, -3, -0.1, np.inf]),
    (HalfTweedieLoss(power=2), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]),
    (HalfTweedieLoss(power=3), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]),
    (HalfTweedieLossIdentity(power=-3), [0.1, 100], [-np.inf, np.inf]),
    (HalfTweedieLossIdentity(power=0), [-3, -0.1, 0, 0.1, 100], [-np.inf, np.inf]),
    (HalfTweedieLossIdentity(power=1.5), [0.1, 100], [-np.inf, -3, -0.1, np.inf]),
    (HalfTweedieLossIdentity(power=2), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]),
    (HalfTweedieLossIdentity(power=3), [0.1, 100], [-np.inf, -3, -0.1, 0, np.inf]),
    (HalfBinomialLoss(), [0.1, 0.5, 0.9], [-np.inf, -1, 2, np.inf]),
    (HalfMultinomialLoss(), [], [-np.inf, -1, 1.1, np.inf]),
]
# y_pred and y_true do not always have the same domain (valid value range).
# Hence, we define extra sets of parameters for each of them.
Y_TRUE_PARAMS = [  # type: ignore[var-annotated]
    # (loss, [y success], [y fail])
    (HalfPoissonLoss(), [0], []),
    (HuberLoss(), [0], []),
    (HalfTweedieLoss(power=-3), [-100, -0.1, 0], []),
    (HalfTweedieLoss(power=0), [-100, 0], []),
    (HalfTweedieLoss(power=1.5), [0], []),
    (HalfTweedieLossIdentity(power=-3), [-100, -0.1, 0], []),
    (HalfTweedieLossIdentity(power=0), [-100, 0], []),
    (HalfTweedieLossIdentity(power=1.5), [0], []),
    (HalfBinomialLoss(), [0, 1], []),
    (HalfMultinomialLoss(), [0.0, 1.0, 2], []),
]
Y_PRED_PARAMS = [
    # (loss, [y success], [y fail])
    (HalfPoissonLoss(), [], [0]),
    (HalfTweedieLoss(power=-3), [], [-3, -0.1, 0]),
    (HalfTweedieLoss(power=0), [], [-3, -0.1, 0]),
    (HalfTweedieLoss(power=1.5), [], [0]),
    (HalfTweedieLossIdentity(power=-3), [], [-3, -0.1, 0]),
    (HalfTweedieLossIdentity(power=0), [-3, -0.1, 0], []),
    (HalfTweedieLossIdentity(power=1.5), [], [0]),
    (HalfBinomialLoss(), [], [0, 1]),
    (HalfMultinomialLoss(), [0.1, 0.5], [0, 1]),
]
@pytest.mark.parametrize(
    "loss, y_true_success, y_true_fail",
    Y_COMMON_PARAMS + Y_TRUE_PARAMS,  # type: ignore[operator]
)
def test_loss_boundary_y_true(loss, y_true_success, y_true_fail):
    """Test boundaries of y_true for loss functions."""
    # in_y_true_range expects array input, hence each scalar is wrapped.
    for y in y_true_success:
        assert loss.in_y_true_range(np.array([y]))
    for y in y_true_fail:
        assert not loss.in_y_true_range(np.array([y]))
@pytest.mark.parametrize(
    "loss, y_pred_success, y_pred_fail",
    Y_COMMON_PARAMS + Y_PRED_PARAMS,  # type: ignore[operator]
)
def test_loss_boundary_y_pred(loss, y_pred_success, y_pred_fail):
    """Test boundaries of y_pred for loss functions."""
    # in_y_pred_range expects array input, hence each scalar is wrapped.
    for y in y_pred_success:
        assert loss.in_y_pred_range(np.array([y]))
    for y in y_pred_fail:
        assert not loss.in_y_pred_range(np.array([y]))
@pytest.mark.parametrize(
    "loss, y_true, raw_prediction, loss_true, gradient_true, hessian_true",
    [
        (HalfSquaredError(), 1.0, 5.0, 8, 4, 1),
        (AbsoluteError(), 1.0, 5.0, 4.0, 1.0, None),
        (PinballLoss(quantile=0.5), 1.0, 5.0, 2, 0.5, None),
        (PinballLoss(quantile=0.25), 1.0, 5.0, 4 * (1 - 0.25), 1 - 0.25, None),
        (PinballLoss(quantile=0.25), 5.0, 1.0, 4 * 0.25, -0.25, None),
        (HuberLoss(quantile=0.5, delta=3), 1.0, 5.0, 3 * (4 - 3 / 2), None, None),
        (HuberLoss(quantile=0.5, delta=3), 1.0, 3.0, 0.5 * 2**2, None, None),
        (HalfPoissonLoss(), 2.0, np.log(4), 4 - 2 * np.log(4), 4 - 2, 4),
        (HalfGammaLoss(), 2.0, np.log(4), np.log(4) + 2 / 4, 1 - 2 / 4, 2 / 4),
        (HalfTweedieLoss(power=3), 2.0, np.log(4), -1 / 4 + 1 / 4**2, None, None),
        (HalfTweedieLossIdentity(power=1), 2.0, 4.0, 2 - 2 * np.log(2), None, None),
        (HalfTweedieLossIdentity(power=2), 2.0, 4.0, np.log(2) - 1 / 2, None, None),
        (
            HalfTweedieLossIdentity(power=3),
            2.0,
            4.0,
            -1 / 4 + 1 / 4**2 + 1 / 2 / 2,
            None,
            None,
        ),
        (
            HalfBinomialLoss(),
            0.25,
            np.log(4),
            np.log1p(4) - 0.25 * np.log(4),
            None,
            None,
        ),
        # Extreme log loss cases, checked with mpmath:
        # import mpmath as mp
        #
        # # Stolen from scipy
        # def mpf2float(x):
        #     return float(mp.nstr(x, 17, min_fixed=0, max_fixed=0))
        #
        # def mp_logloss(y_true, raw):
        #     with mp.workdps(100):
        #         y_true, raw = mp.mpf(float(y_true)), mp.mpf(float(raw))
        #         out = mp.log1p(mp.exp(raw)) - y_true * raw
        #     return mpf2float(out)
        #
        # def mp_gradient(y_true, raw):
        #     with mp.workdps(100):
        #         y_true, raw = mp.mpf(float(y_true)), mp.mpf(float(raw))
        #         out = mp.mpf(1) / (mp.mpf(1) + mp.exp(-raw)) - y_true
        #     return mpf2float(out)
        #
        # def mp_hessian(y_true, raw):
        #     with mp.workdps(100):
        #         y_true, raw = mp.mpf(float(y_true)), mp.mpf(float(raw))
        #         p = mp.mpf(1) / (mp.mpf(1) + mp.exp(-raw))
        #         out = p * (mp.mpf(1) - p)
        #     return mpf2float(out)
        #
        # y, raw = 0.0, 37.
        # mp_logloss(y, raw), mp_gradient(y, raw), mp_hessian(y, raw)
        (HalfBinomialLoss(), 0.0, -1e20, 0, 0, 0),
        (HalfBinomialLoss(), 1.0, -1e20, 1e20, -1, 0),
        (HalfBinomialLoss(), 0.0, -1e3, 0, 0, 0),
        (HalfBinomialLoss(), 1.0, -1e3, 1e3, -1, 0),
        (HalfBinomialLoss(), 1.0, -37.5, 37.5, -1, 0),
        (HalfBinomialLoss(), 1.0, -37.0, 37, 1e-16 - 1, 8.533047625744065e-17),
        (HalfBinomialLoss(), 0.0, -37.0, *[8.533047625744065e-17] * 3),
        (HalfBinomialLoss(), 1.0, -36.9, 36.9, 1e-16 - 1, 9.430476078526806e-17),
        (HalfBinomialLoss(), 0.0, -36.9, *[9.430476078526806e-17] * 3),
        (HalfBinomialLoss(), 0.0, 37.0, 37, 1 - 1e-16, 8.533047625744065e-17),
        (HalfBinomialLoss(), 1.0, 37.0, *[8.533047625744066e-17] * 3),
        (HalfBinomialLoss(), 0.0, 37.5, 37.5, 1, 5.175555005801868e-17),
        (HalfBinomialLoss(), 0.0, 232.8, 232.8, 1, 1.4287342391028437e-101),
        (HalfBinomialLoss(), 1.0, 1e20, 0, 0, 0),
        (HalfBinomialLoss(), 0.0, 1e20, 1e20, 1, 0),
        (
            HalfBinomialLoss(),
            1.0,
            232.8,
            0,
            -1.4287342391028437e-101,
            1.4287342391028437e-101,
        ),
        (HalfBinomialLoss(), 1.0, 232.9, 0, 0, 0),
        (HalfBinomialLoss(), 1.0, 1e3, 0, 0, 0),
        (HalfBinomialLoss(), 0.0, 1e3, 1e3, 1, 0),
        (
            HalfMultinomialLoss(n_classes=3),
            0.0,
            [0.2, 0.5, 0.3],
            logsumexp([0.2, 0.5, 0.3]) - 0.2,
            None,
            None,
        ),
        (
            HalfMultinomialLoss(n_classes=3),
            1.0,
            [0.2, 0.5, 0.3],
            logsumexp([0.2, 0.5, 0.3]) - 0.5,
            None,
            None,
        ),
        (
            HalfMultinomialLoss(n_classes=3),
            2.0,
            [0.2, 0.5, 0.3],
            logsumexp([0.2, 0.5, 0.3]) - 0.3,
            None,
            None,
        ),
        (
            HalfMultinomialLoss(n_classes=3),
            2.0,
            [1e4, 0, 7e-7],
            logsumexp([1e4, 0, 7e-7]) - (7e-7),
            None,
            None,
        ),
    ],
    ids=loss_instance_name,
)
def test_loss_on_specific_values(
    loss, y_true, raw_prediction, loss_true, gradient_true, hessian_true
):
    """Test losses, gradients and hessians at specific values.

    The same point is evaluated through all four public entry points
    (loss, gradient, loss_gradient, gradient_hessian) and the results are
    cross-checked against the precomputed reference values. A reference of
    None means "not checked for this case".
    """
    loss1 = loss(y_true=np.array([y_true]), raw_prediction=np.array([raw_prediction]))
    grad1 = loss.gradient(
        y_true=np.array([y_true]), raw_prediction=np.array([raw_prediction])
    )
    loss2, grad2 = loss.loss_gradient(
        y_true=np.array([y_true]), raw_prediction=np.array([raw_prediction])
    )
    grad3, hess = loss.gradient_hessian(
        y_true=np.array([y_true]), raw_prediction=np.array([raw_prediction])
    )

    assert loss1 == approx(loss_true, rel=1e-15, abs=1e-15)
    assert loss2 == approx(loss_true, rel=1e-15, abs=1e-15)

    if gradient_true is not None:
        assert grad1 == approx(gradient_true, rel=1e-15, abs=1e-15)
        assert grad2 == approx(gradient_true, rel=1e-15, abs=1e-15)
        assert grad3 == approx(gradient_true, rel=1e-15, abs=1e-15)

    if hessian_true is not None:
        assert hess == approx(hessian_true, rel=1e-15, abs=1e-15)
@pytest.mark.parametrize("loss", ALL_LOSSES)
@pytest.mark.parametrize("readonly_memmap", [False, True])
@pytest.mark.parametrize("dtype_in", [np.float32, np.float64])
@pytest.mark.parametrize("dtype_out", [np.float32, np.float64])
@pytest.mark.parametrize("sample_weight", [None, 1])
@pytest.mark.parametrize("out1", [None, 1])
@pytest.mark.parametrize("out2", [None, 1])
@pytest.mark.parametrize("n_threads", [1, 2])
def test_loss_dtype(
    loss, readonly_memmap, dtype_in, dtype_out, sample_weight, out1, out2, n_threads
):
    """Test acceptance of dtypes, readonly and writeable arrays in loss functions.

    Check that loss accepts if all input arrays are either all float32 or all
    float64, and all output arrays are either all float32 or all float64.

    Also check that input arrays can be readonly, e.g. memory mapped.
    """
    loss = loss()
    # Generate a y_true and raw_prediction in valid range.
    n_samples = 5
    y_true, raw_prediction = random_y_true_raw_prediction(
        loss=loss,
        n_samples=n_samples,
        y_bound=(-100, 100),
        raw_bound=(-10, 10),
        seed=42,
    )
    y_true = y_true.astype(dtype_in)
    raw_prediction = raw_prediction.astype(dtype_in)
    if sample_weight is not None:
        sample_weight = np.array([2.0] * n_samples, dtype=dtype_in)
    # The parametrized sentinel 1 means "use a preallocated output array";
    # None means "let the loss method allocate the result itself".
    if out1 is not None:
        out1 = np.empty_like(y_true, dtype=dtype_out)
    if out2 is not None:
        out2 = np.empty_like(raw_prediction, dtype=dtype_out)

    if readonly_memmap:
        y_true = create_memmap_backed_data(y_true)
        raw_prediction = create_memmap_backed_data(raw_prediction)
        if sample_weight is not None:
            sample_weight = create_memmap_backed_data(sample_weight)

    loss_result = loss.loss(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        loss_out=out1,
        n_threads=n_threads,
    )
    # When an output array is provided, that very same object must be the
    # returned array.
    if out1 is not None:
        assert loss_result is out1
    grad_result = loss.gradient(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        gradient_out=out2,
        n_threads=n_threads,
    )
    if out2 is not None:
        assert grad_result is out2
    loss_result, grad_result = loss.loss_gradient(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        loss_out=out1,
        gradient_out=out2,
        n_threads=n_threads,
    )
    if out1 is not None:
        assert loss_result is out1
    if out2 is not None:
        assert grad_result is out2
    if out1 is not None and loss.is_multiclass:
        # For multiclass losses, gradient_out must have the shape of
        # raw_prediction, not of y_true.
        out1 = np.empty_like(raw_prediction, dtype=dtype_out)
    grad_result, hess_result = loss.gradient_hessian(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        gradient_out=out1,
        hessian_out=out2,
        n_threads=n_threads,
    )
    if out1 is not None:
        assert grad_result is out1
    if out2 is not None:
        assert hess_result is out2
    # The remaining methods only need to accept these dtype/readonly inputs
    # without raising.
    loss(y_true=y_true, raw_prediction=raw_prediction, sample_weight=sample_weight)
    loss.fit_intercept_only(y_true=y_true, sample_weight=sample_weight)
    loss.constant_to_optimal_zero(y_true=y_true, sample_weight=sample_weight)
    if hasattr(loss, "predict_proba"):
        loss.predict_proba(raw_prediction=raw_prediction)
    if hasattr(loss, "gradient_proba"):
        grad_result, proba_result = loss.gradient_proba(
            y_true=y_true,
            raw_prediction=raw_prediction,
            sample_weight=sample_weight,
            gradient_out=out1,
            proba_out=out2,
            n_threads=n_threads,
        )
        if out1 is not None:
            assert grad_result is out1
        if out2 is not None:
            assert proba_result is out2
@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
@pytest.mark.parametrize("sample_weight", [None, "range"])
def test_loss_same_as_C_functions(loss, sample_weight):
    """Test that Python and Cython functions return same results."""
    y_true, raw_prediction = random_y_true_raw_prediction(
        loss=loss,
        n_samples=20,
        y_bound=(-100, 100),
        raw_bound=(-10, 10),
        seed=42,
    )
    if sample_weight == "range":
        sample_weight = np.linspace(1, y_true.shape[0], num=y_true.shape[0])
    out_l1 = np.empty_like(y_true)
    out_l2 = np.empty_like(y_true)
    out_g1 = np.empty_like(raw_prediction)
    out_g2 = np.empty_like(raw_prediction)
    out_h1 = np.empty_like(raw_prediction)
    out_h2 = np.empty_like(raw_prediction)
    # loss: Python wrapper vs Cython implementation.
    loss.loss(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        loss_out=out_l1,
    )
    loss.closs.loss(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        loss_out=out_l2,
    )
    assert_allclose(out_l1, out_l2)
    # gradient: Python wrapper vs Cython implementation.
    loss.gradient(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        gradient_out=out_g1,
    )
    loss.closs.gradient(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        gradient_out=out_g2,
    )
    assert_allclose(out_g1, out_g2)
    # loss_gradient: the first call must go through the Python wrapper.
    # Calling loss.closs.loss_gradient on both sides would compare the
    # Cython implementation against itself and test nothing.
    loss.loss_gradient(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        loss_out=out_l1,
        gradient_out=out_g1,
    )
    loss.closs.loss_gradient(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        loss_out=out_l2,
        gradient_out=out_g2,
    )
    assert_allclose(out_l1, out_l2)
    assert_allclose(out_g1, out_g2)
    # gradient_hessian: Python wrapper vs Cython implementation.
    loss.gradient_hessian(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        gradient_out=out_g1,
        hessian_out=out_h1,
    )
    loss.closs.gradient_hessian(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        gradient_out=out_g2,
        hessian_out=out_h2,
    )
    assert_allclose(out_g1, out_g2)
    assert_allclose(out_h1, out_h2)
@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
@pytest.mark.parametrize("sample_weight", [None, "range"])
def test_loss_gradients_are_the_same(loss, sample_weight, global_random_seed):
    """Test that loss and gradient are the same across different functions.

    Also test that output arguments contain correct results.
    """
    y_true, raw_prediction = random_y_true_raw_prediction(
        loss=loss,
        n_samples=20,
        y_bound=(-100, 100),
        raw_bound=(-10, 10),
        seed=global_random_seed,
    )
    if sample_weight == "range":
        sample_weight = np.linspace(1, y_true.shape[0], num=y_true.shape[0])
    # Preallocated output buffers, one per method under test.
    out_l1 = np.empty_like(y_true)
    out_l2 = np.empty_like(y_true)
    out_g1 = np.empty_like(raw_prediction)
    out_g2 = np.empty_like(raw_prediction)
    out_g3 = np.empty_like(raw_prediction)
    out_h3 = np.empty_like(raw_prediction)
    l1 = loss.loss(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        loss_out=out_l1,
    )
    g1 = loss.gradient(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        gradient_out=out_g1,
    )
    l2, g2 = loss.loss_gradient(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        loss_out=out_l2,
        gradient_out=out_g2,
    )
    g3, h3 = loss.gradient_hessian(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
        gradient_out=out_g3,
        hessian_out=out_h3,
    )
    # All code paths must compute the same loss and gradient values, and each
    # returned array must be the very buffer passed as the *_out argument.
    assert_allclose(l1, l2)
    assert_array_equal(l1, out_l1)
    assert np.shares_memory(l1, out_l1)
    assert_array_equal(l2, out_l2)
    assert np.shares_memory(l2, out_l2)
    assert_allclose(g1, g2)
    assert_allclose(g1, g3)
    assert_array_equal(g1, out_g1)
    assert np.shares_memory(g1, out_g1)
    assert_array_equal(g2, out_g2)
    assert np.shares_memory(g2, out_g2)
    assert_array_equal(g3, out_g3)
    assert np.shares_memory(g3, out_g3)
    if hasattr(loss, "gradient_proba"):
        assert loss.is_multiclass  # only for HalfMultinomialLoss
        out_g4 = np.empty_like(raw_prediction)
        out_proba = np.empty_like(raw_prediction)
        g4, proba = loss.gradient_proba(
            y_true=y_true,
            raw_prediction=raw_prediction,
            sample_weight=sample_weight,
            gradient_out=out_g4,
            proba_out=out_proba,
        )
        assert_allclose(g1, out_g4)
        assert_allclose(g1, g4)
        assert_allclose(proba, out_proba)
        # Class probabilities must sum to 1 for every sample.
        assert_allclose(np.sum(proba, axis=1), 1, rtol=1e-11)
@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
@pytest.mark.parametrize("sample_weight", ["ones", "random"])
def test_sample_weight_multiplies(loss, sample_weight, global_random_seed):
    """Sample weights must act as plain multiplicative factors.

    Passing sample weights to the loss, gradient and hessian computation
    methods has to be equivalent to multiplying the unweighted results by
    those weights.
    """
    n_samples = 100
    y_true, raw_prediction = random_y_true_raw_prediction(
        loss=loss,
        n_samples=n_samples,
        y_bound=(-100, 100),
        raw_bound=(-5, 5),
        seed=global_random_seed,
    )
    if sample_weight == "ones":
        sample_weight = np.ones(shape=n_samples, dtype=np.float64)
    else:
        rng = np.random.RandomState(global_random_seed)
        sample_weight = rng.normal(size=n_samples).astype(np.float64)

    # Multiclass gradients/hessians carry a trailing class axis, so the
    # per-sample weights must be broadcast along it.
    if loss.is_multiclass:
        broadcast_weight = sample_weight[:, None]
    else:
        broadcast_weight = sample_weight

    weighted_loss = loss.loss(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
    )
    unweighted_loss = loss.loss(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=None,
    )
    assert_allclose(weighted_loss, sample_weight * unweighted_loss)

    losses, gradient = loss.loss_gradient(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=None,
    )
    losses_sw, gradient_sw = loss.loss_gradient(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
    )
    assert_allclose(losses * sample_weight, losses_sw)
    assert_allclose(gradient * broadcast_weight, gradient_sw)

    gradient, hessian = loss.gradient_hessian(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=None,
    )
    gradient_sw, hessian_sw = loss.gradient_hessian(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
    )
    assert_allclose(gradient * broadcast_weight, gradient_sw)
    assert_allclose(hessian * broadcast_weight, hessian_sw)
@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
def test_graceful_squeezing(loss):
    """A (n_samples, 1) raw_prediction must give the same results as its
    1d version."""
    y_true, raw_prediction = random_y_true_raw_prediction(
        loss=loss,
        n_samples=20,
        y_bound=(-100, 100),
        raw_bound=(-10, 10),
        seed=42,
    )
    if raw_prediction.ndim != 1:
        # Multiclass losses already use 2d raw predictions; nothing to check.
        return
    raw_prediction_2d = raw_prediction[:, None]
    # Every computation method must be insensitive to the extra unit axis.
    for method in (
        loss.loss,
        loss.loss_gradient,
        loss.gradient,
        loss.gradient_hessian,
    ):
        assert_allclose(
            method(y_true=y_true, raw_prediction=raw_prediction_2d),
            method(y_true=y_true, raw_prediction=raw_prediction),
        )
@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
@pytest.mark.parametrize("sample_weight", [None, "range"])
def test_loss_of_perfect_prediction(loss, sample_weight):
    """Test value of perfect predictions.

    Loss of y_pred = y_true plus constant_to_optimal_zero should sum up to
    zero.
    """
    if not loss.is_multiclass:
        # Use small values such that exp(value) is not nan.
        raw_prediction = np.array([-10, -0.1, 0, 0.1, 3, 10])
        # If link is identity, we must respect the interval of y_pred:
        if isinstance(loss.link, IdentityLink):
            eps = 1e-10
            # Nudge exclusive bounds inward so clipped values stay strictly
            # inside the valid interval.
            low = loss.interval_y_pred.low
            if not loss.interval_y_pred.low_inclusive:
                low = low + eps
            high = loss.interval_y_pred.high
            if not loss.interval_y_pred.high_inclusive:
                high = high - eps
            raw_prediction = np.clip(raw_prediction, low, high)
        # Perfect prediction: y_true equals the predicted value.
        y_true = loss.link.inverse(raw_prediction)
    else:
        # HalfMultinomialLoss
        y_true = np.arange(loss.n_classes).astype(float)
        # raw_prediction with entries -exp(10), but +exp(10) on the diagonal
        # this is close enough to np.inf which would produce nan
        raw_prediction = np.full(
            shape=(loss.n_classes, loss.n_classes),
            fill_value=-np.exp(10),
            dtype=float,
        )
        # Set the diagonal (one entry per row) to +exp(10).
        raw_prediction.flat[:: loss.n_classes + 1] = np.exp(10)
    if sample_weight == "range":
        sample_weight = np.linspace(1, y_true.shape[0], num=y_true.shape[0])
    loss_value = loss.loss(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
    )
    constant_term = loss.constant_to_optimal_zero(
        y_true=y_true, sample_weight=sample_weight
    )
    # Comparing loss_value + constant_term to zero would result in large
    # round-off errors.
    assert_allclose(loss_value, -constant_term, atol=1e-14, rtol=1e-15)
@pytest.mark.parametrize("loss", LOSS_INSTANCES, ids=loss_instance_name)
@pytest.mark.parametrize("sample_weight", [None, "range"])
def test_gradients_hessians_numerically(loss, sample_weight, global_random_seed):
    """Test gradients and hessians with numerical derivatives.

    Gradient should equal the numerical derivatives of the loss function.
    Hessians should equal the numerical derivatives of gradients.
    """
    n_samples = 20
    y_true, raw_prediction = random_y_true_raw_prediction(
        loss=loss,
        n_samples=n_samples,
        y_bound=(-100, 100),
        raw_bound=(-5, 5),
        seed=global_random_seed,
    )
    if sample_weight == "range":
        sample_weight = np.linspace(1, y_true.shape[0], num=y_true.shape[0])
    # Analytical gradient and hessian to compare against.
    g, h = loss.gradient_hessian(
        y_true=y_true,
        raw_prediction=raw_prediction,
        sample_weight=sample_weight,
    )
    assert g.shape == raw_prediction.shape
    assert h.shape == raw_prediction.shape
    if not loss.is_multiclass:
        # Numerical derivative of the loss -> gradient.
        def loss_func(x):
            return loss.loss(
                y_true=y_true,
                raw_prediction=x,
                sample_weight=sample_weight,
            )
        g_numeric = numerical_derivative(loss_func, raw_prediction, eps=1e-6)
        assert_allclose(g, g_numeric, rtol=5e-6, atol=1e-10)
        # Numerical derivative of the gradient -> hessian.
        def grad_func(x):
            return loss.gradient(
                y_true=y_true,
                raw_prediction=x,
                sample_weight=sample_weight,
            )
        h_numeric = numerical_derivative(grad_func, raw_prediction, eps=1e-6)
        if loss.approx_hessian:
            # TODO: What could we test if loss.approx_hessian?
            pass
        else:
            assert_allclose(h, h_numeric, rtol=5e-6, atol=1e-10)
    else:
        # For multiclass loss, we should only change the predictions of the
        # class for which the derivative is taken for, e.g. offset[:, k] = eps
        # for class k.
        # As a softmax is computed, offsetting the whole array by a constant
        # would have no effect on the probabilities, and thus on the loss.
        for k in range(loss.n_classes):
            # Loss as a function of the k-th class raw predictions only.
            def loss_func(x):
                raw = raw_prediction.copy()
                raw[:, k] = x
                return loss.loss(
                    y_true=y_true,
                    raw_prediction=raw,
                    sample_weight=sample_weight,
                )
            g_numeric = numerical_derivative(loss_func, raw_prediction[:, k], eps=1e-5)
            assert_allclose(g[:, k], g_numeric, rtol=5e-6, atol=1e-10)
            # Gradient of class k as a function of class k raw predictions.
            def grad_func(x):
                raw = raw_prediction.copy()
                raw[:, k] = x
                return loss.gradient(
                    y_true=y_true,
                    raw_prediction=raw,
                    sample_weight=sample_weight,
                )[:, k]
            h_numeric = numerical_derivative(grad_func, raw_prediction[:, k], eps=1e-6)
            if loss.approx_hessian:
                # TODO: What could we test if loss.approx_hessian?
                pass
            else:
                assert_allclose(h[:, k], h_numeric, rtol=5e-6, atol=1e-10)
@pytest.mark.parametrize(
"loss, x0, y_true",
[
("squared_error", -2.0, 42),
("squared_error", 117.0, 1.05),
("squared_error", 0.0, 0.0),
# The argmin of binomial_loss for y_true=0 and y_true=1 is resp.
# -inf and +inf due to logit, cf. "complete separation". Therefore, we
# use 0 < y_true < 1.
("binomial_loss", 0.3, 0.1),
("binomial_loss", -12, 0.2),
("binomial_loss", 30, 0.9),
("poisson_loss", 12.0, 1.0),
("poisson_loss", 0.0, 2.0),
("poisson_loss", -22.0, 10.0),
],
)
@skip_if_32bit
def test_derivatives(loss, x0, y_true):
"""Test that gradients are zero at the minimum of the loss.
We check this on a single value/sample using Halley's method with the
first and second order derivatives computed by the Loss instance.
Note that methods of Loss instances operate on arrays while the newton
root finder expects a scalar or a one-element array for this purpose.
"""
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
# --- file: sklearn/_loss/tests/test_link.py ---
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from sklearn._loss.link import (
_LINKS,
HalfLogitLink,
Interval,
MultinomialLogit,
_inclusive_low_high,
)
LINK_FUNCTIONS = list(_LINKS.values())
def test_interval_raises():
    """Constructing an Interval with low > high must raise a ValueError."""
    expected_msg = "One must have low <= high; got low=1, high=0."
    with pytest.raises(ValueError, match=expected_msg):
        Interval(1, 0, False, False)
@pytest.mark.parametrize(
    "interval",
    [
        # All four inclusive/exclusive combinations for each bound pair.
        Interval(low, high, low_inclusive, high_inclusive)
        for low, high in [(0, 1), (-np.inf, np.inf), (-10, -1)]
        for low_inclusive in (False, True)
        for high_inclusive in (False, True)
    ],
)
def test_is_in_range(interval):
    """`Interval.includes` must honor bound inclusiveness."""
    # `_inclusive_low_high` gives finite bounds lying within the interval,
    # suitable for np.linspace.
    low, high = _inclusive_low_high(interval)
    inside = np.linspace(low, high, num=10)
    assert interval.includes(inside)
    # Appending the lower bound: accepted iff that bound is inclusive.
    assert interval.includes(np.r_[inside, interval.low]) == interval.low_inclusive
    # Appending the upper bound: accepted iff that bound is inclusive.
    assert interval.includes(np.r_[inside, interval.high]) == interval.high_inclusive
    # Both bounds at once: accepted only when both are inclusive.
    assert interval.includes(np.r_[inside, interval.low, interval.high]) == (
        interval.low_inclusive and interval.high_inclusive
    )
@pytest.mark.parametrize("link", LINK_FUNCTIONS)
def test_link_inverse_identity(link, global_random_seed):
    """link(inverse(x)) and inverse(link(y)) must both be identities."""
    rng = np.random.RandomState(global_random_seed)
    link = link()
    n_samples = 100
    # Raw predictions are limited to [-20, 20] because in `LogitLink` the
    # term `expit(x)` comes very close to 1 for large positive x and
    # therefore loses precision.
    if link.is_multiclass:
        n_classes = 10
        raw_prediction = rng.uniform(low=-20, high=20, size=(n_samples, n_classes))
        if isinstance(link, MultinomialLogit):
            raw_prediction = link.symmetrize_raw_prediction(raw_prediction)
    else:
        # HalfLogitLink needs an even tighter range for numerical stability.
        bound = 10 if isinstance(link, HalfLogitLink) else 20
        raw_prediction = rng.uniform(low=-bound, high=bound, size=(n_samples))
    assert_allclose(link.link(link.inverse(raw_prediction)), raw_prediction)
    y_pred = link.inverse(raw_prediction)
    assert_allclose(link.inverse(link.link(y_pred)), y_pred)
@pytest.mark.parametrize("link", LINK_FUNCTIONS)
def test_link_out_argument(link):
    """A provided `out` array must receive the result and be returned."""
    rng = np.random.RandomState(42)
    link = link()
    n_samples = 100
    if link.is_multiclass:
        n_classes = 10
        raw_prediction = rng.normal(loc=0, scale=10, size=(n_samples, n_classes))
        if isinstance(link, MultinomialLogit):
            raw_prediction = link.symmetrize_raw_prediction(raw_prediction)
    else:
        # So far, the valid interval of raw_prediction is (-inf, inf) and
        # we do not need to distinguish.
        raw_prediction = rng.uniform(low=-10, high=10, size=(n_samples))

    y_pred = link.inverse(raw_prediction, out=None)

    def check_out_receives_result(func, arg, expected):
        # Calling `func` with a preallocated buffer must fill that buffer
        # with `expected` and return the very same array object.
        buffer = np.empty_like(arg)
        returned = func(arg, out=buffer)
        assert_allclose(expected, buffer)
        assert_array_equal(buffer, returned)
        assert np.shares_memory(buffer, returned)

    check_out_receives_result(link.inverse, raw_prediction, y_pred)
    check_out_receives_result(link.link, y_pred, raw_prediction)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/_loss/tests/__init__.py | sklearn/_loss/tests/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
# --- file: sklearn/ensemble/_base.py ---
"""Base class for ensemble-based estimators."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from abc import ABCMeta, abstractmethod
import numpy as np
from joblib import effective_n_jobs
from sklearn.base import (
BaseEstimator,
MetaEstimatorMixin,
clone,
is_classifier,
is_regressor,
)
from sklearn.utils import Bunch, check_random_state
from sklearn.utils._tags import get_tags
from sklearn.utils._user_interface import _print_elapsed_time
from sklearn.utils.metadata_routing import _routing_enabled
from sklearn.utils.metaestimators import _BaseComposition
def _fit_single_estimator(
    estimator, X, y, fit_params, message_clsname=None, message=None
):
    """Private function used to fit an estimator within a job."""
    # TODO(SLEP6): remove if-condition for unrouted sample_weight when metadata
    # routing can't be disabled.
    if _routing_enabled() or "sample_weight" not in fit_params:
        # Either metadata routing handles the params, or there is no
        # sample_weight to special-case: pass everything through.
        with _print_elapsed_time(message_clsname, message):
            estimator.fit(X, y, **fit_params)
    else:
        try:
            with _print_elapsed_time(message_clsname, message):
                estimator.fit(X, y, sample_weight=fit_params["sample_weight"])
        except TypeError as exc:
            # Translate the generic TypeError into an actionable message when
            # the estimator does not accept sample_weight; re-raise otherwise.
            if "unexpected keyword argument 'sample_weight'" not in str(exc):
                raise
            raise TypeError(
                "Underlying estimator {} does not support sample weights.".format(
                    estimator.__class__.__name__
                )
            ) from exc
    return estimator
def _set_random_states(estimator, random_state=None):
    """Set fixed random_state parameters for an estimator.

    Every parameter whose name ends with ``random_state`` is assigned an
    integer drawn from ``random_state``.

    Parameters
    ----------
    estimator : estimator supporting get/set_params
        Estimator with potential randomness managed by random_state
        parameters.

    random_state : int, RandomState instance or None, default=None
        Pseudo-random number generator to control the generation of the random
        integers. Pass an int for reproducible output across multiple function
        calls.
        See :term:`Glossary <random_state>`.

    Notes
    -----
    This does not necessarily set *all* ``random_state`` attributes that
    control an estimator's randomness, only those accessible through
    ``estimator.get_params()``. ``random_state``s not controlled include
    those belonging to:

    * cross-validation splitters
    * ``scipy.stats`` rvs
    """
    random_state = check_random_state(random_state)
    # Iterate parameter names in sorted order so the integers drawn from the
    # generator are assigned deterministically.
    params_to_set = {
        name: random_state.randint(np.iinfo(np.int32).max)
        for name in sorted(estimator.get_params(deep=True))
        if name == "random_state" or name.endswith("__random_state")
    }
    if params_to_set:
        estimator.set_params(**params_to_set)
class BaseEnsemble(MetaEstimatorMixin, BaseEstimator, metaclass=ABCMeta):
    """Base class for all ensemble classes.

    Warning: This class should not be used directly. Use derived classes
    instead.

    Parameters
    ----------
    estimator : object
        The base estimator from which the ensemble is built.

    n_estimators : int, default=10
        The number of estimators in the ensemble.

    estimator_params : list of str, default=tuple()
        The list of attributes to use as parameters when instantiating a
        new base estimator. If none are given, default parameters are used.

    Attributes
    ----------
    estimator_ : estimator
        The base estimator from which the ensemble is grown.

    estimators_ : list of estimators
        The collection of fitted base estimators.
    """

    @abstractmethod
    def __init__(
        self,
        estimator=None,
        *,
        n_estimators=10,
        estimator_params=tuple(),
    ):
        # Only store the parameters. Instantiating sub-estimators here would
        # freeze their configuration too early, e.g. before grid-search sets
        # nested parameters. `self.estimators_` is filled by derived classes
        # during fit.
        self.estimator = estimator
        self.n_estimators = n_estimators
        self.estimator_params = estimator_params

    def _validate_estimator(self, default=None):
        """Check the base estimator and set the `estimator_` attribute."""
        self.estimator_ = self.estimator if self.estimator is not None else default

    def _make_estimator(self, append=True, random_state=None):
        """Make and configure a copy of the `estimator_` attribute.

        Warning: This method should be used to properly instantiate new
        sub-estimators.
        """
        sub_estimator = clone(self.estimator_)
        sub_estimator.set_params(
            **{name: getattr(self, name) for name in self.estimator_params}
        )
        if random_state is not None:
            _set_random_states(sub_estimator, random_state)
        if append:
            self.estimators_.append(sub_estimator)
        return sub_estimator

    def __len__(self):
        """Return the number of estimators in the ensemble."""
        return len(self.estimators_)

    def __getitem__(self, index):
        """Return the index'th estimator in the ensemble."""
        return self.estimators_[index]

    def __iter__(self):
        """Return iterator over estimators in the ensemble."""
        return iter(self.estimators_)
def _partition_estimators(n_estimators, n_jobs):
    """Private function used to partition estimators between jobs."""
    # Never use more jobs than there are estimators.
    n_jobs = min(effective_n_jobs(n_jobs), n_estimators)
    # Each job gets the integer share; the remainder is spread one estimator
    # at a time over the first jobs.
    counts = np.full(n_jobs, n_estimators // n_jobs, dtype=int)
    counts[: n_estimators % n_jobs] += 1
    # Cumulative boundaries: job i handles estimators [starts[i], starts[i+1]).
    starts = [0] + np.cumsum(counts).tolist()
    return n_jobs, counts.tolist(), starts
class _BaseHeterogeneousEnsemble(
    MetaEstimatorMixin, _BaseComposition, metaclass=ABCMeta
):
    """Base class for heterogeneous ensemble of learners.

    Parameters
    ----------
    estimators : list of (str, estimator) tuples
        The ensemble of estimators to use in the ensemble. Each element of the
        list is defined as a tuple of string (i.e. name of the estimator) and
        an estimator instance. An estimator can be set to `'drop'` using
        `set_params`.

    Attributes
    ----------
    estimators_ : list of estimators
        The elements of the estimators parameter, having been fitted on the
        training data. If an estimator has been set to `'drop'`, it will not
        appear in `estimators_`.
    """

    @property
    def named_estimators(self):
        """Dictionary to access any fitted sub-estimators by name.

        Returns
        -------
        :class:`~sklearn.utils.Bunch`
        """
        return Bunch(**dict(self.estimators))

    @abstractmethod
    def __init__(self, estimators):
        self.estimators = estimators

    def _validate_estimators(self):
        """Validate the `estimators` parameter.

        Returns the tuple ``(names, estimators)`` unzipped from the
        validated list; raises ValueError on any malformed entry.
        """
        # The list must be non-empty and contain only (str, estimator) pairs.
        if len(self.estimators) == 0 or not all(
            isinstance(item, (tuple, list)) and isinstance(item[0], str)
            for item in self.estimators
        ):
            raise ValueError(
                "Invalid 'estimators' attribute, 'estimators' should be a "
                "non-empty list of (string, estimator) tuples."
            )
        names, estimators = zip(*self.estimators)
        # defined by MetaEstimatorMixin
        self._validate_names(names)
        # At least one entry must be a real estimator, not the 'drop' marker.
        has_estimator = any(est != "drop" for est in estimators)
        if not has_estimator:
            raise ValueError(
                "All estimators are dropped. At least one is required "
                "to be an estimator."
            )
        # All sub-estimators must match the ensemble's own task type
        # (classifier vs regressor).
        is_estimator_type = is_classifier if is_classifier(self) else is_regressor
        for est in estimators:
            if est != "drop" and not is_estimator_type(est):
                raise ValueError(
                    "The estimator {} should be a {}.".format(
                        est.__class__.__name__, is_estimator_type.__name__[3:]
                    )
                )
        return names, estimators

    def set_params(self, **params):
        """
        Set the parameters of an estimator from the ensemble.

        Valid parameter keys can be listed with `get_params()`. Note that you
        can directly set the parameters of the estimators contained in
        `estimators`.

        Parameters
        ----------
        **params : keyword arguments
            Specific parameters using e.g.
            `set_params(parameter_name=new_value)`. In addition, to setting the
            parameters of the estimator, the individual estimator of the
            estimators can also be set, or can be removed by setting them to
            'drop'.

        Returns
        -------
        self : object
            Estimator instance.
        """
        super()._set_params("estimators", **params)
        return self

    def get_params(self, deep=True):
        """
        Get the parameters of an estimator from the ensemble.

        Returns the parameters given in the constructor as well as the
        estimators contained within the `estimators` parameter.

        Parameters
        ----------
        deep : bool, default=True
            Setting it to True gets the various estimators and the parameters
            of the estimators as well.

        Returns
        -------
        params : dict
            Parameter and estimator names mapped to their values or parameter
            names mapped to their values.
        """
        return super()._get_params("estimators", deep=deep)

    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        try:
            # The ensemble supports NaN / sparse input only if every
            # non-dropped sub-estimator does.
            tags.input_tags.allow_nan = all(
                get_tags(est[1]).input_tags.allow_nan if est[1] != "drop" else True
                for est in self.estimators
            )
            tags.input_tags.sparse = all(
                get_tags(est[1]).input_tags.sparse if est[1] != "drop" else True
                for est in self.estimators
            )
        except Exception:
            # If `estimators` does not comply with our API (list of tuples) then it will
            # fail. In this case, we assume that `allow_nan` and `sparse` are False but
            # the parameter validation will raise an error during `fit`.
            pass  # pragma: no cover
        return tags
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
# --- file: sklearn/ensemble/_stacking.py ---
"""Stacking classifier and regressor."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from abc import ABCMeta, abstractmethod
from copy import deepcopy
from numbers import Integral
import numpy as np
import scipy.sparse as sparse
from sklearn.base import (
ClassifierMixin,
RegressorMixin,
TransformerMixin,
_fit_context,
clone,
is_classifier,
is_regressor,
)
from sklearn.ensemble._base import _BaseHeterogeneousEnsemble, _fit_single_estimator
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import LogisticRegression, RidgeCV
from sklearn.model_selection import check_cv, cross_val_predict
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import Bunch
from sklearn.utils._param_validation import HasMethods, StrOptions
from sklearn.utils._repr_html.estimator import _VisualBlock
from sklearn.utils.metadata_routing import (
MetadataRouter,
MethodMapping,
_raise_for_params,
_routing_enabled,
process_routing,
)
from sklearn.utils.metaestimators import available_if
from sklearn.utils.multiclass import check_classification_targets, type_of_target
from sklearn.utils.parallel import Parallel, delayed
from sklearn.utils.validation import (
_check_feature_names_in,
_check_response_method,
_estimator_has,
check_is_fitted,
column_or_1d,
)
class _BaseStacking(TransformerMixin, _BaseHeterogeneousEnsemble, metaclass=ABCMeta):
"""Base class for stacking method."""
    # Schema consumed by scikit-learn's parameter-validation machinery;
    # each key maps a constructor parameter to its allowed constraints.
    _parameter_constraints: dict = {
        "estimators": [list],
        "final_estimator": [None, HasMethods("fit")],
        "cv": ["cv_object", StrOptions({"prefit"})],
        "n_jobs": [None, Integral],
        "passthrough": ["boolean"],
        "verbose": ["verbose"],
    }

    @abstractmethod
    def __init__(
        self,
        estimators,
        final_estimator=None,
        *,
        cv=None,
        stack_method="auto",
        n_jobs=None,
        verbose=0,
        passthrough=False,
    ):
        """Store the stacking configuration without validating it.

        Per scikit-learn convention, `__init__` only assigns the public
        parameters; all validation happens in `fit`.
        """
        super().__init__(estimators=estimators)
        self.final_estimator = final_estimator
        self.cv = cv
        self.stack_method = stack_method
        self.n_jobs = n_jobs
        self.verbose = verbose
        self.passthrough = passthrough
def _clone_final_estimator(self, default):
if self.final_estimator is not None:
self.final_estimator_ = clone(self.final_estimator)
else:
self.final_estimator_ = clone(default)
    def _concatenate_predictions(self, X, predictions):
        """Concatenate the predictions of each first layer learner and
        possibly the input dataset `X`.

        If `X` is sparse and `self.passthrough` is False, the output of
        `transform` will be dense (the predictions). If `X` is sparse
        and `self.passthrough` is True, the output of `transform` will
        be sparse.

        This helper is in charge of ensuring the predictions are 2D arrays and
        it will drop one of the probability columns when using probabilities
        in the binary case. Indeed, p(y|c=0) = 1 - p(y|c=1).

        When `y` type is `"multilabel-indicator"` and the method used is
        `predict_proba`, `preds` can be either a `ndarray` of shape
        `(n_samples, n_class)` or for some estimators a list of `ndarray`.
        This function will drop one of the probability columns in this
        situation as well.
        """
        X_meta = []
        for est_idx, preds in enumerate(predictions):
            if isinstance(preds, list):
                # `preds` is here a list of `n_targets` 2D ndarrays of
                # `n_classes` columns. The k-th column contains the
                # probabilities of the samples belonging the k-th class.
                #
                # Since those probabilities must sum to one for each sample,
                # we can work with probabilities of `n_classes - 1` classes.
                # Hence we drop the first column.
                for pred in preds:
                    X_meta.append(pred[:, 1:])
            elif preds.ndim == 1:
                # Some estimator return a 1D array for predictions
                # which must be 2-dimensional arrays.
                X_meta.append(preds.reshape(-1, 1))
            elif (
                self.stack_method_[est_idx] == "predict_proba"
                and len(self.classes_) == 2
            ):
                # Remove the first column when using probabilities in
                # binary classification because both features `preds` are perfectly
                # collinear.
                X_meta.append(preds[:, 1:])
            else:
                X_meta.append(preds)
        # Remember how many meta-feature columns each prediction block
        # contributed (used elsewhere, e.g. for feature-name generation).
        self._n_feature_outs = [pred.shape[1] for pred in X_meta]
        if self.passthrough:
            X_meta.append(X)
            # Keep sparsity of `X` when passing it through.
            if sparse.issparse(X):
                return sparse.hstack(X_meta, format=X.format)
        return np.hstack(X_meta)
@staticmethod
def _method_name(name, estimator, method):
if estimator == "drop":
return None
if method == "auto":
method = ["predict_proba", "decision_function", "predict"]
try:
method_name = _check_response_method(estimator, method).__name__
except AttributeError as e:
raise ValueError(
f"Underlying estimator {name} does not implement the method {method}."
) from e
return method_name
    @_fit_context(
        # estimators in Stacking*.estimators are not validated yet
        prefer_skip_nested_validation=False
    )
    def fit(self, X, y, **fit_params):
        """Fit the estimators.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training vectors, where `n_samples` is the number of samples and
            `n_features` is the number of features.

        y : array-like of shape (n_samples,)
            Target values.

        **fit_params : dict
            Dict of metadata, potentially containing sample_weight as a
            key-value pair. If sample_weight is not present, then samples are
            equally weighted. Note that sample_weight is supported only if all
            underlying estimators support sample weights.

            .. versionadded:: 1.6

        Returns
        -------
        self : object
        """
        # all_estimators contains all estimators, the one to be fitted and the
        # 'drop' string.
        names, all_estimators = self._validate_estimators()
        self._validate_final_estimator()
        stack_method = [self.stack_method] * len(all_estimators)
        if _routing_enabled():
            routed_params = process_routing(self, "fit", **fit_params)
        else:
            # Without metadata routing, only `sample_weight` is forwarded to
            # the base estimators.
            routed_params = Bunch()
            for name in names:
                routed_params[name] = Bunch(fit={})
                if "sample_weight" in fit_params:
                    routed_params[name].fit["sample_weight"] = fit_params[
                        "sample_weight"
                    ]
        if self.cv == "prefit":
            # Reuse the user-provided, already-fitted estimators as-is.
            self.estimators_ = []
            for estimator in all_estimators:
                if estimator != "drop":
                    check_is_fitted(estimator)
                    self.estimators_.append(estimator)
        else:
            # Fit the base estimators on the whole training data. Those
            # base estimators will be used in transform, predict, and
            # predict_proba. They are exposed publicly.
            self.estimators_ = Parallel(n_jobs=self.n_jobs)(
                delayed(_fit_single_estimator)(
                    clone(est), X, y, routed_params[name]["fit"]
                )
                for name, est in zip(names, all_estimators)
                if est != "drop"
            )
        # Build name -> fitted-estimator mapping, keeping 'drop' markers, and
        # expose `feature_names_in_` from any base estimator that has it.
        self.named_estimators_ = Bunch()
        est_fitted_idx = 0
        for name_est, org_est in zip(names, all_estimators):
            if org_est != "drop":
                current_estimator = self.estimators_[est_fitted_idx]
                self.named_estimators_[name_est] = current_estimator
                est_fitted_idx += 1
                if hasattr(current_estimator, "feature_names_in_"):
                    self.feature_names_in_ = current_estimator.feature_names_in_
            else:
                self.named_estimators_[name_est] = "drop"
        self.stack_method_ = [
            self._method_name(name, est, meth)
            for name, est, meth in zip(names, all_estimators, stack_method)
        ]
        if self.cv == "prefit":
            # Generate predictions from prefit models
            predictions = [
                getattr(estimator, predict_method)(X)
                for estimator, predict_method in zip(all_estimators, self.stack_method_)
                if estimator != "drop"
            ]
        else:
            # To train the meta-classifier using the most data as possible, we use
            # a cross-validation to obtain the output of the stacked estimators.
            # To ensure that the data provided to each estimator are the same,
            # we need to set the random state of the cv if there is one and we
            # need to take a copy.
            cv = check_cv(self.cv, y=y, classifier=is_classifier(self))
            if hasattr(cv, "random_state") and cv.random_state is None:
                cv.random_state = np.random.RandomState()
            predictions = Parallel(n_jobs=self.n_jobs)(
                delayed(cross_val_predict)(
                    clone(est),
                    X,
                    y,
                    cv=deepcopy(cv),
                    method=meth,
                    n_jobs=self.n_jobs,
                    params=routed_params[name]["fit"],
                    verbose=self.verbose,
                )
                for name, est, meth in zip(names, all_estimators, self.stack_method_)
                if est != "drop"
            )
        # Only not None or not 'drop' estimators will be used in transform.
        # Remove the None from the method as well.
        self.stack_method_ = [
            meth
            for (meth, est) in zip(self.stack_method_, all_estimators)
            if est != "drop"
        ]
        # The final estimator is trained on the (cross-validated) first-level
        # predictions, optionally concatenated with `X` (passthrough).
        X_meta = self._concatenate_predictions(X, predictions)
        _fit_single_estimator(self.final_estimator_, X_meta, y, fit_params=fit_params)
        return self
@property
def n_features_in_(self):
"""Number of features seen during :term:`fit`."""
try:
check_is_fitted(self)
except NotFittedError as nfe:
raise AttributeError(
f"{self.__class__.__name__} object has no attribute n_features_in_"
) from nfe
return self.estimators_[0].n_features_in_
def _transform(self, X):
"""Concatenate and return the predictions of the estimators."""
check_is_fitted(self)
predictions = [
getattr(est, meth)(X)
for est, meth in zip(self.estimators_, self.stack_method_)
if est != "drop"
]
return self._concatenate_predictions(X, predictions)
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Input features. The input feature names are only used when `passthrough` is
`True`.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then names are generated: `[x0, x1, ..., x(n_features_in_ - 1)]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
If `passthrough` is `False`, then only the names of `estimators` are used
to generate the output feature names.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
check_is_fitted(self, "n_features_in_")
input_features = _check_feature_names_in(
self, input_features, generate_names=self.passthrough
)
class_name = self.__class__.__name__.lower()
non_dropped_estimators = (
name for name, est in self.estimators if est != "drop"
)
meta_names = []
for est, n_features_out in zip(non_dropped_estimators, self._n_feature_outs):
if n_features_out == 1:
meta_names.append(f"{class_name}_{est}")
else:
meta_names.extend(
f"{class_name}_{est}{i}" for i in range(n_features_out)
)
if self.passthrough:
return np.concatenate((meta_names, input_features))
return np.asarray(meta_names, dtype=object)
@available_if(
_estimator_has("predict", delegates=("final_estimator_", "final_estimator"))
)
def predict(self, X, **predict_params):
"""Predict target for X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
**predict_params : dict of str -> obj
Parameters to the `predict` called by the `final_estimator`. Note
that this may be used to return uncertainties from some estimators
with `return_std` or `return_cov`. Be aware that it will only
account for uncertainty in the final estimator.
Returns
-------
y_pred : ndarray of shape (n_samples,) or (n_samples, n_output)
Predicted targets.
"""
check_is_fitted(self)
return self.final_estimator_.predict(self.transform(X), **predict_params)
def _sk_visual_block_with_final_estimator(self, final_estimator):
names, estimators = zip(*self.estimators)
parallel = _VisualBlock("parallel", estimators, names=names, dash_wrapped=False)
# final estimator is wrapped in a parallel block to show the label:
# 'final_estimator' in the html repr
final_block = _VisualBlock(
"parallel", [final_estimator], names=["final_estimator"], dash_wrapped=False
)
return _VisualBlock("serial", (parallel, final_block), dash_wrapped=False)
def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.6
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
router = MetadataRouter(owner=self)
# `self.estimators` is a list of (name, est) tuples
for name, estimator in self.estimators:
router.add(
**{name: estimator},
method_mapping=MethodMapping().add(callee="fit", caller="fit"),
)
try:
final_estimator_ = self.final_estimator_
except AttributeError:
final_estimator_ = self.final_estimator
router.add(
final_estimator_=final_estimator_,
method_mapping=MethodMapping().add(caller="predict", callee="predict"),
)
return router
class StackingClassifier(ClassifierMixin, _BaseStacking):
    """Stack of estimators with a final classifier.

    Stacked generalization consists in stacking the output of individual
    estimator and use a classifier to compute the final prediction. Stacking
    allows to use the strength of each individual estimator by using their
    output as input of a final estimator.

    Note that `estimators_` are fitted on the full `X` while `final_estimator_`
    is trained using cross-validated predictions of the base estimators using
    `cross_val_predict`.

    Read more in the :ref:`User Guide <stacking>`.

    .. versionadded:: 0.22

    Parameters
    ----------
    estimators : list of (str, estimator)
        Base estimators which will be stacked together. Each element of the
        list is defined as a tuple of string (i.e. name) and an estimator
        instance. An estimator can be set to 'drop' using `set_params`.

        The type of estimator is generally expected to be a classifier.
        However, one can pass a regressor for some use case (e.g. ordinal
        regression).

    final_estimator : estimator, default=None
        A classifier which will be used to combine the base estimators.
        The default classifier is a
        :class:`~sklearn.linear_model.LogisticRegression`.

    cv : int, cross-validation generator, iterable, or "prefit", default=None
        Determines the cross-validation splitting strategy used in
        `cross_val_predict` to train `final_estimator`. Possible inputs for
        cv are:

        * None, to use the default 5-fold cross validation,
        * integer, to specify the number of folds in a (Stratified) KFold,
        * An object to be used as a cross-validation generator,
        * An iterable yielding train, test splits,
        * `"prefit"`, to assume the `estimators` are prefit. In this case, the
          estimators will not be refitted.

        For integer/None inputs, if the estimator is a classifier and y is
        either binary or multiclass,
        :class:`~sklearn.model_selection.StratifiedKFold` is used.
        In all other cases, :class:`~sklearn.model_selection.KFold` is used.
        These splitters are instantiated with `shuffle=False` so the splits
        will be the same across calls.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

        If "prefit" is passed, it is assumed that all `estimators` have
        been fitted already. The `final_estimator_` is trained on the `estimators`
        predictions on the full training set and are **not** cross validated
        predictions. Please note that if the models have been trained on the same
        data to train the stacking model, there is a very high risk of overfitting.

        .. versionadded:: 1.1
            The 'prefit' option was added in 1.1

        .. note::
           A larger number of split will provide no benefits if the number
           of training samples is large enough. Indeed, the training time
           will increase. ``cv`` is not used for model evaluation but for
           prediction.

    stack_method : {'auto', 'predict_proba', 'decision_function', 'predict'}, \
            default='auto'
        Methods called for each base estimator. It can be:

        * if 'auto', it will try to invoke, for each estimator,
          `'predict_proba'`, `'decision_function'` or `'predict'` in that
          order.
        * otherwise, one of `'predict_proba'`, `'decision_function'` or
          `'predict'`. If the method is not implemented by the estimator, it
          will raise an error.

    n_jobs : int, default=None
        The number of jobs to run in parallel for `fit` of all `estimators`.
        `None` means 1 unless in a `joblib.parallel_backend` context. -1 means
        using all processors. See :term:`Glossary <n_jobs>` for more details.

    passthrough : bool, default=False
        When False, only the predictions of estimators will be used as
        training data for `final_estimator`. When True, the
        `final_estimator` is trained on the predictions as well as the
        original training data.

    verbose : int, default=0
        Verbosity level.

    Attributes
    ----------
    classes_ : ndarray of shape (n_classes,) or list of ndarray if `y` \
        is of type `"multilabel-indicator"`.
        Class labels.

    estimators_ : list of estimators
        The elements of the `estimators` parameter, having been fitted on the
        training data. If an estimator has been set to `'drop'`, it
        will not appear in `estimators_`. When `cv="prefit"`, `estimators_`
        is set to `estimators` and is not fitted again.

    named_estimators_ : :class:`~sklearn.utils.Bunch`
        Attribute to access any fitted sub-estimators by name.

    n_features_in_ : int
        Number of features seen during :term:`fit`. Only defined if the
        underlying estimator exposes such an attribute when fit.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Only defined if the
        underlying estimators expose such an attribute when fit.

        .. versionadded:: 1.0

    final_estimator_ : estimator
        The classifier fit on the output of `estimators_` and responsible for
        final predictions.

    stack_method_ : list of str
        The method used by each base estimator.

    See Also
    --------
    StackingRegressor : Stack of estimators with a final regressor.

    Notes
    -----
    When `predict_proba` is used by each estimator (i.e. most of the time for
    `stack_method='auto'` or specifically for `stack_method='predict_proba'`),
    the first column predicted by each estimator will be dropped in the case
    of a binary classification problem. Indeed, both feature will be perfectly
    collinear.

    In some cases (e.g. ordinal regression), one can pass regressors as the
    first layer of the :class:`StackingClassifier`. However, note that `y` will
    be internally encoded in a numerically increasing order or lexicographic
    order. If this ordering is not adequate, one should manually numerically
    encode the classes in the desired order.

    References
    ----------
    .. [1] Wolpert, David H. "Stacked generalization." Neural networks 5.2
       (1992): 241-259.

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.ensemble import RandomForestClassifier
    >>> from sklearn.svm import LinearSVC
    >>> from sklearn.linear_model import LogisticRegression
    >>> from sklearn.preprocessing import StandardScaler
    >>> from sklearn.pipeline import make_pipeline
    >>> from sklearn.ensemble import StackingClassifier
    >>> X, y = load_iris(return_X_y=True)
    >>> estimators = [
    ...     ('rf', RandomForestClassifier(n_estimators=10, random_state=42)),
    ...     ('svr', make_pipeline(StandardScaler(),
    ...                           LinearSVC(random_state=42)))
    ... ]
    >>> clf = StackingClassifier(
    ...     estimators=estimators, final_estimator=LogisticRegression()
    ... )
    >>> from sklearn.model_selection import train_test_split
    >>> X_train, X_test, y_train, y_test = train_test_split(
    ...     X, y, stratify=y, random_state=42
    ... )
    >>> clf.fit(X_train, y_train).score(X_test, y_test)
    0.9...
    """
    # Extend the shared stacking constraints with the classifier-specific
    # `stack_method` options.
    _parameter_constraints: dict = {
        **_BaseStacking._parameter_constraints,
        "stack_method": [
            StrOptions({"auto", "predict_proba", "decision_function", "predict"})
        ],
    }
    def __init__(
        self,
        estimators,
        final_estimator=None,
        *,
        cv=None,
        stack_method="auto",
        n_jobs=None,
        passthrough=False,
        verbose=0,
    ):
        super().__init__(
            estimators=estimators,
            final_estimator=final_estimator,
            cv=cv,
            stack_method=stack_method,
            n_jobs=n_jobs,
            passthrough=passthrough,
            verbose=verbose,
        )
    def _validate_final_estimator(self):
        # Default final estimator is a LogisticRegression; reject non-classifiers.
        self._clone_final_estimator(default=LogisticRegression())
        if not is_classifier(self.final_estimator_):
            raise ValueError(
                "'final_estimator' parameter should be a classifier. Got {}".format(
                    self.final_estimator_
                )
            )
    def _validate_estimators(self):
        """Overload the method of `_BaseHeterogeneousEnsemble` to be more
        lenient towards the type of `estimators`.

        Regressors can be accepted for some cases such as ordinal regression.
        """
        if len(self.estimators) == 0:
            raise ValueError(
                "Invalid 'estimators' attribute, 'estimators' should be a "
                "non-empty list of (string, estimator) tuples."
            )
        names, estimators = zip(*self.estimators)
        self._validate_names(names)
        has_estimator = any(est != "drop" for est in estimators)
        if not has_estimator:
            raise ValueError(
                "All estimators are dropped. At least one is required "
                "to be an estimator."
            )
        return names, estimators
    def fit(self, X, y, **fit_params):
        """Fit the estimators.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training vectors, where `n_samples` is the number of samples and
            `n_features` is the number of features.

        y : array-like of shape (n_samples,)
            Target values. Note that `y` will be internally encoded in
            numerically increasing order or lexicographic order. If the order
            matter (e.g. for ordinal regression), one should numerically encode
            the target `y` before calling :term:`fit`.

        **fit_params : dict
            Parameters to pass to the underlying estimators.

            .. versionadded:: 1.6

                Only available if `enable_metadata_routing=True`, which can be
                set by using ``sklearn.set_config(enable_metadata_routing=True)``.
                See :ref:`Metadata Routing User Guide <metadata_routing>` for
                more details.

        Returns
        -------
        self : object
            Returns a fitted instance of estimator.
        """
        _raise_for_params(fit_params, self, "fit", allow=["sample_weight"])
        check_classification_targets(y)
        if type_of_target(y) == "multilabel-indicator":
            # One LabelEncoder per output column in the multilabel case.
            self._label_encoder = [LabelEncoder().fit(yk) for yk in y.T]
            self.classes_ = [le.classes_ for le in self._label_encoder]
            y_encoded = np.array(
                [
                    self._label_encoder[target_idx].transform(target)
                    for target_idx, target in enumerate(y.T)
                ]
            ).T
        else:
            self._label_encoder = LabelEncoder().fit(y)
            self.classes_ = self._label_encoder.classes_
            y_encoded = self._label_encoder.transform(y)
        return super().fit(X, y_encoded, **fit_params)
    @available_if(
        _estimator_has("predict", delegates=("final_estimator_", "final_estimator"))
    )
    def predict(self, X, **predict_params):
        """Predict target for X.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training vectors, where `n_samples` is the number of samples and
            `n_features` is the number of features.

        **predict_params : dict of str -> obj
            Parameters to the `predict` called by the `final_estimator`. Note
            that this may be used to return uncertainties from some estimators
            with `return_std` or `return_cov`. Be aware that it will only
            account for uncertainty in the final estimator.

            - If `enable_metadata_routing=False` (default):
              Parameters directly passed to the `predict` method of the
              `final_estimator`.
            - If `enable_metadata_routing=True`: Parameters safely routed to
              the `predict` method of the `final_estimator`. See :ref:`Metadata
              Routing User Guide <metadata_routing>` for more details.

            .. versionchanged:: 1.6
                `**predict_params` can be routed via metadata routing API.

        Returns
        -------
        y_pred : ndarray of shape (n_samples,) or (n_samples, n_output)
            Predicted targets.
        """
        if _routing_enabled():
            routed_params = process_routing(self, "predict", **predict_params)
        else:
            # TODO(SLEP6): remove when metadata routing cannot be disabled.
            routed_params = Bunch()
            routed_params.final_estimator_ = Bunch(predict={})
            routed_params.final_estimator_.predict = predict_params
        y_pred = super().predict(X, **routed_params.final_estimator_["predict"])
        # Map the encoded predictions back to the original class labels.
        if isinstance(self._label_encoder, list):
            # Handle the multilabel-indicator case
            y_pred = np.array(
                [
                    self._label_encoder[target_idx].inverse_transform(target)
                    for target_idx, target in enumerate(y_pred.T)
                ]
            ).T
        else:
            y_pred = self._label_encoder.inverse_transform(y_pred)
        return y_pred
    @available_if(
        _estimator_has(
            "predict_proba", delegates=("final_estimator_", "final_estimator")
        )
    )
    def predict_proba(self, X):
        """Predict class probabilities for `X` using the final estimator.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training vectors, where `n_samples` is the number of samples and
            `n_features` is the number of features.

        Returns
        -------
        probabilities : ndarray of shape (n_samples, n_classes) or \
            list of ndarray of shape (n_output,)
            The class probabilities of the input samples.
        """
        check_is_fitted(self)
        y_pred = self.final_estimator_.predict_proba(self.transform(X))
        if isinstance(self._label_encoder, list):
            # Handle the multilabel-indicator cases
            y_pred = np.array([preds[:, 0] for preds in y_pred]).T
        return y_pred
    @available_if(
        _estimator_has(
            "decision_function", delegates=("final_estimator_", "final_estimator")
        )
    )
    def decision_function(self, X):
        """Decision function for samples in `X` using the final estimator.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training vectors, where `n_samples` is the number of samples and
            `n_features` is the number of features.

        Returns
        -------
        decisions : ndarray of shape (n_samples,), (n_samples, n_classes), \
            or (n_samples, n_classes * (n_classes-1) / 2)
            The decision function computed the final estimator.
        """
        check_is_fitted(self)
        return self.final_estimator_.decision_function(self.transform(X))
    def transform(self, X):
        """Return class labels or probabilities for X for each estimator.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training vectors, where `n_samples` is the number of samples and
            `n_features` is the number of features.

        Returns
        -------
        y_preds : ndarray of shape (n_samples, n_estimators) or \
            (n_samples, n_classes * n_estimators)
            Prediction outputs for each estimator.
        """
        return self._transform(X)
    def _sk_visual_block_(self):
        # If final_estimator's default changes then this should be
        # updated.
        if self.final_estimator is None:
            final_estimator = LogisticRegression()
        else:
            final_estimator = self.final_estimator
        return super()._sk_visual_block_with_final_estimator(final_estimator)
class StackingRegressor(RegressorMixin, _BaseStacking):
"""Stack of estimators with a final regressor.
Stacked generalization consists in stacking the output of individual
estimator and use a regressor to compute the final prediction. Stacking
allows to use the strength of each individual estimator by using their
output as input of a final estimator.
Note that `estimators_` are fitted on the full `X` while `final_estimator_`
is trained using cross-validated predictions of the base estimators using
`cross_val_predict`.
Read more in the :ref:`User Guide <stacking>`.
.. versionadded:: 0.22
Parameters
----------
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/ensemble/_forest.py | sklearn/ensemble/_forest.py | """
Forest of trees-based ensemble methods.
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import threading
from abc import ABCMeta, abstractmethod
from numbers import Integral, Real
from warnings import catch_warnings, simplefilter, warn
import numpy as np
from scipy.sparse import hstack as sparse_hstack
from scipy.sparse import issparse
from sklearn.base import (
ClassifierMixin,
MultiOutputMixin,
RegressorMixin,
TransformerMixin,
_fit_context,
is_classifier,
)
from sklearn.ensemble._base import BaseEnsemble, _partition_estimators
from sklearn.exceptions import DataConversionWarning
from sklearn.metrics import accuracy_score, r2_score
from sklearn.preprocessing import OneHotEncoder
from sklearn.tree import (
BaseDecisionTree,
DecisionTreeClassifier,
DecisionTreeRegressor,
ExtraTreeClassifier,
ExtraTreeRegressor,
)
from sklearn.tree._tree import DOUBLE, DTYPE
from sklearn.utils import check_random_state, compute_sample_weight
from sklearn.utils._param_validation import Interval, RealNotInt, StrOptions
from sklearn.utils._tags import get_tags
from sklearn.utils.multiclass import check_classification_targets, type_of_target
from sklearn.utils.parallel import Parallel, delayed
from sklearn.utils.validation import (
_check_feature_names_in,
_check_sample_weight,
_num_samples,
check_is_fitted,
validate_data,
)
# Public estimators exported by this module.
__all__ = [
    "ExtraTreesClassifier",
    "ExtraTreesRegressor",
    "RandomForestClassifier",
    "RandomForestRegressor",
    "RandomTreesEmbedding",
]
# Largest 32-bit signed integer; presumably the exclusive upper bound for
# per-tree random seeds drawn elsewhere in this module — TODO confirm.
MAX_INT = np.iinfo(np.int32).max
def _get_n_samples_bootstrap(n_samples, max_samples):
"""
Get the number of samples in a bootstrap sample.
Parameters
----------
n_samples : int
Number of samples in the dataset.
max_samples : int or float
The maximum number of samples to draw from the total available:
- if float, this indicates a fraction of the total and should be
the interval `(0.0, 1.0]`;
- if int, this indicates the exact number of samples;
- if None, this indicates the total number of samples.
Returns
-------
n_samples_bootstrap : int
The total number of samples to draw for the bootstrap sample.
"""
if max_samples is None:
return n_samples
if isinstance(max_samples, Integral):
if max_samples > n_samples:
msg = "`max_samples` must be <= n_samples={} but got value {}"
raise ValueError(msg.format(n_samples, max_samples))
return max_samples
if isinstance(max_samples, Real):
return max(round(n_samples * max_samples), 1)
def _generate_sample_indices(random_state, n_samples, n_samples_bootstrap):
    """Draw the bootstrap sample indices for one tree.

    Private helper used by the _parallel_build_trees function.
    """
    rng = check_random_state(random_state)
    # Sample `n_samples_bootstrap` indices with replacement from [0, n_samples).
    return rng.randint(0, n_samples, n_samples_bootstrap, dtype=np.int32)
def _generate_unsampled_indices(random_state, n_samples, n_samples_bootstrap):
    """Return the out-of-bag indices for one tree.

    Private helper used by the forest._set_oob_score function.
    """
    # Re-draw the same bootstrap sample (the RNG seed makes this deterministic)
    # and count how often each sample was selected.
    drawn = _generate_sample_indices(random_state, n_samples, n_samples_bootstrap)
    counts = np.bincount(drawn, minlength=n_samples)
    # Samples drawn zero times are out-of-bag.
    return np.flatnonzero(counts == 0)
def _parallel_build_trees(
    tree,
    bootstrap,
    X,
    y,
    sample_weight,
    tree_idx,
    n_trees,
    verbose=0,
    class_weight=None,
    n_samples_bootstrap=None,
    missing_values_in_feature_mask=None,
):
    """
    Private function used to fit a single tree in parallel.

    When ``bootstrap`` is True, resampling with replacement is emulated by
    multiplying each sample's weight by the number of times it was drawn,
    instead of materialising a resampled copy of ``X``. Returns the fitted
    ``tree``.
    """
    if verbose > 1:
        print("building tree %d of %d" % (tree_idx + 1, n_trees))
    if bootstrap:
        n_samples = X.shape[0]
        if sample_weight is None:
            curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
        else:
            # Copy so the caller's weights are not mutated below.
            curr_sample_weight = sample_weight.copy()
        indices = _generate_sample_indices(
            tree.random_state, n_samples, n_samples_bootstrap
        )
        # Weight each sample by its draw count: equivalent to bootstrapping.
        sample_counts = np.bincount(indices, minlength=n_samples)
        curr_sample_weight *= sample_counts
        if class_weight == "subsample":
            # compute_sample_weight's "auto" mode emits a DeprecationWarning;
            # silence it for this internal use.
            with catch_warnings():
                simplefilter("ignore", DeprecationWarning)
                curr_sample_weight *= compute_sample_weight("auto", y, indices=indices)
        elif class_weight == "balanced_subsample":
            # Rebalance class weights within this tree's bootstrap sample.
            curr_sample_weight *= compute_sample_weight("balanced", y, indices=indices)
        tree._fit(
            X,
            y,
            sample_weight=curr_sample_weight,
            check_input=False,
            missing_values_in_feature_mask=missing_values_in_feature_mask,
        )
    else:
        # No bootstrap: fit on the full data with the user-provided weights.
        tree._fit(
            X,
            y,
            sample_weight=sample_weight,
            check_input=False,
            missing_values_in_feature_mask=missing_values_in_feature_mask,
        )
    return tree
class BaseForest(MultiOutputMixin, BaseEnsemble, metaclass=ABCMeta):
"""
Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
_parameter_constraints: dict = {
"n_estimators": [Interval(Integral, 1, None, closed="left")],
"bootstrap": ["boolean"],
"oob_score": ["boolean", callable],
"n_jobs": [Integral, None],
"random_state": ["random_state"],
"verbose": ["verbose"],
"warm_start": ["boolean"],
"max_samples": [
None,
Interval(RealNotInt, 0.0, 1.0, closed="right"),
Interval(Integral, 1, None, closed="left"),
],
}
    @abstractmethod
    def __init__(
        self,
        estimator,
        n_estimators=100,
        *,
        estimator_params=tuple(),
        bootstrap=False,
        oob_score=False,
        n_jobs=None,
        random_state=None,
        verbose=0,
        warm_start=False,
        class_weight=None,
        max_samples=None,
    ):
        # Per scikit-learn convention, __init__ only stores hyper-parameters;
        # validation happens at fit time (see _parameter_constraints).
        super().__init__(
            estimator=estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params,
        )
        self.bootstrap = bootstrap
        self.oob_score = oob_score
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.verbose = verbose
        self.warm_start = warm_start
        self.class_weight = class_weight
        self.max_samples = max_samples
def apply(self, X):
"""
Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
X_leaves : ndarray of shape (n_samples, n_estimators)
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(
n_jobs=self.n_jobs,
verbose=self.verbose,
prefer="threads",
)(delayed(tree.apply)(X, check_input=False) for tree in self.estimators_)
return np.array(results).T
def decision_path(self, X):
"""
Return the decision path in the forest.
.. versionadded:: 0.18
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
indicator : sparse matrix of shape (n_samples, n_nodes)
Return a node indicator matrix where non zero elements indicates
that the samples goes through the nodes. The matrix is of CSR
format.
n_nodes_ptr : ndarray of shape (n_estimators + 1,)
The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]]
gives the indicator value for the i-th estimator.
"""
X = self._validate_X_predict(X)
indicators = Parallel(
n_jobs=self.n_jobs,
verbose=self.verbose,
prefer="threads",
)(
delayed(tree.decision_path)(X, check_input=False)
for tree in self.estimators_
)
n_nodes = [0]
n_nodes.extend([i.shape[1] for i in indicators])
n_nodes_ptr = np.array(n_nodes).cumsum()
return sparse_hstack(indicators).tocsr(), n_nodes_ptr
    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y, sample_weight=None):
        """
        Build a forest of trees from the training set (X, y).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Internally, its dtype will be converted
            to ``dtype=np.float32``. If a sparse matrix is provided, it will be
            converted into a sparse ``csc_matrix``.
        y : array-like of shape (n_samples,) or (n_samples, n_outputs)
            The target values (class labels in classification, real numbers in
            regression).
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. In the case of
            classification, splits are also ignored if they would result in any
            single class carrying a negative weight in either child node.

        Returns
        -------
        self : object
            Fitted estimator.
        """
        # Validate or convert input data
        if issparse(y):
            raise ValueError("sparse multilabel-indicator for y is not supported.")
        X, y = validate_data(
            self,
            X,
            y,
            multi_output=True,
            accept_sparse="csc",
            dtype=DTYPE,
            ensure_all_finite=False,
        )
        # _compute_missing_values_in_feature_mask checks if X has missing values and
        # will raise an error if the underlying tree base estimator can't handle missing
        # values. Only the criterion is required to determine if the tree supports
        # missing values.
        estimator = type(self.estimator)(criterion=self.criterion)
        missing_values_in_feature_mask = (
            estimator._compute_missing_values_in_feature_mask(
                X, estimator_name=self.__class__.__name__
            )
        )
        if sample_weight is not None:
            sample_weight = _check_sample_weight(sample_weight, X)
        if issparse(X):
            # Pre-sort indices to avoid that each individual tree of the
            # ensemble sorts the indices.
            X.sort_indices()
        y = np.atleast_1d(y)
        if y.ndim == 2 and y.shape[1] == 1:
            warn(
                (
                    "A column-vector y was passed when a 1d array was"
                    " expected. Please change the shape of y to "
                    "(n_samples,), for example using ravel()."
                ),
                DataConversionWarning,
                stacklevel=2,
            )
        if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity against vs
            # [:, np.newaxis] that does not.
            y = np.reshape(y, (-1, 1))
        if self.criterion == "poisson":
            # Poisson deviance requires non-negative targets with positive sum.
            if np.any(y < 0):
                raise ValueError(
                    "Some value(s) of y are negative which is "
                    "not allowed for Poisson regression."
                )
            if np.sum(y) <= 0:
                raise ValueError(
                    "Sum of y is not strictly positive which "
                    "is necessary for Poisson regression."
                )
        self._n_samples, self.n_outputs_ = y.shape
        # Subclass hook: classifiers re-encode y as class indices and may
        # produce per-sample class weights; the base returns (y, None).
        y, expanded_class_weight = self._validate_y_class_weight(y)
        if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
            y = np.ascontiguousarray(y, dtype=DOUBLE)
        if expanded_class_weight is not None:
            # Fold class weights into the per-sample weights.
            if sample_weight is not None:
                sample_weight = sample_weight * expanded_class_weight
            else:
                sample_weight = expanded_class_weight
        if not self.bootstrap and self.max_samples is not None:
            raise ValueError(
                "`max_sample` cannot be set if `bootstrap=False`. "
                "Either switch to `bootstrap=True` or set "
                "`max_sample=None`."
            )
        elif self.bootstrap:
            n_samples_bootstrap = _get_n_samples_bootstrap(
                n_samples=X.shape[0], max_samples=self.max_samples
            )
        else:
            n_samples_bootstrap = None
        self._n_samples_bootstrap = n_samples_bootstrap
        self._validate_estimator()
        if not self.bootstrap and self.oob_score:
            raise ValueError("Out of bag estimation only available if bootstrap=True")
        random_state = check_random_state(self.random_state)
        if not self.warm_start or not hasattr(self, "estimators_"):
            # Free allocated memory, if any
            self.estimators_ = []
        n_more_estimators = self.n_estimators - len(self.estimators_)
        if n_more_estimators < 0:
            raise ValueError(
                "n_estimators=%d must be larger or equal to "
                "len(estimators_)=%d when warm_start==True"
                % (self.n_estimators, len(self.estimators_))
            )
        elif n_more_estimators == 0:
            warn(
                "Warm-start fitting without increasing n_estimators does not "
                "fit new trees."
            )
        else:
            if self.warm_start and len(self.estimators_) > 0:
                # We draw from the random state to get the random state we
                # would have got if we hadn't used a warm_start.
                random_state.randint(MAX_INT, size=len(self.estimators_))
            trees = [
                self._make_estimator(append=False, random_state=random_state)
                for i in range(n_more_estimators)
            ]
            # Parallel loop: we prefer the threading backend as the Cython code
            # for fitting the trees is internally releasing the Python GIL
            # making threading more efficient than multiprocessing in
            # that case. However, for joblib 0.12+ we respect any
            # parallel_backend contexts set at a higher level,
            # since correctness does not rely on using threads.
            trees = Parallel(
                n_jobs=self.n_jobs,
                verbose=self.verbose,
                prefer="threads",
            )(
                delayed(_parallel_build_trees)(
                    t,
                    self.bootstrap,
                    X,
                    y,
                    sample_weight,
                    i,
                    len(trees),
                    verbose=self.verbose,
                    class_weight=self.class_weight,
                    n_samples_bootstrap=n_samples_bootstrap,
                    missing_values_in_feature_mask=missing_values_in_feature_mask,
                )
                for i, t in enumerate(trees)
            )
            # Collect newly grown trees
            self.estimators_.extend(trees)
        if self.oob_score and (
            n_more_estimators > 0 or not hasattr(self, "oob_score_")
        ):
            y_type = type_of_target(y)
            if y_type == "unknown" or (
                is_classifier(self) and y_type == "multiclass-multioutput"
            ):
                # FIXME: we could consider to support multiclass-multioutput if
                # we introduce or reuse a constructor parameter (e.g.
                # oob_score) allowing our user to pass a callable defining the
                # scoring strategy on OOB sample.
                raise ValueError(
                    "The type of target cannot be used to compute OOB "
                    f"estimates. Got {y_type} while only the following are "
                    "supported: continuous, continuous-multioutput, binary, "
                    "multiclass, multilabel-indicator."
                )
            if callable(self.oob_score):
                self._set_oob_score_and_attributes(
                    X, y, scoring_function=self.oob_score
                )
            else:
                self._set_oob_score_and_attributes(X, y)
        # Decapsulate classes_ attributes
        if hasattr(self, "classes_") and self.n_outputs_ == 1:
            self.n_classes_ = self.n_classes_[0]
            self.classes_ = self.classes_[0]
        return self
    @abstractmethod
    def _set_oob_score_and_attributes(self, X, y, scoring_function=None):
        """Compute and set the OOB score and attributes.

        Concrete subclasses are expected to set the OOB fitted attributes
        (e.g. ``oob_score_``, which ``fit`` checks for) from out-of-bag
        predictions.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data matrix.
        y : ndarray of shape (n_samples, n_outputs)
            The target matrix.
        scoring_function : callable, default=None
            Scoring function for OOB score. Default depends on whether
            this is a regression (R2 score) or classification problem
            (accuracy score).
        """
def _compute_oob_predictions(self, X, y):
"""Compute and set the OOB score.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix.
y : ndarray of shape (n_samples, n_outputs)
The target matrix.
Returns
-------
oob_pred : ndarray of shape (n_samples, n_classes, n_outputs) or \
(n_samples, 1, n_outputs)
The OOB predictions.
"""
# Prediction requires X to be in CSR format
if issparse(X):
X = X.tocsr()
n_samples = y.shape[0]
n_outputs = self.n_outputs_
if is_classifier(self) and hasattr(self, "n_classes_"):
# n_classes_ is an ndarray at this stage
# all the supported type of target will have the same number of
# classes in all outputs
oob_pred_shape = (n_samples, self.n_classes_[0], n_outputs)
else:
# for regression, n_classes_ does not exist and we create an empty
# axis to be consistent with the classification case and make
# the array operations compatible with the 2 settings
oob_pred_shape = (n_samples, 1, n_outputs)
oob_pred = np.zeros(shape=oob_pred_shape, dtype=np.float64)
n_oob_pred = np.zeros((n_samples, n_outputs), dtype=np.int64)
n_samples_bootstrap = _get_n_samples_bootstrap(
n_samples,
self.max_samples,
)
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state,
n_samples,
n_samples_bootstrap,
)
y_pred = self._get_oob_predictions(estimator, X[unsampled_indices, :])
oob_pred[unsampled_indices, ...] += y_pred
n_oob_pred[unsampled_indices, :] += 1
for k in range(n_outputs):
if (n_oob_pred == 0).any():
warn(
(
"Some inputs do not have OOB scores. This probably means "
"too few trees were used to compute any reliable OOB "
"estimates."
),
UserWarning,
)
n_oob_pred[n_oob_pred == 0] = 1
oob_pred[..., k] /= n_oob_pred[..., [k]]
return oob_pred
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""
Validate X whenever one tries to predict, apply, predict_proba."""
check_is_fitted(self)
if self.estimators_[0]._support_missing_values(X):
ensure_all_finite = "allow-nan"
else:
ensure_all_finite = True
X = validate_data(
self,
X,
dtype=DTYPE,
accept_sparse="csr",
reset=False,
ensure_all_finite=ensure_all_finite,
)
if issparse(X) and (X.indices.dtype != np.intc or X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based sparse matrices")
return X
@property
def feature_importances_(self):
"""
The impurity-based feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance.
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
Returns
-------
feature_importances_ : ndarray of shape (n_features,)
The values of this array sum to 1, unless all trees are single node
trees consisting of only the root node, in which case it will be an
array of zeros.
"""
check_is_fitted(self)
all_importances = Parallel(n_jobs=self.n_jobs, prefer="threads")(
delayed(getattr)(tree, "feature_importances_")
for tree in self.estimators_
if tree.tree_.node_count > 1
)
if not all_importances:
return np.zeros(self.n_features_in_, dtype=np.float64)
all_importances = np.mean(all_importances, axis=0, dtype=np.float64)
return all_importances / np.sum(all_importances)
def _get_estimators_indices(self):
# Get drawn indices along both sample and feature axes
for tree in self.estimators_:
if not self.bootstrap:
yield np.arange(self._n_samples, dtype=np.int32)
else:
# tree.random_state is actually an immutable integer seed rather
# than a mutable RandomState instance, so it's safe to use it
# repeatedly when calling this property.
seed = tree.random_state
# Operations accessing random_state must be performed identically
# to those in `_parallel_build_trees()`
yield _generate_sample_indices(
seed, self._n_samples, self._n_samples_bootstrap
)
@property
def estimators_samples_(self):
"""The subset of drawn samples for each base estimator.
Returns a dynamically generated list of indices identifying
the samples used for fitting each member of the ensemble, i.e.,
the in-bag samples.
Note: the list is re-created at each call to the property in order
to reduce the object memory footprint by not storing the sampling
data. Thus fetching the property may be slower than expected.
"""
return [sample_indices for sample_indices in self._get_estimators_indices()]
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
# Only the criterion is required to determine if the tree supports
# missing values
estimator = type(self.estimator)(criterion=self.criterion)
tags.input_tags.allow_nan = get_tags(estimator).input_tags.allow_nan
return tags
def _accumulate_prediction(predict, X, out, lock):
"""
This is a utility function for joblib's Parallel.
It can't go locally in ForestClassifier or ForestRegressor, because joblib
complains that it cannot pickle it when placed there.
"""
prediction = predict(X, check_input=False)
with lock:
if len(out) == 1:
out[0] += prediction
else:
for i in range(len(out)):
out[i] += prediction[i]
class ForestClassifier(ClassifierMixin, BaseForest, metaclass=ABCMeta):
    """
    Base class for forest of trees-based classifiers.

    Warning: This class should not be used directly. Use derived classes
    instead.
    """
    @abstractmethod
    def __init__(
        self,
        estimator,
        n_estimators=100,
        *,
        estimator_params=tuple(),
        bootstrap=False,
        oob_score=False,
        n_jobs=None,
        random_state=None,
        verbose=0,
        warm_start=False,
        class_weight=None,
        max_samples=None,
    ):
        """Forward every constructor parameter to the forest base class."""
        super().__init__(
            estimator=estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params,
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            class_weight=class_weight,
            max_samples=max_samples,
        )
@staticmethod
def _get_oob_predictions(tree, X):
"""Compute the OOB predictions for an individual tree.
Parameters
----------
tree : DecisionTreeClassifier object
A single decision tree classifier.
X : ndarray of shape (n_samples, n_features)
The OOB samples.
Returns
-------
y_pred : ndarray of shape (n_samples, n_classes, n_outputs)
The OOB associated predictions.
"""
y_pred = tree.predict_proba(X, check_input=False)
y_pred = np.asarray(y_pred)
if y_pred.ndim == 2:
# binary and multiclass
y_pred = y_pred[..., np.newaxis]
else:
# Roll the first `n_outputs` axis to the last axis. We will reshape
# from a shape of (n_outputs, n_samples, n_classes) to a shape of
# (n_samples, n_classes, n_outputs).
y_pred = np.rollaxis(y_pred, axis=0, start=3)
return y_pred
def _set_oob_score_and_attributes(self, X, y, scoring_function=None):
"""Compute and set the OOB score and attributes.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix.
y : ndarray of shape (n_samples, n_outputs)
The target matrix.
scoring_function : callable, default=None
Scoring function for OOB score. Defaults to `accuracy_score`.
"""
self.oob_decision_function_ = super()._compute_oob_predictions(X, y)
if self.oob_decision_function_.shape[-1] == 1:
# drop the n_outputs axis if there is a single output
self.oob_decision_function_ = self.oob_decision_function_.squeeze(axis=-1)
if scoring_function is None:
scoring_function = accuracy_score
self.oob_score_ = scoring_function(
y, np.argmax(self.oob_decision_function_, axis=1)
)
def _validate_y_class_weight(self, y):
check_classification_targets(y)
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
y_store_unique_indices = np.zeros(y.shape, dtype=int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(
y[:, k], return_inverse=True
)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
valid_presets = ("balanced", "balanced_subsample")
if isinstance(self.class_weight, str):
if self.class_weight not in valid_presets:
raise ValueError(
"Valid presets for class_weight include "
'"balanced" and "balanced_subsample".'
'Given "%s".' % self.class_weight
)
if self.warm_start:
warn(
'class_weight presets "balanced" or '
'"balanced_subsample" are '
"not recommended for warm_start if the fitted data "
"differs from the full dataset. In order to use "
'"balanced" weights, use compute_class_weight '
'("balanced", classes, y). In place of y you can use '
"a large enough sample of the full training set "
"target to properly estimate the class frequency "
"distributions. Pass the resulting weights as the "
"class_weight parameter."
)
if self.class_weight != "balanced_subsample" or not self.bootstrap:
if self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
expanded_class_weight = compute_sample_weight(class_weight, y_original)
return y, expanded_class_weight
def predict(self, X):
"""
Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : ndarray of shape (n_samples,) or (n_samples, n_outputs)
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
# all dtypes should be the same, so just take the first
class_type = self.classes_[0].dtype
predictions = np.empty((n_samples, self.n_outputs_), dtype=class_type)
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[k], axis=1), axis=0
)
return predictions
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/ensemble/_iforest.py | sklearn/ensemble/_iforest.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numbers
import threading
from numbers import Integral, Real
from warnings import warn
import numpy as np
from scipy.sparse import issparse
from sklearn.base import OutlierMixin, _fit_context
from sklearn.ensemble._bagging import BaseBagging
from sklearn.tree import ExtraTreeRegressor
from sklearn.tree._tree import DTYPE as tree_dtype
from sklearn.utils import check_array, check_random_state, gen_batches
from sklearn.utils._chunking import get_chunk_n_rows
from sklearn.utils._param_validation import Interval, RealNotInt, StrOptions
from sklearn.utils.parallel import Parallel, delayed
from sklearn.utils.validation import (
_check_sample_weight,
_num_samples,
check_is_fitted,
validate_data,
)
__all__ = ["IsolationForest"]
def _parallel_compute_tree_depths(
tree,
X,
features,
tree_decision_path_lengths,
tree_avg_path_lengths,
depths,
lock,
):
"""Parallel computation of isolation tree depth."""
if features is None:
X_subset = X
else:
X_subset = X[:, features]
leaves_index = tree.apply(X_subset, check_input=False)
with lock:
depths += (
tree_decision_path_lengths[leaves_index]
+ tree_avg_path_lengths[leaves_index]
- 1.0
)
class IsolationForest(OutlierMixin, BaseBagging):
    """
    Isolation Forest Algorithm.

    Return the anomaly score of each sample using the IsolationForest algorithm

    The IsolationForest 'isolates' observations by randomly selecting a feature
    and then randomly selecting a split value between the maximum and minimum
    values of the selected feature.

    Since recursive partitioning can be represented by a tree structure, the
    number of splittings required to isolate a sample is equivalent to the path
    length from the root node to the terminating node.

    This path length, averaged over a forest of such random trees, is a
    measure of normality and our decision function.

    Random partitioning produces noticeably shorter paths for anomalies.
    Hence, when a forest of random trees collectively produce shorter path
    lengths for particular samples, they are highly likely to be anomalies.

    Read more in the :ref:`User Guide <isolation_forest>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    n_estimators : int, default=100
        The number of base estimators in the ensemble.

    max_samples : "auto", int or float, default="auto"
        The number of samples to draw from X to train each base estimator.
        - If int, then draw `max_samples` samples.
        - If float, then draw `max_samples * X.shape[0]` samples.
        - If "auto", then `max_samples=min(256, n_samples)`.
        If max_samples is larger than the number of samples provided,
        all samples will be used for all trees (no sampling).

    contamination : 'auto' or float, default='auto'
        The amount of contamination of the data set, i.e. the proportion
        of outliers in the data set. Used when fitting to define the threshold
        on the scores of the samples.
        - If 'auto', the threshold is determined as in the
          original paper.
        - If float, the contamination should be in the range (0, 0.5].

        .. versionchanged:: 0.22
           The default value of ``contamination`` changed from 0.1
           to ``'auto'``.

    max_features : int or float, default=1.0
        The number of features to draw from X to train each base estimator.
        - If int, then draw `max_features` features.
        - If float, then draw `max(1, int(max_features * n_features_in_))` features.
        Note: using a float number less than 1.0 or integer less than number of
        features will enable feature subsampling and leads to a longer runtime.

    bootstrap : bool, default=False
        If True, individual trees are fit on random subsets of the training
        data sampled with replacement. If False, sampling without replacement
        is performed.

    n_jobs : int, default=None
        The number of jobs to run in parallel for :meth:`fit`. ``None`` means 1
        unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using
        all processors. See :term:`Glossary <n_jobs>` for more details.

    random_state : int, RandomState instance or None, default=None
        Controls the pseudo-randomness of the selection of the feature
        and split values for each branching step and each tree in the forest.
        Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.

    verbose : int, default=0
        Controls the verbosity of the tree building process.

    warm_start : bool, default=False
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest. See :term:`the Glossary <warm_start>`.

        .. versionadded:: 0.21

    Attributes
    ----------
    estimator_ : :class:`~sklearn.tree.ExtraTreeRegressor` instance
        The child estimator template used to create the collection of
        fitted sub-estimators.

        .. versionadded:: 1.2
           `base_estimator_` was renamed to `estimator_`.

    estimators_ : list of ExtraTreeRegressor instances
        The collection of fitted sub-estimators.

    estimators_features_ : list of ndarray
        The subset of drawn features for each base estimator.

    estimators_samples_ : list of ndarray
        The subset of drawn samples (i.e., the in-bag samples) for each base
        estimator.

    max_samples_ : int
        The actual number of samples.

    offset_ : float
        Offset used to define the decision function from the raw scores. We
        have the relation: ``decision_function = score_samples - offset_``.
        ``offset_`` is defined as follows. When the contamination parameter is
        set to "auto", the offset is equal to -0.5 as the scores of inliers are
        close to 0 and the scores of outliers are close to -1. When a
        contamination parameter different than "auto" is provided, the offset
        is defined in such a way we obtain the expected number of outliers
        (samples with decision function < 0) in training.

        .. versionadded:: 0.20

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    sklearn.covariance.EllipticEnvelope : An object for detecting outliers in a
        Gaussian distributed dataset.
    sklearn.svm.OneClassSVM : Unsupervised Outlier Detection.
        Estimate the support of a high-dimensional distribution.
        The implementation is based on libsvm.
    sklearn.neighbors.LocalOutlierFactor : Unsupervised Outlier Detection
        using Local Outlier Factor (LOF).

    Notes
    -----
    The implementation is based on an ensemble of ExtraTreeRegressor. The
    maximum depth of each tree is set to ``ceil(log_2(n))`` where
    :math:`n` is the number of samples used to build the tree
    (see [1]_ for more details).

    References
    ----------
    .. [1] F. T. Liu, K. M. Ting and Z. -H. Zhou.
           :doi:`"Isolation forest." <10.1109/ICDM.2008.17>`
           2008 Eighth IEEE International Conference on Data Mining (ICDM),
           2008, pp. 413-422.
    .. [2] F. T. Liu, K. M. Ting and Z. -H. Zhou.
           :doi:`"Isolation-based anomaly detection."
           <10.1145/2133360.2133363>` ACM Transactions on
           Knowledge Discovery from Data (TKDD) 6.1 (2012): 1-39.

    Examples
    --------
    >>> from sklearn.ensemble import IsolationForest
    >>> X = [[-1.1], [0.3], [0.5], [100]]
    >>> clf = IsolationForest(random_state=0).fit(X)
    >>> clf.predict([[0.1], [0], [90]])
    array([ 1,  1, -1])

    For an example of using isolation forest for anomaly detection see
    :ref:`sphx_glr_auto_examples_ensemble_plot_isolation_forest.py`.
    """

    # Declarative parameter-validation table; checked by the `_fit_context`
    # decorator applied to `fit`.
    _parameter_constraints: dict = {
        "n_estimators": [Interval(Integral, 1, None, closed="left")],
        "max_samples": [
            StrOptions({"auto"}),
            Interval(Integral, 1, None, closed="left"),
            Interval(RealNotInt, 0, 1, closed="right"),
        ],
        "contamination": [
            StrOptions({"auto"}),
            Interval(Real, 0, 0.5, closed="right"),
        ],
        "max_features": [
            Integral,
            Interval(Real, 0, 1, closed="right"),
        ],
        "bootstrap": ["boolean"],
        "n_jobs": [Integral, None],
        "random_state": ["random_state"],
        "verbose": ["verbose"],
        "warm_start": ["boolean"],
    }
    def __init__(
        self,
        *,
        n_estimators=100,
        max_samples="auto",
        contamination="auto",
        max_features=1.0,
        bootstrap=False,
        n_jobs=None,
        random_state=None,
        verbose=0,
        warm_start=False,
    ):
        """Store hyper-parameters and configure the bagging base class."""
        super().__init__(
            estimator=None,
            # here above max_features has no links with self.max_features
            bootstrap=bootstrap,
            bootstrap_features=False,
            n_estimators=n_estimators,
            max_samples=max_samples,
            max_features=max_features,
            warm_start=warm_start,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
        )
        # Stored as given; the implied score threshold is resolved in `fit`.
        self.contamination = contamination
def _get_estimator(self):
return ExtraTreeRegressor(
# here max_features has no links with self.max_features
max_features=1,
splitter="random",
random_state=self.random_state,
)
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by iforest")
def _parallel_args(self):
# ExtraTreeRegressor releases the GIL, so it's more efficient to use
# a thread-based backend rather than a process-based backend so as
# to avoid suffering from communication overhead and extra memory
# copies. This is only used in the fit method.
return {"prefer": "threads"}
    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None, sample_weight=None):
        """
        Fit estimator.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input samples. Use ``dtype=np.float32`` for maximum
            efficiency. Sparse matrices are also supported, use sparse
            ``csc_matrix`` for maximum efficiency.
        y : Ignored
            Not used, present for API consistency by convention.
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted.

        Returns
        -------
        self : object
            Fitted estimator.
        """
        X = validate_data(
            self, X, accept_sparse=["csc"], dtype=tree_dtype, ensure_all_finite=False
        )
        if sample_weight is not None:
            sample_weight = _check_sample_weight(sample_weight, X, dtype=None)
        if issparse(X):
            # Pre-sort indices to avoid that each individual tree of the
            # ensemble sorts the indices.
            X.sort_indices()
        rnd = check_random_state(self.random_state)
        # Random regression targets: isolation trees split at random, so the
        # trees only need *some* target, not a meaningful one.
        y = rnd.uniform(size=X.shape[0])
        # ensure that max_sample is in [1, n_samples]:
        n_samples = X.shape[0]
        if isinstance(self.max_samples, str) and self.max_samples == "auto":
            max_samples = min(256, n_samples)
        elif isinstance(self.max_samples, numbers.Integral):
            if self.max_samples > n_samples:
                warn(
                    "max_samples (%s) is greater than the "
                    "total number of samples (%s). max_samples "
                    "will be set to n_samples for estimation."
                    % (self.max_samples, n_samples)
                )
                max_samples = n_samples
            else:
                max_samples = self.max_samples
        else:  # max_samples is float
            max_samples = int(self.max_samples * X.shape[0])
        self.max_samples_ = max_samples
        # Depth cap from the paper: ceil(log2(subsample size)).
        max_depth = int(np.ceil(np.log2(max(max_samples, 2))))
        super()._fit(
            X,
            y,
            max_samples=max_samples,
            max_depth=max_depth,
            sample_weight=sample_weight,
            check_input=False,
        )
        # Cache per-tree node depths and average-path-length corrections so
        # scoring does not have to recompute them per call.
        self._average_path_length_per_tree, self._decision_path_lengths = zip(
            *[
                (
                    _average_path_length(tree.tree_.n_node_samples),
                    tree.tree_.compute_node_depths(),
                )
                for tree in self.estimators_
            ]
        )
        if self.contamination == "auto":
            # 0.5 plays a special role as described in the original paper.
            # we take the opposite as we consider the opposite of their score.
            self.offset_ = -0.5
            return self
        # Else, define offset_ wrt contamination parameter
        # To avoid performing input validation a second time we call
        # _score_samples rather than score_samples.
        # _score_samples expects a CSR matrix, so we convert if necessary.
        if issparse(X):
            X = X.tocsr()
        self.offset_ = np.percentile(self._score_samples(X), 100.0 * self.contamination)
        return self
def predict(self, X):
"""
Predict if a particular sample is an outlier or not.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
is_inlier : ndarray of shape (n_samples,)
For each observation, tells whether or not (+1 or -1) it should
be considered as an inlier according to the fitted model.
Notes
-----
The predict method can be parallelized by setting a joblib context. This
inherently does NOT use the ``n_jobs`` parameter initialized in the class,
which is used during ``fit``. This is because, predict may actually be faster
without parallelization for a small number of samples,
such as for 1000 samples or less. The user can set the
number of jobs in the joblib context to control the number of parallel jobs.
.. code-block:: python
from joblib import parallel_backend
# Note, we use threading here as the predict method is not CPU bound.
with parallel_backend("threading", n_jobs=4):
model.predict(X)
"""
check_is_fitted(self)
decision_func = self.decision_function(X)
is_inlier = np.ones_like(decision_func, dtype=int)
is_inlier[decision_func < 0] = -1
return is_inlier
def decision_function(self, X):
"""
Average anomaly score of X of the base classifiers.
The anomaly score of an input sample is computed as
the mean anomaly score of the trees in the forest.
The measure of normality of an observation given a tree is the depth
of the leaf containing this observation, which is equivalent to
the number of splittings required to isolate this point. In case of
several observations n_left in the leaf, the average path length of
an n_left samples isolation tree is added.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
scores : ndarray of shape (n_samples,)
The anomaly score of the input samples.
The lower, the more abnormal. Negative scores represent outliers,
positive scores represent inliers.
Notes
-----
The decision_function method can be parallelized by setting a joblib context.
This inherently does NOT use the ``n_jobs`` parameter initialized in the class,
which is used during ``fit``. This is because, calculating the score may
actually be faster without parallelization for a small number of samples,
such as for 1000 samples or less.
The user can set the number of jobs in the joblib context to control the
number of parallel jobs.
.. code-block:: python
from joblib import parallel_backend
# Note, we use threading here as the decision_function method is
# not CPU bound.
with parallel_backend("threading", n_jobs=4):
model.decision_function(X)
"""
# We subtract self.offset_ to make 0 be the threshold value for being
# an outlier:
return self.score_samples(X) - self.offset_
def score_samples(self, X):
    """Opposite of the anomaly score defined in the original paper.

    The anomaly score of an input sample is computed as
    the mean anomaly score of the trees in the forest.

    The measure of normality of an observation given a tree is the depth
    of the leaf containing this observation, which is equivalent to
    the number of splittings required to isolate this point. In case of
    several observations n_left in the leaf, the average path length of
    an n_left samples isolation tree is added.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        The input samples.

    Returns
    -------
    scores : ndarray of shape (n_samples,)
        The anomaly score of the input samples.
        The lower, the more abnormal.

    Notes
    -----
    The score function method can be parallelized by setting a joblib context. This
    inherently does NOT use the ``n_jobs`` parameter initialized in the class,
    which is used during ``fit``. This is because, calculating the score may
    actually be faster without parallelization for a small number of samples,
    such as for 1000 samples or less.
    The user can set the number of jobs in the joblib context to control the
    number of parallel jobs.

    .. code-block:: python

        from joblib import parallel_backend

        # Note, we use threading here as the score_samples method is not CPU bound.
        with parallel_backend("threading", n_jobs=4):
            model.score_samples(X)
    """
    # Check data. `reset=False` keeps the feature count/names check against
    # what was seen in `fit`; non-finite values are allowed here
    # (`ensure_all_finite=False`), consistent with the NaN tag advertised by
    # `__sklearn_tags__`.
    X = validate_data(
        self,
        X,
        accept_sparse="csr",
        dtype=tree_dtype,
        reset=False,
        ensure_all_finite=False,
    )

    return self._score_samples(X)
def _score_samples(self, X):
    """Compute ``score_samples`` on already-validated input.

    Re-running input validation here would strip feature names from ``X``,
    so it is intentionally skipped.
    """
    # Code structure from ForestClassifier/predict_proba
    check_is_fitted(self)

    # Scores follow the "bigger is better" convention: negate the chunked
    # anomaly measure so that less abnormal samples get larger values.
    chunked = self._compute_chunked_score_samples(X)
    return -chunked
def _compute_chunked_score_samples(self, X):
    """Compute anomaly scores over row chunks to bound temporary memory."""
    n_samples = _num_samples(X)

    # Only subsample columns when the trees were trained on a strict subset
    # of the features of X.
    if self._max_features == X.shape[1]:
        subsample_features = False
    else:
        subsample_features = True

    # We get as many rows as possible within our working_memory budget
    # (defined by sklearn.get_config()['working_memory']) to store
    # self._max_features in each row during computation.
    #
    # Note:
    #  - this will get at least 1 row, even if 1 row of score will
    #    exceed working_memory.
    #  - this does only account for temporary memory usage while loading
    #    the data needed to compute the scores -- the returned scores
    #    themselves are 1D.
    chunk_n_rows = get_chunk_n_rows(
        row_bytes=16 * self._max_features, max_n_rows=n_samples
    )
    slices = gen_batches(n_samples, chunk_n_rows)

    scores = np.zeros(n_samples, order="f")

    for sl in slices:
        # compute score on the slices of test samples:
        scores[sl] = self._compute_score_samples(X[sl], subsample_features)

    return scores
def _compute_score_samples(self, X, subsample_features):
    """
    Compute the score of each samples in X going through the extra trees.

    Parameters
    ----------
    X : array-like or sparse matrix
        Data matrix.

    subsample_features : bool
        Whether features should be subsampled.

    Returns
    -------
    scores : ndarray of shape (n_samples,)
        The score of each sample in X.
    """
    n_samples = X.shape[0]

    # Accumulator shared by all workers; each tree adds its depths into it
    # under `lock` (hence require="sharedmem" below).
    depths = np.zeros(n_samples, order="f")

    average_path_length_max_samples = _average_path_length([self._max_samples])

    # Note: we use default n_jobs value, i.e. sequential computation, which
    # we expect to be more performant that parallelizing for small number
    # of samples, e.g. < 1k samples. Default n_jobs value can be overridden
    # by using joblib.parallel_backend context manager around
    # ._compute_score_samples. Using a higher n_jobs may speed up the
    # computation of the scores, e.g. for > 1k samples. See
    # https://github.com/scikit-learn/scikit-learn/pull/28622 for more
    # details.
    lock = threading.Lock()
    Parallel(
        verbose=self.verbose,
        require="sharedmem",
    )(
        delayed(_parallel_compute_tree_depths)(
            tree,
            X,
            features if subsample_features else None,
            self._decision_path_lengths[tree_idx],
            self._average_path_length_per_tree[tree_idx],
            depths,
            lock,
        )
        for tree_idx, (tree, features) in enumerate(
            zip(self.estimators_, self.estimators_features_)
        )
    )

    denominator = len(self.estimators_) * average_path_length_max_samples
    scores = 2 ** (
        # For a single training sample, denominator and depth are 0.
        # Therefore, we set the score manually to 1.
        -np.divide(
            depths, denominator, out=np.ones_like(depths), where=denominator != 0
        )
    )
    return scores
def __sklearn_tags__(self):
    # Advertise NaN support to the common input checks: validation in this
    # estimator uses `ensure_all_finite=False`.
    tags = super().__sklearn_tags__()
    tags.input_tags.allow_nan = True
    return tags
def _average_path_length(n_samples_leaf):
    """
    The average path length in an n_samples iTree, which is equal to
    the average path length of an unsuccessful BST search since the
    latter has the same structure as an isolation tree.

    Parameters
    ----------
    n_samples_leaf : array-like of shape (n_samples,)
        The number of training samples in each test sample leaf, for
        each estimators.

    Returns
    -------
    average_path_length : ndarray of shape (n_samples,)
    """
    n_samples_leaf = check_array(n_samples_leaf, ensure_2d=False)
    original_shape = n_samples_leaf.shape
    flat = n_samples_leaf.reshape((1, -1))

    # Three regimes: n <= 1 (no split needed, length 0), n == 2 (exactly
    # one split, length 1), and the general closed form otherwise.
    result = np.zeros(flat.shape)
    is_degenerate = flat <= 1
    is_pair = flat == 2
    is_general = ~(is_degenerate | is_pair)

    result[is_pair] = 1.0
    n = flat[is_general]
    result[is_general] = (
        2.0 * (np.log(n - 1.0) + np.euler_gamma) - 2.0 * (n - 1.0) / n
    )

    return result.reshape(original_shape)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/ensemble/_bagging.py | sklearn/ensemble/_bagging.py | """Bagging meta-estimator."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import itertools
import numbers
from abc import ABCMeta, abstractmethod
from functools import partial
from numbers import Integral
from warnings import warn
import numpy as np
from sklearn.base import ClassifierMixin, RegressorMixin, _fit_context
from sklearn.ensemble._base import BaseEnsemble, _partition_estimators
from sklearn.metrics import accuracy_score, r2_score
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import Bunch, _safe_indexing, check_random_state, column_or_1d
from sklearn.utils._mask import indices_to_mask
from sklearn.utils._param_validation import HasMethods, Interval, RealNotInt
from sklearn.utils._tags import get_tags
from sklearn.utils.metadata_routing import (
MetadataRouter,
MethodMapping,
_raise_for_params,
_routing_enabled,
get_routing_for_object,
process_routing,
)
from sklearn.utils.metaestimators import available_if
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.parallel import Parallel, delayed
from sklearn.utils.random import sample_without_replacement
from sklearn.utils.validation import (
_check_method_params,
_check_sample_weight,
_estimator_has,
check_is_fitted,
has_fit_parameter,
validate_data,
)
__all__ = ["BaggingClassifier", "BaggingRegressor"]

# Largest value usable as a seed with numpy's legacy RandomState API.
MAX_INT = np.iinfo(np.int32).max
def _get_n_samples_bootstrap(n_samples, max_samples, sample_weight):
"""
Get the number of samples in a bootstrap sample.
Parameters
----------
n_samples : int
Number of samples in the dataset.
max_samples : None, int or float
The maximum number of samples to draw.
- If None, then draw `n_samples` samples.
- If int, then draw `max_samples` samples.
- If float, then draw `max_samples * n_samples` unweighted samples or
`max_samples * sample_weight.sum()` weighted samples.
sample_weight : array of shape (n_samples,) or None
Sample weights with frequency semantics when `max_samples` is explicitly
set to a float or integer value. When keeping the `max_samples=None` default
value, the equivalence between fitting with integer weighted data points or
integer repeated data points is no longer guaranteed because the effective
bootstrap size is no longer guaranteed to be equivalent.
Returns
-------
n_samples_bootstrap : int
The total number of samples to draw for the bootstrap sample.
"""
if max_samples is None:
return n_samples
elif isinstance(max_samples, Integral):
return max_samples
if sample_weight is None:
weighted_n_samples = n_samples
weighted_n_samples_msg = f"the number of samples is {weighted_n_samples} "
else:
weighted_n_samples = sample_weight.sum()
weighted_n_samples_msg = (
f"the total sum of sample weights is {weighted_n_samples} "
)
# max_samples Real fractional value relative to weighted_n_samples
n_samples_bootstrap = max(int(max_samples * weighted_n_samples), 1)
# Warn when number of bootstrap samples is suspiciously small
# This heuristic for "suspiciously small" might be adapted if found
# unsuitable in practice
if n_samples_bootstrap < max(10, n_samples ** (1 / 3)):
warn(
f"Using the fractional value {max_samples=} when {weighted_n_samples_msg}"
f"results in a low number ({n_samples_bootstrap}) of bootstrap samples. "
"We recommend passing `max_samples` as an integer instead."
)
return n_samples_bootstrap
def _generate_indices(random_state, bootstrap, n_population, n_samples):
"""Draw randomly sampled indices."""
# Draw sample indices
if bootstrap:
indices = random_state.randint(0, n_population, n_samples)
else:
indices = sample_without_replacement(
n_population, n_samples, random_state=random_state
)
return indices
def _generate_bagging_indices(
    random_state,
    bootstrap_features,
    bootstrap_samples,
    n_features,
    n_samples,
    max_features,
    max_samples,
    sample_weight,
):
    """Randomly draw the feature and sample indices for one estimator."""
    # Accept an int seed, None, or a RandomState uniformly.
    random_state = check_random_state(random_state)

    # Features are always drawn uniformly.
    feature_indices = _generate_indices(
        random_state, bootstrap_features, n_features, max_features
    )

    # Samples are drawn uniformly unless weights are provided, in which
    # case the normalized weights act as draw probabilities.
    if sample_weight is None:
        sample_indices = _generate_indices(
            random_state, bootstrap_samples, n_samples, max_samples
        )
    else:
        draw_probabilities = sample_weight / np.sum(sample_weight)
        sample_indices = random_state.choice(
            n_samples,
            max_samples,
            replace=bootstrap_samples,
            p=draw_probabilities,
        )

    return feature_indices, sample_indices
def _consumes_sample_weight(estimator):
    """Return True when ``estimator.fit`` consumes ``sample_weight``."""
    if _routing_enabled():
        # With metadata routing enabled, ask the router whether `fit`
        # requests/consumes sample_weight.
        return get_routing_for_object(estimator).consumes("fit", ("sample_weight",))
    # Otherwise fall back to plain signature inspection.
    return has_fit_parameter(estimator, "sample_weight")
def _parallel_build_estimators(
    n_estimators,
    ensemble,
    X,
    y,
    sample_weight,
    seeds,
    total_n_estimators,
    verbose,
    check_input,
    fit_params,
):
    """Private function used to build a batch of estimators within a job."""
    # Retrieve settings
    n_samples, n_features = X.shape
    max_features = ensemble._max_features
    max_samples = ensemble._max_samples
    bootstrap = ensemble.bootstrap
    bootstrap_features = ensemble.bootstrap_features
    # Whether the base estimator's `fit` exposes a `check_input` flag,
    # letting the caller skip redundant input validation.
    has_check_input = has_fit_parameter(ensemble.estimator_, "check_input")
    requires_feature_indexing = bootstrap_features or max_features != n_features
    consumes_sample_weight = _consumes_sample_weight(ensemble.estimator_)

    # Build estimators
    estimators = []
    estimators_features = []

    for i in range(n_estimators):
        if verbose > 1:
            print(
                "Building estimator %d of %d for this parallel run (total %d)..."
                % (i + 1, n_estimators, total_n_estimators)
            )

        random_state = seeds[i]
        estimator = ensemble._make_estimator(append=False, random_state=random_state)

        if has_check_input:
            estimator_fit = partial(estimator.fit, check_input=check_input)
        else:
            estimator_fit = estimator.fit

        # Draw random feature, sample indices (using normalized sample_weight
        # as probabilities if provided).
        features, indices = _generate_bagging_indices(
            random_state,
            bootstrap_features,
            bootstrap,
            n_features,
            n_samples,
            max_features,
            max_samples,
            sample_weight,
        )

        fit_params_ = fit_params.copy()

        # Note: Row sampling can be achieved either through setting sample_weight or
        # by indexing. The former is more memory efficient. Therefore, use this method
        # if possible, otherwise use indexing.
        if consumes_sample_weight:
            # Row sampling by setting sample_weight: the multiplicity of each
            # drawn index becomes its weight.
            indices_as_sample_weight = np.bincount(indices, minlength=n_samples)
            fit_params_["sample_weight"] = indices_as_sample_weight
            X_ = X[:, features] if requires_feature_indexing else X
            estimator_fit(X_, y, **fit_params_)
        else:
            # Row sampling by indexing
            y_ = _safe_indexing(y, indices)
            X_ = _safe_indexing(X, indices)
            fit_params_ = _check_method_params(X, params=fit_params_, indices=indices)
            if requires_feature_indexing:
                X_ = X_[:, features]
            estimator_fit(X_, y_, **fit_params_)

        estimators.append(estimator)
        estimators_features.append(features)

    return estimators, estimators_features
def _parallel_predict_proba(
estimators,
estimators_features,
X,
n_classes,
predict_params=None,
predict_proba_params=None,
):
"""Private function used to compute (proba-)predictions within a job."""
n_samples = X.shape[0]
proba = np.zeros((n_samples, n_classes))
for estimator, features in zip(estimators, estimators_features):
if hasattr(estimator, "predict_proba"):
proba_estimator = estimator.predict_proba(
X[:, features], **(predict_params or {})
)
if n_classes == len(estimator.classes_):
proba += proba_estimator
else:
proba[:, estimator.classes_] += proba_estimator[
:, range(len(estimator.classes_))
]
else:
# Resort to voting
predictions = estimator.predict(
X[:, features], **(predict_proba_params or {})
)
for i in range(n_samples):
proba[i, predictions[i]] += 1
return proba
def _parallel_predict_log_proba(estimators, estimators_features, X, n_classes, params):
"""Private function used to compute log probabilities within a job."""
n_samples = X.shape[0]
log_proba = np.empty((n_samples, n_classes))
log_proba.fill(-np.inf)
all_classes = np.arange(n_classes, dtype=int)
for estimator, features in zip(estimators, estimators_features):
log_proba_estimator = estimator.predict_log_proba(X[:, features], **params)
if n_classes == len(estimator.classes_):
log_proba = np.logaddexp(log_proba, log_proba_estimator)
else:
log_proba[:, estimator.classes_] = np.logaddexp(
log_proba[:, estimator.classes_],
log_proba_estimator[:, range(len(estimator.classes_))],
)
missing = np.setdiff1d(all_classes, estimator.classes_)
log_proba[:, missing] = np.logaddexp(log_proba[:, missing], -np.inf)
return log_proba
def _parallel_decision_function(estimators, estimators_features, X, params):
"""Private function used to compute decisions within a job."""
return sum(
estimator.decision_function(X[:, features], **params)
for estimator, features in zip(estimators, estimators_features)
)
def _parallel_predict_regression(estimators, estimators_features, X, params):
"""Private function used to compute predictions within a job."""
return sum(
estimator.predict(X[:, features], **params)
for estimator, features in zip(estimators, estimators_features)
)
class BaseBagging(BaseEnsemble, metaclass=ABCMeta):
    """Base class for Bagging meta-estimator.

    Warning: This class should not be used directly. Use derived classes
    instead.
    """

    # Declarative constraints checked by `_fit_context` before fitting.
    _parameter_constraints: dict = {
        "estimator": [HasMethods(["fit", "predict"]), None],
        "n_estimators": [Interval(Integral, 1, None, closed="left")],
        "max_samples": [
            None,
            Interval(Integral, 1, None, closed="left"),
            Interval(RealNotInt, 0, 1, closed="right"),
        ],
        "max_features": [
            Interval(Integral, 1, None, closed="left"),
            Interval(RealNotInt, 0, 1, closed="right"),
        ],
        "bootstrap": ["boolean"],
        "bootstrap_features": ["boolean"],
        "oob_score": ["boolean"],
        "warm_start": ["boolean"],
        "n_jobs": [None, Integral],
        "random_state": ["random_state"],
        "verbose": ["verbose"],
    }

    @abstractmethod
    def __init__(
        self,
        estimator=None,
        n_estimators=10,
        *,
        max_samples=None,
        max_features=1.0,
        bootstrap=True,
        bootstrap_features=False,
        oob_score=False,
        warm_start=False,
        n_jobs=None,
        random_state=None,
        verbose=0,
    ):
        # Parameters are stored as-is; validation happens at fit time.
        super().__init__(
            estimator=estimator,
            n_estimators=n_estimators,
        )
        self.max_samples = max_samples
        self.max_features = max_features
        self.bootstrap = bootstrap
        self.bootstrap_features = bootstrap_features
        self.oob_score = oob_score
        self.warm_start = warm_start
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.verbose = verbose

    @_fit_context(
        # BaseBagging.estimator is not validated yet
        prefer_skip_nested_validation=False
    )
    def fit(self, X, y, sample_weight=None, **fit_params):
        """Build a Bagging ensemble of estimators from the training set (X, y).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.

        y : array-like of shape (n_samples,)
            The target values (class labels in classification, real numbers in
            regression).

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted. Used as
            probabilities to sample the training set. Note that the expected
            frequency semantics for the `sample_weight` parameter are only
            fulfilled when sampling with replacement `bootstrap=True` and using
            a float or integer `max_samples` (instead of the default
            `max_samples=None`).

        **fit_params : dict
            Parameters to pass to the underlying estimators.

            .. versionadded:: 1.5

                Only available if `enable_metadata_routing=True`,
                which can be set by using
                ``sklearn.set_config(enable_metadata_routing=True)``.
                See :ref:`Metadata Routing User Guide <metadata_routing>` for
                more details.

        Returns
        -------
        self : object
            Fitted estimator.
        """
        _raise_for_params(fit_params, self, "fit")

        # Convert data (X is required to be 2d and indexable)
        X, y = validate_data(
            self,
            X,
            y,
            accept_sparse=["csr", "csc"],
            dtype=None,
            ensure_all_finite=False,
            multi_output=True,
        )

        if sample_weight is not None:
            sample_weight = _check_sample_weight(sample_weight, X, dtype=None)
            if not self.bootstrap:
                warn(
                    f"When fitting {self.__class__.__name__} with sample_weight "
                    f"it is recommended to use bootstrap=True, got {self.bootstrap}."
                )

        return self._fit(
            X,
            y,
            max_samples=self.max_samples,
            sample_weight=sample_weight,
            **fit_params,
        )

    def _parallel_args(self):
        # Hook for subclasses to pass extra keyword arguments to
        # joblib.Parallel in `_fit`.
        return {}

    def _fit(
        self,
        X,
        y,
        max_samples=None,
        max_depth=None,
        check_input=True,
        sample_weight=None,
        **fit_params,
    ):
        """Build a Bagging ensemble of estimators from the training
        set (X, y).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.

        y : array-like of shape (n_samples,)
            The target values (class labels in classification, real numbers in
            regression).

        max_samples : int or float, default=None
            Argument to use instead of self.max_samples.

        max_depth : int, default=None
            Override value used when constructing base estimator. Only
            supported if the base estimator has a max_depth parameter.

        check_input : bool, default=True
            Override value used when fitting base estimator. Only supported
            if the base estimator has a check_input parameter for fit function.
            If the meta-estimator already checks the input, set this value to
            False to prevent redundant input validation.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted.

        **fit_params : dict, default=None
            Parameters to pass to the :term:`fit` method of the underlying
            estimator.

        Returns
        -------
        self : object
            Fitted estimator.
        """
        random_state = check_random_state(self.random_state)

        # Remap output
        n_samples = X.shape[0]
        self._n_samples = n_samples
        y = self._validate_y(y)

        # Check parameters
        self._validate_estimator(self._get_estimator())

        if _routing_enabled():
            routed_params = process_routing(self, "fit", **fit_params)
        else:
            routed_params = Bunch()
            routed_params.estimator = Bunch(fit=fit_params)

        if max_depth is not None:
            self.estimator_.max_depth = max_depth

        # Validate max_samples
        if max_samples is None:
            max_samples = self.max_samples
        max_samples = _get_n_samples_bootstrap(X.shape[0], max_samples, sample_weight)

        if not self.bootstrap and max_samples > X.shape[0]:
            raise ValueError(
                f"Effective max_samples={max_samples} must be <= n_samples="
                f"{X.shape[0]} to be able to sample without replacement."
            )

        # Store validated integer row sampling value
        self._max_samples = max_samples

        # Validate max_features
        if isinstance(self.max_features, numbers.Integral):
            max_features = self.max_features
        elif isinstance(self.max_features, float):
            max_features = int(self.max_features * self.n_features_in_)

        if max_features > self.n_features_in_:
            raise ValueError("max_features must be <= n_features")

        max_features = max(1, int(max_features))

        # Store validated integer feature sampling value
        self._max_features = max_features

        # Store sample_weight (needed in _get_estimators_indices). Note that
        # we intentionally do not materialize `sample_weight=None` as an array
        # of ones to avoid unnecessarily cluttering trained estimator pickles.
        self._sample_weight = sample_weight

        # Other checks
        if not self.bootstrap and self.oob_score:
            raise ValueError("Out of bag estimation only available if bootstrap=True")

        if self.warm_start and self.oob_score:
            raise ValueError("Out of bag estimate only available if warm_start=False")

        if hasattr(self, "oob_score_") and self.warm_start:
            del self.oob_score_

        if not self.warm_start or not hasattr(self, "estimators_"):
            # Free allocated memory, if any
            self.estimators_ = []
            self.estimators_features_ = []

        n_more_estimators = self.n_estimators - len(self.estimators_)

        if n_more_estimators < 0:
            raise ValueError(
                "n_estimators=%d must be larger or equal to "
                "len(estimators_)=%d when warm_start==True"
                % (self.n_estimators, len(self.estimators_))
            )

        elif n_more_estimators == 0:
            warn(
                "Warm-start fitting without increasing n_estimators does not "
                "fit new trees."
            )
            return self

        # Parallel loop
        n_jobs, n_estimators, starts = _partition_estimators(
            n_more_estimators, self.n_jobs
        )
        total_n_estimators = sum(n_estimators)

        # Advance random state to state after training
        # the first n_estimators
        if self.warm_start and len(self.estimators_) > 0:
            random_state.randint(MAX_INT, size=len(self.estimators_))

        seeds = random_state.randint(MAX_INT, size=n_more_estimators)
        self._seeds = seeds

        all_results = Parallel(
            n_jobs=n_jobs, verbose=self.verbose, **self._parallel_args()
        )(
            delayed(_parallel_build_estimators)(
                n_estimators[i],
                self,
                X,
                y,
                sample_weight,
                seeds[starts[i] : starts[i + 1]],
                total_n_estimators,
                verbose=self.verbose,
                check_input=check_input,
                fit_params=routed_params.estimator.fit,
            )
            for i in range(n_jobs)
        )

        # Reduce
        self.estimators_ += list(
            itertools.chain.from_iterable(t[0] for t in all_results)
        )
        self.estimators_features_ += list(
            itertools.chain.from_iterable(t[1] for t in all_results)
        )

        if self.oob_score:
            self._set_oob_score(X, y)

        return self

    @abstractmethod
    def _set_oob_score(self, X, y):
        """Calculate out of bag predictions and score."""

    def _validate_y(self, y):
        # Default target validation: flatten (n, 1) column vectors, pass
        # multi-output targets through unchanged. Subclasses may override.
        if len(y.shape) == 1 or y.shape[1] == 1:
            return column_or_1d(y, warn=True)
        return y

    def _get_estimators_indices(self):
        # Get drawn indices along both sample and feature axes
        for seed in self._seeds:
            # Operations accessing random_state must be performed identically
            # to those in `_parallel_build_estimators()`
            feature_indices, sample_indices = _generate_bagging_indices(
                seed,
                self.bootstrap_features,
                self.bootstrap,
                self.n_features_in_,
                self._n_samples,
                self._max_features,
                self._max_samples,
                self._sample_weight,
            )

            yield feature_indices, sample_indices

    @property
    def estimators_samples_(self):
        """
        The subset of drawn samples for each base estimator.

        Returns a dynamically generated list of indices identifying
        the samples used for fitting each member of the ensemble, i.e.,
        the in-bag samples.

        Note: the list is re-created at each call to the property in order
        to reduce the object memory footprint by not storing the sampling
        data. Thus fetching the property may be slower than expected.
        """
        return [sample_indices for _, sample_indices in self._get_estimators_indices()]

    def get_metadata_routing(self):
        """Get metadata routing of this object.

        Please check :ref:`User Guide <metadata_routing>` on how the routing
        mechanism works.

        .. versionadded:: 1.5

        Returns
        -------
        routing : MetadataRouter
            A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
            routing information.
        """
        router = MetadataRouter(owner=self)

        method_mapping = MethodMapping()
        method_mapping.add(caller="fit", callee="fit").add(
            caller="decision_function", callee="decision_function"
        )

        # the router needs to be built depending on whether the sub-estimator has a
        # `predict_proba` method (as BaggingClassifier decides dynamically at runtime):
        if hasattr(self._get_estimator(), "predict_proba"):
            (
                method_mapping.add(caller="predict", callee="predict_proba").add(
                    caller="predict_proba", callee="predict_proba"
                )
            )

        else:
            (
                method_mapping.add(caller="predict", callee="predict").add(
                    caller="predict_proba", callee="predict"
                )
            )

        # the router needs to be built depending on whether the sub-estimator has a
        # `predict_log_proba` method (as BaggingClassifier decides dynamically at
        # runtime):
        if hasattr(self._get_estimator(), "predict_log_proba"):
            method_mapping.add(caller="predict_log_proba", callee="predict_log_proba")

        else:
            # if `predict_log_proba` is not available in BaggingClassifier's
            # sub-estimator, the routing should go to its `predict_proba` if it is
            # available or else to its `predict` method; according to how
            # `sample_weight` is passed to the respective methods dynamically at
            # runtime:
            if hasattr(self._get_estimator(), "predict_proba"):
                method_mapping.add(caller="predict_log_proba", callee="predict_proba")

            else:
                method_mapping.add(caller="predict_log_proba", callee="predict")

        router.add(estimator=self._get_estimator(), method_mapping=method_mapping)
        return router

    @abstractmethod
    def _get_estimator(self):
        """Resolve which estimator to return."""

    def __sklearn_tags__(self):
        # Inherit sparse/NaN input support from the configured base estimator.
        tags = super().__sklearn_tags__()
        tags.input_tags.sparse = get_tags(self._get_estimator()).input_tags.sparse
        tags.input_tags.allow_nan = get_tags(self._get_estimator()).input_tags.allow_nan
        return tags
class BaggingClassifier(ClassifierMixin, BaseBagging):
"""A Bagging classifier.
A Bagging classifier is an ensemble meta-estimator that fits base
classifiers each on random subsets of the original dataset and then
aggregate their individual predictions (either by voting or by averaging)
to form a final prediction. Such a meta-estimator can typically be used as
a way to reduce the variance of a black-box estimator (e.g., a decision
tree), by introducing randomization into its construction procedure and
then making an ensemble out of it.
This algorithm encompasses several works from the literature. When random
subsets of the dataset are drawn as random subsets of the samples, then
this algorithm is known as Pasting [1]_. If samples are drawn with
replacement, then the method is known as Bagging [2]_. When random subsets
of the dataset are drawn as random subsets of the features, then the method
is known as Random Subspaces [3]_. Finally, when base estimators are built
on subsets of both samples and features, then the method is known as
Random Patches [4]_.
Read more in the :ref:`User Guide <bagging>`.
.. versionadded:: 0.15
Parameters
----------
estimator : object, default=None
The base estimator to fit on random subsets of the dataset.
If None, then the base estimator is a
:class:`~sklearn.tree.DecisionTreeClassifier`.
.. versionadded:: 1.2
`base_estimator` was renamed to `estimator`.
n_estimators : int, default=10
The number of base estimators in the ensemble.
max_samples : int or float, default=None
The number of samples to draw from X to train each base estimator (with
replacement by default, see `bootstrap` for more details).
- If None, then draw `X.shape[0]` samples irrespective of `sample_weight`.
- If int, then draw `max_samples` samples.
- If float, then draw `max_samples * X.shape[0]` unweighted samples or
`max_samples * sample_weight.sum()` weighted samples.
max_features : int or float, default=1.0
The number of features to draw from X to train each base estimator (
without replacement by default, see `bootstrap_features` for more
details).
- If int, then draw `max_features` features.
- If float, then draw `max(1, int(max_features * n_features_in_))` features.
bootstrap : bool, default=True
Whether samples are drawn with replacement. If False, sampling without
replacement is performed. If fitting with `sample_weight`, it is
strongly recommended to choose True, as only drawing with replacement
will ensure the expected frequency semantics of `sample_weight`.
bootstrap_features : bool, default=False
Whether features are drawn with replacement.
oob_score : bool, default=False
Whether to use out-of-bag samples to estimate
the generalization error. Only available if bootstrap=True.
warm_start : bool, default=False
When set to True, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit
a whole new ensemble. See :term:`the Glossary <warm_start>`.
.. versionadded:: 0.17
*warm_start* constructor parameter.
n_jobs : int, default=None
The number of jobs to run in parallel for both :meth:`fit` and
:meth:`predict`. ``None`` means 1 unless in a
:obj:`joblib.parallel_backend` context. ``-1`` means using all
processors. See :term:`Glossary <n_jobs>` for more details.
random_state : int, RandomState instance or None, default=None
Controls the random resampling of the original dataset
(sample wise and feature wise).
If the base estimator accepts a `random_state` attribute, a different
seed is generated for each instance in the ensemble.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
verbose : int, default=0
Controls the verbosity when fitting and predicting.
Attributes
----------
estimator_ : estimator
The base estimator from which the ensemble is grown.
.. versionadded:: 1.2
`base_estimator_` was renamed to `estimator_`.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
estimators_ : list of estimators
The collection of fitted base estimators.
estimators_samples_ : list of arrays
The subset of drawn samples (i.e., the in-bag samples) for each base
estimator. Each subset is defined by an array of the indices selected.
estimators_features_ : list of arrays
The subset of drawn features for each base estimator.
classes_ : ndarray of shape (n_classes,)
The classes labels.
n_classes_ : int or list
The number of classes.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
This attribute exists only when ``oob_score`` is True.
oob_decision_function_ : ndarray of shape (n_samples, n_classes)
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN. This attribute exists
only when ``oob_score`` is True.
See Also
--------
BaggingRegressor : A Bagging regressor.
References
----------
.. [1] L. Breiman, "Pasting small votes for classification in large
databases and on-line", Machine Learning, 36(1), 85-103, 1999.
.. [2] L. Breiman, "Bagging predictors", Machine Learning, 24(2), 123-140,
1996.
.. [3] T. Ho, "The random subspace method for constructing decision
forests", Pattern Analysis and Machine Intelligence, 20(8), 832-844,
1998.
.. [4] G. Louppe and P. Geurts, "Ensembles on Random Patches", Machine
Learning and Knowledge Discovery in Databases, 346-361, 2012.
Examples
--------
>>> from sklearn.svm import SVC
>>> from sklearn.ensemble import BaggingClassifier
>>> from sklearn.datasets import make_classification
>>> X, y = make_classification(n_samples=100, n_features=4,
... n_informative=2, n_redundant=0,
... random_state=0, shuffle=False)
>>> clf = BaggingClassifier(estimator=SVC(),
... n_estimators=10, random_state=0).fit(X, y)
>>> clf.predict([[0, 0, 0, 0]])
array([1])
"""
def __init__(
self,
estimator=None,
n_estimators=10,
*,
max_samples=None,
max_features=1.0,
bootstrap=True,
bootstrap_features=False,
oob_score=False,
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/ensemble/__init__.py | sklearn/ensemble/__init__.py | """Ensemble-based methods for classification, regression and anomaly detection."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from sklearn.ensemble._bagging import BaggingClassifier, BaggingRegressor
from sklearn.ensemble._base import BaseEnsemble
from sklearn.ensemble._forest import (
ExtraTreesClassifier,
ExtraTreesRegressor,
RandomForestClassifier,
RandomForestRegressor,
RandomTreesEmbedding,
)
from sklearn.ensemble._gb import GradientBoostingClassifier, GradientBoostingRegressor
from sklearn.ensemble._hist_gradient_boosting.gradient_boosting import (
HistGradientBoostingClassifier,
HistGradientBoostingRegressor,
)
from sklearn.ensemble._iforest import IsolationForest
from sklearn.ensemble._stacking import StackingClassifier, StackingRegressor
from sklearn.ensemble._voting import VotingClassifier, VotingRegressor
from sklearn.ensemble._weight_boosting import AdaBoostClassifier, AdaBoostRegressor
# Public, alphabetically sorted API of :mod:`sklearn.ensemble`.
__all__ = [
    "AdaBoostClassifier",
    "AdaBoostRegressor",
    "BaggingClassifier",
    "BaggingRegressor",
    "BaseEnsemble",
    "ExtraTreesClassifier",
    "ExtraTreesRegressor",
    "GradientBoostingClassifier",
    "GradientBoostingRegressor",
    "HistGradientBoostingClassifier",
    "HistGradientBoostingRegressor",
    "IsolationForest",
    "RandomForestClassifier",
    "RandomForestRegressor",
    "RandomTreesEmbedding",
    "StackingClassifier",
    "StackingRegressor",
    "VotingClassifier",
    "VotingRegressor",
]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/ensemble/_gb.py | sklearn/ensemble/_gb.py | """Gradient Boosted Regression Trees.
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import math
import warnings
from abc import ABCMeta, abstractmethod
from numbers import Integral, Real
from time import time
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, issparse
from sklearn._loss.loss import (
_LOSSES,
AbsoluteError,
ExponentialLoss,
HalfBinomialLoss,
HalfMultinomialLoss,
HalfSquaredError,
HuberLoss,
PinballLoss,
)
from sklearn.base import ClassifierMixin, RegressorMixin, _fit_context, is_classifier
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.ensemble._base import BaseEnsemble
from sklearn.ensemble._gradient_boosting import (
_random_sample_mask,
predict_stage,
predict_stages,
)
from sklearn.exceptions import NotFittedError
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree._tree import DOUBLE, DTYPE, TREE_LEAF
from sklearn.utils import check_array, check_random_state, column_or_1d
from sklearn.utils._param_validation import HasMethods, Interval, StrOptions
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.stats import _weighted_percentile
from sklearn.utils.validation import (
_check_sample_weight,
check_is_fitted,
validate_data,
)
# Copy the shared loss registry from sklearn._loss.loss before extending it,
# so the module-level registry is not mutated, then add the legacy gradient
# boosting loss names ("quantile", "huber") used by the `loss` parameter.
_LOSSES = _LOSSES.copy()
_LOSSES.update(
    {
        "quantile": PinballLoss,
        "huber": HuberLoss,
    }
)
def _safe_divide(numerator, denominator):
"""Prevents overflow and division by zero."""
# This is used for classifiers where the denominator might become zero exactly.
# For instance for log loss, HalfBinomialLoss, if proba=0 or proba=1 exactly, then
# denominator = hessian = 0, and we should set the node value in the line search to
# zero as there is no improvement of the loss possible.
# For numerical safety, we do this already for extremely tiny values.
if abs(denominator) < 1e-150:
return 0.0
else:
# Cast to Python float to trigger Python errors, e.g. ZeroDivisionError,
# without relying on `np.errstate` that is not supported by Pyodide.
result = float(numerator) / float(denominator)
# Cast to Python float to trigger a ZeroDivisionError without relying
# on `np.errstate` that is not supported by Pyodide.
result = float(numerator) / float(denominator)
if math.isinf(result):
warnings.warn("overflow encountered in _safe_divide", RuntimeWarning)
return result
def _init_raw_predictions(X, estimator, loss, use_predict_proba):
"""Return the initial raw predictions.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
The data array.
estimator : object
The estimator to use to compute the predictions.
loss : BaseLoss
An instance of a loss function class.
use_predict_proba : bool
Whether estimator.predict_proba is used instead of estimator.predict.
Returns
-------
raw_predictions : ndarray of shape (n_samples, K)
The initial raw predictions. K is equal to 1 for binary
classification and regression, and equal to the number of classes
for multiclass classification. ``raw_predictions`` is casted
into float64.
"""
# TODO: Use loss.fit_intercept_only where appropriate instead of
# DummyRegressor which is the default given by the `init` parameter,
# see also _init_state.
if use_predict_proba:
# Our parameter validation, set via _fit_context and _parameter_constraints
# already guarantees that estimator has a predict_proba method.
predictions = estimator.predict_proba(X)
if not loss.is_multiclass:
predictions = predictions[:, 1] # probability of positive class
eps = np.finfo(np.float64).eps
predictions = np.clip(predictions, eps, 1 - eps, dtype=np.float64)
else:
predictions = estimator.predict(X).astype(np.float64)
if predictions.ndim == 1:
return loss.link.link(predictions).reshape(-1, 1)
else:
return loss.link.link(predictions)
def _update_terminal_regions(
    loss,
    tree,
    X,
    y,
    neg_gradient,
    raw_prediction,
    sample_weight,
    sample_mask,
    learning_rate=0.1,
    k=0,
):
    """Update the leaf values to be predicted by the tree and raw_prediction.

    The current raw predictions of the model (of this stage) are updated.
    Additionally, the terminal regions (=leaves) of the given tree are updated as well.
    This corresponds to the line search step in "Greedy Function Approximation" by
    Friedman, Algorithm 1 step 5.

    Update equals:
        argmin_{x} loss(y_true, raw_prediction_old + x * tree.value)

    For non-trivial cases like the Binomial loss, the update has no closed formula and
    is an approximation, again, see the Friedman paper.

    Also note that the update formula for the SquaredError is the identity. Therefore,
    in this case, the leaf values don't need an update and only the raw_predictions are
    updated (with the learning rate included).

    Parameters
    ----------
    loss : BaseLoss
    tree : tree.Tree
        The tree object.
    X : ndarray of shape (n_samples, n_features)
        The data array.
    y : ndarray of shape (n_samples,)
        The target labels.
    neg_gradient : ndarray of shape (n_samples,)
        The negative gradient.
    raw_prediction : ndarray of shape (n_samples, n_trees_per_iteration)
        The raw predictions (i.e. values from the tree leaves) of the
        tree ensemble at iteration ``i - 1``.
    sample_weight : ndarray of shape (n_samples,)
        The weight of each sample.
    sample_mask : ndarray of shape (n_samples,)
        The sample mask to be used.
    learning_rate : float, default=0.1
        Learning rate shrinks the contribution of each tree by
        ``learning_rate``.
    k : int, default=0
        The index of the estimator being updated.
    """
    # compute leaf for each sample in ``X``.
    terminal_regions = tree.apply(X)

    # For HalfSquaredError the optimal leaf value is already the tree's fitted
    # mean, so the whole leaf-update step is skipped and only raw_prediction
    # is shifted at the end.
    if not isinstance(loss, HalfSquaredError):
        # mask all which are not in sample mask.
        # -1 is a sentinel: out-of-bag samples never match a real leaf id, so
        # they are excluded from the line search but still updated below.
        masked_terminal_regions = terminal_regions.copy()
        masked_terminal_regions[~sample_mask] = -1

        # NOTE(review): every ``compute_update`` closure below reads ``sw``,
        # which is only (re)bound inside the per-leaf loop further down;
        # Python's late binding of closure variables makes this correct, but
        # the loop must keep assigning ``sw`` before calling the closure.
        if isinstance(loss, HalfBinomialLoss):

            def compute_update(y_, indices, neg_gradient, raw_prediction, k):
                # Make a single Newton-Raphson step, see "Additive Logistic Regression:
                # A Statistical View of Boosting" FHT00 and note that we use a slightly
                # different version (factor 2) of "F" with proba=expit(raw_prediction).
                # Our node estimate is given by:
                #     sum(w * (y - prob)) / sum(w * prob * (1 - prob))
                # we take advantage that: y - prob = neg_gradient
                neg_g = neg_gradient.take(indices, axis=0)
                prob = y_ - neg_g
                # numerator = negative gradient = y - prob
                numerator = np.average(neg_g, weights=sw)
                # denominator = hessian = prob * (1 - prob)
                denominator = np.average(prob * (1 - prob), weights=sw)
                return _safe_divide(numerator, denominator)

        elif isinstance(loss, HalfMultinomialLoss):

            def compute_update(y_, indices, neg_gradient, raw_prediction, k):
                # we take advantage that: y - prob = neg_gradient
                neg_g = neg_gradient.take(indices, axis=0)
                prob = y_ - neg_g
                K = loss.n_classes
                # numerator = negative gradient * (k - 1) / k
                # Note: The factor (k - 1)/k appears in the original papers "Greedy
                # Function Approximation" by Friedman and "Additive Logistic
                # Regression" by Friedman, Hastie, Tibshirani. This factor is, however,
                # wrong or at least arbitrary as it directly multiplies the
                # learning_rate. We keep it for backward compatibility.
                numerator = np.average(neg_g, weights=sw)
                numerator *= (K - 1) / K
                # denominator = (diagonal) hessian = prob * (1 - prob)
                denominator = np.average(prob * (1 - prob), weights=sw)
                return _safe_divide(numerator, denominator)

        elif isinstance(loss, ExponentialLoss):

            def compute_update(y_, indices, neg_gradient, raw_prediction, k):
                neg_g = neg_gradient.take(indices, axis=0)
                # numerator = negative gradient = y * exp(-raw) - (1-y) * exp(raw)
                numerator = np.average(neg_g, weights=sw)
                # denominator = hessian = y * exp(-raw) + (1-y) * exp(raw)
                # if y=0: hessian = exp(raw) = -neg_g
                #    y=1: hessian = exp(-raw) = neg_g
                hessian = neg_g.copy()
                hessian[y_ == 0] *= -1
                denominator = np.average(hessian, weights=sw)
                return _safe_divide(numerator, denominator)

        else:

            def compute_update(y_, indices, neg_gradient, raw_prediction, k):
                # Generic fallback: the optimal constant shift for this leaf is
                # an intercept-only fit of the loss on the leaf's residuals.
                return loss.fit_intercept_only(
                    y_true=y_ - raw_prediction[indices, k],
                    sample_weight=sw,
                )

        # update each leaf (= perform line search)
        for leaf in np.nonzero(tree.children_left == TREE_LEAF)[0]:
            indices = np.nonzero(masked_terminal_regions == leaf)[
                0
            ]  # of terminal regions
            y_ = y.take(indices, axis=0)
            sw = None if sample_weight is None else sample_weight[indices]
            update = compute_update(y_, indices, neg_gradient, raw_prediction, k)

            # TODO: Multiply here by learning rate instead of everywhere else.
            tree.value[leaf, 0, 0] = update

    # update predictions (both in-bag and out-of-bag)
    raw_prediction[:, k] += learning_rate * tree.value[:, 0, 0].take(
        terminal_regions, axis=0
    )
def set_huber_delta(loss, y_true, raw_prediction, sample_weight=None):
    """Calculate and set self.closs.delta based on self.quantile."""
    # Absolute residuals of the current model.
    residual_magnitude = np.abs(y_true - raw_prediction.squeeze())
    # sample_weight is always an ndarray, never None.
    new_delta = _weighted_percentile(
        residual_magnitude, sample_weight, 100 * loss.quantile
    )
    loss.closs.delta = float(new_delta)
class VerboseReporter:
    """Reports verbose output to stdout.

    Parameters
    ----------
    verbose : int
        Verbosity level. If ``verbose==1`` output is printed once in a while
        (when iteration mod verbose_mod is zero).; if larger than 1 then output
        is printed for each update.
    """

    def __init__(self, verbose):
        self.verbose = verbose

    def init(self, est, begin_at_stage=0):
        """Initialize reporter

        Parameters
        ----------
        est : Estimator
            The estimator
        begin_at_stage : int, default=0
            stage at which to begin reporting
        """
        # Assemble the header columns and the matching row format specs.
        columns = ["Iter", "Train Loss"]
        formats = ["{iter:>10d}", "{train_score:>16.4f}"]
        # Out-of-bag improvement is only reported when subsampling.
        if est.subsample < 1:
            columns.append("OOB Improve")
            formats.append("{oob_impr:>16.4f}")
        columns.append("Remaining Time")
        formats.append("{remaining_time:>16s}")
        # print the header line
        print(("%10s " + "%16s " * (len(columns) - 1)) % tuple(columns))
        self.verbose_fmt = " ".join(formats)
        # rows are printed whenever i % verbose_mod == 0
        self.verbose_mod = 1
        self.start_time = time()
        self.begin_at_stage = begin_at_stage

    def update(self, j, est):
        """Update reporter with new iteration.

        Parameters
        ----------
        j : int
            The new iteration.
        est : Estimator
            The estimator.
        """
        do_oob = est.subsample < 1
        # iteration index relative to the stage reporting started at (matters
        # when warm-starting with additional estimators).
        i = j - self.begin_at_stage
        if (i + 1) % self.verbose_mod != 0:
            return
        oob_impr = est.oob_improvement_[j] if do_oob else 0
        # Linear extrapolation of elapsed time over the remaining stages.
        seconds_left = (
            (est.n_estimators - (j + 1)) * (time() - self.start_time) / float(i + 1)
        )
        if seconds_left > 60:
            remaining_time = "{0:.2f}m".format(seconds_left / 60.0)
        else:
            remaining_time = "{0:.2f}s".format(seconds_left)
        print(
            self.verbose_fmt.format(
                iter=j + 1,
                train_score=est.train_score_[j],
                oob_impr=oob_impr,
                remaining_time=remaining_time,
            )
        )
        # At verbosity 1, throttle output exponentially (powers of 10).
        if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
            self.verbose_mod *= 10
class BaseGradientBoosting(BaseEnsemble, metaclass=ABCMeta):
"""Abstract base class for Gradient Boosting."""
_parameter_constraints: dict = {
**DecisionTreeRegressor._parameter_constraints,
"learning_rate": [Interval(Real, 0.0, None, closed="left")],
"n_estimators": [Interval(Integral, 1, None, closed="left")],
"criterion": [StrOptions({"friedman_mse", "squared_error"})],
"subsample": [Interval(Real, 0.0, 1.0, closed="right")],
"verbose": ["verbose"],
"warm_start": ["boolean"],
"validation_fraction": [Interval(Real, 0.0, 1.0, closed="neither")],
"n_iter_no_change": [Interval(Integral, 1, None, closed="left"), None],
"tol": [Interval(Real, 0.0, None, closed="left")],
}
_parameter_constraints.pop("splitter")
_parameter_constraints.pop("monotonic_cst")
@abstractmethod
def __init__(
self,
*,
loss,
learning_rate,
n_estimators,
criterion,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_depth,
min_impurity_decrease,
init,
subsample,
max_features,
ccp_alpha,
random_state,
alpha=0.9,
verbose=0,
max_leaf_nodes=None,
warm_start=False,
validation_fraction=0.1,
n_iter_no_change=None,
tol=1e-4,
):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.criterion = criterion
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.subsample = subsample
self.max_features = max_features
self.max_depth = max_depth
self.min_impurity_decrease = min_impurity_decrease
self.ccp_alpha = ccp_alpha
self.init = init
self.random_state = random_state
self.alpha = alpha
self.verbose = verbose
self.max_leaf_nodes = max_leaf_nodes
self.warm_start = warm_start
self.validation_fraction = validation_fraction
self.n_iter_no_change = n_iter_no_change
self.tol = tol
    @abstractmethod
    def _encode_y(self, y=None, sample_weight=None):
        """Called by fit to validate and encode y.

        Implementations return the (possibly re-encoded) target array;
        ``fit`` assigns the result back to ``y``.
        """
    @abstractmethod
    def _get_loss(self, sample_weight):
        """Get loss object from sklearn._loss.loss.

        Called once per ``fit``; the returned object is stored as
        ``self._loss``.
        """
    def _fit_stage(
        self,
        i,
        X,
        y,
        raw_predictions,
        sample_weight,
        sample_mask,
        random_state,
        X_csc=None,
        X_csr=None,
    ):
        """Fit another stage of ``n_trees_per_iteration_`` trees.

        Fits one regression tree per output column on the negative gradient of
        the current raw predictions, updates the tree leaves via
        ``_update_terminal_regions`` (which also updates ``raw_predictions``
        in place), and stores the trees in ``self.estimators_[i]``.
        """
        original_y = y

        # The Huber loss's transition point depends on the current residuals,
        # so it must be refreshed before computing the gradient.
        if isinstance(self._loss, HuberLoss):
            set_huber_delta(
                loss=self._loss,
                y_true=y,
                raw_prediction=raw_predictions,
                sample_weight=sample_weight,
            )
        # TODO: Without oob, i.e. with self.subsample = 1.0, we could call
        # self._loss.loss_gradient and use it to set train_score_.
        # But note that train_score_[i] is the score AFTER fitting the i-th tree.
        # Note: We need the negative gradient!
        neg_gradient = -self._loss.gradient(
            y_true=y,
            raw_prediction=raw_predictions,
            sample_weight=None,  # We pass sample_weights to the tree directly.
        )
        # 2-d views of shape (n_samples, n_trees_per_iteration_) or (n_samples, 1)
        # on neg_gradient to simplify the loop over n_trees_per_iteration_.
        if neg_gradient.ndim == 1:
            neg_g_view = neg_gradient.reshape((-1, 1))
        else:
            neg_g_view = neg_gradient

        for k in range(self.n_trees_per_iteration_):
            if self._loss.is_multiclass:
                # One-vs-rest binary target for class k.
                y = np.array(original_y == k, dtype=np.float64)

            # induce regression tree on the negative gradient
            tree = DecisionTreeRegressor(
                criterion=self.criterion,
                splitter="best",
                max_depth=self.max_depth,
                min_samples_split=self.min_samples_split,
                min_samples_leaf=self.min_samples_leaf,
                min_weight_fraction_leaf=self.min_weight_fraction_leaf,
                min_impurity_decrease=self.min_impurity_decrease,
                max_features=self.max_features,
                max_leaf_nodes=self.max_leaf_nodes,
                random_state=random_state,
                ccp_alpha=self.ccp_alpha,
            )

            if self.subsample < 1.0:
                # no inplace multiplication!
                # Zero out the weight of out-of-bag samples for this stage.
                sample_weight = sample_weight * sample_mask.astype(np.float64)

            # Trees split faster on CSC, while leaf lookup prefers CSR.
            X = X_csc if X_csc is not None else X
            tree.fit(
                X, neg_g_view[:, k], sample_weight=sample_weight, check_input=False
            )

            # update tree leaves
            X_for_tree_update = X_csr if X_csr is not None else X
            _update_terminal_regions(
                self._loss,
                tree.tree_,
                X_for_tree_update,
                y,
                neg_g_view[:, k],
                raw_predictions,
                sample_weight,
                sample_mask,
                learning_rate=self.learning_rate,
                k=k,
            )

            # add tree to ensemble
            self.estimators_[i, k] = tree

        return raw_predictions
def _set_max_features(self):
"""Set self.max_features_."""
if isinstance(self.max_features, str):
if self.max_features == "auto":
if is_classifier(self):
max_features = max(1, int(np.sqrt(self.n_features_in_)))
else:
max_features = self.n_features_in_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_in_)))
else: # self.max_features == "log2"
max_features = max(1, int(np.log2(self.n_features_in_)))
elif self.max_features is None:
max_features = self.n_features_in_
elif isinstance(self.max_features, Integral):
max_features = self.max_features
else: # float
max_features = max(1, int(self.max_features * self.n_features_in_))
self.max_features_ = max_features
def _init_state(self):
"""Initialize model state and allocate model state data structures."""
self.init_ = self.init
if self.init_ is None:
if is_classifier(self):
self.init_ = DummyClassifier(strategy="prior")
elif isinstance(self._loss, (AbsoluteError, HuberLoss)):
self.init_ = DummyRegressor(strategy="quantile", quantile=0.5)
elif isinstance(self._loss, PinballLoss):
self.init_ = DummyRegressor(strategy="quantile", quantile=self.alpha)
else:
self.init_ = DummyRegressor(strategy="mean")
self.estimators_ = np.empty(
(self.n_estimators, self.n_trees_per_iteration_), dtype=object
)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
# do oob?
if self.subsample < 1.0:
self.oob_improvement_ = np.zeros((self.n_estimators), dtype=np.float64)
self.oob_scores_ = np.zeros((self.n_estimators), dtype=np.float64)
self.oob_score_ = np.nan
def _clear_state(self):
"""Clear the state of the gradient boosting model."""
if hasattr(self, "estimators_"):
self.estimators_ = np.empty((0, 0), dtype=object)
if hasattr(self, "train_score_"):
del self.train_score_
if hasattr(self, "oob_improvement_"):
del self.oob_improvement_
if hasattr(self, "oob_scores_"):
del self.oob_scores_
if hasattr(self, "oob_score_"):
del self.oob_score_
if hasattr(self, "init_"):
del self.init_
if hasattr(self, "_rng"):
del self._rng
def _resize_state(self):
"""Add additional ``n_estimators`` entries to all attributes."""
# self.n_estimators is the number of additional est to fit
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
raise ValueError(
"resize with smaller n_estimators %d < %d"
% (total_n_estimators, self.estimators_[0])
)
self.estimators_ = np.resize(
self.estimators_, (total_n_estimators, self.n_trees_per_iteration_)
)
self.train_score_ = np.resize(self.train_score_, total_n_estimators)
if self.subsample < 1 or hasattr(self, "oob_improvement_"):
# if do oob resize arrays or create new if not available
if hasattr(self, "oob_improvement_"):
self.oob_improvement_ = np.resize(
self.oob_improvement_, total_n_estimators
)
self.oob_scores_ = np.resize(self.oob_scores_, total_n_estimators)
self.oob_score_ = np.nan
else:
self.oob_improvement_ = np.zeros(
(total_n_estimators,), dtype=np.float64
)
self.oob_scores_ = np.zeros((total_n_estimators,), dtype=np.float64)
self.oob_score_ = np.nan
def _is_fitted(self):
return len(getattr(self, "estimators_", [])) > 0
    def _check_initialized(self):
        """Check that the estimator is initialized, raising an error if not."""
        # Delegates to the standard scikit-learn fitted-attribute check.
        check_is_fitted(self)
    @_fit_context(
        # GradientBoosting*.init is not validated yet
        prefer_skip_nested_validation=False
    )
    def fit(self, X, y, sample_weight=None, monitor=None):
        """Fit the gradient boosting model.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        y : array-like of shape (n_samples,)
            Target values (strings or integers in classification, real numbers
            in regression)
            For classification, labels must correspond to classes.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. In the case of
            classification, splits are also ignored if they would result in any
            single class carrying a negative weight in either child node.

        monitor : callable, default=None
            The monitor is called after each iteration with the current
            iteration, a reference to the estimator and the local variables of
            ``_fit_stages`` as keyword arguments ``callable(i, self,
            locals())``. If the callable returns ``True`` the fitting procedure
            is stopped. The monitor can be used for various things such as
            computing held-out estimates, early stopping, model introspect, and
            snapshotting.

        Returns
        -------
        self : object
            Fitted estimator.
        """
        # Without warm start, every call to fit starts from scratch.
        if not self.warm_start:
            self._clear_state()

        # Check input
        # Since check_array converts both X and y to the same dtype, but the
        # trees use different types for X and y, checking them separately.

        X, y = validate_data(
            self,
            X,
            y,
            accept_sparse=["csr", "csc", "coo"],
            dtype=DTYPE,
            multi_output=True,
        )

        sample_weight_is_none = sample_weight is None
        sample_weight = _check_sample_weight(sample_weight, X)
        if sample_weight_is_none:
            y = self._encode_y(y=y, sample_weight=None)
        else:
            y = self._encode_y(y=y, sample_weight=sample_weight)
        y = column_or_1d(y, warn=True)  # TODO: Is this still required?

        self._set_max_features()

        # self.loss is guaranteed to be a string
        self._loss = self._get_loss(sample_weight=sample_weight)

        # Early stopping requested: hold out an internal validation split.
        if self.n_iter_no_change is not None:
            stratify = y if is_classifier(self) else None
            (
                X_train,
                X_val,
                y_train,
                y_val,
                sample_weight_train,
                sample_weight_val,
            ) = train_test_split(
                X,
                y,
                sample_weight,
                random_state=self.random_state,
                test_size=self.validation_fraction,
                stratify=stratify,
            )
            if is_classifier(self):
                if self.n_classes_ != np.unique(y_train).shape[0]:
                    # We choose to error here. The problem is that the init
                    # estimator would be trained on y, which has some missing
                    # classes now, so its predictions would not have the
                    # correct shape.
                    raise ValueError(
                        "The training data after the early stopping split "
                        "is missing some classes. Try using another random "
                        "seed."
                    )
        else:
            X_train, y_train, sample_weight_train = X, y, sample_weight
            X_val = y_val = sample_weight_val = None

        n_samples = X_train.shape[0]

        # First time calling fit.
        if not self._is_fitted():
            # init state
            self._init_state()

            # fit initial model and initialize raw predictions
            if self.init_ == "zero":
                raw_predictions = np.zeros(
                    shape=(n_samples, self.n_trees_per_iteration_),
                    dtype=np.float64,
                )
            else:
                # XXX clean this once we have a support_sample_weight tag
                if sample_weight_is_none:
                    self.init_.fit(X_train, y_train)
                else:
                    msg = (
                        "The initial estimator {} does not support sample "
                        "weights.".format(self.init_.__class__.__name__)
                    )
                    try:
                        self.init_.fit(
                            X_train, y_train, sample_weight=sample_weight_train
                        )
                    except TypeError as e:
                        if "unexpected keyword argument 'sample_weight'" in str(e):
                            # regular estimator without SW support
                            raise ValueError(msg) from e
                        else:  # regular estimator whose input checking failed
                            raise
                    except ValueError as e:
                        if (
                            "pass parameters to specific steps of "
                            "your pipeline using the "
                            "stepname__parameter" in str(e)
                        ):  # pipeline
                            raise ValueError(msg) from e
                        else:  # regular estimator whose input checking failed
                            raise

                raw_predictions = _init_raw_predictions(
                    X_train, self.init_, self._loss, is_classifier(self)
                )

            begin_at_stage = 0

            # The rng state must be preserved if warm_start is True
            self._rng = check_random_state(self.random_state)

        # warm start: this is not the first time fit was called
        else:
            # add more estimators to fitted model
            # invariant: warm_start = True
            if self.n_estimators < self.estimators_.shape[0]:
                raise ValueError(
                    "n_estimators=%d must be larger or equal to "
                    "estimators_.shape[0]=%d when "
                    "warm_start==True" % (self.n_estimators, self.estimators_.shape[0])
                )
            begin_at_stage = self.estimators_.shape[0]
            # The requirements of _raw_predict
            # are more constrained than fit. It accepts only CSR
            # matrices. Finite values have already been checked in _validate_data.
            X_train = check_array(
                X_train,
                dtype=DTYPE,
                order="C",
                accept_sparse="csr",
                ensure_all_finite=False,
            )
            # Resume from the predictions of the already-fitted stages.
            raw_predictions = self._raw_predict(X_train)
            self._resize_state()

        # fit the boosting stages
        n_stages = self._fit_stages(
            X_train,
            y_train,
            raw_predictions,
            sample_weight_train,
            self._rng,
            X_val,
            y_val,
            sample_weight_val,
            begin_at_stage,
            monitor,
        )

        # change shape of arrays after fit (early-stopping or additional ests)
        if n_stages != self.estimators_.shape[0]:
            self.estimators_ = self.estimators_[:n_stages]
            self.train_score_ = self.train_score_[:n_stages]
            if hasattr(self, "oob_improvement_"):
                # OOB scores were computed
                self.oob_improvement_ = self.oob_improvement_[:n_stages]
                self.oob_scores_ = self.oob_scores_[:n_stages]
                self.oob_score_ = self.oob_scores_[-1]

        self.n_estimators_ = n_stages
        return self
def _fit_stages(
self,
X,
y,
raw_predictions,
sample_weight,
random_state,
X_val,
y_val,
sample_weight_val,
begin_at_stage=0,
monitor=None,
):
"""Iteratively fits the stages.
For each stage it computes the progress (OOB, train score)
and delegates to ``_fit_stage``.
Returns the number of stages fit; might differ from ``n_estimators``
due to early stopping.
"""
n_samples = X.shape[0]
do_oob = self.subsample < 1.0
sample_mask = np.ones((n_samples,), dtype=bool)
n_inbag = max(1, int(self.subsample * n_samples))
if self.verbose:
verbose_reporter = VerboseReporter(verbose=self.verbose)
verbose_reporter.init(self, begin_at_stage)
X_csc = csc_matrix(X) if issparse(X) else None
X_csr = csr_matrix(X) if issparse(X) else None
if self.n_iter_no_change is not None:
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/ensemble/_weight_boosting.py | sklearn/ensemble/_weight_boosting.py | """Weight Boosting.
This module contains weight boosting estimators for both classification and
regression.
The module structure is the following:
- The `BaseWeightBoosting` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ from each other in the loss function that is optimized.
- :class:`~sklearn.ensemble.AdaBoostClassifier` implements adaptive boosting
(AdaBoost-SAMME) for classification problems.
- :class:`~sklearn.ensemble.AdaBoostRegressor` implements adaptive boosting
(AdaBoost.R2) for regression problems.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from abc import ABCMeta, abstractmethod
from numbers import Integral, Real
import numpy as np
from sklearn.base import (
ClassifierMixin,
RegressorMixin,
_fit_context,
is_classifier,
is_regressor,
)
from sklearn.ensemble._base import BaseEnsemble
from sklearn.metrics import accuracy_score, r2_score
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import _safe_indexing, check_random_state
from sklearn.utils._param_validation import HasMethods, Interval, StrOptions
from sklearn.utils.extmath import softmax
from sklearn.utils.metadata_routing import (
_raise_for_unsupported_routing,
_RoutingNotSupportedMixin,
)
from sklearn.utils.validation import (
_check_sample_weight,
_num_samples,
check_is_fitted,
has_fit_parameter,
validate_data,
)
# Public names exported by this module.
__all__ = [
    "AdaBoostClassifier",
    "AdaBoostRegressor",
]
class BaseWeightBoosting(BaseEnsemble, metaclass=ABCMeta):
"""Base class for AdaBoost estimators.
Warning: This class should not be used directly. Use derived classes
instead.
"""
_parameter_constraints: dict = {
"estimator": [HasMethods(["fit", "predict"]), None],
"n_estimators": [Interval(Integral, 1, None, closed="left")],
"learning_rate": [Interval(Real, 0, None, closed="neither")],
"random_state": ["random_state"],
}
    @abstractmethod
    def __init__(
        self,
        estimator=None,
        *,
        n_estimators=50,
        estimator_params=tuple(),
        learning_rate=1.0,
        random_state=None,
    ):
        # Template-estimator bookkeeping (estimator, count, per-estimator
        # parameters) is handled by BaseEnsemble.
        super().__init__(
            estimator=estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params,
        )

        self.learning_rate = learning_rate
        self.random_state = random_state
    def _check_X(self, X):
        """Validate X for prediction-time methods."""
        # Only called to validate X in non-fit methods, therefore reset=False
        # (the feature count recorded during fit must not be overwritten).
        return validate_data(
            self,
            X,
            accept_sparse=["csr", "csc"],
            ensure_2d=True,
            allow_nd=True,
            dtype=None,
            reset=False,
        )
    @_fit_context(
        # AdaBoost*.estimator is not validated yet
        prefer_skip_nested_validation=False
    )
    def fit(self, X, y, sample_weight=None):
        """Build a boosted classifier/regressor from the training set (X, y).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. COO, DOK, and LIL are converted to CSR.

        y : array-like of shape (n_samples,)
            The target values.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, the sample weights are initialized to
            1 / n_samples.

        Returns
        -------
        self : object
            Fitted estimator.
        """
        _raise_for_unsupported_routing(self, "fit", sample_weight=sample_weight)
        X, y = validate_data(
            self,
            X,
            y,
            accept_sparse=["csr", "csc"],
            ensure_2d=True,
            allow_nd=True,
            dtype=None,
            y_numeric=is_regressor(self),
        )

        # Copy so the caller's array is not modified; weights are normalized
        # to sum to one before boosting starts.
        sample_weight = _check_sample_weight(
            sample_weight, X, dtype=np.float64, copy=True, ensure_non_negative=True
        )
        sample_weight /= sample_weight.sum()

        # Check parameters
        self._validate_estimator()

        # Clear any previous fit results
        self.estimators_ = []
        self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float64)
        self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float64)

        # Initialization of the random number instance that will be used to
        # generate a seed at each iteration
        random_state = check_random_state(self.random_state)
        epsilon = np.finfo(sample_weight.dtype).eps

        # Remember which samples started with exactly zero weight; they must
        # stay at zero despite the clipping below.
        zero_weight_mask = sample_weight == 0.0
        for iboost in range(self.n_estimators):
            # avoid extremely small sample weight, for details see issue #20320
            sample_weight = np.clip(sample_weight, a_min=epsilon, a_max=None)
            # do not clip sample weights that were exactly zero originally
            sample_weight[zero_weight_mask] = 0.0

            # Boosting step
            sample_weight, estimator_weight, estimator_error = self._boost(
                iboost, X, y, sample_weight, random_state
            )

            # Early termination
            if sample_weight is None:
                break
            self.estimator_weights_[iboost] = estimator_weight
            self.estimator_errors_[iboost] = estimator_error

            # Stop if error is zero
            if estimator_error == 0:
                break

            sample_weight_sum = np.sum(sample_weight)

            if not np.isfinite(sample_weight_sum):
                warnings.warn(
                    (
                        "Sample weights have reached infinite values,"
                        f" at iteration {iboost}, causing overflow. "
                        "Iterations stopped. Try lowering the learning rate."
                    ),
                    stacklevel=2,
                )
                break

            # Stop if the sum of sample weights has become non-positive
            if sample_weight_sum <= 0:
                break

            if iboost < self.n_estimators - 1:
                # Normalize
                sample_weight /= sample_weight_sum

        return self
    @abstractmethod
    def _boost(self, iboost, X, y, sample_weight, random_state):
        """Implement a single boost.

        Warning: This method needs to be overridden by subclasses.

        Parameters
        ----------
        iboost : int
            The index of the current boost iteration.

        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. COO, DOK, and LIL are converted to CSR.

        y : array-like of shape (n_samples,)
            The target values (class labels).

        sample_weight : array-like of shape (n_samples,)
            The current sample weights.

        random_state : RandomState
            The current random number generator.

        Returns
        -------
        sample_weight : array-like of shape (n_samples,) or None
            The reweighted sample weights.
            If None then boosting has terminated early.

        estimator_weight : float
            The weight for the current boost.
            If None then boosting has terminated early.

        error : float
            The classification error for the current boost.
            If None then boosting has terminated early.
        """
        pass
def staged_score(self, X, y, sample_weight=None):
"""Return staged scores for X, y.
This generator method yields the ensemble score after each iteration of
boosting and therefore allows monitoring, such as to determine the
score on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape (n_samples,)
Labels for X.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Yields
------
z : float
"""
X = self._check_X(X)
for y_pred in self.staged_predict(X):
if is_classifier(self):
yield accuracy_score(y, y_pred, sample_weight=sample_weight)
else:
yield r2_score(y, y_pred, sample_weight=sample_weight)
@property
def feature_importances_(self):
"""The impurity-based feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance.
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
Returns
-------
feature_importances_ : ndarray of shape (n_features,)
The feature importances.
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError(
"Estimator not fitted, call `fit` before `feature_importances_`."
)
try:
norm = self.estimator_weights_.sum()
return (
sum(
weight * clf.feature_importances_
for weight, clf in zip(self.estimator_weights_, self.estimators_)
)
/ norm
)
except AttributeError as e:
raise AttributeError(
"Unable to compute feature importances "
"since estimator does not have a "
"feature_importances_ attribute"
) from e
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.sparse = True
return tags
class AdaBoostClassifier(
    _RoutingNotSupportedMixin, ClassifierMixin, BaseWeightBoosting
):
    """An AdaBoost classifier.
    An AdaBoost [1]_ classifier is a meta-estimator that begins by fitting a
    classifier on the original dataset and then fits additional copies of the
    classifier on the same dataset but where the weights of incorrectly
    classified instances are adjusted such that subsequent classifiers focus
    more on difficult cases.
    This class implements the algorithm based on [2]_.
    Read more in the :ref:`User Guide <adaboost>`.
    .. versionadded:: 0.14
    Parameters
    ----------
    estimator : object, default=None
        The base estimator from which the boosted ensemble is built.
        Support for sample weighting is required, as well as proper
        ``classes_`` and ``n_classes_`` attributes. If ``None``, then
        the base estimator is :class:`~sklearn.tree.DecisionTreeClassifier`
        initialized with `max_depth=1`.
        .. versionadded:: 1.2
           `base_estimator` was renamed to `estimator`.
    n_estimators : int, default=50
        The maximum number of estimators at which boosting is terminated.
        In case of perfect fit, the learning procedure is stopped early.
        Values must be in the range `[1, inf)`.
    learning_rate : float, default=1.0
        Weight applied to each classifier at each boosting iteration. A higher
        learning rate increases the contribution of each classifier. There is
        a trade-off between the `learning_rate` and `n_estimators` parameters.
        Values must be in the range `(0.0, inf)`.
    random_state : int, RandomState instance or None, default=None
        Controls the random seed given at each `estimator` at each
        boosting iteration.
        Thus, it is only used when `estimator` exposes a `random_state`.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    Attributes
    ----------
    estimator_ : estimator
        The base estimator from which the ensemble is grown.
        .. versionadded:: 1.2
           `base_estimator_` was renamed to `estimator_`.
    estimators_ : list of classifiers
        The collection of fitted sub-estimators.
    classes_ : ndarray of shape (n_classes,)
        The classes labels.
    n_classes_ : int
        The number of classes.
    estimator_weights_ : ndarray of floats
        Weights for each estimator in the boosted ensemble.
    estimator_errors_ : ndarray of floats
        Classification error for each estimator in the boosted
        ensemble.
    feature_importances_ : ndarray of shape (n_features,)
        The impurity-based feature importances if supported by the
        ``estimator`` (when based on decision trees).
        Warning: impurity-based feature importances can be misleading for
        high cardinality features (many unique values). See
        :func:`sklearn.inspection.permutation_importance` as an alternative.
    n_features_in_ : int
        Number of features seen during :term:`fit`.
        .. versionadded:: 0.24
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.
        .. versionadded:: 1.0
    See Also
    --------
    AdaBoostRegressor : An AdaBoost regressor that begins by fitting a
        regressor on the original dataset and then fits additional copies of
        the regressor on the same dataset but where the weights of instances
        are adjusted according to the error of the current prediction.
    GradientBoostingClassifier : GB builds an additive model in a forward
        stage-wise fashion. Regression trees are fit on the negative gradient
        of the binomial or multinomial deviance loss function. Binary
        classification is a special case where only a single regression tree is
        induced.
    sklearn.tree.DecisionTreeClassifier : A non-parametric supervised learning
        method used for classification.
        Creates a model that predicts the value of a target variable by
        learning simple decision rules inferred from the data features.
    References
    ----------
    .. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
           on-Line Learning and an Application to Boosting", 1995.
    .. [2] :doi:`J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class adaboost."
           Statistics and its Interface 2.3 (2009): 349-360.
           <10.4310/SII.2009.v2.n3.a8>`
    Examples
    --------
    >>> from sklearn.ensemble import AdaBoostClassifier
    >>> from sklearn.datasets import make_classification
    >>> X, y = make_classification(n_samples=1000, n_features=4,
    ...                            n_informative=2, n_redundant=0,
    ...                            random_state=0, shuffle=False)
    >>> clf = AdaBoostClassifier(n_estimators=100, random_state=0)
    >>> clf.fit(X, y)
    AdaBoostClassifier(n_estimators=100, random_state=0)
    >>> clf.predict([[0, 0, 0, 0]])
    array([1])
    >>> clf.score(X, y)
    0.96
    For a detailed example of using AdaBoost to fit a sequence of DecisionTrees
    as weak learners, please refer to
    :ref:`sphx_glr_auto_examples_ensemble_plot_adaboost_multiclass.py`.
    For a detailed example of using AdaBoost to fit a non-linearly separable
    classification dataset composed of two Gaussian quantiles clusters, please
    refer to :ref:`sphx_glr_auto_examples_ensemble_plot_adaboost_twoclass.py`.
    """
    def __init__(
        self,
        estimator=None,
        *,
        n_estimators=50,
        learning_rate=1.0,
        random_state=None,
    ):
        super().__init__(
            estimator=estimator,
            n_estimators=n_estimators,
            learning_rate=learning_rate,
            random_state=random_state,
        )
    def _validate_estimator(self):
        """Check the estimator and set the estimator_ attribute."""
        # The default weak learner is a decision stump (a depth-1 tree).
        super()._validate_estimator(default=DecisionTreeClassifier(max_depth=1))
        if not has_fit_parameter(self.estimator_, "sample_weight"):
            raise ValueError(
                f"{self.estimator.__class__.__name__} doesn't support sample_weight."
            )
    def _boost(self, iboost, X, y, sample_weight, random_state):
        """Implement a single boost.
        Perform a single boost according to the discrete SAMME algorithm and return the
        updated sample weights.
        Parameters
        ----------
        iboost : int
            The index of the current boost iteration.
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples.
        y : array-like of shape (n_samples,)
            The target values (class labels).
        sample_weight : array-like of shape (n_samples,)
            The current sample weights.
        random_state : RandomState instance
            The RandomState instance used if the base estimator accepts a
            `random_state` attribute.
        Returns
        -------
        sample_weight : array-like of shape (n_samples,) or None
            The reweighted sample weights.
            If None then boosting has terminated early.
        estimator_weight : float
            The weight for the current boost.
            If None then boosting has terminated early.
        estimator_error : float
            The classification error for the current boost.
            If None then boosting has terminated early.
        """
        estimator = self._make_estimator(random_state=random_state)
        estimator.fit(X, y, sample_weight=sample_weight)
        y_predict = estimator.predict(X)
        if iboost == 0:
            # Cache the label set from the first fitted weak learner.
            self.classes_ = getattr(estimator, "classes_", None)
            self.n_classes_ = len(self.classes_)
        # Instances incorrectly classified
        incorrect = y_predict != y
        # Error fraction
        estimator_error = np.mean(np.average(incorrect, weights=sample_weight, axis=0))
        # Stop if classification is perfect
        if estimator_error <= 0:
            return sample_weight, 1.0, 0.0
        n_classes = self.n_classes_
        # Stop if the error is at least as bad as random guessing
        if estimator_error >= 1.0 - (1.0 / n_classes):
            self.estimators_.pop(-1)
            if len(self.estimators_) == 0:
                raise ValueError(
                    "BaseClassifier in AdaBoostClassifier "
                    "ensemble is worse than random, ensemble "
                    "can not be fit."
                )
            return None, None, None
        # Boost weight using multi-class AdaBoost SAMME alg
        estimator_weight = self.learning_rate * (
            np.log((1.0 - estimator_error) / estimator_error) + np.log(n_classes - 1.0)
        )
        # Only boost the weights if it will fit again
        if not iboost == self.n_estimators - 1:
            # Only boost positive weights
            # Multiplicative update computed in log-space:
            # w_i <- w_i * exp(estimator_weight) for misclassified samples.
            # The (sample_weight > 0) mask zeroes the exponent term so that
            # exactly-zero weights stay zero.
            sample_weight = np.exp(
                np.log(sample_weight)
                + estimator_weight * incorrect * (sample_weight > 0)
            )
        return sample_weight, estimator_weight, estimator_error
    def predict(self, X):
        """Predict classes for X.
        The predicted class of an input sample is computed as the weighted mean
        prediction of the classifiers in the ensemble.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. COO, DOK, and LIL are converted to CSR.
        Returns
        -------
        y : ndarray of shape (n_samples,)
            The predicted classes.
        """
        pred = self.decision_function(X)
        if self.n_classes_ == 2:
            # Binary case: the sign of the decision value selects the class.
            return self.classes_.take(pred > 0, axis=0)
        return self.classes_.take(np.argmax(pred, axis=1), axis=0)
    def staged_predict(self, X):
        """Return staged predictions for X.
        The predicted class of an input sample is computed as the weighted mean
        prediction of the classifiers in the ensemble.
        This generator method yields the ensemble prediction after each
        iteration of boosting and therefore allows monitoring, such as to
        determine the prediction on a test set after each boost.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. COO, DOK, and LIL are converted to CSR.
        Yields
        ------
        y : generator of ndarray of shape (n_samples,)
            The predicted classes.
        """
        X = self._check_X(X)
        n_classes = self.n_classes_
        classes = self.classes_
        if n_classes == 2:
            for pred in self.staged_decision_function(X):
                yield np.array(classes.take(pred > 0, axis=0))
        else:
            for pred in self.staged_decision_function(X):
                yield np.array(classes.take(np.argmax(pred, axis=1), axis=0))
    def decision_function(self, X):
        """Compute the decision function of ``X``.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. COO, DOK, and LIL are converted to CSR.
        Returns
        -------
        score : ndarray of shape of (n_samples, k)
            The decision function of the input samples. The order of
            outputs is the same as that of the :term:`classes_` attribute.
            Binary classification is a special case with ``k == 1``,
            otherwise ``k==n_classes``. For binary classification,
            values closer to -1 or 1 mean more like the first or second
            class in ``classes_``, respectively.
        """
        check_is_fitted(self)
        X = self._check_X(X)
        n_classes = self.n_classes_
        classes = self.classes_[:, np.newaxis]
        if n_classes == 1:
            # Degenerate single-class problem: every decision value is zero.
            return np.zeros_like(X, shape=(X.shape[0], 1))
        # SAMME vote coding: each estimator contributes +w for its predicted
        # class and -w / (n_classes - 1) for every other class; votes are
        # summed over estimators and normalized by the total weight.
        pred = sum(
            np.where(
                (estimator.predict(X) == classes).T,
                w,
                -1 / (n_classes - 1) * w,
            )
            for estimator, w in zip(self.estimators_, self.estimator_weights_)
        )
        pred /= self.estimator_weights_.sum()
        if n_classes == 2:
            # Collapse the two symmetric columns into one signed score.
            pred[:, 0] *= -1
            return pred.sum(axis=1)
        return pred
    def staged_decision_function(self, X):
        """Compute decision function of ``X`` for each boosting iteration.
        This method allows monitoring (i.e. determine error on testing set)
        after each boosting iteration.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. COO, DOK, and LIL are converted to CSR.
        Yields
        ------
        score : generator of ndarray of shape (n_samples, k)
            The decision function of the input samples. The order of
            outputs is the same as that of the :term:`classes_` attribute.
            Binary classification is a special case with ``k == 1``,
            otherwise ``k==n_classes``. For binary classification,
            values closer to -1 or 1 mean more like the first or second
            class in ``classes_``, respectively.
        """
        check_is_fitted(self)
        X = self._check_X(X)
        n_classes = self.n_classes_
        classes = self.classes_[:, np.newaxis]
        # Accumulate the running (unnormalized) vote total and the weight
        # normalizer one boosting stage at a time.
        pred = None
        norm = 0.0
        for weight, estimator in zip(self.estimator_weights_, self.estimators_):
            norm += weight
            current_pred = np.where(
                (estimator.predict(X) == classes).T,
                weight,
                -1 / (n_classes - 1) * weight,
            )
            if pred is None:
                pred = current_pred
            else:
                pred += current_pred
            if n_classes == 2:
                # Copy before negating so the accumulator is not mutated.
                tmp_pred = np.copy(pred)
                tmp_pred[:, 0] *= -1
                yield (tmp_pred / norm).sum(axis=1)
            else:
                yield pred / norm
    @staticmethod
    def _compute_proba_from_decision(decision, n_classes):
        """Compute probabilities from the decision function.
        This is based on eq. (15) of [1] where:
            p(y=c|X) = exp((1 / K-1) f_c(X)) / sum_k(exp((1 / K-1) f_k(X)))
                     = softmax((1 / K-1) * f(X))
        References
        ----------
        .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost",
               2009.
        """
        if n_classes == 2:
            decision = np.vstack([-decision, decision]).T / 2
        else:
            decision /= n_classes - 1
        return softmax(decision, copy=False)
    def predict_proba(self, X):
        """Predict class probabilities for X.
        The predicted class probabilities of an input sample is computed as
        the weighted mean predicted class probabilities of the classifiers
        in the ensemble.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. COO, DOK, and LIL are converted to CSR.
        Returns
        -------
        p : ndarray of shape (n_samples, n_classes)
            The class probabilities of the input samples. The order of
            outputs is the same as that of the :term:`classes_` attribute.
        """
        check_is_fitted(self)
        n_classes = self.n_classes_
        if n_classes == 1:
            # Degenerate single-class problem: probability 1 everywhere.
            return np.ones((_num_samples(X), 1))
        decision = self.decision_function(X)
        return self._compute_proba_from_decision(decision, n_classes)
    def staged_predict_proba(self, X):
        """Predict class probabilities for X.
        The predicted class probabilities of an input sample is computed as
        the weighted mean predicted class probabilities of the classifiers
        in the ensemble.
        This generator method yields the ensemble predicted class probabilities
        after each iteration of boosting and therefore allows monitoring, such
        as to determine the predicted class probabilities on a test set after
        each boost.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. COO, DOK, and LIL are converted to CSR.
        Yields
        ------
        p : generator of ndarray of shape (n_samples,)
            The class probabilities of the input samples. The order of
            outputs is the same as that of the :term:`classes_` attribute.
        """
        n_classes = self.n_classes_
        for decision in self.staged_decision_function(X):
            yield self._compute_proba_from_decision(decision, n_classes)
    def predict_log_proba(self, X):
        """Predict class log-probabilities for X.
        The predicted class log-probabilities of an input sample is computed as
        the weighted mean predicted class log-probabilities of the classifiers
        in the ensemble.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. COO, DOK, and LIL are converted to CSR.
        Returns
        -------
        p : ndarray of shape (n_samples, n_classes)
            The class probabilities of the input samples. The order of
            outputs is the same as that of the :term:`classes_` attribute.
        """
        return np.log(self.predict_proba(X))
class AdaBoostRegressor(_RoutingNotSupportedMixin, RegressorMixin, BaseWeightBoosting):
"""An AdaBoost regressor.
An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
regressor on the original dataset and then fits additional copies of the
regressor on the same dataset but where the weights of instances are
adjusted according to the error of the current prediction. As such,
subsequent regressors focus more on difficult cases.
This class implements the algorithm known as AdaBoost.R2 [2].
Read more in the :ref:`User Guide <adaboost>`.
.. versionadded:: 0.14
Parameters
----------
estimator : object, default=None
The base estimator from which the boosted ensemble is built.
If ``None``, then the base estimator is
:class:`~sklearn.tree.DecisionTreeRegressor` initialized with
`max_depth=3`.
.. versionadded:: 1.2
`base_estimator` was renamed to `estimator`.
n_estimators : int, default=50
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
Values must be in the range `[1, inf)`.
learning_rate : float, default=1.0
Weight applied to each regressor at each boosting iteration. A higher
learning rate increases the contribution of each regressor. There is
a trade-off between the `learning_rate` and `n_estimators` parameters.
Values must be in the range `(0.0, inf)`.
loss : {'linear', 'square', 'exponential'}, default='linear'
The loss function to use when updating the weights after each
boosting iteration.
random_state : int, RandomState instance or None, default=None
Controls the random seed given at each `estimator` at each
boosting iteration.
Thus, it is only used when `estimator` exposes a `random_state`.
In addition, it controls the bootstrap of the weights used to train the
`estimator` at each boosting iteration.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
estimator_ : estimator
The base estimator from which the ensemble is grown.
.. versionadded:: 1.2
`base_estimator_` was renamed to `estimator_`.
estimators_ : list of regressors
The collection of fitted sub-estimators.
estimator_weights_ : ndarray of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : ndarray of floats
Regression error for each estimator in the boosted ensemble.
feature_importances_ : ndarray of shape (n_features,)
The impurity-based feature importances if supported by the
``estimator`` (when based on decision trees).
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
AdaBoostClassifier : An AdaBoost classifier.
GradientBoostingRegressor : Gradient Boosting Classification Tree.
sklearn.tree.DecisionTreeRegressor : A decision tree regressor.
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
Examples
--------
>>> from sklearn.ensemble import AdaBoostRegressor
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(n_features=4, n_informative=2,
... random_state=0, shuffle=False)
>>> regr = AdaBoostRegressor(random_state=0, n_estimators=100)
>>> regr.fit(X, y)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/ensemble/_voting.py | sklearn/ensemble/_voting.py | """
Soft Voting/Majority Rule classifier and Voting regressor.
This module contains:
- A Soft Voting/Majority Rule classifier for classification estimators.
- A Voting regressor for regression estimators.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from abc import abstractmethod
from numbers import Integral
import numpy as np
from sklearn.base import (
ClassifierMixin,
RegressorMixin,
TransformerMixin,
_fit_context,
clone,
)
from sklearn.ensemble._base import _BaseHeterogeneousEnsemble, _fit_single_estimator
from sklearn.exceptions import NotFittedError
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import Bunch
from sklearn.utils._param_validation import StrOptions
from sklearn.utils._repr_html.estimator import _VisualBlock
from sklearn.utils.metadata_routing import (
MetadataRouter,
MethodMapping,
_raise_for_params,
_routing_enabled,
process_routing,
)
from sklearn.utils.metaestimators import available_if
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.parallel import Parallel, delayed
from sklearn.utils.validation import (
_check_feature_names_in,
check_is_fitted,
column_or_1d,
)
class _BaseVoting(TransformerMixin, _BaseHeterogeneousEnsemble):
    """Base class for voting.
    Warning: This class should not be used directly. Use derived classes
    instead.
    """
    _parameter_constraints: dict = {
        "estimators": [list],
        "weights": ["array-like", None],
        "n_jobs": [None, Integral],
        "verbose": ["verbose"],
    }
    def _log_message(self, name, idx, total):
        # Progress message used while fitting; None disables logging.
        if not self.verbose:
            return None
        return f"({idx} of {total}) Processing {name}"
    @property
    def _weights_not_none(self):
        """Get the weights of not `None` estimators."""
        if self.weights is None:
            return None
        # Keep only the weights whose estimator has not been set to 'drop'.
        return [w for est, w in zip(self.estimators, self.weights) if est[1] != "drop"]
    def _predict(self, X):
        """Collect results from clf.predict calls."""
        # Shape (n_samples, n_estimators): one column per fitted estimator.
        return np.asarray([est.predict(X) for est in self.estimators_]).T
    @abstractmethod
    def fit(self, X, y, **fit_params):
        """Get common fit operations."""
        names, clfs = self._validate_estimators()
        if self.weights is not None and len(self.weights) != len(self.estimators):
            raise ValueError(
                "Number of `estimators` and weights must be equal; got"
                f" {len(self.weights)} weights, {len(self.estimators)} estimators"
            )
        if _routing_enabled():
            routed_params = process_routing(self, "fit", **fit_params)
        else:
            # Without metadata routing, only `sample_weight` is forwarded to
            # each underlying estimator's fit.
            routed_params = Bunch()
            for name in names:
                routed_params[name] = Bunch(fit={})
                if "sample_weight" in fit_params:
                    routed_params[name].fit["sample_weight"] = fit_params[
                        "sample_weight"
                    ]
        self.estimators_ = Parallel(n_jobs=self.n_jobs)(
            delayed(_fit_single_estimator)(
                clone(clf),
                X,
                y,
                fit_params=routed_params[name]["fit"],
                message_clsname="Voting",
                message=self._log_message(name, idx + 1, len(clfs)),
            )
            for idx, (name, clf) in enumerate(zip(names, clfs))
            if clf != "drop"
        )
        self.named_estimators_ = Bunch()
        # Uses 'drop' as placeholder for dropped estimators
        est_iter = iter(self.estimators_)
        for name, est in self.estimators:
            current_est = est if est == "drop" else next(est_iter)
            self.named_estimators_[name] = current_est
            if hasattr(current_est, "feature_names_in_"):
                self.feature_names_in_ = current_est.feature_names_in_
        return self
    def fit_transform(self, X, y=None, **fit_params):
        """Return class labels or probabilities for each estimator.
        Return predictions for X for each estimator.
        Parameters
        ----------
        X : {array-like, sparse matrix, dataframe} of shape \
                (n_samples, n_features)
            Input samples.
        y : ndarray of shape (n_samples,), default=None
            Target values (None for unsupervised transformations).
        **fit_params : dict
            Additional fit parameters.
        Returns
        -------
        X_new : ndarray array of shape (n_samples, n_features_new)
            Transformed array.
        """
        return super().fit_transform(X, y, **fit_params)
    @property
    def n_features_in_(self):
        """Number of features seen during :term:`fit`."""
        # For consistency with other estimators we raise an AttributeError so
        # that hasattr() fails if the estimator isn't fitted.
        try:
            check_is_fitted(self)
        except NotFittedError as nfe:
            raise AttributeError(
                "{} object has no n_features_in_ attribute.".format(
                    self.__class__.__name__
                )
            ) from nfe
        return self.estimators_[0].n_features_in_
    def _sk_visual_block_(self):
        # HTML repr: display the underlying estimators side by side.
        names, estimators = zip(*self.estimators)
        return _VisualBlock("parallel", estimators, names=names)
    def get_metadata_routing(self):
        """Get metadata routing of this object.
        Please check :ref:`User Guide <metadata_routing>` on how the routing
        mechanism works.
        .. versionadded:: 1.5
        Returns
        -------
        routing : MetadataRouter
            A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
            routing information.
        """
        router = MetadataRouter(owner=self)
        # `self.estimators` is a list of (name, est) tuples
        for name, estimator in self.estimators:
            router.add(
                **{name: estimator},
                method_mapping=MethodMapping().add(callee="fit", caller="fit"),
            )
        return router
class VotingClassifier(ClassifierMixin, _BaseVoting):
"""Soft Voting/Majority Rule classifier for unfitted estimators.
Read more in the :ref:`User Guide <voting_classifier>`.
.. versionadded:: 0.17
Parameters
----------
estimators : list of (str, estimator) tuples
Invoking the ``fit`` method on the ``VotingClassifier`` will fit clones
of those original estimators that will be stored in the class attribute
``self.estimators_``. An estimator can be set to ``'drop'`` using
:meth:`set_params`.
.. versionchanged:: 0.21
``'drop'`` is accepted. Using None was deprecated in 0.22 and
support was removed in 0.24.
voting : {'hard', 'soft'}, default='hard'
If 'hard', uses predicted class labels for majority rule voting.
Else if 'soft', predicts the class label based on the argmax of
the sums of the predicted probabilities, which is recommended for
an ensemble of well-calibrated classifiers.
weights : array-like of shape (n_classifiers,), default=None
Sequence of weights (`float` or `int`) to weight the occurrences of
predicted class labels (`hard` voting) or class probabilities
before averaging (`soft` voting). Uses uniform weights if `None`.
n_jobs : int, default=None
The number of jobs to run in parallel for ``fit``.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionadded:: 0.18
flatten_transform : bool, default=True
Affects shape of transform output only when voting='soft'
If voting='soft' and flatten_transform=True, transform method returns
matrix with shape (n_samples, n_classifiers * n_classes). If
flatten_transform=False, it returns
(n_classifiers, n_samples, n_classes).
verbose : bool, default=False
If True, the time elapsed while fitting will be printed as it
is completed.
.. versionadded:: 0.23
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators as defined in ``estimators``
that are not 'drop'.
named_estimators_ : :class:`~sklearn.utils.Bunch`
Attribute to access any fitted sub-estimators by name.
.. versionadded:: 0.20
le_ : :class:`~sklearn.preprocessing.LabelEncoder`
Transformer used to encode the labels during fit and decode during
prediction.
classes_ : ndarray of shape (n_classes,)
The classes labels.
n_features_in_ : int
Number of features seen during :term:`fit`. Only defined if the
underlying classifier exposes such an attribute when fit.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Only defined if the
underlying estimators expose such an attribute when fit.
.. versionadded:: 1.0
See Also
--------
VotingRegressor : Prediction voting regressor.
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.ensemble import RandomForestClassifier, VotingClassifier
>>> clf1 = LogisticRegression(random_state=1)
>>> clf2 = RandomForestClassifier(n_estimators=50, random_state=1)
>>> clf3 = GaussianNB()
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> eclf1 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard')
>>> eclf1 = eclf1.fit(X, y)
>>> print(eclf1.predict(X))
[1 1 1 2 2 2]
>>> np.array_equal(eclf1.named_estimators_.lr.predict(X),
... eclf1.named_estimators_['lr'].predict(X))
True
>>> eclf2 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft')
>>> eclf2 = eclf2.fit(X, y)
>>> print(eclf2.predict(X))
[1 1 1 2 2 2]
To drop an estimator, :meth:`set_params` can be used to remove it. Here we
dropped one of the estimators, resulting in 2 fitted estimators:
>>> eclf2 = eclf2.set_params(lr='drop')
>>> eclf2 = eclf2.fit(X, y)
>>> len(eclf2.estimators_)
2
Setting `flatten_transform=True` with `voting='soft'` flattens output shape of
`transform`:
>>> eclf3 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft', weights=[2,1,1],
... flatten_transform=True)
>>> eclf3 = eclf3.fit(X, y)
>>> print(eclf3.predict(X))
[1 1 1 2 2 2]
>>> print(eclf3.transform(X).shape)
(6, 6)
"""
_parameter_constraints: dict = {
**_BaseVoting._parameter_constraints,
"voting": [StrOptions({"hard", "soft"})],
"flatten_transform": ["boolean"],
}
def __init__(
self,
estimators,
*,
voting="hard",
weights=None,
n_jobs=None,
flatten_transform=True,
verbose=False,
):
super().__init__(estimators=estimators)
self.voting = voting
self.weights = weights
self.n_jobs = n_jobs
self.flatten_transform = flatten_transform
self.verbose = verbose
@_fit_context(
# estimators in VotingClassifier.estimators are not validated yet
prefer_skip_nested_validation=False
)
def fit(self, X, y, **fit_params):
"""Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
**fit_params : dict
Parameters to pass to the underlying estimators.
.. versionadded:: 1.5
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Returns the instance itself.
"""
_raise_for_params(fit_params, self, "fit", allow=["sample_weight"])
y_type = type_of_target(y, input_name="y")
if y_type in ("unknown", "continuous"):
# raise a specific ValueError for non-classification tasks
raise ValueError(
f"Unknown label type: {y_type}. Maybe you are trying to fit a "
"classifier, which expects discrete classes on a "
"regression target with continuous values."
)
elif y_type not in ("binary", "multiclass"):
# raise a NotImplementedError for backward compatibility for non-supported
# classification tasks
raise NotImplementedError(
f"{self.__class__.__name__} only supports binary or multiclass "
"classification. Multilabel and multi-output classification are not "
"supported."
)
self.le_ = LabelEncoder().fit(y)
self.classes_ = self.le_.classes_
transformed_y = self.le_.transform(y)
return super().fit(X, transformed_y, **fit_params)
def predict(self, X):
"""Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples.
Returns
-------
maj : array-like of shape (n_samples,)
Predicted class labels.
"""
check_is_fitted(self)
if self.voting == "soft":
maj = np.argmax(self.predict_proba(X), axis=1)
else: # 'hard' voting
predictions = self._predict(X)
maj = np.apply_along_axis(
lambda x: np.argmax(np.bincount(x, weights=self._weights_not_none)),
axis=1,
arr=predictions,
)
maj = self.le_.inverse_transform(maj)
return maj
def _collect_probas(self, X):
"""Collect results from clf.predict calls."""
return np.asarray([clf.predict_proba(X) for clf in self.estimators_])
def _check_voting(self):
if self.voting == "hard":
raise AttributeError(
f"predict_proba is not available when voting={self.voting!r}"
)
return True
@available_if(_check_voting)
def predict_proba(self, X):
"""Compute probabilities of possible outcomes for samples in X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples.
Returns
-------
avg : array-like of shape (n_samples, n_classes)
Weighted average probability for each class per sample.
"""
check_is_fitted(self)
avg = np.average(
self._collect_probas(X), axis=0, weights=self._weights_not_none
)
return avg
def transform(self, X):
"""Return class labels or probabilities for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
probabilities_or_labels
If `voting='soft'` and `flatten_transform=True`:
returns ndarray of shape (n_samples, n_classifiers * n_classes),
being class probabilities calculated by each classifier.
If `voting='soft' and `flatten_transform=False`:
ndarray of shape (n_classifiers, n_samples, n_classes)
If `voting='hard'`:
ndarray of shape (n_samples, n_classifiers), being
class labels predicted by each classifier.
"""
check_is_fitted(self)
if self.voting == "soft":
probas = self._collect_probas(X)
if not self.flatten_transform:
return probas
return np.hstack(probas)
else:
return self._predict(X)
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Not used, present here for API consistency by convention.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
check_is_fitted(self, "n_features_in_")
if self.voting == "soft" and not self.flatten_transform:
raise ValueError(
"get_feature_names_out is not supported when `voting='soft'` and "
"`flatten_transform=False`"
)
_check_feature_names_in(self, input_features, generate_names=False)
class_name = self.__class__.__name__.lower()
active_names = [name for name, est in self.estimators if est != "drop"]
if self.voting == "hard":
return np.asarray(
[f"{class_name}_{name}" for name in active_names], dtype=object
)
# voting == "soft"
n_classes = len(self.classes_)
names_out = [
f"{class_name}_{name}{i}" for name in active_names for i in range(n_classes)
]
return np.asarray(names_out, dtype=object)
    def __sklearn_tags__(self):
        # transform() emits labels or probabilities depending on `voting`, so
        # no input dtype can be promised in the output.
        tags = super().__sklearn_tags__()
        tags.transformer_tags.preserves_dtype = []
        return tags
class VotingRegressor(RegressorMixin, _BaseVoting):
    """Prediction voting regressor for unfitted estimators.
    A voting regressor is an ensemble meta-estimator that fits several base
    regressors, each on the whole dataset. Then it averages the individual
    predictions to form a final prediction.
    For a detailed example, refer to
    :ref:`sphx_glr_auto_examples_ensemble_plot_voting_regressor.py`.
    Read more in the :ref:`User Guide <voting_regressor>`.
    .. versionadded:: 0.21
    Parameters
    ----------
    estimators : list of (str, estimator) tuples
        Invoking the ``fit`` method on the ``VotingRegressor`` will fit clones
        of those original estimators that will be stored in the class attribute
        ``self.estimators_``. An estimator can be set to ``'drop'`` using
        :meth:`set_params`.
        .. versionchanged:: 0.21
            ``'drop'`` is accepted. Using None was deprecated in 0.22 and
            support was removed in 0.24.
    weights : array-like of shape (n_regressors,), default=None
        Sequence of weights (`float` or `int`) to weight the occurrences of
        predicted values before averaging. Uses uniform weights if `None`.
    n_jobs : int, default=None
        The number of jobs to run in parallel for ``fit``.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    verbose : bool, default=False
        If True, the time elapsed while fitting will be printed as it
        is completed.
        .. versionadded:: 0.23
    Attributes
    ----------
    estimators_ : list of regressors
        The collection of fitted sub-estimators as defined in ``estimators``
        that are not 'drop'.
    named_estimators_ : :class:`~sklearn.utils.Bunch`
        Attribute to access any fitted sub-estimators by name.
        .. versionadded:: 0.20
    n_features_in_ : int
        Number of features seen during :term:`fit`. Only defined if the
        underlying regressor exposes such an attribute when fit.
        .. versionadded:: 0.24
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Only defined if the
        underlying estimators expose such an attribute when fit.
        .. versionadded:: 1.0
    See Also
    --------
    VotingClassifier : Soft Voting/Majority Rule classifier.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.linear_model import LinearRegression
    >>> from sklearn.ensemble import RandomForestRegressor
    >>> from sklearn.ensemble import VotingRegressor
    >>> from sklearn.neighbors import KNeighborsRegressor
    >>> r1 = LinearRegression()
    >>> r2 = RandomForestRegressor(n_estimators=10, random_state=1)
    >>> r3 = KNeighborsRegressor()
    >>> X = np.array([[1, 1], [2, 4], [3, 9], [4, 16], [5, 25], [6, 36]])
    >>> y = np.array([2, 6, 12, 20, 30, 42])
    >>> er = VotingRegressor([('lr', r1), ('rf', r2), ('r3', r3)])
    >>> print(er.fit(X, y).predict(X))
    [ 6.8 8.4 12.5 17.8 26 34]
    In the following example, we drop the `'lr'` estimator with
    :meth:`~VotingRegressor.set_params` and fit the remaining two estimators:
    >>> er = er.set_params(lr='drop')
    >>> er = er.fit(X, y)
    >>> len(er.estimators_)
    2
    """
    def __init__(self, estimators, *, weights=None, n_jobs=None, verbose=False):
        # Plain attribute storage only; validation happens at fit time.
        super().__init__(estimators=estimators)
        self.weights = weights
        self.n_jobs = n_jobs
        self.verbose = verbose
    @_fit_context(
        # estimators in VotingRegressor.estimators are not validated yet
        prefer_skip_nested_validation=False
    )
    def fit(self, X, y, **fit_params):
        """Fit the estimators.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training vectors, where `n_samples` is the number of samples and
            `n_features` is the number of features.
        y : array-like of shape (n_samples,)
            Target values.
        **fit_params : dict
            Parameters to pass to the underlying estimators.
            .. versionadded:: 1.5
                Only available if `enable_metadata_routing=True`,
                which can be set by using
                ``sklearn.set_config(enable_metadata_routing=True)``.
                See :ref:`Metadata Routing User Guide <metadata_routing>` for
                more details.
        Returns
        -------
        self : object
            Fitted estimator.
        """
        # Reject routed metadata unless routing is enabled; sample_weight is
        # always allowed for backward compatibility.
        _raise_for_params(fit_params, self, "fit", allow=["sample_weight"])
        # column_or_1d ravels (n_samples, 1) column vectors and warns the user.
        y = column_or_1d(y, warn=True)
        return super().fit(X, y, **fit_params)
    def predict(self, X):
        """Predict regression target for X.
        The predicted regression target of an input sample is computed as the
        mean predicted regression targets of the estimators in the ensemble.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input samples.
        Returns
        -------
        y : ndarray of shape (n_samples,)
            The predicted values.
        """
        check_is_fitted(self)
        # Weighted mean over axis 1: one column per fitted estimator.
        return np.average(self._predict(X), axis=1, weights=self._weights_not_none)
    def transform(self, X):
        """Return predictions for X for each estimator.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input samples.
        Returns
        -------
        predictions : ndarray of shape (n_samples, n_classifiers)
            Values predicted by each regressor.
        """
        check_is_fitted(self)
        return self._predict(X)
    def get_feature_names_out(self, input_features=None):
        """Get output feature names for transformation.
        Parameters
        ----------
        input_features : array-like of str or None, default=None
            Not used, present here for API consistency by convention.
        Returns
        -------
        feature_names_out : ndarray of str objects
            Transformed feature names.
        """
        check_is_fitted(self, "n_features_in_")
        _check_feature_names_in(self, input_features, generate_names=False)
        class_name = self.__class__.__name__.lower()
        # One output column per estimator that has not been set to 'drop'.
        return np.asarray(
            [f"{class_name}_{name}" for name, est in self.estimators if est != "drop"],
            dtype=object,
        )
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/ensemble/_hist_gradient_boosting/grower.py | sklearn/ensemble/_hist_gradient_boosting/grower.py | """
This module contains the TreeGrower class.
TreeGrower builds a regression tree fitting a Newton-Raphson step, based on
the gradients and hessians of the training data.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numbers
from heapq import heappop, heappush
from timeit import default_timer as time
import numpy as np
from sklearn.ensemble._hist_gradient_boosting._bitset import (
set_raw_bitset_from_binned_bitset,
)
from sklearn.ensemble._hist_gradient_boosting.common import (
PREDICTOR_RECORD_DTYPE,
X_BITSET_INNER_DTYPE,
MonotonicConstraint,
)
from sklearn.ensemble._hist_gradient_boosting.histogram import HistogramBuilder
from sklearn.ensemble._hist_gradient_boosting.predictor import TreePredictor
from sklearn.ensemble._hist_gradient_boosting.splitting import Splitter
from sklearn.utils._openmp_helpers import _openmp_effective_n_threads
class TreeNode:
    """Training-time tree node used by TreeGrower.

    Nodes of this class only exist while the tree is being grown; prediction
    relies on TreePredictor instead.

    Parameters
    ----------
    depth : int
        Distance of the node from the root.
    sample_indices : ndarray of shape (n_samples_at_node,), dtype=np.uint32
        Indices of the training samples sitting at this node.
    partition_start : int
        Start position of the node's sample_indices in splitter.partition.
    partition_stop : int
        Stop position of the node's sample_indices in splitter.partition.
    sum_gradients : float
        Sum of the gradients of the samples at the node.
    sum_hessians : float
        Sum of the hessians of the samples at the node.

    Attributes
    ----------
    depth : int
        Distance of the node from the root.
    sample_indices : ndarray of shape (n_samples_at_node,), dtype=np.uint32
        Indices of the training samples sitting at this node.
    sum_gradients : float
        Sum of the gradients of the samples at the node.
    sum_hessians : float
        Sum of the hessians of the samples at the node.
    split_info : SplitInfo or None
        Result of the split evaluation, filled in by the grower.
    is_leaf : bool
        True once the node has been finalized as a leaf.
    left_child : TreeNode or None
        Left child of the node. None for leaves.
    right_child : TreeNode or None
        Right child of the node. None for leaves.
    value : float or None
        Leaf value, as computed in finalize_leaf(). None for non-leaf nodes.
    partition_start : int
        Start position of the node's sample_indices in splitter.partition.
    partition_stop : int
        Stop position of the node's sample_indices in splitter.partition.
    allowed_features : None or ndarray, dtype=int
        Indices of the features children are allowed to split on.
    interaction_cst_indices : None or list of ints
        Indices of the interaction sets that still apply to child splits. The
        fewer sets the stronger the constraint, as fewer sets contain fewer
        features.
    children_lower_bound : float
    children_upper_bound : float
    """

    def __init__(
        self,
        *,
        depth,
        sample_indices,
        partition_start,
        partition_stop,
        sum_gradients,
        sum_hessians,
        value=None,
    ):
        # Statistics describing the samples routed to this node.
        self.sample_indices = sample_indices
        self.n_samples = sample_indices.shape[0]
        self.sum_gradients = sum_gradients
        self.sum_hessians = sum_hessians
        self.depth = depth
        self.value = value
        # Split bookkeeping, filled in later by the grower.
        self.is_leaf = False
        self.split_info = None
        self.left_child = None
        self.right_child = None
        self.histograms = None
        self.allowed_features = None
        self.interaction_cst_indices = None
        # No monotonic constraint applies yet: children may take any value.
        self.set_children_bounds(float("-inf"), float("+inf"))
        # [start, stop) slice of this node inside splitter.partition, i.e.
        # self.sample_indices = view(self.splitter.partition[start:stop]).
        # Only used by _update_raw_prediction, which iterates over the leaves;
        # the variable-length sample_indices views cannot be stored compactly.
        self.partition_start = partition_start
        self.partition_stop = partition_stop

    def set_children_bounds(self, lower, upper):
        """Record the value bounds children must respect (monotonicity)."""
        # These bound the *children* values, not this node's own value; the
        # splitter consults them when evaluating candidate left/right children.
        self.children_lower_bound = lower
        self.children_upper_bound = upper

    def __lt__(self, other_node):
        """Order nodes for the priority queue: larger gain = higher priority.

        heapq pops the smallest item first and only needs '<', so a node with
        a larger gain must compare as 'smaller'.

        Parameters
        ----------
        other_node : TreeNode
            The node to compare with.
        """
        return other_node.split_info.gain < self.split_info.gain
class TreeGrower:
"""Tree grower class used to build a tree.
The tree is fitted to predict the values of a Newton-Raphson step. The
splits are considered in a best-first fashion, and the quality of a
split is defined in splitting._split_gain.
Parameters
----------
X_binned : ndarray of shape (n_samples, n_features), dtype=np.uint8
The binned input samples. Must be Fortran-aligned.
gradients : ndarray of shape (n_samples,)
The gradients of each training sample. Those are the gradients of the
loss w.r.t the predictions, evaluated at iteration ``i - 1``.
hessians : ndarray of shape (n_samples,)
The hessians of each training sample. Those are the hessians of the
loss w.r.t the predictions, evaluated at iteration ``i - 1``.
max_leaf_nodes : int, default=None
The maximum number of leaves for each tree. If None, there is no
maximum limit.
max_depth : int, default=None
The maximum depth of each tree. The depth of a tree is the number of
edges to go from the root to the deepest leaf.
Depth isn't constrained by default.
min_samples_leaf : int, default=20
The minimum number of samples per leaf.
min_gain_to_split : float, default=0.
The minimum gain needed to split a node. Splits with lower gain will
be ignored.
min_hessian_to_split : float, default=1e-3
The minimum sum of hessians needed in each node. Splits that result in
at least one child having a sum of hessians less than
``min_hessian_to_split`` are discarded.
n_bins : int, default=256
The total number of bins, including the bin for missing values. Used
to define the shape of the histograms.
n_bins_non_missing : ndarray, dtype=np.uint32, default=None
For each feature, gives the number of bins actually used for
non-missing values. For features with a lot of unique values, this
is equal to ``n_bins - 1``. If it's an int, all features are
considered to have the same number of bins. If None, all features
are considered to have ``n_bins - 1`` bins.
has_missing_values : bool or ndarray, dtype=bool, default=False
Whether each feature contains missing values (in the training data).
If it's a bool, the same value is used for all features.
is_categorical : ndarray of bool of shape (n_features,), default=None
Indicates categorical features.
monotonic_cst : array-like of int of shape (n_features,), dtype=int, default=None
Indicates the monotonic constraint to enforce on each feature.
- 1: monotonic increase
- 0: no constraint
- -1: monotonic decrease
Read more in the :ref:`User Guide <monotonic_cst_gbdt>`.
interaction_cst : list of sets of integers, default=None
List of interaction constraints.
l2_regularization : float, default=0.
The L2 regularization parameter penalizing leaves with small hessians.
Use ``0`` for no regularization (default).
feature_fraction_per_split : float, default=1
Proportion of randomly chosen features in each and every node split.
This is a form of regularization, smaller values make the trees weaker
learners and might prevent overfitting.
rng : Generator
Numpy random Generator used for feature subsampling.
shrinkage : float, default=1.
The shrinkage parameter to apply to the leaves values, also known as
learning rate.
n_threads : int, default=None
Number of OpenMP threads to use. `_openmp_effective_n_threads` is called
to determine the effective number of threads use, which takes cgroups CPU
quotes into account. See the docstring of `_openmp_effective_n_threads`
for details.
Attributes
----------
histogram_builder : HistogramBuilder
splitter : Splitter
root : TreeNode
finalized_leaves : list of TreeNode
splittable_nodes : list of TreeNode
missing_values_bin_idx : int
Equals n_bins - 1
n_categorical_splits : int
n_features : int
n_nodes : int
total_find_split_time : float
Time spent finding the best splits
total_compute_hist_time : float
Time spent computing histograms
total_apply_split_time : float
Time spent splitting nodes
with_monotonic_cst : bool
Whether there are monotonic constraints that apply. False iff monotonic_cst is
None.
"""
def __init__(
self,
X_binned,
gradients,
hessians,
max_leaf_nodes=None,
max_depth=None,
min_samples_leaf=20,
min_gain_to_split=0.0,
min_hessian_to_split=1e-3,
n_bins=256,
n_bins_non_missing=None,
has_missing_values=False,
is_categorical=None,
monotonic_cst=None,
interaction_cst=None,
l2_regularization=0.0,
feature_fraction_per_split=1.0,
rng=np.random.default_rng(),
shrinkage=1.0,
n_threads=None,
):
self._validate_parameters(
X_binned,
min_gain_to_split,
min_hessian_to_split,
)
n_threads = _openmp_effective_n_threads(n_threads)
if n_bins_non_missing is None:
n_bins_non_missing = n_bins - 1
if isinstance(n_bins_non_missing, numbers.Integral):
n_bins_non_missing = np.array(
[n_bins_non_missing] * X_binned.shape[1], dtype=np.uint32
)
else:
n_bins_non_missing = np.asarray(n_bins_non_missing, dtype=np.uint32)
if isinstance(has_missing_values, bool):
has_missing_values = [has_missing_values] * X_binned.shape[1]
has_missing_values = np.asarray(has_missing_values, dtype=np.uint8)
# `monotonic_cst` validation is done in _validate_monotonic_cst
# at the estimator level and therefore the following should not be
# needed when using the public API.
if monotonic_cst is None:
monotonic_cst = np.full(
shape=X_binned.shape[1],
fill_value=MonotonicConstraint.NO_CST,
dtype=np.int8,
)
else:
monotonic_cst = np.asarray(monotonic_cst, dtype=np.int8)
self.with_monotonic_cst = np.any(monotonic_cst != MonotonicConstraint.NO_CST)
if is_categorical is None:
is_categorical = np.zeros(shape=X_binned.shape[1], dtype=np.uint8)
else:
is_categorical = np.asarray(is_categorical, dtype=np.uint8)
if np.any(
np.logical_and(
is_categorical == 1, monotonic_cst != MonotonicConstraint.NO_CST
)
):
raise ValueError("Categorical features cannot have monotonic constraints.")
hessians_are_constant = hessians.shape[0] == 1
self.histogram_builder = HistogramBuilder(
X_binned, n_bins, gradients, hessians, hessians_are_constant, n_threads
)
missing_values_bin_idx = n_bins - 1
self.splitter = Splitter(
X_binned=X_binned,
n_bins_non_missing=n_bins_non_missing,
missing_values_bin_idx=missing_values_bin_idx,
has_missing_values=has_missing_values,
is_categorical=is_categorical,
monotonic_cst=monotonic_cst,
l2_regularization=l2_regularization,
min_hessian_to_split=min_hessian_to_split,
min_samples_leaf=min_samples_leaf,
min_gain_to_split=min_gain_to_split,
hessians_are_constant=hessians_are_constant,
feature_fraction_per_split=feature_fraction_per_split,
rng=rng,
n_threads=n_threads,
)
self.X_binned = X_binned
self.max_leaf_nodes = max_leaf_nodes
self.max_depth = max_depth
self.min_samples_leaf = min_samples_leaf
self.min_gain_to_split = min_gain_to_split
self.n_bins_non_missing = n_bins_non_missing
self.missing_values_bin_idx = missing_values_bin_idx
self.has_missing_values = has_missing_values
self.is_categorical = is_categorical
self.monotonic_cst = monotonic_cst
self.interaction_cst = interaction_cst
self.l2_regularization = l2_regularization
self.shrinkage = shrinkage
self.n_features = X_binned.shape[1]
self.n_threads = n_threads
self.splittable_nodes = []
self.finalized_leaves = []
self.total_find_split_time = 0.0 # time spent finding the best splits
self.total_compute_hist_time = 0.0 # time spent computing histograms
self.total_apply_split_time = 0.0 # time spent splitting nodes
self.n_categorical_splits = 0
self._initialize_root()
self.n_nodes = 1
def _validate_parameters(
self,
X_binned,
min_gain_to_split,
min_hessian_to_split,
):
"""Validate parameters passed to __init__.
Also validate parameters passed to splitter.
"""
if X_binned.dtype != np.uint8:
raise NotImplementedError("X_binned must be of type uint8.")
if not X_binned.flags.f_contiguous:
raise ValueError(
"X_binned should be passed as Fortran contiguous "
"array for maximum efficiency."
)
if min_gain_to_split < 0:
raise ValueError(
"min_gain_to_split={} must be positive.".format(min_gain_to_split)
)
if min_hessian_to_split < 0:
raise ValueError(
"min_hessian_to_split={} must be positive.".format(min_hessian_to_split)
)
def grow(self):
"""Grow the tree, from root to leaves."""
while self.splittable_nodes:
self.split_next()
self._apply_shrinkage()
def _apply_shrinkage(self):
"""Multiply leaves values by shrinkage parameter.
This must be done at the very end of the growing process. If this were
done during the growing process e.g. in finalize_leaf(), then a leaf
would be shrunk but its sibling would potentially not be (if it's a
non-leaf), which would lead to a wrong computation of the 'middle'
value needed to enforce the monotonic constraints.
"""
for leaf in self.finalized_leaves:
leaf.value *= self.shrinkage
def _initialize_root(self):
"""Initialize root node and finalize it if needed."""
tic = time()
if self.interaction_cst is not None:
allowed_features = set().union(*self.interaction_cst)
allowed_features = np.fromiter(
allowed_features, dtype=np.uint32, count=len(allowed_features)
)
arbitrary_feature = allowed_features[0]
else:
allowed_features = None
arbitrary_feature = 0
# TreeNode init needs the total sum of gradients and hessians. Therefore, we
# first compute the histograms and then compute the total grad/hess on an
# arbitrary feature histogram. This way we replace a loop over n_samples by a
# loop over n_bins.
histograms = self.histogram_builder.compute_histograms_brute(
self.splitter.partition, # =self.root.sample_indices
allowed_features,
)
self.total_compute_hist_time += time() - tic
tic = time()
n_samples = self.X_binned.shape[0]
depth = 0
histogram_array = np.asarray(histograms[arbitrary_feature])
sum_gradients = histogram_array["sum_gradients"].sum()
if self.histogram_builder.hessians_are_constant:
sum_hessians = self.histogram_builder.hessians[0] * n_samples
else:
sum_hessians = histogram_array["sum_hessians"].sum()
self.root = TreeNode(
depth=depth,
sample_indices=self.splitter.partition,
partition_start=0,
partition_stop=n_samples,
sum_gradients=sum_gradients,
sum_hessians=sum_hessians,
value=0,
)
if self.root.n_samples < 2 * self.min_samples_leaf:
# Do not even bother computing any splitting statistics.
self._finalize_leaf(self.root)
return
if sum_hessians < self.splitter.min_hessian_to_split:
self._finalize_leaf(self.root)
return
if self.interaction_cst is not None:
self.root.interaction_cst_indices = range(len(self.interaction_cst))
self.root.allowed_features = allowed_features
self.root.histograms = histograms
self._compute_best_split_and_push(self.root)
self.total_find_split_time += time() - tic
def _compute_best_split_and_push(self, node):
"""Compute the best possible split (SplitInfo) of a given node.
Also push it in the heap of splittable nodes if gain isn't zero.
The gain of a node is 0 if either all the leaves are pure
(best gain = 0), or if no split would satisfy the constraints,
(min_hessians_to_split, min_gain_to_split, min_samples_leaf)
"""
node.split_info = self.splitter.find_node_split(
n_samples=node.n_samples,
histograms=node.histograms,
sum_gradients=node.sum_gradients,
sum_hessians=node.sum_hessians,
value=node.value,
lower_bound=node.children_lower_bound,
upper_bound=node.children_upper_bound,
allowed_features=node.allowed_features,
)
if node.split_info.gain <= 0: # no valid split
self._finalize_leaf(node)
else:
heappush(self.splittable_nodes, node)
def split_next(self):
"""Split the node with highest potential gain.
Returns
-------
left : TreeNode
The resulting left child.
right : TreeNode
The resulting right child.
"""
# Consider the node with the highest loss reduction (a.k.a. gain)
node = heappop(self.splittable_nodes)
tic = time()
(
sample_indices_left,
sample_indices_right,
right_child_pos,
) = self.splitter.split_indices(node.split_info, node.sample_indices)
self.total_apply_split_time += time() - tic
depth = node.depth + 1
n_leaf_nodes = len(self.finalized_leaves) + len(self.splittable_nodes)
n_leaf_nodes += 2
left_child_node = TreeNode(
depth=depth,
sample_indices=sample_indices_left,
partition_start=node.partition_start,
partition_stop=node.partition_start + right_child_pos,
sum_gradients=node.split_info.sum_gradient_left,
sum_hessians=node.split_info.sum_hessian_left,
value=node.split_info.value_left,
)
right_child_node = TreeNode(
depth=depth,
sample_indices=sample_indices_right,
partition_start=left_child_node.partition_stop,
partition_stop=node.partition_stop,
sum_gradients=node.split_info.sum_gradient_right,
sum_hessians=node.split_info.sum_hessian_right,
value=node.split_info.value_right,
)
node.right_child = right_child_node
node.left_child = left_child_node
# set interaction constraints (the indices of the constraints sets)
if self.interaction_cst is not None:
# Calculate allowed_features and interaction_cst_indices only once. Child
# nodes inherit them before they get split.
(
left_child_node.allowed_features,
left_child_node.interaction_cst_indices,
) = self._compute_interactions(node)
right_child_node.interaction_cst_indices = (
left_child_node.interaction_cst_indices
)
right_child_node.allowed_features = left_child_node.allowed_features
if not self.has_missing_values[node.split_info.feature_idx]:
# If no missing values are encountered at fit time, then samples
# with missing values during predict() will go to whichever child
# has the most samples.
node.split_info.missing_go_to_left = (
left_child_node.n_samples > right_child_node.n_samples
)
self.n_nodes += 2
self.n_categorical_splits += node.split_info.is_categorical
if self.max_leaf_nodes is not None and n_leaf_nodes == self.max_leaf_nodes:
self._finalize_leaf(left_child_node)
self._finalize_leaf(right_child_node)
self._finalize_splittable_nodes()
return left_child_node, right_child_node
if self.max_depth is not None and depth == self.max_depth:
self._finalize_leaf(left_child_node)
self._finalize_leaf(right_child_node)
return left_child_node, right_child_node
if left_child_node.n_samples < self.min_samples_leaf * 2:
self._finalize_leaf(left_child_node)
if right_child_node.n_samples < self.min_samples_leaf * 2:
self._finalize_leaf(right_child_node)
if self.with_monotonic_cst:
# Set value bounds for respecting monotonic constraints
# See test_nodes_values() for details
if (
self.monotonic_cst[node.split_info.feature_idx]
== MonotonicConstraint.NO_CST
):
lower_left = lower_right = node.children_lower_bound
upper_left = upper_right = node.children_upper_bound
else:
mid = (left_child_node.value + right_child_node.value) / 2
if (
self.monotonic_cst[node.split_info.feature_idx]
== MonotonicConstraint.POS
):
lower_left, upper_left = node.children_lower_bound, mid
lower_right, upper_right = mid, node.children_upper_bound
else: # NEG
lower_left, upper_left = mid, node.children_upper_bound
lower_right, upper_right = node.children_lower_bound, mid
left_child_node.set_children_bounds(lower_left, upper_left)
right_child_node.set_children_bounds(lower_right, upper_right)
# Compute histograms of children, and compute their best possible split
# (if needed)
should_split_left = not left_child_node.is_leaf
should_split_right = not right_child_node.is_leaf
if should_split_left or should_split_right:
# We will compute the histograms of both nodes even if one of them
# is a leaf, since computing the second histogram is very cheap
# (using histogram subtraction).
n_samples_left = left_child_node.sample_indices.shape[0]
n_samples_right = right_child_node.sample_indices.shape[0]
if n_samples_left < n_samples_right:
smallest_child = left_child_node
largest_child = right_child_node
else:
smallest_child = right_child_node
largest_child = left_child_node
# We use the brute O(n_samples) method on the child that has the
# smallest number of samples, and the subtraction trick O(n_bins)
# on the other one.
# Note that both left and right child have the same allowed_features.
tic = time()
smallest_child.histograms = self.histogram_builder.compute_histograms_brute(
smallest_child.sample_indices, smallest_child.allowed_features
)
largest_child.histograms = (
self.histogram_builder.compute_histograms_subtraction(
node.histograms,
smallest_child.histograms,
smallest_child.allowed_features,
)
)
# node.histograms is reused in largest_child.histograms. To break cyclic
# memory references and help garbage collection, we set it to None.
node.histograms = None
self.total_compute_hist_time += time() - tic
tic = time()
if should_split_left:
self._compute_best_split_and_push(left_child_node)
if should_split_right:
self._compute_best_split_and_push(right_child_node)
self.total_find_split_time += time() - tic
# Release memory used by histograms as they are no longer needed
# for leaf nodes since they won't be split.
for child in (left_child_node, right_child_node):
if child.is_leaf:
del child.histograms
# Release memory used by histograms as they are no longer needed for
# internal nodes once children histograms have been computed.
del node.histograms
return left_child_node, right_child_node
def _compute_interactions(self, node):
r"""Compute features allowed by interactions to be inherited by child nodes.
Example: Assume constraints [{0, 1}, {1, 2}].
1 <- Both constraint groups could be applied from now on
/ \
1 2 <- Left split still fulfills both constraint groups.
/ \ / \ Right split at feature 2 has only group {1, 2} from now on.
LightGBM uses the same logic for overlapping groups. See
https://github.com/microsoft/LightGBM/issues/4481 for details.
Parameters:
----------
node : TreeNode
A node that might have children. Based on its feature_idx, the interaction
constraints for possible child nodes are computed.
Returns
-------
allowed_features : ndarray, dtype=uint32
Indices of features allowed to split for children.
interaction_cst_indices : list of ints
Indices of the interaction sets that have to be applied on splits of
child nodes. The fewer sets the stronger the constraint as fewer sets
contain fewer features.
"""
# Note:
# - Case of no interactions is already captured before function call.
# - This is for nodes that are already split and have a
# node.split_info.feature_idx.
allowed_features = set()
interaction_cst_indices = []
for i in node.interaction_cst_indices:
if node.split_info.feature_idx in self.interaction_cst[i]:
interaction_cst_indices.append(i)
allowed_features.update(self.interaction_cst[i])
return (
np.fromiter(allowed_features, dtype=np.uint32, count=len(allowed_features)),
interaction_cst_indices,
)
def _finalize_leaf(self, node):
"""Make node a leaf of the tree being grown."""
node.is_leaf = True
self.finalized_leaves.append(node)
def _finalize_splittable_nodes(self):
"""Transform all splittable nodes into leaves.
Used when some constraint is met e.g. maximum number of leaves or
maximum depth."""
while len(self.splittable_nodes) > 0:
node = self.splittable_nodes.pop()
self._finalize_leaf(node)
def make_predictor(self, binning_thresholds):
    """Build a TreePredictor object out of the current tree.

    Parameters
    ----------
    binning_thresholds : array-like of floats
        Corresponds to the bin_thresholds_ attribute of the BinMapper.
        For each feature, this stores:
        - the bin frontiers for continuous features
        - the unique raw category values for categorical features

    Returns
    -------
    A TreePredictor object.
    """
    nodes = np.zeros(self.n_nodes, dtype=PREDICTOR_RECORD_DTYPE)
    bitset_shape = (self.n_categorical_splits, 8)
    binned_bitsets = np.zeros(bitset_shape, dtype=X_BITSET_INNER_DTYPE)
    raw_bitsets = np.zeros(bitset_shape, dtype=X_BITSET_INNER_DTYPE)
    # Walk the grown tree depth-first from the root, filling the flat
    # predictor arrays in place.
    _fill_predictor_arrays(
        nodes,
        binned_bitsets,
        raw_bitsets,
        self.root,
        binning_thresholds,
        self.n_bins_non_missing,
    )
    return TreePredictor(nodes, binned_bitsets, raw_bitsets)
def _fill_predictor_arrays(
predictor_nodes,
binned_left_cat_bitsets,
raw_left_cat_bitsets,
grower_node,
binning_thresholds,
n_bins_non_missing,
next_free_node_idx=0,
next_free_bitset_idx=0,
):
"""Helper used in make_predictor to set the TreePredictor fields."""
node = predictor_nodes[next_free_node_idx]
node["count"] = grower_node.n_samples
node["depth"] = grower_node.depth
if grower_node.split_info is not None:
node["gain"] = grower_node.split_info.gain
else:
node["gain"] = -1
node["value"] = grower_node.value
if grower_node.is_leaf:
# Leaf node
node["is_leaf"] = True
return next_free_node_idx + 1, next_free_bitset_idx
split_info = grower_node.split_info
feature_idx, bin_idx = split_info.feature_idx, split_info.bin_idx
node["feature_idx"] = feature_idx
node["bin_threshold"] = bin_idx
node["missing_go_to_left"] = split_info.missing_go_to_left
node["is_categorical"] = split_info.is_categorical
if split_info.bin_idx == n_bins_non_missing[feature_idx] - 1:
# Split is on the last non-missing bin: it's a "split on nans".
# All nans go to the right, the rest go to the left.
# Note: for categorical splits, bin_idx is 0 and we rely on the bitset
node["num_threshold"] = np.inf
elif split_info.is_categorical:
categories = binning_thresholds[feature_idx]
node["bitset_idx"] = next_free_bitset_idx
binned_left_cat_bitsets[next_free_bitset_idx] = split_info.left_cat_bitset
set_raw_bitset_from_binned_bitset(
raw_left_cat_bitsets[next_free_bitset_idx],
split_info.left_cat_bitset,
categories,
)
next_free_bitset_idx += 1
else:
node["num_threshold"] = binning_thresholds[feature_idx][bin_idx]
next_free_node_idx += 1
node["left"] = next_free_node_idx
next_free_node_idx, next_free_bitset_idx = _fill_predictor_arrays(
predictor_nodes,
binned_left_cat_bitsets,
raw_left_cat_bitsets,
grower_node.left_child,
binning_thresholds=binning_thresholds,
n_bins_non_missing=n_bins_non_missing,
next_free_node_idx=next_free_node_idx,
next_free_bitset_idx=next_free_bitset_idx,
)
node["right"] = next_free_node_idx
return _fill_predictor_arrays(
predictor_nodes,
binned_left_cat_bitsets,
raw_left_cat_bitsets,
grower_node.right_child,
binning_thresholds=binning_thresholds,
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/ensemble/_hist_gradient_boosting/predictor.py | sklearn/ensemble/_hist_gradient_boosting/predictor.py | """
This module contains the TreePredictor class which is used for prediction.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from sklearn.ensemble._hist_gradient_boosting._predictor import (
_compute_partial_dependence,
_predict_from_binned_data,
_predict_from_raw_data,
)
from sklearn.ensemble._hist_gradient_boosting.common import (
PREDICTOR_RECORD_DTYPE,
Y_DTYPE,
)
class TreePredictor:
    """Prediction-only representation of a single fitted tree.

    Parameters
    ----------
    nodes : ndarray of PREDICTOR_RECORD_DTYPE
        The nodes of the tree.
    binned_left_cat_bitsets : ndarray of shape (n_categorical_splits, 8), dtype=uint32
        Array of bitsets for binned categories used in predict_binned when a
        split is categorical.
    raw_left_cat_bitsets : ndarray of shape (n_categorical_splits, 8), dtype=uint32
        Array of bitsets for raw categories used in predict when a split is
        categorical.
    """

    def __init__(self, nodes, binned_left_cat_bitsets, raw_left_cat_bitsets):
        self.nodes = nodes
        self.binned_left_cat_bitsets = binned_left_cat_bitsets
        self.raw_left_cat_bitsets = raw_left_cat_bitsets

    def get_n_leaf_nodes(self):
        """Return the number of leaves."""
        n_leaves = self.nodes["is_leaf"].sum()
        return int(n_leaves)

    def get_max_depth(self):
        """Return the maximum depth among all leaves."""
        deepest = self.nodes["depth"].max()
        return int(deepest)

    def predict(self, X, known_cat_bitsets, f_idx_map, n_threads):
        """Predict raw values for non-binned data.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            The input samples.
        known_cat_bitsets : ndarray of shape (n_categorical_features, 8)
            Array of bitsets of known categories, for each categorical feature.
        f_idx_map : ndarray of shape (n_features,)
            Map from original feature index to the corresponding index in the
            known_cat_bitsets array.
        n_threads : int
            Number of OpenMP threads to use.

        Returns
        -------
        y : ndarray, shape (n_samples,)
            The raw predicted values.
        """
        raw_values = np.empty(X.shape[0], dtype=Y_DTYPE)
        _predict_from_raw_data(
            self.nodes,
            X,
            self.raw_left_cat_bitsets,
            known_cat_bitsets,
            f_idx_map,
            n_threads,
            raw_values,
        )
        return raw_values

    def predict_binned(self, X, missing_values_bin_idx, n_threads):
        """Predict raw values for binned data.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            The input samples.
        missing_values_bin_idx : uint8
            Index of the bin that is used for missing values. This is the
            index of the last bin and is always equal to max_bins (as passed
            to the GBDT classes), or equivalently to n_bins - 1.
        n_threads : int
            Number of OpenMP threads to use.

        Returns
        -------
        y : ndarray, shape (n_samples,)
            The raw predicted values.
        """
        raw_values = np.empty(X.shape[0], dtype=Y_DTYPE)
        _predict_from_binned_data(
            self.nodes,
            X,
            self.binned_left_cat_bitsets,
            missing_values_bin_idx,
            n_threads,
            raw_values,
        )
        return raw_values

    def compute_partial_dependence(self, grid, target_features, out):
        """Fast partial dependence computation.

        Parameters
        ----------
        grid : ndarray, shape (n_samples, n_target_features)
            The grid points on which the partial dependence should be
            evaluated.
        target_features : ndarray, shape (n_target_features)
            The set of target features for which the partial dependence
            should be evaluated.
        out : ndarray, shape (n_samples)
            The value of the partial dependence function on each grid
            point.
        """
        _compute_partial_dependence(self.nodes, grid, target_features, out)

    def __setstate__(self, state):
        try:
            super().__setstate__(state)
        except AttributeError:
            self.__dict__.update(state)

        # feature_idx is stored as np.intp, whose width is platform dependent:
        # int64 on a 64-bit Python runtime, int32 on a 32-bit one. Re-cast on
        # unpickling so models saved on one bitness load cleanly on another.
        #
        # TODO: consider always using platform agnostic dtypes for fitted
        # estimator attributes. For this particular estimator, this would
        # mean replacing the intp field of PREDICTOR_RECORD_DTYPE by an int32
        # field. Ideally this should be done consistently throughout
        # scikit-learn along with a common test.
        if self.nodes.dtype != PREDICTOR_RECORD_DTYPE:
            self.nodes = self.nodes.astype(PREDICTOR_RECORD_DTYPE, casting="same_kind")
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/ensemble/_hist_gradient_boosting/utils.py | sklearn/ensemble/_hist_gradient_boosting/utils.py | """This module contains utility routines."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from sklearn.base import is_classifier
from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper
def get_equivalent_estimator(estimator, lib="lightgbm", n_classes=None):
    """Return an unfitted estimator from another lib with matching hyperparams.

    This utility function takes care of renaming the sklearn parameters into
    their LightGBM, XGBoost or CatBoost equivalent parameters.

    Parameters
    ----------
    estimator : estimator instance
        The HistGradientBoosting* estimator whose hyperparameters should be
        translated. Only ``get_params()`` is called on it.
    lib : {"lightgbm", "xgboost", "catboost"}, default="lightgbm"
        The target library.
    n_classes : int, default=None
        Number of classes; needed by the classification losses to decide
        between binary and multiclass objectives.

    Returns
    -------
    estimator
        An unfitted classifier or regressor from the requested library.

    Raises
    ------
    ValueError
        If `lib` is unknown or the sklearn loss is "auto".
    NotImplementedError
        If early stopping is enabled on the sklearn estimator.

    # unmapped XGB parameters:
    # - min_samples_leaf
    # - min_data_in_bin
    # - min_split_gain (there is min_split_loss though?)

    # unmapped Catboost parameters:
    # max_leaves
    # min_*
    """
    if lib not in ("lightgbm", "xgboost", "catboost"):
        raise ValueError(
            "accepted libs are lightgbm, xgboost, and catboost. got {}".format(lib)
        )

    sklearn_params = estimator.get_params()

    if sklearn_params["loss"] == "auto":
        raise ValueError(
            "auto loss is not accepted. We need to know if "
            "the problem is binary or multiclass classification."
        )
    if sklearn_params["early_stopping"]:
        raise NotImplementedError("Early stopping should be deactivated.")

    lightgbm_loss_mapping = {
        "squared_error": "regression_l2",
        "absolute_error": "regression_l1",
        "log_loss": "binary" if n_classes == 2 else "multiclass",
        "gamma": "gamma",
        "poisson": "poisson",
    }

    lightgbm_params = {
        "objective": lightgbm_loss_mapping[sklearn_params["loss"]],
        "learning_rate": sklearn_params["learning_rate"],
        "n_estimators": sklearn_params["max_iter"],
        "num_leaves": sklearn_params["max_leaf_nodes"],
        "max_depth": sklearn_params["max_depth"],
        "min_data_in_leaf": sklearn_params["min_samples_leaf"],
        "reg_lambda": sklearn_params["l2_regularization"],
        "max_bin": sklearn_params["max_bins"],
        "min_data_in_bin": 1,
        "min_sum_hessian_in_leaf": 1e-3,
        "min_split_gain": 0,
        "verbosity": 10 if sklearn_params["verbose"] else -10,
        "boost_from_average": True,
        "enable_bundle": False,  # also makes feature order consistent
        "subsample_for_bin": _BinMapper().subsample,
        "poisson_max_delta_step": 1e-12,
        "feature_fraction_bynode": sklearn_params["max_features"],
    }

    if sklearn_params["loss"] == "log_loss" and n_classes > 2:
        # LightGBM multiplies hessians by 2 in multiclass loss.
        lightgbm_params["min_sum_hessian_in_leaf"] *= 2
        # LightGBM 3.0 introduced a different scaling of the hessian for the
        # multiclass case. It is equivalent of scaling the learning rate.
        # See https://github.com/microsoft/LightGBM/pull/3256.
        if n_classes is not None:
            lightgbm_params["learning_rate"] *= n_classes / (n_classes - 1)

    # XGB
    xgboost_loss_mapping = {
        "squared_error": "reg:linear",
        "absolute_error": "LEAST_ABSOLUTE_DEV_NOT_SUPPORTED",
        "log_loss": "reg:logistic" if n_classes == 2 else "multi:softmax",
        "gamma": "reg:gamma",
        "poisson": "count:poisson",
    }

    xgboost_params = {
        "tree_method": "hist",
        "grow_policy": "lossguide",  # so that we can set max_leaves
        "objective": xgboost_loss_mapping[sklearn_params["loss"]],
        "learning_rate": sklearn_params["learning_rate"],
        "n_estimators": sklearn_params["max_iter"],
        "max_leaves": sklearn_params["max_leaf_nodes"],
        "max_depth": sklearn_params["max_depth"] or 0,
        "lambda": sklearn_params["l2_regularization"],
        "max_bin": sklearn_params["max_bins"],
        "min_child_weight": 1e-3,
        "verbosity": 2 if sklearn_params["verbose"] else 0,
        "silent": sklearn_params["verbose"] == 0,
        "n_jobs": -1,
        "colsample_bynode": sklearn_params["max_features"],
    }

    # Catboost
    catboost_loss_mapping = {
        "squared_error": "RMSE",
        # catboost does not support MAE when leaf_estimation_method is Newton.
        # Sentinel spelling fixed ("ASBOLUTE" -> "ABSOLUTE") to stay consistent
        # with the XGBoost sentinel above.
        "absolute_error": "LEAST_ABSOLUTE_DEV_NOT_SUPPORTED",
        "log_loss": "Logloss" if n_classes == 2 else "MultiClass",
        "gamma": None,
        "poisson": "Poisson",
    }

    catboost_params = {
        "loss_function": catboost_loss_mapping[sklearn_params["loss"]],
        "learning_rate": sklearn_params["learning_rate"],
        "iterations": sklearn_params["max_iter"],
        "depth": sklearn_params["max_depth"],
        "reg_lambda": sklearn_params["l2_regularization"],
        "max_bin": sklearn_params["max_bins"],
        "feature_border_type": "Median",
        "leaf_estimation_method": "Newton",
        "verbose": bool(sklearn_params["verbose"]),
    }

    if lib == "lightgbm":
        from lightgbm import LGBMClassifier, LGBMRegressor

        if is_classifier(estimator):
            return LGBMClassifier(**lightgbm_params)
        else:
            return LGBMRegressor(**lightgbm_params)

    elif lib == "xgboost":
        from xgboost import XGBClassifier, XGBRegressor

        if is_classifier(estimator):
            return XGBClassifier(**xgboost_params)
        else:
            return XGBRegressor(**xgboost_params)

    else:
        from catboost import CatBoostClassifier, CatBoostRegressor

        if is_classifier(estimator):
            return CatBoostClassifier(**catboost_params)
        else:
            return CatBoostRegressor(**catboost_params)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/ensemble/_hist_gradient_boosting/__init__.py | sklearn/ensemble/_hist_gradient_boosting/__init__.py | """This module implements histogram-based gradient boosting estimators.
The implementation is a port from pygbm which is itself strongly inspired
from LightGBM.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py | sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py | """Fast Gradient Boosting decision trees for classification and regression."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import itertools
from abc import ABC, abstractmethod
from contextlib import contextmanager, nullcontext, suppress
from functools import partial
from numbers import Integral, Real
from time import time
import numpy as np
from sklearn._loss.loss import (
_LOSSES,
BaseLoss,
HalfBinomialLoss,
HalfGammaLoss,
HalfMultinomialLoss,
HalfPoissonLoss,
PinballLoss,
)
from sklearn.base import (
BaseEstimator,
ClassifierMixin,
RegressorMixin,
_fit_context,
is_classifier,
)
from sklearn.compose import ColumnTransformer
from sklearn.ensemble._hist_gradient_boosting._gradient_boosting import (
_update_raw_predictions,
)
from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper
from sklearn.ensemble._hist_gradient_boosting.common import G_H_DTYPE, X_DTYPE, Y_DTYPE
from sklearn.ensemble._hist_gradient_boosting.grower import TreeGrower
from sklearn.metrics import check_scoring
from sklearn.metrics._scorer import _SCORERS
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import FunctionTransformer, LabelEncoder, OrdinalEncoder
from sklearn.utils import check_random_state, compute_sample_weight, resample
from sklearn.utils._dataframe import is_pandas_df
from sklearn.utils._missing import is_scalar_nan
from sklearn.utils._openmp_helpers import _openmp_effective_n_threads
from sklearn.utils._param_validation import Interval, RealNotInt, StrOptions
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.validation import (
_check_monotonic_cst,
_check_sample_weight,
_check_y,
check_array,
check_consistent_length,
check_is_fitted,
validate_data,
)
# Extend the shared loss registry with boosting-specific losses. Copy first so
# that the module-level registry imported from sklearn._loss.loss is not
# mutated in place for other users.
_LOSSES = _LOSSES.copy()
_LOSSES.update(
    {
        "poisson": HalfPoissonLoss,
        "gamma": HalfGammaLoss,
        "quantile": PinballLoss,
    }
)
def _update_leaves_values(loss, grower, y_true, raw_prediction, sample_weight):
"""Update the leaf values to be predicted by the tree.
Update equals:
loss.fit_intercept_only(y_true - raw_prediction)
This is only applied if loss.differentiable is False.
Note: It only works, if the loss is a function of the residual, as is the
case for AbsoluteError and PinballLoss. Otherwise, one would need to get
the minimum of loss(y_true, raw_prediction + x) in x. A few examples:
- AbsoluteError: median(y_true - raw_prediction).
- PinballLoss: quantile(y_true - raw_prediction).
More background:
For the standard gradient descent method according to "Greedy Function
Approximation: A Gradient Boosting Machine" by Friedman, all loss functions but the
squared loss need a line search step. BaseHistGradientBoosting, however, implements
a so called Newton boosting where the trees are fitted to a 2nd order
approximations of the loss in terms of gradients and hessians. In this case, the
line search step is only necessary if the loss is not smooth, i.e. not
differentiable, which renders the 2nd order approximation invalid. In fact,
non-smooth losses arbitrarily set hessians to 1 and effectively use the standard
gradient descent method with line search.
"""
# TODO: Ideally this should be computed in parallel over the leaves using something
# similar to _update_raw_predictions(), but this requires a cython version of
# median().
for leaf in grower.finalized_leaves:
indices = leaf.sample_indices
if sample_weight is None:
sw = None
else:
sw = sample_weight[indices]
update = loss.fit_intercept_only(
y_true=y_true[indices] - raw_prediction[indices],
sample_weight=sw,
)
leaf.value = grower.shrinkage * update
# Note that the regularization is ignored here
@contextmanager
def _patch_raw_predict(estimator, raw_predictions):
"""Context manager that patches _raw_predict to return raw_predictions.
`raw_predictions` is typically a precomputed array to avoid redundant
state-wise computations fitting with early stopping enabled: in this case
`raw_predictions` is incrementally updated whenever we add a tree to the
boosted ensemble.
Note: this makes fitting HistGradientBoosting* models inherently non thread
safe at fit time. However thread-safety at fit time was never guaranteed nor
enforced for scikit-learn estimators in general.
Thread-safety at prediction/transform time is another matter as those
operations are typically side-effect free and therefore often thread-safe by
default for most scikit-learn models and would like to keep it that way.
Therefore this context manager should only be used at fit time.
TODO: in the future, we could explore the possibility to extend the scorer
public API to expose a way to compute vales from raw predictions. That would
probably require also making the scorer aware of the inverse link function
used by the estimator which is typically private API for now, hence the need
for this patching mechanism.
"""
orig_raw_predict = estimator._raw_predict
def _patched_raw_predicts(*args, **kwargs):
return raw_predictions
estimator._raw_predict = _patched_raw_predicts
yield estimator
estimator._raw_predict = orig_raw_predict
class BaseHistGradientBoosting(BaseEstimator, ABC):
    """Base class for histogram-based gradient boosting estimators."""

    # Declarative hyperparameter constraints consumed by `_fit_context` /
    # `validate_params` at fit time: keys are constructor parameter names,
    # values list the accepted specifications (types, Interval ranges,
    # StrOptions, special strings like "array-like" or "boolean").
    _parameter_constraints: dict = {
        "loss": [BaseLoss],
        "learning_rate": [Interval(Real, 0, None, closed="neither")],
        "max_iter": [Interval(Integral, 1, None, closed="left")],
        "max_leaf_nodes": [Interval(Integral, 2, None, closed="left"), None],
        "max_depth": [Interval(Integral, 1, None, closed="left"), None],
        "min_samples_leaf": [Interval(Integral, 1, None, closed="left")],
        "l2_regularization": [Interval(Real, 0, None, closed="left")],
        "max_features": [Interval(RealNotInt, 0, 1, closed="right")],
        "monotonic_cst": ["array-like", dict, None],
        "interaction_cst": [
            list,
            tuple,
            StrOptions({"pairwise", "no_interactions"}),
            None,
        ],
        "n_iter_no_change": [Interval(Integral, 1, None, closed="left")],
        # Either a fraction of the training set, an absolute number of
        # samples, or None (use X_val/y_val or no validation).
        "validation_fraction": [
            Interval(RealNotInt, 0, 1, closed="neither"),
            Interval(Integral, 1, None, closed="left"),
            None,
        ],
        "tol": [Interval(Real, 0, None, closed="left")],
        "max_bins": [Interval(Integral, 2, 255, closed="both")],
        "categorical_features": ["array-like", StrOptions({"from_dtype"}), None],
        "warm_start": ["boolean"],
        "early_stopping": [StrOptions({"auto"}), "boolean"],
        "scoring": [str, callable, None],
        "verbose": ["verbose"],
        "random_state": ["random_state"],
    }
@abstractmethod
def __init__(
    self,
    loss,
    *,
    learning_rate,
    max_iter,
    max_leaf_nodes,
    max_depth,
    min_samples_leaf,
    l2_regularization,
    max_features,
    max_bins,
    categorical_features,
    monotonic_cst,
    interaction_cst,
    warm_start,
    early_stopping,
    scoring,
    validation_fraction,
    n_iter_no_change,
    tol,
    verbose,
    random_state,
):
    """Store the hyperparameters, one public attribute per argument.

    Abstract: concrete subclasses (regressor/classifier) call this via
    ``super().__init__``. Per scikit-learn convention no validation happens
    here; parameters are validated at fit time against
    ``_parameter_constraints``.
    """
    self.loss = loss
    self.learning_rate = learning_rate
    self.max_iter = max_iter
    self.max_leaf_nodes = max_leaf_nodes
    self.max_depth = max_depth
    self.min_samples_leaf = min_samples_leaf
    self.l2_regularization = l2_regularization
    self.max_features = max_features
    self.max_bins = max_bins
    self.monotonic_cst = monotonic_cst
    self.interaction_cst = interaction_cst
    self.categorical_features = categorical_features
    self.warm_start = warm_start
    self.early_stopping = early_stopping
    self.scoring = scoring
    self.validation_fraction = validation_fraction
    self.n_iter_no_change = n_iter_no_change
    self.tol = tol
    self.verbose = verbose
    self.random_state = random_state
def _validate_parameters(self):
"""Validate parameters passed to __init__.
The parameters that are directly passed to the grower are checked in
TreeGrower."""
if self.monotonic_cst is not None and self.n_trees_per_iteration_ != 1:
raise ValueError(
"monotonic constraints are not supported for multiclass classification."
)
def _finalize_sample_weight(self, sample_weight, y):
"""Finalize sample weight.
Used by subclasses to adjust sample_weights. This is useful for implementing
class weights.
"""
return sample_weight
def _preprocess_X(self, X, *, reset):
    """Preprocess and validate X.

    Parameters
    ----------
    X : {array-like, pandas DataFrame} of shape (n_samples, n_features)
        Input data.

    reset : bool
        Whether to reset the `n_features_in_` and `feature_names_in_`
        attributes. True during `fit`, False during `predict`/`transform`.

    Returns
    -------
    When ``reset=True`` (fit path), a 2-tuple:

    X : ndarray of shape (n_samples, n_features)
        Validated input data.

    known_categories : list of ndarray of shape (n_categories,) or None
        List of known categories for each categorical feature, or None when
        no feature is categorical.

    When ``reset=False`` (predict path), only the validated/transformed
    ``X`` is returned.
    """
    # If there is a preprocessor, we let the preprocessor handle the validation.
    # Otherwise, we validate the data ourselves.
    check_X_kwargs = dict(dtype=[X_DTYPE], ensure_all_finite=False)
    if not reset:
        if self._preprocessor is None:
            return validate_data(self, X, reset=False, **check_X_kwargs)
        return self._preprocessor.transform(X)

    # From here on, reset is True: this is the fit path, where the
    # preprocessor (if any) is built.
    self.is_categorical_ = self._check_categorical_features(X)

    if self.is_categorical_ is None:
        # No categorical feature: plain validation, no preprocessor needed.
        self._preprocessor = None
        self._is_categorical_remapped = None

        X = validate_data(self, X, **check_X_kwargs)
        return X, None

    n_features = X.shape[1]

    # Encode categorical columns to ordinal codes; unknown and missing
    # categories both map to NaN (handled downstream by the bin mapper).
    ordinal_encoder = OrdinalEncoder(
        categories="auto",
        handle_unknown="use_encoded_value",
        unknown_value=np.nan,
        encoded_missing_value=np.nan,
        dtype=X_DTYPE,
    )

    check_X = partial(check_array, **check_X_kwargs)
    numerical_preprocessor = FunctionTransformer(check_X)
    self._preprocessor = ColumnTransformer(
        [
            ("encoder", ordinal_encoder, self.is_categorical_),
            ("numerical", numerical_preprocessor, ~self.is_categorical_),
        ]
    )
    self._preprocessor.set_output(transform="default")
    X = self._preprocessor.fit_transform(X)
    # check categories found by the OrdinalEncoder and get their encoded values
    known_categories = self._check_categories()
    self.n_features_in_ = self._preprocessor.n_features_in_
    with suppress(AttributeError):
        self.feature_names_in_ = self._preprocessor.feature_names_in_

    # The ColumnTransformer's output places the categorical features at the
    # beginning
    categorical_remapped = np.zeros(n_features, dtype=bool)
    categorical_remapped[self._preprocessor.output_indices_["encoder"]] = True
    self._is_categorical_remapped = categorical_remapped

    return X, known_categories
def _check_categories(self):
    """Check categories found by the preprocessor and return their encoded values.

    Returns a list of length ``self.n_features_in_``, with one entry per
    input feature.

    For non-categorical features, the corresponding entry is ``None``.

    For categorical features, the corresponding entry is an array
    containing the categories as encoded by the preprocessor (an
    ``OrdinalEncoder``), excluding missing values. The entry is therefore
    ``np.arange(n_categories)`` where ``n_categories`` is the number of
    unique values in the considered feature column, after removing missing
    values.

    If ``n_categories > self.max_bins`` for any feature, a ``ValueError``
    is raised.
    """
    encoder = self._preprocessor.named_transformers_["encoder"]
    known_categories = [None] * self._preprocessor.n_features_in_
    # Positions (in the original feature order) of the columns routed to the
    # ordinal encoder by the ColumnTransformer.
    categorical_column_indices = np.arange(self._preprocessor.n_features_in_)[
        self._preprocessor.output_indices_["encoder"]
    ]
    for feature_idx, categories in zip(
        categorical_column_indices, encoder.categories_
    ):
        # OrdinalEncoder always puts np.nan as the last category if the
        # training data has missing values. Here we remove it because it is
        # already added by the _BinMapper.
        if len(categories) and is_scalar_nan(categories[-1]):
            categories = categories[:-1]
        if categories.size > self.max_bins:
            # Prefer the column name in the error when available; fall back
            # to the positional index for nameless inputs.
            try:
                feature_name = repr(encoder.feature_names_in_[feature_idx])
            except AttributeError:
                feature_name = f"at index {feature_idx}"
            raise ValueError(
                f"Categorical feature {feature_name} is expected to "
                f"have a cardinality <= {self.max_bins} but actually "
                f"has a cardinality of {categories.size}."
            )
        # The encoder mapped raw categories to 0..n-1, so the known encoded
        # values are simply a contiguous range.
        known_categories[feature_idx] = np.arange(len(categories), dtype=X_DTYPE)
    return known_categories
def _check_categorical_features(self, X):
    """Check and validate categorical features in X

    Parameters
    ----------
    X : {array-like, pandas DataFrame} of shape (n_samples, n_features)
        Input data.

    Return
    ------
    is_categorical : ndarray of shape (n_features,) or None, dtype=bool
        Indicates whether a feature is categorical. If no feature is
        categorical, this is None.
    """
    # Special code for pandas because of a bug in recent pandas, which is
    # fixed in main and maybe included in 2.2.1, see
    # https://github.com/pandas-dev/pandas/pull/57173.
    # Also pandas versions < 1.5.1 do not support the dataframe interchange
    if is_pandas_df(X):
        X_is_dataframe = True
        categorical_columns_mask = np.asarray(X.dtypes == "category")
    elif hasattr(X, "__dataframe__"):
        # Generic dataframe via the interchange protocol.
        X_is_dataframe = True
        categorical_columns_mask = np.asarray(
            [
                c.dtype[0].name == "CATEGORICAL"
                for c in X.__dataframe__().get_columns()
            ]
        )
    else:
        X_is_dataframe = False
        categorical_columns_mask = None

    categorical_features = self.categorical_features

    categorical_by_dtype = (
        isinstance(categorical_features, str)
        and categorical_features == "from_dtype"
    )
    # "from_dtype" on a non-dataframe input degrades to "no categoricals"
    # since there are no dtypes to inspect.
    no_categorical_dtype = categorical_features is None or (
        categorical_by_dtype and not X_is_dataframe
    )

    if no_categorical_dtype:
        return None

    use_pandas_categorical = categorical_by_dtype and X_is_dataframe
    if use_pandas_categorical:
        categorical_features = categorical_columns_mask
    else:
        categorical_features = np.asarray(categorical_features)

    if categorical_features.size == 0:
        return None

    # From here on, categorical_features is a ndarray and is dispatched on
    # its dtype kind: "i" = indices, "b" = boolean mask, "U"/"O" = names.
    if categorical_features.dtype.kind not in ("i", "b", "U", "O"):
        raise ValueError(
            "categorical_features must be an array-like of bool, int or "
            f"str, got: {categorical_features.dtype.name}."
        )

    if categorical_features.dtype.kind == "O":
        # Object arrays are only accepted when every element is a str.
        types = set(type(f) for f in categorical_features)
        if types != {str}:
            raise ValueError(
                "categorical_features must be an array-like of bool, int or "
                f"str, got: {', '.join(sorted(t.__name__ for t in types))}."
            )

    n_features = X.shape[1]
    # At this point `validate_data` was not called yet because we use the original
    # dtypes to discover the categorical features. Thus `feature_names_in_`
    # is not defined yet.
    feature_names_in_ = getattr(X, "columns", None)

    if categorical_features.dtype.kind in ("U", "O"):
        # check for feature names
        if feature_names_in_ is None:
            raise ValueError(
                "categorical_features should be passed as an array of "
                "integers or as a boolean mask when the model is fitted "
                "on data without feature names."
            )
        is_categorical = np.zeros(n_features, dtype=bool)
        feature_names = list(feature_names_in_)
        for feature_name in categorical_features:
            try:
                is_categorical[feature_names.index(feature_name)] = True
            except ValueError as e:
                raise ValueError(
                    f"categorical_features has an item value '{feature_name}' "
                    "which is not a valid feature name of the training "
                    f"data. Observed feature names: {feature_names}"
                ) from e
    elif categorical_features.dtype.kind == "i":
        # check for categorical features as indices
        if (
            np.max(categorical_features) >= n_features
            or np.min(categorical_features) < 0
        ):
            raise ValueError(
                "categorical_features set as integer "
                "indices must be in [0, n_features - 1]"
            )
        is_categorical = np.zeros(n_features, dtype=bool)
        is_categorical[categorical_features] = True
    else:
        # Boolean mask: must cover every feature exactly once.
        if categorical_features.shape[0] != n_features:
            raise ValueError(
                "categorical_features set as a boolean mask "
                "must have shape (n_features,), got: "
                f"{categorical_features.shape}"
            )
        is_categorical = categorical_features

    if not np.any(is_categorical):
        return None

    return is_categorical
def _check_interaction_cst(self, n_features):
"""Check and validation for interaction constraints."""
if self.interaction_cst is None:
return None
if self.interaction_cst == "no_interactions":
interaction_cst = [[i] for i in range(n_features)]
elif self.interaction_cst == "pairwise":
interaction_cst = itertools.combinations(range(n_features), 2)
else:
interaction_cst = self.interaction_cst
try:
constraints = [set(group) for group in interaction_cst]
except TypeError:
raise ValueError(
"Interaction constraints must be a sequence of tuples or lists, got:"
f" {self.interaction_cst!r}."
)
for group in constraints:
for x in group:
if not (isinstance(x, Integral) and 0 <= x < n_features):
raise ValueError(
"Interaction constraints must consist of integer indices in"
f" [0, n_features - 1] = [0, {n_features - 1}], specifying the"
" position of features, got invalid indices:"
f" {group!r}"
)
# Add all not listed features as own group by default.
rest = set(range(n_features)) - set().union(*constraints)
if len(rest) > 0:
constraints.append(rest)
return constraints
@_fit_context(prefer_skip_nested_validation=True)
def fit(
self,
X,
y,
sample_weight=None,
*,
X_val=None,
y_val=None,
sample_weight_val=None,
):
"""Fit the gradient boosting model.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,) default=None
Weights of training data.
.. versionadded:: 0.23
X_val : array-like of shape (n_val, n_features)
Additional sample of features for validation used in early stopping.
In a `Pipeline`, `X_val` can be transformed the same way as `X` with
`Pipeline(..., transform_input=["X_val"])`.
.. versionadded:: 1.7
y_val : array-like of shape (n_samples,)
Additional sample of target values for validation used in early stopping.
.. versionadded:: 1.7
sample_weight_val : array-like of shape (n_samples,) default=None
Additional weights for validation used in early stopping.
.. versionadded:: 1.7
Returns
-------
self : object
Fitted estimator.
"""
fit_start_time = time()
acc_find_split_time = 0.0 # time spent finding the best splits
acc_apply_split_time = 0.0 # time spent splitting nodes
acc_compute_hist_time = 0.0 # time spent computing histograms
# time spent predicting X for gradient and hessians update
acc_prediction_time = 0.0
X, known_categories = self._preprocess_X(X, reset=True)
y = _check_y(y, estimator=self)
y = self._encode_y(y)
check_consistent_length(X, y)
# Do not create unit sample weights by default to later skip some
# computation
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X, dtype=np.float64)
# TODO: remove when PDP supports sample weights
self._fitted_with_sw = True
sample_weight = self._finalize_sample_weight(sample_weight, y)
validation_data_provided = X_val is not None or y_val is not None
if validation_data_provided:
if y_val is None:
raise ValueError("X_val is provided, but y_val was not provided.")
if X_val is None:
raise ValueError("y_val is provided, but X_val was not provided.")
X_val = self._preprocess_X(X_val, reset=False)
y_val = _check_y(y_val, estimator=self)
y_val = self._encode_y_val(y_val)
check_consistent_length(X_val, y_val)
if sample_weight_val is not None:
sample_weight_val = _check_sample_weight(
sample_weight_val, X_val, dtype=np.float64
)
if self.early_stopping is False:
raise ValueError(
"X_val and y_val are passed to fit while at the same time "
"early_stopping is False. When passing X_val and y_val to fit,"
"early_stopping should be set to either 'auto' or True."
)
# Note: At this point, we could delete self._label_encoder if it exists.
# But we don't to keep the code even simpler.
rng = check_random_state(self.random_state)
# When warm starting, we want to reuse the same seed that was used
# the first time fit was called (e.g. train/val split).
# For feature subsampling, we want to continue with the rng we started with.
if not self.warm_start or not self._is_fitted():
self._random_seed = rng.randint(np.iinfo(np.uint32).max, dtype="u8")
feature_subsample_seed = rng.randint(np.iinfo(np.uint32).max, dtype="u8")
self._feature_subsample_rng = np.random.default_rng(feature_subsample_seed)
self._validate_parameters()
monotonic_cst = _check_monotonic_cst(self, self.monotonic_cst)
# _preprocess_X places the categorical features at the beginning,
# change the order of monotonic_cst accordingly
if self.is_categorical_ is not None:
monotonic_cst_remapped = np.concatenate(
(
monotonic_cst[self.is_categorical_],
monotonic_cst[~self.is_categorical_],
)
)
else:
monotonic_cst_remapped = monotonic_cst
# used for validation in predict
n_samples, self._n_features = X.shape
# Encode constraints into a list of sets of features indices (integers).
interaction_cst = self._check_interaction_cst(self._n_features)
# we need this stateful variable to tell raw_predict() that it was
# called from fit() (this current method), and that the data it has
# received is pre-binned.
# predicting is faster on pre-binned data, so we want early stopping
# predictions to be made on pre-binned data. Unfortunately the _scorer
# can only call predict() or predict_proba(), not raw_predict(), and
# there's no way to tell the scorer that it needs to predict binned
# data.
self._in_fit = True
# `_openmp_effective_n_threads` is used to take cgroups CPU quotes
# into account when determine the maximum number of threads to use.
n_threads = _openmp_effective_n_threads()
if isinstance(self.loss, str):
self._loss = self._get_loss(sample_weight=sample_weight)
elif isinstance(self.loss, BaseLoss):
self._loss = self.loss
if self.early_stopping == "auto":
self.do_early_stopping_ = n_samples > 10_000
else:
self.do_early_stopping_ = self.early_stopping
# create validation data if needed
self._use_validation_data = (
self.validation_fraction is not None or validation_data_provided
)
if (
self.do_early_stopping_
and self._use_validation_data
and not validation_data_provided
):
# stratify for classification
# instead of checking predict_proba, loss.n_classes >= 2 would also work
stratify = y if hasattr(self._loss, "predict_proba") else None
# Save the state of the RNG for the training and validation split.
# This is needed in order to have the same split when using
# warm starting.
if sample_weight is None:
X_train, X_val, y_train, y_val = train_test_split(
X,
y,
test_size=self.validation_fraction,
stratify=stratify,
random_state=self._random_seed,
)
sample_weight_train = sample_weight_val = None
else:
# TODO: incorporate sample_weight in sampling here, as well as
# stratify
(
X_train,
X_val,
y_train,
y_val,
sample_weight_train,
sample_weight_val,
) = train_test_split(
X,
y,
sample_weight,
test_size=self.validation_fraction,
stratify=stratify,
random_state=self._random_seed,
)
else:
X_train, y_train, sample_weight_train = X, y, sample_weight
if not validation_data_provided:
X_val = y_val = sample_weight_val = None
# Bin the data
# For ease of use of the API, the user-facing GBDT classes accept the
# parameter max_bins, which doesn't take into account the bin for
# missing values (which is always allocated). However, since max_bins
# isn't the true maximal number of bins, all other private classes
# (binmapper, histbuilder...) accept n_bins instead, which is the
# actual total number of bins. Everywhere in the code, the
# convention is that n_bins == max_bins + 1
n_bins = self.max_bins + 1 # + 1 for missing values
self._bin_mapper = _BinMapper(
n_bins=n_bins,
is_categorical=self._is_categorical_remapped,
known_categories=known_categories,
random_state=self._random_seed,
n_threads=n_threads,
)
X_binned_train = self._bin_data(X_train, is_training_data=True)
if X_val is not None:
X_binned_val = self._bin_data(X_val, is_training_data=False)
else:
X_binned_val = None
# Uses binned data to check for missing values
has_missing_values = (
(X_binned_train == self._bin_mapper.missing_values_bin_idx_)
.any(axis=0)
.astype(np.uint8)
)
if self.verbose:
print("Fitting gradient boosted rounds:")
n_samples = X_binned_train.shape[0]
scoring_is_predefined_string = self.scoring in _SCORERS
need_raw_predictions_val = X_binned_val is not None and (
scoring_is_predefined_string or self.scoring == "loss"
)
# First time calling fit, or no warm start
if not (self._is_fitted() and self.warm_start):
# Clear random state and score attributes
self._clear_state()
# initialize raw_predictions: those are the accumulated values
# predicted by the trees for the training data. raw_predictions has
# shape (n_samples, n_trees_per_iteration) where
# n_trees_per_iterations is n_classes in multiclass classification,
# else 1.
# self._baseline_prediction has shape (1, n_trees_per_iteration)
self._baseline_prediction = self._loss.fit_intercept_only(
y_true=y_train, sample_weight=sample_weight_train
).reshape((1, -1))
raw_predictions = np.zeros(
shape=(n_samples, self.n_trees_per_iteration_),
dtype=self._baseline_prediction.dtype,
order="F",
)
raw_predictions += self._baseline_prediction
# predictors is a matrix (list of lists) of TreePredictor objects
# with shape (n_iter_, n_trees_per_iteration)
self._predictors = predictors = []
# Initialize structures and attributes related to early stopping
self._scorer = None # set if scoring != loss
raw_predictions_val = None # set if use val and scoring is a string
self.train_score_ = []
self.validation_score_ = []
if self.do_early_stopping_:
# populate train_score and validation_score with the
# predictions of the initial model (before the first tree)
# Create raw_predictions_val for storing the raw predictions of
# the validation data.
if need_raw_predictions_val:
raw_predictions_val = np.zeros(
shape=(X_binned_val.shape[0], self.n_trees_per_iteration_),
dtype=self._baseline_prediction.dtype,
order="F",
)
raw_predictions_val += self._baseline_prediction
if self.scoring == "loss":
# we're going to compute scoring w.r.t the loss. As losses
# take raw predictions as input (unlike the scorers), we
# can optimize a bit and avoid repeating computing the
# predictions of the previous trees. We'll reuse
# raw_predictions (as it's needed for training anyway) for
# evaluating the training loss.
self._check_early_stopping_loss(
raw_predictions=raw_predictions,
y_train=y_train,
sample_weight_train=sample_weight_train,
raw_predictions_val=raw_predictions_val,
y_val=y_val,
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/ensemble/_hist_gradient_boosting/binning.py | sklearn/ensemble/_hist_gradient_boosting/binning.py | """
This module contains the BinMapper class.
BinMapper is used for mapping a real-valued dataset into integer-valued bins.
Bin thresholds are computed with the quantiles so that each bin contains
approximately the same number of samples.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.ensemble._hist_gradient_boosting._binning import _map_to_bins
from sklearn.ensemble._hist_gradient_boosting._bitset import set_bitset_memoryview
from sklearn.ensemble._hist_gradient_boosting.common import (
ALMOST_INF,
X_BINNED_DTYPE,
X_BITSET_INNER_DTYPE,
X_DTYPE,
)
from sklearn.utils import check_array, check_random_state
from sklearn.utils._openmp_helpers import _openmp_effective_n_threads
from sklearn.utils.parallel import Parallel, delayed
from sklearn.utils.validation import check_is_fitted
def _find_binning_thresholds(col_data, max_bins):
    """Compute bin thresholds (quantile midpoints) for one continuous feature.

    Missing values are ignored for finding the thresholds.

    Parameters
    ----------
    col_data : array-like, shape (n_samples,)
        The continuous feature to bin.
    max_bins : int
        The maximum number of bins to use for non-missing values. If for a
        given feature the number of unique values is less than ``max_bins``,
        then those unique values will be used to compute the bin thresholds,
        instead of the quantiles.

    Return
    ------
    binning_thresholds : ndarray of shape(min(max_bins, n_unique_values) - 1,)
        The increasing numeric values that can be used to separate the bins.
        A given value x will be mapped into bin value i iff
        binning_thresholds[i - 1] < x <= binning_thresholds[i]
    """
    # Thresholds are computed on non-missing values only.
    nan_mask = np.isnan(col_data)
    if nan_mask.any():
        col_data = col_data[~nan_mask]

    # np.unique and np.percentile both sort internally; sorting once up
    # front also yields a contiguous array.
    col_data = np.sort(col_data)
    distinct_values = np.unique(col_data).astype(X_DTYPE)

    if len(distinct_values) <= max_bins:
        # Few unique values: thresholds are midpoints between them.
        midpoints = distinct_values[:-1] + distinct_values[1:]
        midpoints *= 0.5
    else:
        # Otherwise take evenly spaced interior percentiles. Approximate
        # midpoint percentiles could be derived from
        # np.unique(col_data, return_counts) instead, but that is more work
        # for a limited benefit since we operate on a fixed-size subsample.
        inner_percentiles = np.linspace(0, 100, num=max_bins + 1)[1:-1]
        midpoints = np.percentile(
            col_data, inner_percentiles, method="midpoint"
        ).astype(X_DTYPE)
        assert midpoints.shape[0] == max_bins - 1

    # +inf thresholds are only allowed in a "split on nan" situation, so cap
    # everything below ALMOST_INF.
    np.clip(midpoints, a_min=None, a_max=ALMOST_INF, out=midpoints)
    return midpoints
class _BinMapper(TransformerMixin, BaseEstimator):
    """Transformer that maps a dataset into integer-valued bins.

    For continuous features, the bins are created in a feature-wise fashion,
    using quantiles so that each bins contains approximately the same number
    of samples. For large datasets, quantiles are computed on a subset of the
    data to speed-up the binning, but the quantiles should remain stable.

    For categorical features, the raw categorical values are expected to be
    in [0, 254] (this is not validated here though) and each category
    corresponds to a bin. All categorical values must be known at
    initialization: transform() doesn't know how to bin unknown categorical
    values. Note that transform() is only used on non-training data in the
    case of early stopping.

    Features with a small number of values may be binned into less than
    ``n_bins`` bins. The last bin (at index ``n_bins - 1``) is always reserved
    for missing values.

    Parameters
    ----------
    n_bins : int, default=256
        The maximum number of bins to use (including the bin for missing
        values). Should be in [3, 256]. Non-missing values are binned on
        ``max_bins = n_bins - 1`` bins. The last bin is always reserved for
        missing values. If for a given feature the number of unique values is
        less than ``max_bins``, then those unique values will be used to
        compute the bin thresholds, instead of the quantiles. For categorical
        features indicated by ``is_categorical``, see the docstring of
        ``is_categorical`` for details on this procedure.

    subsample : int or None, default=2e5
        If ``n_samples > subsample``, then ``sub_samples`` samples will be
        randomly chosen to compute the quantiles. If ``None``, the whole data
        is used.

    is_categorical : ndarray of bool of shape (n_features,), default=None
        Indicates categorical features. By default, all features are
        considered continuous.

    known_categories : list of {ndarray, None} of shape (n_features,), \
            default=None
        For each categorical feature, the array indicates the set of unique
        categorical values. These should be the possible values over all the
        data, not just the training data. For continuous features, the
        corresponding entry should be None.

    random_state : int, RandomState instance or None, default=None
        Pseudo-random number generator to control the random sub-sampling.
        Pass an int for reproducible output across multiple
        function calls.
        See :term:`Glossary <random_state>`.

    n_threads : int, default=None
        Number of OpenMP threads to use. `_openmp_effective_n_threads` is called
        to determine the effective number of threads use, which takes cgroups CPU
        quotes into account. See the docstring of `_openmp_effective_n_threads`
        for details.

    Attributes
    ----------
    bin_thresholds_ : list of ndarray
        For each feature, each array indicates how to map a feature into a
        binned feature. The semantic and size depends on the nature of the
        feature:
        - for real-valued features, the array corresponds to the real-valued
          bin thresholds (the upper bound of each bin). There are ``max_bins
          - 1`` thresholds, where ``max_bins = n_bins - 1`` is the number of
          bins used for non-missing values.
        - for categorical features, the array is a map from a binned category
          value to the raw category value. The size of the array is equal to
          ``min(max_bins, category_cardinality)`` where we ignore missing
          values in the cardinality.

    n_bins_non_missing_ : ndarray, dtype=np.uint32
        For each feature, gives the number of bins actually used for
        non-missing values. For features with a lot of unique values, this is
        equal to ``n_bins - 1``.

    is_categorical_ : ndarray of shape (n_features,), dtype=np.uint8
        Indicator for categorical features.

    missing_values_bin_idx_ : np.uint8
        The index of the bin where missing values are mapped. This is a
        constant across all features. This corresponds to the last bin, and
        it is always equal to ``n_bins - 1``. Note that if ``n_bins_non_missing_``
        is less than ``n_bins - 1`` for a given feature, then there are
        empty (and unused) bins.
    """

    def __init__(
        self,
        n_bins=256,
        subsample=int(2e5),
        is_categorical=None,
        known_categories=None,
        random_state=None,
        n_threads=None,
    ):
        self.n_bins = n_bins
        self.subsample = subsample
        self.is_categorical = is_categorical
        self.known_categories = known_categories
        self.random_state = random_state
        self.n_threads = n_threads

    def fit(self, X, y=None):
        """Fit data X by computing the binning thresholds.

        The last bin is reserved for missing values, whether missing values
        are present in the data or not.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data to bin.

        y : None
            Ignored.

        Returns
        -------
        self : object
        """
        if not (3 <= self.n_bins <= 256):
            # min is 3: at least 2 distinct bins and a missing values bin
            raise ValueError(
                "n_bins={} should be no smaller than 3 and no larger than 256.".format(
                    self.n_bins
                )
            )
        X = check_array(X, dtype=[X_DTYPE], ensure_all_finite=False)
        # max_bins is the number of bins for non-missing values only; the
        # extra bin (index n_bins - 1) is reserved for missing values.
        max_bins = self.n_bins - 1

        rng = check_random_state(self.random_state)
        if self.subsample is not None and X.shape[0] > self.subsample:
            # Quantiles are stable enough on a random subsample; this keeps
            # threshold computation fast on large datasets.
            subset = rng.choice(X.shape[0], self.subsample, replace=False)
            X = X.take(subset, axis=0)

        if self.is_categorical is None:
            self.is_categorical_ = np.zeros(X.shape[1], dtype=np.uint8)
        else:
            self.is_categorical_ = np.asarray(self.is_categorical, dtype=np.uint8)

        n_features = X.shape[1]
        known_categories = self.known_categories
        if known_categories is None:
            known_categories = [None] * n_features

        # validate is_categorical and known_categories parameters:
        # categorical features must come with their categories, and only
        # categorical features may have categories.
        for f_idx in range(n_features):
            is_categorical = self.is_categorical_[f_idx]
            known_cats = known_categories[f_idx]
            if is_categorical and known_cats is None:
                raise ValueError(
                    f"Known categories for feature {f_idx} must be provided."
                )
            if not is_categorical and known_cats is not None:
                raise ValueError(
                    f"Feature {f_idx} isn't marked as a categorical feature, "
                    "but categories were passed."
                )

        self.missing_values_bin_idx_ = self.n_bins - 1

        self.bin_thresholds_ = [None] * n_features
        n_bins_non_missing = [None] * n_features

        # Thresholds for the non-categorical features are computed in
        # parallel (threading backend: the work is numpy-heavy and releases
        # the GIL).
        non_cat_thresholds = Parallel(n_jobs=self.n_threads, backend="threading")(
            delayed(_find_binning_thresholds)(X[:, f_idx], max_bins)
            for f_idx in range(n_features)
            if not self.is_categorical_[f_idx]
        )

        # non_cat_thresholds only covers non-categorical features, in order;
        # non_cat_idx walks through it as we scan all features.
        non_cat_idx = 0
        for f_idx in range(n_features):
            if self.is_categorical_[f_idx]:
                # Since categories are assumed to be encoded in
                # [0, n_cats] and since n_cats <= max_bins,
                # the thresholds *are* the unique categorical values. This will
                # lead to the correct mapping in transform()
                thresholds = known_categories[f_idx]
                n_bins_non_missing[f_idx] = thresholds.shape[0]
                self.bin_thresholds_[f_idx] = thresholds
            else:
                # n thresholds separate n + 1 bins.
                self.bin_thresholds_[f_idx] = non_cat_thresholds[non_cat_idx]
                n_bins_non_missing[f_idx] = self.bin_thresholds_[f_idx].shape[0] + 1
                non_cat_idx += 1

        self.n_bins_non_missing_ = np.array(n_bins_non_missing, dtype=np.uint32)
        return self

    def transform(self, X):
        """Bin data X.

        Missing values will be mapped to the last bin.

        For categorical features, the mapping will be incorrect for unknown
        categories. Since the BinMapper is given known_categories of the
        entire training data (i.e. before the call to train_test_split() in
        case of early-stopping), this never happens.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data to bin.

        Returns
        -------
        X_binned : array-like of shape (n_samples, n_features)
            The binned data (fortran-aligned).
        """
        X = check_array(X, dtype=[X_DTYPE], ensure_all_finite=False)
        check_is_fitted(self)
        if X.shape[1] != self.n_bins_non_missing_.shape[0]:
            raise ValueError(
                "This estimator was fitted with {} features but {} got passed "
                "to transform()".format(self.n_bins_non_missing_.shape[0], X.shape[1])
            )

        n_threads = _openmp_effective_n_threads(self.n_threads)
        # Fortran order: the histogram/split code iterates column-wise.
        binned = np.zeros_like(X, dtype=X_BINNED_DTYPE, order="F")
        _map_to_bins(
            X,
            self.bin_thresholds_,
            self.is_categorical_,
            self.missing_values_bin_idx_,
            n_threads,
            binned,
        )
        return binned

    def make_known_categories_bitsets(self):
        """Create bitsets of known categories.

        Returns
        -------
        - known_cat_bitsets : ndarray of shape (n_categorical_features, 8)
            Array of bitsets of known categories, for each categorical feature.
        - f_idx_map : ndarray of shape (n_features,)
            Map from original feature index to the corresponding index in the
            known_cat_bitsets array.
        """

        categorical_features_indices = np.flatnonzero(self.is_categorical_)

        n_features = self.is_categorical_.size
        n_categorical_features = categorical_features_indices.size

        # Non-categorical entries of f_idx_map are left at 0 and must not be
        # used by callers.
        f_idx_map = np.zeros(n_features, dtype=np.uint32)
        f_idx_map[categorical_features_indices] = np.arange(
            n_categorical_features, dtype=np.uint32
        )

        # For categorical features, bin_thresholds_ holds the raw category
        # values (see fit), i.e. the known categories.
        known_categories = self.bin_thresholds_

        known_cat_bitsets = np.zeros(
            (n_categorical_features, 8), dtype=X_BITSET_INNER_DTYPE
        )

        # TODO: complexity is O(n_categorical_features * 255). Maybe this is
        # worth cythonizing
        for mapped_f_idx, f_idx in enumerate(categorical_features_indices):
            for raw_cat_val in known_categories[f_idx]:
                set_bitset_memoryview(known_cat_bitsets[mapped_f_idx], raw_cat_val)

        return known_cat_bitsets, f_idx_map
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/ensemble/_hist_gradient_boosting/tests/test_warm_start.py | sklearn/ensemble/_hist_gradient_boosting/tests/test_warm_start.py | import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from sklearn.base import clone
from sklearn.datasets import make_classification, make_regression
from sklearn.ensemble import (
HistGradientBoostingClassifier,
HistGradientBoostingRegressor,
)
from sklearn.metrics import check_scoring
X_classification, y_classification = make_classification(random_state=0)
X_regression, y_regression = make_regression(random_state=0)
def _assert_predictor_equal(gb_1, gb_2, X):
"""Assert that two HistGBM instances are identical."""
# Check identical nodes for each tree
for pred_ith_1, pred_ith_2 in zip(gb_1._predictors, gb_2._predictors):
for predictor_1, predictor_2 in zip(pred_ith_1, pred_ith_2):
assert_array_equal(predictor_1.nodes, predictor_2.nodes)
# Check identical predictions
assert_allclose(gb_1.predict(X), gb_2.predict(X))
@pytest.mark.parametrize(
    "GradientBoosting, X, y",
    [
        (HistGradientBoostingClassifier, X_classification, y_classification),
        (HistGradientBoostingRegressor, X_regression, y_regression),
    ],
)
def test_max_iter_with_warm_start_validation(GradientBoosting, X, y):
    # A warm-started refit can only grow the ensemble: requesting fewer
    # iterations than the previous fit produced must raise a ValueError.
    gb = GradientBoosting(max_iter=10, early_stopping=False, warm_start=True)
    gb.fit(X, y)
    gb.set_params(max_iter=5)
    expected_msg = (
        "max_iter=5 must be larger than or equal to n_iter_=10 when warm_start==True"
    )
    with pytest.raises(ValueError, match=expected_msg):
        gb.fit(X, y)
@pytest.mark.parametrize(
    "GradientBoosting, X, y",
    [
        (HistGradientBoostingClassifier, X_classification, y_classification),
        (HistGradientBoostingRegressor, X_regression, y_regression),
    ],
)
def test_warm_start_yields_identical_results(GradientBoosting, X, y):
    # Fitting 50 iterations and then 25 more with warm start must be
    # equivalent to fitting 75 iterations in one go.
    seed = 42
    gb_warm = GradientBoosting(
        n_iter_no_change=100, max_iter=50, random_state=seed, warm_start=True
    )
    gb_warm.fit(X, y).set_params(max_iter=75).fit(X, y)

    gb_cold = GradientBoosting(
        n_iter_no_change=100, max_iter=75, random_state=seed, warm_start=False
    )
    gb_cold.fit(X, y)

    # Trees and predictions must coincide.
    _assert_predictor_equal(gb_warm, gb_cold, X)
@pytest.mark.parametrize(
    "GradientBoosting, X, y",
    [
        (HistGradientBoostingClassifier, X_classification, y_classification),
        (HistGradientBoostingRegressor, X_regression, y_regression),
    ],
)
def test_warm_start_max_depth(GradientBoosting, X, y):
    # An ensemble may mix trees of different depths across warm-started fits.
    gb = GradientBoosting(
        max_iter=20,
        min_samples_leaf=1,
        warm_start=True,
        max_depth=2,
        early_stopping=False,
    )
    gb.fit(X, y)
    gb.set_params(max_iter=30, max_depth=3, n_iter_no_change=110)
    gb.fit(X, y)

    # The 20 trees from the first fit were grown with max_depth == 2 ...
    for predictors in gb._predictors[:20]:
        assert predictors[0].get_max_depth() == 2
    # ... and the 10 trees added by the second fit with max_depth == 3.
    for predictors in gb._predictors[20:]:
        assert predictors[0].get_max_depth() == 3
@pytest.mark.parametrize(
    "GradientBoosting, X, y",
    [
        (HistGradientBoostingClassifier, X_classification, y_classification),
        (HistGradientBoostingRegressor, X_regression, y_regression),
    ],
)
@pytest.mark.parametrize("scoring", (None, "loss"))
def test_warm_start_early_stopping(GradientBoosting, X, y, scoring):
    # Refitting with warm start resumes from an already converged model, so
    # early stopping must trigger after only a few extra iterations.
    n_iter_no_change = 5
    gb = GradientBoosting(
        n_iter_no_change=n_iter_no_change,
        max_iter=10000,
        early_stopping=True,
        random_state=42,
        warm_start=True,
        tol=1e-3,
        scoring=scoring,
    )
    gb.fit(X, y)
    n_iter_first = gb.n_iter_
    gb.fit(X, y)
    n_iter_second = gb.n_iter_
    assert 0 < n_iter_second - n_iter_first < n_iter_no_change
@pytest.mark.parametrize(
    "GradientBoosting, X, y",
    [
        (HistGradientBoostingClassifier, X_classification, y_classification),
        (HistGradientBoostingRegressor, X_regression, y_regression),
    ],
)
def test_warm_start_equal_n_estimators(GradientBoosting, X, y):
    # Warm-started refit with an unchanged max_iter must add no trees and
    # leave the model untouched.
    reference = GradientBoosting(max_depth=2, early_stopping=False)
    reference.fit(X, y)

    resumed = clone(reference)
    resumed.set_params(max_iter=reference.max_iter, warm_start=True, n_iter_no_change=5)
    resumed.fit(X, y)

    _assert_predictor_equal(reference, resumed, X)
@pytest.mark.parametrize(
    "GradientBoosting, X, y",
    [
        (HistGradientBoostingClassifier, X_classification, y_classification),
        (HistGradientBoostingRegressor, X_regression, y_regression),
    ],
)
def test_warm_start_clear(GradientBoosting, X, y):
    # Fitting with warm_start=False must wipe any state left by a previous
    # warm-started fit, yielding the same model as a fresh estimator.
    fresh = GradientBoosting(n_iter_no_change=5, random_state=42)
    fresh.fit(X, y)

    recycled = GradientBoosting(n_iter_no_change=5, random_state=42, warm_start=True)
    recycled.fit(X, y)  # builds some state
    recycled.set_params(warm_start=False)
    recycled.fit(X, y)  # must discard the old state and match `fresh`

    # The recorded scores must be identical ...
    assert_allclose(fresh.train_score_, recycled.train_score_)
    assert_allclose(fresh.validation_score_, recycled.validation_score_)
    # ... and so must the predictors themselves.
    _assert_predictor_equal(fresh, recycled, X)
@pytest.mark.parametrize(
    "GradientBoosting, X, y",
    [
        (HistGradientBoostingClassifier, X_classification, y_classification),
        (HistGradientBoostingRegressor, X_regression, y_regression),
    ],
)
@pytest.mark.parametrize("rng_type", ("none", "int", "instance"))
def test_random_seeds_warm_start(GradientBoosting, X, y, rng_type):
    # The seed driving the train/val split and small-trainset subsampling
    # must be reused across warm-started fits, and only then.

    def _get_rng(rng_type):
        # Helper so each estimator receives an equivalent, unconsumed rng.
        if rng_type == "none":
            return None
        if rng_type == "int":
            return 42
        return np.random.RandomState(0)

    gb_1 = GradientBoosting(
        early_stopping=True, max_iter=2, random_state=_get_rng(rng_type)
    )
    gb_1.set_params(scoring=check_scoring(gb_1))
    gb_1.fit(X, y)
    seed_1_1 = gb_1._random_seed
    gb_1.fit(X, y)  # no warm start: old state is cleared, a new seed is drawn
    seed_1_2 = gb_1._random_seed

    gb_2 = GradientBoosting(
        early_stopping=True,
        max_iter=2,
        random_state=_get_rng(rng_type),
        warm_start=True,
    )
    gb_2.set_params(scoring=check_scoring(gb_2))
    gb_2.fit(X, y)  # initializes the state
    seed_2_1 = gb_2._random_seed
    gb_2.fit(X, y)  # warm start: must reuse the stored seed
    seed_2_2 = gb_2._random_seed

    # Without warm starting, the seeds should be
    # * all different if random state is None
    # * all equal if random state is an integer
    # * different when refitting and equal with a new estimator (because
    #   the random state is mutated)
    if rng_type == "none":
        assert seed_1_1 != seed_1_2 != seed_2_1
    elif rng_type == "int":
        assert seed_1_1 == seed_1_2 == seed_2_1
    else:
        assert seed_1_1 == seed_2_1 != seed_1_2

    # With warm starting, the seeds must be equal
    assert seed_2_1 == seed_2_2
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/ensemble/_hist_gradient_boosting/tests/test_grower.py | sklearn/ensemble/_hist_gradient_boosting/tests/test_grower.py | import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from pytest import approx
from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper
from sklearn.ensemble._hist_gradient_boosting.common import (
G_H_DTYPE,
X_BINNED_DTYPE,
X_BITSET_INNER_DTYPE,
X_DTYPE,
Y_DTYPE,
)
from sklearn.ensemble._hist_gradient_boosting.grower import TreeGrower
from sklearn.preprocessing import OneHotEncoder
from sklearn.utils._openmp_helpers import _openmp_effective_n_threads
n_threads = _openmp_effective_n_threads()
def _make_training_data(n_bins=256, constant_hessian=True):
    """Generate pre-binned training data with a simple asymmetric target.

    Returns (X_binned, all_gradients, all_hessians), ready to be fed to the
    grower without going through the binning logic.
    """
    rng = np.random.RandomState(42)
    n_samples = 10000

    # Generate some test data directly binned so as to test the grower code
    # independently of the binning logic.
    X_binned = rng.randint(0, n_bins - 1, size=(n_samples, 2), dtype=X_BINNED_DTYPE)
    X_binned = np.asfortranarray(X_binned)

    def true_decision_function(input_features):
        """Ground truth decision function.

        A very simple yet asymmetric decision tree, so the grower should have
        no trouble recovering it from 10000 training samples.
        """
        if input_features[0] <= n_bins // 2:
            return -1
        return -1 if input_features[1] <= n_bins // 3 else 1

    target = np.array([true_decision_function(x) for x in X_binned], dtype=Y_DTYPE)

    # Assume a square loss applied to an initial model that always predicts 0
    # (hardcoded for this test): gradients equal the targets and hessians are
    # all ones (a scalar one when the hessian is constant).
    all_gradients = target.astype(G_H_DTYPE)
    hessian_shape = 1 if constant_hessian else all_gradients.shape
    all_hessians = np.ones(shape=hessian_shape, dtype=G_H_DTYPE)
    return X_binned, all_gradients, all_hessians
def _check_children_consistency(parent, left, right):
# Make sure the samples are correctly dispatched from a parent to its
# children
assert parent.left_child is left
assert parent.right_child is right
# each sample from the parent is propagated to one of the two children
assert len(left.sample_indices) + len(right.sample_indices) == len(
parent.sample_indices
)
assert set(left.sample_indices).union(set(right.sample_indices)) == set(
parent.sample_indices
)
# samples are sent either to the left or the right node, never to both
assert set(left.sample_indices).intersection(set(right.sample_indices)) == set()
@pytest.mark.parametrize(
    "n_bins, constant_hessian, stopping_param, shrinkage",
    [
        (11, True, "min_gain_to_split", 0.5),
        (11, False, "min_gain_to_split", 1.0),
        (11, True, "max_leaf_nodes", 1.0),
        (11, False, "max_leaf_nodes", 0.1),
        (42, True, "max_leaf_nodes", 0.01),
        (42, False, "max_leaf_nodes", 1.0),
        (256, True, "min_gain_to_split", 1.0),
        (256, True, "max_leaf_nodes", 0.1),
    ],
)
def test_grow_tree(n_bins, constant_hessian, stopping_param, shrinkage):
    """Grow the asymmetric 3-leaf tree step by step and check each split.

    The data from ``_make_training_data`` follows a simple decision tree
    (first split on feature 0 at bin ``n_bins // 2``, then on feature 1 at
    bin ``n_bins // 3``), so the grower is expected to recover exactly this
    structure in two calls to ``split_next``.
    """
    X_binned, all_gradients, all_hessians = _make_training_data(
        n_bins=n_bins, constant_hessian=constant_hessian
    )
    n_samples = X_binned.shape[0]
    # Translate the parametrized stopping criterion name into actual kwargs.
    if stopping_param == "max_leaf_nodes":
        stopping_param = {"max_leaf_nodes": 3}
    else:
        stopping_param = {"min_gain_to_split": 0.01}
    grower = TreeGrower(
        X_binned,
        all_gradients,
        all_hessians,
        n_bins=n_bins,
        shrinkage=shrinkage,
        min_samples_leaf=1,
        **stopping_param,
    )
    # The root node is not yet split, but the best possible split has
    # already been evaluated:
    assert grower.root.left_child is None
    assert grower.root.right_child is None
    root_split = grower.root.split_info
    assert root_split.feature_idx == 0
    assert root_split.bin_idx == n_bins // 2
    assert len(grower.splittable_nodes) == 1
    # Calling split_next applies the next split and computes the best split
    # for each of the two newly introduced children nodes.
    left_node, right_node = grower.split_next()
    # All training samples have been split in the two nodes, approximately
    # 50%/50%
    _check_children_consistency(grower.root, left_node, right_node)
    assert len(left_node.sample_indices) > 0.4 * n_samples
    assert len(left_node.sample_indices) < 0.6 * n_samples
    if grower.min_gain_to_split > 0:
        # The left node is too pure: there is no gain to split it further.
        assert left_node.split_info.gain < grower.min_gain_to_split
        assert left_node in grower.finalized_leaves
    # The right node can still be split further, this time on feature #1
    split_info = right_node.split_info
    assert split_info.gain > 1.0
    assert split_info.feature_idx == 1
    assert split_info.bin_idx == n_bins // 3
    assert right_node.left_child is None
    assert right_node.right_child is None
    # The right split has not been applied yet. Let's do it now:
    assert len(grower.splittable_nodes) == 1
    right_left_node, right_right_node = grower.split_next()
    _check_children_consistency(right_node, right_left_node, right_right_node)
    assert len(right_left_node.sample_indices) > 0.1 * n_samples
    assert len(right_left_node.sample_indices) < 0.2 * n_samples
    assert len(right_right_node.sample_indices) > 0.2 * n_samples
    assert len(right_right_node.sample_indices) < 0.4 * n_samples
    # All the leaves are pure, it is not possible to split any further:
    assert not grower.splittable_nodes
    grower._apply_shrinkage()
    # Check the values of the leaves: with a squared loss and pure leaves
    # the (shrunk) leaf values are +/- shrinkage.
    assert grower.root.left_child.value == approx(shrinkage)
    assert grower.root.right_child.left_child.value == approx(shrinkage)
    assert grower.root.right_child.right_child.value == approx(-shrinkage, rel=1e-3)
def test_predictor_from_grower():
    """Check that a grown tree can be converted into an equivalent predictor."""
    # Build a tree on the toy 3-leaf dataset to extract the predictor.
    n_bins = 256
    X_binned, all_gradients, all_hessians = _make_training_data(n_bins=n_bins)
    grower = TreeGrower(
        X_binned,
        all_gradients,
        all_hessians,
        n_bins=n_bins,
        shrinkage=1.0,
        max_leaf_nodes=3,
        min_samples_leaf=5,
    )
    grower.grow()
    assert grower.n_nodes == 5  # (2 decision nodes + 3 leaves)
    # Check that the node structure can be converted into a predictor
    # object to perform predictions at scale
    # We pass undefined binning_thresholds because we won't use predict anyway
    predictor = grower.make_predictor(
        binning_thresholds=np.zeros((X_binned.shape[1], n_bins))
    )
    assert predictor.nodes.shape[0] == 5
    assert predictor.nodes["is_leaf"].sum() == 3
    # Probe some predictions for each leaf of the tree
    # each group of 3 samples corresponds to a condition in _make_training_data
    input_data = np.array(
        [
            [0, 0],
            [42, 99],
            [128, 254],
            [129, 0],
            [129, 85],
            [254, 85],
            [129, 86],
            [129, 254],
            [242, 100],
        ],
        dtype=np.uint8,
    )
    missing_values_bin_idx = n_bins - 1
    predictions = predictor.predict_binned(
        input_data, missing_values_bin_idx, n_threads
    )
    expected_targets = [1, 1, 1, 1, 1, 1, -1, -1, -1]
    assert np.allclose(predictions, expected_targets)
    # Check that training set can be recovered exactly: for a squared loss
    # applied to a 0 init model the optimal leaf values are -gradients.
    predictions = predictor.predict_binned(X_binned, missing_values_bin_idx, n_threads)
    assert np.allclose(predictions, -all_gradients)
@pytest.mark.parametrize(
    "n_samples, min_samples_leaf, n_bins, constant_hessian, noise",
    [
        (11, 10, 7, True, 0),
        (13, 10, 42, False, 0),
        (56, 10, 255, True, 0.1),
        (101, 3, 7, True, 0),
        (200, 42, 42, False, 0),
        (300, 55, 255, True, 0.1),
        (300, 301, 255, True, 0.1),
    ],
)
def test_min_samples_leaf(n_samples, min_samples_leaf, n_bins, constant_hessian, noise):
    """Check that every leaf of the predictor respects ``min_samples_leaf``.

    When ``min_samples_leaf > n_samples`` the tree must degenerate into a
    single root leaf holding all the samples.
    """
    rng = np.random.RandomState(seed=0)
    # data = linear target, 3 features, 1 irrelevant.
    X = rng.normal(size=(n_samples, 3))
    y = X[:, 0] - X[:, 1]
    if noise:
        y_scale = y.std()
        y += rng.normal(scale=noise, size=n_samples) * y_scale
    mapper = _BinMapper(n_bins=n_bins)
    X = mapper.fit_transform(X)
    all_gradients = y.astype(G_H_DTYPE)
    shape_hessian = 1 if constant_hessian else all_gradients.shape
    all_hessians = np.ones(shape=shape_hessian, dtype=G_H_DTYPE)
    grower = TreeGrower(
        X,
        all_gradients,
        all_hessians,
        n_bins=n_bins,
        shrinkage=1.0,
        min_samples_leaf=min_samples_leaf,
        max_leaf_nodes=n_samples,
    )
    grower.grow()
    predictor = grower.make_predictor(binning_thresholds=mapper.bin_thresholds_)
    if n_samples >= min_samples_leaf:
        for node in predictor.nodes:
            if node["is_leaf"]:
                assert node["count"] >= min_samples_leaf
    else:
        # Not enough samples to ever split the root.
        assert predictor.nodes.shape[0] == 1
        assert predictor.nodes[0]["is_leaf"]
        assert predictor.nodes[0]["count"] == n_samples
@pytest.mark.parametrize("n_samples, min_samples_leaf", [(99, 50), (100, 50)])
def test_min_samples_leaf_root(n_samples, min_samples_leaf):
    """Root is split only when both children could satisfy ``min_samples_leaf``."""
    # Make sure root node isn't split if n_samples is not at least twice
    # min_samples_leaf
    rng = np.random.RandomState(seed=0)
    n_bins = 256
    # data = linear target, 3 features, 1 irrelevant.
    X = rng.normal(size=(n_samples, 3))
    y = X[:, 0] - X[:, 1]
    mapper = _BinMapper(n_bins=n_bins)
    X = mapper.fit_transform(X)
    all_gradients = y.astype(G_H_DTYPE)
    all_hessians = np.ones(shape=1, dtype=G_H_DTYPE)
    grower = TreeGrower(
        X,
        all_gradients,
        all_hessians,
        n_bins=n_bins,
        shrinkage=1.0,
        min_samples_leaf=min_samples_leaf,
        max_leaf_nodes=n_samples,
    )
    grower.grow()
    if n_samples >= min_samples_leaf * 2:
        # At least one split happened: the root produced two leaves or more.
        assert len(grower.finalized_leaves) >= 2
    else:
        # The root itself is the only (finalized) leaf.
        assert len(grower.finalized_leaves) == 1
def assert_is_stump(grower):
    """Assert the grown tree is a stump: both children of the root are leaves."""
    root = grower.root
    for child in (root.left_child, root.right_child):
        assert child.left_child is None
        assert child.right_child is None
@pytest.mark.parametrize("max_depth", [1, 2, 3])
def test_max_depth(max_depth):
    """Check that the deepest finalized leaf is exactly at ``max_depth``."""
    # Make sure max_depth parameter works as expected
    rng = np.random.RandomState(seed=0)
    n_bins = 256
    n_samples = 1000
    # data = linear target, 3 features, 1 irrelevant.
    X = rng.normal(size=(n_samples, 3))
    y = X[:, 0] - X[:, 1]
    mapper = _BinMapper(n_bins=n_bins)
    X = mapper.fit_transform(X)
    all_gradients = y.astype(G_H_DTYPE)
    all_hessians = np.ones(shape=1, dtype=G_H_DTYPE)
    grower = TreeGrower(X, all_gradients, all_hessians, max_depth=max_depth)
    grower.grow()
    depth = max(leaf.depth for leaf in grower.finalized_leaves)
    assert depth == max_depth
    if max_depth == 1:
        # A depth-1 tree is a stump: the root's children must be leaves.
        assert_is_stump(grower)
def test_input_validation():
    """TreeGrower rejects non-uint8 and non-Fortran-contiguous binned data."""
    X_binned, all_gradients, all_hessians = _make_training_data()
    X_binned_float = X_binned.astype(np.float32)
    with pytest.raises(NotImplementedError, match="X_binned must be of type uint8"):
        TreeGrower(X_binned_float, all_gradients, all_hessians)
    X_binned_C_array = np.ascontiguousarray(X_binned)
    with pytest.raises(
        ValueError, match="X_binned should be passed as Fortran contiguous array"
    ):
        TreeGrower(X_binned_C_array, all_gradients, all_hessians)
def test_init_parameters_validation():
    """TreeGrower raises on negative min_gain_to_split / min_hessian_to_split."""
    X_binned, all_gradients, all_hessians = _make_training_data()
    with pytest.raises(ValueError, match="min_gain_to_split=-1 must be positive"):
        TreeGrower(X_binned, all_gradients, all_hessians, min_gain_to_split=-1)
    with pytest.raises(ValueError, match="min_hessian_to_split=-1 must be positive"):
        TreeGrower(X_binned, all_gradients, all_hessians, min_hessian_to_split=-1)
def test_missing_value_predict_only():
    # Make sure that missing values are supported at predict time even if they
    # were not encountered in the training data: the missing values are
    # assigned to whichever child has the most samples.
    rng = np.random.RandomState(0)
    n_samples = 100
    X_binned = rng.randint(0, 256, size=(n_samples, 1), dtype=np.uint8)
    X_binned = np.asfortranarray(X_binned)
    gradients = rng.normal(size=n_samples).astype(G_H_DTYPE)
    hessians = np.ones(shape=1, dtype=G_H_DTYPE)
    # has_missing_values=False: training never sees a missing value.
    grower = TreeGrower(
        X_binned, gradients, hessians, min_samples_leaf=5, has_missing_values=False
    )
    grower.grow()
    # We pass undefined binning_thresholds because we won't use predict anyway
    predictor = grower.make_predictor(
        binning_thresholds=np.zeros((X_binned.shape[1], X_binned.max() + 1))
    )
    # go from root to a leaf, always following node with the most samples.
    # That's the path nans are supposed to take
    node = predictor.nodes[0]
    while not node["is_leaf"]:
        left = predictor.nodes[node["left"]]
        right = predictor.nodes[node["right"]]
        node = left if left["count"] > right["count"] else right
    prediction_main_path = node["value"]
    # now build X_test with only nans, and make sure all predictions are equal
    # to prediction_main_path
    all_nans = np.full(shape=(n_samples, 1), fill_value=np.nan)
    # Empty categorical bitsets: the single feature is numerical.
    known_cat_bitsets = np.zeros((0, 8), dtype=X_BITSET_INNER_DTYPE)
    f_idx_map = np.zeros(0, dtype=np.uint32)
    y_pred = predictor.predict(all_nans, known_cat_bitsets, f_idx_map, n_threads)
    assert np.all(y_pred == prediction_main_path)
def test_split_on_nan_with_infinite_values():
    # Make sure the split on nan situations are respected even when there are
    # samples with +inf values (we set the threshold to +inf when we have a
    # split on nan so this test makes sure this does not introduce edge-case
    # bugs). We need to use the private API so that we can also test
    # predict_binned().
    X = np.array([0, 1, np.inf, np.nan, np.nan]).reshape(-1, 1)
    # the gradient values will force a split on nan situation
    gradients = np.array([0, 0, 0, 100, 100], dtype=G_H_DTYPE)
    hessians = np.ones(shape=1, dtype=G_H_DTYPE)
    bin_mapper = _BinMapper()
    X_binned = bin_mapper.fit_transform(X)
    n_bins_non_missing = 3
    has_missing_values = True
    grower = TreeGrower(
        X_binned,
        gradients,
        hessians,
        n_bins_non_missing=n_bins_non_missing,
        has_missing_values=has_missing_values,
        min_samples_leaf=1,
        n_threads=n_threads,
    )
    grower.grow()
    predictor = grower.make_predictor(binning_thresholds=bin_mapper.bin_thresholds_)
    # sanity check: this was a split on nan (threshold +inf, last
    # non-missing bin)
    assert predictor.nodes[0]["num_threshold"] == np.inf
    assert predictor.nodes[0]["bin_threshold"] == n_bins_non_missing - 1
    known_cat_bitsets, f_idx_map = bin_mapper.make_known_categories_bitsets()
    # Make sure in particular that the +inf sample is mapped to the left child
    # Note that lightgbm "fails" here and will assign the inf sample to the
    # right child, even though it's a "split on nan" situation.
    predictions = predictor.predict(X, known_cat_bitsets, f_idx_map, n_threads)
    predictions_binned = predictor.predict_binned(
        X_binned,
        missing_values_bin_idx=bin_mapper.missing_values_bin_idx_,
        n_threads=n_threads,
    )
    # Both raw and binned predictions must recover the training targets.
    np.testing.assert_allclose(predictions, -gradients)
    np.testing.assert_allclose(predictions_binned, -gradients)
def test_grow_tree_categories():
    # Check that the grower produces the right predictor tree when a split is
    # categorical
    X_binned = np.array([[0, 1] * 11 + [1]], dtype=X_BINNED_DTYPE).T
    X_binned = np.asfortranarray(X_binned)
    all_gradients = np.array([10, 1] * 11 + [1], dtype=G_H_DTYPE)
    all_hessians = np.ones(1, dtype=G_H_DTYPE)
    is_categorical = np.ones(1, dtype=np.uint8)
    grower = TreeGrower(
        X_binned,
        all_gradients,
        all_hessians,
        n_bins=4,
        shrinkage=1.0,
        min_samples_leaf=1,
        is_categorical=is_categorical,
        n_threads=n_threads,
    )
    grower.grow()
    assert grower.n_nodes == 3
    # The raw categories corresponding to the two bins used in X_binned.
    categories = [np.array([4, 9], dtype=X_DTYPE)]
    predictor = grower.make_predictor(binning_thresholds=categories)
    root = predictor.nodes[0]
    assert root["count"] == 23
    assert root["depth"] == 0
    assert root["is_categorical"]
    left, right = predictor.nodes[root["left"]], predictor.nodes[root["right"]]
    # arbitrary validation, but this means ones go to the left.
    assert left["count"] >= right["count"]
    # check binned category value (1)
    expected_binned_cat_bitset = [2**1] + [0] * 7
    binned_cat_bitset = predictor.binned_left_cat_bitsets
    assert_array_equal(binned_cat_bitset[0], expected_binned_cat_bitset)
    # check raw category value (9)
    expected_raw_cat_bitsets = [2**9] + [0] * 7
    raw_cat_bitsets = predictor.raw_left_cat_bitsets
    assert_array_equal(raw_cat_bitsets[0], expected_raw_cat_bitsets)
    # Note that since there was no missing values during training, the missing
    # values aren't part of the bitsets. However, we expect the missing values
    # to go to the biggest child (i.e. the left one).
    # The left child has a value of -1 = negative gradient.
    assert root["missing_go_to_left"]
    # make sure binned missing values are mapped to the left child during
    # prediction
    prediction_binned = predictor.predict_binned(
        np.asarray([[6]]).astype(X_BINNED_DTYPE),
        missing_values_bin_idx=6,
        n_threads=n_threads,
    )
    assert_allclose(prediction_binned, [-1])  # negative gradient
    # make sure raw missing values are mapped to the left child during
    # prediction
    known_cat_bitsets = np.zeros((1, 8), dtype=np.uint32)  # ignored anyway
    f_idx_map = np.array([0], dtype=np.uint32)
    prediction = predictor.predict(
        np.array([[np.nan]]), known_cat_bitsets, f_idx_map, n_threads
    )
    assert_allclose(prediction, [-1])
@pytest.mark.parametrize("min_samples_leaf", (1, 20))
@pytest.mark.parametrize("n_unique_categories", (2, 10, 100))
@pytest.mark.parametrize("target", ("binary", "random", "equal"))
def test_ohe_equivalence(min_samples_leaf, n_unique_categories, target):
    # Make sure that native categorical splits are equivalent to using a OHE,
    # when given enough depth
    rng = np.random.RandomState(0)
    n_samples = 10_000
    X_binned = rng.randint(0, n_unique_categories, size=(n_samples, 1), dtype=np.uint8)
    X_ohe = OneHotEncoder(sparse_output=False).fit_transform(X_binned)
    X_ohe = np.asfortranarray(X_ohe).astype(np.uint8)
    # Build gradients according to the parametrized target structure.
    if target == "equal":
        gradients = X_binned.reshape(-1)
    elif target == "binary":
        gradients = (X_binned % 2).reshape(-1)
    else:
        gradients = rng.randn(n_samples)
    gradients = gradients.astype(G_H_DTYPE)
    hessians = np.ones(shape=1, dtype=G_H_DTYPE)
    grower_params = {
        "min_samples_leaf": min_samples_leaf,
        "max_depth": None,
        "max_leaf_nodes": None,
    }
    grower = TreeGrower(
        X_binned, gradients, hessians, is_categorical=[True], **grower_params
    )
    grower.grow()
    # we pass undefined bin_thresholds because we won't use predict()
    predictor = grower.make_predictor(
        binning_thresholds=np.zeros((1, n_unique_categories))
    )
    preds = predictor.predict_binned(
        X_binned, missing_values_bin_idx=255, n_threads=n_threads
    )
    grower_ohe = TreeGrower(X_ohe, gradients, hessians, **grower_params)
    grower_ohe.grow()
    predictor_ohe = grower_ohe.make_predictor(
        binning_thresholds=np.zeros((X_ohe.shape[1], n_unique_categories))
    )
    preds_ohe = predictor_ohe.predict_binned(
        X_ohe, missing_values_bin_idx=255, n_threads=n_threads
    )
    # Native categorical splits can only be shallower than (or as deep as)
    # the equivalent one-hot-encoded trees.
    assert predictor.get_max_depth() <= predictor_ohe.get_max_depth()
    if target == "binary" and n_unique_categories > 2:
        # OHE needs more splits to achieve the same predictions
        assert predictor.get_max_depth() < predictor_ohe.get_max_depth()
    np.testing.assert_allclose(preds, preds_ohe)
def test_grower_interaction_constraints():
    """Check that grower respects interaction constraints."""
    n_features = 6
    interaction_cst = [{0, 1}, {1, 2}, {3, 4, 5}]
    n_samples = 10
    n_bins = 6
    # Collect the root split feature of each seeded run to check coverage
    # of all features at the end.
    root_feature_splits = []
    def get_all_children(node):
        # Recursively collect every descendant of `node` (excluding `node`).
        res = []
        if node.is_leaf:
            return res
        for n in [node.left_child, node.right_child]:
            res.append(n)
            res.extend(get_all_children(n))
        return res
    for seed in range(20):
        rng = np.random.RandomState(seed)
        X_binned = rng.randint(
            0, n_bins - 1, size=(n_samples, n_features), dtype=X_BINNED_DTYPE
        )
        X_binned = np.asfortranarray(X_binned)
        gradients = rng.normal(size=n_samples).astype(G_H_DTYPE)
        hessians = np.ones(shape=1, dtype=G_H_DTYPE)
        grower = TreeGrower(
            X_binned,
            gradients,
            hessians,
            n_bins=n_bins,
            min_samples_leaf=1,
            interaction_cst=interaction_cst,
            n_threads=n_threads,
        )
        grower.grow()
        root_feature_idx = grower.root.split_info.feature_idx
        root_feature_splits.append(root_feature_idx)
        # Union of the constraint groups each feature belongs to.
        feature_idx_to_constraint_set = {
            0: {0, 1},
            1: {0, 1, 2},
            2: {1, 2},
            3: {3, 4, 5},
            4: {3, 4, 5},
            5: {3, 4, 5},
        }
        root_constraint_set = feature_idx_to_constraint_set[root_feature_idx]
        for node in (grower.root.left_child, grower.root.right_child):
            # Root's children's allowed_features must be the root's constraints set.
            assert_array_equal(node.allowed_features, list(root_constraint_set))
        for node in get_all_children(grower.root):
            if node.is_leaf:
                continue
            # Ensure that each node uses a subset of features of its parent node.
            parent_interaction_cst_indices = set(node.interaction_cst_indices)
            right_interactions_cst_indices = set(
                node.right_child.interaction_cst_indices
            )
            left_interactions_cst_indices = set(node.left_child.interaction_cst_indices)
            assert right_interactions_cst_indices.issubset(
                parent_interaction_cst_indices
            )
            assert left_interactions_cst_indices.issubset(
                parent_interaction_cst_indices
            )
            # The features used for split must have been present in the root's
            # constraint set.
            assert node.split_info.feature_idx in root_constraint_set
    # Make sure that every feature is used at least once as split for the root node.
    assert (
        len(set(root_feature_splits))
        == len(set().union(*interaction_cst))
        == n_features
    )
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/ensemble/_hist_gradient_boosting/tests/test_predictor.py | sklearn/ensemble/_hist_gradient_boosting/tests/test_predictor.py | import numpy as np
import pytest
from numpy.testing import assert_allclose
from sklearn.datasets import make_regression
from sklearn.ensemble._hist_gradient_boosting._bitset import (
set_bitset_memoryview,
set_raw_bitset_from_binned_bitset,
)
from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper
from sklearn.ensemble._hist_gradient_boosting.common import (
ALMOST_INF,
G_H_DTYPE,
PREDICTOR_RECORD_DTYPE,
X_BINNED_DTYPE,
X_BITSET_INNER_DTYPE,
X_DTYPE,
)
from sklearn.ensemble._hist_gradient_boosting.grower import TreeGrower
from sklearn.ensemble._hist_gradient_boosting.predictor import TreePredictor
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from sklearn.utils._openmp_helpers import _openmp_effective_n_threads
# Number of OpenMP threads effectively available, passed to predict calls.
n_threads = _openmp_effective_n_threads()
@pytest.mark.parametrize("n_bins", [200, 256])
def test_regression_dataset(n_bins):
    """End-to-end check: a single grown tree fits a regression dataset well."""
    X, y = make_regression(
        n_samples=500, n_features=10, n_informative=5, random_state=42
    )
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
    mapper = _BinMapper(n_bins=n_bins, random_state=42)
    X_train_binned = mapper.fit_transform(X_train)
    # Init gradients and hessians to that of least squares loss
    gradients = -y_train.astype(G_H_DTYPE)
    hessians = np.ones(1, dtype=G_H_DTYPE)
    min_samples_leaf = 10
    max_leaf_nodes = 30
    grower = TreeGrower(
        X_train_binned,
        gradients,
        hessians,
        min_samples_leaf=min_samples_leaf,
        max_leaf_nodes=max_leaf_nodes,
        n_bins=n_bins,
        n_bins_non_missing=mapper.n_bins_non_missing_,
    )
    grower.grow()
    predictor = grower.make_predictor(binning_thresholds=mapper.bin_thresholds_)
    # No categorical features: empty bitsets.
    known_cat_bitsets = np.zeros((0, 8), dtype=X_BITSET_INNER_DTYPE)
    f_idx_map = np.zeros(0, dtype=np.uint32)
    y_pred_train = predictor.predict(X_train, known_cat_bitsets, f_idx_map, n_threads)
    assert r2_score(y_train, y_pred_train) > 0.82
    y_pred_test = predictor.predict(X_test, known_cat_bitsets, f_idx_map, n_threads)
    assert r2_score(y_test, y_pred_test) > 0.67
@pytest.mark.parametrize(
    "num_threshold, expected_predictions",
    [
        (-np.inf, [0, 1, 1, 1]),
        (10, [0, 0, 1, 1]),
        (20, [0, 0, 0, 1]),
        (ALMOST_INF, [0, 0, 0, 1]),
        (np.inf, [0, 0, 0, 0]),
    ],
)
def test_infinite_values_and_thresholds(num_threshold, expected_predictions):
    # Make sure infinite values and infinite thresholds are handled properly.
    # In particular, if a value is +inf and the threshold is ALMOST_INF the
    # sample should go to the right child. If the threshold is inf (split on
    # nan), the +inf sample will go to the left child.
    X = np.array([-np.inf, 10, 20, np.inf]).reshape(-1, 1)
    nodes = np.zeros(3, dtype=PREDICTOR_RECORD_DTYPE)
    # We just construct a simple tree with 1 root and 2 children
    # parent node
    nodes[0]["left"] = 1
    nodes[0]["right"] = 2
    nodes[0]["feature_idx"] = 0
    nodes[0]["num_threshold"] = num_threshold
    # left child
    nodes[1]["is_leaf"] = True
    nodes[1]["value"] = 0
    # right child
    nodes[2]["is_leaf"] = True
    nodes[2]["value"] = 1
    # No categorical features: empty bitsets are enough.
    binned_cat_bitsets = np.zeros((0, 8), dtype=X_BITSET_INNER_DTYPE)
    raw_categorical_bitsets = np.zeros((0, 8), dtype=X_BITSET_INNER_DTYPE)
    known_cat_bitset = np.zeros((0, 8), dtype=X_BITSET_INNER_DTYPE)
    f_idx_map = np.zeros(0, dtype=np.uint32)
    predictor = TreePredictor(nodes, binned_cat_bitsets, raw_categorical_bitsets)
    predictions = predictor.predict(X, known_cat_bitset, f_idx_map, n_threads)
    assert np.all(predictions == expected_predictions)
@pytest.mark.parametrize(
    "bins_go_left, expected_predictions",
    [
        ([0, 3, 4, 6], [1, 0, 0, 1, 1, 0]),
        ([0, 1, 2, 6], [1, 1, 1, 0, 0, 0]),
        ([3, 5, 6], [0, 0, 0, 1, 0, 1]),
    ],
)
def test_categorical_predictor(bins_go_left, expected_predictions):
    # Test predictor outputs are correct with categorical features
    X_binned = np.array([[0, 1, 2, 3, 4, 5]], dtype=X_BINNED_DTYPE).T
    categories = np.array([2, 5, 6, 8, 10, 15], dtype=X_DTYPE)
    bins_go_left = np.array(bins_go_left, dtype=X_BINNED_DTYPE)
    # We just construct a simple tree with 1 root and 2 children
    # parent node
    nodes = np.zeros(3, dtype=PREDICTOR_RECORD_DTYPE)
    nodes[0]["left"] = 1
    nodes[0]["right"] = 2
    nodes[0]["feature_idx"] = 0
    nodes[0]["is_categorical"] = True
    nodes[0]["missing_go_to_left"] = True
    # left child
    nodes[1]["is_leaf"] = True
    nodes[1]["value"] = 1
    # right child
    nodes[2]["is_leaf"] = True
    nodes[2]["value"] = 0
    # Build the bitsets describing which (binned and raw) categories are
    # routed to the left child.
    binned_cat_bitsets = np.zeros((1, 8), dtype=X_BITSET_INNER_DTYPE)
    raw_categorical_bitsets = np.zeros((1, 8), dtype=X_BITSET_INNER_DTYPE)
    for go_left in bins_go_left:
        set_bitset_memoryview(binned_cat_bitsets[0], go_left)
    set_raw_bitset_from_binned_bitset(
        raw_categorical_bitsets[0], binned_cat_bitsets[0], categories
    )
    predictor = TreePredictor(nodes, binned_cat_bitsets, raw_categorical_bitsets)
    # Check binned data gives correct predictions
    prediction_binned = predictor.predict_binned(
        X_binned, missing_values_bin_idx=6, n_threads=n_threads
    )
    assert_allclose(prediction_binned, expected_predictions)
    # manually construct bitset
    known_cat_bitsets = np.zeros((1, 8), dtype=np.uint32)
    known_cat_bitsets[0, 0] = np.sum(2**categories, dtype=np.uint32)
    f_idx_map = np.array([0], dtype=np.uint32)
    # Check with un-binned data
    predictions = predictor.predict(
        categories.reshape(-1, 1), known_cat_bitsets, f_idx_map, n_threads
    )
    assert_allclose(predictions, expected_predictions)
    # Check missing goes left because missing_values_bin_idx=6
    X_binned_missing = np.array([[6]], dtype=X_BINNED_DTYPE).T
    predictions = predictor.predict_binned(
        X_binned_missing, missing_values_bin_idx=6, n_threads=n_threads
    )
    assert_allclose(predictions, [1])
    # missing and unknown go left
    predictions = predictor.predict(
        np.array([[np.nan, 17]], dtype=X_DTYPE).T,
        known_cat_bitsets,
        f_idx_map,
        n_threads,
    )
    assert_allclose(predictions, [1, 1])
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py | sklearn/ensemble/_hist_gradient_boosting/tests/test_gradient_boosting.py | import copyreg
import io
import pickle
import re
import warnings
from unittest.mock import Mock
import joblib
import numpy as np
import pytest
from joblib.numpy_pickle import NumpyPickler
from numpy.testing import assert_allclose, assert_array_equal
import sklearn.ensemble._hist_gradient_boosting.gradient_boosting as hgb_module
from sklearn._loss.loss import (
AbsoluteError,
HalfBinomialLoss,
HalfSquaredError,
PinballLoss,
)
from sklearn.base import BaseEstimator, TransformerMixin, clone, is_regressor
from sklearn.compose import make_column_transformer
from sklearn.datasets import make_classification, make_low_rank_matrix, make_regression
from sklearn.dummy import DummyRegressor
from sklearn.ensemble import (
HistGradientBoostingClassifier,
HistGradientBoostingRegressor,
)
from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper
from sklearn.ensemble._hist_gradient_boosting.common import G_H_DTYPE
from sklearn.ensemble._hist_gradient_boosting.grower import TreeGrower
from sklearn.ensemble._hist_gradient_boosting.predictor import TreePredictor
from sklearn.exceptions import NotFittedError
from sklearn.metrics import get_scorer, mean_gamma_deviance, mean_poisson_deviance
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import KBinsDiscretizer, MinMaxScaler, OneHotEncoder
from sklearn.utils import check_random_state, shuffle
from sklearn.utils._openmp_helpers import _openmp_effective_n_threads
from sklearn.utils._testing import _convert_container
from sklearn.utils.fixes import _IS_32BIT
# Number of OpenMP threads effectively available for the estimators below.
n_threads = _openmp_effective_n_threads()
# Shared toy datasets reused across several tests in this module.
X_classification, y_classification = make_classification(random_state=0)
X_regression, y_regression = make_regression(random_state=0)
X_multi_classification, y_multi_classification = make_classification(
    n_classes=3, n_informative=3, random_state=0
)
def _make_dumb_dataset(n_samples):
    """Make a dumb dataset to test early stopping.

    Returns a single Gaussian feature and a binary target that is simply the
    sign of that feature, so the problem is trivially separable.
    """
    generator = np.random.RandomState(42)
    features = generator.randn(n_samples, 1)
    labels = (features[:, 0] > 0).astype("int64")
    return features, labels
@pytest.mark.parametrize(
    "GradientBoosting, X, y",
    [
        (HistGradientBoostingClassifier, X_classification, y_classification),
        (HistGradientBoostingRegressor, X_regression, y_regression),
    ],
)
@pytest.mark.parametrize(
    "params, err_msg",
    [
        (
            {"interaction_cst": [0, 1]},
            "Interaction constraints must be a sequence of tuples or lists",
        ),
        (
            {"interaction_cst": [{0, 9999}]},
            r"Interaction constraints must consist of integer indices in \[0,"
            r" n_features - 1\] = \[.*\], specifying the position of features,",
        ),
        (
            {"interaction_cst": [{-1, 0}]},
            r"Interaction constraints must consist of integer indices in \[0,"
            r" n_features - 1\] = \[.*\], specifying the position of features,",
        ),
        (
            {"interaction_cst": [{0.5}]},
            r"Interaction constraints must consist of integer indices in \[0,"
            r" n_features - 1\] = \[.*\], specifying the position of features,",
        ),
    ],
)
def test_init_parameters_validation(GradientBoosting, X, y, params, err_msg):
    """Invalid ``interaction_cst`` values raise a ValueError at fit time."""
    with pytest.raises(ValueError, match=err_msg):
        GradientBoosting(**params).fit(X, y)
@pytest.mark.parametrize(
    "scoring, validation_fraction, early_stopping, n_iter_no_change, tol",
    [
        ("neg_mean_squared_error", 0.1, True, 5, 1e-7),  # use scorer
        ("neg_mean_squared_error", None, True, 5, 1e-1),  # use scorer on train
        (None, 0.1, True, 5, 1e-7),  # same with default scorer
        (None, None, True, 5, 1e-1),
        ("loss", 0.1, True, 5, 1e-7),  # use loss
        ("loss", None, True, 5, 1e-1),  # use loss on training data
        (None, None, False, 5, 0.0),  # no early stopping
    ],
)
def test_early_stopping_regression(
    scoring, validation_fraction, early_stopping, n_iter_no_change, tol
):
    """Early stopping halts training before max_iter on an easy regression."""
    max_iter = 200
    X, y = make_regression(n_samples=50, random_state=0)
    gb = HistGradientBoostingRegressor(
        verbose=1,  # just for coverage
        min_samples_leaf=5,  # easier to overfit fast
        scoring=scoring,
        tol=tol,
        early_stopping=early_stopping,
        validation_fraction=validation_fraction,
        max_iter=max_iter,
        n_iter_no_change=n_iter_no_change,
        random_state=0,
    )
    gb.fit(X, y)
    if early_stopping:
        # Stopped early, but only after the patience window elapsed.
        assert n_iter_no_change <= gb.n_iter_ < max_iter
    else:
        assert gb.n_iter_ == max_iter
@pytest.mark.parametrize(
    "data",
    (
        make_classification(n_samples=30, random_state=0),
        make_classification(
            n_samples=30, n_classes=3, n_clusters_per_class=1, random_state=0
        ),
    ),
)
@pytest.mark.parametrize(
    "scoring, validation_fraction, early_stopping, n_iter_no_change, tol",
    [
        ("accuracy", 0.1, True, 5, 1e-7),  # use scorer
        ("accuracy", None, True, 5, 1e-1),  # use scorer on training data
        (None, 0.1, True, 5, 1e-7),  # same with default scorer
        (None, None, True, 5, 1e-1),
        ("loss", 0.1, True, 5, 1e-7),  # use loss
        ("loss", None, True, 5, 1e-1),  # use loss on training data
        (None, None, False, 5, 0.0),  # no early stopping
    ],
)
def test_early_stopping_classification(
    data, scoring, validation_fraction, early_stopping, n_iter_no_change, tol
):
    """Early stopping halts training before max_iter on easy classification."""
    max_iter = 50
    X, y = data
    gb = HistGradientBoostingClassifier(
        verbose=2,  # just for coverage
        min_samples_leaf=5,  # easier to overfit fast
        scoring=scoring,
        tol=tol,
        early_stopping=early_stopping,
        validation_fraction=validation_fraction,
        max_iter=max_iter,
        n_iter_no_change=n_iter_no_change,
        random_state=0,
    )
    gb.fit(X, y)
    if early_stopping is True:
        # Stopped early, but only after the patience window elapsed.
        assert n_iter_no_change <= gb.n_iter_ < max_iter
    else:
        assert gb.n_iter_ == max_iter
@pytest.mark.parametrize(
    "GradientBoosting, X, y",
    [
        (HistGradientBoostingClassifier, *_make_dumb_dataset(10000)),
        (HistGradientBoostingClassifier, *_make_dumb_dataset(10001)),
        (HistGradientBoostingRegressor, *_make_dumb_dataset(10000)),
        (HistGradientBoostingRegressor, *_make_dumb_dataset(10001)),
    ],
)
def test_early_stopping_default(GradientBoosting, X, y):
    # Test that early stopping is enabled by default if and only if there
    # are more than 10000 samples
    gb = GradientBoosting(max_iter=10, n_iter_no_change=2, tol=1e-1)
    gb.fit(X, y)
    if X.shape[0] > 10000:
        # early_stopping='auto' kicked in and stopped before max_iter.
        assert gb.n_iter_ < gb.max_iter
    else:
        assert gb.n_iter_ == gb.max_iter
@pytest.mark.parametrize(
    "scores, n_iter_no_change, tol, stopping",
    [
        ([], 1, 0.001, False),  # not enough iterations
        ([1, 1, 1], 5, 0.001, False),  # not enough iterations
        ([1, 1, 1, 1, 1], 5, 0.001, False),  # not enough iterations
        ([1, 2, 3, 4, 5, 6], 5, 0.001, False),  # significant improvement
        ([1, 2, 3, 4, 5, 6], 5, 0.0, False),  # significant improvement
        ([1, 2, 3, 4, 5, 6], 5, 0.999, False),  # significant improvement
        ([1, 2, 3, 4, 5, 6], 5, 5 - 1e-5, False),  # significant improvement
        ([1] * 6, 5, 0.0, True),  # no significant improvement
        ([1] * 6, 5, 0.001, True),  # no significant improvement
        ([1] * 6, 5, 5, True),  # no significant improvement
    ],
)
def test_should_stop(scores, n_iter_no_change, tol, stopping):
    """Unit-test the private ``_should_stop`` early-stopping criterion."""
    gbdt = HistGradientBoostingClassifier(n_iter_no_change=n_iter_no_change, tol=tol)
    assert gbdt._should_stop(scores) == stopping
def test_absolute_error():
    """Smoke-test fitting with the absolute_error loss (coverage only)."""
    features, target = make_regression(n_samples=500, random_state=0)
    model = HistGradientBoostingRegressor(loss="absolute_error", random_state=0)
    model.fit(features, target)
    assert model.score(features, target) > 0.9
def test_absolute_error_sample_weight():
    """Non-regression test for issue #19400.

    Fitting HistGradientBoostingRegressor with loss='absolute_error' and
    sample_weight must not raise.
    """
    random_state = np.random.RandomState(0)
    n_samples = 100
    features = random_state.uniform(-1, 1, size=(n_samples, 2))
    target = random_state.uniform(-1, 1, size=n_samples)
    weights = random_state.uniform(0, 1, size=n_samples)
    model = HistGradientBoostingRegressor(loss="absolute_error")
    model.fit(features, target, sample_weight=weights)
@pytest.mark.parametrize("y", [([1.0, -2.0, 0.0]), ([0.0, 1.0, 2.0])])
def test_gamma_y_positive(y):
    """Fitting loss='gamma' on a target with any y_i <= 0 raises ValueError."""
    expected_msg = r"loss='gamma' requires strictly positive y."
    estimator = HistGradientBoostingRegressor(loss="gamma", random_state=0)
    features = np.zeros(shape=(len(y), 1))
    with pytest.raises(ValueError, match=expected_msg):
        estimator.fit(features, y)
def test_gamma():
    """Check that loss='gamma' beats other losses in Gamma deviance."""
    # For a Gamma distributed target, we expect an HGBT trained with the Gamma deviance
    # (loss) to give better results than an HGBT with any other loss function, measured
    # in out-of-sample Gamma deviance as metric/score.
    # Note that squared error could potentially predict negative values which is
    # invalid (np.inf) for the Gamma deviance. A Poisson HGBT (having a log link)
    # does not have that defect.
    # Important note: It seems that a Poisson HGBT almost always has better
    # out-of-sample performance than the Gamma HGBT, measured in Gamma deviance.
    # LightGBM shows the same behaviour. Hence, we only compare to a squared error
    # HGBT, but not to a Poisson deviance HGBT.
    rng = np.random.RandomState(42)
    n_train, n_test, n_features = 500, 100, 20
    X = make_low_rank_matrix(
        n_samples=n_train + n_test,
        n_features=n_features,
        random_state=rng,
    )
    # We create a log-linear Gamma model. This gives y.min ~ 1e-2, y.max ~ 1e2
    coef = rng.uniform(low=-10, high=20, size=n_features)
    # Numpy parametrizes gamma(shape=k, scale=theta) with mean = k * theta and
    # variance = k * theta^2. We parametrize it instead with mean = exp(X @ coef)
    # and variance = dispersion * mean^2 by setting k = 1 / dispersion,
    # theta = dispersion * mean.
    dispersion = 0.5
    y = rng.gamma(shape=1 / dispersion, scale=dispersion * np.exp(X @ coef))
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=n_test, random_state=rng
    )
    gbdt_gamma = HistGradientBoostingRegressor(loss="gamma", random_state=123)
    gbdt_mse = HistGradientBoostingRegressor(loss="squared_error", random_state=123)
    dummy = DummyRegressor(strategy="mean")
    for model in (gbdt_gamma, gbdt_mse, dummy):
        model.fit(X_train, y_train)
    # Compare models both in-sample and out-of-sample.
    for X, y in [(X_train, y_train), (X_test, y_test)]:
        loss_gbdt_gamma = mean_gamma_deviance(y, gbdt_gamma.predict(X))
        # We restrict the squared error HGBT to predict at least the minimum seen y at
        # train time to make it strictly positive.
        loss_gbdt_mse = mean_gamma_deviance(
            y, np.maximum(np.min(y_train), gbdt_mse.predict(X))
        )
        loss_dummy = mean_gamma_deviance(y, dummy.predict(X))
        assert loss_gbdt_gamma < loss_dummy
        assert loss_gbdt_gamma < loss_gbdt_mse
@pytest.mark.parametrize("quantile", [0.2, 0.5, 0.8])
def test_quantile_asymmetric_error(quantile):
    """Test quantile regression for asymmetric distributed targets."""
    n_samples = 10_000
    rng = np.random.RandomState(42)
    # take care that X @ coef + intercept > 0
    X = np.concatenate(
        (
            np.abs(rng.randn(n_samples)[:, None]),
            -rng.randint(2, size=(n_samples, 1)),
        ),
        axis=1,
    )
    intercept = 1.23
    coef = np.array([0.5, -2])
    # For an exponential distribution with rate lambda, e.g. exp(-lambda * x),
    # the quantile at level q is:
    #   quantile(q) = - log(1 - q) / lambda
    #   scale = 1/lambda = -quantile(q) / log(1-q)
    y = rng.exponential(
        scale=-(X @ coef + intercept) / np.log(1 - quantile), size=n_samples
    )
    model = HistGradientBoostingRegressor(
        loss="quantile",
        quantile=quantile,
        max_iter=25,
        random_state=0,
        max_leaf_nodes=10,
    ).fit(X, y)
    # Empirical coverage of the predictions should match the quantile level.
    assert_allclose(np.mean(model.predict(X) > y), quantile, rtol=1e-2)
    # The fitted model's pinball loss should not exceed that of the true
    # conditional quantile ...
    pinball_loss = PinballLoss(quantile=quantile)
    loss_true_quantile = pinball_loss(y, X @ coef + intercept)
    loss_pred_quantile = pinball_loss(y, model.predict(X))
    # we are overfitting
    assert loss_pred_quantile <= loss_true_quantile
@pytest.mark.parametrize("y", [([1.0, -2.0, 0.0]), ([0.0, 0.0, 0.0])])
def test_poisson_y_positive(y):
    """loss='poisson' must reject y with a negative entry or sum(y) <= 0."""
    expected_msg = r"loss='poisson' requires non-negative y and sum\(y\) > 0."
    model = HistGradientBoostingRegressor(loss="poisson", random_state=0)
    with pytest.raises(ValueError, match=expected_msg):
        model.fit(np.zeros(shape=(len(y), 1)), y)
def test_poisson():
    """Check that loss='poisson' beats squared error in Poisson deviance."""
    # For Poisson distributed target, Poisson loss should give better results
    # than least squares measured in Poisson deviance as metric.
    rng = np.random.RandomState(42)
    n_train, n_test, n_features = 500, 100, 100
    X = make_low_rank_matrix(
        n_samples=n_train + n_test, n_features=n_features, random_state=rng
    )
    # We create a log-linear Poisson model and downscale coef as it will get
    # exponentiated.
    coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0)
    y = rng.poisson(lam=np.exp(X @ coef))
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=n_test, random_state=rng
    )
    gbdt_pois = HistGradientBoostingRegressor(loss="poisson", random_state=rng)
    gbdt_ls = HistGradientBoostingRegressor(loss="squared_error", random_state=rng)
    gbdt_pois.fit(X_train, y_train)
    gbdt_ls.fit(X_train, y_train)
    dummy = DummyRegressor(strategy="mean").fit(X_train, y_train)
    # Compare models both in-sample and out-of-sample.
    for X, y in [(X_train, y_train), (X_test, y_test)]:
        metric_pois = mean_poisson_deviance(y, gbdt_pois.predict(X))
        # squared_error might produce non-positive predictions => clip
        metric_ls = mean_poisson_deviance(y, np.clip(gbdt_ls.predict(X), 1e-15, None))
        metric_dummy = mean_poisson_deviance(y, dummy.predict(X))
        assert metric_pois < metric_ls
        assert metric_pois < metric_dummy
def test_binning_train_validation_are_separated():
    """Training and validation data must be binned with separate mappers.

    See issue 13926.
    """
    rng = np.random.RandomState(0)
    validation_fraction = 0.2
    clf = HistGradientBoostingClassifier(
        early_stopping=True,
        validation_fraction=validation_fraction,
        random_state=rng,
    )
    clf.fit(X_classification, y_classification)
    mapper_train = clf._bin_mapper
    # The dataset is small, so there is no subsampling and the random_state
    # of the reference mapper is irrelevant.
    mapper_full = _BinMapper(random_state=0)
    mapper_full.fit(X_classification)
    n_samples = X_classification.shape[0]
    # The training mapper only saw the training fraction of the samples.
    expected_n_bins = int((1 - validation_fraction) * n_samples)
    assert np.all(mapper_train.n_bins_non_missing_ == expected_n_bins)
    assert np.all(
        mapper_train.n_bins_non_missing_ != mapper_full.n_bins_non_missing_
    )
def test_missing_values_trivial():
    """Sanity check for missing value support.

    With a single feature and y defined as isnan(X), the model must reach
    perfect accuracy on the training set.
    """
    rng = np.random.RandomState(0)
    X = rng.normal(size=(100, 1))
    nan_mask = rng.binomial(1, 0.5, size=X.shape).astype(bool)
    X[nan_mask] = np.nan
    y = nan_mask.ravel()
    clf = HistGradientBoostingClassifier()
    clf.fit(X, y)
    assert clf.score(X, y) == pytest.approx(1)
@pytest.mark.parametrize("problem", ("classification", "regression"))
@pytest.mark.parametrize(
    (
        "missing_proportion, expected_min_score_classification, "
        "expected_min_score_regression"
    ),
    [(0.1, 0.97, 0.89), (0.2, 0.93, 0.81), (0.5, 0.79, 0.52)],
)
def test_missing_values_resilience(
    problem,
    missing_proportion,
    expected_min_score_classification,
    expected_min_score_regression,
):
    # Make sure the estimators can deal with missing values and still yield
    # decent predictions
    rng = np.random.RandomState(0)
    n_samples = 1000
    n_features = 2
    if problem == "regression":
        X, y = make_regression(
            n_samples=n_samples,
            n_features=n_features,
            n_informative=n_features,
            random_state=rng,
        )
        gb = HistGradientBoostingRegressor()
        expected_min_score = expected_min_score_regression
    else:
        X, y = make_classification(
            n_samples=n_samples,
            n_features=n_features,
            n_informative=n_features,
            n_redundant=0,
            n_repeated=0,
            random_state=rng,
        )
        gb = HistGradientBoostingClassifier()
        expected_min_score = expected_min_score_classification
    # Randomly mask missing_proportion of the entries with NaN.
    mask = rng.binomial(1, missing_proportion, size=X.shape).astype(bool)
    X[mask] = np.nan
    gb.fit(X, y)
    assert gb.score(X, y) > expected_min_score
@pytest.mark.parametrize(
    "data",
    [
        make_classification(random_state=0, n_classes=2),
        make_classification(random_state=0, n_classes=3, n_informative=3),
    ],
    ids=["binary_log_loss", "multiclass_log_loss"],
)
def test_zero_division_hessians(data):
    """Non-regression test for issue #14018.

    A very high learning rate makes the raw predictions saturate the softmax
    (or sigmoid in binary classification), so the probabilities become exactly
    0 or 1, the gradients constant, and the hessians zero. Computing the
    leaves values must not raise a zero division error in that situation.
    """
    X, y = data
    clf = HistGradientBoostingClassifier(learning_rate=100, max_iter=10)
    clf.fit(X, y)
def test_small_trainset():
    """_get_small_trainset must return a stratified 10k-sample subset."""
    # Make sure that the small trainset is stratified and has the expected
    # length (10k samples)
    n_samples = 20000
    original_distrib = {0: 0.1, 1: 0.2, 2: 0.3, 3: 0.4}
    rng = np.random.RandomState(42)
    X = rng.randn(n_samples).reshape(n_samples, 1)
    # Build y so that each class appears with exactly the target proportion.
    y = [
        [class_] * int(prop * n_samples) for (class_, prop) in original_distrib.items()
    ]
    y = shuffle(np.concatenate(y))
    gb = HistGradientBoostingClassifier()
    # Compute the small training set
    X_small, y_small, *_ = gb._get_small_trainset(
        X, y, seed=42, sample_weight_train=None
    )
    # Compute the class distribution in the small training set
    unique, counts = np.unique(y_small, return_counts=True)
    small_distrib = {class_: count / 10000 for (class_, count) in zip(unique, counts)}
    # Test that the small training set has the expected length
    assert X_small.shape[0] == 10000
    assert y_small.shape[0] == 10000
    # Test that the class distributions in the whole dataset and in the small
    # training set are identical
    assert small_distrib == pytest.approx(original_distrib)
def test_missing_values_minmax_imputation():
    # Compare the built-in missing value handling of Histogram GBC with an
    # a-priori missing value imputation strategy that should yield the same
    # results in terms of decision function.
    #
    # Each feature (containing NaNs) is replaced by 2 features:
    # - one where the nans are replaced by min(feature) - 1
    # - one where the nans are replaced by max(feature) + 1
    # A split where nans go to the left has an equivalent split in the
    # first (min) feature, and a split where nans go to the right has an
    # equivalent split in the second (max) feature.
    #
    # Assuming the data is such that there is never a tie to select the best
    # feature to split on during training, the learned decision trees should be
    # strictly equivalent (learn a sequence of splits that encode the same
    # decision function).
    #
    # The MinMaxImputer transformer is meant to be a toy implementation of the
    # "Missing In Attributes" (MIA) missing value handling for decision trees
    # https://www.sciencedirect.com/science/article/abs/pii/S0167865508000305
    # The implementation of MIA as an imputation transformer was suggested by
    # "Remark 3" in :arxiv:`1902.06931`
    class MinMaxImputer(TransformerMixin, BaseEstimator):
        # Toy MIA imputer: duplicates each feature with NaNs mapped to
        # below-min / above-max sentinel values.
        def fit(self, X, y=None):
            mm = MinMaxScaler().fit(X)
            self.data_min_ = mm.data_min_
            self.data_max_ = mm.data_max_
            return self
        def transform(self, X):
            X_min, X_max = X.copy(), X.copy()
            for feature_idx in range(X.shape[1]):
                nan_mask = np.isnan(X[:, feature_idx])
                X_min[nan_mask, feature_idx] = self.data_min_[feature_idx] - 1
                X_max[nan_mask, feature_idx] = self.data_max_[feature_idx] + 1
            return np.concatenate([X_min, X_max], axis=1)
    def make_missing_value_data(n_samples=int(1e4), seed=0):
        # Build a 4-feature regression dataset with structured missingness.
        rng = np.random.RandomState(seed)
        X, y = make_regression(n_samples=n_samples, n_features=4, random_state=rng)
        # Pre-bin the data to ensure a deterministic handling by the 2
        # strategies and also make it easier to insert np.nan in a structured
        # way:
        X = KBinsDiscretizer(
            n_bins=42, encode="ordinal", quantile_method="averaged_inverted_cdf"
        ).fit_transform(X)
        # First feature has missing values completely at random:
        rnd_mask = rng.rand(X.shape[0]) > 0.9
        X[rnd_mask, 0] = np.nan
        # Second and third features have missing values for extreme values
        # (censoring missingness):
        low_mask = X[:, 1] == 0
        X[low_mask, 1] = np.nan
        high_mask = X[:, 2] == X[:, 2].max()
        X[high_mask, 2] = np.nan
        # Make the last feature nan pattern very informative:
        y_max = np.percentile(y, 70)
        y_max_mask = y >= y_max
        y[y_max_mask] = y_max
        X[y_max_mask, 3] = np.nan
        # Check that there is at least one missing value in each feature:
        for feature_idx in range(X.shape[1]):
            assert any(np.isnan(X[:, feature_idx]))
        # Let's use a test set to check that the learned decision function is
        # the same as evaluated on unseen data. Otherwise it could just be the
        # case that we find two independent ways to overfit the training set.
        return train_test_split(X, y, random_state=rng)
    # n_samples need to be large enough to minimize the likelihood of having
    # several candidate splits with the same gain value in a given tree.
    X_train, X_test, y_train, y_test = make_missing_value_data(
        n_samples=int(1e4), seed=0
    )
    # Use a small number of leaf nodes and iterations so as to keep
    # under-fitting models to minimize the likelihood of ties when training the
    # model.
    gbm1 = HistGradientBoostingRegressor(max_iter=100, max_leaf_nodes=5, random_state=0)
    gbm1.fit(X_train, y_train)
    gbm2 = make_pipeline(MinMaxImputer(), clone(gbm1))
    gbm2.fit(X_train, y_train)
    # Check that the model reach the same score:
    assert gbm1.score(X_train, y_train) == pytest.approx(gbm2.score(X_train, y_train))
    assert gbm1.score(X_test, y_test) == pytest.approx(gbm2.score(X_test, y_test))
    # Check the individual prediction match as a finer grained
    # decision function check.
    assert_allclose(gbm1.predict(X_train), gbm2.predict(X_train))
    assert_allclose(gbm1.predict(X_test), gbm2.predict(X_test))
def test_infinite_values():
    """Basic check that +/- inf inputs are handled during fit and predict."""
    X = np.array([-np.inf, 0, 1, np.inf]).reshape(-1, 1)
    y = np.array([0, 0, 1, 1])
    model = HistGradientBoostingRegressor(min_samples_leaf=1)
    model.fit(X, y)
    np.testing.assert_allclose(model.predict(X), y, atol=1e-4)
def test_consistent_lengths():
    """X, y and sample_weight with mismatched lengths must raise ValueError."""
    X = np.array([-np.inf, 0, 1, np.inf]).reshape(-1, 1)
    y = np.array([0, 0, 1, 1])
    model = HistGradientBoostingRegressor()
    # sample_weight shorter than X / y
    sample_weight = np.array([0.1, 0.3, 0.1])
    with pytest.raises(ValueError, match=r"sample_weight.shape == \(3,\), expected"):
        model.fit(X, y, sample_weight)
    # y shorter than X
    with pytest.raises(
        ValueError, match="Found input variables with inconsistent number"
    ):
        model.fit(X, y[1:])
def test_infinite_values_missing_values():
    """inf and nan must be distinguishable when both are present.

    High level counterpart of test_split_on_nan_with_infinite_values() in
    test_grower.py; here we cannot check the predictions for binned values.
    """
    X = np.asarray([-np.inf, 0, 1, np.inf, np.nan]).reshape(-1, 1)
    y_isnan = np.isnan(X.ravel())
    y_isinf = X.ravel() == np.inf
    stump = HistGradientBoostingClassifier(
        min_samples_leaf=1, max_iter=1, learning_rate=1, max_depth=2
    )
    # A single stump must separate inf from the rest, and nan from the rest.
    assert stump.fit(X, y_isinf).score(X, y_isinf) == 1
    assert stump.fit(X, y_isnan).score(X, y_isnan) == 1
@pytest.mark.parametrize("scoring", [None, "loss"])
def test_string_target_early_stopping(scoring):
    """Non-regression test for #14709.

    String targets must be encoded before the early-stopping score is
    computed; fitting must not raise.
    """
    rng = np.random.RandomState(42)
    X = rng.randn(100, 10)
    y = np.array(["x"] * 50 + ["y"] * 50, dtype=object)
    clf = HistGradientBoostingClassifier(n_iter_no_change=10, scoring=scoring)
    clf.fit(X, y)
def test_zero_sample_weights_regression():
    """A zero sample weight must be equivalent to dropping the sample."""
    X = [[1, 0], [1, 0], [1, 0], [0, 1]]
    y = [0, 0, 1, 0]
    # Zero out the first two samples; the only remaining [1, 0] has target 1.
    sample_weight = [0, 0, 1, 1]
    model = HistGradientBoostingRegressor(min_samples_leaf=1)
    model.fit(X, y, sample_weight=sample_weight)
    assert model.predict([[1, 0]])[0] > 0.5
def test_zero_sample_weights_classification():
    """A zero sample weight must be equivalent to dropping the sample."""
    # Binary case: the only surviving [1, 0] sample has label 1.
    X = [[1, 0], [1, 0], [1, 0], [0, 1]]
    y = [0, 0, 1, 0]
    sample_weight = [0, 0, 1, 1]
    clf = HistGradientBoostingClassifier(loss="log_loss", min_samples_leaf=1)
    clf.fit(X, y, sample_weight=sample_weight)
    assert_array_equal(clf.predict([[1, 0]]), [1])
    # Multiclass case: same idea with three classes.
    X = [[1, 0], [1, 0], [1, 0], [0, 1], [1, 1]]
    y = [0, 0, 1, 0, 2]
    sample_weight = [0, 0, 1, 1, 1]
    clf = HistGradientBoostingClassifier(loss="log_loss", min_samples_leaf=1)
    clf.fit(X, y, sample_weight=sample_weight)
    assert_array_equal(clf.predict([[1, 0]]), [1])
@pytest.mark.parametrize(
    "problem", ("regression", "binary_classification", "multiclass_classification")
)
@pytest.mark.parametrize("duplication", ("half", "all"))
def test_sample_weight_effect(problem, duplication):
    # High level test to make sure that duplicating a sample is equivalent to
    # giving it weight of 2.
    # fails for n_samples > 255 because binning does not take sample weights
    # into account. Keeping n_samples <= 255 makes
    # sure only unique values are used so SW have no effect on binning.
    n_samples = 255
    n_features = 2
    if problem == "regression":
        X, y = make_regression(
            n_samples=n_samples,
            n_features=n_features,
            n_informative=n_features,
            random_state=0,
        )
        Klass = HistGradientBoostingRegressor
    else:
        n_classes = 2 if problem == "binary_classification" else 3
        X, y = make_classification(
            n_samples=n_samples,
            n_features=n_features,
            n_informative=n_features,
            n_redundant=0,
            n_clusters_per_class=1,
            n_classes=n_classes,
            random_state=0,
        )
        Klass = HistGradientBoostingClassifier
    # This test can't pass if min_samples_leaf > 1 because that would force 2
    # samples to be in the same node in est_sw, while these samples would be
    # free to be separate in est_dup: est_dup would just group together the
    # duplicated samples.
    est = Klass(min_samples_leaf=1)
    # Create dataset with duplicate and corresponding sample weights
    if duplication == "half":
        lim = n_samples // 2
    else:
        lim = n_samples
    X_dup = np.r_[X, X[:lim]]
    y_dup = np.r_[y, y[:lim]]
    # The first `lim` samples get weight 2, mirroring their duplication above.
    sample_weight = np.ones(shape=(n_samples))
    sample_weight[:lim] = 2
    est_sw = clone(est).fit(X, y, sample_weight=sample_weight)
    est_dup = clone(est).fit(X_dup, y_dup)
    # checking raw_predict is stricter than just predict for classification
    assert np.allclose(est_sw._raw_predict(X_dup), est_dup._raw_predict(X_dup))
@pytest.mark.parametrize("Loss", (HalfSquaredError, AbsoluteError))
def test_sum_hessians_are_sample_weight(Loss):
    # For losses with constant hessians, the sum_hessians field of the
    # histograms must be equal to the sum of the sample weight of samples at
    # the corresponding bin.
    rng = np.random.RandomState(0)
    n_samples = 1000
    n_features = 2
    X, y = make_regression(n_samples=n_samples, n_features=n_features, random_state=rng)
    bin_mapper = _BinMapper()
    X_binned = bin_mapper.fit_transform(X)
    # While sample weights are supposed to be positive, this still works.
    sample_weight = rng.normal(size=n_samples)
    loss = Loss(sample_weight=sample_weight)
    gradients, hessians = loss.init_gradient_and_hessian(
        n_samples=n_samples, dtype=G_H_DTYPE
    )
    # The loss API expects 2d (n_samples, 1) gradient/hessian arrays.
    gradients, hessians = gradients.reshape((-1, 1)), hessians.reshape((-1, 1))
    raw_predictions = rng.normal(size=(n_samples, 1))
    loss.gradient_hessian(
        y_true=y,
        raw_prediction=raw_predictions,
        sample_weight=sample_weight,
        gradient_out=gradients,
        hessian_out=hessians,
        n_threads=n_threads,
    )
    # build sum_sample_weight which contains the sum of the sample weights at
    # each bin (for each feature). This must be equal to the sum_hessians
    # field of the corresponding histogram
    sum_sw = np.zeros(shape=(n_features, bin_mapper.n_bins))
    for feature_idx in range(n_features):
        for sample_idx in range(n_samples):
            sum_sw[feature_idx, X_binned[sample_idx, feature_idx]] += sample_weight[
                sample_idx
            ]
    # Build histogram
    grower = TreeGrower(
        X_binned, gradients[:, 0], hessians[:, 0], n_bins=bin_mapper.n_bins
    )
    histograms = grower.histogram_builder.compute_histograms_brute(
        grower.root.sample_indices
    )
    for feature_idx in range(n_features):
        for bin_idx in range(bin_mapper.n_bins):
            assert histograms[feature_idx, bin_idx]["sum_hessians"] == (
                pytest.approx(sum_sw[feature_idx, bin_idx], rel=1e-5)
            )
def test_max_depth_max_leaf_nodes():
    """Non-regression test for
    https://github.com/scikit-learn/scikit-learn/issues/16179

    When the max_depth and max_leaf_nodes criteria were met at the same
    time, max_leaf_nodes used to be ignored.
    """
    X, y = make_classification(random_state=0)
    clf = HistGradientBoostingClassifier(max_depth=2, max_leaf_nodes=3, max_iter=1)
    clf.fit(X, y)
    predictor = clf._predictors[0][0]
    assert predictor.get_max_depth() == 2
    # would be 4 prior to the bug fix
    assert predictor.get_n_leaf_nodes() == 3
def test_early_stopping_on_test_set_with_warm_start():
    """Non-regression test for #16661.

    With warm_start=True, early stopping enabled and no validation set,
    a second call to fit used to fail.
    """
    X, y = make_classification(random_state=0)
    clf = HistGradientBoostingClassifier(
        max_iter=1,
        scoring="loss",
        warm_start=True,
        early_stopping=True,
        n_iter_no_change=1,
        validation_fraction=None,
    )
    clf.fit(X, y)
    # The second fit must not raise.
    clf.set_params(max_iter=2)
    clf.fit(X, y)
def test_early_stopping_with_sample_weights(monkeypatch):
"""Check that sample weights is passed in to the scorer and _raw_predict is not
called."""
mock_scorer = Mock(side_effect=get_scorer("neg_median_absolute_error"))
def mock_check_scoring(estimator, scoring):
assert scoring == "neg_median_absolute_error"
return mock_scorer
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/ensemble/_hist_gradient_boosting/tests/test_histogram.py | sklearn/ensemble/_hist_gradient_boosting/tests/test_histogram.py | import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from sklearn.ensemble._hist_gradient_boosting.common import (
G_H_DTYPE,
HISTOGRAM_DTYPE,
X_BINNED_DTYPE,
)
from sklearn.ensemble._hist_gradient_boosting.histogram import (
_build_histogram,
_build_histogram_naive,
_build_histogram_no_hessian,
_build_histogram_root,
_build_histogram_root_no_hessian,
_subtract_histograms,
)
@pytest.mark.parametrize("build_func", [_build_histogram_naive, _build_histogram])
def test_build_histogram(build_func):
    """Check per-bin counts, gradient and hessian sums of built histograms."""
    binned_feature = np.array([0, 2, 0, 1, 2, 0, 2, 1], dtype=X_BINNED_DTYPE)
    # Small sample_indices (below unrolling threshold)
    ordered_gradients = np.array([0, 1, 3], dtype=G_H_DTYPE)
    ordered_hessians = np.array([1, 1, 2], dtype=G_H_DTYPE)
    sample_indices = np.array([0, 2, 3], dtype=np.uint32)
    hist = np.zeros((1, 3), dtype=HISTOGRAM_DTYPE)
    build_func(
        0, sample_indices, binned_feature, ordered_gradients, ordered_hessians, hist
    )
    hist = hist[0]
    assert_array_equal(hist["count"], [2, 1, 0])
    assert_allclose(hist["sum_gradients"], [1, 3, 0])
    assert_allclose(hist["sum_hessians"], [2, 2, 0])
    # Larger sample_indices (above unrolling threshold)
    sample_indices = np.array([0, 2, 3, 6, 7], dtype=np.uint32)
    ordered_gradients = np.array([0, 1, 3, 0, 1], dtype=G_H_DTYPE)
    ordered_hessians = np.array([1, 1, 2, 1, 0], dtype=G_H_DTYPE)
    hist = np.zeros((1, 3), dtype=HISTOGRAM_DTYPE)
    build_func(
        0, sample_indices, binned_feature, ordered_gradients, ordered_hessians, hist
    )
    hist = hist[0]
    assert_array_equal(hist["count"], [2, 2, 1])
    assert_allclose(hist["sum_gradients"], [1, 4, 0])
    assert_allclose(hist["sum_hessians"], [2, 2, 1])
def test_histogram_sample_order_independence():
    # Make sure the order of the samples has no impact on the histogram
    # computations
    rng = np.random.RandomState(42)
    n_sub_samples = 100
    n_samples = 1000
    n_bins = 256
    binned_feature = rng.randint(0, n_bins - 1, size=n_samples, dtype=X_BINNED_DTYPE)
    sample_indices = rng.choice(
        np.arange(n_samples, dtype=np.uint32), n_sub_samples, replace=False
    )
    ordered_gradients = rng.randn(n_sub_samples).astype(G_H_DTYPE)
    # Reference histograms built in the original sample order.
    hist_gc = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
    _build_histogram_no_hessian(
        0, sample_indices, binned_feature, ordered_gradients, hist_gc
    )
    ordered_hessians = rng.exponential(size=n_sub_samples).astype(G_H_DTYPE)
    hist_ghc = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
    _build_histogram(
        0, sample_indices, binned_feature, ordered_gradients, ordered_hessians, hist_ghc
    )
    # Same histograms built after permuting the samples.
    permutation = rng.permutation(n_sub_samples)
    hist_gc_perm = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
    _build_histogram_no_hessian(
        0,
        sample_indices[permutation],
        binned_feature,
        ordered_gradients[permutation],
        hist_gc_perm,
    )
    hist_ghc_perm = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
    _build_histogram(
        0,
        sample_indices[permutation],
        binned_feature,
        ordered_gradients[permutation],
        ordered_hessians[permutation],
        hist_ghc_perm,
    )
    hist_gc = hist_gc[0]
    hist_ghc = hist_ghc[0]
    hist_gc_perm = hist_gc_perm[0]
    hist_ghc_perm = hist_ghc_perm[0]
    assert_allclose(hist_gc["sum_gradients"], hist_gc_perm["sum_gradients"])
    assert_array_equal(hist_gc["count"], hist_gc_perm["count"])
    assert_allclose(hist_ghc["sum_gradients"], hist_ghc_perm["sum_gradients"])
    assert_allclose(hist_ghc["sum_hessians"], hist_ghc_perm["sum_hessians"])
    assert_array_equal(hist_ghc["count"], hist_ghc_perm["count"])
@pytest.mark.parametrize("constant_hessian", [True, False])
def test_unrolled_equivalent_to_naive(constant_hessian):
    # Make sure the different unrolled histogram computations give the same
    # results as the naive one.
    rng = np.random.RandomState(42)
    n_samples = 10
    n_bins = 5
    sample_indices = np.arange(n_samples).astype(np.uint32)
    binned_feature = rng.randint(0, n_bins - 1, size=n_samples, dtype=np.uint8)
    ordered_gradients = rng.randn(n_samples).astype(G_H_DTYPE)
    if constant_hessian:
        ordered_hessians = np.ones(n_samples, dtype=G_H_DTYPE)
    else:
        ordered_hessians = rng.lognormal(size=n_samples).astype(G_H_DTYPE)
    # One output histogram per specialized builder, plus the naive reference.
    hist_gc_root = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
    hist_ghc_root = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
    hist_gc = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
    hist_ghc = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
    hist_naive = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
    _build_histogram_root_no_hessian(0, binned_feature, ordered_gradients, hist_gc_root)
    _build_histogram_root(
        0, binned_feature, ordered_gradients, ordered_hessians, hist_ghc_root
    )
    _build_histogram_no_hessian(
        0, sample_indices, binned_feature, ordered_gradients, hist_gc
    )
    _build_histogram(
        0, sample_indices, binned_feature, ordered_gradients, ordered_hessians, hist_ghc
    )
    _build_histogram_naive(
        0,
        sample_indices,
        binned_feature,
        ordered_gradients,
        ordered_hessians,
        hist_naive,
    )
    hist_naive = hist_naive[0]
    hist_gc_root = hist_gc_root[0]
    hist_ghc_root = hist_ghc_root[0]
    hist_gc = hist_gc[0]
    hist_ghc = hist_ghc[0]
    # All builders must agree with the naive one on counts and gradients.
    for hist in (hist_gc_root, hist_ghc_root, hist_gc, hist_ghc):
        assert_array_equal(hist["count"], hist_naive["count"])
        assert_allclose(hist["sum_gradients"], hist_naive["sum_gradients"])
    for hist in (hist_ghc_root, hist_ghc):
        assert_allclose(hist["sum_hessians"], hist_naive["sum_hessians"])
    # The no-hessian variants leave sum_hessians untouched (all zeros).
    for hist in (hist_gc_root, hist_gc):
        assert_array_equal(hist["sum_hessians"], np.zeros(n_bins))
@pytest.mark.parametrize("constant_hessian", [True, False])
def test_hist_subtraction(constant_hessian):
    # Make sure the histogram subtraction trick gives the same result as the
    # classical method.
    rng = np.random.RandomState(42)
    n_samples = 10
    n_bins = 5
    sample_indices = np.arange(n_samples).astype(np.uint32)
    binned_feature = rng.randint(0, n_bins - 1, size=n_samples, dtype=np.uint8)
    ordered_gradients = rng.randn(n_samples).astype(G_H_DTYPE)
    if constant_hessian:
        ordered_hessians = np.ones(n_samples, dtype=G_H_DTYPE)
    else:
        ordered_hessians = rng.lognormal(size=n_samples).astype(G_H_DTYPE)
    # Parent histogram over all the samples.
    hist_parent = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
    if constant_hessian:
        _build_histogram_no_hessian(
            0, sample_indices, binned_feature, ordered_gradients, hist_parent
        )
    else:
        _build_histogram(
            0,
            sample_indices,
            binned_feature,
            ordered_gradients,
            ordered_hessians,
            hist_parent,
        )
    # Random split of the samples into a left and a right child.
    mask = rng.randint(0, 2, n_samples).astype(bool)
    sample_indices_left = sample_indices[mask]
    ordered_gradients_left = ordered_gradients[mask]
    ordered_hessians_left = ordered_hessians[mask]
    hist_left = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
    if constant_hessian:
        _build_histogram_no_hessian(
            0, sample_indices_left, binned_feature, ordered_gradients_left, hist_left
        )
    else:
        _build_histogram(
            0,
            sample_indices_left,
            binned_feature,
            ordered_gradients_left,
            ordered_hessians_left,
            hist_left,
        )
    sample_indices_right = sample_indices[~mask]
    ordered_gradients_right = ordered_gradients[~mask]
    ordered_hessians_right = ordered_hessians[~mask]
    hist_right = np.zeros((1, n_bins), dtype=HISTOGRAM_DTYPE)
    if constant_hessian:
        _build_histogram_no_hessian(
            0, sample_indices_right, binned_feature, ordered_gradients_right, hist_right
        )
    else:
        _build_histogram(
            0,
            sample_indices_right,
            binned_feature,
            ordered_gradients_right,
            ordered_hessians_right,
            hist_right,
        )
    # Subtraction trick: child = parent - sibling, computed in place.
    hist_left_sub = np.copy(hist_parent)
    hist_right_sub = np.copy(hist_parent)
    _subtract_histograms(0, n_bins, hist_left_sub, hist_right)
    _subtract_histograms(0, n_bins, hist_right_sub, hist_left)
    for key in ("count", "sum_hessians", "sum_gradients"):
        assert_allclose(hist_left[key], hist_left_sub[key], rtol=1e-6)
        assert_allclose(hist_right[key], hist_right_sub[key], rtol=1e-6)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/ensemble/_hist_gradient_boosting/tests/test_bitset.py | sklearn/ensemble/_hist_gradient_boosting/tests/test_bitset.py | import numpy as np
import pytest
from numpy.testing import assert_allclose
from sklearn.ensemble._hist_gradient_boosting._bitset import (
in_bitset_memoryview,
set_bitset_memoryview,
set_raw_bitset_from_binned_bitset,
)
from sklearn.ensemble._hist_gradient_boosting.common import X_DTYPE
@pytest.mark.parametrize(
    "values_to_insert, expected_bitset",
    [
        ([0, 4, 33], np.array([2**0 + 2**4, 2**1, 0], dtype=np.uint32)),
        (
            [31, 32, 33, 79],
            np.array([2**31, 2**0 + 2**1, 2**15], dtype=np.uint32),
        ),
    ],
)
def test_set_get_bitset(values_to_insert, expected_bitset):
    """Setting bits then querying them must round-trip through the bitset."""
    n_32bits_ints = 3
    bitset = np.zeros(n_32bits_ints, dtype=np.uint32)
    for value in values_to_insert:
        set_bitset_memoryview(bitset, value)
    assert_allclose(expected_bitset, bitset)
    # Membership must hold exactly for the inserted values.
    for value in range(32 * n_32bits_ints):
        if value in values_to_insert:
            assert in_bitset_memoryview(bitset, value)
        else:
            assert not in_bitset_memoryview(bitset, value)
@pytest.mark.parametrize(
    "raw_categories, binned_cat_to_insert, expected_raw_bitset",
    [
        (
            [3, 4, 5, 10, 31, 32, 43],
            [0, 2, 4, 5, 6],
            [2**3 + 2**5 + 2**31, 2**0 + 2**11],
        ),
        ([3, 33, 50, 52], [1, 3], [0, 2**1 + 2**20]),
    ],
)
def test_raw_bitset_from_binned_bitset(
    raw_categories, binned_cat_to_insert, expected_raw_bitset
):
    """Translating a binned-category bitset to raw category values must map
    each set binned bit onto the corresponding raw category bit."""
    binned_bitset = np.zeros(2, dtype=np.uint32)
    raw_bitset = np.zeros(2, dtype=np.uint32)
    raw_categories = np.asarray(raw_categories, dtype=X_DTYPE)
    for val in binned_cat_to_insert:
        set_bitset_memoryview(binned_bitset, val)
    set_raw_bitset_from_binned_bitset(raw_bitset, binned_bitset, raw_categories)
    assert_allclose(expected_raw_bitset, raw_bitset)
    # Membership in the raw bitset must mirror the inserted binned categories.
    for binned_cat_val, raw_cat_val in enumerate(raw_categories):
        if binned_cat_val in binned_cat_to_insert:
            assert in_bitset_memoryview(raw_bitset, raw_cat_val)
        else:
            assert not in_bitset_memoryview(raw_bitset, raw_cat_val)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/ensemble/_hist_gradient_boosting/tests/test_splitting.py | sklearn/ensemble/_hist_gradient_boosting/tests/test_splitting.py | import numpy as np
import pytest
from numpy.testing import assert_array_equal
from sklearn.ensemble._hist_gradient_boosting.common import (
G_H_DTYPE,
HISTOGRAM_DTYPE,
X_BINNED_DTYPE,
MonotonicConstraint,
)
from sklearn.ensemble._hist_gradient_boosting.histogram import HistogramBuilder
from sklearn.ensemble._hist_gradient_boosting.splitting import (
Splitter,
compute_node_value,
)
from sklearn.utils._openmp_helpers import _openmp_effective_n_threads
from sklearn.utils._testing import skip_if_32bit
# Number of OpenMP threads effectively available to the tests in this module.
n_threads = _openmp_effective_n_threads()
@pytest.mark.parametrize("n_bins", [3, 32, 256])
def test_histogram_split(n_bins):
    """Check that the best split on a single feature lands on the expected bin.

    Gradients are built so that their sign flips exactly after ``true_bin``,
    which makes the gain maximal when splitting at that bin, for every
    candidate bin and for both gradient signs.
    """
    rng = np.random.RandomState(42)
    feature_idx = 0
    l2_regularization = 0
    min_hessian_to_split = 1e-3
    min_samples_leaf = 1
    min_gain_to_split = 0.0
    X_binned = np.asfortranarray(
        rng.randint(0, n_bins - 1, size=(int(1e4), 1)), dtype=X_BINNED_DTYPE
    )
    binned_feature = X_binned.T[feature_idx]
    sample_indices = np.arange(binned_feature.shape[0], dtype=np.uint32)
    ordered_hessians = np.ones_like(binned_feature, dtype=G_H_DTYPE)
    all_hessians = ordered_hessians
    sum_hessians = all_hessians.sum()
    hessians_are_constant = False
    for true_bin in range(1, n_bins - 2):
        for sign in [-1, 1]:
            # Samples with bin <= true_bin get gradient -sign, the rest +sign,
            # so the optimal cut is right after true_bin.
            ordered_gradients = np.full_like(binned_feature, sign, dtype=G_H_DTYPE)
            ordered_gradients[binned_feature <= true_bin] *= -1
            all_gradients = ordered_gradients
            sum_gradients = all_gradients.sum()
            builder = HistogramBuilder(
                X_binned,
                n_bins,
                all_gradients,
                all_hessians,
                hessians_are_constant,
                n_threads,
            )
            n_bins_non_missing = np.array(
                [n_bins - 1] * X_binned.shape[1], dtype=np.uint32
            )
            has_missing_values = np.array([False] * X_binned.shape[1], dtype=np.uint8)
            monotonic_cst = np.array(
                [MonotonicConstraint.NO_CST] * X_binned.shape[1], dtype=np.int8
            )
            is_categorical = np.zeros_like(monotonic_cst, dtype=np.uint8)
            missing_values_bin_idx = n_bins - 1
            splitter = Splitter(
                X_binned,
                n_bins_non_missing,
                missing_values_bin_idx,
                has_missing_values,
                is_categorical,
                monotonic_cst,
                l2_regularization,
                min_hessian_to_split,
                min_samples_leaf,
                min_gain_to_split,
                hessians_are_constant,
            )
            histograms = builder.compute_histograms_brute(sample_indices)
            value = compute_node_value(
                sum_gradients, sum_hessians, -np.inf, np.inf, l2_regularization
            )
            split_info = splitter.find_node_split(
                sample_indices.shape[0], histograms, sum_gradients, sum_hessians, value
            )
            assert split_info.bin_idx == true_bin
            assert split_info.gain >= 0
            assert split_info.feature_idx == feature_idx
            # Left/right children must partition the node's samples exactly.
            assert (
                split_info.n_samples_left + split_info.n_samples_right
                == sample_indices.shape[0]
            )
            # Constant hessian: 1. per sample.
            assert split_info.n_samples_left == split_info.sum_hessian_left
@skip_if_32bit
@pytest.mark.parametrize("constant_hessian", [True, False])
def test_gradient_and_hessian_sanity(constant_hessian):
    # This test checks that the values of gradients and hessians are
    # consistent in different places:
    # - in split_info: si.sum_gradient_left + si.sum_gradient_right must be
    # equal to the gradient at the node. Same for hessians.
    # - in the histograms: summing 'sum_gradients' over the bins must be
    # constant across all features, and those sums must be equal to the
    # node's gradient. Same for hessians.
    rng = np.random.RandomState(42)
    n_bins = 10
    n_features = 20
    n_samples = 500
    l2_regularization = 0.0
    min_hessian_to_split = 1e-3
    min_samples_leaf = 1
    min_gain_to_split = 0.0
    X_binned = rng.randint(
        0, n_bins, size=(n_samples, n_features), dtype=X_BINNED_DTYPE
    )
    X_binned = np.asfortranarray(X_binned)
    sample_indices = np.arange(n_samples, dtype=np.uint32)
    all_gradients = rng.randn(n_samples).astype(G_H_DTYPE)
    sum_gradients = all_gradients.sum()
    if constant_hessian:
        # A single-element array is the convention for "constant hessian of 1".
        all_hessians = np.ones(1, dtype=G_H_DTYPE)
        sum_hessians = 1 * n_samples
    else:
        all_hessians = rng.lognormal(size=n_samples).astype(G_H_DTYPE)
        sum_hessians = all_hessians.sum()
    builder = HistogramBuilder(
        X_binned, n_bins, all_gradients, all_hessians, constant_hessian, n_threads
    )
    n_bins_non_missing = np.array([n_bins - 1] * X_binned.shape[1], dtype=np.uint32)
    has_missing_values = np.array([False] * X_binned.shape[1], dtype=np.uint8)
    monotonic_cst = np.array(
        [MonotonicConstraint.NO_CST] * X_binned.shape[1], dtype=np.int8
    )
    is_categorical = np.zeros_like(monotonic_cst, dtype=np.uint8)
    missing_values_bin_idx = n_bins - 1
    splitter = Splitter(
        X_binned,
        n_bins_non_missing,
        missing_values_bin_idx,
        has_missing_values,
        is_categorical,
        monotonic_cst,
        l2_regularization,
        min_hessian_to_split,
        min_samples_leaf,
        min_gain_to_split,
        constant_hessian,
    )
    # Split the root node once, then find a split in each of the two children
    # so that the parent/left/right consistency can be checked below.
    hists_parent = builder.compute_histograms_brute(sample_indices)
    value_parent = compute_node_value(
        sum_gradients, sum_hessians, -np.inf, np.inf, l2_regularization
    )
    si_parent = splitter.find_node_split(
        n_samples, hists_parent, sum_gradients, sum_hessians, value_parent
    )
    sample_indices_left, sample_indices_right, _ = splitter.split_indices(
        si_parent, sample_indices
    )
    hists_left = builder.compute_histograms_brute(sample_indices_left)
    value_left = compute_node_value(
        si_parent.sum_gradient_left,
        si_parent.sum_hessian_left,
        -np.inf,
        np.inf,
        l2_regularization,
    )
    hists_right = builder.compute_histograms_brute(sample_indices_right)
    value_right = compute_node_value(
        si_parent.sum_gradient_right,
        si_parent.sum_hessian_right,
        -np.inf,
        np.inf,
        l2_regularization,
    )
    si_left = splitter.find_node_split(
        n_samples,
        hists_left,
        si_parent.sum_gradient_left,
        si_parent.sum_hessian_left,
        value_left,
    )
    si_right = splitter.find_node_split(
        n_samples,
        hists_right,
        si_parent.sum_gradient_right,
        si_parent.sum_hessian_right,
        value_right,
    )
    # make sure that si.sum_gradient_left + si.sum_gradient_right have their
    # expected value, same for hessians
    for si, indices in (
        (si_parent, sample_indices),
        (si_left, sample_indices_left),
        (si_right, sample_indices_right),
    ):
        gradient = si.sum_gradient_right + si.sum_gradient_left
        expected_gradient = all_gradients[indices].sum()
        hessian = si.sum_hessian_right + si.sum_hessian_left
        if constant_hessian:
            expected_hessian = indices.shape[0] * all_hessians[0]
        else:
            expected_hessian = all_hessians[indices].sum()
        assert np.isclose(gradient, expected_gradient)
        assert np.isclose(hessian, expected_hessian)
    # make sure sum of gradients in histograms are the same for all features,
    # and make sure they're equal to their expected value
    hists_parent = np.asarray(hists_parent, dtype=HISTOGRAM_DTYPE)
    hists_left = np.asarray(hists_left, dtype=HISTOGRAM_DTYPE)
    hists_right = np.asarray(hists_right, dtype=HISTOGRAM_DTYPE)
    for hists, indices in (
        (hists_parent, sample_indices),
        (hists_left, sample_indices_left),
        (hists_right, sample_indices_right),
    ):
        # note: gradients and hessians have shape (n_features,),
        # we're comparing them to *scalars*. This has the benefit of also
        # making sure that all the entries are equal across features.
        gradients = hists["sum_gradients"].sum(axis=1)  # shape = (n_features,)
        expected_gradient = all_gradients[indices].sum()  # scalar
        hessians = hists["sum_hessians"].sum(axis=1)
        if constant_hessian:
            # 0 is not the actual hessian, but it's not computed in this case
            expected_hessian = 0.0
        else:
            expected_hessian = all_hessians[indices].sum()
        assert np.allclose(gradients, expected_gradient)
        assert np.allclose(hessians, expected_hessian)
def test_split_indices():
    # Check that split_indices returns the correct splits and that
    # splitter.partition is consistent with what is returned.
    rng = np.random.RandomState(421)
    n_bins = 5
    n_samples = 10
    l2_regularization = 0.0
    min_hessian_to_split = 1e-3
    min_samples_leaf = 1
    min_gain_to_split = 0.0
    # split will happen on feature 1 and on bin 3
    X_binned = [
        [0, 0],
        [0, 3],
        [0, 4],
        [0, 0],
        [0, 0],
        [0, 0],
        [0, 0],
        [0, 4],
        [0, 0],
        [0, 4],
    ]
    X_binned = np.asfortranarray(X_binned, dtype=X_BINNED_DTYPE)
    sample_indices = np.arange(n_samples, dtype=np.uint32)
    all_gradients = rng.randn(n_samples).astype(G_H_DTYPE)
    # Single-element hessian array == constant hessian of 1 per sample.
    all_hessians = np.ones(1, dtype=G_H_DTYPE)
    sum_gradients = all_gradients.sum()
    sum_hessians = 1 * n_samples
    hessians_are_constant = True
    builder = HistogramBuilder(
        X_binned, n_bins, all_gradients, all_hessians, hessians_are_constant, n_threads
    )
    n_bins_non_missing = np.array([n_bins] * X_binned.shape[1], dtype=np.uint32)
    has_missing_values = np.array([False] * X_binned.shape[1], dtype=np.uint8)
    monotonic_cst = np.array(
        [MonotonicConstraint.NO_CST] * X_binned.shape[1], dtype=np.int8
    )
    is_categorical = np.zeros_like(monotonic_cst, dtype=np.uint8)
    missing_values_bin_idx = n_bins - 1
    splitter = Splitter(
        X_binned,
        n_bins_non_missing,
        missing_values_bin_idx,
        has_missing_values,
        is_categorical,
        monotonic_cst,
        l2_regularization,
        min_hessian_to_split,
        min_samples_leaf,
        min_gain_to_split,
        hessians_are_constant,
    )
    # Before any split, the partition is the identity over sample indices.
    assert np.all(sample_indices == splitter.partition)
    histograms = builder.compute_histograms_brute(sample_indices)
    value = compute_node_value(
        sum_gradients, sum_hessians, -np.inf, np.inf, l2_regularization
    )
    si_root = splitter.find_node_split(
        n_samples, histograms, sum_gradients, sum_hessians, value
    )
    # sanity checks for best split
    assert si_root.feature_idx == 1
    assert si_root.bin_idx == 3
    samples_left, samples_right, position_right = splitter.split_indices(
        si_root, splitter.partition
    )
    # Samples with feature-1 bin <= 3 go left, the rest (bin 4) go right.
    assert set(samples_left) == set([0, 1, 3, 4, 5, 6, 8])
    assert set(samples_right) == set([2, 7, 9])
    assert list(samples_left) == list(splitter.partition[:position_right])
    assert list(samples_right) == list(splitter.partition[position_right:])
    # Check that the resulting split indices sizes are consistent with the
    # count statistics anticipated when looking for the best split.
    assert samples_left.shape[0] == si_root.n_samples_left
    assert samples_right.shape[0] == si_root.n_samples_right
def test_min_gain_to_split():
    # Try to split a pure node (all gradients are equal, same for hessians)
    # with min_gain_to_split = 0 and make sure that the node is not split (best
    # possible gain = -1). Note: before the strict inequality comparison, this
    # test would fail because the node would be split with a gain of 0.
    rng = np.random.RandomState(42)
    l2_regularization = 0
    min_hessian_to_split = 0
    min_samples_leaf = 1
    min_gain_to_split = 0.0
    n_bins = 255
    n_samples = 100
    X_binned = np.asfortranarray(
        rng.randint(0, n_bins, size=(n_samples, 1)), dtype=X_BINNED_DTYPE
    )
    binned_feature = X_binned[:, 0]
    sample_indices = np.arange(n_samples, dtype=np.uint32)
    # Identical gradients and hessians everywhere: the node is pure, so no
    # split can have a strictly positive gain.
    all_hessians = np.ones_like(binned_feature, dtype=G_H_DTYPE)
    all_gradients = np.ones_like(binned_feature, dtype=G_H_DTYPE)
    sum_gradients = all_gradients.sum()
    sum_hessians = all_hessians.sum()
    hessians_are_constant = False
    builder = HistogramBuilder(
        X_binned, n_bins, all_gradients, all_hessians, hessians_are_constant, n_threads
    )
    n_bins_non_missing = np.array([n_bins - 1] * X_binned.shape[1], dtype=np.uint32)
    has_missing_values = np.array([False] * X_binned.shape[1], dtype=np.uint8)
    monotonic_cst = np.array(
        [MonotonicConstraint.NO_CST] * X_binned.shape[1], dtype=np.int8
    )
    is_categorical = np.zeros_like(monotonic_cst, dtype=np.uint8)
    missing_values_bin_idx = n_bins - 1
    splitter = Splitter(
        X_binned,
        n_bins_non_missing,
        missing_values_bin_idx,
        has_missing_values,
        is_categorical,
        monotonic_cst,
        l2_regularization,
        min_hessian_to_split,
        min_samples_leaf,
        min_gain_to_split,
        hessians_are_constant,
    )
    histograms = builder.compute_histograms_brute(sample_indices)
    value = compute_node_value(
        sum_gradients, sum_hessians, -np.inf, np.inf, l2_regularization
    )
    split_info = splitter.find_node_split(
        n_samples, histograms, sum_gradients, sum_hessians, value
    )
    # A gain of -1 is the sentinel for "no valid split found".
    assert split_info.gain == -1
@pytest.mark.parametrize(
    (
        "X_binned, all_gradients, has_missing_values, n_bins_non_missing, "
        " expected_split_on_nan, expected_bin_idx, expected_go_to_left"
    ),
    [
        # basic sanity check with no missing values: given the gradient
        # values, the split must occur on bin_idx=3
        (
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], # X_binned
            [1, 1, 1, 1, 5, 5, 5, 5, 5, 5], # gradients
            False, # no missing values
            10, # n_bins_non_missing
            False, # don't split on nans
            3, # expected_bin_idx
            "not_applicable",
        ),
        # We replace 2 samples by NaNs (bin_idx=8)
        # These 2 samples were mapped to the left node before, so they should
        # be mapped to left node again
        # Notice how the bin_idx threshold changes from 3 to 1.
        (
            [8, 0, 1, 8, 2, 3, 4, 5, 6, 7], # 8 <=> missing
            [1, 1, 1, 1, 5, 5, 5, 5, 5, 5],
            True, # missing values
            8, # n_bins_non_missing
            False, # don't split on nans
            1, # cut on bin_idx=1
            True,
        ), # missing values go to left
        # same as above, but with non-consecutive missing_values_bin
        (
            [9, 0, 1, 9, 2, 3, 4, 5, 6, 7], # 9 <=> missing
            [1, 1, 1, 1, 5, 5, 5, 5, 5, 5],
            True, # missing values
            8, # n_bins_non_missing
            False, # don't split on nans
            1, # cut on bin_idx=1
            True,
        ), # missing values go to left
        # this time replacing 2 samples that were on the right.
        (
            [0, 1, 2, 3, 8, 4, 8, 5, 6, 7], # 8 <=> missing
            [1, 1, 1, 1, 5, 5, 5, 5, 5, 5],
            True, # missing values
            8, # n_bins_non_missing
            False, # don't split on nans
            3, # cut on bin_idx=3 (like in first case)
            False,
        ), # missing values go to right
        # same as above, but with non-consecutive missing_values_bin
        (
            [0, 1, 2, 3, 9, 4, 9, 5, 6, 7], # 9 <=> missing
            [1, 1, 1, 1, 5, 5, 5, 5, 5, 5],
            True, # missing values
            8, # n_bins_non_missing
            False, # don't split on nans
            3, # cut on bin_idx=3 (like in first case)
            False,
        ), # missing values go to right
        # For the following cases, split_on_nans is True (we replace all of
        # the samples with nans, instead of just 2).
        (
            [0, 1, 2, 3, 4, 4, 4, 4, 4, 4], # 4 <=> missing
            [1, 1, 1, 1, 5, 5, 5, 5, 5, 5],
            True, # missing values
            4, # n_bins_non_missing
            True, # split on nans
            3, # cut on bin_idx=3
            False,
        ), # missing values go to right
        # same as above, but with non-consecutive missing_values_bin
        (
            [0, 1, 2, 3, 9, 9, 9, 9, 9, 9], # 9 <=> missing
            [1, 1, 1, 1, 1, 1, 5, 5, 5, 5],
            True, # missing values
            4, # n_bins_non_missing
            True, # split on nans
            3, # cut on bin_idx=3
            False,
        ), # missing values go to right
        (
            [6, 6, 6, 6, 0, 1, 2, 3, 4, 5], # 6 <=> missing
            [1, 1, 1, 1, 5, 5, 5, 5, 5, 5],
            True, # missing values
            6, # n_bins_non_missing
            True, # split on nans
            5, # cut on bin_idx=5
            False,
        ), # missing values go to right
        # same as above, but with non-consecutive missing_values_bin
        (
            [9, 9, 9, 9, 0, 1, 2, 3, 4, 5], # 9 <=> missing
            [1, 1, 1, 1, 5, 5, 5, 5, 5, 5],
            True, # missing values
            6, # n_bins_non_missing
            True, # split on nans
            5, # cut on bin_idx=5
            False,
        ), # missing values go to right
    ],
)
def test_splitting_missing_values(
    X_binned,
    all_gradients,
    has_missing_values,
    n_bins_non_missing,
    expected_split_on_nan,
    expected_bin_idx,
    expected_go_to_left,
):
    # Make sure missing values are properly supported.
    # we build an artificial example with gradients such that the best split
    # is on bin_idx=3, when there are no missing values.
    # Then we introduce missing values and:
    # - make sure the chosen bin is correct (find_best_bin()): it's
    # still the same split, even though the index of the bin may change
    # - make sure the missing values are mapped to the correct child
    # (split_indices())
    n_bins = max(X_binned) + 1
    n_samples = len(X_binned)
    l2_regularization = 0.0
    min_hessian_to_split = 1e-3
    min_samples_leaf = 1
    min_gain_to_split = 0.0
    sample_indices = np.arange(n_samples, dtype=np.uint32)
    X_binned = np.array(X_binned, dtype=X_BINNED_DTYPE).reshape(-1, 1)
    X_binned = np.asfortranarray(X_binned)
    all_gradients = np.array(all_gradients, dtype=G_H_DTYPE)
    has_missing_values = np.array([has_missing_values], dtype=np.uint8)
    all_hessians = np.ones(1, dtype=G_H_DTYPE)
    sum_gradients = all_gradients.sum()
    sum_hessians = 1 * n_samples
    hessians_are_constant = True
    builder = HistogramBuilder(
        X_binned, n_bins, all_gradients, all_hessians, hessians_are_constant, n_threads
    )
    n_bins_non_missing = np.array([n_bins_non_missing], dtype=np.uint32)
    monotonic_cst = np.array(
        [MonotonicConstraint.NO_CST] * X_binned.shape[1], dtype=np.int8
    )
    is_categorical = np.zeros_like(monotonic_cst, dtype=np.uint8)
    # Missing values are mapped to the last bin by convention.
    missing_values_bin_idx = n_bins - 1
    splitter = Splitter(
        X_binned,
        n_bins_non_missing,
        missing_values_bin_idx,
        has_missing_values,
        is_categorical,
        monotonic_cst,
        l2_regularization,
        min_hessian_to_split,
        min_samples_leaf,
        min_gain_to_split,
        hessians_are_constant,
    )
    histograms = builder.compute_histograms_brute(sample_indices)
    value = compute_node_value(
        sum_gradients, sum_hessians, -np.inf, np.inf, l2_regularization
    )
    split_info = splitter.find_node_split(
        n_samples, histograms, sum_gradients, sum_hessians, value
    )
    assert split_info.bin_idx == expected_bin_idx
    if has_missing_values:
        assert split_info.missing_go_to_left == expected_go_to_left
    # A split on the last non-missing bin is a "split on nans".
    split_on_nan = split_info.bin_idx == n_bins_non_missing[0] - 1
    assert split_on_nan == expected_split_on_nan
    # Make sure the split is properly computed.
    # This also make sure missing values are properly assigned to the correct
    # child in split_indices()
    samples_left, samples_right, _ = splitter.split_indices(
        split_info, splitter.partition
    )
    if not expected_split_on_nan:
        # When we don't split on nans, the split should always be the same.
        assert set(samples_left) == set([0, 1, 2, 3])
        assert set(samples_right) == set([4, 5, 6, 7, 8, 9])
    else:
        # When we split on nans, samples with missing values are always mapped
        # to the right child.
        missing_samples_indices = np.flatnonzero(
            np.array(X_binned) == missing_values_bin_idx
        )
        non_missing_samples_indices = np.flatnonzero(
            np.array(X_binned) != missing_values_bin_idx
        )
        assert set(samples_right) == set(missing_samples_indices)
        assert set(samples_left) == set(non_missing_samples_indices)
@pytest.mark.parametrize(
    "X_binned, has_missing_values, n_bins_non_missing, ",
    [
        # one category
        ([0] * 20, False, 1),
        # all categories appear less than MIN_CAT_SUPPORT (hardcoded to 10)
        ([0] * 9 + [1] * 8, False, 2),
        # only one category appears more than MIN_CAT_SUPPORT
        ([0] * 12 + [1] * 8, False, 2),
        # missing values + category appear less than MIN_CAT_SUPPORT
        # 9 is missing
        ([0] * 9 + [1] * 8 + [9] * 4, True, 2),
        # no non-missing category
        ([9] * 11, True, 0),
    ],
)
def test_splitting_categorical_cat_smooth(
    X_binned, has_missing_values, n_bins_non_missing
):
    # Checks categorical splits are correct when the MIN_CAT_SUPPORT constraint
    # isn't respected: there are no splits
    n_bins = max(X_binned) + 1
    n_samples = len(X_binned)
    X_binned = np.array([X_binned], dtype=X_BINNED_DTYPE).T
    X_binned = np.asfortranarray(X_binned)
    l2_regularization = 0.0
    min_hessian_to_split = 1e-3
    min_samples_leaf = 1
    min_gain_to_split = 0.0
    sample_indices = np.arange(n_samples, dtype=np.uint32)
    all_gradients = np.ones(n_samples, dtype=G_H_DTYPE)
    has_missing_values = np.array([has_missing_values], dtype=np.uint8)
    all_hessians = np.ones(1, dtype=G_H_DTYPE)
    sum_gradients = all_gradients.sum()
    sum_hessians = n_samples
    hessians_are_constant = True
    builder = HistogramBuilder(
        X_binned, n_bins, all_gradients, all_hessians, hessians_are_constant, n_threads
    )
    n_bins_non_missing = np.array([n_bins_non_missing], dtype=np.uint32)
    monotonic_cst = np.array(
        [MonotonicConstraint.NO_CST] * X_binned.shape[1], dtype=np.int8
    )
    # The single feature is flagged as categorical.
    is_categorical = np.ones_like(monotonic_cst, dtype=np.uint8)
    missing_values_bin_idx = n_bins - 1
    splitter = Splitter(
        X_binned,
        n_bins_non_missing,
        missing_values_bin_idx,
        has_missing_values,
        is_categorical,
        monotonic_cst,
        l2_regularization,
        min_hessian_to_split,
        min_samples_leaf,
        min_gain_to_split,
        hessians_are_constant,
    )
    histograms = builder.compute_histograms_brute(sample_indices)
    value = compute_node_value(
        sum_gradients, sum_hessians, -np.inf, np.inf, l2_regularization
    )
    split_info = splitter.find_node_split(
        n_samples, histograms, sum_gradients, sum_hessians, value
    )
    # no split found
    assert split_info.gain == -1
def _assert_categories_equals_bitset(categories, bitset):
# assert that the bitset exactly corresponds to the categories
# bitset is assumed to be an array of 8 uint32 elements
# form bitset from threshold
expected_bitset = np.zeros(8, dtype=np.uint32)
for cat in categories:
idx = cat // 32
shift = cat % 32
expected_bitset[idx] |= 1 << shift
# check for equality
assert_array_equal(expected_bitset, bitset)
@pytest.mark.parametrize(
    (
        "X_binned, all_gradients, expected_categories_left, n_bins_non_missing,"
        "missing_values_bin_idx, has_missing_values, expected_missing_go_to_left"
    ),
    [
        # 4 categories
        (
            [0, 1, 2, 3] * 11, # X_binned
            [10, 1, 10, 10] * 11, # all_gradients
            [1], # expected_categories_left
            4, # n_bins_non_missing
            4, # missing_values_bin_idx
            False, # has_missing_values
            None,
        ), # expected_missing_go_to_left, unchecked
        # Make sure that the categories that are on the right (second half) of
        # the sorted categories array can still go in the left child. In this
        # case, the best split was found when scanning from right to left.
        (
            [0, 1, 2, 3] * 11, # X_binned
            [10, 10, 10, 1] * 11, # all_gradients
            [3], # expected_categories_left
            4, # n_bins_non_missing
            4, # missing_values_bin_idx
            False, # has_missing_values
            None,
        ), # expected_missing_go_to_left, unchecked
        # categories that don't respect MIN_CAT_SUPPORT (cat 4) are always
        # mapped to the right child
        (
            [0, 1, 2, 3] * 11 + [4] * 5, # X_binned
            [10, 10, 10, 1] * 11 + [10] * 5, # all_gradients
            [3], # expected_categories_left
            4, # n_bins_non_missing
            4, # missing_values_bin_idx
            False, # has_missing_values
            None,
        ), # expected_missing_go_to_left, unchecked
        # categories that don't respect MIN_CAT_SUPPORT are always mapped to
        # the right child: in this case a more sensible split could have been
        # 3, 4 - 0, 1, 2
        # But the split is still 3 - 0, 1, 2, 4. this is because we only scan
        # up to the middle of the sorted category array (0, 1, 2, 3), and
        # because we exclude cat 4 in this array.
        (
            [0, 1, 2, 3] * 11 + [4] * 5, # X_binned
            [10, 10, 10, 1] * 11 + [1] * 5, # all_gradients
            [3], # expected_categories_left
            4, # n_bins_non_missing
            4, # missing_values_bin_idx
            False, # has_missing_values
            None,
        ), # expected_missing_go_to_left, unchecked
        # 4 categories with missing values that go to the right
        (
            [0, 1, 2] * 11 + [9] * 11, # X_binned
            [10, 1, 10] * 11 + [10] * 11, # all_gradients
            [1], # expected_categories_left
            3, # n_bins_non_missing
            9, # missing_values_bin_idx
            True, # has_missing_values
            False,
        ), # expected_missing_go_to_left
        # 4 categories with missing values that go to the left
        (
            [0, 1, 2] * 11 + [9] * 11, # X_binned
            [10, 1, 10] * 11 + [1] * 11, # all_gradients
            [1, 9], # expected_categories_left
            3, # n_bins_non_missing
            9, # missing_values_bin_idx
            True, # has_missing_values
            True,
        ), # expected_missing_go_to_left
        # split is on the missing value
        (
            [0, 1, 2, 3, 4] * 11 + [255] * 12, # X_binned
            [10, 10, 10, 10, 10] * 11 + [1] * 12, # all_gradients
            [255], # expected_categories_left
            5, # n_bins_non_missing
            255, # missing_values_bin_idx
            True, # has_missing_values
            True,
        ), # expected_missing_go_to_left
        # split on even categories
        (
            list(range(60)) * 12, # X_binned
            [10, 1] * 360, # all_gradients
            list(range(1, 60, 2)), # expected_categories_left
            59, # n_bins_non_missing
            59, # missing_values_bin_idx
            True, # has_missing_values
            True,
        ), # expected_missing_go_to_left
        # split on every 8 categories
        (
            list(range(256)) * 12, # X_binned
            [10, 10, 10, 10, 10, 10, 10, 1] * 384, # all_gradients
            list(range(7, 256, 8)), # expected_categories_left
            255, # n_bins_non_missing
            255, # missing_values_bin_idx
            True, # has_missing_values
            True,
        ), # expected_missing_go_to_left
    ],
)
def test_splitting_categorical_sanity(
    X_binned,
    all_gradients,
    expected_categories_left,
    n_bins_non_missing,
    missing_values_bin_idx,
    has_missing_values,
    expected_missing_go_to_left,
):
    # Tests various combinations of categorical splits
    n_samples = len(X_binned)
    n_bins = max(X_binned) + 1
    X_binned = np.array(X_binned, dtype=X_BINNED_DTYPE).reshape(-1, 1)
    X_binned = np.asfortranarray(X_binned)
    l2_regularization = 0.0
    min_hessian_to_split = 1e-3
    min_samples_leaf = 1
    min_gain_to_split = 0.0
    sample_indices = np.arange(n_samples, dtype=np.uint32)
    all_gradients = np.array(all_gradients, dtype=G_H_DTYPE)
    all_hessians = np.ones(1, dtype=G_H_DTYPE)
    has_missing_values = np.array([has_missing_values], dtype=np.uint8)
    sum_gradients = all_gradients.sum()
    sum_hessians = n_samples
    hessians_are_constant = True
    builder = HistogramBuilder(
        X_binned, n_bins, all_gradients, all_hessians, hessians_are_constant, n_threads
    )
    n_bins_non_missing = np.array([n_bins_non_missing], dtype=np.uint32)
    monotonic_cst = np.array(
        [MonotonicConstraint.NO_CST] * X_binned.shape[1], dtype=np.int8
    )
    # The single feature is flagged as categorical.
    is_categorical = np.ones_like(monotonic_cst, dtype=np.uint8)
    splitter = Splitter(
        X_binned,
        n_bins_non_missing,
        missing_values_bin_idx,
        has_missing_values,
        is_categorical,
        monotonic_cst,
        l2_regularization,
        min_hessian_to_split,
        min_samples_leaf,
        min_gain_to_split,
        hessians_are_constant,
    )
    histograms = builder.compute_histograms_brute(sample_indices)
    value = compute_node_value(
        sum_gradients, sum_hessians, -np.inf, np.inf, l2_regularization
    )
    split_info = splitter.find_node_split(
        n_samples, histograms, sum_gradients, sum_hessians, value
    )
    assert split_info.is_categorical
    assert split_info.gain > 0
    _assert_categories_equals_bitset(
        expected_categories_left, split_info.left_cat_bitset
    )
    if has_missing_values:
        assert split_info.missing_go_to_left == expected_missing_go_to_left
    # If there is no missing value during training, the flag missing_go_to_left
    # is set later in the grower.
    # make sure samples are split correctly
    samples_left, samples_right, _ = splitter.split_indices(
        split_info, splitter.partition
    )
    left_mask = np.isin(X_binned.ravel(), expected_categories_left)
    assert_array_equal(sample_indices[left_mask], samples_left)
    assert_array_equal(sample_indices[~left_mask], samples_right)
def test_split_interaction_constraints():
"""Check that allowed_features are respected."""
n_features = 4
# features 1 and 2 are not allowed to be split on
allowed_features = np.array([0, 3], dtype=np.uint32)
n_bins = 5
n_samples = 10
l2_regularization = 0.0
min_hessian_to_split = 1e-3
min_samples_leaf = 1
min_gain_to_split = 0.0
sample_indices = np.arange(n_samples, dtype=np.uint32)
all_hessians = np.ones(1, dtype=G_H_DTYPE)
sum_hessians = n_samples
hessians_are_constant = True
split_features = []
# The loop is to ensure that we split at least once on each allowed feature (0, 3).
# This is tracked by split_features and checked at the end.
for i in range(10):
rng = np.random.RandomState(919 + i)
X_binned = np.asfortranarray(
rng.randint(0, n_bins - 1, size=(n_samples, n_features)),
dtype=X_BINNED_DTYPE,
)
X_binned = np.asfortranarray(X_binned, dtype=X_BINNED_DTYPE)
# Make feature 1 very important
all_gradients = (10 * X_binned[:, 1] + rng.randn(n_samples)).astype(G_H_DTYPE)
sum_gradients = all_gradients.sum()
builder = HistogramBuilder(
X_binned,
n_bins,
all_gradients,
all_hessians,
hessians_are_constant,
n_threads,
)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/ensemble/_hist_gradient_boosting/tests/__init__.py | sklearn/ensemble/_hist_gradient_boosting/tests/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/ensemble/_hist_gradient_boosting/tests/test_compare_lightgbm.py | sklearn/ensemble/_hist_gradient_boosting/tests/test_compare_lightgbm.py | import numpy as np
import pytest
from sklearn.datasets import make_classification, make_regression
from sklearn.ensemble import (
HistGradientBoostingClassifier,
HistGradientBoostingRegressor,
)
from sklearn.ensemble._hist_gradient_boosting.binning import _BinMapper
from sklearn.ensemble._hist_gradient_boosting.utils import get_equivalent_estimator
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
@pytest.mark.parametrize("seed", range(5))
@pytest.mark.parametrize(
    "loss",
    [
        "squared_error",
        "poisson",
        pytest.param(
            "gamma",
            marks=pytest.mark.skip("LightGBM with gamma loss has larger deviation."),
        ),
    ],
)
@pytest.mark.parametrize("min_samples_leaf", (1, 20))
@pytest.mark.parametrize(
    "n_samples, max_leaf_nodes",
    [
        (255, 4096),
        (1000, 8),
    ],
)
def test_same_predictions_regression(
    seed, loss, min_samples_leaf, n_samples, max_leaf_nodes
):
    # Make sure sklearn has the same predictions as lightgbm for easy targets.
    #
    # In particular when the size of the trees are bound and the number of
    # samples is large enough, the structure of the prediction trees found by
    # LightGBM and sklearn should be exactly identical.
    #
    # Notes:
    # - Several candidate splits may have equal gains when the number of
    # samples in a node is low (and because of float errors). Therefore the
    # predictions on the test set might differ if the structure of the tree
    # is not exactly the same. To avoid this issue we only compare the
    # predictions on the test set when the number of samples is large enough
    # and max_leaf_nodes is low enough.
    # - To ignore discrepancies caused by small differences in the binning
    # strategy, data is pre-binned if n_samples > 255.
    # - We don't check the absolute_error loss here. This is because
    # LightGBM's computation of the median (used for the initial value of
    # raw_prediction) is a bit off (they'll e.g. return midpoints when there
    # is no need to.). Since these tests only run 1 iteration, the
    # discrepancy between the initial values leads to biggish differences in
    # the predictions. These differences are much smaller with more
    # iterations.
    pytest.importorskip("lightgbm")
    rng = np.random.RandomState(seed=seed)
    max_iter = 1
    max_bins = 255
    X, y = make_regression(
        n_samples=n_samples, n_features=5, n_informative=5, random_state=0
    )
    if loss in ("gamma", "poisson"):
        # make the target positive
        y = np.abs(y) + np.mean(np.abs(y))
    if n_samples > 255:
        # bin data and convert it to float32 so that the estimator doesn't
        # treat it as pre-binned
        X = _BinMapper(n_bins=max_bins + 1).fit_transform(X).astype(np.float32)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=rng)
    est_sklearn = HistGradientBoostingRegressor(
        loss=loss,
        max_iter=max_iter,
        max_bins=max_bins,
        learning_rate=1,
        early_stopping=False,
        min_samples_leaf=min_samples_leaf,
        max_leaf_nodes=max_leaf_nodes,
    )
    est_lightgbm = get_equivalent_estimator(est_sklearn, lib="lightgbm")
    # Disable LightGBM's hessian-based leaf constraint to match sklearn.
    est_lightgbm.set_params(min_sum_hessian_in_leaf=0)
    est_lightgbm.fit(X_train, y_train)
    est_sklearn.fit(X_train, y_train)
    # We need X to be treated a numerical data, not pre-binned data.
    X_train, X_test = X_train.astype(np.float32), X_test.astype(np.float32)
    pred_lightgbm = est_lightgbm.predict(X_train)
    pred_sklearn = est_sklearn.predict(X_train)
    if loss in ("gamma", "poisson"):
        # More than 65% of the predictions must be close up to the 2nd decimal.
        # TODO: We are not entirely satisfied with this lax comparison, but the root
        # cause is not clear, maybe algorithmic differences. One such example is the
        # poisson_max_delta_step parameter of LightGBM which does not exist in HGBT.
        assert (
            np.mean(np.isclose(pred_lightgbm, pred_sklearn, rtol=1e-2, atol=1e-2))
            > 0.65
        )
    else:
        # Less than 1% of the predictions may deviate more than 1e-3 in relative terms.
        assert np.mean(np.isclose(pred_lightgbm, pred_sklearn, rtol=1e-3)) > 1 - 0.01
    if max_leaf_nodes < 10 and n_samples >= 1000 and loss in ("squared_error",):
        pred_lightgbm = est_lightgbm.predict(X_test)
        pred_sklearn = est_sklearn.predict(X_test)
        # Less than 1% of the predictions may deviate more than 1e-4 in relative terms.
        assert np.mean(np.isclose(pred_lightgbm, pred_sklearn, rtol=1e-4)) > 1 - 0.01
@pytest.mark.parametrize("seed", range(5))
@pytest.mark.parametrize("min_samples_leaf", (1, 20))
@pytest.mark.parametrize(
    "n_samples, max_leaf_nodes",
    [
        (255, 4096),
        (1000, 8),
    ],
)
def test_same_predictions_classification(
    seed, min_samples_leaf, n_samples, max_leaf_nodes
):
    # Same as test_same_predictions_regression but for classification
    pytest.importorskip("lightgbm")
    rng = np.random.RandomState(seed=seed)
    max_iter = 1
    n_classes = 2
    max_bins = 255
    X, y = make_classification(
        n_samples=n_samples,
        n_classes=n_classes,
        n_features=5,
        n_informative=5,
        n_redundant=0,
        random_state=0,
    )
    if n_samples > 255:
        # bin data and convert it to float32 so that the estimator doesn't
        # treat it as pre-binned
        X = _BinMapper(n_bins=max_bins + 1).fit_transform(X).astype(np.float32)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=rng)
    est_sklearn = HistGradientBoostingClassifier(
        loss="log_loss",
        max_iter=max_iter,
        max_bins=max_bins,
        learning_rate=1,
        early_stopping=False,
        min_samples_leaf=min_samples_leaf,
        max_leaf_nodes=max_leaf_nodes,
    )
    est_lightgbm = get_equivalent_estimator(
        est_sklearn, lib="lightgbm", n_classes=n_classes
    )
    est_lightgbm.fit(X_train, y_train)
    est_sklearn.fit(X_train, y_train)
    # We need X to be treated a numerical data, not pre-binned data.
    X_train, X_test = X_train.astype(np.float32), X_test.astype(np.float32)
    pred_lightgbm = est_lightgbm.predict(X_train)
    pred_sklearn = est_sklearn.predict(X_train)
    # At least 89% of the class predictions must agree on the training set.
    assert np.mean(pred_sklearn == pred_lightgbm) > 0.89
    acc_lightgbm = accuracy_score(y_train, pred_lightgbm)
    acc_sklearn = accuracy_score(y_train, pred_sklearn)
    np.testing.assert_almost_equal(acc_lightgbm, acc_sklearn)
    # Only compare test-set predictions when trees are small and data is
    # large enough for the tree structures to be stable (see regression test).
    if max_leaf_nodes < 10 and n_samples >= 1000:
        pred_lightgbm = est_lightgbm.predict(X_test)
        pred_sklearn = est_sklearn.predict(X_test)
        assert np.mean(pred_sklearn == pred_lightgbm) > 0.89
        acc_lightgbm = accuracy_score(y_test, pred_lightgbm)
        acc_sklearn = accuracy_score(y_test, pred_sklearn)
        np.testing.assert_almost_equal(acc_lightgbm, acc_sklearn, decimal=2)
@pytest.mark.parametrize("seed", range(5))
@pytest.mark.parametrize("min_samples_leaf", (1, 20))
@pytest.mark.parametrize(
    "n_samples, max_leaf_nodes",
    [
        (255, 4096),
        (10000, 8),
    ],
)
def test_same_predictions_multiclass_classification(
    seed, min_samples_leaf, n_samples, max_leaf_nodes
):
    """Check that sklearn's HistGradientBoostingClassifier and LightGBM give
    near-identical predictions and probabilities on multiclass data."""
    # Same as test_same_predictions_regression but for classification
    pytest.importorskip("lightgbm")
    rng = np.random.RandomState(seed=seed)
    n_classes = 3
    max_iter = 1
    max_bins = 255
    lr = 1
    X, y = make_classification(
        n_samples=n_samples,
        n_classes=n_classes,
        n_features=5,
        n_informative=5,
        n_redundant=0,
        n_clusters_per_class=1,
        random_state=0,
    )
    if n_samples > 255:
        # bin data and convert it to float32 so that the estimator doesn't
        # treat it as pre-binned
        X = _BinMapper(n_bins=max_bins + 1).fit_transform(X).astype(np.float32)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=rng)
    est_sklearn = HistGradientBoostingClassifier(
        loss="log_loss",
        max_iter=max_iter,
        max_bins=max_bins,
        learning_rate=lr,
        early_stopping=False,
        min_samples_leaf=min_samples_leaf,
        max_leaf_nodes=max_leaf_nodes,
    )
    est_lightgbm = get_equivalent_estimator(
        est_sklearn, lib="lightgbm", n_classes=n_classes
    )
    est_lightgbm.fit(X_train, y_train)
    est_sklearn.fit(X_train, y_train)
    # We need X to be treated as numerical data, not pre-binned data.
    X_train, X_test = X_train.astype(np.float32), X_test.astype(np.float32)
    pred_lightgbm = est_lightgbm.predict(X_train)
    pred_sklearn = est_sklearn.predict(X_train)
    assert np.mean(pred_sklearn == pred_lightgbm) > 0.89
    proba_lightgbm = est_lightgbm.predict_proba(X_train)
    proba_sklearn = est_sklearn.predict_proba(X_train)
    # assert more than 75% of the predicted probabilities are the same up to
    # the second decimal
    assert np.mean(np.abs(proba_lightgbm - proba_sklearn) < 1e-2) > 0.75
    acc_lightgbm = accuracy_score(y_train, pred_lightgbm)
    acc_sklearn = accuracy_score(y_train, pred_sklearn)
    np.testing.assert_allclose(acc_lightgbm, acc_sklearn, rtol=0, atol=5e-2)
    if max_leaf_nodes < 10 and n_samples >= 1000:
        pred_lightgbm = est_lightgbm.predict(X_test)
        pred_sklearn = est_sklearn.predict(X_test)
        assert np.mean(pred_sklearn == pred_lightgbm) > 0.89
        # Bug fix: this branch previously recomputed probabilities on X_train;
        # compare probabilities on X_test, consistently with the hard
        # predictions just above.
        proba_lightgbm = est_lightgbm.predict_proba(X_test)
        proba_sklearn = est_sklearn.predict_proba(X_test)
        # assert more than 75% of the predicted probabilities are the same up
        # to the second decimal
        assert np.mean(np.abs(proba_lightgbm - proba_sklearn) < 1e-2) > 0.75
        acc_lightgbm = accuracy_score(y_test, pred_lightgbm)
        acc_sklearn = accuracy_score(y_test, pred_sklearn)
        np.testing.assert_almost_equal(acc_lightgbm, acc_sklearn, decimal=2)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/ensemble/_hist_gradient_boosting/tests/test_monotonic_constraints.py | sklearn/ensemble/_hist_gradient_boosting/tests/test_monotonic_constraints.py | import re
import numpy as np
import pytest
from sklearn.ensemble import (
HistGradientBoostingClassifier,
HistGradientBoostingRegressor,
)
from sklearn.ensemble._hist_gradient_boosting.common import (
G_H_DTYPE,
X_BINNED_DTYPE,
MonotonicConstraint,
)
from sklearn.ensemble._hist_gradient_boosting.grower import TreeGrower
from sklearn.ensemble._hist_gradient_boosting.histogram import HistogramBuilder
from sklearn.ensemble._hist_gradient_boosting.splitting import (
Splitter,
compute_node_value,
)
from sklearn.utils._openmp_helpers import _openmp_effective_n_threads
from sklearn.utils._testing import _convert_container
n_threads = _openmp_effective_n_threads()
def is_increasing(a):
    """Return True when the values of ``a`` never decrease left to right."""
    return np.all(np.diff(a) >= 0.0)
def is_decreasing(a):
    """Return True when the values of ``a`` never increase left to right."""
    return np.all(np.diff(a) <= 0.0)
def assert_leaves_values_monotonic(predictor, monotonic_cst):
    """Check that the predictor's leaf values, read from left to right, are
    all increasing (POS), all decreasing (NEG), or neither (NO_CST)."""
    nodes = predictor.nodes

    def collect_leaf_values():
        """Leaf values in left-to-right order, via an explicit DFS stack."""
        values = []
        stack = [0]  # start at the root node (index 0)
        while stack:
            node = nodes[stack.pop()]
            if node["is_leaf"]:
                values.append(node["value"])
            else:
                # push the right child first so the left subtree is
                # visited first, preserving left-to-right order
                stack.append(node["right"])
                stack.append(node["left"])
        return values

    values = collect_leaf_values()
    if monotonic_cst == MonotonicConstraint.NO_CST:
        # without a constraint, the leaves must be neither sorted way
        assert not (is_increasing(values) or is_decreasing(values))
    elif monotonic_cst == MonotonicConstraint.POS:
        assert is_increasing(values)
    else:  # MonotonicConstraint.NEG
        assert is_decreasing(values)
def assert_children_values_monotonic(predictor, monotonic_cst):
    """Check that sibling node values respect the monotonic constraint.

    The left child's value must be lower (resp. greater) than the right
    child's when the constraint is POS (resp. NEG). Note that this property
    alone is not enough to ensure full monotonicity: the descendants of the
    left child must also be bounded relative to the right child, which is
    checked separately in assert_children_values_bounded.
    """
    nodes = predictor.nodes
    left_lower = []
    left_greater = []
    for node in nodes:
        if node["is_leaf"]:
            continue
        left_value = nodes[node["left"]]["value"]
        right_value = nodes[node["right"]]["value"]
        if left_value < right_value:
            left_lower.append(node)
        elif left_value > right_value:
            left_greater.append(node)
    if monotonic_cst == MonotonicConstraint.NO_CST:
        # both orderings should occur somewhere in the tree
        assert left_lower and left_greater
    elif monotonic_cst == MonotonicConstraint.POS:
        # the left child is never greater than the right one
        assert left_lower and not left_greater
    else:  # MonotonicConstraint.NEG
        assert not left_lower and left_greater
def assert_children_values_bounded(grower, monotonic_cst):
    # Make sure that the values of the children of a node are bounded by the
    # middle value between that node and its sibling (if there is a monotonic
    # constraint).
    # As a bonus, we also check that the siblings values are properly ordered
    # which is slightly redundant with assert_children_values_monotonic (but
    # this check is done on the grower nodes whereas
    # assert_children_values_monotonic is done on the predictor nodes)
    if monotonic_cst == MonotonicConstraint.NO_CST:
        return
    def recursively_check_children_node_values(node, right_sibling=None):
        # Check node's children against the midpoint bound shared with its
        # right sibling, then recurse: the left child carries its sibling
        # (the right child) so the shared bound can be computed one level down.
        if node.is_leaf:
            return
        if right_sibling is not None:
            # bound shared by the two siblings: the midpoint of their values
            middle = (node.value + right_sibling.value) / 2
            if monotonic_cst == MonotonicConstraint.POS:
                assert node.left_child.value <= node.right_child.value <= middle
                if not right_sibling.is_leaf:
                    assert (
                        middle
                        <= right_sibling.left_child.value
                        <= right_sibling.right_child.value
                    )
            else:  # NEG
                assert node.left_child.value >= node.right_child.value >= middle
                if not right_sibling.is_leaf:
                    assert (
                        middle
                        >= right_sibling.left_child.value
                        >= right_sibling.right_child.value
                    )
        recursively_check_children_node_values(
            node.left_child, right_sibling=node.right_child
        )
        recursively_check_children_node_values(node.right_child)
    recursively_check_children_node_values(grower.root)
@pytest.mark.parametrize("seed", range(3))
@pytest.mark.parametrize(
    "monotonic_cst",
    (
        MonotonicConstraint.NO_CST,
        MonotonicConstraint.POS,
        MonotonicConstraint.NEG,
    ),
)
def test_nodes_values(monotonic_cst, seed):
    """Grow a single-feature tree and check that all node/leaf values
    respect the requested monotonic constraint."""
    # Build a single tree with only one feature, and make sure the nodes
    # values respect the monotonic constraints.
    # Considering the following tree with a monotonic POS constraint, we
    # should have:
    #
    #       root
    #      /    \
    #     5     10    # middle = 7.5
    #    / \   / \
    #   a  b  c  d
    #
    # a <= b and c <= d  (assert_children_values_monotonic)
    # a, b <= middle <= c, d (assert_children_values_bounded)
    # a <= b <= c <= d (assert_leaves_values_monotonic)
    #
    # The last one is a consequence of the others, but can't hurt to check
    rng = np.random.RandomState(seed)
    n_samples = 1000
    n_features = 1
    X_binned = rng.randint(0, 255, size=(n_samples, n_features), dtype=np.uint8)
    X_binned = np.asfortranarray(X_binned)
    gradients = rng.normal(size=n_samples).astype(G_H_DTYPE)
    hessians = np.ones(shape=1, dtype=G_H_DTYPE)
    grower = TreeGrower(
        X_binned, gradients, hessians, monotonic_cst=[monotonic_cst], shrinkage=0.1
    )
    grower.grow()
    # grow() will shrink the leaves values at the very end. For our comparison
    # tests, we need to revert the shrinkage of the leaves, else we would
    # compare the value of a leaf (shrunk) with a node (not shrunk) and the
    # test would not be correct.
    for leave in grower.finalized_leaves:
        leave.value /= grower.shrinkage
    # We pass undefined binning_thresholds because we won't use predict anyway
    predictor = grower.make_predictor(
        binning_thresholds=np.zeros((X_binned.shape[1], X_binned.max() + 1))
    )
    # The consistency of the bounds can only be checked on the tree grower
    # as the node bounds are not copied into the predictor tree. The
    # consistency checks on the values of node children and leaves can be
    # done either on the grower tree or on the predictor tree. We only
    # do those checks on the predictor tree as the latter is derived from
    # the former.
    assert_children_values_monotonic(predictor, monotonic_cst)
    assert_children_values_bounded(grower, monotonic_cst)
    assert_leaves_values_monotonic(predictor, monotonic_cst)
@pytest.mark.parametrize("use_feature_names", (True, False))
def test_predictions(global_random_seed, use_feature_names):
    """End-to-end check that fitted predictions respect POS/NEG monotonic
    constraints, with extra categorical features to exercise constraint
    remapping (issue #28898)."""
    # Train a model with a POS constraint on the first non-categorical feature
    # and a NEG constraint on the second non-categorical feature, and make sure
    # the constraints are respected by checking the predictions.
    # test adapted from lightgbm's test_monotone_constraint(), itself inspired
    # by https://xgboost.readthedocs.io/en/latest/tutorials/monotonic.html
    rng = np.random.RandomState(global_random_seed)
    n_samples = 1000
    f_0 = rng.rand(n_samples)  # positive correlation with y
    f_1 = rng.rand(n_samples)  # negative correlation with y
    # extra categorical features, no correlation with y,
    # to check the correctness of monotonicity constraint remapping, see issue #28898
    f_a = rng.randint(low=0, high=9, size=n_samples)
    f_b = rng.randint(low=0, high=9, size=n_samples)
    f_c = rng.randint(low=0, high=9, size=n_samples)
    X = np.c_[f_a, f_0, f_b, f_1, f_c]
    columns_name = ["f_a", "f_0", "f_b", "f_1", "f_c"]
    constructor_name = "dataframe" if use_feature_names else "array"
    X = _convert_container(X, constructor_name, columns_name=columns_name)
    noise = rng.normal(loc=0.0, scale=0.01, size=n_samples)
    y = 5 * f_0 + np.sin(10 * np.pi * f_0) - 5 * f_1 - np.cos(10 * np.pi * f_1) + noise
    if use_feature_names:
        monotonic_cst = {"f_0": +1, "f_1": -1}
        categorical_features = ["f_a", "f_b", "f_c"]
    else:
        monotonic_cst = [0, +1, 0, -1, 0]
        categorical_features = [0, 2, 4]
    gbdt = HistGradientBoostingRegressor(
        monotonic_cst=monotonic_cst, categorical_features=categorical_features
    )
    gbdt.fit(X, y)
    linspace = np.linspace(0, 1, 100)
    sin = np.sin(linspace)
    constant = np.full_like(linspace, fill_value=0.5)
    # We now assert the predictions properly respect the constraints, on each
    # feature. When testing for a feature we need to set the other one to a
    # constant, because the monotonic constraints are only a "all else being
    # equal" type of constraints:
    # a constraint on the first feature only means that
    # x0 < x0' => f(x0, x1) < f(x0', x1)
    # while x1 stays constant.
    # The constraint does not guarantee that
    # x0 < x0' => f(x0, x1) < f(x0', x1')
    # First non-categorical feature (POS)
    # assert pred is all increasing when f_0 is all increasing
    X = np.c_[constant, linspace, constant, constant, constant]
    X = _convert_container(X, constructor_name, columns_name=columns_name)
    pred = gbdt.predict(X)
    assert is_increasing(pred)
    # assert pred actually follows the variations of f_0
    X = np.c_[constant, sin, constant, constant, constant]
    X = _convert_container(X, constructor_name, columns_name=columns_name)
    pred = gbdt.predict(X)
    assert np.all((np.diff(pred) >= 0) == (np.diff(sin) >= 0))
    # Second non-categorical feature (NEG)
    # assert pred is all decreasing when f_1 is all increasing
    X = np.c_[constant, constant, constant, linspace, constant]
    X = _convert_container(X, constructor_name, columns_name=columns_name)
    pred = gbdt.predict(X)
    assert is_decreasing(pred)
    # assert pred actually follows the inverse variations of f_1
    X = np.c_[constant, constant, constant, sin, constant]
    X = _convert_container(X, constructor_name, columns_name=columns_name)
    pred = gbdt.predict(X)
    assert ((np.diff(pred) <= 0) == (np.diff(sin) >= 0)).all()
def test_input_error():
    """Check the ValueErrors raised for invalid ``monotonic_cst`` inputs."""
    X = [[1, 2], [2, 3], [3, 4]]
    y = [0, 1, 2]
    # A constraint array whose length does not match n_features is rejected.
    gbdt = HistGradientBoostingRegressor(monotonic_cst=[1, 0, -1])
    expected_msg = re.escape("monotonic_cst has shape (3,) but the input data")
    with pytest.raises(ValueError, match=expected_msg):
        gbdt.fit(X, y)
    # Constraint values outside {-1, 0, 1} are rejected.
    for monotonic_cst in ([1, 3], [1, -3], [0.3, -0.7]):
        gbdt = HistGradientBoostingRegressor(monotonic_cst=monotonic_cst)
        expected_msg = re.escape(
            "must be an array-like of -1, 0 or 1. Observed values:"
        )
        with pytest.raises(ValueError, match=expected_msg):
            gbdt.fit(X, y)
    # y has three classes here, and multiclass does not support constraints.
    gbdt = HistGradientBoostingClassifier(monotonic_cst=[0, 1])
    expected_msg = (
        "monotonic constraints are not supported for multiclass classification"
    )
    with pytest.raises(ValueError, match=expected_msg):
        gbdt.fit(X, y)
def test_input_error_related_to_feature_names():
    """Check the errors raised when ``monotonic_cst`` is a dict keyed by
    feature names that are invalid or unusable."""
    pd = pytest.importorskip("pandas")
    X = pd.DataFrame({"a": [0, 1, 2], "b": [0, 1, 2]})
    y = np.array([0, 1, 0])
    # Keys that are not columns of X are reported, sorted.
    monotonic_cst = {"d": 1, "a": 1, "c": -1}
    gbdt = HistGradientBoostingRegressor(monotonic_cst=monotonic_cst)
    expected_msg = re.escape(
        "monotonic_cst contains 2 unexpected feature names: ['c', 'd']."
    )
    with pytest.raises(ValueError, match=expected_msg):
        gbdt.fit(X, y)
    # With many unexpected names, the message is truncated with '...'.
    monotonic_cst = {k: 1 for k in "abcdefghijklmnopqrstuvwxyz"}
    gbdt = HistGradientBoostingRegressor(monotonic_cst=monotonic_cst)
    expected_msg = re.escape(
        "monotonic_cst contains 24 unexpected feature names: "
        "['c', 'd', 'e', 'f', 'g', '...']."
    )
    with pytest.raises(ValueError, match=expected_msg):
        gbdt.fit(X, y)
    # A dict cannot be used when fitting on a plain array (no feature names).
    monotonic_cst = {"a": 1}
    gbdt = HistGradientBoostingRegressor(monotonic_cst=monotonic_cst)
    expected_msg = re.escape(
        "HistGradientBoostingRegressor was not fitted on data with feature "
        "names. Pass monotonic_cst as an integer array instead."
    )
    with pytest.raises(ValueError, match=expected_msg):
        gbdt.fit(X.values, y)
    # Dict values must still be -1, 0 or 1.
    monotonic_cst = {"b": -1, "a": "+"}
    gbdt = HistGradientBoostingRegressor(monotonic_cst=monotonic_cst)
    expected_msg = re.escape("monotonic_cst['a'] must be either -1, 0 or 1. Got '+'.")
    with pytest.raises(ValueError, match=expected_msg):
        gbdt.fit(X, y)
def test_bounded_value_min_gain_to_split():
    """Check the interaction between node-value bounding and
    ``min_gain_to_split`` when computing split gains."""
    # The purpose of this test is to show that when computing the gain at a
    # given split, the value of the current node should be properly bounded to
    # respect the monotonic constraints, because it strongly interacts with
    # min_gain_to_split. We build a simple example where gradients are [1, 1,
    # 100, 1, 1] (hessians are all ones). The best split happens on the 3rd
    # bin, and depending on whether the value of the node is bounded or not,
    # the min_gain_to_split constraint is or isn't satisfied.
    l2_regularization = 0
    min_hessian_to_split = 0
    min_samples_leaf = 1
    n_bins = n_samples = 5
    X_binned = np.arange(n_samples).reshape(-1, 1).astype(X_BINNED_DTYPE)
    sample_indices = np.arange(n_samples, dtype=np.uint32)
    all_hessians = np.ones(n_samples, dtype=G_H_DTYPE)
    all_gradients = np.array([1, 1, 100, 1, 1], dtype=G_H_DTYPE)
    sum_gradients = all_gradients.sum()
    sum_hessians = all_hessians.sum()
    hessians_are_constant = False
    builder = HistogramBuilder(
        X_binned, n_bins, all_gradients, all_hessians, hessians_are_constant, n_threads
    )
    n_bins_non_missing = np.array([n_bins - 1] * X_binned.shape[1], dtype=np.uint32)
    has_missing_values = np.array([False] * X_binned.shape[1], dtype=np.uint8)
    monotonic_cst = np.array(
        [MonotonicConstraint.NO_CST] * X_binned.shape[1], dtype=np.int8
    )
    is_categorical = np.zeros_like(monotonic_cst, dtype=np.uint8)
    missing_values_bin_idx = n_bins - 1
    children_lower_bound, children_upper_bound = -np.inf, np.inf
    min_gain_to_split = 2000
    splitter = Splitter(
        X_binned,
        n_bins_non_missing,
        missing_values_bin_idx,
        has_missing_values,
        is_categorical,
        monotonic_cst,
        l2_regularization,
        min_hessian_to_split,
        min_samples_leaf,
        min_gain_to_split,
        hessians_are_constant,
    )
    histograms = builder.compute_histograms_brute(sample_indices)
    # Since the gradient array is [1, 1, 100, 1, 1]
    # the max possible gain happens on the 3rd bin (or equivalently in the 2nd)
    # and is equal to about 1307, which is less than min_gain_to_split = 2000,
    # so the node is considered unsplittable (gain = -1)
    current_lower_bound, current_upper_bound = -np.inf, np.inf
    value = compute_node_value(
        sum_gradients,
        sum_hessians,
        current_lower_bound,
        current_upper_bound,
        l2_regularization,
    )
    # the unbounded value is equal to -sum_gradients / sum_hessians
    assert value == pytest.approx(-104 / 5)
    split_info = splitter.find_node_split(
        n_samples,
        histograms,
        sum_gradients,
        sum_hessians,
        value,
        lower_bound=children_lower_bound,
        upper_bound=children_upper_bound,
    )
    assert split_info.gain == -1  # min_gain_to_split not respected
    # here again the max possible gain is on the 3rd bin but we now cap the
    # value of the node into [-10, inf].
    # This means the gain is now about 2430 which is more than the
    # min_gain_to_split constraint.
    current_lower_bound, current_upper_bound = -10, np.inf
    value = compute_node_value(
        sum_gradients,
        sum_hessians,
        current_lower_bound,
        current_upper_bound,
        l2_regularization,
    )
    assert value == -10
    split_info = splitter.find_node_split(
        n_samples,
        histograms,
        sum_gradients,
        sum_hessians,
        value,
        lower_bound=children_lower_bound,
        upper_bound=children_upper_bound,
    )
    assert split_info.gain > min_gain_to_split
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/ensemble/_hist_gradient_boosting/tests/test_binning.py | sklearn/ensemble/_hist_gradient_boosting/tests/test_binning.py | import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from sklearn.ensemble._hist_gradient_boosting.binning import (
_BinMapper,
_find_binning_thresholds,
_map_to_bins,
)
from sklearn.ensemble._hist_gradient_boosting.common import (
ALMOST_INF,
X_BINNED_DTYPE,
X_DTYPE,
)
from sklearn.utils._openmp_helpers import _openmp_effective_n_threads
n_threads = _openmp_effective_n_threads()
# Shared fixture: 1e6 samples, 2 features. Feature 0 ~ N(0, 1) and
# feature 1 ~ N(10, 0.01), so the two features have very different scales.
DATA = (
    np.random.RandomState(42)
    .normal(loc=[0, 10], scale=[1, 0.01], size=(int(1e6), 2))
    .astype(X_DTYPE)
)
def test_find_binning_thresholds_regular_data():
    """On evenly spaced data, thresholds fall on the expected grid points."""
    data = np.linspace(0, 10, 1001)
    # max_bins equally-full bins => thresholds at multiples of 10 / max_bins
    for max_bins, expected in [
        (10, [1, 2, 3, 4, 5, 6, 7, 8, 9]),
        (5, [2, 4, 6, 8]),
    ]:
        assert_allclose(_find_binning_thresholds(data, max_bins=max_bins), expected)
def test_find_binning_thresholds_small_regular_data():
    """With few distinct values, thresholds become midpoints between
    consecutive values once max_bins reaches the number of distinct values."""
    data = np.linspace(0, 10, 11)
    bin_thresholds = _find_binning_thresholds(data, max_bins=5)
    assert_allclose(bin_thresholds, [2, 4, 6, 8])
    bin_thresholds = _find_binning_thresholds(data, max_bins=10)
    assert_allclose(bin_thresholds, [1, 2, 3, 4, 5, 6, 7, 8, 9])
    bin_thresholds = _find_binning_thresholds(data, max_bins=11)
    assert_allclose(bin_thresholds, np.arange(10) + 0.5)
    bin_thresholds = _find_binning_thresholds(data, max_bins=255)
    assert_allclose(bin_thresholds, np.arange(10) + 0.5)
def test_find_binning_thresholds_random_data():
    """On Gaussian data, thresholds approximate the expected quantiles."""
    bin_thresholds = [
        _find_binning_thresholds(DATA[:, i], max_bins=255) for i in range(2)
    ]
    for i in range(len(bin_thresholds)):
        assert bin_thresholds[i].shape == (254,)  # 255 - 1
        assert bin_thresholds[i].dtype == DATA.dtype
    # quartile-ish checkpoints of N(0, 1) and N(10, 0.01) respectively
    assert_allclose(
        bin_thresholds[0][[64, 128, 192]], np.array([-0.7, 0.0, 0.7]), atol=1e-1
    )
    assert_allclose(
        bin_thresholds[1][[64, 128, 192]], np.array([9.99, 10.00, 10.01]), atol=1e-2
    )
def test_find_binning_thresholds_low_n_bins():
    """max_bins=128 yields max_bins - 1 thresholds with the data's dtype."""
    bin_thresholds = [
        _find_binning_thresholds(DATA[:, i], max_bins=128) for i in range(2)
    ]
    for i in range(len(bin_thresholds)):
        assert bin_thresholds[i].shape == (127,)  # 128 - 1
        assert bin_thresholds[i].dtype == DATA.dtype
@pytest.mark.parametrize("n_bins", (2, 257))
def test_invalid_n_bins(n_bins):
    """n_bins outside the supported [3, 256] range raises a ValueError."""
    err_msg = "n_bins={} should be no smaller than 3 and no larger than 256".format(
        n_bins
    )
    with pytest.raises(ValueError, match=err_msg):
        _BinMapper(n_bins=n_bins).fit(DATA)
def test_bin_mapper_n_features_transform():
    """transform() rejects data whose feature count differs from fit time."""
    mapper = _BinMapper(n_bins=42, random_state=42).fit(DATA)
    err_msg = "This estimator was fitted with 2 features but 4 got passed"
    with pytest.raises(ValueError, match=err_msg):
        mapper.transform(np.repeat(DATA, 2, axis=1))
@pytest.mark.parametrize("max_bins", [16, 128, 255])
def test_map_to_bins(max_bins):
    """_map_to_bins produces a Fortran-ordered uint8 array where the min
    (resp. max) of each feature lands in the first (resp. last) bin."""
    bin_thresholds = [
        _find_binning_thresholds(DATA[:, i], max_bins=max_bins) for i in range(2)
    ]
    binned = np.zeros_like(DATA, dtype=X_BINNED_DTYPE, order="F")
    is_categorical = np.zeros(2, dtype=np.uint8)
    last_bin_idx = max_bins
    _map_to_bins(DATA, bin_thresholds, is_categorical, last_bin_idx, n_threads, binned)
    assert binned.shape == DATA.shape
    assert binned.dtype == np.uint8
    assert binned.flags.f_contiguous
    min_indices = DATA.argmin(axis=0)
    max_indices = DATA.argmax(axis=0)
    for feature_idx, min_idx in enumerate(min_indices):
        assert binned[min_idx, feature_idx] == 0
    for feature_idx, max_idx in enumerate(max_indices):
        assert binned[max_idx, feature_idx] == max_bins - 1
@pytest.mark.parametrize("max_bins", [5, 10, 42])
def test_bin_mapper_random_data(max_bins):
    """_BinMapper on random data fills all bins approximately evenly."""
    n_samples, n_features = DATA.shape
    expected_count_per_bin = n_samples // max_bins
    tol = int(0.05 * expected_count_per_bin)  # 5% tolerance on bin counts
    # max_bins is the number of bins for non-missing values
    n_bins = max_bins + 1
    mapper = _BinMapper(n_bins=n_bins, random_state=42).fit(DATA)
    binned = mapper.transform(DATA)
    assert binned.shape == (n_samples, n_features)
    assert binned.dtype == np.uint8
    assert_array_equal(binned.min(axis=0), np.array([0, 0]))
    assert_array_equal(binned.max(axis=0), np.array([max_bins - 1, max_bins - 1]))
    assert len(mapper.bin_thresholds_) == n_features
    for bin_thresholds_feature in mapper.bin_thresholds_:
        assert bin_thresholds_feature.shape == (max_bins - 1,)
        assert bin_thresholds_feature.dtype == DATA.dtype
    assert np.all(mapper.n_bins_non_missing_ == max_bins)
    # Check that the binned data is approximately balanced across bins.
    for feature_idx in range(n_features):
        for bin_idx in range(max_bins):
            count = (binned[:, feature_idx] == bin_idx).sum()
            assert abs(count - expected_count_per_bin) < tol
@pytest.mark.parametrize("n_samples, max_bins", [(5, 5), (5, 10), (5, 11), (42, 255)])
def test_bin_mapper_small_random_data(n_samples, max_bins):
    """With at least as many bins as distinct values, binning preserves the
    rank order of the data exactly."""
    data = np.random.RandomState(42).normal(size=n_samples).reshape(-1, 1)
    assert len(np.unique(data)) == n_samples
    # max_bins is the number of bins for non-missing values
    n_bins = max_bins + 1
    mapper = _BinMapper(n_bins=n_bins, random_state=42)
    binned = mapper.fit_transform(data)
    assert binned.shape == data.shape
    assert binned.dtype == np.uint8
    # sorting the data must yield bin indices 0, 1, ..., n_samples - 1
    assert_array_equal(binned.ravel()[np.argsort(data.ravel())], np.arange(n_samples))
@pytest.mark.parametrize(
    "max_bins, n_distinct, multiplier",
    [
        (5, 5, 1),
        (5, 5, 3),
        (255, 12, 42),
    ],
)
def test_bin_mapper_identity_repeated_values(max_bins, n_distinct, multiplier):
    """Binning data whose distinct values are already 0..n_distinct-1 is the
    identity, regardless of how many times each value is repeated."""
    data = np.tile(np.arange(n_distinct), multiplier).reshape(-1, 1)
    # max_bins is the number of bins for non-missing values
    binned = _BinMapper(n_bins=max_bins + 1).fit_transform(data)
    assert_array_equal(data, binned)
@pytest.mark.parametrize("n_distinct", [2, 7, 42])
def test_bin_mapper_repeated_values_invariance(n_distinct):
    """Thresholds and binned output do not change when the mapper is given
    more bins than there are distinct values."""
    rng = np.random.RandomState(42)
    distinct_values = rng.normal(size=n_distinct)
    assert len(np.unique(distinct_values)) == n_distinct
    repeated_indices = rng.randint(low=0, high=n_distinct, size=1000)
    data = distinct_values[repeated_indices]
    rng.shuffle(data)
    assert_array_equal(np.unique(data), np.sort(distinct_values))
    data = data.reshape(-1, 1)
    mapper_1 = _BinMapper(n_bins=n_distinct + 1)
    binned_1 = mapper_1.fit_transform(data)
    assert_array_equal(np.unique(binned_1[:, 0]), np.arange(n_distinct))
    # Adding more bins to the mapper yields the same results (same thresholds)
    mapper_2 = _BinMapper(n_bins=min(256, n_distinct * 3) + 1)
    binned_2 = mapper_2.fit_transform(data)
    assert_allclose(mapper_1.bin_thresholds_[0], mapper_2.bin_thresholds_[0])
    assert_array_equal(binned_1, binned_2)
@pytest.mark.parametrize(
    "max_bins, scale, offset",
    [
        (3, 2, -1),
        (42, 1, 0),
        (255, 0.3, 42),
    ],
)
def test_bin_mapper_identity_small(max_bins, scale, offset):
    """Binning is invariant to affine rescaling: scaled/shifted values of
    0..max_bins-1 still map back to bin indices 0..max_bins-1."""
    data = np.arange(max_bins).reshape(-1, 1) * scale + offset
    # max_bins is the number of bins for non-missing values
    n_bins = max_bins + 1
    binned = _BinMapper(n_bins=n_bins).fit_transform(data)
    assert_array_equal(binned, np.arange(max_bins).reshape(-1, 1))
@pytest.mark.parametrize(
    "max_bins_small, max_bins_large",
    [
        (2, 2),
        (3, 3),
        (4, 4),
        (42, 42),
        (255, 255),
        (5, 17),
        (42, 255),
    ],
)
def test_bin_mapper_idempotence(max_bins_small, max_bins_large):
    """Binning already-binned data is the identity.

    Data binned with ``max_bins_small`` bins takes at most ``max_bins_small``
    distinct values, so re-binning it with at least as many bins
    (``max_bins_large``) must leave it unchanged.
    """
    assert max_bins_large >= max_bins_small
    data = np.random.RandomState(42).normal(size=30000).reshape(-1, 1)
    mapper_small = _BinMapper(n_bins=max_bins_small + 1)
    # Bug fix: the second mapper previously used max_bins_small as well,
    # so the (5, 17) and (42, 255) parametrizations tested nothing new.
    mapper_large = _BinMapper(n_bins=max_bins_large + 1)
    binned_small = mapper_small.fit_transform(data)
    binned_large = mapper_large.fit_transform(binned_small)
    assert_array_equal(binned_small, binned_large)
@pytest.mark.parametrize("n_bins", [10, 100, 256])
@pytest.mark.parametrize("diff", [-5, 0, 5])
def test_n_bins_non_missing(n_bins, diff):
    """n_bins_non_missing_ equals the number of unique values when small,
    and is capped at n_bins - 1 otherwise."""
    # Check that n_bins_non_missing is n_unique_values when
    # there are not a lot of unique values, else n_bins - 1.
    n_unique_values = n_bins + diff
    X = list(range(n_unique_values)) * 2
    X = np.array(X).reshape(-1, 1)
    mapper = _BinMapper(n_bins=n_bins).fit(X)
    assert np.all(mapper.n_bins_non_missing_ == min(n_bins - 1, n_unique_values))
def test_subsample():
    """Subsampling changes the fitted bin thresholds."""
    # Make sure bin thresholds are different when applying subsampling
    mapper_no_subsample = _BinMapper(subsample=None, random_state=0).fit(DATA)
    mapper_subsample = _BinMapper(subsample=256, random_state=0).fit(DATA)
    for feature in range(DATA.shape[1]):
        assert not np.allclose(
            mapper_no_subsample.bin_thresholds_[feature],
            mapper_subsample.bin_thresholds_[feature],
            rtol=1e-4,
        )
@pytest.mark.parametrize(
    "n_bins, n_bins_non_missing, X_trans_expected",
    [
        (
            256,
            [4, 2, 2],
            [
                [0, 0, 0],  # 255 <=> missing value
                [255, 255, 0],
                [1, 0, 0],
                [255, 1, 1],
                [2, 1, 1],
                [3, 0, 0],
            ],
        ),
        (
            3,
            [2, 2, 2],
            [
                [0, 0, 0],  # 2 <=> missing value
                [2, 2, 0],
                [0, 0, 0],
                [2, 1, 1],
                [1, 1, 1],
                [1, 0, 0],
            ],
        ),
    ],
)
def test_missing_values_support(n_bins, n_bins_non_missing, X_trans_expected):
    """NaNs are mapped to the last bin and the mapper attributes reflect the
    number of non-missing bins per feature."""
    # check for missing values: make sure nans are mapped to the last bin
    # and that the _BinMapper attributes are correct
    X = [
        [1, 1, 0],
        [np.nan, np.nan, 0],
        [2, 1, 0],
        [np.nan, 2, 1],
        [3, 2, 1],
        [4, 1, 0],
    ]
    X = np.array(X)
    mapper = _BinMapper(n_bins=n_bins)
    mapper.fit(X)
    assert_array_equal(mapper.n_bins_non_missing_, n_bins_non_missing)
    for feature_idx in range(X.shape[1]):
        # one threshold fewer than the number of non-missing bins
        assert (
            len(mapper.bin_thresholds_[feature_idx])
            == n_bins_non_missing[feature_idx] - 1
        )
    assert mapper.missing_values_bin_idx_ == n_bins - 1
    X_trans = mapper.transform(X)
    assert_array_equal(X_trans, X_trans_expected)
def test_infinite_values():
    """-inf/+inf are binned like finite extremes; the last finite threshold
    is clipped to ALMOST_INF."""
    # Make sure infinite values are properly handled.
    bin_mapper = _BinMapper()
    X = np.array([-np.inf, 0, 1, np.inf]).reshape(-1, 1)
    bin_mapper.fit(X)
    assert_allclose(bin_mapper.bin_thresholds_[0], [-np.inf, 0.5, ALMOST_INF])
    assert bin_mapper.n_bins_non_missing_ == [4]
    expected_binned_X = np.array([0, 1, 2, 3]).reshape(-1, 1)
    assert_array_equal(bin_mapper.transform(X), expected_binned_X)
@pytest.mark.parametrize("n_bins", [15, 256])
def test_categorical_feature(n_bins):
    """Categories are mapped to [0, n_categories - 1] and NaNs to the last
    bin, regardless of category frequency."""
    # Basic test for categorical features
    # we make sure that categories are mapped into [0, n_categories - 1] and
    # that nans are mapped to the last bin
    X = np.array(
        [[4] * 500 + [1] * 3 + [10] * 4 + [0] * 4 + [13] + [7] * 5 + [np.nan] * 2],
        dtype=X_DTYPE,
    ).T
    known_categories = [np.unique(X[~np.isnan(X)])]
    bin_mapper = _BinMapper(
        n_bins=n_bins,
        is_categorical=np.array([True]),
        known_categories=known_categories,
    ).fit(X)
    assert bin_mapper.n_bins_non_missing_ == [6]
    assert_array_equal(bin_mapper.bin_thresholds_[0], [0, 1, 4, 7, 10, 13])
    X = np.array([[0, 1, 4, np.nan, 7, 10, 13]], dtype=X_DTYPE).T
    expected_trans = np.array([[0, 1, 2, n_bins - 1, 3, 4, 5]]).T
    assert_array_equal(bin_mapper.transform(X), expected_trans)
    # Negative categories are mapped to the missing values' bin
    # (i.e. the bin of index ``missing_values_bin_idx_``, which is n_bins - 1).
    # Unknown positive categories do not happen in practice and are tested
    # for illustration purposes only.
    X = np.array([[-4, -1, 100]], dtype=X_DTYPE).T
    expected_trans = np.array([[n_bins - 1, n_bins - 1, 6]]).T
    assert_array_equal(bin_mapper.transform(X), expected_trans)
def test_categorical_feature_negative_missing():
    """Make sure bin mapper treats negative categories as missing values."""
    X = np.array(
        [[4] * 500 + [1] * 3 + [5] * 10 + [-1] * 3 + [np.nan] * 4], dtype=X_DTYPE
    ).T
    bin_mapper = _BinMapper(
        n_bins=4,
        is_categorical=np.array([True]),
        known_categories=[np.array([1, 4, 5], dtype=X_DTYPE)],
    ).fit(X)
    # three known categories -> three non-missing bins
    assert bin_mapper.n_bins_non_missing_ == [3]
    X = np.array([[-1, 1, 3, 5, np.nan]], dtype=X_DTYPE).T
    # Negative values for categorical features are considered as missing values.
    # They are mapped to the bin of index `bin_mapper.missing_values_bin_idx_`,
    # which is 3 here.
    assert bin_mapper.missing_values_bin_idx_ == 3
    # note: 3 is an unknown category here, also mapped to the missing bin... no,
    # per the expected output below it maps like its neighbors; see transform.
    expected_trans = np.array([[3, 0, 1, 2, 3]]).T
    assert_array_equal(bin_mapper.transform(X), expected_trans)
@pytest.mark.parametrize("n_bins", (128, 256))
def test_categorical_with_numerical_features(n_bins):
    """Mixed numerical/categorical data: each column is binned with the
    appropriate strategy."""
    # basic check for binmapper with mixed data
    X1 = np.arange(10, 20).reshape(-1, 1)  # numerical
    X2 = np.arange(10, 15).reshape(-1, 1)  # categorical
    X2 = np.r_[X2, X2]
    X = np.c_[X1, X2]
    known_categories = [None, np.unique(X2).astype(X_DTYPE)]
    bin_mapper = _BinMapper(
        n_bins=n_bins,
        is_categorical=np.array([False, True]),
        known_categories=known_categories,
    ).fit(X)
    assert_array_equal(bin_mapper.n_bins_non_missing_, [10, 5])
    bin_thresholds = bin_mapper.bin_thresholds_
    assert len(bin_thresholds) == 2
    # for the categorical column, the "thresholds" are the category values
    assert_array_equal(bin_thresholds[1], np.arange(10, 15))
    expected_X_trans = [
        [0, 0],
        [1, 1],
        [2, 2],
        [3, 3],
        [4, 4],
        [5, 0],
        [6, 1],
        [7, 2],
        [8, 3],
        [9, 4],
    ]
    assert_array_equal(bin_mapper.transform(X), expected_X_trans)
def test_make_known_categories_bitsets():
    # Check the output of make_known_categories_bitsets.
    # Each categorical feature gets a row of 8 uint32 words; category c sets
    # bit (c % 32) of word (c // 32).
    X = np.array(
        [[14, 2, 30], [30, 4, 70], [40, 10, 180], [40, 240, 180]], dtype=X_DTYPE
    )
    bin_mapper = _BinMapper(
        n_bins=256,
        is_categorical=np.array([False, True, True]),
        known_categories=[None, X[:, 1], X[:, 2]],
    )
    bin_mapper.fit(X)
    known_cat_bitsets, f_idx_map = bin_mapper.make_known_categories_bitsets()
    # Note that for non-categorical features, values are left to 0
    expected_f_idx_map = np.array([0, 0, 1], dtype=np.uint8)
    assert_allclose(expected_f_idx_map, f_idx_map)
    expected_cat_bitset = np.zeros((2, 8), dtype=np.uint32)
    # first categorical feature: [2, 4, 10, 240]
    f_idx = 1
    mapped_f_idx = f_idx_map[f_idx]
    expected_cat_bitset[mapped_f_idx, 0] = 2**2 + 2**4 + 2**10
    # 240 = 7 * 32 + 16, therefore bit 16 of word 7 is set.
    expected_cat_bitset[mapped_f_idx, 7] = 2**16
    # second categorical feature [30, 70, 180]
    f_idx = 2
    mapped_f_idx = f_idx_map[f_idx]
    expected_cat_bitset[mapped_f_idx, 0] = 2**30
    expected_cat_bitset[mapped_f_idx, 2] = 2**6  # 70 = 2 * 32 + 6
    expected_cat_bitset[mapped_f_idx, 5] = 2**20  # 180 = 5 * 32 + 20
    assert_allclose(expected_cat_bitset, known_cat_bitsets)
@pytest.mark.parametrize(
    "is_categorical, known_categories, match",
    [
        (np.array([True]), [None], "Known categories for feature 0 must be provided"),
        (
            np.array([False]),
            np.array([1, 2, 3]),
            "isn't marked as a categorical feature, but categories were passed",
        ),
    ],
)
def test_categorical_parameters(is_categorical, known_categories, match):
    """is_categorical and known_categories must be mutually consistent."""
    # test the validation of the is_categorical and known_categories parameters
    X = np.array([[1, 2, 3]], dtype=X_DTYPE)
    bin_mapper = _BinMapper(
        is_categorical=is_categorical, known_categories=known_categories
    )
    with pytest.raises(ValueError, match=match):
        bin_mapper.fit(X)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/ensemble/tests/test_stacking.py | sklearn/ensemble/tests/test_stacking.py | """Test the stacking classifier and regressor."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import re
from unittest.mock import Mock
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from scipy import sparse
from sklearn import config_context
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin, clone
from sklearn.datasets import (
load_breast_cancer,
load_diabetes,
load_iris,
make_classification,
make_multilabel_classification,
make_regression,
)
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.ensemble import (
RandomForestClassifier,
RandomForestRegressor,
StackingClassifier,
StackingRegressor,
)
from sklearn.exceptions import ConvergenceWarning, NotFittedError
from sklearn.linear_model import (
LinearRegression,
LogisticRegression,
Ridge,
RidgeClassifier,
)
from sklearn.model_selection import KFold, StratifiedKFold, train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import scale
from sklearn.svm import SVC, LinearSVC, LinearSVR
from sklearn.tests.metadata_routing_common import (
ConsumingClassifier,
ConsumingRegressor,
_Registry,
check_recorded_metadata,
)
from sklearn.utils._mocking import CheckingClassifier
from sklearn.utils._testing import (
assert_allclose,
assert_allclose_dense_sparse,
ignore_warnings,
)
from sklearn.utils.fixes import COO_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS
# Shared fixtures: small standard datasets reused by the stacking tests below.
diabetes = load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
iris = load_iris()
X_iris, y_iris = iris.data, iris.target
# 3-output multilabel problem for the multilabel stacking tests.
X_multilabel, y_multilabel = make_multilabel_classification(
    n_classes=3, random_state=42
)
X_binary, y_binary = make_classification(n_classes=2, random_state=42)
@pytest.mark.parametrize(
    "cv", [3, StratifiedKFold(n_splits=3, shuffle=True, random_state=42)]
)
@pytest.mark.parametrize(
    "final_estimator", [None, RandomForestClassifier(random_state=42)]
)
@pytest.mark.parametrize("passthrough", [False, True])
def test_stacking_classifier_iris(cv, final_estimator, passthrough):
    """Smoke-test StackingClassifier on iris: fit/predict/score and the
    width of the stacked `transform` output, with and without passthrough."""
    # prescale the data to avoid convergence warning without using a pipeline
    # for later assert
    X_train, X_test, y_train, y_test = train_test_split(
        scale(X_iris), y_iris, stratify=y_iris, random_state=42
    )
    estimators = [("lr", LogisticRegression()), ("svc", LinearSVC())]
    clf = StackingClassifier(
        estimators=estimators,
        final_estimator=final_estimator,
        cv=cv,
        passthrough=passthrough,
    )
    clf.fit(X_train, y_train)
    clf.predict(X_test)
    clf.predict_proba(X_test)
    assert clf.score(X_test, y_test) > 0.8
    X_trans = clf.transform(X_test)
    # 2 estimators x 3 classes (+ the 4 original features when passthrough)
    expected_column_count = 10 if passthrough else 6
    assert X_trans.shape[1] == expected_column_count
    if passthrough:
        assert_allclose(X_test, X_trans[:, -4:])
    clf.set_params(lr="drop")
    clf.fit(X_train, y_train)
    clf.predict(X_test)
    clf.predict_proba(X_test)
    if final_estimator is None:
        # LogisticRegression has decision_function method
        clf.decision_function(X_test)
    X_trans = clf.transform(X_test)
    # only the svc columns remain after dropping "lr"
    expected_column_count_drop = 7 if passthrough else 3
    assert X_trans.shape[1] == expected_column_count_drop
    if passthrough:
        assert_allclose(X_test, X_trans[:, -4:])
def test_stacking_classifier_drop_column_binary_classification():
    """Check that one probability column per estimator is dropped in binary
    classification (the two columns are collinear)."""
    X, y = load_breast_cancer(return_X_y=True)
    X_train, X_test, y_train, _ = train_test_split(
        scale(X), y, stratify=y, random_state=42
    )
    # both classifiers implement 'predict_proba' and will both drop one column
    estimators = [
        ("lr", LogisticRegression()),
        ("rf", RandomForestClassifier(random_state=42)),
    ]
    clf = StackingClassifier(estimators=estimators, cv=3)
    clf.fit(X_train, y_train)
    X_trans = clf.transform(X_test)
    assert X_trans.shape[1] == 2
    # LinearSVC does not implement 'predict_proba' and will not drop one column
    estimators = [("lr", LogisticRegression()), ("svc", LinearSVC())]
    clf.set_params(estimators=estimators)
    clf.fit(X_train, y_train)
    X_trans = clf.transform(X_test)
    assert X_trans.shape[1] == 2
def test_stacking_classifier_drop_estimator():
    """Check that an estimator set to 'drop' is equivalent to not listing it."""
    # prescale the data to avoid convergence warning without using a pipeline
    # for later assert
    X_train, X_test, y_train, _ = train_test_split(
        scale(X_iris), y_iris, stratify=y_iris, random_state=42
    )
    estimators = [("lr", "drop"), ("svc", LinearSVC(random_state=0))]
    rf = RandomForestClassifier(n_estimators=10, random_state=42)
    clf = StackingClassifier(
        estimators=[("svc", LinearSVC(random_state=0))],
        final_estimator=rf,
        cv=5,
    )
    clf_drop = StackingClassifier(estimators=estimators, final_estimator=rf, cv=5)
    clf.fit(X_train, y_train)
    clf_drop.fit(X_train, y_train)
    # predictions, probabilities and stacked features must be identical
    assert_allclose(clf.predict(X_test), clf_drop.predict(X_test))
    assert_allclose(clf.predict_proba(X_test), clf_drop.predict_proba(X_test))
    assert_allclose(clf.transform(X_test), clf_drop.transform(X_test))
def test_stacking_regressor_drop_estimator():
    """Check that an estimator set to 'drop' is equivalent to not listing it."""
    # prescale the data to avoid convergence warning without using a pipeline
    # for later assert
    X_train, X_test, y_train, _ = train_test_split(
        scale(X_diabetes), y_diabetes, random_state=42
    )
    estimators = [("lr", "drop"), ("ridge", Ridge(alpha=1.0))]
    rf = RandomForestRegressor(n_estimators=10, random_state=42)
    reg = StackingRegressor(
        estimators=[("ridge", Ridge(alpha=1.0))],
        final_estimator=rf,
        cv=5,
    )
    reg_drop = StackingRegressor(estimators=estimators, final_estimator=rf, cv=5)
    reg.fit(X_train, y_train)
    reg_drop.fit(X_train, y_train)
    # predictions and stacked features must be identical
    assert_allclose(reg.predict(X_test), reg_drop.predict(X_test))
    assert_allclose(reg.transform(X_test), reg_drop.transform(X_test))
@pytest.mark.parametrize("cv", [3, KFold(n_splits=3, shuffle=True, random_state=42)])
@pytest.mark.parametrize(
    "final_estimator, predict_params",
    [
        (None, {}),
        (RandomForestRegressor(random_state=42), {}),
        (DummyRegressor(), {"return_std": True}),
    ],
)
@pytest.mark.parametrize("passthrough", [False, True])
def test_stacking_regressor_diabetes(cv, final_estimator, predict_params, passthrough):
    """Smoke-test StackingRegressor on diabetes: fit/predict (optionally with
    prediction kwargs forwarded to the final estimator) and the width of the
    stacked `transform` output, with and without passthrough."""
    # prescale the data to avoid convergence warning without using a pipeline
    # for later assert
    X_train, X_test, y_train, _ = train_test_split(
        scale(X_diabetes), y_diabetes, random_state=42
    )
    estimators = [("lr", LinearRegression()), ("svr", LinearSVR())]
    reg = StackingRegressor(
        estimators=estimators,
        final_estimator=final_estimator,
        cv=cv,
        passthrough=passthrough,
    )
    reg.fit(X_train, y_train)
    result = reg.predict(X_test, **predict_params)
    if predict_params:
        # DummyRegressor(return_std=True) returns the (mean, std) pair.
        # (The former `expected_result_length = 2 if predict_params else 1`
        # was dead code: the `else 1` value was never asserted.)
        assert len(result) == 2
    X_trans = reg.transform(X_test)
    # 2 estimators (+ the 10 original features when passthrough)
    expected_column_count = 12 if passthrough else 2
    assert X_trans.shape[1] == expected_column_count
    if passthrough:
        assert_allclose(X_test, X_trans[:, -10:])
    reg.set_params(lr="drop")
    reg.fit(X_train, y_train)
    reg.predict(X_test)
    X_trans = reg.transform(X_test)
    expected_column_count_drop = 11 if passthrough else 1
    assert X_trans.shape[1] == expected_column_count_drop
    if passthrough:
        assert_allclose(X_test, X_trans[:, -10:])
@pytest.mark.parametrize(
    "sparse_container", COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS
)
def test_stacking_regressor_sparse_passthrough(sparse_container):
    """Check passthrough behavior on a sparse X matrix: the passthrough
    columns keep the input's values, sparsity and format."""
    X_train, X_test, y_train, _ = train_test_split(
        sparse_container(scale(X_diabetes)), y_diabetes, random_state=42
    )
    estimators = [("lr", LinearRegression()), ("svr", LinearSVR())]
    rf = RandomForestRegressor(n_estimators=10, random_state=42)
    clf = StackingRegressor(
        estimators=estimators, final_estimator=rf, cv=5, passthrough=True
    )
    clf.fit(X_train, y_train)
    X_trans = clf.transform(X_test)
    # last 10 columns are the untouched input features
    assert_allclose_dense_sparse(X_test, X_trans[:, -10:])
    assert sparse.issparse(X_trans)
    assert X_test.format == X_trans.format
@pytest.mark.parametrize(
    "sparse_container", COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS
)
def test_stacking_classifier_sparse_passthrough(sparse_container):
    """Check passthrough behavior on a sparse X matrix: the passthrough
    columns keep the input's values, sparsity and format."""
    X_train, X_test, y_train, _ = train_test_split(
        sparse_container(scale(X_iris)), y_iris, random_state=42
    )
    estimators = [("lr", LogisticRegression()), ("svc", LinearSVC())]
    rf = RandomForestClassifier(n_estimators=10, random_state=42)
    clf = StackingClassifier(
        estimators=estimators, final_estimator=rf, cv=5, passthrough=True
    )
    clf.fit(X_train, y_train)
    X_trans = clf.transform(X_test)
    # last 4 columns are the untouched input features
    assert_allclose_dense_sparse(X_test, X_trans[:, -4:])
    assert sparse.issparse(X_trans)
    assert X_test.format == X_trans.format
def test_stacking_classifier_drop_binary_prob():
    """Check that the classifier drops one of the two (collinear) probability
    columns for a binary classification problem."""
    # Select only the 2 first classes
    X_, y_ = scale(X_iris[:100]), y_iris[:100]
    estimators = [("lr", LogisticRegression()), ("rf", RandomForestClassifier())]
    clf = StackingClassifier(estimators=estimators)
    clf.fit(X_, y_)
    X_meta = clf.transform(X_)
    # one column per estimator, not one per class
    assert X_meta.shape[1] == 2
class NoWeightRegressor(RegressorMixin, BaseEstimator):
    """Regressor whose `fit` signature does not accept `sample_weight`.

    Used to check that Stacking* raises when weights are passed to an
    estimator that does not support them.
    """

    def fit(self, X, y):
        # NOTE(review): returns the fitted inner DummyRegressor rather than
        # `self`; the tests only rely on the fit *signature*, so this is
        # harmless here — confirm before reusing this class elsewhere.
        self.reg = DummyRegressor()
        return self.reg.fit(X, y)

    def predict(self, X):
        # constant prediction, value irrelevant to the tests
        return np.ones(X.shape[0])
class NoWeightClassifier(ClassifierMixin, BaseEstimator):
    """Classifier whose `fit` signature does not accept `sample_weight`.

    Used to check that Stacking* raises when weights are passed to an
    estimator that does not support them.
    """

    def fit(self, X, y):
        # NOTE(review): returns the fitted inner DummyClassifier rather than
        # `self`; the tests only rely on the fit *signature* — confirm before
        # reusing this class elsewhere.
        self.clf = DummyClassifier(strategy="stratified")
        return self.clf.fit(X, y)
@pytest.mark.parametrize(
    "y, params, type_err, msg_err",
    [
        (y_iris, {"estimators": []}, ValueError, "Invalid 'estimators' attribute,"),
        (
            y_iris,
            {
                "estimators": [
                    ("lr", LogisticRegression()),
                    ("svm", SVC(max_iter=50_000)),
                ],
                "stack_method": "predict_proba",
            },
            ValueError,
            "does not implement the method predict_proba",
        ),
        (
            y_iris,
            {
                "estimators": [
                    ("lr", LogisticRegression()),
                    ("cor", NoWeightClassifier()),
                ]
            },
            TypeError,
            "does not support sample weight",
        ),
        (
            y_iris,
            {
                "estimators": [
                    ("lr", LogisticRegression()),
                    ("cor", LinearSVC(max_iter=50_000)),
                ],
                "final_estimator": NoWeightClassifier(),
            },
            TypeError,
            "does not support sample weight",
        ),
    ],
)
def test_stacking_classifier_error(y, params, type_err, msg_err):
    """Check errors on empty/invalid estimators, unavailable stack_method and
    estimators (base or final) that do not support sample_weight."""
    with pytest.raises(type_err, match=msg_err):
        clf = StackingClassifier(**params, cv=3)
        clf.fit(scale(X_iris), y, sample_weight=np.ones(X_iris.shape[0]))
@pytest.mark.parametrize(
    "y, params, type_err, msg_err",
    [
        (y_diabetes, {"estimators": []}, ValueError, "Invalid 'estimators' attribute,"),
        (
            y_diabetes,
            {"estimators": [("lr", LinearRegression()), ("cor", NoWeightRegressor())]},
            TypeError,
            "does not support sample weight",
        ),
        (
            y_diabetes,
            {
                "estimators": [
                    ("lr", LinearRegression()),
                    ("cor", LinearSVR()),
                ],
                "final_estimator": NoWeightRegressor(),
            },
            TypeError,
            "does not support sample weight",
        ),
    ],
)
def test_stacking_regressor_error(y, params, type_err, msg_err):
    """Check errors on empty/invalid estimators and on estimators (base or
    final) that do not support sample_weight."""
    with pytest.raises(type_err, match=msg_err):
        reg = StackingRegressor(**params, cv=3)
        reg.fit(scale(X_diabetes), y, sample_weight=np.ones(X_diabetes.shape[0]))
@pytest.mark.parametrize(
    "estimator, X, y",
    [
        (
            StackingClassifier(
                estimators=[
                    ("first", LogisticRegression(random_state=0)),
                    ("second", LinearSVC(random_state=0)),
                ]
            ),
            X_iris[:100],
            y_iris[:100],
        ),  # keep only classes 0 and 1
        (
            StackingRegressor(
                estimators=[
                    ("first", Ridge(alpha=1.0)),
                    ("second", Ridge(alpha=1e-6)),
                ]
            ),
            X_diabetes,
            y_diabetes,
        ),
    ],
    ids=["StackingClassifier", "StackingRegressor"],
)
def test_stacking_randomness(estimator, X, y):
    """Check that fixing the random state of the CV leads to the same stacked
    features: dropping the first estimator must leave the remaining columns
    unchanged."""
    estimator_full = clone(estimator)
    estimator_full.set_params(
        cv=KFold(shuffle=True, random_state=np.random.RandomState(0))
    )
    estimator_drop = clone(estimator)
    estimator_drop.set_params(first="drop")
    estimator_drop.set_params(
        cv=KFold(shuffle=True, random_state=np.random.RandomState(0))
    )
    # columns 1: of the full stacker correspond to the "second" estimator only
    assert_allclose(
        estimator_full.fit(X, y).transform(X)[:, 1:],
        estimator_drop.fit(X, y).transform(X),
    )
def test_stacking_classifier_stratify_default():
    """Fitting with the default CV must stratify the classes.

    Iris targets are sorted by class, so a plain (unstratified) k-fold would
    leave some training folds without all 3 classes and fitting would fail.
    """
    estimators = [
        ("lr", LogisticRegression(max_iter=10_000)),
        ("svm", LinearSVC(max_iter=10_000)),
    ]
    # Must not raise: the default CV stratifies the (unshuffled) targets.
    StackingClassifier(estimators=estimators).fit(X_iris, y_iris)
@pytest.mark.parametrize(
    "stacker, X, y",
    [
        (
            StackingClassifier(
                estimators=[
                    ("lr", LogisticRegression()),
                    ("svm", LinearSVC(random_state=42)),
                ],
                final_estimator=LogisticRegression(),
                cv=KFold(shuffle=True, random_state=42),
            ),
            *load_breast_cancer(return_X_y=True),
        ),
        (
            StackingRegressor(
                estimators=[
                    ("first", Ridge(alpha=1.0)),
                    ("second", Ridge(alpha=1e-6)),
                ],
                final_estimator=LinearRegression(),
                cv=KFold(shuffle=True, random_state=42),
            ),
            X_diabetes,
            y_diabetes,
        ),
    ],
    ids=["StackingClassifier", "StackingRegressor"],
)
def test_stacking_with_sample_weight(stacker, X, y):
    """Check that sample weights have an influence on the fit: unit weights
    match no weights, while biased weights change the predictions."""
    # note: ConvergenceWarning are caught since we are not worrying about the
    # convergence here
    n_half_samples = len(y) // 2
    total_sample_weight = np.array(
        [0.1] * n_half_samples + [0.9] * (len(y) - n_half_samples)
    )
    X_train, X_test, y_train, _, sample_weight_train, _ = train_test_split(
        X, y, total_sample_weight, random_state=42
    )
    stacker = clone(stacker)
    with ignore_warnings(category=ConvergenceWarning):
        stacker.fit(X_train, y_train)
    y_pred_no_weight = stacker.predict(X_test)
    with ignore_warnings(category=ConvergenceWarning):
        stacker.fit(X_train, y_train, sample_weight=np.ones(y_train.shape))
    y_pred_unit_weight = stacker.predict(X_test)
    assert_allclose(y_pred_no_weight, y_pred_unit_weight)
    with ignore_warnings(category=ConvergenceWarning):
        stacker.fit(X_train, y_train, sample_weight=sample_weight_train)
    y_pred_biased = stacker.predict(X_test)
    # biased weights must change at least one prediction
    assert np.abs(y_pred_no_weight - y_pred_biased).sum() > 0
def test_stacking_classifier_sample_weight_fit_param():
    """Check sample_weight is passed to all invocations of fit (base
    estimators and final estimator)."""
    stacker = StackingClassifier(
        estimators=[("lr", CheckingClassifier(expected_sample_weight=True))],
        final_estimator=CheckingClassifier(expected_sample_weight=True),
    )
    stacker.fit(X_iris, y_iris, sample_weight=np.ones(X_iris.shape[0]))
@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning")
@pytest.mark.parametrize(
    "stacker, X, y",
    [
        (
            StackingClassifier(
                estimators=[
                    ("lr", LogisticRegression()),
                    ("svm", LinearSVC(random_state=42)),
                ],
                final_estimator=LogisticRegression(),
            ),
            *load_breast_cancer(return_X_y=True),
        ),
        (
            StackingRegressor(
                estimators=[
                    ("ridge1", Ridge(alpha=1.0)),
                    ("ridge2", Ridge(alpha=1e-6)),
                ],
                final_estimator=LinearRegression(),
            ),
            X_diabetes,
            y_diabetes,
        ),
    ],
    ids=["StackingClassifier", "StackingRegressor"],
)
def test_stacking_cv_influence(stacker, X, y):
    """Check that the stacking CV affects the fit of the final estimator but
    not the fit of the base estimators.

    ConvergenceWarning is ignored since convergence is irrelevant here.
    """
    stacker_cv_3 = clone(stacker)
    stacker_cv_5 = clone(stacker)
    stacker_cv_3.set_params(cv=3)
    stacker_cv_5.set_params(cv=5)
    stacker_cv_3.fit(X, y)
    stacker_cv_5.fit(X, y)
    # the base estimators should be identical
    for est_cv_3, est_cv_5 in zip(stacker_cv_3.estimators_, stacker_cv_5.estimators_):
        assert_allclose(est_cv_3.coef_, est_cv_5.coef_)
    # the final estimator should be different
    with pytest.raises(AssertionError, match="Not equal"):
        assert_allclose(
            stacker_cv_3.final_estimator_.coef_, stacker_cv_5.final_estimator_.coef_
        )
@pytest.mark.parametrize(
    "Stacker, Estimator, stack_method, final_estimator, X, y",
    [
        (
            StackingClassifier,
            DummyClassifier,
            "predict_proba",
            LogisticRegression(random_state=42),
            X_iris,
            y_iris,
        ),
        (
            StackingRegressor,
            DummyRegressor,
            "predict",
            LinearRegression(),
            X_diabetes,
            y_diabetes,
        ),
    ],
)
def test_stacking_prefit(Stacker, Estimator, stack_method, final_estimator, X, y):
    """Check the behaviour of stacking when `cv='prefit'`: the prefit base
    estimators are not refit, and their stack method is called on the data
    passed to `fit`."""
    X_train1, X_train2, y_train1, y_train2 = train_test_split(
        X, y, random_state=42, test_size=0.5
    )
    estimators = [
        ("d0", Estimator().fit(X_train1, y_train1)),
        ("d1", Estimator().fit(X_train1, y_train1)),
    ]
    # mock out fit and stack_method to be asserted later
    for _, estimator in estimators:
        estimator.fit = Mock(name="fit")
        stack_func = getattr(estimator, stack_method)
        predict_method_mocked = Mock(side_effect=stack_func)
        # Mocking a method will not provide a `__name__` while Python methods
        # do and we are using it in `_get_response_method`.
        predict_method_mocked.__name__ = stack_method
        setattr(estimator, stack_method, predict_method_mocked)
    stacker = Stacker(
        estimators=estimators, cv="prefit", final_estimator=final_estimator
    )
    stacker.fit(X_train2, y_train2)
    # the prefit estimators are kept as-is
    assert stacker.estimators_ == [estimator for _, estimator in estimators]
    # fit was not called again
    assert all(estimator.fit.call_count == 0 for estimator in stacker.estimators_)
    # stack method is called with the proper inputs
    for estimator in stacker.estimators_:
        stack_func_mock = getattr(estimator, stack_method)
        stack_func_mock.assert_called_with(X_train2)
@pytest.mark.parametrize(
    "stacker, X, y",
    [
        (
            StackingClassifier(
                estimators=[("lr", LogisticRegression()), ("svm", SVC())],
                cv="prefit",
            ),
            X_iris,
            y_iris,
        ),
        (
            StackingRegressor(
                estimators=[
                    ("lr", LinearRegression()),
                    ("svm", LinearSVR()),
                ],
                cv="prefit",
            ),
            X_diabetes,
            y_diabetes,
        ),
    ],
)
def test_stacking_prefit_error(stacker, X, y):
    """Check that NotFittedError is raised if the base estimators are not
    fitted when cv="prefit"."""
    with pytest.raises(NotFittedError):
        stacker.fit(X, y)
@pytest.mark.parametrize(
    "make_dataset, Stacking, Estimator",
    [
        (make_classification, StackingClassifier, LogisticRegression),
        (make_regression, StackingRegressor, LinearRegression),
    ],
)
def test_stacking_without_n_features_in(make_dataset, Stacking, Estimator):
    """Stacking supports estimators without `n_features_in_`.

    Non-regression test for #17353.
    """

    class MyEstimator(Estimator):
        """Estimator without n_features_in_"""

        def fit(self, X, y):
            super().fit(X, y)
            # deliberately remove the attribute set by the parent fit
            del self.n_features_in_

    X, y = make_dataset(random_state=0, n_samples=100)
    stacker = Stacking(estimators=[("lr", MyEstimator())])
    # before fit: the stacker itself has no n_features_in_
    msg = f"{Stacking.__name__} object has no attribute n_features_in_"
    with pytest.raises(AttributeError, match=msg):
        stacker.n_features_in_
    # Does not raise
    stacker.fit(X, y)
    # after fit: the delegated attribute is still missing on the base estimator
    msg = "'MyEstimator' object has no attribute 'n_features_in_'"
    with pytest.raises(AttributeError, match=msg):
        stacker.n_features_in_
@pytest.mark.parametrize(
    "estimator",
    [
        # output a 2D array of the probability of the positive class for each output
        MLPClassifier(random_state=42),
        # output a list of 2D array containing the probability of each class
        # for each output
        RandomForestClassifier(random_state=42),
    ],
    ids=["MLPClassifier", "RandomForestClassifier"],
)
def test_stacking_classifier_multilabel_predict_proba(estimator):
    """Check the behaviour for the multilabel classification case and the
    `predict_proba` stacking method.

    Estimators are not consistent with the output arrays and we need to ensure
    that we handle all cases.
    """
    X_train, X_test, y_train, y_test = train_test_split(
        X_multilabel, y_multilabel, stratify=y_multilabel, random_state=42
    )
    n_outputs = 3
    estimators = [("est", estimator)]
    stacker = StackingClassifier(
        estimators=estimators,
        final_estimator=KNeighborsClassifier(),
        stack_method="predict_proba",
    ).fit(X_train, y_train)
    # one stacked column per output (positive-class probability)
    X_trans = stacker.transform(X_test)
    assert X_trans.shape == (X_test.shape[0], n_outputs)
    # we should not have any collinear classes and thus nothing should sum to 1
    assert not any(np.isclose(X_trans.sum(axis=1), 1.0))
    y_pred = stacker.predict(X_test)
    assert y_pred.shape == y_test.shape
def test_stacking_classifier_multilabel_decision_function():
    """Check the behaviour for the multilabel classification case and the
    `decision_function` stacking method. Only `RidgeClassifier` supports this
    case.
    """
    X_train, X_test, y_train, y_test = train_test_split(
        X_multilabel, y_multilabel, stratify=y_multilabel, random_state=42
    )
    n_outputs = 3
    estimators = [("est", RidgeClassifier())]
    stacker = StackingClassifier(
        estimators=estimators,
        final_estimator=KNeighborsClassifier(),
        stack_method="decision_function",
    ).fit(X_train, y_train)
    # one decision value per output
    X_trans = stacker.transform(X_test)
    assert X_trans.shape == (X_test.shape[0], n_outputs)
    y_pred = stacker.predict(X_test)
    assert y_pred.shape == y_test.shape
@pytest.mark.parametrize("stack_method", ["auto", "predict"])
@pytest.mark.parametrize("passthrough", [False, True])
def test_stacking_classifier_multilabel_auto_predict(stack_method, passthrough):
    """Check the behaviour for the multilabel classification case for stack
    methods supported for all estimators or automatically picked up.
    """
    X_train, X_test, y_train, y_test = train_test_split(
        X_multilabel, y_multilabel, stratify=y_multilabel, random_state=42
    )
    y_train_before_fit = y_train.copy()
    n_outputs = 3
    estimators = [
        ("mlp", MLPClassifier(random_state=42)),
        ("rf", RandomForestClassifier(random_state=42)),
        ("ridge", RidgeClassifier()),
    ]
    final_estimator = KNeighborsClassifier()
    clf = StackingClassifier(
        estimators=estimators,
        final_estimator=final_estimator,
        passthrough=passthrough,
        stack_method=stack_method,
    ).fit(X_train, y_train)
    # make sure we don't change `y_train` inplace
    assert_array_equal(y_train_before_fit, y_train)
    y_pred = clf.predict(X_test)
    assert y_pred.shape == y_test.shape
    if stack_method == "auto":
        expected_stack_methods = ["predict_proba", "predict_proba", "decision_function"]
    else:
        expected_stack_methods = ["predict"] * len(estimators)
    assert clf.stack_method_ == expected_stack_methods
    # every stack method yields one column per output here
    n_features_X_trans = n_outputs * len(estimators)
    if passthrough:
        n_features_X_trans += X_train.shape[1]
    X_trans = clf.transform(X_test)
    assert X_trans.shape == (X_test.shape[0], n_features_X_trans)
    assert_array_equal(clf.classes_, [np.array([0, 1])] * n_outputs)
@pytest.mark.parametrize(
    "stacker, feature_names, X, y, expected_names",
    [
        (
            StackingClassifier(
                estimators=[
                    ("lr", LogisticRegression(random_state=0)),
                    ("svm", LinearSVC(random_state=0)),
                ]
            ),
            iris.feature_names,
            X_iris,
            y_iris,
            # multiclass: one name per estimator and per class index
            [
                "stackingclassifier_lr0",
                "stackingclassifier_lr1",
                "stackingclassifier_lr2",
                "stackingclassifier_svm0",
                "stackingclassifier_svm1",
                "stackingclassifier_svm2",
            ],
        ),
        (
            StackingClassifier(
                estimators=[
                    ("lr", LogisticRegression(random_state=0)),
                    ("other", "drop"),
                    ("svm", LinearSVC(random_state=0)),
                ]
            ),
            iris.feature_names,
            X_iris[:100],
            y_iris[:100],  # keep only classes 0 and 1
            # binary: one name per non-dropped estimator
            [
                "stackingclassifier_lr",
                "stackingclassifier_svm",
            ],
        ),
        (
            StackingRegressor(
                estimators=[
                    ("lr", LinearRegression()),
                    ("svm", LinearSVR(random_state=0)),
                ]
            ),
            diabetes.feature_names,
            X_diabetes,
            y_diabetes,
            [
                "stackingregressor_lr",
                "stackingregressor_svm",
            ],
        ),
    ],
    ids=[
        "StackingClassifier_multiclass",
        "StackingClassifier_binary",
        "StackingRegressor",
    ],
)
@pytest.mark.parametrize("passthrough", [True, False])
def test_get_feature_names_out(
    stacker, feature_names, X, y, expected_names, passthrough
):
    """Check get_feature_names_out works for stacking."""
    stacker = clone(stacker)
    stacker.set_params(passthrough=passthrough)
    stacker.fit(scale(X), y)
    if passthrough:
        # passthrough appends the input feature names after the stacked ones
        expected_names = np.concatenate((expected_names, feature_names))
    names_out = stacker.get_feature_names_out(feature_names)
    assert_array_equal(names_out, expected_names)
def test_stacking_classifier_base_regressor():
    """Check that a regressor can be used as the first layer in `StackingClassifier`."""
    # Split a scaled copy of iris, preserving the class balance.
    splits = train_test_split(scale(X_iris), y_iris, stratify=y_iris, random_state=42)
    X_fit, X_eval, y_fit, y_eval = splits
    stacker = StackingClassifier(estimators=[("ridge", Ridge())])
    stacker.fit(X_fit, y_fit)
    # Both prediction APIs must work even though the base layer is a regressor.
    for predict_fn in (stacker.predict, stacker.predict_proba):
        predict_fn(X_eval)
    assert stacker.score(X_eval, y_eval) > 0.8
def test_stacking_final_estimator_attribute_error():
    """Check that we raise the proper AttributeError when the final estimator
    does not implement the `decision_function` method, which is decorated with
    `available_if`.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/28108
    """
    X, y = make_classification(random_state=42)
    estimators = [
        ("lr", LogisticRegression()),
        ("rf", RandomForestClassifier(n_estimators=2, random_state=42)),
    ]
    # RandomForestClassifier does not implement 'decision_function' and should raise
    # an AttributeError
    final_estimator = RandomForestClassifier(n_estimators=2, random_state=42)
    clf = StackingClassifier(
        estimators=estimators, final_estimator=final_estimator, cv=3
    )
    outer_msg = "This 'StackingClassifier' has no attribute 'decision_function'"
    inner_msg = "'RandomForestClassifier' object has no attribute 'decision_function'"
    with pytest.raises(AttributeError, match=outer_msg) as exec_info:
        clf.fit(X, y).decision_function(X)
    # the final estimator's error must be chained as __cause__ for debuggability
    assert isinstance(exec_info.value.__cause__, AttributeError)
    assert inner_msg in str(exec_info.value.__cause__)
# Metadata Routing Tests
# ======================
@pytest.mark.parametrize(
    "Estimator, Child",
    [
        (StackingClassifier, ConsumingClassifier),
        (StackingRegressor, ConsumingRegressor),
    ],
)
def test_routing_passed_metadata_not_supported(Estimator, Child):
    """Test that the right error message is raised when metadata is passed while
    not supported when `enable_metadata_routing=False`."""
    with pytest.raises(
        ValueError, match="is only supported if enable_metadata_routing=True"
    ):
        # `estimators` must be a list of (name, estimator) tuples; the former
        # flat `["clf", Child()]` form was malformed and only worked because
        # the routing error is raised before the estimators are validated.
        Estimator([("clf", Child())]).fit(
            X_iris, y_iris, sample_weight=[1, 1, 1, 1, 1], metadata="a"
        )
@pytest.mark.parametrize(
    "Estimator, Child",
    [
        (StackingClassifier, ConsumingClassifier),
        (StackingRegressor, ConsumingRegressor),
    ],
)
@config_context(enable_metadata_routing=True)
def test_get_metadata_routing_without_fit(Estimator, Child):
    """Test that get_metadata_routing() doesn't raise when called before fit."""
    est = Estimator([("sub_est", Child())])
    est.get_metadata_routing()
@pytest.mark.parametrize(
    "Estimator, Child",
    [
        (StackingClassifier, ConsumingClassifier),
        (StackingRegressor, ConsumingRegressor),
    ],
)
@pytest.mark.parametrize(
    "prop, prop_value", [("sample_weight", np.ones(X_iris.shape[0])), ("metadata", "a")]
)
@config_context(enable_metadata_routing=True)
def test_metadata_routing_for_stacking_estimators(Estimator, Child, prop, prop_value):
    """Test that metadata is routed correctly for Stacking*: to the base
    estimators' `fit` and to the final estimator's `predict`."""
    est = Estimator(
        [
            (
                "sub_est1",
                Child(registry=_Registry()).set_fit_request(**{prop: True}),
            ),
            (
                "sub_est2",
                Child(registry=_Registry()).set_fit_request(**{prop: True}),
            ),
        ],
        final_estimator=Child(registry=_Registry()).set_predict_request(**{prop: True}),
    )
    est.fit(X_iris, y_iris, **{prop: prop_value})
    est.fit_transform(X_iris, y_iris, **{prop: prop_value})
    est.predict(X_iris, **{prop: prop_value})
    for estimator in est.estimators:
        # access sub-estimator in (name, est) with estimator[1]:
        registry = estimator[1].registry
        assert len(registry)
        for sub_est in registry:
            check_recorded_metadata(
                obj=sub_est,
                method="fit",
                parent="fit",
                # `(prop,)`: `split_params` expects a collection of parameter
                # names; the previous bare `(prop)` was just the string itself.
                split_params=(prop,),
                **{prop: prop_value},
            )
    # access final_estimator:
    registry = est.final_estimator_.registry
    assert len(registry)
    check_recorded_metadata(
        obj=registry[-1],
        method="predict",
        parent="predict",
        split_params=(prop,),
        **{prop: prop_value},
    )
@pytest.mark.parametrize(
"Estimator, Child",
[
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/ensemble/tests/test_gradient_boosting.py | sklearn/ensemble/tests/test_gradient_boosting.py | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import re
import warnings
import numpy as np
import pytest
from numpy.testing import assert_allclose
from sklearn import datasets
from sklearn.base import clone
from sklearn.datasets import make_classification, make_regression
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor
from sklearn.ensemble._gb import _safe_divide
from sklearn.ensemble._gradient_boosting import predict_stages
from sklearn.exceptions import DataConversionWarning, NotFittedError
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import scale
from sklearn.svm import NuSVR
from sklearn.utils import check_random_state
from sklearn.utils._mocking import NoSampleWeightWrapper
from sklearn.utils._param_validation import InvalidParameterError
from sklearn.utils._testing import (
assert_array_almost_equal,
assert_array_equal,
skip_if_32bit,
)
from sklearn.utils.fixes import COO_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS
# Both gradient boosting estimator classes, for parametrized tests.
GRADIENT_BOOSTING_ESTIMATORS = [GradientBoostingClassifier, GradientBoostingRegressor]

# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]

# also make regression dataset
# NOTE(review): n_informative=8 exceeds n_features=4 — presumably
# make_regression clips n_informative internally; confirm this is intentional.
X_reg, y_reg = make_regression(
    n_samples=100, n_features=4, n_informative=8, noise=10, random_state=7
)
y_reg = scale(y_reg)

rng = np.random.RandomState(0)
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_exponential_n_classes_gt_2():
    """Test exponential loss raises for n_classes > 2."""
    expected_msg = "loss='exponential' is only suitable for a binary classification"
    with pytest.raises(ValueError, match=expected_msg):
        # iris has 3 classes, which is incompatible with the exponential loss
        GradientBoostingClassifier(loss="exponential").fit(iris.data, iris.target)
def test_raise_if_init_has_no_predict_proba():
    """Test raise if init_ has no predict_proba method."""
    # init is passed a regressor *class* here, which cannot provide
    # predict_proba, so parameter validation must reject it at fit time
    clf = GradientBoostingClassifier(init=GradientBoostingRegressor)
    msg = (
        "The 'init' parameter of GradientBoostingClassifier must be a str among "
        "{'zero'}, None or an object implementing 'fit' and 'predict_proba'."
    )
    with pytest.raises(ValueError, match=msg):
        clf.fit(X, y)
@pytest.mark.parametrize("loss", ("log_loss", "exponential"))
def test_classification_toy(loss, global_random_seed):
    """Check classification on a toy dataset: predict-before-fit raises, the
    toy predictions are exact, and the training score improves at some stage."""
    clf = GradientBoostingClassifier(
        loss=loss, n_estimators=10, random_state=global_random_seed
    )
    # predicting with an unfitted estimator must raise
    with pytest.raises(ValueError):
        clf.predict(T)
    clf.fit(X, y)
    assert_array_equal(clf.predict(T), true_result)
    assert 10 == len(clf.estimators_)
    # train_score_ holds per-stage scores; at least one stage must not regress
    log_loss_decrease = clf.train_score_[:-1] - clf.train_score_[1:]
    assert np.any(log_loss_decrease >= 0.0)
    # (n_samples, n_estimators, 1) leaf index per sample and stage
    leaves = clf.apply(X)
    assert leaves.shape == (6, 10, 1)
@pytest.mark.parametrize("loss", ("log_loss", "exponential"))
def test_classification_synthetic(loss, global_random_seed):
    """Test GradientBoostingClassifier on the synthetic dataset used by
    Hastie et al. in ESLII - Figure 10.9."""
    # Note that Figure 10.9 reuses the dataset generated for figure 10.2
    # and should have 2_000 train data points and 10_000 test data points.
    # Here we intentionally use a smaller variant to make the test run faster,
    # but the conclusions are still the same, despite the smaller datasets.
    X, y = datasets.make_hastie_10_2(n_samples=2000, random_state=global_random_seed)
    split_idx = 500
    X_train, X_test = X[:split_idx], X[split_idx:]
    y_train, y_test = y[:split_idx], y[split_idx:]
    # Increasing the number of trees should decrease the test error
    common_params = {
        "max_depth": 1,
        "learning_rate": 1.0,
        "loss": loss,
        "random_state": global_random_seed,
    }
    gbrt_10_stumps = GradientBoostingClassifier(n_estimators=10, **common_params)
    gbrt_10_stumps.fit(X_train, y_train)
    gbrt_50_stumps = GradientBoostingClassifier(n_estimators=50, **common_params)
    gbrt_50_stumps.fit(X_train, y_train)
    assert gbrt_10_stumps.score(X_test, y_test) < gbrt_50_stumps.score(X_test, y_test)
    # Decision stumps are better suited for this dataset with a large number of
    # estimators.
    common_params = {
        "n_estimators": 200,
        "learning_rate": 1.0,
        "loss": loss,
        "random_state": global_random_seed,
    }
    gbrt_stumps = GradientBoostingClassifier(max_depth=1, **common_params)
    gbrt_stumps.fit(X_train, y_train)
    gbrt_10_nodes = GradientBoostingClassifier(max_leaf_nodes=10, **common_params)
    gbrt_10_nodes.fit(X_train, y_train)
    assert gbrt_stumps.score(X_test, y_test) > gbrt_10_nodes.score(X_test, y_test)
@pytest.mark.parametrize("loss", ("squared_error", "absolute_error", "huber"))
@pytest.mark.parametrize("subsample", (1.0, 0.5))
def test_regression_dataset(loss, subsample, global_random_seed):
    # Consistency checks on the regression dataset for several losses,
    # with and without (constant) sample weights.
    unit_weights = np.ones(len(y_reg))
    previous_pred = None
    for sample_weight in (None, unit_weights, 2 * unit_weights):
        # learning_rate, max_depth and n_estimators were adjusted to get a
        # model that is accurate enough to reach a low MSE on the training
        # set while keeping the resources used by this test low enough.
        reg = GradientBoostingRegressor(
            n_estimators=30,
            loss=loss,
            max_depth=4,
            subsample=subsample,
            min_samples_split=2,
            random_state=global_random_seed,
            learning_rate=0.5,
        )
        reg.fit(X_reg, y_reg, sample_weight=sample_weight)
        assert reg.apply(X_reg).shape == (100, 30)
        y_pred = reg.predict(X_reg)
        assert mean_squared_error(y_reg, y_pred) < 0.05
        if previous_pred is not None:
            # FIXME: We temporarily bypass this test. This is due to the fact
            # that GBRT with and without `sample_weight` do not use the same
            # implementation of the median during the initialization with the
            # `DummyRegressor`. In the future, we should make sure that both
            # implementations should be the same. See PR #17377 for more.
            # assert_allclose(previous_pred, y_pred)
            pass
        previous_pred = y_pred
@pytest.mark.parametrize("subsample", (1.0, 0.5))
@pytest.mark.parametrize("sample_weight", (None, 1))
def test_iris(subsample, sample_weight, global_random_seed):
    # Consistency checks on the iris dataset, with and without subsampling
    # and with/without (unit) sample weights.
    if sample_weight == 1:
        sample_weight = np.ones(len(iris.target))
    clf = GradientBoostingClassifier(
        n_estimators=100,
        loss="log_loss",
        random_state=global_random_seed,
        subsample=subsample,
    )
    clf.fit(iris.data, iris.target, sample_weight=sample_weight)
    assert clf.score(iris.data, iris.target) > 0.9
    # apply returns one leaf index per (sample, stage, class)
    assert clf.apply(iris.data).shape == (150, 100, 3)
def test_regression_synthetic(global_random_seed):
    # Test on synthetic regression datasets used in Leo Breiman,
    # "Bagging Predictors". Machine Learning 24(2): 123-140 (1996).
    random_state = check_random_state(global_random_seed)
    # Shared hyperparameters; each dataset below gets its own (loose)
    # upper bound on the held-out MSE.
    regression_params = {
        "n_estimators": 100,
        "max_depth": 4,
        "min_samples_split": 2,
        "learning_rate": 0.1,
        "loss": "squared_error",
        "random_state": global_random_seed,
    }
    # Friedman1
    X, y = datasets.make_friedman1(n_samples=1200, random_state=random_state, noise=1.0)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    clf = GradientBoostingRegressor(**regression_params)
    clf.fit(X_train, y_train)
    mse = mean_squared_error(y_test, clf.predict(X_test))
    assert mse < 6.5
    # Friedman2
    X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    clf = GradientBoostingRegressor(**regression_params)
    clf.fit(X_train, y_train)
    mse = mean_squared_error(y_test, clf.predict(X_test))
    assert mse < 2500.0
    # Friedman3
    X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    clf = GradientBoostingRegressor(**regression_params)
    clf.fit(X_train, y_train)
    mse = mean_squared_error(y_test, clf.predict(X_test))
    assert mse < 0.025
@pytest.mark.parametrize(
    "GradientBoosting, X, y",
    [
        (GradientBoostingRegressor, X_reg, y_reg),
        (GradientBoostingClassifier, iris.data, iris.target),
    ],
)
def test_feature_importances(GradientBoosting, X, y):
    # Smoke test: the `feature_importances_` attribute is only exposed
    # once the gradient boosting estimator has been fitted.
    model = GradientBoosting()
    assert hasattr(model, "feature_importances_") is False
    model.fit(X, y)
    assert hasattr(model, "feature_importances_") is True
def test_probability_log(global_random_seed):
    # Check predicted probabilities and their consistency with `predict`.
    clf = GradientBoostingClassifier(n_estimators=100, random_state=global_random_seed)

    # Calling predict_proba before fit must raise.
    with pytest.raises(ValueError):
        clf.predict_proba(T)

    clf.fit(X, y)
    assert_array_equal(clf.predict(T), true_result)

    # Probabilities must lie within [0, 1].
    y_proba = clf.predict_proba(T)
    assert np.all((y_proba >= 0.0) & (y_proba <= 1.0))

    # The argmax over probabilities must recover the class predictions.
    derived_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
    assert_array_equal(derived_pred, true_result)
def test_single_class_with_sample_weight():
    # Weights that zero out every sample of all but one class must make
    # fit fail: at least two effective classes are required.
    weights = [0, 0, 0, 1, 1, 1]
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    expected_msg = (
        "y contains 1 class after sample_weight trimmed classes with "
        "zero weights, while a minimum of 2 classes are required."
    )
    with pytest.raises(ValueError, match=expected_msg):
        clf.fit(X, y, sample_weight=weights)
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
def test_check_inputs_predict_stages(csc_container):
    # Check that predict_stages throws an error when the type of X is not
    # supported: non-CSR sparse matrices and non C-contiguous arrays.
    x, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(x, y)
    score = np.zeros(y.shape).reshape(-1, 1)

    x_sparse_csc = csc_container(x)
    err_msg = "When X is a sparse matrix, a CSR format is expected"
    with pytest.raises(ValueError, match=err_msg):
        predict_stages(clf.estimators_, x_sparse_csc, clf.learning_rate, score)

    x_fortran = np.asfortranarray(x)
    with pytest.raises(ValueError, match="X should be C-ordered np.ndarray"):
        predict_stages(clf.estimators_, x_fortran, clf.learning_rate, score)
def test_max_feature_regression(global_random_seed):
    # Non-regression test: make sure the random state is set properly
    # when max_features triggers random feature subsampling.
    X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=global_random_seed)
    X_train, y_train = X[:2000], y[:2000]
    X_test, y_test = X[2000:], y[2000:]
    model = GradientBoostingClassifier(
        n_estimators=100,
        min_samples_split=5,
        max_depth=2,
        learning_rate=0.1,
        max_features=2,
        random_state=global_random_seed,
    )
    model.fit(X_train, y_train)
    log_loss = model._loss(y_test, model.decision_function(X_test))
    assert log_loss < 0.5, "GB failed with deviance %.4f" % log_loss
def test_feature_importance_regression(
    fetch_california_housing_fxt, global_random_seed
):
    """Test that Gini importance is calculated correctly.

    This test follows the example from [1]_ (pg. 373).

    .. [1] Friedman, J., Hastie, T., & Tibshirani, R. (2001). The elements
       of statistical learning. New York: Springer series in statistics.
    """
    california = fetch_california_housing_fxt()
    X, y = california.data, california.target
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, random_state=global_random_seed
    )
    model = GradientBoostingRegressor(
        loss="huber",
        learning_rate=0.1,
        max_leaf_nodes=6,
        n_estimators=100,
        random_state=global_random_seed,
    )
    model.fit(X_train, y_train)
    ranking = np.argsort(model.feature_importances_)[::-1]
    ranked_features = [california.feature_names[i] for i in ranking]
    # The most important feature is the median income by far.
    assert ranked_features[0] == "MedInc"
    # The three subsequent features are the following. Their relative
    # ordering might change a bit depending on the randomness of the trees
    # and the train / test split.
    assert set(ranked_features[1:4]) == {"Longitude", "AveOccup", "Latitude"}
def test_max_features():
    # Check that `max_features_` is resolved correctly for None, float and
    # string values of `max_features`.
    X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
    _, n_features = X.shape
    X_train = X[:2000]
    y_train = y[:2000]

    # Classifier: None keeps all features.
    clf = GradientBoostingClassifier(n_estimators=1, max_features=None)
    clf.fit(X_train, y_train)
    assert clf.max_features_ == n_features

    # Regressor: table of (max_features value, expected resolved value).
    cases = [
        (None, n_features),
        (0.3, int(n_features * 0.3)),
        ("sqrt", int(np.sqrt(n_features))),
        ("log2", int(np.log2(n_features))),
        (0.01 / X.shape[1], 1),  # tiny fractions are floored to one feature
    ]
    for max_features, expected in cases:
        reg = GradientBoostingRegressor(n_estimators=1, max_features=max_features)
        reg.fit(X_train, y_train)
        assert reg.max_features_ == expected
def test_staged_predict():
    # staged_predict yields per-stage predictions; the last one must
    # eventually match `predict`.
    X, y = datasets.make_friedman1(n_samples=1200, random_state=1, noise=1.0)
    X_train, y_train = X[:200], y[:200]
    X_test = X[200:]
    clf = GradientBoostingRegressor()

    # Consuming the generator before fit must raise ValueError.
    with pytest.raises(ValueError):
        np.fromiter(clf.staged_predict(X_test), dtype=np.float64)

    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    for staged in clf.staged_predict(X_test):
        assert staged.shape == y_pred.shape
    # After the loop, `staged` holds the last stage prediction.
    assert_array_almost_equal(y_pred, staged)
def test_staged_predict_proba():
    # staged_predict / staged_predict_proba must converge to predict /
    # predict_proba at the last stage.
    X, y = datasets.make_hastie_10_2(n_samples=1200, random_state=1)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    clf = GradientBoostingClassifier(n_estimators=20)

    # Consuming the generator before fit must raise NotFittedError.
    with pytest.raises(NotFittedError):
        np.fromiter(clf.staged_predict_proba(X_test), dtype=np.float64)

    clf.fit(X_train, y_train)
    for y_pred in clf.staged_predict(X_test):
        assert y_test.shape == y_pred.shape
    # The last stage prediction must equal ``predict``.
    assert_array_equal(clf.predict(X_test), y_pred)

    for staged_proba in clf.staged_predict_proba(X_test):
        assert y_test.shape[0] == staged_proba.shape[0]
        assert 2 == staged_proba.shape[1]
    # The last stage probabilities must equal ``predict_proba``.
    assert_array_almost_equal(clf.predict_proba(X_test), staged_proba)
@pytest.mark.parametrize("Estimator", GRADIENT_BOOSTING_ESTIMATORS)
def test_staged_functions_defensive(Estimator, global_random_seed):
    # Staged methods must yield defensive copies: mutating one yielded
    # array must not corrupt the others.
    rng = np.random.RandomState(global_random_seed)
    X = rng.uniform(size=(10, 3))
    y = (4 * X[:, 0]).astype(int) + 1  # don't predict zeros
    estimator = Estimator()
    estimator.fit(X, y)
    for method_name in ("predict", "decision_function", "predict_proba"):
        staged_func = getattr(estimator, "staged_" + method_name, None)
        if staged_func is None:
            # regressor has no staged_predict_proba
            continue
        with warnings.catch_warnings(record=True):
            staged_result = list(staged_func(X))
        staged_result[1][:] = 0
        assert np.all(staged_result[0] != 0)
def test_serialization():
    # Check that a fitted model survives a pickle round-trip unchanged.
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X, y)
    assert_array_equal(clf.predict(T), true_result)
    assert 100 == len(clf.estimators_)

    # The former `cPickle` fallback was a Python 2 relic; on Python 3 the
    # plain `pickle` module is the (C-accelerated) implementation.
    import pickle

    serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
    clf = None
    clf = pickle.loads(serialized_clf)
    assert_array_equal(clf.predict(T), true_result)
    assert 100 == len(clf.estimators_)
def test_degenerate_targets():
    # Fitting on a constant target: classification must fail (only one
    # class present) while regression must predict that constant.
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    constant_y = np.ones(len(X))
    with pytest.raises(ValueError):
        clf.fit(X, constant_y)

    reg = GradientBoostingRegressor(n_estimators=100, random_state=1)
    reg.fit(X, constant_y)
    # Two predict calls on random points, mirroring the original rng usage.
    reg.predict([rng.rand(2)])
    assert_array_equal(np.ones((1,), dtype=np.float64), reg.predict([rng.rand(2)]))
def test_quantile_loss(global_random_seed):
    # The quantile loss with alpha=0.5 must be equivalent to
    # absolute_error (both target the conditional median).
    median_model = GradientBoostingRegressor(
        n_estimators=100,
        loss="quantile",
        max_depth=4,
        alpha=0.5,
        random_state=global_random_seed,
    )
    median_model.fit(X_reg, y_reg)
    abs_error_model = GradientBoostingRegressor(
        n_estimators=100,
        loss="absolute_error",
        max_depth=4,
        random_state=global_random_seed,
    )
    abs_error_model.fit(X_reg, y_reg)
    assert_allclose(median_model.predict(X_reg), abs_error_model.predict(X_reg))
def test_symbol_labels():
    # Non-integer (string) class labels are supported.
    model = GradientBoostingClassifier(n_estimators=100, random_state=1)
    symbol_y = [str(label) for label in y]
    model.fit(X, symbol_y)
    assert_array_equal(model.predict(T), [str(label) for label in true_result])
    assert 100 == len(model.estimators_)
def test_float_class_labels():
    # Float class labels are supported.
    model = GradientBoostingClassifier(n_estimators=100, random_state=1)
    float_y = np.asarray(y, dtype=np.float32)
    model.fit(X, float_y)
    assert_array_equal(model.predict(T), np.asarray(true_result, dtype=np.float32))
    assert 100 == len(model.estimators_)
def test_shape_y():
    # A column-vector y must trigger a DataConversionWarning and then be
    # handled as if it had been raveled.
    model = GradientBoostingClassifier(n_estimators=100, random_state=1)
    y_column = np.asarray(y, dtype=np.int32)[:, np.newaxis]
    # This will raise a DataConversionWarning that we want to
    # "always" raise, elsewhere the warnings gets ignored in the
    # later tests, and the tests that check for this warning fail
    warn_msg = (
        "A column-vector y was passed when a 1d array was expected. "
        "Please change the shape of y to \\(n_samples, \\), for "
        "example using ravel()."
    )
    with pytest.warns(DataConversionWarning, match=warn_msg):
        model.fit(X, y_column)
    assert_array_equal(model.predict(T), true_result)
    assert 100 == len(model.estimators_)
def test_mem_layout():
    # Fitting must give identical results regardless of the memory layout
    # of X (C/Fortran order) and the layout/dtype of y.
    y_int = np.asarray(y, dtype=np.int32)
    layouts = [
        (np.asfortranarray(X), y),
        (np.ascontiguousarray(X), y),
        (X, np.ascontiguousarray(y_int)),
        (X, np.asfortranarray(y_int)),
    ]
    for X_, y_ in layouts:
        clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
        clf.fit(X_, y_)
        assert_array_equal(clf.predict(T), true_result)
        assert 100 == len(clf.estimators_)
@pytest.mark.parametrize("GradientBoostingEstimator", GRADIENT_BOOSTING_ESTIMATORS)
def test_oob_improvement(GradientBoostingEstimator):
    # oob_improvement_ must have one entry per boosting stage; the first
    # values are a hard-coded regression test - change the reference array
    # if the OOB computation is modified.
    model = GradientBoostingEstimator(n_estimators=100, random_state=1, subsample=0.5)
    model.fit(X, y)
    assert model.oob_improvement_.shape[0] == 100
    expected_head = np.array([0.19, 0.15, 0.12, -0.11, 0.11])
    assert_array_almost_equal(model.oob_improvement_[:5], expected_head, decimal=2)
@pytest.mark.parametrize("GradientBoostingEstimator", GRADIENT_BOOSTING_ESTIMATORS)
def test_oob_scores(GradientBoostingEstimator):
    # oob_scores_ must have one entry per fitted stage and its last entry
    # must be exposed as oob_score_.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    model = GradientBoostingEstimator(n_estimators=100, random_state=1, subsample=0.5)
    model.fit(X, y)
    assert model.oob_scores_.shape[0] == 100
    assert model.oob_scores_[-1] == pytest.approx(model.oob_score_)

    # With early stopping, fewer than n_estimators stages are fitted.
    model = GradientBoostingEstimator(
        n_estimators=100,
        random_state=1,
        subsample=0.5,
        n_iter_no_change=5,
    )
    model.fit(X, y)
    assert model.oob_scores_.shape[0] < 100
    assert model.oob_scores_[-1] == pytest.approx(model.oob_score_)
@pytest.mark.parametrize(
    "GradientBoostingEstimator, oob_attribute",
    [
        (GradientBoostingClassifier, "oob_improvement_"),
        (GradientBoostingClassifier, "oob_scores_"),
        (GradientBoostingClassifier, "oob_score_"),
        (GradientBoostingRegressor, "oob_improvement_"),
        (GradientBoostingRegressor, "oob_scores_"),
        (GradientBoostingRegressor, "oob_score_"),
    ],
)
def test_oob_attributes_error(GradientBoostingEstimator, oob_attribute):
    """
    Check that we raise an AttributeError when the OOB statistics were not computed.
    """
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    estimator = GradientBoostingEstimator(
        n_estimators=100,
        random_state=1,
        subsample=1.0,  # no subsampling, hence no OOB statistics
    )
    estimator.fit(X, y)
    with pytest.raises(AttributeError):
        # Bug fix: use the parametrized attribute name. The previous
        # `estimator.oob_attribute` looked up a literal, nonexistent
        # attribute, so the parametrization was never actually exercised.
        getattr(estimator, oob_attribute)
def test_oob_multilcass_iris():
    # Check OOB improvement on a multi-class dataset.
    # (NOTE: the "multilcass" typo in the name is kept on purpose: the
    # function name is the test's public identifier.)
    model = GradientBoostingClassifier(
        n_estimators=100, loss="log_loss", random_state=1, subsample=0.5
    )
    model.fit(iris.data, iris.target)
    score = model.score(iris.data, iris.target)
    assert score > 0.9
    assert model.oob_improvement_.shape[0] == model.n_estimators
    assert model.oob_scores_.shape[0] == model.n_estimators
    assert model.oob_scores_[-1] == pytest.approx(model.oob_score_)

    # With early stopping, fewer stages (and OOB entries) are fitted.
    model = GradientBoostingClassifier(
        n_estimators=100,
        loss="log_loss",
        random_state=1,
        subsample=0.5,
        n_iter_no_change=5,
    )
    model.fit(iris.data, iris.target)
    score = model.score(iris.data, iris.target)
    assert model.oob_improvement_.shape[0] < model.n_estimators
    assert model.oob_scores_.shape[0] < model.n_estimators
    assert model.oob_scores_[-1] == pytest.approx(model.oob_score_)
    # hard-coded regression test - change if modification in OOB computation
    # FIXME: the following snippet does not yield the same results on 32 bits
    # assert_array_almost_equal(model.oob_improvement_[:5],
    #                           np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
    #                           decimal=2)
@pytest.mark.thread_unsafe  # manually captured stdout
def test_verbose_output():
    # Check the report printed with verbose=1 (with the OOB column, since
    # subsample < 1): a header, one line per iteration for 1-10, then one
    # line every 10 iterations.
    import sys
    from io import StringIO

    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        # try/finally restores sys.stdout even if fit() raises, so a
        # failure here cannot silence the output of subsequent tests.
        clf = GradientBoostingClassifier(
            n_estimators=100, random_state=1, verbose=1, subsample=0.8
        )
        clf.fit(X, y)
        verbose_output = sys.stdout
    finally:
        sys.stdout = old_stdout

    # check output
    verbose_output.seek(0)
    header = verbose_output.readline().rstrip()
    # with OOB
    true_header = " ".join(["%10s"] + ["%16s"] * 3) % (
        "Iter",
        "Train Loss",
        "OOB Improve",
        "Remaining Time",
    )
    assert true_header == header

    n_lines = len(verbose_output.readlines())
    # one for 1-10 and then 9 for 20-100
    assert 10 + 9 == n_lines
@pytest.mark.thread_unsafe  # manually captured stdout
def test_more_verbose_output():
    # Check the report printed with verbose=2 (no OOB column): a header
    # followed by one line per boosting iteration.
    import sys
    from io import StringIO

    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        # try/finally restores sys.stdout even if fit() raises, so a
        # failure here cannot silence the output of subsequent tests.
        clf = GradientBoostingClassifier(n_estimators=100, random_state=1, verbose=2)
        clf.fit(X, y)
        verbose_output = sys.stdout
    finally:
        sys.stdout = old_stdout

    # check output
    verbose_output.seek(0)
    header = verbose_output.readline().rstrip()
    # no OOB
    true_header = " ".join(["%10s"] + ["%16s"] * 2) % (
        "Iter",
        "Train Loss",
        "Remaining Time",
    )
    assert true_header == header

    n_lines = len(verbose_output.readlines())
    # 100 lines for n_estimators==100
    assert 100 == n_lines
@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS)
def test_warm_start(Cls, global_random_seed):
    # Growing an ensemble via warm_start must match fitting from scratch.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=global_random_seed)
    reference = Cls(n_estimators=200, max_depth=1, random_state=global_random_seed)
    reference.fit(X, y)
    warm = Cls(
        n_estimators=100, max_depth=1, warm_start=True, random_state=global_random_seed
    )
    warm.fit(X, y)
    warm.set_params(n_estimators=200)
    warm.fit(X, y)
    if Cls is GradientBoostingRegressor:
        assert_allclose(warm.predict(X), reference.predict(X))
    else:
        # The random state is preserved, hence predict_proba must also
        # be the same.
        assert_array_equal(warm.predict(X), reference.predict(X))
        assert_allclose(warm.predict_proba(X), reference.predict_proba(X))
@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS)
def test_warm_start_n_estimators(Cls, global_random_seed):
    # Same as test_warm_start but growing by a larger step (100 -> 300).
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=global_random_seed)
    reference = Cls(n_estimators=300, max_depth=1, random_state=global_random_seed)
    reference.fit(X, y)
    warm = Cls(
        n_estimators=100, max_depth=1, warm_start=True, random_state=global_random_seed
    )
    warm.fit(X, y)
    warm.set_params(n_estimators=300)
    warm.fit(X, y)
    assert_allclose(warm.predict(X), reference.predict(X))
@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS)
def test_warm_start_max_depth(Cls):
    # Trees grown after a warm restart may use a different max_depth.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    model = Cls(n_estimators=100, max_depth=1, warm_start=True)
    model.fit(X, y)
    model.set_params(n_estimators=110, max_depth=2)
    model.fit(X, y)
    # The original trees are stumps, the 10 extra trees have depth 2.
    assert model.estimators_[0, 0].max_depth == 1
    for offset in range(1, 11):
        assert model.estimators_[-offset, 0].max_depth == 2
@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS)
def test_warm_start_clear(Cls):
    # Turning warm_start back off must clear the previously fitted state.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    reference = Cls(n_estimators=100, max_depth=1)
    reference.fit(X, y)
    model = Cls(n_estimators=100, max_depth=1, warm_start=True)
    model.fit(X, y)  # initializes state
    model.set_params(warm_start=False)
    model.fit(X, y)  # clears old state; must now equal `reference`
    assert_array_almost_equal(model.predict(X), reference.predict(X))
@pytest.mark.parametrize("GradientBoosting", GRADIENT_BOOSTING_ESTIMATORS)
def test_warm_start_state_oob_scores(GradientBoosting):
    """
    Check that the states of the OOB scores are cleared when used with `warm_start`.
    """
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    n_estimators = 100
    estimator = GradientBoosting(
        n_estimators=n_estimators,
        max_depth=1,
        subsample=0.5,
        warm_start=True,
        random_state=1,
    )
    estimator.fit(X, y)
    first_scores, first_score = estimator.oob_scores_, estimator.oob_score_
    assert len(first_scores) == n_estimators
    assert first_scores[-1] == pytest.approx(first_score)

    # Growing the ensemble keeps the previous OOB history as a prefix.
    n_more_estimators = 200
    estimator.set_params(n_estimators=n_more_estimators).fit(X, y)
    assert len(estimator.oob_scores_) == n_more_estimators
    assert_allclose(estimator.oob_scores_[:n_estimators], first_scores)

    # A cold restart recomputes fresh (but numerically equal) statistics.
    estimator.set_params(n_estimators=n_estimators, warm_start=False).fit(X, y)
    assert estimator.oob_scores_ is not first_scores
    assert estimator.oob_score_ is not first_score
    assert_allclose(estimator.oob_scores_, first_scores)
    assert estimator.oob_score_ == pytest.approx(first_score)
    assert first_scores[-1] == pytest.approx(first_score)
@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS)
def test_warm_start_smaller_n_estimators(Cls):
    # Shrinking n_estimators on a warm-started model must raise.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    model = Cls(n_estimators=100, max_depth=1, warm_start=True)
    model.fit(X, y)
    model.set_params(n_estimators=99)
    with pytest.raises(ValueError):
        model.fit(X, y)
@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS)
def test_warm_start_equal_n_estimators(Cls):
    # Refitting with warm_start and an unchanged n_estimators is a no-op.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    model = Cls(n_estimators=100, max_depth=1)
    model.fit(X, y)
    refit = clone(model)
    refit.set_params(n_estimators=model.n_estimators, warm_start=True)
    refit.fit(X, y)
    assert_array_almost_equal(refit.predict(X), model.predict(X))
@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS)
def test_warm_start_oob_switch(Cls):
    # Subsampling (and thus OOB tracking) can be turned on during warm start.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    model = Cls(n_estimators=100, max_depth=1, warm_start=True)
    model.fit(X, y)
    model.set_params(n_estimators=110, subsample=0.5)
    model.fit(X, y)
    # The first 100 stages were fit without OOB statistics...
    assert_array_equal(model.oob_improvement_[:100], np.zeros(100))
    assert_array_equal(model.oob_scores_[:100], np.zeros(100))
    # ...while the 10 new stages carry real (non-zero) OOB statistics.
    assert (model.oob_improvement_[-10:] != 0.0).all()
    assert (model.oob_scores_[-10:] != 0.0).all()
    assert model.oob_scores_[-1] == pytest.approx(model.oob_score_)
@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS)
def test_warm_start_oob(Cls):
    # OOB statistics accumulated across warm starts must match a cold fit.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    reference = Cls(n_estimators=200, max_depth=1, subsample=0.5, random_state=1)
    reference.fit(X, y)
    warm = Cls(
        n_estimators=100, max_depth=1, subsample=0.5, random_state=1, warm_start=True
    )
    warm.fit(X, y)
    warm.set_params(n_estimators=200)
    warm.fit(X, y)
    assert_array_almost_equal(
        warm.oob_improvement_[:100], reference.oob_improvement_[:100]
    )
    assert_array_almost_equal(warm.oob_scores_[:100], reference.oob_scores_[:100])
    assert reference.oob_scores_[-1] == pytest.approx(reference.oob_score_)
    assert warm.oob_scores_[-1] == pytest.approx(warm.oob_score_)
@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS)
@pytest.mark.parametrize(
"sparse_container", COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS
)
def test_warm_start_sparse(Cls, sparse_container):
# Test that all sparse matrix types are supported
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/ensemble/tests/test_common.py | sklearn/ensemble/tests/test_common.py | import numpy as np
import pytest
from sklearn.base import ClassifierMixin, clone, is_classifier
from sklearn.datasets import (
load_diabetes,
load_iris,
make_classification,
make_regression,
)
from sklearn.ensemble import (
RandomForestClassifier,
RandomForestRegressor,
StackingClassifier,
StackingRegressor,
VotingClassifier,
VotingRegressor,
)
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.svm import SVC, SVR, LinearSVC
# Shared module-level datasets: iris for the classifier tests,
# diabetes for the regressor tests.
X, y = load_iris(return_X_y=True)
X_r, y_r = load_diabetes(return_X_y=True)
@pytest.mark.parametrize(
    "X, y, estimator",
    [
        (
            *make_classification(n_samples=10),
            StackingClassifier(
                estimators=[
                    ("lr", LogisticRegression()),
                    ("svm", LinearSVC()),
                    ("rf", RandomForestClassifier(n_estimators=5, max_depth=3)),
                ],
                cv=2,
            ),
        ),
        (
            *make_classification(n_samples=10),
            VotingClassifier(
                estimators=[
                    ("lr", LogisticRegression()),
                    ("svm", LinearSVC()),
                    ("rf", RandomForestClassifier(n_estimators=5, max_depth=3)),
                ]
            ),
        ),
        (
            *make_regression(n_samples=10),
            StackingRegressor(
                estimators=[
                    ("lr", LinearRegression()),
                    ("svm", SVR(kernel="linear")),
                    ("rf", RandomForestRegressor(n_estimators=5, max_depth=3)),
                ],
                cv=2,
            ),
        ),
        (
            *make_regression(n_samples=10),
            VotingRegressor(
                estimators=[
                    ("lr", LinearRegression()),
                    ("svm", SVR(kernel="linear")),
                    ("rf", RandomForestRegressor(n_estimators=5, max_depth=3)),
                ]
            ),
        ),
    ],
    ids=[
        "stacking-classifier",
        "voting-classifier",
        "stacking-regressor",
        "voting-regressor",
    ],
)
def test_ensemble_heterogeneous_estimators_behavior(X, y, estimator):
    # check that the behavior of `estimators`, `estimators_`,
    # `named_estimators`, `named_estimators_` is consistent across all
    # ensemble classes and when using `set_params()`.
    estimator = clone(estimator)  # Avoid side effects from shared instances
    # before fit: attribute-style and key-style access to
    # `named_estimators` must return the same object as `estimators`
    assert "svm" in estimator.named_estimators
    assert estimator.named_estimators.svm is estimator.estimators[1][1]
    assert estimator.named_estimators.svm is estimator.named_estimators["svm"]
    # check fitted attributes
    estimator.fit(X, y)
    assert len(estimator.named_estimators) == 3
    assert len(estimator.named_estimators_) == 3
    assert sorted(list(estimator.named_estimators_.keys())) == sorted(
        ["lr", "svm", "rf"]
    )
    # check that set_params() does not add a new attribute
    estimator_new_params = clone(estimator)
    svm_estimator = SVC() if is_classifier(estimator) else SVR()
    estimator_new_params.set_params(svm=svm_estimator).fit(X, y)
    assert not hasattr(estimator_new_params, "svm")
    # replacing `svm` must leave the other sub-estimators' params untouched
    assert (
        estimator_new_params.named_estimators.lr.get_params()
        == estimator.named_estimators.lr.get_params()
    )
    assert (
        estimator_new_params.named_estimators.rf.get_params()
        == estimator.named_estimators.rf.get_params()
    )
    # check the behavior when setting and dropping an estimator
    estimator_dropped = clone(estimator)
    estimator_dropped.set_params(svm="drop")
    estimator_dropped.fit(X, y)
    assert len(estimator_dropped.named_estimators) == 3
    assert estimator_dropped.named_estimators.svm == "drop"
    assert len(estimator_dropped.named_estimators_) == 3
    assert sorted(list(estimator_dropped.named_estimators_.keys())) == sorted(
        ["lr", "svm", "rf"]
    )
    for sub_est in estimator_dropped.named_estimators_:
        # check that the correspondence is correct: none of the remaining
        # fitted sub-estimators is of the dropped estimator's type
        assert not isinstance(sub_est, type(estimator.named_estimators.svm))
    # check that we can set the parameters of the underlying classifier
    # through the `<name>__<param>` nested syntax
    estimator.set_params(svm__C=10.0)
    estimator.set_params(rf__max_depth=5)
    assert (
        estimator.get_params()["svm__C"]
        == estimator.get_params()["svm"].get_params()["C"]
    )
    assert (
        estimator.get_params()["rf__max_depth"]
        == estimator.get_params()["rf"].get_params()["max_depth"]
    )
@pytest.mark.parametrize(
    "Ensemble",
    [VotingClassifier, StackingRegressor, VotingRegressor],
)
def test_ensemble_heterogeneous_estimators_type(Ensemble):
    # Validation must fail when the underlying estimators are not of the
    # expected kind (classifier vs regressor). StackingClassifier is not
    # checked here because it may legitimately wrap a regressor.
    if issubclass(Ensemble, ClassifierMixin):
        X, y = make_classification(n_samples=10)
        estimators = [("lr", LinearRegression())]
        ensemble_type = "classifier"
    else:
        X, y = make_regression(n_samples=10)
        estimators = [("lr", LogisticRegression())]
        ensemble_type = "regressor"
    ensemble = Ensemble(estimators=estimators)
    with pytest.raises(ValueError, match="should be a {}".format(ensemble_type)):
        ensemble.fit(X, y)
@pytest.mark.parametrize(
    "X, y, Ensemble",
    [
        (*make_classification(n_samples=10), StackingClassifier),
        (*make_classification(n_samples=10), VotingClassifier),
        (*make_regression(n_samples=10), StackingRegressor),
        (*make_regression(n_samples=10), VotingRegressor),
    ],
)
def test_ensemble_heterogeneous_estimators_name_validation(X, y, Ensemble):
    # Estimator names must be dunder-free, unique, and must not clash with
    # constructor arguments.
    if issubclass(Ensemble, ClassifierMixin):
        Estimator = LogisticRegression
    else:
        Estimator = LinearRegression

    # raise an error when the name contains dunder
    ensemble = Ensemble(estimators=[("lr__", Estimator())])
    err_msg = r"Estimator names must not contain __: got \['lr__'\]"
    with pytest.raises(ValueError, match=err_msg):
        ensemble.fit(X, y)

    # raise an error when the name is not unique
    ensemble = Ensemble(estimators=[("lr", Estimator()), ("lr", Estimator())])
    err_msg = r"Names provided are not unique: \['lr', 'lr'\]"
    with pytest.raises(ValueError, match=err_msg):
        ensemble.fit(X, y)

    # raise an error when the name conflicts with the parameters
    ensemble = Ensemble(estimators=[("estimators", Estimator())])
    err_msg = "Estimator names conflict with constructor arguments"
    with pytest.raises(ValueError, match=err_msg):
        ensemble.fit(X, y)
@pytest.mark.parametrize(
    "X, y, estimator",
    [
        (
            *make_classification(n_samples=10),
            StackingClassifier(estimators=[("lr", LogisticRegression())]),
        ),
        (
            *make_classification(n_samples=10),
            VotingClassifier(estimators=[("lr", LogisticRegression())]),
        ),
        (
            *make_regression(n_samples=10),
            StackingRegressor(estimators=[("lr", LinearRegression())]),
        ),
        (
            *make_regression(n_samples=10),
            VotingRegressor(estimators=[("lr", LinearRegression())]),
        ),
    ],
    ids=[
        "stacking-classifier",
        "voting-classifier",
        "stacking-regressor",
        "voting-regressor",
    ],
)
def test_ensemble_heterogeneous_estimators_all_dropped(X, y, estimator):
    # check that we raise a consistent error when all estimators are
    # dropped
    # Clone first so the shared parametrized instance is not mutated by
    # set_params across test reruns (same pattern as the other
    # parametrized ensemble tests in this module).
    estimator = clone(estimator)
    estimator.set_params(lr="drop")
    with pytest.raises(ValueError, match="All estimators are dropped."):
        estimator.fit(X, y)
@pytest.mark.parametrize(
    "Ensemble, Estimator, X, y",
    [
        (StackingClassifier, LogisticRegression, X, y),
        (StackingRegressor, LinearRegression, X_r, y_r),
        (VotingClassifier, LogisticRegression, X, y),
        (VotingRegressor, LinearRegression, X_r, y_r),
    ],
)
# FIXME: we should move this test in `estimator_checks` once we are able
# to construct meta-estimator instances
def test_heterogeneous_ensemble_support_missing_values(Ensemble, Estimator, X, y):
    # check that Voting and Stacking predictor delegate the missing values
    # validation to the underlying estimator.
    X = X.copy()
    # Use a seeded RNG instead of the global numpy random state so the
    # NaN mask (and hence the test) is deterministic across runs.
    rng = np.random.RandomState(42)
    mask = rng.choice([1, 0], X.shape, p=[0.1, 0.9]).astype(bool)
    X[mask] = np.nan
    pipe = make_pipeline(SimpleImputer(), Estimator())
    ensemble = Ensemble(estimators=[("pipe1", pipe), ("pipe2", pipe)])
    ensemble.fit(X, y).score(X, y)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/ensemble/tests/test_base.py | sklearn/ensemble/tests/test_base.py | """
Testing for the base module (sklearn.ensemble.base).
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from collections import OrderedDict
import numpy as np
from sklearn.datasets import load_iris
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble._base import _set_random_states
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
def test_base():
# Check BaseEnsemble methods.
ensemble = BaggingClassifier(
estimator=Perceptron(random_state=None), n_estimators=3
)
iris = load_iris()
ensemble.fit(iris.data, iris.target)
ensemble.estimators_ = [] # empty the list and create estimators manually
ensemble._make_estimator()
random_state = np.random.RandomState(3)
ensemble._make_estimator(random_state=random_state)
ensemble._make_estimator(random_state=random_state)
ensemble._make_estimator(append=False)
assert 3 == len(ensemble)
assert 3 == len(ensemble.estimators_)
assert isinstance(ensemble[0], Perceptron)
assert ensemble[0].random_state is None
assert isinstance(ensemble[1].random_state, int)
assert isinstance(ensemble[2].random_state, int)
assert ensemble[1].random_state != ensemble[2].random_state
np_int_ensemble = BaggingClassifier(
estimator=Perceptron(), n_estimators=np.int32(3)
)
np_int_ensemble.fit(iris.data, iris.target)
def test_set_random_states():
# Linear Discriminant Analysis doesn't have random state: smoke test
_set_random_states(LinearDiscriminantAnalysis(), random_state=17)
clf1 = Perceptron(random_state=None)
assert clf1.random_state is None
# check random_state is None still sets
_set_random_states(clf1, None)
assert isinstance(clf1.random_state, int)
# check random_state fixes results in consistent initialisation
_set_random_states(clf1, 3)
assert isinstance(clf1.random_state, int)
clf2 = Perceptron(random_state=None)
_set_random_states(clf2, 3)
assert clf1.random_state == clf2.random_state
# nested random_state
def make_steps():
return [
("sel", SelectFromModel(Perceptron(random_state=None))),
("clf", Perceptron(random_state=None)),
]
est1 = Pipeline(make_steps())
_set_random_states(est1, 3)
assert isinstance(est1.steps[0][1].estimator.random_state, int)
assert isinstance(est1.steps[1][1].random_state, int)
assert (
est1.get_params()["sel__estimator__random_state"]
!= est1.get_params()["clf__random_state"]
)
# ensure multiple random_state parameters are invariant to get_params()
# iteration order
class AlphaParamPipeline(Pipeline):
def get_params(self, *args, **kwargs):
params = Pipeline.get_params(self, *args, **kwargs).items()
return OrderedDict(sorted(params))
class RevParamPipeline(Pipeline):
def get_params(self, *args, **kwargs):
params = Pipeline.get_params(self, *args, **kwargs).items()
return OrderedDict(sorted(params, reverse=True))
for cls in [AlphaParamPipeline, RevParamPipeline]:
est2 = cls(make_steps())
_set_random_states(est2, 3)
assert (
est1.get_params()["sel__estimator__random_state"]
== est2.get_params()["sel__estimator__random_state"]
)
assert (
est1.get_params()["clf__random_state"]
== est2.get_params()["clf__random_state"]
)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/ensemble/tests/test_iforest.py | sklearn/ensemble/tests/test_iforest.py | """
Testing for Isolation Forest algorithm (sklearn.ensemble.iforest).
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from unittest.mock import Mock, patch
import numpy as np
import pytest
from joblib import parallel_backend
from sklearn.datasets import load_diabetes, load_iris, make_classification
from sklearn.ensemble import IsolationForest
from sklearn.ensemble._iforest import _average_path_length
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import ParameterGrid, train_test_split
from sklearn.utils import check_random_state
from sklearn.utils._testing import (
assert_allclose,
assert_array_almost_equal,
assert_array_equal,
ignore_warnings,
)
from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS
# load iris & diabetes dataset
iris = load_iris()
diabetes = load_diabetes()
def test_iforest(global_random_seed):
"""Check Isolation Forest for various parameter settings."""
X_train = np.array([[0, 1], [1, 2]])
X_test = np.array([[2, 1], [1, 1]])
grid = ParameterGrid(
{"n_estimators": [3], "max_samples": [0.5, 1.0, 3], "bootstrap": [True, False]}
)
with ignore_warnings():
for params in grid:
IsolationForest(random_state=global_random_seed, **params).fit(
X_train
).predict(X_test)
@pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS)
def test_iforest_sparse(global_random_seed, sparse_container):
"""Check IForest for various parameter settings on sparse input."""
rng = check_random_state(global_random_seed)
X_train, X_test = train_test_split(diabetes.data[:50], random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0], "bootstrap": [True, False]})
X_train_sparse = sparse_container(X_train)
X_test_sparse = sparse_container(X_test)
for params in grid:
# Trained on sparse format
sparse_classifier = IsolationForest(
n_estimators=10, random_state=global_random_seed, **params
).fit(X_train_sparse)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_classifier = IsolationForest(
n_estimators=10, random_state=global_random_seed, **params
).fit(X_train)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
def test_iforest_error():
"""Test that it gives proper exception on deficient input."""
X = iris.data
# The dataset has less than 256 samples, explicitly setting
# max_samples > n_samples should result in a warning. If not set
# explicitly there should be no warning
warn_msg = "max_samples will be set to n_samples for estimation"
with pytest.warns(UserWarning, match=warn_msg):
IsolationForest(max_samples=1000).fit(X)
with warnings.catch_warnings():
warnings.simplefilter("error", UserWarning)
IsolationForest(max_samples="auto").fit(X)
with warnings.catch_warnings():
warnings.simplefilter("error", UserWarning)
IsolationForest(max_samples=np.int64(2)).fit(X)
# test X_test n_features match X_train one:
with pytest.raises(ValueError):
IsolationForest().fit(X).predict(X[:, 1:])
def test_recalculate_max_depth():
"""Check max_depth recalculation when max_samples is reset to n_samples"""
X = iris.data
clf = IsolationForest().fit(X)
for est in clf.estimators_:
assert est.max_depth == int(np.ceil(np.log2(X.shape[0])))
def test_max_samples_attribute():
X = iris.data
clf = IsolationForest().fit(X)
assert clf.max_samples_ == X.shape[0]
clf = IsolationForest(max_samples=500)
warn_msg = "max_samples will be set to n_samples for estimation"
with pytest.warns(UserWarning, match=warn_msg):
clf.fit(X)
assert clf.max_samples_ == X.shape[0]
clf = IsolationForest(max_samples=0.4).fit(X)
assert clf.max_samples_ == 0.4 * X.shape[0]
def test_iforest_parallel_regression(global_random_seed):
"""Check parallel regression."""
rng = check_random_state(global_random_seed)
X_train, X_test = train_test_split(diabetes.data, random_state=rng)
ensemble = IsolationForest(n_jobs=3, random_state=global_random_seed).fit(X_train)
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y2)
ensemble = IsolationForest(n_jobs=1, random_state=global_random_seed).fit(X_train)
y3 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y3)
def test_iforest_performance(global_random_seed):
"""Test Isolation Forest performs well"""
# Generate train/test data
rng = check_random_state(global_random_seed)
X = 0.3 * rng.randn(600, 2)
X = rng.permutation(np.vstack((X + 2, X - 2)))
X_train = X[:1000]
# Generate some abnormal novel observations
X_outliers = rng.uniform(low=-1, high=1, size=(200, 2))
X_test = np.vstack((X[1000:], X_outliers))
y_test = np.array([0] * 200 + [1] * 200)
# fit the model
clf = IsolationForest(max_samples=100, random_state=rng).fit(X_train)
# predict scores (the lower, the more normal)
y_pred = -clf.decision_function(X_test)
# check that there is at most 6 errors (false positive or false negative)
assert roc_auc_score(y_test, y_pred) > 0.98
@pytest.mark.parametrize("contamination", [0.25, "auto"])
def test_iforest_works(contamination, global_random_seed):
# toy sample (the last two samples are outliers)
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [7, 4], [-5, 9]]
# Test IsolationForest
clf = IsolationForest(random_state=global_random_seed, contamination=contamination)
clf.fit(X)
decision_func = -clf.decision_function(X)
pred = clf.predict(X)
# assert detect outliers:
assert np.min(decision_func[-2:]) > np.max(decision_func[:-2])
assert_array_equal(pred, 6 * [1] + 2 * [-1])
def test_max_samples_consistency():
# Make sure validated max_samples in iforest and BaseBagging are identical
X = iris.data
clf = IsolationForest().fit(X)
assert clf.max_samples_ == clf._max_samples
def test_iforest_subsampled_features():
# It tests non-regression for #5732 which failed at predict.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(
diabetes.data[:50], diabetes.target[:50], random_state=rng
)
clf = IsolationForest(max_features=0.8)
clf.fit(X_train, y_train)
clf.predict(X_test)
def test_iforest_average_path_length():
# It tests non-regression for #8549 which used the wrong formula
# for average path length, strictly for the integer case
# Updated to check average path length when input is <= 2 (issue #11839)
result_one = 2.0 * (np.log(4.0) + np.euler_gamma) - 2.0 * 4.0 / 5.0
result_two = 2.0 * (np.log(998.0) + np.euler_gamma) - 2.0 * 998.0 / 999.0
assert_allclose(_average_path_length([0]), [0.0])
assert_allclose(_average_path_length([1]), [0.0])
assert_allclose(_average_path_length([2]), [1.0])
assert_allclose(_average_path_length([5]), [result_one])
assert_allclose(_average_path_length([999]), [result_two])
assert_allclose(
_average_path_length(np.array([1, 2, 5, 999])),
[0.0, 1.0, result_one, result_two],
)
# _average_path_length is increasing
avg_path_length = _average_path_length(np.arange(5))
assert_array_equal(avg_path_length, np.sort(avg_path_length))
def test_score_samples():
X_train = [[1, 1], [1, 2], [2, 1]]
clf1 = IsolationForest(contamination=0.1).fit(X_train)
clf2 = IsolationForest().fit(X_train)
assert_array_equal(
clf1.score_samples([[2.0, 2.0]]),
clf1.decision_function([[2.0, 2.0]]) + clf1.offset_,
)
assert_array_equal(
clf2.score_samples([[2.0, 2.0]]),
clf2.decision_function([[2.0, 2.0]]) + clf2.offset_,
)
assert_array_equal(
clf1.score_samples([[2.0, 2.0]]), clf2.score_samples([[2.0, 2.0]])
)
def test_iforest_warm_start():
"""Test iterative addition of iTrees to an iForest"""
rng = check_random_state(0)
X = rng.randn(20, 2)
# fit first 10 trees
clf = IsolationForest(
n_estimators=10, max_samples=20, random_state=rng, warm_start=True
)
clf.fit(X)
# remember the 1st tree
tree_1 = clf.estimators_[0]
# fit another 10 trees
clf.set_params(n_estimators=20)
clf.fit(X)
# expecting 20 fitted trees and no overwritten trees
assert len(clf.estimators_) == 20
assert clf.estimators_[0] is tree_1
# mock get_chunk_n_rows to actually test more than one chunk (here one
# chunk has 3 rows):
@patch(
"sklearn.ensemble._iforest.get_chunk_n_rows",
side_effect=Mock(**{"return_value": 3}),
)
@pytest.mark.parametrize("contamination, n_predict_calls", [(0.25, 3), ("auto", 2)])
@pytest.mark.thread_unsafe # monkeypatched code
def test_iforest_chunks_works1(
mocked_get_chunk, contamination, n_predict_calls, global_random_seed
):
test_iforest_works(contamination, global_random_seed)
assert mocked_get_chunk.call_count == n_predict_calls
# idem with chunk_size = 10 rows
@patch(
"sklearn.ensemble._iforest.get_chunk_n_rows",
side_effect=Mock(**{"return_value": 10}),
)
@pytest.mark.parametrize("contamination, n_predict_calls", [(0.25, 3), ("auto", 2)])
@pytest.mark.thread_unsafe # monkeypatched code
def test_iforest_chunks_works2(
mocked_get_chunk, contamination, n_predict_calls, global_random_seed
):
test_iforest_works(contamination, global_random_seed)
assert mocked_get_chunk.call_count == n_predict_calls
def test_iforest_with_uniform_data():
"""Test whether iforest predicts inliers when using uniform data"""
# 2-d array of all 1s
X = np.ones((100, 10))
iforest = IsolationForest()
iforest.fit(X)
rng = np.random.RandomState(0)
assert all(iforest.predict(X) == 1)
assert all(iforest.predict(rng.randn(100, 10)) == 1)
assert all(iforest.predict(X + 1) == 1)
assert all(iforest.predict(X - 1) == 1)
# 2-d array where columns contain the same value across rows
X = np.repeat(rng.randn(1, 10), 100, 0)
iforest = IsolationForest()
iforest.fit(X)
assert all(iforest.predict(X) == 1)
assert all(iforest.predict(rng.randn(100, 10)) == 1)
assert all(iforest.predict(np.ones((100, 10))) == 1)
# Single row
X = rng.randn(1, 10)
iforest = IsolationForest()
iforest.fit(X)
assert all(iforest.predict(X) == 1)
assert all(iforest.predict(rng.randn(100, 10)) == 1)
assert all(iforest.predict(np.ones((100, 10))) == 1)
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
def test_iforest_with_n_jobs_does_not_segfault(csc_container):
"""Check that Isolation Forest does not segfault with n_jobs=2
Non-regression test for #23252
"""
X, _ = make_classification(n_samples=85_000, n_features=100, random_state=0)
X = csc_container(X)
IsolationForest(n_estimators=10, max_samples=256, n_jobs=2).fit(X)
def test_iforest_preserve_feature_names():
"""Check that feature names are preserved when contamination is not "auto".
Feature names are required for consistency checks during scoring.
Non-regression test for Issue #25844
"""
pd = pytest.importorskip("pandas")
rng = np.random.RandomState(0)
X = pd.DataFrame(data=rng.randn(4), columns=["a"])
model = IsolationForest(random_state=0, contamination=0.05)
with warnings.catch_warnings():
warnings.simplefilter("error", UserWarning)
model.fit(X)
@pytest.mark.parametrize("sparse_container", CSC_CONTAINERS + CSR_CONTAINERS)
def test_iforest_sparse_input_float_contamination(sparse_container):
"""Check that `IsolationForest` accepts sparse matrix input and float value for
contamination.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/27626
"""
X, _ = make_classification(n_samples=50, n_features=4, random_state=0)
X = sparse_container(X)
X.sort_indices()
contamination = 0.1
iforest = IsolationForest(
n_estimators=5, contamination=contamination, random_state=0
).fit(X)
X_decision = iforest.decision_function(X)
assert (X_decision < 0).sum() / X.shape[0] == pytest.approx(contamination)
@pytest.mark.parametrize("n_jobs", [1, 2])
@pytest.mark.parametrize("contamination", [0.25, "auto"])
def test_iforest_predict_parallel(global_random_seed, contamination, n_jobs):
"""Check that `IsolationForest.predict` is parallelized."""
# toy sample (the last two samples are outliers)
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [7, 4], [-5, 9]]
# Test IsolationForest
clf = IsolationForest(
random_state=global_random_seed, contamination=contamination, n_jobs=None
)
clf.fit(X)
decision_func = -clf.decision_function(X)
pred = clf.predict(X)
# assert detect outliers:
assert np.min(decision_func[-2:]) > np.max(decision_func[:-2])
assert_array_equal(pred, 6 * [1] + 2 * [-1])
clf_parallel = IsolationForest(
random_state=global_random_seed, contamination=contamination, n_jobs=-1
)
clf_parallel.fit(X)
with parallel_backend("threading", n_jobs=n_jobs):
pred_paralell = clf_parallel.predict(X)
# assert the same results as non-parallel
assert_array_equal(pred, pred_paralell)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/ensemble/tests/test_voting.py | sklearn/ensemble/tests/test_voting.py | """Testing for the VotingClassifier and VotingRegressor"""
import re
import numpy as np
import pytest
from sklearn import config_context, datasets
from sklearn.base import BaseEstimator, ClassifierMixin, clone
from sklearn.calibration import CalibratedClassifierCV
from sklearn.datasets import make_multilabel_classification
from sklearn.dummy import DummyRegressor
from sklearn.ensemble import (
RandomForestClassifier,
RandomForestRegressor,
VotingClassifier,
VotingRegressor,
)
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split
from sklearn.multiclass import OneVsRestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.tests.metadata_routing_common import (
ConsumingClassifier,
ConsumingRegressor,
_Registry,
check_recorded_metadata,
)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils._testing import (
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
)
# Load datasets
iris = datasets.load_iris()
X, y = iris.data[:, 1:3], iris.target
# Scaled to solve ConvergenceWarning throw by Logistic Regression
X_scaled = StandardScaler().fit_transform(X)
X_r, y_r = datasets.load_diabetes(return_X_y=True)
@pytest.mark.parametrize(
"params, err_msg",
[
(
{"estimators": []},
"Invalid 'estimators' attribute, 'estimators' should be a non-empty list",
),
(
{"estimators": [LogisticRegression()]},
"Invalid 'estimators' attribute, 'estimators' should be a non-empty list",
),
(
{"estimators": [(213, LogisticRegression())]},
"Invalid 'estimators' attribute, 'estimators' should be a non-empty list",
),
(
{"estimators": [("lr", LogisticRegression())], "weights": [1, 2]},
"Number of `estimators` and weights must be equal",
),
],
)
def test_voting_classifier_estimator_init(params, err_msg):
ensemble = VotingClassifier(**params)
with pytest.raises(ValueError, match=err_msg):
ensemble.fit(X, y)
def test_predictproba_hardvoting():
eclf = VotingClassifier(
estimators=[("lr1", LogisticRegression()), ("lr2", LogisticRegression())],
voting="hard",
)
inner_msg = "predict_proba is not available when voting='hard'"
outer_msg = "'VotingClassifier' has no attribute 'predict_proba'"
with pytest.raises(AttributeError, match=outer_msg) as exec_info:
eclf.predict_proba
assert isinstance(exec_info.value.__cause__, AttributeError)
assert inner_msg in str(exec_info.value.__cause__)
assert not hasattr(eclf, "predict_proba")
eclf.fit(X_scaled, y)
assert not hasattr(eclf, "predict_proba")
def test_notfitted():
eclf = VotingClassifier(
estimators=[("lr1", LogisticRegression()), ("lr2", LogisticRegression())],
voting="soft",
)
ereg = VotingRegressor([("dr", DummyRegressor())])
msg = (
"This %s instance is not fitted yet. Call 'fit'"
" with appropriate arguments before using this estimator."
)
with pytest.raises(NotFittedError, match=msg % "VotingClassifier"):
eclf.predict(X)
with pytest.raises(NotFittedError, match=msg % "VotingClassifier"):
eclf.predict_proba(X)
with pytest.raises(NotFittedError, match=msg % "VotingClassifier"):
eclf.transform(X)
with pytest.raises(NotFittedError, match=msg % "VotingRegressor"):
ereg.predict(X_r)
with pytest.raises(NotFittedError, match=msg % "VotingRegressor"):
ereg.transform(X_r)
def test_majority_label_iris(global_random_seed):
"""Check classification by majority label on dataset iris."""
clf1 = LogisticRegression(random_state=global_random_seed)
clf2 = RandomForestClassifier(n_estimators=10, random_state=global_random_seed)
clf3 = GaussianNB()
eclf = VotingClassifier(
estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], voting="hard"
)
scores = cross_val_score(eclf, X, y, scoring="accuracy")
assert scores.mean() >= 0.9
def test_tie_situation():
"""Check voting classifier selects smaller class label in tie situation."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
eclf = VotingClassifier(estimators=[("lr", clf1), ("rf", clf2)], voting="hard")
assert clf1.fit(X, y).predict(X)[52] == 2
assert clf2.fit(X, y).predict(X)[52] == 1
assert eclf.fit(X, y).predict(X)[52] == 1
def test_weights_iris(global_random_seed):
"""Check classification by average probabilities on dataset iris."""
clf1 = LogisticRegression(random_state=global_random_seed)
clf2 = RandomForestClassifier(n_estimators=10, random_state=global_random_seed)
clf3 = GaussianNB()
eclf = VotingClassifier(
estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)],
voting="soft",
weights=[1, 2, 10],
)
scores = cross_val_score(eclf, X_scaled, y, scoring="accuracy")
assert scores.mean() >= 0.9
def test_weights_regressor():
"""Check weighted average regression prediction on diabetes dataset."""
reg1 = DummyRegressor(strategy="mean")
reg2 = DummyRegressor(strategy="median")
reg3 = DummyRegressor(strategy="quantile", quantile=0.2)
ereg = VotingRegressor(
[("mean", reg1), ("median", reg2), ("quantile", reg3)], weights=[1, 2, 10]
)
X_r_train, X_r_test, y_r_train, y_r_test = train_test_split(
X_r, y_r, test_size=0.25
)
reg1_pred = reg1.fit(X_r_train, y_r_train).predict(X_r_test)
reg2_pred = reg2.fit(X_r_train, y_r_train).predict(X_r_test)
reg3_pred = reg3.fit(X_r_train, y_r_train).predict(X_r_test)
ereg_pred = ereg.fit(X_r_train, y_r_train).predict(X_r_test)
avg = np.average(
np.asarray([reg1_pred, reg2_pred, reg3_pred]), axis=0, weights=[1, 2, 10]
)
assert_almost_equal(ereg_pred, avg, decimal=2)
ereg_weights_none = VotingRegressor(
[("mean", reg1), ("median", reg2), ("quantile", reg3)], weights=None
)
ereg_weights_equal = VotingRegressor(
[("mean", reg1), ("median", reg2), ("quantile", reg3)], weights=[1, 1, 1]
)
ereg_weights_none.fit(X_r_train, y_r_train)
ereg_weights_equal.fit(X_r_train, y_r_train)
ereg_none_pred = ereg_weights_none.predict(X_r_test)
ereg_equal_pred = ereg_weights_equal.predict(X_r_test)
assert_almost_equal(ereg_none_pred, ereg_equal_pred, decimal=2)
def test_predict_on_toy_problem(global_random_seed):
"""Manually check predicted class labels for toy dataset."""
clf1 = LogisticRegression(random_state=global_random_seed)
clf2 = RandomForestClassifier(n_estimators=10, random_state=global_random_seed)
clf3 = GaussianNB()
X = np.array(
[[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2], [2.1, 1.4], [3.1, 2.3]]
)
y = np.array([1, 1, 1, 2, 2, 2])
assert_array_equal(clf1.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])
assert_array_equal(clf2.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])
assert_array_equal(clf3.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])
eclf = VotingClassifier(
estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)],
voting="hard",
weights=[1, 1, 1],
)
assert_array_equal(eclf.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])
eclf = VotingClassifier(
estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)],
voting="soft",
weights=[1, 1, 1],
)
assert_array_equal(eclf.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])
def test_predict_proba_on_toy_problem():
"""Calculate predicted probabilities on toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
clf1_res = np.array(
[
[0.59790391, 0.40209609],
[0.57622162, 0.42377838],
[0.50728456, 0.49271544],
[0.40241774, 0.59758226],
]
)
clf2_res = np.array([[0.8, 0.2], [0.8, 0.2], [0.2, 0.8], [0.3, 0.7]])
clf3_res = np.array(
[[0.9985082, 0.0014918], [0.99845843, 0.00154157], [0.0, 1.0], [0.0, 1.0]]
)
t00 = (2 * clf1_res[0][0] + clf2_res[0][0] + clf3_res[0][0]) / 4
t11 = (2 * clf1_res[1][1] + clf2_res[1][1] + clf3_res[1][1]) / 4
t21 = (2 * clf1_res[2][1] + clf2_res[2][1] + clf3_res[2][1]) / 4
t31 = (2 * clf1_res[3][1] + clf2_res[3][1] + clf3_res[3][1]) / 4
eclf = VotingClassifier(
estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)],
voting="soft",
weights=[2, 1, 1],
)
eclf_res = eclf.fit(X, y).predict_proba(X)
assert_almost_equal(t00, eclf_res[0][0], decimal=1)
assert_almost_equal(t11, eclf_res[1][1], decimal=1)
assert_almost_equal(t21, eclf_res[2][1], decimal=1)
assert_almost_equal(t31, eclf_res[3][1], decimal=1)
inner_msg = "predict_proba is not available when voting='hard'"
outer_msg = "'VotingClassifier' has no attribute 'predict_proba'"
with pytest.raises(AttributeError, match=outer_msg) as exec_info:
eclf = VotingClassifier(
estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], voting="hard"
)
eclf.fit(X, y).predict_proba(X)
assert isinstance(exec_info.value.__cause__, AttributeError)
assert inner_msg in str(exec_info.value.__cause__)
def test_multilabel():
"""Check if error is raised for multilabel classification."""
X, y = make_multilabel_classification(
n_classes=2, n_labels=1, allow_unlabeled=False, random_state=123
)
clf = OneVsRestClassifier(SVC(kernel="linear"))
eclf = VotingClassifier(estimators=[("ovr", clf)], voting="hard")
try:
eclf.fit(X, y)
except NotImplementedError:
return
def test_gridsearch():
"""Check GridSearch support."""
clf1 = LogisticRegression(random_state=1)
clf2 = RandomForestClassifier(random_state=1, n_estimators=3)
clf3 = GaussianNB()
eclf = VotingClassifier(
estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], voting="soft"
)
params = {
"lr__C": [1.0, 100.0],
"voting": ["soft", "hard"],
"weights": [[0.5, 0.5, 0.5], [1.0, 0.5, 0.5]],
}
grid = GridSearchCV(estimator=eclf, param_grid=params, cv=2)
grid.fit(X_scaled, y)
def test_parallel_fit(global_random_seed):
"""Check parallel backend of VotingClassifier on toy dataset."""
clf1 = LogisticRegression(random_state=global_random_seed)
clf2 = RandomForestClassifier(n_estimators=10, random_state=global_random_seed)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
eclf1 = VotingClassifier(
estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], voting="soft", n_jobs=1
).fit(X, y)
eclf2 = VotingClassifier(
estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], voting="soft", n_jobs=2
).fit(X, y)
assert_array_equal(eclf1.predict(X), eclf2.predict(X))
assert_array_almost_equal(eclf1.predict_proba(X), eclf2.predict_proba(X))
def test_sample_weight(global_random_seed):
"""Tests sample_weight parameter of VotingClassifier"""
clf1 = LogisticRegression(random_state=global_random_seed)
clf2 = RandomForestClassifier(n_estimators=10, random_state=global_random_seed)
clf3 = CalibratedClassifierCV(SVC(random_state=global_random_seed), ensemble=False)
eclf1 = VotingClassifier(
estimators=[("lr", clf1), ("rf", clf2), ("svc", clf3)], voting="soft"
).fit(X_scaled, y, sample_weight=np.ones((len(y),)))
eclf2 = VotingClassifier(
estimators=[("lr", clf1), ("rf", clf2), ("svc", clf3)], voting="soft"
).fit(X_scaled, y)
assert_array_equal(eclf1.predict(X_scaled), eclf2.predict(X_scaled))
assert_array_almost_equal(
eclf1.predict_proba(X_scaled), eclf2.predict_proba(X_scaled)
)
sample_weight = np.random.RandomState(global_random_seed).uniform(size=(len(y),))
eclf3 = VotingClassifier(estimators=[("lr", clf1)], voting="soft")
eclf3.fit(X_scaled, y, sample_weight=sample_weight)
clf1.fit(X_scaled, y, sample_weight)
assert_array_equal(eclf3.predict(X_scaled), clf1.predict(X_scaled))
assert_array_almost_equal(
eclf3.predict_proba(X_scaled), clf1.predict_proba(X_scaled)
)
# check that an error is raised and indicative if sample_weight is not
# supported.
clf4 = KNeighborsClassifier()
eclf3 = VotingClassifier(
estimators=[("lr", clf1), ("svc", clf3), ("knn", clf4)], voting="soft"
)
msg = "Underlying estimator KNeighborsClassifier does not support sample weights."
with pytest.raises(TypeError, match=msg):
eclf3.fit(X_scaled, y, sample_weight=sample_weight)
# check that _fit_single_estimator will raise the right error
# it should raise the original error if this is not linked to sample_weight
class ClassifierErrorFit(ClassifierMixin, BaseEstimator):
def fit(self, X_scaled, y, sample_weight):
raise TypeError("Error unrelated to sample_weight.")
clf = ClassifierErrorFit()
with pytest.raises(TypeError, match="Error unrelated to sample_weight"):
clf.fit(X_scaled, y, sample_weight=sample_weight)
def test_sample_weight_kwargs():
"""Check that VotingClassifier passes sample_weight as kwargs"""
class MockClassifier(ClassifierMixin, BaseEstimator):
"""Mock Classifier to check that sample_weight is received as kwargs"""
def fit(self, X, y, *args, **sample_weight):
assert "sample_weight" in sample_weight
clf = MockClassifier()
eclf = VotingClassifier(estimators=[("mock", clf)], voting="soft")
# Should not raise an error.
eclf.fit(X, y, sample_weight=np.ones((len(y),)))
def test_voting_classifier_set_params(global_random_seed):
# check equivalence in the output when setting underlying estimators
clf1 = LogisticRegression(random_state=global_random_seed)
clf2 = RandomForestClassifier(
n_estimators=10, random_state=global_random_seed, max_depth=None
)
clf3 = GaussianNB()
eclf1 = VotingClassifier(
[("lr", clf1), ("rf", clf2)], voting="soft", weights=[1, 2]
).fit(X_scaled, y)
eclf2 = VotingClassifier(
[("lr", clf1), ("nb", clf3)], voting="soft", weights=[1, 2]
)
eclf2.set_params(nb=clf2).fit(X_scaled, y)
assert_array_equal(eclf1.predict(X_scaled), eclf2.predict(X_scaled))
assert_array_almost_equal(
eclf1.predict_proba(X_scaled), eclf2.predict_proba(X_scaled)
)
assert eclf2.estimators[0][1].get_params() == clf1.get_params()
assert eclf2.estimators[1][1].get_params() == clf2.get_params()
def test_set_estimator_drop():
# VotingClassifier set_params should be able to set estimators as drop
# Test predict
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(n_estimators=10, random_state=123)
clf3 = GaussianNB()
eclf1 = VotingClassifier(
estimators=[("lr", clf1), ("rf", clf2), ("nb", clf3)],
voting="hard",
weights=[1, 0, 0.5],
).fit(X, y)
eclf2 = VotingClassifier(
estimators=[("lr", clf1), ("rf", clf2), ("nb", clf3)],
voting="hard",
weights=[1, 1, 0.5],
)
eclf2.set_params(rf="drop").fit(X, y)
assert_array_equal(eclf1.predict(X), eclf2.predict(X))
assert dict(eclf2.estimators)["rf"] == "drop"
assert len(eclf2.estimators_) == 2
assert all(
isinstance(est, (LogisticRegression, GaussianNB)) for est in eclf2.estimators_
)
assert eclf2.get_params()["rf"] == "drop"
eclf1.set_params(voting="soft").fit(X, y)
eclf2.set_params(voting="soft").fit(X, y)
assert_array_equal(eclf1.predict(X), eclf2.predict(X))
assert_array_almost_equal(eclf1.predict_proba(X), eclf2.predict_proba(X))
msg = "All estimators are dropped. At least one is required"
with pytest.raises(ValueError, match=msg):
eclf2.set_params(lr="drop", rf="drop", nb="drop").fit(X, y)
# Test soft voting transform
X1 = np.array([[1], [2]])
y1 = np.array([1, 2])
eclf1 = VotingClassifier(
estimators=[("rf", clf2), ("nb", clf3)],
voting="soft",
weights=[0, 0.5],
flatten_transform=False,
).fit(X1, y1)
eclf2 = VotingClassifier(
estimators=[("rf", clf2), ("nb", clf3)],
voting="soft",
weights=[1, 0.5],
flatten_transform=False,
)
eclf2.set_params(rf="drop").fit(X1, y1)
assert_array_almost_equal(
eclf1.transform(X1),
np.array([[[0.7, 0.3], [0.3, 0.7]], [[1.0, 0.0], [0.0, 1.0]]]),
)
assert_array_almost_equal(eclf2.transform(X1), np.array([[[1.0, 0.0], [0.0, 1.0]]]))
eclf1.set_params(voting="hard")
eclf2.set_params(voting="hard")
assert_array_equal(eclf1.transform(X1), np.array([[0, 0], [1, 1]]))
assert_array_equal(eclf2.transform(X1), np.array([[0], [1]]))
def test_estimator_weights_format(global_random_seed):
    """Weights given as a Python list and as a NumPy array are equivalent."""
    log_reg = LogisticRegression(random_state=global_random_seed)
    forest = RandomForestClassifier(n_estimators=10, random_state=global_random_seed)
    base_estimators = [("lr", log_reg), ("rf", forest)]
    voter_list_weights = VotingClassifier(
        estimators=base_estimators, weights=[1, 2], voting="soft"
    )
    voter_array_weights = VotingClassifier(
        estimators=base_estimators, weights=np.array((1, 2)), voting="soft"
    )
    voter_list_weights.fit(X_scaled, y)
    voter_array_weights.fit(X_scaled, y)
    # identical weights -> identical averaged probabilities
    assert_array_almost_equal(
        voter_list_weights.predict_proba(X_scaled),
        voter_array_weights.predict_proba(X_scaled),
    )
def test_transform(global_random_seed):
    """Check transform method of VotingClassifier on toy dataset."""
    lr = LogisticRegression(random_state=global_random_seed)
    rf = RandomForestClassifier(n_estimators=10, random_state=global_random_seed)
    gnb = GaussianNB()
    X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
    y = np.array([1, 1, 2, 2])
    named_estimators = [("lr", lr), ("rf", rf), ("gnb", gnb)]
    # default, explicitly flattened, and unflattened (3D) transforms
    voter_default = VotingClassifier(
        estimators=named_estimators, voting="soft"
    ).fit(X, y)
    voter_flat = VotingClassifier(
        estimators=named_estimators,
        voting="soft",
        flatten_transform=True,
    ).fit(X, y)
    voter_3d = VotingClassifier(
        estimators=named_estimators,
        voting="soft",
        flatten_transform=False,
    ).fit(X, y)
    # flattened output is (n_samples, n_estimators * n_classes); the 3D
    # output is (n_estimators, n_samples, n_classes)
    assert_array_equal(voter_default.transform(X).shape, (4, 6))
    assert_array_equal(voter_flat.transform(X).shape, (4, 6))
    assert_array_equal(voter_3d.transform(X).shape, (3, 4, 2))
    assert_array_almost_equal(voter_default.transform(X), voter_flat.transform(X))
    assert_array_almost_equal(
        voter_3d.transform(X).swapaxes(0, 1).reshape((4, 6)),
        voter_flat.transform(X),
    )
@pytest.mark.parametrize(
    "X, y, voter",
    [
        (
            X,
            y,
            VotingClassifier(
                [
                    ("lr", LogisticRegression()),
                    ("rf", RandomForestClassifier(n_estimators=5)),
                ]
            ),
        ),
        (
            X_r,
            y_r,
            VotingRegressor(
                [
                    ("lr", LinearRegression()),
                    ("rf", RandomForestRegressor(n_estimators=5)),
                ]
            ),
        ),
    ],
)
def test_none_estimator_with_weights(X, y, voter):
    """Fitting with ``sample_weight`` still works after an estimator is dropped."""
    # check that an estimator can be set to 'drop' and passing some weight
    # regression test for
    # https://github.com/scikit-learn/scikit-learn/issues/13777
    voter = clone(voter)
    # Scaled to solve ConvergenceWarning throw by Logistic Regression
    X_scaled = StandardScaler().fit_transform(X)
    voter.fit(X_scaled, y, sample_weight=np.ones(y.shape))
    # dropping one estimator while keeping sample_weight must not fail
    voter.set_params(lr="drop")
    voter.fit(X_scaled, y, sample_weight=np.ones(y.shape))
    y_pred = voter.predict(X_scaled)
    assert y_pred.shape == y.shape
@pytest.mark.parametrize(
    "est",
    [
        VotingRegressor(
            estimators=[
                ("lr", LinearRegression()),
                ("tree", DecisionTreeRegressor(random_state=0)),
            ]
        ),
        VotingClassifier(
            estimators=[
                ("lr", LogisticRegression(random_state=0)),
                ("tree", DecisionTreeClassifier(random_state=0)),
            ]
        ),
    ],
    ids=["VotingRegressor", "VotingClassifier"],
)
def test_n_features_in(est):
    """``n_features_in_`` is only set by ``fit`` and matches the data width."""
    est = clone(est)
    X = [[1, 2], [3, 4], [5, 6]]
    y = [0, 1, 2]
    # the attribute must not exist before fitting
    assert not hasattr(est, "n_features_in_")
    est.fit(X, y)
    assert est.n_features_in_ == 2
@pytest.mark.parametrize(
    "estimator",
    [
        VotingRegressor(
            estimators=[
                ("lr", LinearRegression()),
                ("rf", RandomForestRegressor(random_state=123)),
            ],
            verbose=True,
        ),
        VotingClassifier(
            estimators=[
                ("lr", LogisticRegression(random_state=123)),
                ("rf", RandomForestClassifier(random_state=123)),
            ],
            verbose=True,
        ),
    ],
)
def test_voting_verbose(estimator, capsys):
    """With ``verbose=True``, fitting prints one progress line per sub-estimator."""
    X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
    y = np.array([1, 1, 2, 2])
    # expected stdout: one "[Voting] ... Processing <name>" line per estimator
    pattern = (
        r"\[Voting\].*\(1 of 2\) Processing lr, total=.*\n"
        r"\[Voting\].*\(2 of 2\) Processing rf, total=.*\n$"
    )
    clone(estimator).fit(X, y)
    # capsys.readouterr()[0] is captured stdout
    assert re.match(pattern, capsys.readouterr()[0])
def test_get_features_names_out_regressor():
    """Check get_feature_names_out output for regressor."""
    X = [[1, 2], [3, 4], [5, 6]]
    y = [0, 1, 2]
    ensemble = VotingRegressor(
        estimators=[
            ("lr", LinearRegression()),
            ("tree", DecisionTreeRegressor(random_state=0)),
            ("ignore", "drop"),
        ]
    )
    ensemble.fit(X, y)
    # the dropped estimator must not contribute a feature name
    assert_array_equal(
        ensemble.get_feature_names_out(),
        ["votingregressor_lr", "votingregressor_tree"],
    )
@pytest.mark.parametrize(
    "kwargs, expected_names",
    [
        (
            # soft + flattened: one name per (estimator, class) pair
            {"voting": "soft", "flatten_transform": True},
            [
                "votingclassifier_lr0",
                "votingclassifier_lr1",
                "votingclassifier_lr2",
                "votingclassifier_tree0",
                "votingclassifier_tree1",
                "votingclassifier_tree2",
            ],
        ),
        # hard voting: one name per estimator
        ({"voting": "hard"}, ["votingclassifier_lr", "votingclassifier_tree"]),
    ],
)
def test_get_features_names_out_classifier(kwargs, expected_names):
    """Check get_feature_names_out for classifier for different settings."""
    X = [[1, 2], [3, 4], [5, 6], [1, 1.2]]
    y = [0, 1, 2, 0]
    voting = VotingClassifier(
        estimators=[
            ("lr", LogisticRegression(random_state=0)),
            ("tree", DecisionTreeClassifier(random_state=0)),
        ],
        **kwargs,
    )
    voting.fit(X, y)
    X_trans = voting.transform(X)
    names_out = voting.get_feature_names_out()
    # output names must match the transformed width one-to-one
    assert X_trans.shape[1] == len(expected_names)
    assert_array_equal(names_out, expected_names)
def test_get_features_names_out_classifier_error():
    """Check that error is raised when voting="soft" and flatten_transform=False."""
    X = [[1, 2], [3, 4], [5, 6]]
    y = [0, 1, 2]
    ensemble = VotingClassifier(
        estimators=[
            ("lr", LogisticRegression(random_state=0)),
            ("tree", DecisionTreeClassifier(random_state=0)),
        ],
        voting="soft",
        flatten_transform=False,
    )
    ensemble.fit(X, y)
    # the 3D transform output cannot be mapped to flat feature names
    expected_msg = (
        "get_feature_names_out is not supported when `voting='soft'` and "
        "`flatten_transform=False`"
    )
    with pytest.raises(ValueError, match=expected_msg):
        ensemble.get_feature_names_out()
# Metadata Routing Tests
# ======================
@pytest.mark.parametrize(
    "Estimator, Child",
    [(VotingClassifier, ConsumingClassifier), (VotingRegressor, ConsumingRegressor)],
)
def test_routing_passed_metadata_not_supported(Estimator, Child):
    """Test that the right error message is raised when metadata is passed while
    not supported when `enable_metadata_routing=False`."""
    X = np.array([[0, 1], [2, 2], [4, 6]])
    y = [1, 2, 3]
    with pytest.raises(
        ValueError, match="is only supported if enable_metadata_routing=True"
    ):
        # Use the documented list-of-(name, estimator)-tuples form. The
        # previous flat list `["clf", Child()]` only worked because the
        # routing error fires before the estimators list is validated.
        Estimator([("clf", Child())]).fit(X, y, sample_weight=[1, 1, 1], metadata="a")
@pytest.mark.parametrize(
    "Estimator, Child",
    [(VotingClassifier, ConsumingClassifier), (VotingRegressor, ConsumingRegressor)],
)
@config_context(enable_metadata_routing=True)
def test_get_metadata_routing_without_fit(Estimator, Child):
    """``get_metadata_routing`` must be callable on an unfitted ensemble."""
    unfitted_ensemble = Estimator([("sub_est", Child())])
    unfitted_ensemble.get_metadata_routing()
@pytest.mark.parametrize(
    "Estimator, Child",
    [(VotingClassifier, ConsumingClassifier), (VotingRegressor, ConsumingRegressor)],
)
@pytest.mark.parametrize("prop", ["sample_weight", "metadata"])
@config_context(enable_metadata_routing=True)
def test_metadata_routing_for_voting_estimators(Estimator, Child, prop):
    """Test that metadata is routed correctly for Voting*."""
    X = np.array([[0, 1], [2, 2], [4, 6]])
    y = [1, 2, 3]
    sample_weight, metadata = [1, 1, 1], "a"
    # both sub-estimators explicitly request the routed property via
    # set_fit_request, and record what they receive in their registry
    est = Estimator(
        [
            (
                "sub_est1",
                Child(registry=_Registry()).set_fit_request(**{prop: True}),
            ),
            (
                "sub_est2",
                Child(registry=_Registry()).set_fit_request(**{prop: True}),
            ),
        ]
    )
    est.fit(X, y, **{prop: sample_weight if prop == "sample_weight" else metadata})
    for estimator in est.estimators:
        if prop == "sample_weight":
            kwargs = {prop: sample_weight}
        else:
            kwargs = {prop: metadata}
        # access sub-estimator in (name, est) with estimator[1]
        registry = estimator[1].registry
        assert len(registry)
        # every recorded fit call must have received exactly the routed value
        for sub_est in registry:
            check_recorded_metadata(obj=sub_est, method="fit", parent="fit", **kwargs)
@pytest.mark.parametrize(
    "Estimator, Child",
    [(VotingClassifier, ConsumingClassifier), (VotingRegressor, ConsumingRegressor)],
)
@config_context(enable_metadata_routing=True)
def test_metadata_routing_error_for_voting_estimators(Estimator, Child):
    """Test that the right error is raised when metadata is not requested."""
    X = np.array([[0, 1], [2, 2], [4, 6]])
    y = [1, 2, 3]
    sample_weight, metadata = [1, 1, 1], "a"
    # the sub-estimator never called set_fit_request, so routing must fail
    est = Estimator([("sub_est", Child())])
    error_message = (
        "[sample_weight, metadata] are passed but are not explicitly set as requested"
        f" or not requested for {Child.__name__}.fit"
    )
    with pytest.raises(ValueError, match=re.escape(error_message)):
        est.fit(X, y, sample_weight=sample_weight, metadata=metadata)
# End of Metadata Routing Tests
# =============================
"""
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import itertools
import math
import pickle
from collections import defaultdict
from functools import partial
from itertools import combinations, product
from typing import Any, Dict
from unittest.mock import patch
import joblib
import numpy as np
import pytest
from scipy.special import comb
import sklearn
from sklearn import clone, datasets
from sklearn.datasets import make_classification, make_hastie_10_2
from sklearn.decomposition import TruncatedSVD
from sklearn.dummy import DummyRegressor
from sklearn.ensemble import (
ExtraTreesClassifier,
ExtraTreesRegressor,
RandomForestClassifier,
RandomForestRegressor,
RandomTreesEmbedding,
)
from sklearn.ensemble._forest import (
_generate_unsampled_indices,
_get_n_samples_bootstrap,
)
from sklearn.exceptions import NotFittedError
from sklearn.metrics import (
explained_variance_score,
f1_score,
mean_poisson_deviance,
mean_squared_error,
)
from sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split
from sklearn.svm import LinearSVC
from sklearn.tree._classes import SPARSE_SPLITTERS
from sklearn.utils._testing import (
_convert_container,
assert_allclose,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
ignore_warnings,
skip_if_no_parallel,
)
from sklearn.utils.fixes import COO_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.parallel import Parallel
from sklearn.utils.validation import check_random_state
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# Larger classification sample used for testing feature importances
X_large, y_large = datasets.make_classification(
    n_samples=500,
    n_features=10,
    n_informative=3,
    n_redundant=0,
    n_repeated=0,
    shuffle=False,
    random_state=0,
)
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# Make regression dataset
X_reg, y_reg = datasets.make_regression(n_samples=500, n_features=10, random_state=1)
# also make a hastie_10_2 dataset
hastie_X, hastie_y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
hastie_X = hastie_X.astype(np.float32)
# Get the default backend in joblib to test parallelism and interaction with
# different backends
DEFAULT_JOBLIB_BACKEND = joblib.parallel.get_active_backend()[0].__class__
# Name -> class registries used by the parametrized tests below
FOREST_CLASSIFIERS = {
    "ExtraTreesClassifier": ExtraTreesClassifier,
    "RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
    "ExtraTreesRegressor": ExtraTreesRegressor,
    "RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
    "RandomTreesEmbedding": RandomTreesEmbedding,
}
# Unions of the registries above
FOREST_ESTIMATORS: Dict[str, Any] = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
FOREST_CLASSIFIERS_REGRESSORS: Dict[str, Any] = FOREST_CLASSIFIERS.copy()
FOREST_CLASSIFIERS_REGRESSORS.update(FOREST_REGRESSORS)
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
def test_classification_toy(name):
    """Check classification on a toy dataset."""
    ForestClassifier = FOREST_CLASSIFIERS[name]
    # fit once with default max_features and once with max_features=1
    for extra_params in ({}, {"max_features": 1}):
        clf = ForestClassifier(n_estimators=10, random_state=1, **extra_params)
        clf.fit(X, y)
        assert_array_equal(clf.predict(T), true_result)
        assert len(clf) == 10
    # also test apply (clf is the max_features=1 forest here)
    leaf_indices = clf.apply(X)
    assert leaf_indices.shape == (len(X), clf.n_estimators)
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
@pytest.mark.parametrize("criterion", ("gini", "log_loss"))
def test_iris_criterion(name, criterion):
    """Both split criteria must reach a reasonable accuracy on iris."""
    # Check consistency on dataset iris.
    ForestClassifier = FOREST_CLASSIFIERS[name]
    clf = ForestClassifier(n_estimators=10, criterion=criterion, random_state=1)
    clf.fit(iris.data, iris.target)
    score = clf.score(iris.data, iris.target)
    assert score > 0.9, "Failed with criterion %s and score = %f" % (criterion, score)
    # restricting max_features lowers the bar but must still beat chance
    clf = ForestClassifier(
        n_estimators=10, criterion=criterion, max_features=2, random_state=1
    )
    clf.fit(iris.data, iris.target)
    score = clf.score(iris.data, iris.target)
    assert score > 0.5, "Failed with criterion %s and score = %f" % (criterion, score)
@pytest.mark.parametrize("name", FOREST_REGRESSORS)
@pytest.mark.parametrize(
    "criterion", ("squared_error", "absolute_error", "friedman_mse")
)
def test_regression_criterion(name, criterion):
    """Every regression criterion must reach a high R^2 on the train set."""
    # Check consistency on regression dataset.
    ForestRegressor = FOREST_REGRESSORS[name]
    reg = ForestRegressor(n_estimators=5, criterion=criterion, random_state=1)
    reg.fit(X_reg, y_reg)
    score = reg.score(X_reg, y_reg)
    assert score > 0.93, (
        "Failed with max_features=None, criterion %s and score = %f"
        % (
            criterion,
            score,
        )
    )
    # a restricted max_features only slightly lowers the expected score
    reg = ForestRegressor(
        n_estimators=5, criterion=criterion, max_features=6, random_state=1
    )
    reg.fit(X_reg, y_reg)
    score = reg.score(X_reg, y_reg)
    assert score > 0.92, "Failed with max_features=6, criterion %s and score = %f" % (
        criterion,
        score,
    )
def test_poisson_vs_mse():
    """Test that random forest with poisson criterion performs better than
    mse for a poisson target.

    There is a similar test for DecisionTreeRegressor.
    """
    rng = np.random.RandomState(42)
    n_train, n_test, n_features = 500, 500, 10
    X = datasets.make_low_rank_matrix(
        n_samples=n_train + n_test, n_features=n_features, random_state=rng
    )
    # We create a log-linear Poisson model and downscale coef as it will get
    # exponentiated.
    coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0)
    y = rng.poisson(lam=np.exp(X @ coef))
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=n_test, random_state=rng
    )
    # We prevent some overfitting by setting min_samples_split=10.
    forest_poi = RandomForestRegressor(
        criterion="poisson", min_samples_leaf=10, max_features="sqrt", random_state=rng
    )
    forest_mse = RandomForestRegressor(
        criterion="squared_error",
        min_samples_leaf=10,
        max_features="sqrt",
        random_state=rng,
    )
    forest_poi.fit(X_train, y_train)
    forest_mse.fit(X_train, y_train)
    # mean-predicting baseline used as a sanity reference below
    dummy = DummyRegressor(strategy="mean").fit(X_train, y_train)
    for X, y, data_name in [(X_train, y_train, "train"), (X_test, y_test, "test")]:
        metric_poi = mean_poisson_deviance(y, forest_poi.predict(X))
        # squared_error forest might produce non-positive predictions => clip
        # If y = 0 for those, the poisson deviance gets too good.
        # If we drew more samples, we would eventually get y > 0 and the
        # poisson deviance would explode, i.e. be undefined. Therefore, we do
        # not clip to a tiny value like 1e-15, but to 1e-6. This acts like a
        # small penalty to the non-positive predictions.
        metric_mse = mean_poisson_deviance(
            y, np.clip(forest_mse.predict(X), 1e-6, None)
        )
        metric_dummy = mean_poisson_deviance(y, dummy.predict(X))
        # As squared_error might correctly predict 0 in train set, its train
        # score can be better than Poisson. This is no longer the case for the
        # test set. But keep the above comment for clipping in mind.
        if data_name == "test":
            assert metric_poi < metric_mse
            assert metric_poi < 0.8 * metric_dummy
@pytest.mark.parametrize("criterion", ("poisson", "squared_error"))
def test_balance_property_random_forest(criterion):
    """Test that sum(y_pred)==sum(y_true) on the training set."""
    rng = np.random.RandomState(42)
    n_train, n_test, n_features = 500, 500, 10
    X = datasets.make_low_rank_matrix(
        n_samples=n_train + n_test, n_features=n_features, random_state=rng
    )
    # log-linear Poisson target, coef downscaled before exponentiation
    coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0)
    y = rng.poisson(lam=np.exp(X @ coef))
    # bootstrap=False so every tree sees the full training set, which is
    # required for the balance property to hold exactly
    reg = RandomForestRegressor(
        criterion=criterion, n_estimators=10, bootstrap=False, random_state=rng
    )
    reg.fit(X, y)
    assert np.sum(reg.predict(X)) == pytest.approx(np.sum(y))
@pytest.mark.parametrize("name", FOREST_REGRESSORS)
def test_regressor_attributes(name):
    """Regression forests must not expose classification-only attributes."""
    regressor = FOREST_REGRESSORS[name](random_state=0)
    # absent both before ...
    for attribute in ("classes_", "n_classes_"):
        assert not hasattr(regressor, attribute)
    regressor.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
    # ... and after fitting
    for attribute in ("classes_", "n_classes_"):
        assert not hasattr(regressor, attribute)
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
def test_probability(name):
    """Predicted probabilities sum to one and agree with log-probabilities."""
    # Predict probabilities.
    ForestClassifier = FOREST_CLASSIFIERS[name]
    # shallow stumps can emit zero probabilities => log(0) warnings silenced
    with np.errstate(divide="ignore"):
        clf = ForestClassifier(
            n_estimators=10, random_state=1, max_features=1, max_depth=1
        )
        clf.fit(iris.data, iris.target)
        assert_array_almost_equal(
            np.sum(clf.predict_proba(iris.data), axis=1), np.ones(iris.data.shape[0])
        )
        # exp(predict_log_proba) must reproduce predict_proba
        assert_array_almost_equal(
            clf.predict_proba(iris.data), np.exp(clf.predict_log_proba(iris.data))
        )
@pytest.mark.parametrize("dtype", (np.float64, np.float32))
@pytest.mark.parametrize(
    "name, criterion",
    itertools.chain(
        product(FOREST_CLASSIFIERS, ["gini", "log_loss"]),
        product(FOREST_REGRESSORS, ["squared_error", "friedman_mse", "absolute_error"]),
    ),
)
def test_importances(dtype, name, criterion):
    """Feature importances identify the informative features and are
    invariant to parallelism and to sample-weight scaling."""
    tolerance = 0.01
    if name in FOREST_REGRESSORS and criterion == "absolute_error":
        tolerance = 0.05
    # cast as dtype
    X = X_large.astype(dtype, copy=False)
    y = y_large.astype(dtype, copy=False)
    ForestEstimator = FOREST_ESTIMATORS[name]
    est = ForestEstimator(n_estimators=10, criterion=criterion, random_state=0)
    est.fit(X, y)
    importances = est.feature_importances_
    # The forest estimator can detect that only the first 3 features of the
    # dataset are informative:
    n_important = np.sum(importances > 0.1)
    assert importances.shape[0] == 10
    assert n_important == 3
    assert np.all(importances[:3] > 0.1)
    # Check with parallel
    importances = est.feature_importances_
    est.set_params(n_jobs=2)
    importances_parallel = est.feature_importances_
    assert_array_almost_equal(importances, importances_parallel)
    # Check with sample weights
    sample_weight = check_random_state(0).randint(1, 10, len(X))
    est = ForestEstimator(n_estimators=10, random_state=0, criterion=criterion)
    est.fit(X, y, sample_weight=sample_weight)
    importances = est.feature_importances_
    assert np.all(importances >= 0.0)
    # scaling all weights by a constant must not change importances (within
    # tolerance)
    for scale in [0.5, 100]:
        est = ForestEstimator(n_estimators=10, random_state=0, criterion=criterion)
        est.fit(X, y, sample_weight=scale * sample_weight)
        importances_bis = est.feature_importances_
        assert np.abs(importances - importances_bis).mean() < tolerance
def test_importances_asymptotic():
    # Check whether variable importances of totally randomized trees
    # converge towards their theoretical values (See Louppe et al,
    # Understanding variable importances in forests of randomized trees, 2013).
    def binomial(k, n):
        # binomial coefficient C(n, k); 0 outside the valid range
        return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)
    def entropy(samples):
        # Shannon entropy (base 2) of a vector of discrete labels
        n_samples = len(samples)
        entropy = 0.0
        for count in np.bincount(samples):
            p = 1.0 * count / n_samples
            if p > 0:
                entropy -= p * np.log2(p)
        return entropy
    def mdi_importance(X_m, X, y):
        # Theoretical mean-decrease-of-impurity importance of feature X_m,
        # computed by exhaustively conditioning on every subset B of the
        # other features (Louppe et al., 2013, Eq. 3).
        n_samples, n_features = X.shape
        features = list(range(n_features))
        features.pop(X_m)
        values = [np.unique(X[:, i]) for i in range(n_features)]
        imp = 0.0
        for k in range(n_features):
            # Weight of each B of size k
            coef = 1.0 / (binomial(k, n_features) * (n_features - k))
            # For all B of size k
            for B in combinations(features, k):
                # For all values B=b
                for b in product(*[values[B[j]] for j in range(k)]):
                    mask_b = np.ones(n_samples, dtype=bool)
                    for j in range(k):
                        mask_b &= X[:, B[j]] == b[j]
                    X_, y_ = X[mask_b, :], y[mask_b]
                    n_samples_b = len(X_)
                    if n_samples_b > 0:
                        children = []
                        for xi in values[X_m]:
                            mask_xi = X_[:, X_m] == xi
                            children.append(y_[mask_xi])
                        imp += (
                            coef
                            * (1.0 * n_samples_b / n_samples)  # P(B=b)
                            * (
                                entropy(y_)
                                - sum(
                                    [
                                        entropy(c) * len(c) / n_samples_b
                                        for c in children
                                    ]
                                )
                            )
                        )
        return imp
    # 7 binary features plus a label column (last column)
    data = np.array(
        [
            [0, 0, 1, 0, 0, 1, 0, 1],
            [1, 0, 1, 1, 1, 0, 1, 2],
            [1, 0, 1, 1, 0, 1, 1, 3],
            [0, 1, 1, 1, 0, 1, 0, 4],
            [1, 1, 0, 1, 0, 1, 1, 5],
            [1, 1, 0, 1, 1, 1, 1, 6],
            [1, 0, 1, 0, 0, 1, 0, 7],
            [1, 1, 1, 1, 1, 1, 1, 8],
            [1, 1, 1, 1, 0, 1, 1, 9],
            [1, 1, 1, 0, 1, 1, 1, 0],
        ]
    )
    X, y = np.array(data[:, :7], dtype=bool), data[:, 7]
    n_features = X.shape[1]
    # Compute true importances
    true_importances = np.zeros(n_features)
    for i in range(n_features):
        true_importances[i] = mdi_importance(i, X, y)
    # Estimate importances with totally randomized trees
    # (max_features=1 makes the split feature choice uniformly random)
    clf = ExtraTreesClassifier(
        n_estimators=500, max_features=1, criterion="log_loss", random_state=0
    ).fit(X, y)
    importances = (
        sum(
            tree.tree_.compute_feature_importances(normalize=False)
            for tree in clf.estimators_
        )
        / clf.n_estimators
    )
    # Check correctness: importances sum to the total entropy of y, and
    # each estimate is close to its theoretical value
    assert_almost_equal(entropy(y), sum(importances))
    assert np.abs(true_importances - importances).mean() < 0.01
@pytest.mark.parametrize("name", FOREST_ESTIMATORS)
def test_unfitted_feature_importances(name):
    """Accessing ``feature_importances_`` before ``fit`` raises NotFittedError."""
    err_msg = (
        "This {} instance is not fitted yet. Call 'fit' with "
        "appropriate arguments before using this estimator.".format(name)
    )
    with pytest.raises(NotFittedError, match=err_msg):
        getattr(FOREST_ESTIMATORS[name](), "feature_importances_")
@pytest.mark.parametrize("ForestClassifier", FOREST_CLASSIFIERS.values())
@pytest.mark.parametrize("X_type", ["array", "sparse_csr", "sparse_csc"])
@pytest.mark.parametrize(
    "X, y, lower_bound_accuracy",
    [
        (
            *datasets.make_classification(n_samples=300, n_classes=2, random_state=0),
            0.9,
        ),
        (
            *datasets.make_classification(
                n_samples=1000, n_classes=3, n_informative=6, random_state=0
            ),
            0.65,
        ),
        (
            iris.data,
            iris.target * 2 + 1,
            0.65,
        ),
        (
            *datasets.make_multilabel_classification(n_samples=300, random_state=0),
            0.18,
        ),
    ],
)
@pytest.mark.parametrize("oob_score", [True, partial(f1_score, average="micro")])
def test_forest_classifier_oob(
    ForestClassifier, X, y, X_type, lower_bound_accuracy, oob_score
):
    """Check that OOB score is close to score on a test set."""
    X = _convert_container(X, constructor_name=X_type)
    X_train, X_test, y_train, y_test = train_test_split(
        X,
        y,
        test_size=0.5,
        random_state=0,
    )
    classifier = ForestClassifier(
        n_estimators=40,
        bootstrap=True,
        oob_score=oob_score,
        random_state=0,
    )
    # OOB attributes must not exist before fitting
    assert not hasattr(classifier, "oob_score_")
    assert not hasattr(classifier, "oob_decision_function_")
    classifier.fit(X_train, y_train)
    # oob_score may be a bool (use default scoring) or a callable metric
    if callable(oob_score):
        test_score = oob_score(y_test, classifier.predict(X_test))
    else:
        test_score = classifier.score(X_test, y_test)
    assert classifier.oob_score_ >= lower_bound_accuracy
    # OOB score should approximate held-out performance
    abs_diff = abs(test_score - classifier.oob_score_)
    assert abs_diff <= 0.11, f"{abs_diff=} is greater than 0.11"
    assert hasattr(classifier, "oob_score_")
    assert not hasattr(classifier, "oob_prediction_")
    assert hasattr(classifier, "oob_decision_function_")
    # decision function shape: 2D for single output, 3D for multilabel
    if y.ndim == 1:
        expected_shape = (X_train.shape[0], len(set(y)))
    else:
        expected_shape = (X_train.shape[0], len(set(y[:, 0])), y.shape[1])
    assert classifier.oob_decision_function_.shape == expected_shape
@pytest.mark.parametrize("ForestRegressor", FOREST_REGRESSORS.values())
@pytest.mark.parametrize("X_type", ["array", "sparse_csr", "sparse_csc"])
@pytest.mark.parametrize(
    "X, y, lower_bound_r2",
    [
        (
            *datasets.make_regression(
                n_samples=500, n_features=10, n_targets=1, random_state=0
            ),
            0.7,
        ),
        (
            *datasets.make_regression(
                n_samples=500, n_features=10, n_targets=2, random_state=0
            ),
            0.55,
        ),
    ],
)
@pytest.mark.parametrize("oob_score", [True, explained_variance_score])
def test_forest_regressor_oob(ForestRegressor, X, y, X_type, lower_bound_r2, oob_score):
    """Check that forest-based regressor provide an OOB score close to the
    score on a test set."""
    X = _convert_container(X, constructor_name=X_type)
    X_train, X_test, y_train, y_test = train_test_split(
        X,
        y,
        test_size=0.5,
        random_state=0,
    )
    regressor = ForestRegressor(
        n_estimators=50,
        bootstrap=True,
        oob_score=oob_score,
        random_state=0,
    )
    # OOB attributes must not exist before fitting
    assert not hasattr(regressor, "oob_score_")
    assert not hasattr(regressor, "oob_prediction_")
    regressor.fit(X_train, y_train)
    # oob_score may be a bool (use default scoring) or a callable metric
    if callable(oob_score):
        test_score = oob_score(y_test, regressor.predict(X_test))
    else:
        test_score = regressor.score(X_test, y_test)
    assert regressor.oob_score_ >= lower_bound_r2
    # OOB score should approximate held-out performance
    assert abs(test_score - regressor.oob_score_) <= 0.1
    assert hasattr(regressor, "oob_score_")
    assert hasattr(regressor, "oob_prediction_")
    assert not hasattr(regressor, "oob_decision_function_")
    if y.ndim == 1:
        expected_shape = (X_train.shape[0],)
    else:
        # oob_prediction_ is (n_samples, n_outputs). The previous check used
        # `y.ndim`, which only passed because n_targets happened to equal 2.
        expected_shape = (X_train.shape[0], y.shape[1])
    assert regressor.oob_prediction_.shape == expected_shape
@pytest.mark.parametrize("ForestEstimator", FOREST_CLASSIFIERS_REGRESSORS.values())
def test_forest_oob_warning(ForestEstimator):
    """A one-tree forest cannot leave every sample out-of-bag, so fitting
    must warn that the OOB estimates will be inaccurate."""
    single_tree_forest = ForestEstimator(
        n_estimators=1,
        oob_score=True,
        bootstrap=True,
        random_state=0,
    )
    with pytest.warns(UserWarning, match="Some inputs do not have OOB scores"):
        single_tree_forest.fit(iris.data, iris.target)
@pytest.mark.parametrize("ForestEstimator", FOREST_CLASSIFIERS_REGRESSORS.values())
def test_forest_oob_score_requires_bootstrap(ForestEstimator):
    """Requesting an OOB score without bootstrapping must raise ValueError."""
    estimator = ForestEstimator(oob_score=True, bootstrap=False)
    expected_msg = "Out of bag estimation only available if bootstrap=True"
    with pytest.raises(ValueError, match=expected_msg):
        estimator.fit(iris.data, iris.target)
@pytest.mark.parametrize("ForestClassifier", FOREST_CLASSIFIERS.values())
def test_classifier_error_oob_score_multiclass_multioutput(ForestClassifier):
    """OOB scoring is unsupported for multiclass-multioutput targets and
    fitting such a target with oob_score=True must raise ValueError."""
    rng = np.random.RandomState(42)
    X = iris.data
    y = rng.randint(low=0, high=5, size=(iris.data.shape[0], 2))
    # sanity check: the generated target really is multiclass-multioutput
    assert type_of_target(y) == "multiclass-multioutput"
    estimator = ForestClassifier(oob_score=True, bootstrap=True)
    with pytest.raises(
        ValueError, match="The type of target cannot be used to compute OOB estimates"
    ):
        estimator.fit(X, y)
@pytest.mark.parametrize("ForestRegressor", FOREST_REGRESSORS.values())
def test_forest_multioutput_integral_regression_target(ForestRegressor):
    """Check that multioutput regression with integral values is not interpreted
    as a multiclass-multioutput target and OOB score can be computed.
    """
    rng = np.random.RandomState(42)
    X = iris.data
    y = rng.randint(low=0, high=10, size=(iris.data.shape[0], 2))
    estimator = ForestRegressor(
        n_estimators=30, oob_score=True, bootstrap=True, random_state=0
    )
    estimator.fit(X, y)
    # recompute the OOB predictions by hand for a subset of the samples and
    # compare them with the estimator's oob_prediction_ attribute
    n_samples_bootstrap = _get_n_samples_bootstrap(len(X), estimator.max_samples)
    n_samples_test = X.shape[0] // 4
    oob_pred = np.zeros([n_samples_test, 2])
    for sample_idx, sample in enumerate(X[:n_samples_test]):
        n_samples_oob = 0
        oob_pred_sample = np.zeros(2)
        for tree in estimator.estimators_:
            # indices that were NOT drawn in this tree's bootstrap sample
            oob_unsampled_indices = _generate_unsampled_indices(
                tree.random_state, len(X), n_samples_bootstrap
            )
            if sample_idx in oob_unsampled_indices:
                n_samples_oob += 1
                oob_pred_sample += tree.predict(sample.reshape(1, -1)).squeeze()
        # average over the trees for which this sample was out-of-bag
        oob_pred[sample_idx] = oob_pred_sample / n_samples_oob
    assert_allclose(oob_pred, estimator.oob_prediction_[:n_samples_test])
@pytest.mark.parametrize("oob_score", [True, False])
def test_random_trees_embedding_raise_error_oob(oob_score):
    """RandomTreesEmbedding neither accepts nor implements OOB scoring."""
    # the constructor does not take oob_score at all ...
    with pytest.raises(TypeError, match="got an unexpected keyword argument"):
        RandomTreesEmbedding(oob_score=oob_score)
    # ... and the internal hook explicitly refuses to compute it
    embedder = RandomTreesEmbedding()
    with pytest.raises(NotImplementedError, match="OOB score not supported"):
        embedder._set_oob_score_and_attributes(X, y)
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
def test_gridsearch(name):
    """Forest classifiers can be grid-searched over their hyper-parameters."""
    param_grid = {"n_estimators": (1, 2), "max_depth": (1, 2)}
    search = GridSearchCV(FOREST_CLASSIFIERS[name](), param_grid)
    search.fit(iris.data, iris.target)
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
def test_parallel(name):
    """Check parallel computations in classification"""
    if name in FOREST_CLASSIFIERS:
        X = iris.data
        y = iris.target
    elif name in FOREST_REGRESSORS:
        X = X_reg
        y = y_reg
    ForestEstimator = FOREST_ESTIMATORS[name]
    forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
    forest.fit(X, y)
    assert len(forest) == 10
    # predictions must not depend on the number of prediction jobs
    forest.set_params(n_jobs=1)
    y1 = forest.predict(X)
    forest.set_params(n_jobs=2)
    y2 = forest.predict(X)
    assert_array_almost_equal(y1, y2, 3)
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
def test_pickle(name):
    """Check that fitted forests survive a pickle round-trip.

    The restored estimator must be of the exact same class and score
    identically on the training data.
    """
    # Check pickability.
    if name in FOREST_CLASSIFIERS:
        X = iris.data[::2]
        y = iris.target[::2]
    elif name in FOREST_REGRESSORS:
        X = X_reg[::2]
        y = y_reg[::2]
    ForestEstimator = FOREST_ESTIMATORS[name]
    obj = ForestEstimator(random_state=0)
    obj.fit(X, y)
    score = obj.score(X, y)
    pickle_object = pickle.dumps(obj)
    obj2 = pickle.loads(pickle_object)
    # compare classes by identity (`is`), not equality (E721)
    assert type(obj2) is obj.__class__
    score2 = obj2.score(X, y)
    assert score == score2
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
def test_multioutput(name):
    # Check estimators on multi-output problems.
    # Four quadrants of the plane, each mapped to a distinct output pair.
    X_train = [
        [-2, -1],
        [-1, -1],
        [-1, -2],
        [1, 1],
        [1, 2],
        [2, 1],
        [-2, 1],
        [-1, 1],
        [-1, 2],
        [2, -1],
        [1, -1],
        [1, -2],
    ]
    y_train = [
        [-1, 0],
        [-1, 0],
        [-1, 0],
        [1, 1],
        [1, 1],
        [1, 1],
        [-1, 2],
        [-1, 2],
        [-1, 2],
        [1, 3],
        [1, 3],
        [1, 3],
    ]
    X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
    y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
    est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
    y_pred = est.fit(X_train, y_train).predict(X_test)
    assert_array_almost_equal(y_pred, y_test)
    if name in FOREST_CLASSIFIERS:
        # probability outputs: one array per output, each
        # (n_samples, n_classes_for_that_output)
        with np.errstate(divide="ignore"):
            proba = est.predict_proba(X_test)
            assert len(proba) == 2
            assert proba[0].shape == (4, 2)
            assert proba[1].shape == (4, 4)
            log_proba = est.predict_log_proba(X_test)
            assert len(log_proba) == 2
            assert log_proba[0].shape == (4, 2)
            assert log_proba[1].shape == (4, 4)
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
def test_multioutput_string(name):
    # Check estimators on multi-output problems with string outputs.
    # Same quadrant layout as test_multioutput, with string labels.
    X_train = [
        [-2, -1],
        [-1, -1],
        [-1, -2],
        [1, 1],
        [1, 2],
        [2, 1],
        [-2, 1],
        [-1, 1],
        [-1, 2],
        [2, -1],
        [1, -1],
        [1, -2],
    ]
    y_train = [
        ["red", "blue"],
        ["red", "blue"],
        ["red", "blue"],
        ["green", "green"],
        ["green", "green"],
        ["green", "green"],
        ["red", "purple"],
        ["red", "purple"],
        ["red", "purple"],
        ["green", "yellow"],
        ["green", "yellow"],
        ["green", "yellow"],
    ]
    X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
    y_test = [
        ["red", "blue"],
        ["green", "green"],
        ["red", "purple"],
        ["green", "yellow"],
    ]
    est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
    y_pred = est.fit(X_train, y_train).predict(X_test)
    assert_array_equal(y_pred, y_test)
    # probability outputs: one array per output, each
    # (n_samples, n_classes_for_that_output)
    with np.errstate(divide="ignore"):
        proba = est.predict_proba(X_test)
        assert len(proba) == 2
        assert proba[0].shape == (4, 2)
        assert proba[1].shape == (4, 4)
        log_proba = est.predict_log_proba(X_test)
        assert len(log_proba) == 2
        assert log_proba[0].shape == (4, 2)
        assert log_proba[1].shape == (4, 4)
@pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
def test_classes_shape(name):
    """``n_classes_`` and ``classes_`` have the proper shape for single- and
    multi-output classification."""
    ForestClassifier = FOREST_CLASSIFIERS[name]
    # single output: scalar n_classes_, 1D classes_
    single_output_clf = ForestClassifier(random_state=0).fit(X, y)
    assert single_output_clf.n_classes_ == 2
    assert_array_equal(single_output_clf.classes_, [-1, 1])
    # multi-output: stack y with a scaled copy of itself as a second output
    y_multi = np.vstack((y, np.array(y) * 2)).T
    multi_output_clf = ForestClassifier(random_state=0).fit(X, y_multi)
    assert_array_equal(multi_output_clf.n_classes_, [2, 2])
    assert_array_equal(multi_output_clf.classes_, [[-1, 1], [-2, 2]])
def test_random_trees_dense_type():
    """RandomTreesEmbedding with ``sparse_output=False`` returns a dense array."""
    X, y = datasets.make_circles(factor=0.5)
    embedder = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
    transformed = embedder.fit_transform(X)
    # A plain ndarray is expected here, not a scipy sparse matrix.
    assert isinstance(transformed, np.ndarray)
def test_random_trees_dense_equal():
    """Dense and sparse outputs of RandomTreesEmbedding encode the same data."""
    X, y = datasets.make_circles(factor=0.5)

    dense_out = RandomTreesEmbedding(
        n_estimators=10, sparse_output=False, random_state=0
    ).fit_transform(X)
    sparse_out = RandomTreesEmbedding(
        n_estimators=10, sparse_output=True, random_state=0
    ).fit_transform(X)

    # With identical seeds, both representations must agree element-wise.
    assert_array_equal(sparse_out.toarray(), dense_out)
def test_random_hasher():
    """Random forest hashing makes the circles dataset linearly separable,
    even after projecting the hashed representation to two SVD dimensions.

    Note: not all random_states produce perfect results.
    """
    X, y = datasets.make_circles(factor=0.5)
    X_transformed = RandomTreesEmbedding(
        n_estimators=30, random_state=1
    ).fit_transform(X)

    # fit followed by transform must match fit_transform with the same seed
    hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
    assert_array_equal(hasher.fit(X).transform(X).toarray(), X_transformed.toarray())

    # exactly one active leaf per tree for every sample
    assert X_transformed.shape[0] == X.shape[0]
    assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)

    # the 2D SVD projection of the hashed space is linearly separable
    X_reduced = TruncatedSVD(n_components=2).fit_transform(X_transformed)
    linear_clf = LinearSVC().fit(X_reduced, y)
    assert linear_clf.score(X_reduced, y) == 1.0
@pytest.mark.parametrize("csc_container", CSC_CONTAINERS)
def test_random_hasher_sparse_data(csc_container):
X, y = datasets.make_multilabel_classification(random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_container(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
    """Predicted probabilities must not depend on ``n_jobs``."""
    rng = check_random_state(12321)
    n_samples, n_features = 80, 30
    X_train = rng.randn(n_samples, n_features)
    y_train = rng.randint(0, 2, n_samples)

    classifiers = []
    for n_jobs in [1, 2, 3, 8, 16, 32]:
        clf = RandomForestClassifier(
            n_estimators=20, n_jobs=n_jobs, random_state=12345
        )
        classifiers.append(clf.fit(X_train, y_train))

    X_test = rng.randn(n_samples, n_features)
    probas = [clf.predict_proba(X_test) for clf in classifiers]
    # Every parallelism setting must yield (almost) identical probabilities.
    for proba1, proba2 in itertools.pairwise(probas):
        assert_array_almost_equal(proba1, proba2)
def test_distribution():
    """Check the distribution of tree structures built by ExtraTreesRegressor.

    Each fitted tree is serialized into a compact ``feature,threshold`` string
    so that structurally identical trees can be counted.
    """
    rng = check_random_state(12321)

    # Single variable with 4 values
    X = rng.randint(0, 4, size=(1000, 1))
    y = rng.rand(1000)
    n_trees = 500

    reg = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)

    uniques = defaultdict(int)
    for tree in reg.estimators_:
        tree = "".join(
            ("%d,%d/" % (f, int(t)) if f >= 0 else "-")
            for f, t in zip(tree.tree_.feature, tree.tree_.threshold)
        )
        uniques[tree] += 1

    uniques = sorted([(1.0 * count / n_trees, tree) for tree, count in uniques.items()])

    # On a single variable problem where X_0 has 4 equiprobable values, there
    # are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
    # them has probability 1/3 while the 4 others have probability 1/6.
    assert len(uniques) == 5
    assert 0.20 > uniques[0][0]  # Rough approximation of 1/6.
    assert 0.20 > uniques[1][0]
    assert 0.20 > uniques[2][0]
    assert 0.20 > uniques[3][0]
    assert uniques[4][0] > 0.3
    assert uniques[4][1] == "0,1/0,0/--0,2/--"

    # Two variables, one with 2 values, one with 3 values
    X = np.empty((1000, 2))
    # Draw from the seeded local RNG rather than the global ``np.random``
    # so the test is deterministic and independent of outside state.
    X[:, 0] = rng.randint(0, 2, 1000)
    X[:, 1] = rng.randint(0, 3, 1000)
    y = rng.rand(1000)

    reg = ExtraTreesRegressor(max_features=1, random_state=1).fit(X, y)

    uniques = defaultdict(int)
    for tree in reg.estimators_:
        tree = "".join(
            ("%d,%d/" % (f, int(t)) if f >= 0 else "-")
            for f, t in zip(tree.tree_.feature, tree.tree_.threshold)
        )
        uniques[tree] += 1

    # With max_features=1 on a 2-value plus a 3-value feature there are
    # exactly 8 possible tree structures.
    uniques = [(count, tree) for tree, count in uniques.items()]
    assert len(uniques) == 8
@pytest.mark.parametrize("name", FOREST_ESTIMATORS)
def test_max_leaf_nodes_max_depth(name):
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/ensemble/tests/__init__.py | sklearn/ensemble/tests/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/ensemble/tests/test_weight_boosting.py | sklearn/ensemble/tests/test_weight_boosting.py | """Testing for the boost module (sklearn.ensemble.boost)."""
import re
import numpy as np
import pytest
from sklearn import datasets
from sklearn.base import BaseEstimator, clone
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.ensemble import AdaBoostClassifier, AdaBoostRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn.utils._mocking import NoSampleWeightWrapper
from sklearn.utils._testing import (
assert_allclose,
assert_array_almost_equal,
assert_array_equal,
)
from sklearn.utils.fixes import (
COO_CONTAINERS,
CSC_CONTAINERS,
CSR_CONTAINERS,
DOK_CONTAINERS,
LIL_CONTAINERS,
)
# Common random state
rng = np.random.RandomState(0)

# Toy sample: six 2D points forming two well-separated clusters.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1]  # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
# Test points and their expected classification / regression targets.
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]

# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
# NOTE(review): ``perm`` is never read — the shuffling below uses
# ``shuffle(..., random_state=rng)``. It does advance ``rng``'s state,
# so removing it would change the downstream shuffles; verify before cleanup.
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)

# Load the diabetes dataset and randomly permute it
diabetes = datasets.load_diabetes()
diabetes.data, diabetes.target = shuffle(
    diabetes.data, diabetes.target, random_state=rng
)
def test_oneclass_adaboost_proba():
    """predict_proba is well defined when training data has a single class.

    Non-regression test for issue #7501:
    https://github.com/scikit-learn/scikit-learn/issues/7501
    """
    y_single = np.ones(len(X))
    model = AdaBoostClassifier().fit(X, y_single)
    assert_array_almost_equal(model.predict_proba(X), np.ones((len(X), 1)))
def test_classification_toy():
    """AdaBoostClassifier fits and predicts correctly on a toy dataset."""
    clf = AdaBoostClassifier(random_state=0).fit(X, y_class)

    assert_array_equal(clf.predict(T), y_t_class)
    assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
    # Binary problem: probability columns per class, 1D decision values.
    assert clf.predict_proba(T).shape == (len(T), 2)
    assert clf.decision_function(T).shape == (len(T),)
def test_regression_toy():
    # Check regression on a toy dataset.
    # (Comment previously said "classification" — copy-paste slip.)
    clf = AdaBoostRegressor(random_state=0)
    clf.fit(X, y_regr)
    assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
    """Check consistency of AdaBoostClassifier on the iris dataset."""
    classes = np.unique(iris.target)
    clf = AdaBoostClassifier().fit(iris.data, iris.target)

    assert_array_equal(classes, clf.classes_)
    assert clf.predict_proba(iris.data).shape[1] == len(classes)
    assert clf.decision_function(iris.data).shape[1] == len(classes)

    score = clf.score(iris.data, iris.target)
    assert score > 0.9, f"Failed with {score = }"

    # Check we used multiple estimators
    assert len(clf.estimators_) > 1
    # Check for distinct random states (see issue #7408)
    assert len({est.random_state for est in clf.estimators_}) == len(clf.estimators_)
@pytest.mark.parametrize("loss", ["linear", "square", "exponential"])
def test_diabetes(loss):
# Check consistency on dataset diabetes.
reg = AdaBoostRegressor(loss=loss, random_state=0)
reg.fit(diabetes.data, diabetes.target)
score = reg.score(diabetes.data, diabetes.target)
assert score > 0.55
# Check we used multiple estimators
assert len(reg.estimators_) > 1
# Check for distinct random states (see issue #7408)
assert len(set(est.random_state for est in reg.estimators_)) == len(reg.estimators_)
def test_staged_predict():
    """Staged predictions must match the final predictions.

    Checks ``staged_predict``, ``staged_predict_proba`` and ``staged_score``
    for the classifier, and ``staged_predict``/``staged_score`` for the
    regressor, all with sample weights.
    """
    rng = np.random.RandomState(0)
    iris_weights = rng.randint(10, size=iris.target.shape)
    diabetes_weights = rng.randint(10, size=diabetes.target.shape)

    # AdaBoost classification
    clf = AdaBoostClassifier(n_estimators=10)
    clf.fit(iris.data, iris.target, sample_weight=iris_weights)

    predictions = clf.predict(iris.data)
    # list(...) instead of a pass-through comprehension (C416 idiom).
    staged_predictions = list(clf.staged_predict(iris.data))
    proba = clf.predict_proba(iris.data)
    staged_probas = list(clf.staged_predict_proba(iris.data))
    score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
    staged_scores = list(
        clf.staged_score(iris.data, iris.target, sample_weight=iris_weights)
    )

    assert len(staged_predictions) == 10
    assert_array_almost_equal(predictions, staged_predictions[-1])
    assert len(staged_probas) == 10
    assert_array_almost_equal(proba, staged_probas[-1])
    assert len(staged_scores) == 10
    assert_array_almost_equal(score, staged_scores[-1])

    # AdaBoost regression
    clf = AdaBoostRegressor(n_estimators=10, random_state=0)
    clf.fit(diabetes.data, diabetes.target, sample_weight=diabetes_weights)

    predictions = clf.predict(diabetes.data)
    staged_predictions = list(clf.staged_predict(diabetes.data))
    score = clf.score(diabetes.data, diabetes.target, sample_weight=diabetes_weights)
    staged_scores = list(
        clf.staged_score(
            diabetes.data, diabetes.target, sample_weight=diabetes_weights
        )
    )

    assert len(staged_predictions) == 10
    assert_array_almost_equal(predictions, staged_predictions[-1])
    assert len(staged_scores) == 10
    assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
    """Base trees of AdaBoost estimators can be tuned via GridSearchCV."""
    # AdaBoost classification
    param_grid = {
        "n_estimators": (1, 2),
        "estimator__max_depth": (1, 2),
    }
    search = GridSearchCV(
        AdaBoostClassifier(estimator=DecisionTreeClassifier()), param_grid
    )
    search.fit(iris.data, iris.target)

    # AdaBoost regression
    search = GridSearchCV(
        AdaBoostRegressor(estimator=DecisionTreeRegressor(), random_state=0),
        param_grid,
    )
    search.fit(diabetes.data, diabetes.target)
def test_pickle():
    """AdaBoost estimators survive a pickle round-trip unchanged."""
    import pickle

    # Adaboost classifier
    obj = AdaBoostClassifier()
    obj.fit(iris.data, iris.target)
    score = obj.score(iris.data, iris.target)
    obj2 = pickle.loads(pickle.dumps(obj))
    # Identity comparison of types (``is``) instead of ``==`` (E721 idiom).
    assert type(obj2) is obj.__class__
    assert obj2.score(iris.data, iris.target) == score

    # Adaboost regressor
    obj = AdaBoostRegressor(random_state=0)
    obj.fit(diabetes.data, diabetes.target)
    score = obj.score(diabetes.data, diabetes.target)
    obj2 = pickle.loads(pickle.dumps(obj))
    assert type(obj2) is obj.__class__
    assert obj2.score(diabetes.data, diabetes.target) == score
def test_importances():
    """Informative features must receive the largest importances."""
    X, y = datasets.make_classification(
        n_samples=2000,
        n_features=10,
        n_informative=3,
        n_redundant=0,
        n_repeated=0,
        shuffle=False,
        random_state=1,
    )

    clf = AdaBoostClassifier().fit(X, y)
    importances = clf.feature_importances_

    assert importances.shape[0] == 10
    # With shuffle=False the 3 informative features come first; each must
    # dominate every non-informative feature (broadcasted comparison).
    assert (importances[:3, np.newaxis] >= importances[3:]).all()
def test_adaboost_classifier_sample_weight_error():
    """Fitting with a mis-shaped sample_weight raises a clear ValueError."""
    expected = re.escape("sample_weight.shape == (1,), expected (6,)")
    with pytest.raises(ValueError, match=expected):
        AdaBoostClassifier().fit(X, y_class, sample_weight=np.asarray([-1]))
def test_estimator():
    """AdaBoost accepts a range of base estimators.

    Also checks that an ensemble whose first weak learner is worse than
    random fails at ``fit`` time rather than at ``predict`` time.
    """
    # Test different estimators.
    from sklearn.ensemble import RandomForestClassifier

    # XXX doesn't work with y_class because RF doesn't support classes_
    # Shouldn't AdaBoost run a LabelBinarizer?
    clf = AdaBoostClassifier(RandomForestClassifier())
    clf.fit(X, y_regr)

    clf = AdaBoostClassifier(SVC())
    clf.fit(X, y_class)

    from sklearn.ensemble import RandomForestRegressor

    clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
    clf.fit(X, y_regr)

    clf = AdaBoostRegressor(SVR(), random_state=0)
    clf.fit(X, y_regr)

    # Check that an empty discrete ensemble fails in fit, not predict.
    # All samples identical but labels differ -> no weak learner can beat
    # random guessing.
    X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
    y_fail = ["foo", "bar", 1, 2]
    clf = AdaBoostClassifier(SVC())
    with pytest.raises(ValueError, match="worse than random"):
        clf.fit(X_fail, y_fail)
def test_sample_weights_infinite():
    """An excessive learning rate drives sample weights to infinity and warns."""
    clf = AdaBoostClassifier(n_estimators=30, learning_rate=23.0)
    with pytest.warns(UserWarning, match="Sample weights have reached infinite values"):
        clf.fit(iris.data, iris.target)
@pytest.mark.parametrize(
"sparse_container, expected_internal_type",
zip(
[
*CSC_CONTAINERS,
*CSR_CONTAINERS,
*LIL_CONTAINERS,
*COO_CONTAINERS,
*DOK_CONTAINERS,
],
CSC_CONTAINERS + 4 * CSR_CONTAINERS,
),
)
def test_sparse_classification(sparse_container, expected_internal_type):
# Check classification with sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super().fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(
n_classes=1, n_samples=15, n_features=5, random_state=42
)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
X_train_sparse = sparse_container(X_train)
X_test_sparse = sparse_container(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
estimator=CustomSVC(probability=True),
random_state=1,
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
estimator=CustomSVC(probability=True),
random_state=1,
).fit(X_train, y_train)
# predict
sparse_clf_results = sparse_classifier.predict(X_test_sparse)
dense_clf_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_clf_results, dense_clf_results)
# decision_function
sparse_clf_results = sparse_classifier.decision_function(X_test_sparse)
dense_clf_results = dense_classifier.decision_function(X_test)
assert_array_almost_equal(sparse_clf_results, dense_clf_results)
# predict_log_proba
sparse_clf_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_clf_results = dense_classifier.predict_log_proba(X_test)
assert_array_almost_equal(sparse_clf_results, dense_clf_results)
# predict_proba
sparse_clf_results = sparse_classifier.predict_proba(X_test_sparse)
dense_clf_results = dense_classifier.predict_proba(X_test)
assert_array_almost_equal(sparse_clf_results, dense_clf_results)
# score
sparse_clf_results = sparse_classifier.score(X_test_sparse, y_test)
dense_clf_results = dense_classifier.score(X_test, y_test)
assert_array_almost_equal(sparse_clf_results, dense_clf_results)
# staged_decision_function
sparse_clf_results = sparse_classifier.staged_decision_function(X_test_sparse)
dense_clf_results = dense_classifier.staged_decision_function(X_test)
for sparse_clf_res, dense_clf_res in zip(sparse_clf_results, dense_clf_results):
assert_array_almost_equal(sparse_clf_res, dense_clf_res)
# staged_predict
sparse_clf_results = sparse_classifier.staged_predict(X_test_sparse)
dense_clf_results = dense_classifier.staged_predict(X_test)
for sparse_clf_res, dense_clf_res in zip(sparse_clf_results, dense_clf_results):
assert_array_equal(sparse_clf_res, dense_clf_res)
# staged_predict_proba
sparse_clf_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_clf_results = dense_classifier.staged_predict_proba(X_test)
for sparse_clf_res, dense_clf_res in zip(sparse_clf_results, dense_clf_results):
assert_array_almost_equal(sparse_clf_res, dense_clf_res)
# staged_score
sparse_clf_results = sparse_classifier.staged_score(X_test_sparse, y_test)
dense_clf_results = dense_classifier.staged_score(X_test, y_test)
for sparse_clf_res, dense_clf_res in zip(sparse_clf_results, dense_clf_results):
assert_array_equal(sparse_clf_res, dense_clf_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([t == expected_internal_type for t in types])
@pytest.mark.parametrize(
"sparse_container, expected_internal_type",
zip(
[
*CSC_CONTAINERS,
*CSR_CONTAINERS,
*LIL_CONTAINERS,
*COO_CONTAINERS,
*DOK_CONTAINERS,
],
CSC_CONTAINERS + 4 * CSR_CONTAINERS,
),
)
def test_sparse_regression(sparse_container, expected_internal_type):
# Check regression with sparse input.
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super().fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(
n_samples=15, n_features=50, n_targets=1, random_state=42
)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
X_train_sparse = sparse_container(X_train)
X_test_sparse = sparse_container(X_test)
# Trained on sparse format
sparse_regressor = AdaBoostRegressor(estimator=CustomSVR(), random_state=1).fit(
X_train_sparse, y_train
)
# Trained on dense format
dense_regressor = AdaBoostRegressor(estimator=CustomSVR(), random_state=1).fit(
X_train, y_train
)
# predict
sparse_regr_results = sparse_regressor.predict(X_test_sparse)
dense_regr_results = dense_regressor.predict(X_test)
assert_array_almost_equal(sparse_regr_results, dense_regr_results)
# staged_predict
sparse_regr_results = sparse_regressor.staged_predict(X_test_sparse)
dense_regr_results = dense_regressor.staged_predict(X_test)
for sparse_regr_res, dense_regr_res in zip(sparse_regr_results, dense_regr_results):
assert_array_almost_equal(sparse_regr_res, dense_regr_res)
types = [i.data_type_ for i in sparse_regressor.estimators_]
assert all([t == expected_internal_type for t in types])
def test_sample_weight_adaboost_regressor():
    """
    AdaBoostRegressor should work without sample_weights in the base estimator
    The random weighted sampling is done internally in the _boost method in
    AdaBoostRegressor.
    """

    class DummyEstimator(BaseEstimator):
        # Minimal estimator whose ``fit`` deliberately takes no
        # ``sample_weight`` argument.
        def fit(self, X, y):
            pass

        def predict(self, X):
            return np.zeros(X.shape[0])

    boost = AdaBoostRegressor(DummyEstimator(), n_estimators=3)
    boost.fit(X, y_regr)
    # One boosting weight and one error per iteration.
    assert len(boost.estimator_weights_) == len(boost.estimator_errors_)
def test_multidimensional_X():
    """AdaBoost estimators accept an n-dimensional data matrix."""
    rng = np.random.RandomState(0)
    X = rng.randn(51, 3, 3)
    yc = rng.choice([0, 1], 51)
    yr = rng.randn(51)

    clf = AdaBoostClassifier(DummyClassifier(strategy="most_frequent")).fit(X, yc)
    clf.predict(X)
    clf.predict_proba(X)

    reg = AdaBoostRegressor(DummyRegressor()).fit(X, yr)
    reg.predict(X)
def test_adaboostclassifier_without_sample_weight():
    """A base estimator that rejects sample_weight raises a ValueError."""
    X, y = iris.data, iris.target
    estimator = NoSampleWeightWrapper(DummyClassifier())
    clf = AdaBoostClassifier(estimator=estimator)
    expected = "{} doesn't support sample_weight".format(estimator.__class__.__name__)
    with pytest.raises(ValueError, match=expected):
        clf.fit(X, y)
def test_adaboostregressor_sample_weight():
    """A null sample weight must neutralise an outlier's influence."""
    # check that giving weight will have an influence on the error computed
    # for a weak learner
    rng = np.random.RandomState(42)
    X = np.linspace(0, 100, num=1000)
    y = (0.8 * X + 0.2) + (rng.rand(X.shape[0]) * 0.0001)
    X = X.reshape(-1, 1)

    # add an arbitrary outlier
    X[-1] *= 10
    y[-1] = 10000

    # random_state=0 ensure that the underlying bootstrap will use the outlier
    regr_no_outlier = AdaBoostRegressor(
        estimator=LinearRegression(), n_estimators=1, random_state=0
    )
    regr_with_weight = clone(regr_no_outlier)
    regr_with_outlier = clone(regr_no_outlier)

    # fit 3 models:
    # - a model containing the outlier
    # - a model without the outlier
    # - a model containing the outlier but with a null sample-weight
    regr_with_outlier.fit(X, y)
    regr_no_outlier.fit(X[:-1], y[:-1])
    sample_weight = np.ones_like(y)
    sample_weight[-1] = 0
    regr_with_weight.fit(X, y, sample_weight=sample_weight)

    # Scores are compared on the inlier portion only.
    score_with_outlier = regr_with_outlier.score(X[:-1], y[:-1])
    score_no_outlier = regr_no_outlier.score(X[:-1], y[:-1])
    score_with_weight = regr_with_weight.score(X[:-1], y[:-1])

    # Zero-weighting the outlier should behave like removing it entirely.
    assert score_with_outlier < score_no_outlier
    assert score_with_outlier < score_with_weight
    assert score_no_outlier == pytest.approx(score_with_weight)
def test_adaboost_consistent_predict():
    """``predict`` must agree with the argmax of ``predict_proba``.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/14084
    """
    X_train, X_test, y_train, y_test = train_test_split(
        *datasets.load_digits(return_X_y=True), random_state=42
    )
    model = AdaBoostClassifier(random_state=42).fit(X_train, y_train)

    proba_argmax = np.argmax(model.predict_proba(X_test), axis=1)
    assert_array_equal(proba_argmax, model.predict(X_test))
@pytest.mark.parametrize(
"model, X, y",
[
(AdaBoostClassifier(), iris.data, iris.target),
(AdaBoostRegressor(), diabetes.data, diabetes.target),
],
)
def test_adaboost_negative_weight_error(model, X, y):
sample_weight = np.ones_like(y)
sample_weight[-1] = -10
err_msg = "Negative values in data passed to `sample_weight`"
with pytest.raises(ValueError, match=err_msg):
model.fit(X, y, sample_weight=sample_weight)
def test_adaboost_numerically_stable_feature_importance_with_small_weights():
    """Tiny sample weights must not produce NaN feature importances.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/20320
    """
    rng = np.random.RandomState(42)
    X = rng.normal(size=(1000, 10))
    y = rng.choice([0, 1], size=1000)
    sample_weight = np.ones_like(y) * 1e-263

    base_tree = DecisionTreeClassifier(max_depth=10, random_state=12)
    ada_model = AdaBoostClassifier(
        estimator=base_tree, n_estimators=20, random_state=12
    )
    ada_model.fit(X, y, sample_weight=sample_weight)

    assert not np.isnan(ada_model.feature_importances_).any()
def test_adaboost_decision_function(global_random_seed):
    """Check that the decision function respects the symmetric constraint for weak
    learners.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/26520
    """
    n_classes = 3
    X, y = datasets.make_classification(
        n_classes=n_classes, n_clusters_per_class=1, random_state=global_random_seed
    )
    clf = AdaBoostClassifier(n_estimators=1, random_state=global_random_seed).fit(X, y)

    # Each row of the decision function must sum to zero (symmetric constraint).
    y_score = clf.decision_function(X)
    assert_allclose(y_score.sum(axis=1), 0, atol=1e-8)

    # With a single learner, we expect to have a decision function in
    # {1, - 1 / (n_classes - 1)}.
    assert set(np.unique(y_score)) == {1, -1 / (n_classes - 1)}

    # We can assert the same for staged_decision_function since we have a single learner
    for y_score in clf.staged_decision_function(X):
        assert_allclose(y_score.sum(axis=1), 0, atol=1e-8)

        # With a single learner, we expect to have a decision function in
        # {1, - 1 / (n_classes - 1)}.
        assert set(np.unique(y_score)) == {1, -1 / (n_classes - 1)}

    # With several learners the exact value set no longer holds, but the
    # zero-sum constraint still must.
    clf.set_params(n_estimators=5).fit(X, y)

    y_score = clf.decision_function(X)
    assert_allclose(y_score.sum(axis=1), 0, atol=1e-8)

    for y_score in clf.staged_decision_function(X):
        assert_allclose(y_score.sum(axis=1), 0, atol=1e-8)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/ensemble/tests/test_bagging.py | sklearn/ensemble/tests/test_bagging.py | """
Testing for the bagging ensemble module (sklearn.ensemble.bagging).
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import re
import warnings
from itertools import cycle, product
import joblib
import numpy as np
import pytest
from sklearn import config_context
from sklearn.base import BaseEstimator
from sklearn.datasets import load_diabetes, load_iris, make_hastie_10_2
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.ensemble import (
AdaBoostClassifier,
AdaBoostRegressor,
BaggingClassifier,
BaggingRegressor,
HistGradientBoostingClassifier,
HistGradientBoostingRegressor,
RandomForestClassifier,
RandomForestRegressor,
)
from sklearn.ensemble._bagging import _get_n_samples_bootstrap
from sklearn.feature_selection import SelectKBest
from sklearn.linear_model import LogisticRegression, Perceptron
from sklearn.model_selection import GridSearchCV, ParameterGrid, train_test_split
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer, scale
from sklearn.random_projection import SparseRandomProjection
from sklearn.svm import SVC, SVR
from sklearn.tests.metadata_routing_common import (
ConsumingClassifierWithOnlyPredict,
ConsumingClassifierWithoutPredictLogProba,
ConsumingClassifierWithoutPredictProba,
_Registry,
check_recorded_metadata,
)
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import check_random_state
from sklearn.utils._testing import (
assert_allclose,
assert_array_almost_equal,
assert_array_equal,
)
from sklearn.utils.fixes import CSC_CONTAINERS, CSR_CONTAINERS
# Shared module-level random state used to permute both toy datasets.
rng = check_random_state(0)

# also load the iris dataset
# and randomly permute it
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]

# also load the diabetes dataset
# and randomly permute it
diabetes = load_diabetes()
perm = rng.permutation(diabetes.target.size)
diabetes.data = diabetes.data[perm]
diabetes.target = diabetes.target[perm]
def test_classification():
    """Smoke-test BaggingClassifier across parameter settings and base learners."""
    # Check classification for various parameter settings.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(
        iris.data, iris.target, random_state=rng
    )
    grid = ParameterGrid(
        {
            "max_samples": [0.5, 1.0],
            "max_features": [1, 4],
            "bootstrap": [True, False],
            "bootstrap_features": [True, False],
        }
    )
    estimators = [
        None,
        DummyClassifier(),
        Perceptron(max_iter=20),
        DecisionTreeClassifier(max_depth=2),
        KNeighborsClassifier(),
        SVC(),
    ]
    # Try different parameter settings with different base classifiers without
    # doing the full cartesian product to keep the test durations low.
    # ``cycle`` pairs each grid point with the next estimator in round-robin.
    for params, estimator in zip(grid, cycle(estimators)):
        BaggingClassifier(
            estimator=estimator,
            random_state=rng,
            n_estimators=2,
            **params,
        ).fit(X_train, y_train).predict(X_test)
@pytest.mark.parametrize(
"sparse_container, params, method",
product(
CSR_CONTAINERS + CSC_CONTAINERS,
[
{
"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True,
},
{
"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True,
},
{"max_features": 2, "bootstrap": False, "bootstrap_features": True},
{"max_samples": 0.5, "bootstrap": True, "bootstrap_features": False},
],
["predict", "predict_proba", "predict_log_proba", "decision_function"],
),
)
def test_sparse_classification(sparse_container, params, method):
# Check classification for various parameter settings on sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super().fit(X, y)
self.data_type_ = type(X)
return self
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(
scale(iris.data), iris.target, random_state=rng
)
X_train_sparse = sparse_container(X_train)
X_test_sparse = sparse_container(X_test)
# Trained on sparse format
sparse_classifier = BaggingClassifier(
estimator=CustomSVC(kernel="linear", decision_function_shape="ovr"),
random_state=1,
**params,
).fit(X_train_sparse, y_train)
sparse_results = getattr(sparse_classifier, method)(X_test_sparse)
# Trained on dense format
dense_classifier = BaggingClassifier(
estimator=CustomSVC(kernel="linear", decision_function_shape="ovr"),
random_state=1,
**params,
).fit(X_train, y_train)
dense_results = getattr(dense_classifier, method)(X_test)
assert_array_almost_equal(sparse_results, dense_results)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([t == sparse_type for t in types])
def test_regression():
    """Smoke-test BaggingRegressor across parameter settings and base learners."""
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(
        diabetes.data[:50], diabetes.target[:50], random_state=rng
    )

    grid = ParameterGrid(
        {
            "max_samples": [0.5, 1.0],
            "max_features": [0.5, 1.0],
            "bootstrap": [True, False],
            "bootstrap_features": [True, False],
        }
    )
    base_estimators = [
        None,
        DummyRegressor(),
        DecisionTreeRegressor(),
        KNeighborsRegressor(),
        SVR(),
    ]

    # Full cartesian product of base estimators and parameter settings.
    for base, params in product(base_estimators, grid):
        model = BaggingRegressor(estimator=base, random_state=rng, **params)
        model.fit(X_train, y_train).predict(X_test)
@pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + CSC_CONTAINERS)
def test_sparse_regression(sparse_container):
# Check regression for various parameter settings on sparse input.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(
diabetes.data[:50], diabetes.target[:50], random_state=rng
)
class CustomSVR(SVR):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super().fit(X, y)
self.data_type_ = type(X)
return self
parameter_sets = [
{
"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True,
},
{
"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True,
},
{"max_features": 2, "bootstrap": False, "bootstrap_features": True},
{"max_samples": 0.5, "bootstrap": True, "bootstrap_features": False},
]
X_train_sparse = sparse_container(X_train)
X_test_sparse = sparse_container(X_test)
for params in parameter_sets:
# Trained on sparse format
sparse_classifier = BaggingRegressor(
estimator=CustomSVR(), random_state=1, **params
).fit(X_train_sparse, y_train)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_results = (
BaggingRegressor(estimator=CustomSVR(), random_state=1, **params)
.fit(X_train, y_train)
.predict(X_test)
)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert_array_almost_equal(sparse_results, dense_results)
assert all([t == sparse_type for t in types])
assert_array_almost_equal(sparse_results, dense_results)
class DummySizeEstimator(BaseEstimator):
    """Minimal estimator recording the size and a hash of its training set.

    Used by ``test_bootstrap_samples`` to verify that bagging draws full-size
    bootstrap resamples whose contents differ between base estimators.
    """

    def fit(self, X, y):
        # Record how many samples were seen and a fingerprint of the data.
        self.training_size_ = X.shape[0]
        self.training_hash_ = joblib.hash(X)
        # Return self, following the scikit-learn estimator API convention.
        return self

    def predict(self, X):
        return np.ones(X.shape[0])
def test_bootstrap_samples():
    """Bootstrapped base estimators are imperfect; non-bootstrapped are not."""
    # Test that bootstrapping samples generate non-perfect base estimators.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(
        diabetes.data, diabetes.target, random_state=rng
    )

    # Reference: a single tree fits the training set perfectly.
    estimator = DecisionTreeRegressor().fit(X_train, y_train)

    # without bootstrap, all trees are perfect on the training set
    ensemble = BaggingRegressor(
        estimator=DecisionTreeRegressor(),
        max_samples=1.0,
        bootstrap=False,
        random_state=rng,
    ).fit(X_train, y_train)

    assert estimator.score(X_train, y_train) == ensemble.score(X_train, y_train)

    # with bootstrap, trees are no longer perfect on the training set
    ensemble = BaggingRegressor(
        estimator=DecisionTreeRegressor(),
        max_samples=1.0,
        bootstrap=True,
        random_state=rng,
    ).fit(X_train, y_train)

    assert estimator.score(X_train, y_train) > ensemble.score(X_train, y_train)

    # check that each sampling correspond to a complete bootstrap resample.
    # the size of each bootstrap should be the same as the input data but
    # the data should be different (checked using the hash of the data).
    ensemble = BaggingRegressor(estimator=DummySizeEstimator(), bootstrap=True).fit(
        X_train, y_train
    )
    training_hash = []
    for estimator in ensemble.estimators_:
        assert estimator.training_size_ == X_train.shape[0]
        training_hash.append(estimator.training_hash_)
    # All resamples must be pairwise distinct.
    assert len(set(training_hash)) == len(training_hash)
def test_bootstrap_features():
    """Feature bootstrapping draws duplicates; sampling without replacement
    keeps every feature exactly once."""
    # Test that bootstrapping features may generate duplicate features.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(
        diabetes.data, diabetes.target, random_state=rng
    )
    ensemble = BaggingRegressor(
        estimator=DecisionTreeRegressor(),
        max_features=1.0,
        bootstrap_features=False,
        random_state=rng,
    ).fit(X_train, y_train)
    # without replacement, the unique feature count equals the total count
    for features in ensemble.estimators_features_:
        assert diabetes.data.shape[1] == np.unique(features).shape[0]
    ensemble = BaggingRegressor(
        estimator=DecisionTreeRegressor(),
        max_features=1.0,
        bootstrap_features=True,
        random_state=rng,
    ).fit(X_train, y_train)
    # with replacement, at least one feature is drawn more than once
    for features in ensemble.estimators_features_:
        assert diabetes.data.shape[1] > np.unique(features).shape[0]
def test_probability():
    """predict_proba rows must sum to one and match exp(predict_log_proba),
    even when some bootstrap samples miss entire classes."""
    # Predict probabilities.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(
        iris.data, iris.target, random_state=rng
    )
    with np.errstate(divide="ignore", invalid="ignore"):
        # Normal case
        ensemble = BaggingClassifier(
            estimator=DecisionTreeClassifier(), random_state=rng
        ).fit(X_train, y_train)
        assert_array_almost_equal(
            np.sum(ensemble.predict_proba(X_test), axis=1), np.ones(len(X_test))
        )
        assert_array_almost_equal(
            ensemble.predict_proba(X_test), np.exp(ensemble.predict_log_proba(X_test))
        )
        # Degenerate case, where some classes are missing
        # (max_samples=5 makes it likely that a bootstrap misses a class)
        ensemble = BaggingClassifier(
            estimator=LogisticRegression(), random_state=rng, max_samples=5
        ).fit(X_train, y_train)
        assert_array_almost_equal(
            np.sum(ensemble.predict_proba(X_test), axis=1), np.ones(len(X_test))
        )
        assert_array_almost_equal(
            ensemble.predict_proba(X_test), np.exp(ensemble.predict_log_proba(X_test))
        )
def test_oob_score_classification():
    """OOB score approximates the held-out score; too few estimators warn."""
    # Check that oob prediction is a good estimation of the generalization
    # error.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(
        iris.data, iris.target, random_state=rng
    )
    for estimator in [DecisionTreeClassifier(), SVC()]:
        clf = BaggingClassifier(
            estimator=estimator,
            n_estimators=100,
            bootstrap=True,
            oob_score=True,
            random_state=rng,
        ).fit(X_train, y_train)
        test_score = clf.score(X_test, y_test)
        # with 100 estimators, OOB should be close to the test score
        assert abs(test_score - clf.oob_score_) < 0.1
        # Test with few estimators
        warn_msg = (
            "Some inputs do not have OOB scores. This probably means too few "
            "estimators were used to compute any reliable oob estimates."
        )
        with pytest.warns(UserWarning, match=warn_msg):
            clf = BaggingClassifier(
                estimator=estimator,
                n_estimators=1,
                bootstrap=True,
                oob_score=True,
                random_state=rng,
            )
            clf.fit(X_train, y_train)
def test_oob_score_regression():
    """OOB score approximates the held-out score; too few estimators warn."""
    # Check that oob prediction is a good estimation of the generalization
    # error.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(
        diabetes.data, diabetes.target, random_state=rng
    )
    clf = BaggingRegressor(
        estimator=DecisionTreeRegressor(),
        n_estimators=50,
        bootstrap=True,
        oob_score=True,
        random_state=rng,
    ).fit(X_train, y_train)
    test_score = clf.score(X_test, y_test)
    # with 50 estimators, OOB should be close to the test score
    assert abs(test_score - clf.oob_score_) < 0.1
    # Test with few estimators
    warn_msg = (
        "Some inputs do not have OOB scores. This probably means too few "
        "estimators were used to compute any reliable oob estimates."
    )
    with pytest.warns(UserWarning, match=warn_msg):
        regr = BaggingRegressor(
            estimator=DecisionTreeRegressor(),
            n_estimators=1,
            bootstrap=True,
            oob_score=True,
            random_state=rng,
        )
        regr.fit(X_train, y_train)
def test_single_estimator():
    """A 1-estimator ensemble with no resampling must equal the bare model."""
    # Check singleton ensembles.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(
        diabetes.data, diabetes.target, random_state=rng
    )
    clf1 = BaggingRegressor(
        estimator=KNeighborsRegressor(),
        n_estimators=1,
        bootstrap=False,
        bootstrap_features=False,
        random_state=rng,
    ).fit(X_train, y_train)
    clf2 = KNeighborsRegressor().fit(X_train, y_train)
    assert_array_almost_equal(clf1.predict(X_test), clf2.predict(X_test))
def test_error():
    """A bagged decision tree must not expose `decision_function`."""
    features, target = iris.data, iris.target
    bagged = BaggingClassifier(DecisionTreeClassifier()).fit(features, target)
    assert not hasattr(bagged, "decision_function")
def test_parallel_classification():
    """Classifier predictions must not depend on ``n_jobs``."""
    # Check parallel classification.
    X_train, X_test, y_train, y_test = train_test_split(
        iris.data, iris.target, random_state=0
    )
    ensemble = BaggingClassifier(
        DecisionTreeClassifier(), n_jobs=3, random_state=0
    ).fit(X_train, y_train)
    # predict_proba
    y1 = ensemble.predict_proba(X_test)
    ensemble.set_params(n_jobs=1)
    y2 = ensemble.predict_proba(X_test)
    assert_array_almost_equal(y1, y2)
    ensemble = BaggingClassifier(
        DecisionTreeClassifier(), n_jobs=1, random_state=0
    ).fit(X_train, y_train)
    y3 = ensemble.predict_proba(X_test)
    assert_array_almost_equal(y1, y3)
    # decision_function
    ensemble = BaggingClassifier(
        SVC(decision_function_shape="ovr"), n_jobs=3, random_state=0
    ).fit(X_train, y_train)
    decisions1 = ensemble.decision_function(X_test)
    ensemble.set_params(n_jobs=1)
    decisions2 = ensemble.decision_function(X_test)
    assert_array_almost_equal(decisions1, decisions2)
    ensemble = BaggingClassifier(
        SVC(decision_function_shape="ovr"), n_jobs=1, random_state=0
    ).fit(X_train, y_train)
    decisions3 = ensemble.decision_function(X_test)
    assert_array_almost_equal(decisions1, decisions3)
# TODO: remove mark once loky bug is fixed:
# https://github.com/joblib/loky/issues/458
@pytest.mark.thread_unsafe
def test_parallel_regression():
    """Regressor predictions must not depend on ``n_jobs``."""
    # Check parallel regression.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(
        diabetes.data, diabetes.target, random_state=rng
    )
    ensemble = BaggingRegressor(DecisionTreeRegressor(), n_jobs=3, random_state=0).fit(
        X_train, y_train
    )
    ensemble.set_params(n_jobs=1)
    y1 = ensemble.predict(X_test)
    ensemble.set_params(n_jobs=2)
    y2 = ensemble.predict(X_test)
    assert_array_almost_equal(y1, y2)
    ensemble = BaggingRegressor(DecisionTreeRegressor(), n_jobs=1, random_state=0).fit(
        X_train, y_train
    )
    y3 = ensemble.predict(X_test)
    assert_array_almost_equal(y1, y3)
def test_gridsearch():
    """Check that bagging ensembles can be grid-searched."""
    # Work on a copy: binarizing ``iris.target`` in place would leak the
    # modified labels into every other test sharing the module-level fixture.
    X, y = iris.data, iris.target.copy()
    # Transform iris into a binary classification task
    y[y == 2] = 1
    # Grid search with scoring based on decision_function
    parameters = {"n_estimators": (1, 2), "estimator__C": (1, 2)}
    GridSearchCV(BaggingClassifier(SVC()), parameters, scoring="roc_auc").fit(X, y)
# TODO: remove mark once loky bug is fixed:
# https://github.com/joblib/loky/issues/458
@pytest.mark.thread_unsafe
def test_estimator():
    """Default (None) and explicit base estimators are fitted and exposed."""
    # Check estimator and its default values.
    rng = check_random_state(0)
    # Classification
    X_train, X_test, y_train, y_test = train_test_split(
        iris.data, iris.target, random_state=rng
    )
    # estimator=None falls back to a decision tree
    ensemble = BaggingClassifier(None, n_jobs=3, random_state=0).fit(X_train, y_train)
    assert isinstance(ensemble.estimator_, DecisionTreeClassifier)
    ensemble = BaggingClassifier(
        DecisionTreeClassifier(), n_jobs=3, random_state=0
    ).fit(X_train, y_train)
    assert isinstance(ensemble.estimator_, DecisionTreeClassifier)
    ensemble = BaggingClassifier(Perceptron(), n_jobs=3, random_state=0).fit(
        X_train, y_train
    )
    assert isinstance(ensemble.estimator_, Perceptron)
    # Regression
    X_train, X_test, y_train, y_test = train_test_split(
        diabetes.data, diabetes.target, random_state=rng
    )
    ensemble = BaggingRegressor(None, n_jobs=3, random_state=0).fit(X_train, y_train)
    assert isinstance(ensemble.estimator_, DecisionTreeRegressor)
    ensemble = BaggingRegressor(DecisionTreeRegressor(), n_jobs=3, random_state=0).fit(
        X_train, y_train
    )
    assert isinstance(ensemble.estimator_, DecisionTreeRegressor)
    ensemble = BaggingRegressor(SVR(), n_jobs=3, random_state=0).fit(X_train, y_train)
    assert isinstance(ensemble.estimator_, SVR)
def test_bagging_with_pipeline():
    """Bagging must seed estimators nested inside a pipeline."""
    inner = make_pipeline(SelectKBest(k=1), DecisionTreeClassifier())
    bagging = BaggingClassifier(inner, max_features=2)
    bagging.fit(iris.data, iris.target)
    # The tree at the end of the first fitted pipeline received an int seed.
    fitted_tree = bagging[0].steps[-1][1]
    assert isinstance(fitted_tree.random_state, int)
def test_warm_start(random_state=42):
    """Incremental warm-start fitting builds an ensemble of the right size
    whose estimator seeds match a single cold fit of the same size."""
    X, y = make_hastie_10_2(n_samples=20, random_state=1)
    clf_ws = None
    for n_estimators in [5, 10]:
        if clf_ws is None:
            clf_ws = BaggingClassifier(
                n_estimators=n_estimators, random_state=random_state, warm_start=True
            )
        else:
            clf_ws.set_params(n_estimators=n_estimators)
        clf_ws.fit(X, y)
        assert len(clf_ws) == n_estimators
    clf_no_ws = BaggingClassifier(
        n_estimators=10, random_state=random_state, warm_start=False
    )
    clf_no_ws.fit(X, y)
    # Same seeds in both ensembles => identically drawn estimators.
    assert {tree.random_state for tree in clf_ws} == {
        tree.random_state for tree in clf_no_ws
    }
def test_warm_start_smaller_n_estimators():
    """A warm-started refit with a smaller ``n_estimators`` must raise."""
    X, y = make_hastie_10_2(n_samples=20, random_state=1)
    clf = BaggingClassifier(n_estimators=5, warm_start=True)
    clf.fit(X, y)
    # Shrinking the ensemble during a warm start is not supported.
    with pytest.raises(ValueError):
        clf.set_params(n_estimators=4).fit(X, y)
def test_warm_start_equal_n_estimators():
    """Refitting with unchanged ``n_estimators`` warns and changes nothing."""
    # Test that nothing happens when fitting without increasing n_estimators
    X, y = make_hastie_10_2(n_samples=20, random_state=1)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
    clf = BaggingClassifier(n_estimators=5, warm_start=True, random_state=83)
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    # modify X to nonsense values, this should not change anything
    X_train += 1.0
    warn_msg = "Warm-start fitting without increasing n_estimators does not"
    with pytest.warns(UserWarning, match=warn_msg):
        clf.fit(X_train, y_train)
    # predictions still come from the original (untouched) estimators
    assert_array_equal(y_pred, clf.predict(X_test))
def test_warm_start_equivalence():
    """5+5 warm-started estimators must predict like 10 cold-fitted ones."""
    # warm started classifier with 5+5 estimators should be equivalent to
    # one classifier with 10 estimators
    X, y = make_hastie_10_2(n_samples=20, random_state=1)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
    clf_ws = BaggingClassifier(n_estimators=5, warm_start=True, random_state=3141)
    clf_ws.fit(X_train, y_train)
    clf_ws.set_params(n_estimators=10)
    clf_ws.fit(X_train, y_train)
    y1 = clf_ws.predict(X_test)
    clf = BaggingClassifier(n_estimators=10, warm_start=False, random_state=3141)
    clf.fit(X_train, y_train)
    y2 = clf.predict(X_test)
    assert_array_almost_equal(y1, y2)
def test_warm_start_with_oob_score_fails():
    """Combining ``warm_start`` and ``oob_score`` is rejected at fit time."""
    X, y = make_hastie_10_2(n_samples=20, random_state=1)
    with pytest.raises(ValueError):
        BaggingClassifier(n_estimators=5, warm_start=True, oob_score=True).fit(X, y)
def test_warning_bootstrap_sample_weight():
    """Passing sample_weight with bootstrap=False triggers a UserWarning."""
    X, y = iris.data, iris.target
    sample_weight = np.ones_like(y)
    clf = BaggingClassifier(bootstrap=False)
    warn_msg = (
        "When fitting BaggingClassifier with sample_weight "
        "it is recommended to use bootstrap=True"
    )
    with pytest.warns(UserWarning, match=warn_msg):
        clf.fit(X, y, sample_weight=sample_weight)
    # Same check for the regressor.
    X, y = diabetes.data, diabetes.target
    sample_weight = np.ones_like(y)
    reg = BaggingRegressor(bootstrap=False)
    warn_msg = (
        "When fitting BaggingRegressor with sample_weight "
        "it is recommended to use bootstrap=True"
    )
    with pytest.warns(UserWarning, match=warn_msg):
        reg.fit(X, y, sample_weight=sample_weight)
def test_invalid_sample_weight_max_samples_bootstrap_combinations():
    """Pathological sample_weight / max_samples / bootstrap combinations
    must warn or raise with informative messages."""
    X, y = iris.data, iris.target
    # Case 1: small weights and fractional max_samples lead to a small
    # number of bootstrap samples, which raises a UserWarning.
    clf = BaggingClassifier(max_samples=1.0)
    sample_weight = np.ones_like(y) / (2 * len(y))
    expected_msg = (
        "Using the fractional value max_samples=1.0 when "
        r"the total sum of sample weights is 0.5(\d*) "
        r"results in a low number \(1\) of bootstrap samples. "
        "We recommend passing `max_samples` as an integer."
    )
    with pytest.warns(UserWarning, match=expected_msg):
        clf.fit(X, y, sample_weight=sample_weight)
    # Case 2: large weights and bootstrap=False would lead to sampling without
    # replacement more than the number of samples, which is not allowed.
    clf = BaggingClassifier(bootstrap=False, max_samples=1.0)
    sample_weight = np.ones_like(y)
    sample_weight[-1] = 2
    expected_msg = re.escape(
        "max_samples=151 must be <= n_samples=150 to be able to sample without "
        "replacement."
    )
    with pytest.raises(ValueError, match=expected_msg):
        with pytest.warns(
            UserWarning, match="When fitting BaggingClassifier with sample_weight"
        ):
            clf.fit(X, y, sample_weight=sample_weight)
class EstimatorAcceptingSampleWeight(BaseEstimator):
    """Fake estimator accepting sample_weight"""
    def fit(self, X, y, sample_weight=None):
        """Record values passed during fit"""
        # Stored so tests can inspect exactly what bagging forwarded.
        self.X_ = X
        self.y_ = y
        self.sample_weight_ = sample_weight
    def predict(self, X):
        # Predictions are irrelevant; only fit-time inspection matters.
        pass
class EstimatorRejectingSampleWeight(BaseEstimator):
    """Fake estimator rejecting sample_weight"""
    def fit(self, X, y):
        """Record values passed during fit"""
        # Stored so tests can inspect exactly what bagging forwarded.
        self.X_ = X
        self.y_ = y
    def predict(self, X):
        # Predictions are irrelevant; only fit-time inspection matters.
        pass
@pytest.mark.parametrize("bagging_class", [BaggingRegressor, BaggingClassifier])
@pytest.mark.parametrize("accept_sample_weight", [False, True])
@pytest.mark.parametrize("metadata_routing", [False, True])
@pytest.mark.parametrize("max_samples", [10, 0.8])
def test_draw_indices_using_sample_weight(
    bagging_class, accept_sample_weight, metadata_routing, max_samples
):
    """Zero-weight samples are never drawn, and the draw is forwarded
    either as per-sample weights (estimator accepts sample_weight) or by
    indexing the training data (estimator rejects sample_weight)."""
    X = np.arange(100).reshape(-1, 1)
    y = np.repeat([0, 1], 50)
    # all indices except 4 and 5 have zero weight
    sample_weight = np.zeros(100)
    sample_weight[4] = 1
    sample_weight[5] = 2
    if accept_sample_weight:
        base_estimator = EstimatorAcceptingSampleWeight()
    else:
        base_estimator = EstimatorRejectingSampleWeight()
    n_samples, n_features = X.shape
    if isinstance(max_samples, float):
        # max_samples passed as a fraction of the input data. Since
        # sample_weight are provided, the effective number of samples is the
        # sum of the sample weights.
        expected_integer_max_samples = int(max_samples * sample_weight.sum())
    else:
        expected_integer_max_samples = max_samples
    with config_context(enable_metadata_routing=metadata_routing):
        # TODO(slep006): remove block when default routing is implemented
        if metadata_routing and accept_sample_weight:
            base_estimator = base_estimator.set_fit_request(sample_weight=True)
        bagging = bagging_class(base_estimator, max_samples=max_samples, n_estimators=4)
        bagging.fit(X, y, sample_weight=sample_weight)
    for estimator, samples in zip(bagging.estimators_, bagging.estimators_samples_):
        counts = np.bincount(samples, minlength=n_samples)
        assert sum(counts) == len(samples) == expected_integer_max_samples
        # only indices 4 and 5 should appear
        assert np.isin(samples, [4, 5]).all()
        if accept_sample_weight:
            # sampled indices represented through weighting
            assert estimator.X_.shape == (n_samples, n_features)
            assert estimator.y_.shape == (n_samples,)
            assert_allclose(estimator.X_, X)
            assert_allclose(estimator.y_, y)
            assert_allclose(estimator.sample_weight_, counts)
        else:
            # sampled indices represented through indexing
            assert estimator.X_.shape == (expected_integer_max_samples, n_features)
            assert estimator.y_.shape == (expected_integer_max_samples,)
            assert_allclose(estimator.X_, X[samples])
            assert_allclose(estimator.y_, y[samples])
def test_get_n_samples_bootstrap():
    """Check the `max_samples` resolution rules of `_get_n_samples_bootstrap`.

    `None` means all samples, an integer is used verbatim, and a float is a
    fraction of the number of samples (or of the sample-weight total when
    weights are given). Fractions resolving to a very low draw count warn.
    """
    # max_samples=None: every sample is used; sample_weight is irrelevant.
    n_samples, max_samples, sample_weight = 10, None, "not_used"
    assert _get_n_samples_bootstrap(n_samples, max_samples, sample_weight) == n_samples
    # integer max_samples is taken as-is.
    n_samples, max_samples, sample_weight = 10, 5, "not_used"
    assert (
        _get_n_samples_bootstrap(n_samples, max_samples, sample_weight) == max_samples
    )
    # tiny fraction: the result is clamped to at least one sample.
    n_samples, max_samples, sample_weight = 10, 1e-5, None
    assert _get_n_samples_bootstrap(n_samples, max_samples, sample_weight) == 1
    # fractions resolving to a low draw count warn and recommend integers.
    n_samples, max_samples, sample_weight = 10, 0.66, None
    warning_msg = ".+the number of samples.+low number.+max_samples.+as an integer"
    with pytest.warns(UserWarning, match=warning_msg):
        assert _get_n_samples_bootstrap(n_samples, max_samples, sample_weight) == int(
            max_samples * n_samples
        )
    n_samples, max_samples, sample_weight = 10, 1e-5, None
    with pytest.warns(UserWarning, match=warning_msg):
        assert _get_n_samples_bootstrap(n_samples, max_samples, sample_weight) == 1
    # with weights, the fraction applies to the total weight instead.
    warning_msg_with_weights = (
        ".+the total sum of sample weights.+low number.+max_samples.+as an integer"
    )
    rng = np.random.default_rng(0)
    n_samples, max_samples, sample_weight = 1_000_000, 1e-5, rng.uniform(size=1_000_000)
    with pytest.warns(UserWarning, match=warning_msg_with_weights):
        assert _get_n_samples_bootstrap(n_samples, max_samples, sample_weight) == int(
            max_samples * sample_weight.sum()
        )
    # large enough draws must not warn at all (warnings escalated to errors).
    # NOTE(review): the original also assigned `sample_weight = np.ones(3)`
    # here; it was dead code, immediately overwritten before any use.
    with warnings.catch_warnings():
        warnings.simplefilter("error")
        n_samples, max_samples, sample_weight = 100, 30, None
        assert (
            _get_n_samples_bootstrap(n_samples, max_samples, sample_weight)
            == max_samples
        )
        n_samples, max_samples, sample_weight = 100, 0.5, rng.uniform(size=100)
        assert _get_n_samples_bootstrap(n_samples, max_samples, sample_weight) == int(
            max_samples * sample_weight.sum()
        )
def test_oob_score_removed_on_warm_start():
    """A warm-started refit with ``oob_score=False`` drops ``oob_score_``."""
    X, y = make_hastie_10_2(n_samples=100, random_state=1)
    clf = BaggingClassifier(n_estimators=5, oob_score=True)
    clf.fit(X, y)
    # Refit with OOB scoring disabled: the stale attribute must disappear.
    clf.set_params(warm_start=True, oob_score=False, n_estimators=10).fit(X, y)
    with pytest.raises(AttributeError):
        getattr(clf, "oob_score_")
def test_oob_score_consistency():
    """OOB score is reproducible for fixed estimator, data and seed."""
    X, y = make_hastie_10_2(n_samples=200, random_state=1)
    bagging = BaggingClassifier(
        KNeighborsClassifier(),
        max_samples=0.5,
        max_features=0.5,
        oob_score=True,
        random_state=1,
    )
    first_score = bagging.fit(X, y).oob_score_
    second_score = bagging.fit(X, y).oob_score_
    assert first_score == second_score
def test_estimators_samples():
    """`estimators_samples_` must allow exact re-fitting of each estimator."""
    # Check that format of estimators_samples_ is correct and that results
    # generated at fit time can be identically reproduced at a later time
    # using data saved in object attributes.
    X, y = make_hastie_10_2(n_samples=200, random_state=1)
    bagging = BaggingClassifier(
        LogisticRegression(),
        max_samples=0.5,
        max_features=0.5,
        random_state=1,
        bootstrap=False,
    )
    bagging.fit(X, y)
    # Get relevant attributes
    estimators_samples = bagging.estimators_samples_
    estimators_features = bagging.estimators_features_
    estimators = bagging.estimators_
    # Test for correct formatting
    assert len(estimators_samples) == len(estimators)
    assert len(estimators_samples[0]) == len(X) // 2
    assert estimators_samples[0].dtype.kind == "i"
    # Re-fit single estimator to test for consistent sampling
    estimator_index = 0
    estimator_samples = estimators_samples[estimator_index]
    estimator_features = estimators_features[estimator_index]
    estimator = estimators[estimator_index]
    X_train = (X[estimator_samples])[:, estimator_features]
    y_train = y[estimator_samples]
    orig_coefs = estimator.coef_
    estimator.fit(X_train, y_train)
    new_coefs = estimator.coef_
    # identical coefficients => stored indices reproduce the fit exactly
    assert_array_almost_equal(orig_coefs, new_coefs)
def test_estimators_samples_deterministic():
    """Re-fitting from stored samples is reproducible even with a random
    pipeline step (non-regression test for issue #9524)."""
    # This test is a regression test to check that with a random step
    # (e.g. SparseRandomProjection) and a given random state, the results
    # generated at fit time can be identically reproduced at a later time using
    # data saved in object attributes. Check issue #9524 for full discussion.
    iris = load_iris()
    X, y = iris.data, iris.target
    base_pipeline = make_pipeline(
        SparseRandomProjection(n_components=2), LogisticRegression()
    )
    clf = BaggingClassifier(estimator=base_pipeline, max_samples=0.5, random_state=0)
    clf.fit(X, y)
    pipeline_estimator_coef = clf.estimators_[0].steps[-1][1].coef_.copy()
    estimator = clf.estimators_[0]
    estimator_sample = clf.estimators_samples_[0]
    estimator_feature = clf.estimators_features_[0]
    X_train = (X[estimator_sample])[:, estimator_feature]
    y_train = y[estimator_sample]
    estimator.fit(X_train, y_train)
    # the refit pipeline must land on exactly the same coefficients
    assert_array_equal(estimator.steps[-1][1].coef_, pipeline_estimator_coef)
def test_max_samples_consistency():
# Make sure validated max_samples and original max_samples are identical
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
"""Base and mixin classes for nearest neighbors."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import itertools
import numbers
import warnings
from abc import ABCMeta, abstractmethod
from functools import partial
from numbers import Integral, Real
import numpy as np
from joblib import effective_n_jobs
from scipy.sparse import csr_matrix, issparse
from sklearn.base import BaseEstimator, MultiOutputMixin, is_classifier
from sklearn.exceptions import DataConversionWarning, EfficiencyWarning
from sklearn.metrics import DistanceMetric, pairwise_distances_chunked
from sklearn.metrics._pairwise_distances_reduction import ArgKmin, RadiusNeighbors
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.neighbors._ball_tree import BallTree
from sklearn.neighbors._kd_tree import KDTree
from sklearn.utils import check_array, gen_even_slices, get_tags
from sklearn.utils._param_validation import Interval, StrOptions, validate_params
from sklearn.utils.fixes import parse_version, sp_base_version
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.parallel import Parallel, delayed
from sklearn.utils.validation import _to_object_array, check_is_fitted, validate_data
# Distance metrics provided by scipy.spatial.distance; a few are only
# available on older SciPy versions (see the version guards below).
SCIPY_METRICS = [
    "braycurtis",
    "canberra",
    "chebyshev",
    "correlation",
    "cosine",
    "dice",
    "hamming",
    "jaccard",
    "mahalanobis",
    "minkowski",
    "rogerstanimoto",
    "russellrao",
    "seuclidean",
    "sokalsneath",
    "sqeuclidean",
    "yule",
]
if sp_base_version < parse_version("1.17"):
    # Deprecated in SciPy 1.15 and removed in SciPy 1.17
    SCIPY_METRICS += ["sokalmichener"]
if sp_base_version < parse_version("1.11"):
    # Deprecated in SciPy 1.9 and removed in SciPy 1.11
    SCIPY_METRICS += ["kulsinski"]
if sp_base_version < parse_version("1.9"):
    # Deprecated in SciPy 1.0 and removed in SciPy 1.9
    SCIPY_METRICS += ["matching"]
# Metrics accepted by each neighbor-search algorithm on dense inputs.
VALID_METRICS = dict(
    ball_tree=BallTree.valid_metrics,
    kd_tree=KDTree.valid_metrics,
    # The following list comes from the
    # sklearn.metrics.pairwise doc string
    brute=sorted(set(PAIRWISE_DISTANCE_FUNCTIONS).union(SCIPY_METRICS)),
)
# Metrics accepted on sparse inputs; the tree algorithms support none.
VALID_METRICS_SPARSE = dict(
    ball_tree=[],
    kd_tree=[],
    brute=(PAIRWISE_DISTANCE_FUNCTIONS.keys() - {"haversine", "nan_euclidean"}),
)
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``.
Assume weights have already been validated.
Parameters
----------
dist : ndarray
The input distances.
weights : {'uniform', 'distance'}, callable or None
The kind of weighting used.
Returns
-------
weights_arr : array of the same shape as ``dist``
If ``weights == 'uniform'``, then returns None.
"""
if weights in (None, "uniform"):
return None
if weights == "distance":
# if user attempts to classify a point that was zero distance from one
# or more training points, those training points are weighted as 1.0
# and the other points as 0.0
if dist.dtype is np.dtype(object):
for point_dist_i, point_dist in enumerate(dist):
# check if point_dist is iterable
# (ex: RadiusNeighborClassifier.predict may set an element of
# dist to 1e-6 to represent an 'outlier')
if hasattr(point_dist, "__contains__") and 0.0 in point_dist:
dist[point_dist_i] = point_dist == 0.0
else:
dist[point_dist_i] = 1.0 / point_dist
else:
with np.errstate(divide="ignore"):
dist = 1.0 / dist
inf_mask = np.isinf(dist)
inf_row = np.any(inf_mask, axis=1)
dist[inf_row] = inf_mask[inf_row]
return dist
if callable(weights):
return weights(dist)
def _is_sorted_by_data(graph):
"""Return whether the graph's non-zero entries are sorted by data.
The non-zero entries are stored in graph.data and graph.indices.
For each row (or sample), the non-zero entries can be either:
- sorted by indices, as after graph.sort_indices();
- sorted by data, as after _check_precomputed(graph);
- not sorted.
Parameters
----------
graph : sparse matrix of shape (n_samples, n_samples)
Neighbors graph as given by `kneighbors_graph` or
`radius_neighbors_graph`. Matrix should be of format CSR format.
Returns
-------
res : bool
Whether input graph is sorted by data.
"""
assert graph.format == "csr"
out_of_order = graph.data[:-1] > graph.data[1:]
line_change = np.unique(graph.indptr[1:-1] - 1)
line_change = line_change[line_change < out_of_order.shape[0]]
return out_of_order.sum() == out_of_order[line_change].sum()
def _check_precomputed(X):
    """Check precomputed distance matrix.

    If the precomputed distance matrix is sparse, it checks that the non-zero
    entries are sorted by distances. If not, the matrix is copied and sorted.

    Parameters
    ----------
    X : {sparse matrix, array-like}, (n_samples, n_samples)
        Distance matrix to other samples. X may be a sparse matrix, in which
        case only non-zero elements may be considered neighbors.

    Returns
    -------
    X : {sparse matrix, array-like}, (n_samples, n_samples)
        Distance matrix to other samples. X may be a sparse matrix, in which
        case only non-zero elements may be considered neighbors.
    """
    if not issparse(X):
        # Dense case: distances only need to be validated as non-negative.
        X = check_array(X, ensure_non_negative=True, input_name="X")
        return X
    else:
        graph = X
    if graph.format not in ("csr", "csc", "coo", "lil"):
        raise TypeError(
            "Sparse matrix in {!r} format is not supported due to "
            "its handling of explicit zeros".format(graph.format)
        )
    # If the graph was not already CSR, check_array below copies it while
    # converting, so no further copy is needed for the sort.
    copied = graph.format != "csr"
    graph = check_array(
        graph,
        accept_sparse="csr",
        ensure_non_negative=True,
        input_name="precomputed distance matrix",
    )
    graph = sort_graph_by_row_values(graph, copy=not copied, warn_when_not_sorted=True)
    return graph
@validate_params(
    {
        "graph": ["sparse matrix"],
        "copy": ["boolean"],
        "warn_when_not_sorted": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def sort_graph_by_row_values(graph, copy=False, warn_when_not_sorted=True):
    """Sort a sparse graph such that each row is stored with increasing values.

    .. versionadded:: 1.2

    Parameters
    ----------
    graph : sparse matrix of shape (n_samples, n_samples)
        Distance matrix to other samples, where only non-zero elements are
        considered neighbors. Matrix is converted to CSR format if not already.

    copy : bool, default=False
        If True, the graph is copied before sorting. If False, the sorting is
        performed inplace. If the graph is not of CSR format, `copy` must be
        True to allow the conversion to CSR format, otherwise an error is
        raised.

    warn_when_not_sorted : bool, default=True
        If True, a :class:`~sklearn.exceptions.EfficiencyWarning` is raised
        when the input graph is not sorted by row values.

    Returns
    -------
    graph : sparse matrix of shape (n_samples, n_samples)
        Distance matrix to other samples, where only non-zero elements are
        considered neighbors. Matrix is in CSR format.

    Examples
    --------
    >>> from scipy.sparse import csr_matrix
    >>> from sklearn.neighbors import sort_graph_by_row_values
    >>> X = csr_matrix(
    ...     [[0., 3., 1.],
    ...      [3., 0., 2.],
    ...      [1., 2., 0.]])
    >>> X.data
    array([3., 1., 3., 2., 1., 2.])
    >>> X_ = sort_graph_by_row_values(X)
    >>> X_.data
    array([1., 3., 2., 3., 1., 2.])
    """
    # Fast path: already CSR and already sorted, nothing to do.
    if graph.format == "csr" and _is_sorted_by_data(graph):
        return graph
    if warn_when_not_sorted:
        warnings.warn(
            (
                "Precomputed sparse input was not sorted by row values. Use the"
                " function sklearn.neighbors.sort_graph_by_row_values to sort the input"
                " by row values, with warn_when_not_sorted=False to remove this"
                " warning."
            ),
            EfficiencyWarning,
        )
    if graph.format not in ("csr", "csc", "coo", "lil"):
        raise TypeError(
            f"Sparse matrix in {graph.format!r} format is not supported due to "
            "its handling of explicit zeros"
        )
    elif graph.format != "csr":
        # Converting to CSR allocates new arrays, which requires copy=True.
        if not copy:
            raise ValueError(
                "The input graph is not in CSR format. Use copy=True to allow "
                "the conversion to CSR format."
            )
        graph = graph.asformat("csr")
    elif copy:  # csr format with copy=True
        graph = graph.copy()
    row_nnz = np.diff(graph.indptr)
    if row_nnz.max() == row_nnz.min():
        # if each sample has the same number of provided neighbors
        # vectorized path: argsort all rows at once as a 2D array, then
        # offset each row's order by its start position in the flat arrays.
        n_samples = graph.shape[0]
        distances = graph.data.reshape(n_samples, -1)
        order = np.argsort(distances, kind="mergesort")
        order += np.arange(n_samples)[:, None] * row_nnz[0]
        order = order.ravel()
        graph.data = graph.data[order]
        graph.indices = graph.indices[order]
    else:
        # ragged rows: sort each row's slice of data/indices in place
        for start, stop in zip(graph.indptr, graph.indptr[1:]):
            order = np.argsort(graph.data[start:stop], kind="mergesort")
            graph.data[start:stop] = graph.data[start:stop][order]
            graph.indices[start:stop] = graph.indices[start:stop][order]
    return graph
def _kneighbors_from_graph(graph, n_neighbors, return_distance):
"""Decompose a nearest neighbors sparse graph into distances and indices.
Parameters
----------
graph : sparse matrix of shape (n_samples, n_samples)
Neighbors graph as given by `kneighbors_graph` or
`radius_neighbors_graph`. Matrix should be of format CSR format.
n_neighbors : int
Number of neighbors required for each sample.
return_distance : bool
Whether or not to return the distances.
Returns
-------
neigh_dist : ndarray of shape (n_samples, n_neighbors)
Distances to nearest neighbors. Only present if `return_distance=True`.
neigh_ind : ndarray of shape (n_samples, n_neighbors)
Indices of nearest neighbors.
"""
n_samples = graph.shape[0]
assert graph.format == "csr"
# number of neighbors by samples
row_nnz = np.diff(graph.indptr)
row_nnz_min = row_nnz.min()
if n_neighbors is not None and row_nnz_min < n_neighbors:
raise ValueError(
"%d neighbors per samples are required, but some samples have only"
" %d neighbors in precomputed graph matrix. Decrease number of "
"neighbors used or recompute the graph with more neighbors."
% (n_neighbors, row_nnz_min)
)
def extract(a):
# if each sample has the same number of provided neighbors
if row_nnz.max() == row_nnz_min:
return a.reshape(n_samples, -1)[:, :n_neighbors]
else:
idx = np.tile(np.arange(n_neighbors), (n_samples, 1))
idx += graph.indptr[:-1, None]
return a.take(idx, mode="clip").reshape(n_samples, n_neighbors)
if return_distance:
return extract(graph.data), extract(graph.indices)
else:
return extract(graph.indices)
def _radius_neighbors_from_graph(graph, radius, return_distance):
    """Decompose a nearest neighbors sparse graph into distances and indices.

    Parameters
    ----------
    graph : sparse matrix of shape (n_samples, n_samples)
        Neighbors graph as given by `kneighbors_graph` or
        `radius_neighbors_graph`. Matrix should be of format CSR format.

    radius : float
        Radius of neighborhoods which should be strictly positive.

    return_distance : bool
        Whether or not to return the distances.

    Returns
    -------
    neigh_dist : ndarray of shape (n_samples,) of arrays
        Distances to nearest neighbors. Only present if `return_distance=True`.

    neigh_ind : ndarray of shape (n_samples,) of arrays
        Indices of nearest neighbors.
    """
    assert graph.format == "csr"
    # If every stored distance already lies within the radius, reuse the
    # graph's own arrays instead of filtering them.
    no_filter_needed = bool(graph.data.max() <= radius)
    if no_filter_needed:
        data, indices, indptr = graph.data, graph.indices, graph.indptr
    else:
        mask = graph.data <= radius
        # `data` only needs to be materialized when distances are returned.
        if return_distance:
            data = np.compress(mask, graph.data)
        indices = np.compress(mask, graph.indices)
        # new row boundaries: cumulative count of kept entries at old bounds
        indptr = np.concatenate(([0], np.cumsum(mask)))[graph.indptr]
    # force a copy when aliasing the graph's array so callers cannot mutate it
    indices = indices.astype(np.intp, copy=no_filter_needed)
    if return_distance:
        neigh_dist = _to_object_array(np.split(data, indptr[1:-1]))
    neigh_ind = _to_object_array(np.split(indices, indptr[1:-1]))
    if return_distance:
        return neigh_dist, neigh_ind
    else:
        return neigh_ind
class NeighborsBase(MultiOutputMixin, BaseEstimator, metaclass=ABCMeta):
    """Base class for nearest neighbors estimators."""
    # Allowed types/ranges for the constructor parameters shared by all
    # neighbors estimators; consumed by scikit-learn's parameter validation.
    _parameter_constraints: dict = {
        "n_neighbors": [Interval(Integral, 1, None, closed="left"), None],
        "radius": [Interval(Real, 0, None, closed="both"), None],
        "algorithm": [StrOptions({"auto", "ball_tree", "kd_tree", "brute"})],
        "leaf_size": [Interval(Integral, 1, None, closed="left")],
        "p": [Interval(Real, 0, None, closed="right"), None],
        "metric": [StrOptions(set(itertools.chain(*VALID_METRICS.values()))), callable],
        "metric_params": [dict, None],
        "n_jobs": [Integral, None],
    }
    @abstractmethod
    def __init__(
        self,
        n_neighbors=None,
        radius=None,
        algorithm="auto",
        leaf_size=30,
        metric="minkowski",
        p=2,
        metric_params=None,
        n_jobs=None,
    ):
        # Store constructor arguments unmodified (scikit-learn convention:
        # no validation or transformation in __init__; see fit-time checks).
        self.n_neighbors = n_neighbors
        self.radius = radius
        self.algorithm = algorithm
        self.leaf_size = leaf_size
        self.metric = metric
        self.metric_params = metric_params
        self.p = p
        self.n_jobs = n_jobs
def _check_algorithm_metric(self):
if self.algorithm == "auto":
if self.metric == "precomputed":
alg_check = "brute"
elif (
callable(self.metric)
or self.metric in VALID_METRICS["ball_tree"]
or isinstance(self.metric, DistanceMetric)
):
alg_check = "ball_tree"
else:
alg_check = "brute"
else:
alg_check = self.algorithm
if callable(self.metric):
if self.algorithm == "kd_tree":
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree does not support callable metric '%s'"
"Function call overhead will result"
"in very poor performance." % self.metric
)
elif self.metric not in VALID_METRICS[alg_check] and not isinstance(
self.metric, DistanceMetric
):
raise ValueError(
"Metric '%s' not valid. Use "
"sorted(sklearn.neighbors.VALID_METRICS['%s']) "
"to get valid options. "
"Metric can also be a callable function." % (self.metric, alg_check)
)
if self.metric_params is not None and "p" in self.metric_params:
if self.p is not None:
warnings.warn(
(
"Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored."
),
SyntaxWarning,
stacklevel=3,
)
def _fit(self, X, y=None):
ensure_all_finite = "allow-nan" if get_tags(self).input_tags.allow_nan else True
if self.__sklearn_tags__().target_tags.required:
if not isinstance(X, (KDTree, BallTree, NeighborsBase)):
X, y = validate_data(
self,
X,
y,
accept_sparse="csr",
multi_output=True,
order="C",
ensure_all_finite=ensure_all_finite,
)
if is_classifier(self):
# Classification targets require a specific format
if y.ndim == 1 or (y.ndim == 2 and y.shape[1] == 1):
if y.ndim != 1:
warnings.warn(
(
"A column-vector y was passed when a "
"1d array was expected. Please change "
"the shape of y to (n_samples,), for "
"example using ravel()."
),
DataConversionWarning,
stacklevel=2,
)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
check_classification_targets(y)
self.classes_ = []
# Using `dtype=np.intp` is necessary since `np.bincount`
# (called in _classification.py) fails when dealing
# with a float64 array on 32bit systems.
self._y = np.empty(y.shape, dtype=np.intp)
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
else:
self._y = y
else:
if not isinstance(X, (KDTree, BallTree, NeighborsBase)):
X = validate_data(
self,
X,
ensure_all_finite=ensure_all_finite,
accept_sparse="csr",
order="C",
)
self._check_algorithm_metric()
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get("p", self.p)
if self.metric == "minkowski":
self.effective_metric_params_["p"] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == "minkowski":
p = self.effective_metric_params_.pop("p", 2)
w = self.effective_metric_params_.pop("w", None)
if p == 1 and w is None:
self.effective_metric_ = "manhattan"
elif p == 2 and w is None:
self.effective_metric_ = "euclidean"
elif p == np.inf and w is None:
self.effective_metric_ = "chebyshev"
else:
# Use the generic minkowski metric, possibly weighted.
self.effective_metric_params_["p"] = p
self.effective_metric_params_["w"] = w
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
self.n_samples_fit_ = X.n_samples_fit_
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = "ball_tree"
self.n_samples_fit_ = X.data.shape[0]
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = "kd_tree"
self.n_samples_fit_ = X.data.shape[0]
return self
if self.metric == "precomputed":
X = _check_precomputed(X)
# Precomputed matrix X must be squared
if X.shape[0] != X.shape[1]:
raise ValueError(
"Precomputed matrix must be square."
" Input is a {}x{} matrix.".format(X.shape[0], X.shape[1])
)
self.n_features_in_ = X.shape[1]
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ("auto", "brute"):
warnings.warn("cannot use tree with sparse input: using brute force")
if (
self.effective_metric_ not in VALID_METRICS_SPARSE["brute"]
and not callable(self.effective_metric_)
and not isinstance(self.effective_metric_, DistanceMetric)
):
raise ValueError(
"Metric '%s' not valid for sparse input. "
"Use sorted(sklearn.neighbors."
"VALID_METRICS_SPARSE['brute']) "
"to get valid options. "
"Metric can also be a callable function." % (self.effective_metric_)
)
self._fit_X = X.copy()
self._tree = None
self._fit_method = "brute"
self.n_samples_fit_ = X.shape[0]
return self
self._fit_method = self.algorithm
self._fit_X = X
self.n_samples_fit_ = X.shape[0]
if self._fit_method == "auto":
# A tree approach is better for small number of neighbors or small
# number of features, with KDTree generally faster when available
if (
self.metric == "precomputed"
or self._fit_X.shape[1] > 15
or (
self.n_neighbors is not None
and self.n_neighbors >= self._fit_X.shape[0] // 2
)
):
self._fit_method = "brute"
else:
if (
self.effective_metric_ == "minkowski"
and self.effective_metric_params_["p"] < 1
):
self._fit_method = "brute"
elif (
self.effective_metric_ == "minkowski"
and self.effective_metric_params_.get("w") is not None
):
# 'minkowski' with weights is not supported by KDTree but is
# supported byBallTree.
self._fit_method = "ball_tree"
elif self.effective_metric_ in VALID_METRICS["kd_tree"]:
self._fit_method = "kd_tree"
elif (
callable(self.effective_metric_)
or self.effective_metric_ in VALID_METRICS["ball_tree"]
):
self._fit_method = "ball_tree"
else:
self._fit_method = "brute"
if (
self.effective_metric_ == "minkowski"
and self.effective_metric_params_["p"] < 1
):
# For 0 < p < 1 Minkowski distances aren't valid distance
# metric as they do not satisfy triangular inequality:
# they are semi-metrics.
# algorithm="kd_tree" and algorithm="ball_tree" can't be used because
# KDTree and BallTree require a proper distance metric to work properly.
# However, the brute-force algorithm supports semi-metrics.
if self._fit_method == "brute":
warnings.warn(
"Mind that for 0 < p < 1, Minkowski metrics are not distance"
" metrics. Continuing the execution with `algorithm='brute'`."
)
else: # self._fit_method in ("kd_tree", "ball_tree")
raise ValueError(
f'algorithm="{self._fit_method}" does not support 0 < p < 1 for '
"the Minkowski metric. To resolve this problem either "
'set p >= 1 or algorithm="brute".'
)
if self._fit_method == "ball_tree":
self._tree = BallTree(
X,
self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_,
)
elif self._fit_method == "kd_tree":
if (
self.effective_metric_ == "minkowski"
and self.effective_metric_params_.get("w") is not None
):
raise ValueError(
"algorithm='kd_tree' is not valid for "
"metric='minkowski' with a weight parameter 'w': "
"try algorithm='ball_tree' "
"or algorithm='brute' instead."
)
self._tree = KDTree(
X,
self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_,
)
elif self._fit_method == "brute":
self._tree = None
return self
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.sparse = True
# For cross-validation routines to split data correctly
tags.input_tags.pairwise = self.metric == "precomputed"
# when input is precomputed metric values, all those values need to be positive
tags.input_tags.positive_only = tags.input_tags.pairwise
tags.input_tags.allow_nan = self.metric == "nan_euclidean"
return tags
class KNeighborsMixin:
"""Mixin for k-neighbors searches."""
def _kneighbors_reduce_func(self, dist, start, n_neighbors, return_distance):
"""Reduce a chunk of distances to the nearest neighbors.
Callback to :func:`sklearn.metrics.pairwise.pairwise_distances_chunked`
Parameters
----------
dist : ndarray of shape (n_samples_chunk, n_samples)
The distance matrix.
start : int
The index in X which the first row of dist corresponds to.
n_neighbors : int
Number of neighbors required for each sample.
return_distance : bool
Whether or not to return the distances.
Returns
-------
dist : array of shape (n_samples_chunk, n_neighbors)
Returned only if `return_distance=True`.
neigh : array of shape (n_samples_chunk, n_neighbors)
The neighbors indices.
"""
sample_range = np.arange(dist.shape[0])[:, None]
neigh_ind = np.argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
neigh_ind = neigh_ind[sample_range, np.argsort(dist[sample_range, neigh_ind])]
if return_distance:
if self.effective_metric_ == "euclidean":
result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
else:
result = dist[sample_range, neigh_ind], neigh_ind
else:
result = neigh_ind
return result
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
"""Find the K-neighbors of a point.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_queries, n_features), \
or (n_queries, n_indexed) if metric == 'precomputed', default=None
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int, default=None
Number of neighbors required for each sample. The default is the
value passed to the constructor.
return_distance : bool, default=True
Whether or not to return the distances.
Returns
-------
neigh_dist : ndarray of shape (n_queries, n_neighbors)
Array representing the lengths to points, only present if
return_distance=True.
neigh_ind : ndarray of shape (n_queries, n_neighbors)
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NearestNeighbors
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples)
NearestNeighbors(n_neighbors=1)
>>> print(neigh.kneighbors([[1., 1., 1.]]))
(array([[0.5]]), array([[2]]))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False)
array([[1],
[2]]...)
"""
check_is_fitted(self)
if n_neighbors is None:
n_neighbors = self.n_neighbors
elif n_neighbors <= 0:
raise ValueError("Expected n_neighbors > 0. Got %d" % n_neighbors)
elif not isinstance(n_neighbors, numbers.Integral):
raise TypeError(
"n_neighbors does not take %s value, enter integer value"
% type(n_neighbors)
)
ensure_all_finite = "allow-nan" if get_tags(self).input_tags.allow_nan else True
query_is_train = X is None
if query_is_train:
X = self._fit_X
# Include an extra neighbor to account for the sample itself being
# returned, which is removed later
n_neighbors += 1
else:
if self.metric == "precomputed":
X = _check_precomputed(X)
else:
X = validate_data(
self,
X,
ensure_all_finite=ensure_all_finite,
accept_sparse="csr",
reset=False,
order="C",
)
n_samples_fit = self.n_samples_fit_
if n_neighbors > n_samples_fit:
if query_is_train:
n_neighbors -= 1 # ok to modify inplace because an error is raised
inequality_str = "n_neighbors < n_samples_fit"
else:
inequality_str = "n_neighbors <= n_samples_fit"
raise ValueError(
f"Expected {inequality_str}, but "
f"n_neighbors = {n_neighbors}, n_samples_fit = {n_samples_fit}, "
f"n_samples = {X.shape[0]}" # include n_samples for common tests
)
n_jobs = effective_n_jobs(self.n_jobs)
chunked_results = None
use_pairwise_distances_reductions = (
self._fit_method == "brute"
and ArgKmin.is_usable_for(
X if X is not None else self._fit_X, self._fit_X, self.effective_metric_
)
)
if use_pairwise_distances_reductions:
results = ArgKmin.compute(
X=X,
Y=self._fit_X,
k=n_neighbors,
metric=self.effective_metric_,
metric_kwargs=self.effective_metric_params_,
strategy="auto",
return_distance=return_distance,
)
elif (
self._fit_method == "brute" and self.metric == "precomputed" and issparse(X)
):
results = _kneighbors_from_graph(
X, n_neighbors=n_neighbors, return_distance=return_distance
)
elif self._fit_method == "brute":
# Joblib-based backend, which is used when user-defined callable
# are passed for metric.
# This won't be used in the future once PairwiseDistancesReductions
# support:
# - DistanceMetrics which work on supposedly binary data
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/neighbors/_graph.py | sklearn/neighbors/_graph.py | """Nearest Neighbors graph functions"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import itertools
from sklearn.base import ClassNamePrefixFeaturesOutMixin, TransformerMixin, _fit_context
from sklearn.neighbors._base import (
VALID_METRICS,
KNeighborsMixin,
NeighborsBase,
RadiusNeighborsMixin,
)
from sklearn.neighbors._unsupervised import NearestNeighbors
from sklearn.utils._param_validation import (
Integral,
Interval,
Real,
StrOptions,
validate_params,
)
from sklearn.utils.validation import check_is_fitted
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(["metric", "p", "metric_params"], [metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for the same parameter."
% (func_param, param_name, est_params[param_name])
)
def _query_include_self(X, include_self, mode):
"""Return the query based on include_self param"""
if include_self == "auto":
include_self = mode == "connectivity"
# it does not include each sample as its own neighbors
if not include_self:
X = None
return X
@validate_params(
    {
        "X": ["array-like", "sparse matrix", KNeighborsMixin],
        "n_neighbors": [Interval(Integral, 1, None, closed="left")],
        "mode": [StrOptions({"connectivity", "distance"})],
        "metric": [StrOptions(set(itertools.chain(*VALID_METRICS.values()))), callable],
        "p": [Interval(Real, 0, None, closed="right"), None],
        "metric_params": [dict, None],
        "include_self": ["boolean", StrOptions({"auto"})],
        "n_jobs": [Integral, None],
    },
    prefer_skip_nested_validation=False,  # metric is not validated yet
)
def kneighbors_graph(
    X,
    n_neighbors,
    *,
    mode="connectivity",
    metric="minkowski",
    p=2,
    metric_params=None,
    include_self=False,
    n_jobs=None,
):
    """Compute the (weighted) graph of k-Neighbors for points in X.

    Read more in the :ref:`User Guide <unsupervised_neighbors>`.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Sample data.

    n_neighbors : int
        Number of neighbors for each sample.

    mode : {'connectivity', 'distance'}, default='connectivity'
        'connectivity' yields a 0/1 adjacency matrix, while 'distance'
        stores the distance between connected samples under the given metric.

    metric : str, default='minkowski'
        Metric used for the neighbors search. The default "minkowski" with
        ``p=2`` is the standard Euclidean distance. See the documentation of
        `scipy.spatial.distance
        <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_
        and the metrics listed in
        :class:`~sklearn.metrics.pairwise.distance_metrics` for valid values.

    p : float, default=2
        Power parameter for the Minkowski metric: ``p=1`` is the Manhattan
        distance (l1) and ``p=2`` the Euclidean distance (l2); for arbitrary
        p, minkowski_distance (l_p) is used. Expected to be positive.

    metric_params : dict, default=None
        Additional keyword arguments for the metric function.

    include_self : bool or 'auto', default=False
        Whether each sample is marked as its own first nearest neighbor.
        With 'auto', True is used for mode='connectivity' and False for
        mode='distance'.

    n_jobs : int, default=None
        Number of parallel jobs for the neighbors search. ``None`` means 1
        unless in a :obj:`joblib.parallel_backend` context; ``-1`` uses all
        processors. See :term:`Glossary <n_jobs>` for more details.

    Returns
    -------
    A : sparse matrix of shape (n_samples, n_samples)
        Graph in CSR format where A[i, j] holds the weight of the edge
        connecting i to j.

    See Also
    --------
    radius_neighbors_graph: Compute the (weighted) graph of Neighbors for points in X.

    Examples
    --------
    >>> X = [[0], [3], [1]]
    >>> from sklearn.neighbors import kneighbors_graph
    >>> A = kneighbors_graph(X, 2, mode='connectivity', include_self=True)
    >>> A.toarray()
    array([[1., 0., 1.],
           [0., 1., 1.],
           [1., 0., 1.]])
    """
    if isinstance(X, KNeighborsMixin):
        # A pre-fitted estimator was passed: its stored parameters must match
        # the arguments of this call.
        _check_params(X, metric, p, metric_params)
        estimator = X
    else:
        estimator = NearestNeighbors(
            n_neighbors=n_neighbors,
            metric=metric,
            p=p,
            metric_params=metric_params,
            n_jobs=n_jobs,
        ).fit(X)

    query = _query_include_self(estimator._fit_X, include_self, mode)
    return estimator.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
@validate_params(
    {
        "X": ["array-like", "sparse matrix", RadiusNeighborsMixin],
        "radius": [Interval(Real, 0, None, closed="both")],
        "mode": [StrOptions({"connectivity", "distance"})],
        "metric": [StrOptions(set(itertools.chain(*VALID_METRICS.values()))), callable],
        "p": [Interval(Real, 0, None, closed="right"), None],
        "metric_params": [dict, None],
        "include_self": ["boolean", StrOptions({"auto"})],
        "n_jobs": [Integral, None],
    },
    prefer_skip_nested_validation=False,  # metric is not validated yet
)
def radius_neighbors_graph(
    X,
    radius,
    *,
    mode="connectivity",
    metric="minkowski",
    p=2,
    metric_params=None,
    include_self=False,
    n_jobs=None,
):
    """Compute the (weighted) graph of Neighbors for points in X.

    Neighborhoods are restricted to the points at a distance lower than
    radius.

    Read more in the :ref:`User Guide <unsupervised_neighbors>`.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Sample data.

    radius : float
        Radius of neighborhoods.

    mode : {'connectivity', 'distance'}, default='connectivity'
        'connectivity' yields a 0/1 adjacency matrix, while 'distance'
        stores the distance between connected samples under the given metric.

    metric : str, default='minkowski'
        Metric used for the neighbors search. The default "minkowski" with
        ``p=2`` is the standard Euclidean distance. See the documentation of
        `scipy.spatial.distance
        <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_
        and the metrics listed in
        :class:`~sklearn.metrics.pairwise.distance_metrics` for valid values.

    p : float, default=2
        Power parameter for the Minkowski metric: ``p=1`` is the Manhattan
        distance (l1) and ``p=2`` the Euclidean distance (l2); for arbitrary
        p, minkowski_distance (l_p) is used.

    metric_params : dict, default=None
        Additional keyword arguments for the metric function.

    include_self : bool or 'auto', default=False
        Whether each sample is marked as its own first nearest neighbor.
        With 'auto', True is used for mode='connectivity' and False for
        mode='distance'.

    n_jobs : int, default=None
        Number of parallel jobs for the neighbors search. ``None`` means 1
        unless in a :obj:`joblib.parallel_backend` context; ``-1`` uses all
        processors. See :term:`Glossary <n_jobs>` for more details.

    Returns
    -------
    A : sparse matrix of shape (n_samples, n_samples)
        Graph in CSR format where A[i, j] holds the weight of the edge
        connecting i to j.

    See Also
    --------
    kneighbors_graph: Compute the weighted graph of k-neighbors for points in X.

    Examples
    --------
    >>> X = [[0], [3], [1]]
    >>> from sklearn.neighbors import radius_neighbors_graph
    >>> A = radius_neighbors_graph(X, 1.5, mode='connectivity',
    ...                            include_self=True)
    >>> A.toarray()
    array([[1., 0., 1.],
           [0., 1., 0.],
           [1., 0., 1.]])
    """
    if isinstance(X, RadiusNeighborsMixin):
        # A pre-fitted estimator was passed: its stored parameters must match
        # the arguments of this call.
        _check_params(X, metric, p, metric_params)
        estimator = X
    else:
        estimator = NearestNeighbors(
            radius=radius,
            metric=metric,
            p=p,
            metric_params=metric_params,
            n_jobs=n_jobs,
        ).fit(X)

    query = _query_include_self(estimator._fit_X, include_self, mode)
    return estimator.radius_neighbors_graph(query, radius, mode)
class KNeighborsTransformer(
    ClassNamePrefixFeaturesOutMixin, KNeighborsMixin, TransformerMixin, NeighborsBase
):
    """Transform X into a (weighted) graph of k nearest neighbors.

    The transformed data is a sparse graph as returned by kneighbors_graph.

    Read more in the :ref:`User Guide <neighbors_transformer>`.

    .. versionadded:: 0.22

    Parameters
    ----------
    mode : {'distance', 'connectivity'}, default='distance'
        'connectivity' yields a 0/1 adjacency matrix, while 'distance'
        stores the distance between connected samples under the given metric.

    n_neighbors : int, default=5
        Number of neighbors for each sample in the transformed sparse graph.
        For compatibility reasons, as each sample is considered as its own
        neighbor, one extra neighbor is computed when mode == 'distance';
        the sparse graph then contains (n_neighbors + 1) neighbors.

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
        Algorithm used to compute the nearest neighbors:

        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to :meth:`fit` method.

        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.

    leaf_size : int, default=30
        Leaf size passed to BallTree or KDTree, affecting construction and
        query speed as well as memory use. The optimal value depends on the
        nature of the problem.

    metric : str or callable, default='minkowski'
        Metric used for distance computation. The default "minkowski" with
        ``p=2`` is the standard Euclidean distance. See the documentation of
        `scipy.spatial.distance
        <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_
        and the metrics listed in
        :class:`~sklearn.metrics.pairwise.distance_metrics` for valid values.

        A callable must take two arrays representing 1D vectors and return
        one value indicating the distance between them. This works for
        Scipy's metrics but is less efficient than passing a metric name.
        Distance matrices are not supported.

    p : float, default=2
        Parameter for the Minkowski metric from
        sklearn.metrics.pairwise.pairwise_distances: ``p=1`` is the Manhattan
        distance (l1) and ``p=2`` the Euclidean distance (l2); for arbitrary
        p, minkowski_distance (l_p) is used. Expected to be positive.

    metric_params : dict, default=None
        Additional keyword arguments for the metric function.

    n_jobs : int, default=None
        Number of parallel jobs for the neighbors search. ``-1`` uses all
        CPU cores.

    Attributes
    ----------
    effective_metric_ : str or callable
        The distance metric actually used, possibly a synonym of the `metric`
        parameter (e.g. 'euclidean' for metric='minkowski' with p=2).

    effective_metric_params_ : dict
        Additional keyword arguments for the metric function; usually equal
        to `metric_params`, possibly augmented with a `p` entry when
        `effective_metric_` is 'minkowski'.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    n_samples_fit_ : int
        Number of samples in the fitted data.

    See Also
    --------
    kneighbors_graph : Compute the weighted graph of k-neighbors for
        points in X.
    RadiusNeighborsTransformer : Transform X into a weighted graph of
        neighbors nearer than a radius.

    Notes
    -----
    For an example of using :class:`~sklearn.neighbors.KNeighborsTransformer`
    in combination with :class:`~sklearn.manifold.TSNE` see
    :ref:`sphx_glr_auto_examples_neighbors_approximate_nearest_neighbors.py`.

    Examples
    --------
    >>> from sklearn.datasets import load_wine
    >>> from sklearn.neighbors import KNeighborsTransformer
    >>> X, _ = load_wine(return_X_y=True)
    >>> X.shape
    (178, 13)
    >>> transformer = KNeighborsTransformer(n_neighbors=5, mode='distance')
    >>> X_dist_graph = transformer.fit_transform(X)
    >>> X_dist_graph.shape
    (178, 178)
    """

    _parameter_constraints: dict = {
        **NeighborsBase._parameter_constraints,
        "mode": [StrOptions({"distance", "connectivity"})],
    }
    _parameter_constraints.pop("radius")

    def __init__(
        self,
        *,
        mode="distance",
        n_neighbors=5,
        algorithm="auto",
        leaf_size=30,
        metric="minkowski",
        p=2,
        metric_params=None,
        n_jobs=None,
    ):
        # The radius parameter is irrelevant for a k-neighbors transformer.
        super().__init__(
            n_neighbors=n_neighbors,
            radius=None,
            algorithm=algorithm,
            leaf_size=leaf_size,
            metric=metric,
            p=p,
            metric_params=metric_params,
            n_jobs=n_jobs,
        )
        self.mode = mode

    @_fit_context(
        # KNeighborsTransformer.metric is not validated yet
        prefer_skip_nested_validation=False
    )
    def fit(self, X, y=None):
        """Fit the k-nearest neighbors transformer from the training dataset.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
                (n_samples, n_samples) if metric='precomputed'
            Training data.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : KNeighborsTransformer
            The fitted k-nearest neighbors transformer.
        """
        self._fit(X)
        # Output columns of `transform` are indexed by the training samples.
        self._n_features_out = self.n_samples_fit_
        return self

    def transform(self, X):
        """Compute the (weighted) graph of Neighbors for points in X.

        Parameters
        ----------
        X : array-like of shape (n_samples_transform, n_features)
            Sample data.

        Returns
        -------
        Xt : sparse matrix of shape (n_samples_transform, n_samples_fit)
            Xt[i, j] is assigned the weight of edge that connects i to j.
            Only the neighbors have an explicit value; the diagonal is
            always explicit. The matrix is of CSR format.
        """
        check_is_fitted(self)
        # In 'distance' mode each sample counts as its own neighbor, so one
        # extra neighbor is queried to keep n_neighbors actual neighbors.
        n_neighbors = self.n_neighbors
        if self.mode == "distance":
            n_neighbors += 1
        return self.kneighbors_graph(X, mode=self.mode, n_neighbors=n_neighbors)

    def fit_transform(self, X, y=None):
        """Fit to data, then transform it.

        Fits transformer to X and y with optional parameters fit_params
        and returns a transformed version of X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training set.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        Xt : sparse matrix of shape (n_samples, n_samples)
            Xt[i, j] is assigned the weight of edge that connects i to j.
            Only the neighbors have an explicit value; the diagonal is
            always explicit. The matrix is of CSR format.
        """
        self.fit(X)
        return self.transform(X)
class RadiusNeighborsTransformer(
ClassNamePrefixFeaturesOutMixin,
RadiusNeighborsMixin,
TransformerMixin,
NeighborsBase,
):
"""Transform X into a (weighted) graph of neighbors nearer than a radius.
The transformed data is a sparse graph as returned by
`radius_neighbors_graph`.
Read more in the :ref:`User Guide <neighbors_transformer>`.
.. versionadded:: 0.22
Parameters
----------
mode : {'distance', 'connectivity'}, default='distance'
Type of returned matrix: 'connectivity' will return the connectivity
matrix with ones and zeros, and 'distance' will return the distances
between neighbors according to the given metric.
radius : float, default=1.0
Radius of neighborhood in the transformed sparse graph.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, default=30
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : str or callable, default='minkowski'
Metric to use for distance computation. Default is "minkowski", which
results in the standard Euclidean distance when p = 2. See the
documentation of `scipy.spatial.distance
<https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
the metrics listed in
:class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
values.
If metric is a callable function, it takes two arrays representing 1D
vectors as inputs and must return one value indicating the distance
between those vectors. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
p : float, default=2
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
This parameter is expected to be positive.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
effective_metric_ : str or callable
The distance metric used. It will be same as the `metric` parameter
or a synonym of it, e.g. 'euclidean' if the `metric` parameter set to
'minkowski' and `p` parameter set to 2.
effective_metric_params_ : dict
Additional keyword arguments for the metric function. For most metrics
will be same with `metric_params` parameter, but may also contain the
`p` parameter value if the `effective_metric_` attribute is set to
'minkowski'.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_samples_fit_ : int
Number of samples in the fitted data.
See Also
--------
kneighbors_graph : Compute the weighted graph of k-neighbors for
points in X.
KNeighborsTransformer : Transform X into a weighted graph of k
nearest neighbors.
Examples
--------
>>> import numpy as np
>>> from sklearn.datasets import load_wine
>>> from sklearn.cluster import DBSCAN
>>> from sklearn.neighbors import RadiusNeighborsTransformer
>>> from sklearn.pipeline import make_pipeline
>>> X, _ = load_wine(return_X_y=True)
>>> estimator = make_pipeline(
... RadiusNeighborsTransformer(radius=42.0, mode='distance'),
... DBSCAN(eps=25.0, metric='precomputed'))
>>> X_clustered = estimator.fit_predict(X)
>>> clusters, counts = np.unique(X_clustered, return_counts=True)
>>> print(counts)
[ 29 15 111 11 12]
"""
_parameter_constraints: dict = {
**NeighborsBase._parameter_constraints,
"mode": [StrOptions({"distance", "connectivity"})],
}
_parameter_constraints.pop("n_neighbors")
def __init__(
self,
*,
mode="distance",
radius=1.0,
algorithm="auto",
leaf_size=30,
metric="minkowski",
p=2,
metric_params=None,
n_jobs=None,
):
super().__init__(
n_neighbors=None,
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric,
p=p,
metric_params=metric_params,
n_jobs=n_jobs,
)
self.mode = mode
    @_fit_context(
        # RadiusNeighborsTransformer.metric is not validated yet
        prefer_skip_nested_validation=False
    )
    def fit(self, X, y=None):
        """Fit the radius neighbors transformer from the training dataset.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
                (n_samples, n_samples) if metric='precomputed'
            Training data.
        y : Ignored
            Not used, present for API consistency by convention.
        Returns
        -------
        self : RadiusNeighborsTransformer
            The fitted radius neighbors transformer.
        """
        self._fit(X)
        # transform() returns an (n_queries, n_samples_fit_) graph, so the
        # output dimensionality equals the number of stored training samples.
        self._n_features_out = self.n_samples_fit_
        return self
def transform(self, X):
"""Compute the (weighted) graph of Neighbors for points in X.
Parameters
----------
X : array-like of shape (n_samples_transform, n_features)
Sample data.
Returns
-------
Xt : sparse matrix of shape (n_samples_transform, n_samples_fit)
Xt[i, j] is assigned the weight of edge that connects i to j.
Only the neighbors have an explicit value.
The diagonal is always explicit.
The matrix is of CSR format.
"""
check_is_fitted(self)
return self.radius_neighbors_graph(X, mode=self.mode, sort_results=True)
def fit_transform(self, X, y=None):
"""Fit to data, then transform it.
Fits transformer to X and y with optional parameters fit_params
and returns a transformed version of X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training set.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
Xt : sparse matrix of shape (n_samples, n_samples)
Xt[i, j] is assigned the weight of edge that connects i to j.
Only the neighbors have an explicit value.
The diagonal is always explicit.
The matrix is of CSR format.
"""
return self.fit(X).transform(X)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/neighbors/_nca.py | sklearn/neighbors/_nca.py | """
Neighborhood Component Analysis
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import sys
import time
from numbers import Integral, Real
from warnings import warn
import numpy as np
from scipy.optimize import minimize
from sklearn.base import (
BaseEstimator,
ClassNamePrefixFeaturesOutMixin,
TransformerMixin,
_fit_context,
)
from sklearn.decomposition import PCA
from sklearn.exceptions import ConvergenceWarning
from sklearn.metrics import pairwise_distances
from sklearn.preprocessing import LabelEncoder
from sklearn.utils._param_validation import Interval, StrOptions
from sklearn.utils.extmath import softmax
from sklearn.utils.fixes import _get_additional_lbfgs_options_dict
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.random import check_random_state
from sklearn.utils.validation import check_array, check_is_fitted, validate_data
class NeighborhoodComponentsAnalysis(
ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator
):
"""Neighborhood Components Analysis.
Neighborhood Component Analysis (NCA) is a machine learning algorithm for
metric learning. It learns a linear transformation in a supervised fashion
to improve the classification accuracy of a stochastic nearest neighbors
rule in the transformed space.
Read more in the :ref:`User Guide <nca>`.
Parameters
----------
n_components : int, default=None
Preferred dimensionality of the projected space.
If None it will be set to `n_features`.
init : {'auto', 'pca', 'lda', 'identity', 'random'} or ndarray of shape \
(n_features_a, n_features_b), default='auto'
Initialization of the linear transformation. Possible options are
`'auto'`, `'pca'`, `'lda'`, `'identity'`, `'random'`, and a numpy
array of shape `(n_features_a, n_features_b)`.
- `'auto'`
Depending on `n_components`, the most reasonable initialization
is chosen. If `n_components <= min(n_features, n_classes - 1)`
we use `'lda'`, as it uses labels information. If not, but
`n_components < min(n_features, n_samples)`, we use `'pca'`, as
it projects data in meaningful directions (those of higher
variance). Otherwise, we just use `'identity'`.
- `'pca'`
`n_components` principal components of the inputs passed
to :meth:`fit` will be used to initialize the transformation.
(See :class:`~sklearn.decomposition.PCA`)
- `'lda'`
`min(n_components, n_classes)` most discriminative
components of the inputs passed to :meth:`fit` will be used to
initialize the transformation. (If `n_components > n_classes`,
the rest of the components will be zero.) (See
:class:`~sklearn.discriminant_analysis.LinearDiscriminantAnalysis`)
- `'identity'`
If `n_components` is strictly smaller than the
dimensionality of the inputs passed to :meth:`fit`, the identity
matrix will be truncated to the first `n_components` rows.
- `'random'`
The initial transformation will be a random array of shape
`(n_components, n_features)`. Each value is sampled from the
standard normal distribution.
        - numpy array
            `n_features_b` must match the dimensionality of the inputs passed
            to :meth:`fit` and `n_features_a` must be less than or equal to that.
If `n_components` is not `None`, `n_features_a` must match it.
warm_start : bool, default=False
If `True` and :meth:`fit` has been called before, the solution of the
previous call to :meth:`fit` is used as the initial linear
transformation (`n_components` and `init` will be ignored).
max_iter : int, default=50
Maximum number of iterations in the optimization.
tol : float, default=1e-5
Convergence tolerance for the optimization.
callback : callable, default=None
If not `None`, this function is called after every iteration of the
optimizer, taking as arguments the current solution (flattened
transformation matrix) and the number of iterations. This might be
useful in case one wants to examine or store the transformation
found after each iteration.
verbose : int, default=0
If 0, no progress messages will be printed.
If 1, progress messages will be printed to stdout.
If > 1, progress messages will be printed and the `disp`
parameter of :func:`scipy.optimize.minimize` will be set to
`verbose - 2`.
random_state : int or numpy.RandomState, default=None
A pseudo random number generator object or a seed for it if int. If
`init='random'`, `random_state` is used to initialize the random
transformation. If `init='pca'`, `random_state` is passed as an
argument to PCA when initializing the transformation. Pass an int
for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
components_ : ndarray of shape (n_components, n_features)
The linear transformation learned during fitting.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
n_iter_ : int
Counts the number of iterations performed by the optimizer.
random_state_ : numpy.RandomState
Pseudo random number generator object used during initialization.
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
sklearn.discriminant_analysis.LinearDiscriminantAnalysis : Linear
Discriminant Analysis.
sklearn.decomposition.PCA : Principal component analysis (PCA).
References
----------
.. [1] J. Goldberger, G. Hinton, S. Roweis, R. Salakhutdinov.
"Neighbourhood Components Analysis". Advances in Neural Information
Processing Systems. 17, 513-520, 2005.
https://www.cs.toronto.edu/~rsalakhu/papers/ncanips.pdf
.. [2] Wikipedia entry on Neighborhood Components Analysis
https://en.wikipedia.org/wiki/Neighbourhood_components_analysis
Examples
--------
>>> from sklearn.neighbors import NeighborhoodComponentsAnalysis
>>> from sklearn.neighbors import KNeighborsClassifier
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import train_test_split
>>> X, y = load_iris(return_X_y=True)
>>> X_train, X_test, y_train, y_test = train_test_split(X, y,
... stratify=y, test_size=0.7, random_state=42)
>>> nca = NeighborhoodComponentsAnalysis(random_state=42)
>>> nca.fit(X_train, y_train)
NeighborhoodComponentsAnalysis(...)
>>> knn = KNeighborsClassifier(n_neighbors=3)
>>> knn.fit(X_train, y_train)
KNeighborsClassifier(...)
>>> print(knn.score(X_test, y_test))
0.933333...
>>> knn.fit(nca.transform(X_train), y_train)
KNeighborsClassifier(...)
>>> print(knn.score(nca.transform(X_test), y_test))
0.961904...
"""
_parameter_constraints: dict = {
"n_components": [
Interval(Integral, 1, None, closed="left"),
None,
],
"init": [
StrOptions({"auto", "pca", "lda", "identity", "random"}),
np.ndarray,
],
"warm_start": ["boolean"],
"max_iter": [Interval(Integral, 1, None, closed="left")],
"tol": [Interval(Real, 0, None, closed="left")],
"callback": [callable, None],
"verbose": ["verbose"],
"random_state": ["random_state"],
}
    def __init__(
        self,
        n_components=None,
        *,
        init="auto",
        warm_start=False,
        max_iter=50,
        tol=1e-5,
        callback=None,
        verbose=0,
        random_state=None,
    ):
        # sklearn convention: __init__ only stores hyper-parameters verbatim;
        # validation happens in `fit` via `_fit_context`/`_parameter_constraints`.
        self.n_components = n_components
        self.init = init
        self.warm_start = warm_start
        self.max_iter = max_iter
        self.tol = tol
        self.callback = callback
        self.verbose = verbose
        self.random_state = random_state
    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y):
        """Fit the model according to the given training data.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The training samples.
        y : array-like of shape (n_samples,)
            The corresponding training labels.
        Returns
        -------
        self : object
            Fitted estimator.
        """
        # Validate the inputs X and y, and convert y to numerical classes.
        X, y = validate_data(self, X, y, ensure_min_samples=2)
        check_classification_targets(y)
        y = LabelEncoder().fit_transform(y)
        # Check the preferred dimensionality of the projected space
        if self.n_components is not None and self.n_components > X.shape[1]:
            raise ValueError(
                "The preferred dimensionality of the "
                f"projected space `n_components` ({self.n_components}) cannot "
                "be greater than the given data "
                f"dimensionality ({X.shape[1]})!"
            )
        # If warm_start is enabled, check that the inputs are consistent
        if (
            self.warm_start
            and hasattr(self, "components_")
            and self.components_.shape[1] != X.shape[1]
        ):
            raise ValueError(
                f"The new inputs dimensionality ({X.shape[1]}) does not "
                "match the input dimensionality of the "
                f"previously learned transformation ({self.components_.shape[1]})."
            )
        # Check how the linear transformation should be initialized
        init = self.init
        if isinstance(init, np.ndarray):
            init = check_array(init)
            # Assert that init.shape[1] = X.shape[1]
            if init.shape[1] != X.shape[1]:
                raise ValueError(
                    f"The input dimensionality ({init.shape[1]}) of the given "
                    "linear transformation `init` must match the "
                    f"dimensionality of the given inputs `X` ({X.shape[1]})."
                )
            # Assert that init.shape[0] <= init.shape[1]
            if init.shape[0] > init.shape[1]:
                raise ValueError(
                    f"The output dimensionality ({init.shape[0]}) of the given "
                    "linear transformation `init` cannot be "
                    f"greater than its input dimensionality ({init.shape[1]})."
                )
            # Assert that self.n_components = init.shape[0]
            if self.n_components is not None and self.n_components != init.shape[0]:
                raise ValueError(
                    "The preferred dimensionality of the "
                    f"projected space `n_components` ({self.n_components}) does"
                    " not match the output dimensionality of "
                    "the given linear transformation "
                    f"`init` ({init.shape[0]})!"
                )
        # Initialize the random generator
        self.random_state_ = check_random_state(self.random_state)
        # Measure the total training time
        t_train = time.time()
        # Compute a mask that stays fixed during optimization:
        same_class_mask = y[:, np.newaxis] == y[np.newaxis, :]
        # (n_samples, n_samples)
        # Initialize the transformation
        transformation = np.ravel(self._initialize(X, y, init))
        # Create a dictionary of parameters to be passed to the optimizer
        disp = self.verbose - 2 if self.verbose > 1 else -1
        optimizer_params = {
            "method": "L-BFGS-B",
            "fun": self._loss_grad_lbfgs,
            # sign=-1.0: minimize the negated objective, i.e. maximize NCA's
            # expected number of correctly classified points.
            "args": (X, same_class_mask, -1.0),
            "jac": True,
            "x0": transformation,
            "tol": self.tol,
            "options": dict(
                maxiter=self.max_iter,
                **_get_additional_lbfgs_options_dict("disp", disp),
            ),
            "callback": self._callback,
        }
        # Call the optimizer
        self.n_iter_ = 0
        opt_result = minimize(**optimizer_params)
        # Reshape the solution found by the optimizer
        self.components_ = opt_result.x.reshape(-1, X.shape[1])
        # Stop timer
        t_train = time.time() - t_train
        # NOTE(review): the convergence warning below is only emitted in
        # verbose mode — confirm this gating is intended.
        if self.verbose:
            cls_name = self.__class__.__name__
            # Warn the user if the algorithm did not converge
            if not opt_result.success:
                warn(
                    "[{}] NCA did not converge: {}".format(
                        cls_name, opt_result.message
                    ),
                    ConvergenceWarning,
                )
            print("[{}] Training took {:8.2f}s.".format(cls_name, t_train))
        return self
def transform(self, X):
"""Apply the learned transformation to the given data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Data samples.
Returns
-------
X_embedded: ndarray of shape (n_samples, n_components)
The data samples transformed.
Raises
------
NotFittedError
If :meth:`fit` has not been called before.
"""
check_is_fitted(self)
X = validate_data(self, X, reset=False)
return np.dot(X, self.components_.T)
    def _initialize(self, X, y, init):
        """Initialize the transformation.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The training samples.
        y : array-like of shape (n_samples,)
            The training labels.
        init : str or ndarray of shape (n_features_a, n_features_b)
            The validated initialization of the linear transformation.
        Returns
        -------
        transformation : ndarray of shape (n_components, n_features)
            The initialized linear transformation.
        """
        transformation = init
        if self.warm_start and hasattr(self, "components_"):
            # Resume from the previously learned transformation.
            transformation = self.components_
        elif isinstance(init, np.ndarray):
            # User-supplied matrix: already validated in fit, use as-is.
            pass
        else:
            n_samples, n_features = X.shape
            n_components = self.n_components or n_features
            if init == "auto":
                # Pick the most informative initialization compatible with
                # the requested dimensionality (see the class docstring).
                n_classes = len(np.unique(y))
                if n_components <= min(n_features, n_classes - 1):
                    init = "lda"
                elif n_components < min(n_features, n_samples):
                    init = "pca"
                else:
                    init = "identity"
            if init == "identity":
                transformation = np.eye(n_components, X.shape[1])
            elif init == "random":
                transformation = self.random_state_.standard_normal(
                    size=(n_components, X.shape[1])
                )
            elif init in {"pca", "lda"}:
                init_time = time.time()
                if init == "pca":
                    pca = PCA(
                        n_components=n_components, random_state=self.random_state_
                    )
                    if self.verbose:
                        print("Finding principal components... ", end="")
                        sys.stdout.flush()
                    pca.fit(X)
                    transformation = pca.components_
                elif init == "lda":
                    # Imported locally (presumably to avoid a circular import
                    # at module load) — TODO confirm.
                    from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
                    lda = LinearDiscriminantAnalysis(n_components=n_components)
                    if self.verbose:
                        print("Finding most discriminative components... ", end="")
                        sys.stdout.flush()
                    lda.fit(X, y)
                    transformation = lda.scalings_.T[:n_components]
                if self.verbose:
                    print("done in {:5.2f}s".format(time.time() - init_time))
        return transformation
def _callback(self, transformation):
"""Called after each iteration of the optimizer.
Parameters
----------
transformation : ndarray of shape (n_components * n_features,)
The solution computed by the optimizer in this iteration.
"""
if self.callback is not None:
self.callback(transformation, self.n_iter_)
self.n_iter_ += 1
    def _loss_grad_lbfgs(self, transformation, X, same_class_mask, sign=1.0):
        """Compute the loss and the loss gradient w.r.t. `transformation`.
        Parameters
        ----------
        transformation : ndarray of shape (n_components * n_features,)
            The raveled linear transformation on which to compute loss and
            evaluate gradient.
        X : ndarray of shape (n_samples, n_features)
            The training samples.
        same_class_mask : ndarray of shape (n_samples, n_samples)
            A mask where `mask[i, j] == 1` if `X[i]` and `X[j]` belong
            to the same class, and `0` otherwise.
        sign : float, default=1.0
            Multiplier applied to the returned loss and gradient; `fit`
            passes -1.0 so that minimizing the result maximizes the NCA
            objective.
        Returns
        -------
        loss : float
            The loss computed for the given transformation.
        gradient : ndarray of shape (n_components * n_features,)
            The new (flattened) gradient of the loss.
        """
        # n_iter_ == 0 means this is the very first objective evaluation:
        # print the progress-table header once (in verbose mode).
        if self.n_iter_ == 0:
            self.n_iter_ += 1
            if self.verbose:
                header_fields = ["Iteration", "Objective Value", "Time(s)"]
                header_fmt = "{:>10} {:>20} {:>10}"
                header = header_fmt.format(*header_fields)
                cls_name = self.__class__.__name__
                print("[{}]".format(cls_name))
                print(
                    "[{}] {}\n[{}] {}".format(
                        cls_name, header, cls_name, "-" * len(header)
                    )
                )
        t_funcall = time.time()
        transformation = transformation.reshape(-1, X.shape[1])
        X_embedded = np.dot(X, transformation.T)  # (n_samples, n_components)
        # Compute softmax distances; the diagonal is set to +inf so that a
        # point is never its own neighbor (softmax(-inf) == 0).
        p_ij = pairwise_distances(X_embedded, squared=True)
        np.fill_diagonal(p_ij, np.inf)
        p_ij = softmax(-p_ij)  # (n_samples, n_samples)
        # Compute loss: expected number of correctly classified points.
        masked_p_ij = p_ij * same_class_mask
        p = np.sum(masked_p_ij, axis=1, keepdims=True)  # (n_samples, 1)
        loss = np.sum(p)
        # Compute gradient of loss w.r.t. `transform`
        weighted_p_ij = masked_p_ij - p_ij * p
        weighted_p_ij_sym = weighted_p_ij + weighted_p_ij.T
        np.fill_diagonal(weighted_p_ij_sym, -weighted_p_ij.sum(axis=0))
        gradient = 2 * X_embedded.T.dot(weighted_p_ij_sym).dot(X)
        # time complexity of the gradient: O(n_components x n_samples x (
        # n_samples + n_features))
        if self.verbose:
            t_funcall = time.time() - t_funcall
            values_fmt = "[{}] {:>10} {:>20.6e} {:>10.2f}"
            print(
                values_fmt.format(
                    self.__class__.__name__, self.n_iter_, loss, t_funcall
                )
            )
            sys.stdout.flush()
        return sign * loss, sign * gradient.ravel()
    def __sklearn_tags__(self):
        # NCA is supervised: declare that `fit` requires a target `y`.
        tags = super().__sklearn_tags__()
        tags.target_tags.required = True
        return tags
@property
def _n_features_out(self):
"""Number of transformed output features."""
return self.components_.shape[0]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/neighbors/_nearest_centroid.py | sklearn/neighbors/_nearest_centroid.py | """
Nearest Centroid Classification
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from numbers import Real
import numpy as np
from scipy import sparse as sp
from sklearn.base import BaseEstimator, ClassifierMixin, _fit_context
from sklearn.discriminant_analysis import DiscriminantAnalysisPredictionMixin
from sklearn.metrics.pairwise import pairwise_distances, pairwise_distances_argmin
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import get_tags
from sklearn.utils._available_if import available_if
from sklearn.utils._param_validation import Interval, StrOptions
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.sparsefuncs import csc_median_axis_0
from sklearn.utils.validation import check_is_fitted, validate_data
class NearestCentroid(
DiscriminantAnalysisPredictionMixin, ClassifierMixin, BaseEstimator
):
"""Nearest centroid classifier.
Each class is represented by its centroid, with test samples classified to
the class with the nearest centroid.
Read more in the :ref:`User Guide <nearest_centroid_classifier>`.
Parameters
----------
metric : {"euclidean", "manhattan"}, default="euclidean"
Metric to use for distance computation.
If `metric="euclidean"`, the centroid for the samples corresponding to each
class is the arithmetic mean, which minimizes the sum of squared L1 distances.
If `metric="manhattan"`, the centroid is the feature-wise median, which
minimizes the sum of L1 distances.
.. versionchanged:: 1.5
All metrics but `"euclidean"` and `"manhattan"` were deprecated and
now raise an error.
.. versionchanged:: 0.19
`metric='precomputed'` was deprecated and now raises an error
shrink_threshold : float, default=None
Threshold for shrinking centroids to remove features.
priors : {"uniform", "empirical"} or array-like of shape (n_classes,), \
default="uniform"
The class prior probabilities. By default, the class proportions are
inferred from the training data.
.. versionadded:: 1.6
Attributes
----------
centroids_ : array-like of shape (n_classes, n_features)
Centroid of each class.
classes_ : array of shape (n_classes,)
The unique classes labels.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
deviations_ : ndarray of shape (n_classes, n_features)
Deviations (or shrinkages) of the centroids of each class from the
overall centroid. Equal to eq. (18.4) if `shrink_threshold=None`,
else (18.5) p. 653 of [2]. Can be used to identify features used
for classification.
.. versionadded:: 1.6
within_class_std_dev_ : ndarray of shape (n_features,)
Pooled or within-class standard deviation of input data.
.. versionadded:: 1.6
class_prior_ : ndarray of shape (n_classes,)
The class prior probabilities.
.. versionadded:: 1.6
See Also
--------
KNeighborsClassifier : Nearest neighbors classifier.
Notes
-----
When used for text classification with tf-idf vectors, this classifier is
also known as the Rocchio classifier.
References
----------
[1] Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
multiple cancer types by shrunken centroids of gene expression. Proceedings
of the National Academy of Sciences of the United States of America,
99(10), 6567-6572. The National Academy of Sciences.
[2] Hastie, T., Tibshirani, R., Friedman, J. (2009). The Elements of Statistical
Learning Data Mining, Inference, and Prediction. 2nd Edition. New York, Springer.
Examples
--------
>>> from sklearn.neighbors import NearestCentroid
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = NearestCentroid()
>>> clf.fit(X, y)
NearestCentroid()
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
_parameter_constraints: dict = {
"metric": [StrOptions({"manhattan", "euclidean"})],
"shrink_threshold": [Interval(Real, 0, None, closed="neither"), None],
"priors": ["array-like", StrOptions({"empirical", "uniform"})],
}
    def __init__(
        self,
        metric="euclidean",
        *,
        shrink_threshold=None,
        priors="uniform",
    ):
        # sklearn convention: __init__ only stores hyper-parameters verbatim;
        # validation happens in `fit` via `_fit_context`/`_parameter_constraints`.
        self.metric = metric
        self.shrink_threshold = shrink_threshold
        self.priors = priors
    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y):
        """
        Fit the NearestCentroid model according to the given training data.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training vector, where `n_samples` is the number of samples and
            `n_features` is the number of features.
            Note that centroid shrinking cannot be used with sparse matrices.
        y : array-like of shape (n_samples,)
            Target values.
        Returns
        -------
        self : object
            Fitted estimator.
        """
        # If X is sparse and the metric is "manhattan", store it in CSC
        # format, which makes the per-class median easier to compute.
        if self.metric == "manhattan":
            X, y = validate_data(self, X, y, accept_sparse=["csc"])
        else:
            ensure_all_finite = (
                "allow-nan" if get_tags(self).input_tags.allow_nan else True
            )
            X, y = validate_data(
                self,
                X,
                y,
                ensure_all_finite=ensure_all_finite,
                accept_sparse=["csr", "csc"],
            )
        is_X_sparse = sp.issparse(X)
        check_classification_targets(y)
        n_samples, n_features = X.shape
        le = LabelEncoder()
        y_ind = le.fit_transform(y)
        self.classes_ = classes = le.classes_
        n_classes = classes.size
        if n_classes < 2:
            raise ValueError(
                "The number of classes has to be greater than one; got %d class"
                % (n_classes)
            )
        if self.priors == "empirical":  # estimate priors from sample
            # return_inverse gives per-sample class indices (non-negative ints)
            _, class_counts = np.unique(y, return_inverse=True)
            self.class_prior_ = np.bincount(class_counts) / float(len(y))
        elif self.priors == "uniform":
            self.class_prior_ = np.asarray([1 / n_classes] * n_classes)
        else:
            self.class_prior_ = np.asarray(self.priors)
        if (self.class_prior_ < 0).any():
            raise ValueError("priors must be non-negative")
        if not np.isclose(self.class_prior_.sum(), 1.0):
            warnings.warn(
                "The priors do not sum to 1. Normalizing such that it sums to one.",
                UserWarning,
            )
            self.class_prior_ = self.class_prior_ / self.class_prior_.sum()
        # Centroid of each class, filled in the loop below.
        self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
        # Number of samples in each class.
        nk = np.zeros(n_classes)
        for cur_class in range(n_classes):
            center_mask = y_ind == cur_class
            nk[cur_class] = np.sum(center_mask)
            if is_X_sparse:
                center_mask = np.where(center_mask)[0]
            if self.metric == "manhattan":
                # NumPy does not calculate median of sparse matrices.
                if not is_X_sparse:
                    self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
                else:
                    self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
            else:  # metric == "euclidean"
                self.centroids_[cur_class] = X[center_mask].mean(axis=0)
        # Compute within-class std_dev with unshrunken centroids
        variance = np.array(X - self.centroids_[y_ind], copy=False) ** 2
        self.within_class_std_dev_ = np.array(
            np.sqrt(variance.sum(axis=0) / (n_samples - n_classes)), copy=False
        )
        if any(self.within_class_std_dev_ == 0):
            warnings.warn(
                "self.within_class_std_dev_ has at least 1 zero standard deviation."
                "Inputs within the same classes for at least 1 feature are identical."
            )
        err_msg = "All features have zero variance. Division by zero."
        if is_X_sparse and np.all((X.max(axis=0) - X.min(axis=0)).toarray() == 0):
            raise ValueError(err_msg)
        elif not is_X_sparse and np.all(np.ptp(X, axis=0) == 0):
            raise ValueError(err_msg)
        dataset_centroid_ = X.mean(axis=0)
        # m parameter for determining deviation
        m = np.sqrt((1.0 / nk) - (1.0 / n_samples))
        # Calculate deviation using the standard deviation of centroids,
        # with a median offset to deter outliers from affecting the results.
        s = self.within_class_std_dev_ + np.median(self.within_class_std_dev_)
        mm = m.reshape(len(m), 1)  # Reshape to allow broadcasting.
        ms = mm * s
        self.deviations_ = np.array(
            (self.centroids_ - dataset_centroid_) / ms, copy=False
        )
        # Soft thresholding: if the deviation crosses 0 during shrinking,
        # it becomes zero.
        if self.shrink_threshold:
            signs = np.sign(self.deviations_)
            self.deviations_ = np.abs(self.deviations_) - self.shrink_threshold
            np.clip(self.deviations_, 0, None, out=self.deviations_)
            self.deviations_ *= signs
            # Now adjust the centroids using the deviation
            msd = ms * self.deviations_
            self.centroids_ = np.array(dataset_centroid_ + msd, copy=False)
        return self
    def predict(self, X):
        """Perform classification on an array of test vectors `X`.
        The predicted class `C` for each sample in `X` is returned.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Input data.
        Returns
        -------
        y_pred : ndarray of shape (n_samples,)
            The predicted classes.
        """
        check_is_fitted(self)
        # Fast path: with uniform priors, the discriminant reduces to plain
        # nearest-centroid assignment, so skip the scoring machinery.
        if np.isclose(self.class_prior_, 1 / len(self.classes_)).all():
            # `validate_data` is called here since we are not calling `super()`
            ensure_all_finite = (
                "allow-nan" if get_tags(self).input_tags.allow_nan else True
            )
            X = validate_data(
                self,
                X,
                ensure_all_finite=ensure_all_finite,
                accept_sparse="csr",
                reset=False,
            )
            return self.classes_[
                pairwise_distances_argmin(X, self.centroids_, metric=self.metric)
            ]
        else:
            # Non-uniform priors: defer to discriminant-score based
            # prediction (DiscriminantAnalysisPredictionMixin).
            return super().predict(X)
    def _decision_function(self, X):
        """Return discriminant scores; see eq. (18.2) p. 652 of the ESL.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Input data.
        Returns
        -------
        discriminant_score : ndarray of shape (n_samples, n_classes)
            Per-class score `-d^2 + 2 * log(prior)`, where `d` is the
            distance to the class centroid in standardized feature space.
        """
        check_is_fitted(self, "centroids_")
        X_normalized = validate_data(
            self, X, copy=True, reset=False, accept_sparse="csr", dtype=np.float64
        )
        discriminant_score = np.empty(
            (X_normalized.shape[0], self.classes_.size), dtype=np.float64
        )
        # Standardize by the pooled within-class std dev, skipping features
        # with zero variance to avoid division by zero.
        mask = self.within_class_std_dev_ != 0
        X_normalized[:, mask] /= self.within_class_std_dev_[mask]
        centroids_normalized = self.centroids_.copy()
        centroids_normalized[:, mask] /= self.within_class_std_dev_[mask]
        for class_idx in range(self.classes_.size):
            distances = pairwise_distances(
                X_normalized, centroids_normalized[[class_idx]], metric=self.metric
            ).ravel()
            distances **= 2
            # Higher score = closer centroid and/or larger class prior.
            discriminant_score[:, class_idx] = np.squeeze(
                -distances + 2.0 * np.log(self.class_prior_[class_idx])
            )
        return discriminant_score
    def _check_euclidean_metric(self):
        # Predicate for `available_if` below: the probabilistic/decision
        # prediction methods are only well-defined for the Euclidean metric.
        return self.metric == "euclidean"
decision_function = available_if(_check_euclidean_metric)(
DiscriminantAnalysisPredictionMixin.decision_function
)
predict_proba = available_if(_check_euclidean_metric)(
DiscriminantAnalysisPredictionMixin.predict_proba
)
predict_log_proba = available_if(_check_euclidean_metric)(
DiscriminantAnalysisPredictionMixin.predict_log_proba
)
    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        # NOTE(review): `_parameter_constraints` restricts `metric` to
        # {"manhattan", "euclidean"}, so this comparison can never be True
        # here — confirm whether this is defensive/historical or stale.
        tags.input_tags.allow_nan = self.metric == "nan_euclidean"
        tags.input_tags.sparse = True
        return tags
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/neighbors/_classification.py | sklearn/neighbors/_classification.py | """Nearest Neighbor Classification"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from numbers import Integral
import numpy as np
from sklearn.base import ClassifierMixin, _fit_context
from sklearn.metrics._pairwise_distances_reduction import (
ArgKminClassMode,
RadiusNeighborsClassMode,
)
from sklearn.neighbors._base import (
KNeighborsMixin,
NeighborsBase,
RadiusNeighborsMixin,
_check_precomputed,
_get_weights,
)
from sklearn.utils._param_validation import StrOptions
from sklearn.utils.arrayfuncs import _all_with_any_reduction_axis_1
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.fixes import _mode
from sklearn.utils.validation import (
_is_arraylike,
_num_samples,
check_is_fitted,
validate_data,
)
def _adjusted_metric(metric, metric_kwargs, p=None):
metric_kwargs = metric_kwargs or {}
if metric == "minkowski":
metric_kwargs["p"] = p
if p == 2:
metric = "euclidean"
return metric, metric_kwargs
class KNeighborsClassifier(KNeighborsMixin, ClassifierMixin, NeighborsBase):
    """Classifier implementing the k-nearest neighbors vote.
    Read more in the :ref:`User Guide <classification>`.
    Parameters
    ----------
    n_neighbors : int, default=5
        Number of neighbors to use by default for :meth:`kneighbors` queries.
    weights : {'uniform', 'distance'}, callable or None, default='uniform'
        Weight function used in prediction. Possible values:
        - 'uniform' : uniform weights. All points in each neighborhood
          are weighted equally.
        - 'distance' : weight points by the inverse of their distance.
          in this case, closer neighbors of a query point will have a
          greater influence than neighbors which are further away.
        - [callable] : a user-defined function which accepts an
          array of distances, and returns an array of the same shape
          containing the weights.
        Refer to the example entitled
        :ref:`sphx_glr_auto_examples_neighbors_plot_classification.py`
        showing the impact of the `weights` parameter on the decision
        boundary.
    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
        Algorithm used to compute the nearest neighbors:
        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to :meth:`fit` method.
        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.
    leaf_size : int, default=30
        Leaf size passed to BallTree or KDTree. This can affect the
        speed of the construction and query, as well as the memory
        required to store the tree. The optimal value depends on the
        nature of the problem.
    p : float, default=2
        Power parameter for the Minkowski metric. When p = 1, this is equivalent
        to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2.
        For arbitrary p, minkowski_distance (l_p) is used. This parameter is expected
        to be positive.
    metric : str or callable, default='minkowski'
        Metric to use for distance computation. Default is "minkowski", which
        results in the standard Euclidean distance when p = 2. See the
        documentation of `scipy.spatial.distance
        <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
        the metrics listed in
        :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
        values.
        If metric is "precomputed", X is assumed to be a distance matrix and
        must be square during fit. X may be a :term:`sparse graph`, in which
        case only "nonzero" elements may be considered neighbors.
        If metric is a callable function, it takes two arrays representing 1D
        vectors as inputs and must return one value indicating the distance
        between those vectors. This works for Scipy's metrics, but is less
        efficient than passing the metric name as a string.
    metric_params : dict, default=None
        Additional keyword arguments for the metric function.
    n_jobs : int, default=None
        The number of parallel jobs to run for neighbors search.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
        Doesn't affect :meth:`fit` method.
    Attributes
    ----------
    classes_ : array of shape (n_classes,)
        Class labels known to the classifier
    effective_metric_ : str or callable
        The distance metric used. It will be same as the `metric` parameter
        or a synonym of it, e.g. 'euclidean' if the `metric` parameter set to
        'minkowski' and `p` parameter set to 2.
    effective_metric_params_ : dict
        Additional keyword arguments for the metric function. For most metrics
        will be same with `metric_params` parameter, but may also contain the
        `p` parameter value if the `effective_metric_` attribute is set to
        'minkowski'.
    n_features_in_ : int
        Number of features seen during :term:`fit`.
        .. versionadded:: 0.24
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.
        .. versionadded:: 1.0
    n_samples_fit_ : int
        Number of samples in the fitted data.
    outputs_2d_ : bool
        False when `y`'s shape is (n_samples, ) or (n_samples, 1) during fit
        otherwise True.
    See Also
    --------
    RadiusNeighborsClassifier: Classifier based on neighbors within a fixed radius.
    KNeighborsRegressor: Regression based on k-nearest neighbors.
    RadiusNeighborsRegressor: Regression based on neighbors within a fixed radius.
    NearestNeighbors: Unsupervised learner for implementing neighbor searches.
    Notes
    -----
    See :ref:`Nearest Neighbors <neighbors>` in the online documentation
    for a discussion of the choice of ``algorithm`` and ``leaf_size``.
    .. warning::
       Regarding the Nearest Neighbors algorithms, if it is found that two
       neighbors, neighbor `k+1` and `k`, have identical distances
       but different labels, the results will depend on the ordering of the
       training data.
    https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
    Examples
    --------
    >>> X = [[0], [1], [2], [3]]
    >>> y = [0, 0, 1, 1]
    >>> from sklearn.neighbors import KNeighborsClassifier
    >>> neigh = KNeighborsClassifier(n_neighbors=3)
    >>> neigh.fit(X, y)
    KNeighborsClassifier(...)
    >>> print(neigh.predict([[1.1]]))
    [0]
    >>> print(neigh.predict_proba([[0.9]]))
    [[0.666 0.333]]
    """
    # Inherit the base neighbors constraints; `radius` is irrelevant for a
    # k-NN classifier, and `weights` is validated here.
    _parameter_constraints: dict = {**NeighborsBase._parameter_constraints}
    _parameter_constraints.pop("radius")
    _parameter_constraints.update(
        {"weights": [StrOptions({"uniform", "distance"}), callable, None]}
    )
    def __init__(
        self,
        n_neighbors=5,
        *,
        weights="uniform",
        algorithm="auto",
        leaf_size=30,
        p=2,
        metric="minkowski",
        metric_params=None,
        n_jobs=None,
    ):
        super().__init__(
            n_neighbors=n_neighbors,
            algorithm=algorithm,
            leaf_size=leaf_size,
            metric=metric,
            p=p,
            metric_params=metric_params,
            n_jobs=n_jobs,
        )
        self.weights = weights
    @_fit_context(
        # KNeighborsClassifier.metric is not validated yet
        prefer_skip_nested_validation=False
    )
    def fit(self, X, y):
        """Fit the k-nearest neighbors classifier from the training dataset.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
                (n_samples, n_samples) if metric='precomputed'
            Training data.
        y : {array-like, sparse matrix} of shape (n_samples,) or \
                (n_samples, n_outputs)
            Target values.
        Returns
        -------
        self : KNeighborsClassifier
            The fitted k-nearest neighbors classifier.
        """
        return self._fit(X, y)
    def predict(self, X):
        """Predict the class labels for the provided data.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_queries, n_features), \
            or (n_queries, n_indexed) if metric == 'precomputed', or None
            Test samples. If `None`, predictions for all indexed points are
            returned; in this case, points are not considered their own
            neighbors.
        Returns
        -------
        y : ndarray of shape (n_queries,) or (n_queries, n_outputs)
            Class labels for each data sample.
        """
        check_is_fitted(self, "_fit_method")
        if self.weights == "uniform":
            # Fast path: with uniform weights and a brute-force fit, the
            # fused ArgKminClassMode reduction (via predict_proba) computes
            # class scores directly, so the argmax over probabilities gives
            # the labels without materializing neighbor indices here.
            if self._fit_method == "brute" and ArgKminClassMode.is_usable_for(
                X, self._fit_X, self.metric
            ):
                probabilities = self.predict_proba(X)
                if self.outputs_2d_:
                    return np.stack(
                        [
                            self.classes_[idx][np.argmax(probas, axis=1)]
                            for idx, probas in enumerate(probabilities)
                        ],
                        axis=1,
                    )
                return self.classes_[np.argmax(probabilities, axis=1)]
            # In that case, we do not need the distances to perform
            # the weighting so we do not compute them.
            neigh_ind = self.kneighbors(X, return_distance=False)
            neigh_dist = None
        else:
            neigh_dist, neigh_ind = self.kneighbors(X)
        classes_ = self.classes_
        _y = self._y
        if not self.outputs_2d_:
            # Normalize single-output targets to the multi-output layout so
            # the per-output loop below handles both cases uniformly.
            _y = self._y.reshape((-1, 1))
            classes_ = [self.classes_]
        n_outputs = len(classes_)
        n_queries = _num_samples(self._fit_X if X is None else X)
        weights = _get_weights(neigh_dist, self.weights)
        if weights is not None and _all_with_any_reduction_axis_1(weights, value=0):
            raise ValueError(
                "All neighbors of some sample is getting zero weights. "
                "Please modify 'weights' to avoid this case if you are "
                "using a user-defined function."
            )
        y_pred = np.empty((n_queries, n_outputs), dtype=classes_[0].dtype)
        for k, classes_k in enumerate(classes_):
            # (Weighted) majority vote among the k neighbors' encoded labels,
            # then map the winning encoded label back to the class value.
            if weights is None:
                mode, _ = _mode(_y[neigh_ind, k], axis=1)
            else:
                mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)
            mode = np.asarray(mode.ravel(), dtype=np.intp)
            y_pred[:, k] = classes_k.take(mode)
        if not self.outputs_2d_:
            y_pred = y_pred.ravel()
        return y_pred
    def predict_proba(self, X):
        """Return probability estimates for the test data X.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_queries, n_features), \
            or (n_queries, n_indexed) if metric == 'precomputed', or None
            Test samples. If `None`, predictions for all indexed points are
            returned; in this case, points are not considered their own
            neighbors.
        Returns
        -------
        p : ndarray of shape (n_queries, n_classes), or a list of n_outputs \
            of such arrays if n_outputs > 1.
            The class probabilities of the input samples. Classes are ordered
            by lexicographic order.
        """
        check_is_fitted(self, "_fit_method")
        if self.weights == "uniform":
            # TODO: systematize this mapping of metric for
            # PairwiseDistancesReductions.
            metric, metric_kwargs = _adjusted_metric(
                metric=self.metric, metric_kwargs=self.metric_params, p=self.p
            )
            # Fast path: fused brute-force reduction, single-output only.
            if (
                self._fit_method == "brute"
                and ArgKminClassMode.is_usable_for(X, self._fit_X, metric)
                # TODO: Implement efficient multi-output solution
                and not self.outputs_2d_
            ):
                if self.metric == "precomputed":
                    X = _check_precomputed(X)
                else:
                    X = validate_data(
                        self, X, accept_sparse="csr", reset=False, order="C"
                    )
                probabilities = ArgKminClassMode.compute(
                    X,
                    self._fit_X,
                    k=self.n_neighbors,
                    weights=self.weights,
                    Y_labels=self._y,
                    unique_Y_labels=self.classes_,
                    metric=metric,
                    metric_kwargs=metric_kwargs,
                    # `strategy="parallel_on_X"` has in practice be shown
                    # to be more efficient than `strategy="parallel_on_Y``
                    # on many combination of datasets.
                    # Hence, we choose to enforce it here.
                    # For more information, see:
                    # https://github.com/scikit-learn/scikit-learn/pull/24076#issuecomment-1445258342
                    # TODO: adapt the heuristic for `strategy="auto"` for
                    # `ArgKminClassMode` and use `strategy="auto"`.
                    strategy="parallel_on_X",
                )
                return probabilities
            # In that case, we do not need the distances to perform
            # the weighting so we do not compute them.
            neigh_ind = self.kneighbors(X, return_distance=False)
            neigh_dist = None
        else:
            neigh_dist, neigh_ind = self.kneighbors(X)
        classes_ = self.classes_
        _y = self._y
        if not self.outputs_2d_:
            # Normalize single-output targets to the multi-output layout.
            _y = self._y.reshape((-1, 1))
            classes_ = [self.classes_]
        n_queries = _num_samples(self._fit_X if X is None else X)
        weights = _get_weights(neigh_dist, self.weights)
        if weights is None:
            # Uniform voting: every neighbor contributes one vote.
            weights = np.ones_like(neigh_ind)
        elif _all_with_any_reduction_axis_1(weights, value=0):
            raise ValueError(
                "All neighbors of some sample is getting zero weights. "
                "Please modify 'weights' to avoid this case if you are "
                "using a user-defined function."
            )
        all_rows = np.arange(n_queries)
        probabilities = []
        for k, classes_k in enumerate(classes_):
            pred_labels = _y[:, k][neigh_ind]
            proba_k = np.zeros((n_queries, classes_k.size))
            # a simple ':' index doesn't work right
            for i, idx in enumerate(pred_labels.T):  # loop is O(n_neighbors)
                proba_k[all_rows, idx] += weights[:, i]
            # normalize 'votes' into real [0,1] probabilities
            normalizer = proba_k.sum(axis=1)[:, np.newaxis]
            proba_k /= normalizer
            probabilities.append(proba_k)
        if not self.outputs_2d_:
            probabilities = probabilities[0]
        return probabilities
    # This function is defined here only to modify the parent docstring
    # and add information about X=None
    def score(self, X, y, sample_weight=None):
        """
        Return the mean accuracy on the given test data and labels.
        In multi-label classification, this is the subset accuracy
        which is a harsh metric since you require for each sample that
        each label set be correctly predicted.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features), or None
            Test samples. If `None`, predictions for all indexed points are
            used; in this case, points are not considered their own
            neighbors. This means that `knn.fit(X, y).score(None, y)`
            implicitly performs a leave-one-out cross-validation procedure
            and is equivalent to `cross_val_score(knn, X, y, cv=LeaveOneOut())`
            but typically much faster.
        y : array-like of shape (n_samples,) or (n_samples, n_outputs)
            True labels for `X`.
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights.
        Returns
        -------
        score : float
            Mean accuracy of ``self.predict(X)`` w.r.t. `y`.
        """
        return super().score(X, y, sample_weight)
    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        tags.classifier_tags.multi_label = True
        # With metric="precomputed" the estimator consumes pairwise distances.
        tags.input_tags.pairwise = self.metric == "precomputed"
        return tags
class RadiusNeighborsClassifier(RadiusNeighborsMixin, ClassifierMixin, NeighborsBase):
"""Classifier implementing a vote among neighbors within a given radius.
Read more in the :ref:`User Guide <classification>`.
Parameters
----------
radius : float, default=1.0
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : {'uniform', 'distance'}, callable or None, default='uniform'
Weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, default=30
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p : float, default=2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
This parameter is expected to be positive.
metric : str or callable, default='minkowski'
Metric to use for distance computation. Default is "minkowski", which
results in the standard Euclidean distance when p = 2. See the
documentation of `scipy.spatial.distance
<https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
the metrics listed in
:class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
values.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square during fit. X may be a :term:`sparse graph`, in which
case only "nonzero" elements may be considered neighbors.
If metric is a callable function, it takes two arrays representing 1D
vectors as inputs and must return one value indicating the distance
between those vectors. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
outlier_label : {manual label, 'most_frequent'}, default=None
Label for outlier samples (samples with no neighbors in given radius).
- manual label: str or int label (should be the same type as y)
or list of manual labels if multi-output is used.
- 'most_frequent' : assign the most frequent label of y to outliers.
- None : when any outlier is detected, ValueError will be raised.
The outlier label should be selected from among the unique 'Y' labels.
If it is specified with a different value a warning will be raised and
all class probabilities of outliers will be assigned to be 0.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Attributes
----------
classes_ : ndarray of shape (n_classes,)
Class labels known to the classifier.
effective_metric_ : str or callable
The distance metric used. It will be same as the `metric` parameter
or a synonym of it, e.g. 'euclidean' if the `metric` parameter set to
'minkowski' and `p` parameter set to 2.
effective_metric_params_ : dict
Additional keyword arguments for the metric function. For most metrics
will be same with `metric_params` parameter, but may also contain the
`p` parameter value if the `effective_metric_` attribute is set to
'minkowski'.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_samples_fit_ : int
Number of samples in the fitted data.
outlier_label_ : int or array-like of shape (n_class,)
Label which is given for outlier samples (samples with no neighbors
on given radius).
outputs_2d_ : bool
False when `y`'s shape is (n_samples, ) or (n_samples, 1) during fit
otherwise True.
See Also
--------
KNeighborsClassifier : Classifier implementing the k-nearest neighbors
vote.
RadiusNeighborsRegressor : Regression based on neighbors within a
fixed radius.
KNeighborsRegressor : Regression based on k-nearest neighbors.
NearestNeighbors : Unsupervised learner for implementing neighbor
searches.
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsClassifier
>>> neigh = RadiusNeighborsClassifier(radius=1.0)
>>> neigh.fit(X, y)
RadiusNeighborsClassifier(...)
>>> print(neigh.predict([[1.5]]))
[0]
>>> print(neigh.predict_proba([[1.0]]))
[[0.66666667 0.33333333]]
"""
_parameter_constraints: dict = {
**NeighborsBase._parameter_constraints,
"weights": [StrOptions({"uniform", "distance"}), callable, None],
"outlier_label": [Integral, str, "array-like", None],
}
_parameter_constraints.pop("n_neighbors")
def __init__(
self,
radius=1.0,
*,
weights="uniform",
algorithm="auto",
leaf_size=30,
p=2,
metric="minkowski",
outlier_label=None,
metric_params=None,
n_jobs=None,
):
super().__init__(
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric,
p=p,
metric_params=metric_params,
n_jobs=n_jobs,
)
self.weights = weights
self.outlier_label = outlier_label
@_fit_context(
# RadiusNeighborsClassifier.metric is not validated yet
prefer_skip_nested_validation=False
)
def fit(self, X, y):
"""Fit the radius neighbors classifier from the training dataset.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
(n_samples, n_samples) if metric='precomputed'
Training data.
y : {array-like, sparse matrix} of shape (n_samples,) or \
(n_samples, n_outputs)
Target values.
Returns
-------
self : RadiusNeighborsClassifier
The fitted radius neighbors classifier.
"""
self._fit(X, y)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
if self.outlier_label is None:
outlier_label_ = None
elif self.outlier_label == "most_frequent":
outlier_label_ = []
# iterate over multi-output, get the most frequent label for each
# output.
for k, classes_k in enumerate(classes_):
label_count = np.bincount(_y[:, k])
outlier_label_.append(classes_k[label_count.argmax()])
else:
if _is_arraylike(self.outlier_label) and not isinstance(
self.outlier_label, str
):
if len(self.outlier_label) != len(classes_):
raise ValueError(
"The length of outlier_label: {} is "
"inconsistent with the output "
"length: {}".format(self.outlier_label, len(classes_))
)
outlier_label_ = self.outlier_label
else:
outlier_label_ = [self.outlier_label] * len(classes_)
for classes, label in zip(classes_, outlier_label_):
if _is_arraylike(label) and not isinstance(label, str):
# ensure the outlier label for each output is a scalar.
raise TypeError(
"The outlier_label of classes {} is "
"supposed to be a scalar, got "
"{}.".format(classes, label)
)
if np.append(classes, label).dtype != classes.dtype:
# ensure the dtype of outlier label is consistent with y.
raise TypeError(
"The dtype of outlier_label {} is "
"inconsistent with classes {} in "
"y.".format(label, classes)
)
self.outlier_label_ = outlier_label_
return self
def predict(self, X):
"""Predict the class labels for the provided data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_queries, n_features), \
or (n_queries, n_indexed) if metric == 'precomputed', or None
Test samples. If `None`, predictions for all indexed points are
returned; in this case, points are not considered their own
neighbors.
Returns
-------
y : ndarray of shape (n_queries,) or (n_queries, n_outputs)
Class labels for each data sample.
"""
probs = self.predict_proba(X)
classes_ = self.classes_
if not self.outputs_2d_:
probs = [probs]
classes_ = [self.classes_]
n_outputs = len(classes_)
n_queries = probs[0].shape[0]
y_pred = np.empty((n_queries, n_outputs), dtype=classes_[0].dtype)
for k, prob in enumerate(probs):
# iterate over multi-output, assign labels based on probabilities
# of each output.
max_prob_index = prob.argmax(axis=1)
y_pred[:, k] = classes_[k].take(max_prob_index)
outlier_zero_probs = (prob == 0).all(axis=1)
if outlier_zero_probs.any():
zero_prob_index = np.flatnonzero(outlier_zero_probs)
y_pred[zero_prob_index, k] = self.outlier_label_[k]
if not self.outputs_2d_:
y_pred = y_pred.ravel()
return y_pred
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_queries, n_features), \
or (n_queries, n_indexed) if metric == 'precomputed', or None
Test samples. If `None`, predictions for all indexed points are
returned; in this case, points are not considered their own
neighbors.
Returns
-------
p : ndarray of shape (n_queries, n_classes), or a list of \
n_outputs of such arrays if n_outputs > 1.
The class probabilities of the input samples. Classes are ordered
by lexicographic order.
"""
check_is_fitted(self, "_fit_method")
n_queries = _num_samples(self._fit_X if X is None else X)
metric, metric_kwargs = _adjusted_metric(
metric=self.metric, metric_kwargs=self.metric_params, p=self.p
)
if (
self.weights == "uniform"
and self._fit_method == "brute"
and not self.outputs_2d_
and RadiusNeighborsClassMode.is_usable_for(X, self._fit_X, metric)
):
probabilities = RadiusNeighborsClassMode.compute(
X=X,
Y=self._fit_X,
radius=self.radius,
weights=self.weights,
Y_labels=self._y,
unique_Y_labels=self.classes_,
outlier_label=self.outlier_label,
metric=metric,
metric_kwargs=metric_kwargs,
strategy="parallel_on_X",
# `strategy="parallel_on_X"` has in practice be shown
# to be more efficient than `strategy="parallel_on_Y``
# on many combination of datasets.
# Hence, we choose to enforce it here.
# For more information, see:
# https://github.com/scikit-learn/scikit-learn/pull/26828/files#r1282398471
)
return probabilities
neigh_dist, neigh_ind = self.radius_neighbors(X)
outlier_mask = np.zeros(n_queries, dtype=bool)
outlier_mask[:] = [len(nind) == 0 for nind in neigh_ind]
outliers = np.flatnonzero(outlier_mask)
inliers = np.flatnonzero(~outlier_mask)
classes_ = self.classes_
_y = self._y
if not self.outputs_2d_:
_y = self._y.reshape((-1, 1))
classes_ = [self.classes_]
if self.outlier_label_ is None and outliers.size > 0:
raise ValueError(
"No neighbors found for test samples %r, "
"you can try using larger radius, "
"giving a label for outliers, "
"or considering removing them from your dataset." % outliers
)
weights = _get_weights(neigh_dist, self.weights)
if weights is not None:
weights = weights[inliers]
probabilities = []
# iterate over multi-output, measure probabilities of the k-th output.
for k, classes_k in enumerate(classes_):
pred_labels = np.zeros(len(neigh_ind), dtype=object)
pred_labels[:] = [_y[ind, k] for ind in neigh_ind]
proba_k = np.zeros((n_queries, classes_k.size))
proba_inl = np.zeros((len(inliers), classes_k.size))
# samples have different size of neighbors within the same radius
if weights is None:
for i, idx in enumerate(pred_labels[inliers]):
proba_inl[i, :] = np.bincount(idx, minlength=classes_k.size)
else:
for i, idx in enumerate(pred_labels[inliers]):
proba_inl[i, :] = np.bincount(
idx, weights[i], minlength=classes_k.size
)
proba_k[inliers, :] = proba_inl
if outliers.size > 0:
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/neighbors/__init__.py | sklearn/neighbors/__init__.py | """The k-nearest neighbors algorithms."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from sklearn.neighbors._ball_tree import BallTree
from sklearn.neighbors._base import (
VALID_METRICS,
VALID_METRICS_SPARSE,
sort_graph_by_row_values,
)
from sklearn.neighbors._classification import (
KNeighborsClassifier,
RadiusNeighborsClassifier,
)
from sklearn.neighbors._graph import (
KNeighborsTransformer,
RadiusNeighborsTransformer,
kneighbors_graph,
radius_neighbors_graph,
)
from sklearn.neighbors._kd_tree import KDTree
from sklearn.neighbors._kde import KernelDensity
from sklearn.neighbors._lof import LocalOutlierFactor
from sklearn.neighbors._nca import NeighborhoodComponentsAnalysis
from sklearn.neighbors._nearest_centroid import NearestCentroid
from sklearn.neighbors._regression import KNeighborsRegressor, RadiusNeighborsRegressor
from sklearn.neighbors._unsupervised import NearestNeighbors
# Public API of `sklearn.neighbors`.
__all__ = [
    "VALID_METRICS",
    "VALID_METRICS_SPARSE",
    "BallTree",
    "KDTree",
    "KNeighborsClassifier",
    "KNeighborsRegressor",
    "KNeighborsTransformer",
    "KernelDensity",
    "LocalOutlierFactor",
    "NearestCentroid",
    "NearestNeighbors",
    "NeighborhoodComponentsAnalysis",
    "RadiusNeighborsClassifier",
    "RadiusNeighborsRegressor",
    "RadiusNeighborsTransformer",
    "kneighbors_graph",
    "radius_neighbors_graph",
    "sort_graph_by_row_values",
]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/neighbors/_kde.py | sklearn/neighbors/_kde.py | """
Kernel Density Estimation
-------------------------
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import itertools
from numbers import Integral, Real
import numpy as np
from scipy.special import gammainc
from sklearn.base import BaseEstimator, _fit_context
from sklearn.neighbors._ball_tree import BallTree
from sklearn.neighbors._base import VALID_METRICS
from sklearn.neighbors._kd_tree import KDTree
from sklearn.utils import check_random_state
from sklearn.utils._param_validation import Interval, StrOptions
from sklearn.utils.extmath import row_norms
from sklearn.utils.validation import (
_check_sample_weight,
check_is_fitted,
validate_data,
)
# Kernel names accepted by `KernelDensity(kernel=...)`.
VALID_KERNELS = [
    "gaussian",
    "tophat",
    "epanechnikov",
    "exponential",
    "linear",
    "cosine",
]
# Maps the user-facing `algorithm` string to the tree class implementing it.
TREE_DICT = {"ball_tree": BallTree, "kd_tree": KDTree}
# TODO: implement a brute force version for testing purposes
# TODO: create a density estimation base class?
class KernelDensity(BaseEstimator):
"""Kernel Density Estimation.
Read more in the :ref:`User Guide <kernel_density>`.
Parameters
----------
bandwidth : float or {"scott", "silverman"}, default=1.0
The bandwidth of the kernel. If bandwidth is a float, it defines the
bandwidth of the kernel. If bandwidth is a string, one of the estimation
methods is implemented.
algorithm : {'kd_tree', 'ball_tree', 'auto'}, default='auto'
The tree algorithm to use.
kernel : {'gaussian', 'tophat', 'epanechnikov', 'exponential', 'linear', \
'cosine'}, default='gaussian'
The kernel to use.
metric : str, default='euclidean'
Metric to use for distance computation. See the
documentation of `scipy.spatial.distance
<https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
the metrics listed in
:class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
values.
Not all metrics are valid with all algorithms: refer to the
documentation of :class:`BallTree` and :class:`KDTree`. Note that the
normalization of the density output is correct only for the Euclidean
distance metric.
atol : float, default=0
The desired absolute tolerance of the result. A larger tolerance will
generally lead to faster execution.
rtol : float, default=0
The desired relative tolerance of the result. A larger tolerance will
generally lead to faster execution.
breadth_first : bool, default=True
If true (default), use a breadth-first approach to the problem.
Otherwise use a depth-first approach.
leaf_size : int, default=40
Specify the leaf size of the underlying tree. See :class:`BallTree`
or :class:`KDTree` for details.
metric_params : dict, default=None
Additional parameters to be passed to the tree for use with the
metric. For more information, see the documentation of
:class:`BallTree` or :class:`KDTree`.
Attributes
----------
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
tree_ : ``BinaryTree`` instance
The tree algorithm for fast generalized N-point problems.
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
bandwidth_ : float
Value of the bandwidth, given directly by the bandwidth parameter or
estimated using the 'scott' or 'silverman' method.
.. versionadded:: 1.0
See Also
--------
sklearn.neighbors.KDTree : K-dimensional tree for fast generalized N-point
problems.
sklearn.neighbors.BallTree : Ball tree for fast generalized N-point
problems.
Examples
--------
Compute a gaussian kernel density estimate with a fixed bandwidth.
>>> from sklearn.neighbors import KernelDensity
>>> import numpy as np
>>> rng = np.random.RandomState(42)
>>> X = rng.random_sample((100, 3))
>>> kde = KernelDensity(kernel='gaussian', bandwidth=0.5).fit(X)
>>> log_density = kde.score_samples(X[:3])
>>> log_density
array([-1.52955942, -1.51462041, -1.60244657])
"""
_parameter_constraints: dict = {
"bandwidth": [
Interval(Real, 0, None, closed="neither"),
StrOptions({"scott", "silverman"}),
],
"algorithm": [StrOptions(set(TREE_DICT.keys()) | {"auto"})],
"kernel": [StrOptions(set(VALID_KERNELS))],
"metric": [
StrOptions(
set(itertools.chain(*[VALID_METRICS[alg] for alg in TREE_DICT.keys()]))
)
],
"atol": [Interval(Real, 0, None, closed="left")],
"rtol": [Interval(Real, 0, None, closed="left")],
"breadth_first": ["boolean"],
"leaf_size": [Interval(Integral, 1, None, closed="left")],
"metric_params": [None, dict],
}
def __init__(
self,
*,
bandwidth=1.0,
algorithm="auto",
kernel="gaussian",
metric="euclidean",
atol=0,
rtol=0,
breadth_first=True,
leaf_size=40,
metric_params=None,
):
self.algorithm = algorithm
self.bandwidth = bandwidth
self.kernel = kernel
self.metric = metric
self.atol = atol
self.rtol = rtol
self.breadth_first = breadth_first
self.leaf_size = leaf_size
self.metric_params = metric_params
def _choose_algorithm(self, algorithm, metric):
# given the algorithm string + metric string, choose the optimal
# algorithm to compute the result.
if algorithm == "auto":
# use KD Tree if possible
if metric in KDTree.valid_metrics:
return "kd_tree"
elif metric in BallTree.valid_metrics:
return "ball_tree"
else: # kd_tree or ball_tree
if metric not in TREE_DICT[algorithm].valid_metrics:
raise ValueError(
"invalid metric for {0}: '{1}'".format(TREE_DICT[algorithm], metric)
)
return algorithm
@_fit_context(
# KernelDensity.metric is not validated yet
prefer_skip_nested_validation=False
)
def fit(self, X, y=None, sample_weight=None):
"""Fit the Kernel Density model on the data.
Parameters
----------
X : array-like of shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
y : None
Ignored. This parameter exists only for compatibility with
:class:`~sklearn.pipeline.Pipeline`.
sample_weight : array-like of shape (n_samples,), default=None
List of sample weights attached to the data X.
.. versionadded:: 0.20
Returns
-------
self : object
Returns the instance itself.
"""
algorithm = self._choose_algorithm(self.algorithm, self.metric)
if isinstance(self.bandwidth, str):
if self.bandwidth == "scott":
self.bandwidth_ = X.shape[0] ** (-1 / (X.shape[1] + 4))
elif self.bandwidth == "silverman":
self.bandwidth_ = (X.shape[0] * (X.shape[1] + 2) / 4) ** (
-1 / (X.shape[1] + 4)
)
else:
self.bandwidth_ = self.bandwidth
X = validate_data(self, X, order="C", dtype=np.float64)
if sample_weight is not None:
sample_weight = _check_sample_weight(
sample_weight, X, dtype=np.float64, ensure_non_negative=True
)
kwargs = self.metric_params
if kwargs is None:
kwargs = {}
self.tree_ = TREE_DICT[algorithm](
X,
metric=self.metric,
leaf_size=self.leaf_size,
sample_weight=sample_weight,
**kwargs,
)
return self
def score_samples(self, X):
"""Compute the log-likelihood of each sample under the model.
Parameters
----------
X : array-like of shape (n_samples, n_features)
An array of points to query. Last dimension should match dimension
of training data (n_features).
Returns
-------
density : ndarray of shape (n_samples,)
Log-likelihood of each sample in `X`. These are normalized to be
probability densities, so values will be low for high-dimensional
data.
"""
check_is_fitted(self)
# The returned density is normalized to the number of points.
# For it to be a probability, we must scale it. For this reason
# we'll also scale atol.
X = validate_data(self, X, order="C", dtype=np.float64, reset=False)
if self.tree_.sample_weight is None:
N = self.tree_.data.shape[0]
else:
N = self.tree_.sum_weight
atol_N = self.atol * N
log_density = self.tree_.kernel_density(
X,
h=self.bandwidth_,
kernel=self.kernel,
atol=atol_N,
rtol=self.rtol,
breadth_first=self.breadth_first,
return_log=True,
)
log_density -= np.log(N)
return log_density
def score(self, X, y=None):
"""Compute the total log-likelihood under the model.
Parameters
----------
X : array-like of shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
y : None
Ignored. This parameter exists only for compatibility with
:class:`~sklearn.pipeline.Pipeline`.
Returns
-------
logprob : float
Total log-likelihood of the data in X. This is normalized to be a
probability density, so the value will be low for high-dimensional
data.
"""
return np.sum(self.score_samples(X))
    def sample(self, n_samples=1, random_state=None):
        """Generate random samples from the model.
        Currently, this is implemented only for gaussian and tophat kernels.
        Parameters
        ----------
        n_samples : int, default=1
            Number of samples to generate.
        random_state : int, RandomState instance or None, default=None
            Determines random number generation used to generate
            random samples. Pass an int for reproducible results
            across multiple function calls.
            See :term:`Glossary <random_state>`.
        Returns
        -------
        X : array-like of shape (n_samples, n_features)
            List of samples.
        """
        check_is_fitted(self)
        # TODO: implement sampling for other valid kernel shapes
        if self.kernel not in ["gaussian", "tophat"]:
            raise NotImplementedError()
        data = np.asarray(self.tree_.data)
        rng = check_random_state(random_state)
        # One uniform draw per requested sample, used to pick a training point.
        u = rng.uniform(0, 1, size=n_samples)
        if self.tree_.sample_weight is None:
            # Unweighted: choose training points uniformly at random.
            i = (u * data.shape[0]).astype(np.int64)
        else:
            # Weighted: invert the empirical CDF of the sample weights.
            cumsum_weight = np.cumsum(np.asarray(self.tree_.sample_weight))
            sum_weight = cumsum_weight[-1]
            i = np.searchsorted(cumsum_weight, u * sum_weight)
        if self.kernel == "gaussian":
            # Gaussian kernel: add N(0, bandwidth_) noise around each chosen point.
            return np.atleast_2d(rng.normal(data[i], self.bandwidth_))
        elif self.kernel == "tophat":
            # we first draw points from a d-dimensional normal distribution,
            # then use an incomplete gamma function to map them to a uniform
            # d-dimensional tophat distribution.
            dim = data.shape[1]
            X = rng.normal(size=(n_samples, dim))
            s_sq = row_norms(X, squared=True)
            # Radial rescaling turning the Gaussian draws into points that are
            # uniform within a ball of radius `bandwidth_` around each center.
            correction = (
                gammainc(0.5 * dim, 0.5 * s_sq) ** (1.0 / dim)
                * self.bandwidth_
                / np.sqrt(s_sq)
            )
            return data[i] + X * correction[:, np.newaxis]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/neighbors/_unsupervised.py | sklearn/neighbors/_unsupervised.py | """Unsupervised nearest neighbors learner"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from sklearn.base import _fit_context
from sklearn.neighbors._base import KNeighborsMixin, NeighborsBase, RadiusNeighborsMixin
class NearestNeighbors(KNeighborsMixin, RadiusNeighborsMixin, NeighborsBase):
    """Unsupervised learner for implementing neighbor searches.
    Read more in the :ref:`User Guide <unsupervised_neighbors>`.
    .. versionadded:: 0.9
    Parameters
    ----------
    n_neighbors : int, default=5
        Number of neighbors to use by default for :meth:`kneighbors` queries.
    radius : float, default=1.0
        Range of parameter space to use by default for :meth:`radius_neighbors`
        queries.
    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
        Algorithm used to compute the nearest neighbors:
        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to :meth:`fit` method.
        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.
    leaf_size : int, default=30
        Leaf size passed to BallTree or KDTree. This can affect the
        speed of the construction and query, as well as the memory
        required to store the tree. The optimal value depends on the
        nature of the problem.
    metric : str or callable, default='minkowski'
        Metric to use for distance computation. Default is "minkowski", which
        results in the standard Euclidean distance when p = 2. See the
        documentation of `scipy.spatial.distance
        <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
        the metrics listed in
        :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
        values.
        If metric is "precomputed", X is assumed to be a distance matrix and
        must be square during fit. X may be a :term:`sparse graph`, in which
        case only "nonzero" elements may be considered neighbors.
        If metric is a callable function, it takes two arrays representing 1D
        vectors as inputs and must return one value indicating the distance
        between those vectors. This works for Scipy's metrics, but is less
        efficient than passing the metric name as a string.
    p : float (positive), default=2
        Parameter for the Minkowski metric from
        sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
    metric_params : dict, default=None
        Additional keyword arguments for the metric function.
    n_jobs : int, default=None
        The number of parallel jobs to run for neighbors search.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    Attributes
    ----------
    effective_metric_ : str
        Metric used to compute distances to neighbors.
    effective_metric_params_ : dict
        Parameters for the metric used to compute distances to neighbors.
    n_features_in_ : int
        Number of features seen during :term:`fit`.
        .. versionadded:: 0.24
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.
        .. versionadded:: 1.0
    n_samples_fit_ : int
        Number of samples in the fitted data.
    See Also
    --------
    KNeighborsClassifier : Classifier implementing the k-nearest neighbors
        vote.
    RadiusNeighborsClassifier : Classifier implementing a vote among neighbors
        within a given radius.
    KNeighborsRegressor : Regression based on k-nearest neighbors.
    RadiusNeighborsRegressor : Regression based on neighbors within a fixed
        radius.
    BallTree : Space partitioning data structure for organizing points in a
        multi-dimensional space, used for nearest neighbor search.
    Notes
    -----
    See :ref:`Nearest Neighbors <neighbors>` in the online documentation
    for a discussion of the choice of ``algorithm`` and ``leaf_size``.
    https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.neighbors import NearestNeighbors
    >>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
    >>> neigh = NearestNeighbors(n_neighbors=2, radius=0.4)
    >>> neigh.fit(samples)
    NearestNeighbors(...)
    >>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
    array([[2, 0]]...)
    >>> nbrs = neigh.radius_neighbors(
    ...    [[0, 0, 1.3]], 0.4, return_distance=False
    ... )
    >>> np.asarray(nbrs[0][0])
    array(2)
    """
    def __init__(
        self,
        *,
        n_neighbors=5,
        radius=1.0,
        algorithm="auto",
        leaf_size=30,
        metric="minkowski",
        p=2,
        metric_params=None,
        n_jobs=None,
    ):
        # Pure parameter pass-through: all configuration is stored by
        # NeighborsBase; this class adds no state of its own.
        super().__init__(
            n_neighbors=n_neighbors,
            radius=radius,
            algorithm=algorithm,
            leaf_size=leaf_size,
            metric=metric,
            p=p,
            metric_params=metric_params,
            n_jobs=n_jobs,
        )
    @_fit_context(
        # NearestNeighbors.metric is not validated yet
        prefer_skip_nested_validation=False
    )
    def fit(self, X, y=None):
        """Fit the nearest neighbors estimator from the training dataset.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
            (n_samples, n_samples) if metric='precomputed'
            Training data.
        y : Ignored
            Not used, present for API consistency by convention.
        Returns
        -------
        self : NearestNeighbors
            The fitted nearest neighbors estimator.
        """
        # Index construction (tree or brute-force) is delegated to the
        # shared `_fit` implementation inherited from NeighborsBase.
        return self._fit(X)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/neighbors/_lof.py | sklearn/neighbors/_lof.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from numbers import Real
import numpy as np
from sklearn.base import OutlierMixin, _fit_context
from sklearn.neighbors._base import KNeighborsMixin, NeighborsBase
from sklearn.utils import check_array
from sklearn.utils._param_validation import Interval, StrOptions
from sklearn.utils.metaestimators import available_if
from sklearn.utils.validation import check_is_fitted
# Public API of this module.
__all__ = ["LocalOutlierFactor"]
class LocalOutlierFactor(KNeighborsMixin, OutlierMixin, NeighborsBase):
    """Unsupervised Outlier Detection using the Local Outlier Factor (LOF).
    The anomaly score of each sample is called the Local Outlier Factor.
    It measures the local deviation of the density of a given sample with respect
    to its neighbors.
    It is local in that the anomaly score depends on how isolated the object
    is with respect to the surrounding neighborhood.
    More precisely, locality is given by k-nearest neighbors, whose distance
    is used to estimate the local density.
    By comparing the local density of a sample to the local densities of its
    neighbors, one can identify samples that have a substantially lower density
    than their neighbors. These are considered outliers.
    .. versionadded:: 0.19
    Parameters
    ----------
    n_neighbors : int, default=20
        Number of neighbors to use by default for :meth:`kneighbors` queries.
        If n_neighbors is larger than the number of samples provided,
        all samples will be used.
    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
        Algorithm used to compute the nearest neighbors:
        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to :meth:`fit` method.
        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.
    leaf_size : int, default=30
        Leaf size passed to :class:`BallTree` or :class:`KDTree`. This can
        affect the speed of the construction and query, as well as the memory
        required to store the tree. The optimal value depends on the
        nature of the problem.
    metric : str or callable, default='minkowski'
        Metric to use for distance computation. Default is "minkowski", which
        results in the standard Euclidean distance when p = 2. See the
        documentation of `scipy.spatial.distance
        <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
        the metrics listed in
        :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
        values.
        If metric is "precomputed", X is assumed to be a distance matrix and
        must be square during fit. X may be a :term:`sparse graph`, in which
        case only "nonzero" elements may be considered neighbors.
        If metric is a callable function, it takes two arrays representing 1D
        vectors as inputs and must return one value indicating the distance
        between those vectors. This works for Scipy's metrics, but is less
        efficient than passing the metric name as a string.
    p : float, default=2
        Parameter for the Minkowski metric from
        :func:`sklearn.metrics.pairwise_distances`. When p = 1, this
        is equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
    metric_params : dict, default=None
        Additional keyword arguments for the metric function.
    contamination : 'auto' or float, default='auto'
        The amount of contamination of the data set, i.e. the proportion
        of outliers in the data set. When fitting this is used to define the
        threshold on the scores of the samples.
        - if 'auto', the threshold is determined as in the
          original paper,
        - if a float, the contamination should be in the range (0, 0.5].
        .. versionchanged:: 0.22
           The default value of ``contamination`` changed from 0.1
           to ``'auto'``.
    novelty : bool, default=False
        By default, LocalOutlierFactor is only meant to be used for outlier
        detection (novelty=False). Set novelty to True if you want to use
        LocalOutlierFactor for novelty detection. In this case be aware that
        you should only use predict, decision_function and score_samples
        on new unseen data and not on the training set; and note that the
        results obtained this way may differ from the standard LOF results.
        .. versionadded:: 0.20
    n_jobs : int, default=None
        The number of parallel jobs to run for neighbors search.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    Attributes
    ----------
    negative_outlier_factor_ : ndarray of shape (n_samples,)
        The opposite LOF of the training samples. The higher, the more normal.
        Inliers tend to have a LOF score close to 1
        (``negative_outlier_factor_`` close to -1), while outliers tend to have
        a larger LOF score.
        The local outlier factor (LOF) of a sample captures its
        supposed 'degree of abnormality'.
        It is the average of the ratio of the local reachability density of
        a sample and those of its k-nearest neighbors.
    n_neighbors_ : int
        The actual number of neighbors used for :meth:`kneighbors` queries.
    offset_ : float
        Offset used to obtain binary labels from the raw scores.
        Observations having a negative_outlier_factor smaller than `offset_`
        are detected as abnormal.
        The offset is set to -1.5 (inliers score around -1), except when a
        contamination parameter different than "auto" is provided. In that
        case, the offset is defined in such a way we obtain the expected
        number of outliers in training.
        .. versionadded:: 0.20
    effective_metric_ : str
        The effective metric used for the distance computation.
    effective_metric_params_ : dict
        The effective additional keyword arguments for the metric function.
    n_features_in_ : int
        Number of features seen during :term:`fit`.
        .. versionadded:: 0.24
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.
        .. versionadded:: 1.0
    n_samples_fit_ : int
        It is the number of samples in the fitted data.
    See Also
    --------
    sklearn.svm.OneClassSVM: Unsupervised Outlier Detection using
        Support Vector Machine.
    References
    ----------
    .. [1] Breunig, M. M., Kriegel, H. P., Ng, R. T., & Sander, J. (2000, May).
       `LOF: identifying density-based local outliers.
       <https://dl.acm.org/doi/pdf/10.1145/342009.335388>`_
       In Proceedings of the 2000 ACM SIGMOD International Conference on
       Management of Data, pp. 93-104.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.neighbors import LocalOutlierFactor
    >>> X = [[-1.1], [0.2], [101.1], [0.3]]
    >>> clf = LocalOutlierFactor(n_neighbors=2)
    >>> clf.fit_predict(X)
    array([ 1,  1, -1,  1])
    >>> clf.negative_outlier_factor_
    array([ -0.9821,  -1.0370, -73.3697,  -0.9821])
    """
    # Declarative parameter validation consumed by `_fit_context`.
    _parameter_constraints: dict = {
        **NeighborsBase._parameter_constraints,
        "contamination": [
            StrOptions({"auto"}),
            Interval(Real, 0, 0.5, closed="right"),
        ],
        "novelty": ["boolean"],
    }
    # LOF has no radius-based mode; drop the inherited constraint.
    _parameter_constraints.pop("radius")
    def __init__(
        self,
        n_neighbors=20,
        *,
        algorithm="auto",
        leaf_size=30,
        metric="minkowski",
        p=2,
        metric_params=None,
        contamination="auto",
        novelty=False,
        n_jobs=None,
    ):
        # Store parameters verbatim; neighbor-search configuration is held
        # by NeighborsBase, LOF-specific knobs are kept on this instance.
        super().__init__(
            n_neighbors=n_neighbors,
            algorithm=algorithm,
            leaf_size=leaf_size,
            metric=metric,
            p=p,
            metric_params=metric_params,
            n_jobs=n_jobs,
        )
        self.contamination = contamination
        self.novelty = novelty
    def _check_novelty_fit_predict(self):
        # Guard used by `available_if`: fit_predict is only exposed in
        # outlier-detection mode (novelty=False).
        if self.novelty:
            msg = (
                "fit_predict is not available when novelty=True. Use "
                "novelty=False if you want to predict on the training set."
            )
            raise AttributeError(msg)
        return True
    @available_if(_check_novelty_fit_predict)
    def fit_predict(self, X, y=None):
        """Fit the model to the training set X and return the labels.
        **Not available for novelty detection (when novelty is set to True).**
        Label is 1 for an inlier and -1 for an outlier according to the LOF
        score and the contamination parameter.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The query sample or samples to compute the Local Outlier Factor
            w.r.t. the training samples.
        y : Ignored
            Not used, present for API consistency by convention.
        Returns
        -------
        is_inlier : ndarray of shape (n_samples,)
            Returns -1 for anomalies/outliers and 1 for inliers.
        """
        # As fit_predict would be different from fit.predict, fit_predict is
        # only available for outlier detection (novelty=False)
        return self.fit(X)._predict()
    @_fit_context(
        # LocalOutlierFactor.metric is not validated yet
        prefer_skip_nested_validation=False
    )
    def fit(self, X, y=None):
        """Fit the local outlier factor detector from the training dataset.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
            (n_samples, n_samples) if metric='precomputed'
            Training data.
        y : Ignored
            Not used, present for API consistency by convention.
        Returns
        -------
        self : LocalOutlierFactor
            The fitted local outlier factor detector.
        """
        self._fit(X)
        n_samples = self.n_samples_fit_
        if self.n_neighbors > n_samples:
            warnings.warn(
                "n_neighbors (%s) is greater than the "
                "total number of samples (%s). n_neighbors "
                "will be set to (n_samples - 1) for estimation."
                % (self.n_neighbors, n_samples)
            )
        self.n_neighbors_ = max(1, min(self.n_neighbors, n_samples - 1))
        # k-NN of every training point; querying without X means a point is
        # not counted among its own neighbors (see `_predict` docstring).
        self._distances_fit_X_, _neighbors_indices_fit_X_ = self.kneighbors(
            n_neighbors=self.n_neighbors_
        )
        if self._fit_X.dtype == np.float32:
            # Keep distances in the training dtype so downstream arithmetic
            # does not silently upcast float32 pipelines.
            self._distances_fit_X_ = self._distances_fit_X_.astype(
                self._fit_X.dtype,
                copy=False,
            )
        self._lrd = self._local_reachability_density(
            self._distances_fit_X_, _neighbors_indices_fit_X_
        )
        # Compute lof score over training samples to define offset_:
        lrd_ratios_array = (
            self._lrd[_neighbors_indices_fit_X_] / self._lrd[:, np.newaxis]
        )
        self.negative_outlier_factor_ = -np.mean(lrd_ratios_array, axis=1)
        if self.contamination == "auto":
            # inliers score around -1 (the higher, the less abnormal).
            self.offset_ = -1.5
        else:
            # Place the threshold so the requested fraction of training
            # samples falls below it.
            self.offset_ = np.percentile(
                self.negative_outlier_factor_, 100.0 * self.contamination
            )
        # Verify if negative_outlier_factor_ values are within acceptable range.
        # Novelty must also be false to detect outliers
        if np.min(self.negative_outlier_factor_) < -1e7 and not self.novelty:
            warnings.warn(
                "Duplicate values are leading to incorrect results. "
                "Increase the number of neighbors for more accurate results."
            )
        return self
    def _check_novelty_predict(self):
        # Guard used by `available_if`: predict is only exposed in novelty mode.
        if not self.novelty:
            msg = (
                "predict is not available when novelty=False, use "
                "fit_predict if you want to predict on training data. Use "
                "novelty=True if you want to use LOF for novelty detection "
                "and predict on new unseen data."
            )
            raise AttributeError(msg)
        return True
    @available_if(_check_novelty_predict)
    def predict(self, X=None):
        """Predict the labels (1 inlier, -1 outlier) of X according to LOF.
        **Only available for novelty detection (when novelty is set to True).**
        This method allows to generalize prediction to *new observations* (not
        in the training set). Note that the result of ``clf.fit(X)`` then
        ``clf.predict(X)`` with ``novelty=True`` may differ from the result
        obtained by ``clf.fit_predict(X)`` with ``novelty=False``.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The query sample or samples to compute the Local Outlier Factor
            w.r.t. the training samples.
        Returns
        -------
        is_inlier : ndarray of shape (n_samples,)
            Returns -1 for anomalies/outliers and +1 for inliers.
        """
        return self._predict(X)
    def _predict(self, X=None):
        """Predict the labels (1 inlier, -1 outlier) of X according to LOF.
        If X is None, returns the same as fit_predict(X_train).
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features), default=None
            The query sample or samples to compute the Local Outlier Factor
            w.r.t. the training samples. If None, makes prediction on the
            training data without considering them as their own neighbors.
        Returns
        -------
        is_inlier : ndarray of shape (n_samples,)
            Returns -1 for anomalies/outliers and +1 for inliers.
        """
        check_is_fitted(self)
        if X is not None:
            # Novelty path: threshold the shifted scores at zero.
            shifted_opposite_lof_scores = self.decision_function(X)
            is_inlier = np.ones(shifted_opposite_lof_scores.shape[0], dtype=int)
            is_inlier[shifted_opposite_lof_scores < 0] = -1
        else:
            # Outlier-detection path: reuse the training scores and offset_.
            is_inlier = np.ones(self.n_samples_fit_, dtype=int)
            is_inlier[self.negative_outlier_factor_ < self.offset_] = -1
        return is_inlier
    def _check_novelty_decision_function(self):
        # Guard used by `available_if`: only exposed in novelty mode.
        if not self.novelty:
            msg = (
                "decision_function is not available when novelty=False. "
                "Use novelty=True if you want to use LOF for novelty "
                "detection and compute decision_function for new unseen "
                "data. Note that the opposite LOF of the training samples "
                "is always available by considering the "
                "negative_outlier_factor_ attribute."
            )
            raise AttributeError(msg)
        return True
    @available_if(_check_novelty_decision_function)
    def decision_function(self, X):
        """Shifted opposite of the Local Outlier Factor of X.
        Bigger is better, i.e. large values correspond to inliers.
        **Only available for novelty detection (when novelty is set to True).**
        The shift offset allows a zero threshold for being an outlier.
        The argument X is supposed to contain *new data*: if X contains a
        point from training, it considers the later in its own neighborhood.
        Also, the samples in X are not considered in the neighborhood of any
        point.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The query sample or samples to compute the Local Outlier Factor
            w.r.t. the training samples.
        Returns
        -------
        shifted_opposite_lof_scores : ndarray of shape (n_samples,)
            The shifted opposite of the Local Outlier Factor of each input
            samples. The lower, the more abnormal. Negative scores represent
            outliers, positive scores represent inliers.
        """
        return self.score_samples(X) - self.offset_
    def _check_novelty_score_samples(self):
        # Guard used by `available_if`: only exposed in novelty mode.
        if not self.novelty:
            msg = (
                "score_samples is not available when novelty=False. The "
                "scores of the training samples are always available "
                "through the negative_outlier_factor_ attribute. Use "
                "novelty=True if you want to use LOF for novelty detection "
                "and compute score_samples for new unseen data."
            )
            raise AttributeError(msg)
        return True
    @available_if(_check_novelty_score_samples)
    def score_samples(self, X):
        """Opposite of the Local Outlier Factor of X.
        It is the opposite as bigger is better, i.e. large values correspond
        to inliers.
        **Only available for novelty detection (when novelty is set to True).**
        The argument X is supposed to contain *new data*: if X contains a
        point from training, it considers the later in its own neighborhood.
        Also, the samples in X are not considered in the neighborhood of any
        point. Because of this, the scores obtained via ``score_samples`` may
        differ from the standard LOF scores.
        The standard LOF scores for the training data is available via the
        ``negative_outlier_factor_`` attribute.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The query sample or samples to compute the Local Outlier Factor
            w.r.t. the training samples.
        Returns
        -------
        opposite_lof_scores : ndarray of shape (n_samples,)
            The opposite of the Local Outlier Factor of each input samples.
            The lower, the more abnormal.
        """
        check_is_fitted(self)
        X = check_array(X, accept_sparse="csr")
        distances_X, neighbors_indices_X = self.kneighbors(
            X, n_neighbors=self.n_neighbors_
        )
        if X.dtype == np.float32:
            # Mirror the dtype handling in `fit` for float32 inputs.
            distances_X = distances_X.astype(X.dtype, copy=False)
        X_lrd = self._local_reachability_density(
            distances_X,
            neighbors_indices_X,
        )
        lrd_ratios_array = self._lrd[neighbors_indices_X] / X_lrd[:, np.newaxis]
        # as bigger is better:
        return -np.mean(lrd_ratios_array, axis=1)
    def _local_reachability_density(self, distances_X, neighbors_indices):
        """The local reachability density (LRD)
        The LRD of a sample is the inverse of the average reachability
        distance of its k-nearest neighbors.
        Parameters
        ----------
        distances_X : ndarray of shape (n_queries, self.n_neighbors)
            Distances to the neighbors (in the training samples `self._fit_X`)
            of each query point to compute the LRD.
        neighbors_indices : ndarray of shape (n_queries, self.n_neighbors)
            Neighbors indices (of each query point) among training samples
            self._fit_X.
        Returns
        -------
        local_reachability_density : ndarray of shape (n_queries,)
            The local reachability density of each sample.
        """
        # dist_k[i, j]: distance from the j-th neighbor of query i to its own
        # farthest (k-th) neighbor, i.e. that neighbor's k-distance.
        dist_k = self._distances_fit_X_[neighbors_indices, self.n_neighbors_ - 1]
        # Reachability distance: max(d(query, neighbor), k-distance(neighbor)).
        reach_dist_array = np.maximum(distances_X, dist_k)
        # 1e-10 to avoid `nan' when nb of duplicates > n_neighbors_:
        return 1.0 / (np.mean(reach_dist_array, axis=1) + 1e-10)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/neighbors/_regression.py | sklearn/neighbors/_regression.py | """Nearest Neighbor Regression."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
import numpy as np
from sklearn.base import RegressorMixin, _fit_context
from sklearn.metrics import DistanceMetric
from sklearn.neighbors._base import (
KNeighborsMixin,
NeighborsBase,
RadiusNeighborsMixin,
_get_weights,
)
from sklearn.utils._param_validation import StrOptions
class KNeighborsRegressor(KNeighborsMixin, RegressorMixin, NeighborsBase):
"""Regression based on k-nearest neighbors.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
.. versionadded:: 0.9
Parameters
----------
n_neighbors : int, default=5
Number of neighbors to use by default for :meth:`kneighbors` queries.
weights : {'uniform', 'distance'}, callable or None, default='uniform'
Weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
See the following example for a demonstration of the impact of
different weighting schemes on predictions:
:ref:`sphx_glr_auto_examples_neighbors_plot_regression.py`.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, default=30
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p : float, default=2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : str, DistanceMetric object or callable, default='minkowski'
Metric to use for distance computation. Default is "minkowski", which
results in the standard Euclidean distance when p = 2. See the
documentation of `scipy.spatial.distance
<https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
the metrics listed in
:class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
values.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square during fit. X may be a :term:`sparse graph`, in which
case only "nonzero" elements may be considered neighbors.
If metric is a callable function, it takes two arrays representing 1D
vectors as inputs and must return one value indicating the distance
between those vectors. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
If metric is a DistanceMetric object, it will be passed directly to
the underlying computation routines.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Doesn't affect :meth:`fit` method.
Attributes
----------
effective_metric_ : str or callable
The distance metric to use. It will be same as the `metric` parameter
or a synonym of it, e.g. 'euclidean' if the `metric` parameter set to
'minkowski' and `p` parameter set to 2.
effective_metric_params_ : dict
Additional keyword arguments for the metric function. For most metrics
will be same with `metric_params` parameter, but may also contain the
`p` parameter value if the `effective_metric_` attribute is set to
'minkowski'.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_samples_fit_ : int
Number of samples in the fitted data.
See Also
--------
NearestNeighbors : Unsupervised learner for implementing neighbor searches.
RadiusNeighborsRegressor : Regression based on neighbors within a fixed radius.
KNeighborsClassifier : Classifier implementing the k-nearest neighbors vote.
RadiusNeighborsClassifier : Classifier implementing
a vote among neighbors within a given radius.
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances but
different labels, the results will depend on the ordering of the
training data.
https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsRegressor
>>> neigh = KNeighborsRegressor(n_neighbors=2)
>>> neigh.fit(X, y)
KNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[0.5]
"""
_parameter_constraints: dict = {
**NeighborsBase._parameter_constraints,
"weights": [StrOptions({"uniform", "distance"}), callable, None],
}
_parameter_constraints["metric"].append(DistanceMetric)
_parameter_constraints.pop("radius")
def __init__(
self,
n_neighbors=5,
*,
weights="uniform",
algorithm="auto",
leaf_size=30,
p=2,
metric="minkowski",
metric_params=None,
n_jobs=None,
):
super().__init__(
n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric,
p=p,
metric_params=metric_params,
n_jobs=n_jobs,
)
self.weights = weights
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
# For cross-validation routines to split data correctly
tags.input_tags.pairwise = self.metric == "precomputed"
return tags
    @_fit_context(
        # KNeighborsRegressor.metric is not validated yet
        prefer_skip_nested_validation=False
    )
    def fit(self, X, y):
        """Fit the k-nearest neighbors regressor from the training dataset.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
                (n_samples, n_samples) if metric='precomputed'
            Training data.

        y : {array-like, sparse matrix} of shape (n_samples,) or \
                (n_samples, n_outputs)
            Target values.

        Returns
        -------
        self : KNeighborsRegressor
            The fitted k-nearest neighbors regressor.
        """
        # All indexing work (validation, tree construction, ...) is shared
        # with the other neighbors-based estimators via NeighborsBase._fit.
        return self._fit(X, y)
def predict(self, X):
"""Predict the target for the provided data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_queries, n_features), \
or (n_queries, n_indexed) if metric == 'precomputed', or None
Test samples. If `None`, predictions for all indexed points are
returned; in this case, points are not considered their own
neighbors.
Returns
-------
y : ndarray of shape (n_queries,) or (n_queries, n_outputs), dtype=int
Target values.
"""
if self.weights == "uniform":
# In that case, we do not need the distances to perform
# the weighting so we do not compute them.
neigh_ind = self.kneighbors(X, return_distance=False)
neigh_dist = None
else:
neigh_dist, neigh_ind = self.kneighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.mean(_y[neigh_ind], axis=1)
else:
y_pred = np.empty((neigh_dist.shape[0], _y.shape[1]), dtype=np.float64)
denom = np.sum(weights, axis=1)
for j in range(_y.shape[1]):
num = np.sum(_y[neigh_ind, j] * weights, axis=1)
y_pred[:, j] = num / denom
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
class RadiusNeighborsRegressor(RadiusNeighborsMixin, RegressorMixin, NeighborsBase):
    """Regression based on neighbors within a fixed radius.

    The target is predicted by local interpolation of the targets
    associated with the nearest neighbors in the training set.

    Read more in the :ref:`User Guide <regression>`.

    .. versionadded:: 0.9

    Parameters
    ----------
    radius : float, default=1.0
        Range of parameter space to use by default for :meth:`radius_neighbors`
        queries.

    weights : {'uniform', 'distance'}, callable or None, default='uniform'
        Weight function used in prediction. Possible values:

        - 'uniform' : uniform weights. All points in each neighborhood
          are weighted equally.
        - 'distance' : weight points by the inverse of their distance.
          In this case, closer neighbors of a query point will have a
          greater influence than neighbors which are further away.
        - [callable] : a user-defined function which accepts an
          array of distances, and returns an array of the same shape
          containing the weights.

        Uniform weights are used by default.

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto'
        Algorithm used to compute the nearest neighbors:

        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to :meth:`fit` method.

        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.

    leaf_size : int, default=30
        Leaf size passed to BallTree or KDTree. This can affect the
        speed of the construction and query, as well as the memory
        required to store the tree. The optimal value depends on the
        nature of the problem.

    p : float, default=2
        Power parameter for the Minkowski metric. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.

    metric : str or callable, default='minkowski'
        Metric to use for distance computation. Default is "minkowski", which
        results in the standard Euclidean distance when p = 2. See the
        documentation of `scipy.spatial.distance
        <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>`_ and
        the metrics listed in
        :class:`~sklearn.metrics.pairwise.distance_metrics` for valid metric
        values.

        If metric is "precomputed", X is assumed to be a distance matrix and
        must be square during fit. X may be a :term:`sparse graph`, in which
        case only "nonzero" elements may be considered neighbors.

        If metric is a callable function, it takes two arrays representing 1D
        vectors as inputs and must return one value indicating the distance
        between those vectors. This works for Scipy's metrics, but is less
        efficient than passing the metric name as a string.

    metric_params : dict, default=None
        Additional keyword arguments for the metric function.

    n_jobs : int, default=None
        The number of parallel jobs to run for neighbors search.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    Attributes
    ----------
    effective_metric_ : str or callable
        The distance metric to use. It will be the same as the `metric`
        parameter or a synonym of it, e.g. 'euclidean' if the `metric`
        parameter is set to 'minkowski' and the `p` parameter is set to 2.

    effective_metric_params_ : dict
        Additional keyword arguments for the metric function. For most metrics
        it will be the same as the `metric_params` parameter, but it may also
        contain the `p` parameter value if the `effective_metric_` attribute
        is set to 'minkowski'.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    n_samples_fit_ : int
        Number of samples in the fitted data.

    See Also
    --------
    NearestNeighbors : Unsupervised learner for implementing neighbor searches.
    KNeighborsRegressor : Regression based on k-nearest neighbors.
    KNeighborsClassifier : Classifier based on the k-nearest neighbors.
    RadiusNeighborsClassifier : Classifier based on neighbors within a given radius.

    Notes
    -----
    See :ref:`Nearest Neighbors <neighbors>` in the online documentation
    for a discussion of the choice of ``algorithm`` and ``leaf_size``.

    https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm

    Examples
    --------
    >>> X = [[0], [1], [2], [3]]
    >>> y = [0, 0, 1, 1]
    >>> from sklearn.neighbors import RadiusNeighborsRegressor
    >>> neigh = RadiusNeighborsRegressor(radius=1.0)
    >>> neigh.fit(X, y)
    RadiusNeighborsRegressor(...)
    >>> print(neigh.predict([[1.5]]))
    [0.5]
    """

    _parameter_constraints: dict = {
        **NeighborsBase._parameter_constraints,
        "weights": [StrOptions({"uniform", "distance"}), callable, None],
    }
    # A radius-neighbors estimator has no `n_neighbors` parameter.
    _parameter_constraints.pop("n_neighbors")

    def __init__(
        self,
        radius=1.0,
        *,
        weights="uniform",
        algorithm="auto",
        leaf_size=30,
        p=2,
        metric="minkowski",
        metric_params=None,
        n_jobs=None,
    ):
        super().__init__(
            radius=radius,
            algorithm=algorithm,
            leaf_size=leaf_size,
            p=p,
            metric=metric,
            metric_params=metric_params,
            n_jobs=n_jobs,
        )
        self.weights = weights

    @_fit_context(
        # RadiusNeighborsRegressor.metric is not validated yet
        prefer_skip_nested_validation=False
    )
    def fit(self, X, y):
        """Fit the radius neighbors regressor from the training dataset.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
                (n_samples, n_samples) if metric='precomputed'
            Training data.

        y : {array-like, sparse matrix} of shape (n_samples,) or \
                (n_samples, n_outputs)
            Target values.

        Returns
        -------
        self : RadiusNeighborsRegressor
            The fitted radius neighbors regressor.
        """
        return self._fit(X, y)

    def predict(self, X):
        """Predict the target for the provided data.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_queries, n_features), \
                or (n_queries, n_indexed) if metric == 'precomputed', or None
            Test samples. If `None`, predictions for all indexed points are
            returned; in this case, points are not considered their own
            neighbors.

        Returns
        -------
        y : ndarray of shape (n_queries,) or (n_queries, n_outputs), \
                dtype=double
            Target values.
        """
        if self.weights == "uniform":
            # Uniform weighting does not use the distances, so skip
            # computing them (consistent with KNeighborsRegressor.predict).
            neigh_ind = self.radius_neighbors(X, return_distance=False)
            neigh_dist = None
        else:
            neigh_dist, neigh_ind = self.radius_neighbors(X)

        weights = _get_weights(neigh_dist, self.weights)

        _y = self._y
        if _y.ndim == 1:
            _y = _y.reshape((-1, 1))

        # Queries with an empty neighborhood are predicted as NaN.
        empty_obs = np.full_like(_y[0], np.nan)

        if weights is None:
            y_pred = np.array(
                [
                    np.mean(_y[ind, :], axis=0) if len(ind) else empty_obs
                    for (i, ind) in enumerate(neigh_ind)
                ]
            )
        else:
            y_pred = np.array(
                [
                    (
                        np.average(_y[ind, :], axis=0, weights=weights[i])
                        if len(ind)
                        else empty_obs
                    )
                    for (i, ind) in enumerate(neigh_ind)
                ]
            )

        if np.any(np.isnan(y_pred)):
            empty_warning_msg = (
                "One or more samples have no neighbors "
                "within specified radius; predicting NaN."
            )
            warnings.warn(empty_warning_msg)

        if self._y.ndim == 1:
            y_pred = y_pred.ravel()

        return y_pred
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/neighbors/tests/test_neighbors_pipeline.py | sklearn/neighbors/tests/test_neighbors_pipeline.py | """
This is testing the equivalence between some estimators with internal nearest
neighbors computations, and the corresponding pipeline versions with
KNeighborsTransformer or RadiusNeighborsTransformer to precompute the
neighbors.
"""
import numpy as np
from sklearn.base import clone
from sklearn.cluster import DBSCAN, SpectralClustering
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.datasets import make_blobs
from sklearn.manifold import TSNE, Isomap, SpectralEmbedding
from sklearn.neighbors import (
KNeighborsRegressor,
KNeighborsTransformer,
LocalOutlierFactor,
RadiusNeighborsRegressor,
RadiusNeighborsTransformer,
)
from sklearn.pipeline import make_pipeline
from sklearn.utils._testing import assert_array_almost_equal
def test_spectral_clustering():
    # SpectralClustering on a precomputed KNN connectivity graph must match
    # SpectralClustering computing its own nearest-neighbors affinity.
    n_neighbors = 5
    X, _ = make_blobs(random_state=0)

    est_chain = make_pipeline(
        KNeighborsTransformer(n_neighbors=n_neighbors, mode="connectivity"),
        SpectralClustering(
            n_neighbors=n_neighbors, affinity="precomputed", random_state=42
        ),
    )
    est_compact = SpectralClustering(
        n_neighbors=n_neighbors, affinity="nearest_neighbors", random_state=42
    )

    labels_compact = est_compact.fit_predict(X)
    labels_chain = est_chain.fit_predict(X)
    assert_array_almost_equal(labels_chain, labels_compact)
def test_spectral_embedding():
    # SpectralEmbedding fed a precomputed KNN connectivity graph must match
    # SpectralEmbedding computing its own nearest-neighbors affinity.
    n_neighbors = 5
    n_samples = 1000
    centers = np.array(
        [
            [0.0, 5.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 4.0, 0.0, 0.0],
            [1.0, 0.0, 0.0, 5.0, 1.0],
        ]
    )
    S, true_labels = make_blobs(
        n_samples=n_samples, centers=centers, cluster_std=1.0, random_state=42
    )

    est_chain = make_pipeline(
        KNeighborsTransformer(n_neighbors=n_neighbors, mode="connectivity"),
        SpectralEmbedding(
            n_neighbors=n_neighbors, affinity="precomputed", random_state=42
        ),
    )
    est_compact = SpectralEmbedding(
        n_neighbors=n_neighbors, affinity="nearest_neighbors", random_state=42
    )

    St_compact = est_compact.fit_transform(S)
    St_chain = est_chain.fit_transform(S)
    assert_array_almost_equal(St_chain, St_compact)
def test_dbscan():
    # DBSCAN on a precomputed radius-neighbors distance graph must match
    # DBSCAN computing distances itself.
    radius = 0.3
    n_clusters = 3
    X = generate_clustered_data(n_clusters=n_clusters)

    est_chain = make_pipeline(
        RadiusNeighborsTransformer(radius=radius, mode="distance"),
        DBSCAN(metric="precomputed", eps=radius),
    )
    est_compact = DBSCAN(eps=radius)

    labels_chain = est_chain.fit_predict(X)
    labels_compact = est_compact.fit_predict(X)
    assert_array_almost_equal(labels_chain, labels_compact)
def test_isomap():
    # Isomap on a precomputed KNN distance graph must match Isomap computing
    # its own neighbors, both at fit time and at transform time.
    algorithm = "auto"
    n_neighbors = 10
    X, _ = make_blobs(random_state=0)
    X2, _ = make_blobs(random_state=1)

    est_chain = make_pipeline(
        KNeighborsTransformer(
            n_neighbors=n_neighbors, algorithm=algorithm, mode="distance"
        ),
        Isomap(n_neighbors=n_neighbors, metric="precomputed"),
    )
    est_compact = Isomap(n_neighbors=n_neighbors, neighbors_algorithm=algorithm)

    assert_array_almost_equal(est_chain.fit_transform(X), est_compact.fit_transform(X))
    assert_array_almost_equal(est_chain.transform(X2), est_compact.transform(X2))
def test_tsne():
    # Barnes-Hut TSNE on a precomputed KNN distance graph must match TSNE
    # computing the same metric internally.
    max_iter = 250
    perplexity = 5
    n_neighbors = int(3.0 * perplexity + 1)
    rng = np.random.RandomState(0)
    X = rng.randn(20, 2)

    for metric in ["minkowski", "sqeuclidean"]:
        est_chain = make_pipeline(
            KNeighborsTransformer(
                n_neighbors=n_neighbors, mode="distance", metric=metric
            ),
            TSNE(
                init="random",
                metric="precomputed",
                perplexity=perplexity,
                method="barnes_hut",
                random_state=42,
                max_iter=max_iter,
            ),
        )
        est_compact = TSNE(
            init="random",
            metric=metric,
            perplexity=perplexity,
            max_iter=max_iter,
            method="barnes_hut",
            random_state=42,
        )

        Xt_chain = est_chain.fit_transform(X)
        Xt_compact = est_compact.fit_transform(X)
        assert_array_almost_equal(Xt_chain, Xt_compact)
def test_lof_novelty_false():
    # LOF (novelty=False) on a precomputed KNN distance graph must match the
    # plain estimator.
    n_neighbors = 4
    rng = np.random.RandomState(0)
    X = rng.randn(40, 2)

    lof_params = dict(n_neighbors=n_neighbors, novelty=False, contamination="auto")
    est_chain = make_pipeline(
        KNeighborsTransformer(n_neighbors=n_neighbors, mode="distance"),
        LocalOutlierFactor(metric="precomputed", **lof_params),
    )
    est_compact = LocalOutlierFactor(**lof_params)

    pred_chain = est_chain.fit_predict(X)
    pred_compact = est_compact.fit_predict(X)
    assert_array_almost_equal(pred_chain, pred_compact)
def test_lof_novelty_true():
    # LOF (novelty=True) on a precomputed KNN distance graph must match the
    # plain estimator when predicting on unseen data.
    n_neighbors = 4
    rng = np.random.RandomState(0)
    X1 = rng.randn(40, 2)
    X2 = rng.randn(40, 2)

    lof_params = dict(n_neighbors=n_neighbors, novelty=True, contamination="auto")
    est_chain = make_pipeline(
        KNeighborsTransformer(n_neighbors=n_neighbors, mode="distance"),
        LocalOutlierFactor(metric="precomputed", **lof_params),
    )
    est_compact = LocalOutlierFactor(**lof_params)

    pred_chain = est_chain.fit(X1).predict(X2)
    pred_compact = est_compact.fit(X1).predict(X2)
    assert_array_almost_equal(pred_chain, pred_compact)
def test_kneighbors_regressor():
    # Neighbors regressors fed a precomputed neighbors graph must match the
    # same regressors computing neighbors themselves.
    rng = np.random.RandomState(0)
    X = 2 * rng.rand(40, 5) - 1
    X2 = 2 * rng.rand(40, 5) - 1
    y = rng.rand(40, 1)

    n_neighbors = 12
    radius = 1.5
    # We precompute more neighbors than necessary, to have equivalence between
    # k-neighbors estimator after radius-neighbors transformer, and vice-versa.
    factor = 2

    k_trans = KNeighborsTransformer(n_neighbors=n_neighbors, mode="distance")
    k_trans_factor = KNeighborsTransformer(
        n_neighbors=int(n_neighbors * factor), mode="distance"
    )
    r_trans = RadiusNeighborsTransformer(radius=radius, mode="distance")
    r_trans_factor = RadiusNeighborsTransformer(
        radius=int(radius * factor), mode="distance"
    )

    k_reg = KNeighborsRegressor(n_neighbors=n_neighbors)
    r_reg = RadiusNeighborsRegressor(radius=radius)

    for trans, reg in [
        (k_trans, k_reg),
        (k_trans_factor, r_reg),
        (r_trans, r_reg),
        (r_trans_factor, k_reg),
    ]:
        # compare the chained version and the compact version
        reg_compact = clone(reg)
        reg_precomp = clone(reg)
        reg_precomp.set_params(metric="precomputed")
        reg_chain = make_pipeline(clone(trans), reg_precomp)

        y_pred_chain = reg_chain.fit(X, y).predict(X2)
        y_pred_compact = reg_compact.fit(X, y).predict(X2)
        assert_array_almost_equal(y_pred_chain, y_pred_compact)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/neighbors/tests/test_quad_tree.py | sklearn/neighbors/tests/test_quad_tree.py | import pickle
import numpy as np
import pytest
from sklearn.neighbors._quad_tree import _QuadTree
from sklearn.utils import check_random_state
def test_quadtree_boundary_computation():
    # Building a quad tree must yield coherent boundaries even for inputs
    # whose bounding boxes are awkward to compute.
    cases = [
        # a generic case
        np.array([[-1, 1], [-4, -1]], dtype=np.float32),
        # only zeros inserted
        np.array([[0, 0], [0, 0]], dtype=np.float32),
        # only non-positive coordinates
        np.array([[-1, -2], [-4, 0]], dtype=np.float32),
        # only tiny magnitudes
        np.array([[-1e-6, 1e-6], [-4e-6, -1e-6]], dtype=np.float32),
    ]

    for X in cases:
        tree = _QuadTree(n_dimensions=2, verbose=0)
        tree.build_tree(X)
        tree._check_coherence()
def test_quadtree_similar_point():
    # Inserting a point next to an already-present similar point must
    # terminate (the test would hang otherwise) and leave a coherent tree.
    cases = [
        # clearly distinct points
        np.array([[1, 2], [3, 4]], dtype=np.float32),
        # identical x coordinate
        np.array([[1.0, 2.0], [1.0, 3.0]], dtype=np.float32),
        # arbitrarily close x coordinate
        np.array([[1.00001, 2.0], [1.00002, 3.0]], dtype=np.float32),
        # identical y coordinate
        np.array([[1.0, 2.0], [3.0, 2.0]], dtype=np.float32),
        # arbitrarily close y coordinate
        np.array([[1.0, 2.00001], [3.0, 2.00002]], dtype=np.float32),
        # arbitrarily close on both axes
        np.array([[1.00001, 2.00001], [1.00002, 2.00002]], dtype=np.float32),
        # close to machine epsilon - x axis
        np.array([[1, 0.0003817754041], [2, 0.0003817753750]], dtype=np.float32),
        # close to machine epsilon - y axis
        np.array([[0.0003817754041, 1.0], [0.0003817753750, 2.0]], dtype=np.float32),
    ]

    for X in cases:
        tree = _QuadTree(n_dimensions=2, verbose=0)
        tree.build_tree(X)
        tree._check_coherence()
@pytest.mark.parametrize("n_dimensions", (2, 3))
@pytest.mark.parametrize("protocol", (0, 1, 2))
def test_quad_tree_pickle(n_dimensions, protocol):
rng = check_random_state(0)
X = rng.random_sample((10, n_dimensions))
tree = _QuadTree(n_dimensions=n_dimensions, verbose=0)
tree.build_tree(X)
s = pickle.dumps(tree, protocol=protocol)
bt2 = pickle.loads(s)
for x in X:
cell_x_tree = tree.get_cell(x)
cell_x_bt2 = bt2.get_cell(x)
assert cell_x_tree == cell_x_bt2
@pytest.mark.parametrize("n_dimensions", (2, 3))
def test_qt_insert_duplicate(n_dimensions):
rng = check_random_state(0)
X = rng.random_sample((10, n_dimensions))
# create some duplicates
Xd = np.r_[X, X[:5]]
epsilon = 1e-6
# EPSILON=1e-6 is defined in sklearn/neighbors/_quad_tree.pyx but not
# accessible from Python
# add slight noise: duplicate detection should tolerate tiny numerical differences
Xd += epsilon * (rng.rand(*Xd.shape) - 0.5)
tree = _QuadTree(n_dimensions=n_dimensions, verbose=0)
tree.build_tree(Xd)
cumulative_size = tree.cumulative_size
leafs = tree.leafs
# Assert that the first 5 are indeed duplicated and that the next
# ones are single point leaf
for i, x in enumerate(X):
cell_id = tree.get_cell(x)
assert leafs[cell_id]
assert cumulative_size[cell_id] == 1 + (i < 5)
def test_summarize():
    # Check the quad tree's Barnes-Hut summarization on a toy example where
    # three points are far away from the query point.
    angle = 0.9
    X = np.array(
        [[-10.0, -10.0], [9.0, 10.0], [10.0, 9.0], [10.0, 10.0]], dtype=np.float32
    )
    query_pt = X[0, :]
    n_dimensions = X.shape[1]
    # each summarized node occupies n_dimensions + 2 slots in the summary
    # buffer: the center coordinates, the squared distance and the node size
    offset = n_dimensions + 2

    qt = _QuadTree(n_dimensions, verbose=0)
    qt.build_tree(X)

    # With a large opening angle, the three distant points are summarized by
    # a single node of size 3 located at their barycenter.
    idx, summary = qt._py_summarize(query_pt, X, angle)
    node_dist = summary[n_dimensions]
    node_size = summary[n_dimensions + 1]

    barycenter = X[1:].mean(axis=0)
    ds2c = ((X[0] - barycenter) ** 2).sum()

    assert idx == offset
    assert node_size == 3, "summary size = {}".format(node_size)
    assert np.isclose(node_dist, ds2c)

    # With ``angle=0`` no summarization happens: each of the three points is
    # reported individually with its own squared distance.
    # (The redundant recomputation of the barycenter distance that used to
    # sit here was dead code: the loop below overwrites ``ds2c``.)
    idx, summary = qt._py_summarize(query_pt, X, 0.0)
    assert idx == 3 * (offset)

    for i in range(3):
        node_dist = summary[i * offset + n_dimensions]
        node_size = summary[i * offset + n_dimensions + 1]

        ds2c = ((X[0] - X[i + 1]) ** 2).sum()

        assert node_size == 1, "summary size = {}".format(node_size)
        assert np.isclose(node_dist, ds2c)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/neighbors/tests/test_kde.py | sklearn/neighbors/tests/test_kde.py | import joblib
import numpy as np
import pytest
from sklearn.datasets import make_blobs
from sklearn.exceptions import NotFittedError
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KDTree, KernelDensity, NearestNeighbors
from sklearn.neighbors._ball_tree import kernel_norm
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.utils._testing import assert_allclose
# XXX Duplicated in test_neighbors_tree, test_kde
def compute_kernel_slow(Y, X, kernel, h):
    """Naive reference implementation of the kernel density estimate at Y."""
    # Resolve the named bandwidth rules to a numeric value.
    if h == "scott":
        h = X.shape[0] ** (-1 / (X.shape[1] + 4))
    elif h == "silverman":
        h = (X.shape[0] * (X.shape[1] + 2) / 4) ** (-1 / (X.shape[1] + 4))

    # Pairwise Euclidean distances, shape (len(Y), len(X)).
    d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
    norm = kernel_norm(h, X.shape[1], kernel) / X.shape[0]

    if kernel == "gaussian":
        acc = np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
    elif kernel == "tophat":
        acc = (d < h).sum(-1)
    elif kernel == "epanechnikov":
        acc = ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
    elif kernel == "exponential":
        acc = (np.exp(-d / h)).sum(-1)
    elif kernel == "linear":
        acc = ((1 - d / h) * (d < h)).sum(-1)
    elif kernel == "cosine":
        acc = (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
    else:
        raise ValueError("kernel not recognized")
    return norm * acc
def check_results(kernel, bandwidth, atol, rtol, X, Y, dens_true):
    # Fit a KernelDensity with the given tolerances and compare both the
    # per-sample densities and the total likelihood with the slow reference.
    kde = KernelDensity(kernel=kernel, bandwidth=bandwidth, atol=atol, rtol=rtol)
    log_dens = kde.fit(X).score_samples(Y)
    effective_rtol = max(1e-7, rtol)
    assert_allclose(np.exp(log_dens), dens_true, atol=atol, rtol=effective_rtol)
    assert_allclose(
        np.exp(kde.score(Y)), np.prod(dens_true), atol=atol, rtol=effective_rtol
    )
@pytest.mark.parametrize(
    "kernel", ["gaussian", "tophat", "epanechnikov", "exponential", "linear", "cosine"]
)
@pytest.mark.parametrize("bandwidth", [0.01, 0.1, 1, "scott", "silverman"])
def test_kernel_density(kernel, bandwidth):
    # KernelDensity must agree with the slow reference implementation for
    # every kernel and several tolerance settings.
    n_samples, n_features = (100, 3)
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features)
    Y = rng.randn(n_samples, n_features)

    dens_true = compute_kernel_slow(Y, X, kernel, bandwidth)

    for rtol in [0, 1e-5]:
        for atol in [1e-6, 1e-2]:
            # NOTE(review): `breadth_first` is never forwarded to the
            # estimator, so this inner loop runs an identical check twice —
            # confirm whether it should be passed through to KernelDensity.
            for breadth_first in (True, False):
                check_results(kernel, bandwidth, atol, rtol, X, Y, dens_true)
def test_kernel_density_sampling(n_samples=100, n_features=3):
    # `sample` must produce points within the kernel's support around the
    # training data, and refuse kernels for which sampling is not implemented.
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features)

    bandwidth = 0.2

    for kernel in ["gaussian", "tophat"]:
        # draw a sample
        kde = KernelDensity(bandwidth=bandwidth, kernel=kernel).fit(X)
        samp = kde.sample(100)
        assert X.shape == samp.shape

        # check that samples are in the right range.  The original version
        # queried the training set against itself (all distances zero),
        # making these assertions vacuous; the drawn sample is what must lie
        # close to the training points.
        nbrs = NearestNeighbors(n_neighbors=1).fit(X)
        dist, ind = nbrs.kneighbors(samp, return_distance=True)

        if kernel == "tophat":
            # tophat samples lie strictly within one bandwidth of a point
            assert np.all(dist < bandwidth)
        elif kernel == "gaussian":
            # 5 standard deviations is safe for 100 samples, but there's a
            # very small chance this test could fail.
            assert np.all(dist < 5 * bandwidth)

    # check unsupported kernels
    for kernel in ["epanechnikov", "exponential", "linear", "cosine"]:
        kde = KernelDensity(bandwidth=bandwidth, kernel=kernel).fit(X)
        with pytest.raises(NotImplementedError):
            kde.sample(100)

    # non-regression test: used to return a scalar
    X = rng.randn(4, 1)
    kde = KernelDensity(kernel="gaussian").fit(X)
    assert kde.sample().shape == (1, 1)
@pytest.mark.parametrize("algorithm", ["auto", "ball_tree", "kd_tree"])
@pytest.mark.parametrize(
"metric", ["euclidean", "minkowski", "manhattan", "chebyshev", "haversine"]
)
def test_kde_algorithm_metric_choice(algorithm, metric):
# Smoke test for various metrics and algorithms
rng = np.random.RandomState(0)
X = rng.randn(10, 2) # 2 features required for haversine dist.
Y = rng.randn(10, 2)
kde = KernelDensity(algorithm=algorithm, metric=metric)
if algorithm == "kd_tree" and metric not in KDTree.valid_metrics:
with pytest.raises(ValueError, match="invalid metric"):
kde.fit(X)
else:
kde.fit(X)
y_dens = kde.score_samples(Y)
assert y_dens.shape == Y.shape[:1]
def test_kde_score(n_samples=100, n_features=3):
    # Placeholder: KernelDensity.score behavior is not yet covered here.
    pass
    # FIXME
    # rng = np.random.RandomState(0)
    # X = rng.random_sample((n_samples, n_features))
    # Y = rng.random_sample((n_samples, n_features))
def test_kde_sample_weights_error():
    # Invalid sample weights must be rejected at fit time.
    kde = KernelDensity()

    # 2D weights have the wrong shape
    with pytest.raises(ValueError):
        kde.fit(np.random.random((200, 10)), sample_weight=np.random.random((200, 10)))

    # negative weights are not allowed
    with pytest.raises(ValueError):
        kde.fit(np.random.random((200, 10)), sample_weight=-np.random.random(200))
def test_kde_pipeline_gridsearch():
    # KernelDensity must play nicely as the final step of a pipeline inside a
    # grid-search over its bandwidth.
    X, _ = make_blobs(cluster_std=0.1, random_state=1, centers=[[0, 1], [1, 0], [0, 0]])
    pipe1 = make_pipeline(
        StandardScaler(with_mean=False, with_std=False),
        KernelDensity(kernel="gaussian"),
    )
    search = GridSearchCV(
        pipe1, param_grid={"kerneldensity__bandwidth": [0.001, 0.01, 0.1, 1, 10]}
    )
    search.fit(X)
    assert search.best_params_["kerneldensity__bandwidth"] == 0.1
def test_kde_sample_weights():
    n_samples = 400
    size_test = 20
    weights_neutral = np.full(n_samples, 3.0)

    for d in [1, 2, 10]:
        rng = np.random.RandomState(0)
        X = rng.rand(n_samples, d)
        weights = 1 + (10 * X.sum(axis=1)).astype(np.int8)
        X_repetitions = np.repeat(X, weights, axis=0)
        n_samples_test = size_test // d
        test_points = rng.rand(n_samples_test, d)

        for algorithm in ["auto", "ball_tree", "kd_tree"]:
            for metric in ["euclidean", "minkowski", "manhattan", "chebyshev"]:
                if algorithm == "kd_tree" and metric not in KDTree.valid_metrics:
                    continue

                kde = KernelDensity(algorithm=algorithm, metric=metric)

                # A constant sample weight must not change anything.
                kde.fit(X, sample_weight=weights_neutral)
                scores_const_weight = kde.score_samples(test_points)
                sample_const_weight = kde.sample(random_state=1234)
                kde.fit(X)
                scores_no_weight = kde.score_samples(test_points)
                sample_no_weight = kde.sample(random_state=1234)
                assert_allclose(scores_const_weight, scores_no_weight)
                assert_allclose(sample_const_weight, sample_no_weight)

                # Integer weights must be equivalent to repeating samples.
                kde.fit(X, sample_weight=weights)
                scores_weight = kde.score_samples(test_points)
                sample_weight = kde.sample(random_state=1234)
                kde.fit(X_repetitions)
                scores_ref_sampling = kde.score_samples(test_points)
                sample_ref_sampling = kde.sample(random_state=1234)
                assert_allclose(scores_weight, scores_ref_sampling)
                assert_allclose(sample_weight, sample_ref_sampling)

                # Non-constant weights must have a non-trivial effect.
                diff = np.max(np.abs(scores_no_weight - scores_weight))
                assert diff > 0.001

                # Scores are invariant under arbitrary positive rescaling of
                # the weights.
                scale_factor = rng.rand()
                kde.fit(X, sample_weight=(scale_factor * weights))
                scores_scaled_weight = kde.score_samples(test_points)
                assert_allclose(scores_scaled_weight, scores_weight)
@pytest.mark.parametrize("sample_weight", [None, [0.1, 0.2, 0.3]])
def test_pickling(tmpdir, sample_weight):
# Make sure that predictions are the same before and after pickling. Used
# to be a bug because sample_weights wasn't pickled and the resulting tree
# would miss some info.
kde = KernelDensity()
data = np.reshape([1.0, 2.0, 3.0], (-1, 1))
kde.fit(data, sample_weight=sample_weight)
X = np.reshape([1.1, 2.1], (-1, 1))
scores = kde.score_samples(X)
file_path = str(tmpdir.join("dump.pkl"))
joblib.dump(kde, file_path)
kde = joblib.load(file_path)
scores_pickled = kde.score_samples(X)
assert_allclose(scores, scores_pickled)
@pytest.mark.parametrize("method", ["score_samples", "sample"])
def test_check_is_fitted(method):
# Check that predict raises an exception in an unfitted estimator.
# Unfitted estimators should raise a NotFittedError.
rng = np.random.RandomState(0)
X = rng.randn(10, 2)
kde = KernelDensity()
with pytest.raises(NotFittedError):
getattr(kde, method)(X)
@pytest.mark.parametrize("bandwidth", ["scott", "silverman", 0.1])
def test_bandwidth(bandwidth):
n_samples, n_features = (100, 3)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
kde = KernelDensity(bandwidth=bandwidth).fit(X)
samp = kde.sample(100)
kde_sc = kde.score_samples(X)
assert X.shape == samp.shape
assert kde_sc.shape == (n_samples,)
# Test that the attribute self.bandwidth_ has the expected value
if bandwidth == "scott":
h = X.shape[0] ** (-1 / (X.shape[1] + 4))
elif bandwidth == "silverman":
h = (X.shape[0] * (X.shape[1] + 2) / 4) ** (-1 / (X.shape[1] + 4))
else:
h = bandwidth
assert kde.bandwidth_ == pytest.approx(h)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/neighbors/tests/test_lof.py | sklearn/neighbors/tests/test_lof.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import re
from math import sqrt
import numpy as np
import pytest
from sklearn import metrics, neighbors
from sklearn.datasets import load_iris
from sklearn.metrics import roc_auc_score
from sklearn.utils import check_random_state
from sklearn.utils._testing import assert_allclose, assert_array_equal
from sklearn.utils.estimator_checks import (
check_outlier_corruption,
parametrize_with_checks,
)
from sklearn.utils.fixes import CSR_CONTAINERS
# load the iris dataset
# and randomly permute it
rng = check_random_state(0)
iris = load_iris()
perm = rng.permutation(iris.target.size)
# shuffle samples and targets with the same permutation so they stay aligned
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_lof(global_dtype):
    # Toy sample (the last two samples are outliers):
    X = np.asarray(
        [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [5, 3], [-4, 2]],
        dtype=global_dtype,
    )

    # Every inlier's negative outlier factor must dominate every outlier's,
    # and the fitted data must be stored untouched.
    clf = neighbors.LocalOutlierFactor(n_neighbors=5)
    score = clf.fit(X).negative_outlier_factor_
    assert_array_equal(clf._fit_X, X)
    assert np.min(score[:-2]) > np.max(score[-2:])

    # With a matching contamination, both _predict and fit_predict must flag
    # exactly the last two samples.
    clf = neighbors.LocalOutlierFactor(contamination=0.25, n_neighbors=5).fit(X)
    expected_predictions = 6 * [1] + 2 * [-1]
    assert_array_equal(clf._predict(), expected_predictions)
    assert_array_equal(clf.fit_predict(X), expected_predictions)
def test_lof_performance(global_dtype):
    # Novelty LOF must separate uniform-noise outliers from Gaussian inliers
    # almost perfectly (ROC AUC > 0.99).
    rng = check_random_state(2)
    X = 0.3 * rng.randn(120, 2).astype(global_dtype, copy=False)
    X_train = X[:100]

    # abnormal novel observations
    X_outliers = rng.uniform(low=-4, high=4, size=(20, 2)).astype(
        global_dtype, copy=False
    )
    X_test = np.r_[X[100:], X_outliers]
    y_test = np.array([0] * 20 + [1] * 20)

    # fit the model for novelty detection
    clf = neighbors.LocalOutlierFactor(novelty=True).fit(X_train)

    # a lower decision_function value means more abnormal
    y_pred = -clf.decision_function(X_test)
    assert roc_auc_score(y_test, y_pred) > 0.99
def test_lof_values(global_dtype):
    """Compare LOF scores on a 3-point toy set against hand-computed values."""
    X_train = np.asarray([[1, 1], [1, 2], [2, 1]], dtype=global_dtype)
    with_contamination = neighbors.LocalOutlierFactor(
        n_neighbors=2, contamination=0.1, novelty=True
    ).fit(X_train)
    default_contamination = neighbors.LocalOutlierFactor(
        n_neighbors=2, novelty=True
    ).fit(X_train)

    # Closed-form local outlier factors for this configuration.
    s_0 = 2.0 * sqrt(2.0) / (1.0 + sqrt(2.0))
    s_1 = (1.0 + sqrt(2)) * (1.0 / (4.0 * sqrt(2.0)) + 1.0 / (2.0 + 2.0 * sqrt(2)))

    # The contamination setting must not alter the raw scores.
    for clf in (with_contamination, default_contamination):
        # Training scores match the expected factors.
        assert_allclose(-clf.negative_outlier_factor_, [s_0, s_1, s_1])
        # A query point outside the training set.
        assert_allclose(-clf.score_samples([[2.0, 2.0]]), [s_0])
        # A query point coinciding with a training sample.
        assert_allclose(-clf.score_samples([[1.0, 1.0]]), [s_1])
def test_lof_precomputed(global_dtype, random_state=42):
    """Tests LOF with a distance matrix.

    Fitting on a precomputed euclidean distance matrix must give the same
    predictions as fitting on the raw feature matrix, both for the training
    points (via the private ``_predict``) and for new queries.
    """
    # Note: smaller samples may result in spurious test success
    rng = np.random.RandomState(random_state)
    X = rng.random_sample((10, 4)).astype(global_dtype, copy=False)
    Y = rng.random_sample((3, 4)).astype(global_dtype, copy=False)
    # DXX: pairwise distances train/train; DYX: rectangular test/train.
    DXX = metrics.pairwise_distances(X, metric="euclidean")
    DYX = metrics.pairwise_distances(Y, X, metric="euclidean")
    # As a feature matrix (n_samples by n_features)
    lof_X = neighbors.LocalOutlierFactor(n_neighbors=3, novelty=True)
    lof_X.fit(X)
    pred_X_X = lof_X._predict()
    pred_X_Y = lof_X.predict(Y)
    # As a dense distance matrix (n_samples by n_samples)
    lof_D = neighbors.LocalOutlierFactor(
        n_neighbors=3, algorithm="brute", metric="precomputed", novelty=True
    )
    lof_D.fit(DXX)
    pred_D_X = lof_D._predict()
    pred_D_Y = lof_D.predict(DYX)
    # Both routes must agree on training points and on new queries.
    assert_allclose(pred_X_X, pred_D_X)
    assert_allclose(pred_X_Y, pred_D_Y)
def test_n_neighbors_attribute():
    """An oversized n_neighbors is capped at n_samples - 1 at fit time."""
    X = iris.data
    max_valid = X.shape[0] - 1

    # Fitting with too many neighbors caps the effective value.
    fitted = neighbors.LocalOutlierFactor(n_neighbors=500).fit(X)
    assert fitted.n_neighbors_ == max_valid

    # The cap is announced through a UserWarning during fit.
    estimator = neighbors.LocalOutlierFactor(n_neighbors=500)
    expected_warning = "n_neighbors will be set to (n_samples - 1)"
    with pytest.warns(UserWarning, match=re.escape(expected_warning)):
        estimator.fit(X)
    assert estimator.n_neighbors_ == max_valid
def test_score_samples(global_dtype):
    """score_samples equals decision_function shifted by offset_ and is
    independent of the contamination setting."""
    X_train = np.asarray([[1, 1], [1, 2], [2, 1]], dtype=global_dtype)
    X_test = np.asarray([[2.0, 2.0]], dtype=global_dtype)

    fitted = [
        neighbors.LocalOutlierFactor(
            n_neighbors=2, contamination=0.1, novelty=True
        ).fit(X_train),
        neighbors.LocalOutlierFactor(n_neighbors=2, novelty=True).fit(X_train),
    ]

    all_scores = []
    for clf in fitted:
        # score_samples == decision_function + offset_ for each estimator.
        scores = clf.score_samples(X_test)
        assert_allclose(scores, clf.decision_function(X_test) + clf.offset_)
        all_scores.append(scores)

    # Contamination does not change the raw scores.
    assert_allclose(all_scores[0], all_scores[1])
def test_novelty_errors():
    """Methods incompatible with the `novelty` setting raise AttributeError.

    The outer AttributeError comes from attribute lookup; its ``__cause__``
    carries the explanatory message about the `novelty` flag.
    """
    X = iris.data
    # check errors for novelty=False
    clf = neighbors.LocalOutlierFactor()
    clf.fit(X)
    # predict, decision_function and score_samples raise AttributeError,
    # chained with an inner AttributeError explaining the novelty constraint
    for method in ["predict", "decision_function", "score_samples"]:
        outer_msg = f"'LocalOutlierFactor' has no attribute '{method}'"
        inner_msg = "{} is not available when novelty=False".format(method)
        # Mere attribute access (no call) must already raise.
        with pytest.raises(AttributeError, match=outer_msg) as exec_info:
            getattr(clf, method)
        assert isinstance(exec_info.value.__cause__, AttributeError)
        assert inner_msg in str(exec_info.value.__cause__)
    # check errors for novelty=True: fit_predict is hidden instead
    clf = neighbors.LocalOutlierFactor(novelty=True)
    outer_msg = "'LocalOutlierFactor' has no attribute 'fit_predict'"
    inner_msg = "fit_predict is not available when novelty=True"
    with pytest.raises(AttributeError, match=outer_msg) as exec_info:
        getattr(clf, "fit_predict")
    assert isinstance(exec_info.value.__cause__, AttributeError)
    assert inner_msg in str(exec_info.value.__cause__)
def test_novelty_training_scores(global_dtype):
    """negative_outlier_factor_ on the training data is identical whether or
    not novelty mode is enabled."""
    X = iris.data.astype(global_dtype)

    # Fit once per novelty setting and collect the training scores.
    scores = {}
    for novelty in (False, True):
        clf = neighbors.LocalOutlierFactor(novelty=novelty)
        clf.fit(X)
        scores[novelty] = clf.negative_outlier_factor_

    assert_allclose(scores[False], scores[True])
def test_hasattr_prediction():
    """Prediction methods are exposed or hidden according to `novelty`."""
    X = [[1, 1], [1, 2], [2, 1]]
    novelty_only = ("predict", "decision_function", "score_samples")

    # novelty=True: out-of-sample prediction API available, fit_predict hidden.
    clf = neighbors.LocalOutlierFactor(novelty=True)
    clf.fit(X)
    for method in novelty_only:
        assert hasattr(clf, method)
    assert not hasattr(clf, "fit_predict")

    # novelty=False: only fit_predict is available.
    clf = neighbors.LocalOutlierFactor(novelty=False)
    clf.fit(X)
    assert hasattr(clf, "fit_predict")
    for method in novelty_only:
        assert not hasattr(clf, method)
# Run the estimator common checks against LOF in novelty mode; the default
# common-test suite only covers the default configuration (novelty=False).
@parametrize_with_checks([neighbors.LocalOutlierFactor(novelty=True)])
def test_novelty_true_common_tests(estimator, check):
    # the common tests are run for the default LOF (novelty=False).
    # here we run these common tests for LOF when novelty=True
    check(estimator)
@pytest.mark.parametrize("expected_outliers", [30, 53])
def test_predicted_outlier_number(expected_outliers):
    """The contamination fraction controls the number of predicted outliers,
    up to ties in the abnormality scores."""
    X = iris.data
    contamination = float(expected_outliers) / X.shape[0]

    clf = neighbors.LocalOutlierFactor(contamination=contamination)
    labels = clf.fit_predict(X)

    num_outliers = np.sum(labels != 1)
    if num_outliers != expected_outliers:
        # A mismatch is only acceptable when tied scores straddle the cutoff.
        check_outlier_corruption(
            num_outliers, expected_outliers, clf.negative_outlier_factor_
        )
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_sparse(csr_container):
    """LocalOutlierFactor accepts CSR input in both novelty modes."""
    # TODO: compare results on dense and sparse data as proposed in:
    # https://github.com/scikit-learn/scikit-learn/pull/23585#discussion_r968388186
    X = csr_container(iris.data)

    # Novelty mode: fit, then exercise the whole prediction API.
    lof = neighbors.LocalOutlierFactor(novelty=True)
    lof.fit(X)
    for method in ("predict", "score_samples", "decision_function"):
        getattr(lof, method)(X)

    # Outlier-detection mode.
    neighbors.LocalOutlierFactor(novelty=False).fit_predict(X)
def test_lof_error_n_neighbors_too_large():
    """Check that we raise a proper error message when n_neighbors == n_samples.
    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/17207
    """
    X = np.ones((7, 7))
    # Fitting a single sample with n_neighbors=1 must fail: a point cannot be
    # its own neighbor, so n_neighbors must stay below n_samples_fit.
    msg = (
        "Expected n_neighbors < n_samples_fit, but n_neighbors = 1, "
        "n_samples_fit = 1, n_samples = 1"
    )
    with pytest.raises(ValueError, match=msg):
        lof = neighbors.LocalOutlierFactor(n_neighbors=1).fit(X[:1])
    # Two samples with n_neighbors=2 fit fine (the cap only bites at query
    # time when querying the training set itself).
    lof = neighbors.LocalOutlierFactor(n_neighbors=2).fit(X[:2])
    assert lof.n_samples_fit_ == 2
    # Querying the training set (X=None) excludes each point itself, so only
    # n_samples_fit - 1 neighbors are available.
    msg = (
        "Expected n_neighbors < n_samples_fit, but n_neighbors = 2, "
        "n_samples_fit = 2, n_samples = 2"
    )
    with pytest.raises(ValueError, match=msg):
        lof.kneighbors(None, n_neighbors=2)
    distances, indices = lof.kneighbors(None, n_neighbors=1)
    assert distances.shape == (2, 1)
    assert indices.shape == (2, 1)
    # For external query points the bound is n_neighbors <= n_samples_fit.
    msg = (
        "Expected n_neighbors <= n_samples_fit, but n_neighbors = 3, "
        "n_samples_fit = 2, n_samples = 7"
    )
    with pytest.raises(ValueError, match=msg):
        lof.kneighbors(X, n_neighbors=3)
    (
        distances,
        indices,
    ) = lof.kneighbors(X, n_neighbors=2)
    assert distances.shape == (7, 2)
    assert indices.shape == (7, 2)
@pytest.mark.parametrize("algorithm", ["auto", "ball_tree", "kd_tree", "brute"])
@pytest.mark.parametrize("novelty", [True, False])
@pytest.mark.parametrize("contamination", [0.5, "auto"])
def test_lof_input_dtype_preservation(global_dtype, algorithm, contamination, novelty):
    """Check that the fitted attributes are stored using the data type of X."""
    X = iris.data.astype(global_dtype, copy=False)

    lof = neighbors.LocalOutlierFactor(
        n_neighbors=5, algorithm=algorithm, contamination=contamination, novelty=novelty
    )
    lof.fit(X)
    assert lof.negative_outlier_factor_.dtype == global_dtype

    # Scoring methods (exposed or not depending on `novelty`) keep the dtype.
    for method in ("score_samples", "decision_function"):
        if hasattr(lof, method):
            assert getattr(lof, method)(X).dtype == global_dtype
@pytest.mark.parametrize("algorithm", ["auto", "ball_tree", "kd_tree", "brute"])
@pytest.mark.parametrize("novelty", [True, False])
@pytest.mark.parametrize("contamination", [0.5, "auto"])
def test_lof_dtype_equivalence(algorithm, novelty, contamination):
    """32-bit and 64-bit inputs yield (near-)identical LOF results."""
    inliers = iris.data[:50]  # setosa iris are really distinct from others
    outliers = iris.data[-5:]  # virginica will be considered as outliers
    # Degrade to float32 first so both precisions see numerically equal data.
    X = np.concatenate([inliers, outliers], axis=0).astype(np.float32)

    # Fit one estimator per precision on copies of the same data.
    models = {}
    for dtype in (np.float32, np.float64):
        lof = neighbors.LocalOutlierFactor(
            algorithm=algorithm, novelty=novelty, contamination=contamination
        )
        lof.fit(X.astype(dtype, copy=True))
        models[dtype] = lof

    lof_32, lof_64 = models[np.float32], models[np.float64]
    assert_allclose(lof_32.negative_outlier_factor_, lof_64.negative_outlier_factor_)

    # Prediction APIs (availability depends on `novelty`) must also agree.
    for method in ("score_samples", "decision_function", "predict", "fit_predict"):
        if hasattr(lof_32, method):
            y_pred_32 = getattr(lof_32, method)(X.astype(np.float32, copy=True))
            y_pred_64 = getattr(lof_64, method)(X.astype(np.float64, copy=True))
            assert_allclose(y_pred_32, y_pred_64, atol=0.0002)
def test_lof_duplicate_samples():
    """
    Check that LocalOutlierFactor raises a warning when duplicate values
    in the training data cause inaccurate results.
    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/27839
    """
    rng = np.random.default_rng(0)
    # Mix many duplicated values with a dense linspace and a few clear
    # outliers; with n_neighbors=5 the duplicates yield zero k-distances.
    x = rng.permutation(
        np.hstack(
            [
                [0.1] * 1000,  # constant values
                np.linspace(0.1, 0.3, num=3000),
                rng.random(500) * 100,  # the clear outliers
            ]
        )
    )
    X = x.reshape(-1, 1)
    # Expected warning text (matched literally via re.escape below).
    error_msg = (
        "Duplicate values are leading to incorrect results. "
        "Increase the number of neighbors for more accurate results."
    )
    lof = neighbors.LocalOutlierFactor(n_neighbors=5, contamination=0.1)
    # Catch the warning
    with pytest.warns(UserWarning, match=re.escape(error_msg)):
        lof.fit_predict(X)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/neighbors/tests/test_kd_tree.py | sklearn/neighbors/tests/test_kd_tree.py | import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_equal
from sklearn.neighbors._kd_tree import KDTree, KDTree32, KDTree64
from sklearn.neighbors.tests.test_ball_tree import get_dataset_for_binary_tree
from sklearn.utils.parallel import Parallel, delayed
# NOTE(review): DIMENSION is not referenced in this excerpt — presumably used
# by other tests in the module; confirm before removing.
DIMENSION = 3
# Metric name -> extra keyword arguments forwarded to the tree constructors.
METRICS = {"euclidean": {}, "manhattan": {}, "chebyshev": {}, "minkowski": dict(p=3)}
# Both precision-specialized KD-tree implementations under test.
KD_TREE_CLASSES = [
    KDTree64,
    KDTree32,
]
def test_KDTree_is_KDTree64_subclass():
    """The public KDTree alias must remain a subclass of KDTree64."""
    assert issubclass(KDTree, KDTree64)
@pytest.mark.parametrize("BinarySearchTree", KD_TREE_CLASSES)
def test_array_object_type(BinarySearchTree):
    """Object-dtype (ragged) input must be rejected at construction."""
    # Rows of unequal length force numpy to produce an object array.
    ragged = np.array([(1, 2, 3), (2, 5), (5, 5, 1, 2)], dtype=object)
    with pytest.raises(ValueError, match="setting an array element with a sequence"):
        BinarySearchTree(ragged)
# TODO: remove mark once loky bug is fixed:
# https://github.com/joblib/loky/issues/458
@pytest.mark.thread_unsafe
@pytest.mark.parametrize("BinarySearchTree", KD_TREE_CLASSES)
def test_kdtree_picklable_with_joblib(BinarySearchTree):
    """Make sure that KDTree queries work when joblib memmaps.
    Non-regression test for #21685 and #21228."""
    rng = np.random.RandomState(0)
    X = rng.random_sample((10, 3))
    tree = BinarySearchTree(X, leaf_size=2)
    # Call Parallel with max_nbytes=1 to trigger readonly memory mapping that
    # used to raise "ValueError: buffer source array is read-only" in a
    # previous version of the Cython code.
    Parallel(n_jobs=2, max_nbytes=1)(delayed(tree.query)(data) for data in 2 * [X])
@pytest.mark.parametrize("metric", METRICS)
def test_kd_tree_numerical_consistency(global_random_seed, metric):
    """Query results must agree between the 32-bit and 64-bit KD-trees."""
    # Results on float64 and float32 versions of a dataset must be
    # numerically close.
    X_64, X_32, Y_64, Y_32 = get_dataset_for_binary_tree(
        random_seed=global_random_seed, features=50
    )
    metric_params = METRICS.get(metric, {})
    kd_64 = KDTree64(X_64, leaf_size=2, metric=metric, **metric_params)
    kd_32 = KDTree32(X_32, leaf_size=2, metric=metric, **metric_params)
    # Test consistency with respect to the `query` method
    k = 4
    dist_64, ind_64 = kd_64.query(Y_64, k=k)
    dist_32, ind_32 = kd_32.query(Y_32, k=k)
    assert_allclose(dist_64, dist_32, rtol=1e-5)
    assert_equal(ind_64, ind_32)
    # Each tree variant must keep its native floating-point precision.
    assert dist_64.dtype == np.float64
    assert dist_32.dtype == np.float32
    # Test consistency with respect to the `query_radius` method
    r = 2.38
    ind_64 = kd_64.query_radius(Y_64, r=r)
    ind_32 = kd_32.query_radius(Y_32, r=r)
    # query_radius returns one index array per query point.
    for _ind64, _ind32 in zip(ind_64, ind_32):
        assert_equal(_ind64, _ind32)
    # Test consistency with respect to the `query_radius` method
    # with return distances being true
    ind_64, dist_64 = kd_64.query_radius(Y_64, r=r, return_distance=True)
    ind_32, dist_32 = kd_32.query_radius(Y_32, r=r, return_distance=True)
    for _ind64, _ind32, _dist_64, _dist_32 in zip(ind_64, ind_32, dist_64, dist_32):
        assert_equal(_ind64, _ind32)
        assert_allclose(_dist_64, _dist_32, rtol=1e-5)
        assert _dist_64.dtype == np.float64
        assert _dist_32.dtype == np.float32
@pytest.mark.parametrize("metric", METRICS)
def test_kernel_density_numerical_consistency(global_random_seed, metric):
    """kernel_density agrees between the 32-bit and 64-bit tree variants."""
    X_64, X_32, Y_64, Y_32 = get_dataset_for_binary_tree(random_seed=global_random_seed)
    metric_params = METRICS.get(metric, {})
    kd_64 = KDTree64(X_64, leaf_size=2, metric=metric, **metric_params)
    kd_32 = KDTree32(X_32, leaf_size=2, metric=metric, **metric_params)

    # Gaussian KDE with a fixed bandwidth, breadth-first traversal.
    density64 = kd_64.kernel_density(Y_64, h=0.1, kernel="gaussian", breadth_first=True)
    density32 = kd_32.kernel_density(Y_32, h=0.1, kernel="gaussian", breadth_first=True)

    assert_allclose(density64, density32, rtol=1e-5)
    # Each variant keeps its native precision.
    assert density64.dtype == np.float64
    assert density32.dtype == np.float32
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/neighbors/tests/test_neighbors.py | sklearn/neighbors/tests/test_neighbors.py | import re
import warnings
from itertools import product
import joblib
import numpy as np
import pytest
from scipy.sparse import issparse
from sklearn import (
config_context,
datasets,
metrics,
neighbors,
)
from sklearn.base import clone
from sklearn.exceptions import EfficiencyWarning, NotFittedError
from sklearn.metrics._dist_metrics import (
DistanceMetric,
)
from sklearn.metrics.pairwise import PAIRWISE_BOOLEAN_FUNCTIONS, pairwise_distances
from sklearn.metrics.tests.test_dist_metrics import BOOL_METRICS
from sklearn.metrics.tests.test_pairwise_distances_reduction import (
assert_compatible_argkmin_results,
assert_compatible_radius_results,
)
from sklearn.model_selection import (
LeaveOneOut,
cross_val_predict,
cross_val_score,
train_test_split,
)
from sklearn.neighbors import (
VALID_METRICS_SPARSE,
KNeighborsRegressor,
)
from sklearn.neighbors._base import (
KNeighborsMixin,
_check_precomputed,
_is_sorted_by_data,
sort_graph_by_row_values,
)
from sklearn.pipeline import make_pipeline
from sklearn.utils._testing import (
assert_allclose,
assert_array_equal,
ignore_warnings,
)
from sklearn.utils.fixes import (
BSR_CONTAINERS,
COO_CONTAINERS,
CSC_CONTAINERS,
CSR_CONTAINERS,
DIA_CONTAINERS,
DOK_CONTAINERS,
LIL_CONTAINERS,
)
from sklearn.utils.validation import check_random_state
# Shared module-level RNG with a fixed seed for deterministic tests.
rng = np.random.RandomState(0)
# load and shuffle iris dataset
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# load and shuffle digits
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
# Every sparse container type tests should accept as input.
SPARSE_TYPES = tuple(
    BSR_CONTAINERS
    + COO_CONTAINERS
    + CSC_CONTAINERS
    + CSR_CONTAINERS
    + DOK_CONTAINERS
    + LIL_CONTAINERS
)
SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,)
# Neighbor-search backends whose results must agree with each other.
ALGORITHMS = ("ball_tree", "brute", "kd_tree", "auto")
# Metrics supported by every algorithm.
COMMON_VALID_METRICS = sorted(
    set.intersection(*map(set, neighbors.VALID_METRICS.values()))
)
# Minkowski exponents exercised by the radius tests.
P = (1, 2, 3, 4, np.inf)
# Filter deprecation warnings.
neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph)
neighbors.radius_neighbors_graph = ignore_warnings(neighbors.radius_neighbors_graph)
# A list containing metrics where the string specifies the use of the
# DistanceMetric object directly (as resolved in _parse_metric)
DISTANCE_METRIC_OBJS = ["DM_euclidean"]
def _parse_metric(metric: str, dtype=None):
"""
Helper function for properly building a type-specialized DistanceMetric instances.
Constructs a type-specialized DistanceMetric instance from a string
beginning with "DM_" while allowing a pass-through for other metric-specifying
strings. This is necessary since we wish to parameterize dtype independent of
metric, yet DistanceMetric requires it for construction.
"""
if metric[:3] == "DM_":
return DistanceMetric.get_metric(metric[3:], dtype=dtype)
return metric
def _generate_test_params_for(metric: str, n_features: int):
"""Return list of DistanceMetric kwargs for tests."""
# Distinguishing on cases not to compute unneeded datastructures.
rng = np.random.RandomState(1)
if metric == "minkowski":
return [
dict(p=1.5),
dict(p=2),
dict(p=3),
dict(p=np.inf),
dict(p=3, w=rng.rand(n_features)),
]
if metric == "seuclidean":
return [dict(V=rng.rand(n_features))]
if metric == "mahalanobis":
A = rng.rand(n_features, n_features)
# Make the matrix symmetric positive definite
VI = A + A.T + 3 * np.eye(n_features)
return [dict(VI=VI)]
# Case of: "euclidean", "manhattan", "chebyshev", "haversine" or any other metric.
# In those cases, no kwargs are needed.
return [{}]
def _weight_func(dist):
"""Weight function to replace lambda d: d ** -2.
The lambda function is not valid because:
if d==0 then 0^-2 is not valid."""
# Dist could be multidimensional, flatten it so all values
# can be looped
with np.errstate(divide="ignore"):
retval = 1.0 / dist
return retval**2
WEIGHTS = ["uniform", "distance", _weight_func]
# XXX: probably related to the thread-safety bug tracked at:
# https://github.com/scikit-learn/scikit-learn/issues/31884
@pytest.mark.thread_unsafe
@pytest.mark.parametrize(
    "n_samples, n_features, n_query_pts, n_neighbors",
    [
        (100, 100, 10, 100),
        (1000, 5, 100, 1),
    ],
)
@pytest.mark.parametrize("query_is_train", [False, True])
@pytest.mark.parametrize("metric", COMMON_VALID_METRICS + DISTANCE_METRIC_OBJS)
def test_unsupervised_kneighbors(
    global_dtype,
    n_samples,
    n_features,
    n_query_pts,
    n_neighbors,
    query_is_train,
    metric,
):
    """All neighbor-search algorithms must agree on kneighbors results."""
    # The different algorithms must return identical results
    # on their common metrics, with and without returning
    # distances
    metric = _parse_metric(metric, global_dtype)
    # Redefining the rng locally to use the same generated X
    local_rng = np.random.RandomState(0)
    X = local_rng.rand(n_samples, n_features).astype(global_dtype, copy=False)
    query = (
        X
        if query_is_train
        else local_rng.rand(n_query_pts, n_features).astype(global_dtype, copy=False)
    )
    results_nodist = []
    results = []
    for algorithm in ALGORITHMS:
        if isinstance(metric, DistanceMetric) and global_dtype == np.float32:
            if "tree" in algorithm:  # pragma: nocover
                pytest.skip(
                    "Neither KDTree nor BallTree support 32-bit distance metric"
                    " objects."
                )
        neigh = neighbors.NearestNeighbors(
            n_neighbors=n_neighbors, algorithm=algorithm, metric=metric
        )
        neigh.fit(X)
        # Collect results both without and with distances for each algorithm.
        results_nodist.append(neigh.kneighbors(query, return_distance=False))
        results.append(neigh.kneighbors(query, return_distance=True))
    # Compare each algorithm against the next one in ALGORITHMS order.
    for i in range(len(results) - 1):
        algorithm = ALGORITHMS[i]
        next_algorithm = ALGORITHMS[i + 1]
        indices_no_dist = results_nodist[i]
        distances, next_distances = results[i][0], results[i + 1][0]
        indices, next_indices = results[i][1], results[i + 1][1]
        assert_array_equal(
            indices_no_dist,
            indices,
            err_msg=(
                f"The '{algorithm}' algorithm returns different"
                "indices depending on 'return_distances'."
            ),
        )
        assert_array_equal(
            indices,
            next_indices,
            err_msg=(
                f"The '{algorithm}' and '{next_algorithm}' "
                "algorithms return different indices."
            ),
        )
        assert_allclose(
            distances,
            next_distances,
            err_msg=(
                f"The '{algorithm}' and '{next_algorithm}' "
                "algorithms return different distances."
            ),
            atol=1e-6,
        )
@pytest.mark.parametrize(
    "n_samples, n_features, n_query_pts",
    [
        (100, 100, 10),
        (1000, 5, 100),
    ],
)
@pytest.mark.parametrize("metric", COMMON_VALID_METRICS + DISTANCE_METRIC_OBJS)
@pytest.mark.parametrize("n_neighbors, radius", [(1, 100), (50, 500), (100, 1000)])
@pytest.mark.parametrize(
    "NeighborsMixinSubclass",
    [
        neighbors.KNeighborsClassifier,
        neighbors.KNeighborsRegressor,
        neighbors.RadiusNeighborsClassifier,
        neighbors.RadiusNeighborsRegressor,
    ],
)
def test_neigh_predictions_algorithm_agnosticity(
    global_dtype,
    n_samples,
    n_features,
    n_query_pts,
    metric,
    n_neighbors,
    radius,
    NeighborsMixinSubclass,
):
    """Predictions of neighbor estimators must not depend on `algorithm`."""
    # The different algorithms must return identical predictions results
    # on their common metrics.
    metric = _parse_metric(metric, global_dtype)
    # DistanceMetric objects are only supported by a subset of estimators.
    if isinstance(metric, DistanceMetric):
        if "Classifier" in NeighborsMixinSubclass.__name__:
            pytest.skip(
                "Metrics of type `DistanceMetric` are not yet supported for"
                " classifiers."
            )
        if "Radius" in NeighborsMixinSubclass.__name__:
            pytest.skip(
                "Metrics of type `DistanceMetric` are not yet supported for"
                " radius-neighbor estimators."
            )
    # Redefining the rng locally to use the same generated X
    local_rng = np.random.RandomState(0)
    X = local_rng.rand(n_samples, n_features).astype(global_dtype, copy=False)
    y = local_rng.randint(3, size=n_samples)
    query = local_rng.rand(n_query_pts, n_features).astype(global_dtype, copy=False)
    predict_results = []
    # k-neighbor estimators take n_neighbors, radius estimators take radius.
    parameter = (
        n_neighbors if issubclass(NeighborsMixinSubclass, KNeighborsMixin) else radius
    )
    for algorithm in ALGORITHMS:
        if isinstance(metric, DistanceMetric) and global_dtype == np.float32:
            if "tree" in algorithm:  # pragma: nocover
                pytest.skip(
                    "Neither KDTree nor BallTree support 32-bit distance metric"
                    " objects."
                )
        neigh = NeighborsMixinSubclass(parameter, algorithm=algorithm, metric=metric)
        neigh.fit(X, y)
        predict_results.append(neigh.predict(query))
    # Compare each algorithm's predictions with the next one's.
    for i in range(len(predict_results) - 1):
        algorithm = ALGORITHMS[i]
        next_algorithm = ALGORITHMS[i + 1]
        predictions, next_predictions = predict_results[i], predict_results[i + 1]
        assert_allclose(
            predictions,
            next_predictions,
            err_msg=(
                f"The '{algorithm}' and '{next_algorithm}' "
                "algorithms return different predictions."
            ),
        )
@pytest.mark.parametrize(
    "KNeighborsMixinSubclass",
    [
        neighbors.KNeighborsClassifier,
        neighbors.KNeighborsRegressor,
        neighbors.NearestNeighbors,
    ],
)
def test_unsupervised_inputs(global_dtype, KNeighborsMixinSubclass):
    """Fitting on a fitted estimator or a tree gives the same neighbors as
    fitting on the raw data."""
    # Test unsupervised inputs for neighbors estimators
    X = rng.random_sample((10, 3)).astype(global_dtype, copy=False)
    y = rng.randint(3, size=10)
    # Reference result from fitting directly on the feature matrix.
    nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1)
    nbrs_fid.fit(X)
    dist1, ind1 = nbrs_fid.kneighbors(X)
    nbrs = KNeighborsMixinSubclass(n_neighbors=1)
    # A fitted NearestNeighbors, a BallTree and a KDTree are all valid inputs.
    for data in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)):
        nbrs.fit(data, y)
        dist2, ind2 = nbrs.kneighbors(X)
        assert_allclose(dist1, dist2)
        assert_array_equal(ind1, ind2)
def test_not_fitted_error_gets_raised():
    """Graph queries on an unfitted NearestNeighbors raise NotFittedError."""
    X = [[1]]
    unfitted = neighbors.NearestNeighbors()
    for graph_method in ("kneighbors_graph", "radius_neighbors_graph"):
        with pytest.raises(NotFittedError):
            getattr(unfitted, graph_method)(X)
@pytest.mark.filterwarnings("ignore:EfficiencyWarning")
def check_precomputed(make_train_test, estimators):
    """Tests unsupervised NearestNeighbors with a distance matrix.

    `make_train_test(X, Y)` must return the (train/train, test/train)
    precomputed distance structures; `estimators` are predictor classes whose
    predictions from raw features and precomputed input must agree.
    """
    # Note: smaller samples may result in spurious test success
    rng = np.random.RandomState(42)
    X = rng.random_sample((10, 4))
    Y = rng.random_sample((3, 4))
    DXX, DYX = make_train_test(X, Y)
    for method in [
        "kneighbors",
    ]:
        # TODO: also test radius_neighbors, but requires different assertion
        # As a feature matrix (n_samples by n_features)
        nbrs_X = neighbors.NearestNeighbors(n_neighbors=3)
        nbrs_X.fit(X)
        dist_X, ind_X = getattr(nbrs_X, method)(Y)
        # As a dense distance matrix (n_samples by n_samples)
        nbrs_D = neighbors.NearestNeighbors(
            n_neighbors=3, algorithm="brute", metric="precomputed"
        )
        nbrs_D.fit(DXX)
        dist_D, ind_D = getattr(nbrs_D, method)(DYX)
        assert_allclose(dist_X, dist_D)
        assert_array_equal(ind_X, ind_D)
        # Check auto works too
        nbrs_D = neighbors.NearestNeighbors(
            n_neighbors=3, algorithm="auto", metric="precomputed"
        )
        nbrs_D.fit(DXX)
        dist_D, ind_D = getattr(nbrs_D, method)(DYX)
        assert_allclose(dist_X, dist_D)
        assert_array_equal(ind_X, ind_D)
        # Check X=None in prediction
        dist_X, ind_X = getattr(nbrs_X, method)(None)
        dist_D, ind_D = getattr(nbrs_D, method)(None)
        assert_allclose(dist_X, dist_D)
        assert_array_equal(ind_X, ind_D)
        # Must raise a ValueError if the matrix is not of correct shape
        with pytest.raises(ValueError):
            getattr(nbrs_D, method)(X)
    target = np.arange(X.shape[0])
    # Each estimator must predict identically from raw features and from the
    # precomputed structures (metric is switched on the same instance).
    for Est in estimators:
        est = Est(metric="euclidean")
        est.radius = est.n_neighbors = 1
        pred_X = est.fit(X, target).predict(Y)
        est.metric = "precomputed"
        pred_D = est.fit(DXX, target).predict(DYX)
        assert_allclose(pred_X, pred_D)
def test_precomputed_dense():
    """Dense precomputed distance matrices behave like raw features."""

    def make_train_test(X_train, X_test):
        # Square train/train and rectangular test/train distance matrices.
        return (
            metrics.pairwise_distances(X_train),
            metrics.pairwise_distances(X_test, X_train),
        )

    check_precomputed(
        make_train_test,
        [
            neighbors.KNeighborsClassifier,
            neighbors.KNeighborsRegressor,
            neighbors.RadiusNeighborsClassifier,
            neighbors.RadiusNeighborsRegressor,
        ],
    )
@pytest.mark.parametrize("fmt", ["csr", "lil"])
def test_precomputed_sparse_knn(fmt):
    """Sparse kNN graphs work as precomputed input for k-neighbor estimators."""

    def make_train_test(X_train, X_test):
        # Build graphs with one extra neighbor so downstream k=3 queries work.
        nn = neighbors.NearestNeighbors(n_neighbors=3 + 1).fit(X_train)
        return (
            nn.kneighbors_graph(X_train, mode="distance").asformat(fmt),
            nn.kneighbors_graph(X_test, mode="distance").asformat(fmt),
        )

    # We do not test RadiusNeighborsClassifier and RadiusNeighborsRegressor
    # since the precomputed neighbors graph is built with k neighbors only.
    check_precomputed(
        make_train_test,
        [neighbors.KNeighborsClassifier, neighbors.KNeighborsRegressor],
    )
@pytest.mark.parametrize("fmt", ["csr", "lil"])
def test_precomputed_sparse_radius(fmt):
    """Sparse radius graphs work as precomputed input for radius estimators."""

    def make_train_test(X_train, X_test):
        # Radius graphs keep every neighbor within distance 1.
        nn = neighbors.NearestNeighbors(radius=1).fit(X_train)
        return (
            nn.radius_neighbors_graph(X_train, mode="distance").asformat(fmt),
            nn.radius_neighbors_graph(X_test, mode="distance").asformat(fmt),
        )

    # We do not test KNeighborsClassifier and KNeighborsRegressor
    # since the precomputed neighbors graph is built with a radius.
    check_precomputed(
        make_train_test,
        [neighbors.RadiusNeighborsClassifier, neighbors.RadiusNeighborsRegressor],
    )
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_is_sorted_by_data(csr_container):
    """_is_sorted_by_data detects row-wise value-sorted CSR graphs."""
    # Test that _is_sorted_by_data works as expected. In CSR sparse matrix,
    # entries in each row can be sorted by indices, by data, or unsorted.
    # _is_sorted_by_data should return True when entries are sorted by data,
    # and False in all other cases.
    # Test with sorted single row sparse array
    X = csr_container(np.arange(10).reshape(1, 10))
    assert _is_sorted_by_data(X)
    # Test with unsorted 1D array
    X[0, 2] = 5
    assert not _is_sorted_by_data(X)
    # Test when the data is sorted in each sample, but not necessarily
    # between samples
    X = csr_container([[0, 1, 2], [3, 0, 0], [3, 4, 0], [1, 0, 2]])
    assert _is_sorted_by_data(X)
    # Test with duplicates entries in X.indptr
    data, indices, indptr = [0, 4, 2, 2], [0, 1, 1, 1], [0, 2, 2, 4]
    X = csr_container((data, indices, indptr), shape=(3, 3))
    assert _is_sorted_by_data(X)
@pytest.mark.filterwarnings("ignore:EfficiencyWarning")
@pytest.mark.parametrize("function", [sort_graph_by_row_values, _check_precomputed])
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_sort_graph_by_row_values(function, csr_container):
    """Both sorting entry points return a graph sorted row-wise by value."""
    # Dense random graph: unsorted with overwhelming probability.
    X = csr_container(np.abs(np.random.RandomState(42).randn(10, 10)))
    assert not _is_sorted_by_data(X)
    assert _is_sorted_by_data(function(X))

    # Repeat with a varying number of nonzero entries per row.
    mask = np.random.RandomState(42).randint(2, size=(10, 10))
    dense = X.toarray()
    dense[mask == 1] = 0
    X = csr_container(dense)
    assert not _is_sorted_by_data(X)
    assert _is_sorted_by_data(function(X))
@pytest.mark.filterwarnings("ignore:EfficiencyWarning")
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_sort_graph_by_row_values_copy(csr_container):
    """The `copy` flag controls whether sorting mutates the input in place."""
    # Test if the sorting is done inplace if X is CSR, so that Xt is X.
    X_ = csr_container(np.abs(np.random.RandomState(42).randn(10, 10)))
    assert not _is_sorted_by_data(X_)
    # sort_graph_by_row_values is done inplace if copy=False
    # (identity of the underlying .data buffer proves in-place operation)
    X = X_.copy()
    assert sort_graph_by_row_values(X).data is X.data
    X = X_.copy()
    assert sort_graph_by_row_values(X, copy=False).data is X.data
    X = X_.copy()
    assert sort_graph_by_row_values(X, copy=True).data is not X.data
    # _check_precomputed is never done inplace
    X = X_.copy()
    assert _check_precomputed(X).data is not X.data
    # do not raise if X is not CSR and copy=True
    sort_graph_by_row_values(X.tocsc(), copy=True)
    # raise if X is not CSR and copy=False
    with pytest.raises(ValueError, match="Use copy=True to allow the conversion"):
        sort_graph_by_row_values(X.tocsc(), copy=False)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_sort_graph_by_row_values_warning(csr_container):
    """warn_when_not_sorted toggles the EfficiencyWarning as documented."""
    X = csr_container(np.abs(np.random.RandomState(42).randn(10, 10)))
    assert not _is_sorted_by_data(X)

    # The default, the explicit warn_when_not_sorted=True, and
    # _check_precomputed all warn on unsorted input.
    expected = "was not sorted by row values"
    with pytest.warns(EfficiencyWarning, match=expected):
        sort_graph_by_row_values(X, copy=True)
    with pytest.warns(EfficiencyWarning, match=expected):
        sort_graph_by_row_values(X, copy=True, warn_when_not_sorted=True)
    with pytest.warns(EfficiencyWarning, match=expected):
        _check_precomputed(X)

    # warn_when_not_sorted=False must stay silent.
    with warnings.catch_warnings():
        warnings.simplefilter("error")
        sort_graph_by_row_values(X, copy=True, warn_when_not_sorted=False)
@pytest.mark.parametrize(
    "sparse_container", DOK_CONTAINERS + BSR_CONTAINERS + DIA_CONTAINERS
)
def test_sort_graph_by_row_values_bad_sparse_format(sparse_container):
    """Unsupported sparse formats raise TypeError in both entry points."""
    X = sparse_container(np.abs(np.random.RandomState(42).randn(10, 10)))
    for func in (sort_graph_by_row_values, _check_precomputed):
        with pytest.raises(TypeError, match="format is not supported"):
            func(X)
@pytest.mark.filterwarnings("ignore:EfficiencyWarning")
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_precomputed_sparse_invalid(csr_container):
    """Invalid sparse precomputed distance matrices raise informative errors."""
    # A fully-specified symmetric distance matrix works for both query modes.
    dist = np.array([[0.0, 2.0, 1.0], [2.0, 0.0, 3.0], [1.0, 3.0, 0.0]])
    dist_csr = csr_container(dist)
    neigh = neighbors.NearestNeighbors(n_neighbors=1, metric="precomputed")
    neigh.fit(dist_csr)
    neigh.kneighbors(None, n_neighbors=1)
    neigh.kneighbors(np.array([[0.0, 0.0, 0.0]]), n_neighbors=2)
    # Ensures enough number of nearest neighbors
    # (the zero entries become missing edges in the sparse graph)
    dist = np.array([[0.0, 2.0, 0.0], [2.0, 0.0, 3.0], [0.0, 3.0, 0.0]])
    dist_csr = csr_container(dist)
    neigh.fit(dist_csr)
    msg = "2 neighbors per samples are required, but some samples have only 1"
    with pytest.raises(ValueError, match=msg):
        neigh.kneighbors(None, n_neighbors=1)
    # Checks error with inconsistent distance matrix
    dist = np.array([[5.0, 2.0, 1.0], [-2.0, 0.0, 3.0], [1.0, 3.0, 0.0]])
    dist_csr = csr_container(dist)
    msg = "Negative values in data passed to precomputed distance matrix."
    with pytest.raises(ValueError, match=msg):
        neigh.kneighbors(dist_csr, n_neighbors=1)
def test_precomputed_cross_validation():
    """CV scores with a regular metric and the equivalent precomputed
    distance matrix must be identical (the matrix is split correctly)."""
    rng = np.random.RandomState(0)
    X = rng.rand(20, 2)
    y = rng.randint(3, size=20)
    D = pairwise_distances(X, metric="euclidean")
    estimators = (
        neighbors.KNeighborsClassifier,
        neighbors.RadiusNeighborsClassifier,
        neighbors.KNeighborsRegressor,
        neighbors.RadiusNeighborsRegressor,
    )
    for Est in estimators:
        scores_metric = cross_val_score(Est(), X, y)
        scores_precomputed = cross_val_score(Est(metric="precomputed"), D, y)
        assert_array_equal(scores_metric, scores_precomputed)
def test_unsupervised_radius_neighbors(
    global_dtype, n_samples=20, n_features=5, n_query_pts=2, radius=0.5, random_state=0
):
    # Test unsupervised radius-based query
    # All algorithms must agree with each other for each Minkowski power p:
    # the (sorted) per-query distances and indices are compared pairwise
    # across consecutive algorithms.
    rng = np.random.RandomState(random_state)
    X = rng.rand(n_samples, n_features).astype(global_dtype, copy=False)
    test = rng.rand(n_query_pts, n_features).astype(global_dtype, copy=False)
    for p in P:
        results = []
        for algorithm in ALGORITHMS:
            neigh = neighbors.NearestNeighbors(radius=radius, algorithm=algorithm, p=p)
            neigh.fit(X)
            ind1 = neigh.radius_neighbors(test, return_distance=False)
            # sort the results: this is not done automatically for
            # radius searches
            dist, ind = neigh.radius_neighbors(test, return_distance=True)
            for d, i, i1 in zip(dist, ind, ind1):
                j = d.argsort()
                # Sort in place so `results` holds the sorted arrays.
                d[:] = d[j]
                i[:] = i[j]
                i1[:] = i1[j]
            results.append((dist, ind))
            # The query with and without distances must return the same
            # neighbor indices.
            assert_allclose(np.concatenate(list(ind)), np.concatenate(list(ind1)))
        for i in range(len(results) - 1):
            # Consecutive algorithms must agree on distances and indices.
            assert_allclose(
                np.concatenate(list(results[i][0])),
                np.concatenate(list(results[i + 1][0])),
            )
            assert_allclose(
                np.concatenate(list(results[i][1])),
                np.concatenate(list(results[i + 1][1])),
            )
@pytest.mark.parametrize("algorithm", ALGORITHMS)
@pytest.mark.parametrize("weights", WEIGHTS)
def test_kneighbors_classifier(
    global_dtype,
    algorithm,
    weights,
    n_samples=40,
    n_features=5,
    n_test_pts=10,
    n_neighbors=5,
    random_state=0,
):
    """k-NN classification recovers the training labels under a tiny
    perturbation of the training points, for both int and str targets."""
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features).astype(global_dtype, copy=False) - 1
    labels_int = ((X**2).sum(axis=1) < 0.5).astype(int)
    clf = neighbors.KNeighborsClassifier(
        n_neighbors=n_neighbors, weights=weights, algorithm=algorithm
    )
    epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
    X_query = X[:n_test_pts] + epsilon
    for labels in (labels_int, labels_int.astype(str)):
        clf.fit(X, labels)
        assert_array_equal(clf.predict(X_query), labels[:n_test_pts])
def test_kneighbors_classifier_float_labels(
    global_dtype,
    n_samples=40,
    n_features=5,
    n_test_pts=10,
    n_neighbors=5,
    random_state=0,
):
    """Float-typed class labels are classified like their int counterparts."""
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features).astype(global_dtype, copy=False) - 1
    y = ((X**2).sum(axis=1) < 0.5).astype(int)
    clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
    clf.fit(X, y.astype(float))
    perturbation = 1e-5 * (2 * rng.rand(1, n_features) - 1)
    predictions = clf.predict(X[:n_test_pts] + perturbation)
    assert_array_equal(predictions, y[:n_test_pts])
def test_kneighbors_classifier_predict_proba(global_dtype):
    """predict_proba returns neighbor-count fractions with uniform weights
    and distance-weighted probabilities with weights='distance'."""
    X = np.array(
        [[0, 2, 0], [0, 2, 1], [2, 0, 0], [2, 2, 0], [0, 0, 2], [0, 0, 1]]
    ).astype(global_dtype, copy=False)
    y = np.array([4, 4, 5, 5, 1, 1])
    clf = neighbors.KNeighborsClassifier(n_neighbors=3, p=1)  # cityblock dist
    clf.fit(X, y)
    # Expected probabilities: per-class counts among the 3 neighbors / 3.
    expected = (
        np.array(
            [
                [0, 2, 1],
                [1, 2, 0],
                [1, 0, 2],
                [0, 1, 2],
                [2, 1, 0],
                [2, 1, 0],
            ]
        )
        / 3.0
    )
    assert_array_equal(expected, clf.predict_proba(X))
    # Check that it also works with non integer labels
    clf.fit(X, y.astype(str))
    assert_array_equal(expected, clf.predict_proba(X))
    # Check that it works with weights='distance'
    clf = neighbors.KNeighborsClassifier(n_neighbors=2, p=1, weights="distance")
    clf.fit(X, y)
    proba = clf.predict_proba(np.array([[0, 2, 0], [2, 2, 2]]))
    assert_allclose(np.array([[0, 1, 0], [0, 0.4, 0.6]]), proba)
@pytest.mark.parametrize("algorithm", ALGORITHMS)
@pytest.mark.parametrize("weights", WEIGHTS)
def test_radius_neighbors_classifier(
    global_dtype,
    algorithm,
    weights,
    n_samples=40,
    n_features=5,
    n_test_pts=10,
    radius=0.5,
    random_state=0,
):
    """Radius-based classification recovers the training labels under a tiny
    perturbation of the training points, for both int and str targets."""
    rng = np.random.RandomState(random_state)
    X = 2 * rng.rand(n_samples, n_features).astype(global_dtype, copy=False) - 1
    labels_int = ((X**2).sum(axis=1) < radius).astype(int)
    clf = neighbors.RadiusNeighborsClassifier(
        radius=radius, weights=weights, algorithm=algorithm
    )
    epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1)
    X_query = X[:n_test_pts] + epsilon
    for labels in (labels_int, labels_int.astype(str)):
        clf.fit(X, labels)
        assert_array_equal(clf.predict(X_query), labels[:n_test_pts])
@pytest.mark.parametrize("algorithm", ALGORITHMS)
@pytest.mark.parametrize("weights", WEIGHTS)
@pytest.mark.parametrize("outlier_label", [0, -1, None])
def test_radius_neighbors_classifier_when_no_neighbors(
    global_dtype, algorithm, weights, outlier_label
):
    """A query with no neighbors in the radius raises a ValueError when no
    outlier_label is configured."""
    X = np.array([[1.0, 1.0], [2.0, 2.0]], dtype=global_dtype)
    y = np.array([1, 2])
    # Every query point has a neighbor within the radius.
    z_inliers = np.array([[1.01, 1.01], [2.01, 2.01]], dtype=global_dtype)
    # The second query point has no neighbor within the radius.
    z_with_outlier = np.array([[1.01, 1.01], [1.4, 1.4]], dtype=global_dtype)
    clf = neighbors.RadiusNeighborsClassifier(
        radius=0.1,
        weights=weights,
        algorithm=algorithm,
        outlier_label=outlier_label,
    )
    clf.fit(X, y)
    assert_array_equal(np.array([1, 2]), clf.predict(z_inliers))
    if outlier_label is None:
        with pytest.raises(ValueError):
            clf.predict(z_with_outlier)
@pytest.mark.parametrize("algorithm", ALGORITHMS)
@pytest.mark.parametrize("weights", WEIGHTS)
def test_radius_neighbors_classifier_outlier_labeling(global_dtype, algorithm, weights):
    # Test radius-based classifier when no neighbors found and outliers
    # are labeled.
    X = np.array(
        [[1.0, 1.0], [2.0, 2.0], [0.99, 0.99], [0.98, 0.98], [2.01, 2.01]],
        dtype=global_dtype,
    )
    y = np.array([1, 2, 1, 1, 2])
    radius = 0.1
    # no outliers
    z1 = np.array([[1.01, 1.01], [2.01, 2.01]], dtype=global_dtype)
    # one outlier
    z2 = np.array([[1.4, 1.4], [1.01, 1.01], [2.01, 2.01]], dtype=global_dtype)
    correct_labels1 = np.array([1, 2])
    # The outlier query gets the configured outlier_label (-1).
    correct_labels2 = np.array([-1, 1, 2])
    # An outlier label not among the training classes yields all-zero
    # probabilities for the outlier query row.
    outlier_proba = np.array([0, 0])
    clf = neighbors.RadiusNeighborsClassifier(
        radius=radius, weights=weights, algorithm=algorithm, outlier_label=-1
    )
    clf.fit(X, y)
    assert_array_equal(correct_labels1, clf.predict(z1))
    with pytest.warns(UserWarning, match="Outlier label -1 is not in training classes"):
        assert_array_equal(correct_labels2, clf.predict(z2))
    with pytest.warns(UserWarning, match="Outlier label -1 is not in training classes"):
        assert_allclose(outlier_proba, clf.predict_proba(z2)[0])
    # test outlier_labeling of using predict_proba()
    # Query point [15] below has no neighbor within radius 1, so it is an
    # outlier in every scenario that follows.
    RNC = neighbors.RadiusNeighborsClassifier
    X = np.array([[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]], dtype=global_dtype)
    y = np.array([0, 2, 2, 1, 1, 1, 3, 3, 3, 3])
    # test outlier_label scalar verification
    def check_array_exception():
        clf = RNC(radius=1, outlier_label=[[5]])
        clf.fit(X, y)
    with pytest.raises(TypeError):
        check_array_exception()
    # test invalid outlier_label dtype
    def check_dtype_exception():
        clf = RNC(radius=1, outlier_label="a")
        clf.fit(X, y)
    with pytest.raises(TypeError):
        check_dtype_exception()
    # test most frequent
    clf = RNC(radius=1, outlier_label="most_frequent")
    clf.fit(X, y)
    proba = clf.predict_proba([[1], [15]])
    assert_array_equal(proba[1, :], [0, 0, 0, 1])
    # test manual label in y
    clf = RNC(radius=1, outlier_label=1)
    clf.fit(X, y)
    proba = clf.predict_proba([[1], [15]])
    assert_array_equal(proba[1, :], [0, 1, 0, 0])
    pred = clf.predict([[1], [15]])
    assert_array_equal(pred, [2, 1])
    # test manual label out of y warning
    def check_warning():
        clf = RNC(radius=1, outlier_label=4)
        clf.fit(X, y)
        clf.predict_proba([[1], [15]])
    with pytest.warns(UserWarning):
        check_warning()
    # test multi output same outlier label
    y_multi = [
        [0, 1],
        [2, 1],
        [2, 2],
        [1, 2],
        [1, 2],
        [1, 3],
        [3, 3],
        [3, 3],
        [3, 0],
        [3, 0],
    ]
    clf = RNC(radius=1, outlier_label=1)
    clf.fit(X, y_multi)
    proba = clf.predict_proba([[7], [15]])
    assert_array_equal(proba[1][1, :], [0, 1, 0, 0])
    pred = clf.predict([[7], [15]])
    assert_array_equal(pred[1, :], [1, 1])
    # test multi output different outlier label
    y_multi = [
        [0, 0],
        [2, 2],
        [2, 2],
        [1, 1],
        [1, 1],
        [1, 1],
        [3, 3],
        [3, 3],
        [3, 3],
        [3, 3],
    ]
    clf = RNC(radius=1, outlier_label=[0, 1])
    clf.fit(X, y_multi)
    proba = clf.predict_proba([[7], [15]])
    assert_array_equal(proba[0][1, :], [1, 0, 0, 0])
    assert_array_equal(proba[1][1, :], [0, 1, 0, 0])
    pred = clf.predict([[7], [15]])
    assert_array_equal(pred[1, :], [0, 1])
    # test inconsistent outlier label list length
    def check_exception():
        clf = RNC(radius=1, outlier_label=[0, 1, 2])
        clf.fit(X, y_multi)
    with pytest.raises(ValueError):
        check_exception()
def test_radius_neighbors_classifier_zero_distance():
    """Radius classifier predicts correctly when a query point coincides
    exactly with a training sample (zero distance)."""
    X = np.array([[1.0, 1.0], [2.0, 2.0]])
    y = np.array([1, 2])
    # The second query point sits exactly on the second training sample.
    queries = np.array([[1.01, 1.01], [2.0, 2.0]])
    expected = np.array([1, 2])
    for algorithm in ALGORITHMS:
        for weights in ["uniform", "distance", _weight_func]:
            clf = neighbors.RadiusNeighborsClassifier(
                radius=0.1, weights=weights, algorithm=algorithm
            )
            clf.fit(X, y)
            # _weight_func emits an invalid-value warning on null distances
            # (np.inf weights); ignore it.
            with np.errstate(invalid="ignore"):
                assert_array_equal(expected, clf.predict(queries))
def test_neighbors_regressors_zero_distance():
# Test radius-based regressor, when distance to a sample is zero.
X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]])
y = np.array([1.0, 1.5, 2.0, 0.0])
radius = 0.2
z = np.array([[1.1, 1.1], [2.0, 2.0]])
rnn_correct_labels = np.array([1.25, 2.0])
knn_correct_unif = np.array([1.25, 1.0])
knn_correct_dist = np.array([1.25, 2.0])
for algorithm in ALGORITHMS:
# we don't test for weights=_weight_func since user will be expected
# to handle zero distances themselves in the function.
for weights in ["uniform", "distance"]:
rnn = neighbors.RadiusNeighborsRegressor(
radius=radius, weights=weights, algorithm=algorithm
)
rnn.fit(X, y)
assert_allclose(rnn_correct_labels, rnn.predict(z))
for weights, corr_labels in zip(
["uniform", "distance"], [knn_correct_unif, knn_correct_dist]
):
knn = neighbors.KNeighborsRegressor(
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/neighbors/tests/test_neighbors_tree.py | sklearn/neighbors/tests/test_neighbors_tree.py | # SPDX-License-Identifier: BSD-3-Clause
import itertools
import pickle
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_almost_equal
from sklearn.metrics import DistanceMetric
from sklearn.neighbors._ball_tree import (
BallTree,
kernel_norm,
)
from sklearn.neighbors._ball_tree import (
NeighborsHeap64 as NeighborsHeapBT,
)
from sklearn.neighbors._ball_tree import (
nodeheap_sort as nodeheap_sort_bt,
)
from sklearn.neighbors._ball_tree import (
simultaneous_sort as simultaneous_sort_bt,
)
from sklearn.neighbors._kd_tree import (
KDTree,
)
from sklearn.neighbors._kd_tree import (
NeighborsHeap64 as NeighborsHeapKDT,
)
from sklearn.neighbors._kd_tree import (
nodeheap_sort as nodeheap_sort_kdt,
)
from sklearn.neighbors._kd_tree import (
simultaneous_sort as simultaneous_sort_kdt,
)
from sklearn.utils import check_random_state
rng = np.random.RandomState(42)
# Random symmetric positive semi-definite matrix used as the parameter of the
# Mahalanobis metric below.
V_mahalanobis = rng.rand(3, 3)
V_mahalanobis = np.dot(V_mahalanobis, V_mahalanobis.T)
DIMENSION = 3
# Metric name -> extra keyword arguments passed to DistanceMetric.get_metric.
METRICS = {
    "euclidean": {},
    "manhattan": {},
    "minkowski": dict(p=3),
    "chebyshev": {},
    "seuclidean": dict(V=rng.random_sample(DIMENSION)),
    "mahalanobis": dict(V=V_mahalanobis),
}
# KDTree is only exercised with this subset; BallTree with all of METRICS.
KD_TREE_METRICS = ["euclidean", "manhattan", "chebyshev", "minkowski"]
BALL_TREE_METRICS = list(METRICS)
def dist_func(x1, x2, p):
    """Minkowski distance of order ``p`` between two vectors.

    Takes the absolute value of the coordinate differences so that odd ``p``
    does not produce NaN from raising a negative sum to a fractional power
    (the previous implementation returned NaN e.g. for p=3 with x1 < x2).
    """
    return np.sum(np.abs(x1 - x2) ** p) ** (1.0 / p)
def compute_kernel_slow(Y, X, kernel, h):
    """Brute-force kernel density evaluation used as ground truth in tests."""
    # Pairwise euclidean distances between query points Y and data X.
    d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
    norm = kernel_norm(h, X.shape[1], kernel)
    inside = d < h  # support mask for the compactly supported kernels
    if kernel == "gaussian":
        weights = np.exp(-0.5 * (d * d) / (h * h))
    elif kernel == "tophat":
        weights = inside
    elif kernel == "epanechnikov":
        weights = (1.0 - (d * d) / (h * h)) * inside
    elif kernel == "exponential":
        weights = np.exp(-d / h)
    elif kernel == "linear":
        weights = (1 - d / h) * inside
    elif kernel == "cosine":
        weights = np.cos(0.5 * np.pi * d / h) * inside
    else:
        raise ValueError("kernel not recognized")
    return norm * weights.sum(-1)
def brute_force_neighbors(X, Y, k, metric, **kwargs):
    """Reference k nearest neighbors via a full pairwise distance matrix."""
    dm = DistanceMetric.get_metric(metric, **kwargs)
    D = dm.pairwise(Y, X)
    ind = np.argsort(D, axis=1)[:, :k]
    rows = np.arange(Y.shape[0])[:, None]
    return D[rows, ind], ind
@pytest.mark.parametrize("Cls", [KDTree, BallTree])
@pytest.mark.parametrize(
    "kernel", ["gaussian", "tophat", "epanechnikov", "exponential", "linear", "cosine"]
)
@pytest.mark.parametrize("h", [0.01, 0.1, 1])
@pytest.mark.parametrize("rtol", [0, 1e-5])
@pytest.mark.parametrize("atol", [1e-6, 1e-2])
@pytest.mark.parametrize("breadth_first", [True, False])
def test_kernel_density(
    Cls, kernel, h, rtol, atol, breadth_first, n_samples=100, n_features=3
):
    """Tree-based kernel density estimates agree with the brute-force
    reference within the requested tolerances."""
    rng = check_random_state(1)
    X = rng.random_sample((n_samples, n_features))
    Y = rng.random_sample((n_samples, n_features))
    expected_density = compute_kernel_slow(Y, X, kernel, h)
    tree = Cls(X, leaf_size=10)
    density = tree.kernel_density(
        Y, h, atol=atol, rtol=rtol, kernel=kernel, breadth_first=breadth_first
    )
    assert_allclose(density, expected_density, atol=atol, rtol=max(rtol, 1e-7))
@pytest.mark.parametrize("Cls", [KDTree, BallTree])
def test_neighbor_tree_query_radius(Cls, n_samples=100, n_features=10):
    """query_radius returns exactly the points within the given radius."""
    rng = check_random_state(0)
    X = 2 * rng.random_sample(size=(n_samples, n_features)) - 1
    query_pt = np.zeros(n_features, dtype=float)
    eps = 1e-15  # roundoff error can cause test to fail
    tree = Cls(X, leaf_size=5)
    rad = np.sqrt(((X - query_pt) ** 2).sum(1))
    for r in np.linspace(rad[0], rad[-1], 100):
        found = np.sort(tree.query_radius([query_pt], r + eps)[0])
        expected = np.sort(np.where(rad <= r + eps)[0])
        assert_array_almost_equal(expected, found)
@pytest.mark.parametrize("Cls", [KDTree, BallTree])
def test_neighbor_tree_query_radius_distance(Cls, n_samples=100, n_features=10):
    """Distances returned by query_radius match independently recomputed
    euclidean distances to the returned points."""
    rng = check_random_state(0)
    X = 2 * rng.random_sample(size=(n_samples, n_features)) - 1
    query_pt = np.zeros(n_features, dtype=float)
    eps = 1e-15  # roundoff error can cause test to fail
    tree = Cls(X, leaf_size=5)
    rad = np.sqrt(((X - query_pt) ** 2).sum(1))
    for r in np.linspace(rad[0], rad[-1], 100):
        ind, dist = tree.query_radius([query_pt], r + eps, return_distance=True)
        recomputed = np.sqrt(((query_pt - X[ind[0]]) ** 2).sum(1))
        assert_array_almost_equal(recomputed, dist[0])
@pytest.mark.parametrize("Cls", [KDTree, BallTree])
@pytest.mark.parametrize("dualtree", (True, False))
def test_neighbor_tree_two_point(Cls, dualtree, n_samples=100, n_features=3):
    """two_point_correlation matches a brute-force count of pairs within r."""
    rng = check_random_state(0)
    X = rng.random_sample((n_samples, n_features))
    Y = rng.random_sample((n_samples, n_features))
    radii = np.linspace(0, 1, 10)
    tree = Cls(X, leaf_size=10)
    D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
    expected_counts = [np.sum(D <= radius) for radius in radii]
    counts = tree.two_point_correlation(Y, r=radii, dualtree=dualtree)
    assert_array_almost_equal(counts, expected_counts)
@pytest.mark.parametrize("NeighborsHeap", [NeighborsHeapBT, NeighborsHeapKDT])
def test_neighbors_heap(NeighborsHeap, n_pts=5, n_nbrs=10):
    """Pushing 2*n_nbrs candidates keeps only the n_nbrs smallest, sorted."""
    heap = NeighborsHeap(n_pts, n_nbrs)
    rng = check_random_state(0)
    for row in range(n_pts):
        cand_dist = rng.random_sample(2 * n_nbrs).astype(np.float64, copy=False)
        cand_ind = np.arange(2 * n_nbrs, dtype=np.intp)
        for d, i in zip(cand_dist, cand_ind):
            heap.push(row, d, i)
        # Reference: candidates sorted by distance with numpy.
        order = np.argsort(cand_dist)
        cand_dist = cand_dist[order]
        cand_ind = cand_ind[order]
        d_heap, i_heap = heap.get_arrays(sort=True)
        assert_array_almost_equal(cand_dist[:n_nbrs], d_heap[row])
        assert_array_almost_equal(cand_ind[:n_nbrs], i_heap[row])
@pytest.mark.parametrize("nodeheap_sort", [nodeheap_sort_bt, nodeheap_sort_kdt])
def test_node_heap(nodeheap_sort, n_nodes=50):
    """nodeheap_sort orders values and indices like np.argsort."""
    rng = check_random_state(0)
    vals = rng.random_sample(n_nodes).astype(np.float64, copy=False)
    expected_order = np.argsort(vals)
    sorted_vals, order = nodeheap_sort(vals)
    assert_array_almost_equal(expected_order, order)
    assert_array_almost_equal(vals[expected_order], sorted_vals)
@pytest.mark.parametrize(
    "simultaneous_sort", [simultaneous_sort_bt, simultaneous_sort_kdt]
)
def test_simultaneous_sort(simultaneous_sort, n_rows=10, n_pts=201):
    """simultaneous_sort matches a row-wise numpy argsort of both arrays."""
    rng = check_random_state(0)
    dist = rng.random_sample((n_rows, n_pts)).astype(np.float64, copy=False)
    ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(np.intp, copy=False)
    # Reference result computed with numpy (fancy indexing copies, so the
    # originals are untouched here).
    order = np.argsort(dist, axis=1)
    rows = np.arange(n_rows)[:, None]
    expected_dist = dist[rows, order]
    expected_ind = ind[rows, order]
    # In-place simultaneous sort under test.
    simultaneous_sort(dist, ind)
    assert_array_almost_equal(dist, expected_dist)
    assert_array_almost_equal(ind, expected_ind)
@pytest.mark.parametrize("Cls", [KDTree, BallTree])
def test_gaussian_kde(Cls, n_samples=1000):
    """Compare gaussian KDE results to scipy.stats.gaussian_kde."""
    from scipy.stats import gaussian_kde

    rng = check_random_state(0)
    x_in = rng.normal(0, 1, n_samples)
    x_out = np.linspace(-5, 5, 30)
    for bandwidth in [0.01, 0.1, 1]:
        tree = Cls(x_in[:, None])
        reference = gaussian_kde(x_in, bw_method=bandwidth / np.std(x_in))
        dens_tree = tree.kernel_density(x_out[:, None], bandwidth) / n_samples
        assert_array_almost_equal(dens_tree, reference.evaluate(x_out), decimal=3)
@pytest.mark.parametrize(
    "Cls, metric",
    itertools.chain(
        [(KDTree, metric) for metric in KD_TREE_METRICS],
        [(BallTree, metric) for metric in BALL_TREE_METRICS],
    ),
)
@pytest.mark.parametrize("k", (1, 3, 5))
@pytest.mark.parametrize("dualtree", (True, False))
@pytest.mark.parametrize("breadth_first", (True, False))
def test_nn_tree_query(Cls, metric, k, dualtree, breadth_first):
    """Tree queries return the same neighbor distances as brute force."""
    rng = check_random_state(0)
    X = rng.random_sample((40, DIMENSION))
    Y = rng.random_sample((10, DIMENSION))
    kwargs = METRICS[metric]
    tree = Cls(X, leaf_size=1, metric=metric, **kwargs)
    dist_tree, _ = tree.query(Y, k, dualtree=dualtree, breadth_first=breadth_first)
    dist_brute, _ = brute_force_neighbors(X, Y, k, metric, **kwargs)
    # don't check indices here: if there are any duplicate distances,
    # the indices may not match. Distances should not have this problem.
    assert_array_almost_equal(dist_tree, dist_brute)
@pytest.mark.parametrize(
    "Cls, metric",
    [(KDTree, "euclidean"), (BallTree, "euclidean"), (BallTree, dist_func)],
)
@pytest.mark.parametrize("protocol", (0, 1, 2))
def test_pickle(Cls, metric, protocol):
    """A pickled/unpickled tree answers queries identically to the original."""
    rng = check_random_state(0)
    X = rng.random_sample((10, 3))
    # The user-defined metric callable (dist_func) needs its ``p`` argument;
    # ``callable`` replaces the old ``hasattr(metric, "__call__")`` idiom.
    kwargs = {"p": 2} if callable(metric) else {}
    tree1 = Cls(X, leaf_size=1, metric=metric, **kwargs)
    ind1, dist1 = tree1.query(X)
    tree2 = pickle.loads(pickle.dumps(tree1, protocol=protocol))
    ind2, dist2 = tree2.query(X)
    assert_array_almost_equal(ind1, ind2)
    assert_array_almost_equal(dist1, dist2)
    assert isinstance(tree2, Cls)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/neighbors/tests/test_graph.py | sklearn/neighbors/tests/test_graph.py | import numpy as np
import pytest
from sklearn.metrics import euclidean_distances
from sklearn.neighbors import KNeighborsTransformer, RadiusNeighborsTransformer
from sklearn.neighbors._base import _is_sorted_by_data
from sklearn.utils._testing import assert_array_equal
def test_transformer_result():
    """Check shape, stored-entry count, CSR format and sortedness of the
    graphs produced by the neighbor transformers."""
    n_neighbors = 5
    n_samples_fit = 20
    n_queries = 18
    n_features = 10
    rng = np.random.RandomState(42)
    X = rng.randn(n_samples_fit, n_features)
    X2 = rng.randn(n_queries, n_features)
    radius = np.percentile(euclidean_distances(X), 10)
    # KNeighborsTransformer: a fixed number of entries per row ('distance'
    # mode stores one extra entry per row, per the counts asserted below).
    for mode in ["distance", "connectivity"]:
        n_per_row = n_neighbors + (1 if mode == "distance" else 0)
        nnt = KNeighborsTransformer(n_neighbors=n_neighbors, mode=mode)
        Xt = nnt.fit_transform(X)
        assert Xt.shape == (n_samples_fit, n_samples_fit)
        assert Xt.data.shape == (n_samples_fit * n_per_row,)
        assert Xt.format == "csr"
        assert _is_sorted_by_data(Xt)
        X2t = nnt.transform(X2)
        assert X2t.shape == (n_queries, n_samples_fit)
        assert X2t.data.shape == (n_queries * n_per_row,)
        assert X2t.format == "csr"
        assert _is_sorted_by_data(X2t)
    # RadiusNeighborsTransformer: a variable number of entries per row, so
    # the k-neighbors entry count must NOT match.
    for mode in ["distance", "connectivity"]:
        n_per_row = n_neighbors + (1 if mode == "distance" else 0)
        nnt = RadiusNeighborsTransformer(radius=radius, mode=mode)
        Xt = nnt.fit_transform(X)
        assert Xt.shape == (n_samples_fit, n_samples_fit)
        assert Xt.data.shape != (n_samples_fit * n_per_row,)
        assert Xt.format == "csr"
        assert _is_sorted_by_data(Xt)
        X2t = nnt.transform(X2)
        assert X2t.shape == (n_queries, n_samples_fit)
        assert X2t.data.shape != (n_queries * n_per_row,)
        assert X2t.format == "csr"
        assert _is_sorted_by_data(X2t)
def _has_explicit_diagonal(X):
"""Return True if the diagonal is explicitly stored"""
X = X.tocoo()
explicit = X.row[X.row == X.col]
return len(explicit) == X.shape[0]
def test_explicit_diagonal():
    """The graph of the fitted data stores an explicit zero diagonal; the
    graph of unrelated query points generally does not."""
    n_neighbors = 5
    n_samples_fit, n_samples_transform, n_features = 20, 18, 10
    rng = np.random.RandomState(42)
    X = rng.randn(n_samples_fit, n_features)
    X2 = rng.randn(n_samples_transform, n_features)
    nnt = KNeighborsTransformer(n_neighbors=n_neighbors)
    for Xt in (nnt.fit_transform(X), nnt.transform(X)):
        assert _has_explicit_diagonal(Xt)
        # Each row's first stored value is the zero self-distance.
        assert np.all(Xt.data.reshape(n_samples_fit, n_neighbors + 1)[:, 0] == 0)
    # Using transform on new data should not always have zero diagonal
    assert not _has_explicit_diagonal(nnt.transform(X2))
@pytest.mark.parametrize("Klass", [KNeighborsTransformer, RadiusNeighborsTransformer])
def test_graph_feature_names_out(Klass):
    """Check `get_feature_names_out` for transformers defined in `_graph.py`."""
    n_samples_fit = 20
    n_features = 10
    rng = np.random.RandomState(42)
    X = rng.randn(n_samples_fit, n_features)
    est = Klass().fit(X)
    prefix = Klass.__name__.lower()
    expected = np.array(
        [f"{prefix}{i}" for i in range(est.n_samples_fit_)], dtype=object
    )
    assert_array_equal(est.get_feature_names_out(), expected)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/neighbors/tests/__init__.py | sklearn/neighbors/tests/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/neighbors/tests/test_nca.py | sklearn/neighbors/tests/test_nca.py | """
Testing for Neighborhood Component Analysis module (sklearn.neighbors.nca)
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import re
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal, assert_array_equal
from scipy.optimize import check_grad
from sklearn import clone
from sklearn.datasets import load_iris, make_blobs, make_classification
from sklearn.exceptions import ConvergenceWarning
from sklearn.metrics import pairwise_distances
from sklearn.neighbors import NeighborhoodComponentsAnalysis
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import check_random_state
from sklearn.utils.validation import validate_data
rng = check_random_state(0)
# Load and shuffle the iris dataset.
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris_data = iris.data[perm]
iris_target = iris.target[perm]
# Avoid having test data introducing dependencies between tests.
iris_data.flags.writeable = False
iris_target.flags.writeable = False
# Machine epsilon for float64, used as a numerical tolerance baseline.
EPS = np.finfo(float).eps
def test_simple_example():
    """Test on a simple example.

    Puts four points in the input space where the opposite labels points are
    next to each other. After transform the samples from the same class
    should be next to each other.
    """
    X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]])
    y = np.array([1, 0, 1, 0])
    nca = NeighborhoodComponentsAnalysis(
        n_components=2, init="identity", random_state=42
    )
    X_embedded = nca.fit(X, y).transform(X)
    # For each sample, the nearest other sample must carry the same label.
    nearest = pairwise_distances(X_embedded).argsort()[:, 1]
    assert_array_equal(nearest, np.array([2, 3, 0, 1]))
def test_toy_example_collapse_points():
    """Test on a toy example of three points that should collapse

    We build a simple example: two points from the same class and a point from
    a different class in the middle of them. On this simple example, the new
    (transformed) points should all collapse into one single point. Indeed, the
    objective is 2/(1 + exp(d/2)), with d the euclidean distance between the
    two samples from the same class. This is maximized for d=0 (because d>=0),
    with an objective equal to 1 (loss=-1.).
    """
    rng = np.random.RandomState(42)
    input_dim = 5
    two_points = rng.randn(2, input_dim)
    X = np.vstack([two_points, two_points.mean(axis=0)[np.newaxis, :]])
    y = [0, 0, 1]

    class LossStorer:
        def __init__(self, X, y):
            self.loss = np.inf  # initialize the loss to very high
            # Initialize a fake NCA and variables needed to compute the loss:
            self.fake_nca = NeighborhoodComponentsAnalysis()
            self.fake_nca.n_iter_ = np.inf
            self.X, y = validate_data(self.fake_nca, X, y, ensure_min_samples=2)
            y = LabelEncoder().fit_transform(y)
            self.same_class_mask = y[:, np.newaxis] == y[np.newaxis, :]

        def callback(self, transformation, n_iter):
            """Stores the last value of the loss function"""
            self.loss, _ = self.fake_nca._loss_grad_lbfgs(
                transformation, self.X, self.same_class_mask, -1.0
            )

    loss_storer = LossStorer(X, y)
    nca = NeighborhoodComponentsAnalysis(random_state=42, callback=loss_storer.callback)
    # A leftover debug ``print(X_t)`` was removed here: tests should not
    # write to stdout.
    X_t = nca.fit_transform(X, y)
    # test that points are collapsed into one point
    assert_array_almost_equal(X_t - X_t[0], 0.0)
    assert abs(loss_storer.loss + 1) < 1e-10
def test_finite_differences(global_random_seed):
    """Test gradient of loss function

    Assert that the gradient is almost equal to its finite differences
    approximation.
    """
    # Initialize the transformation `M`, as well as `X`, `y` and the NCA.
    rng = np.random.RandomState(global_random_seed)
    X, y = make_classification(random_state=global_random_seed)
    M = rng.randn(rng.randint(1, X.shape[1] + 1), X.shape[1])
    nca = NeighborhoodComponentsAnalysis()
    nca.n_iter_ = 0
    mask = y[:, np.newaxis] == y[np.newaxis, :]

    def loss_grad(flat_M):
        return nca._loss_grad_lbfgs(flat_M, X, mask)

    # compare the gradient to a finite difference approximation
    diff = check_grad(
        lambda m: loss_grad(m)[0], lambda m: loss_grad(m)[1], M.ravel()
    )
    assert diff == pytest.approx(0.0, abs=1e-4)
def test_params_validation():
    """Invalid `init` and `n_components` values raise informative errors."""
    X = np.arange(12).reshape(4, 3)
    y = [1, 1, 2, 2]
    rng = np.random.RandomState(42)
    # An init matrix with more rows (output dims) than columns (input dims).
    init = rng.rand(5, 3)
    expected_msg = (
        f"The output dimensionality ({init.shape[0]}) "
        "of the given linear transformation `init` cannot be "
        f"greater than its input dimensionality ({init.shape[1]})."
    )
    with pytest.raises(ValueError, match=re.escape(expected_msg)):
        NeighborhoodComponentsAnalysis(init=init).fit(X, y)
    # n_components larger than the number of features.
    n_components = 10
    expected_msg = (
        "The preferred dimensionality of the projected space "
        f"`n_components` ({n_components}) cannot be greater "
        f"than the given data dimensionality ({X.shape[1]})!"
    )
    with pytest.raises(ValueError, match=re.escape(expected_msg)):
        NeighborhoodComponentsAnalysis(n_components=n_components).fit(X, y)
def test_transformation_dimensions():
    """`init` matrices with incompatible dimensions are rejected."""
    X = np.arange(12).reshape(4, 3)
    y = [1, 1, 2, 2]
    # Input dimension of the transformation (2) != data dimensionality (3).
    with pytest.raises(ValueError):
        NeighborhoodComponentsAnalysis(init=np.array([[1, 2], [3, 4]])).fit(X, y)
    # Output dimension (3 rows) larger than input dimension (2 columns).
    with pytest.raises(ValueError):
        NeighborhoodComponentsAnalysis(init=np.array([[1, 2], [3, 4], [5, 6]])).fit(
            X, y
        )
    # A square 3x3 transformation matching the data dimensionality is valid.
    NeighborhoodComponentsAnalysis(init=np.arange(9).reshape(3, 3)).fit(X, y)
def test_n_components():
    """Consistency checks between `n_components`, `init` and the data."""
    rng = np.random.RandomState(42)
    X = np.arange(12).reshape(4, 3)
    y = [1, 1, 2, 2]
    init = rng.rand(X.shape[1] - 1, 3)

    # n_components does not match the output dimensionality of init.
    n_components = X.shape[1]
    expected_msg = (
        "The preferred dimensionality of the projected space "
        f"`n_components` ({n_components}) does not match the output "
        "dimensionality of the given linear transformation "
        f"`init` ({init.shape[0]})!"
    )
    with pytest.raises(ValueError, match=re.escape(expected_msg)):
        NeighborhoodComponentsAnalysis(init=init, n_components=n_components).fit(X, y)

    # n_components larger than the data dimensionality.
    n_components = X.shape[1] + 2
    expected_msg = (
        "The preferred dimensionality of the projected space "
        f"`n_components` ({n_components}) cannot be greater than "
        f"the given data dimensionality ({X.shape[1]})!"
    )
    with pytest.raises(ValueError, match=re.escape(expected_msg)):
        NeighborhoodComponentsAnalysis(init=init, n_components=n_components).fit(X, y)

    # n_components smaller than the data dimensionality is accepted.
    NeighborhoodComponentsAnalysis(n_components=2, init="identity").fit(X, y)
def test_init_transformation():
    """All supported `init` values fit successfully; shape-inconsistent
    `init` matrices raise the documented errors."""
    rng = np.random.RandomState(42)
    X, y = make_blobs(n_samples=30, centers=6, n_features=5, random_state=0)
    # Start learning from scratch
    nca = NeighborhoodComponentsAnalysis(init="identity")
    nca.fit(X, y)
    # Initialize with random
    nca_random = NeighborhoodComponentsAnalysis(init="random")
    nca_random.fit(X, y)
    # Initialize with auto
    nca_auto = NeighborhoodComponentsAnalysis(init="auto")
    nca_auto.fit(X, y)
    # Initialize with PCA
    nca_pca = NeighborhoodComponentsAnalysis(init="pca")
    nca_pca.fit(X, y)
    # Initialize with LDA
    nca_lda = NeighborhoodComponentsAnalysis(init="lda")
    nca_lda.fit(X, y)
    # A square numpy array of the right shape is a valid init.
    init = rng.rand(X.shape[1], X.shape[1])
    nca = NeighborhoodComponentsAnalysis(init=init)
    nca.fit(X, y)
    # init.shape[1] must match X.shape[1]
    init = rng.rand(X.shape[1], X.shape[1] + 1)
    nca = NeighborhoodComponentsAnalysis(init=init)
    msg = (
        f"The input dimensionality ({init.shape[1]}) of the given "
        "linear transformation `init` must match the "
        f"dimensionality of the given inputs `X` ({X.shape[1]})."
    )
    with pytest.raises(ValueError, match=re.escape(msg)):
        nca.fit(X, y)
    # init.shape[0] must be <= init.shape[1]
    init = rng.rand(X.shape[1] + 1, X.shape[1])
    nca = NeighborhoodComponentsAnalysis(init=init)
    msg = (
        f"The output dimensionality ({init.shape[0]}) of the given "
        "linear transformation `init` cannot be "
        f"greater than its input dimensionality ({init.shape[1]})."
    )
    with pytest.raises(ValueError, match=re.escape(msg)):
        nca.fit(X, y)
    # init.shape[0] must match n_components
    init = rng.rand(X.shape[1], X.shape[1])
    n_components = X.shape[1] - 2
    nca = NeighborhoodComponentsAnalysis(init=init, n_components=n_components)
    msg = (
        "The preferred dimensionality of the "
        f"projected space `n_components` ({n_components}) "
        "does not match the output dimensionality of the given "
        f"linear transformation `init` ({init.shape[0]})!"
    )
    with pytest.raises(ValueError, match=re.escape(msg)):
        nca.fit(X, y)
@pytest.mark.parametrize("n_samples", [3, 5, 7, 11])
@pytest.mark.parametrize("n_features", [3, 5, 7, 11])
@pytest.mark.parametrize("n_classes", [5, 7, 11])
@pytest.mark.parametrize("n_components", [3, 5, 7, 11])
def test_auto_init(n_samples, n_features, n_classes, n_components):
    # Test that auto choose the init as expected with every configuration
    # of order of n_samples, n_features, n_classes and n_components.
    # The explicit-init fits below are expected to match the choice made by
    # init='auto' (assumed to mirror NeighborhoodComponentsAnalysis's
    # internal selection rule — verify against the estimator if it changes).
    rng = np.random.RandomState(42)
    nca_base = NeighborhoodComponentsAnalysis(
        init="auto", n_components=n_components, max_iter=1, random_state=rng
    )
    if n_classes >= n_samples:
        pass
        # n_classes > n_samples is impossible, and n_classes == n_samples
        # throws an error from lda but is an absurd case
    else:
        X = rng.randn(n_samples, n_features)
        # Cycle through the class labels to get n_samples labels.
        y = np.tile(range(n_classes), n_samples // n_classes + 1)[:n_samples]
        if n_components > n_features:
            # this would return a ValueError, which is already tested in
            # test_params_validation
            pass
        else:
            nca = clone(nca_base)
            nca.fit(X, y)
            if n_components <= min(n_classes - 1, n_features):
                nca_other = clone(nca_base).set_params(init="lda")
            elif n_components < min(n_features, n_samples):
                nca_other = clone(nca_base).set_params(init="pca")
            else:
                nca_other = clone(nca_base).set_params(init="identity")
            nca_other.fit(X, y)
            assert_array_almost_equal(nca.components_, nca_other.components_)
def test_warm_start_validation():
    """Refitting a warm-started NCA on data with fewer features must raise."""
    shared_params = dict(n_samples=30, n_classes=4, n_redundant=0, random_state=0)
    X, y = make_classification(n_features=5, n_informative=5, **shared_params)
    nca = NeighborhoodComponentsAnalysis(warm_start=True, max_iter=5)
    nca.fit(X, y)

    # Same task but with one feature fewer: incompatible with the stored
    # transformation.
    X_less_features, y = make_classification(
        n_features=4, n_informative=4, **shared_params
    )
    msg = (
        f"The new inputs dimensionality ({X_less_features.shape[1]}) "
        "does not match the input dimensionality of the previously learned "
        f"transformation ({nca.components_.shape[1]})."
    )
    with pytest.raises(ValueError, match=re.escape(msg)):
        nca.fit(X_less_features, y)
def test_warm_start_effectiveness():
    # A 1-iteration second fit on same data should give almost same result
    # with warm starting, and quite different result without warm starting.
    def _change_after_single_iter_refit(warm_start):
        # Fit to convergence, then refit for one iteration and report how far
        # the learned transformation moved (elementwise L1 distance).
        nca = NeighborhoodComponentsAnalysis(warm_start=warm_start, random_state=0)
        nca.fit(iris_data, iris_target)
        before = nca.components_
        nca.max_iter = 1
        nca.fit(iris_data, iris_target)
        return np.sum(np.abs(nca.components_ - before))

    diff_warm = _change_after_single_iter_refit(warm_start=True)
    diff_cold = _change_after_single_iter_refit(warm_start=False)

    assert diff_warm < 3.0, (
        "Transformer changed significantly after one "
        "iteration even though it was warm-started."
    )
    assert diff_cold > diff_warm, (
        "Cold-started transformer changed less "
        "significantly than warm-started "
        "transformer after one iteration."
    )
@pytest.mark.parametrize(
    "init_name", ["pca", "lda", "identity", "random", "precomputed"]
)
def test_verbose(init_name, capsys):
    # assert there is proper output when verbose = 1, for every initialization
    # except auto because auto will call one of the others
    rng = np.random.RandomState(42)
    X, y = make_blobs(n_samples=30, centers=6, n_features=5, random_state=0)
    # Timing suffix printed by the pca/lda init steps, e.g. "... done in  0.01s".
    regexp_init = r"... done in \ *\d+\.\d{2}s"
    msgs = {
        "pca": "Finding principal components" + regexp_init,
        "lda": "Finding most discriminative components" + regexp_init,
    }
    if init_name == "precomputed":
        # Precomputed init: an explicit square transformation matrix.
        init = rng.randn(X.shape[1], X.shape[1])
    else:
        init = init_name
    nca = NeighborhoodComponentsAnalysis(verbose=1, init=init)
    nca.fit(X, y)
    out, _ = capsys.readouterr()

    # check output line by line
    lines = re.split("\n+", out)
    # if pca or lda init, an additional line is printed, so we test
    # it and remove it to test the rest equally among initializations
    if init_name in ["pca", "lda"]:
        assert re.match(msgs[init_name], lines[0])
        lines = lines[1:]
    # Fixed header: banner, column titles, separator row.
    assert lines[0] == "[NeighborhoodComponentsAnalysis]"
    header = "{:>10} {:>20} {:>10}".format("Iteration", "Objective Value", "Time(s)")
    assert lines[1] == "[NeighborhoodComponentsAnalysis] {}".format(header)
    assert lines[2] == "[NeighborhoodComponentsAnalysis] {}".format("-" * len(header))
    for line in lines[3:-2]:
        # Per-iteration rows. The regex matches for instance:
        # '[NeighborhoodComponentsAnalysis]   0   6.988936e+01   0.01'
        assert re.match(
            r"\[NeighborhoodComponentsAnalysis\] *\d+ *\d\.\d{6}e"
            r"[+|-]\d+\ *\d+\.\d{2}",
            line,
        )
    # Footer: total training time, then the empty string left by the final
    # newline after re.split.
    assert re.match(
        r"\[NeighborhoodComponentsAnalysis\] Training took\ *\d+\.\d{2}s\.",
        lines[-2],
    )
    assert lines[-1] == ""
def test_no_verbose(capsys):
    """By default (verbose=0), fitting must print nothing to stdout."""
    NeighborhoodComponentsAnalysis().fit(iris_data, iris_target)
    captured, _ = capsys.readouterr()
    assert captured == ""
def test_singleton_class():
    # NCA must handle classes represented by a single sample.
    X = iris_data.copy()
    y = iris_target.copy()

    # one singleton class: move all but one sample of class 1 into class 2
    singleton_class = 1
    (ind_singleton,) = np.where(y == singleton_class)
    y[ind_singleton] = 2
    y[ind_singleton[0]] = singleton_class
    nca = NeighborhoodComponentsAnalysis(max_iter=30)
    nca.fit(X, y)

    # One non-singleton class: classes 1 and 2 each keep a single sample,
    # everything else is relabeled into class 0.
    (ind_1,) = np.where(y == 1)
    (ind_2,) = np.where(y == 2)
    y[ind_1] = 0
    y[ind_1[0]] = 1
    y[ind_2] = 0
    y[ind_2[0]] = 2
    nca = NeighborhoodComponentsAnalysis(max_iter=30)
    nca.fit(X, y)

    # Only singleton classes: keep exactly one sample per class. With the
    # identity init the data must come back unchanged from transform
    # (presumably there are no same-class pairs to optimize — see NCA loss).
    (ind_0,) = np.where(y == 0)
    (ind_1,) = np.where(y == 1)
    (ind_2,) = np.where(y == 2)
    X = X[[ind_0[0], ind_1[0], ind_2[0]]]
    y = y[[ind_0[0], ind_1[0], ind_2[0]]]
    nca = NeighborhoodComponentsAnalysis(init="identity", max_iter=30)
    nca.fit(X, y)
    assert_array_equal(X, nca.transform(X))
def test_one_class():
    """With a single class and identity init, NCA leaves the data unchanged."""
    mask = iris_target == 0
    X_single, y_single = iris_data[mask], iris_target[mask]
    nca = NeighborhoodComponentsAnalysis(
        max_iter=30, n_components=X_single.shape[1], init="identity"
    )
    nca.fit(X_single, y_single)
    assert_array_equal(X_single, nca.transform(X_single))
def test_callback(capsys):
    """The user-supplied callback is called at every optimizer iteration."""
    max_iter = 10

    def checking_callback(transformation, n_iter):
        # The optimizer hands over the raveled transformation and the
        # current iteration number.
        assert transformation.shape == (iris_data.shape[1] ** 2,)
        rem_iter = max_iter - n_iter
        print("{} iterations remaining...".format(rem_iter))

    nca = NeighborhoodComponentsAnalysis(
        max_iter=max_iter, callback=checking_callback, verbose=1
    )
    nca.fit(iris_data, iris_target)
    out, _ = capsys.readouterr()
    # The message printed on the last iteration proves the callback ran.
    assert "{} iterations remaining...".format(max_iter - 1) in out
def test_expected_transformation_shape():
    """Test that the transformation has the expected shape."""
    X = iris_data
    y = iris_target

    class TransformationStorer:
        # Helper capturing the transformation handed to the optimizer callback.
        def __init__(self, X, y):
            # Initialize a fake NCA and variables needed to call the loss
            # function:
            self.fake_nca = NeighborhoodComponentsAnalysis()
            self.fake_nca.n_iter_ = np.inf
            self.X, y = validate_data(self.fake_nca, X, y, ensure_min_samples=2)
            y = LabelEncoder().fit_transform(y)
            self.same_class_mask = y[:, np.newaxis] == y[np.newaxis, :]

        def callback(self, transformation, n_iter):
            """Stores the last value of the transformation taken as input by
            the optimizer"""
            self.transformation = transformation

    transformation_storer = TransformationStorer(X, y)
    cb = transformation_storer.callback
    nca = NeighborhoodComponentsAnalysis(max_iter=5, callback=cb)
    nca.fit(X, y)
    # The optimizer works on a flattened (n_features ** 2,) parameter vector.
    assert transformation_storer.transformation.size == X.shape[1] ** 2
def test_convergence_warning():
    """A ConvergenceWarning naming the class is raised when max_iter is too
    small for the optimizer to converge."""
    nca = NeighborhoodComponentsAnalysis(max_iter=2, verbose=1)
    expected = "[{}] NCA did not converge".format(type(nca).__name__)
    with pytest.warns(ConvergenceWarning, match=re.escape(expected)):
        nca.fit(iris_data, iris_target)
@pytest.mark.parametrize(
    "param, value",
    [
        ("n_components", np.int32(3)),
        ("max_iter", np.int32(100)),
        ("tol", np.float32(0.0001)),
    ],
)
def test_parameters_valid_types(param, value):
    """Numpy scalar types must be accepted for int/float parameters."""
    nca = NeighborhoodComponentsAnalysis(**{param: value})
    nca.fit(iris_data, iris_target)
@pytest.mark.parametrize("n_components", [None, 2])
def test_nca_feature_names_out(n_components):
"""Check `get_feature_names_out` for `NeighborhoodComponentsAnalysis`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/28293
"""
X = iris_data
y = iris_target
est = NeighborhoodComponentsAnalysis(n_components=n_components).fit(X, y)
names_out = est.get_feature_names_out()
class_name_lower = est.__class__.__name__.lower()
if n_components is not None:
expected_n_features = n_components
else:
expected_n_features = X.shape[1]
expected_names_out = np.array(
[f"{class_name_lower}{i}" for i in range(expected_n_features)],
dtype=object,
)
assert_array_equal(names_out, expected_names_out)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/neighbors/tests/test_ball_tree.py | sklearn/neighbors/tests/test_ball_tree.py | import itertools
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_almost_equal, assert_equal
from sklearn.neighbors._ball_tree import BallTree, BallTree32, BallTree64
from sklearn.utils import check_random_state
from sklearn.utils._testing import _convert_container
from sklearn.utils.validation import check_array
# Module-level fixtures shared by the ball tree tests below.
rng = np.random.RandomState(10)
V_mahalanobis = rng.rand(3, 3)
# Symmetrize (A @ A.T) so the matrix is a valid Mahalanobis covariance.
V_mahalanobis = np.dot(V_mahalanobis, V_mahalanobis.T)

DIMENSION = 3

# Real-valued metrics with their extra keyword arguments.
METRICS = {
    "euclidean": {},
    "manhattan": {},
    "minkowski": dict(p=3),
    "chebyshev": {},
}

# Metrics meant for integer-valued vectors.
DISCRETE_METRICS = ["hamming", "canberra", "braycurtis"]

# Metrics meant for boolean (0/1) vectors.
BOOLEAN_METRICS = [
    "jaccard",
    "dice",
    "rogerstanimoto",
    "russellrao",
    "sokalmichener",
    "sokalsneath",
]

# Both dtype specializations of the ball tree under test.
BALL_TREE_CLASSES = [
    BallTree64,
    BallTree32,
]
def brute_force_neighbors(X, Y, k, metric, **kwargs):
    """Reference k-NN: full pairwise distances, then per-row argsort."""
    from sklearn.metrics import DistanceMetric

    X, Y = check_array(X), check_array(Y)
    pairwise_dist = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
    ind = np.argsort(pairwise_dist, axis=1)[:, :k]
    row_index = np.arange(Y.shape[0])[:, None]
    dist = pairwise_dist[row_index, ind]
    return dist, ind
def test_BallTree_is_BallTree64_subclass():
    # The public BallTree name must remain a subclass of the 64-bit
    # implementation (backward-compatibility of the dtype split).
    assert issubclass(BallTree, BallTree64)
@pytest.mark.parametrize("metric", itertools.chain(BOOLEAN_METRICS, DISCRETE_METRICS))
@pytest.mark.parametrize("array_type", ["list", "array"])
@pytest.mark.parametrize("BallTreeImplementation", BALL_TREE_CLASSES)
def test_ball_tree_query_metrics(metric, array_type, BallTreeImplementation):
rng = check_random_state(0)
if metric in BOOLEAN_METRICS:
X = rng.random_sample((40, 10)).round(0)
Y = rng.random_sample((10, 10)).round(0)
elif metric in DISCRETE_METRICS:
X = (4 * rng.random_sample((40, 10))).round(0)
Y = (4 * rng.random_sample((10, 10))).round(0)
X = _convert_container(X, array_type)
Y = _convert_container(Y, array_type)
k = 5
bt = BallTreeImplementation(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
@pytest.mark.parametrize(
    "BallTreeImplementation, decimal_tol", zip(BALL_TREE_CLASSES, [6, 5])
)
def test_query_haversine(BallTreeImplementation, decimal_tol):
    """Haversine tree queries agree with brute force (looser tol for float32)."""
    rng = check_random_state(0)
    angles = 2 * np.pi * rng.random_sample((40, 2))
    tree = BallTreeImplementation(angles, leaf_size=1, metric="haversine")
    dist_tree, ind_tree = tree.query(angles, k=5)
    dist_ref, ind_ref = brute_force_neighbors(angles, angles, k=5, metric="haversine")
    assert_array_almost_equal(dist_tree, dist_ref, decimal=decimal_tol)
    assert_array_almost_equal(ind_tree, ind_ref)
@pytest.mark.parametrize("BallTreeImplementation", BALL_TREE_CLASSES)
def test_array_object_type(BallTreeImplementation):
"""Check that we do not accept object dtype array."""
X = np.array([(1, 2, 3), (2, 5), (5, 5, 1, 2)], dtype=object)
with pytest.raises(ValueError, match="setting an array element with a sequence"):
BallTreeImplementation(X)
@pytest.mark.parametrize("BallTreeImplementation", BALL_TREE_CLASSES)
def test_bad_pyfunc_metric(BallTreeImplementation):
def wrong_returned_value(x, y):
return "1"
def one_arg_func(x):
return 1.0 # pragma: no cover
X = np.ones((5, 2))
msg = "Custom distance function must accept two vectors and return a float."
with pytest.raises(TypeError, match=msg):
BallTreeImplementation(X, metric=wrong_returned_value)
msg = "takes 1 positional argument but 2 were given"
with pytest.raises(TypeError, match=msg):
BallTreeImplementation(X, metric=one_arg_func)
@pytest.mark.parametrize("metric", itertools.chain(METRICS, BOOLEAN_METRICS))
def test_ball_tree_numerical_consistency(global_random_seed, metric):
# Results on float64 and float32 versions of a dataset must be
# numerically close.
X_64, X_32, Y_64, Y_32 = get_dataset_for_binary_tree(
random_seed=global_random_seed, features=50
)
metric_params = METRICS.get(metric, {})
bt_64 = BallTree64(X_64, leaf_size=1, metric=metric, **metric_params)
bt_32 = BallTree32(X_32, leaf_size=1, metric=metric, **metric_params)
# Test consistency with respect to the `query` method
k = 5
dist_64, ind_64 = bt_64.query(Y_64, k=k)
dist_32, ind_32 = bt_32.query(Y_32, k=k)
assert_allclose(dist_64, dist_32, rtol=1e-5)
assert_equal(ind_64, ind_32)
assert dist_64.dtype == np.float64
assert dist_32.dtype == np.float32
# Test consistency with respect to the `query_radius` method
r = 2.38
ind_64 = bt_64.query_radius(Y_64, r=r)
ind_32 = bt_32.query_radius(Y_32, r=r)
for _ind64, _ind32 in zip(ind_64, ind_32):
assert_equal(_ind64, _ind32)
# Test consistency with respect to the `query_radius` method
# with return distances being true
ind_64, dist_64 = bt_64.query_radius(Y_64, r=r, return_distance=True)
ind_32, dist_32 = bt_32.query_radius(Y_32, r=r, return_distance=True)
for _ind64, _ind32, _dist_64, _dist_32 in zip(ind_64, ind_32, dist_64, dist_32):
assert_equal(_ind64, _ind32)
assert_allclose(_dist_64, _dist_32, rtol=1e-5)
assert _dist_64.dtype == np.float64
assert _dist_32.dtype == np.float32
@pytest.mark.parametrize("metric", itertools.chain(METRICS, BOOLEAN_METRICS))
def test_kernel_density_numerical_consistency(global_random_seed, metric):
# Test consistency with respect to the `kernel_density` method
X_64, X_32, Y_64, Y_32 = get_dataset_for_binary_tree(random_seed=global_random_seed)
metric_params = METRICS.get(metric, {})
bt_64 = BallTree64(X_64, leaf_size=1, metric=metric, **metric_params)
bt_32 = BallTree32(X_32, leaf_size=1, metric=metric, **metric_params)
kernel = "gaussian"
h = 0.1
density64 = bt_64.kernel_density(Y_64, h=h, kernel=kernel, breadth_first=True)
density32 = bt_32.kernel_density(Y_32, h=h, kernel=kernel, breadth_first=True)
assert_allclose(density64, density32, rtol=1e-5)
assert density64.dtype == np.float64
assert density32.dtype == np.float32
def test_two_point_correlation_numerical_consistency(global_random_seed):
    """two_point_correlation counts of the 32/64-bit trees must match."""
    X_64, X_32, Y_64, Y_32 = get_dataset_for_binary_tree(random_seed=global_random_seed)
    tree64 = BallTree64(X_64, leaf_size=10)
    tree32 = BallTree32(X_32, leaf_size=10)

    radii = np.linspace(0, 1, 10)
    assert_allclose(
        tree64.two_point_correlation(Y_64, r=radii, dualtree=True),
        tree32.two_point_correlation(Y_32, r=radii, dualtree=True),
    )
def get_dataset_for_binary_tree(random_seed, features=3):
    """Return (X_64, X_32, Y_64, Y_32): identical random data in both dtypes.

    X holds 100 training points and Y 5 query points with ``features``
    columns, drawn from a seeded uniform RNG so that the float64 and
    float32 variants describe the same values.
    """
    rng = np.random.RandomState(random_seed)
    X = rng.rand(100, features)
    Y = rng.rand(5, features)
    return (
        X.astype(np.float64, copy=False),
        X.astype(np.float32, copy=False),
        Y.astype(np.float64, copy=False),
        Y.astype(np.float32, copy=False),
    )
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/neighbors/tests/test_nearest_centroid.py | sklearn/neighbors/tests/test_nearest_centroid.py | """
Testing for the nearest centroid module.
"""
import numpy as np
import pytest
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
from sklearn.utils._testing import (
assert_allclose,
assert_array_almost_equal,
assert_array_equal,
)
from sklearn.utils.fixes import CSR_CONTAINERS
# toy sample: two well-separated classes in 2D
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
# query points and their expected predictions
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
true_result_prior1 = [-1, 1, 1]
# precomputed expected decision_function / predict_proba values for T
true_discriminant_scores = [-32, 64, 80]
true_proba = [[1, 1.26642e-14], [1.60381e-28, 1], [1.80485e-35, 1]]

# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_classification_toy(csr_container):
# Check classification on a toy dataset, including sparse versions.
X_csr = csr_container(X)
T_csr = csr_container(T)
# Check classification on a toy dataset, including sparse versions.
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.decision_function(T), true_discriminant_scores)
assert_array_almost_equal(clf.predict_proba(T), true_proba)
# Test uniform priors
clf = NearestCentroid(priors="uniform")
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.decision_function(T), true_discriminant_scores)
assert_array_almost_equal(clf.predict_proba(T), true_proba)
clf = NearestCentroid(priors="empirical")
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.decision_function(T), true_discriminant_scores)
assert_array_almost_equal(clf.predict_proba(T), true_proba)
# Test custom priors
clf = NearestCentroid(priors=[0.25, 0.75])
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result_prior1)
# Same test, but with a sparse matrix to fit and test.
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit with sparse, test with non-sparse
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T), true_result)
# Fit with non-sparse, test with sparse
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit and predict with non-CSR sparse matrices
clf = NearestCentroid()
clf.fit(X_csr.tocoo(), y)
assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_iris():
    """NearestCentroid reaches >90% training accuracy on iris, both metrics."""
    for metric in ("euclidean", "manhattan"):
        clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
        accuracy = np.mean(clf.predict(iris.data) == iris.target)
        assert accuracy > 0.9, "Failed with score = " + str(accuracy)
def test_iris_shrinkage():
    """Shrinkage thresholds (including None) keep iris accuracy above 80%."""
    for metric in ("euclidean", "manhattan"):
        for shrink_threshold in [None, 0.1, 0.5]:
            clf = NearestCentroid(
                metric=metric, shrink_threshold=shrink_threshold
            ).fit(iris.data, iris.target)
            accuracy = np.mean(clf.predict(iris.data) == iris.target)
            assert accuracy > 0.8, "Failed with score = " + str(accuracy)
def test_pickle():
    """A pickled and restored NearestCentroid keeps its class and its score."""
    import pickle

    # classification
    clf = NearestCentroid()
    clf.fit(iris.data, iris.target)
    score = clf.score(iris.data, iris.target)

    clf_restored = pickle.loads(pickle.dumps(clf))
    # Compare classes by identity, not `==` (fixes the type-comparison
    # anti-pattern `type(obj2) == obj.__class__`).
    assert type(clf_restored) is clf.__class__
    score_restored = clf_restored.score(iris.data, iris.target)
    assert_array_equal(
        score,
        score_restored,
        "Failed to generate same score after pickling (classification).",
    )
def test_shrinkage_correct():
    # Ensure that the shrinking is correct.
    # The expected result is calculated by R (pamr),
    # which is implemented by the author of the original paper.
    # (One needs to modify the code to output the new centroid in pamr.predict)
    X = np.array([[0, 1], [1, 0], [1, 1], [2, 0], [6, 8]])
    y = np.array([1, 1, 2, 2, 2])
    clf = NearestCentroid(shrink_threshold=0.1)
    clf.fit(X, y)
    # Reference shrunken centroids obtained from pamr (see note above).
    expected_result = np.array([[0.7787310, 0.8545292], [2.814179, 2.763647]])
    np.testing.assert_array_almost_equal(clf.centroids_, expected_result)
def test_shrinkage_threshold_decoded_y():
    """Shrinkage must be invariant to the actual label values used."""
    clf = NearestCentroid(shrink_threshold=0.01)
    # Relabel -1 -> 0 (np.asarray on the list makes a fresh array).
    y_relabeled = np.asarray(y)
    y_relabeled[y_relabeled == -1] = 0
    clf.fit(X, y_relabeled)
    centroid_encoded = clf.centroids_
    # Refit with the original labels: centroids must be identical.
    clf.fit(X, y)
    assert_array_equal(centroid_encoded, clf.centroids_)
def test_predict_translated_data():
    """Shrunken-centroid predictions are invariant under feature translation."""
    rng = np.random.RandomState(0)
    X_train = rng.rand(50, 50)
    y_train = rng.randint(0, 3, 50)
    offset = rng.rand(50)

    y_init = (
        NearestCentroid(shrink_threshold=0.1).fit(X_train, y_train).predict(X_train)
    )
    X_shifted = X_train + offset
    y_translate = (
        NearestCentroid(shrink_threshold=0.1).fit(X_shifted, y_train).predict(X_shifted)
    )
    assert_array_equal(y_init, y_translate)
@pytest.mark.parametrize("csr_container", CSR_CONTAINERS)
def test_manhattan_metric(csr_container):
# Test the manhattan metric.
X_csr = csr_container(X)
clf = NearestCentroid(metric="manhattan")
clf.fit(X, y)
dense_centroid = clf.centroids_
clf.fit(X_csr, y)
assert_array_equal(clf.centroids_, dense_centroid)
assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
def test_features_zero_var():
    """Fitting with shrinkage must fail when every feature has zero variance."""
    n_rows = 10
    X_const = np.empty((n_rows, 2))
    X_const[:, 0] = -0.13725701
    X_const[:, 1] = -0.9853293
    labels = np.zeros((n_rows))
    labels[0] = 1
    clf = NearestCentroid(shrink_threshold=0.1)
    with pytest.raises(ValueError):
        clf.fit(X_const, labels)
def test_negative_priors_error():
    """Check that we raise an error when the user-defined priors are negative."""
    with pytest.raises(ValueError, match="priors must be non-negative"):
        NearestCentroid(priors=[-2, 4]).fit(X, y)
def test_warn_non_normalized_priors():
    """Non-normalized user priors trigger a warning and get renormalized."""
    priors = [2, 4]
    clf = NearestCentroid(priors=priors)
    expected_msg = "The priors do not sum to 1. Normalizing such that it sums to one."
    with pytest.warns(UserWarning, match=expected_msg):
        clf.fit(X, y)

    priors_arr = np.asarray(priors)
    assert_allclose(clf.class_prior_, priors_arr / priors_arr.sum())
@pytest.mark.parametrize(
    "response_method", ["decision_function", "predict_proba", "predict_log_proba"]
)
def test_method_not_available_with_manhattan(response_method):
    """Check that we raise an AttributeError with Manhattan metric when trying
    to call a non-thresholded response method.
    """
    clf = NearestCentroid(metric="manhattan").fit(X, y)
    with pytest.raises(AttributeError):
        method = getattr(clf, response_method)
        method(T)
@pytest.mark.parametrize("array_constructor", [np.array] + CSR_CONTAINERS)
def test_error_zero_variances(array_constructor):
"""Check that we raise an error when the variance for all features is zero."""
X = np.ones((len(y), 2))
X[:, 1] *= 2
X = array_constructor(X)
clf = NearestCentroid()
with pytest.raises(ValueError, match="All features have zero variance"):
clf.fit(X, y)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/covariance/_robust_covariance.py | sklearn/covariance/_robust_covariance.py | """
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from numbers import Integral, Real
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from sklearn.base import _fit_context
from sklearn.covariance._empirical_covariance import (
EmpiricalCovariance,
empirical_covariance,
)
from sklearn.utils import check_array, check_random_state
from sklearn.utils._param_validation import Interval
from sklearn.utils.extmath import fast_logdet
from sklearn.utils.validation import validate_data
# Minimum Covariance Determinant
# Implementation of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(
    X,
    n_support,
    remaining_iterations=30,
    initial_estimates=None,
    verbose=False,
    cov_computation_method=empirical_covariance,
    random_state=None,
):
    """C_step procedure described in [Rouseeuw1999]_ aiming at computing MCD.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Data set in which we look for the n_support observations whose
        scatter matrix has minimum determinant.

    n_support : int
        Number of observations to compute the robust estimates of location
        and covariance from. This parameter must be greater than
        `n_samples / 2`.

    remaining_iterations : int, default=30
        Number of iterations to perform.
        According to [Rouseeuw1999]_, two iterations are sufficient to get
        close to the minimum, and we never need more than 30 to reach
        convergence.

    initial_estimates : tuple of shape (2,), default=None
        Initial estimates of location and shape from which to run the c_step
        procedure:
        - initial_estimates[0]: an initial location estimate
        - initial_estimates[1]: an initial covariance estimate

    verbose : bool, default=False
        Verbose mode.

    cov_computation_method : callable, \
            default=:func:`sklearn.covariance.empirical_covariance`
        The function which will be used to compute the covariance.
        Must return array of shape (n_features, n_features).

    random_state : int, RandomState instance or None, default=None
        Determines the pseudo random number generator for shuffling the data.
        Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    location : ndarray of shape (n_features,)
        Robust location estimates.

    covariance : ndarray of shape (n_features, n_features)
        Robust covariance estimates.

    support : ndarray of shape (n_samples,)
        A mask for the `n_support` observations whose scatter matrix has
        minimum determinant.

    References
    ----------
    .. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
        Estimator, 1999, American Statistical Association and the American
        Society for Quality, TECHNOMETRICS
    """
    X = np.asarray(X)
    random_state = check_random_state(random_state)
    # Thin public wrapper: validate inputs and delegate to the private
    # implementation (which requires an already-validated RandomState).
    return _c_step(
        X,
        n_support,
        remaining_iterations=remaining_iterations,
        initial_estimates=initial_estimates,
        verbose=verbose,
        cov_computation_method=cov_computation_method,
        random_state=random_state,
    )
def _c_step(
    X,
    n_support,
    random_state,
    remaining_iterations=30,
    initial_estimates=None,
    verbose=False,
    cov_computation_method=empirical_covariance,
):
    """Core C-step loop for MCD.

    Iteratively refit (location, covariance) on the `n_support` points with
    the smallest Mahalanobis distances until the covariance log-determinant
    stops decreasing, iterations run out, or the determinant degenerates.

    Returns ``(location, covariance, log_det, support_mask, distances)``.
    """
    n_samples, n_features = X.shape
    dist = np.inf

    # Initialisation
    if initial_estimates is None:
        # compute initial robust estimates from a random subset
        support_indices = random_state.permutation(n_samples)[:n_support]
    else:
        # get initial robust estimates from the function parameters
        location = initial_estimates[0]
        covariance = initial_estimates[1]
        # run a special iteration for that case (to get an initial support_indices)
        precision = linalg.pinvh(covariance)
        X_centered = X - location
        dist = (np.dot(X_centered, precision) * X_centered).sum(1)
        # compute new estimates: keep the n_support closest points
        support_indices = np.argpartition(dist, n_support - 1)[:n_support]

    X_support = X[support_indices]
    location = X_support.mean(0)
    covariance = cov_computation_method(X_support)

    # Iterative procedure for Minimum Covariance Determinant computation
    det = fast_logdet(covariance)
    # If the data already has singular covariance, calculate the precision,
    # as the loop below will not be entered.
    if np.isinf(det):
        precision = linalg.pinvh(covariance)

    previous_det = np.inf
    while det < previous_det and remaining_iterations > 0 and not np.isinf(det):
        # save old estimates values
        previous_location = location
        previous_covariance = covariance
        previous_det = det
        previous_support_indices = support_indices
        # compute a new support_indices from the full data set mahalanobis distances
        precision = linalg.pinvh(covariance)
        X_centered = X - location
        dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
        # compute new estimates
        support_indices = np.argpartition(dist, n_support - 1)[:n_support]
        X_support = X[support_indices]
        location = X_support.mean(axis=0)
        covariance = cov_computation_method(X_support)
        det = fast_logdet(covariance)
        # update remaining iterations for early stopping
        remaining_iterations -= 1

    previous_dist = dist
    # Final distances under the last accepted (location, precision).
    dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
    # Check if best fit already found (det => 0, logdet => -inf)
    if np.isinf(det):
        results = location, covariance, det, support_indices, dist
    # Check convergence
    if np.allclose(det, previous_det):
        # c_step procedure converged
        if verbose:
            print(
                "Optimal couple (location, covariance) found before"
                " ending iterations (%d left)" % (remaining_iterations)
            )
        results = location, covariance, det, support_indices, dist
    elif det > previous_det:
        # determinant has increased (should not happen)
        warnings.warn(
            "Determinant has increased; this should not happen: "
            "log(det) > log(previous_det) (%.15f > %.15f). "
            "You may want to try with a higher value of "
            "support_fraction (current value: %.3f)."
            % (det, previous_det, n_support / n_samples),
            RuntimeWarning,
        )
        # Roll back to the previous, better estimates.
        results = (
            previous_location,
            previous_covariance,
            previous_det,
            previous_support_indices,
            previous_dist,
        )

    # Check early stopping
    if remaining_iterations == 0:
        if verbose:
            print("Maximum number of iterations reached")
        results = location, covariance, det, support_indices, dist

    location, covariance, det, support_indices, dist = results
    # Convert from list of indices to boolean mask.
    support = np.bincount(support_indices, minlength=n_samples).astype(bool)
    return location, covariance, det, support, dist
def _consistency_factor(n_features, alpha):
"""Multiplicative factor to make covariance estimate consistent
at the normal distribution, as described in [Pison2002]_.
Parameters
----------
n_features : int
Number of features.
alpha : float
Parameter related to the proportion of discarded points.
This parameter must be in the range (0, 1).
Returns
-------
c_alpha : float
Scaling factor to make covariance matrix consistent.
References
----------
.. [Butler1993] R. W. Butler. P. L. Davies. M. Jhun. "Asymptotics for the
Minimum Covariance Determinant Estimator." Ann. Statist. 21 (3)
1385 - 1400, September, 1993. https://doi.org/10.1214/aos/1176349264]
.. [Croux1999] Croux, C., Haesbroeck, G. "Influence Function and
Efficiency of the Minimum Covariance Determinant Scatter Matrix
Estimator" Journal of Multivariate Analysis 71(2) (1999) 161-190
.. [Pison2002] Pison, G., Van Aelst, S., Willems, G., "Small sample
corrections for LTS and MCD" Metrika 55(1) (2002) 111-123
"""
# Formulas as in Sec 3 of Pison 2002, derived from Eq 4.2 in Croux 1999
q_alpha = chi2.ppf(alpha, df=n_features)
c_alpha = alpha / chi2.cdf(q_alpha, n_features + 2)
return c_alpha
def select_candidates(
    X,
    n_support,
    n_trials,
    select=1,
    n_iter=30,
    verbose=False,
    cov_computation_method=empirical_covariance,
    random_state=None,
):
    """Find the best pure subsets of observations from which to compute the MCD.

    Runs the c_step procedure introduced by Rousseeuw and Van Driessen in
    [RV]_ from several starting points, and keeps the `select` candidates
    whose covariance matrix has the smallest determinant. Equivalently, it
    removes n_samples-n_support observations to construct what we call a
    pure data set (i.e. not containing outliers). The list of observations
    of the pure data set is referred to as the `support`.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Data (sub)set in which we look for the n_support purest observations.

    n_support : int
        The number of samples the pure data set must contain.
        This parameter must be in the range `[(n + p + 1)/2] < n_support < n`.

    n_trials : int or tuple of shape (2,)
        Number of different initial sets of observations from which to
        run the algorithm. This parameter should be a strictly positive
        integer.
        Instead of giving a number of trials to perform, one can provide a
        list of initial estimates that will be used to iteratively run
        c_step procedures. In this case:
        - n_trials[0]: array-like, shape (n_trials, n_features)
          is the list of `n_trials` initial location estimates
        - n_trials[1]: array-like, shape (n_trials, n_features, n_features)
          is the list of `n_trials` initial covariances estimates

    select : int, default=1
        Number of best candidates results to return. This parameter must be
        a strictly positive integer.

    n_iter : int, default=30
        Maximum number of iterations for the c_step procedure.
        (2 is enough to be close to the final solution. "Never" exceeds 20).
        This parameter must be a strictly positive integer.

    verbose : bool, default=False
        Control the output verbosity.

    cov_computation_method : callable, \
            default=:func:`sklearn.covariance.empirical_covariance`
        The function which will be used to compute the covariance.
        Must return an array of shape (n_features, n_features).

    random_state : int, RandomState instance or None, default=None
        Determines the pseudo random number generator for shuffling the data.
        Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.

    See Also
    ---------
    c_step

    Returns
    -------
    best_locations : ndarray of shape (select, n_features)
        The `select` location estimates computed from the `select` best
        supports found in the data set (`X`).

    best_covariances : ndarray of shape (select, n_features, n_features)
        The `select` covariance estimates computed from the `select`
        best supports found in the data set (`X`).

    best_supports : ndarray of shape (select, n_samples)
        The `select` best supports found in the data set (`X`).

    best_ds : ndarray of shape (select, n_samples)
        The Mahalanobis distances associated with each of the `select`
        best candidates.

    References
    ----------
    .. [RV] A Fast Algorithm for the Minimum Covariance Determinant
        Estimator, 1999, American Statistical Association and the American
        Society for Quality, TECHNOMETRICS
    """
    random_state = check_random_state(random_state)

    # `n_trials` is either a run count or a pair of arrays holding initial
    # (location, covariance) estimates, one per run.
    if isinstance(n_trials, Integral):
        initial_pairs = None
        n_runs = n_trials
    elif isinstance(n_trials, tuple):
        initial_pairs = n_trials
        n_runs = initial_pairs[0].shape[0]
    else:
        raise TypeError(
            "Invalid 'n_trials' parameter, expected tuple or integer, got %s (%s)"
            % (n_trials, type(n_trials))
        )

    # Run `n_runs` c_step procedures and collect their final states.
    all_results = []
    if initial_pairs is None:
        # Each run starts from a random initial support drawn inside `_c_step`.
        for _ in range(n_runs):
            all_results.append(
                _c_step(
                    X,
                    n_support,
                    remaining_iterations=n_iter,
                    verbose=verbose,
                    cov_computation_method=cov_computation_method,
                    random_state=random_state,
                )
            )
    else:
        # Each run starts from one of the provided (location, covariance)
        # estimates.
        for trial in range(n_runs):
            all_results.append(
                _c_step(
                    X,
                    n_support,
                    remaining_iterations=n_iter,
                    initial_estimates=(
                        initial_pairs[0][trial],
                        initial_pairs[1][trial],
                    ),
                    verbose=verbose,
                    cov_computation_method=cov_computation_method,
                    random_state=random_state,
                )
            )

    locations, covariances, determinants, supports, dists = zip(*all_results)
    # Keep the `select` candidates with the smallest determinants.
    best_order = np.argsort(determinants)[:select]
    best_locations = np.asarray(locations)[best_order]
    best_covariances = np.asarray(covariances)[best_order]
    best_supports = np.asarray(supports)[best_order]
    best_ds = np.asarray(dists)[best_order]
    return best_locations, best_covariances, best_supports, best_ds
def fast_mcd(
    X,
    support_fraction=None,
    cov_computation_method=empirical_covariance,
    random_state=None,
):
    """Estimate the Minimum Covariance Determinant matrix.

    Read more in the :ref:`User Guide <robust_covariance>`.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        The data matrix, with p features and n samples.

    support_fraction : float, default=None
        The proportion of points to be included in the support of the raw
        MCD estimate. Default is `None`, which implies that the minimum
        value of `support_fraction` will be used within the algorithm:
        `(n_samples + n_features + 1) / (2 * n_samples)`. This parameter
        must be in the range (0, 1).

    cov_computation_method : callable, \
            default=:func:`sklearn.covariance.empirical_covariance`
        The function which will be used to compute the covariance.
        Must return an array of shape (n_features, n_features).

    random_state : int, RandomState instance or None, default=None
        Determines the pseudo random number generator for shuffling the data.
        Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    location : ndarray of shape (n_features,)
        Robust location of the data.

    covariance : ndarray of shape (n_features, n_features)
        Robust covariance of the features.

    support : ndarray of shape (n_samples,), dtype=bool
        A mask of the observations that have been used to compute
        the robust location and covariance estimates of the data set.

    dist : ndarray of shape (n_samples,)
        Mahalanobis distances of the observations, computed with the raw
        robust location and covariance estimates.

    Notes
    -----
    The FastMCD algorithm has been introduced by Rousseuw and Van Driessen
    in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
    1999, American Statistical Association and the American Society
    for Quality, TECHNOMETRICS".
    The principle is to compute robust estimates and random subsets before
    pooling them into a larger subsets, and finally into the full data set.
    Depending on the size of the initial sample, we have one, two or three
    such computation levels.

    Note that only raw estimates are returned. If one is interested in
    the correction and reweighting steps described in [RouseeuwVan]_,
    see the MinCovDet object.

    References
    ----------
    .. [RouseeuwVan] A Fast Algorithm for the Minimum Covariance
        Determinant Estimator, 1999, American Statistical Association
        and the American Society for Quality, TECHNOMETRICS

    .. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
        Asymptotics For The Minimum Covariance Determinant Estimator,
        The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
    """
    random_state = check_random_state(random_state)

    X = check_array(X, ensure_min_samples=2, estimator="fast_mcd")
    n_samples, n_features = X.shape

    # minimum breakdown value: default support is the smallest allowed,
    # ceil((n + p + 1) / 2), capped at n_samples
    if support_fraction is None:
        n_support = min(int(np.ceil(0.5 * (n_samples + n_features + 1))), n_samples)
    else:
        n_support = int(support_fraction * n_samples)

    # 1-dimensional case quick computation
    # (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
    # Regression and Outlier Detection, John Wiley & Sons, chapter 4)
    if n_features == 1:
        if n_support < n_samples:
            # find the sample shortest halves
            X_sorted = np.sort(np.ravel(X))
            diff = X_sorted[n_support:] - X_sorted[: (n_samples - n_support)]
            halves_start = np.where(diff == np.min(diff))[0]
            # take the middle points' mean to get the robust location estimate
            location = (
                0.5
                * (X_sorted[n_support + halves_start] + X_sorted[halves_start]).mean()
            )
            # the support is made of the n_support points closest to the
            # robust location estimate
            support = np.zeros(n_samples, dtype=bool)
            X_centered = X - location
            support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
            covariance = np.asarray([[np.var(X[support])]])
            location = np.array([location])
            # get precision matrix in an optimized way
            precision = linalg.pinvh(covariance)
            dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
        else:
            # the support covers the whole data set: plain mean / variance
            support = np.ones(n_samples, dtype=bool)
            covariance = np.asarray([[np.var(X)]])
            location = np.asarray([np.mean(X)])
            X_centered = X - location
            # get precision matrix in an optimized way
            precision = linalg.pinvh(covariance)
            dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)

    # Starting FastMCD algorithm for p-dimensional case
    if (n_samples > 500) and (n_features > 1):
        # 1. Find candidate supports on subsets
        # a. split the set in subsets of size ~ 300
        n_subsets = n_samples // 300
        n_samples_subsets = n_samples // n_subsets
        samples_shuffle = random_state.permutation(n_samples)
        h_subset = int(np.ceil(n_samples_subsets * (n_support / float(n_samples))))
        # b. perform a total of 500 trials
        n_trials_tot = 500
        # c. select 10 best (location, covariance) for each subset
        n_best_sub = 10
        n_trials = max(10, n_trials_tot // n_subsets)
        n_best_tot = n_subsets * n_best_sub
        all_best_locations = np.zeros((n_best_tot, n_features))
        try:
            all_best_covariances = np.zeros((n_best_tot, n_features, n_features))
        except MemoryError:
            # The above is too big. Let's try with something much small
            # (and less optimal)
            n_best_tot = 10
            all_best_covariances = np.zeros((n_best_tot, n_features, n_features))
            n_best_sub = 2
        for i in range(n_subsets):
            low_bound = i * n_samples_subsets
            high_bound = low_bound + n_samples_subsets
            current_subset = X[samples_shuffle[low_bound:high_bound]]
            best_locations_sub, best_covariances_sub, _, _ = select_candidates(
                current_subset,
                h_subset,
                n_trials,
                select=n_best_sub,
                n_iter=2,
                cov_computation_method=cov_computation_method,
                random_state=random_state,
            )
            subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
            all_best_locations[subset_slice] = best_locations_sub
            all_best_covariances[subset_slice] = best_covariances_sub
        # 2. Pool the candidate supports into a merged set
        # (possibly the full dataset)
        n_samples_merged = min(1500, n_samples)
        h_merged = int(np.ceil(n_samples_merged * (n_support / float(n_samples))))
        if n_samples > 1500:
            n_best_merged = 10
        else:
            n_best_merged = 1
        # find the best couples (location, covariance) on the merged set
        selection = random_state.permutation(n_samples)[:n_samples_merged]
        locations_merged, covariances_merged, supports_merged, d = select_candidates(
            X[selection],
            h_merged,
            n_trials=(all_best_locations, all_best_covariances),
            select=n_best_merged,
            cov_computation_method=cov_computation_method,
            random_state=random_state,
        )
        # 3. Finally get the overall best (locations, covariance) couple
        if n_samples < 1500:
            # directly get the best couple (location, covariance);
            # the merged set was the full data set, so map the merged
            # support/distances back through `selection`
            location = locations_merged[0]
            covariance = covariances_merged[0]
            support = np.zeros(n_samples, dtype=bool)
            dist = np.zeros(n_samples)
            support[selection] = supports_merged[0]
            dist[selection] = d[0]
        else:
            # select the best couple on the full dataset
            locations_full, covariances_full, supports_full, d = select_candidates(
                X,
                n_support,
                n_trials=(locations_merged, covariances_merged),
                select=1,
                cov_computation_method=cov_computation_method,
                random_state=random_state,
            )
            location = locations_full[0]
            covariance = covariances_full[0]
            support = supports_full[0]
            dist = d[0]
    elif n_features > 1:
        # 1. Find the 10 best couples (location, covariance)
        # considering two iterations
        n_trials = 30
        n_best = 10
        locations_best, covariances_best, _, _ = select_candidates(
            X,
            n_support,
            n_trials=n_trials,
            select=n_best,
            n_iter=2,
            cov_computation_method=cov_computation_method,
            random_state=random_state,
        )
        # 2. Select the best couple on the full dataset amongst the 10
        locations_full, covariances_full, supports_full, d = select_candidates(
            X,
            n_support,
            n_trials=(locations_best, covariances_best),
            select=1,
            cov_computation_method=cov_computation_method,
            random_state=random_state,
        )
        location = locations_full[0]
        covariance = covariances_full[0]
        support = supports_full[0]
        dist = d[0]

    return location, covariance, support, dist
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
store_precision : bool, default=True
Specify if the estimated precision is stored.
assume_centered : bool, default=False
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
Useful to work with data whose mean is significantly equal to
zero but is not exactly zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, default=None
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
`(n_samples + n_features + 1) / 2 * n_samples`. The parameter must be
in the range (0, 1].
random_state : int, RandomState instance or None, default=None
Determines the pseudo random number generator for shuffling the data.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
raw_location_ : ndarray of shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : ndarray of shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : ndarray of shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : ndarray of shape (n_features,)
Estimated robust location.
For an example of comparing raw robust estimates with
the true location and covariance, refer to
:ref:`sphx_glr_auto_examples_covariance_plot_robust_vs_empirical_covariance.py`.
covariance_ : ndarray of shape (n_features, n_features)
Estimated robust covariance matrix.
precision_ : ndarray of shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : ndarray of shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : ndarray of shape (n_samples,)
Mahalanobis distances of the training set (on which :meth:`fit` is
called) observations.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
EllipticEnvelope : An object for detecting outliers in
a Gaussian distributed dataset.
EmpiricalCovariance : Maximum likelihood covariance estimator.
GraphicalLasso : Sparse inverse covariance estimation
with an l1-penalized estimator.
GraphicalLassoCV : Sparse inverse covariance with cross-validated
choice of the l1 penalty.
LedoitWolf : LedoitWolf Estimator.
OAS : Oracle Approximating Shrinkage Estimator.
ShrunkCovariance : Covariance estimator with shrinkage.
References
----------
.. [Rouseeuw1984] P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.
.. [Rousseeuw] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
.. [ButlerDavies] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Examples
--------
>>> import numpy as np
>>> from sklearn.covariance import MinCovDet
>>> from sklearn.datasets import make_gaussian_quantiles
>>> real_cov = np.array([[.8, .3],
... [.3, .4]])
>>> rng = np.random.RandomState(0)
>>> X = rng.multivariate_normal(mean=[0, 0],
... cov=real_cov,
... size=500)
>>> cov = MinCovDet(random_state=0).fit(X)
>>> cov.covariance_
array([[0.8102, 0.2736],
[0.2736, 0.3330]])
>>> cov.location_
array([0.0769 , 0.0397])
"""
_parameter_constraints: dict = {
**EmpiricalCovariance._parameter_constraints,
"support_fraction": [Interval(Real, 0, 1, closed="right"), None],
"random_state": ["random_state"],
}
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(
self,
*,
store_precision=True,
assume_centered=False,
support_fraction=None,
random_state=None,
):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """Fit a Minimum Covariance Determinant with the FastMCD algorithm.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples
            and `n_features` is the number of features.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        X = validate_data(self, X, ensure_min_samples=2, estimator="MinCovDet")
        random_state = check_random_state(self.random_state)
        n_samples, n_features = X.shape
        # check that the empirical covariance is full rank
        # (rank-deficient data makes the MCD estimate unreliable)
        if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
            warnings.warn(
                "The covariance matrix associated to your dataset is not full rank"
            )
        # compute and store raw estimates
        raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
            X,
            support_fraction=self.support_fraction,
            cov_computation_method=self._nonrobust_covariance,
            random_state=random_state,
        )
        if self.assume_centered:
            # discard the estimated location: recompute the covariance on the
            # raw support around zero, and the distances accordingly
            raw_location = np.zeros(n_features)
            raw_covariance = self._nonrobust_covariance(
                X[raw_support], assume_centered=True
            )
            # get precision matrix in an optimized way
            precision = linalg.pinvh(raw_covariance)
            raw_dist = np.sum(np.dot(X, precision) * X, 1)
        self.raw_location_ = raw_location
        self.raw_covariance_ = raw_covariance
        self.raw_support_ = raw_support
        self.location_ = raw_location
        self.support_ = raw_support
        self.dist_ = raw_dist
        # obtain consistency at normal models
        # (sets self.covariance_ / self.precision_ via correct_covariance)
        self.correct_covariance(X)
        # re-weight estimator
        self.reweight_covariance(X)

        return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the asymptotic correction factor derived by [Croux1999]_.
Parameters
----------
data : array-like of shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
covariance_corrected : ndarray of shape (n_features, n_features)
Corrected robust covariance estimate.
References
----------
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/covariance/_graph_lasso.py | sklearn/covariance/_graph_lasso.py | """GraphicalLasso: sparse inverse covariance estimation with an l1-penalized
estimator.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import operator
import sys
import time
import warnings
from numbers import Integral, Real
import numpy as np
from scipy import linalg
from sklearn.base import _fit_context
from sklearn.covariance import EmpiricalCovariance, empirical_covariance, log_likelihood
from sklearn.exceptions import ConvergenceWarning
# mypy error: Module 'sklearn.linear_model' has no attribute '_cd_fast'
from sklearn.linear_model import _cd_fast as cd_fast # type: ignore[attr-defined]
from sklearn.linear_model import lars_path_gram
from sklearn.model_selection import check_cv, cross_val_score
from sklearn.utils import Bunch
from sklearn.utils._param_validation import Interval, StrOptions, validate_params
from sklearn.utils.metadata_routing import (
MetadataRouter,
MethodMapping,
_raise_for_params,
_routing_enabled,
process_routing,
)
from sklearn.utils.parallel import Parallel, delayed
from sklearn.utils.validation import (
_is_arraylike_not_scalar,
check_random_state,
check_scalar,
validate_data,
)
# Helper functions to compute the objective and dual objective functions
# of the l1-penalized estimator
def _objective(mle, precision_, alpha):
    """Graphical-lasso objective function.

    A shifted, scaled version of the normalized negative log-likelihood
    (i.e. its empirical mean over the samples) of the empirical covariance
    ``mle`` under the Gaussian model with precision ``precision_``, plus an
    l1 penalty on the off-diagonal precision entries to promote sparsity.
    """
    n_features = precision_.shape[0]
    neg_log_lik = -2.0 * log_likelihood(mle, precision_)
    constant = n_features * np.log(2 * np.pi)
    # The diagonal is not penalized: subtract its contribution from the
    # full l1 norm of the precision matrix.
    l1_off_diag = np.abs(precision_).sum() - np.abs(np.diag(precision_)).sum()
    return neg_log_lik + constant + alpha * l1_off_diag
def _dual_gap(emp_cov, precision_, alpha):
"""Expression of the dual gap convergence criterion
The specific definition is given in Duchi "Projected Subgradient Methods
for Learning Sparse Gaussians".
"""
gap = np.sum(emp_cov * precision_)
gap -= precision_.shape[0]
gap += alpha * (np.abs(precision_).sum() - np.abs(np.diag(precision_)).sum())
return gap
# The g-lasso algorithm
def _graphical_lasso(
    emp_cov,
    alpha,
    *,
    cov_init=None,
    mode="cd",
    tol=1e-4,
    enet_tol=1e-4,
    max_iter=100,
    verbose=False,
    eps=np.finfo(np.float64).eps,
):
    """Inner solver for the graphical lasso.

    Estimates a sparse precision matrix from the empirical covariance
    ``emp_cov`` with l1 penalty ``alpha``, by block coordinate descent:
    one l1-penalized regression per feature/column per outer iteration,
    solved with coordinate descent or LARS depending on ``mode``.

    Returns ``(covariance_, precision_, costs, n_iter)`` where ``costs`` is
    a list of ``(objective, dual_gap)`` pairs, one per outer iteration
    (except in the unregularized ``alpha == 0`` early return, where a single
    ``(cost, dual_gap)`` tuple is returned in its place).
    """
    _, n_features = emp_cov.shape
    if alpha == 0:
        # Early return without regularization
        precision_ = linalg.inv(emp_cov)
        cost = -2.0 * log_likelihood(emp_cov, precision_)
        cost += n_features * np.log(2 * np.pi)
        d_gap = np.sum(emp_cov * precision_) - n_features
        return emp_cov, precision_, (cost, d_gap), 0

    if cov_init is None:
        covariance_ = emp_cov.copy()
    else:
        covariance_ = cov_init.copy()
    # As a trivial regularization (Tikhonov like), we scale down the
    # off-diagonal coefficients of our starting point: This is needed, as
    # in the cross-validation the cov_init can easily be
    # ill-conditioned, and the CV loop blows. Beside, this takes
    # conservative stand-point on the initial conditions, and it tends to
    # make the convergence go faster.
    covariance_ *= 0.95
    diagonal = emp_cov.flat[:: n_features + 1]
    covariance_.flat[:: n_features + 1] = diagonal
    precision_ = linalg.pinvh(covariance_)

    indices = np.arange(n_features)
    i = 0  # initialize the counter to be robust to `max_iter=0`
    costs = list()
    # The different l1 regression solver have different numerical errors
    if mode == "cd":
        errors = dict(over="raise", invalid="ignore")
    else:
        errors = dict(invalid="raise")
    try:
        # be robust to the max_iter=0 edge case, see:
        # https://github.com/scikit-learn/scikit-learn/issues/4134
        d_gap = np.inf

        # set a sub_covariance buffer
        sub_covariance = np.copy(covariance_[1:, 1:], order="C")
        for i in range(max_iter):
            for idx in range(n_features):
                # To keep the contiguous matrix `sub_covariance` equal to
                # covariance_[indices != idx].T[indices != idx]
                # we only need to update 1 column and 1 line when idx changes
                if idx > 0:
                    di = idx - 1
                    sub_covariance[di] = covariance_[di][indices != idx]
                    sub_covariance[:, di] = covariance_[:, di][indices != idx]
                else:
                    sub_covariance[:] = covariance_[1:, 1:]
                row = emp_cov[idx, indices != idx]
                with np.errstate(**errors):
                    if mode == "cd":
                        # Use coordinate descent, warm-started from the
                        # current precision column
                        coefs = -(
                            precision_[indices != idx, idx]
                            / (precision_[idx, idx] + 1000 * eps)
                        )
                        coefs, _, _, _ = cd_fast.enet_coordinate_descent_gram(
                            w=coefs,
                            alpha=alpha,
                            beta=0,
                            Q=sub_covariance,
                            q=row,
                            y=row,
                            # TODO: It is not ideal that the max_iter of the outer
                            # solver (graphical lasso) is coupled with the max_iter of
                            # the inner solver (CD). Ideally, CD has its own parameter
                            # enet_max_iter (like enet_tol). A minimum of 20 is rather
                            # arbitrary, but not unreasonable.
                            max_iter=max(20, max_iter),
                            tol=enet_tol,
                            rng=check_random_state(None),
                            random=False,
                            positive=False,
                            do_screening=True,
                        )
                    else:  # mode == "lars"
                        _, _, coefs = lars_path_gram(
                            Xy=row,
                            Gram=sub_covariance,
                            n_samples=row.size,
                            alpha_min=alpha / (n_features - 1),
                            copy_Gram=True,
                            eps=eps,
                            method="lars",
                            return_path=False,
                        )
                # Update the precision matrix (block inversion formulas),
                # keeping it symmetric
                precision_[idx, idx] = 1.0 / (
                    covariance_[idx, idx]
                    - np.dot(covariance_[indices != idx, idx], coefs)
                )
                precision_[indices != idx, idx] = -precision_[idx, idx] * coefs
                precision_[idx, indices != idx] = -precision_[idx, idx] * coefs
                coefs = np.dot(sub_covariance, coefs)
                covariance_[idx, indices != idx] = coefs
                covariance_[indices != idx, idx] = coefs
            if not np.isfinite(precision_.sum()):
                raise FloatingPointError(
                    "The system is too ill-conditioned for this solver"
                )
            d_gap = _dual_gap(emp_cov, precision_, alpha)
            cost = _objective(emp_cov, precision_, alpha)
            if verbose:
                print(
                    "[graphical_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e"
                    % (i, cost, d_gap)
                )
            costs.append((cost, d_gap))
            if np.abs(d_gap) < tol:
                break
            if not np.isfinite(cost) and i > 0:
                raise FloatingPointError(
                    "Non SPD result: the system is too ill-conditioned for this solver"
                )
        else:
            # for/else: `max_iter` reached without hitting the `break` above
            warnings.warn(
                "graphical_lasso: did not converge after %i iteration: dual gap: %.3e"
                % (max_iter, d_gap),
                ConvergenceWarning,
            )
    except FloatingPointError as e:
        e.args = (e.args[0] + ". The system is too ill-conditioned for this solver",)
        raise e

    return covariance_, precision_, costs, i + 1
def alpha_max(emp_cov):
    """Find the maximum alpha for which there are some non-zeros off-diagonal.

    Parameters
    ----------
    emp_cov : ndarray of shape (n_features, n_features)
        The sample covariance matrix.

    Notes
    -----
    This results from the bound for the all the Lasso that are solved
    in GraphicalLasso: each time, the row of cov corresponds to Xy. As the
    bound for alpha is given by `max(abs(Xy))`, the result follows.
    """
    off_diagonal = np.copy(emp_cov)
    # The diagonal is never penalized, so it must not contribute to the bound.
    np.fill_diagonal(off_diagonal, 0)
    return np.max(np.abs(off_diagonal))
@validate_params(
    {
        "emp_cov": ["array-like"],
        "return_costs": ["boolean"],
        "return_n_iter": ["boolean"],
    },
    prefer_skip_nested_validation=False,
)
def graphical_lasso(
    emp_cov,
    alpha,
    *,
    mode="cd",
    tol=1e-4,
    enet_tol=1e-4,
    max_iter=100,
    verbose=False,
    return_costs=False,
    eps=np.finfo(np.float64).eps,
    return_n_iter=False,
):
    """L1-penalized covariance estimator.

    Read more in the :ref:`User Guide <sparse_inverse_covariance>`.

    .. versionchanged:: v0.20
        graph_lasso has been renamed to graphical_lasso

    Parameters
    ----------
    emp_cov : array-like of shape (n_features, n_features)
        Empirical covariance from which to compute the covariance estimate.

    alpha : float
        The regularization parameter: the higher alpha, the more
        regularization, the sparser the inverse covariance.
        Range is (0, inf].

    mode : {'cd', 'lars'}, default='cd'
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where p > n. Elsewhere prefer cd
        which is more numerically stable.

    tol : float, default=1e-4
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped. Range is (0, inf].

    enet_tol : float, default=1e-4
        The tolerance for the elastic net solver used to calculate the descent
        direction. This parameter controls the accuracy of the search direction
        for a given column update, not of the overall parameter estimate. Only
        used for mode='cd'. Range is (0, inf].

    max_iter : int, default=100
        The maximum number of iterations.

    verbose : bool, default=False
        If verbose is True, the objective function and dual gap are
        printed at each iteration.

    return_costs : bool, default=False
        If return_costs is True, the objective function and dual gap
        at each iteration are returned.

    eps : float, default=eps
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Default is `np.finfo(np.float64).eps`.

    return_n_iter : bool, default=False
        Whether or not to return the number of iterations.

    Returns
    -------
    covariance : ndarray of shape (n_features, n_features)
        The estimated covariance matrix.

    precision : ndarray of shape (n_features, n_features)
        The estimated (sparse) precision matrix.

    costs : list of (objective, dual_gap) pairs
        The list of values of the objective function and the dual gap at
        each iteration. Returned only if return_costs is True.

    n_iter : int
        Number of iterations. Returned only if `return_n_iter` is set to True.

    See Also
    --------
    GraphicalLasso : Sparse inverse covariance estimation
        with an l1-penalized estimator.
    GraphicalLassoCV : Sparse inverse covariance with
        cross-validated choice of the l1 penalty.

    Notes
    -----
    The algorithm employed to solve this problem is the GLasso algorithm,
    from the Friedman 2008 Biostatistics paper. It is the same algorithm
    as in the R `glasso` package.

    One possible difference with the `glasso` R package is that the
    diagonal coefficients are not penalized.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.datasets import make_sparse_spd_matrix
    >>> from sklearn.covariance import empirical_covariance, graphical_lasso
    >>> true_cov = make_sparse_spd_matrix(n_dim=3,random_state=42)
    >>> rng = np.random.RandomState(42)
    >>> X = rng.multivariate_normal(mean=np.zeros(3), cov=true_cov, size=3)
    >>> emp_cov = empirical_covariance(X, assume_centered=True)
    >>> emp_cov, _ = graphical_lasso(emp_cov, alpha=0.05)
    >>> emp_cov
    array([[ 1.687, 0.212, -0.209],
           [ 0.212, 0.221, -0.0817],
           [-0.209, -0.0817, 0.232]])
    """
    # Delegate the actual optimization to the estimator class, treating the
    # input as a precomputed covariance matrix.
    estimator = GraphicalLasso(
        alpha=alpha,
        mode=mode,
        covariance="precomputed",
        tol=tol,
        enet_tol=enet_tol,
        max_iter=max_iter,
        verbose=verbose,
        eps=eps,
        assume_centered=True,
    )
    estimator.fit(emp_cov)

    # Assemble the requested outputs in documented order.
    results = [estimator.covariance_, estimator.precision_]
    if return_costs:
        results.append(estimator.costs_)
    if return_n_iter:
        results.append(estimator.n_iter_)
    return tuple(results)
class BaseGraphicalLasso(EmpiricalCovariance):
    # Shared hyper-parameters and validation constraints for the
    # graphical-lasso estimators (not part of the public API).

    _parameter_constraints: dict = {
        **EmpiricalCovariance._parameter_constraints,
        "tol": [Interval(Real, 0, None, closed="right")],
        "enet_tol": [Interval(Real, 0, None, closed="right")],
        "max_iter": [Interval(Integral, 0, None, closed="left")],
        "mode": [StrOptions({"cd", "lars"})],
        "verbose": ["verbose"],
        "eps": [Interval(Real, 0, None, closed="both")],
    }
    # `store_precision` is not a parameter of the graphical-lasso
    # estimators: the precision matrix is always computed and stored.
    _parameter_constraints.pop("store_precision")

    def __init__(
        self,
        tol=1e-4,
        enet_tol=1e-4,
        max_iter=100,
        mode="cd",
        verbose=False,
        eps=np.finfo(np.float64).eps,
        assume_centered=False,
    ):
        super().__init__(assume_centered=assume_centered)
        # Store hyper-parameters verbatim; validation happens at fit time.
        self.eps = eps
        self.verbose = verbose
        self.mode = mode
        self.max_iter = max_iter
        self.enet_tol = enet_tol
        self.tol = tol
class GraphicalLasso(BaseGraphicalLasso):
    """Sparse inverse covariance estimation with an l1-penalized estimator.
    For a usage example see
    :ref:`sphx_glr_auto_examples_applications_plot_stock_market.py`.
    Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
    .. versionchanged:: v0.20
        GraphLasso has been renamed to GraphicalLasso
    Parameters
    ----------
    alpha : float, default=0.01
        The regularization parameter: the higher alpha, the more
        regularization, the sparser the inverse covariance.
        Range is (0, inf].
    mode : {'cd', 'lars'}, default='cd'
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where p > n. Elsewhere prefer cd
        which is more numerically stable.
    covariance : "precomputed", default=None
        If covariance is "precomputed", the input data in `fit` is assumed
        to be the covariance matrix. If `None`, the empirical covariance
        is estimated from the data `X`.
        .. versionadded:: 1.3
    tol : float, default=1e-4
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped. Range is (0, inf].
    enet_tol : float, default=1e-4
        The tolerance for the elastic net solver used to calculate the descent
        direction. This parameter controls the accuracy of the search direction
        for a given column update, not of the overall parameter estimate. Only
        used for mode='cd'. Range is (0, inf].
    max_iter : int, default=100
        The maximum number of iterations.
    verbose : bool, default=False
        If verbose is True, the objective function and dual gap are
        printed at each iteration.
    eps : float, default=eps
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Default is `np.finfo(np.float64).eps`.
        .. versionadded:: 1.3
    assume_centered : bool, default=False
        If True, data are not centered before computation.
        Useful when working with data whose mean is almost, but not exactly
        zero.
        If False, data are centered before computation.
    Attributes
    ----------
    location_ : ndarray of shape (n_features,)
        Estimated location, i.e. the estimated mean.
    covariance_ : ndarray of shape (n_features, n_features)
        Estimated covariance matrix.
    precision_ : ndarray of shape (n_features, n_features)
        Estimated pseudo inverse matrix.
    n_iter_ : int
        Number of iterations run.
    costs_ : list of (objective, dual_gap) pairs
        The list of values of the objective function and the dual gap at
        each iteration, as computed during :meth:`fit`.
        .. versionadded:: 1.3
    n_features_in_ : int
        Number of features seen during :term:`fit`.
        .. versionadded:: 0.24
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.
        .. versionadded:: 1.0
    See Also
    --------
    graphical_lasso : L1-penalized covariance estimator.
    GraphicalLassoCV : Sparse inverse covariance with
        cross-validated choice of the l1 penalty.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.covariance import GraphicalLasso
    >>> true_cov = np.array([[0.8, 0.0, 0.2, 0.0],
    ...                      [0.0, 0.4, 0.0, 0.0],
    ...                      [0.2, 0.0, 0.3, 0.1],
    ...                      [0.0, 0.0, 0.1, 0.7]])
    >>> np.random.seed(0)
    >>> X = np.random.multivariate_normal(mean=[0, 0, 0, 0],
    ...                                   cov=true_cov,
    ...                                   size=200)
    >>> cov = GraphicalLasso().fit(X)
    >>> np.around(cov.covariance_, decimals=3)
    array([[0.816, 0.049, 0.218, 0.019],
           [0.049, 0.364, 0.017, 0.034],
           [0.218, 0.017, 0.322, 0.093],
           [0.019, 0.034, 0.093, 0.69 ]])
    >>> np.around(cov.location_, decimals=3)
    array([0.073, 0.04 , 0.038, 0.143])
    """
    _parameter_constraints: dict = {
        **BaseGraphicalLasso._parameter_constraints,
        "alpha": [Interval(Real, 0, None, closed="both")],
        "covariance": [StrOptions({"precomputed"}), None],
    }
    def __init__(
        self,
        alpha=0.01,
        *,
        mode="cd",
        covariance=None,
        tol=1e-4,
        enet_tol=1e-4,
        max_iter=100,
        verbose=False,
        eps=np.finfo(np.float64).eps,
        assume_centered=False,
    ):
        super().__init__(
            tol=tol,
            enet_tol=enet_tol,
            max_iter=max_iter,
            mode=mode,
            verbose=verbose,
            eps=eps,
            assume_centered=assume_centered,
        )
        self.alpha = alpha
        self.covariance = covariance
    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """Fit the GraphicalLasso model to X.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Data from which to compute the covariance estimate.
        y : Ignored
            Not used, present for API consistency by convention.
        Returns
        -------
        self : object
            Returns the instance itself.
        """
        # Covariance does not make sense for a single feature
        X = validate_data(self, X, ensure_min_features=2, ensure_min_samples=2)
        # When the covariance is precomputed, `X` is already the empirical
        # covariance matrix and the data mean is unknown: report a zero
        # location by convention.
        if self.covariance == "precomputed":
            emp_cov = X.copy()
            self.location_ = np.zeros(X.shape[1])
        else:
            emp_cov = empirical_covariance(X, assume_centered=self.assume_centered)
            if self.assume_centered:
                self.location_ = np.zeros(X.shape[1])
            else:
                self.location_ = X.mean(0)
        # Delegate the actual optimization to the module-level solver.
        self.covariance_, self.precision_, self.costs_, self.n_iter_ = _graphical_lasso(
            emp_cov,
            alpha=self.alpha,
            cov_init=None,
            mode=self.mode,
            tol=self.tol,
            enet_tol=self.enet_tol,
            max_iter=self.max_iter,
            verbose=self.verbose,
            eps=self.eps,
        )
        return self
# Cross-validation with GraphicalLasso
def graphical_lasso_path(
    X,
    alphas,
    cov_init=None,
    X_test=None,
    mode="cd",
    tol=1e-4,
    enet_tol=1e-4,
    max_iter=100,
    verbose=False,
    eps=np.finfo(np.float64).eps,
):
    """l1-penalized covariance estimator along a path of decreasing alphas
    Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Data from which to compute the covariance estimate.
    alphas : array-like of shape (n_alphas,)
        The list of regularization parameters, decreasing order.
    cov_init : array of shape (n_features, n_features), default=None
        The initial guess for the covariance.
    X_test : array of shape (n_test_samples, n_features), default=None
        Optional test matrix to measure generalisation error.
    mode : {'cd', 'lars'}, default='cd'
        The Lasso solver to use: coordinate descent or LARS. Use LARS for
        very sparse underlying graphs, where p > n. Elsewhere prefer cd
        which is more numerically stable.
    tol : float, default=1e-4
        The tolerance to declare convergence: if the dual gap goes below
        this value, iterations are stopped. The tolerance must be a positive
        number.
    enet_tol : float, default=1e-4
        The tolerance for the elastic net solver used to calculate the descent
        direction. This parameter controls the accuracy of the search direction
        for a given column update, not of the overall parameter estimate. Only
        used for mode='cd'. The tolerance must be a positive number.
    max_iter : int, default=100
        The maximum number of iterations. This parameter should be a strictly
        positive integer.
    verbose : int or bool, default=False
        The higher the verbosity flag, the more information is printed
        during the fitting.
    eps : float, default=eps
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Default is `np.finfo(np.float64).eps`.
        .. versionadded:: 1.3
    Returns
    -------
    covariances_ : list of shape (n_alphas,) of ndarray of shape \
            (n_features, n_features)
        The estimated covariance matrices.
    precisions_ : list of shape (n_alphas,) of ndarray of shape \
            (n_features, n_features)
        The estimated (sparse) precision matrices.
    scores_ : list of shape (n_alphas,), dtype=float
        The generalisation error (log-likelihood) on the test data.
        Returned only if test data is passed.
    """
    # Inner solves run one verbosity level quieter than the path itself.
    inner_verbose = max(0, verbose - 1)
    emp_cov = empirical_covariance(X)
    if cov_init is None:
        covariance_ = emp_cov.copy()
    else:
        covariance_ = cov_init
    covariances_ = list()
    precisions_ = list()
    scores_ = list()
    if X_test is not None:
        test_emp_cov = empirical_covariance(X_test)
    for alpha in alphas:
        try:
            # Capture the errors, and move on
            # Warm start: the solution found for the previous (larger) alpha
            # is used to initialize the solve for the current alpha.
            covariance_, precision_, _, _ = _graphical_lasso(
                emp_cov,
                alpha=alpha,
                cov_init=covariance_,
                mode=mode,
                tol=tol,
                enet_tol=enet_tol,
                max_iter=max_iter,
                verbose=inner_verbose,
                eps=eps,
            )
            covariances_.append(covariance_)
            precisions_.append(precision_)
            if X_test is not None:
                this_score = log_likelihood(test_emp_cov, precision_)
        except FloatingPointError:
            # The solver failed for this alpha: record NaN placeholders and
            # the worst possible score, then continue along the path with the
            # last successful covariance as warm start.
            this_score = -np.inf
            covariances_.append(np.nan)
            precisions_.append(np.nan)
        if X_test is not None:
            # Guard against non-finite likelihoods from ill-conditioned fits.
            if not np.isfinite(this_score):
                this_score = -np.inf
            scores_.append(this_score)
        if verbose == 1:
            sys.stderr.write(".")
        elif verbose > 1:
            if X_test is not None:
                print(
                    "[graphical_lasso_path] alpha: %.2e, score: %.2e"
                    % (alpha, this_score)
                )
            else:
                print("[graphical_lasso_path] alpha: %.2e" % alpha)
    if X_test is not None:
        return covariances_, precisions_, scores_
    return covariances_, precisions_
class GraphicalLassoCV(BaseGraphicalLasso):
"""Sparse inverse covariance w/ cross-validated choice of the l1 penalty.
See glossary entry for :term:`cross-validation estimator`.
Read more in the :ref:`User Guide <sparse_inverse_covariance>`.
.. versionchanged:: v0.20
GraphLassoCV has been renamed to GraphicalLassoCV
Parameters
----------
alphas : int or array-like of shape (n_alphas,), dtype=float, default=4
If an integer is given, it fixes the number of points on the
grids of alpha to be used. If a list is given, it gives the
grid to be used. See the notes in the class docstring for
more details. Range is [1, inf) for an integer.
Range is (0, inf] for an array-like of floats.
n_refinements : int, default=4
The number of times the grid is refined. Not used if explicit
values of alphas are passed. Range is [1, inf).
cv : int, cross-validation generator or iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross-validation,
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs :class:`~sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.20
``cv`` default value if None changed from 3-fold to 5-fold.
tol : float, default=1e-4
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped. Range is (0, inf].
enet_tol : float, default=1e-4
The tolerance for the elastic net solver used to calculate the descent
direction. This parameter controls the accuracy of the search direction
for a given column update, not of the overall parameter estimate. Only
used for mode='cd'. Range is (0, inf].
max_iter : int, default=100
Maximum number of iterations.
mode : {'cd', 'lars'}, default='cd'
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where number of features is greater
than number of samples. Elsewhere prefer cd which is more numerically
stable.
n_jobs : int, default=None
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionchanged:: v0.20
`n_jobs` default changed from 1 to None
verbose : bool, default=False
If verbose is True, the objective function and duality gap are
printed at each iteration.
eps : float, default=eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Default is `np.finfo(np.float64).eps`.
.. versionadded:: 1.3
assume_centered : bool, default=False
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Attributes
----------
location_ : ndarray of shape (n_features,)
Estimated location, i.e. the estimated mean.
covariance_ : ndarray of shape (n_features, n_features)
Estimated covariance matrix.
precision_ : ndarray of shape (n_features, n_features)
Estimated precision matrix (inverse covariance).
costs_ : list of (objective, dual_gap) pairs
The list of values of the objective function and the dual gap at
each iteration. Returned only if return_costs is True.
.. versionadded:: 1.3
alpha_ : float
Penalization parameter selected.
cv_results_ : dict of ndarrays
A dict with keys:
alphas : ndarray of shape (n_alphas,)
All penalization parameters explored.
split(k)_test_score : ndarray of shape (n_alphas,)
Log-likelihood score on left-out data across (k)th fold.
.. versionadded:: 1.0
mean_test_score : ndarray of shape (n_alphas,)
Mean of scores over the folds.
.. versionadded:: 1.0
std_test_score : ndarray of shape (n_alphas,)
Standard deviation of scores over the folds.
.. versionadded:: 1.0
n_iter_ : int
Number of iterations run for the optimal alpha.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
graphical_lasso : L1-penalized covariance estimator.
GraphicalLasso : Sparse inverse covariance estimation
with an l1-penalized estimator.
Notes
-----
The search for the optimal penalization parameter (`alpha`) is done on an
iteratively refined grid: first the cross-validated scores on a grid are
computed, then a new refined grid is centered around the maximum, and so
on.
One of the challenges which is faced here is that the solvers can
fail to converge to a well-conditioned estimate. The corresponding
values of `alpha` then come out as missing values, but the optimum may
be close to these missing values.
In `fit`, once the best parameter `alpha` is found through
cross-validation, the model is fit again using the entire training set.
Examples
--------
>>> import numpy as np
>>> from sklearn.covariance import GraphicalLassoCV
>>> true_cov = np.array([[0.8, 0.0, 0.2, 0.0],
... [0.0, 0.4, 0.0, 0.0],
... [0.2, 0.0, 0.3, 0.1],
... [0.0, 0.0, 0.1, 0.7]])
>>> np.random.seed(0)
>>> X = np.random.multivariate_normal(mean=[0, 0, 0, 0],
... cov=true_cov,
... size=200)
>>> cov = GraphicalLassoCV().fit(X)
>>> np.around(cov.covariance_, decimals=3)
array([[0.816, 0.051, 0.22 , 0.017],
[0.051, 0.364, 0.018, 0.036],
[0.22 , 0.018, 0.322, 0.094],
[0.017, 0.036, 0.094, 0.69 ]])
>>> np.around(cov.location_, decimals=3)
array([0.073, 0.04 , 0.038, 0.143])
For an example comparing :class:`sklearn.covariance.GraphicalLassoCV`,
:func:`sklearn.covariance.ledoit_wolf` shrinkage and the empirical covariance
on high-dimensional gaussian data, see
:ref:`sphx_glr_auto_examples_covariance_plot_sparse_cov.py`.
"""
_parameter_constraints: dict = {
**BaseGraphicalLasso._parameter_constraints,
"alphas": [Interval(Integral, 0, None, closed="left"), "array-like"],
"n_refinements": [Interval(Integral, 1, None, closed="left")],
"cv": ["cv_object"],
"n_jobs": [Integral, None],
}
def __init__(
self,
*,
alphas=4,
n_refinements=4,
cv=None,
tol=1e-4,
enet_tol=1e-4,
max_iter=100,
mode="cd",
n_jobs=None,
verbose=False,
eps=np.finfo(np.float64).eps,
assume_centered=False,
):
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/covariance/_elliptic_envelope.py | sklearn/covariance/_elliptic_envelope.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from numbers import Real
import numpy as np
from sklearn.base import OutlierMixin, _fit_context
from sklearn.covariance._robust_covariance import MinCovDet
from sklearn.metrics import accuracy_score
from sklearn.utils._param_validation import Interval
from sklearn.utils.validation import check_is_fitted
class EllipticEnvelope(OutlierMixin, MinCovDet):
    """An object for detecting outliers in a Gaussian distributed dataset.
    Read more in the :ref:`User Guide <outlier_detection>`.
    Parameters
    ----------
    store_precision : bool, default=True
        Specify if the estimated precision is stored.
    assume_centered : bool, default=False
        If True, the support of robust location and covariance estimates
        is computed, and a covariance estimate is recomputed from it,
        without centering the data.
        Useful to work with data whose mean is significantly equal to
        zero but is not exactly zero.
        If False, the robust location and covariance are directly computed
        with the FastMCD algorithm without additional treatment.
    support_fraction : float, default=None
        The proportion of points to be included in the support of the raw
        MCD estimate. If None, the minimum value of support_fraction will
        be used within the algorithm: `(n_samples + n_features + 1) / 2 * n_samples`.
        Range is (0, 1).
    contamination : float, default=0.1
        The amount of contamination of the data set, i.e. the proportion
        of outliers in the data set. Range is (0, 0.5].
    random_state : int, RandomState instance or None, default=None
        Determines the pseudo random number generator for shuffling
        the data. Pass an int for reproducible results across multiple function
        calls. See :term:`Glossary <random_state>`.
    Attributes
    ----------
    location_ : ndarray of shape (n_features,)
        Estimated robust location.
    covariance_ : ndarray of shape (n_features, n_features)
        Estimated robust covariance matrix.
    precision_ : ndarray of shape (n_features, n_features)
        Estimated pseudo inverse matrix.
        (stored only if store_precision is True)
    support_ : ndarray of shape (n_samples,)
        A mask of the observations that have been used to compute the
        robust estimates of location and shape.
    offset_ : float
        Offset used to define the decision function from the raw scores.
        We have the relation: ``decision_function = score_samples - offset_``.
        The offset depends on the contamination parameter and is defined in
        such a way we obtain the expected number of outliers (samples with
        decision function < 0) in training.
        .. versionadded:: 0.20
    raw_location_ : ndarray of shape (n_features,)
        The raw robust estimated location before correction and re-weighting.
    raw_covariance_ : ndarray of shape (n_features, n_features)
        The raw robust estimated covariance before correction and re-weighting.
    raw_support_ : ndarray of shape (n_samples,)
        A mask of the observations that have been used to compute
        the raw robust estimates of location and shape, before correction
        and re-weighting.
    dist_ : ndarray of shape (n_samples,)
        Mahalanobis distances of the training set (on which :meth:`fit` is
        called) observations.
    n_features_in_ : int
        Number of features seen during :term:`fit`.
        .. versionadded:: 0.24
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.
        .. versionadded:: 1.0
    See Also
    --------
    EmpiricalCovariance : Maximum likelihood covariance estimator.
    GraphicalLasso : Sparse inverse covariance estimation
        with an l1-penalized estimator.
    LedoitWolf : LedoitWolf Estimator.
    MinCovDet : Minimum Covariance Determinant
        (robust estimator of covariance).
    OAS : Oracle Approximating Shrinkage Estimator.
    ShrunkCovariance : Covariance estimator with shrinkage.
    Notes
    -----
    Outlier detection from covariance estimation may break or not
    perform well in high-dimensional settings. In particular, one will
    always take care to work with ``n_samples > n_features ** 2``.
    References
    ----------
    .. [1] Rousseeuw, P.J., Van Driessen, K. "A fast algorithm for the
       minimum covariance determinant estimator" Technometrics 41(3), 212
       (1999)
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.covariance import EllipticEnvelope
    >>> true_cov = np.array([[.8, .3],
    ...                      [.3, .4]])
    >>> X = np.random.RandomState(0).multivariate_normal(mean=[0, 0],
    ...                                                  cov=true_cov,
    ...                                                  size=500)
    >>> cov = EllipticEnvelope(random_state=0).fit(X)
    >>> # predict returns 1 for an inlier and -1 for an outlier
    >>> cov.predict([[0, 0],
    ...              [3, 3]])
    array([ 1, -1])
    >>> cov.covariance_
    array([[0.8102, 0.2736],
           [0.2736, 0.3330]])
    >>> cov.location_
    array([0.0769 , 0.0397])
    """
    _parameter_constraints: dict = {
        **MinCovDet._parameter_constraints,
        "contamination": [Interval(Real, 0, 0.5, closed="right")],
    }
    def __init__(
        self,
        *,
        store_precision=True,
        assume_centered=False,
        support_fraction=None,
        contamination=0.1,
        random_state=None,
    ):
        super().__init__(
            store_precision=store_precision,
            assume_centered=assume_centered,
            support_fraction=support_fraction,
            random_state=random_state,
        )
        self.contamination = contamination
    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """Fit the EllipticEnvelope model.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data.
        y : Ignored
            Not used, present for API consistency by convention.
        Returns
        -------
        self : object
            Returns the instance itself.
        """
        # MinCovDet.fit computes the robust covariance and the training
        # Mahalanobis distances `self.dist_`.
        super().fit(X)
        # Choose the offset so that a fraction `contamination` of the
        # training samples gets a negative decision function (i.e. is
        # flagged as outliers).
        self.offset_ = np.percentile(-self.dist_, 100.0 * self.contamination)
        return self
    def decision_function(self, X):
        """Compute the decision function of the given observations.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data matrix.
        Returns
        -------
        decision : ndarray of shape (n_samples,)
            Decision function of the samples.
            It is equal to the shifted Mahalanobis distances.
            The threshold for being an outlier is 0, which ensures a
            compatibility with other outlier detection algorithms.
        """
        check_is_fitted(self)
        negative_mahal_dist = self.score_samples(X)
        return negative_mahal_dist - self.offset_
    def score_samples(self, X):
        """Compute the negative Mahalanobis distances.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data matrix.
        Returns
        -------
        negative_mahal_distances : array-like of shape (n_samples,)
            Opposite of the Mahalanobis distances.
        """
        check_is_fitted(self)
        # Higher score = more normal, hence the sign flip on the distance.
        return -self.mahalanobis(X)
    def predict(self, X):
        """
        Predict labels (1 inlier, -1 outlier) of X according to fitted model.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data matrix.
        Returns
        -------
        is_inlier : ndarray of shape (n_samples,)
            Returns -1 for anomalies/outliers and +1 for inliers.
        """
        values = self.decision_function(X)
        # Start from -1 (outlier) everywhere, then flip samples with a
        # non-negative decision function to +1 (inlier).
        is_inlier = np.full(values.shape[0], -1, dtype=int)
        is_inlier[values >= 0] = 1
        return is_inlier
    def score(self, X, y, sample_weight=None):
        """Return the mean accuracy on the given test data and labels.
        In multi-label classification, this is the subset accuracy
        which is a harsh metric since you require for each sample that
        each label set be correctly predicted.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Test samples.
        y : array-like of shape (n_samples,) or (n_samples, n_outputs)
            True labels for X.
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights.
        Returns
        -------
        score : float
            Mean accuracy of self.predict(X) w.r.t. y.
        """
        return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/covariance/_empirical_covariance.py | sklearn/covariance/_empirical_covariance.py | """
Maximum likelihood covariance estimator.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# avoid division truncation
import warnings
import numpy as np
from scipy import linalg
from sklearn import config_context
from sklearn.base import BaseEstimator, _fit_context
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.utils import check_array, metadata_routing
from sklearn.utils._param_validation import validate_params
from sklearn.utils.extmath import fast_logdet
from sklearn.utils.validation import validate_data
@validate_params(
    {
        "emp_cov": [np.ndarray],
        "precision": [np.ndarray],
    },
    prefer_skip_nested_validation=True,
)
def log_likelihood(emp_cov, precision):
    """Compute the sample mean of the log_likelihood under a covariance model.

    Computes the empirical expected log-likelihood, allowing for universal
    comparison (beyond this software package), and accounts for normalization
    terms and scaling.

    Parameters
    ----------
    emp_cov : ndarray of shape (n_features, n_features)
        Maximum Likelihood Estimator of covariance.

    precision : ndarray of shape (n_features, n_features)
        The precision matrix of the covariance model to be tested.

    Returns
    -------
    log_likelihood_ : float
        Sample mean of the log-likelihood.
    """
    n_features = precision.shape[0]
    # Gaussian log-likelihood per sample:
    #   (-tr(S @ K) + log det K - p * log(2*pi)) / 2
    # where S is the empirical covariance and K the model precision.
    unnormalized = -np.sum(emp_cov * precision) + fast_logdet(precision)
    unnormalized -= n_features * np.log(2 * np.pi)
    return unnormalized / 2.0
@validate_params(
    {
        "X": ["array-like"],
        "assume_centered": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def empirical_covariance(X, *, assume_centered=False):
    """Compute the Maximum likelihood covariance estimator.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Data from which to compute the covariance estimate.

    assume_centered : bool, default=False
        If `True`, data will not be centered before computation.
        Useful when working with data whose mean is almost, but not exactly
        zero.
        If `False`, data will be centered before computation.

    Returns
    -------
    covariance : ndarray of shape (n_features, n_features)
        Empirical covariance (Maximum Likelihood Estimator).

    Examples
    --------
    >>> from sklearn.covariance import empirical_covariance
    >>> X = [[1,1,1],[1,1,1],[1,1,1],
    ...      [0,0,0],[0,0,0],[0,0,0]]
    >>> empirical_covariance(X)
    array([[0.25, 0.25, 0.25],
           [0.25, 0.25, 0.25],
           [0.25, 0.25, 0.25]])
    """
    X = check_array(X, ensure_2d=False, ensure_all_finite=False)
    # A 1-d input is interpreted as a single sample.
    if X.ndim == 1:
        X = X.reshape(1, -1)
    n_samples = X.shape[0]
    if n_samples == 1:
        warnings.warn(
            "Only one sample available. You may want to reshape your data array"
        )
    if assume_centered:
        # Second moment about the origin (mean assumed to be zero).
        covariance = X.T @ X / n_samples
    else:
        # np.cov with bias=1 normalizes by n_samples, matching the MLE.
        covariance = np.cov(X.T, bias=1)
    # np.cov collapses a single feature to a 0-d array; promote to 2-d.
    if covariance.ndim == 0:
        covariance = np.array([[covariance]])
    return covariance
class EmpiricalCovariance(BaseEstimator):
"""Maximum likelihood covariance estimator.
Read more in the :ref:`User Guide <covariance>`.
Parameters
----------
store_precision : bool, default=True
Specifies if the estimated precision is stored.
assume_centered : bool, default=False
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False (default), data are centered before computation.
Attributes
----------
location_ : ndarray of shape (n_features,)
Estimated location, i.e. the estimated mean.
covariance_ : ndarray of shape (n_features, n_features)
Estimated covariance matrix.
precision_ : ndarray of shape (n_features, n_features)
Estimated pseudo-inverse matrix.
(stored only if store_precision is True)
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
EllipticEnvelope : An object for detecting outliers in
a Gaussian distributed dataset.
GraphicalLasso : Sparse inverse covariance estimation
with an l1-penalized estimator.
LedoitWolf : LedoitWolf Estimator.
MinCovDet : Minimum Covariance Determinant
(robust estimator of covariance).
OAS : Oracle Approximating Shrinkage Estimator.
ShrunkCovariance : Covariance estimator with shrinkage.
Examples
--------
>>> import numpy as np
>>> from sklearn.covariance import EmpiricalCovariance
>>> from sklearn.datasets import make_gaussian_quantiles
>>> real_cov = np.array([[.8, .3],
... [.3, .4]])
>>> rng = np.random.RandomState(0)
>>> X = rng.multivariate_normal(mean=[0, 0],
... cov=real_cov,
... size=500)
>>> cov = EmpiricalCovariance().fit(X)
>>> cov.covariance_
array([[0.7569, 0.2818],
[0.2818, 0.3928]])
>>> cov.location_
array([0.0622, 0.0193])
"""
# X_test should have been called X
__metadata_request__score = {"X_test": metadata_routing.UNUSED}
_parameter_constraints: dict = {
"store_precision": ["boolean"],
"assume_centered": ["boolean"],
}
def __init__(self, *, store_precision=True, assume_centered=False):
self.store_precision = store_precision
self.assume_centered = assume_centered
def _set_covariance(self, covariance):
"""Saves the covariance and precision estimates
Storage is done accordingly to `self.store_precision`.
Precision stored only if invertible.
Parameters
----------
covariance : array-like of shape (n_features, n_features)
Estimated covariance matrix to be stored, and from which precision
is computed.
"""
covariance = check_array(covariance)
# set covariance
self.covariance_ = covariance
# set precision
if self.store_precision:
self.precision_ = linalg.pinvh(covariance, check_finite=False)
else:
self.precision_ = None
def get_precision(self):
"""Getter for the precision matrix.
Returns
-------
precision_ : array-like of shape (n_features, n_features)
The precision matrix associated to the current covariance object.
"""
if self.store_precision:
precision = self.precision_
else:
precision = linalg.pinvh(self.covariance_, check_finite=False)
return precision
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None):
"""Fit the maximum likelihood covariance estimator to X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Returns the instance itself.
"""
X = validate_data(self, X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance = empirical_covariance(X, assume_centered=self.assume_centered)
self._set_covariance(covariance)
return self
def score(self, X_test, y=None):
"""Compute the log-likelihood of `X_test` under the estimated Gaussian model.
The Gaussian model is defined by its mean and covariance matrix which are
represented respectively by `self.location_` and `self.covariance_`.
Parameters
----------
X_test : array-like of shape (n_samples, n_features)
Test data of which we compute the likelihood, where `n_samples` is
the number of samples and `n_features` is the number of features.
`X_test` is assumed to be drawn from the same distribution than
the data used in fit (including centering).
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
res : float
The log-likelihood of `X_test` with `self.location_` and `self.covariance_`
as estimators of the Gaussian model mean and covariance matrix respectively.
"""
X_test = validate_data(self, X_test, reset=False)
# compute empirical covariance of the test set
test_cov = empirical_covariance(X_test - self.location_, assume_centered=True)
# compute log likelihood
res = log_likelihood(test_cov, self.get_precision())
return res
def error_norm(self, comp_cov, norm="frobenius", scaling=True, squared=True):
"""Compute the Mean Squared Error between two covariance estimators.
Parameters
----------
comp_cov : array-like of shape (n_features, n_features)
The covariance to compare with.
norm : {"frobenius", "spectral"}, default="frobenius"
The type of norm used to compute the error. Available error types:
- 'frobenius' (default): sqrt(tr(A^t.A))
- 'spectral': sqrt(max(eigenvalues(A^t.A))
where A is the error ``(comp_cov - self.covariance_)``.
scaling : bool, default=True
If True (default), the squared error norm is divided by n_features.
If False, the squared error norm is not rescaled.
squared : bool, default=True
Whether to compute the squared error norm or the error norm.
If True (default), the squared error norm is returned.
If False, the error norm is returned.
Returns
-------
result : float
The Mean Squared Error (in the sense of the Frobenius norm) between
`self` and `comp_cov` covariance estimators.
"""
# compute the error
error = comp_cov - self.covariance_
# compute the error norm
if norm == "frobenius":
squared_norm = np.sum(error**2)
elif norm == "spectral":
squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error)))
else:
raise NotImplementedError(
"Only spectral and frobenius norms are implemented"
)
# optionally scale the error norm
if scaling:
squared_norm = squared_norm / error.shape[0]
# finally get either the squared norm or the norm
if squared:
result = squared_norm
else:
result = np.sqrt(squared_norm)
return result
def mahalanobis(self, X):
    """Compute the squared Mahalanobis distances of given observations.

    For a detailed example of how outliers affects the Mahalanobis distance,
    see :ref:`sphx_glr_auto_examples_covariance_plot_mahalanobis_distances.py`.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        The observations, the Mahalanobis distances of the which we
        compute. Observations are assumed to be drawn from the same
        distribution than the data used in fit.

    Returns
    -------
    dist : ndarray of shape (n_samples,)
        Squared Mahalanobis distances of the observations.
    """
    X = validate_data(self, X, reset=False)
    precision = self.get_precision()
    # Inputs were already validated above; skip finiteness re-checks in
    # the pairwise-distance computation.
    with config_context(assume_finite=True):
        sqrt_dist = pairwise_distances(
            X, self.location_[np.newaxis, :], metric="mahalanobis", VI=precision
        )
    # pairwise_distances returns a (n_samples, 1) column of plain
    # Mahalanobis distances; flatten and square them.
    return np.reshape(sqrt_dist, (len(X),)) ** 2
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/covariance/__init__.py | sklearn/covariance/__init__.py | """Methods and algorithms to robustly estimate covariance.
They estimate the covariance of features at given sets of points, as well as the
precision matrix defined as the inverse of the covariance. Covariance estimation is
closely related to the theory of Gaussian graphical models.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from sklearn.covariance._elliptic_envelope import EllipticEnvelope
from sklearn.covariance._empirical_covariance import (
EmpiricalCovariance,
empirical_covariance,
log_likelihood,
)
from sklearn.covariance._graph_lasso import (
GraphicalLasso,
GraphicalLassoCV,
graphical_lasso,
)
from sklearn.covariance._robust_covariance import MinCovDet, fast_mcd
from sklearn.covariance._shrunk_covariance import (
OAS,
LedoitWolf,
ShrunkCovariance,
ledoit_wolf,
ledoit_wolf_shrinkage,
oas,
shrunk_covariance,
)
__all__ = [
"OAS",
"EllipticEnvelope",
"EmpiricalCovariance",
"GraphicalLasso",
"GraphicalLassoCV",
"LedoitWolf",
"MinCovDet",
"ShrunkCovariance",
"empirical_covariance",
"fast_mcd",
"graphical_lasso",
"ledoit_wolf",
"ledoit_wolf_shrinkage",
"log_likelihood",
"oas",
"shrunk_covariance",
]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/covariance/_shrunk_covariance.py | sklearn/covariance/_shrunk_covariance.py | """
Covariance estimators using shrinkage.
Shrinkage corresponds to regularising `cov` using a convex combination:
shrunk_cov = (1-shrinkage)*cov + shrinkage*structured_estimate.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# avoid division truncation
import warnings
from numbers import Integral, Real
import numpy as np
from sklearn.base import _fit_context
from sklearn.covariance import EmpiricalCovariance, empirical_covariance
from sklearn.utils import check_array
from sklearn.utils._param_validation import Interval, validate_params
from sklearn.utils.validation import validate_data
def _ledoit_wolf(X, *, assume_centered, block_size):
    """Estimate the shrunk Ledoit-Wolf covariance matrix.

    Returns the regularised covariance together with the shrinkage
    coefficient that was used to build it.
    """
    # With a single feature, every convex combination of the empirical
    # variance with itself is identical, so the shrinkage is irrelevant.
    if X.ndim == 2 and X.shape[1] == 1:
        if not assume_centered:
            X = X - X.mean()
        return np.atleast_2d((X**2).mean()), 0.0

    n_features = X.shape[1]

    # Shrinkage intensity from the Ledoit-Wolf formula.
    shrinkage = ledoit_wolf_shrinkage(
        X, assume_centered=assume_centered, block_size=block_size
    )

    emp_cov = empirical_covariance(X, assume_centered=assume_centered)
    # Target intensity: average of the diagonal of the empirical covariance.
    mu = np.sum(np.trace(emp_cov)) / n_features
    shrunk_cov = (1.0 - shrinkage) * emp_cov
    # Add the scaled-identity target contribution on the diagonal only.
    shrunk_cov.flat[:: n_features + 1] += shrinkage * mu
    return shrunk_cov, shrinkage
def _oas(X, *, assume_centered=False):
    """Estimate covariance with the Oracle Approximating Shrinkage algorithm.

    The formulation is based on [1]_.

    [1] "Shrinkage algorithms for MMSE covariance estimation.",
    Chen, Y., Wiesel, A., Eldar, Y. C., & Hero, A. O.
    IEEE Transactions on Signal Processing, 58(10), 5016-5029, 2010.
    https://arxiv.org/pdf/0907.4698.pdf
    """
    # With a single feature the shrinkage has no effect: return the
    # (possibly centered) empirical variance directly.
    if X.ndim == 2 and X.shape[1] == 1:
        if not assume_centered:
            X = X - X.mean()
        return np.atleast_2d((X**2).mean()), 0.0

    n_samples, n_features = X.shape
    emp_cov = empirical_covariance(X, assume_centered=assume_centered)

    # Eq. 23 in [1] defines the shrinkage as
    #   min((tr(S @ S.T) + tr(S)**2) / ((n + 1) * (tr(S @ S.T) - tr(S)**2 / p)), 1)
    # with n = n_samples and p = n_features.  The factor 2 / p of the
    # original formula is omitted: it does not impact the estimator for
    # large p.  np.mean(S**2) equals tr(S @ S.T) / p**2 (the Frobenius norm
    # identity, https://en.wikipedia.org/wiki/Matrix_norm#Frobenius_norm),
    # and the 1 / p**2 factor cancels between numerator and denominator.
    mean_sq = np.mean(emp_cov**2)
    trace_mean = np.trace(emp_cov) / n_features
    trace_mean_sq = trace_mean**2

    numerator = mean_sq + trace_mean_sq
    denominator = (n_samples + 1) * (mean_sq - trace_mean_sq / n_features)
    shrinkage = 1.0 if denominator == 0 else min(numerator / denominator, 1.0)

    # Convex combination of the empirical covariance with the target
    # F = trace_mean * Identity (Eqs. 3 and 4 in [1]); only the diagonal
    # entries are shifted by the target.
    shrunk_cov = (1.0 - shrinkage) * emp_cov
    shrunk_cov.flat[:: n_features + 1] += shrinkage * trace_mean
    return shrunk_cov, shrinkage
###############################################################################
# Public API
# ShrunkCovariance estimator
@validate_params(
    {
        "emp_cov": ["array-like"],
        "shrinkage": [Interval(Real, 0, 1, closed="both")],
    },
    prefer_skip_nested_validation=True,
)
def shrunk_covariance(emp_cov, shrinkage=0.1):
    """Calculate covariance matrices shrunk on the diagonal.

    Read more in the :ref:`User Guide <shrunk_covariance>`.

    Parameters
    ----------
    emp_cov : array-like of shape (..., n_features, n_features)
        Covariance matrices to be shrunk, at least 2D ndarray.

    shrinkage : float, default=0.1
        Coefficient in the convex combination used for the computation
        of the shrunk estimate. Range is [0, 1].

    Returns
    -------
    shrunk_cov : ndarray of shape (..., n_features, n_features)
        Shrunk covariance matrices.

    Notes
    -----
    The regularized (shrunk) covariance is given by::

        (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)

    where `mu = trace(cov) / n_features`.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.datasets import make_gaussian_quantiles
    >>> from sklearn.covariance import empirical_covariance, shrunk_covariance
    >>> real_cov = np.array([[.8, .3], [.3, .4]])
    >>> rng = np.random.RandomState(0)
    >>> X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=500)
    >>> shrunk_covariance(empirical_covariance(X))
    array([[0.739, 0.254],
           [0.254, 0.411]])
    """
    emp_cov = check_array(emp_cov, allow_nd=True)
    n_features = emp_cov.shape[-1]

    # Per-matrix target intensity: mean of the diagonal over the trailing
    # (n_features, n_features) axes, broadcast back to the batch shape.
    mu = np.trace(emp_cov, axis1=-2, axis2=-1) / n_features
    mu = np.expand_dims(mu, axis=tuple(range(mu.ndim, emp_cov.ndim)))

    # Convex combination with the scaled-identity target.
    shrunk_cov = (1.0 - shrinkage) * emp_cov
    shrunk_cov += shrinkage * mu * np.eye(n_features)
    return shrunk_cov
class ShrunkCovariance(EmpiricalCovariance):
    """Covariance estimator with shrinkage.

    Read more in the :ref:`User Guide <shrunk_covariance>`.

    Parameters
    ----------
    store_precision : bool, default=True
        Specify if the estimated precision is stored.

    assume_centered : bool, default=False
        If True, data will not be centered before computation.
        Useful when working with data whose mean is almost, but not exactly
        zero.
        If False, data will be centered before computation.

    shrinkage : float, default=0.1
        Coefficient in the convex combination used for the computation
        of the shrunk estimate. Range is [0, 1].

    Attributes
    ----------
    covariance_ : ndarray of shape (n_features, n_features)
        Estimated covariance matrix

    location_ : ndarray of shape (n_features,)
        Estimated location, i.e. the estimated mean.

    precision_ : ndarray of shape (n_features, n_features)
        Estimated pseudo inverse matrix.
        (stored only if store_precision is True)

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    EllipticEnvelope : An object for detecting outliers in
        a Gaussian distributed dataset.
    EmpiricalCovariance : Maximum likelihood covariance estimator.
    GraphicalLasso : Sparse inverse covariance estimation
        with an l1-penalized estimator.
    GraphicalLassoCV : Sparse inverse covariance with cross-validated
        choice of the l1 penalty.
    LedoitWolf : LedoitWolf Estimator.
    MinCovDet : Minimum Covariance Determinant
        (robust estimator of covariance).
    OAS : Oracle Approximating Shrinkage Estimator.

    Notes
    -----
    The regularized covariance is given by:

    (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)

    where mu = trace(cov) / n_features

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.covariance import ShrunkCovariance
    >>> from sklearn.datasets import make_gaussian_quantiles
    >>> real_cov = np.array([[.8, .3],
    ...                      [.3, .4]])
    >>> rng = np.random.RandomState(0)
    >>> X = rng.multivariate_normal(mean=[0, 0],
    ...                             cov=real_cov,
    ...                             size=500)
    >>> cov = ShrunkCovariance().fit(X)
    >>> cov.covariance_
    array([[0.7387, 0.2536],
           [0.2536, 0.4110]])
    >>> cov.location_
    array([0.0622, 0.0193])
    """

    # Inherit the parent's constraints and add the [0, 1] bound on shrinkage.
    _parameter_constraints: dict = {
        **EmpiricalCovariance._parameter_constraints,
        "shrinkage": [Interval(Real, 0, 1, closed="both")],
    }

    def __init__(self, *, store_precision=True, assume_centered=False, shrinkage=0.1):
        super().__init__(
            store_precision=store_precision, assume_centered=assume_centered
        )
        self.shrinkage = shrinkage

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """Fit the shrunk covariance model to X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples
            and `n_features` is the number of features.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        X = validate_data(self, X)
        # Not calling the parent object to fit, to avoid a potential
        # matrix inversion when setting the precision
        if self.assume_centered:
            self.location_ = np.zeros(X.shape[1])
        else:
            self.location_ = X.mean(0)
        # Shrink the empirical covariance towards a scaled identity target.
        covariance = empirical_covariance(X, assume_centered=self.assume_centered)
        covariance = shrunk_covariance(covariance, self.shrinkage)
        # _set_covariance also computes the precision matrix when
        # store_precision is True.
        self._set_covariance(covariance)
        return self
# Ledoit-Wolf estimator
@validate_params(
    {
        "X": ["array-like"],
        "assume_centered": ["boolean"],
        "block_size": [Interval(Integral, 1, None, closed="left")],
    },
    prefer_skip_nested_validation=True,
)
def ledoit_wolf_shrinkage(X, assume_centered=False, block_size=1000):
    """Estimate the shrunk Ledoit-Wolf covariance matrix.

    Read more in the :ref:`User Guide <shrunk_covariance>`.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Data from which to compute the Ledoit-Wolf shrunk covariance shrinkage.

    assume_centered : bool, default=False
        If True, data will not be centered before computation.
        Useful to work with data whose mean is significantly equal to
        zero but is not exactly zero.
        If False, data will be centered before computation.

    block_size : int, default=1000
        Size of blocks into which the covariance matrix will be split.

    Returns
    -------
    shrinkage : float
        Coefficient in the convex combination used for the computation
        of the shrunk estimate.

    Notes
    -----
    The regularized (shrunk) covariance is:

    (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)

    where mu = trace(cov) / n_features

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.covariance import ledoit_wolf_shrinkage
    >>> real_cov = np.array([[.4, .2], [.2, .8]])
    >>> rng = np.random.RandomState(0)
    >>> X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=50)
    >>> shrinkage_coefficient = ledoit_wolf_shrinkage(X)
    >>> shrinkage_coefficient
    np.float64(0.23)
    """
    X = check_array(X)
    # for only one feature, the result is the same whatever the shrinkage
    if len(X.shape) == 2 and X.shape[1] == 1:
        return 0.0
    if X.ndim == 1:
        X = np.reshape(X, (1, -1))

    if X.shape[0] == 1:
        warnings.warn(
            "Only one sample available. You may want to reshape your data array"
        )
    n_samples, n_features = X.shape

    # optionally center data
    if not assume_centered:
        X = X - X.mean(0)

    # A non-blocked version of the computation is present in the tests
    # in tests/test_covariance.py

    # number of full blocks the feature axis is split into; the remaining
    # n_features % block_size features form a trailing partial block that is
    # handled explicitly after each loop below
    n_splits = int(n_features / block_size)
    X2 = X**2
    emp_cov_trace = np.sum(X2, axis=0) / n_samples
    mu = np.sum(emp_cov_trace) / n_features
    beta_ = 0.0  # sum of the coefficients of <X2.T, X2>
    delta_ = 0.0  # sum of the *squared* coefficients of <X.T, X>
    # starting block computation: accumulate beta_ and delta_ block by block
    # so that no (n_features, n_features) matrix is ever materialized
    for i in range(n_splits):
        for j in range(n_splits):
            rows = slice(block_size * i, block_size * (i + 1))
            cols = slice(block_size * j, block_size * (j + 1))
            beta_ += np.sum(np.dot(X2.T[rows], X2[:, cols]))
            delta_ += np.sum(np.dot(X.T[rows], X[:, cols]) ** 2)
        # trailing partial column block for full row block i
        rows = slice(block_size * i, block_size * (i + 1))
        beta_ += np.sum(np.dot(X2.T[rows], X2[:, block_size * n_splits :]))
        delta_ += np.sum(np.dot(X.T[rows], X[:, block_size * n_splits :]) ** 2)
    # trailing partial row block against each full column block
    for j in range(n_splits):
        cols = slice(block_size * j, block_size * (j + 1))
        beta_ += np.sum(np.dot(X2.T[block_size * n_splits :], X2[:, cols]))
        delta_ += np.sum(np.dot(X.T[block_size * n_splits :], X[:, cols]) ** 2)
    # bottom-right corner: partial rows against partial columns
    delta_ += np.sum(
        np.dot(X.T[block_size * n_splits :], X[:, block_size * n_splits :]) ** 2
    )
    delta_ /= n_samples**2
    beta_ += np.sum(
        np.dot(X2.T[block_size * n_splits :], X2[:, block_size * n_splits :])
    )
    # use delta_ to compute beta
    beta = 1.0 / (n_features * n_samples) * (beta_ / n_samples - delta_)
    # delta is the sum of the squared coefficients of (<X.T,X> - mu*Id) / p
    delta = delta_ - 2.0 * mu * emp_cov_trace.sum() + n_features * mu**2
    delta /= n_features
    # get final beta as the min between beta and delta
    # We do this to prevent shrinking more than "1", which would invert
    # the value of covariances
    beta = min(beta, delta)
    # finally get shrinkage
    shrinkage = 0 if beta == 0 else beta / delta
    return shrinkage
@validate_params(
    {"X": ["array-like"]},
    prefer_skip_nested_validation=False,
)
def ledoit_wolf(X, *, assume_centered=False, block_size=1000):
    """Estimate the shrunk Ledoit-Wolf covariance matrix.

    Read more in the :ref:`User Guide <shrunk_covariance>`.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Data from which to compute the covariance estimate.

    assume_centered : bool, default=False
        If True, data will not be centered before computation.
        Useful to work with data whose mean is significantly equal to
        zero but is not exactly zero.
        If False, data will be centered before computation.

    block_size : int, default=1000
        Size of blocks into which the covariance matrix will be split.
        This is purely a memory optimization and does not affect results.

    Returns
    -------
    shrunk_cov : ndarray of shape (n_features, n_features)
        Shrunk covariance.

    shrinkage : float
        Coefficient in the convex combination used for the computation
        of the shrunk estimate.

    Notes
    -----
    The regularized (shrunk) covariance is:

    (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)

    where mu = trace(cov) / n_features

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.covariance import empirical_covariance, ledoit_wolf
    >>> real_cov = np.array([[.4, .2], [.2, .8]])
    >>> rng = np.random.RandomState(0)
    >>> X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=50)
    >>> covariance, shrinkage = ledoit_wolf(X)
    >>> covariance
    array([[0.44, 0.16],
           [0.16, 0.80]])
    >>> shrinkage
    np.float64(0.23)
    """
    # Thin functional wrapper: delegate the whole computation to the
    # LedoitWolf estimator; the precision matrix is not needed here.
    lw = LedoitWolf(
        assume_centered=assume_centered,
        block_size=block_size,
        store_precision=False,
    )
    lw.fit(X)
    return lw.covariance_, lw.shrinkage_
class LedoitWolf(EmpiricalCovariance):
    """LedoitWolf Estimator.

    Ledoit-Wolf is a particular form of shrinkage, where the shrinkage
    coefficient is computed using O. Ledoit and M. Wolf's formula as
    described in "A Well-Conditioned Estimator for Large-Dimensional
    Covariance Matrices", Ledoit and Wolf, Journal of Multivariate
    Analysis, Volume 88, Issue 2, February 2004, pages 365-411.

    Read more in the :ref:`User Guide <shrunk_covariance>`.

    Parameters
    ----------
    store_precision : bool, default=True
        Specify if the estimated precision is stored.

    assume_centered : bool, default=False
        If True, data will not be centered before computation.
        Useful when working with data whose mean is almost, but not exactly
        zero.
        If False (default), data will be centered before computation.

    block_size : int, default=1000
        Size of blocks into which the covariance matrix will be split
        during its Ledoit-Wolf estimation. This is purely a memory
        optimization and does not affect results.

    Attributes
    ----------
    covariance_ : ndarray of shape (n_features, n_features)
        Estimated covariance matrix.

    location_ : ndarray of shape (n_features,)
        Estimated location, i.e. the estimated mean.

    precision_ : ndarray of shape (n_features, n_features)
        Estimated pseudo inverse matrix.
        (stored only if store_precision is True)

    shrinkage_ : float
        Coefficient in the convex combination used for the computation
        of the shrunk estimate. Range is [0, 1].

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    EllipticEnvelope : An object for detecting outliers in
        a Gaussian distributed dataset.
    EmpiricalCovariance : Maximum likelihood covariance estimator.
    GraphicalLasso : Sparse inverse covariance estimation
        with an l1-penalized estimator.
    GraphicalLassoCV : Sparse inverse covariance with cross-validated
        choice of the l1 penalty.
    MinCovDet : Minimum Covariance Determinant
        (robust estimator of covariance).
    OAS : Oracle Approximating Shrinkage Estimator.
    ShrunkCovariance : Covariance estimator with shrinkage.

    Notes
    -----
    The regularised covariance is:

    (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)

    where mu = trace(cov) / n_features
    and shrinkage is given by the Ledoit and Wolf formula (see References)

    References
    ----------
    "A Well-Conditioned Estimator for Large-Dimensional Covariance Matrices",
    Ledoit and Wolf, Journal of Multivariate Analysis, Volume 88, Issue 2,
    February 2004, pages 365-411.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.covariance import LedoitWolf
    >>> real_cov = np.array([[.4, .2],
    ...                      [.2, .8]])
    >>> np.random.seed(0)
    >>> X = np.random.multivariate_normal(mean=[0, 0],
    ...                                   cov=real_cov,
    ...                                   size=50)
    >>> cov = LedoitWolf().fit(X)
    >>> cov.covariance_
    array([[0.4406, 0.1616],
           [0.1616, 0.8022]])
    >>> cov.location_
    array([ 0.0595 , -0.0075])

    See also :ref:`sphx_glr_auto_examples_covariance_plot_covariance_estimation.py`
    and :ref:`sphx_glr_auto_examples_covariance_plot_lw_vs_oas.py`
    for more detailed examples.
    """

    # Inherit the parent's constraints and add the positive-int block_size.
    _parameter_constraints: dict = {
        **EmpiricalCovariance._parameter_constraints,
        "block_size": [Interval(Integral, 1, None, closed="left")],
    }

    def __init__(self, *, store_precision=True, assume_centered=False, block_size=1000):
        super().__init__(
            store_precision=store_precision, assume_centered=assume_centered
        )
        self.block_size = block_size

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """Fit the Ledoit-Wolf shrunk covariance model to X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples
            and `n_features` is the number of features.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        # Not calling the parent object to fit, to avoid computing the
        # covariance matrix (and potentially the precision)
        X = validate_data(self, X)
        if self.assume_centered:
            self.location_ = np.zeros(X.shape[1])
        else:
            self.location_ = X.mean(0)
        # The data is centered here, so the helper is told it is already
        # centered (assume_centered=True).
        covariance, shrinkage = _ledoit_wolf(
            X - self.location_, assume_centered=True, block_size=self.block_size
        )
        self.shrinkage_ = shrinkage
        # _set_covariance also computes the precision matrix when
        # store_precision is True.
        self._set_covariance(covariance)
        return self
# OAS estimator
@validate_params(
    {"X": ["array-like"]},
    prefer_skip_nested_validation=False,
)
def oas(X, *, assume_centered=False):
    """Estimate covariance with the Oracle Approximating Shrinkage.

    Read more in the :ref:`User Guide <shrunk_covariance>`.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Data from which to compute the covariance estimate.

    assume_centered : bool, default=False
        If True, data will not be centered before computation.
        Useful to work with data whose mean is significantly equal to
        zero but is not exactly zero.
        If False, data will be centered before computation.

    Returns
    -------
    shrunk_cov : array-like of shape (n_features, n_features)
        Shrunk covariance.

    shrinkage : float
        Coefficient in the convex combination used for the computation
        of the shrunk estimate.

    Notes
    -----
    The regularised covariance is:

    (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features),

    where mu = trace(cov) / n_features and shrinkage is given by the OAS formula
    (see [1]_).

    The shrinkage formulation implemented here differs from Eq. 23 in [1]_. In
    the original article, formula (23) states that 2/p (p being the number of
    features) is multiplied by Trace(cov*cov) in both the numerator and
    denominator, but this operation is omitted because for a large p, the value
    of 2/p is so small that it doesn't affect the value of the estimator.

    References
    ----------
    .. [1] :arxiv:`"Shrinkage algorithms for MMSE covariance estimation.",
           Chen, Y., Wiesel, A., Eldar, Y. C., & Hero, A. O.
           IEEE Transactions on Signal Processing, 58(10), 5016-5029, 2010.
           <0907.4698>`

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.covariance import oas
    >>> rng = np.random.RandomState(0)
    >>> real_cov = [[.8, .3], [.3, .4]]
    >>> X = rng.multivariate_normal(mean=[0, 0], cov=real_cov, size=500)
    >>> shrunk_cov, shrinkage = oas(X)
    >>> shrunk_cov
    array([[0.7533, 0.2763],
           [0.2763, 0.3964]])
    >>> shrinkage
    np.float64(0.0195)
    """
    # Thin functional wrapper around the OAS estimator class.
    estimator = OAS(assume_centered=assume_centered)
    estimator.fit(X)
    return estimator.covariance_, estimator.shrinkage_
class OAS(EmpiricalCovariance):
    """Oracle Approximating Shrinkage Estimator.

    Read more in the :ref:`User Guide <shrunk_covariance>`.

    Parameters
    ----------
    store_precision : bool, default=True
        Specify if the estimated precision is stored.

    assume_centered : bool, default=False
        If True, data will not be centered before computation.
        Useful when working with data whose mean is almost, but not exactly
        zero.
        If False (default), data will be centered before computation.

    Attributes
    ----------
    covariance_ : ndarray of shape (n_features, n_features)
        Estimated covariance matrix.

    location_ : ndarray of shape (n_features,)
        Estimated location, i.e. the estimated mean.

    precision_ : ndarray of shape (n_features, n_features)
        Estimated pseudo inverse matrix.
        (stored only if store_precision is True)

    shrinkage_ : float
        coefficient in the convex combination used for the computation
        of the shrunk estimate. Range is [0, 1].

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    EllipticEnvelope : An object for detecting outliers in
        a Gaussian distributed dataset.
    EmpiricalCovariance : Maximum likelihood covariance estimator.
    GraphicalLasso : Sparse inverse covariance estimation
        with an l1-penalized estimator.
    GraphicalLassoCV : Sparse inverse covariance with cross-validated
        choice of the l1 penalty.
    LedoitWolf : LedoitWolf Estimator.
    MinCovDet : Minimum Covariance Determinant
        (robust estimator of covariance).
    ShrunkCovariance : Covariance estimator with shrinkage.

    Notes
    -----
    The regularised covariance is:

    (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features),

    where mu = trace(cov) / n_features and shrinkage is given by the OAS formula
    (see [1]_).

    The shrinkage formulation implemented here differs from Eq. 23 in [1]_. In
    the original article, formula (23) states that 2/p (p being the number of
    features) is multiplied by Trace(cov*cov) in both the numerator and
    denominator, but this operation is omitted because for a large p, the value
    of 2/p is so small that it doesn't affect the value of the estimator.

    References
    ----------
    .. [1] :arxiv:`"Shrinkage algorithms for MMSE covariance estimation.",
           Chen, Y., Wiesel, A., Eldar, Y. C., & Hero, A. O.
           IEEE Transactions on Signal Processing, 58(10), 5016-5029, 2010.
           <0907.4698>`

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.covariance import OAS
    >>> from sklearn.datasets import make_gaussian_quantiles
    >>> real_cov = np.array([[.8, .3],
    ...                      [.3, .4]])
    >>> rng = np.random.RandomState(0)
    >>> X = rng.multivariate_normal(mean=[0, 0],
    ...                             cov=real_cov,
    ...                             size=500)
    >>> oas = OAS().fit(X)
    >>> oas.covariance_
    array([[0.7533, 0.2763],
           [0.2763, 0.3964]])
    >>> oas.precision_
    array([[ 1.7833, -1.2431 ],
           [-1.2431,  3.3889]])
    >>> oas.shrinkage_
    np.float64(0.0195)

    See also :ref:`sphx_glr_auto_examples_covariance_plot_covariance_estimation.py`
    and :ref:`sphx_glr_auto_examples_covariance_plot_lw_vs_oas.py`
    for more detailed examples.
    """

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """Fit the Oracle Approximating Shrinkage covariance model to X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data, where `n_samples` is the number of samples
            and `n_features` is the number of features.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        X = validate_data(self, X)
        # Not calling the parent object to fit, to avoid computing the
        # covariance matrix (and potentially the precision)
        if self.assume_centered:
            self.location_ = np.zeros(X.shape[1])
        else:
            self.location_ = X.mean(0)
        # The data is centered here, so the helper is told it is already
        # centered (assume_centered=True).
        covariance, shrinkage = _oas(X - self.location_, assume_centered=True)
        self.shrinkage_ = shrinkage
        # _set_covariance also computes the precision matrix when
        # store_precision is True.
        self._set_covariance(covariance)
        return self
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/covariance/tests/test_robust_covariance.py | sklearn/covariance/tests/test_robust_covariance.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import itertools
import numpy as np
import pytest
from sklearn import datasets
from sklearn.covariance import MinCovDet, empirical_covariance, fast_mcd
from sklearn.utils._testing import assert_array_almost_equal
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd(global_random_seed):
    # Tests the FastMCD algorithm implementation on data sets of varying
    # size, dimensionality and contamination level.
    cases = [
        # (n_samples, n_features, n_outliers, tol_loc, tol_cov, tol_support)
        # Small data set, no outliers (random independent normal data)
        (100, 5, 0, 0.02, 0.1, 74),
        # Small data set, medium contamination
        (100, 5, 20, 0.3, 0.3, 65),
        # Small data set, strong contamination
        (100, 5, 40, 0.1, 0.1, 50),
        # Medium data set
        (1000, 5, 450, 0.1, 0.1, 540),
        # Large data set
        (1700, 5, 800, 0.1, 0.1, 870),
        # 1D data set
        (500, 1, 100, 0.10, 0.10, 350),
        # n_samples == n_features
        (20, 20, 0, 0.1, 0.1, 15),
    ]
    for n_samples, n_features, n_outliers, tol_loc, tol_cov, tol_support in cases:
        launch_mcd_on_dataset(
            n_samples,
            n_features,
            n_outliers,
            tol_loc,
            tol_cov,
            tol_support,
            global_random_seed,
        )
def test_fast_mcd_on_invalid_input():
    # fast_mcd requires 2D input; a 1D array must be rejected with a
    # helpful error message.
    bad_X = np.arange(100)
    expected_msg = "Expected 2D array, got 1D array instead"
    with pytest.raises(ValueError, match=expected_msg):
        fast_mcd(bad_X)
def test_mcd_class_on_invalid_input():
    # The MinCovDet estimator must reject 1D input with a helpful message.
    bad_X = np.arange(100)
    expected_msg = "Expected 2D array, got 1D array instead"
    with pytest.raises(ValueError, match=expected_msg):
        MinCovDet().fit(bad_X)
def launch_mcd_on_dataset(
    n_samples, n_features, n_outliers, tol_loc, tol_cov, tol_support, seed
):
    """Fit MinCovDet on synthetic contaminated data and check its estimates."""
    rng = np.random.RandomState(seed)
    data = rng.randn(n_samples, n_features)

    # Shift a random subset of samples to turn them into outliers.
    outliers_index = rng.permutation(n_samples)[:n_outliers]
    outliers_offset = 10.0 * (rng.randint(2, size=(n_outliers, n_features)) - 0.5)
    data[outliers_index] += outliers_offset

    # The complement of the outliers gives the uncontaminated data.
    inliers_mask = np.ones(n_samples).astype(bool)
    inliers_mask[outliers_index] = False
    pure_data = data[inliers_mask]

    # compute MCD by fitting an object on the contaminated data
    mcd_fit = MinCovDet(random_state=seed).fit(data)

    # The robust estimates should stay close to the estimates computed
    # from the inliers alone.
    assert np.mean((pure_data.mean(0) - mcd_fit.location_) ** 2) < tol_loc
    assert np.mean((empirical_covariance(pure_data) - mcd_fit.covariance_) ** 2) < tol_cov
    assert np.sum(mcd_fit.support_) >= tol_support
    assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
    # Non-regression check: X.shape = (3, 1), i.e. n_support == n_samples,
    # must not break the fit.
    X = np.random.RandomState(0).normal(size=(3, 1))
    MinCovDet().fit(X)
def test_mcd_issue3367(global_random_seed):
    # Check that MCD completes when the covariance matrix is singular,
    # i.e. one of the rows and columns are all zeros.
    rand_gen = np.random.RandomState(global_random_seed)

    # 10 evenly spaced values between -5 and 5, used as both X and Y
    # coordinates.
    coords = np.linspace(-5, 5, 10).tolist()
    # All possible (x, y) pairs: the cartesian product of the coordinate set.
    data = np.array(list(itertools.product(coords, coords)))
    # Append an all-zero third column: the points now lie in the z = 0
    # plane, so their covariance matrix is singular.
    data = np.hstack((data, np.zeros((data.shape[0], 1))))

    # Fitting should not raise despite the singular covariance.  A further
    # check one could add: since the points lie in a plane, the eigenvector
    # of mcd_fit.covariance_ with the smallest eigenvalue should be the
    # plane normal [0, 0, 1] (up to floating point error), e.g.:
    #
    #   evals, evecs = np.linalg.eigh(mcd_fit.covariance_)
    #   normal = evecs[:, np.argmin(evals)]
    #
    # and then assert that `normal` equals [0, 0, 1] within ~1e-12.
    MinCovDet(random_state=rand_gen).fit(data)
def test_mcd_support_covariance_is_zero():
    # MinCovDet must raise an informative ValueError when the covariance
    # of the support data is equal to 0.
    X_1 = np.array([0.5, 0.1, 0.1, 0.1, 0.957, 0.1, 0.1, 0.1, 0.4285, 0.1]).reshape(
        -1, 1
    )
    X_2 = np.array([0.5, 0.3, 0.3, 0.3, 0.957, 0.3, 0.3, 0.3, 0.4285, 0.3]).reshape(
        -1, 1
    )
    msg = (
        "The covariance matrix of the support data is equal to 0, try to "
        "increase support_fraction"
    )
    for X in (X_1, X_2):
        with pytest.raises(ValueError, match=msg):
            MinCovDet().fit(X)
def test_mcd_increasing_det_warning(global_random_seed):
# Check that a warning is raised if we observe increasing determinants
# during the c_step. In theory the sequence of determinants should be
# decreasing. Increasing determinants are likely due to ill-conditioned
# covariance matrices that result in poor precision matrices.
X = [
[5.1, 3.5, 1.4, 0.2],
[4.9, 3.0, 1.4, 0.2],
[4.7, 3.2, 1.3, 0.2],
[4.6, 3.1, 1.5, 0.2],
[5.0, 3.6, 1.4, 0.2],
[4.6, 3.4, 1.4, 0.3],
[5.0, 3.4, 1.5, 0.2],
[4.4, 2.9, 1.4, 0.2],
[4.9, 3.1, 1.5, 0.1],
[5.4, 3.7, 1.5, 0.2],
[4.8, 3.4, 1.6, 0.2],
[4.8, 3.0, 1.4, 0.1],
[4.3, 3.0, 1.1, 0.1],
[5.1, 3.5, 1.4, 0.3],
[5.7, 3.8, 1.7, 0.3],
[5.4, 3.4, 1.7, 0.2],
[4.6, 3.6, 1.0, 0.2],
[5.0, 3.0, 1.6, 0.2],
[5.2, 3.5, 1.5, 0.2],
]
mcd = MinCovDet(support_fraction=0.5, random_state=global_random_seed)
warn_msg = "Determinant has increased"
with pytest.warns(RuntimeWarning, match=warn_msg):
mcd.fit(X)
@pytest.mark.parametrize("n_samples,n_features", [(2000, 10)])
def test_mincovdet_bias_on_normal(n_samples, n_features, global_random_seed):
"""Check that MinCovDet does not underestimate the empirical
variance on Gaussian data.
A large sample size and n_features makes the test robust.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/23162
"""
threshold = 0.985 # threshold for variance underesitmation
rng = np.random.default_rng(global_random_seed)
x = rng.normal(size=(n_features, n_samples))
# Assume centered data, to reduce test complexity
var_emp = empirical_covariance(x.T, assume_centered=True).diagonal()
cov_mcd = (
MinCovDet(
support_fraction=1.0,
store_precision=False,
assume_centered=True,
random_state=global_random_seed,
)
.fit(x.T)
.covariance_
)
var_mcd = np.diag(cov_mcd)
# compute mean ratio of variances
mean_var_ratio = np.sum(var_mcd) / np.sum(var_emp)
assert mean_var_ratio > threshold, "MinCovDet underestimates the Gaussian variance"
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/covariance/tests/test_graphical_lasso.py | sklearn/covariance/tests/test_graphical_lasso.py | """Test the graphical_lasso module."""
import sys
from io import StringIO
import numpy as np
import pytest
from numpy.testing import assert_allclose
from scipy import linalg
from sklearn import config_context, datasets
from sklearn.covariance import (
GraphicalLasso,
GraphicalLassoCV,
empirical_covariance,
graphical_lasso,
)
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.model_selection import GroupKFold
from sklearn.utils import check_random_state
from sklearn.utils._testing import (
_convert_container,
assert_array_almost_equal,
assert_array_less,
)
def test_graphical_lassos(global_random_seed):
"""Test the graphical lasso solvers."""
# Sample data from a sparse multivariate normal
dim = 10
n_samples = 100
random_state = check_random_state(global_random_seed)
prec = make_sparse_spd_matrix(dim, alpha=0.95, random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0.0, 0.1, 0.25):
covs = dict()
icovs = dict()
for method in ("cd", "lars"):
cov_, icov_, costs = graphical_lasso(
emp_cov,
return_costs=True,
alpha=alpha,
mode=method,
tol=1e-7,
enet_tol=1e-11,
max_iter=100,
)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
# use 1e-10 since the cost can be exactly 0
assert_array_less(np.diff(costs), 1e-10)
# Check that the 2 approaches give similar results
assert_allclose(covs["cd"], covs["lars"], atol=2e-3)
assert_allclose(icovs["cd"], icovs["lars"], atol=2e-3)
# Smoke test the estimator
model = GraphicalLasso(alpha=0.25, tol=1e-7, enet_tol=1e-11, max_iter=100).fit(X)
model.score(X)
assert_allclose(model.covariance_, covs["cd"], rtol=1e-6)
# For a centered matrix, assume_centered could be chosen True or False
# Check that this returns indeed the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphicalLasso(assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graphical_lasso_when_alpha_equals_0(global_random_seed):
"""Test graphical_lasso's early return condition when alpha=0."""
X = np.random.RandomState(global_random_seed).randn(100, 10)
emp_cov = empirical_covariance(X, assume_centered=True)
model = GraphicalLasso(alpha=0, covariance="precomputed").fit(emp_cov)
assert_allclose(model.precision_, np.linalg.inv(emp_cov))
_, precision = graphical_lasso(emp_cov, alpha=0)
assert_allclose(precision, np.linalg.inv(emp_cov))
@pytest.mark.parametrize("mode", ["cd", "lars"])
@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning")
def test_graphical_lasso_n_iter(mode):
X, _ = datasets.make_classification(n_samples=5_000, n_features=20, random_state=0)
emp_cov = empirical_covariance(X)
_, _, n_iter = graphical_lasso(
emp_cov, 0.2, mode=mode, max_iter=2, return_n_iter=True
)
assert n_iter == 2
def test_graphical_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
# (need to set penalize.diagonal to FALSE)
cov_R = np.array(
[
[0.68112222, 0.0000000, 0.265820, 0.02464314],
[0.00000000, 0.1887129, 0.000000, 0.00000000],
[0.26582000, 0.0000000, 3.095503, 0.28697200],
[0.02464314, 0.0000000, 0.286972, 0.57713289],
]
)
icov_R = np.array(
[
[1.5190747, 0.000000, -0.1304475, 0.0000000],
[0.0000000, 5.299055, 0.0000000, 0.0000000],
[-0.1304475, 0.000000, 0.3498624, -0.1683946],
[0.0000000, 0.000000, -0.1683946, 1.8164353],
]
)
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ("cd", "lars"):
cov, icov = graphical_lasso(emp_cov, alpha=1.0, return_costs=False, mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_2D():
# Hard-coded solution from Python skggm package
# obtained by calling `quic(emp_cov, lam=.1, tol=1e-8)`
cov_skggm = np.array([[3.09550269, 1.186972], [1.186972, 0.57713289]])
icov_skggm = np.array([[1.52836773, -3.14334831], [-3.14334831, 8.19753385]])
X = datasets.load_iris().data[:, 2:]
emp_cov = empirical_covariance(X)
for method in ("cd", "lars"):
cov, icov = graphical_lasso(emp_cov, alpha=0.1, return_costs=False, mode=method)
assert_array_almost_equal(cov, cov_skggm)
assert_array_almost_equal(icov, icov_skggm)
@pytest.mark.parametrize("method", ["cd", "lars"])
def test_graphical_lasso_iris_singular(method):
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
# library(glasso)
# X = t(array(c(
# 5.4, 3.7, 1.5, 0.2,
# 4.8, 3.4, 1.6, 0.2,
# 4.8, 3. , 1.4, 0.1),
# dim = c(4, 3)
# ))
# n = nrow(X)
# emp_cov = cov(X) * (n - 1)/n # without Bessel correction
# sol = glasso(emp_cov, 0.01, penalize.diagonal = FALSE)
# # print cov_R
# print(noquote(format(sol$w, scientific=FALSE, digits = 10)))
cov_R = np.array(
[
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222],
]
)
icov_R = np.array(
[
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5],
]
)
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
cov, icov = graphical_lasso(emp_cov, alpha=0.01, return_costs=False, mode=method)
assert_allclose(cov, cov_R, atol=1e-6)
assert_allclose(icov, icov_R, atol=1e-5)
def test_graphical_lasso_cv(global_random_seed):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = np.random.RandomState(global_random_seed)
prec = make_sparse_spd_matrix(dim, alpha=0.96, random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphicalLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
@pytest.mark.parametrize("alphas_container_type", ["list", "tuple", "array"])
def test_graphical_lasso_cv_alphas_iterable(alphas_container_type):
"""Check that we can pass an array-like to `alphas`.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/22489
"""
true_cov = np.array(
[
[0.8, 0.0, 0.2, 0.0],
[0.0, 0.4, 0.0, 0.0],
[0.2, 0.0, 0.3, 0.1],
[0.0, 0.0, 0.1, 0.7],
]
)
rng = np.random.RandomState(0)
X = rng.multivariate_normal(mean=[0, 0, 0, 0], cov=true_cov, size=200)
alphas = _convert_container([0.02, 0.03], alphas_container_type)
GraphicalLassoCV(alphas=alphas, tol=1e-1, n_jobs=1).fit(X)
@pytest.mark.parametrize(
"alphas,err_type,err_msg",
[
([-0.02, 0.03], ValueError, "must be > 0"),
([0, 0.03], ValueError, "must be > 0"),
(["not_number", 0.03], TypeError, "must be an instance of float"),
],
)
def test_graphical_lasso_cv_alphas_invalid_array(alphas, err_type, err_msg):
"""Check that if an array-like containing a value
outside of (0, inf] is passed to `alphas`, a ValueError is raised.
Check if a string is passed, a TypeError is raised.
"""
true_cov = np.array(
[
[0.8, 0.0, 0.2, 0.0],
[0.0, 0.4, 0.0, 0.0],
[0.2, 0.0, 0.3, 0.1],
[0.0, 0.0, 0.1, 0.7],
]
)
rng = np.random.RandomState(0)
X = rng.multivariate_normal(mean=[0, 0, 0, 0], cov=true_cov, size=200)
with pytest.raises(err_type, match=err_msg):
GraphicalLassoCV(alphas=alphas, tol=1e-1, n_jobs=1).fit(X)
def test_graphical_lasso_cv_scores(global_random_seed):
splits = 4
n_alphas = 5
n_refinements = 3
true_cov = np.array(
[
[0.8, 0.0, 0.2, 0.0],
[0.0, 0.4, 0.0, 0.0],
[0.2, 0.0, 0.3, 0.1],
[0.0, 0.0, 0.1, 0.7],
]
)
rng = np.random.RandomState(global_random_seed)
X = rng.multivariate_normal(mean=[0, 0, 0, 0], cov=true_cov, size=200)
cov = GraphicalLassoCV(cv=splits, alphas=n_alphas, n_refinements=n_refinements).fit(
X
)
_assert_graphical_lasso_cv_scores(
cov=cov,
n_splits=splits,
n_refinements=n_refinements,
n_alphas=n_alphas,
)
@config_context(enable_metadata_routing=True)
def test_graphical_lasso_cv_scores_with_routing(global_random_seed):
"""Check that `GraphicalLassoCV` internally dispatches metadata to
the splitter.
"""
splits = 5
n_alphas = 5
n_refinements = 3
true_cov = np.array(
[
[0.8, 0.0, 0.2, 0.0],
[0.0, 0.4, 0.0, 0.0],
[0.2, 0.0, 0.3, 0.1],
[0.0, 0.0, 0.1, 0.7],
]
)
rng = np.random.RandomState(global_random_seed)
X = rng.multivariate_normal(mean=[0, 0, 0, 0], cov=true_cov, size=300)
n_samples = X.shape[0]
groups = rng.randint(0, 5, n_samples)
params = {"groups": groups}
cv = GroupKFold(n_splits=splits)
cv.set_split_request(groups=True)
cov = GraphicalLassoCV(cv=cv, alphas=n_alphas, n_refinements=n_refinements).fit(
X, **params
)
_assert_graphical_lasso_cv_scores(
cov=cov,
n_splits=splits,
n_refinements=n_refinements,
n_alphas=n_alphas,
)
def _assert_graphical_lasso_cv_scores(cov, n_splits, n_refinements, n_alphas):
cv_results = cov.cv_results_
# alpha and one for each split
total_alphas = n_refinements * n_alphas + 1
keys = ["alphas"]
split_keys = [f"split{i}_test_score" for i in range(n_splits)]
for key in keys + split_keys:
assert key in cv_results
assert len(cv_results[key]) == total_alphas
cv_scores = np.asarray([cov.cv_results_[key] for key in split_keys])
expected_mean = cv_scores.mean(axis=0)
expected_std = cv_scores.std(axis=0)
assert_allclose(cov.cv_results_["mean_test_score"], expected_mean)
assert_allclose(cov.cv_results_["std_test_score"], expected_std)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/covariance/tests/__init__.py | sklearn/covariance/tests/__init__.py | python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false | |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/covariance/tests/test_elliptic_envelope.py | sklearn/covariance/tests/test_elliptic_envelope.py | """
Testing for Elliptic Envelope algorithm (sklearn.covariance.elliptic_envelope).
"""
import numpy as np
import pytest
from sklearn.covariance import EllipticEnvelope
from sklearn.exceptions import NotFittedError
from sklearn.utils._testing import (
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
)
def test_elliptic_envelope(global_random_seed):
rnd = np.random.RandomState(global_random_seed)
X = rnd.randn(100, 10)
clf = EllipticEnvelope(contamination=0.1)
with pytest.raises(NotFittedError):
clf.predict(X)
with pytest.raises(NotFittedError):
clf.decision_function(X)
clf.fit(X)
y_pred = clf.predict(X)
scores = clf.score_samples(X)
decisions = clf.decision_function(X)
assert_array_almost_equal(scores, -clf.mahalanobis(X))
assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
assert_almost_equal(
clf.score(X, np.ones(100)), (100 - y_pred[y_pred == -1].size) / 100.0
)
assert sum(y_pred == -1) == sum(decisions < 0)
def test_score_samples():
X_train = [[1, 1], [1, 2], [2, 1]]
clf1 = EllipticEnvelope(contamination=0.2).fit(X_train)
clf2 = EllipticEnvelope().fit(X_train)
assert_array_equal(
clf1.score_samples([[2.0, 2.0]]),
clf1.decision_function([[2.0, 2.0]]) + clf1.offset_,
)
assert_array_equal(
clf2.score_samples([[2.0, 2.0]]),
clf2.decision_function([[2.0, 2.0]]) + clf2.offset_,
)
assert_array_equal(
clf1.score_samples([[2.0, 2.0]]), clf2.score_samples([[2.0, 2.0]])
)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/covariance/tests/test_covariance.py | sklearn/covariance/tests/test_covariance.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
import pytest
from sklearn import datasets
from sklearn.covariance import (
OAS,
EmpiricalCovariance,
LedoitWolf,
ShrunkCovariance,
empirical_covariance,
ledoit_wolf,
ledoit_wolf_shrinkage,
oas,
shrunk_covariance,
)
from sklearn.covariance._shrunk_covariance import _ledoit_wolf, _oas
from sklearn.utils._testing import (
assert_allclose,
assert_almost_equal,
assert_array_almost_equal,
assert_array_equal,
)
X, _ = datasets.load_diabetes(return_X_y=True)
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
# Tests Covariance module on a simple dataset.
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(cov.error_norm(emp_cov, norm="spectral"), 0)
assert_almost_equal(cov.error_norm(emp_cov, norm="frobenius"), 0)
assert_almost_equal(cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(cov.error_norm(emp_cov, squared=False), 0)
with pytest.raises(NotImplementedError):
cov.error_norm(emp_cov, norm="foo")
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
assert np.amin(mahal_dist) > 0
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d), norm="spectral"), 0)
# test with one sample
# Create X with 1 sample and 5 features
X_1sample = np.arange(5).reshape(1, 5)
cov = EmpiricalCovariance()
warn_msg = "Only one sample available. You may want to reshape your data array"
with pytest.warns(UserWarning, match=warn_msg):
cov.fit(X_1sample)
assert_array_almost_equal(cov.covariance_, np.zeros(shape=(5, 5), dtype=np.float64))
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_array_equal(cov.location_, np.zeros(X.shape[1]))
@pytest.mark.parametrize("n_matrices", [1, 3])
def test_shrunk_covariance_func(n_matrices):
"""Check `shrunk_covariance` function."""
n_features = 2
cov = np.ones((n_features, n_features))
cov_target = np.array([[1, 0.5], [0.5, 1]])
if n_matrices > 1:
cov = np.repeat(cov[np.newaxis, ...], n_matrices, axis=0)
cov_target = np.repeat(cov_target[np.newaxis, ...], n_matrices, axis=0)
cov_shrunk = shrunk_covariance(cov, 0.5)
assert_allclose(cov_shrunk, cov_target)
def test_shrunk_covariance():
"""Check consistency between `ShrunkCovariance` and `shrunk_covariance`."""
# Tests ShrunkCovariance module on a simple dataset.
# compare shrunk covariance obtained from data and from MLE estimate
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5), cov.covariance_, 4
)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4
)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.0)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
assert cov.precision_ is None
def test_ledoit_wolf():
# Tests LedoitWolf module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(
ledoit_wolf_shrinkage(X_centered, assume_centered=True), shrinkage_
)
assert_almost_equal(
ledoit_wolf_shrinkage(X_centered, assume_centered=True, block_size=6),
shrinkage_,
)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(
X_centered, assume_centered=True
)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_1d, assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d**2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
assert lw.precision_ is None
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(
lw.shrinkage_, _ledoit_wolf(X=X, assume_centered=False, block_size=10000)[1]
)
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
assert_allclose(
X_1d.var(ddof=0),
_ledoit_wolf(X=X_1d, assume_centered=False, block_size=10000)[0],
)
lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_1d)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
# warning should be raised when using only 1 sample
X_1sample = np.arange(5).reshape(1, 5)
lw = LedoitWolf()
warn_msg = "Only one sample available. You may want to reshape your data array"
with pytest.warns(UserWarning, match=warn_msg):
lw.fit(X_1sample)
assert_array_almost_equal(lw.covariance_, np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
assert lw.precision_ is None
def _naive_ledoit_wolf_shrinkage(X):
# A simple implementation of the formulas from Ledoit & Wolf
# The computation below achieves the following computations of the
# "O. Ledoit and M. Wolf, A Well-Conditioned Estimator for
# Large-Dimensional Covariance Matrices"
# beta and delta are given in the beginning of section 3.2
n_samples, n_features = X.shape
emp_cov = empirical_covariance(X, assume_centered=False)
mu = np.trace(emp_cov) / n_features
delta_ = emp_cov.copy()
delta_.flat[:: n_features + 1] -= mu
delta = (delta_**2).sum() / n_features
X2 = X**2
beta_ = (
1.0
/ (n_features * n_samples)
* np.sum(np.dot(X2.T, X2) / n_samples - emp_cov**2)
)
beta = min(beta_, delta)
shrinkage = beta / delta
return shrinkage
def test_ledoit_wolf_small():
# Compare our blocked implementation to the naive implementation
X_small = X[:, :4]
lw = LedoitWolf()
lw.fit(X_small)
shrinkage_ = lw.shrinkage_
assert_almost_equal(shrinkage_, _naive_ledoit_wolf_shrinkage(X_small))
def test_ledoit_wolf_large(global_random_seed):
# test that ledoit_wolf doesn't error on data that is wider than block_size
rng = np.random.RandomState(global_random_seed)
# use a number of features that is larger than the block-size
X = rng.normal(size=(10, 20))
lw = LedoitWolf(block_size=10).fit(X)
# check that covariance is about diagonal (random normal noise)
assert_almost_equal(lw.covariance_, np.eye(20), 0)
cov = lw.covariance_
# check that the result is consistent with not splitting data into blocks.
lw = LedoitWolf(block_size=25).fit(X)
assert_almost_equal(lw.covariance_, cov)
@pytest.mark.parametrize(
"ledoit_wolf_fitting_function", [LedoitWolf().fit, ledoit_wolf_shrinkage]
)
def test_ledoit_wolf_empty_array(ledoit_wolf_fitting_function):
"""Check that we validate X and raise proper error with 0-sample array."""
X_empty = np.zeros((0, 2))
with pytest.raises(ValueError, match="Found array with 0 sample"):
ledoit_wolf_fitting_function(X_empty)
def test_oas():
# Tests OAS module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_centered, assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0:1]
oa = OAS(assume_centered=True)
oa.fit(X_1d)
oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d, assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d**2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert oa.precision_ is None
# Same tests without assuming centered data--------------------------------
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shrinkage_from_mle = oas(X)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
# warning should be raised when using only 1 sample
X_1sample = np.arange(5).reshape(1, 5)
oa = OAS()
warn_msg = "Only one sample available. You may want to reshape your data array"
with pytest.warns(UserWarning, match=warn_msg):
oa.fit(X_1sample)
assert_array_almost_equal(oa.covariance_, np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert oa.precision_ is None
# test function _oas without assuming centered data
X_1f = X[:, 0:1]
oa = OAS()
oa.fit(X_1f)
# compare shrunk covariance obtained from data and from MLE estimate
_oa_cov_from_mle, _oa_shrinkage_from_mle = _oas(X_1f)
assert_array_almost_equal(_oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(_oa_shrinkage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1f**2).sum() / n_samples, oa.covariance_, 4)
def test_EmpiricalCovariance_validates_mahalanobis():
"""Checks that EmpiricalCovariance validates data with mahalanobis."""
cov = EmpiricalCovariance().fit(X)
msg = f"X has 2 features, but \\w+ is expecting {X.shape[1]} features as input"
with pytest.raises(ValueError, match=msg):
cov.mahalanobis(X[:, :2])
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/feature_extraction/image.py | sklearn/feature_extraction/image.py | """Utilities to extract features from images."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from itertools import product
from numbers import Integral, Number, Real
import numpy as np
from numpy.lib.stride_tricks import as_strided
from scipy import sparse
from sklearn.base import BaseEstimator, TransformerMixin, _fit_context
from sklearn.utils import check_array, check_random_state
from sklearn.utils._param_validation import (
Hidden,
Interval,
RealNotInt,
validate_params,
)
__all__ = [
"PatchExtractor",
"extract_patches_2d",
"grid_to_graph",
"img_to_graph",
"reconstruct_from_patches_2d",
]
from sklearn.utils.validation import validate_data
###############################################################################
# From an image to a graph
def _make_edges_3d(n_x, n_y, n_z=1):
"""Returns a list of edges for a 3D image.
Parameters
----------
n_x : int
The size of the grid in the x direction.
n_y : int
The size of the grid in the y direction.
n_z : integer, default=1
The size of the grid in the z direction, defaults to 1
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(), vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(), vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
def _compute_gradient_3d(edges, img):
_, n_y, n_z = img.shape
gradient = np.abs(
img[
edges[0] // (n_y * n_z),
(edges[0] % (n_y * n_z)) // n_z,
(edges[0] % (n_y * n_z)) % n_z,
]
- img[
edges[1] // (n_y * n_z),
(edges[1] % (n_y * n_z)) // n_z,
(edges[1] % (n_y * n_z)) % n_z,
]
)
return gradient
# XXX: Why mask the image after computing the weights?
def _mask_edges_weights(mask, edges, weights=None):
"""Apply a mask to edges (weighted or not)"""
inds = np.arange(mask.size)
inds = inds[mask.ravel()]
ind_mask = np.logical_and(np.isin(edges[0], inds), np.isin(edges[1], inds))
edges = edges[:, ind_mask]
if weights is not None:
weights = weights[ind_mask]
if len(edges.ravel()):
maxval = edges.max()
else:
maxval = 0
order = np.searchsorted(np.flatnonzero(mask), np.arange(maxval + 1))
edges = order[edges]
if weights is None:
return edges
else:
return edges, weights
def _to_graph(
n_x, n_y, n_z, mask=None, img=None, return_as=sparse.coo_matrix, dtype=None
):
"""Auxiliary function for img_to_graph and grid_to_graph"""
edges = _make_edges_3d(n_x, n_y, n_z)
if dtype is None: # To not overwrite input dtype
if img is None:
dtype = int
else:
dtype = img.dtype
if img is not None:
img = np.atleast_3d(img)
weights = _compute_gradient_3d(edges, img)
if mask is not None:
edges, weights = _mask_edges_weights(mask, edges, weights)
diag = img.squeeze()[mask]
else:
diag = img.ravel()
n_voxels = diag.size
else:
if mask is not None:
mask = mask.astype(dtype=bool, copy=False)
edges = _mask_edges_weights(mask, edges)
n_voxels = np.sum(mask)
else:
n_voxels = n_x * n_y * n_z
weights = np.ones(edges.shape[1], dtype=dtype)
diag = np.ones(n_voxels, dtype=dtype)
diag_idx = np.arange(n_voxels)
i_idx = np.hstack((edges[0], edges[1]))
j_idx = np.hstack((edges[1], edges[0]))
graph = sparse.coo_matrix(
(
np.hstack((weights, weights, diag)),
(np.hstack((i_idx, diag_idx)), np.hstack((j_idx, diag_idx))),
),
(n_voxels, n_voxels),
dtype=dtype,
)
if return_as is np.ndarray:
return graph.toarray()
return return_as(graph)
@validate_params(
{
"img": ["array-like"],
"mask": [None, np.ndarray],
"return_as": [type],
"dtype": "no_validation", # validation delegated to numpy
},
prefer_skip_nested_validation=True,
)
def img_to_graph(img, *, mask=None, return_as=sparse.coo_matrix, dtype=None):
"""Graph of the pixel-to-pixel gradient connections.
Edges are weighted with the gradient values.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
img : array-like of shape (height, width) or (height, width, channel)
2D or 3D image.
mask : ndarray of shape (height, width) or \
(height, width, channel), dtype=bool, default=None
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, \
default=sparse.coo_matrix
The class to use to build the returned adjacency matrix.
dtype : dtype, default=None
The data of the returned sparse matrix. By default it is the
dtype of img.
Returns
-------
graph : ndarray or a sparse matrix class
The computed adjacency matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn.feature_extraction.image import img_to_graph
>>> img = np.array([[0, 0], [0, 1]])
>>> img_to_graph(img, return_as=np.ndarray)
array([[0, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, 0, 1],
[0, 1, 1, 1]])
"""
img = np.atleast_3d(img)
n_x, n_y, n_z = img.shape
return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype)
@validate_params(
    {
        "n_x": [Interval(Integral, left=1, right=None, closed="left")],
        "n_y": [Interval(Integral, left=1, right=None, closed="left")],
        "n_z": [Interval(Integral, left=1, right=None, closed="left")],
        "mask": [None, np.ndarray],
        "return_as": [type],
        "dtype": "no_validation",  # validation delegated to numpy
    },
    prefer_skip_nested_validation=True,
)
def grid_to_graph(
    n_x, n_y, n_z=1, *, mask=None, return_as=sparse.coo_matrix, dtype=int
):
    """Graph of the pixel-to-pixel connections.

    Edges exist if 2 voxels are connected.

    Read more in the :ref:`User Guide <connectivity_graph_image>`.

    Parameters
    ----------
    n_x : int
        Dimension in x axis.

    n_y : int
        Dimension in y axis.

    n_z : int, default=1
        Dimension in z axis.

    mask : ndarray of shape (n_x, n_y, n_z), dtype=bool, default=None
        An optional mask of the image, to consider only part of the
        pixels.

    return_as : np.ndarray or a sparse matrix class, \
            default=sparse.coo_matrix
        The class to use to build the returned adjacency matrix.

    dtype : dtype, default=int
        The data of the returned sparse matrix. By default it is int.

    Returns
    -------
    graph : np.ndarray or a sparse matrix class
        The computed adjacency matrix.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.feature_extraction.image import grid_to_graph
    >>> shape_img = (4, 4, 1)
    >>> mask = np.zeros(shape=shape_img, dtype=bool)
    >>> mask[[1, 2], [1, 2], :] = True
    >>> graph = grid_to_graph(*shape_img, mask=mask)
    >>> print(graph)
    <COOrdinate sparse matrix of dtype 'int64'
        with 2 stored elements and shape (2, 2)>
      Coords    Values
      (0, 0)    1
      (1, 1)    1
    """
    # No image is supplied, so the shared helper assigns unit weights to every
    # edge and diagonal entry of the adjacency matrix.
    return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as, dtype=dtype)
###############################################################################
# From an image to a set of small image patches
def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
"""Compute the number of patches that will be extracted in an image.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
i_h : int
The image height
i_w : int
The image with
p_h : int
The height of a patch
p_w : int
The width of a patch
max_patches : int or float, default=None
The maximum number of patches to extract. If `max_patches` is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches. If `max_patches` is None, all possible patches are extracted.
"""
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if isinstance(max_patches, (Integral)) and max_patches < all_patches:
return max_patches
elif isinstance(max_patches, (Integral)) and max_patches >= all_patches:
return all_patches
elif isinstance(max_patches, (Real)) and 0 < max_patches < 1:
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
def _extract_patches(arr, patch_shape=8, extraction_step=1):
"""Extracts patches of any n-dimensional array in place using strides.
Given an n-dimensional array it will return a 2n-dimensional array with
the first n dimensions indexing patch position and the last n indexing
the patch content. This operation is immediate (O(1)). A reshape
performed on the first n dimensions will cause numpy to copy data, leading
to a list of extracted patches.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
arr : ndarray
n-dimensional array of which patches are to be extracted
patch_shape : int or tuple of length arr.ndim.default=8
Indicates the shape of the patches to be extracted. If an
integer is given, the shape will be a hypercube of
sidelength given by its value.
extraction_step : int or tuple of length arr.ndim, default=1
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
Returns
-------
patches : strided ndarray
2n-dimensional array indexing patches on first n dimensions and
containing patches on the last n dimensions. These dimensions
are fake, but this way no data is copied. A simple reshape invokes
a copying operation to obtain a list of patches:
result.reshape([-1] + list(patch_shape))
"""
arr_ndim = arr.ndim
if isinstance(patch_shape, Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = tuple(slice(None, None, st) for st in extraction_step)
indexing_strides = arr[slices].strides
patch_indices_shape = (
(np.array(arr.shape) - np.array(patch_shape)) // np.array(extraction_step)
) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
@validate_params(
    {
        "image": [np.ndarray],
        "patch_size": [tuple, list],
        "max_patches": [
            Interval(RealNotInt, 0, 1, closed="neither"),
            Interval(Integral, 1, None, closed="left"),
            None,
        ],
        "random_state": ["random_state"],
    },
    prefer_skip_nested_validation=True,
)
def extract_patches_2d(image, patch_size, *, max_patches=None, random_state=None):
    """Reshape a 2D image into a collection of patches.

    The resulting patches are allocated in a dedicated array.

    Read more in the :ref:`User Guide <image_feature_extraction>`.

    Parameters
    ----------
    image : ndarray of shape (image_height, image_width) or \
            (image_height, image_width, n_channels)
        The original image data. For color images, the last dimension specifies
        the channel: a RGB image would have `n_channels=3`.

    patch_size : tuple of int (patch_height, patch_width)
        The dimensions of one patch.

    max_patches : int or float, default=None
        The maximum number of patches to extract. If `max_patches` is a float
        between 0 and 1, it is taken to be a proportion of the total number
        of patches. If `max_patches` is None it corresponds to the total number
        of patches that can be extracted.

    random_state : int, RandomState instance, default=None
        Determines the random number generator used for random sampling when
        `max_patches` is not None. Use an int to make the randomness
        deterministic.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    patches : array of shape (n_patches, patch_height, patch_width) or \
            (n_patches, patch_height, patch_width, n_channels)
        The collection of patches extracted from the image, where `n_patches`
        is either `max_patches` or the total number of patches that can be
        extracted.

    Examples
    --------
    >>> from sklearn.datasets import load_sample_image
    >>> from sklearn.feature_extraction import image
    >>> # Use the array data from the first image in this dataset:
    >>> one_image = load_sample_image("china.jpg")
    >>> print('Image shape: {}'.format(one_image.shape))
    Image shape: (427, 640, 3)
    >>> patches = image.extract_patches_2d(one_image, (2, 2))
    >>> print('Patches shape: {}'.format(patches.shape))
    Patches shape: (272214, 2, 2, 3)
    >>> # Here are just two of these patches:
    >>> print(patches[1])
    [[[174 201 231]
      [174 201 231]]
     [[173 200 230]
      [173 200 230]]]
    >>> print(patches[800])
    [[[187 214 243]
      [188 215 244]]
     [[187 214 243]
      [188 215 244]]]
    """
    img_h, img_w = image.shape[:2]
    patch_h, patch_w = patch_size

    if patch_h > img_h:
        raise ValueError(
            "Height of the patch should be less than the height of the image."
        )
    if patch_w > img_w:
        raise ValueError(
            "Width of the patch should be less than the width of the image."
        )

    # Normalize to a 3D (height, width, channels) layout; grayscale images get
    # a singleton channel axis that is removed again before returning.
    image = check_array(image, allow_nd=True)
    image = image.reshape((img_h, img_w, -1))
    n_channels = image.shape[-1]

    # Zero-copy strided view over every possible patch position.
    all_patches = _extract_patches(
        image, patch_shape=(patch_h, patch_w, n_channels), extraction_step=1
    )

    n_patches = _compute_n_patches(img_h, img_w, patch_h, patch_w, max_patches)
    if max_patches:
        # Sample top-left corners uniformly at random (with replacement).
        rng = check_random_state(random_state)
        rows = rng.randint(img_h - patch_h + 1, size=n_patches)
        cols = rng.randint(img_w - patch_w + 1, size=n_patches)
        patches = all_patches[rows, cols, 0]
    else:
        patches = all_patches

    # Materialize the view into a dedicated, contiguous array.
    patches = patches.reshape(-1, patch_h, patch_w, n_channels)

    # Drop the trailing channel axis for single-channel images.
    if patches.shape[-1] == 1:
        return patches.reshape((n_patches, patch_h, patch_w))
    return patches
@validate_params(
    {"patches": [np.ndarray], "image_size": [tuple, Hidden(list)]},
    prefer_skip_nested_validation=True,
)
def reconstruct_from_patches_2d(patches, image_size):
    """Reconstruct the image from all of its patches.

    Patches are assumed to overlap and the image is constructed by filling in
    the patches from left to right, top to bottom, averaging the overlapping
    regions.

    Read more in the :ref:`User Guide <image_feature_extraction>`.

    Parameters
    ----------
    patches : ndarray of shape (n_patches, patch_height, patch_width) or \
            (n_patches, patch_height, patch_width, n_channels)
        The complete set of patches. If the patches contain colour information,
        channels are indexed along the last dimension: RGB patches would
        have `n_channels=3`.

    image_size : tuple of int (image_height, image_width) or \
            (image_height, image_width, n_channels)
        The size of the image that will be reconstructed.

    Returns
    -------
    image : ndarray of shape image_size
        The reconstructed image.

    Examples
    --------
    >>> from sklearn.datasets import load_sample_image
    >>> from sklearn.feature_extraction import image
    >>> one_image = load_sample_image("china.jpg")
    >>> print('Image shape: {}'.format(one_image.shape))
    Image shape: (427, 640, 3)
    >>> image_patches = image.extract_patches_2d(image=one_image, patch_size=(10, 10))
    >>> print('Patches shape: {}'.format(image_patches.shape))
    Patches shape: (263758, 10, 10, 3)
    >>> image_reconstructed = image.reconstruct_from_patches_2d(
    ...     patches=image_patches,
    ...     image_size=one_image.shape
    ... )
    >>> print(f"Reconstructed shape: {image_reconstructed.shape}")
    Reconstructed shape: (427, 640, 3)
    """
    img_h, img_w = image_size[:2]
    patch_h, patch_w = patches.shape[1:3]
    result = np.zeros(image_size)

    # Number of valid top-left patch positions along each axis; patches are
    # assumed to be ordered row-major over these positions.
    n_rows = img_h - patch_h + 1
    n_cols = img_w - patch_w + 1
    for patch, (row, col) in zip(patches, product(range(n_rows), range(n_cols))):
        result[row : row + patch_h, col : col + patch_w] += patch

    # Normalize each pixel by the number of patches that covered it.
    # XXX: is this the most efficient way? memory-wise yes, cpu wise?
    for row in range(img_h):
        for col in range(img_w):
            coverage = min(row + 1, patch_h, img_h - row) * min(
                col + 1, patch_w, img_w - col
            )
            result[row, col] /= float(coverage)
    return result
class PatchExtractor(TransformerMixin, BaseEstimator):
    """Extracts patches from a collection of images.
    Read more in the :ref:`User Guide <image_feature_extraction>`.
    .. versionadded:: 0.9
    Parameters
    ----------
    patch_size : tuple of int (patch_height, patch_width), default=None
        The dimensions of one patch. If set to None, the patch size will be
        automatically set to `(img_height // 10, img_width // 10)`, where
        `img_height` and `img_width` are the dimensions of the input images.
    max_patches : int or float, default=None
        The maximum number of patches per image to extract. If `max_patches` is
        a float in (0, 1), it is taken to mean a proportion of the total number
        of patches. If set to None, extract all possible patches.
    random_state : int, RandomState instance, default=None
        Determines the random number generator used for random sampling when
        `max_patches is not None`. Use an int to make the randomness
        deterministic.
        See :term:`Glossary <random_state>`.
    See Also
    --------
    reconstruct_from_patches_2d : Reconstruct image from all of its patches.
    Notes
    -----
    This estimator is stateless and does not need to be fitted. However, we
    recommend to call :meth:`fit_transform` instead of :meth:`transform`, as
    parameter validation is only performed in :meth:`fit`.
    Examples
    --------
    >>> from sklearn.datasets import load_sample_images
    >>> from sklearn.feature_extraction import image
    >>> # Use the array data from the second image in this dataset:
    >>> X = load_sample_images().images[1]
    >>> X = X[None, ...]
    >>> print(f"Image shape: {X.shape}")
    Image shape: (1, 427, 640, 3)
    >>> pe = image.PatchExtractor(patch_size=(10, 10))
    >>> pe_trans = pe.transform(X)
    >>> print(f"Patches shape: {pe_trans.shape}")
    Patches shape: (263758, 10, 10, 3)
    >>> X_reconstructed = image.reconstruct_from_patches_2d(pe_trans, X.shape[1:])
    >>> print(f"Reconstructed shape: {X_reconstructed.shape}")
    Reconstructed shape: (427, 640, 3)
    """
    # Declarative parameter constraints consumed by `_fit_context` when `fit`
    # runs; `transform` itself does not re-validate these.
    _parameter_constraints: dict = {
        "patch_size": [tuple, None],
        "max_patches": [
            None,
            Interval(RealNotInt, 0, 1, closed="neither"),
            Interval(Integral, 1, None, closed="left"),
        ],
        "random_state": ["random_state"],
    }
    def __init__(self, *, patch_size=None, max_patches=None, random_state=None):
        # Parameters are stored untouched; validation is deferred to `fit`
        # (scikit-learn convention).
        self.patch_size = patch_size
        self.max_patches = max_patches
        self.random_state = random_state
    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """Only validate the parameters of the estimator.
        This method allows to: (i) validate the parameters of the estimator and
        (ii) be consistent with the scikit-learn transformer API.
        Parameters
        ----------
        X : ndarray of shape (n_samples, image_height, image_width) or \
                (n_samples, image_height, image_width, n_channels)
            Array of images from which to extract patches. For color images,
            the last dimension specifies the channel: a RGB image would have
            `n_channels=3`.
        y : Ignored
            Not used, present for API consistency by convention.
        Returns
        -------
        self : object
            Returns the instance itself.
        """
        # Stateless: the `_fit_context` decorator performs the parameter
        # validation; nothing is learned from X.
        return self
    def transform(self, X):
        """Transform the image samples in `X` into a matrix of patch data.
        Parameters
        ----------
        X : ndarray of shape (n_samples, image_height, image_width) or \
                (n_samples, image_height, image_width, n_channels)
            Array of images from which to extract patches. For color images,
            the last dimension specifies the channel: a RGB image would have
            `n_channels=3`.
        Returns
        -------
        patches : array of shape (n_patches, patch_height, patch_width) or \
                (n_patches, patch_height, patch_width, n_channels)
            The collection of patches extracted from the images, where
            `n_patches` is either `n_samples * max_patches` or the total
            number of patches that can be extracted.
        """
        X = validate_data(
            self,
            X=X,
            ensure_2d=False,
            allow_nd=True,
            ensure_min_samples=1,
            ensure_min_features=1,
            reset=False,
        )
        random_state = check_random_state(self.random_state)
        n_imgs, img_height, img_width = X.shape[:3]
        if self.patch_size is None:
            # Default patch size: one tenth of the image along each axis.
            patch_size = img_height // 10, img_width // 10
        else:
            if len(self.patch_size) != 2:
                raise ValueError(
                    "patch_size must be a tuple of two integers. Got"
                    f" {self.patch_size} instead."
                )
            patch_size = self.patch_size
        # NOTE(review): duplicate of the unpacking performed above; harmless
        # but redundant.
        n_imgs, img_height, img_width = X.shape[:3]
        # Normalize to (n_images, height, width, channels); grayscale images
        # get a singleton channel axis.
        X = np.reshape(X, (n_imgs, img_height, img_width, -1))
        n_channels = X.shape[-1]
        # compute the dimensions of the patches array
        patch_height, patch_width = patch_size
        n_patches = _compute_n_patches(
            img_height, img_width, patch_height, patch_width, self.max_patches
        )
        patches_shape = (n_imgs * n_patches,) + patch_size
        if n_channels > 1:
            patches_shape += (n_channels,)
        # extract the patches
        # np.empty without an explicit dtype allocates float64, so the output
        # is floating point even when the input images are integer-valued.
        patches = np.empty(patches_shape)
        for ii, image in enumerate(X):
            # The same RandomState instance is threaded through every call so
            # successive images draw independent random patch positions.
            patches[ii * n_patches : (ii + 1) * n_patches] = extract_patches_2d(
                image,
                patch_size,
                max_patches=self.max_patches,
                random_state=random_state,
            )
        return patches
    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        # Inputs are image stacks (3D/4D arrays), not 2D feature matrices.
        tags.input_tags.two_d_array = False
        tags.input_tags.three_d_array = True
        # Stateless transformer: `fit` only validates parameters.
        tags.requires_fit = False
        return tags
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/feature_extraction/_stop_words.py | sklearn/feature_extraction/_stop_words.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# This list of English stop words is taken from the "Glasgow Information
# Retrieval Group". The original list can be found at
# http://ir.dcs.gla.ac.uk/resources/linguistic_utils/stop_words
# The vocabulary is stored as a single whitespace-separated literal and split
# at import time; the resulting frozenset is identical to an explicit list of
# the same 318 words.
ENGLISH_STOP_WORDS = frozenset(
    """
    a about above across after afterwards again against all almost alone
    along already also although always am among amongst amoungst amount an
    and another any anyhow anyone anything anyway anywhere are around as at
    back be became because become becomes becoming been before beforehand
    behind being below beside besides between beyond bill both bottom but by
    call can cannot cant co con could couldnt cry de describe detail do done
    down due during each eg eight either eleven else elsewhere empty enough
    etc even ever every everyone everything everywhere except few fifteen
    fifty fill find fire first five for former formerly forty found four
    from front full further get give go had has hasnt have he hence her here
    hereafter hereby herein hereupon hers herself him himself his how however
    hundred i ie if in inc indeed interest into is it its itself keep last
    latter latterly least less ltd made many may me meanwhile might mill mine
    more moreover most mostly move much must my myself name namely neither
    never nevertheless next nine no nobody none noone nor not nothing now
    nowhere of off often on once one only onto or other others otherwise our
    ours ourselves out over own part per perhaps please put rather re same
    see seem seemed seeming seems serious several she should show side since
    sincere six sixty so some somehow someone something sometime sometimes
    somewhere still such system take ten than that the their them themselves
    then thence there thereafter thereby therefore therein thereupon these
    they thick thin third this those though three through throughout thru
    thus to together too top toward towards twelve twenty two un under until
    up upon us very via was we well were what whatever when whence whenever
    where whereafter whereas whereby wherein whereupon wherever whether which
    while whither who whoever whole whom whose why will with within without
    would yet you your yours yourself yourselves
    """.split()
)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/feature_extraction/_hash.py | sklearn/feature_extraction/_hash.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from itertools import chain
from numbers import Integral
import numpy as np
import scipy.sparse as sp
from sklearn.base import BaseEstimator, TransformerMixin, _fit_context
from sklearn.feature_extraction._hashing_fast import transform as _hashing_transform
from sklearn.utils import metadata_routing
from sklearn.utils._param_validation import Interval, StrOptions
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(TransformerMixin, BaseEstimator):
    """Implements feature hashing, aka the hashing trick.
    This class turns sequences of symbolic feature names (strings) into
    scipy.sparse matrices, using a hash function to compute the matrix column
    corresponding to a name. The hash function employed is the signed 32-bit
    version of Murmurhash3.
    Feature names of type byte string are used as-is. Unicode strings are
    converted to UTF-8 first, but no Unicode normalization is done.
    Feature values must be (finite) numbers.
    This class is a low-memory alternative to DictVectorizer and
    CountVectorizer, intended for large-scale (online) learning and situations
    where memory is tight, e.g. when running prediction code on embedded
    devices.
    For an efficiency comparison of the different feature extractors, see
    :ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`.
    Read more in the :ref:`User Guide <feature_hashing>`.
    .. versionadded:: 0.13
    Parameters
    ----------
    n_features : int, default=2**20
        The number of features (columns) in the output matrices. Small numbers
        of features are likely to cause hash collisions, but large numbers
        will cause larger coefficient dimensions in linear learners.
    input_type : str, default='dict'
        Choose a string from {'dict', 'pair', 'string'}.
        Either "dict" (the default) to accept dictionaries over
        (feature_name, value); "pair" to accept pairs of (feature_name, value);
        or "string" to accept single strings.
        feature_name should be a string, while value should be a number.
        In the case of "string", a value of 1 is implied.
        The feature_name is hashed to find the appropriate column for the
        feature. The value's sign might be flipped in the output (but see
        non_negative, below).
    dtype : numpy dtype, default=np.float64
        The type of feature values. Passed to scipy.sparse matrix constructors
        as the dtype argument. Do not set this to bool, np.boolean or any
        unsigned integer type.
    alternate_sign : bool, default=True
        When True, an alternating sign is added to the features as to
        approximately conserve the inner product in the hashed space even for
        small n_features. This approach is similar to sparse random projection.
        .. versionchanged:: 0.19
            ``alternate_sign`` replaces the now deprecated ``non_negative``
            parameter.
    See Also
    --------
    DictVectorizer : Vectorizes string-valued features using a hash table.
    sklearn.preprocessing.OneHotEncoder : Handles nominal/categorical features.
    Notes
    -----
    This estimator is :term:`stateless` and does not need to be fitted.
    However, we recommend to call :meth:`fit_transform` instead of
    :meth:`transform`, as parameter validation is only performed in
    :meth:`fit`.
    Examples
    --------
    >>> from sklearn.feature_extraction import FeatureHasher
    >>> h = FeatureHasher(n_features=10)
    >>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
    >>> f = h.transform(D)
    >>> f.toarray()
    array([[ 0.,  0., -4., -1.,  0.,  0.,  0.,  0.,  0.,  2.],
           [ 0.,  0.,  0., -2., -5.,  0.,  0.,  0.,  0.,  0.]])
    With `input_type="string"`, the input must be an iterable over iterables of
    strings:
    >>> h = FeatureHasher(n_features=8, input_type="string")
    >>> raw_X = [["dog", "cat", "snake"], ["snake", "dog"], ["cat", "bird"]]
    >>> f = h.transform(raw_X)
    >>> f.toarray()
    array([[ 0.,  0.,  0., -1.,  0., -1.,  0.,  1.],
           [ 0.,  0.,  0., -1.,  0., -1.,  0.,  0.],
           [ 0., -1.,  0.,  0.,  0.,  0.,  0.,  1.]])
    """
    # raw_X should have been called X
    __metadata_request__transform = {"raw_X": metadata_routing.UNUSED}
    # Declarative parameter constraints consumed by `_fit_context` in `fit`.
    # n_features is capped at the int32 maximum supported by the hashing code.
    _parameter_constraints: dict = {
        "n_features": [Interval(Integral, 1, np.iinfo(np.int32).max, closed="both")],
        "input_type": [StrOptions({"dict", "pair", "string"})],
        "dtype": "no_validation",  # delegate to numpy
        "alternate_sign": ["boolean"],
    }
    def __init__(
        self,
        n_features=(2**20),
        *,
        input_type="dict",
        dtype=np.float64,
        alternate_sign=True,
    ):
        # Parameters are stored untouched; validation is deferred to `fit`.
        self.dtype = dtype
        self.input_type = input_type
        self.n_features = n_features
        self.alternate_sign = alternate_sign
    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X=None, y=None):
        """Only validates estimator's parameters.
        This method allows to: (i) validate the estimator's parameters and
        (ii) be consistent with the scikit-learn transformer API.
        Parameters
        ----------
        X : Ignored
            Not used, present here for API consistency by convention.
        y : Ignored
            Not used, present here for API consistency by convention.
        Returns
        -------
        self : object
            FeatureHasher class instance.
        """
        # Stateless: `_fit_context` performs parameter validation; nothing is
        # learned from the (ignored) input.
        return self
    def transform(self, raw_X):
        """Transform a sequence of instances to a scipy.sparse matrix.
        Parameters
        ----------
        raw_X : iterable over iterable over raw features, length = n_samples
            Samples. Each sample must be iterable an (e.g., a list or tuple)
            containing/generating feature names (and optionally values, see
            the input_type constructor argument) which will be hashed.
            raw_X need not support the len function, so it can be the result
            of a generator; n_samples is determined on the fly.
        Returns
        -------
        X : sparse matrix of shape (n_samples, n_features)
            Feature matrix, for use with estimators or further transformers.
        """
        # Work on an iterator so the first sample can be peeked at below even
        # when raw_X is a one-shot generator.
        raw_X = iter(raw_X)
        if self.input_type == "dict":
            raw_X = (_iteritems(d) for d in raw_X)
        elif self.input_type == "string":
            # Peek at the first sample to reject a common misuse (passing a
            # single string instead of an iterable of token iterables).
            first_raw_X = next(raw_X)
            if isinstance(first_raw_X, str):
                raise ValueError(
                    "Samples can not be a single string. The input must be an iterable"
                    " over iterables of strings."
                )
            # Re-attach the consumed first sample and give each token an
            # implicit value of 1.
            raw_X_ = chain([first_raw_X], raw_X)
            raw_X = (((f, 1) for f in x) for x in raw_X_)
        # seed=0 keeps the hashed column assignment deterministic across runs
        # and instances.
        indices, indptr, values = _hashing_transform(
            raw_X, self.n_features, self.dtype, self.alternate_sign, seed=0
        )
        n_samples = indptr.shape[0] - 1
        if n_samples == 0:
            raise ValueError("Cannot vectorize empty sequence.")
        X = sp.csr_matrix(
            (values, indices, indptr),
            dtype=self.dtype,
            shape=(n_samples, self.n_features),
        )
        X.sum_duplicates()  # also sorts the indices
        return X
    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        # Input is raw text/dict samples, not a 2D numeric feature matrix.
        tags.input_tags.two_d_array = False
        if self.input_type == "string":
            tags.input_tags.string = True
        elif self.input_type == "dict":
            tags.input_tags.dict = True
        # Stateless transformer: `fit` only validates parameters.
        tags.requires_fit = False
        return tags
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/feature_extraction/__init__.py | sklearn/feature_extraction/__init__.py | """Feature extraction from raw data."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from sklearn.feature_extraction import image, text
from sklearn.feature_extraction._dict_vectorizer import DictVectorizer
from sklearn.feature_extraction._hash import FeatureHasher
from sklearn.feature_extraction.image import grid_to_graph, img_to_graph
__all__ = [
"DictVectorizer",
"FeatureHasher",
"grid_to_graph",
"image",
"img_to_graph",
"text",
]
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/feature_extraction/text.py | sklearn/feature_extraction/text.py | """Utilities to build feature vectors from text documents."""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import array
import re
import unicodedata
import warnings
from collections import defaultdict
from collections.abc import Mapping
from functools import partial
from numbers import Integral
from operator import itemgetter
import numpy as np
import scipy.sparse as sp
from sklearn.base import (
BaseEstimator,
OneToOneFeatureMixin,
TransformerMixin,
_fit_context,
)
from sklearn.exceptions import NotFittedError
from sklearn.feature_extraction._hash import FeatureHasher
from sklearn.feature_extraction._stop_words import ENGLISH_STOP_WORDS
from sklearn.preprocessing import normalize
from sklearn.utils import metadata_routing
from sklearn.utils._param_validation import HasMethods, Interval, RealNotInt, StrOptions
from sklearn.utils.fixes import _IS_32BIT
from sklearn.utils.validation import (
FLOAT_DTYPES,
check_array,
check_is_fitted,
validate_data,
)
__all__ = [
"ENGLISH_STOP_WORDS",
"CountVectorizer",
"HashingVectorizer",
"TfidfTransformer",
"TfidfVectorizer",
"strip_accents_ascii",
"strip_accents_unicode",
"strip_tags",
]
def _preprocess(doc, accent_function=None, lower=False):
"""Chain together an optional series of text preprocessing steps to
apply to a document.
Parameters
----------
doc: str
The string to preprocess
accent_function: callable, default=None
Function for handling accented characters. Common strategies include
normalizing and removing.
lower: bool, default=False
Whether to use str.lower to lowercase all of the text
Returns
-------
doc: str
preprocessed string
"""
if lower:
doc = doc.lower()
if accent_function is not None:
doc = accent_function(doc)
return doc
def _analyze(
doc,
analyzer=None,
tokenizer=None,
ngrams=None,
preprocessor=None,
decoder=None,
stop_words=None,
):
"""Chain together an optional series of text processing steps to go from
a single document to ngrams, with or without tokenizing or preprocessing.
If analyzer is used, only the decoder argument is used, as the analyzer is
intended to replace the preprocessor, tokenizer, and ngrams steps.
Parameters
----------
analyzer: callable, default=None
tokenizer: callable, default=None
ngrams: callable, default=None
preprocessor: callable, default=None
decoder: callable, default=None
stop_words: list, default=None
Returns
-------
ngrams: list
A sequence of tokens, possibly with pairs, triples, etc.
"""
if decoder is not None:
doc = decoder(doc)
if analyzer is not None:
doc = analyzer(doc)
else:
if preprocessor is not None:
doc = preprocessor(doc)
if tokenizer is not None:
doc = tokenizer(doc)
if ngrams is not None:
if stop_words is not None:
doc = ngrams(doc, stop_words)
else:
doc = ngrams(doc)
return doc
def strip_accents_unicode(s):
    """Transform accentuated unicode symbols into their simple counterpart.

    Warning: the python-level loop and join operations make this
    implementation 20 times slower than the strip_accents_ascii basic
    normalization.

    Parameters
    ----------
    s : str
        The string to strip.

    Returns
    -------
    s : str
        The stripped string.

    See Also
    --------
    strip_accents_ascii : Remove accentuated char for any unicode symbol that
        has a direct ASCII equivalent.
    """
    try:
        # Pure-ASCII strings carry no accents, so the expensive NFKD
        # decomposition below can be skipped entirely.
        s.encode("ASCII", errors="strict")
    except UnicodeEncodeError:
        decomposed = unicodedata.normalize("NFKD", s)
        return "".join([c for c in decomposed if not unicodedata.combining(c)])
    else:
        return s
def strip_accents_ascii(s):
    """Transform accentuated unicode symbols into ascii or nothing.

    Warning: this solution is only suited for languages that have a direct
    transliteration to ASCII symbols.

    Parameters
    ----------
    s : str
        The string to strip.

    Returns
    -------
    s : str
        The stripped string.

    See Also
    --------
    strip_accents_unicode : Remove accentuated char for any unicode symbol.
    """
    # NFKD splits accented characters into base character + combining marks;
    # encoding to ASCII with errors="ignore" then drops everything non-ASCII.
    decomposed = unicodedata.normalize("NFKD", s)
    return decomposed.encode("ASCII", "ignore").decode("ASCII")
def strip_tags(s):
    """Basic regexp based HTML / XML tag stripper function.

    For serious HTML/XML preprocessing you should rather use an external
    library such as lxml or BeautifulSoup.

    Parameters
    ----------
    s : str
        The string to strip.

    Returns
    -------
    s : str
        The stripped string.
    """
    # The `re` module caches compiled patterns, so calling re.sub directly is
    # equivalent to compiling the pattern on every call.
    return re.sub(r"<([^>]+)>", " ", s, flags=re.UNICODE)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, str):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
class _VectorizerMixin:
    """Provides common code for text vectorizers (tokenization logic)."""
    # Pre-compiled pattern matching runs of two or more whitespace characters;
    # used to collapse them to a single space before char n-gram extraction.
    _white_spaces = re.compile(r"\s\s+")
    def decode(self, doc):
        """Decode the input into a string of unicode symbols.
        The decoding strategy depends on the vectorizer parameters.
        Parameters
        ----------
        doc : bytes or str
            The string to decode.
        Returns
        -------
        doc: str
            A string of unicode symbols.
        """
        if self.input == "filename":
            with open(doc, "rb") as fh:
                doc = fh.read()
        elif self.input == "file":
            doc = doc.read()
        if isinstance(doc, bytes):
            doc = doc.decode(self.encoding, self.decode_error)
        # Reject missing values (e.g. np.nan from a pandas column) with a
        # clear error instead of failing later during tokenization.
        if doc is np.nan:
            raise ValueError(
                "np.nan is an invalid document, expected byte or unicode string."
            )
        return doc
    def _word_ngrams(self, tokens, stop_words=None):
        """Turn tokens into a sequence of n-grams after stop words filtering"""
        # handle stop words
        if stop_words is not None:
            tokens = [w for w in tokens if w not in stop_words]
        # handle token n-grams
        min_n, max_n = self.ngram_range
        if max_n != 1:
            original_tokens = tokens
            if min_n == 1:
                # no need to do any slicing for unigrams
                # just iterate through the original tokens
                tokens = list(original_tokens)
                min_n += 1
            else:
                tokens = []
            n_original_tokens = len(original_tokens)
            # bind method outside of loop to reduce overhead
            tokens_append = tokens.append
            space_join = " ".join
            # n-grams of length >= min_n are emitted as space-joined slices of
            # the original token sequence
            for n in range(min_n, min(max_n + 1, n_original_tokens + 1)):
                for i in range(n_original_tokens - n + 1):
                    tokens_append(space_join(original_tokens[i : i + n]))
        return tokens
    def _char_ngrams(self, text_document):
        """Tokenize text_document into a sequence of character n-grams"""
        # normalize white spaces
        text_document = self._white_spaces.sub(" ", text_document)
        text_len = len(text_document)
        min_n, max_n = self.ngram_range
        if min_n == 1:
            # no need to do any slicing for unigrams
            # iterate through the string
            ngrams = list(text_document)
            min_n += 1
        else:
            ngrams = []
        # bind method outside of loop to reduce overhead
        ngrams_append = ngrams.append
        for n in range(min_n, min(max_n + 1, text_len + 1)):
            for i in range(text_len - n + 1):
                ngrams_append(text_document[i : i + n])
        return ngrams
    def _char_wb_ngrams(self, text_document):
        """Whitespace sensitive char-n-gram tokenization.
        Tokenize text_document into a sequence of character n-grams
        operating only inside word boundaries. n-grams at the edges
        of words are padded with space."""
        # normalize white spaces
        text_document = self._white_spaces.sub(" ", text_document)
        min_n, max_n = self.ngram_range
        ngrams = []
        # bind method outside of loop to reduce overhead
        ngrams_append = ngrams.append
        for w in text_document.split():
            # pad the word so edge n-grams carry an explicit space boundary
            w = " " + w + " "
            w_len = len(w)
            for n in range(min_n, max_n + 1):
                offset = 0
                ngrams_append(w[offset : offset + n])
                while offset + n < w_len:
                    offset += 1
                    ngrams_append(w[offset : offset + n])
                if offset == 0:  # count a short word (w_len < n) only once
                    break
        return ngrams
    def build_preprocessor(self):
        """Return a function to preprocess the text before tokenization.
        Returns
        -------
        preprocessor: callable
            A function to preprocess the text before tokenization.
        """
        if self.preprocessor is not None:
            return self.preprocessor
        # accent stripping: resolve the strip_accents parameter to a callable
        # (or None for no stripping)
        if not self.strip_accents:
            strip_accents = None
        elif callable(self.strip_accents):
            strip_accents = self.strip_accents
        elif self.strip_accents == "ascii":
            strip_accents = strip_accents_ascii
        elif self.strip_accents == "unicode":
            strip_accents = strip_accents_unicode
        else:
            raise ValueError(
                'Invalid value for "strip_accents": %s' % self.strip_accents
            )
        return partial(_preprocess, accent_function=strip_accents, lower=self.lowercase)
    def build_tokenizer(self):
        """Return a function that splits a string into a sequence of tokens.
        Returns
        -------
        tokenizer: callable
            A function to split a string into a sequence of tokens.
        """
        if self.tokenizer is not None:
            return self.tokenizer
        token_pattern = re.compile(self.token_pattern)
        # re.findall returns group contents when the pattern has one group, so
        # more than one capturing group would yield tuples instead of tokens.
        if token_pattern.groups > 1:
            raise ValueError(
                "More than 1 capturing group in token pattern. Only a single "
                "group should be captured."
            )
        return token_pattern.findall
    def get_stop_words(self):
        """Build or fetch the effective stop words list.
        Returns
        -------
        stop_words: list or None
            A list of stop words.
        """
        return _check_stop_list(self.stop_words)
    def _check_stop_words_consistency(self, stop_words, preprocess, tokenize):
        """Check if stop words are consistent
        Returns
        -------
        is_consistent : True if stop words are consistent with the preprocessor
                        and tokenizer, False if they are not, None if the check
                        was previously performed, "error" if it could not be
                        performed (e.g. because of the use of a custom
                        preprocessor / tokenizer)
        """
        # The check is cached on the identity of self.stop_words so it runs at
        # most once per assigned stop word object.
        if id(self.stop_words) == getattr(self, "_stop_words_id", None):
            # Stop words were previously validated
            return None
        # NB: stop_words is validated, unlike self.stop_words
        try:
            inconsistent = set()
            for w in stop_words or ():
                # A stop word is inconsistent if preprocessing + tokenizing it
                # produces tokens that are themselves not stop words.
                tokens = list(tokenize(preprocess(w)))
                for token in tokens:
                    if token not in stop_words:
                        inconsistent.add(token)
            self._stop_words_id = id(self.stop_words)
            if inconsistent:
                warnings.warn(
                    "Your stop_words may be inconsistent with "
                    "your preprocessing. Tokenizing the stop "
                    "words generated tokens %r not in "
                    "stop_words." % sorted(inconsistent)
                )
            return not inconsistent
        except Exception:
            # Failed to check stop words consistency (e.g. because a custom
            # preprocessor or tokenizer was used)
            self._stop_words_id = id(self.stop_words)
            return "error"
    def build_analyzer(self):
        """Return a callable to process input data.
        The callable handles preprocessing, tokenization, and n-grams generation.
        Returns
        -------
        analyzer: callable
            A function to handle preprocessing, tokenization
            and n-grams generation.
        """
        if callable(self.analyzer):
            return partial(_analyze, analyzer=self.analyzer, decoder=self.decode)
        preprocess = self.build_preprocessor()
        if self.analyzer == "char":
            return partial(
                _analyze,
                ngrams=self._char_ngrams,
                preprocessor=preprocess,
                decoder=self.decode,
            )
        elif self.analyzer == "char_wb":
            return partial(
                _analyze,
                ngrams=self._char_wb_ngrams,
                preprocessor=preprocess,
                decoder=self.decode,
            )
        elif self.analyzer == "word":
            stop_words = self.get_stop_words()
            tokenize = self.build_tokenizer()
            # warn-only check; the return value is intentionally ignored here
            self._check_stop_words_consistency(stop_words, preprocess, tokenize)
            return partial(
                _analyze,
                ngrams=self._word_ngrams,
                tokenizer=tokenize,
                preprocessor=preprocess,
                decoder=self.decode,
                stop_words=stop_words,
            )
        else:
            raise ValueError(
                "%s is not a valid tokenization scheme/analyzer" % self.analyzer
            )
    def _validate_vocabulary(self):
        """Validate a user-supplied `vocabulary` and set `vocabulary_` /
        `fixed_vocabulary_` accordingly."""
        vocabulary = self.vocabulary
        if vocabulary is not None:
            if isinstance(vocabulary, set):
                # sort for a deterministic term -> index assignment
                vocabulary = sorted(vocabulary)
            if not isinstance(vocabulary, Mapping):
                vocab = {}
                for i, t in enumerate(vocabulary):
                    # setdefault returns the previously stored index when the
                    # term was already seen, which exposes duplicates
                    if vocab.setdefault(t, i) != i:
                        msg = "Duplicate term in vocabulary: %r" % t
                        raise ValueError(msg)
                vocabulary = vocab
            else:
                # a user-supplied mapping must be a bijection onto 0..n-1
                indices = set(vocabulary.values())
                if len(indices) != len(vocabulary):
                    raise ValueError("Vocabulary contains repeated indices.")
                for i in range(len(vocabulary)):
                    if i not in indices:
                        msg = "Vocabulary of size %d doesn't contain index %d." % (
                            len(vocabulary),
                            i,
                        )
                        raise ValueError(msg)
            if not vocabulary:
                raise ValueError("empty vocabulary passed to fit")
            self.fixed_vocabulary_ = True
            self.vocabulary_ = dict(vocabulary)
        else:
            self.fixed_vocabulary_ = False
    def _check_vocabulary(self):
        """Check if vocabulary is empty or missing (not fitted)"""
        if not hasattr(self, "vocabulary_"):
            self._validate_vocabulary()
            if not self.fixed_vocabulary_:
                raise NotFittedError("Vocabulary not fitted or provided")
        if len(self.vocabulary_) == 0:
            raise ValueError("Vocabulary is empty")
    def _validate_ngram_range(self):
        """Check validity of ngram_range parameter"""
        min_n, max_m = self.ngram_range
        if min_n > max_m:
            raise ValueError(
                "Invalid value for ngram_range=%s "
                "lower boundary larger than the upper boundary." % str(self.ngram_range)
            )
    def _warn_for_unused_params(self):
        """Warn about parameters that are ignored given the current settings."""
        if self.tokenizer is not None and self.token_pattern is not None:
            warnings.warn(
                "The parameter 'token_pattern' will not be used"
                " since 'tokenizer' is not None'"
            )
        if self.preprocessor is not None and callable(self.analyzer):
            warnings.warn(
                "The parameter 'preprocessor' will not be used"
                " since 'analyzer' is callable'"
            )
        if (
            self.ngram_range != (1, 1)
            and self.ngram_range is not None
            and callable(self.analyzer)
        ):
            warnings.warn(
                "The parameter 'ngram_range' will not be used"
                " since 'analyzer' is callable'"
            )
        if self.analyzer != "word" or callable(self.analyzer):
            if self.stop_words is not None:
                warnings.warn(
                    "The parameter 'stop_words' will not be used"
                    " since 'analyzer' != 'word'"
                )
            if (
                self.token_pattern is not None
                and self.token_pattern != r"(?u)\b\w\w+\b"
            ):
                warnings.warn(
                    "The parameter 'token_pattern' will not be used"
                    " since 'analyzer' != 'word'"
                )
            if self.tokenizer is not None:
                warnings.warn(
                    "The parameter 'tokenizer' will not be used"
                    " since 'analyzer' != 'word'"
                )
class HashingVectorizer(
    TransformerMixin, _VectorizerMixin, BaseEstimator, auto_wrap_output_keys=None
):
    r"""Convert a collection of text documents to a matrix of token occurrences.
    It turns a collection of text documents into a scipy.sparse matrix holding
    token occurrence counts (or binary occurrence information), possibly
    normalized as token frequencies if norm='l1' or projected on the euclidean
    unit sphere if norm='l2'.
    This text vectorizer implementation uses the hashing trick to find the
    token string name to feature integer index mapping.
    This strategy has several advantages:
    - it is very low memory scalable to large datasets as there is no need to
      store a vocabulary dictionary in memory.
    - it is fast to pickle and un-pickle as it holds no state besides the
      constructor parameters.
    - it can be used in a streaming (partial fit) or parallel pipeline as there
      is no state computed during fit.
    There are also a couple of cons (vs using a CountVectorizer with an
    in-memory vocabulary):
    - there is no way to compute the inverse transform (from feature indices to
      string feature names) which can be a problem when trying to introspect
      which features are most important to a model.
    - there can be collisions: distinct tokens can be mapped to the same
      feature index. However in practice this is rarely an issue if n_features
      is large enough (e.g. 2 ** 18 for text classification problems).
    - no IDF weighting as this would render the transformer stateful.
    The hash function employed is the signed 32-bit version of Murmurhash3.
    For an efficiency comparison of the different feature extractors, see
    :ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`.
    For an example of document clustering and comparison with
    :class:`~sklearn.feature_extraction.text.TfidfVectorizer`, see
    :ref:`sphx_glr_auto_examples_text_plot_document_clustering.py`.
    Read more in the :ref:`User Guide <text_feature_extraction>`.
    Parameters
    ----------
    input : {'filename', 'file', 'content'}, default='content'
        - If `'filename'`, the sequence passed as an argument to fit is
          expected to be a list of filenames that need reading to fetch
          the raw content to analyze.
        - If `'file'`, the sequence items must have a 'read' method (file-like
          object) that is called to fetch the bytes in memory.
        - If `'content'`, the input is expected to be a sequence of items that
          can be of type string or byte.
    encoding : str, default='utf-8'
        If bytes or files are given to analyze, this encoding is used to
        decode.
    decode_error : {'strict', 'ignore', 'replace'}, default='strict'
        Instruction on what to do if a byte sequence is given to analyze that
        contains characters not of the given `encoding`. By default, it is
        'strict', meaning that a UnicodeDecodeError will be raised. Other
        values are 'ignore' and 'replace'.
    strip_accents : {'ascii', 'unicode'} or callable, default=None
        Remove accents and perform other character normalization
        during the preprocessing step.
        'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
        'unicode' is a slightly slower method that works on any character.
        None (default) means no character normalization is performed.
        Both 'ascii' and 'unicode' use NFKD normalization from
        :func:`unicodedata.normalize`.
    lowercase : bool, default=True
        Convert all characters to lowercase before tokenizing.
    preprocessor : callable, default=None
        Override the preprocessing (string transformation) stage while
        preserving the tokenizing and n-grams generation steps.
        Only applies if ``analyzer`` is not callable.
    tokenizer : callable, default=None
        Override the string tokenization step while preserving the
        preprocessing and n-grams generation steps.
        Only applies if ``analyzer == 'word'``.
    stop_words : {'english'}, list, default=None
        If 'english', a built-in stop word list for English is used.
        There are several known issues with 'english' and you should
        consider an alternative (see :ref:`stop_words`).
        If a list, that list is assumed to contain stop words, all of which
        will be removed from the resulting tokens.
        Only applies if ``analyzer == 'word'``.
    token_pattern : str or None, default=r"(?u)\\b\\w\\w+\\b"
        Regular expression denoting what constitutes a "token", only used
        if ``analyzer == 'word'``. The default regexp selects tokens of 2
        or more alphanumeric characters (punctuation is completely ignored
        and always treated as a token separator).
        If there is a capturing group in token_pattern then the
        captured group content, not the entire match, becomes the token.
        At most one capturing group is permitted.
    ngram_range : tuple (min_n, max_n), default=(1, 1)
        The lower and upper boundary of the range of n-values for different
        n-grams to be extracted. All values of n such that min_n <= n <= max_n
        will be used. For example an ``ngram_range`` of ``(1, 1)`` means only
        unigrams, ``(1, 2)`` means unigrams and bigrams, and ``(2, 2)`` means
        only bigrams.
        Only applies if ``analyzer`` is not callable.
    analyzer : {'word', 'char', 'char_wb'} or callable, default='word'
        Whether the feature should be made of word or character n-grams.
        Option 'char_wb' creates character n-grams only from text inside
        word boundaries; n-grams at the edges of words are padded with space.
        If a callable is passed it is used to extract the sequence of features
        out of the raw, unprocessed input.
        .. versionchanged:: 0.21
            Since v0.21, if ``input`` is ``'filename'`` or ``'file'``, the data
            is first read from the file and then passed to the given callable
            analyzer.
    n_features : int, default=(2 ** 20)
        The number of features (columns) in the output matrices. Small numbers
        of features are likely to cause hash collisions, but large numbers
        will cause larger coefficient dimensions in linear learners.
    binary : bool, default=False
        If True, all non zero counts are set to 1. This is useful for discrete
        probabilistic models that model binary events rather than integer
        counts.
    norm : {'l1', 'l2'}, default='l2'
        Norm used to normalize term vectors. None for no normalization.
    alternate_sign : bool, default=True
        When True, an alternating sign is added to the features as to
        approximately conserve the inner product in the hashed space even for
        small n_features. This approach is similar to sparse random projection.
        .. versionadded:: 0.19
    dtype : type, default=np.float64
        Type of the matrix returned by fit_transform() or transform().
    See Also
    --------
    CountVectorizer : Convert a collection of text documents to a matrix of
        token counts.
    TfidfVectorizer : Convert a collection of raw documents to a matrix of
        TF-IDF features.
    Notes
    -----
    This estimator is :term:`stateless` and does not need to be fitted.
    However, we recommend to call :meth:`fit_transform` instead of
    :meth:`transform`, as parameter validation is only performed in
    :meth:`fit`.
    Examples
    --------
    >>> from sklearn.feature_extraction.text import HashingVectorizer
    >>> corpus = [
    ...     'This is the first document.',
    ...     'This document is the second document.',
    ...     'And this is the third one.',
    ...     'Is this the first document?',
    ... ]
    >>> vectorizer = HashingVectorizer(n_features=2**4)
    >>> X = vectorizer.fit_transform(corpus)
    >>> print(X.shape)
    (4, 16)
    """
    # Declarative parameter validation, enforced by the @_fit_context
    # decorators on fit/partial_fit.
    _parameter_constraints: dict = {
        "input": [StrOptions({"filename", "file", "content"})],
        "encoding": [str],
        "decode_error": [StrOptions({"strict", "ignore", "replace"})],
        "strip_accents": [StrOptions({"ascii", "unicode"}), None, callable],
        "lowercase": ["boolean"],
        "preprocessor": [callable, None],
        "tokenizer": [callable, None],
        "stop_words": [StrOptions({"english"}), list, None],
        "token_pattern": [str, None],
        "ngram_range": [tuple],
        "analyzer": [StrOptions({"word", "char", "char_wb"}), callable],
        "n_features": [Interval(Integral, 1, np.iinfo(np.int32).max, closed="left")],
        "binary": ["boolean"],
        "norm": [StrOptions({"l1", "l2"}), None],
        "alternate_sign": ["boolean"],
        "dtype": "no_validation",  # delegate to numpy
    }
    def __init__(
        self,
        *,
        input="content",
        encoding="utf-8",
        decode_error="strict",
        strip_accents=None,
        lowercase=True,
        preprocessor=None,
        tokenizer=None,
        stop_words=None,
        token_pattern=r"(?u)\b\w\w+\b",
        ngram_range=(1, 1),
        analyzer="word",
        n_features=(2**20),
        binary=False,
        norm="l2",
        alternate_sign=True,
        dtype=np.float64,
    ):
        self.input = input
        self.encoding = encoding
        self.decode_error = decode_error
        self.strip_accents = strip_accents
        self.preprocessor = preprocessor
        self.tokenizer = tokenizer
        self.analyzer = analyzer
        self.lowercase = lowercase
        self.token_pattern = token_pattern
        self.stop_words = stop_words
        self.n_features = n_features
        self.ngram_range = ngram_range
        self.binary = binary
        self.norm = norm
        self.alternate_sign = alternate_sign
        self.dtype = dtype
    @_fit_context(prefer_skip_nested_validation=True)
    def partial_fit(self, X, y=None):
        """Only validates estimator's parameters.
        This method allows to: (i) validate the estimator's parameters and
        (ii) be consistent with the scikit-learn transformer API.
        Parameters
        ----------
        X : ndarray of shape [n_samples, n_features]
            Training data.
        y : Ignored
            Not used, present for API consistency by convention.
        Returns
        -------
        self : object
            HashingVectorizer instance.
        """
        # The vectorizer is stateless: validation happens in the decorator.
        return self
    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """Only validates estimator's parameters.
        This method allows to: (i) validate the estimator's parameters and
        (ii) be consistent with the scikit-learn transformer API.
        Parameters
        ----------
        X : ndarray of shape [n_samples, n_features]
            Training data.
        y : Ignored
            Not used, present for API consistency by convention.
        Returns
        -------
        self : object
            HashingVectorizer instance.
        """
        # triggers a parameter validation
        if isinstance(X, str):
            raise ValueError(
                "Iterable over raw text documents expected, string object received."
            )
        self._warn_for_unused_params()
        self._validate_ngram_range()
        # Fitting the inner FeatureHasher validates its parameters too; no
        # state is learned.
        self._get_hasher().fit(X, y=y)
        return self
    def transform(self, X):
        """Transform a sequence of documents to a document-term matrix.
        Parameters
        ----------
        X : iterable over raw text documents, length = n_samples
            Samples. Each sample must be a text document (either bytes or
            unicode strings, file name or file object depending on the
            constructor argument) which will be tokenized and hashed.
        Returns
        -------
        X : sparse matrix of shape (n_samples, n_features)
            Document-term matrix.
        """
        if isinstance(X, str):
            raise ValueError(
                "Iterable over raw text documents expected, string object received."
            )
        self._validate_ngram_range()
        analyzer = self.build_analyzer()
        # Documents are analyzed lazily via a generator; hashing is done by
        # the stateless FeatureHasher.
        X = self._get_hasher().transform(analyzer(doc) for doc in X)
        if self.binary:
            # turn counts into 0/1 occurrence indicators in-place
            X.data.fill(1)
        if self.norm is not None:
            X = normalize(X, norm=self.norm, copy=False)
        return X
    def fit_transform(self, X, y=None):
        """Transform a sequence of documents to a document-term matrix.
        Parameters
        ----------
        X : iterable over raw text documents, length = n_samples
            Samples. Each sample must be a text document (either bytes or
            unicode strings, file name or file object depending on the
            constructor argument) which will be tokenized and hashed.
        y : any
            Ignored. This parameter exists only for compatibility with
            sklearn.pipeline.Pipeline.
        Returns
        -------
        X : sparse matrix of shape (n_samples, n_features)
            Document-term matrix.
        """
        return self.fit(X, y).transform(X)
    def _get_hasher(self):
        # Build a fresh stateless FeatureHasher configured from the current
        # parameters; cheap since FeatureHasher holds no learned state.
        return FeatureHasher(
            n_features=self.n_features,
            input_type="string",
            dtype=self.dtype,
            alternate_sign=self.alternate_sign,
        )
    def __sklearn_tags__(self):
        # Advertise text input and statelessness to sklearn's tag system.
        tags = super().__sklearn_tags__()
        tags.input_tags.string = True
        tags.input_tags.two_d_array = False
        tags.requires_fit = False
        return tags
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.issparse(X) and X.format == "csr":
return np.bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(X.indptr)
class CountVectorizer(_VectorizerMixin, BaseEstimator):
r"""Convert a collection of text documents to a matrix of token counts.
This implementation produces a sparse representation of the counts using
scipy.sparse.csr_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
For an efficiency comparison of the different feature extractors, see
:ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : {'filename', 'file', 'content'}, default='content'
- If `'filename'`, the sequence passed as an argument to fit is
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/feature_extraction/_dict_vectorizer.py | sklearn/feature_extraction/_dict_vectorizer.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from array import array
from collections.abc import Iterable, Mapping
from numbers import Number
from operator import itemgetter
import numpy as np
import scipy.sparse as sp
from sklearn.base import BaseEstimator, TransformerMixin, _fit_context
from sklearn.utils import check_array, metadata_routing
from sklearn.utils.validation import check_is_fitted
class DictVectorizer(TransformerMixin, BaseEstimator):
    """Transforms lists of feature-value mappings to vectors.
    This transformer turns lists of mappings (dict-like objects) of feature
    names to feature values into Numpy arrays or scipy.sparse matrices for use
    with scikit-learn estimators.
    When feature values are strings, this transformer will do a binary one-hot
    (aka one-of-K) coding: one boolean-valued feature is constructed for each
    of the possible string values that the feature can take on. For instance,
    a feature "f" that can take on the values "ham" and "spam" will become two
    features in the output, one signifying "f=ham", the other "f=spam".
    If a feature value is a sequence or set of strings, this transformer
    will iterate over the values and will count the occurrences of each string
    value.
    However, note that this transformer will only do a binary one-hot encoding
    when feature values are of type string. If categorical features are
    represented as numeric values such as int or iterables of strings, the
    DictVectorizer can be followed by
    :class:`~sklearn.preprocessing.OneHotEncoder` to complete
    binary one-hot encoding.
    Features that do not occur in a sample (mapping) will have a zero value
    in the resulting array/matrix.
    For an efficiency comparison of the different feature extractors, see
    :ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`.
    Read more in the :ref:`User Guide <dict_feature_extraction>`.
    Parameters
    ----------
    dtype : dtype, default=np.float64
        The type of feature values. Passed to Numpy array/scipy.sparse matrix
        constructors as the dtype argument.
    separator : str, default="="
        Separator string used when constructing new features for one-hot
        coding.
    sparse : bool, default=True
        Whether transform should produce scipy.sparse matrices.
    sort : bool, default=True
        Whether ``feature_names_`` and ``vocabulary_`` should be
        sorted when fitting.
    Attributes
    ----------
    vocabulary_ : dict
        A dictionary mapping feature names to feature indices.
    feature_names_ : list
        A list of length n_features containing the feature names (e.g., "f=ham"
        and "f=spam").
    See Also
    --------
    FeatureHasher : Performs vectorization using only a hash function.
    sklearn.preprocessing.OrdinalEncoder : Handles nominal/categorical
        features encoded as columns of arbitrary data types.
    Examples
    --------
    >>> from sklearn.feature_extraction import DictVectorizer
    >>> v = DictVectorizer(sparse=False)
    >>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
    >>> X = v.fit_transform(D)
    >>> X
    array([[2., 0., 1.],
           [0., 1., 3.]])
    >>> v.inverse_transform(X) == [{'bar': 2.0, 'foo': 1.0},
    ...                            {'baz': 1.0, 'foo': 3.0}]
    True
    >>> v.transform({'foo': 4, 'unseen_feature': 3})
    array([[0., 0., 4.]])
    """
    # This isn't something that people should be routing / using in a pipeline.
    __metadata_request__inverse_transform = {"dict_type": metadata_routing.UNUSED}
    _parameter_constraints: dict = {
        "dtype": "no_validation",  # validation delegated to numpy,
        "separator": [str],
        "sparse": ["boolean"],
        "sort": ["boolean"],
    }
    def __init__(self, *, dtype=np.float64, separator="=", sparse=True, sort=True):
        self.dtype = dtype
        self.separator = separator
        self.sparse = sparse
        self.sort = sort
    def _add_iterable_element(
        self,
        f,
        v,
        feature_names,
        vocab,
        *,
        fitting=True,
        transforming=False,
        indices=None,
        values=None,
    ):
        """Add feature names for iterable of strings"""
        for vv in v:
            if isinstance(vv, str):
                # one-hot style name "feature<separator>value", counted as 1
                feature_name = "%s%s%s" % (f, self.separator, vv)
                vv = 1
            else:
                raise TypeError(
                    f"Unsupported type {type(vv)} in iterable "
                    "value. Only iterables of string are "
                    "supported."
                )
            if fitting and feature_name not in vocab:
                vocab[feature_name] = len(feature_names)
                feature_names.append(feature_name)
            if transforming and feature_name in vocab:
                indices.append(vocab[feature_name])
                values.append(self.dtype(vv))
    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """Learn a list of feature name -> indices mappings.
        Parameters
        ----------
        X : Mapping or iterable over Mappings
            Dict(s) or Mapping(s) from feature names (arbitrary Python
            objects) to feature values (strings or convertible to dtype).
            .. versionchanged:: 0.24
               Accepts multiple string values for one categorical feature.
        y : (ignored)
            Ignored parameter.
        Returns
        -------
        self : object
            DictVectorizer class instance.
        """
        feature_names = []
        vocab = {}
        for x in X:
            for f, v in x.items():
                if isinstance(v, str):
                    feature_name = "%s%s%s" % (f, self.separator, v)
                elif isinstance(v, Number) or (v is None):
                    feature_name = f
                elif isinstance(v, Mapping):
                    raise TypeError(
                        f"Unsupported value type {type(v)} "
                        f"for {f}: {v}.\n"
                        "Mapping objects are not supported."
                    )
                elif isinstance(v, Iterable):
                    # iterable of strings: names are added by the helper
                    feature_name = None
                    self._add_iterable_element(f, v, feature_names, vocab)
                # NOTE(review): a value matching none of the branches above
                # leaves `feature_name` holding the previous iteration's value
                # (or undefined on the very first item, raising NameError);
                # _transform raises an explicit TypeError for that case —
                # confirm this asymmetry is intended.
                if feature_name is not None:
                    if feature_name not in vocab:
                        vocab[feature_name] = len(feature_names)
                        feature_names.append(feature_name)
        if self.sort:
            # re-derive indices after sorting so names and indices agree
            feature_names.sort()
            vocab = {f: i for i, f in enumerate(feature_names)}
        self.feature_names_ = feature_names
        self.vocabulary_ = vocab
        return self
    def _transform(self, X, fitting):
        """Shared implementation of fit_transform/transform.

        Builds a CSR matrix incrementally; when ``fitting`` is True the
        vocabulary is grown as new feature names are encountered.
        """
        # Sanity check: Python's array has no way of explicitly requesting the
        # signed 32-bit integers that scipy.sparse needs, so we use the next
        # best thing: typecode "i" (int). However, if that gives larger or
        # smaller integers than 32-bit ones, np.frombuffer screws up.
        assert array("i").itemsize == 4, (
            "sizeof(int) != 4 on your platform; please report this at"
            " https://github.com/scikit-learn/scikit-learn/issues and"
            " include the output from platform.platform() in your bug report"
        )
        dtype = self.dtype
        if fitting:
            feature_names = []
            vocab = {}
        else:
            feature_names = self.feature_names_
            vocab = self.vocabulary_
        transforming = True
        # Process everything as sparse regardless of setting
        X = [X] if isinstance(X, Mapping) else X
        indices = array("i")
        indptr = [0]
        # XXX we could change values to an array.array as well, but it
        # would require (heuristic) conversion of dtype to typecode...
        values = []
        # collect all the possible feature names and build sparse matrix at
        # same time
        for x in X:
            for f, v in x.items():
                if isinstance(v, str):
                    # string value -> one-hot feature with implicit count 1
                    feature_name = "%s%s%s" % (f, self.separator, v)
                    v = 1
                elif isinstance(v, Number) or (v is None):
                    feature_name = f
                elif not isinstance(v, Mapping) and isinstance(v, Iterable):
                    feature_name = None
                    self._add_iterable_element(
                        f,
                        v,
                        feature_names,
                        vocab,
                        fitting=fitting,
                        transforming=transforming,
                        indices=indices,
                        values=values,
                    )
                else:
                    raise TypeError(
                        f"Unsupported value Type {type(v)} "
                        f"for {f}: {v}.\n"
                        f"{type(v)} objects are not supported."
                    )
                if feature_name is not None:
                    if fitting and feature_name not in vocab:
                        vocab[feature_name] = len(feature_names)
                        feature_names.append(feature_name)
                    # when not fitting, unknown feature names are silently
                    # dropped here
                    if feature_name in vocab:
                        indices.append(vocab[feature_name])
                        values.append(self.dtype(v))
            indptr.append(len(indices))
        if len(indptr) == 1:
            raise ValueError("Sample sequence X is empty.")
        indices = np.frombuffer(indices, dtype=np.intc)
        shape = (len(indptr) - 1, len(vocab))
        result_matrix = sp.csr_matrix(
            (values, indices, indptr), shape=shape, dtype=dtype
        )
        # Sort everything if asked
        if fitting and self.sort:
            feature_names.sort()
            # permute the matrix columns to match the sorted feature order
            map_index = np.empty(len(feature_names), dtype=np.int32)
            for new_val, f in enumerate(feature_names):
                map_index[new_val] = vocab[f]
                vocab[f] = new_val
            result_matrix = result_matrix[:, map_index]
        if self.sparse:
            result_matrix.sort_indices()
        else:
            result_matrix = result_matrix.toarray()
        if fitting:
            self.feature_names_ = feature_names
            self.vocabulary_ = vocab
        return result_matrix
    @_fit_context(prefer_skip_nested_validation=True)
    def fit_transform(self, X, y=None):
        """Learn a list of feature name -> indices mappings and transform X.
        Like fit(X) followed by transform(X), but does not require
        materializing X in memory.
        Parameters
        ----------
        X : Mapping or iterable over Mappings
            Dict(s) or Mapping(s) from feature names (arbitrary Python
            objects) to feature values (strings or convertible to dtype).
            .. versionchanged:: 0.24
               Accepts multiple string values for one categorical feature.
        y : (ignored)
            Ignored parameter.
        Returns
        -------
        Xa : {array, sparse matrix}
            Feature vectors; always 2-d.
        """
        return self._transform(X, fitting=True)
    def inverse_transform(self, X, dict_type=dict):
        """Transform array or sparse matrix X back to feature mappings.
        X must have been produced by this DictVectorizer's transform or
        fit_transform method; it may only have passed through transformers
        that preserve the number of features and their order.
        In the case of one-hot/one-of-K coding, the constructed feature
        names and values are returned rather than the original ones.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Sample matrix.
        dict_type : type, default=dict
            Constructor for feature mappings. Must conform to the
            collections.Mapping API.
        Returns
        -------
        X_original : list of dict_type objects of shape (n_samples,)
            Feature mappings for the samples in X.
        """
        check_is_fitted(self, "feature_names_")
        # COO matrix is not subscriptable
        X = check_array(X, accept_sparse=["csr", "csc"])
        n_samples = X.shape[0]
        names = self.feature_names_
        dicts = [dict_type() for _ in range(n_samples)]
        if sp.issparse(X):
            # only materialize the stored (non-zero) entries
            for i, j in zip(*X.nonzero()):
                dicts[i][names[j]] = X[i, j]
        else:
            for i, d in enumerate(dicts):
                for j, v in enumerate(X[i, :]):
                    if v != 0:
                        d[names[j]] = X[i, j]
        return dicts
    def transform(self, X):
        """Transform feature->value dicts to array or sparse matrix.
        Named features not encountered during fit or fit_transform will be
        silently ignored.
        Parameters
        ----------
        X : Mapping or iterable over Mappings of shape (n_samples,)
            Dict(s) or Mapping(s) from feature names (arbitrary Python
            objects) to feature values (strings or convertible to dtype).
        Returns
        -------
        Xa : {array, sparse matrix}
            Feature vectors; always 2-d.
        """
        check_is_fitted(self, ["feature_names_", "vocabulary_"])
        return self._transform(X, fitting=False)
    def get_feature_names_out(self, input_features=None):
        """Get output feature names for transformation.
        Parameters
        ----------
        input_features : array-like of str or None, default=None
            Not used, present here for API consistency by convention.
        Returns
        -------
        feature_names_out : ndarray of str objects
            Transformed feature names.
        """
        check_is_fitted(self, "feature_names_")
        # feature names may be arbitrary hashables; coerce to str if needed
        if any(not isinstance(name, str) for name in self.feature_names_):
            feature_names = [str(name) for name in self.feature_names_]
        else:
            feature_names = self.feature_names_
        return np.asarray(feature_names, dtype=object)
    def restrict(self, support, indices=False):
        """Restrict the features to those in support using feature selection.
        This function modifies the estimator in-place.
        Parameters
        ----------
        support : array-like
            Boolean mask or list of indices (as returned by the get_support
            member of feature selectors).
        indices : bool, default=False
            Whether support is a list of indices.
        Returns
        -------
        self : object
            DictVectorizer class instance.
        Examples
        --------
        >>> from sklearn.feature_extraction import DictVectorizer
        >>> from sklearn.feature_selection import SelectKBest, chi2
        >>> v = DictVectorizer()
        >>> D = [{'foo': 1, 'bar': 2}, {'foo': 3, 'baz': 1}]
        >>> X = v.fit_transform(D)
        >>> support = SelectKBest(chi2, k=2).fit(X, [0, 1])
        >>> v.get_feature_names_out()
        array(['bar', 'baz', 'foo'], ...)
        >>> v.restrict(support.get_support())
        DictVectorizer()
        >>> v.get_feature_names_out()
        array(['bar', 'foo'], ...)
        """
        check_is_fitted(self, "feature_names_")
        if not indices:
            # convert a boolean mask to an index array
            support = np.where(support)[0]
        names = self.feature_names_
        new_vocab = {}
        for i in support:
            new_vocab[names[i]] = len(new_vocab)
        self.vocabulary_ = new_vocab
        # rebuild feature_names_ in index order of the new vocabulary
        self.feature_names_ = [
            f for f, i in sorted(new_vocab.items(), key=itemgetter(1))
        ]
        return self
    def __sklearn_tags__(self):
        # Advertise dict input (not 2-d arrays) to sklearn's tag system.
        tags = super().__sklearn_tags__()
        tags.input_tags.dict = True
        tags.input_tags.two_d_array = False
        return tags
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/feature_extraction/tests/test_feature_hasher.py | sklearn/feature_extraction/tests/test_feature_hasher.py | import numpy as np
import pytest
from numpy.testing import assert_array_equal
from sklearn.feature_extraction import FeatureHasher
from sklearn.feature_extraction._hashing_fast import transform as _hashing_transform
def test_feature_hasher_dicts():
    """Dict input and equivalent (feature, value) pairs hash identically."""
    hasher = FeatureHasher(n_features=16)
    # "dict" is the default input type.
    assert hasher.input_type == "dict"
    raw_X = [{"foo": "bar", "dada": 42, "tzara": 37}, {"foo": "baz", "gaga": "string1"}]
    X_from_dicts = FeatureHasher(n_features=16).transform(raw_X)
    pairs = (iter(d.items()) for d in raw_X)
    X_from_pairs = FeatureHasher(n_features=16, input_type="pair").transform(pairs)
    assert_array_equal(X_from_dicts.toarray(), X_from_pairs.toarray())
def test_feature_hasher_strings():
    """String hashing is consistent across table widths; duplicates add up."""
    # mix byte and Unicode strings; note that "foo" is a duplicate in row 0
    raw_X = [
        ["foo", "bar", "baz", "foo".encode("ascii")],
        ["bar".encode("ascii"), "baz", "quux"],
    ]
    for lg_n_features in (7, 9, 11, 16, 22):
        n_features = 2**lg_n_features
        hasher = FeatureHasher(
            n_features=n_features, input_type="string", alternate_sign=False
        )
        # Feed an iterable (not a list) to exercise lazy consumption.
        X = hasher.transform(x for x in raw_X)
        assert X.shape[0] == len(raw_X)
        assert X.shape[1] == n_features
        # Without alternate_sign, row sums equal the number of tokens.
        assert X[0].sum() == 4
        assert X[1].sum() == 3
        assert X.nnz == 6
@pytest.mark.parametrize("input_type", ["list", "generator"])
def test_feature_hasher_single_string(input_type):
    """FeatureHasher raises error when a sample is a single string.

    Non-regression test for gh-13199.
    """
    raw_X = ["my_string", "another_string"]
    if input_type == "generator":
        raw_X = (x for x in raw_X)
    hasher = FeatureHasher(n_features=10, input_type="string")
    with pytest.raises(ValueError, match="Samples can not be a single string"):
        hasher.transform(raw_X)
def test_hashing_transform_seed():
    """seed=0 matches the default hashing; seed=1 changes the indices."""
    raw_X = [
        ["foo", "bar", "baz", "foo".encode("ascii")],
        ["bar".encode("ascii"), "baz", "quux"],
    ]

    def as_pairs():
        # Generators are single-use, so build a fresh one for each call.
        return (((f, 1) for f in x) for x in raw_X)

    indices, indptr, _ = _hashing_transform(as_pairs(), 2**7, str, False)
    indices_0, indptr_0, _ = _hashing_transform(as_pairs(), 2**7, str, False, seed=0)
    assert_array_equal(indices, indices_0)
    assert_array_equal(indptr, indptr_0)
    indices_1, _, _ = _hashing_transform(as_pairs(), 2**7, str, False, seed=1)
    with pytest.raises(AssertionError):
        assert_array_equal(indices, indices_1)
def test_feature_hasher_pairs():
    """Numeric (feature, value) pairs keep their magnitudes in the output."""
    raw_X = (
        iter(d.items())
        for d in [{"foo": 1, "bar": 2}, {"baz": 3, "quux": 4, "foo": -1}]
    )
    hasher = FeatureHasher(n_features=16, input_type="pair")
    row1, row2 = hasher.transform(raw_X).toarray()
    assert sorted(np.abs(row1[row1 != 0])) == [1, 2]
    assert sorted(np.abs(row2[row2 != 0])) == [1, 3, 4]
def test_feature_hasher_pairs_with_string_values():
    """String values count as 1 regardless of the string content."""
    raw_X = (
        iter(d.items())
        for d in [{"foo": 1, "bar": "a"}, {"baz": "abc", "quux": 4, "foo": -1}]
    )
    hasher = FeatureHasher(n_features=16, input_type="pair")
    row1, row2 = hasher.transform(raw_X).toarray()
    assert sorted(np.abs(row1[row1 != 0])) == [1, 1]
    assert sorted(np.abs(row2[row2 != 0])) == [1, 1, 4]
    # Identical samples must hash to identical rows.
    raw_X = (iter(d.items()) for d in [{"bax": "abc"}, {"bax": "abc"}])
    row1, row2 = hasher.transform(raw_X).toarray()
    assert [1] == np.abs(row1[row1 != 0])
    assert [1] == np.abs(row2[row2 != 0])
    assert_array_equal(row1, row2)
def test_hash_empty_input():
    """Empty samples of any iterable type produce all-zero rows."""
    n_features = 16
    raw_X = [[], (), iter(range(0))]
    hasher = FeatureHasher(n_features=n_features, input_type="string")
    out = hasher.transform(raw_X)
    assert_array_equal(out.toarray(), np.zeros((len(raw_X), n_features)))
def test_hasher_zeros():
    """No explicit zeros are materialized in the sparse output."""
    out = FeatureHasher().transform([{"foo": 0}])
    assert out.data.shape == (0,)
def test_hasher_alternate_sign():
    """alternate_sign controls whether negative entries may appear."""
    X = [list("Thequickbrownfoxjumped")]
    signed = FeatureHasher(alternate_sign=True, input_type="string").fit_transform(X)
    assert signed.data.min() < 0
    assert signed.data.max() > 0
    unsigned = FeatureHasher(alternate_sign=False, input_type="string").fit_transform(X)
    assert unsigned.data.min() > 0
def test_hash_collisions():
    """With one bucket, signed tokens partially cancel; unsigned ones sum."""
    X = [list("Thequickbrownfoxjumped")]
    collided = FeatureHasher(
        alternate_sign=True, n_features=1, input_type="string"
    ).fit_transform(X)
    # Some hashed tokens carry opposite signs and cancel each other out.
    assert abs(collided.data[0]) < len(X[0])
    collided = FeatureHasher(
        alternate_sign=False, n_features=1, input_type="string"
    ).fit_transform(X)
    assert collided.data[0] == len(X[0])
def test_feature_hasher_requires_fit_tag():
    """Test that FeatureHasher has requires_fit=False tag."""
    tags = FeatureHasher().__sklearn_tags__()
    assert not tags.requires_fit
def test_feature_hasher_transform_without_fit():
    """Test that FeatureHasher can transform without fitting."""
    samples = [{"dog": 1, "cat": 2}, {"dog": 2, "run": 5}]
    out = FeatureHasher(n_features=10).transform(samples)
    assert out.shape == (2, 10)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/feature_extraction/tests/test_image.py | sklearn/feature_extraction/tests/test_image.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
import pytest
from scipy import ndimage
from scipy.sparse.csgraph import connected_components
from sklearn.feature_extraction.image import (
PatchExtractor,
_extract_patches,
extract_patches_2d,
grid_to_graph,
img_to_graph,
reconstruct_from_patches_2d,
)
def test_img_to_graph():
    """x- and y-ramp images give gradient graphs with identical weights."""
    x, y = np.mgrid[:4, :4] - 10
    gx = img_to_graph(x)
    gy = img_to_graph(y)
    assert gx.nnz == gy.nnz
    # Negative entries form the diagonal (the original image values);
    # positive entries are the gradient values, equal for both ramps.
    np.testing.assert_array_equal(gx.data[gx.data > 0], gy.data[gy.data > 0])
def test_img_to_graph_sparse():
    """Edges land in the right place for a masked image with a singleton."""
    # Keep pixel (0, 0) plus the whole last column.
    mask = np.zeros((2, 3), dtype=bool)
    mask[0, 0] = True
    mask[:, 2] = True
    x = np.zeros((2, 3))
    x[0, 0] = 1
    x[0, 2] = -1
    x[1, 2] = -2
    expected = np.array([[1, 0, 0], [0, -1, 1], [0, 1, -2]])
    np.testing.assert_array_equal(img_to_graph(x, mask=mask).todense(), expected)
def test_grid_to_graph():
    """Edge-free graphs, vertex ordering, mask types, and output dtypes."""
    # Checking that the function works with graphs containing no edges
    size = 2
    roi_size = 1
    # Generating two convex parts with one vertex
    # Thus, edges will be empty in _to_graph
    mask = np.zeros((size, size), dtype=bool)
    mask[0:roi_size, 0:roi_size] = True
    mask[-roi_size:, -roi_size:] = True
    mask = mask.reshape(size**2)
    A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)
    assert connected_components(A)[0] == 2
    # check ordering
    mask = np.zeros((2, 3), dtype=bool)
    mask[0, 0] = 1
    mask[:, 2] = 1
    graph = grid_to_graph(2, 3, 1, mask=mask.ravel()).todense()
    desired = np.array([[1, 0, 0], [0, 1, 1], [0, 1, 1]])
    np.testing.assert_array_equal(graph, desired)
    # Checking that the function works whatever the type of mask is
    mask = np.ones((size, size), dtype=np.int16)
    A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask)
    assert connected_components(A)[0] == 1
    # Checking dtype of the graph
    mask = np.ones((size, size))
    A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=bool)
    assert A.dtype == bool
    A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=int)
    assert A.dtype == int
    A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.float64)
    assert A.dtype == np.float64
def test_connect_regions(raccoon_face_fxt):
    """Graph component counts match scipy.ndimage labeling of the mask."""
    # subsample by 4 to reduce run time
    face = raccoon_face_fxt[::4, ::4]
    for threshold in (50, 150):
        mask = face > threshold
        graph = img_to_graph(face, mask=mask)
        assert ndimage.label(mask)[1] == connected_components(graph)[0]
def test_connect_regions_with_grid(raccoon_face_fxt):
    """grid_to_graph component counts agree with ndimage labeling."""
    # subsample by 4 to reduce run time
    face = raccoon_face_fxt[::4, ::4]
    mask = face > 50
    graph = grid_to_graph(*face.shape, mask=mask)
    assert ndimage.label(mask)[1] == connected_components(graph)[0]
    # Also exercise the dtype=None code path.
    mask = face > 150
    graph = grid_to_graph(*face.shape, mask=mask, dtype=None)
    assert ndimage.label(mask)[1] == connected_components(graph)[0]
@pytest.fixture
def downsampled_face(raccoon_face_fxt):
    """The raccoon face image, downsampled twice by 2x2 block averaging."""
    face = raccoon_face_fxt
    # Two rounds of summing each 2x2 block, then divide by 16 to average.
    for _ in range(2):
        face = face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2] + face[1::2, 1::2]
    face = face.astype(np.float32)
    face /= 16.0
    return face
@pytest.fixture
def orange_face(downsampled_face):
    """A 3-channel color image built from the grayscale face."""
    face = downsampled_face
    face_color = np.zeros(face.shape + (3,))
    # Each channel is an inverted, differently scaled copy of the face.
    face_color[:, :, 0] = 256 - face
    face_color[:, :, 1] = 256 - face / 2
    face_color[:, :, 2] = 256 - face / 4
    return face_color
def _make_images(face):
# make a collection of faces
images = np.zeros((3,) + face.shape)
images[0] = face
images[1] = face + 1
images[2] = face + 2
return images
@pytest.fixture
def downsampled_face_collection(downsampled_face):
    # A stack of three shifted copies of the downsampled face.
    return _make_images(downsampled_face)
def test_extract_patches_all(downsampled_face):
    """All overlapping patches are extracted when max_patches is unset."""
    i_h, i_w = downsampled_face.shape
    p_h, p_w = 16, 16
    n_expected = (i_h - p_h + 1) * (i_w - p_w + 1)
    patches = extract_patches_2d(downsampled_face, (p_h, p_w))
    assert patches.shape == (n_expected, p_h, p_w)
def test_extract_patches_all_color(orange_face):
    """Color images keep their channel axis in the extracted patches."""
    i_h, i_w = orange_face.shape[:2]
    p_h, p_w = 16, 16
    n_expected = (i_h - p_h + 1) * (i_w - p_w + 1)
    patches = extract_patches_2d(orange_face, (p_h, p_w))
    assert patches.shape == (n_expected, p_h, p_w, 3)
def test_extract_patches_all_rect(downsampled_face):
    """Non-square patches on a cropped, rectangular image."""
    face = downsampled_face[:, 32:97]
    i_h, i_w = face.shape
    p_h, p_w = 16, 12
    n_expected = (i_h - p_h + 1) * (i_w - p_w + 1)
    patches = extract_patches_2d(face, (p_h, p_w))
    assert patches.shape == (n_expected, p_h, p_w)
def test_extract_patches_max_patches(downsampled_face):
    """max_patches as an int caps the count; as a float it is a fraction."""
    face = downsampled_face
    i_h, i_w = face.shape
    p_h, p_w = 16, 16
    # An integer max_patches is an absolute number of sampled patches.
    patches = extract_patches_2d(face, (p_h, p_w), max_patches=100)
    assert patches.shape == (100, p_h, p_w)
    # A float is a fraction of the total number of patches.
    expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1))
    patches = extract_patches_2d(face, (p_h, p_w), max_patches=0.5)
    assert patches.shape == (expected_n_patches, p_h, p_w)
    # Float values outside the valid range are rejected.
    with pytest.raises(ValueError):
        extract_patches_2d(face, (p_h, p_w), max_patches=2.0)
    with pytest.raises(ValueError):
        extract_patches_2d(face, (p_h, p_w), max_patches=-1.0)
def test_extract_patch_same_size_image(downsampled_face):
    """Patch size equal to the image yields a single patch: the image."""
    patches = extract_patches_2d(
        downsampled_face, downsampled_face.shape, max_patches=2
    )
    assert patches.shape[0] == 1
def test_extract_patches_less_than_max_patches(downsampled_face):
    """When fewer patches exist than max_patches, all are returned."""
    i_h, i_w = downsampled_face.shape
    p_h, p_w = 3 * i_h // 4, 3 * i_w // 4
    # The total patch count (3185) stays below the requested 4000.
    n_expected = (i_h - p_h + 1) * (i_w - p_w + 1)
    patches = extract_patches_2d(downsampled_face, (p_h, p_w), max_patches=4000)
    assert patches.shape == (n_expected, p_h, p_w)
def test_reconstruct_patches_perfect(downsampled_face):
    """Extracting then reconstructing all patches recovers the image."""
    patch_shape = (16, 16)
    patches = extract_patches_2d(downsampled_face, patch_shape)
    rebuilt = reconstruct_from_patches_2d(patches, downsampled_face.shape)
    np.testing.assert_array_almost_equal(downsampled_face, rebuilt)
def test_reconstruct_patches_perfect_color(orange_face):
    """Extraction/reconstruction round-trip also works on color images."""
    patch_shape = (16, 16)
    patches = extract_patches_2d(orange_face, patch_shape)
    rebuilt = reconstruct_from_patches_2d(patches, orange_face.shape)
    np.testing.assert_array_almost_equal(orange_face, rebuilt)
def test_patch_extractor_fit(downsampled_face_collection, global_random_seed):
    """fit() is stateless and returns the estimator itself."""
    extractor = PatchExtractor(
        patch_size=(8, 8), max_patches=100, random_state=global_random_seed
    )
    assert extractor.fit(downsampled_face_collection) == extractor
def test_patch_extractor_max_patches(downsampled_face_collection, global_random_seed):
    """max_patches works per image, either as a count or as a fraction."""
    faces = downsampled_face_collection
    i_h, i_w = faces.shape[1:3]
    p_h, p_w = 8, 8
    # (max_patches value, expected number of patches per image)
    cases = [
        (100, 100),
        (0.5, int((i_h - p_h + 1) * (i_w - p_w + 1) * 0.5)),
    ]
    for max_patches, n_per_image in cases:
        extractor = PatchExtractor(
            patch_size=(p_h, p_w),
            max_patches=max_patches,
            random_state=global_random_seed,
        )
        patches = extractor.transform(faces)
        assert patches.shape == (len(faces) * n_per_image, p_h, p_w)
def test_patch_extractor_max_patches_default(
    downsampled_face_collection, global_random_seed
):
    """A default patch size is chosen when patch_size is not given."""
    faces = downsampled_face_collection
    extractor = PatchExtractor(max_patches=100, random_state=global_random_seed)
    patches = extractor.transform(faces)
    # 100 patches per image, with the default patch size for these images.
    assert patches.shape == (len(faces) * 100, 19, 25)
def test_patch_extractor_all_patches(downsampled_face_collection, global_random_seed):
    """Without max_patches, every possible patch is extracted per image."""
    faces = downsampled_face_collection
    i_h, i_w = faces.shape[1:3]
    p_h, p_w = 8, 8
    n_total = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1)
    extractor = PatchExtractor(patch_size=(p_h, p_w), random_state=global_random_seed)
    assert extractor.transform(faces).shape == (n_total, p_h, p_w)
def test_patch_extractor_color(orange_face, global_random_seed):
    """Color collections keep the channel axis on extracted patches."""
    faces = _make_images(orange_face)
    i_h, i_w = faces.shape[1:3]
    p_h, p_w = 8, 8
    n_total = len(faces) * (i_h - p_h + 1) * (i_w - p_w + 1)
    extractor = PatchExtractor(patch_size=(p_h, p_w), random_state=global_random_seed)
    assert extractor.transform(faces).shape == (n_total, p_h, p_w, 3)
def test_extract_patches_strided():
    """_extract_patches honors extraction_step in 1-D, 2-D and 3-D.

    For each case the expected number of patches per axis and the offset of
    the last patch are tabulated and checked against the strided view.
    """
    image_shapes_1D = [(10,), (10,), (11,), (10,)]
    patch_sizes_1D = [(1,), (2,), (3,), (8,)]
    patch_steps_1D = [(1,), (1,), (4,), (2,)]
    expected_views_1D = [(10,), (9,), (3,), (2,)]
    last_patch_1D = [(10,), (8,), (8,), (2,)]
    image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)]
    patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)]
    patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)]
    expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)]
    last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)]
    image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)]
    patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)]
    patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)]
    expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)]
    last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)]
    image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D
    patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D
    patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D
    expected_views = expected_views_1D + expected_views_2D + expected_views_3D
    last_patches = last_patch_1D + last_patch_2D + last_patch_3D
    for image_shape, patch_size, patch_step, expected_view, last_patch in zip(
        image_shapes, patch_sizes, patch_steps, expected_views, last_patches
    ):
        image = np.arange(np.prod(image_shape)).reshape(image_shape)
        patches = _extract_patches(
            image, patch_shape=patch_size, extraction_step=patch_step
        )
        ndim = len(image_shape)
        # The leading ndim axes of the view index patch positions.
        assert patches.shape[:ndim] == expected_view
        last_patch_slices = tuple(
            slice(i, i + j, None) for i, j in zip(last_patch, patch_size)
        )
        # The last patch in the view must equal the matching image slice.
        assert (
            patches[(-1, None, None) * ndim] == image[last_patch_slices].squeeze()
        ).all()
def test_extract_patches_square(downsampled_face):
    """A scalar patch_shape applies to every image dimension."""
    i_h, i_w = downsampled_face.shape
    p = 8
    expected_counts = (i_h - p + 1, i_w - p + 1)
    patches = _extract_patches(downsampled_face, patch_shape=p)
    assert patches.shape == (expected_counts[0], expected_counts[1], p, p)
def test_width_patch():
    """Patch dimensions exceeding the image dimensions are rejected."""
    x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    for bad_shape in [(4, 1), (1, 4)]:
        with pytest.raises(ValueError):
            extract_patches_2d(x, bad_shape)
def test_patch_extractor_wrong_input(orange_face):
    """Check that an informative error is raised if the patch_size is not valid."""
    faces = _make_images(orange_face)
    extractor = PatchExtractor(patch_size=(8, 8, 8))
    err_msg = "patch_size must be a tuple of two integers"
    with pytest.raises(ValueError, match=err_msg):
        extractor.transform(faces)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/feature_extraction/tests/test_text.py | sklearn/feature_extraction/tests/test_text.py | import pickle
import re
import uuid
import warnings
from collections import defaultdict
from collections.abc import Mapping
from functools import partial
from io import StringIO
from itertools import product
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal, assert_array_equal
from scipy import sparse
from sklearn.base import clone
from sklearn.feature_extraction.text import (
ENGLISH_STOP_WORDS,
CountVectorizer,
HashingVectorizer,
TfidfTransformer,
TfidfVectorizer,
strip_accents_ascii,
strip_accents_unicode,
strip_tags,
)
from sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.utils._testing import (
assert_allclose_dense_sparse,
assert_almost_equal,
skip_if_32bit,
)
from sklearn.utils.fixes import _IS_WASM, CSC_CONTAINERS, CSR_CONTAINERS
# Toy corpora shared by the vectorizer tests below.
JUNK_FOOD_DOCS = (
    "the pizza pizza beer copyright",
    "the pizza burger beer copyright",
    "the the pizza beer beer copyright",
    "the burger beer beer copyright",
    "the coke burger coke copyright",
    "the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
    "the salad celeri copyright",
    "the salad salad sparkling water copyright",
    "the the celeri celeri copyright",
    "the tomato tomato salad water",
    "the tomato salad water copyright",
)
# The full corpus: junk-food documents followed by the healthy ones.
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
    """Strip accents from *s*, then upper-case it."""
    stripped = strip_accents_unicode(s)
    return stripped.upper()
def strip_eacute(s):
    """Replace every 'é' in *s* with a plain 'e'."""
    return "e".join(s.split("é"))
def split_tokenize(s):
    """Tokenize *s* by splitting on runs of whitespace."""
    tokens = s.split()
    return tokens
def lazy_analyze(s):
    """A degenerate analyzer: every document maps to one constant feature."""
    return list(("the_ultimate_feature",))
def test_strip_accents():
    """strip_accents_unicode on Latin, Arabic and combining-mark inputs."""
    # check some classical latin accentuated symbols
    a = "àáâãäåçèéêë"
    expected = "aaaaaaceeee"
    assert strip_accents_unicode(a) == expected
    a = "ìíîïñòóôõöùúûüý"
    expected = "iiiinooooouuuuy"
    assert strip_accents_unicode(a) == expected
    # check some arabic
    a = "\u0625"  # alef with a hamza below: إ
    expected = "\u0627"  # simple alef: ا
    assert strip_accents_unicode(a) == expected
    # mix letters accentuated and not
    a = "this is à test"
    expected = "this is a test"
    assert strip_accents_unicode(a) == expected
    # strings that are already decomposed
    a = "o\u0308"  # o with diaeresis
    expected = "o"
    assert strip_accents_unicode(a) == expected
    # combining marks by themselves
    a = "\u0300\u0301\u0302\u0303"
    expected = ""
    assert strip_accents_unicode(a) == expected
    # Multiple combining marks on one character
    a = "o\u0308\u0304"
    expected = "o"
    assert strip_accents_unicode(a) == expected
def test_to_ascii():
    """strip_accents_ascii transliterates Latin accents, drops the rest."""
    # check some classical latin accentuated symbols
    a = "àáâãäåçèéêë"
    expected = "aaaaaaceeee"
    assert strip_accents_ascii(a) == expected
    a = "ìíîïñòóôõöùúûüý"
    expected = "iiiinooooouuuuy"
    assert strip_accents_ascii(a) == expected
    # check some arabic
    a = "\u0625"  # alef with a hamza below
    expected = ""  # alef has no direct ascii match
    assert strip_accents_ascii(a) == expected
    # mix letters accentuated and not
    a = "this is à test"
    expected = "this is a test"
    assert strip_accents_ascii(a) == expected
@pytest.mark.parametrize("Vectorizer", (CountVectorizer, HashingVectorizer))
def test_word_analyzer_unigrams(Vectorizer):
    """Word analyzer: accent stripping, file input, custom preprocessor
    and custom tokenizer."""
    wa = Vectorizer(strip_accents="ascii").build_analyzer()
    text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
    expected = [
        "ai",
        "mange",
        "du",
        "kangourou",
        "ce",
        "midi",
        "etait",
        "pas",
        "tres",
        "bon",
    ]
    assert wa(text) == expected
    text = "This is a test, really.\n\n I met Harry yesterday."
    expected = ["this", "is", "test", "really", "met", "harry", "yesterday"]
    assert wa(text) == expected
    wa = Vectorizer(input="file").build_analyzer()
    text = StringIO("This is a test with a file-like object!")
    expected = ["this", "is", "test", "with", "file", "like", "object"]
    assert wa(text) == expected
    # with custom preprocessor
    wa = Vectorizer(preprocessor=uppercase).build_analyzer()
    text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
    expected = [
        "AI",
        "MANGE",
        "DU",
        "KANGOUROU",
        "CE",
        "MIDI",
        "ETAIT",
        "PAS",
        "TRES",
        "BON",
    ]
    assert wa(text) == expected
    # with custom tokenizer
    wa = Vectorizer(tokenizer=split_tokenize, strip_accents="ascii").build_analyzer()
    text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
    expected = [
        "j'ai",
        "mange",
        "du",
        "kangourou",
        "ce",
        "midi,",
        "c'etait",
        "pas",
        "tres",
        "bon.",
    ]
    assert wa(text) == expected
def test_word_analyzer_unigrams_and_bigrams():
    """ngram_range=(1, 2) yields all unigrams followed by all bigrams."""
    wa = CountVectorizer(
        analyzer="word", strip_accents="unicode", ngram_range=(1, 2)
    ).build_analyzer()
    text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
    expected = [
        "ai",
        "mange",
        "du",
        "kangourou",
        "ce",
        "midi",
        "etait",
        "pas",
        "tres",
        "bon",
        "ai mange",
        "mange du",
        "du kangourou",
        "kangourou ce",
        "ce midi",
        "midi etait",
        "etait pas",
        "pas tres",
        "tres bon",
    ]
    assert wa(text) == expected
def test_unicode_decode_error():
    """Analyzing bytes with a wrong declared encoding raises (strict mode)."""
    # Encode a unicode string to utf-8 bytes, then ask the analyzers to
    # decode them as ascii: that must fail under decode_error='strict'.
    text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
    text_bytes = text.encode("utf-8")
    word_analyzer = CountVectorizer(ngram_range=(1, 2), encoding="ascii").build_analyzer()
    with pytest.raises(UnicodeDecodeError):
        word_analyzer(text_bytes)
    char_analyzer = CountVectorizer(
        analyzer="char", ngram_range=(3, 6), encoding="ascii"
    ).build_analyzer()
    with pytest.raises(UnicodeDecodeError):
        char_analyzer(text_bytes)
def test_char_ngram_analyzer():
    """Character n-grams (3 to 6 chars), for string and file input."""
    cnga = CountVectorizer(
        analyzer="char", strip_accents="unicode", ngram_range=(3, 6)
    ).build_analyzer()
    text = "J'ai mangé du kangourou ce midi, c'était pas très bon"
    expected = ["j'a", "'ai", "ai ", "i m", " ma"]
    assert cnga(text)[:5] == expected
    expected = ["s tres", " tres ", "tres b", "res bo", "es bon"]
    assert cnga(text)[-5:] == expected
    text = "This \n\tis a test, really.\n\n I met Harry yesterday"
    expected = ["thi", "his", "is ", "s i", " is"]
    assert cnga(text)[:5] == expected
    expected = [" yeste", "yester", "esterd", "sterda", "terday"]
    assert cnga(text)[-5:] == expected
    cnga = CountVectorizer(
        input="file", analyzer="char", ngram_range=(3, 6)
    ).build_analyzer()
    text = StringIO("This is a test with a file-like object!")
    expected = ["thi", "his", "is ", "s i", " is"]
    assert cnga(text)[:5] == expected
def test_char_wb_ngram_analyzer():
    """char_wb n-grams, which pad tokens with spaces at word boundaries."""
    cnga = CountVectorizer(
        analyzer="char_wb", strip_accents="unicode", ngram_range=(3, 6)
    ).build_analyzer()
    text = "This \n\tis a test, really.\n\n I met Harry yesterday"
    expected = [" th", "thi", "his", "is ", " thi"]
    assert cnga(text)[:5] == expected
    expected = ["yester", "esterd", "sterda", "terday", "erday "]
    assert cnga(text)[-5:] == expected
    cnga = CountVectorizer(
        input="file", analyzer="char_wb", ngram_range=(3, 6)
    ).build_analyzer()
    text = StringIO("A test with a file-like object!")
    expected = [" a ", " te", "tes", "est", "st ", " tes"]
    assert cnga(text)[:6] == expected
def test_word_ngram_analyzer():
    """Word n-grams (3 to 6 words), for string and file input."""
    cnga = CountVectorizer(
        analyzer="word", strip_accents="unicode", ngram_range=(3, 6)
    ).build_analyzer()
    text = "This \n\tis a test, really.\n\n I met Harry yesterday"
    expected = ["this is test", "is test really", "test really met"]
    assert cnga(text)[:3] == expected
    expected = [
        "test really met harry yesterday",
        "this is test really met harry",
        "is test really met harry yesterday",
    ]
    assert cnga(text)[-3:] == expected
    cnga_file = CountVectorizer(
        input="file", analyzer="word", ngram_range=(3, 6)
    ).build_analyzer()
    file = StringIO(text)
    assert cnga_file(file) == cnga(text)
def test_countvectorizer_custom_vocabulary():
    """A user-supplied vocabulary is honored whatever its container type."""
    vocab = {"pizza": 0, "beer": 1}
    terms = set(vocab)
    # Try a few of the supported container types.
    for container in [dict, list, iter, partial(defaultdict, int)]:
        v = container(vocab)
        vect = CountVectorizer(vocabulary=v)
        vect.fit(JUNK_FOOD_DOCS)
        if isinstance(v, Mapping):
            # Mappings fix both the terms and their column indices.
            assert vect.vocabulary_ == vocab
        else:
            assert set(vect.vocabulary_) == terms
        X = vect.transform(JUNK_FOOD_DOCS)
        assert X.shape[1] == len(terms)
        # A fresh, unfitted vectorizer with the same vocabulary can still
        # invert the transformed output.
        inv = CountVectorizer(vocabulary=container(vocab)).inverse_transform(X)
        assert len(inv) == X.shape[0]
def test_countvectorizer_custom_vocabulary_pipeline():
    """A fixed vocabulary is preserved inside a Pipeline."""
    what_we_like = ["pizza", "beer"]
    pipe = Pipeline(
        [
            ("count", CountVectorizer(vocabulary=what_we_like)),
            ("tfidf", TfidfTransformer()),
        ]
    )
    X = pipe.fit_transform(ALL_FOOD_DOCS)
    assert X.shape[1] == len(what_we_like)
    assert set(pipe.named_steps["count"].vocabulary_) == set(what_we_like)
def test_countvectorizer_custom_vocabulary_repeated_indices():
    """A vocabulary mapping two terms to one index is rejected at fit."""
    vect = CountVectorizer(vocabulary={"pizza": 0, "beer": 0})
    with pytest.raises(ValueError, match="Vocabulary contains repeated indices"):
        vect.fit(["pasta_siziliana"])
def test_countvectorizer_custom_vocabulary_gap_index():
    """A vocabulary with a missing column index is rejected at fit."""
    vect = CountVectorizer(vocabulary={"pizza": 1, "beer": 2})
    with pytest.raises(ValueError, match="doesn't contain index"):
        vect.fit(["pasta_verdura"])
def test_countvectorizer_stop_words():
    """get_stop_words resolves names, rejects unknown ones, accepts lists."""
    cv = CountVectorizer()
    cv.set_params(stop_words="english")
    assert cv.get_stop_words() == ENGLISH_STOP_WORDS
    # Unknown stop-word list names raise at lookup time.
    for bad_name in ["_bad_str_stop_", "_bad_unicode_stop_"]:
        cv.set_params(stop_words=bad_name)
        with pytest.raises(ValueError):
            cv.get_stop_words()
    custom_list = ["some", "other", "words"]
    cv.set_params(stop_words=custom_list)
    assert cv.get_stop_words() == set(custom_list)
def test_countvectorizer_empty_vocabulary():
    """An empty vocabulary (given, or induced by stop words) is an error."""
    with pytest.raises(ValueError, match="empty vocabulary"):
        CountVectorizer(vocabulary=[]).fit(["foo"])
    with pytest.raises(ValueError, match="empty vocabulary"):
        # Fitting on stop words only leaves nothing in the vocabulary.
        v = CountVectorizer(max_df=1.0, stop_words="english")
        v.fit(["to be or not to be", "and me too", "and so do you"])
def test_fit_countvectorizer_twice():
    """Refitting replaces the vocabulary learned by a previous fit."""
    cv = CountVectorizer()
    n_features_first = cv.fit_transform(ALL_FOOD_DOCS[:5]).shape[1]
    n_features_second = cv.fit_transform(ALL_FOOD_DOCS[5:]).shape[1]
    assert n_features_first != n_features_second
def test_countvectorizer_custom_token_pattern():
    """Check `get_feature_names_out()` when a custom token pattern is passed.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/12971
    """
    corpus = [
        "This is the 1st document in my corpus.",
        "This document is the 2nd sample.",
        "And this is the 3rd one.",
        "Is this the 4th document?",
    ]
    # A single capturing group keeps only the word after the ordinal.
    token_pattern = r"[0-9]{1,3}(?:st|nd|rd|th)\s\b(\w{2,})\b"
    vectorizer = CountVectorizer(token_pattern=token_pattern)
    vectorizer.fit_transform(corpus)
    assert_array_equal(
        vectorizer.get_feature_names_out(), ["document", "one", "sample"]
    )
def test_countvectorizer_custom_token_pattern_with_several_group():
    """Check that we raise an error if token pattern capture several groups.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/12971
    """
    corpus = [
        "This is the 1st document in my corpus.",
        "This document is the 2nd sample.",
        "And this is the 3rd one.",
        "Is this the 4th document?",
    ]
    # Two capturing groups make the pattern ambiguous and must be rejected.
    vectorizer = CountVectorizer(
        token_pattern=r"([0-9]{1,3}(?:st|nd|rd|th))\s\b(\w{2,})\b"
    )
    err_msg = "More than 1 capturing group in token pattern"
    with pytest.raises(ValueError, match=err_msg):
        vectorizer.fit(corpus)
def test_countvectorizer_uppercase_in_vocab():
    # The uppercase check on a provided vocabulary must run at fit time
    # only, and not again at transform time (#21251).
    vocabulary = ["Sample", "Upper", "Case", "Vocabulary"]
    message = (
        "Upper case characters found in"
        " vocabulary while 'lowercase'"
        " is True. These entries will not"
        " be matched with any documents"
    )
    vectorizer = CountVectorizer(lowercase=True, vocabulary=vocabulary)
    with pytest.warns(UserWarning, match=message):
        vectorizer.fit(vocabulary)
    # transform must stay silent: escalate any UserWarning to an error.
    with warnings.catch_warnings():
        warnings.simplefilter("error", UserWarning)
        vectorizer.transform(vocabulary)
def test_tf_transformer_feature_names_out():
    """Check get_feature_names_out for TfidfTransformer"""
    X = [[1, 1, 1], [1, 1, 0], [1, 0, 0]]
    transformer = TfidfTransformer(smooth_idf=True, norm="l2").fit(X)
    names_in = ["a", "c", "b"]
    # Input feature names pass through unchanged.
    assert_array_equal(names_in, transformer.get_feature_names_out(names_in))
def test_tf_idf_smoothing():
    """Smoothed IDF gives non-negative, l2-normalized rows."""
    X = [[1, 1, 1], [1, 1, 0], [1, 0, 0]]
    tfidf = TfidfTransformer(smooth_idf=True, norm="l2").fit_transform(X).toarray()
    assert (tfidf >= 0).all()
    # check normalization
    assert_array_almost_equal((tfidf**2).sum(axis=1), [1.0, 1.0, 1.0])
    # Smoothing is robust to a feature that is zero in every document.
    X = [[1, 1, 0], [1, 1, 0], [1, 0, 0]]
    tfidf = TfidfTransformer(smooth_idf=True, norm="l2").fit_transform(X).toarray()
    assert (tfidf >= 0).all()
@pytest.mark.xfail(
    _IS_WASM,
    reason=(
        "no floating point exceptions, see"
        " https://github.com/numpy/numpy/pull/21895#issuecomment-1311525881"
    ),
)
def test_tfidf_no_smoothing():
    """Unsmoothed IDF still normalizes, but warns on all-zero features."""
    X = [[1, 1, 1], [1, 1, 0], [1, 0, 0]]
    tfidf = TfidfTransformer(smooth_idf=False, norm="l2").fit_transform(X).toarray()
    assert (tfidf >= 0).all()
    # check normalization
    assert_array_almost_equal((tfidf**2).sum(axis=1), [1.0, 1.0, 1.0])
    # Without smoothing, a feature with only zeros makes the IDF fragile
    # and triggers a division by zero.
    X = [[1, 1, 0], [1, 1, 0], [1, 0, 0]]
    transformer = TfidfTransformer(smooth_idf=False, norm="l2")
    with pytest.warns(RuntimeWarning, match="divide by zero"):
        transformer.fit_transform(X).toarray()
def test_sublinear_tf():
    """Sublinear tf grows with the counts, but slower than linearly."""
    X = [[1], [2], [3]]
    transformer = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
    tf = transformer.fit_transform(X).toarray()
    assert tf[0] == 1
    # Monotonically increasing with the raw counts ...
    assert tf[1] > tf[0]
    assert tf[2] > tf[1]
    # ... yet staying strictly below them.
    assert tf[1] < 2
    assert tf[2] < 3
def test_vectorizer():
    """End-to-end checks for CountVectorizer, TfidfTransformer and
    TfidfVectorizer on the toy food corpus."""
    # raw documents as an iterator
    train_data = iter(ALL_FOOD_DOCS[:-1])
    test_data = [ALL_FOOD_DOCS[-1]]
    n_train = len(ALL_FOOD_DOCS) - 1

    # test without vocabulary
    v1 = CountVectorizer(max_df=0.5)
    counts_train = v1.fit_transform(train_data)
    if hasattr(counts_train, "tocsr"):
        counts_train = counts_train.tocsr()
    assert counts_train[0, v1.vocabulary_["pizza"]] == 2

    # build a vectorizer v2 with the same vocabulary as the one fitted by v1
    v2 = CountVectorizer(vocabulary=v1.vocabulary_)

    # compare that the two vectorizers give the same output on the test sample
    for v in (v1, v2):
        counts_test = v.transform(test_data)
        if hasattr(counts_test, "tocsr"):
            counts_test = counts_test.tocsr()

        vocabulary = v.vocabulary_
        assert counts_test[0, vocabulary["salad"]] == 1
        assert counts_test[0, vocabulary["tomato"]] == 1
        assert counts_test[0, vocabulary["water"]] == 1

        # stop word from the fixed list
        assert "the" not in vocabulary

        # stop word found automatically by the vectorizer DF thresholding
        # words that are high frequent across the complete corpus are likely
        # to be not informative (either real stop words of extraction
        # artifacts)
        assert "copyright" not in vocabulary

        # not present in the sample
        assert counts_test[0, vocabulary["coke"]] == 0
        assert counts_test[0, vocabulary["burger"]] == 0
        assert counts_test[0, vocabulary["beer"]] == 0
        assert counts_test[0, vocabulary["pizza"]] == 0

    # test tf-idf
    t1 = TfidfTransformer(norm="l1")
    tfidf = t1.fit(counts_train).transform(counts_train).toarray()
    assert len(t1.idf_) == len(v1.vocabulary_)
    assert tfidf.shape == (n_train, len(v1.vocabulary_))

    # test tf-idf with new data
    tfidf_test = t1.transform(counts_test).toarray()
    assert tfidf_test.shape == (len(test_data), len(v1.vocabulary_))

    # test tf alone
    t2 = TfidfTransformer(norm="l1", use_idf=False)
    tf = t2.fit(counts_train).transform(counts_train).toarray()
    assert not hasattr(t2, "idf_")

    # test idf transform with unlearned idf vector
    t3 = TfidfTransformer(use_idf=True)
    with pytest.raises(ValueError):
        t3.transform(counts_train)

    # L1-normalized term frequencies sum to one
    assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)

    # test the direct tfidf vectorizer
    # (equivalent to term count vectorizer + tfidf transformer)
    train_data = iter(ALL_FOOD_DOCS[:-1])
    tv = TfidfVectorizer(norm="l1")

    tv.max_df = v1.max_df
    tfidf2 = tv.fit_transform(train_data).toarray()
    assert not tv.fixed_vocabulary_
    assert_array_almost_equal(tfidf, tfidf2)

    # test the direct tfidf vectorizer with new data
    tfidf_test2 = tv.transform(test_data).toarray()
    assert_array_almost_equal(tfidf_test, tfidf_test2)

    # test transform on unfitted vectorizer with empty vocabulary
    v3 = CountVectorizer(vocabulary=None)
    with pytest.raises(ValueError):
        v3.transform(train_data)

    # ascii preprocessor?
    v3.set_params(strip_accents="ascii", lowercase=False)
    processor = v3.build_preprocessor()
    text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
    expected = strip_accents_ascii(text)
    result = processor(text)
    assert expected == result

    # error on bad strip_accents param
    v3.set_params(strip_accents="_gabbledegook_", preprocessor=None)
    with pytest.raises(ValueError):
        v3.build_preprocessor()

    # error with bad analyzer type
    # NOTE(review): the previous code did `v3.set_params = "_invalid..."`,
    # overwriting the bound method with a string instead of calling it; the
    # ValueError below was then only triggered by the invalid `strip_accents`
    # value set above, so the bad-analyzer path was never exercised.
    v3.set_params(analyzer="_invalid_analyzer_type_")
    with pytest.raises(ValueError):
        v3.build_analyzer()
def test_tfidf_vectorizer_setters():
    init_params = {
        "norm": "l2",
        "use_idf": False,
        "smooth_idf": False,
        "sublinear_tf": False,
    }
    tv = TfidfVectorizer(**init_params)
    tv.fit(JUNK_FOOD_DOCS)
    for name, value in init_params.items():
        assert getattr(tv._tfidf, name) == value

    # reassigning the public attributes must not propagate to the inner
    # TfidfTransformer until the vectorizer is refitted
    tv.norm = "l1"
    tv.use_idf = True
    tv.smooth_idf = True
    tv.sublinear_tf = True
    for name, value in init_params.items():
        assert getattr(tv._tfidf, name) == value

    # after refitting, the inner transformer picks up the new settings
    tv.fit(JUNK_FOOD_DOCS)
    for name in init_params:
        assert getattr(tv._tfidf, name) == getattr(tv, name)
def test_hashing_vectorizer():
    """Check HashingVectorizer output shape, dtype, value bounds and
    per-row normalization with default and non-default parameters."""
    v = HashingVectorizer()
    X = v.transform(ALL_FOOD_DOCS)
    token_nnz = X.nnz
    assert X.shape == (len(ALL_FOOD_DOCS), v.n_features)
    assert X.dtype == v.dtype

    # By default the hashed values receive a random sign and l2 normalization
    # makes the feature values bounded
    assert np.min(X.data) > -1
    assert np.min(X.data) < 0
    assert np.max(X.data) > 0
    assert np.max(X.data) < 1

    # Check that every row is normalized (the previous code indexed X[0]
    # inside the loop, so only the first row was ever checked)
    for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 2), 1.0)

    # Check vectorization with some non-default parameters
    v = HashingVectorizer(ngram_range=(1, 2), norm="l1")
    X = v.transform(ALL_FOOD_DOCS)
    assert X.shape == (len(ALL_FOOD_DOCS), v.n_features)
    assert X.dtype == v.dtype

    # ngrams generate more non zeros
    ngrams_nnz = X.nnz
    assert ngrams_nnz > token_nnz
    assert ngrams_nnz < 2 * token_nnz

    # makes the feature values bounded
    assert np.min(X.data) > -1
    assert np.max(X.data) < 1

    # Check that every row is l1 normalized (same X[0] fix as above)
    for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 1), 1.0)
def test_feature_names():
    cv = CountVectorizer(max_df=0.5)

    # querying feature names before fitting (and without a fixed vocabulary)
    # must raise
    with pytest.raises(ValueError):
        cv.get_feature_names_out()
    assert not cv.fixed_vocabulary_

    # vocabulary learned from the data
    X = cv.fit_transform(ALL_FOOD_DOCS)
    n_samples, n_features = X.shape
    assert len(cv.vocabulary_) == n_features

    expected = [
        "beer",
        "burger",
        "celeri",
        "coke",
        "pizza",
        "salad",
        "sparkling",
        "tomato",
        "water",
    ]

    feature_names = cv.get_feature_names_out()
    assert isinstance(feature_names, np.ndarray)
    assert feature_names.dtype == object
    assert len(feature_names) == n_features
    assert_array_equal(expected, feature_names)
    # feature names are ordered by their vocabulary index
    for idx, name in enumerate(feature_names):
        assert idx == cv.vocabulary_.get(name)

    # the same vocabulary supplied explicitly behaves identically
    cv = CountVectorizer(vocabulary=expected)
    feature_names = cv.get_feature_names_out()
    assert_array_equal(expected, feature_names)
    assert cv.fixed_vocabulary_
    for idx, name in enumerate(feature_names):
        assert idx == cv.vocabulary_.get(name)
@pytest.mark.parametrize("Vectorizer", (CountVectorizer, TfidfVectorizer))
def test_vectorizer_max_features(Vectorizer):
    # with max_features=4 only the four most frequent mid-df terms survive
    vectorizer = Vectorizer(max_df=0.6, max_features=4)
    vectorizer.fit(ALL_FOOD_DOCS)
    assert set(vectorizer.vocabulary_) == {"burger", "beer", "salad", "pizza"}
def test_count_vectorizer_max_features():
    # Regression test: max_features didn't work correctly in 0.14.
    for max_features in (1, 3, None):
        cv = CountVectorizer(max_features=max_features)
        counts = cv.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
        features = cv.get_feature_names_out()
        # The most common term is "the" with corpus-wide frequency 7; it must
        # be retained whatever the max_features setting.
        assert counts.max() == 7
        assert features[np.argmax(counts)] == "the"
def test_vectorizer_max_df():
    test_data = ["abc", "dea", "eat"]
    vect = CountVectorizer(analyzer="char", max_df=1.0)
    vect.fit(test_data)
    assert "a" in vect.vocabulary_
    assert len(vect.vocabulary_) == 6

    # float threshold: 0.5 * 3 documents -> max_doc_count == 1.5
    vect.max_df = 0.5
    vect.fit(test_data)
    assert "a" not in vect.vocabulary_  # {ae} ignored
    assert len(vect.vocabulary_) == 4  # {bcdt} remain

    # integer threshold: keep terms occurring in at most one document
    vect.max_df = 1
    vect.fit(test_data)
    assert "a" not in vect.vocabulary_  # {ae} ignored
    assert len(vect.vocabulary_) == 4  # {bcdt} remain
def test_vectorizer_min_df():
    test_data = ["abc", "dea", "eat"]
    vect = CountVectorizer(analyzer="char", min_df=1)
    vect.fit(test_data)
    assert "a" in vect.vocabulary_
    assert len(vect.vocabulary_) == 6

    # integer threshold: keep terms occurring in at least two documents
    vect.min_df = 2
    vect.fit(test_data)
    assert "c" not in vect.vocabulary_  # {bcdt} ignored
    assert len(vect.vocabulary_) == 2  # {ae} remain

    # float threshold: 0.8 * 3 documents -> min_doc_count == 2.4
    vect.min_df = 0.8
    vect.fit(test_data)
    assert "c" not in vect.vocabulary_  # {bcdet} ignored
    assert len(vect.vocabulary_) == 1  # {a} remains
def test_count_binary_occurrences():
    test_data = ["aaabc", "abbde"]

    # default behaviour: raw term counts
    vect = CountVectorizer(analyzer="char", max_df=1.0)
    X = vect.fit_transform(test_data).toarray()
    assert_array_equal(vect.get_feature_names_out(), ["a", "b", "c", "d", "e"])
    assert_array_equal(X, [[3, 1, 1, 0, 0], [1, 2, 0, 1, 1]])

    # binary=True records only presence/absence
    vect = CountVectorizer(analyzer="char", max_df=1.0, binary=True)
    X = vect.fit_transform(test_data).toarray()
    assert_array_equal(X, [[1, 1, 1, 0, 0], [1, 1, 0, 1, 1]])

    # the output dtype is configurable
    vect = CountVectorizer(analyzer="char", max_df=1.0, binary=True, dtype=np.float32)
    assert vect.fit_transform(test_data).dtype == np.float32
def test_hashed_binary_occurrences():
    test_data = ["aaabc", "abbde"]

    # default behaviour: multiple occurrences are accumulated
    vect = HashingVectorizer(alternate_sign=False, analyzer="char", norm=None)
    X = vect.transform(test_data)
    assert np.max(X[0:1].data) == 3
    assert np.max(X[1:2].data) == 2
    assert X.dtype == np.float64

    # binary=True records only presence/absence
    vect = HashingVectorizer(
        analyzer="char", alternate_sign=False, binary=True, norm=None
    )
    X = vect.transform(test_data)
    assert np.max(X.data) == 1
    assert X.dtype == np.float64

    # the dtype parameter is honoured
    vect = HashingVectorizer(
        analyzer="char", alternate_sign=False, binary=True, norm=None, dtype=np.float64
    )
    X = vect.transform(test_data)
    assert X.dtype == np.float64
@pytest.mark.parametrize("Vectorizer", (CountVectorizer, TfidfVectorizer))
def test_vectorizer_inverse_transform(Vectorizer):
    data = ALL_FOOD_DOCS
    vectorizer = Vectorizer()
    transformed_data = vectorizer.fit_transform(data)
    inversed_data = vectorizer.inverse_transform(transformed_data)
    assert isinstance(inversed_data, list)

    # round-tripping recovers exactly the set of analyzed terms per document
    analyze = vectorizer.build_analyzer()
    for doc, inversed_terms in zip(data, inversed_data):
        assert_array_equal(
            np.sort(np.unique(analyze(doc))), np.sort(np.unique(inversed_terms))
        )

    assert sparse.issparse(transformed_data)
    assert transformed_data.format == "csr"

    # inverse_transform also accepts dense numpy arrays ...
    dense = transformed_data.toarray()
    for terms, dense_terms in zip(inversed_data, vectorizer.inverse_transform(dense)):
        assert_array_equal(np.sort(terms), np.sort(dense_terms))

    # ... and non-CSR sparse matrices
    csc = transformed_data.tocsc()
    for terms, csc_terms in zip(inversed_data, vectorizer.inverse_transform(csc)):
        assert_array_equal(np.sort(terms), np.sort(csc_terms))
def test_count_vectorizer_pipeline_grid_selection():
    data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
    # junk food is the negative class, everything else the positive one
    target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)

    # hold out 20% of the data for the final evaluation
    train_data, test_data, target_train, target_test = train_test_split(
        data, target, test_size=0.2, random_state=0
    )

    pipeline = Pipeline([("vect", CountVectorizer()), ("svc", LinearSVC())])
    parameters = {
        "vect__ngram_range": [(1, 1), (1, 2)],
        "svc__loss": ("hinge", "squared_hinge"),
    }

    # jointly tune the feature extraction and the classifier
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=1, cv=3)

    # the selected model must be perfect on the held-out evaluation set
    pred = grid_search.fit(train_data, target_train).predict(test_data)
    assert_array_equal(pred, target_test)

    # every candidate reaches 100% accuracy on this toy dataset, so the
    # unigram configuration encountered first is retained as best
    assert grid_search.best_score_ == 1.0
    best_vectorizer = grid_search.best_estimator_.named_steps["vect"]
    assert best_vectorizer.ngram_range == (1, 1)
def test_vectorizer_pipeline_grid_selection():
    data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
    # junk food is the negative class, everything else the positive one
    target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)

    # hold out 10% of the data for the final evaluation
    train_data, test_data, target_train, target_test = train_test_split(
        data, target, test_size=0.1, random_state=0
    )

    pipeline = Pipeline([("vect", TfidfVectorizer()), ("svc", LinearSVC())])
    parameters = {
        "vect__ngram_range": [(1, 1), (1, 2)],
        "vect__norm": ("l1", "l2"),
        "svc__loss": ("hinge", "squared_hinge"),
    }

    # jointly tune the feature extraction and the classifier
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)

    # the selected model must be perfect on the held-out evaluation set
    pred = grid_search.fit(train_data, target_train).predict(test_data)
    assert_array_equal(pred, target_test)

    # every candidate reaches 100% accuracy on this toy dataset; check which
    # configuration was retained as best
    assert grid_search.best_score_ == 1.0
    best_vectorizer = grid_search.best_estimator_.named_steps["vect"]
    assert best_vectorizer.ngram_range == (1, 1)
    assert best_vectorizer.norm == "l2"
    assert not best_vectorizer.fixed_vocabulary_
def test_vectorizer_pipeline_cross_validation():
    data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
    # junk food is the negative class, everything else the positive one
    target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)

    pipeline = Pipeline([("vect", TfidfVectorizer()), ("svc", LinearSVC())])
    # this toy problem is perfectly separable on every fold
    cv_scores = cross_val_score(pipeline, data, target, cv=3)
    assert_array_equal(cv_scores, [1.0, 1.0, 1.0])
def test_vectorizer_unicode():
# tests that the count vectorizer works with cyrillic.
document = (
"Машинное обучение — обширный подраздел искусственного "
"интеллекта, изучающий методы построения алгоритмов, "
"способных обучаться."
)
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert X_counted.shape == (1, 12)
vect = HashingVectorizer(norm=None, alternate_sign=False)
X_hashed = vect.transform([document])
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.