repo
stringlengths
7
90
file_url
stringlengths
81
315
file_path
stringlengths
4
228
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 14:38:15
2026-01-05 02:33:18
truncated
bool
2 classes
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/gaussian_process/plot_gpc.py
examples/gaussian_process/plot_gpc.py
""" ==================================================================== Probabilistic predictions with Gaussian process classification (GPC) ==================================================================== This example illustrates the predicted probability of GPC for an RBF kernel with different choices of the hyperparameters. The first figure shows the predicted probability of GPC with arbitrarily chosen hyperparameters and with the hyperparameters corresponding to the maximum log-marginal-likelihood (LML). While the hyperparameters chosen by optimizing LML have a considerable larger LML, they perform slightly worse according to the log-loss on test data. The figure shows that this is because they exhibit a steep change of the class probabilities at the class boundaries (which is good) but have predicted probabilities close to 0.5 far away from the class boundaries (which is bad) This undesirable effect is caused by the Laplace approximation used internally by GPC. The second figure shows the log-marginal-likelihood for different choices of the kernel's hyperparameters, highlighting the two choices of the hyperparameters used in the first figure by black dots. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import numpy as np from matplotlib import pyplot as plt from sklearn.gaussian_process import GaussianProcessClassifier from sklearn.gaussian_process.kernels import RBF from sklearn.metrics import accuracy_score, log_loss # Generate data train_size = 50 rng = np.random.RandomState(0) X = rng.uniform(0, 5, 100)[:, np.newaxis] y = np.array(X[:, 0] > 2.5, dtype=int) # Specify Gaussian Processes with fixed and optimized hyperparameters gp_fix = GaussianProcessClassifier(kernel=1.0 * RBF(length_scale=1.0), optimizer=None) gp_fix.fit(X[:train_size], y[:train_size]) gp_opt = GaussianProcessClassifier(kernel=1.0 * RBF(length_scale=1.0)) gp_opt.fit(X[:train_size], y[:train_size]) print( "Log Marginal Likelihood (initial): %.3f" % gp_fix.log_marginal_likelihood(gp_fix.kernel_.theta) ) print( "Log Marginal Likelihood (optimized): %.3f" % gp_opt.log_marginal_likelihood(gp_opt.kernel_.theta) ) print( "Accuracy: %.3f (initial) %.3f (optimized)" % ( accuracy_score(y[:train_size], gp_fix.predict(X[:train_size])), accuracy_score(y[:train_size], gp_opt.predict(X[:train_size])), ) ) print( "Log-loss: %.3f (initial) %.3f (optimized)" % ( log_loss(y[:train_size], gp_fix.predict_proba(X[:train_size])[:, 1]), log_loss(y[:train_size], gp_opt.predict_proba(X[:train_size])[:, 1]), ) ) # Plot posteriors plt.figure() plt.scatter( X[:train_size, 0], y[:train_size], c="k", label="Train data", edgecolors=(0, 0, 0) ) plt.scatter( X[train_size:, 0], y[train_size:], c="g", label="Test data", edgecolors=(0, 0, 0) ) X_ = np.linspace(0, 5, 100) plt.plot( X_, gp_fix.predict_proba(X_[:, np.newaxis])[:, 1], "r", label="Initial kernel: %s" % gp_fix.kernel_, ) plt.plot( X_, gp_opt.predict_proba(X_[:, np.newaxis])[:, 1], "b", label="Optimized kernel: %s" % gp_opt.kernel_, ) plt.xlabel("Feature") plt.ylabel("Class 1 probability") plt.xlim(0, 5) plt.ylim(-0.25, 1.5) plt.legend(loc="best") # Plot LML landscape plt.figure() theta0 
= np.logspace(0, 8, 30) theta1 = np.logspace(-1, 1, 29) Theta0, Theta1 = np.meshgrid(theta0, theta1) LML = [ [ gp_opt.log_marginal_likelihood(np.log([Theta0[i, j], Theta1[i, j]])) for i in range(Theta0.shape[0]) ] for j in range(Theta0.shape[1]) ] LML = np.array(LML).T plt.plot( np.exp(gp_fix.kernel_.theta)[0], np.exp(gp_fix.kernel_.theta)[1], "ko", zorder=10 ) plt.plot( np.exp(gp_opt.kernel_.theta)[0], np.exp(gp_opt.kernel_.theta)[1], "ko", zorder=10 ) plt.pcolor(Theta0, Theta1, LML) plt.xscale("log") plt.yscale("log") plt.colorbar() plt.xlabel("Magnitude") plt.ylabel("Length-scale") plt.title("Log-marginal-likelihood") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/gaussian_process/plot_gpc_xor.py
examples/gaussian_process/plot_gpc_xor.py
""" ======================================================================== Illustration of Gaussian process classification (GPC) on the XOR dataset ======================================================================== This example illustrates GPC on XOR data. Compared are a stationary, isotropic kernel (RBF) and a non-stationary kernel (DotProduct). On this particular dataset, the DotProduct kernel obtains considerably better results because the class-boundaries are linear and coincide with the coordinate axes. In general, stationary kernels often obtain better results. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn.gaussian_process import GaussianProcessClassifier from sklearn.gaussian_process.kernels import RBF, DotProduct xx, yy = np.meshgrid(np.linspace(-3, 3, 50), np.linspace(-3, 3, 50)) rng = np.random.RandomState(0) X = rng.randn(200, 2) Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0) # fit the model plt.figure(figsize=(10, 5)) kernels = [1.0 * RBF(length_scale=1.15), 1.0 * DotProduct(sigma_0=1.0) ** 2] for i, kernel in enumerate(kernels): clf = GaussianProcessClassifier(kernel=kernel, warm_start=True).fit(X, Y) # plot the decision function for each datapoint on the grid Z = clf.predict_proba(np.vstack((xx.ravel(), yy.ravel())).T)[:, 1] Z = Z.reshape(xx.shape) plt.subplot(1, 2, i + 1) image = plt.imshow( Z, interpolation="nearest", extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect="auto", origin="lower", cmap=plt.cm.PuOr_r, ) contours = plt.contour(xx, yy, Z, levels=[0.5], linewidths=2, colors=["k"]) plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired, edgecolors=(0, 0, 0)) plt.xticks(()) plt.yticks(()) plt.axis([-3, 3, -3, 3]) plt.colorbar(image) plt.title( "%s\n Log-Marginal-Likelihood:%.3f" % (clf.kernel_, clf.log_marginal_likelihood(clf.kernel_.theta)), fontsize=12, ) plt.tight_layout() plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/gaussian_process/plot_compare_gpr_krr.py
examples/gaussian_process/plot_compare_gpr_krr.py
""" ========================================================== Comparison of kernel ridge and Gaussian process regression ========================================================== This example illustrates differences between a kernel ridge regression and a Gaussian process regression. Both kernel ridge regression and Gaussian process regression are using a so-called "kernel trick" to make their models expressive enough to fit the training data. However, the machine learning problems solved by the two methods are drastically different. Kernel ridge regression will find the target function that minimizes a loss function (the mean squared error). Instead of finding a single target function, the Gaussian process regression employs a probabilistic approach : a Gaussian posterior distribution over target functions is defined based on the Bayes' theorem, Thus prior probabilities on target functions are being combined with a likelihood function defined by the observed training data to provide estimates of the posterior distributions. We will illustrate these differences with an example and we will also focus on tuning the kernel hyperparameters. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Generating a dataset # -------------------- # # We create a synthetic dataset. The true generative process will take a 1-D # vector and compute its sine. Note that the period of this sine is thus # :math:`2 \pi`. We will reuse this information later in this example. import numpy as np rng = np.random.RandomState(0) data = np.linspace(0, 30, num=1_000).reshape(-1, 1) target = np.sin(data).ravel() # %% # Now, we can imagine a scenario where we get observations from this true # process. However, we will add some challenges: # # - the measurements will be noisy; # - only samples from the beginning of the signal will be available. 
training_sample_indices = rng.choice(np.arange(0, 400), size=40, replace=False) training_data = data[training_sample_indices] training_noisy_target = target[training_sample_indices] + 0.5 * rng.randn( len(training_sample_indices) ) # %% # Let's plot the true signal and the noisy measurements available for training. import matplotlib.pyplot as plt plt.plot(data, target, label="True signal", linewidth=2) plt.scatter( training_data, training_noisy_target, color="black", label="Noisy measurements", ) plt.legend() plt.xlabel("data") plt.ylabel("target") _ = plt.title( "Illustration of the true generative process and \n" "noisy measurements available during training" ) # %% # Limitations of a simple linear model # ------------------------------------ # # First, we would like to highlight the limitations of a linear model given # our dataset. We fit a :class:`~sklearn.linear_model.Ridge` and check the # predictions of this model on our dataset. from sklearn.linear_model import Ridge ridge = Ridge().fit(training_data, training_noisy_target) plt.plot(data, target, label="True signal", linewidth=2) plt.scatter( training_data, training_noisy_target, color="black", label="Noisy measurements", ) plt.plot(data, ridge.predict(data), label="Ridge regression") plt.legend() plt.xlabel("data") plt.ylabel("target") _ = plt.title("Limitation of a linear model such as ridge") # %% # Such a ridge regressor underfits data since it is not expressive enough. # # Kernel methods: kernel ridge and Gaussian process # ------------------------------------------------- # # Kernel ridge # ............ # # We can make the previous linear model more expressive by using a so-called # kernel. A kernel is an embedding from the original feature space to another # one. Simply put, it is used to map our original data into a newer and more # complex feature space. This new space is explicitly defined by the choice of # kernel. # # In our case, we know that the true generative process is a periodic function. 
# We can use a :class:`~sklearn.gaussian_process.kernels.ExpSineSquared` kernel # which allows recovering the periodicity. The class # :class:`~sklearn.kernel_ridge.KernelRidge` will accept such a kernel. # # Using this model together with a kernel is equivalent to embed the data # using the mapping function of the kernel and then apply a ridge regression. # In practice, the data are not mapped explicitly; instead the dot product # between samples in the higher dimensional feature space is computed using the # "kernel trick". # # Thus, let's use such a :class:`~sklearn.kernel_ridge.KernelRidge`. import time from sklearn.gaussian_process.kernels import ExpSineSquared from sklearn.kernel_ridge import KernelRidge kernel_ridge = KernelRidge(kernel=ExpSineSquared()) start_time = time.time() kernel_ridge.fit(training_data, training_noisy_target) print( f"Fitting KernelRidge with default kernel: {time.time() - start_time:.3f} seconds" ) # %% plt.plot(data, target, label="True signal", linewidth=2, linestyle="dashed") plt.scatter( training_data, training_noisy_target, color="black", label="Noisy measurements", ) plt.plot( data, kernel_ridge.predict(data), label="Kernel ridge", linewidth=2, linestyle="dashdot", ) plt.legend(loc="lower right") plt.xlabel("data") plt.ylabel("target") _ = plt.title( "Kernel ridge regression with an exponential sine squared\n " "kernel using default hyperparameters" ) # %% # This fitted model is not accurate. Indeed, we did not set the parameters of # the kernel and instead used the default ones. We can inspect them. kernel_ridge.kernel # %% # Our kernel has two parameters: the length-scale and the periodicity. For our # dataset, we use `sin` as the generative process, implying a # :math:`2 \pi`-periodicity for the signal. The default value of the parameter # being :math:`1`, it explains the high frequency observed in the predictions of # our model. # Similar conclusions could be drawn with the length-scale parameter. 
Thus, it # tells us that the kernel parameters need to be tuned. We will use a randomized # search to tune the different parameters the kernel ridge model: the `alpha` # parameter and the kernel parameters. # %% from scipy.stats import loguniform from sklearn.model_selection import RandomizedSearchCV param_distributions = { "alpha": loguniform(1e0, 1e3), "kernel__length_scale": loguniform(1e-2, 1e2), "kernel__periodicity": loguniform(1e0, 1e1), } kernel_ridge_tuned = RandomizedSearchCV( kernel_ridge, param_distributions=param_distributions, n_iter=500, random_state=0, ) start_time = time.time() kernel_ridge_tuned.fit(training_data, training_noisy_target) print(f"Time for KernelRidge fitting: {time.time() - start_time:.3f} seconds") # %% # Fitting the model is now more computationally expensive since we have to try # several combinations of hyperparameters. We can have a look at the # hyperparameters found to get some intuitions. kernel_ridge_tuned.best_params_ # %% # Looking at the best parameters, we see that they are different from the # defaults. We also see that the periodicity is closer to the expected value: # :math:`2 \pi`. We can now inspect the predictions of our tuned kernel ridge. start_time = time.time() predictions_kr = kernel_ridge_tuned.predict(data) print(f"Time for KernelRidge predict: {time.time() - start_time:.3f} seconds") # %% plt.plot(data, target, label="True signal", linewidth=2, linestyle="dashed") plt.scatter( training_data, training_noisy_target, color="black", label="Noisy measurements", ) plt.plot( data, predictions_kr, label="Kernel ridge", linewidth=2, linestyle="dashdot", ) plt.legend(loc="lower right") plt.xlabel("data") plt.ylabel("target") _ = plt.title( "Kernel ridge regression with an exponential sine squared\n " "kernel using tuned hyperparameters" ) # %% # We get a much more accurate model. We still observe some errors mainly due to # the noise added to the dataset. # # Gaussian process regression # ........................... 
# # Now, we will use a # :class:`~sklearn.gaussian_process.GaussianProcessRegressor` to fit the same # dataset. When training a Gaussian process, the hyperparameters of the kernel # are optimized during the fitting process. There is no need for an external # hyperparameter search. Here, we create a slightly more complex kernel than # for the kernel ridge regressor: we add a # :class:`~sklearn.gaussian_process.kernels.WhiteKernel` that is used to # estimate the noise in the dataset. from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.gaussian_process.kernels import WhiteKernel kernel = 1.0 * ExpSineSquared(1.0, 5.0, periodicity_bounds=(1e-2, 1e1)) + WhiteKernel( 1e-1 ) gaussian_process = GaussianProcessRegressor(kernel=kernel) start_time = time.time() gaussian_process.fit(training_data, training_noisy_target) print( f"Time for GaussianProcessRegressor fitting: {time.time() - start_time:.3f} seconds" ) # %% # The computation cost of training a Gaussian process is much less than the # kernel ridge that uses a randomized search. We can check the parameters of # the kernels that we computed. gaussian_process.kernel_ # %% # Indeed, we see that the parameters have been optimized. Looking at the # `periodicity` parameter, we see that we found a period close to the # theoretical value :math:`2 \pi`. We can have a look now at the predictions of # our model. 
start_time = time.time() mean_predictions_gpr, std_predictions_gpr = gaussian_process.predict( data, return_std=True, ) print( f"Time for GaussianProcessRegressor predict: {time.time() - start_time:.3f} seconds" ) # %% plt.plot(data, target, label="True signal", linewidth=2, linestyle="dashed") plt.scatter( training_data, training_noisy_target, color="black", label="Noisy measurements", ) # Plot the predictions of the kernel ridge plt.plot( data, predictions_kr, label="Kernel ridge", linewidth=2, linestyle="dashdot", ) # Plot the predictions of the gaussian process regressor plt.plot( data, mean_predictions_gpr, label="Gaussian process regressor", linewidth=2, linestyle="dotted", ) plt.fill_between( data.ravel(), mean_predictions_gpr - std_predictions_gpr, mean_predictions_gpr + std_predictions_gpr, color="tab:green", alpha=0.2, ) plt.legend(loc="lower right") plt.xlabel("data") plt.ylabel("target") _ = plt.title("Comparison between kernel ridge and gaussian process regressor") # %% # We observe that the results of the kernel ridge and the Gaussian process # regressor are close. However, the Gaussian process regressor also provide # an uncertainty information that is not available with a kernel ridge. # Due to the probabilistic formulation of the target functions, the # Gaussian process can output the standard deviation (or the covariance) # together with the mean predictions of the target functions. # # However, it comes at a cost: the time to compute the predictions is higher # with a Gaussian process. # # Final conclusion # ---------------- # # We can give a final word regarding the possibility of the two models to # extrapolate. Indeed, we only provided the beginning of the signal as a # training set. Using a periodic kernel forces our model to repeat the pattern # found on the training set. Using this kernel information together with the # capacity of the both models to extrapolate, we observe that the models will # continue to predict the sine pattern. 
# # Gaussian process allows to combine kernels together. Thus, we could associate # the exponential sine squared kernel together with a radial basis function # kernel. from sklearn.gaussian_process.kernels import RBF kernel = 1.0 * ExpSineSquared(1.0, 5.0, periodicity_bounds=(1e-2, 1e1)) * RBF( length_scale=15, length_scale_bounds="fixed" ) + WhiteKernel(1e-1) gaussian_process = GaussianProcessRegressor(kernel=kernel) gaussian_process.fit(training_data, training_noisy_target) mean_predictions_gpr, std_predictions_gpr = gaussian_process.predict( data, return_std=True, ) # %% plt.plot(data, target, label="True signal", linewidth=2, linestyle="dashed") plt.scatter( training_data, training_noisy_target, color="black", label="Noisy measurements", ) # Plot the predictions of the kernel ridge plt.plot( data, predictions_kr, label="Kernel ridge", linewidth=2, linestyle="dashdot", ) # Plot the predictions of the gaussian process regressor plt.plot( data, mean_predictions_gpr, label="Gaussian process regressor", linewidth=2, linestyle="dotted", ) plt.fill_between( data.ravel(), mean_predictions_gpr - std_predictions_gpr, mean_predictions_gpr + std_predictions_gpr, color="tab:green", alpha=0.2, ) plt.legend(loc="lower right") plt.xlabel("data") plt.ylabel("target") _ = plt.title("Effect of using a radial basis function kernel") # %% # The effect of using a radial basis function kernel will attenuate the # periodicity effect once that no sample are available in the training. # As testing samples get further away from the training ones, predictions # are converging towards their mean and their standard deviation # also increases.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/gaussian_process/plot_gpr_co2.py
examples/gaussian_process/plot_gpr_co2.py
""" ==================================================================================== Forecasting of CO2 level on Mona Loa dataset using Gaussian process regression (GPR) ==================================================================================== This example is based on Section 5.4.3 of "Gaussian Processes for Machine Learning" [1]_. It illustrates an example of complex kernel engineering and hyperparameter optimization using gradient ascent on the log-marginal-likelihood. The data consists of the monthly average atmospheric CO2 concentrations (in parts per million by volume (ppm)) collected at the Mauna Loa Observatory in Hawaii, between 1958 and 2001. The objective is to model the CO2 concentration as a function of the time :math:`t` and extrapolate for years after 2001. .. rubric:: References .. [1] `Rasmussen, Carl Edward. "Gaussian processes in machine learning." Summer school on machine learning. Springer, Berlin, Heidelberg, 2003 <http://www.gaussianprocess.org/gpml/chapters/RW.pdf>`_. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Build the dataset # ----------------- # # We will derive a dataset from the Mauna Loa Observatory that collected air # samples. We are interested in estimating the concentration of CO2 and # extrapolate it for further years. First, we load the original dataset available # in OpenML as a pandas dataframe. This will be replaced with Polars # once `fetch_openml` adds a native support for it. from sklearn.datasets import fetch_openml co2 = fetch_openml(data_id=41187, as_frame=True) co2.frame.head() # %% # First, we process the original dataframe to create a date column and select # it along with the CO2 column. 
import polars as pl co2_data = pl.DataFrame(co2.frame[["year", "month", "day", "co2"]]).select( pl.date("year", "month", "day"), "co2" ) co2_data.head() # %% co2_data["date"].min(), co2_data["date"].max() # %% # We see that we get CO2 concentration for some days from March, 1958 to # December, 2001. We can plot the raw information to have a better # understanding. import matplotlib.pyplot as plt plt.plot(co2_data["date"], co2_data["co2"]) plt.xlabel("date") plt.ylabel("CO$_2$ concentration (ppm)") _ = plt.title("Raw air samples measurements from the Mauna Loa Observatory") # %% # We will preprocess the dataset by taking a monthly average and drop months # for which no measurements were collected. Such a processing will have a # smoothing effect on the data. co2_data = ( co2_data.sort(by="date") .group_by_dynamic("date", every="1mo") .agg(pl.col("co2").mean()) .drop_nulls() ) plt.plot(co2_data["date"], co2_data["co2"]) plt.xlabel("date") plt.ylabel("Monthly average of CO$_2$ concentration (ppm)") _ = plt.title( "Monthly average of air samples measurements\nfrom the Mauna Loa Observatory" ) # %% # The idea in this example will be to predict the CO2 concentration in function # of the date. We are as well interested in extrapolating for upcoming year # after 2001. # # As a first step, we will divide the data and the target to estimate. The data # being a date, we will convert it into a numeric. X = co2_data.select( pl.col("date").dt.year() + pl.col("date").dt.month() / 12 ).to_numpy() y = co2_data["co2"].to_numpy() # %% # Design the proper kernel # ------------------------ # # To design the kernel to use with our Gaussian process, we can make some # assumption regarding the data at hand. We observe that they have several # characteristics: we see a long term rising trend, a pronounced seasonal # variation and some smaller irregularities. We can use different appropriate # kernel that would capture these features. 
# # First, the long term rising trend could be fitted using a radial basis # function (RBF) kernel with a large length-scale parameter. The RBF kernel # with a large length-scale enforces this component to be smooth. A trending # increase is not enforced as to give a degree of freedom to our model. The # specific length-scale and the amplitude are free hyperparameters. from sklearn.gaussian_process.kernels import RBF long_term_trend_kernel = 50.0**2 * RBF(length_scale=50.0) # %% # The seasonal variation is explained by the periodic exponential sine squared # kernel with a fixed periodicity of 1 year. The length-scale of this periodic # component, controlling its smoothness, is a free parameter. In order to allow # decaying away from exact periodicity, the product with an RBF kernel is # taken. The length-scale of this RBF component controls the decay time and is # a further free parameter. This type of kernel is also known as locally # periodic kernel. from sklearn.gaussian_process.kernels import ExpSineSquared seasonal_kernel = ( 2.0**2 * RBF(length_scale=100.0) * ExpSineSquared(length_scale=1.0, periodicity=1.0, periodicity_bounds="fixed") ) # %% # The small irregularities are to be explained by a rational quadratic kernel # component, whose length-scale and alpha parameter, which quantifies the # diffuseness of the length-scales, are to be determined. A rational quadratic # kernel is equivalent to an RBF kernel with several length-scale and will # better accommodate the different irregularities. from sklearn.gaussian_process.kernels import RationalQuadratic irregularities_kernel = 0.5**2 * RationalQuadratic(length_scale=1.0, alpha=1.0) # %% # Finally, the noise in the dataset can be accounted with a kernel consisting # of an RBF kernel contribution, which shall explain the correlated noise # components such as local weather phenomena, and a white kernel contribution # for the white noise. 
The relative amplitudes and the RBF's length scale are # further free parameters. from sklearn.gaussian_process.kernels import WhiteKernel noise_kernel = 0.1**2 * RBF(length_scale=0.1) + WhiteKernel( noise_level=0.1**2, noise_level_bounds=(1e-5, 1e5) ) # %% # Thus, our final kernel is an addition of all previous kernel. co2_kernel = ( long_term_trend_kernel + seasonal_kernel + irregularities_kernel + noise_kernel ) co2_kernel # %% # Model fitting and extrapolation # ------------------------------- # # Now, we are ready to use a Gaussian process regressor and fit the available # data. To follow the example from the literature, we will subtract the mean # from the target. We could have used `normalize_y=True`. However, doing so # would have also scaled the target (dividing `y` by its standard deviation). # Thus, the hyperparameters of the different kernel would have had different # meaning since they would not have been expressed in ppm. from sklearn.gaussian_process import GaussianProcessRegressor y_mean = y.mean() gaussian_process = GaussianProcessRegressor(kernel=co2_kernel, normalize_y=False) gaussian_process.fit(X, y - y_mean) # %% # Now, we will use the Gaussian process to predict on: # # - training data to inspect the goodness of fit; # - future data to see the extrapolation done by the model. # # Thus, we create synthetic data from 1958 to the current month. In addition, # we need to add the subtracted mean computed during training. 
import datetime import numpy as np today = datetime.datetime.now() current_month = today.year + today.month / 12 X_test = np.linspace(start=1958, stop=current_month, num=1_000).reshape(-1, 1) mean_y_pred, std_y_pred = gaussian_process.predict(X_test, return_std=True) mean_y_pred += y_mean # %% plt.plot(X, y, color="black", linestyle="dashed", label="Measurements") plt.plot(X_test, mean_y_pred, color="tab:blue", alpha=0.4, label="Gaussian process") plt.fill_between( X_test.ravel(), mean_y_pred - std_y_pred, mean_y_pred + std_y_pred, color="tab:blue", alpha=0.2, ) plt.legend() plt.xlabel("Year") plt.ylabel("Monthly average of CO$_2$ concentration (ppm)") _ = plt.title( "Monthly average of air samples measurements\nfrom the Mauna Loa Observatory" ) # %% # Our fitted model is capable to fit previous data properly and extrapolate to # future year with confidence. # # Interpretation of kernel hyperparameters # ---------------------------------------- # # Now, we can have a look at the hyperparameters of the kernel. gaussian_process.kernel_ # %% # Thus, most of the target signal, with the mean subtracted, is explained by a # long-term rising trend for ~45 ppm and a length-scale of ~52 years. The # periodic component has an amplitude of ~2.6ppm, a decay time of ~90 years and # a length-scale of ~1.5. The long decay time indicates that we have a # component very close to a seasonal periodicity. The correlated noise has an # amplitude of ~0.2 ppm with a length scale of ~0.12 years and a white-noise # contribution of ~0.04 ppm. Thus, the overall noise level is very small, # indicating that the data can be very well explained by the model.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/gaussian_process/plot_gpr_noisy.py
examples/gaussian_process/plot_gpr_noisy.py
""" ========================================================================= Ability of Gaussian process regression (GPR) to estimate data noise-level ========================================================================= This example shows the ability of the :class:`~sklearn.gaussian_process.kernels.WhiteKernel` to estimate the noise level in the data. Moreover, we show the importance of kernel hyperparameters initialization. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Data generation # --------------- # # We will work in a setting where `X` will contain a single feature. We create a # function that will generate the target to be predicted. We will add an # option to add some noise to the generated target. import numpy as np def target_generator(X, add_noise=False): target = 0.5 + np.sin(3 * X) if add_noise: rng = np.random.RandomState(1) target += rng.normal(0, 0.3, size=target.shape) return target.squeeze() # %% # Let's have a look to the target generator where we will not add any noise to # observe the signal that we would like to predict. X = np.linspace(0, 5, num=80).reshape(-1, 1) y = target_generator(X, add_noise=False) # %% import matplotlib.pyplot as plt plt.plot(X, y, label="Expected signal") plt.legend() plt.xlabel("X") _ = plt.ylabel("y") # %% # The target is transforming the input `X` using a sine function. Now, we will # generate few noisy training samples. To illustrate the noise level, we will # plot the true signal together with the noisy training samples. 
rng = np.random.RandomState(0) X_train = rng.uniform(0, 5, size=20).reshape(-1, 1) y_train = target_generator(X_train, add_noise=True) # %% plt.plot(X, y, label="Expected signal") plt.scatter( x=X_train[:, 0], y=y_train, color="black", alpha=0.4, label="Observations", ) plt.legend() plt.xlabel("X") _ = plt.ylabel("y") # %% # Optimisation of kernel hyperparameters in GPR # --------------------------------------------- # # Now, we will create a # :class:`~sklearn.gaussian_process.GaussianProcessRegressor` # using an additive kernel adding a # :class:`~sklearn.gaussian_process.kernels.RBF` and # :class:`~sklearn.gaussian_process.kernels.WhiteKernel` kernels. # The :class:`~sklearn.gaussian_process.kernels.WhiteKernel` is a kernel that # will able to estimate the amount of noise present in the data while the # :class:`~sklearn.gaussian_process.kernels.RBF` will serve at fitting the # non-linearity between the data and the target. # # However, we will show that the hyperparameter space contains several local # minima. It will highlights the importance of initial hyperparameter values. # # We will create a model using a kernel with a high noise level and a large # length scale, which will explain all variations in the data by noise. 
from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.gaussian_process.kernels import RBF, WhiteKernel kernel = 1.0 * RBF(length_scale=1e1, length_scale_bounds=(1e-2, 1e3)) + WhiteKernel( noise_level=1, noise_level_bounds=(1e-10, 1e1) ) gpr = GaussianProcessRegressor(kernel=kernel, alpha=0.0) gpr.fit(X_train, y_train) y_mean, y_std = gpr.predict(X, return_std=True) # %% plt.plot(X, y, label="Expected signal") plt.scatter(x=X_train[:, 0], y=y_train, color="black", alpha=0.4, label="Observations") plt.errorbar(X, y_mean, y_std, label="Posterior mean ± std") plt.legend() plt.xlabel("X") plt.ylabel("y") _ = plt.title( ( f"Initial: {kernel}\nOptimum: {gpr.kernel_}\nLog-Marginal-Likelihood: " f"{gpr.log_marginal_likelihood(gpr.kernel_.theta)}" ), fontsize=8, ) # %% # We see that the optimum kernel found still has a high noise level and an even # larger length scale. The length scale reaches the maximum bound that we # allowed for this parameter and we got a warning as a result. # # More importantly, we observe that the model does not provide useful # predictions: the mean prediction seems to be constant: it does not follow the # expected noise-free signal. # # Now, we will initialize the :class:`~sklearn.gaussian_process.kernels.RBF` # with a larger `length_scale` initial value and the # :class:`~sklearn.gaussian_process.kernels.WhiteKernel` with a smaller initial # noise level lower while keeping the parameter bounds unchanged. 
kernel = 1.0 * RBF(length_scale=1e-1, length_scale_bounds=(1e-2, 1e3)) + WhiteKernel( noise_level=1e-2, noise_level_bounds=(1e-10, 1e1) ) gpr = GaussianProcessRegressor(kernel=kernel, alpha=0.0) gpr.fit(X_train, y_train) y_mean, y_std = gpr.predict(X, return_std=True) # %% plt.plot(X, y, label="Expected signal") plt.scatter(x=X_train[:, 0], y=y_train, color="black", alpha=0.4, label="Observations") plt.errorbar(X, y_mean, y_std, label="Posterior mean ± std") plt.legend() plt.xlabel("X") plt.ylabel("y") _ = plt.title( ( f"Initial: {kernel}\nOptimum: {gpr.kernel_}\nLog-Marginal-Likelihood: " f"{gpr.log_marginal_likelihood(gpr.kernel_.theta)}" ), fontsize=8, ) # %% # First, we see that the model's predictions are more precise than the # previous model's: this new model is able to estimate the noise-free # functional relationship. # # Looking at the kernel hyperparameters, we see that the best combination found # has a smaller noise level and shorter length scale than the first model. # # We can inspect the negative Log-Marginal-Likelihood (LML) of # :class:`~sklearn.gaussian_process.GaussianProcessRegressor` # for different hyperparameters to get a sense of the local minima. 
from matplotlib.colors import LogNorm length_scale = np.logspace(-2, 4, num=80) noise_level = np.logspace(-2, 1, num=80) length_scale_grid, noise_level_grid = np.meshgrid(length_scale, noise_level) log_marginal_likelihood = [ gpr.log_marginal_likelihood(theta=np.log([0.36, scale, noise])) for scale, noise in zip(length_scale_grid.ravel(), noise_level_grid.ravel()) ] log_marginal_likelihood = np.reshape(log_marginal_likelihood, noise_level_grid.shape) # %% vmin, vmax = (-log_marginal_likelihood).min(), 50 level = np.around(np.logspace(np.log10(vmin), np.log10(vmax), num=20), decimals=1) plt.contour( length_scale_grid, noise_level_grid, -log_marginal_likelihood, levels=level, norm=LogNorm(vmin=vmin, vmax=vmax), ) plt.colorbar() plt.xscale("log") plt.yscale("log") plt.xlabel("Length-scale") plt.ylabel("Noise-level") plt.title("Negative log-marginal-likelihood") plt.show() # %% # # We see that there are two local minima that correspond to the combination of # hyperparameters previously found. Depending on the initial values for the # hyperparameters, the gradient-based optimization might or might not # converge to the best model. It is thus important to repeat the optimization # several times for different initializations. This can be done by setting the # `n_restarts_optimizer` parameter of the # :class:`~sklearn.gaussian_process.GaussianProcessRegressor` class. # # Let's try again to fit our model with the bad initial values but this time # with 10 random restarts. 
kernel = 1.0 * RBF(length_scale=1e1, length_scale_bounds=(1e-2, 1e3)) + WhiteKernel( noise_level=1, noise_level_bounds=(1e-10, 1e1) ) gpr = GaussianProcessRegressor( kernel=kernel, alpha=0.0, n_restarts_optimizer=10, random_state=0 ) gpr.fit(X_train, y_train) y_mean, y_std = gpr.predict(X, return_std=True) # %% plt.plot(X, y, label="Expected signal") plt.scatter(x=X_train[:, 0], y=y_train, color="black", alpha=0.4, label="Observations") plt.errorbar(X, y_mean, y_std, label="Posterior mean ± std") plt.legend() plt.xlabel("X") plt.ylabel("y") _ = plt.title( ( f"Initial: {kernel}\nOptimum: {gpr.kernel_}\nLog-Marginal-Likelihood: " f"{gpr.log_marginal_likelihood(gpr.kernel_.theta)}" ), fontsize=8, ) # %% # # As we hoped, random restarts allow the optimization to find the best set # of hyperparameters despite the bad initial values.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/gaussian_process/plot_gpr_on_structured_data.py
examples/gaussian_process/plot_gpr_on_structured_data.py
""" ========================================================================== Gaussian processes on discrete data structures ========================================================================== This example illustrates the use of Gaussian processes for regression and classification tasks on data that are not in fixed-length feature vector form. This is achieved through the use of kernel functions that operates directly on discrete structures such as variable-length sequences, trees, and graphs. Specifically, here the input variables are some gene sequences stored as variable-length strings consisting of letters 'A', 'T', 'C', and 'G', while the output variables are floating point numbers and True/False labels in the regression and classification tasks, respectively. A kernel between the gene sequences is defined using R-convolution [1]_ by integrating a binary letter-wise kernel over all pairs of letters among a pair of strings. This example will generate three figures. In the first figure, we visualize the value of the kernel, i.e. the similarity of the sequences, using a colormap. Brighter color here indicates higher similarity. In the second figure, we show some regression result on a dataset of 6 sequences. Here we use the 1st, 2nd, 4th, and 5th sequences as the training set to make predictions on the 3rd and 6th sequences. In the third figure, we demonstrate a classification model by training on 6 sequences and make predictions on another 5 sequences. The ground truth here is simply whether there is at least one 'A' in the sequence. Here the model makes four correct classifications and fails on one. .. [1] Haussler, D. (1999). Convolution kernels on discrete structures (Vol. 646). Technical report, Department of Computer Science, University of California at Santa Cruz. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% import numpy as np from sklearn.base import clone from sklearn.gaussian_process import GaussianProcessClassifier, GaussianProcessRegressor from sklearn.gaussian_process.kernels import GenericKernelMixin, Hyperparameter, Kernel class SequenceKernel(GenericKernelMixin, Kernel): """ A minimal (but valid) convolutional kernel for sequences of variable lengths.""" def __init__(self, baseline_similarity=0.5, baseline_similarity_bounds=(1e-5, 1)): self.baseline_similarity = baseline_similarity self.baseline_similarity_bounds = baseline_similarity_bounds @property def hyperparameter_baseline_similarity(self): return Hyperparameter( "baseline_similarity", "numeric", self.baseline_similarity_bounds ) def _f(self, s1, s2): """ kernel value between a pair of sequences """ return sum( [1.0 if c1 == c2 else self.baseline_similarity for c1 in s1 for c2 in s2] ) def _g(self, s1, s2): """ kernel derivative between a pair of sequences """ return sum([0.0 if c1 == c2 else 1.0 for c1 in s1 for c2 in s2]) def __call__(self, X, Y=None, eval_gradient=False): if Y is None: Y = X if eval_gradient: return ( np.array([[self._f(x, y) for y in Y] for x in X]), np.array([[[self._g(x, y)] for y in Y] for x in X]), ) else: return np.array([[self._f(x, y) for y in Y] for x in X]) def diag(self, X): return np.array([self._f(x, x) for x in X]) def is_stationary(self): return False def clone_with_theta(self, theta): cloned = clone(self) cloned.theta = theta return cloned kernel = SequenceKernel() # %% # Sequence similarity matrix under the kernel # =========================================== import matplotlib.pyplot as plt X = np.array(["AGCT", "AGC", "AACT", "TAA", "AAA", "GAACA"]) K = kernel(X) D = kernel.diag(X) plt.figure(figsize=(8, 5)) plt.imshow(np.diag(D**-0.5).dot(K).dot(np.diag(D**-0.5))) plt.xticks(np.arange(len(X)), X) plt.yticks(np.arange(len(X)), X) plt.title("Sequence similarity under the kernel") 
plt.show() # %% # Regression # ========== X = np.array(["AGCT", "AGC", "AACT", "TAA", "AAA", "GAACA"]) Y = np.array([1.0, 1.0, 2.0, 2.0, 3.0, 3.0]) training_idx = [0, 1, 3, 4] gp = GaussianProcessRegressor(kernel=kernel) gp.fit(X[training_idx], Y[training_idx]) plt.figure(figsize=(8, 5)) plt.bar(np.arange(len(X)), gp.predict(X), color="b", label="prediction") plt.bar(training_idx, Y[training_idx], width=0.2, color="r", alpha=1, label="training") plt.xticks(np.arange(len(X)), X) plt.title("Regression on sequences") plt.legend() plt.show() # %% # Classification # ============== X_train = np.array(["AGCT", "CGA", "TAAC", "TCG", "CTTT", "TGCT"]) # whether there are 'A's in the sequence Y_train = np.array([True, True, True, False, False, False]) gp = GaussianProcessClassifier(kernel) gp.fit(X_train, Y_train) X_test = ["AAA", "ATAG", "CTC", "CT", "C"] Y_test = [True, True, False, False, False] plt.figure(figsize=(8, 5)) plt.scatter( np.arange(len(X_train)), [1.0 if c else -1.0 for c in Y_train], s=100, marker="o", edgecolor="none", facecolor=(1, 0.75, 0), label="training", ) plt.scatter( len(X_train) + np.arange(len(X_test)), [1.0 if c else -1.0 for c in Y_test], s=100, marker="o", edgecolor="none", facecolor="r", label="truth", ) plt.scatter( len(X_train) + np.arange(len(X_test)), [1.0 if c else -1.0 for c in gp.predict(X_test)], s=100, marker="x", facecolor="b", linewidth=2, label="prediction", ) plt.xticks(np.arange(len(X_train) + len(X_test)), np.concatenate((X_train, X_test))) plt.yticks([-1, 1], [False, True]) plt.title("Classification on sequences") plt.legend() plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/gaussian_process/plot_gpc_isoprobability.py
examples/gaussian_process/plot_gpc_isoprobability.py
""" ================================================================= Iso-probability lines for Gaussian Processes classification (GPC) ================================================================= A two-dimensional classification example showing iso-probability lines for the predicted probabilities. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import numpy as np from matplotlib import cm from matplotlib import pyplot as plt from sklearn.gaussian_process import GaussianProcessClassifier from sklearn.gaussian_process.kernels import ConstantKernel as C from sklearn.gaussian_process.kernels import DotProduct # A few constants lim = 8 def g(x): """The function to predict (classification will then consist in predicting whether g(x) <= 0 or not)""" return 5.0 - x[:, 1] - 0.5 * x[:, 0] ** 2.0 # Design of experiments X = np.array( [ [-4.61611719, -6.00099547], [4.10469096, 5.32782448], [0.00000000, -0.50000000], [-6.17289014, -4.6984743], [1.3109306, -6.93271427], [-5.03823144, 3.10584743], [-2.87600388, 6.74310541], [5.21301203, 4.26386883], ] ) # Observations y = np.array(g(X) > 0, dtype=int) # Instantiate and fit Gaussian Process Model kernel = C(0.1, (1e-5, np.inf)) * DotProduct(sigma_0=0.1) ** 2 gp = GaussianProcessClassifier(kernel=kernel) gp.fit(X, y) print("Learned kernel: %s " % gp.kernel_) # Evaluate real function and the predicted probability res = 50 x1, x2 = np.meshgrid(np.linspace(-lim, lim, res), np.linspace(-lim, lim, res)) xx = np.vstack([x1.reshape(x1.size), x2.reshape(x2.size)]).T y_true = g(xx) y_prob = gp.predict_proba(xx)[:, 1] y_true = y_true.reshape((res, res)) y_prob = y_prob.reshape((res, res)) # Plot the probabilistic classification iso-values fig = plt.figure(1) ax = fig.gca() ax.axes.set_aspect("equal") plt.xticks([]) plt.yticks([]) ax.set_xticklabels([]) ax.set_yticklabels([]) plt.xlabel("$x_1$") plt.ylabel("$x_2$") cax = plt.imshow(y_prob, cmap=cm.gray_r, alpha=0.8, extent=(-lim, lim, -lim, lim)) norm 
= plt.matplotlib.colors.Normalize(vmin=0.0, vmax=0.9) cb = plt.colorbar(cax, ticks=[0.0, 0.2, 0.4, 0.6, 0.8, 1.0], norm=norm) cb.set_label(r"${\rm \mathbb{P}}\left[\widehat{G}(\mathbf{x}) \leq 0\right]$") plt.clim(0, 1) plt.plot(X[y <= 0, 0], X[y <= 0, 1], "r.", markersize=12) plt.plot(X[y > 0, 0], X[y > 0, 1], "b.", markersize=12) plt.contour(x1, x2, y_true, [0.0], colors="k", linestyles="dashdot") cs = plt.contour(x1, x2, y_prob, [0.666], colors="b", linestyles="solid") plt.clabel(cs, fontsize=11) cs = plt.contour(x1, x2, y_prob, [0.5], colors="k", linestyles="dashed") plt.clabel(cs, fontsize=11) cs = plt.contour(x1, x2, y_prob, [0.334], colors="r", linestyles="solid") plt.clabel(cs, fontsize=11) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/model_selection/plot_likelihood_ratios.py
examples/model_selection/plot_likelihood_ratios.py
""" ============================================================= Class Likelihood Ratios to measure classification performance ============================================================= This example demonstrates the :func:`~sklearn.metrics.class_likelihood_ratios` function, which computes the positive and negative likelihood ratios (`LR+`, `LR-`) to assess the predictive power of a binary classifier. As we will see, these metrics are independent of the proportion between classes in the test set, which makes them very useful when the available data for a study has a different class proportion than the target application. A typical use is a case-control study in medicine, which has nearly balanced classes while the general population has large class imbalance. In such application, the pre-test probability of an individual having the target condition can be chosen to be the prevalence, i.e. the proportion of a particular population found to be affected by a medical condition. The post-test probabilities represent then the probability that the condition is truly present given a positive test result. In this example we first discuss the link between pre-test and post-test odds given by the :ref:`class_likelihood_ratios`. Then we evaluate their behavior in some controlled scenarios. In the last section we plot them as a function of the prevalence of the positive class. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Pre-test vs. post-test analysis # =============================== # # Suppose we have a population of subjects with physiological measurements `X` # that can hopefully serve as indirect bio-markers of the disease and actual # disease indicators `y` (ground truth). 
Most of the people in the population do # not carry the disease but a minority (in this case around 10%) does: from sklearn.datasets import make_classification X, y = make_classification(n_samples=10_000, weights=[0.9, 0.1], random_state=0) print(f"Percentage of people carrying the disease: {100 * y.mean():.2f}%") # %% # A machine learning model is built to diagnose if a person with some given # physiological measurements is likely to carry the disease of interest. To # evaluate the model, we need to assess its performance on a held-out test set: from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) # %% # Then we can fit our diagnosis model and compute the positive likelihood # ratio to evaluate the usefulness of this classifier as a disease diagnosis # tool: from sklearn.linear_model import LogisticRegression from sklearn.metrics import class_likelihood_ratios estimator = LogisticRegression().fit(X_train, y_train) y_pred = estimator.predict(X_test) pos_LR, neg_LR = class_likelihood_ratios(y_test, y_pred, replace_undefined_by=1.0) print(f"LR+: {pos_LR:.3f}") # %% # Since the positive class likelihood ratio is much larger than 1.0, it means # that the machine learning-based diagnosis tool is useful: the post-test odds # that the condition is truly present given a positive test result are more than # 12 times larger than the pre-test odds. # # Cross-validation of likelihood ratios # ===================================== # # We assess the variability of the measurements for the class likelihood ratios # in some particular cases. 
import pandas as pd def scoring(estimator, X, y): y_pred = estimator.predict(X) pos_lr, neg_lr = class_likelihood_ratios(y, y_pred, replace_undefined_by=1.0) return {"positive_likelihood_ratio": pos_lr, "negative_likelihood_ratio": neg_lr} def extract_score(cv_results): lr = pd.DataFrame( { "positive": cv_results["test_positive_likelihood_ratio"], "negative": cv_results["test_negative_likelihood_ratio"], } ) return lr.aggregate(["mean", "std"]) # %% # We first validate the :class:`~sklearn.linear_model.LogisticRegression` model # with default hyperparameters as used in the previous section. from sklearn.model_selection import cross_validate estimator = LogisticRegression() extract_score(cross_validate(estimator, X, y, scoring=scoring, cv=10)) # %% # We confirm that the model is useful: the post-test odds are between 12 and 20 # times larger than the pre-test odds. # # On the contrary, let's consider a dummy model that will output random # predictions with similar odds as the average disease prevalence in the # training set: from sklearn.dummy import DummyClassifier estimator = DummyClassifier(strategy="stratified", random_state=1234) extract_score(cross_validate(estimator, X, y, scoring=scoring, cv=10)) # %% # Here both class likelihood ratios are compatible with 1.0 which makes this # classifier useless as a diagnostic tool to improve disease detection. # # Another option for the dummy model is to always predict the most frequent # class, which in this case is "no-disease". estimator = DummyClassifier(strategy="most_frequent") extract_score(cross_validate(estimator, X, y, scoring=scoring, cv=10)) # %% # The absence of positive predictions means there will be no true positives nor # false positives, leading to an undefined `LR+` that by no means should be # interpreted as an infinite `LR+` (the classifier perfectly identifying # positive cases). 
In such situation the # :func:`~sklearn.metrics.class_likelihood_ratios` function returns `nan` and # raises a warning by default. Indeed, the value of `LR-` helps us discard this # model. # # A similar scenario may arise when cross-validating highly imbalanced data with # few samples: some folds will have no samples with the disease and therefore # they will output no true positives nor false negatives when used for testing. # Mathematically this leads to an infinite `LR+`, which should also not be # interpreted as the model perfectly identifying positive cases. Such event # leads to a higher variance of the estimated likelihood ratios, but can still # be interpreted as an increment of the post-test odds of having the condition. estimator = LogisticRegression() X, y = make_classification(n_samples=300, weights=[0.9, 0.1], random_state=0) extract_score(cross_validate(estimator, X, y, scoring=scoring, cv=10)) # %% # Invariance with respect to prevalence # ===================================== # # The likelihood ratios are independent of the disease prevalence and can be # extrapolated between populations regardless of any possible class imbalance, # **as long as the same model is applied to all of them**. Notice that in the # plots below **the decision boundary is constant** (see # :ref:`sphx_glr_auto_examples_svm_plot_separating_hyperplane_unbalanced.py` for # a study of the boundary decision for unbalanced classes). # # Here we train a :class:`~sklearn.linear_model.LogisticRegression` base model # on a case-control study with a prevalence of 50%. It is then evaluated over # populations with varying prevalence. We use the # :func:`~sklearn.datasets.make_classification` function to ensure the # data-generating process is always the same as shown in the plots below. The # label `1` corresponds to the positive class "disease", whereas the label `0` # stands for "no-disease". 
from collections import defaultdict import matplotlib.pyplot as plt import numpy as np from sklearn.inspection import DecisionBoundaryDisplay populations = defaultdict(list) common_params = { "n_samples": 10_000, "n_features": 2, "n_informative": 2, "n_redundant": 0, "random_state": 0, } weights = np.linspace(0.1, 0.8, 6) weights = weights[::-1] # fit and evaluate base model on balanced classes X, y = make_classification(**common_params, weights=[0.5, 0.5]) estimator = LogisticRegression().fit(X, y) lr_base = extract_score(cross_validate(estimator, X, y, scoring=scoring, cv=10)) pos_lr_base, pos_lr_base_std = lr_base["positive"].values neg_lr_base, neg_lr_base_std = lr_base["negative"].values # %% # We will now show the decision boundary for each level of prevalence. Note that # we only plot a subset of the original data to better assess the linear model # decision boundary. fig, axs = plt.subplots(nrows=3, ncols=2, figsize=(15, 12)) for ax, (n, weight) in zip(axs.ravel(), enumerate(weights)): X, y = make_classification( **common_params, weights=[weight, 1 - weight], ) prevalence = y.mean() populations["prevalence"].append(prevalence) populations["X"].append(X) populations["y"].append(y) # down-sample for plotting rng = np.random.RandomState(1) plot_indices = rng.choice(np.arange(X.shape[0]), size=500, replace=True) X_plot, y_plot = X[plot_indices], y[plot_indices] # plot fixed decision boundary of base model with varying prevalence disp = DecisionBoundaryDisplay.from_estimator( estimator, X_plot, response_method="predict", alpha=0.5, ax=ax, ) scatter = disp.ax_.scatter(X_plot[:, 0], X_plot[:, 1], c=y_plot, edgecolor="k") disp.ax_.set_title(f"prevalence = {y_plot.mean():.2f}") disp.ax_.legend(*scatter.legend_elements()) # %% # We define a function for bootstrapping. 
def scoring_on_bootstrap(estimator, X, y, rng, n_bootstrap=100): results_for_prevalence = defaultdict(list) for _ in range(n_bootstrap): bootstrap_indices = rng.choice( np.arange(X.shape[0]), size=X.shape[0], replace=True ) for key, value in scoring( estimator, X[bootstrap_indices], y[bootstrap_indices] ).items(): results_for_prevalence[key].append(value) return pd.DataFrame(results_for_prevalence) # %% # We score the base model for each prevalence using bootstrapping. results = defaultdict(list) n_bootstrap = 100 rng = np.random.default_rng(seed=0) for prevalence, X, y in zip( populations["prevalence"], populations["X"], populations["y"] ): results_for_prevalence = scoring_on_bootstrap( estimator, X, y, rng, n_bootstrap=n_bootstrap ) results["prevalence"].append(prevalence) results["metrics"].append( results_for_prevalence.aggregate(["mean", "std"]).unstack() ) results = pd.DataFrame(results["metrics"], index=results["prevalence"]) results.index.name = "prevalence" results # %% # In the plots below we observe that the class likelihood ratios re-computed # with different prevalences are indeed constant within one standard deviation # of those computed with on balanced classes. 
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(15, 6)) results["positive_likelihood_ratio"]["mean"].plot( ax=ax1, color="r", label="extrapolation through populations" ) ax1.axhline(y=pos_lr_base + pos_lr_base_std, color="r", linestyle="--") ax1.axhline( y=pos_lr_base - pos_lr_base_std, color="r", linestyle="--", label="base model confidence band", ) ax1.fill_between( results.index, results["positive_likelihood_ratio"]["mean"] - results["positive_likelihood_ratio"]["std"], results["positive_likelihood_ratio"]["mean"] + results["positive_likelihood_ratio"]["std"], color="r", alpha=0.3, ) ax1.set( title="Positive likelihood ratio", ylabel="LR+", ylim=[0, 5], ) ax1.legend(loc="lower right") ax2 = results["negative_likelihood_ratio"]["mean"].plot( ax=ax2, color="b", label="extrapolation through populations" ) ax2.axhline(y=neg_lr_base + neg_lr_base_std, color="b", linestyle="--") ax2.axhline( y=neg_lr_base - neg_lr_base_std, color="b", linestyle="--", label="base model confidence band", ) ax2.fill_between( results.index, results["negative_likelihood_ratio"]["mean"] - results["negative_likelihood_ratio"]["std"], results["negative_likelihood_ratio"]["mean"] + results["negative_likelihood_ratio"]["std"], color="b", alpha=0.3, ) ax2.set( title="Negative likelihood ratio", ylabel="LR-", ylim=[0, 0.5], ) ax2.legend(loc="lower right") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/model_selection/plot_cv_predict.py
examples/model_selection/plot_cv_predict.py
""" ==================================== Plotting Cross-Validated Predictions ==================================== This example shows how to use :func:`~sklearn.model_selection.cross_val_predict` together with :class:`~sklearn.metrics.PredictionErrorDisplay` to visualize prediction errors. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # We will load the diabetes dataset and create an instance of a linear # regression model. from sklearn.datasets import load_diabetes from sklearn.linear_model import LinearRegression X, y = load_diabetes(return_X_y=True) lr = LinearRegression() # %% # :func:`~sklearn.model_selection.cross_val_predict` returns an array of the # same size of `y` where each entry is a prediction obtained by cross # validation. from sklearn.model_selection import cross_val_predict y_pred = cross_val_predict(lr, X, y, cv=10) # %% # Since `cv=10`, it means that we trained 10 models and each model was # used to predict on one of the 10 folds. We can now use the # :class:`~sklearn.metrics.PredictionErrorDisplay` to visualize the # prediction errors. # # On the left axis, we plot the observed values :math:`y` vs. the predicted # values :math:`\hat{y}` given by the models. On the right axis, we plot the # residuals (i.e. the difference between the observed values and the predicted # values) vs. the predicted values. import matplotlib.pyplot as plt from sklearn.metrics import PredictionErrorDisplay fig, axs = plt.subplots(ncols=2, figsize=(8, 4)) PredictionErrorDisplay.from_predictions( y, y_pred=y_pred, kind="actual_vs_predicted", subsample=100, ax=axs[0], random_state=0, ) axs[0].set_title("Actual vs. Predicted values") PredictionErrorDisplay.from_predictions( y, y_pred=y_pred, kind="residual_vs_predicted", subsample=100, ax=axs[1], random_state=0, ) axs[1].set_title("Residuals vs. 
Predicted Values") fig.suptitle("Plotting cross-validated predictions") plt.tight_layout() plt.show() # %% # It is important to note that we used # :func:`~sklearn.model_selection.cross_val_predict` for visualization # purpose only in this example. # # It would be problematic to # quantitatively assess the model performance by computing a single # performance metric from the concatenated predictions returned by # :func:`~sklearn.model_selection.cross_val_predict` # when the different CV folds vary by size and distributions. # # It is recommended to compute per-fold performance metrics using: # :func:`~sklearn.model_selection.cross_val_score` or # :func:`~sklearn.model_selection.cross_validate` instead.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/model_selection/plot_grid_search_stats.py
examples/model_selection/plot_grid_search_stats.py
""" ================================================== Statistical comparison of models using grid search ================================================== This example illustrates how to statistically compare the performance of models trained and evaluated using :class:`~sklearn.model_selection.GridSearchCV`. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # We will start by simulating moon shaped data (where the ideal separation # between classes is non-linear), adding to it a moderate degree of noise. # Datapoints will belong to one of two possible classes to be predicted by two # features. We will simulate 50 samples for each class: import matplotlib.pyplot as plt import seaborn as sns from sklearn.datasets import make_moons X, y = make_moons(noise=0.352, random_state=1, n_samples=100) sns.scatterplot( x=X[:, 0], y=X[:, 1], hue=y, marker="o", s=25, edgecolor="k", legend=False ).set_title("Data") plt.show() # %% # We will compare the performance of :class:`~sklearn.svm.SVC` estimators that # vary on their `kernel` parameter, to decide which choice of this # hyper-parameter predicts our simulated data best. # We will evaluate the performance of the models using # :class:`~sklearn.model_selection.RepeatedStratifiedKFold`, repeating 10 times # a 10-fold stratified cross validation using a different randomization of the # data in each repetition. The performance will be evaluated using # :class:`~sklearn.metrics.roc_auc_score`. 
from sklearn.model_selection import GridSearchCV, RepeatedStratifiedKFold from sklearn.svm import SVC param_grid = [ {"kernel": ["linear"]}, {"kernel": ["poly"], "degree": [2, 3]}, {"kernel": ["rbf"]}, ] svc = SVC(random_state=0) cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=10, random_state=0) search = GridSearchCV(estimator=svc, param_grid=param_grid, scoring="roc_auc", cv=cv) search.fit(X, y) # %% # We can now inspect the results of our search, sorted by their # `mean_test_score`: import pandas as pd results_df = pd.DataFrame(search.cv_results_) results_df = results_df.sort_values(by=["rank_test_score"]) results_df = results_df.set_index( results_df["params"].apply(lambda x: "_".join(str(val) for val in x.values())) ).rename_axis("kernel") results_df[["params", "rank_test_score", "mean_test_score", "std_test_score"]] # %% # We can see that the estimator using the `'rbf'` kernel performed best, # closely followed by `'linear'`. Both estimators with a `'poly'` kernel # performed worse, with the one using a two-degree polynomial achieving a much # lower performance than all other models. # # Usually, the analysis just ends here, but half the story is missing. The # output of :class:`~sklearn.model_selection.GridSearchCV` does not provide # information on the certainty of the differences between the models. # We don't know if these are **statistically** significant. # To evaluate this, we need to conduct a statistical test. # Specifically, to contrast the performance of two models we should # statistically compare their AUC scores. There are 100 samples (AUC # scores) for each model as we repreated 10 times a 10-fold cross-validation. # # However, the scores of the models are not independent: all models are # evaluated on the **same** 100 partitions, increasing the correlation # between the performance of the models. 
# Since some partitions of the data can make the distinction of the classes # particularly easy or hard to find for all models, the models scores will # co-vary. # # Let's inspect this partition effect by plotting the performance of all models # in each fold, and calculating the correlation between models across folds: # create df of model scores ordered by performance model_scores = results_df.filter(regex=r"split\d*_test_score") # plot 30 examples of dependency between cv fold and AUC scores fig, ax = plt.subplots() sns.lineplot( data=model_scores.transpose().iloc[:30], dashes=False, palette="Set1", marker="o", alpha=0.5, ax=ax, ) ax.set_xlabel("CV test fold", size=12, labelpad=10) ax.set_ylabel("Model AUC", size=12) ax.tick_params(bottom=True, labelbottom=False) plt.show() # print correlation of AUC scores across folds print(f"Correlation of models:\n {model_scores.transpose().corr()}") # %% # We can observe that the performance of the models highly depends on the fold. # # As a consequence, if we assume independence between samples we will be # underestimating the variance computed in our statistical tests, increasing # the number of false positive errors (i.e. detecting a significant difference # between models when such does not exist) [1]_. # # Several variance-corrected statistical tests have been developed for these # cases. In this example we will show how to implement one of them (the so # called Nadeau and Bengio's corrected t-test) under two different statistical # frameworks: frequentist and Bayesian. # %% # Comparing two models: frequentist approach # ------------------------------------------ # # We can start by asking: "Is the first model significantly better than the # second model (when ranked by `mean_test_score`)?" # # To answer this question using a frequentist approach we could # run a paired t-test and compute the p-value. This is also known as # Diebold-Mariano test in the forecast literature [5]_. 
# Many variants of such a t-test have been developed to account for the # 'non-independence of samples problem' # described in the previous section. We will use the one proven to obtain the # highest replicability scores (which rate how similar the performance of a # model is when evaluating it on different random partitions of the same # dataset) while maintaining a low rate of false positives and false negatives: # the Nadeau and Bengio's corrected t-test [2]_ that uses a 10 times repeated # 10-fold cross validation [3]_. # # This corrected paired t-test is computed as: # # .. math:: # t=\frac{\frac{1}{k \cdot r}\sum_{i=1}^{k}\sum_{j=1}^{r}x_{ij}} # {\sqrt{(\frac{1}{k \cdot r}+\frac{n_{test}}{n_{train}})\hat{\sigma}^2}} # # where :math:`k` is the number of folds, # :math:`r` the number of repetitions in the cross-validation, # :math:`x` is the difference in performance of the models, # :math:`n_{test}` is the number of samples used for testing, # :math:`n_{train}` is the number of samples used for training, # and :math:`\hat{\sigma}^2` represents the variance of the observed # differences. # # Let's implement a corrected right-tailed paired t-test to evaluate if the # performance of the first model is significantly better than that of the # second model. Our null hypothesis is that the second model performs at least # as good as the first model. import numpy as np from scipy.stats import t def corrected_std(differences, n_train, n_test): """Corrects standard deviation using Nadeau and Bengio's approach. Parameters ---------- differences : ndarray of shape (n_samples,) Vector containing the differences in the score metrics of two models. n_train : int Number of samples in the training set. n_test : int Number of samples in the testing set. Returns ------- corrected_std : float Variance-corrected standard deviation of the set of differences. 
""" # kr = k times r, r times repeated k-fold crossvalidation, # kr equals the number of times the model was evaluated kr = len(differences) corrected_var = np.var(differences, ddof=1) * (1 / kr + n_test / n_train) corrected_std = np.sqrt(corrected_var) return corrected_std def compute_corrected_ttest(differences, df, n_train, n_test): """Computes right-tailed paired t-test with corrected variance. Parameters ---------- differences : array-like of shape (n_samples,) Vector containing the differences in the score metrics of two models. df : int Degrees of freedom. n_train : int Number of samples in the training set. n_test : int Number of samples in the testing set. Returns ------- t_stat : float Variance-corrected t-statistic. p_val : float Variance-corrected p-value. """ mean = np.mean(differences) std = corrected_std(differences, n_train, n_test) t_stat = mean / std p_val = t.sf(np.abs(t_stat), df) # right-tailed t-test return t_stat, p_val # %% model_1_scores = model_scores.iloc[0].values # scores of the best model model_2_scores = model_scores.iloc[1].values # scores of the second-best model differences = model_1_scores - model_2_scores n = differences.shape[0] # number of test sets df = n - 1 n_train = len(next(iter(cv.split(X, y)))[0]) n_test = len(next(iter(cv.split(X, y)))[1]) t_stat, p_val = compute_corrected_ttest(differences, df, n_train, n_test) print(f"Corrected t-value: {t_stat:.3f}\nCorrected p-value: {p_val:.3f}") # %% # We can compare the corrected t- and p-values with the uncorrected ones: t_stat_uncorrected = np.mean(differences) / np.sqrt(np.var(differences, ddof=1) / n) p_val_uncorrected = t.sf(np.abs(t_stat_uncorrected), df) print( f"Uncorrected t-value: {t_stat_uncorrected:.3f}\n" f"Uncorrected p-value: {p_val_uncorrected:.3f}" ) # %% # Using the conventional significance alpha level at `p=0.05`, we observe that # the uncorrected t-test concludes that the first model is significantly better # than the second. 
# # With the corrected approach, in contrast, we fail to detect this difference. # # In the latter case, however, the frequentist approach does not let us # conclude that the first and second model have an equivalent performance. If # we wanted to make this assertion we need to use a Bayesian approach. # %% # Comparing two models: Bayesian approach # --------------------------------------- # We can use Bayesian estimation to calculate the probability that the first # model is better than the second. Bayesian estimation will output a # distribution followed by the mean :math:`\mu` of the differences in the # performance of two models. # # To obtain the posterior distribution we need to define a prior that models # our beliefs of how the mean is distributed before looking at the data, # and multiply it by a likelihood function that computes how likely our # observed differences are, given the values that the mean of differences # could take. # # Bayesian estimation can be carried out in many forms to answer our question, # but in this example we will implement the approach suggested by Benavoli and # colleagues [4]_. # # One way of defining our posterior using a closed-form expression is to select # a prior conjugate to the likelihood function. Benavoli and colleagues [4]_ # show that when comparing the performance of two classifiers we can model the # prior as a Normal-Gamma distribution (with both mean and variance unknown) # conjugate to a normal likelihood, to thus express the posterior as a normal # distribution. # Marginalizing out the variance from this normal posterior, we can define the # posterior of the mean parameter as a Student's t-distribution. Specifically: # # .. 
math:: # St(\mu;n-1,\overline{x},(\frac{1}{n}+\frac{n_{test}}{n_{train}}) # \hat{\sigma}^2) # # where :math:`n` is the total number of samples, # :math:`\overline{x}` represents the mean difference in the scores, # :math:`n_{test}` is the number of samples used for testing, # :math:`n_{train}` is the number of samples used for training, # and :math:`\hat{\sigma}^2` represents the variance of the observed # differences. # # Notice that we are using Nadeau and Bengio's corrected variance in our # Bayesian approach as well. # # Let's compute and plot the posterior: # initialize random variable t_post = t( df, loc=np.mean(differences), scale=corrected_std(differences, n_train, n_test) ) # %% # Let's plot the posterior distribution: x = np.linspace(t_post.ppf(0.001), t_post.ppf(0.999), 100) plt.plot(x, t_post.pdf(x)) plt.xticks(np.arange(-0.04, 0.06, 0.01)) plt.fill_between(x, t_post.pdf(x), 0, facecolor="blue", alpha=0.2) plt.ylabel("Probability density") plt.xlabel(r"Mean difference ($\mu$)") plt.title("Posterior distribution") plt.show() # %% # We can calculate the probability that the first model is better than the # second by computing the area under the curve of the posterior distribution # from zero to infinity. And also the reverse: we can calculate the probability # that the second model is better than the first by computing the area under # the curve from minus infinity to zero. better_prob = 1 - t_post.cdf(0) print( f"Probability of {model_scores.index[0]} being more accurate than " f"{model_scores.index[1]}: {better_prob:.3f}" ) print( f"Probability of {model_scores.index[1]} being more accurate than " f"{model_scores.index[0]}: {1 - better_prob:.3f}" ) # %% # In contrast with the frequentist approach, we can compute the probability # that one model is better than the other. # # Note that we obtained similar results as those in the frequentist approach. 
# Given our choice of priors, we are essentially performing the same # computations, but we are allowed to make different assertions. # %% # Region of Practical Equivalence # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # Sometimes we are interested in determining the probabilities that our models # have an equivalent performance, where "equivalent" is defined in a practical # way. A naive approach [4]_ would be to define estimators as practically # equivalent when they differ by less than 1% in their accuracy. But we could # also define this practical equivalence taking into account the problem we are # trying to solve. For example, a difference of 5% in accuracy would mean an # increase of $1000 in sales, and we consider any quantity above that as # relevant for our business. # # In this example we are going to define the # Region of Practical Equivalence (ROPE) to be :math:`[-0.01, 0.01]`. That is, # we will consider two models as practically equivalent if they differ by less # than 1% in their performance. 
# # To compute the probabilities of the classifiers being practically equivalent, # we calculate the area under the curve of the posterior over the ROPE # interval: rope_interval = [-0.01, 0.01] rope_prob = t_post.cdf(rope_interval[1]) - t_post.cdf(rope_interval[0]) print( f"Probability of {model_scores.index[0]} and {model_scores.index[1]} " f"being practically equivalent: {rope_prob:.3f}" ) # %% # We can plot how the posterior is distributed over the ROPE interval: x_rope = np.linspace(rope_interval[0], rope_interval[1], 100) plt.plot(x, t_post.pdf(x)) plt.xticks(np.arange(-0.04, 0.06, 0.01)) plt.vlines([-0.01, 0.01], ymin=0, ymax=(np.max(t_post.pdf(x)) + 1)) plt.fill_between(x_rope, t_post.pdf(x_rope), 0, facecolor="blue", alpha=0.2) plt.ylabel("Probability density") plt.xlabel(r"Mean difference ($\mu$)") plt.title("Posterior distribution under the ROPE") plt.show() # %% # As suggested in [4]_, we can further interpret these probabilities using the # same criteria as the frequentist approach: is the probability of falling # inside the ROPE bigger than 95% (alpha value of 5%)? In that case we can # conclude that both models are practically equivalent. # %% # The Bayesian estimation approach also allows us to compute how uncertain we # are about our estimation of the difference. This can be calculated using # credible intervals. For a given probability, they show the range of values # that the estimated quantity, in our case the mean difference in # performance, can take. # For example, a 50% credible interval [x, y] tells us that there is a 50% # probability that the true (mean) difference of performance between models is # between x and y. 
# # Let's determine the credible intervals of our data using 50%, 75% and 95%: cred_intervals = [] intervals = [0.5, 0.75, 0.95] for interval in intervals: cred_interval = list(t_post.interval(interval)) cred_intervals.append([interval, cred_interval[0], cred_interval[1]]) cred_int_df = pd.DataFrame( cred_intervals, columns=["interval", "lower value", "upper value"] ).set_index("interval") cred_int_df # %% # As shown in the table, there is a 50% probability that the true mean # difference between models will be between 0.000977 and 0.019023, 70% # probability that it will be between -0.005422 and 0.025422, and 95% # probability that it will be between -0.016445 and 0.036445. # %% # Pairwise comparison of all models: frequentist approach # ------------------------------------------------------- # # We could also be interested in comparing the performance of all our models # evaluated with :class:`~sklearn.model_selection.GridSearchCV`. In this case # we would be running our statistical test multiple times, which leads us to # the `multiple comparisons problem # <https://en.wikipedia.org/wiki/Multiple_comparisons_problem>`_. # # There are many possible ways to tackle this problem, but a standard approach # is to apply a `Bonferroni correction # <https://en.wikipedia.org/wiki/Bonferroni_correction>`_. Bonferroni can be # computed by multiplying the p-value by the number of comparisons we are # testing. 
# # Let's compare the performance of the models using the corrected t-test: from itertools import combinations from math import factorial n_comparisons = factorial(len(model_scores)) / ( factorial(2) * factorial(len(model_scores) - 2) ) pairwise_t_test = [] for model_i, model_k in combinations(range(len(model_scores)), 2): model_i_scores = model_scores.iloc[model_i].values model_k_scores = model_scores.iloc[model_k].values differences = model_i_scores - model_k_scores t_stat, p_val = compute_corrected_ttest(differences, df, n_train, n_test) p_val *= n_comparisons # implement Bonferroni correction # Bonferroni can output p-values higher than 1 p_val = 1 if p_val > 1 else p_val pairwise_t_test.append( [model_scores.index[model_i], model_scores.index[model_k], t_stat, p_val] ) pairwise_comp_df = pd.DataFrame( pairwise_t_test, columns=["model_1", "model_2", "t_stat", "p_val"] ).round(3) pairwise_comp_df # %% # We observe that after correcting for multiple comparisons, the only model # that significantly differs from the others is `'2_poly'`. # `'rbf'`, the model ranked first by # :class:`~sklearn.model_selection.GridSearchCV`, does not significantly # differ from `'linear'` or `'3_poly'`. # %% # Pairwise comparison of all models: Bayesian approach # ---------------------------------------------------- # # When using Bayesian estimation to compare multiple models, we don't need to # correct for multiple comparisons (for reasons why see [4]_). 
# # We can carry out our pairwise comparisons the same way as in the first # section: pairwise_bayesian = [] for model_i, model_k in combinations(range(len(model_scores)), 2): model_i_scores = model_scores.iloc[model_i].values model_k_scores = model_scores.iloc[model_k].values differences = model_i_scores - model_k_scores t_post = t( df, loc=np.mean(differences), scale=corrected_std(differences, n_train, n_test) ) worse_prob = t_post.cdf(rope_interval[0]) better_prob = 1 - t_post.cdf(rope_interval[1]) rope_prob = t_post.cdf(rope_interval[1]) - t_post.cdf(rope_interval[0]) pairwise_bayesian.append([worse_prob, better_prob, rope_prob]) pairwise_bayesian_df = pd.DataFrame( pairwise_bayesian, columns=["worse_prob", "better_prob", "rope_prob"] ).round(3) pairwise_comp_df = pairwise_comp_df.join(pairwise_bayesian_df) pairwise_comp_df # %% # Using the Bayesian approach we can compute the probability that a model # performs better, worse or practically equivalent to another. # # Results show that the model ranked first by # :class:`~sklearn.model_selection.GridSearchCV` `'rbf'`, has approximately a # 6.8% chance of being worse than `'linear'`, and a 1.8% chance of being worse # than `'3_poly'`. # `'rbf'` and `'linear'` have a 43% probability of being practically # equivalent, while `'rbf'` and `'3_poly'` have a 10% chance of being so. # # Similarly to the conclusions obtained using the frequentist approach, all # models have a 100% probability of being better than `'2_poly'`, and none have # a practically equivalent performance with the latter. # %% # Take-home messages # ------------------ # - Small differences in performance measures might easily turn out to be # merely by chance, but not because one model predicts systematically better # than the other. As shown in this example, statistics can tell you how # likely that is. 
# - When statistically comparing the performance of two models evaluated in # GridSearchCV, it is necessary to correct the calculated variance which # could be underestimated since the scores of the models are not independent # from each other. # - A frequentist approach that uses a (variance-corrected) paired t-test can # tell us if the performance of one model is better than another with a # degree of certainty above chance. # - A Bayesian approach can provide the probabilities of one model being # better, worse or practically equivalent than another. It can also tell us # how confident we are of knowing that the true differences of our models # fall under a certain range of values. # - If multiple models are statistically compared, a multiple comparisons # correction is needed when using the frequentist approach. # %% # .. rubric:: References # # .. [1] Dietterich, T. G. (1998). `Approximate statistical tests for # comparing supervised classification learning algorithms # <http://web.cs.iastate.edu/~jtian/cs573/Papers/Dietterich-98.pdf>`_. # Neural computation, 10(7). # .. [2] Nadeau, C., & Bengio, Y. (2000). `Inference for the generalization # error # <https://papers.nips.cc/paper/1661-inference-for-the-generalization-error.pdf>`_. # In Advances in neural information processing systems. # .. [3] Bouckaert, R. R., & Frank, E. (2004). `Evaluating the replicability # of significance tests for comparing learning algorithms # <https://www.cms.waikato.ac.nz/~ml/publications/2004/bouckaert-frank.pdf>`_. # In Pacific-Asia Conference on Knowledge Discovery and Data Mining. # .. [4] Benavoli, A., Corani, G., Demšar, J., & Zaffalon, M. (2017). `Time # for a change: a tutorial for comparing multiple classifiers through # Bayesian analysis # <http://www.jmlr.org/papers/volume18/16-305/16-305.pdf>`_. # The Journal of Machine Learning Research, 18(1). See the Python # library that accompanies this paper `here # <https://github.com/janezd/baycomp>`_. # .. [5] Diebold, F.X. 
& Mariano R.S. (1995). `Comparing predictive accuracy # <http://www.est.uc3m.es/esp/nueva_docencia/comp_col_get/lade/tecnicas_prediccion/Practicas0708/Comparing%20Predictive%20Accuracy%20(Dielbold).pdf>`_ # Journal of Business & economic statistics, 20(1), 134-144.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/model_selection/plot_cv_indices.py
examples/model_selection/plot_cv_indices.py
""" Visualizing cross-validation behavior in scikit-learn ===================================================== Choosing the right cross-validation object is a crucial part of fitting a model properly. There are many ways to split data into training and test sets in order to avoid model overfitting, to standardize the number of groups in test sets, etc. This example visualizes the behavior of several common scikit-learn objects for comparison. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from matplotlib.patches import Patch from sklearn.model_selection import ( GroupKFold, GroupShuffleSplit, KFold, ShuffleSplit, StratifiedGroupKFold, StratifiedKFold, StratifiedShuffleSplit, TimeSeriesSplit, ) rng = np.random.RandomState(1338) cmap_data = plt.cm.Paired cmap_cv = plt.cm.coolwarm n_splits = 4 # %% # Visualize our data # ------------------ # # First, we must understand the structure of our data. It has 100 randomly # generated input datapoints, 3 classes split unevenly across datapoints, # and 10 "groups" split evenly across datapoints. # # As we'll see, some cross-validation objects do specific things with # labeled data, others behave differently with grouped data, and others # do not use this information. # # To begin, we'll visualize our data. 
# Generate the class/group data n_points = 100 X = rng.randn(100, 10) percentiles_classes = [0.1, 0.3, 0.6] y = np.hstack([[ii] * int(100 * perc) for ii, perc in enumerate(percentiles_classes)]) # Generate uneven groups group_prior = rng.dirichlet([2] * 10) groups = np.repeat(np.arange(10), rng.multinomial(100, group_prior)) def visualize_groups(classes, groups, name): # Visualize dataset groups fig, ax = plt.subplots() ax.scatter( range(len(groups)), [0.5] * len(groups), c=groups, marker="_", lw=50, cmap=cmap_data, ) ax.scatter( range(len(groups)), [3.5] * len(groups), c=classes, marker="_", lw=50, cmap=cmap_data, ) ax.set( ylim=[-1, 5], yticks=[0.5, 3.5], yticklabels=["Data\ngroup", "Data\nclass"], xlabel="Sample index", ) visualize_groups(y, groups, "no groups") # %% # Define a function to visualize cross-validation behavior # -------------------------------------------------------- # # We'll define a function that lets us visualize the behavior of each # cross-validation object. We'll perform 4 splits of the data. On each # split, we'll visualize the indices chosen for the training set # (in blue) and the test set (in red). 
def plot_cv_indices(cv, X, y, group, ax, n_splits, lw=10): """Create a sample plot for indices of a cross-validation object.""" use_groups = "Group" in type(cv).__name__ groups = group if use_groups else None # Generate the training/testing visualizations for each CV split for ii, (tr, tt) in enumerate(cv.split(X=X, y=y, groups=groups)): # Fill in indices with the training/test groups indices = np.array([np.nan] * len(X)) indices[tt] = 1 indices[tr] = 0 # Visualize the results ax.scatter( range(len(indices)), [ii + 0.5] * len(indices), c=indices, marker="_", lw=lw, cmap=cmap_cv, vmin=-0.2, vmax=1.2, ) # Plot the data classes and groups at the end ax.scatter( range(len(X)), [ii + 1.5] * len(X), c=y, marker="_", lw=lw, cmap=cmap_data ) ax.scatter( range(len(X)), [ii + 2.5] * len(X), c=group, marker="_", lw=lw, cmap=cmap_data ) # Formatting yticklabels = list(range(n_splits)) + ["class", "group"] ax.set( yticks=np.arange(n_splits + 2) + 0.5, yticklabels=yticklabels, xlabel="Sample index", ylabel="CV iteration", ylim=[n_splits + 2.2, -0.2], xlim=[0, 100], ) ax.set_title("{}".format(type(cv).__name__), fontsize=15) return ax # %% # Let's see how it looks for the :class:`~sklearn.model_selection.KFold` # cross-validation object: fig, ax = plt.subplots() cv = KFold(n_splits) plot_cv_indices(cv, X, y, groups, ax, n_splits) # %% # As you can see, by default the KFold cross-validation iterator does not # take either datapoint class or group into consideration. We can change this # by using either: # # - ``StratifiedKFold`` to preserve the percentage of samples for each class. # - ``GroupKFold`` to ensure that the same group will not appear in two # different folds. # - ``StratifiedGroupKFold`` to keep the constraint of ``GroupKFold`` while # attempting to return stratified folds. 
cvs = [StratifiedKFold, GroupKFold, StratifiedGroupKFold] for cv in cvs: fig, ax = plt.subplots(figsize=(6, 3)) plot_cv_indices(cv(n_splits), X, y, groups, ax, n_splits) ax.legend( [Patch(color=cmap_cv(0.8)), Patch(color=cmap_cv(0.02))], ["Testing set", "Training set"], loc=(1.02, 0.8), ) # Make the legend fit plt.tight_layout() fig.subplots_adjust(right=0.7) # %% # Next we'll visualize this behavior for a number of CV iterators. # # Visualize cross-validation indices for many CV objects # ------------------------------------------------------ # # Let's visually compare the cross validation behavior for many # scikit-learn cross-validation objects. Below we will loop through several # common cross-validation objects, visualizing the behavior of each. # # Note how some use the group/class information while others do not. cvs = [ KFold, GroupKFold, ShuffleSplit, StratifiedKFold, StratifiedGroupKFold, GroupShuffleSplit, StratifiedShuffleSplit, TimeSeriesSplit, ] for cv in cvs: this_cv = cv(n_splits=n_splits) fig, ax = plt.subplots(figsize=(6, 3)) plot_cv_indices(this_cv, X, y, groups, ax, n_splits) ax.legend( [Patch(color=cmap_cv(0.8)), Patch(color=cmap_cv(0.02))], ["Testing set", "Training set"], loc=(1.02, 0.8), ) # Make the legend fit plt.tight_layout() fig.subplots_adjust(right=0.7) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/model_selection/plot_permutation_tests_for_classification.py
examples/model_selection/plot_permutation_tests_for_classification.py
""" ================================================================= Test with permutations the significance of a classification score ================================================================= This example demonstrates the use of :func:`~sklearn.model_selection.permutation_test_score` to evaluate the significance of a cross-validated score using permutations. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Dataset # ------- # # We will use the :ref:`iris_dataset`, which consists of measurements taken # from 3 Iris species. Our model will use the measurements to predict # the iris species. from sklearn.datasets import load_iris iris = load_iris() X = iris.data y = iris.target # %% # For comparison, we also generate some random feature data (i.e., 20 features), # uncorrelated with the class labels in the iris dataset. import numpy as np n_uncorrelated_features = 20 rng = np.random.RandomState(seed=0) # Use same number of samples as in iris and 20 features X_rand = rng.normal(size=(X.shape[0], n_uncorrelated_features)) # %% # Permutation test score # ---------------------- # # Next, we calculate the # :func:`~sklearn.model_selection.permutation_test_score` for both, the original # iris dataset (where there's a strong relationship between features and labels) and # the randomly generated features with iris labels (where no dependency between features # and labels is expected). We use the # :class:`~sklearn.svm.SVC` classifier and :ref:`accuracy_score` to evaluate # the model at each round. # # :func:`~sklearn.model_selection.permutation_test_score` generates a null # distribution by calculating the accuracy of the classifier # on 1000 different permutations of the dataset, where features # remain the same but labels undergo different random permutations. This is the # distribution for the null hypothesis which states there is no dependency # between the features and labels. 
An empirical p-value is then calculated as # the proportion of permutations, for which the score obtained by the model trained on # the permutation, is greater than or equal to the score obtained using the original # data. from sklearn.model_selection import StratifiedKFold, permutation_test_score from sklearn.svm import SVC clf = SVC(kernel="linear", random_state=7) cv = StratifiedKFold(n_splits=2, shuffle=True, random_state=0) score_iris, perm_scores_iris, pvalue_iris = permutation_test_score( clf, X, y, scoring="accuracy", cv=cv, n_permutations=1000 ) score_rand, perm_scores_rand, pvalue_rand = permutation_test_score( clf, X_rand, y, scoring="accuracy", cv=cv, n_permutations=1000 ) # %% # Original data # ^^^^^^^^^^^^^ # # Below we plot a histogram of the permutation scores (the null # distribution). The red line indicates the score obtained by the classifier # on the original data (without permuted labels). The score is much better than those # obtained by using permuted data and the p-value is thus very low. This indicates that # there is a low likelihood that this good score would be obtained by chance # alone. It provides evidence that the iris dataset contains real dependency # between features and labels and the classifier was able to utilize this # to obtain good results. The low p-value can lead us to reject the null hypothesis. import matplotlib.pyplot as plt fig, ax = plt.subplots() ax.hist(perm_scores_iris, bins=20, density=True) ax.axvline(score_iris, ls="--", color="r") score_label = ( f"Score on original\niris data: {score_iris:.2f}\n(p-value: {pvalue_iris:.3f})" ) ax.text(0.7, 10, score_label, fontsize=12) ax.set_xlabel("Accuracy score") _ = ax.set_ylabel("Probability density") # %% # Random data # ^^^^^^^^^^^ # # Below we plot the null distribution for the randomized data. The permutation # scores are similar to those obtained using the original iris dataset # because the permutation always destroys any feature-label dependency present. 
# The score obtained on the randomized data in this case # though, is very poor. This results in a large p-value, confirming that there was no # feature-label dependency in the randomized data. fig, ax = plt.subplots() ax.hist(perm_scores_rand, bins=20, density=True) ax.set_xlim(0.13) ax.axvline(score_rand, ls="--", color="r") score_label = ( f"Score on original\nrandom data: {score_rand:.2f}\n(p-value: {pvalue_rand:.3f})" ) ax.text(0.14, 7.5, score_label, fontsize=12) ax.set_xlabel("Accuracy score") ax.set_ylabel("Probability density") plt.show() # %% # Another possible reason for obtaining a high p-value could be that the classifier # was not able to use the structure in the data. In this case, the p-value # would only be low for classifiers that are able to utilize the dependency # present. In our case above, where the data is random, all classifiers would # have a high p-value as there is no structure present in the data. We might or might # not fail to reject the null hypothesis depending on whether the p-value is high on a # more appropriate estimator as well. # # Finally, note that this test has been shown to produce low p-values even # if there is only weak structure in the data [1]_. # # .. rubric:: References # # .. [1] Ojala and Garriga. `Permutation Tests for Studying Classifier # Performance # <http://www.jmlr.org/papers/volume11/ojala10a/ojala10a.pdf>`_. The # Journal of Machine Learning Research (2010) vol. 11 #
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/model_selection/plot_grid_search_refit_callable.py
examples/model_selection/plot_grid_search_refit_callable.py
""" ================================================== Balance model complexity and cross-validated score ================================================== This example demonstrates how to balance model complexity and cross-validated score by finding a decent accuracy within 1 standard deviation of the best accuracy score while minimising the number of :class:`~sklearn.decomposition.PCA` components [1]_. It uses :class:`~sklearn.model_selection.GridSearchCV` with a custom refit callable to select the optimal model. The figure shows the trade-off between cross-validated score and the number of PCA components. The balanced case is when `n_components=10` and `accuracy=0.88`, which falls into the range within 1 standard deviation of the best accuracy score. References ---------- .. [1] Hastie, T., Tibshirani, R., Friedman, J. (2001). Model Assessment and Selection. The Elements of Statistical Learning (pp. 219-260). New York, NY, USA: Springer New York Inc. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np import polars as pl from sklearn.datasets import load_digits from sklearn.decomposition import PCA from sklearn.linear_model import LogisticRegression from sklearn.model_selection import GridSearchCV, ShuffleSplit from sklearn.pipeline import Pipeline # %% # Introduction # ------------ # # When tuning hyperparameters, we often want to balance model complexity and # performance. The "one-standard-error" rule is a common approach: select the simplest # model whose performance is within one standard error of the best model's performance. # This helps to avoid overfitting by preferring simpler models when their performance is # statistically comparable to more complex ones. # %% # Helper functions # ---------------- # # We define two helper functions: # # 1. `lower_bound`: Calculates the threshold for acceptable performance # (best score - 1 std) # # 2. 
`best_low_complexity`: Selects the model with the fewest PCA components that # exceeds this threshold def lower_bound(cv_results): """ Calculate the lower bound within 1 standard deviation of the best `mean_test_scores`. Parameters ---------- cv_results : dict of numpy(masked) ndarrays See attribute cv_results_ of `GridSearchCV` Returns ------- float Lower bound within 1 standard deviation of the best `mean_test_score`. """ best_score_idx = np.argmax(cv_results["mean_test_score"]) return ( cv_results["mean_test_score"][best_score_idx] - cv_results["std_test_score"][best_score_idx] ) def best_low_complexity(cv_results): """ Balance model complexity with cross-validated score. Parameters ---------- cv_results : dict of numpy(masked) ndarrays See attribute cv_results_ of `GridSearchCV`. Return ------ int Index of a model that has the fewest PCA components while has its test score within 1 standard deviation of the best `mean_test_score`. """ threshold = lower_bound(cv_results) candidate_idx = np.flatnonzero(cv_results["mean_test_score"] >= threshold) best_idx = candidate_idx[ cv_results["param_reduce_dim__n_components"][candidate_idx].argmin() ] return best_idx # %% # Set up the pipeline and parameter grid # -------------------------------------- # # We create a pipeline with two steps: # # 1. Dimensionality reduction using PCA # # 2. Classification using LogisticRegression # # We'll search over different numbers of PCA components to find the optimal complexity. pipe = Pipeline( [ ("reduce_dim", PCA(random_state=42)), ("classify", LogisticRegression(random_state=42, C=0.01, max_iter=1000)), ] ) param_grid = {"reduce_dim__n_components": [6, 8, 10, 15, 20, 25, 35, 45, 55]} # %% # Perform the search with GridSearchCV # ------------------------------------ # # We use `GridSearchCV` with our custom `best_low_complexity` function as the refit # parameter. 
This function will select the model with the fewest PCA components that # still performs within one standard deviation of the best model. grid = GridSearchCV( pipe, # Use a non-stratified CV strategy to make sure that the inter-fold # standard deviation of the test scores is informative. cv=ShuffleSplit(n_splits=30, random_state=0), n_jobs=1, # increase this on your machine to use more physical cores param_grid=param_grid, scoring="accuracy", refit=best_low_complexity, return_train_score=True, ) # %% # Load the digits dataset and fit the model # ----------------------------------------- X, y = load_digits(return_X_y=True) grid.fit(X, y) # %% # Visualize the results # --------------------- # # We'll create a bar chart showing the test scores for different numbers of PCA # components, along with horizontal lines indicating the best score and the # one-standard-deviation threshold. n_components = grid.cv_results_["param_reduce_dim__n_components"] test_scores = grid.cv_results_["mean_test_score"] # Create a polars DataFrame for better data manipulation and visualization results_df = pl.DataFrame( { "n_components": n_components, "mean_test_score": test_scores, "std_test_score": grid.cv_results_["std_test_score"], "mean_train_score": grid.cv_results_["mean_train_score"], "std_train_score": grid.cv_results_["std_train_score"], "mean_fit_time": grid.cv_results_["mean_fit_time"], "rank_test_score": grid.cv_results_["rank_test_score"], } ) # Sort by number of components results_df = results_df.sort("n_components") # Calculate the lower bound threshold lower = lower_bound(grid.cv_results_) # Get the best model information best_index_ = grid.best_index_ best_components = n_components[best_index_] best_score = grid.cv_results_["mean_test_score"][best_index_] # Add a column to mark the selected model results_df = results_df.with_columns( pl.when(pl.col("n_components") == best_components) .then(pl.lit("Selected")) .otherwise(pl.lit("Regular")) .alias("model_type") ) # Get the 
number of CV splits from the results n_splits = sum( 1 for key in grid.cv_results_.keys() if key.startswith("split") and key.endswith("test_score") ) # Extract individual scores for each split test_scores = np.array( [ [grid.cv_results_[f"split{i}_test_score"][j] for i in range(n_splits)] for j in range(len(n_components)) ] ) train_scores = np.array( [ [grid.cv_results_[f"split{i}_train_score"][j] for i in range(n_splits)] for j in range(len(n_components)) ] ) # Calculate mean and std of test scores mean_test_scores = np.mean(test_scores, axis=1) std_test_scores = np.std(test_scores, axis=1) # Find best score and threshold best_mean_score = np.max(mean_test_scores) threshold = best_mean_score - std_test_scores[np.argmax(mean_test_scores)] # Create a single figure for visualization fig, ax = plt.subplots(figsize=(12, 8)) # Plot individual points for i, comp in enumerate(n_components): # Plot individual test points plt.scatter( [comp] * n_splits, test_scores[i], alpha=0.2, color="blue", s=20, label="Individual test scores" if i == 0 else "", ) # Plot individual train points plt.scatter( [comp] * n_splits, train_scores[i], alpha=0.2, color="green", s=20, label="Individual train scores" if i == 0 else "", ) # Plot mean lines with error bands plt.plot( n_components, np.mean(test_scores, axis=1), "-", color="blue", linewidth=2, label="Mean test score", ) plt.fill_between( n_components, np.mean(test_scores, axis=1) - np.std(test_scores, axis=1), np.mean(test_scores, axis=1) + np.std(test_scores, axis=1), alpha=0.15, color="blue", ) plt.plot( n_components, np.mean(train_scores, axis=1), "-", color="green", linewidth=2, label="Mean train score", ) plt.fill_between( n_components, np.mean(train_scores, axis=1) - np.std(train_scores, axis=1), np.mean(train_scores, axis=1) + np.std(train_scores, axis=1), alpha=0.15, color="green", ) # Add threshold lines plt.axhline( best_mean_score, color="#9b59b6", # Purple linestyle="--", label="Best score", linewidth=2, ) plt.axhline( 
threshold, color="#e67e22", # Orange linestyle="--", label="Best score - 1 std", linewidth=2, ) # Highlight selected model plt.axvline( best_components, color="#9b59b6", # Purple alpha=0.2, linewidth=8, label="Selected model", ) # Set titles and labels plt.xlabel("Number of PCA components", fontsize=12) plt.ylabel("Score", fontsize=12) plt.title("Model Selection: Balancing Complexity and Performance", fontsize=14) plt.grid(True, linestyle="--", alpha=0.7) plt.legend( bbox_to_anchor=(1.02, 1), loc="upper left", borderaxespad=0, ) # Set axis properties plt.xticks(n_components) plt.ylim((0.85, 1.0)) # # Adjust layout plt.tight_layout() # %% # Print the results # ----------------- # # We print information about the selected model, including its complexity and # performance. We also show a summary table of all models using polars. print("Best model selected by the one-standard-error rule:") print(f"Number of PCA components: {best_components}") print(f"Accuracy score: {best_score:.4f}") print(f"Best possible accuracy: {np.max(test_scores):.4f}") print(f"Accuracy threshold (best - 1 std): {lower:.4f}") # Create a summary table with polars summary_df = results_df.select( pl.col("n_components"), pl.col("mean_test_score").round(4).alias("test_score"), pl.col("std_test_score").round(4).alias("test_std"), pl.col("mean_train_score").round(4).alias("train_score"), pl.col("std_train_score").round(4).alias("train_std"), pl.col("mean_fit_time").round(3).alias("fit_time"), pl.col("rank_test_score").alias("rank"), ) # Add a column to mark the selected model summary_df = summary_df.with_columns( pl.when(pl.col("n_components") == best_components) .then(pl.lit("*")) .otherwise(pl.lit("")) .alias("selected") ) print("\nModel comparison table:") print(summary_df) # %% # Conclusion # ---------- # # The one-standard-error rule helps us select a simpler model (fewer PCA components) # while maintaining performance statistically comparable to the best model. 
# This approach can help prevent overfitting and improve model interpretability # and efficiency. # # In this example, we've seen how to implement this rule using a custom refit # callable with :class:`~sklearn.model_selection.GridSearchCV`. # # Key takeaways: # # 1. The one-standard-error rule provides a good rule of thumb to select simpler models # # 2. Custom refit callables in :class:`~sklearn.model_selection.GridSearchCV` allow for # flexible model selection strategies # # 3. Visualizing both train and test scores helps identify potential overfitting # # This approach can be applied to other model selection scenarios where balancing # complexity and performance is important, or in cases where a use-case specific # selection of the "best" model is desired. # Display the figure plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/model_selection/plot_roc_crossval.py
examples/model_selection/plot_roc_crossval.py
""" ============================================================= Receiver Operating Characteristic (ROC) with cross validation ============================================================= This example presents how to estimate and visualize the variance of the Receiver Operating Characteristic (ROC) metric using cross-validation. ROC curves typically feature true positive rate (TPR) on the Y axis, and false positive rate (FPR) on the X axis. This means that the top left corner of the plot is the "ideal" point - a FPR of zero, and a TPR of one. This is not very realistic, but it does mean that a larger Area Under the Curve (AUC) is usually better. The "steepness" of ROC curves is also important, since it is ideal to maximize the TPR while minimizing the FPR. This example shows the ROC response of different datasets, created from K-fold cross-validation. Taking all of these curves, it is possible to calculate the mean AUC, and see the variance of the curve when the training set is split into different subsets. This roughly shows how the classifier output is affected by changes in the training data, and how different the splits generated by K-fold cross-validation are from one another. .. note:: See :ref:`sphx_glr_auto_examples_model_selection_plot_roc.py` for a complement of the present example explaining the averaging strategies to generalize the metrics for multiclass classifiers. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Load and prepare data # ===================== # # We import the :ref:`iris_dataset` which contains 3 classes, each one # corresponding to a type of iris plant. One class is linearly separable from # the other 2; the latter are **not** linearly separable from each other. # # In the following we binarize the dataset by dropping the "virginica" class # (`class_id=2`). 
This means that the "versicolor" class (`class_id=1`) is # regarded as the positive class and "setosa" as the negative class # (`class_id=0`). import numpy as np from sklearn.datasets import load_iris iris = load_iris() target_names = iris.target_names X, y = iris.data, iris.target X, y = X[y != 2], y[y != 2] n_samples, n_features = X.shape # %% # We also add noisy features to make the problem harder. random_state = np.random.RandomState(0) X = np.concatenate([X, random_state.randn(n_samples, 200 * n_features)], axis=1) # %% # Classification and ROC analysis # ------------------------------- # # Here we run :func:`~sklearn.model_selection.cross_validate` on a # :class:`~sklearn.svm.SVC` classifier, then use the computed cross-validation results # to plot the ROC curves fold-wise. Notice that the baseline to define the chance # level (dashed ROC curve) is a classifier that would always predict the most # frequent class. import matplotlib.pyplot as plt from sklearn import svm from sklearn.metrics import RocCurveDisplay, auc from sklearn.model_selection import StratifiedKFold, cross_validate n_splits = 6 cv = StratifiedKFold(n_splits=n_splits) classifier = svm.SVC(kernel="linear", probability=True, random_state=random_state) cv_results = cross_validate( classifier, X, y, cv=cv, return_estimator=True, return_indices=True ) prop_cycle = plt.rcParams["axes.prop_cycle"] colors = prop_cycle.by_key()["color"] curve_kwargs_list = [ dict(alpha=0.3, lw=1, color=colors[fold % len(colors)]) for fold in range(n_splits) ] names = [f"ROC fold {idx}" for idx in range(n_splits)] mean_fpr = np.linspace(0, 1, 100) interp_tprs = [] _, ax = plt.subplots(figsize=(6, 6)) viz = RocCurveDisplay.from_cv_results( cv_results, X, y, ax=ax, name=names, curve_kwargs=curve_kwargs_list, plot_chance_level=True, ) for idx in range(n_splits): interp_tpr = np.interp(mean_fpr, viz.fpr[idx], viz.tpr[idx]) interp_tpr[0] = 0.0 interp_tprs.append(interp_tpr) mean_tpr = np.mean(interp_tprs, axis=0) 
mean_tpr[-1] = 1.0 mean_auc = auc(mean_fpr, mean_tpr) std_auc = np.std(viz.roc_auc) ax.plot( mean_fpr, mean_tpr, color="b", label=r"Mean ROC (AUC = %0.2f $\pm$ %0.2f)" % (mean_auc, std_auc), lw=2, alpha=0.8, ) std_tpr = np.std(interp_tprs, axis=0) tprs_upper = np.minimum(mean_tpr + std_tpr, 1) tprs_lower = np.maximum(mean_tpr - std_tpr, 0) ax.fill_between( mean_fpr, tprs_lower, tprs_upper, color="grey", alpha=0.2, label=r"$\pm$ 1 std. dev.", ) ax.set( xlabel="False Positive Rate", ylabel="True Positive Rate", title=f"Mean ROC curve with variability\n(Positive label '{target_names[1]}')", ) ax.legend(loc="lower right") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/model_selection/plot_det.py
examples/model_selection/plot_det.py
""" ==================================== Detection error tradeoff (DET) curve ==================================== In this example, we compare two binary classification multi-threshold metrics: the Receiver Operating Characteristic (ROC) and the Detection Error Tradeoff (DET). For such purpose, we evaluate two different classifiers for the same classification task. ROC curves feature true positive rate (TPR) on the Y axis, and false positive rate (FPR) on the X axis. This means that the top left corner of the plot is the "ideal" point - a FPR of zero, and a TPR of one. DET curves are a variation of ROC curves where False Negative Rate (FNR) is plotted on the y-axis instead of the TPR. In this case the origin (bottom left corner) is the "ideal" point. .. note:: - See :func:`sklearn.metrics.roc_curve` for further information about ROC curves. - See :func:`sklearn.metrics.det_curve` for further information about DET curves. - This example is loosely based on :ref:`sphx_glr_auto_examples_classification_plot_classifier_comparison.py` example. - See :ref:`sphx_glr_auto_examples_model_selection_plot_roc_crossval.py` for an example estimating the variance of the ROC curves and ROC-AUC. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Generate synthetic data # ----------------------- from sklearn.datasets import make_classification from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler X, y = make_classification( n_samples=1_000, n_features=2, n_redundant=0, n_informative=2, random_state=1, n_clusters_per_class=1, ) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=0) # %% # Define the classifiers # ---------------------- # # Here we define two different classifiers. The goal is to visually compare their # statistical performance across thresholds using the ROC and DET curves. 
from sklearn.dummy import DummyClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.pipeline import make_pipeline from sklearn.svm import LinearSVC classifiers = { "Linear SVM": make_pipeline(StandardScaler(), LinearSVC(C=0.025)), "Random Forest": RandomForestClassifier( max_depth=5, n_estimators=10, max_features=1, random_state=0 ), "Non-informative baseline": DummyClassifier(), } # %% # Compare ROC and DET curves # -------------------------- # # DET curves are commonly plotted in normal deviate scale. To achieve this the # DET display transforms the error rates as returned by the # :func:`~sklearn.metrics.det_curve` and the axis scale using # `scipy.stats.norm`. import matplotlib.pyplot as plt from sklearn.dummy import DummyClassifier from sklearn.metrics import DetCurveDisplay, RocCurveDisplay fig, [ax_roc, ax_det] = plt.subplots(1, 2, figsize=(11, 5)) ax_roc.set_title("Receiver Operating Characteristic (ROC) curves") ax_det.set_title("Detection Error Tradeoff (DET) curves") ax_roc.grid(linestyle="--") ax_det.grid(linestyle="--") for name, clf in classifiers.items(): (color, linestyle) = ( ("black", "--") if name == "Non-informative baseline" else (None, None) ) clf.fit(X_train, y_train) RocCurveDisplay.from_estimator( clf, X_test, y_test, ax=ax_roc, name=name, curve_kwargs=dict(color=color, linestyle=linestyle), ) DetCurveDisplay.from_estimator( clf, X_test, y_test, ax=ax_det, name=name, color=color, linestyle=linestyle ) plt.legend() plt.show() # %% # Notice that it is easier to visually assess the overall performance of # different classification algorithms using DET curves than using ROC curves. As # ROC curves are plot in a linear scale, different classifiers usually appear # similar for a large part of the plot and differ the most in the top left # corner of the graph. 
On the other hand, because DET curves represent straight # lines in normal deviate scale, they tend to be distinguishable as a whole and # the area of interest spans a large part of the plot. # # DET curves give direct feedback of the detection error tradeoff to aid in # operating point analysis. The user can then decide the FNR they are willing to # accept at the expense of the FPR (or vice-versa). # # Non-informative classifier baseline for the ROC and DET curves # -------------------------------------------------------------- # # The diagonal black-dotted lines in the plots above correspond to a # :class:`~sklearn.dummy.DummyClassifier` using the default "prior" strategy, to # serve as baseline for comparison with other classifiers. This classifier makes # constant predictions, independent of the input features in `X`, making it a # non-informative classifier. # # To further understand the non-informative baseline of the ROC and DET curves, # we recall the following mathematical definitions: # # :math:`\text{FPR} = \frac{\text{FP}}{\text{FP} + \text{TN}}` # # :math:`\text{FNR} = \frac{\text{FN}}{\text{TP} + \text{FN}}` # # :math:`\text{TPR} = \frac{\text{TP}}{\text{TP} + \text{FN}}` # # A classifier that always predict the positive class would have no true # negatives nor false negatives, giving :math:`\text{FPR} = \text{TPR} = 1` and # :math:`\text{FNR} = 0`, i.e.: # # - a single point in the upper right corner of the ROC plane, # - a single point in the lower right corner of the DET plane. # # Similarly, a classifier that always predict the negative class would have no # true positives nor false positives, thus :math:`\text{FPR} = \text{TPR} = 0` # and :math:`\text{FNR} = 1`, i.e.: # # - a single point in the lower left corner of the ROC plane, # - a single point in the upper left corner of the DET plane.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/model_selection/plot_successive_halving_iterations.py
examples/model_selection/plot_successive_halving_iterations.py
""" Successive Halving Iterations ============================= This example illustrates how a successive halving search (:class:`~sklearn.model_selection.HalvingGridSearchCV` and :class:`~sklearn.model_selection.HalvingRandomSearchCV`) iteratively chooses the best parameter combination out of multiple candidates. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np import pandas as pd from scipy.stats import randint from sklearn import datasets from sklearn.ensemble import RandomForestClassifier from sklearn.experimental import enable_halving_search_cv # noqa: F401 from sklearn.model_selection import HalvingRandomSearchCV # %% # We first define the parameter space and train a # :class:`~sklearn.model_selection.HalvingRandomSearchCV` instance. rng = np.random.RandomState(0) X, y = datasets.make_classification(n_samples=400, n_features=12, random_state=rng) clf = RandomForestClassifier(n_estimators=20, random_state=rng) param_dist = { "max_depth": [3, None], "max_features": randint(1, 6), "min_samples_split": randint(2, 11), "bootstrap": [True, False], "criterion": ["gini", "entropy"], } rsh = HalvingRandomSearchCV( estimator=clf, param_distributions=param_dist, factor=2, random_state=rng ) rsh.fit(X, y) # %% # We can now use the `cv_results_` attribute of the search estimator to inspect # and plot the evolution of the search. 
results = pd.DataFrame(rsh.cv_results_) results["params_str"] = results.params.apply(str) results.drop_duplicates(subset=("params_str", "iter"), inplace=True) mean_scores = results.pivot( index="iter", columns="params_str", values="mean_test_score" ) ax = mean_scores.plot(legend=False, alpha=0.6) labels = [ f"iter={i}\nn_samples={rsh.n_resources_[i]}\nn_candidates={rsh.n_candidates_[i]}" for i in range(rsh.n_iterations_) ] ax.set_xticks(range(rsh.n_iterations_)) ax.set_xticklabels(labels, rotation=45, multialignment="left") ax.set_title("Scores of candidates over iterations") ax.set_ylabel("mean test score", fontsize=15) ax.set_xlabel("iterations", fontsize=15) plt.tight_layout() plt.show() # %% # Number of candidates and amount of resource at each iteration # ------------------------------------------------------------- # # At the first iteration, a small amount of resources is used. The resource # here is the number of samples that the estimators are trained on. All # candidates are evaluated. # # At the second iteration, only the best half of the candidates is evaluated. # The number of allocated resources is doubled: candidates are evaluated on # twice as many samples. # # This process is repeated until the last iteration, where only 2 candidates # are left. The best candidate is the candidate that has the best score at the # last iteration.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/model_selection/plot_confusion_matrix.py
examples/model_selection/plot_confusion_matrix.py
""" ============================================================== Evaluate the performance of a classifier with Confusion Matrix ============================================================== Example of confusion matrix usage to evaluate the quality of the output of a classifier on the iris data set. The diagonal elements represent the number of points for which the predicted label is equal to the true label, while off-diagonal elements are those that are mislabeled by the classifier. The higher the diagonal values of the confusion matrix the better, indicating many correct predictions. The figures show the confusion matrix with and without normalization by class support size (number of elements in each class). This kind of normalization can be interesting in case of class imbalance to have a more visual interpretation of which class is being misclassified. Here the results are not as good as they could be as our choice for the regularization parameter C was not the best. In real life applications this parameter is usually chosen using :ref:`grid_search`. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn import datasets, svm from sklearn.metrics import ConfusionMatrixDisplay from sklearn.model_selection import train_test_split # import some data to play with iris = datasets.load_iris() X = iris.data y = iris.target class_names = iris.target_names # Split the data into a training set and a test set X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) # Run classifier, using a model that is too regularized (C too low) to see # the impact on the results classifier = svm.SVC(kernel="linear", C=0.01).fit(X_train, y_train) np.set_printoptions(precision=2) # Plot non-normalized confusion matrix titles_options = [ ("Confusion matrix, without normalization", None), ("Normalized confusion matrix", "true"), ] for title, normalize in titles_options: disp = ConfusionMatrixDisplay.from_estimator( classifier, X_test, y_test, display_labels=class_names, cmap=plt.cm.Blues, normalize=normalize, ) disp.ax_.set_title(title) print(title) print(disp.confusion_matrix) plt.show() # %% # Binary Classification # ===================== # # For binary problems, :func:`sklearn.metrics.confusion_matrix` has the `ravel` method # we can use get counts of true negatives, false positives, false negatives and # true positives. # # To obtain true negatives, false positives, false negatives and true # positives counts at different thresholds, one can use # :func:`sklearn.metrics.confusion_matrix_at_thresholds`. # This is fundamental for binary classification # metrics like :func:`~sklearn.metrics.roc_auc_score` and # :func:`~sklearn.metrics.det_curve`. 
from sklearn.datasets import make_classification from sklearn.metrics import confusion_matrix_at_thresholds X, y = make_classification( n_samples=100, n_features=20, n_informative=20, n_redundant=0, n_classes=2, random_state=42, ) X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.3, random_state=42 ) classifier = svm.SVC(kernel="linear", C=0.01, probability=True) classifier.fit(X_train, y_train) y_score = classifier.predict_proba(X_test)[:, 1] tns, fps, fns, tps, threshold = confusion_matrix_at_thresholds(y_test, y_score) # Plot TNs, FPs, FNs and TPs vs Thresholds plt.figure(figsize=(10, 6)) plt.plot(threshold, tns, label="True Negatives (TNs)") plt.plot(threshold, fps, label="False Positives (FPs)") plt.plot(threshold, fns, label="False Negatives (FNs)") plt.plot(threshold, tps, label="True Positives (TPs)") plt.xlabel("Thresholds") plt.ylabel("Count") plt.title("TNs, FPs, FNs and TPs vs Thresholds") plt.legend() plt.grid() plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/model_selection/plot_tuned_decision_threshold.py
examples/model_selection/plot_tuned_decision_threshold.py
""" ====================================================== Post-hoc tuning the cut-off point of decision function ====================================================== Once a binary classifier is trained, the :term:`predict` method outputs class label predictions corresponding to a thresholding of either the :term:`decision_function` or the :term:`predict_proba` output. The default threshold is defined as a posterior probability estimate of 0.5 or a decision score of 0.0. However, this default strategy may not be optimal for the task at hand. This example shows how to use the :class:`~sklearn.model_selection.TunedThresholdClassifierCV` to tune the decision threshold, depending on a metric of interest. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # The diabetes dataset # -------------------- # # To illustrate the tuning of the decision threshold, we will use the diabetes dataset. # This dataset is available on OpenML: https://www.openml.org/d/37. We use the # :func:`~sklearn.datasets.fetch_openml` function to fetch this dataset. from sklearn.datasets import fetch_openml diabetes = fetch_openml(data_id=37, as_frame=True, parser="pandas") data, target = diabetes.data, diabetes.target # %% # We look at the target to understand the type of problem we are dealing with. target.value_counts() # %% # We can see that we are dealing with a binary classification problem. Since the # labels are not encoded as 0 and 1, we make it explicit that we consider the class # labeled "tested_negative" as the negative class (which is also the most frequent) # and the class labeled "tested_positive" the positive as the positive class: neg_label, pos_label = target.value_counts().index # %% # We can also observe that this binary problem is slightly imbalanced where we have # around twice more samples from the negative class than from the positive class. When # it comes to evaluation, we should consider this aspect to interpret the results. 
# # Our vanilla classifier # ---------------------- # # We define a basic predictive model composed of a scaler followed by a logistic # regression classifier. from sklearn.linear_model import LogisticRegression from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler model = make_pipeline(StandardScaler(), LogisticRegression()) model # %% # We evaluate our model using cross-validation. We use the accuracy and the balanced # accuracy to report the performance of our model. The balanced accuracy is a metric # that is less sensitive to class imbalance and will allow us to put the accuracy # score in perspective. # # Cross-validation allows us to study the variance of the decision threshold across # different splits of the data. However, the dataset is rather small and it would be # detrimental to use more than 5 folds to evaluate the dispersion. Therefore, we use # a :class:`~sklearn.model_selection.RepeatedStratifiedKFold` where we apply several # repetitions of 5-fold cross-validation. import pandas as pd from sklearn.model_selection import RepeatedStratifiedKFold, cross_validate scoring = ["accuracy", "balanced_accuracy"] cv_scores = [ "train_accuracy", "test_accuracy", "train_balanced_accuracy", "test_balanced_accuracy", ] cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=10, random_state=42) cv_results_vanilla_model = pd.DataFrame( cross_validate( model, data, target, scoring=scoring, cv=cv, return_train_score=True, return_estimator=True, ) ) cv_results_vanilla_model[cv_scores].aggregate(["mean", "std"]).T # %% # Our predictive model succeeds to grasp the relationship between the data and the # target. The training and testing scores are close to each other, meaning that our # predictive model is not overfitting. We can also observe that the balanced accuracy is # lower than the accuracy, due to the class imbalance previously mentioned. 
# # For this classifier, we let the decision threshold, used convert the probability of # the positive class into a class prediction, to its default value: 0.5. However, this # threshold might not be optimal. If our interest is to maximize the balanced accuracy, # we should select another threshold that would maximize this metric. # # The :class:`~sklearn.model_selection.TunedThresholdClassifierCV` meta-estimator allows # to tune the decision threshold of a classifier given a metric of interest. # # Tuning the decision threshold # ----------------------------- # # We create a :class:`~sklearn.model_selection.TunedThresholdClassifierCV` and # configure it to maximize the balanced accuracy. We evaluate the model using the same # cross-validation strategy as previously. from sklearn.model_selection import TunedThresholdClassifierCV tuned_model = TunedThresholdClassifierCV(estimator=model, scoring="balanced_accuracy") cv_results_tuned_model = pd.DataFrame( cross_validate( tuned_model, data, target, scoring=scoring, cv=cv, return_train_score=True, return_estimator=True, ) ) cv_results_tuned_model[cv_scores].aggregate(["mean", "std"]).T # %% # In comparison with the vanilla model, we observe that the balanced accuracy score # increased. Of course, it comes at the cost of a lower accuracy score. It means that # our model is now more sensitive to the positive class but makes more mistakes on the # negative class. # # However, it is important to note that this tuned predictive model is internally the # same model as the vanilla model: they have the same fitted coefficients. 
import matplotlib.pyplot as plt vanilla_model_coef = pd.DataFrame( [est[-1].coef_.ravel() for est in cv_results_vanilla_model["estimator"]], columns=diabetes.feature_names, ) tuned_model_coef = pd.DataFrame( [est.estimator_[-1].coef_.ravel() for est in cv_results_tuned_model["estimator"]], columns=diabetes.feature_names, ) fig, ax = plt.subplots(ncols=2, figsize=(12, 4), sharex=True, sharey=True) vanilla_model_coef.boxplot(ax=ax[0]) ax[0].set_ylabel("Coefficient value") ax[0].set_title("Vanilla model") tuned_model_coef.boxplot(ax=ax[1]) ax[1].set_title("Tuned model") _ = fig.suptitle("Coefficients of the predictive models") # %% # Only the decision threshold of each model was changed during the cross-validation. decision_threshold = pd.Series( [est.best_threshold_ for est in cv_results_tuned_model["estimator"]], ) ax = decision_threshold.plot.kde() ax.axvline( decision_threshold.mean(), color="k", linestyle="--", label=f"Mean decision threshold: {decision_threshold.mean():.2f}", ) ax.set_xlabel("Decision threshold") ax.legend(loc="upper right") _ = ax.set_title( "Distribution of the decision threshold \nacross different cross-validation folds" ) # %% # In average, a decision threshold around 0.32 maximizes the balanced accuracy, which is # different from the default decision threshold of 0.5. Thus tuning the decision # threshold is particularly important when the output of the predictive model # is used to make decisions. Besides, the metric used to tune the decision threshold # should be chosen carefully. Here, we used the balanced accuracy but it might not be # the most appropriate metric for the problem at hand. The choice of the "right" metric # is usually problem-dependent and might require some domain knowledge. Refer to the # example entitled, # :ref:`sphx_glr_auto_examples_model_selection_plot_cost_sensitive_learning.py`, # for more details.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/model_selection/plot_randomized_search.py
examples/model_selection/plot_randomized_search.py
""" ========================================================================= Comparing randomized search and grid search for hyperparameter estimation ========================================================================= Compare randomized search and grid search for optimizing hyperparameters of a linear SVM with SGD training. All parameters that influence the learning are searched simultaneously (except for the number of estimators, which poses a time / quality tradeoff). The randomized search and the grid search explore exactly the same space of parameters. The result in parameter settings is quite similar, while the run time for randomized search is drastically lower. The performance is may slightly worse for the randomized search, and is likely due to a noise effect and would not carry over to a held-out test set. Note that in practice, one would not search over this many different parameters simultaneously using grid search, but pick only the ones deemed most important. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from time import time import numpy as np import scipy.stats as stats from sklearn.datasets import load_digits from sklearn.linear_model import SGDClassifier from sklearn.model_selection import GridSearchCV, RandomizedSearchCV # get some data X, y = load_digits(return_X_y=True, n_class=3) # build a classifier clf = SGDClassifier(loss="hinge", penalty="elasticnet", fit_intercept=True) # Utility function to report best scores def report(results, n_top=3): for i in range(1, n_top + 1): candidates = np.flatnonzero(results["rank_test_score"] == i) for candidate in candidates: print("Model with rank: {0}".format(i)) print( "Mean validation score: {0:.3f} (std: {1:.3f})".format( results["mean_test_score"][candidate], results["std_test_score"][candidate], ) ) print("Parameters: {0}".format(results["params"][candidate])) print("") # specify parameters and distributions to sample from param_dist = { "average": [True, 
False], "l1_ratio": stats.uniform(0, 1), "alpha": stats.loguniform(1e-2, 1e0), } # run randomized search n_iter_search = 15 random_search = RandomizedSearchCV( clf, param_distributions=param_dist, n_iter=n_iter_search ) start = time() random_search.fit(X, y) print( "RandomizedSearchCV took %.2f seconds for %d candidates parameter settings." % ((time() - start), n_iter_search) ) report(random_search.cv_results_) # use a full grid over all parameters param_grid = { "average": [True, False], "l1_ratio": np.linspace(0, 1, num=10), "alpha": np.power(10, np.arange(-2, 1, dtype=float)), } # run grid search grid_search = GridSearchCV(clf, param_grid=param_grid) start = time() grid_search.fit(X, y) print( "GridSearchCV took %.2f seconds for %d candidate parameter settings." % (time() - start, len(grid_search.cv_results_["params"])) ) report(grid_search.cv_results_)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/model_selection/plot_roc.py
examples/model_selection/plot_roc.py
""" ================================================== Multiclass Receiver Operating Characteristic (ROC) ================================================== This example describes the use of the Receiver Operating Characteristic (ROC) metric to evaluate the quality of multiclass classifiers. ROC curves typically feature true positive rate (TPR) on the Y axis, and false positive rate (FPR) on the X axis. This means that the top left corner of the plot is the "ideal" point - a FPR of zero, and a TPR of one. This is not very realistic, but it does mean that a larger area under the curve (AUC) is usually better. The "steepness" of ROC curves is also important, since it is ideal to maximize the TPR while minimizing the FPR. ROC curves are typically used in binary classification, where the TPR and FPR can be defined unambiguously. In the case of multiclass classification, a notion of TPR or FPR is obtained only after binarizing the output. This can be done in 2 different ways: - the One-vs-Rest scheme compares each class against all the others (assumed as one); - the One-vs-One scheme compares every unique pairwise combination of classes. In this example we explore both schemes and demo the concepts of micro and macro averaging as different ways of summarizing the information of the multiclass ROC curves. .. note:: See :ref:`sphx_glr_auto_examples_model_selection_plot_roc_crossval.py` for an extension of the present example estimating the variance of the ROC curves and their respective AUC. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Load and prepare data # ===================== # # We import the :ref:`iris_dataset` which contains 3 classes, each one # corresponding to a type of iris plant. One class is linearly separable from # the other 2; the latter are **not** linearly separable from each other. # # Here we binarize the output and add noisy features to make the problem harder. 
import numpy as np from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split iris = load_iris() target_names = iris.target_names X, y = iris.data, iris.target y = iris.target_names[y] random_state = np.random.RandomState(0) n_samples, n_features = X.shape n_classes = len(np.unique(y)) X = np.concatenate([X, random_state.randn(n_samples, 200 * n_features)], axis=1) ( X_train, X_test, y_train, y_test, ) = train_test_split(X, y, test_size=0.5, stratify=y, random_state=0) # %% # We train a :class:`~sklearn.linear_model.LogisticRegression` model which can # naturally handle multiclass problems, thanks to the use of the multinomial # formulation. from sklearn.linear_model import LogisticRegression classifier = LogisticRegression() y_score = classifier.fit(X_train, y_train).predict_proba(X_test) # %% # One-vs-Rest multiclass ROC # ========================== # # The One-vs-the-Rest (OvR) multiclass strategy, also known as one-vs-all, # consists in computing a ROC curve per each of the `n_classes`. In each step, a # given class is regarded as the positive class and the remaining classes are # regarded as the negative class as a bulk. # # .. note:: One should not confuse the OvR strategy used for the **evaluation** # of multiclass classifiers with the OvR strategy used to **train** a # multiclass classifier by fitting a set of binary classifiers (for instance # via the :class:`~sklearn.multiclass.OneVsRestClassifier` meta-estimator). # The OvR ROC evaluation can be used to scrutinize any kind of classification # models irrespectively of how they were trained (see :ref:`multiclass`). # # In this section we use a :class:`~sklearn.preprocessing.LabelBinarizer` to # binarize the target by one-hot-encoding in a OvR fashion. This means that the # target of shape (`n_samples`,) is mapped to a target of shape (`n_samples`, # `n_classes`). 
from sklearn.preprocessing import LabelBinarizer label_binarizer = LabelBinarizer().fit(y_train) y_onehot_test = label_binarizer.transform(y_test) y_onehot_test.shape # (n_samples, n_classes) # %% # We can as well easily check the encoding of a specific class: label_binarizer.transform(["virginica"]) # %% # ROC curve showing a specific class # ---------------------------------- # # In the following plot we show the resulting ROC curve when regarding the iris # flowers as either "virginica" (`class_id=2`) or "non-virginica" (the rest). class_of_interest = "virginica" class_id = np.flatnonzero(label_binarizer.classes_ == class_of_interest)[0] class_id # %% import matplotlib.pyplot as plt from sklearn.metrics import RocCurveDisplay display = RocCurveDisplay.from_predictions( y_onehot_test[:, class_id], y_score[:, class_id], name=f"{class_of_interest} vs the rest", curve_kwargs=dict(color="darkorange"), plot_chance_level=True, despine=True, ) _ = display.ax_.set( xlabel="False Positive Rate", ylabel="True Positive Rate", title="One-vs-Rest ROC curves:\nVirginica vs (Setosa & Versicolor)", ) # %% # ROC curve using micro-averaged OvR # ---------------------------------- # # Micro-averaging aggregates the contributions from all the classes (using # :func:`numpy.ravel`) to compute the average metrics as follows: # # :math:`TPR=\frac{\sum_{c}TP_c}{\sum_{c}(TP_c + FN_c)}` ; # # :math:`FPR=\frac{\sum_{c}FP_c}{\sum_{c}(FP_c + TN_c)}` . # # We can briefly demo the effect of :func:`numpy.ravel`: print(f"y_score:\n{y_score[0:2, :]}") print() print(f"y_score.ravel():\n{y_score[0:2, :].ravel()}") # %% # In a multi-class classification setup with highly imbalanced classes, # micro-averaging is preferable over macro-averaging. In such cases, one can # alternatively use a weighted macro-averaging, not demonstrated here. 
display = RocCurveDisplay.from_predictions( y_onehot_test.ravel(), y_score.ravel(), name="micro-average OvR", curve_kwargs=dict(color="darkorange"), plot_chance_level=True, despine=True, ) _ = display.ax_.set( xlabel="False Positive Rate", ylabel="True Positive Rate", title="Micro-averaged One-vs-Rest\nReceiver Operating Characteristic", ) # %% # In the case where the main interest is not the plot but the ROC-AUC score # itself, we can reproduce the value shown in the plot using # :class:`~sklearn.metrics.roc_auc_score`. from sklearn.metrics import roc_auc_score micro_roc_auc_ovr = roc_auc_score( y_test, y_score, multi_class="ovr", average="micro", ) print(f"Micro-averaged One-vs-Rest ROC AUC score:\n{micro_roc_auc_ovr:.2f}") # %% # This is equivalent to computing the ROC curve with # :class:`~sklearn.metrics.roc_curve` and then the area under the curve with # :class:`~sklearn.metrics.auc` for the raveled true and predicted classes. from sklearn.metrics import auc, roc_curve # store the fpr, tpr, and roc_auc for all averaging strategies fpr, tpr, roc_auc = dict(), dict(), dict() # Compute micro-average ROC curve and ROC area fpr["micro"], tpr["micro"], _ = roc_curve(y_onehot_test.ravel(), y_score.ravel()) roc_auc["micro"] = auc(fpr["micro"], tpr["micro"]) print(f"Micro-averaged One-vs-Rest ROC AUC score:\n{roc_auc['micro']:.2f}") # %% # .. note:: By default, the computation of the ROC curve adds a single point at # the maximal false positive rate by using linear interpolation and the # McClish correction [:doi:`Analyzing a portion of the ROC curve Med Decis # Making. 1989 Jul-Sep; 9(3):190-5.<10.1177/0272989x8900900307>`]. # # ROC curve using the OvR macro-average # ------------------------------------- # # Obtaining the macro-average requires computing the metric independently for # each class and then taking the average over them, hence treating all classes # equally a priori. 
We first aggregate the true/false positive rates per class: # # :math:`TPR=\frac{1}{C}\sum_{c}\frac{TP_c}{TP_c + FN_c}` ; # # :math:`FPR=\frac{1}{C}\sum_{c}\frac{FP_c}{FP_c + TN_c}` . # # where `C` is the total number of classes. for i in range(n_classes): fpr[i], tpr[i], _ = roc_curve(y_onehot_test[:, i], y_score[:, i]) roc_auc[i] = auc(fpr[i], tpr[i]) fpr_grid = np.linspace(0.0, 1.0, 1000) # Interpolate all ROC curves at these points mean_tpr = np.zeros_like(fpr_grid) for i in range(n_classes): mean_tpr += np.interp(fpr_grid, fpr[i], tpr[i]) # linear interpolation # Average it and compute AUC mean_tpr /= n_classes fpr["macro"] = fpr_grid tpr["macro"] = mean_tpr roc_auc["macro"] = auc(fpr["macro"], tpr["macro"]) print(f"Macro-averaged One-vs-Rest ROC AUC score:\n{roc_auc['macro']:.2f}") # %% # This computation is equivalent to simply calling macro_roc_auc_ovr = roc_auc_score( y_test, y_score, multi_class="ovr", average="macro", ) print(f"Macro-averaged One-vs-Rest ROC AUC score:\n{macro_roc_auc_ovr:.2f}") # %% # Plot all OvR ROC curves together # -------------------------------- from itertools import cycle fig, ax = plt.subplots(figsize=(6, 6)) plt.plot( fpr["micro"], tpr["micro"], label=f"micro-average ROC curve (AUC = {roc_auc['micro']:.2f})", color="deeppink", linestyle=":", linewidth=4, ) plt.plot( fpr["macro"], tpr["macro"], label=f"macro-average ROC curve (AUC = {roc_auc['macro']:.2f})", color="navy", linestyle=":", linewidth=4, ) colors = cycle(["aqua", "darkorange", "cornflowerblue"]) for class_id, color in zip(range(n_classes), colors): RocCurveDisplay.from_predictions( y_onehot_test[:, class_id], y_score[:, class_id], name=f"ROC curve for {target_names[class_id]}", curve_kwargs=dict(color=color), ax=ax, plot_chance_level=(class_id == 2), despine=True, ) _ = ax.set( xlabel="False Positive Rate", ylabel="True Positive Rate", title="Extension of Receiver Operating Characteristic\nto One-vs-Rest multiclass", ) # %% # One-vs-One multiclass ROC # 
========================= # # The One-vs-One (OvO) multiclass strategy consists in fitting one classifier # per class pair. Since it requires to train `n_classes` * (`n_classes` - 1) / 2 # classifiers, this method is usually slower than One-vs-Rest due to its # O(`n_classes` ^2) complexity. # # In this section, we demonstrate the macro-averaged AUC using the OvO scheme # for the 3 possible combinations in the :ref:`iris_dataset`: "setosa" vs # "versicolor", "versicolor" vs "virginica" and "virginica" vs "setosa". Notice # that micro-averaging is not defined for the OvO scheme. # # ROC curve using the OvO macro-average # ------------------------------------- # # In the OvO scheme, the first step is to identify all possible unique # combinations of pairs. The computation of scores is done by treating one of # the elements in a given pair as the positive class and the other element as # the negative class, then re-computing the score by inversing the roles and # taking the mean of both scores. 
from itertools import combinations pair_list = list(combinations(np.unique(y), 2)) print(pair_list) # %% pair_scores = [] mean_tpr = dict() for ix, (label_a, label_b) in enumerate(pair_list): a_mask = y_test == label_a b_mask = y_test == label_b ab_mask = np.logical_or(a_mask, b_mask) a_true = a_mask[ab_mask] b_true = b_mask[ab_mask] idx_a = np.flatnonzero(label_binarizer.classes_ == label_a)[0] idx_b = np.flatnonzero(label_binarizer.classes_ == label_b)[0] fpr_a, tpr_a, _ = roc_curve(a_true, y_score[ab_mask, idx_a]) fpr_b, tpr_b, _ = roc_curve(b_true, y_score[ab_mask, idx_b]) mean_tpr[ix] = np.zeros_like(fpr_grid) mean_tpr[ix] += np.interp(fpr_grid, fpr_a, tpr_a) mean_tpr[ix] += np.interp(fpr_grid, fpr_b, tpr_b) mean_tpr[ix] /= 2 mean_score = auc(fpr_grid, mean_tpr[ix]) pair_scores.append(mean_score) fig, ax = plt.subplots(figsize=(6, 6)) plt.plot( fpr_grid, mean_tpr[ix], label=f"Mean {label_a} vs {label_b} (AUC = {mean_score:.2f})", linestyle=":", linewidth=4, ) RocCurveDisplay.from_predictions( a_true, y_score[ab_mask, idx_a], ax=ax, name=f"{label_a} as positive class", ) RocCurveDisplay.from_predictions( b_true, y_score[ab_mask, idx_b], ax=ax, name=f"{label_b} as positive class", plot_chance_level=True, despine=True, ) ax.set( xlabel="False Positive Rate", ylabel="True Positive Rate", title=f"{target_names[idx_a]} vs {label_b} ROC curves", ) print(f"Macro-averaged One-vs-One ROC AUC score:\n{np.average(pair_scores):.2f}") # %% # One can also assert that the macro-average we computed "by hand" is equivalent # to the implemented `average="macro"` option of the # :class:`~sklearn.metrics.roc_auc_score` function. 
macro_roc_auc_ovo = roc_auc_score( y_test, y_score, multi_class="ovo", average="macro", ) print(f"Macro-averaged One-vs-One ROC AUC score:\n{macro_roc_auc_ovo:.2f}") # %% # Plot all OvO ROC curves together # -------------------------------- ovo_tpr = np.zeros_like(fpr_grid) fig, ax = plt.subplots(figsize=(6, 6)) for ix, (label_a, label_b) in enumerate(pair_list): ovo_tpr += mean_tpr[ix] ax.plot( fpr_grid, mean_tpr[ix], label=f"Mean {label_a} vs {label_b} (AUC = {pair_scores[ix]:.2f})", ) ovo_tpr /= sum(1 for pair in enumerate(pair_list)) ax.plot( fpr_grid, ovo_tpr, label=f"One-vs-One macro-average (AUC = {macro_roc_auc_ovo:.2f})", linestyle=":", linewidth=4, ) ax.plot([0, 1], [0, 1], "k--", label="Chance level (AUC = 0.5)") _ = ax.set( xlabel="False Positive Rate", ylabel="True Positive Rate", title="Extension of Receiver Operating Characteristic\nto One-vs-One multiclass", aspect="equal", xlim=(-0.01, 1.01), ylim=(-0.01, 1.01), ) # %% # We confirm that the classes "versicolor" and "virginica" are not well # identified by a linear classifier. Notice that the "virginica"-vs-the-rest # ROC-AUC score (0.77) is between the OvO ROC-AUC scores for "versicolor" vs # "virginica" (0.64) and "setosa" vs "virginica" (0.90). Indeed, the OvO # strategy gives additional information on the confusion between a pair of # classes, at the expense of computational cost when the number of classes # is large. # # The OvO strategy is recommended if the user is mainly interested in correctly # identifying a particular class or subset of classes, whereas evaluating the # global performance of a classifier can still be summarized via a given # averaging strategy. # # When dealing with imbalanced datasets, choosing the appropriate metric based on # the business context or problem you are addressing is crucial. # It is also essential to select an appropriate averaging method (micro vs. 
macro) # depending on the desired outcome: # # - Micro-averaging aggregates metrics across all instances, treating each # individual instance equally, regardless of its class. This approach is useful # when evaluating overall performance, but note that it can be dominated by # the majority class in imbalanced datasets. # # - Macro-averaging calculates metrics for each class independently and then # averages them, giving equal weight to each class. This is particularly useful # when you want under-represented classes to be considered as important as highly # populated classes.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/model_selection/plot_train_error_vs_test_error.py
examples/model_selection/plot_train_error_vs_test_error.py
""" ========================================================= Effect of model regularization on training and test error ========================================================= In this example, we evaluate the impact of the regularization parameter in a linear model called :class:`~sklearn.linear_model.ElasticNet`. To carry out this evaluation, we use a validation curve using :class:`~sklearn.model_selection.ValidationCurveDisplay`. This curve shows the training and test scores of the model for different values of the regularization parameter. Once we identify the optimal regularization parameter, we compare the true and estimated coefficients of the model to determine if the model is able to recover the coefficients from the noisy input data. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Generate sample data # -------------------- # # We generate a regression dataset that contains many features relative to the # number of samples. However, only 10% of the features are informative. In this context, # linear models exposing L1 penalization are commonly used to recover a sparse # set of coefficients. from sklearn.datasets import make_regression from sklearn.model_selection import train_test_split n_samples_train, n_samples_test, n_features = 150, 300, 500 X, y, true_coef = make_regression( n_samples=n_samples_train + n_samples_test, n_features=n_features, n_informative=50, shuffle=False, noise=1.0, coef=True, random_state=42, ) X_train, X_test, y_train, y_test = train_test_split( X, y, train_size=n_samples_train, test_size=n_samples_test, shuffle=False ) # %% # Model definition # ---------------- # # Here, we do not use a model that only exposes an L1 penalty. Instead, we use # an :class:`~sklearn.linear_model.ElasticNet` model that exposes both L1 and L2 # penalties. # # We fix the `l1_ratio` parameter such that the solution found by the model is still # sparse. 
Therefore, this type of model tries to find a sparse solution but at the same # time also tries to shrink all coefficients towards zero. # # In addition, we force the coefficients of the model to be positive since we know that # `make_regression` generates a response with a positive signal. So we use this # pre-knowledge to get a better model. from sklearn.linear_model import ElasticNet enet = ElasticNet(l1_ratio=0.9, positive=True, max_iter=10_000) # %% # Evaluate the impact of the regularization parameter # --------------------------------------------------- # # To evaluate the impact of the regularization parameter, we use a validation # curve. This curve shows the training and test scores of the model for different # values of the regularization parameter. # # The regularization `alpha` is a parameter applied to the coefficients of the model: # when it tends to zero, no regularization is applied and the model tries to fit the # training data with the least amount of error. However, it leads to overfitting when # features are noisy. When `alpha` increases, the model coefficients are constrained, # and thus the model cannot fit the training data as closely, avoiding overfitting. # However, if too much regularization is applied, the model underfits the data and # is not able to properly capture the signal. # # The validation curve helps in finding a good trade-off between both extremes: the # model is not regularized and thus flexible enough to fit the signal, but not too # flexible to overfit. The :class:`~sklearn.model_selection.ValidationCurveDisplay` # allows us to display the training and validation scores across a range of alpha # values. 
import numpy as np from sklearn.model_selection import ValidationCurveDisplay alphas = np.logspace(-5, 1, 60) disp = ValidationCurveDisplay.from_estimator( enet, X_train, y_train, param_name="alpha", param_range=alphas, scoring="r2", n_jobs=2, score_type="both", ) disp.ax_.set( title=r"Validation Curve for ElasticNet (R$^2$ Score)", xlabel=r"alpha (regularization strength)", ylabel="R$^2$ Score", ) test_scores_mean = disp.test_scores.mean(axis=1) idx_avg_max_test_score = np.argmax(test_scores_mean) disp.ax_.vlines( alphas[idx_avg_max_test_score], disp.ax_.get_ylim()[0], test_scores_mean[idx_avg_max_test_score], color="k", linewidth=2, linestyle="--", label=f"Optimum on test\n$\\alpha$ = {alphas[idx_avg_max_test_score]:.2e}", ) _ = disp.ax_.legend(loc="lower right") # %% # To find the optimal regularization parameter, we can select the value of `alpha` # that maximizes the validation score. # # Coefficients comparison # ----------------------- # # Now that we have identified the optimal regularization parameter, we can compare the # true coefficients and the estimated coefficients. # # First, let's set the regularization parameter to the optimal value and fit the # model on the training data. In addition, we'll show the test score for this model. enet.set_params(alpha=alphas[idx_avg_max_test_score]).fit(X_train, y_train) print( f"Test score: {enet.score(X_test, y_test):.3f}", ) # %% # Now, we plot the true coefficients and the estimated coefficients. 
import matplotlib.pyplot as plt fig, axs = plt.subplots(ncols=2, figsize=(12, 6), sharex=True, sharey=True) for ax, coef, title in zip(axs, [true_coef, enet.coef_], ["True", "Model"]): ax.stem(coef) ax.set( title=f"{title} Coefficients", xlabel="Feature Index", ylabel="Coefficient Value", ) fig.suptitle( "Comparison of the coefficients of the true generative model and \n" "the estimated elastic net coefficients" ) plt.show() # %% # While the original coefficients are sparse, the estimated coefficients are not # as sparse. The reason is that we fixed the `l1_ratio` parameter to 0.9. We could # force the model to get a sparser solution by increasing the `l1_ratio` parameter. # # However, we observed that for the estimated coefficients that are close to zero in # the true generative model, our model shrinks them towards zero. So we don't recover # the true coefficients, but we get a sensible outcome in line with the performance # obtained on the test set.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/model_selection/plot_learning_curve.py
examples/model_selection/plot_learning_curve.py
""" ========================================================= Plotting Learning Curves and Checking Models' Scalability ========================================================= In this example, we show how to use the class :class:`~sklearn.model_selection.LearningCurveDisplay` to easily plot learning curves. In addition, we give an interpretation to the learning curves obtained for a naive Bayes and SVM classifiers. Then, we explore and draw some conclusions about the scalability of these predictive models by looking at their computational cost and not only at their statistical accuracy. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Learning Curve # ============== # # Learning curves show the effect of adding more samples during the training # process. The effect is depicted by checking the statistical performance of # the model in terms of training score and testing score. # # Here, we compute the learning curve of a naive Bayes classifier and an SVM # classifier with an RBF kernel using the digits dataset. from sklearn.datasets import load_digits from sklearn.naive_bayes import GaussianNB from sklearn.svm import SVC X, y = load_digits(return_X_y=True) naive_bayes = GaussianNB() svc = SVC(kernel="rbf", gamma=0.001) # %% # The :meth:`~sklearn.model_selection.LearningCurveDisplay.from_estimator` # displays the learning curve given the dataset and the predictive model to # analyze. To get an estimate of the scores uncertainty, this method uses # a cross-validation procedure. 
import matplotlib.pyplot as plt import numpy as np from sklearn.model_selection import LearningCurveDisplay, ShuffleSplit fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 6), sharey=True) common_params = { "X": X, "y": y, "train_sizes": np.linspace(0.1, 1.0, 5), "cv": ShuffleSplit(n_splits=50, test_size=0.2, random_state=0), "score_type": "both", "n_jobs": 4, "line_kw": {"marker": "o"}, "std_display_style": "fill_between", "score_name": "Accuracy", } for ax_idx, estimator in enumerate([naive_bayes, svc]): LearningCurveDisplay.from_estimator(estimator, **common_params, ax=ax[ax_idx]) handles, label = ax[ax_idx].get_legend_handles_labels() ax[ax_idx].legend(handles[:2], ["Training Score", "Test Score"]) ax[ax_idx].set_title(f"Learning Curve for {estimator.__class__.__name__}") # %% # We first analyze the learning curve of the naive Bayes classifier. Its shape # can be found in more complex datasets very often: the training score is very # high when using few samples for training and decreases when increasing the # number of samples, whereas the test score is very low at the beginning and # then increases when adding samples. The training and test scores become more # realistic when all the samples are used for training. # # We see another typical learning curve for the SVM classifier with RBF kernel. # The training score remains high regardless of the size of the training set. # On the other hand, the test score increases with the size of the training # dataset. Indeed, it increases up to a point where it reaches a plateau. # Observing such a plateau is an indication that it might not be useful to # acquire new data to train the model since the generalization performance of # the model will not increase anymore. # # Complexity analysis # =================== # # In addition to these learning curves, it is also possible to look at the # scalability of the predictive models in terms of training and scoring times. 
# # The :class:`~sklearn.model_selection.LearningCurveDisplay` class does not # provide such information. We need to resort to the # :func:`~sklearn.model_selection.learning_curve` function instead and make # the plot manually. # %% from sklearn.model_selection import learning_curve common_params = { "X": X, "y": y, "train_sizes": np.linspace(0.1, 1.0, 5), "cv": ShuffleSplit(n_splits=50, test_size=0.2, random_state=0), "n_jobs": 4, "return_times": True, } train_sizes, _, test_scores_nb, fit_times_nb, score_times_nb = learning_curve( naive_bayes, **common_params ) train_sizes, _, test_scores_svm, fit_times_svm, score_times_svm = learning_curve( svc, **common_params ) # %% fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(16, 12), sharex=True) for ax_idx, (fit_times, score_times, estimator) in enumerate( zip( [fit_times_nb, fit_times_svm], [score_times_nb, score_times_svm], [naive_bayes, svc], ) ): # scalability regarding the fit time ax[0, ax_idx].plot(train_sizes, fit_times.mean(axis=1), "o-") ax[0, ax_idx].fill_between( train_sizes, fit_times.mean(axis=1) - fit_times.std(axis=1), fit_times.mean(axis=1) + fit_times.std(axis=1), alpha=0.3, ) ax[0, ax_idx].set_ylabel("Fit time (s)") ax[0, ax_idx].set_title( f"Scalability of the {estimator.__class__.__name__} classifier" ) # scalability regarding the score time ax[1, ax_idx].plot(train_sizes, score_times.mean(axis=1), "o-") ax[1, ax_idx].fill_between( train_sizes, score_times.mean(axis=1) - score_times.std(axis=1), score_times.mean(axis=1) + score_times.std(axis=1), alpha=0.3, ) ax[1, ax_idx].set_ylabel("Score time (s)") ax[1, ax_idx].set_xlabel("Number of training samples") # %% # We see that the scalability of the SVM and naive Bayes classifiers is very # different. The SVM classifier complexity at fit and score time increases # rapidly with the number of samples. 
Indeed, it is known that the fit time # complexity of this classifier is more than quadratic with the number of # samples which makes it hard to scale to dataset with more than a few # 10,000 samples. In contrast, the naive Bayes classifier scales much better # with a lower complexity at fit and score time. # # Subsequently, we can check the trade-off between increased training time and # the cross-validation score. # %% fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(16, 6)) for ax_idx, (fit_times, test_scores, estimator) in enumerate( zip( [fit_times_nb, fit_times_svm], [test_scores_nb, test_scores_svm], [naive_bayes, svc], ) ): ax[ax_idx].plot(fit_times.mean(axis=1), test_scores.mean(axis=1), "o-") ax[ax_idx].fill_between( fit_times.mean(axis=1), test_scores.mean(axis=1) - test_scores.std(axis=1), test_scores.mean(axis=1) + test_scores.std(axis=1), alpha=0.3, ) ax[ax_idx].set_ylabel("Accuracy") ax[ax_idx].set_xlabel("Fit time (s)") ax[ax_idx].set_title( f"Performance of the {estimator.__class__.__name__} classifier" ) plt.show() # %% # In these plots, we can look for the inflection point for which the # cross-validation score does not increase anymore and only the training time # increases.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/model_selection/plot_grid_search_digits.py
examples/model_selection/plot_grid_search_digits.py
""" ============================================================ Custom refit strategy of a grid search with cross-validation ============================================================ This examples shows how a classifier is optimized by cross-validation, which is done using the :class:`~sklearn.model_selection.GridSearchCV` object on a development set that comprises only half of the available labeled data. The performance of the selected hyper-parameters and trained model is then measured on a dedicated evaluation set that was not used during the model selection step. More details on tools available for model selection can be found in the sections on :ref:`cross_validation` and :ref:`grid_search`. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # The dataset # ----------- # # We will work with the `digits` dataset. The goal is to classify handwritten # digits images. # We transform the problem into a binary classification for easier # understanding: the goal is to identify whether a digit is `8` or not. from sklearn import datasets digits = datasets.load_digits() # %% # In order to train a classifier on images, we need to flatten them into vectors. # Each image of 8 by 8 pixels needs to be transformed to a vector of 64 pixels. # Thus, we will get a final data array of shape `(n_images, n_pixels)`. n_samples = len(digits.images) X = digits.images.reshape((n_samples, -1)) y = digits.target == 8 print( f"The number of images is {X.shape[0]} and each image contains {X.shape[1]} pixels" ) # %% # As presented in the introduction, the data will be split into a training # and a testing set of equal size. from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0) # %% # Define our grid-search strategy # ------------------------------- # # We will select a classifier by searching the best hyper-parameters on folds # of the training set. 
To do this, we need to define # the scores to select the best candidate. scores = ["precision", "recall"] # %% # We can also define a function to be passed to the `refit` parameter of the # :class:`~sklearn.model_selection.GridSearchCV` instance. It will implement the # custom strategy to select the best candidate from the `cv_results_` attribute # of the :class:`~sklearn.model_selection.GridSearchCV`. Once the candidate is # selected, it is automatically refitted by the # :class:`~sklearn.model_selection.GridSearchCV` instance. # # Here, the strategy is to short-list the models which are the best in terms of # precision and recall. From the selected models, we finally select the fastest # model at predicting. Notice that these custom choices are completely # arbitrary. import pandas as pd def print_dataframe(filtered_cv_results): """Pretty print for filtered dataframe""" for mean_precision, std_precision, mean_recall, std_recall, params in zip( filtered_cv_results["mean_test_precision"], filtered_cv_results["std_test_precision"], filtered_cv_results["mean_test_recall"], filtered_cv_results["std_test_recall"], filtered_cv_results["params"], ): print( f"precision: {mean_precision:0.3f} (±{std_precision:0.03f})," f" recall: {mean_recall:0.3f} (±{std_recall:0.03f})," f" for {params}" ) print() def refit_strategy(cv_results): """Define the strategy to select the best estimator. The strategy defined here is to filter-out all results below a precision threshold of 0.98, rank the remaining by recall and keep all models with one standard deviation of the best by recall. Once these models are selected, we can select the fastest model to predict. Parameters ---------- cv_results : dict of numpy (masked) ndarrays CV results as returned by the `GridSearchCV`. Returns ------- best_index : int The index of the best estimator as it appears in `cv_results`. 
""" # print the info about the grid-search for the different scores precision_threshold = 0.98 cv_results_ = pd.DataFrame(cv_results) print("All grid-search results:") print_dataframe(cv_results_) # Filter-out all results below the threshold high_precision_cv_results = cv_results_[ cv_results_["mean_test_precision"] > precision_threshold ] print(f"Models with a precision higher than {precision_threshold}:") print_dataframe(high_precision_cv_results) high_precision_cv_results = high_precision_cv_results[ [ "mean_score_time", "mean_test_recall", "std_test_recall", "mean_test_precision", "std_test_precision", "rank_test_recall", "rank_test_precision", "params", ] ] # Select the most performant models in terms of recall # (within 1 sigma from the best) best_recall_std = high_precision_cv_results["mean_test_recall"].std() best_recall = high_precision_cv_results["mean_test_recall"].max() best_recall_threshold = best_recall - best_recall_std high_recall_cv_results = high_precision_cv_results[ high_precision_cv_results["mean_test_recall"] > best_recall_threshold ] print( "Out of the previously selected high precision models, we keep all the\n" "the models within one standard deviation of the highest recall model:" ) print_dataframe(high_recall_cv_results) # From the best candidates, select the fastest model to predict fastest_top_recall_high_precision_index = high_recall_cv_results[ "mean_score_time" ].idxmin() print( "\nThe selected final model is the fastest to predict out of the previously\n" "selected subset of best models based on precision and recall.\n" "Its scoring time is:\n\n" f"{high_recall_cv_results.loc[fastest_top_recall_high_precision_index]}" ) return fastest_top_recall_high_precision_index # %% # # Tuning hyper-parameters # ----------------------- # # Once we defined our strategy to select the best model, we define the values # of the hyper-parameters and create the grid-search instance: from sklearn.model_selection import GridSearchCV from sklearn.svm 
import SVC tuned_parameters = [ {"kernel": ["rbf"], "gamma": [1e-3, 1e-4], "C": [1, 10, 100, 1000]}, {"kernel": ["linear"], "C": [1, 10, 100, 1000]}, ] grid_search = GridSearchCV( SVC(), tuned_parameters, scoring=scores, refit=refit_strategy ) grid_search.fit(X_train, y_train) # %% # # The parameters selected by the grid-search with our custom strategy are: grid_search.best_params_ # %% # # Finally, we evaluate the fine-tuned model on the left-out evaluation set: the # `grid_search` object **has automatically been refit** on the full training # set with the parameters selected by our custom refit strategy. # # We can use the classification report to compute standard classification # metrics on the left-out set: from sklearn.metrics import classification_report y_pred = grid_search.predict(X_test) print(classification_report(y_test, y_pred)) # %% # .. note:: # The problem is too easy: the hyperparameter plateau is too flat and the # output model is the same for precision and recall with ties in quality.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/model_selection/plot_cost_sensitive_learning.py
examples/model_selection/plot_cost_sensitive_learning.py
""" ============================================================== Post-tuning the decision threshold for cost-sensitive learning ============================================================== Once a classifier is trained, the output of the :term:`predict` method outputs class label predictions corresponding to a thresholding of either the :term:`decision_function` or the :term:`predict_proba` output. For a binary classifier, the default threshold is defined as a posterior probability estimate of 0.5 or a decision score of 0.0. However, this default strategy is most likely not optimal for the task at hand. Here, we use the "Statlog" German credit dataset [1]_ to illustrate a use case. In this dataset, the task is to predict whether a person has a "good" or "bad" credit. In addition, a cost-matrix is provided that specifies the cost of misclassification. Specifically, misclassifying a "bad" credit as "good" is five times more costly on average than misclassifying a "good" credit as "bad". We use the :class:`~sklearn.model_selection.TunedThresholdClassifierCV` to select the cut-off point of the decision function that minimizes the provided business cost. In the second part of the example, we further extend this approach by considering the problem of fraud detection in credit card transactions: in this case, the business metric depends on the amount of each individual transaction. .. rubric :: References .. [1] "Statlog (German Credit Data) Data Set", UCI Machine Learning Repository, `Link <https://archive.ics.uci.edu/ml/datasets/Statlog+%28German+Credit+Data%29>`_. .. [2] `Charles Elkan, "The Foundations of Cost-Sensitive Learning", International joint conference on artificial intelligence. Vol. 17. No. 1. Lawrence Erlbaum Associates Ltd, 2001. 
<https://cseweb.ucsd.edu/~elkan/rescale.pdf>`_ """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Cost-sensitive learning with constant gains and costs # ----------------------------------------------------- # # In this first section, we illustrate the use of the # :class:`~sklearn.model_selection.TunedThresholdClassifierCV` in a setting of # cost-sensitive learning when the gains and costs associated to each entry of the # confusion matrix are constant. We use the problematic presented in [2]_ using the # "Statlog" German credit dataset [1]_. # # "Statlog" German credit dataset # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # # We fetch the German credit dataset from OpenML. import sklearn from sklearn.datasets import fetch_openml sklearn.set_config(transform_output="pandas") german_credit = fetch_openml(data_id=31, as_frame=True, parser="pandas") X, y = german_credit.data, german_credit.target # %% # We check the feature types available in `X`. X.info() # %% # Many features are categorical and usually string-encoded. We need to encode # these categories when we develop our predictive model. Let's check the targets. y.value_counts() # %% # Another observation is that the dataset is imbalanced. We would need to be careful # when evaluating our predictive model and use a family of metrics that are adapted # to this setting. # # In addition, we observe that the target is string-encoded. Some metrics # (e.g. precision and recall) require to provide the label of interest also called # the "positive label". Here, we define that our goal is to predict whether or not # a sample is a "bad" credit. pos_label, neg_label = "bad", "good" # %% # To carry our analysis, we split our dataset using a single stratified split. from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=0) # %% # We are ready to design our predictive model and the associated evaluation strategy. 
# # Evaluation metrics # ^^^^^^^^^^^^^^^^^^ # # In this section, we define a set of metrics that we use later. To see # the effect of tuning the cut-off point, we evaluate the predictive model using # the Receiver Operating Characteristic (ROC) curve and the Precision-Recall curve. # The values reported on these plots are therefore the true positive rate (TPR), # also known as the recall or the sensitivity, and the false positive rate (FPR), # which is equal to one minus the specificity, for the ROC curve and the precision and recall for # the Precision-Recall curve. # # From these four metrics, scikit-learn does not provide a scorer for the FPR. We # therefore need to define a small custom function to compute it. from sklearn.metrics import confusion_matrix def fpr_score(y, y_pred, neg_label, pos_label): cm = confusion_matrix(y, y_pred, labels=[neg_label, pos_label]) tn, fp, _, _ = cm.ravel() tnr = tn / (tn + fp) return 1 - tnr # %% # As previously stated, the "positive label" is not defined as the value "1" and calling # some of the metrics with this non-standard value raises an error. We need to # provide the indication of the "positive label" to the metrics. # # We therefore need to define a scikit-learn scorer using # :func:`~sklearn.metrics.make_scorer` where the information is passed. We store all # the custom scorers in a dictionary. To use them, we need to pass the fitted model, # the data and the target on which we want to evaluate the predictive model. from sklearn.metrics import make_scorer, precision_score, recall_score tpr_score = recall_score # TPR and recall are the same metric scoring = { "precision": make_scorer(precision_score, pos_label=pos_label), "recall": make_scorer(recall_score, pos_label=pos_label), "fpr": make_scorer(fpr_score, neg_label=neg_label, pos_label=pos_label), "tpr": make_scorer(tpr_score, pos_label=pos_label), } # %% # In addition, the original research [1]_ defines a custom business metric.
We # call a "business metric" any metric function that aims at quantifying how the # predictions (correct or wrong) might impact the business value of deploying a # given machine learning model in a specific application context. For our # credit prediction task, the authors provide a custom cost-matrix which # encodes that classifying a "bad" credit as "good" is 5 times more costly on # average than the opposite: it is less costly for the financing institution to # not grant a credit to a potential customer that will not default (and # therefore miss a good customer that would have otherwise both reimbursed the # credit and paid interests) than to grant a credit to a customer that will # default. # # We define a python function that weighs the confusion matrix and returns the # overall cost. # The rows of the confusion matrix hold the counts of observed classes # while the columns hold counts of predicted classes. Recall that here we # consider "bad" as the positive class (second row and column). # Scikit-learn model selection tools expect that we follow a convention # that "higher" means "better", hence the following gain matrix assigns # negative gains (costs) to the two kinds of prediction errors: # # - a gain of `-1` for each false positive ("good" credit labeled as "bad"), # - a gain of `-5` for each false negative ("bad" credit labeled as "good"), # - a `0` gain for true positives and true negatives. # # Note that theoretically, given that our model is calibrated and our data # set representative and large enough, we do not need to tune the # threshold, but can safely set it to 1/5 of the cost ratio, as stated by # Eq. (2) in Elkan's paper [2]_. 
import numpy as np def credit_gain_score(y, y_pred, neg_label, pos_label): cm = confusion_matrix(y, y_pred, labels=[neg_label, pos_label]) gain_matrix = np.array( [ [0, -1], # -1 gain for false positives [-5, 0], # -5 gain for false negatives ] ) return np.sum(cm * gain_matrix) scoring["credit_gain"] = make_scorer( credit_gain_score, neg_label=neg_label, pos_label=pos_label ) # %% # Vanilla predictive model # ^^^^^^^^^^^^^^^^^^^^^^^^ # # We use :class:`~sklearn.ensemble.HistGradientBoostingClassifier` as a predictive model # that natively handles categorical features and missing values. from sklearn.ensemble import HistGradientBoostingClassifier model = HistGradientBoostingClassifier( categorical_features="from_dtype", random_state=0 ).fit(X_train, y_train) model # %% # We evaluate the performance of our predictive model using the ROC and Precision-Recall # curves. import matplotlib.pyplot as plt from sklearn.metrics import PrecisionRecallDisplay, RocCurveDisplay fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(14, 6)) PrecisionRecallDisplay.from_estimator( model, X_test, y_test, pos_label=pos_label, ax=axs[0], name="GBDT" ) axs[0].plot( scoring["recall"](model, X_test, y_test), scoring["precision"](model, X_test, y_test), marker="o", markersize=10, color="tab:blue", label="Default cut-off point at a probability of 0.5", ) axs[0].set_title("Precision-Recall curve") axs[0].legend() RocCurveDisplay.from_estimator( model, X_test, y_test, pos_label=pos_label, ax=axs[1], name="GBDT", plot_chance_level=True, ) axs[1].plot( scoring["fpr"](model, X_test, y_test), scoring["tpr"](model, X_test, y_test), marker="o", markersize=10, color="tab:blue", label="Default cut-off point at a probability of 0.5", ) axs[1].set_title("ROC curve") axs[1].legend() _ = fig.suptitle("Evaluation of the vanilla GBDT model") # %% # We recall that these curves give insights on the statistical performance of the # predictive model for different cut-off points. 
For the Precision-Recall curve, the # reported metrics are the precision and recall and for the ROC curve, the reported # metrics are the TPR (same as recall) and FPR. # # Here, the different cut-off points correspond to different levels of posterior # probability estimates ranging between 0 and 1. By default, `model.predict` uses a # cut-off point at a probability estimate of 0.5. The metrics for such a cut-off point # are reported with the blue dot on the curves: it corresponds to the statistical # performance of the model when using `model.predict`. # # However, we recall that the original aim was to minimize the cost (or maximize the # gain) as defined by the business metric. We can compute the value of the business # metric: print(f"Business defined metric: {scoring['credit_gain'](model, X_test, y_test)}") # %% # At this stage we don't know if any other cut-off can lead to a greater gain. To find # the optimal one, we need to compute the cost-gain using the business metric for all # possible cut-off points and choose the best. This strategy can be quite tedious to # implement by hand, but the # :class:`~sklearn.model_selection.TunedThresholdClassifierCV` class is here to help us. # It automatically computes the cost-gain for all possible cut-off points and optimizes # for the `scoring`. # # .. _cost_sensitive_learning_example: # # Tuning the cut-off point # ^^^^^^^^^^^^^^^^^^^^^^^^ # # We use :class:`~sklearn.model_selection.TunedThresholdClassifierCV` to tune the # cut-off point. We need to provide the business metric to optimize as well as the # positive label. Internally, the optimum cut-off point is chosen such that it maximizes # the business metric via cross-validation. By default a 5-fold stratified # cross-validation is used. 
from sklearn.model_selection import TunedThresholdClassifierCV tuned_model = TunedThresholdClassifierCV( estimator=model, scoring=scoring["credit_gain"], store_cv_results=True, # necessary to inspect all results ) tuned_model.fit(X_train, y_train) print(f"{tuned_model.best_threshold_=:0.2f}") # %% # We plot the ROC and Precision-Recall curves for the vanilla model and the tuned model. # Also we plot the cut-off points that would be used by each model. Because, we are # reusing the same code later, we define a function that generates the plots. def plot_roc_pr_curves(vanilla_model, tuned_model, *, title): fig, axs = plt.subplots(nrows=1, ncols=3, figsize=(21, 6)) linestyles = ("dashed", "dotted") markerstyles = ("o", ">") colors = ("tab:blue", "tab:orange") names = ("Vanilla GBDT", "Tuned GBDT") for idx, (est, linestyle, marker, color, name) in enumerate( zip((vanilla_model, tuned_model), linestyles, markerstyles, colors, names) ): decision_threshold = getattr(est, "best_threshold_", 0.5) PrecisionRecallDisplay.from_estimator( est, X_test, y_test, pos_label=pos_label, linestyle=linestyle, color=color, ax=axs[0], name=name, ) axs[0].plot( scoring["recall"](est, X_test, y_test), scoring["precision"](est, X_test, y_test), marker, markersize=10, color=color, label=f"Cut-off point at probability of {decision_threshold:.2f}", ) RocCurveDisplay.from_estimator( est, X_test, y_test, pos_label=pos_label, curve_kwargs=dict(linestyle=linestyle, color=color), ax=axs[1], name=name, plot_chance_level=idx == 1, ) axs[1].plot( scoring["fpr"](est, X_test, y_test), scoring["tpr"](est, X_test, y_test), marker, markersize=10, color=color, label=f"Cut-off point at probability of {decision_threshold:.2f}", ) axs[0].set_title("Precision-Recall curve") axs[0].legend() axs[1].set_title("ROC curve") axs[1].legend() axs[2].plot( tuned_model.cv_results_["thresholds"], tuned_model.cv_results_["scores"], color="tab:orange", ) axs[2].plot( tuned_model.best_threshold_, tuned_model.best_score_, "o", 
markersize=10, color="tab:orange", label="Optimal cut-off point for the business metric", ) axs[2].legend() axs[2].set_xlabel("Decision threshold (probability)") axs[2].set_ylabel("Objective score (using cost-matrix)") axs[2].set_title("Objective score as a function of the decision threshold") fig.suptitle(title) # %% title = "Comparison of the cut-off point for the vanilla and tuned GBDT model" plot_roc_pr_curves(model, tuned_model, title=title) # %% # The first remark is that both classifiers have exactly the same ROC and # Precision-Recall curves. It is expected because by default, the classifier is fitted # on the same training data. In a later section, we discuss more in detail the # available options regarding model refitting and cross-validation. # # The second remark is that the cut-off points of the vanilla and tuned model are # different. To understand why the tuned model has chosen this cut-off point, we can # look at the right-hand side plot that plots the objective score, which is exactly # the same as our business metric. We see that the optimum threshold corresponds to the # maximum of the objective score. This maximum is reached for a decision threshold # much lower than 0.5: the tuned model enjoys a much higher recall at the cost # of significantly lower precision: the tuned model is much more eager to # predict the "bad" class label to a larger fraction of individuals. # # We can now check if choosing this cut-off point leads to a better score on the testing # set: print(f"Business defined metric: {scoring['credit_gain'](tuned_model, X_test, y_test)}") # %% # We observe that tuning the decision threshold improves our business gains # by almost a factor of 2. # # ..
_TunedThresholdClassifierCV_no_cv: # # Consideration regarding model refitting and cross-validation # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # # In the above experiment, we used the default setting of the # :class:`~sklearn.model_selection.TunedThresholdClassifierCV`. In particular, the # cut-off point is tuned using a 5-fold stratified cross-validation. Also, the # underlying predictive model is refitted on the entire training data once the cut-off # point is chosen. # # These two strategies can be changed by providing the `refit` and `cv` parameters. # For instance, one could provide a fitted `estimator` and set `cv="prefit"`, in which # case the cut-off point is found on the entire dataset provided at fitting time. # Also, the underlying classifier is not refitted by setting `refit=False`. Here, we # can try to do such an experiment. model.fit(X_train, y_train) tuned_model.set_params(cv="prefit", refit=False).fit(X_train, y_train) print(f"{tuned_model.best_threshold_=:0.2f}") # %% # Then, we evaluate our model with the same approach as before: title = "Tuned GBDT model without refitting and using the entire dataset" plot_roc_pr_curves(model, tuned_model, title=title) # %% # We observe that the optimum cut-off point is different from the one found # in the previous experiment. If we look at the right-hand side plot, we # observe that the business gain has a large plateau of near-optimal 0 gain for a # large span of decision thresholds. This behavior is symptomatic of # overfitting. Because we disable cross-validation, we tuned the cut-off point # on the same set as the model was trained on, and this is the reason for the # observed overfitting. # # This option should therefore be used with caution. One needs to make sure that the # data provided at fitting time to the # :class:`~sklearn.model_selection.TunedThresholdClassifierCV` is not the same as the # data used to train the underlying classifier.
This could happen sometimes when the # idea is just to tune the predictive model on a completely new validation set without a # costly complete refit. # # When cross-validation is too costly, a potential alternative is to use a # single train-test split by providing a floating number in range `[0, 1]` to the `cv` # parameter. It splits the data into a training and testing set. Let's explore this # option: tuned_model.set_params(cv=0.75).fit(X_train, y_train) # %% title = "Tuned GBDT model without refitting and using the entire dataset" plot_roc_pr_curves(model, tuned_model, title=title) # %% # Regarding the cut-off point, we observe that the optimum is similar to the multiple # repeated cross-validation case. However, be aware that a single split does not account # for the variability of the fit/predict process and thus we are unable to know if there # is any variance in the cut-off point. The repeated cross-validation averages out # this effect. # # Another observation concerns the ROC and Precision-Recall curves of the tuned model. # As expected, these curves differ from those of the vanilla model, given that we # trained the underlying classifier on a subset of the data provided during fitting and # reserved a validation set for tuning the cut-off point. # # Cost-sensitive learning when gains and costs are not constant # ------------------------------------------------------------- # # As stated in [2]_, gains and costs are generally not constant in real-world problems. # In this section, we use a similar example as in [2]_ for the problem of # detecting fraud in credit card transaction records. # # The credit card dataset # ^^^^^^^^^^^^^^^^^^^^^^^ credit_card = fetch_openml(data_id=1597, as_frame=True, parser="pandas") credit_card.frame.info() # %% # The dataset contains information about credit card records from which some are # fraudulent and others are legitimate. The goal is therefore to predict whether or # not a credit card record is fraudulent. 
columns_to_drop = ["Class"] data = credit_card.frame.drop(columns=columns_to_drop) target = credit_card.frame["Class"].astype(int) # %% # First, we check the class distribution of the dataset. target.value_counts(normalize=True) # %% # The dataset is highly imbalanced with fraudulent transactions representing only 0.17% # of the data. Since we are interested in training a machine learning model, we should # also make sure that we have enough samples in the minority class to train the model. target.value_counts() # %% # We observe that we have around 500 samples, which is on the low end of the number of # samples required to train a machine learning model. In addition to the target # distribution, we check the distribution of the amount of the # fraudulent transactions. fraud = target == 1 amount_fraud = data["Amount"][fraud] _, ax = plt.subplots() ax.hist(amount_fraud, bins=30) ax.set_title("Amount of fraud transaction") _ = ax.set_xlabel("Amount (€)") # %% # Addressing the problem with a business metric # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # # Now, we create the business metric that depends on the amount of each transaction. We # define the cost matrix similarly to [2]_. Accepting a legitimate transaction provides # a gain of 2% of the amount of the transaction. However, accepting a fraudulent # transaction results in a loss of the amount of the transaction. As stated in [2]_, the # gain and loss related to refusals (of fraudulent and legitimate transactions) are not # trivial to define. Here, we define that a refusal of a legitimate transaction # is estimated to a loss of 5€ while the refusal of a fraudulent transaction is # estimated to a gain of 50€.
Therefore, we define the following function to # compute the total benefit of a given decision: def business_metric(y_true, y_pred, amount): mask_true_positive = (y_true == 1) & (y_pred == 1) mask_true_negative = (y_true == 0) & (y_pred == 0) mask_false_positive = (y_true == 0) & (y_pred == 1) mask_false_negative = (y_true == 1) & (y_pred == 0) fraudulent_refuse = mask_true_positive.sum() * 50 fraudulent_accept = -amount[mask_false_negative].sum() legitimate_refuse = mask_false_positive.sum() * -5 legitimate_accept = (amount[mask_true_negative] * 0.02).sum() return fraudulent_refuse + fraudulent_accept + legitimate_refuse + legitimate_accept # %% # From this business metric, we create a scikit-learn scorer that given a fitted # classifier and a test set compute the business metric. In this regard, we use # the :func:`~sklearn.metrics.make_scorer` factory. The variable `amount` is an # additional metadata to be passed to the scorer and we need to use # :ref:`metadata routing <metadata_routing>` to take into account this information. sklearn.set_config(enable_metadata_routing=True) business_scorer = make_scorer(business_metric).set_score_request(amount=True) # %% # So at this stage, we observe that the amount of the transaction is used twice: once # as a feature to train our predictive model and once as a metadata to compute the # the business metric and thus the statistical performance of our model. When used as a # feature, we are only required to have a column in `data` that contains the amount of # each transaction. To use this information as metadata, we need to have an external # variable that we can pass to the scorer or the model that internally routes this # metadata to the scorer. So let's create this variable. 
amount = credit_card.frame["Amount"].to_numpy() # %% from sklearn.model_selection import train_test_split data_train, data_test, target_train, target_test, amount_train, amount_test = ( train_test_split( data, target, amount, stratify=target, test_size=0.5, random_state=42 ) ) # %% # We first evaluate some baseline policies to serve as reference. Recall that # class "0" is the legitimate class and class "1" is the fraudulent class. from sklearn.dummy import DummyClassifier always_accept_policy = DummyClassifier(strategy="constant", constant=0) always_accept_policy.fit(data_train, target_train) benefit = business_scorer( always_accept_policy, data_test, target_test, amount=amount_test ) print(f"Benefit of the 'always accept' policy: {benefit:,.2f}€") # %% # A policy that considers all transactions as legitimate would create a profit of # around 220,000€. We make the same evaluation for a classifier that predicts all # transactions as fraudulent. always_reject_policy = DummyClassifier(strategy="constant", constant=1) always_reject_policy.fit(data_train, target_train) benefit = business_scorer( always_reject_policy, data_test, target_test, amount=amount_test ) print(f"Benefit of the 'always reject' policy: {benefit:,.2f}€") # %% # Such a policy would entail a catastrophic loss: around 670,000€. This is # expected since the vast majority of the transactions are legitimate and the # policy would refuse them at a non-trivial cost. # # A predictive model that adapts the accept/reject decisions on a per # transaction basis should ideally allow us to make a profit larger than the # 220,000€ of the best of our constant baseline policies. # # We start with a logistic regression model with the default decision threshold # at 0.5. 
Here we tune the hyperparameter `C` of the logistic regression with a # proper scoring rule (the log loss) to ensure that the model's probabilistic # predictions returned by its `predict_proba` method are as accurate as # possible, irrespectively of the choice of the value of the decision # threshold. from sklearn.linear_model import LogisticRegression from sklearn.model_selection import GridSearchCV from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler logistic_regression = make_pipeline(StandardScaler(), LogisticRegression()) param_grid = {"logisticregression__C": np.logspace(-6, 6, 13)} model = GridSearchCV(logistic_regression, param_grid, scoring="neg_log_loss").fit( data_train, target_train ) model # %% print( "Benefit of logistic regression with default threshold: " f"{business_scorer(model, data_test, target_test, amount=amount_test):,.2f}€" ) # %% # The business metric shows that our predictive model with a default decision # threshold is already winning over the baseline in terms of profit and it would be # already beneficial to use it to accept or reject transactions instead of # accepting all transactions. # # Tuning the decision threshold # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # # Now the question is: is our model optimum for the type of decision that we want to do? # Up to now, we did not optimize the decision threshold. We use the # :class:`~sklearn.model_selection.TunedThresholdClassifierCV` to optimize the decision # given our business scorer. To avoid a nested cross-validation, we will use the # best estimator found during the previous grid-search. tuned_model = TunedThresholdClassifierCV( estimator=model.best_estimator_, scoring=business_scorer, thresholds=100, n_jobs=2, ) # %% # Since our business scorer requires the amount of each transaction, we need to pass # this information in the `fit` method. 
The # :class:`~sklearn.model_selection.TunedThresholdClassifierCV` is in charge of # automatically dispatching this metadata to the underlying scorer. tuned_model.fit(data_train, target_train, amount=amount_train) # %% # We observe that the tuned decision threshold is far away from the default 0.5: print(f"Tuned decision threshold: {tuned_model.best_threshold_:.2f}") # %% print( "Benefit of logistic regression with a tuned threshold: " f"{business_scorer(tuned_model, data_test, target_test, amount=amount_test):,.2f}€" ) # %% # We observe that tuning the decision threshold increases the expected profit # when deploying our model - as indicated by the business metric. It is therefore # valuable, whenever possible, to optimize the decision threshold with respect # to the business metric. # # Manually setting the decision threshold instead of tuning it # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # # In the previous example, we used the # :class:`~sklearn.model_selection.TunedThresholdClassifierCV` to find the optimal # decision threshold. However, in some cases, we might have some prior knowledge about # the problem at hand and we might be happy to set the decision threshold manually. # # The class :class:`~sklearn.model_selection.FixedThresholdClassifier` allows us to # manually set the decision threshold. At prediction time, it behave as the previous # tuned model but no search is performed during the fitting process. Note that here # we use :class:`~sklearn.frozen.FrozenEstimator` to wrap the predictive model to # avoid any refitting. # # Here, we will reuse the decision threshold found in the previous section to create a # new model and check that it gives the same results. 
from sklearn.frozen import FrozenEstimator from sklearn.model_selection import FixedThresholdClassifier model_fixed_threshold = FixedThresholdClassifier( estimator=FrozenEstimator(model), threshold=tuned_model.best_threshold_ ) # %% business_score = business_scorer( model_fixed_threshold, data_test, target_test, amount=amount_test ) print(f"Benefit of logistic regression with a tuned threshold: {business_score:,.2f}€") # %% # We observe that we obtained the exact same results but the fitting process # was much faster since we did not perform any hyper-parameter search. # # Finally, the estimate of the (average) business metric itself can be unreliable, in # particular when the number of data points in the minority class is very small. # Any business impact estimated by cross-validation of a business metric on # historical data (offline evaluation) should ideally be confirmed by A/B testing # on live data (online evaluation). Note however that A/B testing models is # beyond the scope of the scikit-learn library itself. # # At the end, we disable the configuration flag for metadata routing:: sklearn.set_config(enable_metadata_routing=False)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/model_selection/plot_grid_search_text_feature_extraction.py
examples/model_selection/plot_grid_search_text_feature_extraction.py
""" ========================================================== Sample pipeline for text feature extraction and evaluation ========================================================== The dataset used in this example is :ref:`20newsgroups_dataset` which will be automatically downloaded, cached and reused for the document classification example. In this example, we tune the hyperparameters of a particular classifier using a :class:`~sklearn.model_selection.RandomizedSearchCV`. For a demo on the performance of some other classifiers, see the :ref:`sphx_glr_auto_examples_text_plot_document_classification_20newsgroups.py` notebook. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Data loading # ------------ # We load two categories from the training set. You can adjust the number of # categories by adding their names to the list or setting `categories=None` when # calling the dataset loader :func:`~sklearn.datasets.fetch_20newsgroups` to get # the 20 of them. from sklearn.datasets import fetch_20newsgroups categories = [ "alt.atheism", "talk.religion.misc", ] data_train = fetch_20newsgroups( subset="train", categories=categories, shuffle=True, random_state=42, remove=("headers", "footers", "quotes"), ) data_test = fetch_20newsgroups( subset="test", categories=categories, shuffle=True, random_state=42, remove=("headers", "footers", "quotes"), ) print(f"Loading 20 newsgroups dataset for {len(data_train.target_names)} categories:") print(data_train.target_names) print(f"{len(data_train.data)} documents") # %% # Pipeline with hyperparameter tuning # ----------------------------------- # # We define a pipeline combining a text feature vectorizer with a simple # classifier yet effective for text classification. 
from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.naive_bayes import ComplementNB from sklearn.pipeline import Pipeline pipeline = Pipeline( [ ("vect", TfidfVectorizer()), ("clf", ComplementNB()), ] ) pipeline # %% # We define a grid of hyperparameters to be explored by the # :class:`~sklearn.model_selection.RandomizedSearchCV`. Using a # :class:`~sklearn.model_selection.GridSearchCV` instead would explore all the # possible combinations on the grid, which can be costly to compute, whereas the # parameter `n_iter` of the :class:`~sklearn.model_selection.RandomizedSearchCV` # controls the number of different random combination that are evaluated. Notice # that setting `n_iter` larger than the number of possible combinations in a # grid would lead to repeating already-explored combinations. We search for the # best parameter combination for both the feature extraction (`vect__`) and the # classifier (`clf__`). import numpy as np parameter_grid = { "vect__max_df": (0.2, 0.4, 0.6, 0.8, 1.0), "vect__min_df": (1, 3, 5, 10), "vect__ngram_range": ((1, 1), (1, 2)), # unigrams or bigrams "vect__norm": ("l1", "l2"), "clf__alpha": np.logspace(-6, 6, 13), } # %% # In this case `n_iter=40` is not an exhaustive search of the hyperparameters' # grid. In practice it would be interesting to increase the parameter `n_iter` # to get a more informative analysis. As a consequence, the computional time # increases. We can reduce it by taking advantage of the parallelisation over # the parameter combinations evaluation by increasing the number of CPUs used # via the parameter `n_jobs`. 
from pprint import pprint from sklearn.model_selection import RandomizedSearchCV random_search = RandomizedSearchCV( estimator=pipeline, param_distributions=parameter_grid, n_iter=40, random_state=0, n_jobs=2, verbose=1, ) print("Performing grid search...") print("Hyperparameters to be evaluated:") pprint(parameter_grid) # %% from time import time t0 = time() random_search.fit(data_train.data, data_train.target) print(f"Done in {time() - t0:.3f}s") # %% print("Best parameters combination found:") best_parameters = random_search.best_estimator_.get_params() for param_name in sorted(parameter_grid.keys()): print(f"{param_name}: {best_parameters[param_name]}") # %% test_accuracy = random_search.score(data_test.data, data_test.target) print( "Accuracy of the best parameters using the inner CV of " f"the random search: {random_search.best_score_:.3f}" ) print(f"Accuracy on test set: {test_accuracy:.3f}") # %% # The prefixes `vect` and `clf` are required to avoid possible ambiguities in # the pipeline, but are not necessary for visualizing the results. Because of # this, we define a function that will rename the tuned hyperparameters and # improve the readability. import pandas as pd def shorten_param(param_name): """Remove components' prefixes in param_name.""" if "__" in param_name: return param_name.rsplit("__", 1)[1] return param_name cv_results = pd.DataFrame(random_search.cv_results_) cv_results = cv_results.rename(shorten_param, axis=1) # %% # We can use a `plotly.express.scatter # <https://plotly.com/python-api-reference/generated/plotly.express.scatter.html>`_ # to visualize the trade-off between scoring time and mean test score (i.e. "CV # score"). Passing the cursor over a given point displays the corresponding # parameters. Error bars correspond to one standard deviation as computed in the # different folds of the cross-validation. 
import plotly.express as px param_names = [shorten_param(name) for name in parameter_grid.keys()] labels = { "mean_score_time": "CV Score time (s)", "mean_test_score": "CV score (accuracy)", } fig = px.scatter( cv_results, x="mean_score_time", y="mean_test_score", error_x="std_score_time", error_y="std_test_score", hover_data=param_names, labels=labels, ) fig.update_layout( title={ "text": "trade-off between scoring time and mean test score", "y": 0.95, "x": 0.5, "xanchor": "center", "yanchor": "top", } ) fig # %% # Notice that the cluster of models in the upper-left corner of the plot have # the best trade-off between accuracy and scoring time. In this case, using # bigrams increases the required scoring time without improving considerably the # accuracy of the pipeline. # # .. note:: For more information on how to customize an automated tuning to # maximize score and minimize scoring time, see the example notebook # :ref:`sphx_glr_auto_examples_model_selection_plot_grid_search_digits.py`. # # We can also use a `plotly.express.parallel_coordinates # <https://plotly.com/python-api-reference/generated/plotly.express.parallel_coordinates.html>`_ # to further visualize the mean test score as a function of the tuned # hyperparameters. This helps finding interactions between more than two # hyperparameters and provide intuition on their relevance for improving the # performance of a pipeline. # # We apply a `math.log10` transformation on the `alpha` axis to spread the # active range and improve the readability of the plot. A value :math:`x` on # said axis is to be understood as :math:`10^x`. 
import math column_results = param_names + ["mean_test_score", "mean_score_time"] transform_funcs = dict.fromkeys(column_results, lambda x: x) # Using a logarithmic scale for alpha transform_funcs["alpha"] = math.log10 # L1 norms are mapped to index 1, and L2 norms to index 2 transform_funcs["norm"] = lambda x: 2 if x == "l2" else 1 # Unigrams are mapped to index 1 and bigrams to index 2 transform_funcs["ngram_range"] = lambda x: x[1] fig = px.parallel_coordinates( cv_results[column_results].apply(transform_funcs), color="mean_test_score", color_continuous_scale=px.colors.sequential.Viridis_r, labels=labels, ) fig.update_layout( title={ "text": "Parallel coordinates plot of text classifier pipeline", "y": 0.99, "x": 0.5, "xanchor": "center", "yanchor": "top", } ) fig # %% # The parallel coordinates plot displays the values of the hyperparameters on # different columns while the performance metric is color coded. It is possible # to select a range of results by clicking and holding on any axis of the # parallel coordinate plot. You can then slide (move) the range selection and # cross two selections to see the intersections. You can undo a selection by # clicking once again on the same axis. # # In particular for this hyperparameter search, it is interesting to notice that # the top performing models do not seem to depend on the regularization `norm`, # but they do depend on a trade-off between `max_df`, `min_df` and the # regularization strength `alpha`. The reason is that including noisy features # (i.e. `max_df` close to :math:`1.0` or `min_df` close to :math:`0`) tend to # overfit and therefore require a stronger regularization to compensate. Having # less features require less regularization and less scoring time. # # The best accuracy scores are obtained when `alpha` is between :math:`10^{-6}` # and :math:`10^0`, regardless of the hyperparameter `norm`.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/model_selection/plot_successive_halving_heatmap.py
examples/model_selection/plot_successive_halving_heatmap.py
""" Comparison between grid search and successive halving ===================================================== This example compares the parameter search performed by :class:`~sklearn.model_selection.HalvingGridSearchCV` and :class:`~sklearn.model_selection.GridSearchCV`. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from time import time import matplotlib.pyplot as plt import numpy as np import pandas as pd from sklearn import datasets from sklearn.experimental import enable_halving_search_cv # noqa: F401 from sklearn.model_selection import GridSearchCV, HalvingGridSearchCV from sklearn.svm import SVC # %% # We first define the parameter space for an :class:`~sklearn.svm.SVC` # estimator, and compute the time required to train a # :class:`~sklearn.model_selection.HalvingGridSearchCV` instance, as well as a # :class:`~sklearn.model_selection.GridSearchCV` instance. rng = np.random.RandomState(0) X, y = datasets.make_classification(n_samples=1000, random_state=rng) gammas = [1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7] Cs = [1, 10, 100, 1e3, 1e4, 1e5] param_grid = {"gamma": gammas, "C": Cs} clf = SVC(random_state=rng) tic = time() gsh = HalvingGridSearchCV( estimator=clf, param_grid=param_grid, factor=2, random_state=rng ) gsh.fit(X, y) gsh_time = time() - tic tic = time() gs = GridSearchCV(estimator=clf, param_grid=param_grid) gs.fit(X, y) gs_time = time() - tic # %% # We now plot heatmaps for both search estimators. 
def make_heatmap(ax, gs, is_sh=False, make_cbar=False): """Helper to make a heatmap.""" results = pd.DataFrame(gs.cv_results_) results[["param_C", "param_gamma"]] = results[["param_C", "param_gamma"]].astype( np.float64 ) if is_sh: # SH dataframe: get mean_test_score values for the highest iter scores_matrix = results.sort_values("iter").pivot_table( index="param_gamma", columns="param_C", values="mean_test_score", aggfunc="last", ) else: scores_matrix = results.pivot( index="param_gamma", columns="param_C", values="mean_test_score" ) im = ax.imshow(scores_matrix) ax.set_xticks(np.arange(len(Cs))) ax.set_xticklabels(["{:.0E}".format(x) for x in Cs]) ax.set_xlabel("C", fontsize=15) ax.set_yticks(np.arange(len(gammas))) ax.set_yticklabels(["{:.0E}".format(x) for x in gammas]) ax.set_ylabel("gamma", fontsize=15) # Rotate the tick labels and set their alignment. plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor") if is_sh: iterations = results.pivot_table( index="param_gamma", columns="param_C", values="iter", aggfunc="max" ).values for i in range(len(gammas)): for j in range(len(Cs)): ax.text( j, i, iterations[i, j], ha="center", va="center", color="w", fontsize=20, ) if make_cbar: fig.subplots_adjust(right=0.8) cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7]) fig.colorbar(im, cax=cbar_ax) cbar_ax.set_ylabel("mean_test_score", rotation=-90, va="bottom", fontsize=15) fig, axes = plt.subplots(ncols=2, sharey=True) ax1, ax2 = axes make_heatmap(ax1, gsh, is_sh=True) make_heatmap(ax2, gs, make_cbar=True) ax1.set_title("Successive Halving\ntime = {:.3f}s".format(gsh_time), fontsize=15) ax2.set_title("GridSearch\ntime = {:.3f}s".format(gs_time), fontsize=15) plt.show() # %% # The heatmaps show the mean test score of the parameter combinations for an # :class:`~sklearn.svm.SVC` instance. The # :class:`~sklearn.model_selection.HalvingGridSearchCV` also shows the # iteration at which the combinations where last used. 
The combinations marked # as ``0`` were only evaluated at the first iteration, while the ones with # ``5`` are the parameter combinations that are considered the best ones. # # We can see that the :class:`~sklearn.model_selection.HalvingGridSearchCV` # class is able to find parameter combinations that are just as accurate as # :class:`~sklearn.model_selection.GridSearchCV`, in much less time.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/model_selection/plot_nested_cross_validation_iris.py
examples/model_selection/plot_nested_cross_validation_iris.py
""" ========================================= Nested versus non-nested cross-validation ========================================= This example compares non-nested and nested cross-validation strategies on a classifier of the iris data set. Nested cross-validation (CV) is often used to train a model in which hyperparameters also need to be optimized. Nested CV estimates the generalization error of the underlying model and its (hyper)parameter search. Choosing the parameters that maximize non-nested CV biases the model to the dataset, yielding an overly-optimistic score. Model selection without nested CV uses the same data to tune model parameters and evaluate model performance. Information may thus "leak" into the model and overfit the data. The magnitude of this effect is primarily dependent on the size of the dataset and the stability of the model. See Cawley and Talbot [1]_ for an analysis of these issues. To avoid this problem, nested CV effectively uses a series of train/validation/test set splits. In the inner loop (here executed by :class:`GridSearchCV <sklearn.model_selection.GridSearchCV>`), the score is approximately maximized by fitting a model to each training set, and then directly maximized in selecting (hyper)parameters over the validation set. In the outer loop (here in :func:`cross_val_score <sklearn.model_selection.cross_val_score>`), generalization error is estimated by averaging test set scores over several dataset splits. The example below uses a support vector classifier with a non-linear kernel to build a model with optimized hyperparameters by grid search. We compare the performance of non-nested and nested CV strategies by taking the difference between their scores. .. seealso:: - :ref:`cross_validation` - :ref:`grid_search` .. rubric:: References .. [1] `Cawley, G.C.; Talbot, N.L.C. On over-fitting in model selection and subsequent selection bias in performance evaluation. J. Mach. Learn. Res 2010,11, 2079-2107. 
<http://jmlr.csail.mit.edu/papers/volume11/cawley10a/cawley10a.pdf>`_ """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import numpy as np from matplotlib import pyplot as plt from sklearn.datasets import load_iris from sklearn.model_selection import GridSearchCV, KFold, cross_val_score from sklearn.svm import SVC # Number of random trials NUM_TRIALS = 30 # Load the dataset iris = load_iris() X_iris = iris.data y_iris = iris.target # Set up possible values of parameters to optimize over p_grid = {"C": [1, 10, 100], "gamma": [0.01, 0.1]} # We will use a Support Vector Classifier with "rbf" kernel svm = SVC(kernel="rbf") # Arrays to store scores non_nested_scores = np.zeros(NUM_TRIALS) nested_scores = np.zeros(NUM_TRIALS) # Loop for each trial for i in range(NUM_TRIALS): # Choose cross-validation techniques for the inner and outer loops, # independently of the dataset. # E.g "GroupKFold", "LeaveOneOut", "LeaveOneGroupOut", etc. inner_cv = KFold(n_splits=4, shuffle=True, random_state=i) outer_cv = KFold(n_splits=4, shuffle=True, random_state=i) # Non_nested parameter search and scoring clf = GridSearchCV(estimator=svm, param_grid=p_grid, cv=outer_cv) clf.fit(X_iris, y_iris) non_nested_scores[i] = clf.best_score_ # Nested CV with parameter optimization clf = GridSearchCV(estimator=svm, param_grid=p_grid, cv=inner_cv) nested_score = cross_val_score(clf, X=X_iris, y=y_iris, cv=outer_cv) nested_scores[i] = nested_score.mean() score_difference = non_nested_scores - nested_scores print( "Average difference of {:6f} with std. dev. 
of {:6f}.".format( score_difference.mean(), score_difference.std() ) ) # Plot scores on each trial for nested and non-nested CV plt.figure() plt.subplot(211) (non_nested_scores_line,) = plt.plot(non_nested_scores, color="r") (nested_line,) = plt.plot(nested_scores, color="b") plt.ylabel("score", fontsize="14") plt.legend( [non_nested_scores_line, nested_line], ["Non-Nested CV", "Nested CV"], bbox_to_anchor=(0, 0.4, 0.5, 0), ) plt.title( "Non-Nested and Nested Cross Validation on Iris Dataset", x=0.5, y=1.1, fontsize="15", ) # Plot bar chart of the difference. plt.subplot(212) difference_plot = plt.bar(range(NUM_TRIALS), score_difference) plt.xlabel("Individual Trial #") plt.legend( [difference_plot], ["Non-Nested CV - Nested CV Score"], bbox_to_anchor=(0, 1, 0.8, 0), ) plt.ylabel("score difference", fontsize="14") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/model_selection/plot_multi_metric_evaluation.py
examples/model_selection/plot_multi_metric_evaluation.py
""" ============================================================================ Demonstration of multi-metric evaluation on cross_val_score and GridSearchCV ============================================================================ Multiple metric parameter search can be done by setting the ``scoring`` parameter to a list of metric scorer names or a dict mapping the scorer names to the scorer callables. The scores of all the scorers are available in the ``cv_results_`` dict at keys ending in ``'_<scorer_name>'`` (``'mean_test_precision'``, ``'rank_test_precision'``, etc...) The ``best_estimator_``, ``best_index_``, ``best_score_`` and ``best_params_`` correspond to the scorer (key) that is set to the ``refit`` attribute. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import numpy as np from matplotlib import pyplot as plt from sklearn.datasets import make_hastie_10_2 from sklearn.metrics import accuracy_score, make_scorer from sklearn.model_selection import GridSearchCV from sklearn.tree import DecisionTreeClassifier # %% # Running ``GridSearchCV`` using multiple evaluation metrics # ---------------------------------------------------------- # X, y = make_hastie_10_2(n_samples=8000, random_state=42) # The scorers can be either one of the predefined metric strings or a scorer # callable, like the one returned by make_scorer scoring = {"AUC": "roc_auc", "Accuracy": make_scorer(accuracy_score)} # Setting refit='AUC', refits an estimator on the whole dataset with the # parameter setting that has the best cross-validated AUC score. 
# That estimator is made available at ``gs.best_estimator_`` along with # parameters like ``gs.best_score_``, ``gs.best_params_`` and # ``gs.best_index_`` gs = GridSearchCV( DecisionTreeClassifier(random_state=42), param_grid={"min_samples_split": range(2, 403, 20)}, scoring=scoring, refit="AUC", n_jobs=2, return_train_score=True, ) gs.fit(X, y) results = gs.cv_results_ # %% # Plotting the result # ------------------- plt.figure(figsize=(13, 13)) plt.title("GridSearchCV evaluating using multiple scorers simultaneously", fontsize=16) plt.xlabel("min_samples_split") plt.ylabel("Score") ax = plt.gca() ax.set_xlim(0, 402) ax.set_ylim(0.73, 1) # Get the regular numpy array from the MaskedArray X_axis = np.array(results["param_min_samples_split"].data, dtype=float) for scorer, color in zip(sorted(scoring), ["g", "k"]): for sample, style in (("train", "--"), ("test", "-")): sample_score_mean = results["mean_%s_%s" % (sample, scorer)] sample_score_std = results["std_%s_%s" % (sample, scorer)] ax.fill_between( X_axis, sample_score_mean - sample_score_std, sample_score_mean + sample_score_std, alpha=0.1 if sample == "test" else 0, color=color, ) ax.plot( X_axis, sample_score_mean, style, color=color, alpha=1 if sample == "test" else 0.7, label="%s (%s)" % (scorer, sample), ) best_index = np.nonzero(results["rank_test_%s" % scorer] == 1)[0][0] best_score = results["mean_test_%s" % scorer][best_index] # Plot a dotted vertical line at the best score for that scorer marked by x ax.plot( [ X_axis[best_index], ] * 2, [0, best_score], linestyle="-.", color=color, marker="x", markeredgewidth=3, ms=8, ) # Annotate the best score for that scorer ax.annotate("%0.2f" % best_score, (X_axis[best_index], best_score + 0.005)) plt.legend(loc="best") plt.grid(False) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/model_selection/plot_underfitting_overfitting.py
examples/model_selection/plot_underfitting_overfitting.py
""" ============================ Underfitting vs. Overfitting ============================ This example demonstrates the problems of underfitting and overfitting and how we can use linear regression with polynomial features to approximate nonlinear functions. The plot shows the function that we want to approximate, which is a part of the cosine function. In addition, the samples from the real function and the approximations of different models are displayed. The models have polynomial features of different degrees. We can see that a linear function (polynomial with degree 1) is not sufficient to fit the training samples. This is called **underfitting**. A polynomial of degree 4 approximates the true function almost perfectly. However, for higher degrees the model will **overfit** the training data, i.e. it learns the noise of the training data. We evaluate quantitatively **overfitting** / **underfitting** by using cross-validation. We calculate the mean squared error (MSE) on the validation set, the higher, the less likely the model generalizes correctly from the training data. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn.linear_model import LinearRegression from sklearn.model_selection import cross_val_score from sklearn.pipeline import Pipeline from sklearn.preprocessing import PolynomialFeatures def true_fun(X): return np.cos(1.5 * np.pi * X) np.random.seed(0) n_samples = 30 degrees = [1, 4, 15] X = np.sort(np.random.rand(n_samples)) y = true_fun(X) + np.random.randn(n_samples) * 0.1 plt.figure(figsize=(14, 5)) for i in range(len(degrees)): ax = plt.subplot(1, len(degrees), i + 1) plt.setp(ax, xticks=(), yticks=()) polynomial_features = PolynomialFeatures(degree=degrees[i], include_bias=False) linear_regression = LinearRegression() pipeline = Pipeline( [ ("polynomial_features", polynomial_features), ("linear_regression", linear_regression), ] ) pipeline.fit(X[:, np.newaxis], y) # Evaluate the models using crossvalidation scores = cross_val_score( pipeline, X[:, np.newaxis], y, scoring="neg_mean_squared_error", cv=10 ) X_test = np.linspace(0, 1, 100) plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model") plt.plot(X_test, true_fun(X_test), label="True function") plt.scatter(X, y, edgecolor="b", s=20, label="Samples") plt.xlabel("x") plt.ylabel("y") plt.xlim((0, 1)) plt.ylim((-2, 2)) plt.legend(loc="best") plt.title( "Degree {}\nMSE = {:.2e}(+/- {:.2e})".format( degrees[i], -scores.mean(), scores.std() ) ) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/model_selection/plot_precision_recall.py
examples/model_selection/plot_precision_recall.py
""" ================ Precision-Recall ================ Example of Precision-Recall metric to evaluate classifier output quality. Precision-Recall is a useful measure of success of prediction when the classes are very imbalanced. In information retrieval, precision is a measure of the fraction of relevant items among actually returned items while recall is a measure of the fraction of items that were returned among all items that should have been returned. 'Relevancy' here refers to items that are positively labeled, i.e., true positives and false negatives. Precision (:math:`P`) is defined as the number of true positives (:math:`T_p`) over the number of true positives plus the number of false positives (:math:`F_p`). .. math:: P = \\frac{T_p}{T_p+F_p} Recall (:math:`R`) is defined as the number of true positives (:math:`T_p`) over the number of true positives plus the number of false negatives (:math:`F_n`). .. math:: R = \\frac{T_p}{T_p + F_n} The precision-recall curve shows the tradeoff between precision and recall for different thresholds. A high area under the curve represents both high recall and high precision. High precision is achieved by having few false positives in the returned results, and high recall is achieved by having few false negatives in the relevant results. High scores for both show that the classifier is returning accurate results (high precision), as well as returning a majority of all relevant results (high recall). A system with high recall but low precision returns most of the relevant items, but the proportion of returned results that are incorrectly labeled is high. A system with high precision but low recall is just the opposite, returning very few of the relevant items, but most of its predicted labels are correct when compared to the actual labels. An ideal system with high precision and high recall will return most of the relevant items, with most results labeled correctly. 
The definition of precision (:math:`\\frac{T_p}{T_p + F_p}`) shows that lowering the threshold of a classifier may increase the denominator, by increasing the number of results returned. If the threshold was previously set too high, the new results may all be true positives, which will increase precision. If the previous threshold was about right or too low, further lowering the threshold will introduce false positives, decreasing precision. Recall is defined as :math:`\\frac{T_p}{T_p+F_n}`, where :math:`T_p+F_n` does not depend on the classifier threshold. Changing the classifier threshold can only change the numerator, :math:`T_p`. Lowering the classifier threshold may increase recall, by increasing the number of true positive results. It is also possible that lowering the threshold may leave recall unchanged, while the precision fluctuates. Thus, precision does not necessarily decrease with recall. The relationship between recall and precision can be observed in the stairstep area of the plot - at the edges of these steps a small change in the threshold considerably reduces precision, with only a minor gain in recall. **Average precision** (AP) summarizes such a plot as the weighted mean of precisions achieved at each threshold, with the increase in recall from the previous threshold used as the weight: :math:`\\text{AP} = \\sum_n (R_n - R_{n-1}) P_n` where :math:`P_n` and :math:`R_n` are the precision and recall at the nth threshold. A pair :math:`(R_k, P_k)` is referred to as an *operating point*. AP and the trapezoidal area under the operating points (:func:`sklearn.metrics.auc`) are common ways to summarize a precision-recall curve that lead to different results. Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`. Precision-recall curves are typically used in binary classification to study the output of a classifier. 
In order to extend the precision-recall curve and average precision to multi-class or multi-label classification, it is necessary to binarize the output. One curve can be drawn per label, but one can also draw a precision-recall curve by considering each element of the label indicator matrix as a binary prediction (:ref:`micro-averaging <average>`). .. note:: See also :func:`sklearn.metrics.average_precision_score`, :func:`sklearn.metrics.recall_score`, :func:`sklearn.metrics.precision_score`, :func:`sklearn.metrics.f1_score` """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # In binary classification settings # --------------------------------- # # Dataset and model # ................. # # We will use a Linear SVC classifier to differentiate two types of irises. import numpy as np from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split X, y = load_iris(return_X_y=True) # Add noisy features random_state = np.random.RandomState(0) n_samples, n_features = X.shape X = np.concatenate([X, random_state.randn(n_samples, 200 * n_features)], axis=1) # Limit to the two first classes, and split into training and test X_train, X_test, y_train, y_test = train_test_split( X[y < 2], y[y < 2], test_size=0.5, random_state=random_state ) # %% # Linear SVC will expect each feature to have a similar range of values. Thus, # we will first scale the data using a # :class:`~sklearn.preprocessing.StandardScaler`. from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler from sklearn.svm import LinearSVC classifier = make_pipeline(StandardScaler(), LinearSVC(random_state=random_state)) classifier.fit(X_train, y_train) # %% # Plot the Precision-Recall curve # ............................... # # To plot the precision-recall curve, you should use # :class:`~sklearn.metrics.PrecisionRecallDisplay`. 
Indeed, there is two # methods available depending if you already computed the predictions of the # classifier or not. # # Let's first plot the precision-recall curve without the classifier # predictions. We use # :func:`~sklearn.metrics.PrecisionRecallDisplay.from_estimator` that # computes the predictions for us before plotting the curve. from sklearn.metrics import PrecisionRecallDisplay display = PrecisionRecallDisplay.from_estimator( classifier, X_test, y_test, name="LinearSVC", plot_chance_level=True, despine=True ) _ = display.ax_.set_title("2-class Precision-Recall curve") # %% # If we already got the estimated probabilities or scores for # our model, then we can use # :func:`~sklearn.metrics.PrecisionRecallDisplay.from_predictions`. y_score = classifier.decision_function(X_test) display = PrecisionRecallDisplay.from_predictions( y_test, y_score, name="LinearSVC", plot_chance_level=True, despine=True ) _ = display.ax_.set_title("2-class Precision-Recall curve") # %% # In multi-label settings # ----------------------- # # The precision-recall curve does not support the multilabel setting. However, # one can decide how to handle this case. We show such an example below. # # Create multi-label data, fit, and predict # ......................................... # # We create a multi-label dataset, to illustrate the precision-recall in # multi-label settings. from sklearn.preprocessing import label_binarize # Use label_binarize to be multi-label like settings Y = label_binarize(y, classes=[0, 1, 2]) n_classes = Y.shape[1] # Split into training and test X_train, X_test, Y_train, Y_test = train_test_split( X, Y, test_size=0.5, random_state=random_state ) # %% # We use :class:`~sklearn.multiclass.OneVsRestClassifier` for multi-label # prediction. 
from sklearn.multiclass import OneVsRestClassifier classifier = OneVsRestClassifier( make_pipeline(StandardScaler(), LinearSVC(random_state=random_state)) ) classifier.fit(X_train, Y_train) y_score = classifier.decision_function(X_test) # %% # The average precision score in multi-label settings # ................................................... from sklearn.metrics import average_precision_score, precision_recall_curve # For each class precision = dict() recall = dict() average_precision = dict() for i in range(n_classes): precision[i], recall[i], _ = precision_recall_curve(Y_test[:, i], y_score[:, i]) average_precision[i] = average_precision_score(Y_test[:, i], y_score[:, i]) # A "micro-average": quantifying score on all classes jointly precision["micro"], recall["micro"], _ = precision_recall_curve( Y_test.ravel(), y_score.ravel() ) average_precision["micro"] = average_precision_score(Y_test, y_score, average="micro") # %% # Plot the micro-averaged Precision-Recall curve # .............................................. from collections import Counter display = PrecisionRecallDisplay( recall=recall["micro"], precision=precision["micro"], average_precision=average_precision["micro"], prevalence_pos_label=Counter(Y_test.ravel())[1] / Y_test.size, ) display.plot(plot_chance_level=True, despine=True) _ = display.ax_.set_title("Micro-averaged over all classes") # %% # Plot Precision-Recall curve for each class and iso-f1 curves # ............................................................ 
from itertools import cycle import matplotlib.pyplot as plt # setup plot details colors = cycle(["navy", "turquoise", "darkorange", "cornflowerblue", "teal"]) _, ax = plt.subplots(figsize=(7, 8)) f_scores = np.linspace(0.2, 0.8, num=4) lines, labels = [], [] for f_score in f_scores: x = np.linspace(0.01, 1) y = f_score * x / (2 * x - f_score) (l,) = plt.plot(x[y >= 0], y[y >= 0], color="gray", alpha=0.2) plt.annotate("f1={0:0.1f}".format(f_score), xy=(0.9, y[45] + 0.02)) display = PrecisionRecallDisplay( recall=recall["micro"], precision=precision["micro"], average_precision=average_precision["micro"], ) display.plot(ax=ax, name="Micro-average precision-recall", color="gold") for i, color in zip(range(n_classes), colors): display = PrecisionRecallDisplay( recall=recall[i], precision=precision[i], average_precision=average_precision[i], ) display.plot( ax=ax, name=f"Precision-recall for class {i}", color=color, despine=True ) # add the legend for the iso-f1 curves handles, labels = display.ax_.get_legend_handles_labels() handles.extend([l]) labels.extend(["iso-f1 curves"]) # set the legend and the axes ax.legend(handles=handles, labels=labels, loc="best") ax.set_title("Extension of Precision-Recall curve to multi-class") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/ensemble/plot_forest_hist_grad_boosting_comparison.py
examples/ensemble/plot_forest_hist_grad_boosting_comparison.py
""" =============================================================== Comparing Random Forests and Histogram Gradient Boosting models =============================================================== In this example we compare the performance of Random Forest (RF) and Histogram Gradient Boosting (HGBT) models in terms of score and computation time for a regression dataset, though **all the concepts here presented apply to classification as well**. The comparison is made by varying the parameters that control the number of trees according to each estimator: - `n_estimators` controls the number of trees in the forest. It's a fixed number. - `max_iter` is the maximum number of iterations in a gradient boosting based model. The number of iterations corresponds to the number of trees for regression and binary classification problems. Furthermore, the actual number of trees required by the model depends on the stopping criteria. HGBT uses gradient boosting to iteratively improve the model's performance by fitting each tree to the negative gradient of the loss function with respect to the predicted value. RFs, on the other hand, are based on bagging and use a majority vote to predict the outcome. See the :ref:`User Guide <ensemble>` for more information on ensemble models or see :ref:`sphx_glr_auto_examples_ensemble_plot_hgbt_regression.py` for an example showcasing some other features of HGBT models. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Load dataset # ------------ from sklearn.datasets import fetch_california_housing X, y = fetch_california_housing(return_X_y=True, as_frame=True) n_samples, n_features = X.shape # %% # HGBT uses a histogram-based algorithm on binned feature values that can # efficiently handle large datasets (tens of thousands of samples or more) with # a high number of features (see :ref:`Why_it's_faster`). 
The scikit-learn # implementation of RF does not use binning and relies on exact splitting, which # can be computationally expensive. print(f"The dataset consists of {n_samples} samples and {n_features} features") # %% # Compute score and computation times # ----------------------------------- # # Notice that many parts of the implementation of # :class:`~sklearn.ensemble.HistGradientBoostingClassifier` and # :class:`~sklearn.ensemble.HistGradientBoostingRegressor` are parallelized by # default. # # The implementation of :class:`~sklearn.ensemble.RandomForestRegressor` and # :class:`~sklearn.ensemble.RandomForestClassifier` can also be run on multiple # cores by using the `n_jobs` parameter, here set to match the number of # physical cores on the host machine. See :ref:`parallelism` for more # information. import joblib N_CORES = joblib.cpu_count(only_physical_cores=True) print(f"Number of physical cores: {N_CORES}") # %% # Unlike RF, HGBT models offer an early-stopping option (see # :ref:`sphx_glr_auto_examples_ensemble_plot_gradient_boosting_early_stopping.py`) # to avoid adding new unnecessary trees. Internally, the algorithm uses an # out-of-sample set to compute the generalization performance of the model at # each addition of a tree. Thus, if the generalization performance is not # improving for more than `n_iter_no_change` iterations, it stops adding trees. # # The other parameters of both models were tuned but the procedure is not shown # here to keep the example simple. 
import pandas as pd from sklearn.ensemble import HistGradientBoostingRegressor, RandomForestRegressor from sklearn.model_selection import GridSearchCV, KFold models = { "Random Forest": RandomForestRegressor( min_samples_leaf=5, random_state=0, n_jobs=N_CORES ), "Hist Gradient Boosting": HistGradientBoostingRegressor( max_leaf_nodes=15, random_state=0, early_stopping=False ), } param_grids = { "Random Forest": {"n_estimators": [10, 20, 50, 100]}, "Hist Gradient Boosting": {"max_iter": [10, 20, 50, 100, 300, 500]}, } cv = KFold(n_splits=4, shuffle=True, random_state=0) results = [] for name, model in models.items(): grid_search = GridSearchCV( estimator=model, param_grid=param_grids[name], return_train_score=True, cv=cv, ).fit(X, y) result = {"model": name, "cv_results": pd.DataFrame(grid_search.cv_results_)} results.append(result) # %% # .. Note:: # Tuning the `n_estimators` for RF generally results in a waste of computer # power. In practice one just needs to ensure that it is large enough so that # doubling its value does not lead to a significant improvement of the testing # score. # # Plot results # ------------ # We can use a `plotly.express.scatter # <https://plotly.com/python-api-reference/generated/plotly.express.scatter.html>`_ # to visualize the trade-off between elapsed computing time and mean test score. # Passing the cursor over a given point displays the corresponding parameters. # Error bars correspond to one standard deviation as computed in the different # folds of the cross-validation. 
import plotly.colors as colors import plotly.express as px from plotly.subplots import make_subplots fig = make_subplots( rows=1, cols=2, shared_yaxes=True, subplot_titles=["Train time vs score", "Predict time vs score"], ) model_names = [result["model"] for result in results] colors_list = colors.qualitative.Plotly * ( len(model_names) // len(colors.qualitative.Plotly) + 1 ) for idx, result in enumerate(results): cv_results = result["cv_results"].round(3) model_name = result["model"] param_name = next(iter(param_grids[model_name].keys())) cv_results[param_name] = cv_results["param_" + param_name] cv_results["model"] = model_name scatter_fig = px.scatter( cv_results, x="mean_fit_time", y="mean_test_score", error_x="std_fit_time", error_y="std_test_score", hover_data=param_name, color="model", ) line_fig = px.line( cv_results, x="mean_fit_time", y="mean_test_score", ) scatter_trace = scatter_fig["data"][0] line_trace = line_fig["data"][0] scatter_trace.update(marker=dict(color=colors_list[idx])) line_trace.update(line=dict(color=colors_list[idx])) fig.add_trace(scatter_trace, row=1, col=1) fig.add_trace(line_trace, row=1, col=1) scatter_fig = px.scatter( cv_results, x="mean_score_time", y="mean_test_score", error_x="std_score_time", error_y="std_test_score", hover_data=param_name, ) line_fig = px.line( cv_results, x="mean_score_time", y="mean_test_score", ) scatter_trace = scatter_fig["data"][0] line_trace = line_fig["data"][0] scatter_trace.update(marker=dict(color=colors_list[idx])) line_trace.update(line=dict(color=colors_list[idx])) fig.add_trace(scatter_trace, row=1, col=2) fig.add_trace(line_trace, row=1, col=2) fig.update_layout( xaxis=dict(title="Train time (s) - lower is better"), yaxis=dict(title="Test R2 score - higher is better"), xaxis2=dict(title="Predict time (s) - lower is better"), legend=dict(x=0.72, y=0.05, traceorder="normal", borderwidth=1), title=dict(x=0.5, text="Speed-score trade-off of tree-based ensembles"), ) # %% # Both HGBT and RF models 
improve when increasing the number of trees in the # ensemble. However, the scores reach a plateau where adding new trees just # makes fitting and scoring slower. The RF model reaches such plateau earlier # and can never reach the test score of the largest HGBDT model. # # Note that the results shown on the above plot can change slightly across runs # and even more significantly when running on other machines: try to run this # example on your own local machine. # # Overall, one should often observe that the Histogram-based gradient boosting # models uniformly dominate the Random Forest models in the "test score vs # training speed trade-off" (the HGBDT curve should be on the top left of the RF # curve, without ever crossing). The "test score vs prediction speed" trade-off # can also be more disputed, but it's most often favorable to HGBDT. It's always # a good idea to check both kinds of model (with hyper-parameter tuning) and # compare their performance on your specific problem to determine which model is # the best fit but **HGBT almost always offers a more favorable speed-accuracy # trade-off than RF**, either with the default hyper-parameters or including the # hyper-parameter tuning cost. # # There is one exception to this rule of thumb though: when training a # multiclass classification model with a large number of possible classes, HGBDT # fits internally one-tree per class at each boosting iteration while the trees # used by the RF models are naturally multiclass which should improve the speed # accuracy trade-off of the RF models in this case.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/ensemble/plot_random_forest_embedding.py
examples/ensemble/plot_random_forest_embedding.py
""" ========================================================= Hashing feature transformation using Totally Random Trees ========================================================= RandomTreesEmbedding provides a way to map data to a very high-dimensional, sparse representation, which might be beneficial for classification. The mapping is completely unsupervised and very efficient. This example visualizes the partitions given by several trees and shows how the transformation can also be used for non-linear dimensionality reduction or non-linear classification. Points that are neighboring often share the same leaf of a tree and therefore share large parts of their hashed representation. This allows to separate two concentric circles simply based on the principal components of the transformed data with truncated SVD. In high-dimensional spaces, linear classifiers often achieve excellent accuracy. For sparse binary data, BernoulliNB is particularly well-suited. The bottom row compares the decision boundary obtained by BernoulliNB in the transformed space with an ExtraTreesClassifier forests learned on the original data. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn.datasets import make_circles from sklearn.decomposition import TruncatedSVD from sklearn.ensemble import ExtraTreesClassifier, RandomTreesEmbedding from sklearn.naive_bayes import BernoulliNB # make a synthetic dataset X, y = make_circles(factor=0.5, random_state=0, noise=0.05) # use RandomTreesEmbedding to transform data hasher = RandomTreesEmbedding(n_estimators=10, random_state=0, max_depth=3) X_transformed = hasher.fit_transform(X) # Visualize result after dimensionality reduction using truncated SVD svd = TruncatedSVD(n_components=2) X_reduced = svd.fit_transform(X_transformed) # Learn a Naive Bayes classifier on the transformed data nb = BernoulliNB() nb.fit(X_transformed, y) # Learn an ExtraTreesClassifier for comparison trees = ExtraTreesClassifier(max_depth=3, n_estimators=10, random_state=0) trees.fit(X, y) # scatter plot of original and reduced data fig = plt.figure(figsize=(9, 8)) ax = plt.subplot(221) ax.scatter(X[:, 0], X[:, 1], c=y, s=50, edgecolor="k") ax.set_title("Original Data (2d)") ax.set_xticks(()) ax.set_yticks(()) ax = plt.subplot(222) ax.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y, s=50, edgecolor="k") ax.set_title( "Truncated SVD reduction (2d) of transformed data (%dd)" % X_transformed.shape[1] ) ax.set_xticks(()) ax.set_yticks(()) # Plot the decision in original space. For that, we will assign a color # to each point in the mesh [x_min, x_max]x[y_min, y_max]. 
h = 0.01 x_min, x_max = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5 y_min, y_max = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) # transform grid using RandomTreesEmbedding transformed_grid = hasher.transform(np.c_[xx.ravel(), yy.ravel()]) y_grid_pred = nb.predict_proba(transformed_grid)[:, 1] ax = plt.subplot(223) ax.set_title("Naive Bayes on Transformed data") ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape)) ax.scatter(X[:, 0], X[:, 1], c=y, s=50, edgecolor="k") ax.set_ylim(-1.4, 1.4) ax.set_xlim(-1.4, 1.4) ax.set_xticks(()) ax.set_yticks(()) # transform grid using ExtraTreesClassifier y_grid_pred = trees.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1] ax = plt.subplot(224) ax.set_title("ExtraTrees predictions") ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape)) ax.scatter(X[:, 0], X[:, 1], c=y, s=50, edgecolor="k") ax.set_ylim(-1.4, 1.4) ax.set_xlim(-1.4, 1.4) ax.set_xticks(()) ax.set_yticks(()) plt.tight_layout() plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/ensemble/plot_stack_predictors.py
examples/ensemble/plot_stack_predictors.py
""" ================================= Combine predictors using stacking ================================= .. currentmodule:: sklearn Stacking refers to a method to blend estimators. In this strategy, some estimators are individually fitted on some training data while a final estimator is trained using the stacked predictions of these base estimators. In this example, we illustrate the use case in which different regressors are stacked together and a final linear penalized regressor is used to output the prediction. We compare the performance of each individual regressor with the stacking strategy. Stacking slightly improves the overall performance. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Download the dataset # #################### # # We will use the `Ames Housing`_ dataset which was first compiled by Dean De Cock # and became better known after it was used in Kaggle challenge. It is a set # of 1460 residential homes in Ames, Iowa, each described by 80 features. We # will use it to predict the final logarithmic price of the houses. In this # example we will use only 20 most interesting features chosen using # GradientBoostingRegressor() and limit number of entries (here we won't go # into the details on how to select the most interesting features). # # The Ames housing dataset is not shipped with scikit-learn and therefore we # will fetch it from `OpenML`_. # # .. _`Ames Housing`: http://jse.amstat.org/v19n3/decock.pdf # .. 
_`OpenML`: https://www.openml.org/d/42165 import numpy as np from sklearn.datasets import fetch_openml from sklearn.utils import shuffle def load_ames_housing(): df = fetch_openml(name="house_prices", as_frame=True) X = df.data y = df.target features = [ "YrSold", "HeatingQC", "Street", "YearRemodAdd", "Heating", "MasVnrType", "BsmtUnfSF", "Foundation", "MasVnrArea", "MSSubClass", "ExterQual", "Condition2", "GarageCars", "GarageType", "OverallQual", "TotalBsmtSF", "BsmtFinSF1", "HouseStyle", "MiscFeature", "MoSold", ] X = X.loc[:, features] X, y = shuffle(X, y, random_state=0) X = X.iloc[:600] y = y.iloc[:600] return X, np.log(y) X, y = load_ames_housing() # %% # Make pipeline to preprocess the data # #################################### # # Before we can use Ames dataset we still need to do some preprocessing. # First, we will select the categorical and numerical columns of the dataset to # construct the first step of the pipeline. from sklearn.compose import make_column_selector cat_selector = make_column_selector(dtype_include=[object, "string"]) num_selector = make_column_selector(dtype_include=np.number) cat_selector(X) # %% num_selector(X) # %% # Then, we will need to design preprocessing pipelines which depends on the # ending regressor. If the ending regressor is a linear model, one needs to # one-hot encode the categories. If the ending regressor is a tree-based model # an ordinal encoder will be sufficient. Besides, numerical values need to be # standardized for a linear model while the raw numerical data can be treated # as is by a tree-based model. However, both models need an imputer to # handle missing values. # # We will first design the pipeline required for the tree-based models. 
from sklearn.compose import make_column_transformer from sklearn.impute import SimpleImputer from sklearn.pipeline import make_pipeline from sklearn.preprocessing import OrdinalEncoder cat_tree_processor = OrdinalEncoder( handle_unknown="use_encoded_value", unknown_value=-1, encoded_missing_value=-2, ) num_tree_processor = SimpleImputer(strategy="mean", add_indicator=True) tree_preprocessor = make_column_transformer( (num_tree_processor, num_selector), (cat_tree_processor, cat_selector) ) tree_preprocessor # %% # Then, we will now define the preprocessor used when the ending regressor # is a linear model. from sklearn.preprocessing import OneHotEncoder, StandardScaler cat_linear_processor = OneHotEncoder(handle_unknown="ignore") num_linear_processor = make_pipeline( StandardScaler(), SimpleImputer(strategy="mean", add_indicator=True) ) linear_preprocessor = make_column_transformer( (num_linear_processor, num_selector), (cat_linear_processor, cat_selector) ) linear_preprocessor # %% # Stack of predictors on a single data set # ######################################## # # It is sometimes tedious to find the model which will best perform on a given # dataset. Stacking provide an alternative by combining the outputs of several # learners, without the need to choose a model specifically. The performance of # stacking is usually close to the best model and sometimes it can outperform # the prediction performance of each individual model. # # Here, we combine 3 learners (linear and non-linear) and use a ridge regressor # to combine their outputs together. # # .. note:: # Although we will make new pipelines with the processors which we wrote in # the previous section for the 3 learners, the final estimator # :class:`~sklearn.linear_model.RidgeCV()` does not need preprocessing of # the data as it will be fed with the already preprocessed output from the 3 # learners. 
from sklearn.linear_model import LassoCV lasso_pipeline = make_pipeline(linear_preprocessor, LassoCV()) lasso_pipeline # %% from sklearn.ensemble import RandomForestRegressor rf_pipeline = make_pipeline(tree_preprocessor, RandomForestRegressor(random_state=42)) rf_pipeline # %% from sklearn.ensemble import HistGradientBoostingRegressor gbdt_pipeline = make_pipeline( tree_preprocessor, HistGradientBoostingRegressor(random_state=0) ) gbdt_pipeline # %% from sklearn.ensemble import StackingRegressor from sklearn.linear_model import RidgeCV estimators = [ ("Random Forest", rf_pipeline), ("Lasso", lasso_pipeline), ("Gradient Boosting", gbdt_pipeline), ] stacking_regressor = StackingRegressor(estimators=estimators, final_estimator=RidgeCV()) stacking_regressor # %% # Measure and plot the results # ############################ # # Now we can use Ames Housing dataset to make the predictions. We check the # performance of each individual predictor as well as of the stack of the # regressors. import time import matplotlib.pyplot as plt from sklearn.metrics import PredictionErrorDisplay from sklearn.model_selection import cross_val_predict, cross_validate fig, axs = plt.subplots(2, 2, figsize=(9, 7)) axs = np.ravel(axs) for ax, (name, est) in zip( axs, estimators + [("Stacking Regressor", stacking_regressor)] ): scorers = {"R2": "r2", "MAE": "neg_mean_absolute_error"} start_time = time.time() scores = cross_validate( est, X, y, scoring=list(scorers.values()), n_jobs=-1, verbose=0 ) elapsed_time = time.time() - start_time y_pred = cross_val_predict(est, X, y, n_jobs=-1, verbose=0) scores = { key: ( f"{np.abs(np.mean(scores[f'test_{value}'])):.2f} +- " f"{np.std(scores[f'test_{value}']):.2f}" ) for key, value in scorers.items() } display = PredictionErrorDisplay.from_predictions( y_true=y, y_pred=y_pred, kind="actual_vs_predicted", ax=ax, scatter_kwargs={"alpha": 0.2, "color": "tab:blue"}, line_kwargs={"color": "tab:red"}, ) ax.set_title(f"{name}\nEvaluation in 
{elapsed_time:.2f} seconds") for name, score in scores.items(): ax.plot([], [], " ", label=f"{name}: {score}") ax.legend(loc="upper left") plt.suptitle("Single predictors versus stacked predictors") plt.tight_layout() plt.subplots_adjust(top=0.9) plt.show() # %% # The stacked regressor will combine the strengths of the different regressors. # However, we also see that training the stacked regressor is much more # computationally expensive.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/ensemble/plot_adaboost_multiclass.py
examples/ensemble/plot_adaboost_multiclass.py
""" ===================================== Multi-class AdaBoosted Decision Trees ===================================== This example shows how boosting can improve the prediction accuracy on a multi-label classification problem. It reproduces a similar experiment as depicted by Figure 1 in Zhu et al [1]_. The core principle of AdaBoost (Adaptive Boosting) is to fit a sequence of weak learners (e.g. Decision Trees) on repeatedly re-sampled versions of the data. Each sample carries a weight that is adjusted after each training step, such that misclassified samples will be assigned higher weights. The re-sampling process with replacement takes into account the weights assigned to each sample. Samples with higher weights have a greater chance of being selected multiple times in the new data set, while samples with lower weights are less likely to be selected. This ensures that subsequent iterations of the algorithm focus on the difficult-to-classify samples. .. rubric:: References .. [1] :doi:`J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class adaboost." Statistics and its Interface 2.3 (2009): 349-360. <10.4310/SII.2009.v2.n3.a8>` """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Creating the dataset # -------------------- # The classification dataset is constructed by taking a ten-dimensional standard # normal distribution (:math:`x` in :math:`R^{10}`) and defining three classes # separated by nested concentric ten-dimensional spheres such that roughly equal # numbers of samples are in each class (quantiles of the :math:`\chi^2` # distribution). from sklearn.datasets import make_gaussian_quantiles X, y = make_gaussian_quantiles( n_samples=2_000, n_features=10, n_classes=3, random_state=1 ) # %% # We split the dataset into 2 sets: 70 percent of the samples are used for # training and the remaining 30 percent for testing. 
from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, train_size=0.7, random_state=42 ) # %% # Training the `AdaBoostClassifier` # --------------------------------- # We train the :class:`~sklearn.ensemble.AdaBoostClassifier`. The estimator # utilizes boosting to improve the classification accuracy. Boosting is a method # designed to train weak learners (i.e. `estimator`) that learn from their # predecessor's mistakes. # # Here, we define the weak learner as a # :class:`~sklearn.tree.DecisionTreeClassifier` and set the maximum number of # leaves to 8. In a real setting, this parameter should be tuned. We set it to a # rather low value to limit the runtime of the example. # # The `SAMME` algorithm build into the # :class:`~sklearn.ensemble.AdaBoostClassifier` then uses the correct or # incorrect predictions made be the current weak learner to update the sample # weights used for training the consecutive weak learners. Also, the weight of # the weak learner itself is calculated based on its accuracy in classifying the # training examples. The weight of the weak learner determines its influence on # the final ensemble prediction. from sklearn.ensemble import AdaBoostClassifier from sklearn.tree import DecisionTreeClassifier weak_learner = DecisionTreeClassifier(max_leaf_nodes=8) n_estimators = 300 adaboost_clf = AdaBoostClassifier( estimator=weak_learner, n_estimators=n_estimators, random_state=42, ).fit(X_train, y_train) # %% # Analysis # -------- # Convergence of the `AdaBoostClassifier` # *************************************** # To demonstrate the effectiveness of boosting in improving accuracy, we # evaluate the misclassification error of the boosted trees in comparison to two # baseline scores. The first baseline score is the `misclassification_error` # obtained from a single weak-learner (i.e. # :class:`~sklearn.tree.DecisionTreeClassifier`), which serves as a reference # point. 
The second baseline score is obtained from the # :class:`~sklearn.dummy.DummyClassifier`, which predicts the most prevalent # class in a dataset. from sklearn.dummy import DummyClassifier from sklearn.metrics import accuracy_score dummy_clf = DummyClassifier() def misclassification_error(y_true, y_pred): return 1 - accuracy_score(y_true, y_pred) weak_learners_misclassification_error = misclassification_error( y_test, weak_learner.fit(X_train, y_train).predict(X_test) ) dummy_classifiers_misclassification_error = misclassification_error( y_test, dummy_clf.fit(X_train, y_train).predict(X_test) ) print( "DecisionTreeClassifier's misclassification_error: " f"{weak_learners_misclassification_error:.3f}" ) print( "DummyClassifier's misclassification_error: " f"{dummy_classifiers_misclassification_error:.3f}" ) # %% # After training the :class:`~sklearn.tree.DecisionTreeClassifier` model, the # achieved error surpasses the expected value that would have been obtained by # guessing the most frequent class label, as the # :class:`~sklearn.dummy.DummyClassifier` does. # # Now, we calculate the `misclassification_error`, i.e. `1 - accuracy`, of the # additive model (:class:`~sklearn.tree.DecisionTreeClassifier`) at each # boosting iteration on the test set to assess its performance. # # We use :meth:`~sklearn.ensemble.AdaBoostClassifier.staged_predict` that makes # as many iterations as the number of fitted estimator (i.e. corresponding to # `n_estimators`). At iteration `n`, the predictions of AdaBoost only use the # `n` first weak learners. We compare these predictions with the true # predictions `y_test` and we, therefore, conclude on the benefit (or not) of adding a # new weak learner into the chain. 
# # We plot the misclassification error for the different stages: import matplotlib.pyplot as plt import pandas as pd boosting_errors = pd.DataFrame( { "Number of trees": range(1, n_estimators + 1), "AdaBoost": [ misclassification_error(y_test, y_pred) for y_pred in adaboost_clf.staged_predict(X_test) ], } ).set_index("Number of trees") ax = boosting_errors.plot() ax.set_ylabel("Misclassification error on test set") ax.set_title("Convergence of AdaBoost algorithm") plt.plot( [boosting_errors.index.min(), boosting_errors.index.max()], [weak_learners_misclassification_error, weak_learners_misclassification_error], color="tab:orange", linestyle="dashed", ) plt.plot( [boosting_errors.index.min(), boosting_errors.index.max()], [ dummy_classifiers_misclassification_error, dummy_classifiers_misclassification_error, ], color="c", linestyle="dotted", ) plt.legend(["AdaBoost", "DecisionTreeClassifier", "DummyClassifier"], loc=1) plt.show() # %% # The plot shows the missclassification error on the test set after each # boosting iteration. We see that the error of the boosted trees converges to an # error of around 0.3 after 50 iterations, indicating a significantly higher # accuracy compared to a single tree, as illustrated by the dashed line in the # plot. # # The misclassification error jitters because the `SAMME` algorithm uses the # discrete outputs of the weak learners to train the boosted model. # # The convergence of :class:`~sklearn.ensemble.AdaBoostClassifier` is mainly # influenced by the learning rate (i.e. `learning_rate`), the number of weak # learners used (`n_estimators`), and the expressivity of the weak learners # (e.g. `max_leaf_nodes`). # %% # Errors and weights of the Weak Learners # *************************************** # As previously mentioned, AdaBoost is a forward stagewise additive model. We # now focus on understanding the relationship between the attributed weights of # the weak learners and their statistical performance. 
# # We use the fitted :class:`~sklearn.ensemble.AdaBoostClassifier`'s attributes # `estimator_errors_` and `estimator_weights_` to investigate this link. weak_learners_info = pd.DataFrame( { "Number of trees": range(1, n_estimators + 1), "Errors": adaboost_clf.estimator_errors_, "Weights": adaboost_clf.estimator_weights_, } ).set_index("Number of trees") axs = weak_learners_info.plot( subplots=True, layout=(1, 2), figsize=(10, 4), legend=False, color="tab:blue" ) axs[0, 0].set_ylabel("Train error") axs[0, 0].set_title("Weak learner's training error") axs[0, 1].set_ylabel("Weight") axs[0, 1].set_title("Weak learner's weight") fig = axs[0, 0].get_figure() fig.suptitle("Weak learner's errors and weights for the AdaBoostClassifier") fig.tight_layout() # %% # On the left plot, we show the weighted error of each weak learner on the # reweighted training set at each boosting iteration. On the right plot, we show # the weights associated with each weak learner later used to make the # predictions of the final additive model. # # We see that the error of the weak learner is the inverse of the weights. It # means that our additive model will trust more a weak learner that makes # smaller errors (on the training set) by increasing its impact on the final # decision. Indeed, this exactly is the formulation of updating the base # estimators' weights after each iteration in AdaBoost. # # .. dropdown:: Mathematical details # # The weight associated with a weak learner trained at the stage :math:`m` is # inversely associated with its misclassification error such that: # # .. math:: \alpha^{(m)} = \log \frac{1 - err^{(m)}}{err^{(m)}} + \log (K - 1), # # where :math:`\alpha^{(m)}` and :math:`err^{(m)}` are the weight and the error # of the :math:`m` th weak learner, respectively, and :math:`K` is the number of # classes in our classification problem. 
# # Another interesting observation boils down to the fact that the first weak # learners of the model make fewer errors than later weak learners of the # boosting chain. # # The intuition behind this observation is the following: due to the sample # reweighting, later classifiers are forced to try to classify more difficult or # noisy samples and to ignore already well classified samples. Therefore, the # overall error on the training set will increase. That's why the weak learner's # weights are built to counter-balance the worse performing weak learners.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/ensemble/plot_gradient_boosting_regression.py
examples/ensemble/plot_gradient_boosting_regression.py
""" ============================ Gradient Boosting regression ============================ This example demonstrates Gradient Boosting to produce a predictive model from an ensemble of weak predictive models. Gradient boosting can be used for regression and classification problems. Here, we will train a model to tackle a diabetes regression task. We will obtain the results from :class:`~sklearn.ensemble.GradientBoostingRegressor` with least squares loss and 500 regression trees of depth 4. Note: For larger datasets (n_samples >= 10000), please refer to :class:`~sklearn.ensemble.HistGradientBoostingRegressor`. See :ref:`sphx_glr_auto_examples_ensemble_plot_hgbt_regression.py` for an example showcasing some other advantages of :class:`~ensemble.HistGradientBoostingRegressor`. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib import matplotlib.pyplot as plt import numpy as np from sklearn import datasets, ensemble from sklearn.inspection import permutation_importance from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split from sklearn.utils.fixes import parse_version # %% # Load the data # ------------------------------------- # # First we need to load the data. diabetes = datasets.load_diabetes() X, y = diabetes.data, diabetes.target # %% # Data preprocessing # ------------------------------------- # # Next, we will split our dataset to use 90% for training and leave the rest # for testing. We will also set the regression model parameters. You can play # with these parameters to see how the results change. # # `n_estimators` : the number of boosting stages that will be performed. # Later, we will plot deviance against boosting iterations. # # `max_depth` : limits the number of nodes in the tree. # The best value depends on the interaction of the input variables. # # `min_samples_split` : the minimum number of samples required to split an # internal node. 
# # `learning_rate` : how much the contribution of each tree will shrink. # # `loss` : loss function to optimize. The least squares function is used in # this case however, there are many other options (see # :class:`~sklearn.ensemble.GradientBoostingRegressor` ). X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.1, random_state=13 ) params = { "n_estimators": 500, "max_depth": 4, "min_samples_split": 5, "learning_rate": 0.01, "loss": "squared_error", } # %% # Fit regression model # -------------------- # # Now we will initiate the gradient boosting regressors and fit it with our # training data. Let's also look and the mean squared error on the test data. reg = ensemble.GradientBoostingRegressor(**params) reg.fit(X_train, y_train) mse = mean_squared_error(y_test, reg.predict(X_test)) print("The mean squared error (MSE) on test set: {:.4f}".format(mse)) # %% # Plot training deviance # ---------------------- # # Finally, we will visualize the results. To do that we will first compute the # test set deviance and then plot it against boosting iterations. test_score = np.zeros((params["n_estimators"],), dtype=np.float64) for i, y_pred in enumerate(reg.staged_predict(X_test)): test_score[i] = mean_squared_error(y_test, y_pred) fig = plt.figure(figsize=(6, 6)) plt.subplot(1, 1, 1) plt.title("Deviance") plt.plot( np.arange(params["n_estimators"]) + 1, reg.train_score_, "b-", label="Training Set Deviance", ) plt.plot( np.arange(params["n_estimators"]) + 1, test_score, "r-", label="Test Set Deviance" ) plt.legend(loc="upper right") plt.xlabel("Boosting Iterations") plt.ylabel("Deviance") fig.tight_layout() plt.show() # %% # Plot feature importance # ----------------------- # # .. warning:: # Careful, impurity-based feature importances can be misleading for # **high cardinality** features (many unique values). As an alternative, # the permutation importances of ``reg`` can be computed on a # held out test set. 
See :ref:`permutation_importance` for more details. # # For this example, the impurity-based and permutation methods identify the # same 2 strongly predictive features but not in the same order. The third most # predictive feature, "bp", is also the same for the 2 methods. The remaining # features are less predictive and the error bars of the permutation plot # show that they overlap with 0. feature_importance = reg.feature_importances_ sorted_idx = np.argsort(feature_importance) pos = np.arange(sorted_idx.shape[0]) + 0.5 fig = plt.figure(figsize=(12, 6)) plt.subplot(1, 2, 1) plt.barh(pos, feature_importance[sorted_idx], align="center") plt.yticks(pos, np.array(diabetes.feature_names)[sorted_idx]) plt.title("Feature Importance (MDI)") result = permutation_importance( reg, X_test, y_test, n_repeats=10, random_state=42, n_jobs=2 ) sorted_idx = result.importances_mean.argsort() plt.subplot(1, 2, 2) # `labels` argument in boxplot is deprecated in matplotlib 3.9 and has been # renamed to `tick_labels`. The following code handles this, but as a # scikit-learn user you probably can write simpler code by using `labels=...` # (matplotlib < 3.9) or `tick_labels=...` (matplotlib >= 3.9). tick_labels_parameter_name = ( "tick_labels" if parse_version(matplotlib.__version__) >= parse_version("3.9") else "labels" ) tick_labels_dict = { tick_labels_parameter_name: np.array(diabetes.feature_names)[sorted_idx] } plt.boxplot(result.importances[sorted_idx].T, vert=False, **tick_labels_dict) plt.title("Permutation Importance (test set)") fig.tight_layout() plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/ensemble/plot_voting_decision_regions.py
examples/ensemble/plot_voting_decision_regions.py
""" =============================================================== Visualizing the probabilistic predictions of a VotingClassifier =============================================================== .. currentmodule:: sklearn Plot the predicted class probabilities in a toy dataset predicted by three different classifiers and averaged by the :class:`~ensemble.VotingClassifier`. First, three linear classifiers are initialized. Two are spline models with interaction terms, one using constant extrapolation and the other using periodic extrapolation. The third classifier is a :class:`~kernel_approximation.Nystroem` with the default "rbf" kernel. In the first part of this example, these three classifiers are used to demonstrate soft-voting using :class:`~ensemble.VotingClassifier` with weighted average. We set `weights=[2, 1, 3]`, meaning the constant extrapolation spline model's predictions are weighted twice as much as the periodic spline model's, and the Nystroem model's predictions are weighted three times as much as the periodic spline. The second part demonstrates how soft predictions can be converted into hard predictions. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # We first generate a noisy XOR dataset, which is a binary classification task. 
import matplotlib.pyplot as plt import numpy as np import pandas as pd from matplotlib.colors import ListedColormap n_samples = 500 rng = np.random.default_rng(0) feature_names = ["Feature #0", "Feature #1"] common_scatter_plot_params = dict( cmap=ListedColormap(["tab:red", "tab:blue"]), edgecolor="white", linewidth=1, ) xor = pd.DataFrame( np.random.RandomState(0).uniform(low=-1, high=1, size=(n_samples, 2)), columns=feature_names, ) noise = rng.normal(loc=0, scale=0.1, size=(n_samples, 2)) target_xor = np.logical_xor( xor["Feature #0"] + noise[:, 0] > 0, xor["Feature #1"] + noise[:, 1] > 0 ) X = xor[feature_names] y = target_xor.astype(np.int32) fig, ax = plt.subplots() ax.scatter(X["Feature #0"], X["Feature #1"], c=y, **common_scatter_plot_params) ax.set_title("The XOR dataset") plt.show() # %% # Due to the inherent non-linear separability of the XOR dataset, tree-based # models would often be preferred. However, appropriate feature engineering # combined with a linear model can yield effective results, with the added # benefit of producing better-calibrated probabilities for samples located in # the transition regions affected by noise. # # We define and fit the models on the whole dataset. 
from sklearn.ensemble import VotingClassifier from sklearn.kernel_approximation import Nystroem from sklearn.linear_model import LogisticRegression from sklearn.pipeline import make_pipeline from sklearn.preprocessing import PolynomialFeatures, SplineTransformer, StandardScaler clf1 = make_pipeline( SplineTransformer(degree=2, n_knots=2), PolynomialFeatures(interaction_only=True), LogisticRegression(C=10), ) clf2 = make_pipeline( SplineTransformer( degree=2, n_knots=4, extrapolation="periodic", include_bias=True, ), PolynomialFeatures(interaction_only=True), LogisticRegression(C=10), ) clf3 = make_pipeline( StandardScaler(), Nystroem(gamma=2, random_state=0), LogisticRegression(C=10), ) weights = [2, 1, 3] eclf = VotingClassifier( estimators=[ ("constant splines model", clf1), ("periodic splines model", clf2), ("nystroem model", clf3), ], voting="soft", weights=weights, ) clf1.fit(X, y) clf2.fit(X, y) clf3.fit(X, y) eclf.fit(X, y) # %% # Finally we use :class:`~inspection.DecisionBoundaryDisplay` to plot the # predicted probabilities. By using a diverging colormap (such as `"RdBu"`), we # can ensure that darker colors correspond to `predict_proba` close to either 0 # or 1, and white corresponds to `predict_proba` of 0.5. 
from itertools import product from sklearn.inspection import DecisionBoundaryDisplay fig, axarr = plt.subplots(2, 2, sharex="col", sharey="row", figsize=(10, 8)) for idx, clf, title in zip( product([0, 1], [0, 1]), [clf1, clf2, clf3, eclf], [ "Splines with\nconstant extrapolation", "Splines with\nperiodic extrapolation", "RBF Nystroem", "Soft Voting", ], ): disp = DecisionBoundaryDisplay.from_estimator( clf, X, response_method="predict_proba", plot_method="pcolormesh", cmap="RdBu", alpha=0.8, ax=axarr[idx[0], idx[1]], ) axarr[idx[0], idx[1]].scatter( X["Feature #0"], X["Feature #1"], c=y, **common_scatter_plot_params, ) axarr[idx[0], idx[1]].set_title(title) fig.colorbar(disp.surface_, ax=axarr[idx[0], idx[1]], label="Probability estimate") plt.show() # %% # As a sanity check, we can verify for a given sample that the probability # predicted by the :class:`~ensemble.VotingClassifier` is indeed the weighted # average of the individual classifiers' soft-predictions. # # In the case of binary classification such as in the present example, the # :term:`predict_proba` arrays contain the probability of belonging to class 0 # (here in red) as the first entry, and the probability of belonging to class 1 # (here in blue) as the second entry. 
test_sample = pd.DataFrame({"Feature #0": [-0.5], "Feature #1": [1.5]}) predict_probas = [est.predict_proba(test_sample).ravel() for est in eclf.estimators_] for (est_name, _), est_probas in zip(eclf.estimators, predict_probas): print(f"{est_name}'s predicted probabilities: {est_probas}") # %% print( "Weighted average of soft-predictions: " f"{np.dot(weights, predict_probas) / np.sum(weights)}" ) # %% # We can see that manual calculation of predicted probabilities above is # equivalent to that produced by the `VotingClassifier`: print( "Predicted probability of VotingClassifier: " f"{eclf.predict_proba(test_sample).ravel()}" ) # %% # To convert soft predictions into hard predictions when weights are provided, # the weighted average predicted probabilities are computed for each class. # Then, the final class label is then derived from the class label with the # highest average probability, which corresponds to the default threshold at # `predict_proba=0.5` in the case of binary classification. print( "Class with the highest weighted average of soft-predictions: " f"{np.argmax(np.dot(weights, predict_probas) / np.sum(weights))}" ) # %% # This is equivalent to the output of `VotingClassifier`'s `predict` method: print(f"Predicted class of VotingClassifier: {eclf.predict(test_sample).ravel()}") # %% # Soft votes can be thresholded as for any other probabilistic classifier. This # allows you to set a threshold probability at which the positive class will be # predicted, instead of simply selecting the class with the highest predicted # probability. from sklearn.model_selection import FixedThresholdClassifier eclf_other_threshold = FixedThresholdClassifier( eclf, threshold=0.7, response_method="predict_proba" ).fit(X, y) print( "Predicted class of thresholded VotingClassifier: " f"{eclf_other_threshold.predict(test_sample)}" )
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/ensemble/plot_gradient_boosting_quantile.py
examples/ensemble/plot_gradient_boosting_quantile.py
""" ===================================================== Prediction Intervals for Gradient Boosting Regression ===================================================== This example shows how quantile regression can be used to create prediction intervals. See :ref:`sphx_glr_auto_examples_ensemble_plot_hgbt_regression.py` for an example showcasing some other features of :class:`~ensemble.HistGradientBoostingRegressor`. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Generate some data for a synthetic regression problem by applying the # function f to uniformly sampled random inputs. import numpy as np from sklearn.model_selection import train_test_split def f(x): """The function to predict.""" return x * np.sin(x) rng = np.random.RandomState(42) X = np.atleast_2d(rng.uniform(0, 10.0, size=1000)).T expected_y = f(X).ravel() # %% # To make the problem interesting, we generate observations of the target y as # the sum of a deterministic term computed by the function f and a random noise # term that follows a centered `log-normal # <https://en.wikipedia.org/wiki/Log-normal_distribution>`_. To make this even # more interesting we consider the case where the amplitude of the noise # depends on the input variable x (heteroscedastic noise). # # The lognormal distribution is non-symmetric and long tailed: observing large # outliers is likely but it is impossible to observe small outliers. sigma = 0.5 + X.ravel() / 10 noise = rng.lognormal(sigma=sigma) - np.exp(sigma**2 / 2) y = expected_y + noise # %% # Split into train, test datasets: X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) # %% # Fitting non-linear quantile and least squares regressors # -------------------------------------------------------- # # Fit gradient boosting models trained with the quantile loss and # alpha=0.05, 0.5, 0.95. # # The models obtained for alpha=0.05 and alpha=0.95 produce a 90% confidence # interval (95% - 5% = 90%). 
# # The model trained with alpha=0.5 produces a regression of the median: on # average, there should be the same number of target observations above and # below the predicted values. from sklearn.ensemble import GradientBoostingRegressor from sklearn.metrics import mean_pinball_loss, mean_squared_error all_models = {} common_params = dict( learning_rate=0.05, n_estimators=200, max_depth=2, min_samples_leaf=9, min_samples_split=9, ) for alpha in [0.05, 0.5, 0.95]: gbr = GradientBoostingRegressor(loss="quantile", alpha=alpha, **common_params) all_models["q %1.2f" % alpha] = gbr.fit(X_train, y_train) # %% # Notice that :class:`~sklearn.ensemble.HistGradientBoostingRegressor` is much # faster than :class:`~sklearn.ensemble.GradientBoostingRegressor` starting with # intermediate datasets (`n_samples >= 10_000`), which is not the case of the # present example. # # For the sake of comparison, we also fit a baseline model trained with the # usual (mean) squared error (MSE). gbr_ls = GradientBoostingRegressor(loss="squared_error", **common_params) all_models["mse"] = gbr_ls.fit(X_train, y_train) # %% # Create an evenly spaced evaluation set of input values spanning the [0, 10] # range. xx = np.atleast_2d(np.linspace(0, 10, 1000)).T # %% # Plot the true conditional mean function f, the predictions of the conditional # mean (loss equals squared error), the conditional median and the conditional # 90% interval (from 5th to 95th conditional percentiles). 
import matplotlib.pyplot as plt y_pred = all_models["mse"].predict(xx) y_lower = all_models["q 0.05"].predict(xx) y_upper = all_models["q 0.95"].predict(xx) y_med = all_models["q 0.50"].predict(xx) fig = plt.figure(figsize=(10, 10)) plt.plot(xx, f(xx), "black", linewidth=3, label=r"$f(x) = x\,\sin(x)$") plt.plot(X_test, y_test, "b.", markersize=10, label="Test observations") plt.plot(xx, y_med, "tab:orange", linewidth=3, label="Predicted median") plt.plot(xx, y_pred, "tab:green", linewidth=3, label="Predicted mean") plt.fill_between( xx.ravel(), y_lower, y_upper, alpha=0.4, label="Predicted 90% interval" ) plt.xlabel("$x$") plt.ylabel("$f(x)$") plt.ylim(-10, 25) plt.legend(loc="upper left") plt.show() # %% # Comparing the predicted median with the predicted mean, we note that the # median is on average below the mean as the noise is skewed towards high # values (large outliers). The median estimate also seems to be smoother # because of its natural robustness to outliers. # # Also observe that the inductive bias of gradient boosting trees is # unfortunately preventing our 0.05 quantile to fully capture the sinoisoidal # shape of the signal, in particular around x=8. Tuning hyper-parameters can # reduce this effect as shown in the last part of this notebook. # # Analysis of the error metrics # ----------------------------- # # Measure the models with :func:`~sklearn.metrics.mean_squared_error` and # :func:`~sklearn.metrics.mean_pinball_loss` metrics on the training dataset. 
import pandas as pd def highlight_min(x): x_min = x.min() return ["font-weight: bold" if v == x_min else "" for v in x] results = [] for name, gbr in sorted(all_models.items()): metrics = {"model": name} y_pred = gbr.predict(X_train) for alpha in [0.05, 0.5, 0.95]: metrics["pbl=%1.2f" % alpha] = mean_pinball_loss(y_train, y_pred, alpha=alpha) metrics["MSE"] = mean_squared_error(y_train, y_pred) results.append(metrics) pd.DataFrame(results).set_index("model").style.apply(highlight_min) # %% # One column shows all models evaluated by the same metric. The minimum number # on a column should be obtained when the model is trained and measured with # the same metric. This should be always the case on the training set if the # training converged. # # Note that because the target distribution is asymmetric, the expected # conditional mean and conditional median are significantly different and # therefore one could not use the squared error model get a good estimation of # the conditional median nor the converse. # # If the target distribution were symmetric and had no outliers (e.g. with a # Gaussian noise), then median estimator and the least squares estimator would # have yielded similar predictions. # # We then do the same on the test set. results = [] for name, gbr in sorted(all_models.items()): metrics = {"model": name} y_pred = gbr.predict(X_test) for alpha in [0.05, 0.5, 0.95]: metrics["pbl=%1.2f" % alpha] = mean_pinball_loss(y_test, y_pred, alpha=alpha) metrics["MSE"] = mean_squared_error(y_test, y_pred) results.append(metrics) pd.DataFrame(results).set_index("model").style.apply(highlight_min) # %% # Errors are higher meaning the models slightly overfitted the data. It still # shows that the best test metric is obtained when the model is trained by # minimizing this same metric. 
# # Note that the conditional median estimator is competitive with the squared # error estimator in terms of MSE on the test set: this can be explained by # the fact the squared error estimator is very sensitive to large outliers # which can cause significant overfitting. This can be seen on the right hand # side of the previous plot. The conditional median estimator is biased # (underestimation for this asymmetric noise) but is also naturally robust to # outliers and overfits less. # # .. _calibration-section: # # Calibration of the confidence interval # -------------------------------------- # # We can also evaluate the ability of the two extreme quantile estimators at # producing a well-calibrated conditional 90%-confidence interval. # # To do this we can compute the fraction of observations that fall between the # predictions: def coverage_fraction(y, y_low, y_high): return np.mean(np.logical_and(y >= y_low, y <= y_high)) coverage_fraction( y_train, all_models["q 0.05"].predict(X_train), all_models["q 0.95"].predict(X_train), ) # %% # On the training set the calibration is very close to the expected coverage # value for a 90% confidence interval. coverage_fraction( y_test, all_models["q 0.05"].predict(X_test), all_models["q 0.95"].predict(X_test) ) # %% # On the test set, the estimated confidence interval is slightly too narrow. # Note, however, that we would need to wrap those metrics in a cross-validation # loop to assess their variability under data resampling. # # Tuning the hyper-parameters of the quantile regressors # ------------------------------------------------------ # # In the plot above, we observed that the 5th percentile regressor seems to # underfit and could not adapt to sinusoidal shape of the signal. # # The hyper-parameters of the model were approximately hand-tuned for the # median regressor and there is no reason that the same hyper-parameters are # suitable for the 5th percentile regressor. 
# # To confirm this hypothesis, we tune the hyper-parameters of a new regressor # of the 5th percentile by selecting the best model parameters by # cross-validation on the pinball loss with alpha=0.05: # %% from pprint import pprint from sklearn.experimental import enable_halving_search_cv # noqa: F401 from sklearn.metrics import make_scorer from sklearn.model_selection import HalvingRandomSearchCV param_grid = dict( learning_rate=[0.05, 0.1, 0.2], max_depth=[2, 5, 10], min_samples_leaf=[1, 5, 10, 20], min_samples_split=[5, 10, 20, 30, 50], ) alpha = 0.05 neg_mean_pinball_loss_05p_scorer = make_scorer( mean_pinball_loss, alpha=alpha, greater_is_better=False, # maximize the negative loss ) gbr = GradientBoostingRegressor(loss="quantile", alpha=alpha, random_state=0) search_05p = HalvingRandomSearchCV( gbr, param_grid, resource="n_estimators", max_resources=250, min_resources=50, scoring=neg_mean_pinball_loss_05p_scorer, n_jobs=2, random_state=0, ).fit(X_train, y_train) pprint(search_05p.best_params_) # %% # We observe that the hyper-parameters that were hand-tuned for the median # regressor are in the same range as the hyper-parameters suitable for the 5th # percentile regressor. # # Let's now tune the hyper-parameters for the 95th percentile regressor. 
We # need to redefine the `scoring` metric used to select the best model, along # with adjusting the alpha parameter of the inner gradient boosting estimator # itself: from sklearn.base import clone alpha = 0.95 neg_mean_pinball_loss_95p_scorer = make_scorer( mean_pinball_loss, alpha=alpha, greater_is_better=False, # maximize the negative loss ) search_95p = clone(search_05p).set_params( estimator__alpha=alpha, scoring=neg_mean_pinball_loss_95p_scorer, ) search_95p.fit(X_train, y_train) pprint(search_95p.best_params_) # %% # The result shows that the hyper-parameters for the 95th percentile regressor # identified by the search procedure are roughly in the same range as the hand-tuned # hyper-parameters for the median regressor and the hyper-parameters # identified by the search procedure for the 5th percentile regressor. However, # the hyper-parameter searches did lead to an improved 90% confidence interval # that is comprised by the predictions of those two tuned quantile regressors. # Note that the prediction of the upper 95th percentile has a much coarser shape # than the prediction of the lower 5th percentile because of the outliers: y_lower = search_05p.predict(xx) y_upper = search_95p.predict(xx) fig = plt.figure(figsize=(10, 10)) plt.plot(xx, f(xx), "black", linewidth=3, label=r"$f(x) = x\,\sin(x)$") plt.plot(X_test, y_test, "b.", markersize=10, label="Test observations") plt.fill_between( xx.ravel(), y_lower, y_upper, alpha=0.4, label="Predicted 90% interval" ) plt.xlabel("$x$") plt.ylabel("$f(x)$") plt.ylim(-10, 25) plt.legend(loc="upper left") plt.title("Prediction with tuned hyper-parameters") plt.show() # %% # The plot looks qualitatively better than for the untuned models, especially # for the shape of the of lower quantile. 
# # We now quantitatively evaluate the joint-calibration of the pair of # estimators: coverage_fraction(y_train, search_05p.predict(X_train), search_95p.predict(X_train)) # %% coverage_fraction(y_test, search_05p.predict(X_test), search_95p.predict(X_test)) # %% # The calibration of the tuned pair is sadly not better on the test set: the # width of the estimated confidence interval is still too narrow. # # Again, we would need to wrap this study in a cross-validation loop to # better assess the variability of those estimates.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/ensemble/plot_voting_regressor.py
examples/ensemble/plot_voting_regressor.py
""" ================================================= Plot individual and voting regression predictions ================================================= .. currentmodule:: sklearn A voting regressor is an ensemble meta-estimator that fits several base regressors, each on the whole dataset. Then it averages the individual predictions to form a final prediction. We will use three different regressors to predict the data: :class:`~ensemble.GradientBoostingRegressor`, :class:`~ensemble.RandomForestRegressor`, and :class:`~linear_model.LinearRegression`). Then the above 3 regressors will be used for the :class:`~ensemble.VotingRegressor`. Finally, we will plot the predictions made by all models for comparison. We will work with the diabetes dataset which consists of 10 features collected from a cohort of diabetes patients. The target is a quantitative measure of disease progression one year after baseline. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt from sklearn.datasets import load_diabetes from sklearn.ensemble import ( GradientBoostingRegressor, RandomForestRegressor, VotingRegressor, ) from sklearn.linear_model import LinearRegression # %% # Training classifiers # -------------------------------- # # First, we will load the diabetes dataset and initiate a gradient boosting # regressor, a random forest regressor and a linear regression. Next, we will # use the 3 regressors to build the voting regressor: X, y = load_diabetes(return_X_y=True) # Train classifiers reg1 = GradientBoostingRegressor(random_state=1) reg2 = RandomForestRegressor(random_state=1) reg3 = LinearRegression() reg1.fit(X, y) reg2.fit(X, y) reg3.fit(X, y) ereg = VotingRegressor([("gb", reg1), ("rf", reg2), ("lr", reg3)]) ereg.fit(X, y) # %% # Making predictions # -------------------------------- # # Now we will use each of the regressors to make the 20 first predictions. 
xt = X[:20] pred1 = reg1.predict(xt) pred2 = reg2.predict(xt) pred3 = reg3.predict(xt) pred4 = ereg.predict(xt) # %% # Plot the results # -------------------------------- # # Finally, we will visualize the 20 predictions. The red stars show the average # prediction made by :class:`~ensemble.VotingRegressor`. plt.figure() plt.plot(pred1, "gd", label="GradientBoostingRegressor") plt.plot(pred2, "b^", label="RandomForestRegressor") plt.plot(pred3, "ys", label="LinearRegression") plt.plot(pred4, "r*", ms=10, label="VotingRegressor") plt.tick_params(axis="x", which="both", bottom=False, top=False, labelbottom=False) plt.ylabel("predicted") plt.xlabel("training samples") plt.legend(loc="best") plt.title("Regressor predictions and their average") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/ensemble/plot_feature_transformation.py
examples/ensemble/plot_feature_transformation.py
""" =============================================== Feature transformations with ensembles of trees =============================================== Transform your features into a higher dimensional, sparse space. Then train a linear model on these features. First fit an ensemble of trees (totally random trees, a random forest, or gradient boosted trees) on the training set. Then each leaf of each tree in the ensemble is assigned a fixed arbitrary feature index in a new feature space. These leaf indices are then encoded in a one-hot fashion. Each sample goes through the decisions of each tree of the ensemble and ends up in one leaf per tree. The sample is encoded by setting feature values for these leaves to 1 and the other feature values to 0. The resulting transformer has then learned a supervised, sparse, high-dimensional categorical embedding of the data. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # First, we will create a large dataset and split it into three sets: # # - a set to train the ensemble methods which are later used to as a feature # engineering transformer; # - a set to train the linear model; # - a set to test the linear model. # # It is important to split the data in such way to avoid overfitting by leaking # data. from sklearn.datasets import make_classification from sklearn.model_selection import train_test_split X, y = make_classification(n_samples=80_000, random_state=10) X_full_train, X_test, y_full_train, y_test = train_test_split( X, y, test_size=0.5, random_state=10 ) X_train_ensemble, X_train_linear, y_train_ensemble, y_train_linear = train_test_split( X_full_train, y_full_train, test_size=0.5, random_state=10 ) # %% # For each of the ensemble methods, we will use 10 estimators and a maximum # depth of 3 levels. 
n_estimators = 10 max_depth = 3 # %% # First, we will start by training the random forest and gradient boosting on # the separated training set from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier random_forest = RandomForestClassifier( n_estimators=n_estimators, max_depth=max_depth, random_state=10 ) random_forest.fit(X_train_ensemble, y_train_ensemble) gradient_boosting = GradientBoostingClassifier( n_estimators=n_estimators, max_depth=max_depth, random_state=10 ) _ = gradient_boosting.fit(X_train_ensemble, y_train_ensemble) # %% # Notice that :class:`~sklearn.ensemble.HistGradientBoostingClassifier` is much # faster than :class:`~sklearn.ensemble.GradientBoostingClassifier` starting # with intermediate datasets (`n_samples >= 10_000`), which is not the case of # the present example. # # The :class:`~sklearn.ensemble.RandomTreesEmbedding` is an unsupervised method # and thus does not required to be trained independently. from sklearn.ensemble import RandomTreesEmbedding random_tree_embedding = RandomTreesEmbedding( n_estimators=n_estimators, max_depth=max_depth, random_state=0 ) # %% # Now, we will create three pipelines that will use the above embedding as # a preprocessing stage. # # The random trees embedding can be directly pipelined with the logistic # regression because it is a standard scikit-learn transformer. from sklearn.linear_model import LogisticRegression from sklearn.pipeline import make_pipeline rt_model = make_pipeline(random_tree_embedding, LogisticRegression(max_iter=1000)) rt_model.fit(X_train_linear, y_train_linear) # %% # Then, we can pipeline random forest or gradient boosting with a logistic # regression. However, the feature transformation will happen by calling the # method `apply`. The pipeline in scikit-learn expects a call to `transform`. # Therefore, we wrapped the call to `apply` within a `FunctionTransformer`. 
from sklearn.preprocessing import FunctionTransformer, OneHotEncoder def rf_apply(X, model): return model.apply(X) rf_leaves_yielder = FunctionTransformer(rf_apply, kw_args={"model": random_forest}) rf_model = make_pipeline( rf_leaves_yielder, OneHotEncoder(handle_unknown="ignore"), LogisticRegression(max_iter=1000), ) rf_model.fit(X_train_linear, y_train_linear) # %% def gbdt_apply(X, model): return model.apply(X)[:, :, 0] gbdt_leaves_yielder = FunctionTransformer( gbdt_apply, kw_args={"model": gradient_boosting} ) gbdt_model = make_pipeline( gbdt_leaves_yielder, OneHotEncoder(handle_unknown="ignore"), LogisticRegression(max_iter=1000), ) gbdt_model.fit(X_train_linear, y_train_linear) # %% # We can finally show the different ROC curves for all the models. import matplotlib.pyplot as plt from sklearn.metrics import RocCurveDisplay _, ax = plt.subplots() models = [ ("RT embedding -> LR", rt_model), ("RF", random_forest), ("RF embedding -> LR", rf_model), ("GBDT", gradient_boosting), ("GBDT embedding -> LR", gbdt_model), ] model_displays = {} for name, pipeline in models: model_displays[name] = RocCurveDisplay.from_estimator( pipeline, X_test, y_test, ax=ax, name=name ) _ = ax.set_title("ROC curve") # %% _, ax = plt.subplots() for name, pipeline in models: model_displays[name].plot(ax=ax) ax.set_xlim(0, 0.2) ax.set_ylim(0.8, 1) _ = ax.set_title("ROC curve (zoomed in at top left)")
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/ensemble/plot_random_forest_regression_multioutput.py
examples/ensemble/plot_random_forest_regression_multioutput.py
""" ============================================================ Comparing random forests and the multi-output meta estimator ============================================================ An example to compare multi-output regression with random forest and the :ref:`multioutput.MultiOutputRegressor <multiclass>` meta-estimator. This example illustrates the use of the :ref:`multioutput.MultiOutputRegressor <multiclass>` meta-estimator to perform multi-output regression. A random forest regressor is used, which supports multi-output regression natively, so the results can be compared. The random forest regressor will only ever predict values within the range of observations or closer to zero for each of the targets. As a result the predictions are biased towards the centre of the circle. Using a single underlying feature the model learns both the x and y coordinate as output. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn.ensemble import RandomForestRegressor from sklearn.model_selection import train_test_split from sklearn.multioutput import MultiOutputRegressor # Create a random dataset rng = np.random.RandomState(1) X = np.sort(200 * rng.rand(600, 1) - 100, axis=0) y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T y += 0.5 - rng.rand(*y.shape) X_train, X_test, y_train, y_test = train_test_split( X, y, train_size=400, test_size=200, random_state=4 ) max_depth = 30 regr_multirf = MultiOutputRegressor( RandomForestRegressor(n_estimators=100, max_depth=max_depth, random_state=0) ) regr_multirf.fit(X_train, y_train) regr_rf = RandomForestRegressor(n_estimators=100, max_depth=max_depth, random_state=2) regr_rf.fit(X_train, y_train) # Predict on new data y_multirf = regr_multirf.predict(X_test) y_rf = regr_rf.predict(X_test) # Plot the results plt.figure() s = 50 a = 0.4 plt.scatter( y_test[:, 0], y_test[:, 1], edgecolor="k", c="navy", s=s, marker="s", 
alpha=a, label="Data", ) plt.scatter( y_multirf[:, 0], y_multirf[:, 1], edgecolor="k", c="cornflowerblue", s=s, alpha=a, label="Multi RF score=%.2f" % regr_multirf.score(X_test, y_test), ) plt.scatter( y_rf[:, 0], y_rf[:, 1], edgecolor="k", c="c", s=s, marker="^", alpha=a, label="RF score=%.2f" % regr_rf.score(X_test, y_test), ) plt.xlim([-6, 6]) plt.ylim([-6, 6]) plt.xlabel("target 1") plt.ylabel("target 2") plt.title("Comparing random forests and the multi-output meta estimator") plt.legend() plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/ensemble/plot_gradient_boosting_oob.py
examples/ensemble/plot_gradient_boosting_oob.py
""" ====================================== Gradient Boosting Out-of-Bag estimates ====================================== Out-of-bag (OOB) estimates can be a useful heuristic to estimate the "optimal" number of boosting iterations. OOB estimates are almost identical to cross-validation estimates but they can be computed on-the-fly without the need for repeated model fitting. OOB estimates are only available for Stochastic Gradient Boosting (i.e. ``subsample < 1.0``), the estimates are derived from the improvement in loss based on the examples not included in the bootstrap sample (the so-called out-of-bag examples). The OOB estimator is a pessimistic estimator of the true test loss, but remains a fairly good approximation for a small number of trees. The figure shows the cumulative sum of the negative OOB improvements as a function of the boosting iteration. As you can see, it tracks the test loss for the first hundred iterations but then diverges in a pessimistic way. The figure also shows the performance of 3-fold cross validation which usually gives a better estimate of the test loss but is computationally more demanding. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from scipy.special import expit from sklearn import ensemble from sklearn.metrics import log_loss from sklearn.model_selection import KFold, train_test_split # Generate data (adapted from G. 
Ridgeway's gbm example) n_samples = 1000 random_state = np.random.RandomState(13) x1 = random_state.uniform(size=n_samples) x2 = random_state.uniform(size=n_samples) x3 = random_state.randint(0, 4, size=n_samples) p = expit(np.sin(3 * x1) - 4 * x2 + x3) y = random_state.binomial(1, p, size=n_samples) X = np.c_[x1, x2, x3] X = X.astype(np.float32) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=9) # Fit classifier with out-of-bag estimates params = { "n_estimators": 1200, "max_depth": 3, "subsample": 0.5, "learning_rate": 0.01, "min_samples_leaf": 1, "random_state": 3, } clf = ensemble.GradientBoostingClassifier(**params) clf.fit(X_train, y_train) acc = clf.score(X_test, y_test) print("Accuracy: {:.4f}".format(acc)) n_estimators = params["n_estimators"] x = np.arange(n_estimators) + 1 def heldout_score(clf, X_test, y_test): """compute deviance scores on ``X_test`` and ``y_test``.""" score = np.zeros((n_estimators,), dtype=np.float64) for i, y_proba in enumerate(clf.staged_predict_proba(X_test)): score[i] = 2 * log_loss(y_test, y_proba[:, 1]) return score def cv_estimate(n_splits=None): cv = KFold(n_splits=n_splits) cv_clf = ensemble.GradientBoostingClassifier(**params) val_scores = np.zeros((n_estimators,), dtype=np.float64) for train, test in cv.split(X_train, y_train): cv_clf.fit(X_train[train], y_train[train]) val_scores += heldout_score(cv_clf, X_train[test], y_train[test]) val_scores /= n_splits return val_scores # Estimate best n_estimator using cross-validation cv_score = cv_estimate(3) # Compute best n_estimator for test data test_score = heldout_score(clf, X_test, y_test) # negative cumulative sum of oob improvements cumsum = -np.cumsum(clf.oob_improvement_) # min loss according to OOB oob_best_iter = x[np.argmin(cumsum)] # min loss according to test (normalize such that first loss is 0) test_score -= test_score[0] test_best_iter = x[np.argmin(test_score)] # min loss according to cv (normalize such that first loss is 0) 
cv_score -= cv_score[0] cv_best_iter = x[np.argmin(cv_score)] # color brew for the three curves oob_color = list(map(lambda x: x / 256.0, (190, 174, 212))) test_color = list(map(lambda x: x / 256.0, (127, 201, 127))) cv_color = list(map(lambda x: x / 256.0, (253, 192, 134))) # line type for the three curves oob_line = "dashed" test_line = "solid" cv_line = "dashdot" # plot curves and vertical lines for best iterations plt.figure(figsize=(8, 4.8)) plt.plot(x, cumsum, label="OOB loss", color=oob_color, linestyle=oob_line) plt.plot(x, test_score, label="Test loss", color=test_color, linestyle=test_line) plt.plot(x, cv_score, label="CV loss", color=cv_color, linestyle=cv_line) plt.axvline(x=oob_best_iter, color=oob_color, linestyle=oob_line) plt.axvline(x=test_best_iter, color=test_color, linestyle=test_line) plt.axvline(x=cv_best_iter, color=cv_color, linestyle=cv_line) # add three vertical lines to xticks xticks = plt.xticks() xticks_pos = np.array( xticks[0].tolist() + [oob_best_iter, cv_best_iter, test_best_iter] ) xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) + ["OOB", "CV", "Test"]) ind = np.argsort(xticks_pos) xticks_pos = xticks_pos[ind] xticks_label = xticks_label[ind] plt.xticks(xticks_pos, xticks_label, rotation=90) plt.legend(loc="upper center") plt.ylabel("normalized loss") plt.xlabel("number of iterations") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/ensemble/plot_hgbt_regression.py
examples/ensemble/plot_hgbt_regression.py
""" ============================================== Features in Histogram Gradient Boosting Trees ============================================== :ref:`histogram_based_gradient_boosting` (HGBT) models may be one of the most useful supervised learning models in scikit-learn. They are based on a modern gradient boosting implementation comparable to LightGBM and XGBoost. As such, HGBT models are more feature rich than and often outperform alternative models like random forests, especially when the number of samples is larger than some ten thousands (see :ref:`sphx_glr_auto_examples_ensemble_plot_forest_hist_grad_boosting_comparison.py`). The top usability features of HGBT models are: 1. Several available loss functions for mean and quantile regression tasks, see :ref:`Quantile loss <quantile_support_hgbdt>`. 2. :ref:`categorical_support_gbdt`, see :ref:`sphx_glr_auto_examples_ensemble_plot_gradient_boosting_categorical.py`. 3. Early stopping. 4. :ref:`nan_support_hgbt`, which avoids the need for an imputer. 5. :ref:`monotonic_cst_gbdt`. 6. :ref:`interaction_cst_hgbt`. This example aims at showcasing all points except 2 and 6 in a real life setting. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Preparing the data # ================== # The `electricity dataset <http://www.openml.org/d/151>`_ consists of data # collected from the Australian New South Wales Electricity Market. In this # market, prices are not fixed and are affected by supply and demand. They are # set every five minutes. Electricity transfers to/from the neighboring state of # Victoria were done to alleviate fluctuations. # # The dataset, originally named ELEC2, contains 45,312 instances dated from 7 # May 1996 to 5 December 1998. Each sample of the dataset refers to a period of # 30 minutes, i.e. there are 48 instances for each time period of one day. Each # sample on the dataset has 7 columns: # # - date: between 7 May 1996 to 5 December 1998. 
Normalized between 0 and 1; # - day: day of week (1-7); # - period: half hour intervals over 24 hours. Normalized between 0 and 1; # - nswprice/nswdemand: electricity price/demand of New South Wales; # - vicprice/vicdemand: electricity price/demand of Victoria. # # Originally, it is a classification task, but here we use it for the regression # task to predict the scheduled electricity transfer between states. from sklearn.datasets import fetch_openml electricity = fetch_openml( name="electricity", version=1, as_frame=True, parser="pandas" ) df = electricity.frame # %% # This particular dataset has a stepwise constant target for the first 17,760 # samples: df["transfer"][:17_760].unique() # %% # Let us drop those entries and explore the hourly electricity transfer over # different days of the week: import matplotlib.pyplot as plt import seaborn as sns df = electricity.frame.iloc[17_760:] X = df.drop(columns=["transfer", "class"]) y = df["transfer"] fig, ax = plt.subplots(figsize=(15, 10)) pointplot = sns.lineplot(x=df["period"], y=df["transfer"], hue=df["day"], ax=ax) handles, labels = ax.get_legend_handles_labels() ax.set( title="Hourly energy transfer for different days of the week", xlabel="Normalized time of the day", ylabel="Normalized energy transfer", ) _ = ax.legend(handles, ["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"]) # %% # Notice that energy transfer increases systematically during weekends. # # Effect of number of trees and early stopping # ============================================ # For the sake of illustrating the effect of the (maximum) number of trees, we # train a :class:`~sklearn.ensemble.HistGradientBoostingRegressor` over the # daily electricity transfer using the whole dataset. Then we visualize its # predictions depending on the `max_iter` parameter. Here we don't try to # evaluate the performance of the model and its capacity to generalize but # rather its capability to learn from the training data. 
from sklearn.ensemble import HistGradientBoostingRegressor from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, shuffle=False) print(f"Training sample size: {X_train.shape[0]}") print(f"Test sample size: {X_test.shape[0]}") print(f"Number of features: {X_train.shape[1]}") # %% max_iter_list = [5, 50] average_week_demand = ( df.loc[X_test.index].groupby(["day", "period"], observed=False)["transfer"].mean() ) colors = sns.color_palette("colorblind") fig, ax = plt.subplots(figsize=(10, 5)) average_week_demand.plot(color=colors[0], label="recorded average", linewidth=2, ax=ax) for idx, max_iter in enumerate(max_iter_list): hgbt = HistGradientBoostingRegressor( max_iter=max_iter, categorical_features=None, random_state=42 ) hgbt.fit(X_train, y_train) y_pred = hgbt.predict(X_test) prediction_df = df.loc[X_test.index].copy() prediction_df["y_pred"] = y_pred average_pred = prediction_df.groupby(["day", "period"], observed=False)[ "y_pred" ].mean() average_pred.plot( color=colors[idx + 1], label=f"max_iter={max_iter}", linewidth=2, ax=ax ) ax.set( title="Predicted average energy transfer during the week", xticks=[(i + 0.2) * 48 for i in range(7)], xticklabels=["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"], xlabel="Time of the week", ylabel="Normalized energy transfer", ) _ = ax.legend() # %% # With just a few iterations, HGBT models can achieve convergence (see # :ref:`sphx_glr_auto_examples_ensemble_plot_forest_hist_grad_boosting_comparison.py`), # meaning that adding more trees does not improve the model anymore. In the # figure above, 5 iterations are not enough to get good predictions. With 50 # iterations, we are already able to do a good job. # # Setting `max_iter` too high might degrade the prediction quality and cost a lot of # avoidable computing resources. Therefore, the HGBT implementation in scikit-learn # provides an automatic **early stopping** strategy. 
With it, the model # uses a fraction of the training data as internal validation set # (`validation_fraction`) and stops training if the validation score does not # improve (or degrades) after `n_iter_no_change` iterations up to a certain # tolerance (`tol`). # # Notice that there is a trade-off between `learning_rate` and `max_iter`: # Generally, smaller learning rates are preferable but require more iterations # to converge to the minimum loss, while larger learning rates converge faster # (less iterations/trees needed) but at the cost of a larger minimum loss. # # Because of this high correlation between the learning rate the number of iterations, # a good practice is to tune the learning rate along with all (important) other # hyperparameters, fit the HBGT on the training set with a large enough value # for `max_iter` and determine the best `max_iter` via early stopping and some # explicit `validation_fraction`. common_params = { "max_iter": 1_000, "learning_rate": 0.3, "validation_fraction": 0.2, "random_state": 42, "categorical_features": None, "scoring": "neg_root_mean_squared_error", } hgbt = HistGradientBoostingRegressor(early_stopping=True, **common_params) hgbt.fit(X_train, y_train) _, ax = plt.subplots() plt.plot(-hgbt.validation_score_) _ = ax.set( xlabel="number of iterations", ylabel="root mean squared error", title=f"Loss of hgbt with early stopping (n_iter={hgbt.n_iter_})", ) # %% # We can then overwrite the value for `max_iter` to a reasonable value and avoid # the extra computational cost of the inner validation. Rounding up the number # of iterations may account for variability of the training set: import math common_params["max_iter"] = math.ceil(hgbt.n_iter_ / 100) * 100 common_params["early_stopping"] = False hgbt = HistGradientBoostingRegressor(**common_params) # %% # .. note:: The inner validation done during early stopping is not optimal for # time series. 
# # Support for missing values # ========================== # HGBT models have native support of missing values. During training, the tree # grower decides where samples with missing values should go (left or right # child) at each split, based on the potential gain. When predicting, these # samples are sent to the learnt child accordingly. If a feature had no missing # values during training, then for prediction, samples with missing values for that # feature are sent to the child with the most samples (as seen during fit). # # The present example shows how HGBT regressions deal with values missing # completely at random (MCAR), i.e. the missingness does not depend on the # observed data or the unobserved data. We can simulate such scenario by # randomly replacing values from randomly selected features with `nan` values. import numpy as np from sklearn.metrics import root_mean_squared_error rng = np.random.RandomState(42) first_week = slice(0, 336) # first week in the test set as 7 * 48 = 336 missing_fraction_list = [0, 0.01, 0.03] def generate_missing_values(X, missing_fraction): total_cells = X.shape[0] * X.shape[1] num_missing_cells = int(total_cells * missing_fraction) row_indices = rng.choice(X.shape[0], num_missing_cells, replace=True) col_indices = rng.choice(X.shape[1], num_missing_cells, replace=True) X_missing = X.copy() X_missing.iloc[row_indices, col_indices] = np.nan return X_missing fig, ax = plt.subplots(figsize=(12, 6)) ax.plot(y_test.values[first_week], label="Actual transfer") for missing_fraction in missing_fraction_list: X_train_missing = generate_missing_values(X_train, missing_fraction) X_test_missing = generate_missing_values(X_test, missing_fraction) hgbt.fit(X_train_missing, y_train) y_pred = hgbt.predict(X_test_missing[first_week]) rmse = root_mean_squared_error(y_test[first_week], y_pred) ax.plot( y_pred[first_week], label=f"missing_fraction={missing_fraction}, RMSE={rmse:.3f}", alpha=0.5, ) ax.set( title="Daily energy transfer 
predictions on data with MCAR values", xticks=[(i + 0.2) * 48 for i in range(7)], xticklabels=["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"], xlabel="Time of the week", ylabel="Normalized energy transfer", ) _ = ax.legend(loc="lower right") # %% # As expected, the model degrades as the proportion of missing values increases. # # Support for quantile loss # ========================= # # The quantile loss in regression enables a view of the variability or # uncertainty of the target variable. For instance, predicting the 5th and 95th # percentiles can provide a 90% prediction interval, i.e. the range within which # we expect a new observed value to fall with 90% probability. from sklearn.metrics import mean_pinball_loss quantiles = [0.95, 0.05] predictions = [] fig, ax = plt.subplots(figsize=(12, 6)) ax.plot(y_test.values[first_week], label="Actual transfer") for quantile in quantiles: hgbt_quantile = HistGradientBoostingRegressor( loss="quantile", quantile=quantile, **common_params ) hgbt_quantile.fit(X_train, y_train) y_pred = hgbt_quantile.predict(X_test[first_week]) predictions.append(y_pred) score = mean_pinball_loss(y_test[first_week], y_pred) ax.plot( y_pred[first_week], label=f"quantile={quantile}, pinball loss={score:.2f}", alpha=0.5, ) ax.fill_between( range(len(predictions[0][first_week])), predictions[0][first_week], predictions[1][first_week], color=colors[0], alpha=0.1, ) ax.set( title="Daily energy transfer predictions with quantile loss", xticks=[(i + 0.2) * 48 for i in range(7)], xticklabels=["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"], xlabel="Time of the week", ylabel="Normalized energy transfer", ) _ = ax.legend(loc="lower right") # %% # We observe a tendence to over-estimate the energy transfer. This could be be # quantitatively confirmed by computing empirical coverage numbers as done in # the :ref:`calibration of confidence intervals section <calibration-section>`. 
# Keep in mind that those predicted percentiles are just estimations from a # model. One can still improve the quality of such estimations by: # # - collecting more data-points; # - better tuning of the model hyperparameters, see # :ref:`sphx_glr_auto_examples_ensemble_plot_gradient_boosting_quantile.py`; # - engineering more predictive features from the same data, see # :ref:`sphx_glr_auto_examples_applications_plot_cyclical_feature_engineering.py`. # # Monotonic constraints # ===================== # # Given specific domain knowledge that requires the relationship between a # feature and the target to be monotonically increasing or decreasing, one can # enforce such behaviour in the predictions of a HGBT model using monotonic # constraints. This makes the model more interpretable and can reduce its # variance (and potentially mitigate overfitting) at the risk of increasing # bias. Monotonic constraints can also be used to enforce specific regulatory # requirements, ensure compliance and align with ethical considerations. # # In the present example, the policy of transferring energy from Victoria to New # South Wales is meant to alleviate price fluctuations, meaning that the model # predictions have to enforce such goal, i.e. transfer should increase with # price and demand in New South Wales, but also decrease with price and demand # in Victoria, in order to benefit both populations. # # If the training data has feature names, it’s possible to specify the monotonic # constraints by passing a dictionary with the convention: # # - 1: monotonic increase # - 0: no constraint # - -1: monotonic decrease # # Alternatively, one can pass an array-like object encoding the above convention by # position. 
from sklearn.inspection import PartialDependenceDisplay monotonic_cst = { "date": 0, "day": 0, "period": 0, "nswdemand": 1, "nswprice": 1, "vicdemand": -1, "vicprice": -1, } hgbt_no_cst = HistGradientBoostingRegressor( categorical_features=None, random_state=42 ).fit(X, y) hgbt_cst = HistGradientBoostingRegressor( monotonic_cst=monotonic_cst, categorical_features=None, random_state=42 ).fit(X, y) fig, ax = plt.subplots(nrows=2, figsize=(15, 10)) disp = PartialDependenceDisplay.from_estimator( hgbt_no_cst, X, features=["nswdemand", "nswprice"], line_kw={"linewidth": 2, "label": "unconstrained", "color": "tab:blue"}, ax=ax[0], ) PartialDependenceDisplay.from_estimator( hgbt_cst, X, features=["nswdemand", "nswprice"], line_kw={"linewidth": 2, "label": "constrained", "color": "tab:orange"}, ax=disp.axes_, ) disp = PartialDependenceDisplay.from_estimator( hgbt_no_cst, X, features=["vicdemand", "vicprice"], line_kw={"linewidth": 2, "label": "unconstrained", "color": "tab:blue"}, ax=ax[1], ) PartialDependenceDisplay.from_estimator( hgbt_cst, X, features=["vicdemand", "vicprice"], line_kw={"linewidth": 2, "label": "constrained", "color": "tab:orange"}, ax=disp.axes_, ) _ = plt.legend() # %% # Observe that `nswdemand` and `vicdemand` seem already monotonic without constraint. # This is a good example to show that the model with monotonicity constraints is # "overconstraining". # # Additionally, we can verify that the predictive quality of the model is not # significantly degraded by introducing the monotonic constraints. For such # purpose we use :class:`~sklearn.model_selection.TimeSeriesSplit` # cross-validation to estimate the variance of the test score. By doing so we # guarantee that the training data does not succeed the testing data, which is # crucial when dealing with data that have a temporal relationship. 
from sklearn.metrics import make_scorer, root_mean_squared_error from sklearn.model_selection import TimeSeriesSplit, cross_validate ts_cv = TimeSeriesSplit(n_splits=5, gap=48, test_size=336) # a week has 336 samples scorer = make_scorer(root_mean_squared_error) cv_results = cross_validate(hgbt_no_cst, X, y, cv=ts_cv, scoring=scorer) rmse = cv_results["test_score"] print(f"RMSE without constraints = {rmse.mean():.3f} +/- {rmse.std():.3f}") cv_results = cross_validate(hgbt_cst, X, y, cv=ts_cv, scoring=scorer) rmse = cv_results["test_score"] print(f"RMSE with constraints = {rmse.mean():.3f} +/- {rmse.std():.3f}") # %% # That being said, notice the comparison is between two different models that # may be optimized by a different combination of hyperparameters. That is the # reason why we do no use the `common_params` in this section as done before.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/ensemble/plot_isolation_forest.py
examples/ensemble/plot_isolation_forest.py
""" ======================= IsolationForest example ======================= An example using :class:`~sklearn.ensemble.IsolationForest` for anomaly detection. The :ref:`isolation_forest` is an ensemble of "Isolation Trees" that "isolate" observations by recursive random partitioning, which can be represented by a tree structure. The number of splittings required to isolate a sample is lower for outliers and higher for inliers. In the present example we demo two ways to visualize the decision boundary of an Isolation Forest trained on a toy dataset. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Data generation # --------------- # # We generate two clusters (each one containing `n_samples`) by randomly # sampling the standard normal distribution as returned by # :func:`numpy.random.randn`. One of them is spherical and the other one is # slightly deformed. # # For consistency with the :class:`~sklearn.ensemble.IsolationForest` notation, # the inliers (i.e. the gaussian clusters) are assigned a ground truth label `1` # whereas the outliers (created with :func:`numpy.random.uniform`) are assigned # the label `-1`. 
import numpy as np from sklearn.model_selection import train_test_split n_samples, n_outliers = 120, 40 rng = np.random.RandomState(0) covariance = np.array([[0.5, -0.1], [0.7, 0.4]]) cluster_1 = 0.4 * rng.randn(n_samples, 2) @ covariance + np.array([2, 2]) # general cluster_2 = 0.3 * rng.randn(n_samples, 2) + np.array([-2, -2]) # spherical outliers = rng.uniform(low=-4, high=4, size=(n_outliers, 2)) X = np.concatenate([cluster_1, cluster_2, outliers]) y = np.concatenate( [np.ones((2 * n_samples), dtype=int), -np.ones((n_outliers), dtype=int)] ) X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42) # %% # We can visualize the resulting clusters: import matplotlib.pyplot as plt scatter = plt.scatter(X[:, 0], X[:, 1], c=y, s=20, edgecolor="k") handles, labels = scatter.legend_elements() plt.axis("square") plt.legend(handles=handles, labels=["outliers", "inliers"], title="true class") plt.title("Gaussian inliers with \nuniformly distributed outliers") plt.show() # %% # Training of the model # --------------------- from sklearn.ensemble import IsolationForest clf = IsolationForest(max_samples=100, random_state=0) clf.fit(X_train) # %% # Plot discrete decision boundary # ------------------------------- # # We use the class :class:`~sklearn.inspection.DecisionBoundaryDisplay` to # visualize a discrete decision boundary. The background color represents # whether a sample in that given area is predicted to be an outlier # or not. The scatter plot displays the true labels. 
import matplotlib.pyplot as plt from sklearn.inspection import DecisionBoundaryDisplay disp = DecisionBoundaryDisplay.from_estimator( clf, X, response_method="predict", alpha=0.5, ) disp.ax_.scatter(X[:, 0], X[:, 1], c=y, s=20, edgecolor="k") disp.ax_.set_title("Binary decision boundary \nof IsolationForest") plt.axis("square") plt.legend(handles=handles, labels=["outliers", "inliers"], title="true class") plt.show() # %% # Plot path length decision boundary # ---------------------------------- # # By setting the `response_method="decision_function"`, the background of the # :class:`~sklearn.inspection.DecisionBoundaryDisplay` represents the measure of # normality of an observation. Such score is given by the path length averaged # over a forest of random trees, which itself is given by the depth of the leaf # (or equivalently the number of splits) required to isolate a given sample. # # When a forest of random trees collectively produce short path lengths for # isolating some particular samples, they are highly likely to be anomalies and # the measure of normality is close to `0`. Similarly, large paths correspond to # values close to `1` and are more likely to be inliers. disp = DecisionBoundaryDisplay.from_estimator( clf, X, response_method="decision_function", alpha=0.5, ) disp.ax_.scatter(X[:, 0], X[:, 1], c=y, s=20, edgecolor="k") disp.ax_.set_title("Path length decision boundary \nof IsolationForest") plt.axis("square") plt.legend(handles=handles, labels=["outliers", "inliers"], title="true class") plt.colorbar(disp.ax_.collections[1]) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/ensemble/plot_forest_iris.py
examples/ensemble/plot_forest_iris.py
""" ==================================================================== Plot the decision surfaces of ensembles of trees on the iris dataset ==================================================================== Plot the decision surfaces of forests of randomized trees trained on pairs of features of the iris dataset. This plot compares the decision surfaces learned by a decision tree classifier (first column), by a random forest classifier (second column), by an extra-trees classifier (third column) and by an AdaBoost classifier (fourth column). In the first row, the classifiers are built using the sepal width and the sepal length features only, on the second row using the petal length and sepal length only, and on the third row using the petal width and the petal length only. In descending order of quality, when trained (outside of this example) on all 4 features using 30 estimators and scored using 10 fold cross validation, we see:: ExtraTreesClassifier() # 0.95 score RandomForestClassifier() # 0.94 score AdaBoost(DecisionTree(max_depth=3)) # 0.94 score DecisionTree(max_depth=None) # 0.94 score Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but the average score does not improve). See the console's output for further details about each model. In this example you might try to: 1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and ``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the ``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier`` 2) vary ``n_estimators`` It is worth noting that RandomForests and ExtraTrees can be fitted in parallel on many cores as each tree is built independently of the others. AdaBoost's samples are built sequentially and so do not use multiple cores. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from matplotlib.colors import ListedColormap from sklearn.datasets import load_iris from sklearn.ensemble import ( AdaBoostClassifier, ExtraTreesClassifier, RandomForestClassifier, ) from sklearn.tree import DecisionTreeClassifier # Parameters n_classes = 3 n_estimators = 30 cmap = plt.cm.RdYlBu plot_step = 0.02 # fine step width for decision surface contours plot_step_coarser = 0.5 # step widths for coarse classifier guesses RANDOM_SEED = 13 # fix the seed on each iteration # Load data iris = load_iris() plot_idx = 1 models = [ DecisionTreeClassifier(max_depth=None), RandomForestClassifier(n_estimators=n_estimators), ExtraTreesClassifier(n_estimators=n_estimators), AdaBoostClassifier(DecisionTreeClassifier(max_depth=3), n_estimators=n_estimators), ] for pair in ([0, 1], [0, 2], [2, 3]): for model in models: # We only take the two corresponding features X = iris.data[:, pair] y = iris.target # Shuffle idx = np.arange(X.shape[0]) np.random.seed(RANDOM_SEED) np.random.shuffle(idx) X = X[idx] y = y[idx] # Standardize mean = X.mean(axis=0) std = X.std(axis=0) X = (X - mean) / std # Train model.fit(X, y) scores = model.score(X, y) # Create a title for each column and the console by using str() and # slicing away useless parts of the string model_title = str(type(model)).split(".")[-1][:-2][: -len("Classifier")] model_details = model_title if hasattr(model, "estimators_"): model_details += " with {} estimators".format(len(model.estimators_)) print(model_details + " with features", pair, "has a score of", scores) plt.subplot(3, 4, plot_idx) if plot_idx <= len(models): # Add a title at the top of each column plt.title(model_title, fontsize=9) # Now plot the decision boundary using a fine mesh as input to a # filled contour plot x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = 
np.meshgrid( np.arange(x_min, x_max, plot_step), np.arange(y_min, y_max, plot_step) ) # Plot either a single DecisionTreeClassifier or alpha blend the # decision surfaces of the ensemble of classifiers if isinstance(model, DecisionTreeClassifier): Z = model.predict(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) cs = plt.contourf(xx, yy, Z, cmap=cmap) else: # Choose alpha blend level with respect to the number # of estimators # that are in use (noting that AdaBoost can use fewer estimators # than its maximum if it achieves a good enough fit early on) estimator_alpha = 1.0 / len(model.estimators_) for tree in model.estimators_: Z = tree.predict(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap) # Build a coarser grid to plot a set of ensemble classifications # to show how these are different to what we see in the decision # surfaces. These points are regularly space and do not have a # black outline xx_coarser, yy_coarser = np.meshgrid( np.arange(x_min, x_max, plot_step_coarser), np.arange(y_min, y_max, plot_step_coarser), ) Z_points_coarser = model.predict( np.c_[xx_coarser.ravel(), yy_coarser.ravel()] ).reshape(xx_coarser.shape) cs_points = plt.scatter( xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none", ) # Plot the training points, these are clustered together and have a # black outline plt.scatter( X[:, 0], X[:, 1], c=y, cmap=ListedColormap(["r", "y", "b"]), edgecolor="k", s=20, ) plot_idx += 1 # move on to the next plot in sequence plt.suptitle("Classifiers on feature subsets of the Iris dataset", fontsize=12) plt.axis("tight") plt.tight_layout(h_pad=0.2, w_pad=0.2, pad=2.5) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/ensemble/plot_forest_importances.py
examples/ensemble/plot_forest_importances.py
""" ========================================== Feature importances with a forest of trees ========================================== This example shows the use of a forest of trees to evaluate the importance of features on an artificial classification task. The blue bars are the feature importances of the forest, along with their inter-trees variability represented by the error bars. As expected, the plot suggests that 3 features are informative, while the remaining are not. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt # %% # Data generation and model fitting # --------------------------------- # We generate a synthetic dataset with only 3 informative features. We will # explicitly not shuffle the dataset to ensure that the informative features # will correspond to the three first columns of X. In addition, we will split # our dataset into training and testing subsets. from sklearn.datasets import make_classification from sklearn.model_selection import train_test_split X, y = make_classification( n_samples=1000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, n_classes=2, random_state=0, shuffle=False, ) X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42) # %% # A random forest classifier will be fitted to compute the feature importances. from sklearn.ensemble import RandomForestClassifier feature_names = [f"feature {i}" for i in range(X.shape[1])] forest = RandomForestClassifier(random_state=0) forest.fit(X_train, y_train) # %% # Feature importance based on mean decrease in impurity # ----------------------------------------------------- # Feature importances are provided by the fitted attribute # `feature_importances_` and they are computed as the mean and standard # deviation of accumulation of the impurity decrease within each tree. # # .. 
warning:: # Impurity-based feature importances can be misleading for **high # cardinality** features (many unique values). See # :ref:`permutation_importance` as an alternative below. import time import numpy as np start_time = time.time() importances = forest.feature_importances_ std = np.std([tree.feature_importances_ for tree in forest.estimators_], axis=0) elapsed_time = time.time() - start_time print(f"Elapsed time to compute the importances: {elapsed_time:.3f} seconds") # %% # Let's plot the impurity-based importance. import pandas as pd forest_importances = pd.Series(importances, index=feature_names) fig, ax = plt.subplots() forest_importances.plot.bar(yerr=std, ax=ax) ax.set_title("Feature importances using MDI") ax.set_ylabel("Mean decrease in impurity") fig.tight_layout() # %% # We observe that, as expected, the three first features are found important. # # Feature importance based on feature permutation # ----------------------------------------------- # Permutation feature importance overcomes limitations of the impurity-based # feature importance: they do not have a bias toward high-cardinality features # and can be computed on a left-out test set. from sklearn.inspection import permutation_importance start_time = time.time() result = permutation_importance( forest, X_test, y_test, n_repeats=10, random_state=42, n_jobs=2 ) elapsed_time = time.time() - start_time print(f"Elapsed time to compute the importances: {elapsed_time:.3f} seconds") forest_importances = pd.Series(result.importances_mean, index=feature_names) # %% # The computation for full permutation importance is more costly. Each feature is # shuffled n times and the model is used to make predictions on the permuted data to see # the drop in performance. Please see :ref:`permutation_importance` for more details. # We can now plot the importance ranking. 
fig, ax = plt.subplots() forest_importances.plot.bar(yerr=result.importances_std, ax=ax) ax.set_title("Feature importances using permutation on full model") ax.set_ylabel("Mean accuracy decrease") fig.tight_layout() plt.show() # %% # The same features are detected as most important using both methods. Although # the relative importances vary. As seen on the plots, MDI is less likely than # permutation importance to fully omit a feature.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/ensemble/plot_bias_variance.py
examples/ensemble/plot_bias_variance.py
""" ============================================================ Single estimator versus bagging: bias-variance decomposition ============================================================ This example illustrates and compares the bias-variance decomposition of the expected mean squared error of a single estimator against a bagging ensemble. In regression, the expected mean squared error of an estimator can be decomposed in terms of bias, variance and noise. On average over datasets of the regression problem, the bias term measures the average amount by which the predictions of the estimator differ from the predictions of the best possible estimator for the problem (i.e., the Bayes model). The variance term measures the variability of the predictions of the estimator when fit over different random instances of the same problem. Each problem instance is noted "LS", for "Learning Sample", in the following. Finally, the noise measures the irreducible part of the error which is due the variability in the data. The upper left figure illustrates the predictions (in dark red) of a single decision tree trained over a random dataset LS (the blue dots) of a toy 1d regression problem. It also illustrates the predictions (in light red) of other single decision trees trained over other (and different) randomly drawn instances LS of the problem. Intuitively, the variance term here corresponds to the width of the beam of predictions (in light red) of the individual estimators. The larger the variance, the more sensitive are the predictions for `x` to small changes in the training set. The bias term corresponds to the difference between the average prediction of the estimator (in cyan) and the best possible model (in dark blue). On this problem, we can thus observe that the bias is quite low (both the cyan and the blue curves are close to each other) while the variance is large (the red beam is rather wide). 
The lower left figure plots the pointwise decomposition of the expected mean squared error of a single decision tree. It confirms that the bias term (in blue) is low while the variance is large (in green). It also illustrates the noise part of the error which, as expected, appears to be constant and around `0.01`. The right figures correspond to the same plots but using instead a bagging ensemble of decision trees. In both figures, we can observe that the bias term is larger than in the previous case. In the upper right figure, the difference between the average prediction (in cyan) and the best possible model is larger (e.g., notice the offset around `x=2`). In the lower right figure, the bias curve is also slightly higher than in the lower left figure. In terms of variance however, the beam of predictions is narrower, which suggests that the variance is lower. Indeed, as the lower right figure confirms, the variance term (in green) is lower than for single decision trees. Overall, the bias-variance decomposition is therefore no longer the same. The tradeoff is better for bagging: averaging several decision trees fit on bootstrap copies of the dataset slightly increases the bias term but allows for a larger reduction of the variance, which results in a lower overall mean squared error (compare the red curves int the lower figures). The script output also confirms this intuition. The total error of the bagging ensemble is lower than the total error of a single decision tree, and this difference indeed mainly stems from a reduced variance. For further details on bias-variance decomposition, see section 7.3 of [1]_. References ---------- .. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical Learning", Springer, 2009. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn.ensemble import BaggingRegressor from sklearn.tree import DecisionTreeRegressor # Settings n_repeat = 50 # Number of iterations for computing expectations n_train = 50 # Size of the training set n_test = 1000 # Size of the test set noise = 0.1 # Standard deviation of the noise np.random.seed(0) # Change this for exploring the bias-variance decomposition of other # estimators. This should work well for estimators with high variance (e.g., # decision trees or KNN), but poorly for estimators with low variance (e.g., # linear models). estimators = [ ("Tree", DecisionTreeRegressor()), ("Bagging(Tree)", BaggingRegressor(DecisionTreeRegressor())), ] n_estimators = len(estimators) # Generate data def f(x): x = x.ravel() return np.exp(-(x**2)) + 1.5 * np.exp(-((x - 2) ** 2)) def generate(n_samples, noise, n_repeat=1): X = np.random.rand(n_samples) * 10 - 5 X = np.sort(X) if n_repeat == 1: y = f(X) + np.random.normal(0.0, noise, n_samples) else: y = np.zeros((n_samples, n_repeat)) for i in range(n_repeat): y[:, i] = f(X) + np.random.normal(0.0, noise, n_samples) X = X.reshape((n_samples, 1)) return X, y X_train = [] y_train = [] for i in range(n_repeat): X, y = generate(n_samples=n_train, noise=noise) X_train.append(X) y_train.append(y) X_test, y_test = generate(n_samples=n_test, noise=noise, n_repeat=n_repeat) plt.figure(figsize=(10, 8)) # Loop over estimators to compare for n, (name, estimator) in enumerate(estimators): # Compute predictions y_predict = np.zeros((n_test, n_repeat)) for i in range(n_repeat): estimator.fit(X_train[i], y_train[i]) y_predict[:, i] = estimator.predict(X_test) # Bias^2 + Variance + Noise decomposition of the mean squared error y_error = np.zeros(n_test) for i in range(n_repeat): for j in range(n_repeat): y_error += (y_test[:, j] - y_predict[:, i]) ** 2 y_error /= n_repeat * n_repeat y_noise = 
np.var(y_test, axis=1) y_bias = (f(X_test) - np.mean(y_predict, axis=1)) ** 2 y_var = np.var(y_predict, axis=1) print( "{0}: {1:.4f} (error) = {2:.4f} (bias^2) " " + {3:.4f} (var) + {4:.4f} (noise)".format( name, np.mean(y_error), np.mean(y_bias), np.mean(y_var), np.mean(y_noise) ) ) # Plot figures plt.subplot(2, n_estimators, n + 1) plt.plot(X_test, f(X_test), "b", label="$f(x)$") plt.plot(X_train[0], y_train[0], ".b", label="LS ~ $y = f(x)+noise$") for i in range(n_repeat): if i == 0: plt.plot(X_test, y_predict[:, i], "r", label=r"$\^y(x)$") else: plt.plot(X_test, y_predict[:, i], "r", alpha=0.05) plt.plot(X_test, np.mean(y_predict, axis=1), "c", label=r"$\mathbb{E}_{LS} \^y(x)$") plt.xlim([-5, 5]) plt.title(name) if n == n_estimators - 1: plt.legend(loc=(1.1, 0.5)) plt.subplot(2, n_estimators, n_estimators + n + 1) plt.plot(X_test, y_error, "r", label="$error(x)$") plt.plot(X_test, y_bias, "b", label="$bias^2(x)$") plt.plot(X_test, y_var, "g", label="$variance(x)$") plt.plot(X_test, y_noise, "c", label="$noise(x)$") plt.xlim([-5, 5]) plt.ylim([0, 0.1]) if n == n_estimators - 1: plt.legend(loc=(1.1, 0.5)) plt.subplots_adjust(right=0.75) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/ensemble/plot_monotonic_constraints.py
examples/ensemble/plot_monotonic_constraints.py
""" ===================== Monotonic Constraints ===================== This example illustrates the effect of monotonic constraints on a gradient boosting estimator. We build an artificial dataset where the target value is in general positively correlated with the first feature (with some random and non-random variations), and in general negatively correlated with the second feature. By imposing a monotonic increase or a monotonic decrease constraint, respectively, on the features during the learning process, the estimator is able to properly follow the general trend instead of being subject to the variations. This example was inspired by the `XGBoost documentation <https://xgboost.readthedocs.io/en/latest/tutorials/monotonic.html>`_. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% import matplotlib.pyplot as plt import numpy as np from sklearn.ensemble import HistGradientBoostingRegressor from sklearn.inspection import PartialDependenceDisplay rng = np.random.RandomState(0) n_samples = 1000 f_0 = rng.rand(n_samples) f_1 = rng.rand(n_samples) X = np.c_[f_0, f_1] noise = rng.normal(loc=0.0, scale=0.01, size=n_samples) # y is positively correlated with f_0, and negatively correlated with f_1 y = 5 * f_0 + np.sin(10 * np.pi * f_0) - 5 * f_1 - np.cos(10 * np.pi * f_1) + noise # %% # Fit a first model on this dataset without any constraints. gbdt_no_cst = HistGradientBoostingRegressor() gbdt_no_cst.fit(X, y) # %% # Fit a second model on this dataset with monotonic increase (1) # and a monotonic decrease (-1) constraints, respectively. gbdt_with_monotonic_cst = HistGradientBoostingRegressor(monotonic_cst=[1, -1]) gbdt_with_monotonic_cst.fit(X, y) # %% # Let's display the partial dependence of the predictions on the two features. 
fig, ax = plt.subplots() disp = PartialDependenceDisplay.from_estimator( gbdt_no_cst, X, features=[0, 1], feature_names=( "First feature", "Second feature", ), line_kw={"linewidth": 4, "label": "unconstrained", "color": "tab:blue"}, ax=ax, ) PartialDependenceDisplay.from_estimator( gbdt_with_monotonic_cst, X, features=[0, 1], line_kw={"linewidth": 4, "label": "constrained", "color": "tab:orange"}, ax=disp.axes_, ) for f_idx in (0, 1): disp.axes_[0, f_idx].plot( X[:, f_idx], y, "o", alpha=0.3, zorder=-1, color="tab:green" ) disp.axes_[0, f_idx].set_ylim(-6, 6) plt.legend() fig.suptitle("Monotonic constraints effect on partial dependences") plt.show() # %% # We can see that the predictions of the unconstrained model capture the # oscillations of the data while the constrained model follows the general # trend and ignores the local variations. # %% # .. _monotonic_cst_features_names: # # Using feature names to specify monotonic constraints # ---------------------------------------------------- # # Note that if the training data has feature names, it's possible to specify the # monotonic constraints by passing a dictionary: import pandas as pd X_df = pd.DataFrame(X, columns=["f_0", "f_1"]) gbdt_with_monotonic_cst_df = HistGradientBoostingRegressor( monotonic_cst={"f_0": 1, "f_1": -1} ).fit(X_df, y) np.allclose( gbdt_with_monotonic_cst_df.predict(X_df), gbdt_with_monotonic_cst.predict(X) )
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/ensemble/plot_adaboost_twoclass.py
examples/ensemble/plot_adaboost_twoclass.py
""" ================== Two-class AdaBoost ================== This example fits an AdaBoosted decision stump on a non-linearly separable classification dataset composed of two "Gaussian quantiles" clusters (see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision boundary and decision scores. The distributions of decision scores are shown separately for samples of class A and B. The predicted class label for each sample is determined by the sign of the decision score. Samples with decision scores greater than zero are classified as B, and are otherwise classified as A. The magnitude of a decision score determines the degree of likeness with the predicted class label. Additionally, a new dataset could be constructed containing a desired purity of class B, for example, by only selecting samples with a decision score above some value. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn.datasets import make_gaussian_quantiles from sklearn.ensemble import AdaBoostClassifier from sklearn.inspection import DecisionBoundaryDisplay from sklearn.tree import DecisionTreeClassifier # Construct dataset X1, y1 = make_gaussian_quantiles( cov=2.0, n_samples=200, n_features=2, n_classes=2, random_state=1 ) X2, y2 = make_gaussian_quantiles( mean=(3, 3), cov=1.5, n_samples=300, n_features=2, n_classes=2, random_state=1 ) X = np.concatenate((X1, X2)) y = np.concatenate((y1, -y2 + 1)) # Create and fit an AdaBoosted decision tree bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1), n_estimators=200) bdt.fit(X, y) plot_colors = "br" plot_step = 0.02 class_names = "AB" plt.figure(figsize=(10, 5)) # Plot the decision boundaries ax = plt.subplot(121) disp = DecisionBoundaryDisplay.from_estimator( bdt, X, cmap=plt.cm.Paired, response_method="predict", ax=ax, xlabel="x", ylabel="y", ) x_min, x_max = disp.xx0.min(), disp.xx0.max() y_min, y_max = disp.xx1.min(), 
disp.xx1.max() plt.axis("tight") # Plot the training points for i, n, c in zip(range(2), class_names, plot_colors): idx = (y == i).nonzero() plt.scatter( X[idx, 0], X[idx, 1], c=c, s=20, edgecolor="k", label="Class %s" % n, ) plt.xlim(x_min, x_max) plt.ylim(y_min, y_max) plt.legend(loc="upper right") plt.title("Decision Boundary") # Plot the two-class decision scores twoclass_output = bdt.decision_function(X) plot_range = (twoclass_output.min(), twoclass_output.max()) plt.subplot(122) for i, n, c in zip(range(2), class_names, plot_colors): plt.hist( twoclass_output[y == i], bins=10, range=plot_range, facecolor=c, label="Class %s" % n, alpha=0.5, edgecolor="k", ) x1, x2, y1, y2 = plt.axis() plt.axis((x1, x2, y1, y2 * 1.2)) plt.legend(loc="upper right") plt.ylabel("Samples") plt.xlabel("Score") plt.title("Decision Scores") plt.tight_layout() plt.subplots_adjust(wspace=0.35) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/ensemble/plot_gradient_boosting_categorical.py
examples/ensemble/plot_gradient_boosting_categorical.py
""" ================================================ Categorical Feature Support in Gradient Boosting ================================================ .. currentmodule:: sklearn In this example, we compare the training times and prediction performances of :class:`~ensemble.HistGradientBoostingRegressor` with different encoding strategies for categorical features. In particular, we evaluate: - "Dropped": dropping the categorical features; - "One Hot": using a :class:`~preprocessing.OneHotEncoder`; - "Ordinal": using an :class:`~preprocessing.OrdinalEncoder` and treat categories as ordered, equidistant quantities; - "Target": using a :class:`~preprocessing.TargetEncoder`; - "Native": relying on the :ref:`native category support <categorical_support_gbdt>` of the :class:`~ensemble.HistGradientBoostingRegressor` estimator. For such purpose we use the Ames Iowa Housing dataset, which consists of numerical and categorical features, where the target is the house sale price. See :ref:`sphx_glr_auto_examples_ensemble_plot_hgbt_regression.py` for an example showcasing some other features of :class:`~ensemble.HistGradientBoostingRegressor`. See :ref:`sphx_glr_auto_examples_preprocessing_plot_target_encoder.py` for a comparison of encoding strategies in the presence of high cardinality categorical features. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Load Ames Housing dataset # ------------------------- # First, we load the Ames Housing data as a pandas dataframe. 
The features # are either categorical or numerical: from sklearn.datasets import fetch_openml X, y = fetch_openml(data_id=42165, as_frame=True, return_X_y=True) # Select only a subset of features of X to make the example faster to run categorical_columns_subset = [ "BldgType", "GarageFinish", "LotConfig", "Functional", "MasVnrType", "HouseStyle", "FireplaceQu", "ExterCond", "ExterQual", "PoolQC", ] numerical_columns_subset = [ "3SsnPorch", "Fireplaces", "BsmtHalfBath", "HalfBath", "GarageCars", "TotRmsAbvGrd", "BsmtFinSF1", "BsmtFinSF2", "GrLivArea", "ScreenPorch", ] X = X[categorical_columns_subset + numerical_columns_subset] X[categorical_columns_subset] = X[categorical_columns_subset].astype("category") categorical_columns = X.select_dtypes(include="category").columns n_categorical_features = len(categorical_columns) n_numerical_features = X.select_dtypes(include="number").shape[1] print(f"Number of samples: {X.shape[0]}") print(f"Number of features: {X.shape[1]}") print(f"Number of categorical features: {n_categorical_features}") print(f"Number of numerical features: {n_numerical_features}") # %% # Gradient boosting estimator with dropped categorical features # ------------------------------------------------------------- # As a baseline, we create an estimator where the categorical features are # dropped: from sklearn.compose import make_column_selector, make_column_transformer from sklearn.ensemble import HistGradientBoostingRegressor from sklearn.pipeline import make_pipeline dropper = make_column_transformer( ("drop", make_column_selector(dtype_include="category")), remainder="passthrough" ) hist_dropped = make_pipeline(dropper, HistGradientBoostingRegressor(random_state=42)) hist_dropped # %% # Gradient boosting estimator with one-hot encoding # ------------------------------------------------- # Next, we create a pipeline to one-hot encode the categorical features, # while letting the remaining features `"passthrough"` unchanged: from 
sklearn.preprocessing import OneHotEncoder one_hot_encoder = make_column_transformer( ( OneHotEncoder(sparse_output=False, handle_unknown="ignore"), make_column_selector(dtype_include="category"), ), remainder="passthrough", ) hist_one_hot = make_pipeline( one_hot_encoder, HistGradientBoostingRegressor(random_state=42) ) hist_one_hot # %% # Gradient boosting estimator with ordinal encoding # ------------------------------------------------- # Next, we create a pipeline that treats categorical features as ordered # quantities, i.e. the categories are encoded as 0, 1, 2, etc., and treated as # continuous features. import numpy as np from sklearn.preprocessing import OrdinalEncoder ordinal_encoder = make_column_transformer( ( OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=np.nan), make_column_selector(dtype_include="category"), ), remainder="passthrough", ) hist_ordinal = make_pipeline( ordinal_encoder, HistGradientBoostingRegressor(random_state=42) ) hist_ordinal # %% # Gradient boosting estimator with target encoding # ------------------------------------------------ # Another possibility is to use the :class:`~preprocessing.TargetEncoder`, which # encodes the categories computed from the mean of the (training) target # variable, as computed using a smoothed `np.mean(y, axis=0)` i.e.: # # - in regression it uses the mean of `y`; # - in binary classification, the positive-class rate; # - in multiclass, a vector of class rates (one per class). # # For each category, it computes these target averages using :term:`cross # fitting`, meaning that the training data are split into folds: in each fold # the averages are calculated only on a subset of data and then applied to the # held-out part. This way, each sample is encoded using statistics from data it # was not part of, preventing information leakage from the target. 
from sklearn.preprocessing import TargetEncoder target_encoder = make_column_transformer( ( TargetEncoder(target_type="continuous", random_state=42), make_column_selector(dtype_include="category"), ), remainder="passthrough", ) hist_target = make_pipeline( target_encoder, HistGradientBoostingRegressor(random_state=42) ) hist_target # %% # Gradient boosting estimator with native categorical support # ----------------------------------------------------------- # We now create a :class:`~ensemble.HistGradientBoostingRegressor` estimator # that can natively handle categorical features without explicit encoding. Such # functionality can be enabled by setting `categorical_features="from_dtype"`, # which automatically detects features with categorical dtypes, or more explicitly # by `categorical_features=categorical_columns_subset`. # # Unlike previous encoding approaches, the estimator natively deals with the # categorical features. At each split, it partitions the categories of such a # feature into disjoint sets using a heuristic that sorts them by their effect # on the target variable, see `Split finding with categorical features # <https://scikit-learn.org/stable/modules/ensemble.html#split-finding-with-categorical-features>`_ # for details. # # While ordinal encoding may work well for low-cardinality features even if # categories have no natural order, reaching meaningful splits requires deeper # trees as the cardinality increases. The native categorical support avoids this # by directly working with unordered categories. The advantage over one-hot # encoding is the omitted preprocessing and faster fit and predict time. hist_native = HistGradientBoostingRegressor( random_state=42, categorical_features="from_dtype" ) hist_native # %% # Model comparison # ---------------- # Here we use :term:`cross validation` to compare the models performance in # terms of :func:`~metrics.mean_absolute_percentage_error` and fit times. 
In the # upcoming plots, error bars represent 1 standard deviation as computed across # cross-validation splits. from sklearn.model_selection import cross_validate common_params = {"cv": 5, "scoring": "neg_mean_absolute_percentage_error", "n_jobs": -1} dropped_result = cross_validate(hist_dropped, X, y, **common_params) one_hot_result = cross_validate(hist_one_hot, X, y, **common_params) ordinal_result = cross_validate(hist_ordinal, X, y, **common_params) target_result = cross_validate(hist_target, X, y, **common_params) native_result = cross_validate(hist_native, X, y, **common_params) results = [ ("Dropped", dropped_result), ("One Hot", one_hot_result), ("Ordinal", ordinal_result), ("Target", target_result), ("Native", native_result), ] # %% import matplotlib.pyplot as plt import matplotlib.ticker as ticker def plot_performance_tradeoff(results, title): fig, ax = plt.subplots() markers = ["s", "o", "^", "x", "D"] for idx, (name, result) in enumerate(results): test_error = -result["test_score"] mean_fit_time = np.mean(result["fit_time"]) mean_score = np.mean(test_error) std_fit_time = np.std(result["fit_time"]) std_score = np.std(test_error) ax.scatter( result["fit_time"], test_error, label=name, marker=markers[idx], ) ax.scatter( mean_fit_time, mean_score, color="k", marker=markers[idx], ) ax.errorbar( x=mean_fit_time, y=mean_score, yerr=std_score, c="k", capsize=2, ) ax.errorbar( x=mean_fit_time, y=mean_score, xerr=std_fit_time, c="k", capsize=2, ) ax.set_xscale("log") nticks = 7 x0, x1 = np.log10(ax.get_xlim()) ticks = np.logspace(x0, x1, nticks) ax.set_xticks(ticks) ax.xaxis.set_major_formatter(ticker.FormatStrFormatter("%1.1e")) ax.minorticks_off() ax.annotate( " best\nmodels", xy=(0.04, 0.04), xycoords="axes fraction", xytext=(0.09, 0.14), textcoords="axes fraction", arrowprops=dict(arrowstyle="->", lw=1.5), ) ax.set_xlabel("Time to fit (seconds)") ax.set_ylabel("Mean Absolute Percentage Error") ax.set_title(title) ax.legend() plt.show() 
plot_performance_tradeoff(results, "Gradient Boosting on Ames Housing") # %% # In the plot above, the "best models" are those that are closer to the # down-left corner, as indicated by the arrow. Those models would indeed # correspond to faster fitting and lower error. # # The model using one-hot encoded data is the slowest. This is to be expected, # as one-hot encoding creates an additional feature for each category value of # every categorical feature, greatly increasing the number of split candidates # during training. In theory, we expect the native handling of categorical # features to be slightly slower than treating categories as ordered quantities # ('Ordinal'), since native handling requires :ref:`sorting categories # <categorical_support_gbdt>`. Fitting times should however be close when the # number of categories is small, and this may not always be reflected in # practice. # # The time required to fit when using the `TargetEncoder` depends on the # cross fitting parameter `cv`, as adding splits come at a computational cost. # # In terms of prediction performance, dropping the categorical features leads to # the worst performance. The four models that make use of the categorical # features have comparable error rates, with a slight edge for the native # handling. # %% # Limiting the number of splits # ----------------------------- # In general, one can expect poorer predictions from one-hot-encoded data, # especially when the tree depths or the number of nodes are limited: with # one-hot-encoded data, one needs more split points, i.e. more depth, in order # to recover an equivalent split that could be obtained in one single split # point with native handling. 
# # This is also true when categories are treated as ordinal quantities: if # categories are `A..F` and the best split is `ACF - BDE` the one-hot-encoder # model would need 3 split points (one per category in the left node), and the # ordinal non-native model would need 4 splits: 1 split to isolate `A`, 1 split # to isolate `F`, and 2 splits to isolate `C` from `BCDE`. # # How strongly the models' performances differ in practice depends on the # dataset and on the flexibility of the trees. # # To see this, let us re-run the same analysis with under-fitting models where # we artificially limit the total number of splits by both limiting the number # of trees and the depth of each tree. for pipe in (hist_dropped, hist_one_hot, hist_ordinal, hist_target, hist_native): if pipe is hist_native: # The native model does not use a pipeline so, we can set the parameters # directly. pipe.set_params(max_depth=3, max_iter=15) else: pipe.set_params( histgradientboostingregressor__max_depth=3, histgradientboostingregressor__max_iter=15, ) dropped_result = cross_validate(hist_dropped, X, y, **common_params) one_hot_result = cross_validate(hist_one_hot, X, y, **common_params) ordinal_result = cross_validate(hist_ordinal, X, y, **common_params) target_result = cross_validate(hist_target, X, y, **common_params) native_result = cross_validate(hist_native, X, y, **common_params) results_underfit = [ ("Dropped", dropped_result), ("One Hot", one_hot_result), ("Ordinal", ordinal_result), ("Target", target_result), ("Native", native_result), ] # %% plot_performance_tradeoff( results_underfit, "Gradient Boosting on Ames Housing (few and shallow trees)" ) # %% # The results for these underfitting models confirm our previous intuition: the # native category handling strategy performs the best when the splitting budget # is constrained. 
The three explicit encoding strategies (one-hot, ordinal and # target encoding) lead to slightly larger errors than the estimator's native # handling, but still perform better than the baseline model that just dropped # the categorical features altogether.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/ensemble/plot_gradient_boosting_regularization.py
examples/ensemble/plot_gradient_boosting_regularization.py
""" ================================ Gradient Boosting regularization ================================ Illustration of the effect of different regularization strategies for Gradient Boosting. The example is taken from Hastie et al 2009 [1]_. The loss function used is binomial deviance. Regularization via shrinkage (``learning_rate < 1.0``) improves performance considerably. In combination with shrinkage, stochastic gradient boosting (``subsample < 1.0``) can produce more accurate models by reducing the variance via bagging. Subsampling without shrinkage usually does poorly. Another strategy to reduce the variance is by subsampling the features analogous to the random splits in Random Forests (via the ``max_features`` parameter). .. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical Learning Ed. 2", Springer, 2009. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn import datasets, ensemble from sklearn.metrics import log_loss from sklearn.model_selection import train_test_split X, y = datasets.make_hastie_10_2(n_samples=4000, random_state=1) # map labels from {-1, 1} to {0, 1} labels, y = np.unique(y, return_inverse=True) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.8, random_state=0) original_params = { "n_estimators": 400, "max_leaf_nodes": 4, "max_depth": None, "random_state": 2, "min_samples_split": 5, } plt.figure() for label, color, setting in [ ("No shrinkage", "orange", {"learning_rate": 1.0, "subsample": 1.0}), ("learning_rate=0.2", "turquoise", {"learning_rate": 0.2, "subsample": 1.0}), ("subsample=0.5", "blue", {"learning_rate": 1.0, "subsample": 0.5}), ( "learning_rate=0.2, subsample=0.5", "gray", {"learning_rate": 0.2, "subsample": 0.5}, ), ( "learning_rate=0.2, max_features=2", "magenta", {"learning_rate": 0.2, "max_features": 2}, ), ]: params = dict(original_params) params.update(setting) clf = 
ensemble.GradientBoostingClassifier(**params) clf.fit(X_train, y_train) # compute test set deviance test_deviance = np.zeros((params["n_estimators"],), dtype=np.float64) for i, y_proba in enumerate(clf.staged_predict_proba(X_test)): test_deviance[i] = 2 * log_loss(y_test, y_proba[:, 1]) plt.plot( (np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5], "-", color=color, label=label, ) plt.legend(loc="upper right") plt.xlabel("Boosting Iterations") plt.ylabel("Test Set Deviance") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/ensemble/plot_gradient_boosting_early_stopping.py
examples/ensemble/plot_gradient_boosting_early_stopping.py
""" =================================== Early stopping in Gradient Boosting =================================== Gradient Boosting is an ensemble technique that combines multiple weak learners, typically decision trees, to create a robust and powerful predictive model. It does so in an iterative fashion, where each new stage (tree) corrects the errors of the previous ones. Early stopping is a technique in Gradient Boosting that allows us to find the optimal number of iterations required to build a model that generalizes well to unseen data and avoids overfitting. The concept is simple: we set aside a portion of our dataset as a validation set (specified using `validation_fraction`) to assess the model's performance during training. As the model is iteratively built with additional stages (trees), its performance on the validation set is monitored as a function of the number of steps. Early stopping becomes effective when the model's performance on the validation set plateaus or worsens (within deviations specified by `tol`) over a certain number of consecutive stages (specified by `n_iter_no_change`). This signals that the model has reached a point where further iterations may lead to overfitting, and it's time to stop training. The number of estimators (trees) in the final model, when early stopping is applied, can be accessed using the `n_estimators_` attribute. Overall, early stopping is a valuable tool to strike a balance between model performance and efficiency in gradient boosting. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Data Preparation # ---------------- # First we load and prepares the California Housing Prices dataset for # training and evaluation. It subsets the dataset, splits it into training # and validation sets. 
import time import matplotlib.pyplot as plt from sklearn.datasets import fetch_california_housing from sklearn.ensemble import GradientBoostingRegressor from sklearn.metrics import mean_squared_error from sklearn.model_selection import train_test_split data = fetch_california_housing() X, y = data.data[:600], data.target[:600] X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42) # %% # Model Training and Comparison # ----------------------------- # Two :class:`~sklearn.ensemble.GradientBoostingRegressor` models are trained: # one with and another without early stopping. The purpose is to compare their # performance. It also calculates the training time and the `n_estimators_` # used by both models. params = dict(n_estimators=1000, max_depth=5, learning_rate=0.1, random_state=42) gbm_full = GradientBoostingRegressor(**params) gbm_early_stopping = GradientBoostingRegressor( **params, validation_fraction=0.1, n_iter_no_change=10, ) start_time = time.time() gbm_full.fit(X_train, y_train) training_time_full = time.time() - start_time n_estimators_full = gbm_full.n_estimators_ start_time = time.time() gbm_early_stopping.fit(X_train, y_train) training_time_early_stopping = time.time() - start_time estimators_early_stopping = gbm_early_stopping.n_estimators_ # %% # Error Calculation # ----------------- # The code calculates the :func:`~sklearn.metrics.mean_squared_error` for both # training and validation datasets for the models trained in the previous # section. It computes the errors for each boosting iteration. The purpose is # to assess the performance and convergence of the models. 
train_errors_without = [] val_errors_without = [] train_errors_with = [] val_errors_with = [] for i, (train_pred, val_pred) in enumerate( zip( gbm_full.staged_predict(X_train), gbm_full.staged_predict(X_val), ) ): train_errors_without.append(mean_squared_error(y_train, train_pred)) val_errors_without.append(mean_squared_error(y_val, val_pred)) for i, (train_pred, val_pred) in enumerate( zip( gbm_early_stopping.staged_predict(X_train), gbm_early_stopping.staged_predict(X_val), ) ): train_errors_with.append(mean_squared_error(y_train, train_pred)) val_errors_with.append(mean_squared_error(y_val, val_pred)) # %% # Visualize Comparison # -------------------- # It includes three subplots: # # 1. Plotting training errors of both models over boosting iterations. # 2. Plotting validation errors of both models over boosting iterations. # 3. Creating a bar chart to compare the training times and the estimator used # of the models with and without early stopping. # fig, axes = plt.subplots(ncols=3, figsize=(12, 4)) axes[0].plot(train_errors_without, label="gbm_full") axes[0].plot(train_errors_with, label="gbm_early_stopping") axes[0].set_xlabel("Boosting Iterations") axes[0].set_ylabel("MSE (Training)") axes[0].set_yscale("log") axes[0].legend() axes[0].set_title("Training Error") axes[1].plot(val_errors_without, label="gbm_full") axes[1].plot(val_errors_with, label="gbm_early_stopping") axes[1].set_xlabel("Boosting Iterations") axes[1].set_ylabel("MSE (Validation)") axes[1].set_yscale("log") axes[1].legend() axes[1].set_title("Validation Error") training_times = [training_time_full, training_time_early_stopping] labels = ["gbm_full", "gbm_early_stopping"] bars = axes[2].bar(labels, training_times) axes[2].set_ylabel("Training Time (s)") for bar, n_estimators in zip(bars, [n_estimators_full, estimators_early_stopping]): height = bar.get_height() axes[2].text( bar.get_x() + bar.get_width() / 2, height + 0.001, f"Estimators: {n_estimators}", ha="center", va="bottom", ) 
plt.tight_layout() plt.show() # %% # The difference in training error between the `gbm_full` and the # `gbm_early_stopping` stems from the fact that `gbm_early_stopping` sets # aside `validation_fraction` of the training data as internal validation set. # Early stopping is decided based on this internal validation score. # %% # Summary # ------- # In our example with the :class:`~sklearn.ensemble.GradientBoostingRegressor` # model on the California Housing Prices dataset, we have demonstrated the # practical benefits of early stopping: # # - **Preventing Overfitting:** We showed how the validation error stabilizes # or starts to increase after a certain point, indicating that the model # generalizes better to unseen data. This is achieved by stopping the training # process before overfitting occurs. # - **Improving Training Efficiency:** We compared training times between # models with and without early stopping. The model with early stopping # achieved comparable accuracy while requiring significantly fewer # estimators, resulting in faster training.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/ensemble/plot_ensemble_oob.py
examples/ensemble/plot_ensemble_oob.py
""" ============================= OOB Errors for Random Forests ============================= The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where each new tree is fit from a bootstrap sample of the training observations :math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for each :math:`z_i` calculated using predictions from the trees that do not contain :math:`z_i` in their respective bootstrap sample. This allows the ``RandomForestClassifier`` to be fit and validated whilst being trained [1]_. The example below demonstrates how the OOB error can be measured at the addition of each new tree during training. The resulting plot allows a practitioner to approximate a suitable value of ``n_estimators`` at which the error stabilizes. .. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical Learning Ed. 2", p592-593, Springer, 2009. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from collections import OrderedDict import matplotlib.pyplot as plt from sklearn.datasets import make_classification from sklearn.ensemble import RandomForestClassifier RANDOM_STATE = 123 # Generate a binary classification dataset. X, y = make_classification( n_samples=500, n_features=25, n_clusters_per_class=1, n_informative=15, random_state=RANDOM_STATE, ) # NOTE: Setting the `warm_start` construction parameter to `True` disables # support for parallelized ensembles but is necessary for tracking the OOB # error trajectory during training. 
ensemble_clfs = [ ( "RandomForestClassifier, max_features='sqrt'", RandomForestClassifier( warm_start=True, oob_score=True, max_features="sqrt", random_state=RANDOM_STATE, ), ), ( "RandomForestClassifier, max_features='log2'", RandomForestClassifier( warm_start=True, max_features="log2", oob_score=True, random_state=RANDOM_STATE, ), ), ( "RandomForestClassifier, max_features=None", RandomForestClassifier( warm_start=True, max_features=None, oob_score=True, random_state=RANDOM_STATE, ), ), ] # Map a classifier name to a list of (<n_estimators>, <error rate>) pairs. error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs) # Range of `n_estimators` values to explore. min_estimators = 15 max_estimators = 150 for label, clf in ensemble_clfs: for i in range(min_estimators, max_estimators + 1, 5): clf.set_params(n_estimators=i) clf.fit(X, y) # Record the OOB error for each `n_estimators=i` setting. oob_error = 1 - clf.oob_score_ error_rate[label].append((i, oob_error)) # Generate the "OOB error rate" vs. "n_estimators" plot. for label, clf_err in error_rate.items(): xs, ys = zip(*clf_err) plt.plot(xs, ys, label=label) plt.xlim(min_estimators, max_estimators) plt.xlabel("n_estimators") plt.ylabel("OOB error rate") plt.legend(loc="upper right") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/ensemble/plot_adaboost_regression.py
examples/ensemble/plot_adaboost_regression.py
""" ====================================== Decision Tree Regression with AdaBoost ====================================== A decision tree is boosted using the AdaBoost.R2 [1]_ algorithm on a 1D sinusoidal dataset with a small amount of Gaussian noise. 299 boosts (300 decision trees) is compared with a single decision tree regressor. As the number of boosts is increased the regressor can fit more detail. See :ref:`sphx_glr_auto_examples_ensemble_plot_hgbt_regression.py` for an example showcasing the benefits of using more efficient regression models such as :class:`~ensemble.HistGradientBoostingRegressor`. .. [1] `H. Drucker, "Improving Regressors using Boosting Techniques", 1997. <https://citeseerx.ist.psu.edu/doc_view/pid/8d49e2dedb817f2c3330e74b63c5fc86d2399ce3>`_ """ # %% # Preparing the data # ------------------ # First, we prepare dummy data with a sinusoidal relationship and some gaussian noise. # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import numpy as np rng = np.random.RandomState(1) X = np.linspace(0, 6, 100)[:, np.newaxis] y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0]) # %% # Training and prediction with DecisionTree and AdaBoost Regressors # ----------------------------------------------------------------- # Now, we define the classifiers and fit them to the data. # Then we predict on that same data to see how well they could fit it. # The first regressor is a `DecisionTreeRegressor` with `max_depth=4`. # The second regressor is an `AdaBoostRegressor` with a `DecisionTreeRegressor` # of `max_depth=4` as base learner and will be built with `n_estimators=300` # of those base learners. 
from sklearn.ensemble import AdaBoostRegressor from sklearn.tree import DecisionTreeRegressor regr_1 = DecisionTreeRegressor(max_depth=4) regr_2 = AdaBoostRegressor( DecisionTreeRegressor(max_depth=4), n_estimators=300, random_state=rng ) regr_1.fit(X, y) regr_2.fit(X, y) y_1 = regr_1.predict(X) y_2 = regr_2.predict(X) # %% # Plotting the results # -------------------- # Finally, we plot how well our two regressors, # single decision tree regressor and AdaBoost regressor, could fit the data. import matplotlib.pyplot as plt import seaborn as sns colors = sns.color_palette("colorblind") plt.figure() plt.scatter(X, y, color=colors[0], label="training samples") plt.plot(X, y_1, color=colors[1], label="n_estimators=1", linewidth=2) plt.plot(X, y_2, color=colors[2], label="n_estimators=300", linewidth=2) plt.xlabel("data") plt.ylabel("target") plt.title("Boosted Decision Tree Regression") plt.legend() plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/neighbors/plot_lof_novelty_detection.py
examples/neighbors/plot_lof_novelty_detection.py
""" ================================================= Novelty detection with Local Outlier Factor (LOF) ================================================= The Local Outlier Factor (LOF) algorithm is an unsupervised anomaly detection method which computes the local density deviation of a given data point with respect to its neighbors. It considers as outliers the samples that have a substantially lower density than their neighbors. This example shows how to use LOF for novelty detection. Note that when LOF is used for novelty detection you MUST not use predict, decision_function and score_samples on the training set as this would lead to wrong results. You must only use these methods on new unseen data (which are not in the training set). See :ref:`User Guide <outlier_detection>`: for details on the difference between outlier detection and novelty detection and how to use LOF for outlier detection. The number of neighbors considered, (parameter n_neighbors) is typically set 1) greater than the minimum number of samples a cluster has to contain, so that other samples can be local outliers relative to this cluster, and 2) smaller than the maximum number of close by samples that can potentially be local outliers. In practice, such information is generally not available, and taking n_neighbors=20 appears to work well in general. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib import matplotlib.lines as mlines import matplotlib.pyplot as plt import numpy as np from sklearn.neighbors import LocalOutlierFactor np.random.seed(42) xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500)) # Generate normal (not abnormal) training observations X = 0.3 * np.random.randn(100, 2) X_train = np.r_[X + 2, X - 2] # Generate new normal (not abnormal) observations X = 0.3 * np.random.randn(20, 2) X_test = np.r_[X + 2, X - 2] # Generate some abnormal novel observations X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2)) # fit the model for novelty detection (novelty=True) clf = LocalOutlierFactor(n_neighbors=20, novelty=True, contamination=0.1) clf.fit(X_train) # DO NOT use predict, decision_function and score_samples on X_train as this # would give wrong results but only on new unseen data (not used in X_train), # e.g. X_test, X_outliers or the meshgrid y_pred_test = clf.predict(X_test) y_pred_outliers = clf.predict(X_outliers) n_error_test = y_pred_test[y_pred_test == -1].size n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size # plot the learned frontier, the points, and the nearest vectors to the plane Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) plt.title("Novelty Detection with LOF") plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.PuBu) a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors="darkred") plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors="palevioletred") s = 40 b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c="white", s=s, edgecolors="k") b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c="blueviolet", s=s, edgecolors="k") c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c="gold", s=s, edgecolors="k") plt.axis("tight") plt.xlim((-5, 5)) plt.ylim((-5, 5)) plt.legend( [mlines.Line2D([], [], color="darkred"), b1, b2, c], [ "learned frontier", 
"training observations", "new regular observations", "new abnormal observations", ], loc=(1.05, 0.4), prop=matplotlib.font_manager.FontProperties(size=11), ) plt.xlabel( "errors novel regular: %d/40 ; errors novel abnormal: %d/40" % (n_error_test, n_error_outliers) ) plt.tight_layout() plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/neighbors/plot_classification.py
examples/neighbors/plot_classification.py
""" ================================ Nearest Neighbors Classification ================================ This example shows how to use :class:`~sklearn.neighbors.KNeighborsClassifier`. We train such a classifier on the iris dataset and observe the difference of the decision boundary obtained with regards to the parameter `weights`. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Load the data # ------------- # # In this example, we use the iris dataset. We split the data into a train and test # dataset. from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split iris = load_iris(as_frame=True) X = iris.data[["sepal length (cm)", "sepal width (cm)"]] y = iris.target X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=0) # %% # K-nearest neighbors classifier # ------------------------------ # # We want to use a k-nearest neighbors classifier considering a neighborhood of 11 data # points. Since our k-nearest neighbors model uses euclidean distance to find the # nearest neighbors, it is therefore important to scale the data beforehand. Refer to # the example entitled # :ref:`sphx_glr_auto_examples_preprocessing_plot_scaling_importance.py` for more # detailed information. # # Thus, we use a :class:`~sklearn.pipeline.Pipeline` to chain a scaler before to use # our classifier. from sklearn.neighbors import KNeighborsClassifier from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler clf = Pipeline( steps=[("scaler", StandardScaler()), ("knn", KNeighborsClassifier(n_neighbors=11))] ) # %% # Decision boundary # ----------------- # # Now, we fit two classifiers with different values of the parameter # `weights`. We plot the decision boundary of each classifier as well as the original # dataset to observe the difference. 
import matplotlib.pyplot as plt from sklearn.inspection import DecisionBoundaryDisplay _, axs = plt.subplots(ncols=2, figsize=(12, 5)) for ax, weights in zip(axs, ("uniform", "distance")): clf.set_params(knn__weights=weights).fit(X_train, y_train) disp = DecisionBoundaryDisplay.from_estimator( clf, X_test, response_method="predict", plot_method="pcolormesh", xlabel=iris.feature_names[0], ylabel=iris.feature_names[1], shading="auto", alpha=0.5, ax=ax, ) scatter = disp.ax_.scatter(X.iloc[:, 0], X.iloc[:, 1], c=y, edgecolors="k") disp.ax_.legend( scatter.legend_elements()[0], iris.target_names, loc="lower left", title="Classes", ) _ = disp.ax_.set_title( f"3-Class classification\n(k={clf[-1].n_neighbors}, weights={weights!r})" ) plt.show() # %% # Conclusion # ---------- # # We observe that the parameter `weights` has an impact on the decision boundary. When # `weights="unifom"` all nearest neighbors will have the same impact on the decision. # Whereas when `weights="distance"` the weight given to each neighbor is proportional # to the inverse of the distance from that neighbor to the query point. # # In some cases, taking the distance into account might improve the model.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/neighbors/plot_nca_illustration.py
examples/neighbors/plot_nca_illustration.py
""" ============================================= Neighborhood Components Analysis Illustration ============================================= This example illustrates a learned distance metric that maximizes the nearest neighbors classification accuracy. It provides a visual representation of this metric compared to the original point space. Please refer to the :ref:`User Guide <nca>` for more information. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from matplotlib import cm from scipy.special import logsumexp from sklearn.datasets import make_classification from sklearn.neighbors import NeighborhoodComponentsAnalysis # %% # Original points # --------------- # First we create a data set of 9 samples from 3 classes, and plot the points # in the original space. For this example, we focus on the classification of # point no. 3. The thickness of a link between point no. 3 and another point # is proportional to their distance. 
X, y = make_classification( n_samples=9, n_features=2, n_informative=2, n_redundant=0, n_classes=3, n_clusters_per_class=1, class_sep=1.0, random_state=0, ) plt.figure(1) ax = plt.gca() for i in range(X.shape[0]): ax.text(X[i, 0], X[i, 1], str(i), va="center", ha="center") ax.scatter(X[i, 0], X[i, 1], s=300, c=cm.Set1(y[[i]]), alpha=0.4) ax.set_title("Original points") ax.axes.get_xaxis().set_visible(False) ax.axes.get_yaxis().set_visible(False) ax.axis("equal") # so that boundaries are displayed correctly as circles def link_thickness_i(X, i): diff_embedded = X[i] - X dist_embedded = np.einsum("ij,ij->i", diff_embedded, diff_embedded) dist_embedded[i] = np.inf # compute exponentiated distances (use the log-sum-exp trick to # avoid numerical instabilities exp_dist_embedded = np.exp(-dist_embedded - logsumexp(-dist_embedded)) return exp_dist_embedded def relate_point(X, i, ax): pt_i = X[i] for j, pt_j in enumerate(X): thickness = link_thickness_i(X, i) if i != j: line = ([pt_i[0], pt_j[0]], [pt_i[1], pt_j[1]]) ax.plot(*line, c=cm.Set1(y[j]), linewidth=5 * thickness[j]) i = 3 relate_point(X, i, ax) plt.show() # %% # Learning an embedding # --------------------- # We use :class:`~sklearn.neighbors.NeighborhoodComponentsAnalysis` to learn an # embedding and plot the points after the transformation. We then take the # embedding and find the nearest neighbors. nca = NeighborhoodComponentsAnalysis(max_iter=30, random_state=0) nca = nca.fit(X, y) plt.figure(2) ax2 = plt.gca() X_embedded = nca.transform(X) relate_point(X_embedded, i, ax2) for i in range(len(X)): ax2.text(X_embedded[i, 0], X_embedded[i, 1], str(i), va="center", ha="center") ax2.scatter(X_embedded[i, 0], X_embedded[i, 1], s=300, c=cm.Set1(y[[i]]), alpha=0.4) ax2.set_title("NCA embedding") ax2.axes.get_xaxis().set_visible(False) ax2.axes.get_yaxis().set_visible(False) ax2.axis("equal") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/neighbors/approximate_nearest_neighbors.py
examples/neighbors/approximate_nearest_neighbors.py
""" ===================================== Approximate nearest neighbors in TSNE ===================================== This example presents how to chain KNeighborsTransformer and TSNE in a pipeline. It also shows how to wrap the packages `nmslib` and `pynndescent` to replace KNeighborsTransformer and perform approximate nearest neighbors. These packages can be installed with `pip install nmslib pynndescent`. Note: In KNeighborsTransformer we use the definition which includes each training point as its own neighbor in the count of `n_neighbors`, and for compatibility reasons, one extra neighbor is computed when `mode == 'distance'`. Please note that we do the same in the proposed `nmslib` wrapper. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # First we try to import the packages and warn the user in case they are # missing. import sys try: import nmslib except ImportError: print("The package 'nmslib' is required to run this example.") sys.exit() try: from pynndescent import PyNNDescentTransformer except ImportError: print("The package 'pynndescent' is required to run this example.") sys.exit() # %% # We define a wrapper class for implementing the scikit-learn API to the # `nmslib`, as well as a loading function. 
import joblib import numpy as np from scipy.sparse import csr_matrix from sklearn.base import BaseEstimator, TransformerMixin from sklearn.datasets import fetch_openml from sklearn.utils import shuffle class NMSlibTransformer(TransformerMixin, BaseEstimator): """Wrapper for using nmslib as sklearn's KNeighborsTransformer""" def __init__(self, n_neighbors=5, metric="euclidean", method="sw-graph", n_jobs=-1): self.n_neighbors = n_neighbors self.method = method self.metric = metric self.n_jobs = n_jobs def fit(self, X): self.n_samples_fit_ = X.shape[0] # see more metric in the manual # https://github.com/nmslib/nmslib/tree/master/manual space = { "euclidean": "l2", "cosine": "cosinesimil", "l1": "l1", "l2": "l2", }[self.metric] self.nmslib_ = nmslib.init(method=self.method, space=space) self.nmslib_.addDataPointBatch(X.copy()) self.nmslib_.createIndex() return self def transform(self, X): n_samples_transform = X.shape[0] # For compatibility reasons, as each sample is considered as its own # neighbor, one extra neighbor will be computed. n_neighbors = self.n_neighbors + 1 if self.n_jobs < 0: # Same handling as done in joblib for negative values of n_jobs: # in particular, `n_jobs == -1` means "as many threads as CPUs". 
num_threads = joblib.cpu_count() + self.n_jobs + 1 else: num_threads = self.n_jobs results = self.nmslib_.knnQueryBatch( X.copy(), k=n_neighbors, num_threads=num_threads ) indices, distances = zip(*results) indices, distances = np.vstack(indices), np.vstack(distances) indptr = np.arange(0, n_samples_transform * n_neighbors + 1, n_neighbors) kneighbors_graph = csr_matrix( (distances.ravel(), indices.ravel(), indptr), shape=(n_samples_transform, self.n_samples_fit_), ) return kneighbors_graph def load_mnist(n_samples): """Load MNIST, shuffle the data, and return only n_samples.""" mnist = fetch_openml("mnist_784", as_frame=False) X, y = shuffle(mnist.data, mnist.target, random_state=2) return X[:n_samples] / 255, y[:n_samples] # %% # We benchmark the different exact/approximate nearest neighbors transformers. import time from sklearn.manifold import TSNE from sklearn.neighbors import KNeighborsTransformer from sklearn.pipeline import make_pipeline datasets = [ ("MNIST_10000", load_mnist(n_samples=10_000)), ("MNIST_20000", load_mnist(n_samples=20_000)), ] max_iter = 500 perplexity = 30 metric = "euclidean" # TSNE requires a certain number of neighbors which depends on the # perplexity parameter. # Add one since we include each sample as its own neighbor. 
n_neighbors = int(3.0 * perplexity + 1) + 1 tsne_params = dict( init="random", # pca cannot be used with precomputed distances perplexity=perplexity, method="barnes_hut", random_state=42, max_iter=max_iter, learning_rate="auto", ) transformers = [ ( "KNeighborsTransformer", KNeighborsTransformer(n_neighbors=n_neighbors, mode="distance", metric=metric), ), ( "NMSlibTransformer", NMSlibTransformer(n_neighbors=n_neighbors, metric=metric), ), ( "PyNNDescentTransformer", PyNNDescentTransformer( n_neighbors=n_neighbors, metric=metric, parallel_batch_queries=True ), ), ] for dataset_name, (X, y) in datasets: msg = f"Benchmarking on {dataset_name}:" print(f"\n{msg}\n" + str("-" * len(msg))) for transformer_name, transformer in transformers: longest = np.max([len(name) for name, model in transformers]) start = time.time() transformer.fit(X) fit_duration = time.time() - start print(f"{transformer_name:<{longest}} {fit_duration:.3f} sec (fit)") start = time.time() Xt = transformer.transform(X) transform_duration = time.time() - start print(f"{transformer_name:<{longest}} {transform_duration:.3f} sec (transform)") if transformer_name == "PyNNDescentTransformer": start = time.time() Xt = transformer.transform(X) transform_duration = time.time() - start print( f"{transformer_name:<{longest}} {transform_duration:.3f} sec" " (transform)" ) # %% # Sample output:: # # Benchmarking on MNIST_10000: # ---------------------------- # KNeighborsTransformer 0.007 sec (fit) # KNeighborsTransformer 1.139 sec (transform) # NMSlibTransformer 0.208 sec (fit) # NMSlibTransformer 0.315 sec (transform) # PyNNDescentTransformer 4.823 sec (fit) # PyNNDescentTransformer 4.884 sec (transform) # PyNNDescentTransformer 0.744 sec (transform) # # Benchmarking on MNIST_20000: # ---------------------------- # KNeighborsTransformer 0.011 sec (fit) # KNeighborsTransformer 5.769 sec (transform) # NMSlibTransformer 0.733 sec (fit) # NMSlibTransformer 1.077 sec (transform) # PyNNDescentTransformer 14.448 sec 
(fit) # PyNNDescentTransformer 7.103 sec (transform) # PyNNDescentTransformer 1.759 sec (transform) # # Notice that the `PyNNDescentTransformer` takes more time during the first # `fit` and the first `transform` due to the overhead of the numba just in time # compiler. But after the first call, the compiled Python code is kept in a # cache by numba and subsequent calls do not suffer from this initial overhead. # Both :class:`~sklearn.neighbors.KNeighborsTransformer` and `NMSlibTransformer` # are only run once here as they would show more stable `fit` and `transform` # times (they don't have the cold start problem of PyNNDescentTransformer). # %% import matplotlib.pyplot as plt from matplotlib.ticker import NullFormatter transformers = [ ("TSNE with internal NearestNeighbors", TSNE(metric=metric, **tsne_params)), ( "TSNE with KNeighborsTransformer", make_pipeline( KNeighborsTransformer( n_neighbors=n_neighbors, mode="distance", metric=metric ), TSNE(metric="precomputed", **tsne_params), ), ), ( "TSNE with NMSlibTransformer", make_pipeline( NMSlibTransformer(n_neighbors=n_neighbors, metric=metric), TSNE(metric="precomputed", **tsne_params), ), ), ] # init the plot nrows = len(datasets) ncols = np.sum([1 for name, model in transformers if "TSNE" in name]) fig, axes = plt.subplots( nrows=nrows, ncols=ncols, squeeze=False, figsize=(5 * ncols, 4 * nrows) ) axes = axes.ravel() i_ax = 0 for dataset_name, (X, y) in datasets: msg = f"Benchmarking on {dataset_name}:" print(f"\n{msg}\n" + str("-" * len(msg))) for transformer_name, transformer in transformers: longest = np.max([len(name) for name, model in transformers]) start = time.time() Xt = transformer.fit_transform(X) transform_duration = time.time() - start print( f"{transformer_name:<{longest}} {transform_duration:.3f} sec" " (fit_transform)" ) # plot TSNE embedding which should be very similar across methods axes[i_ax].set_title(transformer_name + "\non " + dataset_name) axes[i_ax].scatter( Xt[:, 0], Xt[:, 1], 
c=y.astype(np.int32), alpha=0.2, cmap=plt.cm.viridis, ) axes[i_ax].xaxis.set_major_formatter(NullFormatter()) axes[i_ax].yaxis.set_major_formatter(NullFormatter()) axes[i_ax].axis("tight") i_ax += 1 fig.tight_layout() plt.show() # %% # Sample output:: # # Benchmarking on MNIST_10000: # ---------------------------- # TSNE with internal NearestNeighbors 24.828 sec (fit_transform) # TSNE with KNeighborsTransformer 20.111 sec (fit_transform) # TSNE with NMSlibTransformer 21.757 sec (fit_transform) # # Benchmarking on MNIST_20000: # ---------------------------- # TSNE with internal NearestNeighbors 51.955 sec (fit_transform) # TSNE with KNeighborsTransformer 50.994 sec (fit_transform) # TSNE with NMSlibTransformer 43.536 sec (fit_transform) # # We can observe that the default :class:`~sklearn.manifold.TSNE` estimator with # its internal :class:`~sklearn.neighbors.NearestNeighbors` implementation is # roughly equivalent to the pipeline with :class:`~sklearn.manifold.TSNE` and # :class:`~sklearn.neighbors.KNeighborsTransformer` in terms of performance. # This is expected because both pipelines rely internally on the same # :class:`~sklearn.neighbors.NearestNeighbors` implementation that performs # exacts neighbors search. The approximate `NMSlibTransformer` is already # slightly faster than the exact search on the smallest dataset but this speed # difference is expected to become more significant on datasets with a larger # number of samples. # # Notice however that not all approximate search methods are guaranteed to # improve the speed of the default exact search method: indeed the exact search # implementation significantly improved since scikit-learn 1.1. Furthermore, the # brute-force exact search method does not require building an index at `fit` # time. 
So, to get an overall performance improvement in the context of the # :class:`~sklearn.manifold.TSNE` pipeline, the gains of the approximate search # at `transform` need to be larger than the extra time spent to build the # approximate search index at `fit` time. # # Finally, the TSNE algorithm itself is also computationally intensive, # irrespective of the nearest neighbors search. So speeding-up the nearest # neighbors search step by a factor of 5 would not result in a speed up by a # factor of 5 for the overall pipeline.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/neighbors/plot_caching_nearest_neighbors.py
examples/neighbors/plot_caching_nearest_neighbors.py
""" ========================= Caching nearest neighbors ========================= This example demonstrates how to precompute the k nearest neighbors before using them in KNeighborsClassifier. KNeighborsClassifier can compute the nearest neighbors internally, but precomputing them can have several benefits, such as finer parameter control, caching for multiple use, or custom implementations. Here we use the caching property of pipelines to cache the nearest neighbors graph between multiple fits of KNeighborsClassifier. The first call is slow since it computes the neighbors graph, while subsequent calls are faster as they do not need to recompute the graph. Here the durations are small since the dataset is small, but the gain can be more substantial when the dataset grows larger, or when the grid of parameter to search is large. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from tempfile import TemporaryDirectory import matplotlib.pyplot as plt from sklearn.datasets import load_digits from sklearn.model_selection import GridSearchCV from sklearn.neighbors import KNeighborsClassifier, KNeighborsTransformer from sklearn.pipeline import Pipeline X, y = load_digits(return_X_y=True) n_neighbors_list = [1, 2, 3, 4, 5, 6, 7, 8, 9] # The transformer computes the nearest neighbors graph using the maximum number # of neighbors necessary in the grid search. The classifier model filters the # nearest neighbors graph as required by its own n_neighbors parameter. graph_model = KNeighborsTransformer(n_neighbors=max(n_neighbors_list), mode="distance") classifier_model = KNeighborsClassifier(metric="precomputed") # Note that we give `memory` a directory to cache the graph computation # that will be used several times when tuning the hyperparameters of the # classifier. 
with TemporaryDirectory(prefix="sklearn_graph_cache_") as tmpdir: full_model = Pipeline( steps=[("graph", graph_model), ("classifier", classifier_model)], memory=tmpdir ) param_grid = {"classifier__n_neighbors": n_neighbors_list} grid_model = GridSearchCV(full_model, param_grid) grid_model.fit(X, y) # Plot the results of the grid search. fig, axes = plt.subplots(1, 2, figsize=(8, 4)) axes[0].errorbar( x=n_neighbors_list, y=grid_model.cv_results_["mean_test_score"], yerr=grid_model.cv_results_["std_test_score"], ) axes[0].set(xlabel="n_neighbors", title="Classification accuracy") axes[1].errorbar( x=n_neighbors_list, y=grid_model.cv_results_["mean_fit_time"], yerr=grid_model.cv_results_["std_fit_time"], color="r", ) axes[1].set(xlabel="n_neighbors", title="Fit time (with caching)") fig.tight_layout() plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/neighbors/plot_nearest_centroid.py
examples/neighbors/plot_nearest_centroid.py
""" =============================== Nearest Centroid Classification =============================== Sample usage of Nearest Centroid classification. It will plot the decision boundaries for each class. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from matplotlib.colors import ListedColormap from sklearn import datasets from sklearn.inspection import DecisionBoundaryDisplay from sklearn.neighbors import NearestCentroid # import some data to play with iris = datasets.load_iris() # we only take the first two features. We could avoid this ugly # slicing by using a two-dim dataset X = iris.data[:, :2] y = iris.target # Create color maps cmap_light = ListedColormap(["orange", "cyan", "cornflowerblue"]) cmap_bold = ListedColormap(["darkorange", "c", "darkblue"]) for shrinkage in [None, 0.2]: # we create an instance of Nearest Centroid Classifier and fit the data. clf = NearestCentroid(shrink_threshold=shrinkage) clf.fit(X, y) y_pred = clf.predict(X) print(shrinkage, np.mean(y == y_pred)) _, ax = plt.subplots() DecisionBoundaryDisplay.from_estimator( clf, X, cmap=cmap_light, ax=ax, response_method="predict" ) # Plot also the training points plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold, edgecolor="k", s=20) plt.title("3-Class classification (shrink_threshold=%r)" % shrinkage) plt.axis("tight") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/neighbors/plot_nca_classification.py
examples/neighbors/plot_nca_classification.py
""" ============================================================================= Comparing Nearest Neighbors with and without Neighborhood Components Analysis ============================================================================= An example comparing nearest neighbors classification with and without Neighborhood Components Analysis. It will plot the class decision boundaries given by a Nearest Neighbors classifier when using the Euclidean distance on the original features, versus using the Euclidean distance after the transformation learned by Neighborhood Components Analysis. The latter aims to find a linear transformation that maximises the (stochastic) nearest neighbor classification accuracy on the training set. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap from sklearn import datasets from sklearn.inspection import DecisionBoundaryDisplay from sklearn.model_selection import train_test_split from sklearn.neighbors import KNeighborsClassifier, NeighborhoodComponentsAnalysis from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler n_neighbors = 1 dataset = datasets.load_iris() X, y = dataset.data, dataset.target # we only take two features. 
We could avoid this ugly # slicing by using a two-dim dataset X = X[:, [0, 2]] X_train, X_test, y_train, y_test = train_test_split( X, y, stratify=y, test_size=0.7, random_state=42 ) h = 0.05 # step size in the mesh # Create color maps cmap_light = ListedColormap(["#FFAAAA", "#AAFFAA", "#AAAAFF"]) cmap_bold = ListedColormap(["#FF0000", "#00FF00", "#0000FF"]) names = ["KNN", "NCA, KNN"] classifiers = [ Pipeline( [ ("scaler", StandardScaler()), ("knn", KNeighborsClassifier(n_neighbors=n_neighbors)), ] ), Pipeline( [ ("scaler", StandardScaler()), ("nca", NeighborhoodComponentsAnalysis()), ("knn", KNeighborsClassifier(n_neighbors=n_neighbors)), ] ), ] for name, clf in zip(names, classifiers): clf.fit(X_train, y_train) score = clf.score(X_test, y_test) _, ax = plt.subplots() DecisionBoundaryDisplay.from_estimator( clf, X, cmap=cmap_light, alpha=0.8, ax=ax, response_method="predict", plot_method="pcolormesh", shading="auto", ) # Plot also the training and testing points plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold, edgecolor="k", s=20) plt.title("{} (k = {})".format(name, n_neighbors)) plt.text( 0.9, 0.1, "{:.2f}".format(score), size=15, ha="center", va="center", transform=plt.gca().transAxes, ) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/neighbors/plot_lof_outlier_detection.py
examples/neighbors/plot_lof_outlier_detection.py
""" ================================================= Outlier detection with Local Outlier Factor (LOF) ================================================= The Local Outlier Factor (LOF) algorithm is an unsupervised anomaly detection method which computes the local density deviation of a given data point with respect to its neighbors. It considers as outliers the samples that have a substantially lower density than their neighbors. This example shows how to use LOF for outlier detection which is the default use case of this estimator in scikit-learn. Note that when LOF is used for outlier detection it has no `predict`, `decision_function` and `score_samples` methods. See the :ref:`User Guide <outlier_detection>` for details on the difference between outlier detection and novelty detection and how to use LOF for novelty detection. The number of neighbors considered (parameter `n_neighbors`) is typically set 1) greater than the minimum number of samples a cluster has to contain, so that other samples can be local outliers relative to this cluster, and 2) smaller than the maximum number of close by samples that can potentially be local outliers. In practice, such information is generally not available, and taking `n_neighbors=20` appears to work well in general. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Generate data with outliers # --------------------------- # %% import numpy as np np.random.seed(42) X_inliers = 0.3 * np.random.randn(100, 2) X_inliers = np.r_[X_inliers + 2, X_inliers - 2] X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2)) X = np.r_[X_inliers, X_outliers] n_outliers = len(X_outliers) ground_truth = np.ones(len(X), dtype=int) ground_truth[-n_outliers:] = -1 # %% # Fit the model for outlier detection (default) # --------------------------------------------- # # Use `fit_predict` to compute the predicted labels of the training samples # (when LOF is used for outlier detection, the estimator has no `predict`, # `decision_function` and `score_samples` methods). from sklearn.neighbors import LocalOutlierFactor clf = LocalOutlierFactor(n_neighbors=20, contamination=0.1) y_pred = clf.fit_predict(X) n_errors = (y_pred != ground_truth).sum() X_scores = clf.negative_outlier_factor_ # %% # Plot results # ------------ # %% import matplotlib.pyplot as plt from matplotlib.legend_handler import HandlerPathCollection def update_legend_marker_size(handle, orig): "Customize size of the legend marker" handle.update_from(orig) handle.set_sizes([20]) plt.scatter(X[:, 0], X[:, 1], color="k", s=3.0, label="Data points") # plot circles with radius proportional to the outlier scores radius = (X_scores.max() - X_scores) / (X_scores.max() - X_scores.min()) scatter = plt.scatter( X[:, 0], X[:, 1], s=1000 * radius, edgecolors="r", facecolors="none", label="Outlier scores", ) plt.axis("tight") plt.xlim((-5, 5)) plt.ylim((-5, 5)) plt.xlabel("prediction errors: %d" % (n_errors)) plt.legend( handler_map={scatter: HandlerPathCollection(update_func=update_legend_marker_size)} ) plt.title("Local Outlier Factor (LOF)") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/neighbors/plot_regression.py
examples/neighbors/plot_regression.py
""" ============================ Nearest Neighbors regression ============================ Demonstrate the resolution of a regression problem using a k-Nearest Neighbor and the interpolation of the target using both barycenter and constant weights. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Generate sample data # -------------------- # Here we generate a few data points to use to train the model. We also generate # data in the whole range of the training data to visualize how the model would # react in that whole region. import matplotlib.pyplot as plt import numpy as np from sklearn import neighbors rng = np.random.RandomState(0) X_train = np.sort(5 * rng.rand(40, 1), axis=0) X_test = np.linspace(0, 5, 500)[:, np.newaxis] y = np.sin(X_train).ravel() # Add noise to targets y[::5] += 1 * (0.5 - np.random.rand(8)) # %% # Fit regression model # -------------------- # Here we train a model and visualize how `uniform` and `distance` # weights in prediction effect predicted values. n_neighbors = 5 for i, weights in enumerate(["uniform", "distance"]): knn = neighbors.KNeighborsRegressor(n_neighbors, weights=weights) y_ = knn.fit(X_train, y).predict(X_test) plt.subplot(2, 1, i + 1) plt.scatter(X_train, y, color="darkorange", label="data") plt.plot(X_test, y_, color="navy", label="prediction") plt.axis("tight") plt.legend() plt.title("KNeighborsRegressor (k = %i, weights = '%s')" % (n_neighbors, weights)) plt.tight_layout() plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/neighbors/plot_kde_1d.py
examples/neighbors/plot_kde_1d.py
""" =================================== Simple 1D Kernel Density Estimation =================================== This example uses the :class:`~sklearn.neighbors.KernelDensity` class to demonstrate the principles of Kernel Density Estimation in one dimension. The first plot shows one of the problems with using histograms to visualize the density of points in 1D. Intuitively, a histogram can be thought of as a scheme in which a unit "block" is stacked above each point on a regular grid. As the top two panels show, however, the choice of gridding for these blocks can lead to wildly divergent ideas about the underlying shape of the density distribution. If we instead center each block on the point it represents, we get the estimate shown in the bottom left panel. This is a kernel density estimation with a "top hat" kernel. This idea can be generalized to other kernel shapes: the bottom-right panel of the first figure shows a Gaussian kernel density estimate over the same distribution. Scikit-learn implements efficient kernel density estimation using either a Ball Tree or KD Tree structure, through the :class:`~sklearn.neighbors.KernelDensity` estimator. The available kernels are shown in the second figure of this example. The third figure compares kernel density estimates for a distribution of 100 samples in 1 dimension. Though this example uses 1D distributions, kernel density estimation is easily and efficiently extensible to higher dimensions as well. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from scipy.stats import norm from sklearn.neighbors import KernelDensity # ---------------------------------------------------------------------- # Plot the progression of histograms to kernels np.random.seed(1) N = 20 X = np.concatenate( (np.random.normal(0, 1, int(0.3 * N)), np.random.normal(5, 1, int(0.7 * N))) )[:, np.newaxis] X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis] bins = np.linspace(-5, 10, 10) fig, ax = plt.subplots(2, 2, sharex=True, sharey=True) fig.subplots_adjust(hspace=0.05, wspace=0.05) # histogram 1 ax[0, 0].hist(X[:, 0], bins=bins, fc="#AAAAFF", density=True) ax[0, 0].text(-3.5, 0.31, "Histogram") # histogram 2 ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc="#AAAAFF", density=True) ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted") # tophat KDE kde = KernelDensity(kernel="tophat", bandwidth=0.75).fit(X) log_dens = kde.score_samples(X_plot) ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc="#AAAAFF") ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density") # Gaussian KDE kde = KernelDensity(kernel="gaussian", bandwidth=0.75).fit(X) log_dens = kde.score_samples(X_plot) ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc="#AAAAFF") ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density") for axi in ax.ravel(): axi.plot(X[:, 0], np.full(X.shape[0], -0.01), "+k") axi.set_xlim(-4, 9) axi.set_ylim(-0.02, 0.34) for axi in ax[:, 0]: axi.set_ylabel("Normalized Density") for axi in ax[1, :]: axi.set_xlabel("x") # ---------------------------------------------------------------------- # Plot all available kernels X_plot = np.linspace(-6, 6, 1000)[:, None] X_src = np.zeros((1, 1)) fig, ax = plt.subplots(2, 3, sharex=True, sharey=True) fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05) def format_func(x, loc): if x == 0: return "0" elif x == 1: return "h" elif x == -1: return "-h" else: return "%ih" % x for i, kernel in 
enumerate( ["gaussian", "tophat", "epanechnikov", "exponential", "linear", "cosine"] ): axi = ax.ravel()[i] log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot) axi.fill(X_plot[:, 0], np.exp(log_dens), "-k", fc="#AAAAFF") axi.text(-2.6, 0.95, kernel) axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func)) axi.xaxis.set_major_locator(plt.MultipleLocator(1)) axi.yaxis.set_major_locator(plt.NullLocator()) axi.set_ylim(0, 1.05) axi.set_xlim(-2.9, 2.9) ax[0, 1].set_title("Available Kernels") # ---------------------------------------------------------------------- # Plot a 1D density example N = 100 np.random.seed(1) X = np.concatenate( (np.random.normal(0, 1, int(0.3 * N)), np.random.normal(5, 1, int(0.7 * N))) )[:, np.newaxis] X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis] true_dens = 0.3 * norm(0, 1).pdf(X_plot[:, 0]) + 0.7 * norm(5, 1).pdf(X_plot[:, 0]) fig, ax = plt.subplots() ax.fill(X_plot[:, 0], true_dens, fc="black", alpha=0.2, label="input distribution") colors = ["navy", "cornflowerblue", "darkorange"] kernels = ["gaussian", "tophat", "epanechnikov"] lw = 2 for color, kernel in zip(colors, kernels): kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X) log_dens = kde.score_samples(X_plot) ax.plot( X_plot[:, 0], np.exp(log_dens), color=color, lw=lw, linestyle="-", label="kernel = '{0}'".format(kernel), ) ax.text(6, 0.38, "N={0} points".format(N)) ax.legend(loc="upper left") ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), "+k") ax.set_xlim(-4, 9) ax.set_ylim(-0.02, 0.4) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/neighbors/plot_species_kde.py
examples/neighbors/plot_species_kde.py
""" ================================================ Kernel Density Estimate of Species Distributions ================================================ This shows an example of a neighbors-based query (in particular a kernel density estimate) on geospatial data, using a Ball Tree built upon the Haversine distance metric -- i.e. distances over points in latitude/longitude. The dataset is provided by Phillips et. al. (2006) [1]_. If available, the example uses `basemap <https://matplotlib.org/basemap/>`_ to plot the coast lines and national boundaries of South America. This example does not perform any learning over the data (see :ref:`sphx_glr_auto_examples_applications_plot_species_distribution_modeling.py` for an example of classification based on the attributes in this dataset). It simply shows the kernel density estimate of observed data points in geospatial coordinates. The two species are: - `"Bradypus variegatus" <https://www.iucnredlist.org/species/3038/47437046>`_ , the Brown-throated Sloth. - `"Microryzomys minutus" <http://www.iucnredlist.org/details/13408/0>`_ , also known as the Forest Small Rice Rat, a rodent that lives in Peru, Colombia, Ecuador, Peru, and Venezuela. References ---------- .. [1] `"Maximum entropy modeling of species geographic distributions" <http://rob.schapire.net/papers/ecolmod.pdf>`_ S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling, 190:231-259, 2006. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn.datasets import fetch_species_distributions from sklearn.neighbors import KernelDensity # if basemap is available, we'll use it. # otherwise, we'll improvise later... 
try: from mpl_toolkits.basemap import Basemap basemap = True except ImportError: basemap = False def construct_grids(batch): """Construct the map grid from the batch object Parameters ---------- batch : Batch object The object returned by :func:`fetch_species_distributions` Returns ------- (xgrid, ygrid) : 1-D arrays The grid corresponding to the values in batch.coverages """ # x,y coordinates for corner cells xmin = batch.x_left_lower_corner + batch.grid_size xmax = xmin + (batch.Nx * batch.grid_size) ymin = batch.y_left_lower_corner + batch.grid_size ymax = ymin + (batch.Ny * batch.grid_size) # x coordinates of the grid cells xgrid = np.arange(xmin, xmax, batch.grid_size) # y coordinates of the grid cells ygrid = np.arange(ymin, ymax, batch.grid_size) return (xgrid, ygrid) # Get matrices/arrays of species IDs and locations data = fetch_species_distributions() species_names = ["Bradypus Variegatus", "Microryzomys Minutus"] Xtrain = np.vstack([data["train"]["dd lat"], data["train"]["dd long"]]).T ytrain = np.array( [d.decode("ascii").startswith("micro") for d in data["train"]["species"]], dtype="int", ) Xtrain *= np.pi / 180.0 # Convert lat/long to radians # Set up the data grid for the contour plot xgrid, ygrid = construct_grids(data) X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1]) land_reference = data.coverages[6][::5, ::5] land_mask = (land_reference > -9999).ravel() xy = np.vstack([Y.ravel(), X.ravel()]).T xy = xy[land_mask] xy *= np.pi / 180.0 # Plot map of South America with distributions of each species fig = plt.figure() fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05) for i in range(2): plt.subplot(1, 2, i + 1) # construct a kernel density estimate of the distribution print(" - computing KDE in spherical coordinates") kde = KernelDensity( bandwidth=0.04, metric="haversine", kernel="gaussian", algorithm="ball_tree" ) kde.fit(Xtrain[ytrain == i]) # evaluate only on the land: -9999 indicates ocean Z = np.full(land_mask.shape[0], -9999, dtype="int") 
Z[land_mask] = np.exp(kde.score_samples(xy)) Z = Z.reshape(X.shape) # plot contours of the density levels = np.linspace(0, Z.max(), 25) plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds) if basemap: print(" - plot coastlines using basemap") m = Basemap( projection="cyl", llcrnrlat=Y.min(), urcrnrlat=Y.max(), llcrnrlon=X.min(), urcrnrlon=X.max(), resolution="c", ) m.drawcoastlines() m.drawcountries() else: print(" - plot coastlines from coverage") plt.contour( X, Y, land_reference, levels=[-9998], colors="k", linestyles="solid" ) plt.xticks([]) plt.yticks([]) plt.title(species_names[i]) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/neighbors/plot_digits_kde_sampling.py
examples/neighbors/plot_digits_kde_sampling.py
""" ========================= Kernel Density Estimation ========================= This example shows how kernel density estimation (KDE), a powerful non-parametric density estimation technique, can be used to learn a generative model for a dataset. With this generative model in place, new samples can be drawn. These new samples reflect the underlying model of the data. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn.datasets import load_digits from sklearn.decomposition import PCA from sklearn.model_selection import GridSearchCV from sklearn.neighbors import KernelDensity # load the data digits = load_digits() # project the 64-dimensional data to a lower dimension pca = PCA(n_components=15, whiten=False) data = pca.fit_transform(digits.data) # use grid search cross-validation to optimize the bandwidth params = {"bandwidth": np.logspace(-1, 1, 20)} grid = GridSearchCV(KernelDensity(), params) grid.fit(data) print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth)) # use the best estimator to compute the kernel density estimate kde = grid.best_estimator_ # sample 44 new points from the data new_data = kde.sample(44, random_state=0) new_data = pca.inverse_transform(new_data) # turn data into a 4x11 grid new_data = new_data.reshape((4, 11, -1)) real_data = digits.data[:44].reshape((4, 11, -1)) # plot real digits and resampled digits fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[])) for j in range(11): ax[4, j].set_visible(False) for i in range(4): im = ax[i, j].imshow( real_data[i, j].reshape((8, 8)), cmap=plt.cm.binary, interpolation="nearest" ) im.set_clim(0, 16) im = ax[i + 5, j].imshow( new_data[i, j].reshape((8, 8)), cmap=plt.cm.binary, interpolation="nearest" ) im.set_clim(0, 16) ax[0, 5].set_title("Selection from the input data") ax[5, 5].set_title('"New" digits drawn from the kernel density model') plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/neighbors/plot_nca_dim_reduction.py
examples/neighbors/plot_nca_dim_reduction.py
""" ============================================================== Dimensionality Reduction with Neighborhood Components Analysis ============================================================== Sample usage of Neighborhood Components Analysis for dimensionality reduction. This example compares different (linear) dimensionality reduction methods applied on the Digits data set. The data set contains images of digits from 0 to 9 with approximately 180 samples of each class. Each image is of dimension 8x8 = 64, and is reduced to a two-dimensional data point. Principal Component Analysis (PCA) applied to this data identifies the combination of attributes (principal components, or directions in the feature space) that account for the most variance in the data. Here we plot the different samples on the 2 first principal components. Linear Discriminant Analysis (LDA) tries to identify attributes that account for the most variance *between classes*. In particular, LDA, in contrast to PCA, is a supervised method, using known class labels. Neighborhood Components Analysis (NCA) tries to find a feature space such that a stochastic nearest neighbor algorithm will give the best accuracy. Like LDA, it is a supervised method. One can see that NCA enforces a clustering of the data that is visually meaningful despite the large reduction in dimension. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn import datasets from sklearn.decomposition import PCA from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.model_selection import train_test_split from sklearn.neighbors import KNeighborsClassifier, NeighborhoodComponentsAnalysis from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler n_neighbors = 3 random_state = 0 # Load Digits dataset X, y = datasets.load_digits(return_X_y=True) # Split into train/test X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.5, stratify=y, random_state=random_state ) dim = len(X[0]) n_classes = len(np.unique(y)) # Reduce dimension to 2 with PCA pca = make_pipeline(StandardScaler(), PCA(n_components=2, random_state=random_state)) # Reduce dimension to 2 with LinearDiscriminantAnalysis lda = make_pipeline(StandardScaler(), LinearDiscriminantAnalysis(n_components=2)) # Reduce dimension to 2 with NeighborhoodComponentAnalysis nca = make_pipeline( StandardScaler(), NeighborhoodComponentsAnalysis(n_components=2, random_state=random_state), ) # Use a nearest neighbor classifier to evaluate the methods knn = KNeighborsClassifier(n_neighbors=n_neighbors) # Make a list of the methods to be compared dim_reduction_methods = [("PCA", pca), ("LDA", lda), ("NCA", nca)] # plt.figure() for i, (name, model) in enumerate(dim_reduction_methods): plt.figure() # plt.subplot(1, 3, i + 1, aspect=1) # Fit the method's model model.fit(X_train, y_train) # Fit a nearest neighbor classifier on the embedded training set knn.fit(model.transform(X_train), y_train) # Compute the nearest neighbor accuracy on the embedded test set acc_knn = knn.score(model.transform(X_test), y_test) # Embed the data set in 2 dimensions using the fitted model X_embedded = model.transform(X) # Plot the projected points and show the evaluation score 
plt.scatter(X_embedded[:, 0], X_embedded[:, 1], c=y, s=30, cmap="Set1") plt.title( "{}, KNN (k={})\nTest accuracy = {:.2f}".format(name, n_neighbors, acc_knn) ) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/covariance/plot_lw_vs_oas.py
examples/covariance/plot_lw_vs_oas.py
""" ============================= Ledoit-Wolf vs OAS estimation ============================= The usual covariance maximum likelihood estimate can be regularized using shrinkage. Ledoit and Wolf proposed a close formula to compute the asymptotically optimal shrinkage parameter (minimizing a MSE criterion), yielding the Ledoit-Wolf covariance estimate. Chen et al. [1]_ proposed an improvement of the Ledoit-Wolf shrinkage parameter, the OAS coefficient, whose convergence is significantly better under the assumption that the data are Gaussian. This example, inspired from Chen's publication [1]_, shows a comparison of the estimated MSE of the LW and OAS methods, using Gaussian distributed data. .. rubric :: References .. [1] "Shrinkage Algorithms for MMSE Covariance Estimation" Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from scipy.linalg import cholesky, toeplitz from sklearn.covariance import OAS, LedoitWolf np.random.seed(0) # %% n_features = 100 # simulation covariance matrix (AR(1) process) r = 0.1 real_cov = toeplitz(r ** np.arange(n_features)) coloring_matrix = cholesky(real_cov) n_samples_range = np.arange(6, 31, 1) repeat = 100 lw_mse = np.zeros((n_samples_range.size, repeat)) oa_mse = np.zeros((n_samples_range.size, repeat)) lw_shrinkage = np.zeros((n_samples_range.size, repeat)) oa_shrinkage = np.zeros((n_samples_range.size, repeat)) for i, n_samples in enumerate(n_samples_range): for j in range(repeat): X = np.dot(np.random.normal(size=(n_samples, n_features)), coloring_matrix.T) lw = LedoitWolf(store_precision=False, assume_centered=True) lw.fit(X) lw_mse[i, j] = lw.error_norm(real_cov, scaling=False) lw_shrinkage[i, j] = lw.shrinkage_ oa = OAS(store_precision=False, assume_centered=True) oa.fit(X) oa_mse[i, j] = oa.error_norm(real_cov, scaling=False) oa_shrinkage[i, j] = oa.shrinkage_ # plot 
MSE plt.subplot(2, 1, 1) plt.errorbar( n_samples_range, lw_mse.mean(1), yerr=lw_mse.std(1), label="Ledoit-Wolf", color="navy", lw=2, ) plt.errorbar( n_samples_range, oa_mse.mean(1), yerr=oa_mse.std(1), label="OAS", color="darkorange", lw=2, ) plt.ylabel("Squared error") plt.legend(loc="upper right") plt.title("Comparison of covariance estimators") plt.xlim(5, 31) # plot shrinkage coefficient plt.subplot(2, 1, 2) plt.errorbar( n_samples_range, lw_shrinkage.mean(1), yerr=lw_shrinkage.std(1), label="Ledoit-Wolf", color="navy", lw=2, ) plt.errorbar( n_samples_range, oa_shrinkage.mean(1), yerr=oa_shrinkage.std(1), label="OAS", color="darkorange", lw=2, ) plt.xlabel("n_samples") plt.ylabel("Shrinkage") plt.legend(loc="lower right") plt.ylim(plt.ylim()[0], 1.0 + (plt.ylim()[1] - plt.ylim()[0]) / 10.0) plt.xlim(5, 31) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/covariance/plot_covariance_estimation.py
examples/covariance/plot_covariance_estimation.py
""" ======================================================================= Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood ======================================================================= When working with covariance estimation, the usual approach is to use a maximum likelihood estimator, such as the :class:`~sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it converges to the true (population) covariance when given many observations. However, it can also be beneficial to regularize it, in order to reduce its variance; this, in turn, introduces some bias. This example illustrates the simple regularization used in :ref:`shrunk_covariance` estimators. In particular, it focuses on how to set the amount of regularization, i.e. how to choose the bias-variance trade-off. .. rubric:: References .. [1] "Shrinkage Algorithms for MMSE Covariance Estimation" Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Generate sample data # -------------------- import numpy as np n_features, n_samples = 40, 20 np.random.seed(42) base_X_train = np.random.normal(size=(n_samples, n_features)) base_X_test = np.random.normal(size=(n_samples, n_features)) # Color samples coloring_matrix = np.random.normal(size=(n_features, n_features)) X_train = np.dot(base_X_train, coloring_matrix) X_test = np.dot(base_X_test, coloring_matrix) # %% # Compute the likelihood on test data # ----------------------------------- from scipy import linalg from sklearn.covariance import ShrunkCovariance, empirical_covariance, log_likelihood # spanning a range of possible shrinkage coefficient values shrinkages = np.logspace(-2, 0, 30) negative_logliks = [ -ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test) for s in shrinkages ] # under the ground-truth model, which we would not have access to in real # settings real_cov = np.dot(coloring_matrix.T, 
coloring_matrix) emp_cov = empirical_covariance(X_train) loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov)) # %% # Compare different approaches to setting the regularization parameter # -------------------------------------------------------------------- # # Here we compare 3 approaches: # # * Setting the parameter by cross-validating the likelihood on three folds # according to a grid of potential shrinkage parameters. # # * A close formula proposed by Ledoit and Wolf to compute # the asymptotically optimal regularization parameter (minimizing a MSE # criterion), yielding the :class:`~sklearn.covariance.LedoitWolf` # covariance estimate. # # * An improvement of the Ledoit-Wolf shrinkage, the # :class:`~sklearn.covariance.OAS`, proposed by Chen et al. [1]_. Its # convergence is significantly better under the assumption that the data # are Gaussian, in particular for small samples. from sklearn.covariance import OAS, LedoitWolf from sklearn.model_selection import GridSearchCV # GridSearch for an optimal shrinkage coefficient tuned_parameters = [{"shrinkage": shrinkages}] cv = GridSearchCV(ShrunkCovariance(), tuned_parameters) cv.fit(X_train) # Ledoit-Wolf optimal shrinkage coefficient estimate lw = LedoitWolf() loglik_lw = lw.fit(X_train).score(X_test) # OAS coefficient estimate oa = OAS() loglik_oa = oa.fit(X_train).score(X_test) # %% # Plot results # ------------ # # # To quantify estimation error, we plot the likelihood of unseen data for # different values of the shrinkage parameter. We also show the choices by # cross-validation, or with the LedoitWolf and OAS estimates. 
import matplotlib.pyplot as plt fig = plt.figure() plt.title("Regularized covariance: likelihood and shrinkage coefficient") plt.xlabel("Regularization parameter: shrinkage coefficient") plt.ylabel("Error: negative log-likelihood on test data") # range shrinkage curve plt.loglog(shrinkages, negative_logliks, label="Negative log-likelihood") plt.plot(plt.xlim(), 2 * [loglik_real], "--r", label="Real covariance likelihood") # adjust view lik_max = np.amax(negative_logliks) lik_min = np.amin(negative_logliks) ymin = lik_min - 6.0 * np.log((plt.ylim()[1] - plt.ylim()[0])) ymax = lik_max + 10.0 * np.log(lik_max - lik_min) xmin = shrinkages[0] xmax = shrinkages[-1] # LW likelihood plt.vlines( lw.shrinkage_, ymin, -loglik_lw, color="magenta", linewidth=3, label="Ledoit-Wolf estimate", ) # OAS likelihood plt.vlines( oa.shrinkage_, ymin, -loglik_oa, color="purple", linewidth=3, label="OAS estimate" ) # best CV estimator likelihood plt.vlines( cv.best_estimator_.shrinkage, ymin, -cv.best_estimator_.score(X_test), color="cyan", linewidth=3, label="Cross-validation best estimate", ) plt.ylim(ymin, ymax) plt.xlim(xmin, xmax) plt.legend() plt.show() # %% # .. note:: # # The maximum likelihood estimate corresponds to no shrinkage, # and thus performs poorly. The Ledoit-Wolf estimate performs really well, # as it is close to the optimal and is not computationally costly. In this # example, the OAS estimate is a bit further away. Interestingly, both # approaches outperform cross-validation, which is significantly most # computationally costly.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/covariance/plot_mahalanobis_distances.py
examples/covariance/plot_mahalanobis_distances.py
r""" ================================================================ Robust covariance estimation and Mahalanobis distances relevance ================================================================ This example shows covariance estimation with Mahalanobis distances on Gaussian distributed data. For Gaussian distributed data, the distance of an observation :math:`x_i` to the mode of the distribution can be computed using its Mahalanobis distance: .. math:: d_{(\mu,\Sigma)}(x_i)^2 = (x_i - \mu)^T\Sigma^{-1}(x_i - \mu) where :math:`\mu` and :math:`\Sigma` are the location and the covariance of the underlying Gaussian distributions. In practice, :math:`\mu` and :math:`\Sigma` are replaced by some estimates. The standard covariance maximum likelihood estimate (MLE) is very sensitive to the presence of outliers in the data set and therefore, the downstream Mahalanobis distances also are. It would be better to use a robust estimator of covariance to guarantee that the estimation is resistant to "erroneous" observations in the dataset and that the calculated Mahalanobis distances accurately reflect the true organization of the observations. The Minimum Covariance Determinant estimator (MCD) is a robust, high-breakdown point (i.e. it can be used to estimate the covariance matrix of highly contaminated datasets, up to :math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers) estimator of covariance. The idea behind the MCD is to find :math:`\frac{n_\text{samples}+n_\text{features}+1}{2}` observations whose empirical covariance has the smallest determinant, yielding a "pure" subset of observations from which to compute standards estimates of location and covariance. The MCD was introduced by P.J.Rousseuw in [1]_. This example illustrates how the Mahalanobis distances are affected by outlying data. 
Observations drawn from a contaminating distribution are not distinguishable from the observations coming from the real, Gaussian distribution when using standard covariance MLE based Mahalanobis distances. Using MCD-based Mahalanobis distances, the two populations become distinguishable. Associated applications include outlier detection, observation ranking and clustering. .. note:: See also :ref:`sphx_glr_auto_examples_covariance_plot_robust_vs_empirical_covariance.py` .. rubric:: References .. [1] P. J. Rousseeuw. `Least median of squares regression <http://web.ipac.caltech.edu/staff/fmasci/home/astro_refs/LeastMedianOfSquares.pdf>`_. J. Am Stat Ass, 79:871, 1984. .. [2] Wilson, E. B., & Hilferty, M. M. (1931). `The distribution of chi-square. <https://water.usgs.gov/osw/bulletin17b/Wilson_Hilferty_1931.pdf>`_ Proceedings of the National Academy of Sciences of the United States of America, 17, 684-688. """ # noqa: E501 # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Generate data # -------------- # # First, we generate a dataset of 125 samples and 2 features. Both features # are Gaussian distributed with mean of 0 but feature 1 has a standard # deviation equal to 2 and feature 2 has a standard deviation equal to 1. Next, # 25 samples are replaced with Gaussian outlier samples where feature 1 has # a standard deviation equal to 1 and feature 2 has a standard deviation equal # to 7. 
import numpy as np # for consistent results np.random.seed(7) n_samples = 125 n_outliers = 25 n_features = 2 # generate Gaussian data of shape (125, 2) gen_cov = np.eye(n_features) gen_cov[0, 0] = 2.0 X = np.dot(np.random.randn(n_samples, n_features), gen_cov) # add some outliers outliers_cov = np.eye(n_features) outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7.0 X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov) # %% # Comparison of results # --------------------- # # Below, we fit MCD and MLE based covariance estimators to our data and print # the estimated covariance matrices. Note that the estimated variance of # feature 2 is much higher with the MLE based estimator (7.5) than # that of the MCD robust estimator (1.2). This shows that the MCD based # robust estimator is much more resistant to the outlier samples, which were # designed to have a much larger variance in feature 2. import matplotlib.pyplot as plt from sklearn.covariance import EmpiricalCovariance, MinCovDet # fit a MCD robust estimator to data robust_cov = MinCovDet().fit(X) # fit a MLE estimator to data emp_cov = EmpiricalCovariance().fit(X) print( "Estimated covariance matrix:\nMCD (Robust):\n{}\nMLE:\n{}".format( robust_cov.covariance_, emp_cov.covariance_ ) ) # %% # To better visualize the difference, we plot contours of the # Mahalanobis distances calculated by both methods. Notice that the robust # MCD based Mahalanobis distances fit the inlier black points much better, # whereas the MLE based distances are more influenced by the outlier # red points. 
import matplotlib.lines as mlines fig, ax = plt.subplots(figsize=(10, 5)) # Plot data set inlier_plot = ax.scatter(X[:, 0], X[:, 1], color="black", label="inliers") outlier_plot = ax.scatter( X[:, 0][-n_outliers:], X[:, 1][-n_outliers:], color="red", label="outliers" ) ax.set_xlim(ax.get_xlim()[0], 10.0) ax.set_title("Mahalanobis distances of a contaminated data set") # Create meshgrid of feature 1 and feature 2 values xx, yy = np.meshgrid( np.linspace(plt.xlim()[0], plt.xlim()[1], 100), np.linspace(plt.ylim()[0], plt.ylim()[1], 100), ) zz = np.c_[xx.ravel(), yy.ravel()] # Calculate the MLE based Mahalanobis distances of the meshgrid mahal_emp_cov = emp_cov.mahalanobis(zz) mahal_emp_cov = mahal_emp_cov.reshape(xx.shape) emp_cov_contour = plt.contour( xx, yy, np.sqrt(mahal_emp_cov), cmap=plt.cm.PuBu_r, linestyles="dashed" ) # Calculate the MCD based Mahalanobis distances mahal_robust_cov = robust_cov.mahalanobis(zz) mahal_robust_cov = mahal_robust_cov.reshape(xx.shape) robust_contour = ax.contour( xx, yy, np.sqrt(mahal_robust_cov), cmap=plt.cm.YlOrBr_r, linestyles="dotted" ) # Add legend ax.legend( [ mlines.Line2D([], [], color="tab:blue", linestyle="dashed"), mlines.Line2D([], [], color="tab:orange", linestyle="dotted"), inlier_plot, outlier_plot, ], ["MLE dist", "MCD dist", "inliers", "outliers"], loc="upper right", borderaxespad=0, ) plt.show() # %% # Finally, we highlight the ability of MCD based Mahalanobis distances to # distinguish outliers. We take the cubic root of the Mahalanobis distances, # yielding approximately normal distributions (as suggested by Wilson and # Hilferty [2]_), then plot the values of inlier and outlier samples with # boxplots. The distribution of outlier samples is more separated from the # distribution of inlier samples for robust MCD based Mahalanobis distances. 
fig, (ax1, ax2) = plt.subplots(1, 2) plt.subplots_adjust(wspace=0.6) # Calculate cubic root of MLE Mahalanobis distances for samples emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33) # Plot boxplots ax1.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=0.25) # Plot individual samples ax1.plot( np.full(n_samples - n_outliers, 1.26), emp_mahal[:-n_outliers], "+k", markeredgewidth=1, ) ax1.plot(np.full(n_outliers, 2.26), emp_mahal[-n_outliers:], "+k", markeredgewidth=1) ax1.axes.set_xticklabels(("inliers", "outliers"), size=15) ax1.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16) ax1.set_title("Using non-robust estimates\n(Maximum Likelihood)") # Calculate cubic root of MCD Mahalanobis distances for samples robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33) # Plot boxplots ax2.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]], widths=0.25) # Plot individual samples ax2.plot( np.full(n_samples - n_outliers, 1.26), robust_mahal[:-n_outliers], "+k", markeredgewidth=1, ) ax2.plot(np.full(n_outliers, 2.26), robust_mahal[-n_outliers:], "+k", markeredgewidth=1) ax2.axes.set_xticklabels(("inliers", "outliers"), size=15) ax2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16) ax2.set_title("Using robust estimates\n(Minimum Covariance Determinant)") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/covariance/plot_robust_vs_empirical_covariance.py
examples/covariance/plot_robust_vs_empirical_covariance.py
r""" ======================================= Robust vs Empirical covariance estimate ======================================= The usual covariance maximum likelihood estimate is very sensitive to the presence of outliers in the data set. In such a case, it would be better to use a robust estimator of covariance to guarantee that the estimation is resistant to "erroneous" observations in the data set. [1]_, [2]_ Minimum Covariance Determinant Estimator ---------------------------------------- The Minimum Covariance Determinant estimator is a robust, high-breakdown point (i.e. it can be used to estimate the covariance matrix of highly contaminated datasets, up to :math:`\frac{n_\text{samples} - n_\text{features}-1}{2}` outliers) estimator of covariance. The idea is to find :math:`\frac{n_\text{samples} + n_\text{features}+1}{2}` observations whose empirical covariance has the smallest determinant, yielding a "pure" subset of observations from which to compute standards estimates of location and covariance. After a correction step aiming at compensating the fact that the estimates were learned from only a portion of the initial data, we end up with robust estimates of the data set location and covariance. The Minimum Covariance Determinant estimator (MCD) has been introduced by P.J.Rousseuw in [3]_. Evaluation ---------- In this example, we compare the estimation errors that are made when using various types of location and covariance estimates on contaminated Gaussian distributed data sets: - The mean and the empirical covariance of the full dataset, which break down as soon as there are outliers in the data set - The robust MCD, that has a low error provided :math:`n_\text{samples} > 5n_\text{features}` - The mean and the empirical covariance of the observations that are known to be good ones. This can be considered as a "perfect" MCD estimation, so one can trust our implementation by comparing to this case. References ---------- .. [1] Johanna Hardin, David M Rocke. 
The distribution of robust distances. Journal of Computational and Graphical Statistics. December 1, 2005, 14(4): 928-946. .. [2] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust estimation in signal processing: A tutorial-style treatment of fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80. .. [3] P. J. Rousseeuw. Least median of squares regression. Journal of American Statistical Ass., 79:871, 1984. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.font_manager import matplotlib.pyplot as plt import numpy as np from sklearn.covariance import EmpiricalCovariance, MinCovDet # example settings n_samples = 80 n_features = 5 repeat = 10 range_n_outliers = np.concatenate( ( np.linspace(0, n_samples / 8, 5), np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1], ) ).astype(int) # definition of arrays to store results err_loc_mcd = np.zeros((range_n_outliers.size, repeat)) err_cov_mcd = np.zeros((range_n_outliers.size, repeat)) err_loc_emp_full = np.zeros((range_n_outliers.size, repeat)) err_cov_emp_full = np.zeros((range_n_outliers.size, repeat)) err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat)) err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat)) # computation for i, n_outliers in enumerate(range_n_outliers): for j in range(repeat): rng = np.random.RandomState(i * j) # generate data X = rng.randn(n_samples, n_features) # add some outliers outliers_index = rng.permutation(n_samples)[:n_outliers] outliers_offset = 10.0 * ( np.random.randint(2, size=(n_outliers, n_features)) - 0.5 ) X[outliers_index] += outliers_offset inliers_mask = np.ones(n_samples).astype(bool) inliers_mask[outliers_index] = False # fit a Minimum Covariance Determinant (MCD) robust estimator to data mcd = MinCovDet().fit(X) # compare raw robust estimates with the true location and covariance err_loc_mcd[i, j] = np.sum(mcd.location_**2) err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features)) # compare 
estimators learned from the full data set with true # parameters err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2) err_cov_emp_full[i, j] = ( EmpiricalCovariance().fit(X).error_norm(np.eye(n_features)) ) # compare with an empirical covariance learned from a pure data set # (i.e. "perfect" mcd) pure_X = X[inliers_mask] pure_location = pure_X.mean(0) pure_emp_cov = EmpiricalCovariance().fit(pure_X) err_loc_emp_pure[i, j] = np.sum(pure_location**2) err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features)) # Display results font_prop = matplotlib.font_manager.FontProperties(size=11) plt.subplot(2, 1, 1) lw = 2 plt.errorbar( range_n_outliers, err_loc_mcd.mean(1), yerr=err_loc_mcd.std(1) / np.sqrt(repeat), label="Robust location", lw=lw, color="m", ) plt.errorbar( range_n_outliers, err_loc_emp_full.mean(1), yerr=err_loc_emp_full.std(1) / np.sqrt(repeat), label="Full data set mean", lw=lw, color="green", ) plt.errorbar( range_n_outliers, err_loc_emp_pure.mean(1), yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat), label="Pure data set mean", lw=lw, color="black", ) plt.title("Influence of outliers on the location estimation") plt.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)") plt.legend(loc="upper left", prop=font_prop) plt.subplot(2, 1, 2) x_size = range_n_outliers.size plt.errorbar( range_n_outliers, err_cov_mcd.mean(1), yerr=err_cov_mcd.std(1), label="Robust covariance (mcd)", color="m", ) plt.errorbar( range_n_outliers[: (x_size // 5 + 1)], err_cov_emp_full.mean(1)[: (x_size // 5 + 1)], yerr=err_cov_emp_full.std(1)[: (x_size // 5 + 1)], label="Full data set empirical covariance", color="green", ) plt.plot( range_n_outliers[(x_size // 5) : (x_size // 2 - 1)], err_cov_emp_full.mean(1)[(x_size // 5) : (x_size // 2 - 1)], color="green", ls="--", ) plt.errorbar( range_n_outliers, err_cov_emp_pure.mean(1), yerr=err_cov_emp_pure.std(1), label="Pure data set empirical covariance", color="black", ) plt.title("Influence of outliers on the covariance estimation") 
plt.xlabel("Amount of contamination (%)") plt.ylabel("RMSE") plt.legend(loc="center", prop=font_prop) plt.tight_layout() plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/covariance/plot_sparse_cov.py
examples/covariance/plot_sparse_cov.py
""" ====================================== Sparse inverse covariance estimation ====================================== Using the GraphicalLasso estimator to learn a covariance and sparse precision from a small number of samples. To estimate a probabilistic model (e.g. a Gaussian model), estimating the precision matrix, that is the inverse covariance matrix, is as important as estimating the covariance matrix. Indeed a Gaussian model is parametrized by the precision matrix. To be in favorable recovery conditions, we sample the data from a model with a sparse inverse covariance matrix. In addition, we ensure that the data is not too much correlated (limiting the largest coefficient of the precision matrix) and that there a no small coefficients in the precision matrix that cannot be recovered. In addition, with a small number of observations, it is easier to recover a correlation matrix rather than a covariance, thus we scale the time series. Here, the number of samples is slightly larger than the number of dimensions, thus the empirical covariance is still invertible. However, as the observations are strongly correlated, the empirical covariance matrix is ill-conditioned and as a result its inverse --the empirical precision matrix-- is very far from the ground truth. If we use l2 shrinkage, as with the Ledoit-Wolf estimator, as the number of samples is small, we need to shrink a lot. As a result, the Ledoit-Wolf precision is fairly close to the ground truth precision, that is not far from being diagonal, but the off-diagonal structure is lost. The l1-penalized estimator can recover part of this off-diagonal structure. It learns a sparse precision. It is not able to recover the exact sparsity pattern: it detects too many non-zero coefficients. However, the highest non-zero coefficients of the l1 estimated correspond to the non-zero coefficients in the ground truth. 
Finally, the coefficients of the l1 precision estimate are biased toward zero: because of the penalty, they are all smaller than the corresponding ground truth value, as can be seen on the figure. Note that, the color range of the precision matrices is tweaked to improve readability of the figure. The full range of values of the empirical precision is not displayed. The alpha parameter of the GraphicalLasso setting the sparsity of the model is set by internal cross-validation in the GraphicalLassoCV. As can be seen on figure 2, the grid to compute the cross-validation score is iteratively refined in the neighborhood of the maximum. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Generate the data # ----------------- import numpy as np from scipy import linalg from sklearn.datasets import make_sparse_spd_matrix n_samples = 60 n_features = 20 prng = np.random.RandomState(1) prec = make_sparse_spd_matrix( n_features, alpha=0.98, smallest_coef=0.4, largest_coef=0.7, random_state=prng ) cov = linalg.inv(prec) d = np.sqrt(np.diag(cov)) cov /= d cov /= d[:, np.newaxis] prec *= d prec *= d[:, np.newaxis] X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples) X -= X.mean(axis=0) X /= X.std(axis=0) # %% # Estimate the covariance # ----------------------- from sklearn.covariance import GraphicalLassoCV, ledoit_wolf emp_cov = np.dot(X.T, X) / n_samples model = GraphicalLassoCV() model.fit(X) cov_ = model.covariance_ prec_ = model.precision_ lw_cov_, _ = ledoit_wolf(X) lw_prec_ = linalg.inv(lw_cov_) # %% # Plot the results # ---------------- import matplotlib.pyplot as plt plt.figure(figsize=(10, 6)) plt.subplots_adjust(left=0.02, right=0.98) # plot the covariances covs = [ ("Empirical", emp_cov), ("Ledoit-Wolf", lw_cov_), ("GraphicalLassoCV", cov_), ("True", cov), ] vmax = cov_.max() for i, (name, this_cov) in enumerate(covs): plt.subplot(2, 4, i + 1) plt.imshow( this_cov, interpolation="nearest", vmin=-vmax, 
vmax=vmax, cmap=plt.cm.RdBu_r ) plt.xticks(()) plt.yticks(()) plt.title("%s covariance" % name) # plot the precisions precs = [ ("Empirical", linalg.inv(emp_cov)), ("Ledoit-Wolf", lw_prec_), ("GraphicalLasso", prec_), ("True", prec), ] vmax = 0.9 * prec_.max() for i, (name, this_prec) in enumerate(precs): ax = plt.subplot(2, 4, i + 5) plt.imshow( np.ma.masked_equal(this_prec, 0), interpolation="nearest", vmin=-vmax, vmax=vmax, cmap=plt.cm.RdBu_r, ) plt.xticks(()) plt.yticks(()) plt.title("%s precision" % name) if hasattr(ax, "set_facecolor"): ax.set_facecolor(".7") else: ax.set_axis_bgcolor(".7") # %% # plot the model selection metric plt.figure(figsize=(4, 3)) plt.axes([0.2, 0.15, 0.75, 0.7]) plt.plot(model.cv_results_["alphas"], model.cv_results_["mean_test_score"], "o-") plt.axvline(model.alpha_, color=".5") plt.title("Model selection") plt.ylabel("Cross-validation score") plt.xlabel("alpha") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/feature_selection/plot_rfe_with_cross_validation.py
examples/feature_selection/plot_rfe_with_cross_validation.py
""" =================================================== Recursive feature elimination with cross-validation =================================================== A Recursive Feature Elimination (RFE) example with automatic tuning of the number of features selected with cross-validation. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Data generation # --------------- # # We build a classification task using 3 informative features. The introduction # of 2 additional redundant (i.e. correlated) features has the effect that the # selected features vary depending on the cross-validation fold. The remaining # features are non-informative as they are drawn at random. from sklearn.datasets import make_classification n_features = 15 feat_names = [f"feature_{i}" for i in range(15)] X, y = make_classification( n_samples=500, n_features=n_features, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, class_sep=0.8, random_state=0, ) # %% # Model training and selection # ---------------------------- # # We create the RFE object and compute the cross-validated scores. The scoring # strategy "accuracy" optimizes the proportion of correctly classified samples. from sklearn.feature_selection import RFECV from sklearn.linear_model import LogisticRegression from sklearn.model_selection import StratifiedKFold min_features_to_select = 1 # Minimum number of features to consider clf = LogisticRegression() cv = StratifiedKFold(5) rfecv = RFECV( estimator=clf, step=1, cv=cv, scoring="accuracy", min_features_to_select=min_features_to_select, n_jobs=2, ) rfecv.fit(X, y) print(f"Optimal number of features: {rfecv.n_features_}") # %% # In the present case, the model with 3 features (which corresponds to the true # generative model) is found to be the most optimal. # # Plot number of features VS. 
cross-validation scores # --------------------------------------------------- import matplotlib.pyplot as plt import pandas as pd data = { key: value for key, value in rfecv.cv_results_.items() if key in ["n_features", "mean_test_score", "std_test_score"] } cv_results = pd.DataFrame(data) plt.figure() plt.xlabel("Number of features selected") plt.ylabel("Mean test accuracy") plt.errorbar( x=cv_results["n_features"], y=cv_results["mean_test_score"], yerr=cv_results["std_test_score"], ) plt.title("Recursive Feature Elimination \nwith correlated features") plt.show() # %% # From the plot above one can further notice a plateau of equivalent scores # (similar mean value and overlapping errorbars) for 3 to 5 selected features. # This is the result of introducing correlated features. Indeed, the optimal # model selected by the RFE can lie within this range, depending on the # cross-validation technique. The test accuracy decreases above 5 selected # features, this is, keeping non-informative features leads to over-fitting and # is therefore detrimental for the statistical performance of the models. # %% import numpy as np for i in range(cv.n_splits): mask = rfecv.cv_results_[f"split{i}_support"][ rfecv.n_features_ - 1 ] # mask of features selected by the RFE features_selected = np.ma.compressed(np.ma.masked_array(feat_names, mask=1 - mask)) print(f"Features selected in fold {i}: {features_selected}") # %% # In the five folds, the selected features are consistent. This is good news, # it means that the selection is stable across folds, and it confirms that # these features are the most informative ones.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/feature_selection/plot_feature_selection_pipeline.py
examples/feature_selection/plot_feature_selection_pipeline.py
""" ================== Pipeline ANOVA SVM ================== This example shows how a feature selection can be easily integrated within a machine learning pipeline. We also show that you can easily inspect part of the pipeline. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # We will start by generating a binary classification dataset. Subsequently, we # will divide the dataset into two subsets. from sklearn.datasets import make_classification from sklearn.model_selection import train_test_split X, y = make_classification( n_features=20, n_informative=3, n_redundant=0, n_classes=2, n_clusters_per_class=2, random_state=42, ) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) # %% # A common mistake done with feature selection is to search a subset of # discriminative features on the full dataset, instead of only using the # training set. The usage of scikit-learn :func:`~sklearn.pipeline.Pipeline` # prevents to make such mistake. # # Here, we will demonstrate how to build a pipeline where the first step will # be the feature selection. # # When calling `fit` on the training data, a subset of feature will be selected # and the index of these selected features will be stored. The feature selector # will subsequently reduce the number of features, and pass this subset to the # classifier which will be trained. from sklearn.feature_selection import SelectKBest, f_classif from sklearn.pipeline import make_pipeline from sklearn.svm import LinearSVC anova_filter = SelectKBest(f_classif, k=3) clf = LinearSVC() anova_svm = make_pipeline(anova_filter, clf) anova_svm.fit(X_train, y_train) # %% # Once the training is complete, we can predict on new unseen samples. In this # case, the feature selector will only select the most discriminative features # based on the information stored during training. Then, the data will be # passed to the classifier which will make the prediction. 
# # Here, we show the final metrics via a classification report. from sklearn.metrics import classification_report y_pred = anova_svm.predict(X_test) print(classification_report(y_test, y_pred)) # %% # Be aware that you can inspect a step in the pipeline. For instance, we might # be interested about the parameters of the classifier. Since we selected # three features, we expect to have three coefficients. anova_svm[-1].coef_ # %% # However, we do not know which features were selected from the original # dataset. We could proceed by several manners. Here, we will invert the # transformation of these coefficients to get information about the original # space. anova_svm[:-1].inverse_transform(anova_svm[-1].coef_) # %% # We can see that the features with non-zero coefficients are the selected # features by the first step.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/feature_selection/plot_feature_selection.py
examples/feature_selection/plot_feature_selection.py
""" ============================ Univariate Feature Selection ============================ This notebook is an example of using univariate feature selection to improve classification accuracy on a noisy dataset. In this example, some noisy (non informative) features are added to the iris dataset. Support vector machine (SVM) is used to classify the dataset both before and after applying univariate feature selection. For each feature, we plot the p-values for the univariate feature selection and the corresponding weights of SVMs. With this, we will compare model accuracy and examine the impact of univariate feature selection on model weights. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Generate sample data # -------------------- # import numpy as np from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split # The iris dataset X, y = load_iris(return_X_y=True) # Some noisy data not correlated E = np.random.RandomState(42).uniform(0, 0.1, size=(X.shape[0], 20)) # Add the noisy data to the informative features X = np.hstack((X, E)) # Split dataset to select feature and evaluate the classifier X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=0) # %% # Univariate feature selection # ---------------------------- # # Univariate feature selection with F-test for feature scoring. # We use the default selection function to select # the four most significant features. 
from sklearn.feature_selection import SelectKBest, f_classif selector = SelectKBest(f_classif, k=4) selector.fit(X_train, y_train) scores = -np.log10(selector.pvalues_) scores /= scores.max() # %% import matplotlib.pyplot as plt X_indices = np.arange(X.shape[-1]) plt.figure(1) plt.clf() plt.bar(X_indices - 0.05, scores, width=0.2) plt.title("Feature univariate score") plt.xlabel("Feature number") plt.ylabel(r"Univariate score ($-Log(p_{value})$)") plt.show() # %% # In the total set of features, only the 4 of the original features are significant. # We can see that they have the highest score with univariate feature # selection. # %% # Compare with SVMs # ----------------- # # Without univariate feature selection from sklearn.pipeline import make_pipeline from sklearn.preprocessing import MinMaxScaler from sklearn.svm import LinearSVC clf = make_pipeline(MinMaxScaler(), LinearSVC()) clf.fit(X_train, y_train) print( "Classification accuracy without selecting features: {:.3f}".format( clf.score(X_test, y_test) ) ) svm_weights = np.abs(clf[-1].coef_).sum(axis=0) svm_weights /= svm_weights.sum() # %% # After univariate feature selection clf_selected = make_pipeline(SelectKBest(f_classif, k=4), MinMaxScaler(), LinearSVC()) clf_selected.fit(X_train, y_train) print( "Classification accuracy after univariate feature selection: {:.3f}".format( clf_selected.score(X_test, y_test) ) ) svm_weights_selected = np.abs(clf_selected[-1].coef_).sum(axis=0) svm_weights_selected /= svm_weights_selected.sum() # %% plt.bar( X_indices - 0.45, scores, width=0.2, label=r"Univariate score ($-Log(p_{value})$)" ) plt.bar(X_indices - 0.25, svm_weights, width=0.2, label="SVM weight") plt.bar( X_indices[selector.get_support()] - 0.05, svm_weights_selected, width=0.2, label="SVM weights after selection", ) plt.title("Comparing feature selection") plt.xlabel("Feature number") plt.yticks(()) plt.axis("tight") plt.legend(loc="upper right") plt.show() # %% # Without univariate feature selection, the 
SVM assigns a large weight # to the first 4 original significant features, but also selects many of the # non-informative features. Applying univariate feature selection before # the SVM increases the SVM weight attributed to the significant features, # and will thus improve classification.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/feature_selection/plot_rfe_digits.py
examples/feature_selection/plot_rfe_digits.py
""" ============================= Recursive feature elimination ============================= This example demonstrates how Recursive Feature Elimination (:class:`~sklearn.feature_selection.RFE`) can be used to determine the importance of individual pixels for classifying handwritten digits. :class:`~sklearn.feature_selection.RFE` recursively removes the least significant features, assigning ranks based on their importance, where higher `ranking_` values denote lower importance. The ranking is visualized using both shades of blue and pixel annotations for clarity. As expected, pixels positioned at the center of the image tend to be more predictive than those near the edges. .. note:: See also :ref:`sphx_glr_auto_examples_feature_selection_plot_rfe_with_cross_validation.py` """ # noqa: E501 # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt from sklearn.datasets import load_digits from sklearn.feature_selection import RFE from sklearn.linear_model import LogisticRegression from sklearn.pipeline import Pipeline from sklearn.preprocessing import MinMaxScaler # Load the digits dataset digits = load_digits() X = digits.images.reshape((len(digits.images), -1)) y = digits.target pipe = Pipeline( [ ("scaler", MinMaxScaler()), ("rfe", RFE(estimator=LogisticRegression(), n_features_to_select=1, step=1)), ] ) pipe.fit(X, y) ranking = pipe.named_steps["rfe"].ranking_.reshape(digits.images[0].shape) # Plot pixel ranking plt.matshow(ranking, cmap=plt.cm.Blues) # Add annotations for pixel numbers for i in range(ranking.shape[0]): for j in range(ranking.shape[1]): plt.text(j, i, str(ranking[i, j]), ha="center", va="center", color="black") plt.colorbar() plt.title("Ranking of pixels with RFE\n(Logistic Regression)") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/feature_selection/plot_f_test_vs_mi.py
examples/feature_selection/plot_f_test_vs_mi.py
""" =========================================== Comparison of F-test and mutual information =========================================== This example illustrates the differences between univariate F-test statistics and mutual information. We consider 3 features x_1, x_2, x_3 distributed uniformly over [0, 1], the target depends on them as follows: y = x_1 + sin(6 * pi * x_2) + 0.1 * N(0, 1), that is the third feature is completely irrelevant. The code below plots the dependency of y against individual x_i and normalized values of univariate F-tests statistics and mutual information. As F-test captures only linear dependency, it rates x_1 as the most discriminative feature. On the other hand, mutual information can capture any kind of dependency between variables and it rates x_2 as the most discriminative feature, which probably agrees better with our intuitive perception for this example. Both methods correctly mark x_3 as irrelevant. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn.feature_selection import f_regression, mutual_info_regression np.random.seed(0) X = np.random.rand(1000, 3) y = X[:, 0] + np.sin(6 * np.pi * X[:, 1]) + 0.1 * np.random.randn(1000) f_test, _ = f_regression(X, y) f_test /= np.max(f_test) mi = mutual_info_regression(X, y) mi /= np.max(mi) plt.figure(figsize=(15, 5)) for i in range(3): plt.subplot(1, 3, i + 1) plt.scatter(X[:, i], y, edgecolor="black", s=20) plt.xlabel("$x_{}$".format(i + 1), fontsize=14) if i == 0: plt.ylabel("$y$", fontsize=14) plt.title("F-test={:.2f}, MI={:.2f}".format(f_test[i], mi[i]), fontsize=16) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/feature_selection/plot_select_from_model_diabetes.py
examples/feature_selection/plot_select_from_model_diabetes.py
""" ============================================ Model-based and sequential feature selection ============================================ This example illustrates and compares two approaches for feature selection: :class:`~sklearn.feature_selection.SelectFromModel` which is based on feature importance, and :class:`~sklearn.feature_selection.SequentialFeatureSelector` which relies on a greedy approach. We use the Diabetes dataset, which consists of 10 features collected from 442 diabetes patients. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Loading the data # ---------------- # # We first load the diabetes dataset which is available from within # scikit-learn, and print its description: from sklearn.datasets import load_diabetes diabetes = load_diabetes() X, y = diabetes.data, diabetes.target print(diabetes.DESCR) # %% # Feature importance from coefficients # ------------------------------------ # # To get an idea of the importance of the features, we are going to use the # :class:`~sklearn.linear_model.RidgeCV` estimator. The features with the # highest absolute `coef_` value are considered the most important. # We can observe the coefficients directly without needing to scale them (or # scale the data) because from the description above, we know that the features # were already standardized. # For a more complete example on the interpretations of the coefficients of # linear models, you may refer to # :ref:`sphx_glr_auto_examples_inspection_plot_linear_model_coefficient_interpretation.py`. 
# noqa: E501 import matplotlib.pyplot as plt import numpy as np from sklearn.linear_model import RidgeCV ridge = RidgeCV(alphas=np.logspace(-6, 6, num=5)).fit(X, y) importance = np.abs(ridge.coef_) feature_names = np.array(diabetes.feature_names) plt.bar(height=importance, x=feature_names) plt.title("Feature importances via coefficients") plt.show() # %% # Selecting features based on importance # -------------------------------------- # # Now we want to select the two features which are the most important according # to the coefficients. The :class:`~sklearn.feature_selection.SelectFromModel` # is meant just for that. :class:`~sklearn.feature_selection.SelectFromModel` # accepts a `threshold` parameter and will select the features whose importance # (defined by the coefficients) are above this threshold. # # Since we want to select only 2 features, we will set this threshold slightly # above the coefficient of third most important feature. from time import time from sklearn.feature_selection import SelectFromModel threshold = np.sort(importance)[-3] + 0.01 tic = time() sfm = SelectFromModel(ridge, threshold=threshold).fit(X, y) toc = time() print(f"Features selected by SelectFromModel: {feature_names[sfm.get_support()]}") print(f"Done in {toc - tic:.3f}s") # %% # Selecting features with Sequential Feature Selection # ---------------------------------------------------- # # Another way of selecting features is to use # :class:`~sklearn.feature_selection.SequentialFeatureSelector` # (SFS). SFS is a greedy procedure where, at each iteration, we choose the best # new feature to add to our selected features based a cross-validation score. # That is, we start with 0 features and choose the best single feature with the # highest score. The procedure is repeated until we reach the desired number of # selected features. # # We can also go in the reverse direction (backward SFS), *i.e.* start with all # the features and greedily choose features to remove one by one. 
We illustrate # both approaches here. from sklearn.feature_selection import SequentialFeatureSelector tic_fwd = time() sfs_forward = SequentialFeatureSelector( ridge, n_features_to_select=2, direction="forward" ).fit(X, y) toc_fwd = time() tic_bwd = time() sfs_backward = SequentialFeatureSelector( ridge, n_features_to_select=2, direction="backward" ).fit(X, y) toc_bwd = time() print( "Features selected by forward sequential selection: " f"{feature_names[sfs_forward.get_support()]}" ) print(f"Done in {toc_fwd - tic_fwd:.3f}s") print( "Features selected by backward sequential selection: " f"{feature_names[sfs_backward.get_support()]}" ) print(f"Done in {toc_bwd - tic_bwd:.3f}s") # %% # Interestingly, forward and backward selection have selected the same set of # features. In general, this isn't the case and the two methods would lead to # different results. # # We also note that the features selected by SFS differ from those selected by # feature importance: SFS selects `bmi` instead of `s1`. This does sound # reasonable though, since `bmi` corresponds to the third most important # feature according to the coefficients. It is quite remarkable considering # that SFS makes no use of the coefficients at all. # # To finish with, we should note that # :class:`~sklearn.feature_selection.SelectFromModel` is significantly faster # than SFS. Indeed, :class:`~sklearn.feature_selection.SelectFromModel` only # needs to fit a model once, while SFS needs to cross-validate many different # models for each of the iterations. SFS however works with any model, while # :class:`~sklearn.feature_selection.SelectFromModel` requires the underlying # estimator to expose a `coef_` attribute or a `feature_importances_` # attribute. The forward SFS is faster than the backward SFS because it only # needs to perform `n_features_to_select = 2` iterations, while the backward # SFS needs to perform `n_features - n_features_to_select = 8` iterations. 
# # Using negative tolerance values # ------------------------------- # # :class:`~sklearn.feature_selection.SequentialFeatureSelector` can be used # to remove features present in the dataset and return a # smaller subset of the original features with `direction="backward"` # and a negative value of `tol`. # # We begin by loading the Breast Cancer dataset, consisting of 30 different # features and 569 samples. import numpy as np from sklearn.datasets import load_breast_cancer breast_cancer_data = load_breast_cancer() X, y = breast_cancer_data.data, breast_cancer_data.target feature_names = np.array(breast_cancer_data.feature_names) print(breast_cancer_data.DESCR) # %% # We will make use of the :class:`~sklearn.linear_model.LogisticRegression` # estimator with :class:`~sklearn.feature_selection.SequentialFeatureSelector` # to perform the feature selection. from sklearn.linear_model import LogisticRegression from sklearn.metrics import roc_auc_score from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler for tol in [-1e-2, -1e-3, -1e-4]: start = time() feature_selector = SequentialFeatureSelector( LogisticRegression(), n_features_to_select="auto", direction="backward", scoring="roc_auc", tol=tol, n_jobs=2, ) model = make_pipeline(StandardScaler(), feature_selector, LogisticRegression()) model.fit(X, y) end = time() print(f"\ntol: {tol}") print(f"Features selected: {feature_names[model[1].get_support()]}") print(f"ROC AUC score: {roc_auc_score(y, model.predict_proba(X)[:, 1]):.3f}") print(f"Done in {end - start:.3f}s") # %% # We can see that the number of features selected tend to increase as negative # values of `tol` approach to zero. The time taken for feature selection also # decreases as the values of `tol` come closer to zero.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/impute/plot_missing_values.py
examples/impute/plot_missing_values.py
""" ==================================================== Imputing missing values before building an estimator ==================================================== Missing values can be replaced by the mean, the median or the most frequent value using the basic :class:`~sklearn.impute.SimpleImputer`. In this example we will investigate different imputation techniques: - imputation by the constant value 0 - imputation by the mean value of each feature - k nearest neighbor imputation - iterative imputation In all the cases, for each feature, we add a new feature indicating the missingness. We will use two datasets: Diabetes dataset which consists of 10 feature variables collected from diabetes patients with an aim to predict disease progression and California housing dataset for which the target is the median house value for California districts. As neither of these datasets have missing values, we will remove some values to create new versions with artificially missing data. The performance of :class:`~sklearn.ensemble.RandomForestRegressor` on the full original dataset is then compared the performance on the altered datasets with the artificially missing values imputed using different techniques. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Download the data and make missing values sets # ############################################## # # First we download the two datasets. Diabetes dataset is shipped with # scikit-learn. It has 442 entries, each with 10 features. California housing # dataset is much larger with 20640 entries and 8 features. It needs to be # downloaded. We will only use the first 300 entries for the sake of speeding # up the calculations but feel free to use the whole dataset. 
# import numpy as np from sklearn.datasets import fetch_california_housing, load_diabetes X_diabetes, y_diabetes = load_diabetes(return_X_y=True) X_california, y_california = fetch_california_housing(return_X_y=True) X_diabetes = X_diabetes[:300] y_diabetes = y_diabetes[:300] X_california = X_california[:300] y_california = y_california[:300] def add_missing_values(X_full, y_full, rng): n_samples, n_features = X_full.shape # Add missing values in 75% of the lines missing_rate = 0.75 n_missing_samples = int(n_samples * missing_rate) missing_samples = np.zeros(n_samples, dtype=bool) missing_samples[:n_missing_samples] = True rng.shuffle(missing_samples) missing_features = rng.randint(0, n_features, n_missing_samples) X_missing = X_full.copy() X_missing[missing_samples, missing_features] = np.nan y_missing = y_full.copy() return X_missing, y_missing rng = np.random.RandomState(42) X_miss_diabetes, y_miss_diabetes = add_missing_values(X_diabetes, y_diabetes, rng) X_miss_california, y_miss_california = add_missing_values( X_california, y_california, rng ) # %% # Impute the missing data and score # ################################# # Now we will write a function which will score the results on the differently # imputed data, including the case of no imputation for full data. # We will use :class:`~sklearn.ensemble.RandomForestRegressor` for the target # regression. 
# from sklearn.ensemble import RandomForestRegressor # To use the experimental IterativeImputer, we need to explicitly ask for it: from sklearn.experimental import enable_iterative_imputer # noqa: F401 from sklearn.impute import IterativeImputer, KNNImputer, SimpleImputer from sklearn.model_selection import cross_val_score from sklearn.pipeline import make_pipeline from sklearn.preprocessing import RobustScaler N_SPLITS = 4 def get_score(X, y, imputer=None): regressor = RandomForestRegressor(random_state=0) if imputer is not None: estimator = make_pipeline(imputer, regressor) else: estimator = regressor scores = cross_val_score( estimator, X, y, scoring="neg_mean_squared_error", cv=N_SPLITS ) return scores.mean(), scores.std() x_labels = [] mses_diabetes = np.zeros(5) stds_diabetes = np.zeros(5) mses_california = np.zeros(5) stds_california = np.zeros(5) # %% # Estimate the score # ------------------ # First, we want to estimate the score on the original data: # mses_diabetes[0], stds_diabetes[0] = get_score(X_diabetes, y_diabetes) mses_california[0], stds_california[0] = get_score(X_california, y_california) x_labels.append("Full Data") # %% # Replace missing values by 0 # --------------------------- # # Now we will estimate the score on the data where the missing values are # replaced by 0: # imputer = SimpleImputer(strategy="constant", fill_value=0, add_indicator=True) mses_diabetes[1], stds_diabetes[1] = get_score( X_miss_diabetes, y_miss_diabetes, imputer ) mses_california[1], stds_california[1] = get_score( X_miss_california, y_miss_california, imputer ) x_labels.append("Zero Imputation") # %% # Impute missing values with mean # ------------------------------- # imputer = SimpleImputer(strategy="mean", add_indicator=True) mses_diabetes[2], stds_diabetes[2] = get_score( X_miss_diabetes, y_miss_diabetes, imputer ) mses_california[2], stds_california[2] = get_score( X_miss_california, y_miss_california, imputer ) x_labels.append("Mean Imputation") # %% # 
kNN-imputation of the missing values # ------------------------------------ # # :class:`~sklearn.impute.KNNImputer` imputes missing values using the weighted # or unweighted mean of the desired number of nearest neighbors. If your features # have vastly different scales (as in the California housing dataset), # consider re-scaling them to potentially improve performance. # imputer = KNNImputer(add_indicator=True) mses_diabetes[3], stds_diabetes[3] = get_score( X_miss_diabetes, y_miss_diabetes, imputer ) mses_california[3], stds_california[3] = get_score( X_miss_california, y_miss_california, make_pipeline(RobustScaler(), imputer) ) x_labels.append("KNN Imputation") # %% # Iterative imputation of the missing values # ------------------------------------------ # # Another option is the :class:`~sklearn.impute.IterativeImputer`. This uses # round-robin regression, modeling each feature with missing values as a # function of other features, in turn. We use the class's default choice # of the regressor model (:class:`~sklearn.linear_model.BayesianRidge`) # to predict missing feature values. The performance of the predictor # may be negatively affected by vastly different scales of the features, # so we re-scale the features in the California housing dataset. 
# imputer = IterativeImputer(add_indicator=True) mses_diabetes[4], stds_diabetes[4] = get_score( X_miss_diabetes, y_miss_diabetes, imputer ) mses_california[4], stds_california[4] = get_score( X_miss_california, y_miss_california, make_pipeline(RobustScaler(), imputer) ) x_labels.append("Iterative Imputation") mses_diabetes = mses_diabetes * -1 mses_california = mses_california * -1 # %% # Plot the results # ################ # # Finally we are going to visualize the score: # import matplotlib.pyplot as plt n_bars = len(mses_diabetes) xval = np.arange(n_bars) colors = ["r", "g", "b", "orange", "black"] # plot diabetes results plt.figure(figsize=(12, 6)) ax1 = plt.subplot(121) for j in xval: ax1.barh( j, mses_diabetes[j], xerr=stds_diabetes[j], color=colors[j], alpha=0.6, align="center", ) ax1.set_title("Imputation Techniques with Diabetes Data") ax1.set_xlim(left=np.min(mses_diabetes) * 0.9, right=np.max(mses_diabetes) * 1.1) ax1.set_yticks(xval) ax1.set_xlabel("MSE") ax1.invert_yaxis() ax1.set_yticklabels(x_labels) # plot california dataset results ax2 = plt.subplot(122) for j in xval: ax2.barh( j, mses_california[j], xerr=stds_california[j], color=colors[j], alpha=0.6, align="center", ) ax2.set_title("Imputation Techniques with California Data") ax2.set_yticks(xval) ax2.set_xlabel("MSE") ax2.invert_yaxis() ax2.set_yticklabels([""] * n_bars) plt.show() # %% # You can also try different techniques. For instance, the median is a more # robust estimator for data with high magnitude variables which could dominate # results (otherwise known as a 'long tail').
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/impute/plot_iterative_imputer_variants_comparison.py
examples/impute/plot_iterative_imputer_variants_comparison.py
""" ========================================================= Imputing missing values with variants of IterativeImputer ========================================================= .. currentmodule:: sklearn The :class:`~impute.IterativeImputer` class is very flexible - it can be used with a variety of estimators to do round-robin regression, treating every variable as an output in turn. In this example we compare some estimators for the purpose of missing feature imputation with :class:`~impute.IterativeImputer`: * :class:`~linear_model.BayesianRidge`: regularized linear regression * :class:`~ensemble.RandomForestRegressor`: forests of randomized trees regression * :func:`~pipeline.make_pipeline` (:class:`~kernel_approximation.Nystroem`, :class:`~linear_model.Ridge`): a pipeline with the expansion of a degree 2 polynomial kernel and regularized linear regression * :class:`~neighbors.KNeighborsRegressor`: comparable to other KNN imputation approaches Of particular interest is the ability of :class:`~impute.IterativeImputer` to mimic the behavior of missForest, a popular imputation package for R. Note that :class:`~neighbors.KNeighborsRegressor` is different from KNN imputation, which learns from samples with missing values by using a distance metric that accounts for missing values, rather than imputing them. The goal is to compare different estimators to see which one is best for the :class:`~impute.IterativeImputer` when using a :class:`~linear_model.BayesianRidge` estimator on the California housing dataset with a single value randomly removed from each row. For this particular pattern of missing values we see that :class:`~linear_model.BayesianRidge` and :class:`~ensemble.RandomForestRegressor` give the best results. It should be noted that some estimators such as :class:`~ensemble.HistGradientBoostingRegressor` can natively deal with missing features and are often recommended over building pipelines with complex and costly missing values imputation strategies. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np import pandas as pd from sklearn.datasets import fetch_california_housing from sklearn.ensemble import RandomForestRegressor # To use this experimental feature, we need to explicitly ask for it: from sklearn.experimental import enable_iterative_imputer # noqa: F401 from sklearn.impute import IterativeImputer, SimpleImputer from sklearn.kernel_approximation import Nystroem from sklearn.linear_model import BayesianRidge, Ridge from sklearn.model_selection import cross_val_score from sklearn.neighbors import KNeighborsRegressor from sklearn.pipeline import make_pipeline from sklearn.preprocessing import RobustScaler N_SPLITS = 5 X_full, y_full = fetch_california_housing(return_X_y=True) # ~2k samples is enough for the purpose of the example. # Remove the following two lines for a slower run with different error bars. X_full = X_full[::10] y_full = y_full[::10] n_samples, n_features = X_full.shape def compute_score_for(X, y, imputer=None): # We scale data before imputation and training a target estimator, # because our target estimator and some of the imputers assume # that the features have similar scales. 
if imputer is None: estimator = make_pipeline(RobustScaler(), BayesianRidge()) else: estimator = make_pipeline(RobustScaler(), imputer, BayesianRidge()) return cross_val_score( estimator, X, y, scoring="neg_mean_squared_error", cv=N_SPLITS ) # Estimate the score on the entire dataset, with no missing values score_full_data = pd.DataFrame( compute_score_for(X_full, y_full), columns=["Full Data"], ) # Add a single missing value to each row rng = np.random.RandomState(0) X_missing = X_full.copy() y_missing = y_full missing_samples = np.arange(n_samples) missing_features = rng.choice(n_features, n_samples, replace=True) X_missing[missing_samples, missing_features] = np.nan # Estimate the score after imputation (mean and median strategies) score_simple_imputer = pd.DataFrame() for strategy in ("mean", "median"): score_simple_imputer[strategy] = compute_score_for( X_missing, y_missing, SimpleImputer(strategy=strategy) ) # Estimate the score after iterative imputation of the missing values # with different estimators named_estimators = [ ("Bayesian Ridge", BayesianRidge()), ( "Random Forest", RandomForestRegressor( # We tuned the hyperparameters of the RandomForestRegressor to get a good # enough predictive performance for a restricted execution time. n_estimators=5, max_depth=10, bootstrap=True, max_samples=0.5, n_jobs=2, random_state=0, ), ), ( "Nystroem + Ridge", make_pipeline( Nystroem(kernel="polynomial", degree=2, random_state=0), Ridge(alpha=1e4) ), ), ( "k-NN", KNeighborsRegressor(n_neighbors=10), ), ] score_iterative_imputer = pd.DataFrame() # Iterative imputer is sensitive to the tolerance and # dependent on the estimator used internally. # We tuned the tolerance to keep this example run with limited computational # resources while not changing the results too much compared to keeping the # stricter default value for the tolerance parameter. 
tolerances = (1e-3, 1e-1, 1e-1, 1e-2) for (name, impute_estimator), tol in zip(named_estimators, tolerances): score_iterative_imputer[name] = compute_score_for( X_missing, y_missing, IterativeImputer( random_state=0, estimator=impute_estimator, max_iter=40, tol=tol ), ) scores = pd.concat( [score_full_data, score_simple_imputer, score_iterative_imputer], keys=["Original", "SimpleImputer", "IterativeImputer"], axis=1, ) # plot california housing results fig, ax = plt.subplots(figsize=(13, 6)) means = -scores.mean() errors = scores.std() means.plot.barh(xerr=errors, ax=ax) ax.set_title("California Housing Regression with Different Imputation Methods") ax.set_xlabel("MSE (smaller is better)") ax.set_yticks(np.arange(means.shape[0])) ax.set_yticklabels([" w/ ".join(label) for label in means.index.tolist()]) plt.tight_layout(pad=1) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/mixture/plot_gmm_init.py
examples/mixture/plot_gmm_init.py
""" ========================== GMM Initialization Methods ========================== Examples of the different methods of initialization in Gaussian Mixture Models See :ref:`gmm` for more information on the estimator. Here we generate some sample data with four easy to identify clusters. The purpose of this example is to show the four different methods for the initialization parameter *init_param*. The four initializations are *kmeans* (default), *random*, *random_from_data* and *k-means++*. Orange diamonds represent the initialization centers for the gmm generated by the *init_param*. The rest of the data is represented as crosses and the colouring represents the eventual associated classification after the GMM has finished. The numbers in the top right of each subplot represent the number of iterations taken for the GaussianMixture to converge and the relative time taken for the initialization part of the algorithm to run. The shorter initialization times tend to have a greater number of iterations to converge. The initialization time is the ratio of the time taken for that method versus the time taken for the default *kmeans* method. As you can see all three alternative methods take less time to initialize when compared to *kmeans*. In this example, when initialized with *random_from_data* or *random* the model takes more iterations to converge. Here *k-means++* does a good job of both low time to initialize and low number of GaussianMixture iterations to converge. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from timeit import default_timer as timer import matplotlib.pyplot as plt import numpy as np from sklearn.datasets._samples_generator import make_blobs from sklearn.mixture import GaussianMixture from sklearn.utils.extmath import row_norms # Generate some data X, y_true = make_blobs(n_samples=4000, centers=4, cluster_std=0.60, random_state=0) X = X[:, ::-1] n_samples = 4000 n_components = 4 x_squared_norms = row_norms(X, squared=True) def get_initial_means(X, init_params, r): # Run a GaussianMixture with max_iter=0 to output the initialization means gmm = GaussianMixture( n_components=4, init_params=init_params, tol=1e-9, max_iter=0, random_state=r ).fit(X) return gmm.means_ methods = ["kmeans", "random_from_data", "k-means++", "random"] colors = ["navy", "turquoise", "cornflowerblue", "darkorange"] times_init = {} relative_times = {} plt.figure(figsize=(4 * len(methods) // 2, 6)) plt.subplots_adjust( bottom=0.1, top=0.9, hspace=0.15, wspace=0.05, left=0.05, right=0.95 ) for n, method in enumerate(methods): r = np.random.RandomState(seed=1234) plt.subplot(2, len(methods) // 2, n + 1) start = timer() ini = get_initial_means(X, method, r) end = timer() init_time = end - start gmm = GaussianMixture( n_components=4, means_init=ini, tol=1e-9, max_iter=2000, random_state=r ).fit(X) times_init[method] = init_time for i, color in enumerate(colors): data = X[gmm.predict(X) == i] plt.scatter(data[:, 0], data[:, 1], color=color, marker="x") plt.scatter( ini[:, 0], ini[:, 1], s=75, marker="D", c="orange", lw=1.5, edgecolors="black" ) relative_times[method] = times_init[method] / times_init[methods[0]] plt.xticks(()) plt.yticks(()) plt.title(method, loc="left", fontsize=12) plt.title( "Iter %i | Init Time %.2fx" % (gmm.n_iter_, relative_times[method]), loc="right", fontsize=10, ) plt.suptitle("GMM iterations and relative time taken to initialize") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/mixture/plot_gmm_pdf.py
examples/mixture/plot_gmm_pdf.py
""" ========================================= Density Estimation for a Gaussian mixture ========================================= Plot the density estimation of a mixture of two Gaussians. Data is generated from two Gaussians with different centers and covariance matrices. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from matplotlib.colors import LogNorm from sklearn import mixture n_samples = 300 # generate random sample, two components np.random.seed(0) # generate spherical data centered on (20, 20) shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20]) # generate zero centered stretched Gaussian data C = np.array([[0.0, -0.7], [3.5, 0.7]]) stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C) # concatenate the two datasets into the final training set X_train = np.vstack([shifted_gaussian, stretched_gaussian]) # fit a Gaussian Mixture Model with two components clf = mixture.GaussianMixture(n_components=2, covariance_type="full") clf.fit(X_train) # display predicted scores by the model as a contour plot x = np.linspace(-20.0, 30.0) y = np.linspace(-20.0, 40.0) X, Y = np.meshgrid(x, y) XX = np.array([X.ravel(), Y.ravel()]).T Z = -clf.score_samples(XX) Z = Z.reshape(X.shape) CS = plt.contour( X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0), levels=np.logspace(0, 3, 10) ) CB = plt.colorbar(CS, shrink=0.8, extend="both") plt.scatter(X_train[:, 0], X_train[:, 1], 0.8) plt.title("Negative log-likelihood predicted by a GMM") plt.axis("tight") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/mixture/plot_concentration_prior.py
examples/mixture/plot_concentration_prior.py
""" ======================================================================== Concentration Prior Type Analysis of Variation Bayesian Gaussian Mixture ======================================================================== This example plots the ellipsoids obtained from a toy dataset (mixture of three Gaussians) fitted by the ``BayesianGaussianMixture`` class models with a Dirichlet distribution prior (``weight_concentration_prior_type='dirichlet_distribution'``) and a Dirichlet process prior (``weight_concentration_prior_type='dirichlet_process'``). On each figure, we plot the results for three different values of the weight concentration prior. The ``BayesianGaussianMixture`` class can adapt its number of mixture components automatically. The parameter ``weight_concentration_prior`` has a direct link with the resulting number of components with non-zero weights. Specifying a low value for the concentration prior will make the model put most of the weight on few components set the remaining components weights very close to zero. High values of the concentration prior will allow a larger number of components to be active in the mixture. The Dirichlet process prior allows to define an infinite number of components and automatically selects the correct number of components: it activates a component only if it is necessary. On the contrary the classical finite mixture model with a Dirichlet distribution prior will favor more uniformly weighted components and therefore tends to divide natural clusters into unnecessary sub-components. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib as mpl import matplotlib.gridspec as gridspec import matplotlib.pyplot as plt import numpy as np from sklearn.mixture import BayesianGaussianMixture def plot_ellipses(ax, weights, means, covars): for n in range(means.shape[0]): eig_vals, eig_vecs = np.linalg.eigh(covars[n]) unit_eig_vec = eig_vecs[0] / np.linalg.norm(eig_vecs[0]) angle = np.arctan2(unit_eig_vec[1], unit_eig_vec[0]) # Ellipse needs degrees angle = 180 * angle / np.pi # eigenvector normalization eig_vals = 2 * np.sqrt(2) * np.sqrt(eig_vals) ell = mpl.patches.Ellipse( means[n], eig_vals[0], eig_vals[1], angle=180 + angle, edgecolor="black" ) ell.set_clip_box(ax.bbox) ell.set_alpha(weights[n]) ell.set_facecolor("#56B4E9") ax.add_artist(ell) def plot_results(ax1, ax2, estimator, X, y, title, plot_title=False): ax1.set_title(title) ax1.scatter(X[:, 0], X[:, 1], s=5, marker="o", color=colors[y], alpha=0.8) ax1.set_xlim(-2.0, 2.0) ax1.set_ylim(-3.0, 3.0) ax1.set_xticks(()) ax1.set_yticks(()) plot_ellipses(ax1, estimator.weights_, estimator.means_, estimator.covariances_) ax2.get_xaxis().set_tick_params(direction="out") ax2.yaxis.grid(True, alpha=0.7) for k, w in enumerate(estimator.weights_): ax2.bar( k, w, width=0.9, color="#56B4E9", zorder=3, align="center", edgecolor="black", ) ax2.text(k, w + 0.007, "%.1f%%" % (w * 100.0), horizontalalignment="center") ax2.set_xlim(-0.6, 2 * n_components - 0.4) ax2.set_ylim(0.0, 1.1) ax2.tick_params(axis="y", which="both", left=False, right=False, labelleft=False) ax2.tick_params(axis="x", which="both", top=False) if plot_title: ax1.set_ylabel("Estimated Mixtures") ax2.set_ylabel("Weight of each component") # Parameters of the dataset random_state, n_components, n_features = 2, 3, 2 colors = np.array(["#0072B2", "#F0E442", "#D55E00"]) covars = np.array( [[[0.7, 0.0], [0.0, 0.1]], [[0.5, 0.0], [0.0, 0.1]], [[0.5, 0.0], [0.0, 0.1]]] ) samples = np.array([200, 500, 200]) 
means = np.array([[0.0, -0.70], [0.0, 0.0], [0.0, 0.70]]) # mean_precision_prior= 0.8 to minimize the influence of the prior estimators = [ ( "Finite mixture with a Dirichlet distribution\n" r"prior and $\gamma_0=$", BayesianGaussianMixture( weight_concentration_prior_type="dirichlet_distribution", n_components=2 * n_components, reg_covar=0, init_params="random", max_iter=1500, mean_precision_prior=0.8, random_state=random_state, ), [0.001, 1, 1000], ), ( "Infinite mixture with a Dirichlet process\n" r"prior and $\gamma_0=$", BayesianGaussianMixture( weight_concentration_prior_type="dirichlet_process", n_components=2 * n_components, reg_covar=0, init_params="random", max_iter=1500, mean_precision_prior=0.8, random_state=random_state, ), [1, 1000, 100000], ), ] # Generate data rng = np.random.RandomState(random_state) X = np.vstack( [ rng.multivariate_normal(means[j], covars[j], samples[j]) for j in range(n_components) ] ) y = np.concatenate([np.full(samples[j], j, dtype=int) for j in range(n_components)]) # Plot results in two different figures for title, estimator, concentrations_prior in estimators: plt.figure(figsize=(4.7 * 3, 8)) plt.subplots_adjust( bottom=0.04, top=0.90, hspace=0.05, wspace=0.05, left=0.03, right=0.99 ) gs = gridspec.GridSpec(3, len(concentrations_prior)) for k, concentration in enumerate(concentrations_prior): estimator.weight_concentration_prior = concentration estimator.fit(X) plot_results( plt.subplot(gs[0:2, k]), plt.subplot(gs[2, k]), estimator, X, y, r"%s$%.1e$" % (title, concentration), plot_title=k == 0, ) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/mixture/plot_gmm.py
examples/mixture/plot_gmm.py
""" ================================= Gaussian Mixture Model Ellipsoids ================================= Plot the confidence ellipsoids of a mixture of two Gaussians obtained with Expectation Maximisation (``GaussianMixture`` class) and Variational Inference (``BayesianGaussianMixture`` class models with a Dirichlet process prior). Both models have access to five components with which to fit the data. Note that the Expectation Maximisation model will necessarily use all five components while the Variational Inference model will effectively only use as many as are needed for a good fit. Here we can see that the Expectation Maximisation model splits some components arbitrarily, because it is trying to fit too many components, while the Dirichlet Process model adapts it number of state automatically. This example doesn't show it, as we're in a low-dimensional space, but another advantage of the Dirichlet process model is that it can fit full covariance matrices effectively even when there are less examples per cluster than there are dimensions in the data, due to regularization properties of the inference algorithm. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import itertools import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np from scipy import linalg from sklearn import mixture color_iter = itertools.cycle(["navy", "c", "cornflowerblue", "gold", "darkorange"]) def plot_results(X, Y_, means, covariances, index, title): splot = plt.subplot(2, 1, 1 + index) for i, (mean, covar, color) in enumerate(zip(means, covariances, color_iter)): v, w = linalg.eigh(covar) v = 2.0 * np.sqrt(2.0) * np.sqrt(v) u = w[0] / linalg.norm(w[0]) # as the DP will not use every component it has access to # unless it needs it, we shouldn't plot the redundant # components. 
if not np.any(Y_ == i): continue plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], 0.8, color=color) # Plot an ellipse to show the Gaussian component angle = np.arctan(u[1] / u[0]) angle = 180.0 * angle / np.pi # convert to degrees ell = mpl.patches.Ellipse(mean, v[0], v[1], angle=180.0 + angle, color=color) ell.set_clip_box(splot.bbox) ell.set_alpha(0.5) splot.add_artist(ell) plt.xlim(-9.0, 5.0) plt.ylim(-3.0, 6.0) plt.xticks(()) plt.yticks(()) plt.title(title) # Number of samples per component n_samples = 500 # Generate random sample, two components np.random.seed(0) C = np.array([[0.0, -0.1], [1.7, 0.4]]) X = np.r_[ np.dot(np.random.randn(n_samples, 2), C), 0.7 * np.random.randn(n_samples, 2) + np.array([-6, 3]), ] # Fit a Gaussian mixture with EM using five components gmm = mixture.GaussianMixture(n_components=5, covariance_type="full").fit(X) plot_results(X, gmm.predict(X), gmm.means_, gmm.covariances_, 0, "Gaussian Mixture") # Fit a Dirichlet process Gaussian mixture using five components dpgmm = mixture.BayesianGaussianMixture(n_components=5, covariance_type="full").fit(X) plot_results( X, dpgmm.predict(X), dpgmm.means_, dpgmm.covariances_, 1, "Bayesian Gaussian Mixture with a Dirichlet process prior", ) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/mixture/plot_gmm_selection.py
examples/mixture/plot_gmm_selection.py
""" ================================ Gaussian Mixture Model Selection ================================ This example shows that model selection can be performed with Gaussian Mixture Models (GMM) using :ref:`information-theory criteria <aic_bic>`. Model selection concerns both the covariance type and the number of components in the model. In this case, both the Akaike Information Criterion (AIC) and the Bayes Information Criterion (BIC) provide the right result, but we only demo the latter as BIC is better suited to identify the true model among a set of candidates. Unlike Bayesian procedures, such inferences are prior-free. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Data generation # --------------- # # We generate two components (each one containing `n_samples`) by randomly # sampling the standard normal distribution as returned by `numpy.random.randn`. # One component is kept spherical yet shifted and re-scaled. The other one is # deformed to have a more general covariance matrix. import numpy as np n_samples = 500 np.random.seed(0) C = np.array([[0.0, -0.1], [1.7, 0.4]]) component_1 = np.dot(np.random.randn(n_samples, 2), C) # general component_2 = 0.7 * np.random.randn(n_samples, 2) + np.array([-4, 1]) # spherical X = np.concatenate([component_1, component_2]) # %% # We can visualize the different components: import matplotlib.pyplot as plt plt.scatter(component_1[:, 0], component_1[:, 1], s=0.8) plt.scatter(component_2[:, 0], component_2[:, 1], s=0.8) plt.title("Gaussian Mixture components") plt.axis("equal") plt.show() # %% # Model training and selection # ---------------------------- # # We vary the number of components from 1 to 6 and the type of covariance # parameters to use: # # - `"full"`: each component has its own general covariance matrix. # - `"tied"`: all components share the same general covariance matrix. # - `"diag"`: each component has its own diagonal covariance matrix. 
# - `"spherical"`: each component has its own single variance. # # We score the different models and keep the best model (the lowest BIC). This # is done by using :class:`~sklearn.model_selection.GridSearchCV` and a # user-defined score function which returns the negative BIC score, as # :class:`~sklearn.model_selection.GridSearchCV` is designed to **maximize** a # score (maximizing the negative BIC is equivalent to minimizing the BIC). # # The best set of parameters and estimator are stored in `best_parameters_` and # `best_estimator_`, respectively. from sklearn.mixture import GaussianMixture from sklearn.model_selection import GridSearchCV def gmm_bic_score(estimator, X): """Callable to pass to GridSearchCV that will use the BIC score.""" # Make it negative since GridSearchCV expects a score to maximize return -estimator.bic(X) param_grid = { "n_components": range(1, 7), "covariance_type": ["spherical", "tied", "diag", "full"], } grid_search = GridSearchCV( GaussianMixture(), param_grid=param_grid, scoring=gmm_bic_score ) grid_search.fit(X) # %% # Plot the BIC scores # ------------------- # # To ease the plotting we can create a `pandas.DataFrame` from the results of # the cross-validation done by the grid search. We re-inverse the sign of the # BIC score to show the effect of minimizing it. 
import pandas as pd df = pd.DataFrame(grid_search.cv_results_)[ ["param_n_components", "param_covariance_type", "mean_test_score"] ] df["mean_test_score"] = -df["mean_test_score"] df = df.rename( columns={ "param_n_components": "Number of components", "param_covariance_type": "Type of covariance", "mean_test_score": "BIC score", } ) df.sort_values(by="BIC score").head() # %% import seaborn as sns sns.catplot( data=df, kind="bar", x="Number of components", y="BIC score", hue="Type of covariance", ) plt.show() # %% # In the present case, the model with 2 components and full covariance (which # corresponds to the true generative model) has the lowest BIC score and is # therefore selected by the grid search. # # Plot the best model # ------------------- # # We plot an ellipse to show each Gaussian component of the selected model. For # such purpose, one needs to find the eigenvalues of the covariance matrices as # returned by the `covariances_` attribute. The shape of such matrices depends # on the `covariance_type`: # # - `"full"`: (`n_components`, `n_features`, `n_features`) # - `"tied"`: (`n_features`, `n_features`) # - `"diag"`: (`n_components`, `n_features`) # - `"spherical"`: (`n_components`,) from matplotlib.patches import Ellipse from scipy import linalg color_iter = sns.color_palette("tab10", 2)[::-1] Y_ = grid_search.predict(X) fig, ax = plt.subplots() for i, (mean, cov, color) in enumerate( zip( grid_search.best_estimator_.means_, grid_search.best_estimator_.covariances_, color_iter, ) ): v, w = linalg.eigh(cov) if not np.any(Y_ == i): continue plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], 0.8, color=color) angle = np.arctan2(w[0][1], w[0][0]) angle = 180.0 * angle / np.pi # convert to degrees v = 2.0 * np.sqrt(2.0) * np.sqrt(v) ellipse = Ellipse(mean, v[0], v[1], angle=180.0 + angle, color=color) ellipse.set_clip_box(fig.bbox) ellipse.set_alpha(0.5) ax.add_artist(ellipse) plt.title( f"Selected GMM: {grid_search.best_params_['covariance_type']} model, " 
f"{grid_search.best_params_['n_components']} components" ) plt.axis("equal") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/mixture/plot_gmm_sin.py
examples/mixture/plot_gmm_sin.py
""" ================================= Gaussian Mixture Model Sine Curve ================================= This example demonstrates the behavior of Gaussian mixture models fit on data that was not sampled from a mixture of Gaussian random variables. The dataset is formed by 100 points loosely spaced following a noisy sine curve. There is therefore no ground truth value for the number of Gaussian components. The first model is a classical Gaussian Mixture Model with 10 components fit with the Expectation-Maximization algorithm. The second model is a Bayesian Gaussian Mixture Model with a Dirichlet process prior fit with variational inference. The low value of the concentration prior makes the model favor a lower number of active components. This models "decides" to focus its modeling power on the big picture of the structure of the dataset: groups of points with alternating directions modeled by non-diagonal covariance matrices. Those alternating directions roughly capture the alternating nature of the original sine signal. The third model is also a Bayesian Gaussian mixture model with a Dirichlet process prior but this time the value of the concentration prior is higher giving the model more liberty to model the fine-grained structure of the data. The result is a mixture with a larger number of active components that is similar to the first model where we arbitrarily decided to fix the number of components to 10. Which model is the best is a matter of subjective judgment: do we want to favor models that only capture the big picture to summarize and explain most of the structure of the data while ignoring the details or do we prefer models that closely follow the high density regions of the signal? The last two panels show how we can sample from the last two models. The resulting samples distributions do not look exactly like the original data distribution. 
The difference primarily stems from the approximation error we made by using a model that assumes that the data was generated by a finite number of Gaussian components instead of a continuous noisy sine curve. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import itertools import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np from scipy import linalg from sklearn import mixture color_iter = itertools.cycle(["navy", "c", "cornflowerblue", "gold", "darkorange"]) def plot_results(X, Y, means, covariances, index, title): splot = plt.subplot(5, 1, 1 + index) for i, (mean, covar, color) in enumerate(zip(means, covariances, color_iter)): v, w = linalg.eigh(covar) v = 2.0 * np.sqrt(2.0) * np.sqrt(v) u = w[0] / linalg.norm(w[0]) # as the DP will not use every component it has access to # unless it needs it, we shouldn't plot the redundant # components. if not np.any(Y == i): continue plt.scatter(X[Y == i, 0], X[Y == i, 1], 0.8, color=color) # Plot an ellipse to show the Gaussian component angle = np.arctan(u[1] / u[0]) angle = 180.0 * angle / np.pi # convert to degrees ell = mpl.patches.Ellipse(mean, v[0], v[1], angle=180.0 + angle, color=color) ell.set_clip_box(splot.bbox) ell.set_alpha(0.5) splot.add_artist(ell) plt.xlim(-6.0, 4.0 * np.pi - 6.0) plt.ylim(-5.0, 5.0) plt.title(title) plt.xticks(()) plt.yticks(()) def plot_samples(X, Y, n_components, index, title): plt.subplot(5, 1, 4 + index) for i, color in zip(range(n_components), color_iter): # as the DP will not use every component it has access to # unless it needs it, we shouldn't plot the redundant # components. 
if not np.any(Y == i): continue plt.scatter(X[Y == i, 0], X[Y == i, 1], 0.8, color=color) plt.xlim(-6.0, 4.0 * np.pi - 6.0) plt.ylim(-5.0, 5.0) plt.title(title) plt.xticks(()) plt.yticks(()) # Parameters n_samples = 100 # Generate random sample following a sine curve np.random.seed(0) X = np.zeros((n_samples, 2)) step = 4.0 * np.pi / n_samples for i in range(X.shape[0]): x = i * step - 6.0 X[i, 0] = x + np.random.normal(0, 0.1) X[i, 1] = 3.0 * (np.sin(x) + np.random.normal(0, 0.2)) plt.figure(figsize=(10, 10)) plt.subplots_adjust( bottom=0.04, top=0.95, hspace=0.2, wspace=0.05, left=0.03, right=0.97 ) # Fit a Gaussian mixture with EM using ten components gmm = mixture.GaussianMixture( n_components=10, covariance_type="full", max_iter=100 ).fit(X) plot_results( X, gmm.predict(X), gmm.means_, gmm.covariances_, 0, "Expectation-maximization" ) dpgmm = mixture.BayesianGaussianMixture( n_components=10, covariance_type="full", weight_concentration_prior=1e-2, weight_concentration_prior_type="dirichlet_process", mean_precision_prior=1e-2, covariance_prior=1e0 * np.eye(2), init_params="random", max_iter=100, random_state=2, ).fit(X) plot_results( X, dpgmm.predict(X), dpgmm.means_, dpgmm.covariances_, 1, "Bayesian Gaussian mixture models with a Dirichlet process prior " r"for $\gamma_0=0.01$.", ) X_s, y_s = dpgmm.sample(n_samples=2000) plot_samples( X_s, y_s, dpgmm.n_components, 0, "Gaussian mixture with a Dirichlet process prior " r"for $\gamma_0=0.01$ sampled with $2000$ samples.", ) dpgmm = mixture.BayesianGaussianMixture( n_components=10, covariance_type="full", weight_concentration_prior=1e2, weight_concentration_prior_type="dirichlet_process", mean_precision_prior=1e-2, covariance_prior=1e0 * np.eye(2), init_params="kmeans", max_iter=100, random_state=2, ).fit(X) plot_results( X, dpgmm.predict(X), dpgmm.means_, dpgmm.covariances_, 2, "Bayesian Gaussian mixture models with a Dirichlet process prior " r"for $\gamma_0=100$", ) X_s, y_s = dpgmm.sample(n_samples=2000) 
plot_samples( X_s, y_s, dpgmm.n_components, 1, "Gaussian mixture with a Dirichlet process prior " r"for $\gamma_0=100$ sampled with $2000$ samples.", ) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/mixture/plot_gmm_covariances.py
examples/mixture/plot_gmm_covariances.py
""" =============== GMM covariances =============== Demonstration of several covariances types for Gaussian mixture models. See :ref:`gmm` for more information on the estimator. Although GMM are often used for clustering, we can compare the obtained clusters with the actual classes from the dataset. We initialize the means of the Gaussians with the means of the classes from the training set to make this comparison valid. We plot predicted labels on both training and held out test data using a variety of GMM covariance types on the iris dataset. We compare GMMs with spherical, diagonal, full, and tied covariance matrices in increasing order of performance. Although one would expect full covariance to perform best in general, it is prone to overfitting on small datasets and does not generalize well to held out test data. On the plots, train data is shown as dots, while test data is shown as crosses. The iris dataset is four-dimensional. Only the first two dimensions are shown here, and thus some points are separated in other dimensions. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np from sklearn import datasets from sklearn.mixture import GaussianMixture from sklearn.model_selection import StratifiedKFold colors = ["navy", "turquoise", "darkorange"] def make_ellipses(gmm, ax): for n, color in enumerate(colors): if gmm.covariance_type == "full": covariances = gmm.covariances_[n][:2, :2] elif gmm.covariance_type == "tied": covariances = gmm.covariances_[:2, :2] elif gmm.covariance_type == "diag": covariances = np.diag(gmm.covariances_[n][:2]) elif gmm.covariance_type == "spherical": covariances = np.eye(gmm.means_.shape[1]) * gmm.covariances_[n] v, w = np.linalg.eigh(covariances) u = w[0] / np.linalg.norm(w[0]) angle = np.arctan2(u[1], u[0]) angle = 180 * angle / np.pi # convert to degrees v = 2.0 * np.sqrt(2.0) * np.sqrt(v) ell = mpl.patches.Ellipse( gmm.means_[n, :2], v[0], v[1], angle=180 + angle, color=color ) ell.set_clip_box(ax.bbox) ell.set_alpha(0.5) ax.add_artist(ell) ax.set_aspect("equal", "datalim") iris = datasets.load_iris() # Break up the dataset into non-overlapping training (75%) and testing # (25%) sets. skf = StratifiedKFold(n_splits=4) # Only take the first fold. train_index, test_index = next(iter(skf.split(iris.data, iris.target))) X_train = iris.data[train_index] y_train = iris.target[train_index] X_test = iris.data[test_index] y_test = iris.target[test_index] n_classes = len(np.unique(y_train)) # Try GMMs using different types of covariances. 
estimators = { cov_type: GaussianMixture( n_components=n_classes, covariance_type=cov_type, max_iter=20, random_state=0 ) for cov_type in ["spherical", "diag", "tied", "full"] } n_estimators = len(estimators) plt.figure(figsize=(3 * n_estimators // 2, 6)) plt.subplots_adjust( bottom=0.01, top=0.95, hspace=0.15, wspace=0.05, left=0.01, right=0.99 ) for index, (name, estimator) in enumerate(estimators.items()): # Since we have class labels for the training data, we can # initialize the GMM parameters in a supervised manner. estimator.means_init = np.array( [X_train[y_train == i].mean(axis=0) for i in range(n_classes)] ) # Train the other parameters using the EM algorithm. estimator.fit(X_train) h = plt.subplot(2, n_estimators // 2, index + 1) make_ellipses(estimator, h) for n, color in enumerate(colors): data = iris.data[iris.target == n] plt.scatter( data[:, 0], data[:, 1], s=0.8, color=color, label=iris.target_names[n] ) # Plot the test data with crosses for n, color in enumerate(colors): data = X_test[y_test == n] plt.scatter(data[:, 0], data[:, 1], marker="x", color=color) y_train_pred = estimator.predict(X_train) train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100 plt.text(0.05, 0.9, "Train accuracy: %.1f" % train_accuracy, transform=h.transAxes) y_test_pred = estimator.predict(X_test) test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100 plt.text(0.05, 0.8, "Test accuracy: %.1f" % test_accuracy, transform=h.transAxes) plt.xticks(()) plt.yticks(()) plt.title(name) plt.legend(scatterpoints=1, loc="lower right", prop=dict(size=12)) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/miscellaneous/plot_set_output.py
examples/miscellaneous/plot_set_output.py
""" ================================ Introducing the `set_output` API ================================ .. currentmodule:: sklearn This example will demonstrate the `set_output` API to configure transformers to output pandas DataFrames. `set_output` can be configured per estimator by calling the `set_output` method or globally by setting `set_config(transform_output="pandas")`. For details, see `SLEP018 <https://scikit-learn-enhancement-proposals.readthedocs.io/en/latest/slep018/proposal.html>`__. """ # noqa: CPY001 # %% # First, we load the iris dataset as a DataFrame to demonstrate the `set_output` API. from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split X, y = load_iris(as_frame=True, return_X_y=True) X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=0) X_train.head() # %% # To configure an estimator such as :class:`preprocessing.StandardScaler` to return # DataFrames, call `set_output`. This feature requires pandas to be installed. from sklearn.preprocessing import StandardScaler scaler = StandardScaler().set_output(transform="pandas") scaler.fit(X_train) X_test_scaled = scaler.transform(X_test) X_test_scaled.head() # %% # `set_output` can be called after `fit` to configure `transform` after the fact. scaler2 = StandardScaler() scaler2.fit(X_train) X_test_np = scaler2.transform(X_test) print(f"Default output type: {type(X_test_np).__name__}") scaler2.set_output(transform="pandas") X_test_df = scaler2.transform(X_test) print(f"Configured pandas output type: {type(X_test_df).__name__}") # %% # In a :class:`pipeline.Pipeline`, `set_output` configures all steps to output # DataFrames. 
from sklearn.feature_selection import SelectPercentile from sklearn.linear_model import LogisticRegression from sklearn.pipeline import make_pipeline clf = make_pipeline( StandardScaler(), SelectPercentile(percentile=75), LogisticRegression() ) clf.set_output(transform="pandas") clf.fit(X_train, y_train) # %% # Each transformer in the pipeline is configured to return DataFrames. This # means that the final logistic regression step contains the feature names of the input. clf[-1].feature_names_in_ # %% # .. note:: If one uses the method `set_params`, the transformer will be # replaced by a new one with the default output format. clf.set_params(standardscaler=StandardScaler()) clf.fit(X_train, y_train) clf[-1].feature_names_in_ # %% # To keep the intended behavior, use `set_output` on the new transformer # beforehand scaler = StandardScaler().set_output(transform="pandas") clf.set_params(standardscaler=scaler) clf.fit(X_train, y_train) clf[-1].feature_names_in_ # %% # Next we load the titanic dataset to demonstrate `set_output` with # :class:`compose.ColumnTransformer` and heterogeneous data. from sklearn.datasets import fetch_openml X, y = fetch_openml("titanic", version=1, as_frame=True, return_X_y=True) X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y) # %% # The `set_output` API can be configured globally by using :func:`set_config` and # setting `transform_output` to `"pandas"`. 
from sklearn import set_config from sklearn.compose import ColumnTransformer from sklearn.impute import SimpleImputer from sklearn.preprocessing import OneHotEncoder, StandardScaler set_config(transform_output="pandas") num_pipe = make_pipeline(SimpleImputer(), StandardScaler()) num_cols = ["age", "fare"] ct = ColumnTransformer( ( ("numerical", num_pipe, num_cols), ( "categorical", OneHotEncoder( sparse_output=False, drop="if_binary", handle_unknown="ignore" ), ["embarked", "sex", "pclass"], ), ), verbose_feature_names_out=False, ) clf = make_pipeline(ct, SelectPercentile(percentile=50), LogisticRegression()) clf.fit(X_train, y_train) clf.score(X_test, y_test) # %% # With the global configuration, all transformers output DataFrames. This allows us to # easily plot the logistic regression coefficients with the corresponding feature names. import pandas as pd log_reg = clf[-1] coef = pd.Series(log_reg.coef_.ravel(), index=log_reg.feature_names_in_) _ = coef.sort_values().plot.barh() # %% # In order to demonstrate the :func:`config_context` functionality below, let # us first reset `transform_output` to its default value. set_config(transform_output="default") # %% # When configuring the output type with :func:`config_context` the # configuration at the time when `transform` or `fit_transform` are # called is what counts. Setting these only when you construct or fit # the transformer has no effect. from sklearn import config_context scaler = StandardScaler() scaler.fit(X_train[num_cols]) # %% with config_context(transform_output="pandas"): # the output of transform will be a Pandas DataFrame X_test_scaled = scaler.transform(X_test[num_cols]) X_test_scaled.head() # %% # outside of the context manager, the output will be a NumPy array X_test_scaled = scaler.transform(X_test[num_cols]) X_test_scaled[:5]
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/miscellaneous/plot_partial_dependence_visualization_api.py
examples/miscellaneous/plot_partial_dependence_visualization_api.py
""" ========================================= Advanced Plotting With Partial Dependence ========================================= The :class:`~sklearn.inspection.PartialDependenceDisplay` object can be used for plotting without needing to recalculate the partial dependence. In this example, we show how to plot partial dependence plots and how to quickly customize the plot with the visualization API. .. note:: See also :ref:`sphx_glr_auto_examples_miscellaneous_plot_roc_curve_visualization_api.py` """ # noqa: E501 # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import pandas as pd from sklearn.datasets import load_diabetes from sklearn.inspection import PartialDependenceDisplay from sklearn.neural_network import MLPRegressor from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler from sklearn.tree import DecisionTreeRegressor # %% # Train models on the diabetes dataset # ================================================ # # First, we train a decision tree and a multi-layer perceptron on the diabetes # dataset. diabetes = load_diabetes() X = pd.DataFrame(diabetes.data, columns=diabetes.feature_names) y = diabetes.target tree = DecisionTreeRegressor() mlp = make_pipeline( StandardScaler(), MLPRegressor(hidden_layer_sizes=(100, 100), tol=1e-2, max_iter=500, random_state=0), ) tree.fit(X, y) mlp.fit(X, y) # %% # Plotting partial dependence for two features # ============================================ # # We plot partial dependence curves for features "age" and "bmi" (body mass # index) for the decision tree. With two features, # :func:`~sklearn.inspection.PartialDependenceDisplay.from_estimator` expects to plot # two curves. Here the plot function place a grid of two plots using the space # defined by `ax` . 
fig, ax = plt.subplots(figsize=(12, 6)) ax.set_title("Decision Tree") tree_disp = PartialDependenceDisplay.from_estimator(tree, X, ["age", "bmi"], ax=ax) # %% # The partial dependence curves can be plotted for the multi-layer perceptron. # In this case, `line_kw` is passed to # :func:`~sklearn.inspection.PartialDependenceDisplay.from_estimator` to change the # color of the curve. fig, ax = plt.subplots(figsize=(12, 6)) ax.set_title("Multi-layer Perceptron") mlp_disp = PartialDependenceDisplay.from_estimator( mlp, X, ["age", "bmi"], ax=ax, line_kw={"color": "red"} ) # %% # Plotting partial dependence of the two models together # ====================================================== # # The `tree_disp` and `mlp_disp` # :class:`~sklearn.inspection.PartialDependenceDisplay` objects contain all the # computed information needed to recreate the partial dependence curves. This # means we can easily create additional plots without needing to recompute the # curves. # # One way to plot the curves is to place them in the same figure, with the # curves of each model on each row. First, we create a figure with two axes # within two rows and one column. The two axes are passed to the # :func:`~sklearn.inspection.PartialDependenceDisplay.plot` functions of # `tree_disp` and `mlp_disp`. The given axes will be used by the plotting # function to draw the partial dependence. The resulting plot places the # decision tree partial dependence curves in the first row of the # multi-layer perceptron in the second row. fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10, 10)) tree_disp.plot(ax=ax1) ax1.set_title("Decision Tree") mlp_disp.plot(ax=ax2, line_kw={"color": "red"}) ax2.set_title("Multi-layer Perceptron") # %% # Another way to compare the curves is to plot them on top of each other. Here, # we create a figure with one row and two columns. 
The axes are passed into the # :func:`~sklearn.inspection.PartialDependenceDisplay.plot` function as a list, # which will plot the partial dependence curves of each model on the same axes. # The length of the axes list must be equal to the number of plots drawn. # sphinx_gallery_thumbnail_number = 4 fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 6)) tree_disp.plot(ax=[ax1, ax2], line_kw={"label": "Decision Tree"}) mlp_disp.plot( ax=[ax1, ax2], line_kw={"label": "Multi-layer Perceptron", "color": "red"} ) ax1.legend() ax2.legend() # %% # `tree_disp.axes_` is a numpy array container the axes used to draw the # partial dependence plots. This can be passed to `mlp_disp` to have the same # affect of drawing the plots on top of each other. Furthermore, the # `mlp_disp.figure_` stores the figure, which allows for resizing the figure # after calling `plot`. In this case `tree_disp.axes_` has two dimensions, thus # `plot` will only show the y label and y ticks on the left most plot. tree_disp.plot(line_kw={"label": "Decision Tree"}) mlp_disp.plot( line_kw={"label": "Multi-layer Perceptron", "color": "red"}, ax=tree_disp.axes_ ) tree_disp.figure_.set_size_inches(10, 6) tree_disp.axes_[0, 0].legend() tree_disp.axes_[0, 1].legend() plt.show() # %% # Plotting partial dependence for one feature # =========================================== # # Here, we plot the partial dependence curves for a single feature, "age", on # the same axes. In this case, `tree_disp.axes_` is passed into the second # plot function. tree_disp = PartialDependenceDisplay.from_estimator(tree, X, ["age"]) mlp_disp = PartialDependenceDisplay.from_estimator( mlp, X, ["age"], ax=tree_disp.axes_, line_kw={"color": "red"} )
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/miscellaneous/plot_outlier_detection_bench.py
examples/miscellaneous/plot_outlier_detection_bench.py
""" ========================================== Evaluation of outlier detection estimators ========================================== This example compares two outlier detection algorithms, namely :ref:`local_outlier_factor` (LOF) and :ref:`isolation_forest` (IForest), on real-world datasets available in :class:`sklearn.datasets`. The goal is to show that different algorithms perform well on different datasets and contrast their training speed and sensitivity to hyperparameters. The algorithms are trained (without labels) on the whole dataset assumed to contain outliers. 1. The ROC curves are computed using knowledge of the ground-truth labels and displayed using :class:`~sklearn.metrics.RocCurveDisplay`. 2. The performance is assessed in terms of the ROC-AUC. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Dataset preprocessing and model training # ======================================== # # Different outlier detection models require different preprocessing. In the # presence of categorical variables, # :class:`~sklearn.preprocessing.OrdinalEncoder` is often a good strategy for # tree-based models such as :class:`~sklearn.ensemble.IsolationForest`, whereas # neighbors-based models such as :class:`~sklearn.neighbors.LocalOutlierFactor` # would be impacted by the ordering induced by ordinal encoding. To avoid # inducing an ordering, on should rather use # :class:`~sklearn.preprocessing.OneHotEncoder`. # # Neighbors-based models may also require scaling of the numerical features (see # for instance :ref:`neighbors_scaling`). In the presence of outliers, a good # option is to use a :class:`~sklearn.preprocessing.RobustScaler`. 
from sklearn.compose import ColumnTransformer from sklearn.ensemble import IsolationForest from sklearn.neighbors import LocalOutlierFactor from sklearn.pipeline import make_pipeline from sklearn.preprocessing import ( OneHotEncoder, OrdinalEncoder, RobustScaler, ) def make_estimator(name, categorical_columns=None, iforest_kw=None, lof_kw=None): """Create an outlier detection estimator based on its name.""" if name == "LOF": outlier_detector = LocalOutlierFactor(**(lof_kw or {})) if categorical_columns is None: preprocessor = RobustScaler() else: preprocessor = ColumnTransformer( transformers=[("categorical", OneHotEncoder(), categorical_columns)], remainder=RobustScaler(), ) else: # name == "IForest" outlier_detector = IsolationForest(**(iforest_kw or {})) if categorical_columns is None: preprocessor = None else: ordinal_encoder = OrdinalEncoder( handle_unknown="use_encoded_value", unknown_value=-1 ) preprocessor = ColumnTransformer( transformers=[ ("categorical", ordinal_encoder, categorical_columns), ], remainder="passthrough", ) return make_pipeline(preprocessor, outlier_detector) # %% # The following `fit_predict` function returns the average outlier score of X. from time import perf_counter def fit_predict(estimator, X): tic = perf_counter() if estimator[-1].__class__.__name__ == "LocalOutlierFactor": estimator.fit(X) y_score = estimator[-1].negative_outlier_factor_ else: # "IsolationForest" y_score = estimator.fit(X).decision_function(X) toc = perf_counter() print(f"Duration for {model_name}: {toc - tic:.2f} s") return y_score # %% # On the rest of the example we process one dataset per section. After loading # the data, the targets are modified to consist of two classes: 0 representing # inliers and 1 representing outliers. Due to computational constraints of the # scikit-learn documentation, the sample size of some datasets is reduced using # a stratified :class:`~sklearn.model_selection.train_test_split`. 
# # Furthermore, we set `n_neighbors` to match the expected number of anomalies # `expected_n_anomalies = n_samples * expected_anomaly_fraction`. This is a good # heuristic as long as the proportion of outliers is not very low, the reason # being that `n_neighbors` should be at least greater than the number of samples # in the less populated cluster (see # :ref:`sphx_glr_auto_examples_neighbors_plot_lof_outlier_detection.py`). # # KDDCup99 - SA dataset # --------------------- # # The :ref:`kddcup99_dataset` was generated using a closed network and # hand-injected attacks. The SA dataset is a subset of it obtained by simply # selecting all the normal data and an anomaly proportion of around 3%. # %% import numpy as np from sklearn.datasets import fetch_kddcup99 from sklearn.model_selection import train_test_split X, y = fetch_kddcup99( subset="SA", percent10=True, random_state=42, return_X_y=True, as_frame=True ) y = (y != b"normal.").astype(np.int32) X, _, y, _ = train_test_split(X, y, train_size=0.1, stratify=y, random_state=42) n_samples, anomaly_frac = X.shape[0], y.mean() print(f"{n_samples} datapoints with {y.sum()} anomalies ({anomaly_frac:.02%})") # %% # The SA dataset contains 41 features out of which 3 are categorical: # "protocol_type", "service" and "flag". # %% y_true = {} y_score = {"LOF": {}, "IForest": {}} model_names = ["LOF", "IForest"] cat_columns = ["protocol_type", "service", "flag"] y_true["KDDCup99 - SA"] = y for model_name in model_names: model = make_estimator( name=model_name, categorical_columns=cat_columns, lof_kw={"n_neighbors": int(n_samples * anomaly_frac)}, iforest_kw={"random_state": 42}, ) y_score[model_name]["KDDCup99 - SA"] = fit_predict(model, X) # %% # Forest covertypes dataset # ------------------------- # # The :ref:`covtype_dataset` is a multiclass dataset where the target is the # dominant species of tree in a given patch of forest. 
It contains 54 features, # some of which ("Wilderness_Area" and "Soil_Type") are already binary encoded. # Though originally meant as a classification task, one can regard inliers as # samples encoded with label 2 and outliers as those with label 4. # %% from sklearn.datasets import fetch_covtype X, y = fetch_covtype(return_X_y=True, as_frame=True) s = (y == 2) + (y == 4) X = X.loc[s] y = y.loc[s] y = (y != 2).astype(np.int32) X, _, y, _ = train_test_split(X, y, train_size=0.05, stratify=y, random_state=42) X_forestcover = X # save X for later use n_samples, anomaly_frac = X.shape[0], y.mean() print(f"{n_samples} datapoints with {y.sum()} anomalies ({anomaly_frac:.02%})") # %% y_true["forestcover"] = y for model_name in model_names: model = make_estimator( name=model_name, lof_kw={"n_neighbors": int(n_samples * anomaly_frac)}, iforest_kw={"random_state": 42}, ) y_score[model_name]["forestcover"] = fit_predict(model, X) # %% # Ames Housing dataset # -------------------- # # The `Ames housing dataset <http://www.openml.org/d/43926>`_ is originally a # regression dataset where the target are sales prices of houses in Ames, Iowa. # Here we convert it into an outlier detection problem by regarding houses with # price over 70 USD/sqft. To make the problem easier, we drop intermediate # prices between 40 and 70 USD/sqft. 
# %% import matplotlib.pyplot as plt from sklearn.datasets import fetch_openml X, y = fetch_openml(name="ames_housing", version=1, return_X_y=True, as_frame=True) y = y.div(X["Lot_Area"]) # None values in pandas 1.5.1 were mapped to np.nan in pandas 2.0.1 X["Misc_Feature"] = X["Misc_Feature"].cat.add_categories("NoInfo").fillna("NoInfo") X["Mas_Vnr_Type"] = X["Mas_Vnr_Type"].cat.add_categories("NoInfo").fillna("NoInfo") X.drop(columns="Lot_Area", inplace=True) mask = (y < 40) | (y > 70) X = X.loc[mask] y = y.loc[mask] y.hist(bins=20, edgecolor="black") plt.xlabel("House price in USD/sqft") _ = plt.title("Distribution of house prices in Ames") # %% y = (y > 70).astype(np.int32) n_samples, anomaly_frac = X.shape[0], y.mean() print(f"{n_samples} datapoints with {y.sum()} anomalies ({anomaly_frac:.02%})") # %% # The dataset contains 46 categorical features. In this case it is easier use a # :class:`~sklearn.compose.make_column_selector` to find them instead of passing # a list made by hand. # %% from sklearn.compose import make_column_selector as selector categorical_columns_selector = selector(dtype_include="category") cat_columns = categorical_columns_selector(X) y_true["ames_housing"] = y for model_name in model_names: model = make_estimator( name=model_name, categorical_columns=cat_columns, lof_kw={"n_neighbors": int(n_samples * anomaly_frac)}, iforest_kw={"random_state": 42}, ) y_score[model_name]["ames_housing"] = fit_predict(model, X) # %% # Cardiotocography dataset # ------------------------ # # The `Cardiotocography dataset <http://www.openml.org/d/1466>`_ is a multiclass # dataset of fetal cardiotocograms, the classes being the fetal heart rate (FHR) # pattern encoded with labels from 1 to 10. Here we set class 3 (the minority # class) to represent the outliers. It contains 30 numerical features, some of # which are binary encoded and some are continuous. 
# %% X, y = fetch_openml(name="cardiotocography", version=1, return_X_y=True, as_frame=False) X_cardiotocography = X # save X for later use s = y == "3" y = s.astype(np.int32) n_samples, anomaly_frac = X.shape[0], y.mean() print(f"{n_samples} datapoints with {y.sum()} anomalies ({anomaly_frac:.02%})") # %% y_true["cardiotocography"] = y for model_name in model_names: model = make_estimator( name=model_name, lof_kw={"n_neighbors": int(n_samples * anomaly_frac)}, iforest_kw={"random_state": 42}, ) y_score[model_name]["cardiotocography"] = fit_predict(model, X) # %% # Plot and interpret results # ========================== # # The algorithm performance relates to how good the true positive rate (TPR) is # at low value of the false positive rate (FPR). The best algorithms have the # curve on the top-left of the plot and the area under curve (AUC) close to 1. # The diagonal dashed line represents a random classification of outliers and # inliers. # %% import math from sklearn.metrics import RocCurveDisplay cols = 2 pos_label = 0 # mean 0 belongs to positive class datasets_names = y_true.keys() rows = math.ceil(len(datasets_names) / cols) fig, axs = plt.subplots(nrows=rows, ncols=cols, squeeze=False, figsize=(10, rows * 4)) for ax, dataset_name in zip(axs.ravel(), datasets_names): for model_idx, model_name in enumerate(model_names): display = RocCurveDisplay.from_predictions( y_true[dataset_name], y_score[model_name][dataset_name], pos_label=pos_label, name=model_name, ax=ax, plot_chance_level=(model_idx == len(model_names) - 1), chance_level_kw={"linestyle": ":"}, ) ax.set_title(dataset_name) _ = plt.tight_layout(pad=2.0) # spacing between subplots # %% # We observe that once the number of neighbors is tuned, LOF and IForest perform # similarly in terms of ROC AUC for the forestcover and cardiotocography # datasets. The score for IForest is slightly better for the SA dataset and LOF # performs considerably better on the Ames housing dataset than IForest. 
# # Recall however that Isolation Forest tends to train much faster than LOF on # datasets with a large number of samples. LOF needs to compute pairwise # distances to find nearest neighbors, which has a quadratic complexity with respect # to the number of observations. This can make this method prohibitive on large # datasets. # # Ablation study # ============== # # In this section we explore the impact of the hyperparameter `n_neighbors` and # the choice of scaling the numerical variables on the LOF model. Here we use # the :ref:`covtype_dataset` dataset as the binary encoded categories introduce # a natural scale of euclidean distances between 0 and 1. We then want a scaling # method to avoid granting a privilege to non-binary features and that is robust # enough to outliers so that the task of finding them does not become too # difficult. # %% X = X_forestcover y = y_true["forestcover"] n_samples = X.shape[0] n_neighbors_list = (n_samples * np.array([0.2, 0.02, 0.01, 0.001])).astype(np.int32) model = make_pipeline(RobustScaler(), LocalOutlierFactor()) linestyles = ["solid", "dashed", "dashdot", ":", (5, (10, 3))] fig, ax = plt.subplots() for model_idx, (linestyle, n_neighbors) in enumerate(zip(linestyles, n_neighbors_list)): model.set_params(localoutlierfactor__n_neighbors=n_neighbors) model.fit(X) y_score = model[-1].negative_outlier_factor_ display = RocCurveDisplay.from_predictions( y, y_score, pos_label=pos_label, name=f"n_neighbors = {n_neighbors}", ax=ax, plot_chance_level=(model_idx == len(n_neighbors_list) - 1), chance_level_kw={"linestyle": (0, (1, 10))}, curve_kwargs=dict(linestyle=linestyle, linewidth=2), ) _ = ax.set_title("RobustScaler with varying n_neighbors\non forestcover dataset") # %% # We observe that the number of neighbors has a big impact on the performance of # the model. If one has access to (at least some) ground truth labels, it is # then important to tune `n_neighbors` accordingly. 
A convenient way to do so is # to explore values for `n_neighbors` of the order of magnitud of the expected # contamination. # %% from sklearn.preprocessing import MinMaxScaler, SplineTransformer, StandardScaler preprocessor_list = [ None, RobustScaler(), StandardScaler(), MinMaxScaler(), SplineTransformer(), ] expected_anomaly_fraction = 0.02 lof = LocalOutlierFactor(n_neighbors=int(n_samples * expected_anomaly_fraction)) fig, ax = plt.subplots() for model_idx, (linestyle, preprocessor) in enumerate( zip(linestyles, preprocessor_list) ): model = make_pipeline(preprocessor, lof) model.fit(X) y_score = model[-1].negative_outlier_factor_ display = RocCurveDisplay.from_predictions( y, y_score, pos_label=pos_label, name=str(preprocessor).split("(")[0], ax=ax, plot_chance_level=(model_idx == len(preprocessor_list) - 1), chance_level_kw={"linestyle": (0, (1, 10))}, curve_kwargs=dict(linestyle=linestyle, linewidth=2), ) _ = ax.set_title("Fixed n_neighbors with varying preprocessing\non forestcover dataset") # %% # On the one hand, :class:`~sklearn.preprocessing.RobustScaler` scales each # feature independently by using the interquartile range (IQR) by default, which # is the range between the 25th and 75th percentiles of the data. It centers the # data by subtracting the median and then scale it by dividing by the IQR. The # IQR is robust to outliers: the median and interquartile range are less # affected by extreme values than the range, the mean and the standard # deviation. Furthermore, :class:`~sklearn.preprocessing.RobustScaler` does not # squash marginal outlier values, contrary to # :class:`~sklearn.preprocessing.StandardScaler`. # # On the other hand, :class:`~sklearn.preprocessing.MinMaxScaler` scales each # feature individually such that its range maps into the range between zero and # one. 
If there are outliers in the data, they can skew it towards either the # minimum or maximum values, leading to a completely different distribution of # data with large marginal outliers: all non-outlier values can be collapsed # almost together as a result. # # We also evaluated no preprocessing at all (by passing `None` to the pipeline), # :class:`~sklearn.preprocessing.StandardScaler` and # :class:`~sklearn.preprocessing.SplineTransformer`. Please refer to their # respective documentation for more details. # # Note that the optimal preprocessing depends on the dataset, as shown below: # %% X = X_cardiotocography y = y_true["cardiotocography"] n_samples, expected_anomaly_fraction = X.shape[0], 0.025 lof = LocalOutlierFactor(n_neighbors=int(n_samples * expected_anomaly_fraction)) fig, ax = plt.subplots() for model_idx, (linestyle, preprocessor) in enumerate( zip(linestyles, preprocessor_list) ): model = make_pipeline(preprocessor, lof) model.fit(X) y_score = model[-1].negative_outlier_factor_ display = RocCurveDisplay.from_predictions( y, y_score, pos_label=pos_label, name=str(preprocessor).split("(")[0], ax=ax, plot_chance_level=(model_idx == len(preprocessor_list) - 1), chance_level_kw={"linestyle": (0, (1, 10))}, curve_kwargs=dict(linestyle=linestyle, linewidth=2), ) ax.set_title( "Fixed n_neighbors with varying preprocessing\non cardiotocography dataset" ) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/miscellaneous/plot_metadata_routing.py
examples/miscellaneous/plot_metadata_routing.py
""" ================ Metadata Routing ================ .. currentmodule:: sklearn This document shows how you can use the :ref:`metadata routing mechanism <metadata_routing>` in scikit-learn to route metadata to the estimators, scorers, and CV splitters consuming them. To better understand the following document, we need to introduce two concepts: routers and consumers. A router is an object which forwards some given data and metadata to other objects. In most cases, a router is a :term:`meta-estimator`, i.e. an estimator which takes another estimator as a parameter. A function such as :func:`sklearn.model_selection.cross_validate` which takes an estimator as a parameter and forwards data and metadata, is also a router. A consumer, on the other hand, is an object which accepts and uses some given metadata. For instance, an estimator taking into account ``sample_weight`` in its :term:`fit` method is a consumer of ``sample_weight``. It is possible for an object to be both a router and a consumer. For instance, a meta-estimator may take into account ``sample_weight`` in certain calculations, but it may also route it to the underlying estimator. First a few imports and some random data for the rest of the script. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% import warnings from pprint import pprint import numpy as np from sklearn import set_config from sklearn.base import ( BaseEstimator, ClassifierMixin, MetaEstimatorMixin, RegressorMixin, TransformerMixin, clone, ) from sklearn.linear_model import LinearRegression from sklearn.utils import metadata_routing from sklearn.utils.metadata_routing import ( MetadataRouter, MethodMapping, get_routing_for_object, process_routing, ) from sklearn.utils.validation import check_is_fitted n_samples, n_features = 100, 4 rng = np.random.RandomState(42) X = rng.rand(n_samples, n_features) y = rng.randint(0, 2, size=n_samples) my_groups = rng.randint(0, 10, size=n_samples) my_weights = rng.rand(n_samples) my_other_weights = rng.rand(n_samples) # %% # Metadata routing is only available if explicitly enabled: set_config(enable_metadata_routing=True) # %% # This utility function is a dummy to check if a metadata is passed: def check_metadata(obj, **kwargs): for key, value in kwargs.items(): if value is not None: print( f"Received {key} of length = {len(value)} in {obj.__class__.__name__}." ) else: print(f"{key} is None in {obj.__class__.__name__}.") # %% # A utility function to nicely print the routing information of an object: def print_routing(obj): pprint(obj.get_metadata_routing()._serialize()) # %% # Consuming Estimator # ------------------- # Here we demonstrate how an estimator can expose the required API to support # metadata routing as a consumer. Imagine a simple classifier accepting # ``sample_weight`` as a metadata on its ``fit`` and ``groups`` in its # ``predict`` method: class ExampleClassifier(ClassifierMixin, BaseEstimator): def fit(self, X, y, sample_weight=None): check_metadata(self, sample_weight=sample_weight) # all classifiers need to expose a classes_ attribute once they're fit. 
self.classes_ = np.array([0, 1]) return self def predict(self, X, groups=None): check_metadata(self, groups=groups) # return a constant value of 1, not a very smart classifier! return np.ones(len(X)) # %% # The above estimator now has all it needs to consume metadata. This is # accomplished by some magic done in :class:`~base.BaseEstimator`. There are # now three methods exposed by the above class: ``set_fit_request``, # ``set_predict_request``, and ``get_metadata_routing``. There is also a # ``set_score_request`` for ``sample_weight`` which is present since # :class:`~base.ClassifierMixin` implements a ``score`` method accepting # ``sample_weight``. The same applies to regressors which inherit from # :class:`~base.RegressorMixin`. # # By default, no metadata is requested, which we can see as: print_routing(ExampleClassifier()) # %% # The above output means that ``sample_weight`` and ``groups`` are not # requested by `ExampleClassifier`, and if a router is given those metadata, it # should raise an error, since the user has not explicitly set whether they are # required or not. The same is true for ``sample_weight`` in the ``score`` # method, which is inherited from :class:`~base.ClassifierMixin`. In order to # explicitly set request values for those metadata, we can use these methods: est = ( ExampleClassifier() .set_fit_request(sample_weight=False) .set_predict_request(groups=True) .set_score_request(sample_weight=False) ) print_routing(est) # %% # .. note :: # Please note that as long as the above estimator is not used in a # meta-estimator, the user does not need to set any requests for the # metadata and the set values are ignored, since a consumer does not # validate or route given metadata. A simple usage of the above estimator # would work as expected. 
est = ExampleClassifier() est.fit(X, y, sample_weight=my_weights) est.predict(X[:3, :], groups=my_groups) # %% # Routing Meta-Estimator # ---------------------- # Now, we show how to design a meta-estimator to be a router. As a simplified # example, here is a meta-estimator, which doesn't do much other than routing # the metadata. class MetaClassifier(MetaEstimatorMixin, ClassifierMixin, BaseEstimator): def __init__(self, estimator): self.estimator = estimator def get_metadata_routing(self): # This method defines the routing for this meta-estimator. # In order to do so, a `MetadataRouter` instance is created, and the # routing is added to it. More explanations follow below. router = MetadataRouter(owner=self).add( estimator=self.estimator, method_mapping=MethodMapping() .add(caller="fit", callee="fit") .add(caller="predict", callee="predict") .add(caller="score", callee="score"), ) return router def fit(self, X, y, **fit_params): # `get_routing_for_object` returns a copy of the `MetadataRouter` # constructed by the above `get_metadata_routing` method, that is # internally called. request_router = get_routing_for_object(self) # Meta-estimators are responsible for validating the given metadata. # `method` refers to the parent's method, i.e. `fit` in this example. request_router.validate_metadata(params=fit_params, method="fit") # `MetadataRouter.route_params` maps the given metadata to the metadata # required by the underlying estimator based on the routing information # defined by the MetadataRouter. The output of type `Bunch` has a key # for each consuming object and those hold keys for their consuming # methods, which then contain key for the metadata which should be # routed to them. routed_params = request_router.route_params(params=fit_params, caller="fit") # A sub-estimator is fitted and its classes are attributed to the # meta-estimator. 
self.estimator_ = clone(self.estimator).fit(X, y, **routed_params.estimator.fit) self.classes_ = self.estimator_.classes_ return self def predict(self, X, **predict_params): check_is_fitted(self) # As in `fit`, we get a copy of the object's MetadataRouter, request_router = get_routing_for_object(self) # then we validate the given metadata, request_router.validate_metadata(params=predict_params, method="predict") # and then prepare the input to the underlying `predict` method. routed_params = request_router.route_params( params=predict_params, caller="predict" ) return self.estimator_.predict(X, **routed_params.estimator.predict) # %% # Let's break down different parts of the above code. # # First, the :meth:`~utils.metadata_routing.get_routing_for_object` takes our # meta-estimator (``self``) and returns a # :class:`~utils.metadata_routing.MetadataRouter` or, a # :class:`~utils.metadata_routing.MetadataRequest` if the object is a consumer, # based on the output of the estimator's ``get_metadata_routing`` method. # # Then in each method, we use the ``route_params`` method to construct a # dictionary of the form ``{"object_name": {"method_name": {"metadata": # value}}}`` to pass to the underlying estimator's method. The ``object_name`` # (``estimator`` in the above ``routed_params.estimator.fit`` example) is the # same as the one added in the ``get_metadata_routing``. ``validate_metadata`` # makes sure all given metadata are requested to avoid silent bugs. # # Next, we illustrate the different behaviors and notably the type of errors # raised. meta_est = MetaClassifier( estimator=ExampleClassifier().set_fit_request(sample_weight=True) ) meta_est.fit(X, y, sample_weight=my_weights) # %% # Note that the above example is calling our utility function # `check_metadata()` via the `ExampleClassifier`. It checks that # ``sample_weight`` is correctly passed to it. 
If it is not, like in the # following example, it would print that ``sample_weight`` is ``None``: meta_est.fit(X, y) # %% # If we pass an unknown metadata, an error is raised: try: meta_est.fit(X, y, test=my_weights) except TypeError as e: print(e) # %% # And if we pass a metadata which is not explicitly requested: try: meta_est.fit(X, y, sample_weight=my_weights).predict(X, groups=my_groups) except ValueError as e: print(e) # %% # Also, if we explicitly set it as not requested, but it is provided: meta_est = MetaClassifier( estimator=ExampleClassifier() .set_fit_request(sample_weight=True) .set_predict_request(groups=False) ) try: meta_est.fit(X, y, sample_weight=my_weights).predict(X[:3, :], groups=my_groups) except TypeError as e: print(e) # %% # Another concept to introduce is **aliased metadata**. This is when an # estimator requests a metadata with a different variable name than the default # variable name. For instance, in a setting where there are two estimators in a # pipeline, one could request ``sample_weight1`` and the other # ``sample_weight2``. Note that this doesn't change what the estimator expects, # it only tells the meta-estimator how to map the provided metadata to what is # required. Here's an example, where we pass ``aliased_sample_weight`` to the # meta-estimator, but the meta-estimator understands that # ``aliased_sample_weight`` is an alias for ``sample_weight``, and passes it as # ``sample_weight`` to the underlying estimator: meta_est = MetaClassifier( estimator=ExampleClassifier().set_fit_request(sample_weight="aliased_sample_weight") ) meta_est.fit(X, y, aliased_sample_weight=my_weights) # %% # Passing ``sample_weight`` here will fail since it is requested with an # alias and ``sample_weight`` with that name is not requested: try: meta_est.fit(X, y, sample_weight=my_weights) except TypeError as e: print(e) # %% # This leads us to the ``get_metadata_routing``. 
The way routing works in # scikit-learn is that consumers request what they need, and routers pass that # along. Additionally, a router exposes what it requires itself so that it can # be used inside another router, e.g. a pipeline inside a grid search object. # The output of the ``get_metadata_routing`` which is a dictionary # representation of a :class:`~utils.metadata_routing.MetadataRouter`, includes # the complete tree of requested metadata by all nested objects and their # corresponding method routings, i.e. which method of a sub-estimator is used # in which method of a meta-estimator: print_routing(meta_est) # %% # As you can see, the only metadata requested for method ``fit`` is # ``"sample_weight"`` with ``"aliased_sample_weight"`` as the alias. The # ``~utils.metadata_routing.MetadataRouter`` class enables us to easily create # the routing object which would create the output we need for our # ``get_metadata_routing``. # # In order to understand how aliases work in meta-estimators, imagine our # meta-estimator inside another one: meta_meta_est = MetaClassifier(estimator=meta_est).fit( X, y, aliased_sample_weight=my_weights ) # %% # In the above example, this is how the ``fit`` method of `meta_meta_est` # will call their sub-estimator's ``fit`` methods:: # # # user feeds `my_weights` as `aliased_sample_weight` into `meta_meta_est`: # meta_meta_est.fit(X, y, aliased_sample_weight=my_weights): # ... # # # the first sub-estimator (`meta_est`) expects `aliased_sample_weight` # self.estimator_.fit(X, y, aliased_sample_weight=aliased_sample_weight): # ... # # # the second sub-estimator (`est`) expects `sample_weight` # self.estimator_.fit(X, y, sample_weight=aliased_sample_weight): # ... # %% # Consuming and routing Meta-Estimator # ------------------------------------ # For a slightly more complex example, consider a meta-estimator that routes # metadata to an underlying estimator as before, but it also uses some metadata # in its own methods. 
This meta-estimator is a consumer and a router at the # same time. Implementing one is very similar to what we had before, but with a # few tweaks. class RouterConsumerClassifier(MetaEstimatorMixin, ClassifierMixin, BaseEstimator): def __init__(self, estimator): self.estimator = estimator def get_metadata_routing(self): router = ( MetadataRouter(owner=self) # defining metadata routing request values for usage in the meta-estimator .add_self_request(self) # defining metadata routing request values for usage in the sub-estimator .add( estimator=self.estimator, method_mapping=MethodMapping() .add(caller="fit", callee="fit") .add(caller="predict", callee="predict") .add(caller="score", callee="score"), ) ) return router # Since `sample_weight` is used and consumed here, it should be defined as # an explicit argument in the method's signature. All other metadata which # are only routed, will be passed as `**fit_params`: def fit(self, X, y, sample_weight, **fit_params): if self.estimator is None: raise ValueError("estimator cannot be None!") check_metadata(self, sample_weight=sample_weight) # We add `sample_weight` to the `fit_params` dictionary. if sample_weight is not None: fit_params["sample_weight"] = sample_weight request_router = get_routing_for_object(self) request_router.validate_metadata(params=fit_params, method="fit") routed_params = request_router.route_params(params=fit_params, caller="fit") self.estimator_ = clone(self.estimator).fit(X, y, **routed_params.estimator.fit) self.classes_ = self.estimator_.classes_ return self def predict(self, X, **predict_params): check_is_fitted(self) # As in `fit`, we get a copy of the object's MetadataRouter, request_router = get_routing_for_object(self) # we validate the given metadata, request_router.validate_metadata(params=predict_params, method="predict") # and then prepare the input to the underlying ``predict`` method. 
routed_params = request_router.route_params( params=predict_params, caller="predict" ) return self.estimator_.predict(X, **routed_params.estimator.predict) # %% # The key parts where the above meta-estimator differs from our previous # meta-estimator is accepting ``sample_weight`` explicitly in ``fit`` and # including it in ``fit_params``. Since ``sample_weight`` is an explicit # argument, we can be sure that ``set_fit_request(sample_weight=...)`` is # present for this method. The meta-estimator is both a consumer, as well as a # router of ``sample_weight``. # # In ``get_metadata_routing``, we add ``self`` to the routing using # ``add_self_request`` to indicate this estimator is consuming # ``sample_weight`` as well as being a router; which also adds a # ``$self_request`` key to the routing info as illustrated below. Now let's # look at some examples: # %% # - No metadata requested meta_est = RouterConsumerClassifier(estimator=ExampleClassifier()) print_routing(meta_est) # %% # - ``sample_weight`` requested by sub-estimator meta_est = RouterConsumerClassifier( estimator=ExampleClassifier().set_fit_request(sample_weight=True) ) print_routing(meta_est) # %% # - ``sample_weight`` requested by meta-estimator meta_est = RouterConsumerClassifier(estimator=ExampleClassifier()).set_fit_request( sample_weight=True ) print_routing(meta_est) # %% # Note the difference in the requested metadata representations above. 
# # - We can also alias the metadata to pass different values to the fit methods # of the meta- and the sub-estimator: meta_est = RouterConsumerClassifier( estimator=ExampleClassifier().set_fit_request(sample_weight="clf_sample_weight"), ).set_fit_request(sample_weight="meta_clf_sample_weight") print_routing(meta_est) # %% # However, ``fit`` of the meta-estimator only needs the alias for the # sub-estimator and addresses their own sample weight as `sample_weight`, since # it doesn't validate and route its own required metadata: meta_est.fit(X, y, sample_weight=my_weights, clf_sample_weight=my_other_weights) # %% # - Alias only on the sub-estimator: # # This is useful when we don't want the meta-estimator to use the metadata, but # the sub-estimator should. meta_est = RouterConsumerClassifier( estimator=ExampleClassifier().set_fit_request(sample_weight="aliased_sample_weight") ) print_routing(meta_est) # %% # The meta-estimator cannot use `aliased_sample_weight`, because it expects # it passed as `sample_weight`. This would apply even if # `set_fit_request(sample_weight=True)` was set on it. # %% # Simple Pipeline # --------------- # A slightly more complicated use-case is a meta-estimator resembling a # :class:`~pipeline.Pipeline`. Here is a meta-estimator, which accepts a # transformer and a classifier. When calling its `fit` method, it applies the # transformer's `fit` and `transform` before running the classifier on the # transformed data. Upon `predict`, it applies the transformer's `transform` # before predicting with the classifier's `predict` method on the transformed # new data. class SimplePipeline(ClassifierMixin, BaseEstimator): def __init__(self, transformer, classifier): self.transformer = transformer self.classifier = classifier def get_metadata_routing(self): router = ( MetadataRouter(owner=self) # We add the routing for the transformer. 
.add( transformer=self.transformer, method_mapping=MethodMapping() # The metadata is routed such that it retraces how # `SimplePipeline` internally calls the transformer's `fit` and # `transform` methods in its own methods (`fit` and `predict`). .add(caller="fit", callee="fit") .add(caller="fit", callee="transform") .add(caller="predict", callee="transform"), ) # We add the routing for the classifier. .add( classifier=self.classifier, method_mapping=MethodMapping() .add(caller="fit", callee="fit") .add(caller="predict", callee="predict"), ) ) return router def fit(self, X, y, **fit_params): routed_params = process_routing(self, "fit", **fit_params) self.transformer_ = clone(self.transformer).fit( X, y, **routed_params.transformer.fit ) X_transformed = self.transformer_.transform( X, **routed_params.transformer.transform ) self.classifier_ = clone(self.classifier).fit( X_transformed, y, **routed_params.classifier.fit ) return self def predict(self, X, **predict_params): routed_params = process_routing(self, "predict", **predict_params) X_transformed = self.transformer_.transform( X, **routed_params.transformer.transform ) return self.classifier_.predict( X_transformed, **routed_params.classifier.predict ) # %% # Note the usage of :class:`~utils.metadata_routing.MethodMapping` to # declare which methods of the child estimator (callee) are used in which # methods of the meta estimator (caller). As you can see, `SimplePipeline` uses # the transformer's ``transform`` and ``fit`` methods in ``fit``, and its # ``transform`` method in ``predict``, and that's what you see implemented in # the routing structure of the pipeline class. # # Another difference in the above example with the previous ones is the usage # of :func:`~utils.metadata_routing.process_routing`, which processes the input # parameters, does the required validation, and returns the `routed_params` # which we had created in previous examples. 
This reduces the boilerplate code # a developer needs to write in each meta-estimator's method. Developers are # strongly recommended to use this function unless there is a good reason # against it. # # In order to test the above pipeline, let's add an example transformer. class ExampleTransformer(TransformerMixin, BaseEstimator): def fit(self, X, y, sample_weight=None): check_metadata(self, sample_weight=sample_weight) return self def transform(self, X, groups=None): check_metadata(self, groups=groups) return X def fit_transform(self, X, y, sample_weight=None, groups=None): return self.fit(X, y, sample_weight).transform(X, groups) # %% # Note that in the above example, we have implemented ``fit_transform`` which # calls ``fit`` and ``transform`` with the appropriate metadata. This is only # required if ``transform`` accepts metadata, since the default ``fit_transform`` # implementation in :class:`~base.TransformerMixin` doesn't pass metadata to # ``transform``. # # Now we can test our pipeline, and see if metadata is correctly passed around. # This example uses our `SimplePipeline`, our `ExampleTransformer`, and our # `RouterConsumerClassifier` which uses our `ExampleClassifier`. 
pipe = SimplePipeline( transformer=ExampleTransformer() # we set transformer's fit to receive sample_weight .set_fit_request(sample_weight=True) # we set transformer's transform to receive groups .set_transform_request(groups=True), classifier=RouterConsumerClassifier( estimator=ExampleClassifier() # we want this sub-estimator to receive sample_weight in fit .set_fit_request(sample_weight=True) # but not groups in predict .set_predict_request(groups=False), ) # and we want the meta-estimator to receive sample_weight as well .set_fit_request(sample_weight=True), ) pipe.fit(X, y, sample_weight=my_weights, groups=my_groups).predict( X[:3], groups=my_groups ) # %% # Deprecation / Default Value Change # ---------------------------------- # In this section we show how one should handle the case where a router becomes # also a consumer, especially when it consumes the same metadata as its # sub-estimator, or a consumer starts consuming a metadata which it wasn't in # an older release. In this case, a warning should be raised for a while, to # let users know the behavior is changed from previous versions. 
class MetaRegressor(MetaEstimatorMixin, RegressorMixin, BaseEstimator): def __init__(self, estimator): self.estimator = estimator def fit(self, X, y, **fit_params): routed_params = process_routing(self, "fit", **fit_params) self.estimator_ = clone(self.estimator).fit(X, y, **routed_params.estimator.fit) def get_metadata_routing(self): router = MetadataRouter(owner=self).add( estimator=self.estimator, method_mapping=MethodMapping().add(caller="fit", callee="fit"), ) return router # %% # As explained above, this is a valid usage if `my_weights` aren't supposed # to be passed as `sample_weight` to `MetaRegressor`: reg = MetaRegressor(estimator=LinearRegression().set_fit_request(sample_weight=True)) reg.fit(X, y, sample_weight=my_weights) # %% # Now imagine we further develop ``MetaRegressor`` and it now also *consumes* # ``sample_weight``: class WeightedMetaRegressor(MetaEstimatorMixin, RegressorMixin, BaseEstimator): # show warning to remind user to explicitly set the value with # `.set_{method}_request(sample_weight={boolean})` __metadata_request__fit = {"sample_weight": metadata_routing.WARN} def __init__(self, estimator): self.estimator = estimator def fit(self, X, y, sample_weight=None, **fit_params): routed_params = process_routing( self, "fit", sample_weight=sample_weight, **fit_params ) check_metadata(self, sample_weight=sample_weight) self.estimator_ = clone(self.estimator).fit(X, y, **routed_params.estimator.fit) def get_metadata_routing(self): router = ( MetadataRouter(owner=self) .add_self_request(self) .add( estimator=self.estimator, method_mapping=MethodMapping().add(caller="fit", callee="fit"), ) ) return router # %% # The above implementation is almost the same as ``MetaRegressor``, and # because of the default request value defined in ``__metadata_request__fit`` # there is a warning raised when fitted. 
with warnings.catch_warnings(record=True) as record: WeightedMetaRegressor( estimator=LinearRegression().set_fit_request(sample_weight=False) ).fit(X, y, sample_weight=my_weights) for w in record: print(w.message) # %% # When an estimator consumes a metadata which it didn't consume before, the # following pattern can be used to warn the users about it. class ExampleRegressor(RegressorMixin, BaseEstimator): __metadata_request__fit = {"sample_weight": metadata_routing.WARN} def fit(self, X, y, sample_weight=None): check_metadata(self, sample_weight=sample_weight) return self def predict(self, X): return np.zeros(shape=(len(X))) with warnings.catch_warnings(record=True) as record: MetaRegressor(estimator=ExampleRegressor()).fit(X, y, sample_weight=my_weights) for w in record: print(w.message) # %% # At the end we disable the configuration flag for metadata routing: set_config(enable_metadata_routing=False) # %% # Third Party Development and scikit-learn Dependency # --------------------------------------------------- # # As seen above, information is communicated between classes using # :class:`~utils.metadata_routing.MetadataRequest` and # :class:`~utils.metadata_routing.MetadataRouter`. It is strongly not advised, # but possible to vendor the tools related to metadata-routing if you strictly # want to have a scikit-learn compatible estimator, without depending on the # scikit-learn package. If all of the following conditions are met, you do NOT # need to modify your code at all: # # - your estimator inherits from :class:`~base.BaseEstimator` # - the parameters consumed by your estimator's methods, e.g. ``fit``, are # explicitly defined in the method's signature, as opposed to being # ``*args`` or ``*kwargs``. # - your estimator does not route any metadata to the underlying objects, i.e. # it's not a *router*.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/miscellaneous/plot_multilabel.py
examples/miscellaneous/plot_multilabel.py
""" ========================= Multilabel classification ========================= This example simulates a multi-label document classification problem. The dataset is generated randomly based on the following process: - pick the number of labels: n ~ Poisson(n_labels) - n times, choose a class c: c ~ Multinomial(theta) - pick the document length: k ~ Poisson(length) - k times, choose a word: w ~ Multinomial(theta_c) In the above process, rejection sampling is used to make sure that n is more than 2, and that the document length is never zero. Likewise, we reject classes which have already been chosen. The documents that are assigned to both classes are plotted surrounded by two colored circles. The classification is performed by projecting to the first two principal components found by PCA and CCA for visualisation purposes, followed by using the :class:`~sklearn.multiclass.OneVsRestClassifier` metaclassifier using two SVCs with linear kernels to learn a discriminative model for each class. Note that PCA is used to perform an unsupervised dimensionality reduction, while CCA is used to perform a supervised one. Note: in the plot, "unlabeled samples" does not mean that we don't know the labels (as in semi-supervised learning) but that the samples simply do *not* have a label. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn.cross_decomposition import CCA from sklearn.datasets import make_multilabel_classification from sklearn.decomposition import PCA from sklearn.multiclass import OneVsRestClassifier from sklearn.svm import SVC def plot_hyperplane(clf, min_x, max_x, linestyle, label): # get the separating hyperplane w = clf.coef_[0] a = -w[0] / w[1] xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough yy = a * xx - (clf.intercept_[0]) / w[1] plt.plot(xx, yy, linestyle, label=label) def plot_subfigure(X, Y, subplot, title, transform): if transform == "pca": X = PCA(n_components=2).fit_transform(X) elif transform == "cca": X = CCA(n_components=2).fit(X, Y).transform(X) else: raise ValueError min_x = np.min(X[:, 0]) max_x = np.max(X[:, 0]) min_y = np.min(X[:, 1]) max_y = np.max(X[:, 1]) classif = OneVsRestClassifier(SVC(kernel="linear")) classif.fit(X, Y) plt.subplot(2, 2, subplot) plt.title(title) zero_class = (Y[:, 0]).nonzero() one_class = (Y[:, 1]).nonzero() plt.scatter(X[:, 0], X[:, 1], s=40, c="gray", edgecolors=(0, 0, 0)) plt.scatter( X[zero_class, 0], X[zero_class, 1], s=160, edgecolors="b", facecolors="none", linewidths=2, label="Class 1", ) plt.scatter( X[one_class, 0], X[one_class, 1], s=80, edgecolors="orange", facecolors="none", linewidths=2, label="Class 2", ) plot_hyperplane( classif.estimators_[0], min_x, max_x, "k--", "Boundary\nfor class 1" ) plot_hyperplane( classif.estimators_[1], min_x, max_x, "k-.", "Boundary\nfor class 2" ) plt.xticks(()) plt.yticks(()) plt.xlim(min_x - 0.5 * max_x, max_x + 0.5 * max_x) plt.ylim(min_y - 0.5 * max_y, max_y + 0.5 * max_y) if subplot == 2: plt.xlabel("First principal component") plt.ylabel("Second principal component") plt.legend(loc="upper left") plt.figure(figsize=(8, 6)) X, Y = make_multilabel_classification( n_classes=2, n_labels=1, allow_unlabeled=True, 
random_state=1 ) plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca") plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca") X, Y = make_multilabel_classification( n_classes=2, n_labels=1, allow_unlabeled=False, random_state=1 ) plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca") plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca") plt.subplots_adjust(0.04, 0.02, 0.97, 0.94, 0.09, 0.2) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/miscellaneous/plot_roc_curve_visualization_api.py
examples/miscellaneous/plot_roc_curve_visualization_api.py
""" ================================ ROC Curve with Visualization API ================================ Scikit-learn defines a simple API for creating visualizations for machine learning. The key features of this API is to allow for quick plotting and visual adjustments without recalculation. In this example, we will demonstrate how to use the visualization API by comparing ROC curves. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Load Data and Train an SVC # -------------------------- # First, we load the wine dataset and convert it to a binary classification # problem. Then, we train a support vector classifier on a training dataset. import matplotlib.pyplot as plt from sklearn.datasets import load_wine from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import RocCurveDisplay from sklearn.model_selection import train_test_split from sklearn.svm import SVC X, y = load_wine(return_X_y=True) y = y == 2 X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) svc = SVC(random_state=42) svc.fit(X_train, y_train) # %% # Plotting the ROC Curve # ---------------------- # Next, we plot the ROC curve with a single call to # :func:`sklearn.metrics.RocCurveDisplay.from_estimator`. The returned # `svc_disp` object allows us to continue using the already computed ROC curve # for the SVC in future plots. svc_disp = RocCurveDisplay.from_estimator(svc, X_test, y_test) plt.show() # %% # Training a Random Forest and Plotting the ROC Curve # --------------------------------------------------- # We train a random forest classifier and create a plot comparing it to the SVC # ROC curve. Notice how `svc_disp` uses # :func:`~sklearn.metrics.RocCurveDisplay.plot` to plot the SVC ROC curve # without recomputing the values of the roc curve itself. Furthermore, we # pass `alpha=0.8` to the plot functions to adjust the alpha values of the # curves. 
rfc = RandomForestClassifier(n_estimators=10, random_state=42) rfc.fit(X_train, y_train) ax = plt.gca() rfc_disp = RocCurveDisplay.from_estimator( rfc, X_test, y_test, ax=ax, curve_kwargs=dict(alpha=0.8) ) svc_disp.plot(ax=ax, curve_kwargs=dict(alpha=0.8)) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/miscellaneous/plot_multioutput_face_completion.py
examples/miscellaneous/plot_multioutput_face_completion.py
""" ============================================== Face completion with a multi-output estimators ============================================== This example shows the use of multi-output estimator to complete images. The goal is to predict the lower half of a face given its upper half. The first column of images shows true faces. The next columns illustrate how extremely randomized trees, k nearest neighbors, linear regression and ridge regression complete the lower half of those faces. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn.datasets import fetch_olivetti_faces from sklearn.ensemble import ExtraTreesRegressor from sklearn.linear_model import LinearRegression, RidgeCV from sklearn.neighbors import KNeighborsRegressor from sklearn.utils.validation import check_random_state # Load the faces datasets data, targets = fetch_olivetti_faces(return_X_y=True) train = data[targets < 30] test = data[targets >= 30] # Test on independent people # Test on a subset of people n_faces = 5 rng = check_random_state(4) face_ids = rng.randint(test.shape[0], size=(n_faces,)) test = test[face_ids, :] n_pixels = data.shape[1] # Upper half of the faces X_train = train[:, : (n_pixels + 1) // 2] # Lower half of the faces y_train = train[:, n_pixels // 2 :] X_test = test[:, : (n_pixels + 1) // 2] y_test = test[:, n_pixels // 2 :] # Fit estimators ESTIMATORS = { "Extra trees": ExtraTreesRegressor( n_estimators=10, max_features=32, random_state=0 ), "K-nn": KNeighborsRegressor(), "Linear regression": LinearRegression(), "Ridge": RidgeCV(), } y_test_predict = dict() for name, estimator in ESTIMATORS.items(): estimator.fit(X_train, y_train) y_test_predict[name] = estimator.predict(X_test) # Plot the completed faces image_shape = (64, 64) n_cols = 1 + len(ESTIMATORS) plt.figure(figsize=(2.0 * n_cols, 2.26 * n_faces)) plt.suptitle("Face completion with multi-output estimators", size=16) for i 
in range(n_faces): true_face = np.hstack((X_test[i], y_test[i])) if i: sub = plt.subplot(n_faces, n_cols, i * n_cols + 1) else: sub = plt.subplot(n_faces, n_cols, i * n_cols + 1, title="true faces") sub.axis("off") sub.imshow( true_face.reshape(image_shape), cmap=plt.cm.gray, interpolation="nearest" ) for j, est in enumerate(sorted(ESTIMATORS)): completed_face = np.hstack((X_test[i], y_test_predict[est][i])) if i: sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j) else: sub = plt.subplot(n_faces, n_cols, i * n_cols + 2 + j, title=est) sub.axis("off") sub.imshow( completed_face.reshape(image_shape), cmap=plt.cm.gray, interpolation="nearest", ) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/miscellaneous/plot_anomaly_comparison.py
examples/miscellaneous/plot_anomaly_comparison.py
""" ============================================================================ Comparing anomaly detection algorithms for outlier detection on toy datasets ============================================================================ This example shows characteristics of different anomaly detection algorithms on 2D datasets. Datasets contain one or two modes (regions of high density) to illustrate the ability of algorithms to cope with multimodal data. For each dataset, 15% of samples are generated as random uniform noise. This proportion is the value given to the nu parameter of the OneClassSVM and the contamination parameter of the other outlier detection algorithms. Decision boundaries between inliers and outliers are displayed in black except for Local Outlier Factor (LOF) as it has no predict method to be applied on new data when it is used for outlier detection. The :class:`~sklearn.svm.OneClassSVM` is known to be sensitive to outliers and thus does not perform very well for outlier detection. This estimator is best suited for novelty detection when the training set is not contaminated by outliers. That said, outlier detection in high-dimension, or without any assumptions on the distribution of the inlying data is very challenging, and a One-class SVM might give useful results in these situations depending on the value of its hyperparameters. The :class:`sklearn.linear_model.SGDOneClassSVM` is an implementation of the One-Class SVM based on stochastic gradient descent (SGD). Combined with kernel approximation, this estimator can be used to approximate the solution of a kernelized :class:`sklearn.svm.OneClassSVM`. We note that, although not identical, the decision boundaries of the :class:`sklearn.linear_model.SGDOneClassSVM` and the ones of :class:`sklearn.svm.OneClassSVM` are very similar. The main advantage of using :class:`sklearn.linear_model.SGDOneClassSVM` is that it scales linearly with the number of samples. 
:class:`sklearn.covariance.EllipticEnvelope` assumes the data is Gaussian and learns an ellipse. It thus degrades when the data is not unimodal. Notice however that this estimator is robust to outliers. :class:`~sklearn.ensemble.IsolationForest` and :class:`~sklearn.neighbors.LocalOutlierFactor` seem to perform reasonably well for multi-modal data sets. The advantage of :class:`~sklearn.neighbors.LocalOutlierFactor` over the other estimators is shown for the third data set, where the two modes have different densities. This advantage is explained by the local aspect of LOF, meaning that it only compares the score of abnormality of one sample with the scores of its neighbors. Finally, for the last data set, it is hard to say that one sample is more abnormal than another sample as they are uniformly distributed in a hypercube. Except for the :class:`~sklearn.svm.OneClassSVM` which overfits a little, all estimators present decent solutions for this situation. In such a case, it would be wise to look more closely at the scores of abnormality of the samples as a good estimator should assign similar scores to all the samples. While these examples give some intuition about the algorithms, this intuition might not apply to very high dimensional data. Finally, note that parameters of the models have been here handpicked but that in practice they need to be adjusted. In the absence of labelled data, the problem is completely unsupervised so model selection can be a challenge. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import time import matplotlib import matplotlib.pyplot as plt import numpy as np from sklearn import svm from sklearn.covariance import EllipticEnvelope from sklearn.datasets import make_blobs, make_moons from sklearn.ensemble import IsolationForest from sklearn.kernel_approximation import Nystroem from sklearn.linear_model import SGDOneClassSVM from sklearn.neighbors import LocalOutlierFactor from sklearn.pipeline import make_pipeline matplotlib.rcParams["contour.negative_linestyle"] = "solid" # Example settings n_samples = 300 outliers_fraction = 0.15 n_outliers = int(outliers_fraction * n_samples) n_inliers = n_samples - n_outliers # define outlier/anomaly detection methods to be compared. # the SGDOneClassSVM must be used in a pipeline with a kernel approximation # to give similar results to the OneClassSVM anomaly_algorithms = [ ( "Robust covariance", EllipticEnvelope(contamination=outliers_fraction, random_state=42), ), ("One-Class SVM", svm.OneClassSVM(nu=outliers_fraction, kernel="rbf", gamma=0.1)), ( "One-Class SVM (SGD)", make_pipeline( Nystroem(gamma=0.1, random_state=42, n_components=150), SGDOneClassSVM( nu=outliers_fraction, shuffle=True, fit_intercept=True, random_state=42, tol=1e-6, ), ), ), ( "Isolation Forest", IsolationForest(contamination=outliers_fraction, random_state=42), ), ( "Local Outlier Factor", LocalOutlierFactor(n_neighbors=35, contamination=outliers_fraction), ), ] # Define datasets blobs_params = dict(random_state=0, n_samples=n_inliers, n_features=2) datasets = [ make_blobs(centers=[[0, 0], [0, 0]], cluster_std=0.5, **blobs_params)[0], make_blobs(centers=[[2, 2], [-2, -2]], cluster_std=[0.5, 0.5], **blobs_params)[0], make_blobs(centers=[[2, 2], [-2, -2]], cluster_std=[1.5, 0.3], **blobs_params)[0], 4.0 * ( make_moons(n_samples=n_samples, noise=0.05, random_state=0)[0] - np.array([0.5, 0.25]) ), 14.0 * (np.random.RandomState(42).rand(n_samples, 2) - 
0.5), ] # Compare given classifiers under given settings xx, yy = np.meshgrid(np.linspace(-7, 7, 150), np.linspace(-7, 7, 150)) plt.figure(figsize=(len(anomaly_algorithms) * 2 + 4, 12.5)) plt.subplots_adjust( left=0.02, right=0.98, bottom=0.001, top=0.96, wspace=0.05, hspace=0.01 ) plot_num = 1 rng = np.random.RandomState(42) for i_dataset, X in enumerate(datasets): # Add outliers X = np.concatenate([X, rng.uniform(low=-6, high=6, size=(n_outliers, 2))], axis=0) for name, algorithm in anomaly_algorithms: t0 = time.time() algorithm.fit(X) t1 = time.time() plt.subplot(len(datasets), len(anomaly_algorithms), plot_num) if i_dataset == 0: plt.title(name, size=18) # fit the data and tag outliers if name == "Local Outlier Factor": y_pred = algorithm.fit_predict(X) else: y_pred = algorithm.fit(X).predict(X) # plot the levels lines and the points if name != "Local Outlier Factor": # LOF does not implement predict Z = algorithm.predict(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors="black") colors = np.array(["#377eb8", "#ff7f00"]) plt.scatter(X[:, 0], X[:, 1], s=10, color=colors[(y_pred + 1) // 2]) plt.xlim(-7, 7) plt.ylim(-7, 7) plt.xticks(()) plt.yticks(()) plt.text( 0.99, 0.01, ("%.2fs" % (t1 - t0)).lstrip("0"), transform=plt.gca().transAxes, size=15, horizontalalignment="right", ) plot_num += 1 plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/miscellaneous/plot_isotonic_regression.py
examples/miscellaneous/plot_isotonic_regression.py
""" =================== Isotonic Regression =================== An illustration of the isotonic regression on generated data (non-linear monotonic trend with homoscedastic uniform noise). The isotonic regression algorithm finds a non-decreasing approximation of a function while minimizing the mean squared error on the training data. The benefit of such a non-parametric model is that it does not assume any shape for the target function besides monotonicity. For comparison a linear regression is also presented. The plot on the right-hand side shows the model prediction function that results from the linear interpolation of threshold points. The threshold points are a subset of the training input observations and their matching target values are computed by the isotonic non-parametric fit. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from matplotlib.collections import LineCollection from sklearn.isotonic import IsotonicRegression from sklearn.linear_model import LinearRegression from sklearn.utils import check_random_state n = 100 x = np.arange(n) rs = check_random_state(0) y = rs.randint(-50, 50, size=(n,)) + 50.0 * np.log1p(np.arange(n)) # %% # Fit IsotonicRegression and LinearRegression models: ir = IsotonicRegression(out_of_bounds="clip") y_ = ir.fit_transform(x, y) lr = LinearRegression() lr.fit(x[:, np.newaxis], y) # x needs to be 2d for LinearRegression # %% # Plot results: segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)] lc = LineCollection(segments, zorder=0) lc.set_array(np.ones(len(y))) lc.set_linewidths(np.full(n, 0.5)) fig, (ax0, ax1) = plt.subplots(ncols=2, figsize=(12, 6)) ax0.plot(x, y, "C0.", markersize=12) ax0.plot(x, y_, "C1.-", markersize=12) ax0.plot(x, lr.predict(x[:, np.newaxis]), "C2-") ax0.add_collection(lc) ax0.legend(("Training data", "Isotonic fit", "Linear fit"), loc="lower right") ax0.set_title("Isotonic regression fit on noisy data (n=%d)" % n) 
x_test = np.linspace(-10, 110, 1000) ax1.plot(x_test, ir.predict(x_test), "C1-") ax1.plot(ir.X_thresholds_, ir.y_thresholds_, "C1.", markersize=12) ax1.set_title("Prediction function (%d thresholds)" % len(ir.X_thresholds_)) plt.show() # %% # Note that we explicitly passed `out_of_bounds="clip"` to the constructor of # `IsotonicRegression` to control the way the model extrapolates outside of the # range of data observed in the training set. This "clipping" extrapolation can # be seen on the plot of the decision function on the right-hand.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/miscellaneous/plot_kernel_ridge_regression.py
examples/miscellaneous/plot_kernel_ridge_regression.py
""" ============================================= Comparison of kernel ridge regression and SVR ============================================= Both kernel ridge regression (KRR) and SVR learn a non-linear function by employing the kernel trick, i.e., they learn a linear function in the space induced by the respective kernel which corresponds to a non-linear function in the original space. They differ in the loss functions (ridge versus epsilon-insensitive loss). In contrast to SVR, fitting a KRR can be done in closed-form and is typically faster for medium-sized datasets. On the other hand, the learned model is non-sparse and thus slower than SVR at prediction-time. This example illustrates both methods on an artificial dataset, which consists of a sinusoidal target function and strong noise added to every fifth datapoint. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Generate sample data # -------------------- import numpy as np rng = np.random.RandomState(42) X = 5 * rng.rand(10000, 1) y = np.sin(X).ravel() # Add noise to targets y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5)) X_plot = np.linspace(0, 5, 100000)[:, None] # %% # Construct the kernel-based regression models # -------------------------------------------- from sklearn.kernel_ridge import KernelRidge from sklearn.model_selection import GridSearchCV from sklearn.svm import SVR train_size = 100 svr = GridSearchCV( SVR(kernel="rbf", gamma=0.1), param_grid={"C": [1e0, 1e1, 1e2, 1e3], "gamma": np.logspace(-2, 2, 5)}, ) kr = GridSearchCV( KernelRidge(kernel="rbf", gamma=0.1), param_grid={"alpha": [1e0, 0.1, 1e-2, 1e-3], "gamma": np.logspace(-2, 2, 5)}, ) # %% # Compare times of SVR and Kernel Ridge Regression # ------------------------------------------------ import time t0 = time.time() svr.fit(X[:train_size], y[:train_size]) svr_fit = time.time() - t0 print(f"Best SVR with params: {svr.best_params_} and R2 score: {svr.best_score_:.3f}") print("SVR complexity and 
bandwidth selected and model fitted in %.3f s" % svr_fit) t0 = time.time() kr.fit(X[:train_size], y[:train_size]) kr_fit = time.time() - t0 print(f"Best KRR with params: {kr.best_params_} and R2 score: {kr.best_score_:.3f}") print("KRR complexity and bandwidth selected and model fitted in %.3f s" % kr_fit) sv_ratio = svr.best_estimator_.support_.shape[0] / train_size print("Support vector ratio: %.3f" % sv_ratio) t0 = time.time() y_svr = svr.predict(X_plot) svr_predict = time.time() - t0 print("SVR prediction for %d inputs in %.3f s" % (X_plot.shape[0], svr_predict)) t0 = time.time() y_kr = kr.predict(X_plot) kr_predict = time.time() - t0 print("KRR prediction for %d inputs in %.3f s" % (X_plot.shape[0], kr_predict)) # %% # Look at the results # ------------------- import matplotlib.pyplot as plt sv_ind = svr.best_estimator_.support_ plt.scatter( X[sv_ind], y[sv_ind], c="r", s=50, label="SVR support vectors", zorder=2, edgecolors=(0, 0, 0), ) plt.scatter(X[:100], y[:100], c="k", label="data", zorder=1, edgecolors=(0, 0, 0)) plt.plot( X_plot, y_svr, c="r", label="SVR (fit: %.3fs, predict: %.3fs)" % (svr_fit, svr_predict), ) plt.plot( X_plot, y_kr, c="g", label="KRR (fit: %.3fs, predict: %.3fs)" % (kr_fit, kr_predict) ) plt.xlabel("data") plt.ylabel("target") plt.title("SVR versus Kernel Ridge") _ = plt.legend() # %% # The previous figure compares the learned model of KRR and SVR when both # complexity/regularization and bandwidth of the RBF kernel are optimized using # grid-search. The learned functions are very similar; however, fitting KRR is # approximately 3-4 times faster than fitting SVR (both with grid-search). # # Prediction of 100000 target values could be in theory approximately three # times faster with SVR since it has learned a sparse model using only # approximately 1/3 of the training datapoints as support vectors. 
However, in # practice, this is not necessarily the case because of implementation details # in the way the kernel function is computed for each model that can make the # KRR model as fast or even faster despite computing more arithmetic # operations. # %% # Visualize training and prediction times # --------------------------------------- plt.figure() sizes = np.logspace(1, 3.8, 7).astype(int) for name, estimator in { "KRR": KernelRidge(kernel="rbf", alpha=0.01, gamma=10), "SVR": SVR(kernel="rbf", C=1e2, gamma=10), }.items(): train_time = [] test_time = [] for train_test_size in sizes: t0 = time.time() estimator.fit(X[:train_test_size], y[:train_test_size]) train_time.append(time.time() - t0) t0 = time.time() estimator.predict(X_plot[:1000]) test_time.append(time.time() - t0) plt.plot( sizes, train_time, "o-", color="r" if name == "SVR" else "g", label="%s (train)" % name, ) plt.plot( sizes, test_time, "o--", color="r" if name == "SVR" else "g", label="%s (test)" % name, ) plt.xscale("log") plt.yscale("log") plt.xlabel("Train size") plt.ylabel("Time (seconds)") plt.title("Execution Time") _ = plt.legend(loc="best") # %% # This figure compares the time for fitting and prediction of KRR and SVR for # different sizes of the training set. Fitting KRR is faster than SVR for # medium-sized training sets (less than a few thousand samples); however, for # larger training sets SVR scales better. With regard to prediction time, SVR # should be faster than KRR for all sizes of the training set because of the # learned sparse solution, however this is not necessarily the case in practice # because of implementation details. Note that the degree of sparsity and thus # the prediction time depends on the parameters epsilon and C of the SVR. 
# %% # Visualize the learning curves # ----------------------------- from sklearn.model_selection import LearningCurveDisplay _, ax = plt.subplots() svr = SVR(kernel="rbf", C=1e1, gamma=0.1) kr = KernelRidge(kernel="rbf", alpha=0.1, gamma=0.1) common_params = { "X": X[:100], "y": y[:100], "train_sizes": np.linspace(0.1, 1, 10), "scoring": "neg_mean_squared_error", "negate_score": True, "score_name": "Mean Squared Error", "score_type": "test", "std_display_style": None, "ax": ax, } LearningCurveDisplay.from_estimator(svr, **common_params) LearningCurveDisplay.from_estimator(kr, **common_params) ax.set_title("Learning curves") ax.legend(handles=ax.get_legend_handles_labels()[0], labels=["SVR", "KRR"]) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/miscellaneous/plot_display_object_visualization.py
examples/miscellaneous/plot_display_object_visualization.py
""" =================================== Visualizations with Display Objects =================================== .. currentmodule:: sklearn.metrics In this example, we will construct display objects, :class:`ConfusionMatrixDisplay`, :class:`RocCurveDisplay`, and :class:`PrecisionRecallDisplay` directly from their respective metrics. This is an alternative to using their corresponding plot functions when a model's predictions are already computed or expensive to compute. Note that this is advanced usage, and in general we recommend using their respective plot functions. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Load Data and train model # ------------------------- # For this example, we load a blood transfusion service center data set from # `OpenML <https://www.openml.org/d/1464>`_. This is a binary classification # problem where the target is whether an individual donated blood. Then the # data is split into a train and test dataset and a logistic regression is # fitted with the train dataset. from sklearn.datasets import fetch_openml from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler X, y = fetch_openml(data_id=1464, return_X_y=True) X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y) clf = make_pipeline(StandardScaler(), LogisticRegression(random_state=0)) clf.fit(X_train, y_train) # %% # Create :class:`ConfusionMatrixDisplay` # ###################################### # With the fitted model, we compute the predictions of the model on the test # dataset. 
These predictions are used to compute the confusion matrix which # is plotted with the :class:`ConfusionMatrixDisplay` from sklearn.metrics import ConfusionMatrixDisplay, confusion_matrix y_pred = clf.predict(X_test) cm = confusion_matrix(y_test, y_pred) cm_display = ConfusionMatrixDisplay(cm).plot() # %% # Create :class:`RocCurveDisplay` # ############################### # The roc curve requires either the probabilities or the non-thresholded # decision values from the estimator. Since the logistic regression provides # a decision function, we will use it to plot the roc curve: from sklearn.metrics import RocCurveDisplay, roc_curve y_score = clf.decision_function(X_test) fpr, tpr, _ = roc_curve(y_test, y_score, pos_label=clf.classes_[1]) roc_display = RocCurveDisplay(fpr=fpr, tpr=tpr).plot() # %% # Create :class:`PrecisionRecallDisplay` # ###################################### # Similarly, the precision recall curve can be plotted using `y_score` from # the prevision sections. from sklearn.metrics import PrecisionRecallDisplay, precision_recall_curve prec, recall, _ = precision_recall_curve(y_test, y_score, pos_label=clf.classes_[1]) pr_display = PrecisionRecallDisplay(precision=prec, recall=recall).plot() # %% # Combining the display objects into a single plot # ################################################ # The display objects store the computed values that were passed as arguments. # This allows for the visualizations to be easliy combined using matplotlib's # API. In the following example, we place the displays next to each other in a # row. # sphinx_gallery_thumbnail_number = 4 import matplotlib.pyplot as plt fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 8)) roc_display.plot(ax=ax1) pr_display.plot(ax=ax2) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/miscellaneous/plot_kernel_approximation.py
examples/miscellaneous/plot_kernel_approximation.py
""" ================================================== Explicit feature map approximation for RBF kernels ================================================== An example illustrating the approximation of the feature map of an RBF kernel. .. currentmodule:: sklearn.kernel_approximation It shows how to use :class:`RBFSampler` and :class:`Nystroem` to approximate the feature map of an RBF kernel for classification with an SVM on the digits dataset. Results using a linear SVM in the original space, a linear SVM using the approximate mappings and using a kernelized SVM are compared. Timings and accuracy for varying amounts of Monte Carlo samplings (in the case of :class:`RBFSampler`, which uses random Fourier features) and different sized subsets of the training set (for :class:`Nystroem`) for the approximate mapping are shown. Please note that the dataset here is not large enough to show the benefits of kernel approximation, as the exact SVM is still reasonably fast. Sampling more dimensions clearly leads to better classification results, but comes at a greater cost. This means there is a tradeoff between runtime and accuracy, given by the parameter n_components. Note that solving the Linear SVM and also the approximate kernel SVM could be greatly accelerated by using stochastic gradient descent via :class:`~sklearn.linear_model.SGDClassifier`. This is not easily possible for the case of the kernelized SVM. 
""" # %% # Python package and dataset imports, load dataset # --------------------------------------------------- # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # Standard scientific Python imports from time import time import matplotlib.pyplot as plt import numpy as np # Import datasets, classifiers and performance metrics from sklearn import datasets, pipeline, svm from sklearn.decomposition import PCA from sklearn.kernel_approximation import Nystroem, RBFSampler # The digits dataset digits = datasets.load_digits(n_class=9) # %% # Timing and accuracy plots # -------------------------------------------------- # To apply a classifier on this data, we need to flatten the image, to # turn the data in a (samples, feature) matrix: n_samples = len(digits.data) data = digits.data / 16.0 data -= data.mean(axis=0) # We learn the digits on the first half of the digits data_train, targets_train = (data[: n_samples // 2], digits.target[: n_samples // 2]) # Now predict the value of the digit on the second half: data_test, targets_test = (data[n_samples // 2 :], digits.target[n_samples // 2 :]) # data_test = scaler.transform(data_test) # Create a classifier: a support vector classifier kernel_svm = svm.SVC(gamma=0.2) linear_svm = svm.LinearSVC(random_state=42) # create pipeline from kernel approximation # and linear svm feature_map_fourier = RBFSampler(gamma=0.2, random_state=1) feature_map_nystroem = Nystroem(gamma=0.2, random_state=1) fourier_approx_svm = pipeline.Pipeline( [ ("feature_map", feature_map_fourier), ("svm", svm.LinearSVC(random_state=42)), ] ) nystroem_approx_svm = pipeline.Pipeline( [ ("feature_map", feature_map_nystroem), ("svm", svm.LinearSVC(random_state=42)), ] ) # fit and predict using linear and kernel svm: kernel_svm_time = time() kernel_svm.fit(data_train, targets_train) kernel_svm_score = kernel_svm.score(data_test, targets_test) kernel_svm_time = time() - kernel_svm_time linear_svm_time = time() 
linear_svm.fit(data_train, targets_train) linear_svm_score = linear_svm.score(data_test, targets_test) linear_svm_time = time() - linear_svm_time sample_sizes = 30 * np.arange(1, 10) fourier_scores = [] nystroem_scores = [] fourier_times = [] nystroem_times = [] for D in sample_sizes: fourier_approx_svm.set_params(feature_map__n_components=D) nystroem_approx_svm.set_params(feature_map__n_components=D) start = time() nystroem_approx_svm.fit(data_train, targets_train) nystroem_times.append(time() - start) start = time() fourier_approx_svm.fit(data_train, targets_train) fourier_times.append(time() - start) fourier_score = fourier_approx_svm.score(data_test, targets_test) nystroem_score = nystroem_approx_svm.score(data_test, targets_test) nystroem_scores.append(nystroem_score) fourier_scores.append(fourier_score) # plot the results: plt.figure(figsize=(16, 4)) accuracy = plt.subplot(121) # second y axis for timings timescale = plt.subplot(122) accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel") timescale.plot(sample_sizes, nystroem_times, "--", label="Nystroem approx. kernel") accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel") timescale.plot(sample_sizes, fourier_times, "--", label="Fourier approx. 
kernel") # horizontal lines for exact rbf and linear kernels: accuracy.plot( [sample_sizes[0], sample_sizes[-1]], [linear_svm_score, linear_svm_score], label="linear svm", ) timescale.plot( [sample_sizes[0], sample_sizes[-1]], [linear_svm_time, linear_svm_time], "--", label="linear svm", ) accuracy.plot( [sample_sizes[0], sample_sizes[-1]], [kernel_svm_score, kernel_svm_score], label="rbf svm", ) timescale.plot( [sample_sizes[0], sample_sizes[-1]], [kernel_svm_time, kernel_svm_time], "--", label="rbf svm", ) # vertical line for dataset dimensionality = 64 accuracy.plot([64, 64], [0.7, 1], label="n_features") # legends and labels accuracy.set_title("Classification accuracy") timescale.set_title("Training times") accuracy.set_xlim(sample_sizes[0], sample_sizes[-1]) accuracy.set_xticks(()) accuracy.set_ylim(np.min(fourier_scores), 1) timescale.set_xlabel("Sampling steps = transformed feature dimension") accuracy.set_ylabel("Classification accuracy") timescale.set_ylabel("Training time in seconds") accuracy.legend(loc="best") timescale.legend(loc="best") plt.tight_layout() plt.show() # %% # Decision Surfaces of RBF Kernel SVM and Linear SVM # -------------------------------------------------------- # The second plot visualized the decision surfaces of the RBF kernel SVM and # the linear SVM with approximate kernel maps. # The plot shows decision surfaces of the classifiers projected onto # the first two principal components of the data. This visualization should # be taken with a grain of salt since it is just an interesting slice through # the decision surface in 64 dimensions. In particular note that # a datapoint (represented as a dot) does not necessarily be classified # into the region it is lying in, since it will not lie on the plane # that the first two principal components span. # The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail # in :ref:`kernel_approximation`. 
# visualize the decision surface, projected down to the first # two principal components of the dataset pca = PCA(n_components=8, random_state=42).fit(data_train) X = pca.transform(data_train) # Generate grid along first two principal components multiples = np.arange(-2, 2, 0.1) # steps along first component first = multiples[:, np.newaxis] * pca.components_[0, :] # steps along second component second = multiples[:, np.newaxis] * pca.components_[1, :] # combine grid = first[np.newaxis, :, :] + second[:, np.newaxis, :] flat_grid = grid.reshape(-1, data.shape[1]) # title for the plots titles = [ "SVC with rbf kernel", "SVC (linear kernel)\n with Fourier rbf feature map\nn_components=100", "SVC (linear kernel)\n with Nystroem rbf feature map\nn_components=100", ] plt.figure(figsize=(18, 7.5)) plt.rcParams.update({"font.size": 14}) # predict and plot for i, clf in enumerate((kernel_svm, nystroem_approx_svm, fourier_approx_svm)): # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, x_max]x[y_min, y_max]. plt.subplot(1, 3, i + 1) Z = clf.predict(flat_grid) # Put the result into a color plot Z = Z.reshape(grid.shape[:-1]) levels = np.arange(10) lv_eps = 0.01 # Adjust a mapping from calculated contour levels to color. plt.contourf( multiples, multiples, Z, levels=levels - lv_eps, cmap=plt.cm.tab10, vmin=0, vmax=10, alpha=0.7, ) plt.axis("off") # Plot also the training points plt.scatter( X[:, 0], X[:, 1], c=targets_train, cmap=plt.cm.tab10, edgecolors=(0, 0, 0), vmin=0, vmax=10, ) plt.title(titles[i]) plt.tight_layout() plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/miscellaneous/plot_johnson_lindenstrauss_bound.py
examples/miscellaneous/plot_johnson_lindenstrauss_bound.py
r""" ===================================================================== The Johnson-Lindenstrauss bound for embedding with random projections ===================================================================== The `Johnson-Lindenstrauss lemma`_ states that any high dimensional dataset can be randomly projected into a lower dimensional Euclidean space while controlling the distortion in the pairwise distances. .. _`Johnson-Lindenstrauss lemma`: https://en.wikipedia.org/wiki/\ Johnson%E2%80%93Lindenstrauss_lemma """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import sys from time import time import matplotlib.pyplot as plt import numpy as np from sklearn.datasets import fetch_20newsgroups_vectorized, load_digits from sklearn.metrics.pairwise import euclidean_distances from sklearn.random_projection import ( SparseRandomProjection, johnson_lindenstrauss_min_dim, ) # %% # Theoretical bounds # ================== # The distortion introduced by a random projection `p` is asserted by # the fact that `p` is defining an eps-embedding with good probability # as defined by: # # .. math:: # (1 - eps) \|u - v\|^2 < \|p(u) - p(v)\|^2 < (1 + eps) \|u - v\|^2 # # Where `u` and `v` are any rows taken from a dataset of shape `(n_samples, # n_features)` and `p` is a projection by a random Gaussian `N(0, 1)` matrix # of shape `(n_components, n_features)` (or a sparse Achlioptas matrix). # # The minimum number of components to guarantees the eps-embedding is # given by: # # .. math:: # n\_components \geq 4 log(n\_samples) / (eps^2 / 2 - eps^3 / 3) # # # The first plot shows that with an increasing number of samples ``n_samples``, # the minimal number of dimensions ``n_components`` increased logarithmically # in order to guarantee an ``eps``-embedding. 
# range of admissible distortions eps_range = np.linspace(0.1, 0.99, 5) colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range))) # range of number of samples (observation) to embed n_samples_range = np.logspace(1, 9, 9) plt.figure() for eps, color in zip(eps_range, colors): min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps) plt.loglog(n_samples_range, min_n_components, color=color) plt.legend([f"eps = {eps:0.1f}" for eps in eps_range], loc="lower right") plt.xlabel("Number of observations to eps-embed") plt.ylabel("Minimum number of dimensions") plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components") plt.show() # %% # The second plot shows that an increase of the admissible # distortion ``eps`` allows to reduce drastically the minimal number of # dimensions ``n_components`` for a given number of samples ``n_samples`` # range of admissible distortions eps_range = np.linspace(0.01, 0.99, 100) # range of number of samples (observation) to embed n_samples_range = np.logspace(2, 6, 5) colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range))) plt.figure() for n_samples, color in zip(n_samples_range, colors): min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range) plt.semilogy(eps_range, min_n_components, color=color) plt.legend([f"n_samples = {n}" for n in n_samples_range], loc="upper right") plt.xlabel("Distortion eps") plt.ylabel("Minimum number of dimensions") plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps") plt.show() # %% # Empirical validation # ==================== # # We validate the above bounds on the 20 newsgroups text document # (TF-IDF word frequencies) dataset or on the digits dataset: # # - for the 20 newsgroups dataset some 300 documents with 100k # features in total are projected using a sparse random matrix to smaller # euclidean spaces with various values for the target number of dimensions # ``n_components``. 
# # - for the digits dataset, some 8x8 gray level pixels data for 300 # handwritten digits pictures are randomly projected to spaces for various # larger number of dimensions ``n_components``. # # The default dataset is the 20 newsgroups dataset. To run the example on the # digits dataset, pass the ``--use-digits-dataset`` command line argument to # this script. if "--use-digits-dataset" in sys.argv: data = load_digits().data[:300] else: data = fetch_20newsgroups_vectorized().data[:300] # %% # For each value of ``n_components``, we plot: # # - 2D distribution of sample pairs with pairwise distances in original # and projected spaces as x- and y-axis respectively. # # - 1D histogram of the ratio of those distances (projected / original). n_samples, n_features = data.shape print( f"Embedding {n_samples} samples with dim {n_features} using various " "random projections" ) n_components_range = np.array([300, 1_000, 10_000]) dists = euclidean_distances(data, squared=True).ravel() # select only non-identical samples pairs nonzero = dists != 0 dists = dists[nonzero] for n_components in n_components_range: t0 = time() rp = SparseRandomProjection(n_components=n_components) projected_data = rp.fit_transform(data) print( f"Projected {n_samples} samples from {n_features} to {n_components} in " f"{time() - t0:0.3f}s" ) if hasattr(rp, "components_"): n_bytes = rp.components_.data.nbytes n_bytes += rp.components_.indices.nbytes print(f"Random matrix with size: {n_bytes / 1e6:0.3f} MB") projected_dists = euclidean_distances(projected_data, squared=True).ravel()[nonzero] plt.figure() min_dist = min(projected_dists.min(), dists.min()) max_dist = max(projected_dists.max(), dists.max()) plt.hexbin( dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu, extent=[min_dist, max_dist, min_dist, max_dist], ) plt.xlabel("Pairwise squared distances in original space") plt.ylabel("Pairwise squared distances in projected space") plt.title("Pairwise distances distribution for n_components=%d" % 
n_components) cb = plt.colorbar() cb.set_label("Sample pairs counts") rates = projected_dists / dists print(f"Mean distances rate: {np.mean(rates):.2f} ({np.std(rates):.2f})") plt.figure() plt.hist(rates, bins=50, range=(0.0, 2.0), edgecolor="k", density=True) plt.xlabel("Squared distances rate: projected / original") plt.ylabel("Distribution of samples pairs") plt.title("Histogram of pairwise distance rates for n_components=%d" % n_components) # TODO: compute the expected value of eps and add them to the previous plot # as vertical lines / region plt.show() # %% # We can see that for low values of ``n_components`` the distribution is wide # with many distorted pairs and a skewed distribution (due to the hard # limit of zero ratio on the left as distances are always positives) # while for larger values of `n_components` the distortion is controlled # and the distances are well preserved by the random projection. # # Remarks # ======= # # According to the JL lemma, projecting 300 samples without too much distortion # will require at least several thousands dimensions, irrespective of the # number of features of the original dataset. # # Hence using random projections on the digits dataset which only has 64 # features in the input space does not make sense: it does not allow # for dimensionality reduction in this case. # # On the twenty newsgroups on the other hand the dimensionality can be # decreased from 56,436 down to 10,000 while reasonably preserving # pairwise distances.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/miscellaneous/plot_pipeline_display.py
examples/miscellaneous/plot_pipeline_display.py
""" ================================================================= Displaying Pipelines ================================================================= The default configuration for displaying a pipeline in a Jupyter Notebook is `'diagram'` where `set_config(display='diagram')`. To deactivate HTML representation, use `set_config(display='text')`. To see more detailed steps in the visualization of the pipeline, click on the steps in the pipeline. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Displaying a Pipeline with a Preprocessing Step and Classifier # ############################################################## # This section constructs a :class:`~sklearn.pipeline.Pipeline` with a preprocessing # step, :class:`~sklearn.preprocessing.StandardScaler`, and classifier, # :class:`~sklearn.linear_model.LogisticRegression`, and displays its visual # representation. from sklearn import set_config from sklearn.linear_model import LogisticRegression from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler steps = [ ("preprocessing", StandardScaler()), ("classifier", LogisticRegression()), ] pipe = Pipeline(steps) # %% # To visualize the diagram, the default is `display='diagram'`. set_config(display="diagram") pipe # click on the diagram below to see the details of each step # %% # To view the text pipeline, change to `display='text'`. 
set_config(display="text") pipe # %% # Put back the default display set_config(display="diagram") # %% # Displaying a Pipeline Chaining Multiple Preprocessing Steps & Classifier # ######################################################################## # This section constructs a :class:`~sklearn.pipeline.Pipeline` with multiple # preprocessing steps, :class:`~sklearn.preprocessing.PolynomialFeatures` and # :class:`~sklearn.preprocessing.StandardScaler`, and a classifier step, # :class:`~sklearn.linear_model.LogisticRegression`, and displays its visual # representation. from sklearn.linear_model import LogisticRegression from sklearn.pipeline import Pipeline from sklearn.preprocessing import PolynomialFeatures, StandardScaler steps = [ ("standard_scaler", StandardScaler()), ("polynomial", PolynomialFeatures(degree=3)), ("classifier", LogisticRegression(C=2.0)), ] pipe = Pipeline(steps) pipe # click on the diagram below to see the details of each step # %% # Displaying a Pipeline and Dimensionality Reduction and Classifier # ################################################################# # This section constructs a :class:`~sklearn.pipeline.Pipeline` with a # dimensionality reduction step, :class:`~sklearn.decomposition.PCA`, # a classifier, :class:`~sklearn.svm.SVC`, and displays its visual # representation. from sklearn.decomposition import PCA from sklearn.pipeline import Pipeline from sklearn.svm import SVC steps = [("reduce_dim", PCA(n_components=4)), ("classifier", SVC(kernel="linear"))] pipe = Pipeline(steps) pipe # click on the diagram below to see the details of each step # %% # Displaying a Complex Pipeline Chaining a Column Transformer # ########################################################### # This section constructs a complex :class:`~sklearn.pipeline.Pipeline` with a # :class:`~sklearn.compose.ColumnTransformer` and a classifier, # :class:`~sklearn.linear_model.LogisticRegression`, and displays its visual # representation. 
import numpy as np from sklearn.compose import ColumnTransformer from sklearn.impute import SimpleImputer from sklearn.linear_model import LogisticRegression from sklearn.pipeline import Pipeline, make_pipeline from sklearn.preprocessing import OneHotEncoder, StandardScaler numeric_preprocessor = Pipeline( steps=[ ("imputation_mean", SimpleImputer(missing_values=np.nan, strategy="mean")), ("scaler", StandardScaler()), ] ) categorical_preprocessor = Pipeline( steps=[ ( "imputation_constant", SimpleImputer(fill_value="missing", strategy="constant"), ), ("onehot", OneHotEncoder(handle_unknown="ignore")), ] ) preprocessor = ColumnTransformer( [ ("categorical", categorical_preprocessor, ["state", "gender"]), ("numerical", numeric_preprocessor, ["age", "weight"]), ] ) pipe = make_pipeline(preprocessor, LogisticRegression(max_iter=500)) pipe # click on the diagram below to see the details of each step # %% # Displaying a Grid Search over a Pipeline with a Classifier # ########################################################## # This section constructs a :class:`~sklearn.model_selection.GridSearchCV` # over a :class:`~sklearn.pipeline.Pipeline` with # :class:`~sklearn.ensemble.RandomForestClassifier` and displays its visual # representation. 
import numpy as np from sklearn.compose import ColumnTransformer from sklearn.ensemble import RandomForestClassifier from sklearn.impute import SimpleImputer from sklearn.model_selection import GridSearchCV from sklearn.pipeline import Pipeline, make_pipeline from sklearn.preprocessing import OneHotEncoder, StandardScaler numeric_preprocessor = Pipeline( steps=[ ("imputation_mean", SimpleImputer(missing_values=np.nan, strategy="mean")), ("scaler", StandardScaler()), ] ) categorical_preprocessor = Pipeline( steps=[ ( "imputation_constant", SimpleImputer(fill_value="missing", strategy="constant"), ), ("onehot", OneHotEncoder(handle_unknown="ignore")), ] ) preprocessor = ColumnTransformer( [ ("categorical", categorical_preprocessor, ["state", "gender"]), ("numerical", numeric_preprocessor, ["age", "weight"]), ] ) pipe = Pipeline( steps=[("preprocessor", preprocessor), ("classifier", RandomForestClassifier())] ) param_grid = { "classifier__n_estimators": [200, 500], "classifier__max_features": ["auto", "sqrt", "log2"], "classifier__max_depth": [4, 5, 6, 7, 8], "classifier__criterion": ["gini", "entropy"], } grid_search = GridSearchCV(pipe, param_grid=param_grid, n_jobs=1) grid_search # click on the diagram below to see the details of each step
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false