repo
stringlengths
7
90
file_url
stringlengths
81
315
file_path
stringlengths
4
228
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 14:38:15
2026-01-05 02:33:18
truncated
bool
2 classes
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/miscellaneous/plot_estimator_representation.py
examples/miscellaneous/plot_estimator_representation.py
""" =========================================== Displaying estimators and complex pipelines =========================================== This example illustrates different ways estimators and pipelines can be displayed. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from sklearn.compose import make_column_transformer from sklearn.impute import SimpleImputer from sklearn.linear_model import LogisticRegression from sklearn.pipeline import make_pipeline from sklearn.preprocessing import OneHotEncoder, StandardScaler # %% # Compact text representation # --------------------------- # # Estimators will only show the parameters that have been set to non-default # values when displayed as a string. This reduces the visual noise and makes it # easier to spot what the differences are when comparing instances. lr = LogisticRegression(l1_ratio=1) print(lr) # %% # Rich HTML representation # ------------------------ # In notebooks estimators and pipelines will use a rich HTML representation. # This is particularly useful to summarise the # structure of pipelines and other composite estimators, with interactivity to # provide detail. Click on the example image below to expand Pipeline # elements. See :ref:`visualizing_composite_estimators` for how you can use # this feature. num_proc = make_pipeline(SimpleImputer(strategy="median"), StandardScaler()) cat_proc = make_pipeline( SimpleImputer(strategy="constant", fill_value="missing"), OneHotEncoder(handle_unknown="ignore"), ) preprocessor = make_column_transformer( (num_proc, ("feat1", "feat3")), (cat_proc, ("feat0", "feat2")) ) clf = make_pipeline(preprocessor, LogisticRegression()) clf
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/preprocessing/plot_scaling_importance.py
examples/preprocessing/plot_scaling_importance.py
""" ============================= Importance of Feature Scaling ============================= Feature scaling through standardization, also called Z-score normalization, is an important preprocessing step for many machine learning algorithms. It involves rescaling each feature such that it has a standard deviation of 1 and a mean of 0. Even if tree based models are (almost) not affected by scaling, many other algorithms require features to be normalized, often for different reasons: to ease the convergence (such as a non-penalized logistic regression), to create a completely different model fit compared to the fit with unscaled data (such as KNeighbors models). The latter is demonstrated on the first part of the present example. On the second part of the example we show how Principal Component Analysis (PCA) is impacted by normalization of features. To illustrate this, we compare the principal components found using :class:`~sklearn.decomposition.PCA` on unscaled data with those obtained when using a :class:`~sklearn.preprocessing.StandardScaler` to scale data first. In the last part of the example we show the effect of the normalization on the accuracy of a model trained on PCA-reduced data. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Load and prepare data # ===================== # # The dataset used is the :ref:`wine_dataset` available at UCI. This dataset has # continuous features that are heterogeneous in scale due to differing # properties that they measure (e.g. alcohol content and malic acid). from sklearn.datasets import load_wine from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler X, y = load_wine(return_X_y=True, as_frame=True) scaler = StandardScaler().set_output(transform="pandas") X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.30, random_state=42 ) scaled_X_train = scaler.fit_transform(X_train) # %% # .. 
_neighbors_scaling: # # Effect of rescaling on a k-neighbors models # =========================================== # # For the sake of visualizing the decision boundary of a # :class:`~sklearn.neighbors.KNeighborsClassifier`, in this section we select a # subset of 2 features that have values with different orders of magnitude. # # Keep in mind that using a subset of the features to train the model may likely # leave out feature with high predictive impact, resulting in a decision # boundary that is much worse in comparison to a model trained on the full set # of features. import matplotlib.pyplot as plt from sklearn.inspection import DecisionBoundaryDisplay from sklearn.neighbors import KNeighborsClassifier X_plot = X[["proline", "hue"]] X_plot_scaled = scaler.fit_transform(X_plot) clf = KNeighborsClassifier(n_neighbors=20) def fit_and_plot_model(X_plot, y, clf, ax): clf.fit(X_plot, y) disp = DecisionBoundaryDisplay.from_estimator( clf, X_plot, response_method="predict", alpha=0.5, ax=ax, ) disp.ax_.scatter(X_plot["proline"], X_plot["hue"], c=y, s=20, edgecolor="k") disp.ax_.set_xlim((X_plot["proline"].min(), X_plot["proline"].max())) disp.ax_.set_ylim((X_plot["hue"].min(), X_plot["hue"].max())) return disp.ax_ fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(12, 6)) fit_and_plot_model(X_plot, y, clf, ax1) ax1.set_title("KNN without scaling") fit_and_plot_model(X_plot_scaled, y, clf, ax2) ax2.set_xlabel("scaled proline") ax2.set_ylabel("scaled hue") _ = ax2.set_title("KNN with scaling") # %% # Here the decision boundary shows that fitting scaled or non-scaled data lead # to completely different models. The reason is that the variable "proline" has # values which vary between 0 and 1,000; whereas the variable "hue" varies # between 1 and 10. Because of this, distances between samples are mostly # impacted by differences in values of "proline", while values of the "hue" will # be comparatively ignored. 
If one uses # :class:`~sklearn.preprocessing.StandardScaler` to normalize this database, # both scaled values lay approximately between -3 and 3 and the neighbors # structure will be impacted more or less equivalently by both variables. # # Effect of rescaling on a PCA dimensional reduction # ================================================== # # Dimensional reduction using :class:`~sklearn.decomposition.PCA` consists of # finding the features that maximize the variance. If one feature varies more # than the others only because of their respective scales, # :class:`~sklearn.decomposition.PCA` would determine that such feature # dominates the direction of the principal components. # # We can inspect the first principal components using all the original features: import pandas as pd from sklearn.decomposition import PCA pca = PCA(n_components=2).fit(X_train) scaled_pca = PCA(n_components=2).fit(scaled_X_train) X_train_transformed = pca.transform(X_train) X_train_std_transformed = scaled_pca.transform(scaled_X_train) first_pca_component = pd.DataFrame( pca.components_[0], index=X.columns, columns=["without scaling"] ) first_pca_component["with scaling"] = scaled_pca.components_[0] first_pca_component.plot.bar( title="Weights of the first principal component", figsize=(6, 8) ) _ = plt.tight_layout() # %% # Indeed we find that the "proline" feature dominates the direction of the first # principal component without scaling, being about two orders of magnitude above # the other features. This is contrasted when observing the first principal # component for the scaled version of the data, where the orders of magnitude # are roughly the same across all the features. 
# # We can visualize the distribution of the principal components in both cases: fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(10, 5)) target_classes = range(0, 3) colors = ("blue", "red", "green") markers = ("^", "s", "o") for target_class, color, marker in zip(target_classes, colors, markers): ax1.scatter( x=X_train_transformed[y_train == target_class, 0], y=X_train_transformed[y_train == target_class, 1], color=color, label=f"class {target_class}", alpha=0.5, marker=marker, ) ax2.scatter( x=X_train_std_transformed[y_train == target_class, 0], y=X_train_std_transformed[y_train == target_class, 1], color=color, label=f"class {target_class}", alpha=0.5, marker=marker, ) ax1.set_title("Unscaled training dataset after PCA") ax2.set_title("Standardized training dataset after PCA") for ax in (ax1, ax2): ax.set_xlabel("1st principal component") ax.set_ylabel("2nd principal component") ax.legend(loc="upper right") ax.grid() _ = plt.tight_layout() # %% # From the plot above we observe that scaling the features before reducing the # dimensionality results in components with the same order of magnitude. In this # case it also improves the separability of the classes. Indeed, in the next # section we confirm that a better separability has a good repercussion on the # overall model's performance. 
# # Effect of rescaling on model's performance # ========================================== # # First we show how the optimal regularization of a # :class:`~sklearn.linear_model.LogisticRegressionCV` depends on the scaling or # non-scaling of the data: import numpy as np from sklearn.linear_model import LogisticRegressionCV from sklearn.pipeline import make_pipeline Cs = np.logspace(-5, 5, 20) unscaled_clf = make_pipeline( pca, LogisticRegressionCV(Cs=Cs, use_legacy_attributes=False, l1_ratios=(0,)) ) unscaled_clf.fit(X_train, y_train) scaled_clf = make_pipeline( scaler, pca, LogisticRegressionCV(Cs=Cs, use_legacy_attributes=False, l1_ratios=(0,)), ) scaled_clf.fit(X_train, y_train) print(f"Optimal C for the unscaled PCA: {unscaled_clf[-1].C_:.4f}\n") print(f"Optimal C for the standardized data with PCA: {scaled_clf[-1].C_:.2f}") # %% # The need for regularization is higher (lower values of `C`) for the data that # was not scaled before applying PCA. We now evaluate the effect of scaling on # the accuracy and the mean log-loss of the optimal models: from sklearn.metrics import accuracy_score, log_loss y_pred = unscaled_clf.predict(X_test) y_pred_scaled = scaled_clf.predict(X_test) y_proba = unscaled_clf.predict_proba(X_test) y_proba_scaled = scaled_clf.predict_proba(X_test) print("Test accuracy for the unscaled PCA") print(f"{accuracy_score(y_test, y_pred):.2%}\n") print("Test accuracy for the standardized data with PCA") print(f"{accuracy_score(y_test, y_pred_scaled):.2%}\n") print("Log-loss for the unscaled PCA") print(f"{log_loss(y_test, y_proba):.3}\n") print("Log-loss for the standardized data with PCA") print(f"{log_loss(y_test, y_proba_scaled):.3}") # %% # A clear difference in prediction accuracies is observed when the data is # scaled before :class:`~sklearn.decomposition.PCA`, as it vastly outperforms # the unscaled version. 
This corresponds to the intuition obtained from the plot # in the previous section, where the components become linearly separable when # scaling before using :class:`~sklearn.decomposition.PCA`. # # Notice that in this case the models with scaled features perform better than # the models with non-scaled features because all the variables are expected to # be predictive and we rather avoid some of them being comparatively ignored. # # If the variables in lower scales were not predictive, one may experience a # decrease of the performance after scaling the features: noisy features would # contribute more to the prediction after scaling and therefore scaling would # increase overfitting. # # Last but not least, we observe that one achieves a lower log-loss by means of # the scaling step.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/preprocessing/plot_all_scaling.py
examples/preprocessing/plot_all_scaling.py
""" ============================================================= Compare the effect of different scalers on data with outliers ============================================================= Feature 0 (median income in a block) and feature 5 (average house occupancy) of the :ref:`california_housing_dataset` have very different scales and contain some very large outliers. These two characteristics lead to difficulties to visualize the data and, more importantly, they can degrade the predictive performance of many machine learning algorithms. Unscaled data can also slow down or even prevent the convergence of many gradient-based estimators. Indeed many estimators are designed with the assumption that each feature takes values close to zero or more importantly that all features vary on comparable scales. In particular, metric-based and gradient-based estimators often assume approximately standardized data (centered features with unit variances). A notable exception are decision tree-based estimators that are robust to arbitrary scaling of the data. This example uses different scalers, transformers, and normalizers to bring the data within a pre-defined range. Scalers are linear (or more precisely affine) transformers and differ from each other in the way they estimate the parameters used to shift and scale each feature. :class:`~sklearn.preprocessing.QuantileTransformer` provides non-linear transformations in which distances between marginal outliers and inliers are shrunk. :class:`~sklearn.preprocessing.PowerTransformer` provides non-linear transformations in which data is mapped to a normal distribution to stabilize variance and minimize skewness. Unlike the previous transformations, normalization refers to a per sample transformation instead of a per feature transformation. The following code is a bit verbose, feel free to jump directly to the analysis of the results_. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib as mpl import numpy as np from matplotlib import cm from matplotlib import pyplot as plt from sklearn.datasets import fetch_california_housing from sklearn.preprocessing import ( MaxAbsScaler, MinMaxScaler, Normalizer, PowerTransformer, QuantileTransformer, RobustScaler, StandardScaler, minmax_scale, ) dataset = fetch_california_housing() X_full, y_full = dataset.data, dataset.target feature_names = dataset.feature_names feature_mapping = { "MedInc": "Median income in block", "HouseAge": "Median house age in block", "AveRooms": "Average number of rooms", "AveBedrms": "Average number of bedrooms", "Population": "Block population", "AveOccup": "Average house occupancy", "Latitude": "House block latitude", "Longitude": "House block longitude", } # Take only 2 features to make visualization easier # Feature MedInc has a long tail distribution. # Feature AveOccup has a few but very large outliers. 
features = ["MedInc", "AveOccup"] features_idx = [feature_names.index(feature) for feature in features] X = X_full[:, features_idx] distributions = [ ("Unscaled data", X), ("Data after standard scaling", StandardScaler().fit_transform(X)), ("Data after min-max scaling", MinMaxScaler().fit_transform(X)), ("Data after max-abs scaling", MaxAbsScaler().fit_transform(X)), ( "Data after robust scaling", RobustScaler(quantile_range=(25, 75)).fit_transform(X), ), ( "Data after power transformation (Yeo-Johnson)", PowerTransformer(method="yeo-johnson").fit_transform(X), ), ( "Data after power transformation (Box-Cox)", PowerTransformer(method="box-cox").fit_transform(X), ), ( "Data after quantile transformation (uniform pdf)", QuantileTransformer( output_distribution="uniform", random_state=42 ).fit_transform(X), ), ( "Data after quantile transformation (gaussian pdf)", QuantileTransformer( output_distribution="normal", random_state=42 ).fit_transform(X), ), ("Data after sample-wise L2 normalizing", Normalizer().fit_transform(X)), ] # scale the output between 0 and 1 for the colorbar y = minmax_scale(y_full) # plasma does not exist in matplotlib < 1.5 cmap = getattr(cm, "plasma_r", cm.hot_r) def create_axes(title, figsize=(16, 6)): fig = plt.figure(figsize=figsize) fig.suptitle(title) # define the axis for the first plot left, width = 0.1, 0.22 bottom, height = 0.1, 0.7 bottom_h = height + 0.15 left_h = left + width + 0.02 rect_scatter = [left, bottom, width, height] rect_histx = [left, bottom_h, width, 0.1] rect_histy = [left_h, bottom, 0.05, height] ax_scatter = plt.axes(rect_scatter) ax_histx = plt.axes(rect_histx) ax_histy = plt.axes(rect_histy) # define the axis for the zoomed-in plot left = width + left + 0.2 left_h = left + width + 0.02 rect_scatter = [left, bottom, width, height] rect_histx = [left, bottom_h, width, 0.1] rect_histy = [left_h, bottom, 0.05, height] ax_scatter_zoom = plt.axes(rect_scatter) ax_histx_zoom = plt.axes(rect_histx) ax_histy_zoom = 
plt.axes(rect_histy) # define the axis for the colorbar left, width = width + left + 0.13, 0.01 rect_colorbar = [left, bottom, width, height] ax_colorbar = plt.axes(rect_colorbar) return ( (ax_scatter, ax_histy, ax_histx), (ax_scatter_zoom, ax_histy_zoom, ax_histx_zoom), ax_colorbar, ) def plot_distribution(axes, X, y, hist_nbins=50, title="", x0_label="", x1_label=""): ax, hist_X1, hist_X0 = axes ax.set_title(title) ax.set_xlabel(x0_label) ax.set_ylabel(x1_label) # The scatter plot colors = cmap(y) ax.scatter(X[:, 0], X[:, 1], alpha=0.5, marker="o", s=5, lw=0, c=colors) # Removing the top and the right spine for aesthetics # make nice axis layout ax.spines["top"].set_visible(False) ax.spines["right"].set_visible(False) ax.get_xaxis().tick_bottom() ax.get_yaxis().tick_left() ax.spines["left"].set_position(("outward", 10)) ax.spines["bottom"].set_position(("outward", 10)) # Histogram for axis X1 (feature 5) hist_X1.set_ylim(ax.get_ylim()) hist_X1.hist( X[:, 1], bins=hist_nbins, orientation="horizontal", color="grey", ec="grey" ) hist_X1.axis("off") # Histogram for axis X0 (feature 0) hist_X0.set_xlim(ax.get_xlim()) hist_X0.hist( X[:, 0], bins=hist_nbins, orientation="vertical", color="grey", ec="grey" ) hist_X0.axis("off") # %% # Two plots will be shown for each scaler/normalizer/transformer. The left # figure will show a scatter plot of the full data set while the right figure # will exclude the extreme values considering only 99 % of the data set, # excluding marginal outliers. In addition, the marginal distributions for each # feature will be shown on the sides of the scatter plot. 
def make_plot(item_idx): title, X = distributions[item_idx] ax_zoom_out, ax_zoom_in, ax_colorbar = create_axes(title) axarr = (ax_zoom_out, ax_zoom_in) plot_distribution( axarr[0], X, y, hist_nbins=200, x0_label=feature_mapping[features[0]], x1_label=feature_mapping[features[1]], title="Full data", ) # zoom-in zoom_in_percentile_range = (0, 99) cutoffs_X0 = np.percentile(X[:, 0], zoom_in_percentile_range) cutoffs_X1 = np.percentile(X[:, 1], zoom_in_percentile_range) non_outliers_mask = np.all(X > [cutoffs_X0[0], cutoffs_X1[0]], axis=1) & np.all( X < [cutoffs_X0[1], cutoffs_X1[1]], axis=1 ) plot_distribution( axarr[1], X[non_outliers_mask], y[non_outliers_mask], hist_nbins=50, x0_label=feature_mapping[features[0]], x1_label=feature_mapping[features[1]], title="Zoom-in", ) norm = mpl.colors.Normalize(y_full.min(), y_full.max()) mpl.colorbar.ColorbarBase( ax_colorbar, cmap=cmap, norm=norm, orientation="vertical", label="Color mapping for values of y", ) # %% # .. _results: # # Original data # ------------- # # Each transformation is plotted showing two transformed features, with the # left plot showing the entire dataset, and the right zoomed-in to show the # dataset without the marginal outliers. A large majority of the samples are # compacted to a specific range, [0, 10] for the median income and [0, 6] for # the average house occupancy. Note that there are some marginal outliers (some # blocks have average occupancy of more than 1200). Therefore, a specific # pre-processing can be very beneficial depending of the application. In the # following, we present some insights and behaviors of those pre-processing # methods in the presence of marginal outliers. make_plot(0) # %% # .. _plot_all_scaling_standard_scaler_section: # # StandardScaler # -------------- # # :class:`~sklearn.preprocessing.StandardScaler` removes the mean and scales # the data to unit variance. The scaling shrinks the range of the feature # values as shown in the left figure below. 
# However, the outliers have an influence when computing the empirical mean and # standard deviation. Note in particular that because the outliers on each # feature have different magnitudes, the spread of the transformed data on # each feature is very different: most of the data lie in the [-2, 4] range for # the transformed median income feature while the same data is squeezed in the # smaller [-0.2, 0.2] range for the transformed average house occupancy. # # :class:`~sklearn.preprocessing.StandardScaler` therefore cannot guarantee # balanced feature scales in the # presence of outliers. make_plot(1) # %% # .. _plot_all_scaling_minmax_scaler_section: # # MinMaxScaler # ------------ # # :class:`~sklearn.preprocessing.MinMaxScaler` rescales the data set such that # all feature values are in # the range [0, 1] as shown in the right panel below. However, this scaling # compresses all inliers into the narrow range [0, 0.005] for the transformed # average house occupancy. # # Both :class:`~sklearn.preprocessing.StandardScaler` and # :class:`~sklearn.preprocessing.MinMaxScaler` are very sensitive to the # presence of outliers. make_plot(2) # %% # .. _plot_all_scaling_max_abs_scaler_section: # # MaxAbsScaler # ------------ # # :class:`~sklearn.preprocessing.MaxAbsScaler` is similar to # :class:`~sklearn.preprocessing.MinMaxScaler` except that the # values are mapped across several ranges depending on whether negative # OR positive values are present. If only positive values are present, the # range is [0, 1]. If only negative values are present, the range is [-1, 0]. # If both negative and positive values are present, the range is [-1, 1]. # On positive only data, both :class:`~sklearn.preprocessing.MinMaxScaler` # and :class:`~sklearn.preprocessing.MaxAbsScaler` behave similarly. # :class:`~sklearn.preprocessing.MaxAbsScaler` therefore also suffers from # the presence of large outliers. make_plot(3) # %% # .. 
_plot_all_scaling_robust_scaler_section: # # RobustScaler # ------------ # # Unlike the previous scalers, the centering and scaling statistics of # :class:`~sklearn.preprocessing.RobustScaler` # are based on percentiles and are therefore not influenced by a small # number of very large marginal outliers. Consequently, the resulting range of # the transformed feature values is larger than for the previous scalers and, # more importantly, are approximately similar: for both features most of the # transformed values lie in a [-2, 3] range as seen in the zoomed-in figure. # Note that the outliers themselves are still present in the transformed data. # If a separate outlier clipping is desirable, a non-linear transformation is # required (see below). make_plot(4) # %% # .. _plot_all_scaling_power_transformer_section: # # PowerTransformer # ---------------- # # :class:`~sklearn.preprocessing.PowerTransformer` applies a power # transformation to each feature to make the data more Gaussian-like in order # to stabilize variance and minimize skewness. Currently the Yeo-Johnson # and Box-Cox transforms are supported and the optimal # scaling factor is determined via maximum likelihood estimation in both # methods. By default, :class:`~sklearn.preprocessing.PowerTransformer` applies # zero-mean, unit variance normalization. Note that # Box-Cox can only be applied to strictly positive data. Income and average # house occupancy happen to be strictly positive, but if negative values are # present the Yeo-Johnson transformed is preferred. make_plot(5) make_plot(6) # %% # .. _plot_all_scaling_quantile_transformer_section: # # QuantileTransformer (uniform output) # ------------------------------------ # # :class:`~sklearn.preprocessing.QuantileTransformer` applies a non-linear # transformation such that the # probability density function of each feature will be mapped to a uniform # or Gaussian distribution. 
In this case, all the data, including outliers, # will be mapped to a uniform distribution with the range [0, 1], making # outliers indistinguishable from inliers. # # :class:`~sklearn.preprocessing.RobustScaler` and # :class:`~sklearn.preprocessing.QuantileTransformer` are robust to outliers in # the sense that adding or removing outliers in the training set will yield # approximately the same transformation. But contrary to # :class:`~sklearn.preprocessing.RobustScaler`, # :class:`~sklearn.preprocessing.QuantileTransformer` will also automatically # collapse any outlier by setting them to the a priori defined range boundaries # (0 and 1). This can result in saturation artifacts for extreme values. make_plot(7) ############################################################################## # QuantileTransformer (Gaussian output) # ------------------------------------- # # To map to a Gaussian distribution, set the parameter # ``output_distribution='normal'``. make_plot(8) # %% # .. _plot_all_scaling_normalizer_section: # # Normalizer # ---------- # # The :class:`~sklearn.preprocessing.Normalizer` rescales the vector for each # sample to have unit norm, # independently of the distribution of the samples. It can be seen on both # figures below where all samples are mapped onto the unit circle. In our # example the two selected features have only positive values; therefore the # transformed data only lie in the positive quadrant. This would not be the # case if some original features had a mix of positive and negative values. make_plot(9) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/preprocessing/plot_target_encoder.py
examples/preprocessing/plot_target_encoder.py
""" ============================================ Comparing Target Encoder with Other Encoders ============================================ .. currentmodule:: sklearn.preprocessing The :class:`TargetEncoder` uses the value of the target to encode each categorical feature. In this example, we will compare three different approaches for handling categorical features: :class:`TargetEncoder`, :class:`OrdinalEncoder`, :class:`OneHotEncoder` and dropping the category. .. note:: `fit(X, y).transform(X)` does not equal `fit_transform(X, y)` because a cross fitting scheme is used in `fit_transform` for encoding. See the :ref:`User Guide <target_encoder>` for details. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Loading Data from OpenML # ======================== # First, we load the wine reviews dataset, where the target is the points given # be a reviewer: from sklearn.datasets import fetch_openml wine_reviews = fetch_openml(data_id=42074, as_frame=True) df = wine_reviews.frame df.head() # %% # For this example, we use the following subset of numerical and categorical # features in the data. The target are continuous values from 80 to 100: numerical_features = ["price"] categorical_features = [ "country", "province", "region_1", "region_2", "variety", "winery", ] target_name = "points" X = df[numerical_features + categorical_features] y = df[target_name] _ = y.hist() # %% # Training and Evaluating Pipelines with Different Encoders # ========================================================= # In this section, we will evaluate pipelines with # :class:`~sklearn.ensemble.HistGradientBoostingRegressor` with different encoding # strategies. 
First, we list out the encoders we will be using to preprocess # the categorical features: from sklearn.compose import ColumnTransformer from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder, TargetEncoder categorical_preprocessors = [ ("drop", "drop"), ("ordinal", OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1)), ( "one_hot", OneHotEncoder(handle_unknown="ignore", max_categories=20, sparse_output=False), ), ("target", TargetEncoder(target_type="continuous")), ] # %% # Next, we evaluate the models using cross validation and record the results: from sklearn.ensemble import HistGradientBoostingRegressor from sklearn.model_selection import cross_validate from sklearn.pipeline import make_pipeline n_cv_folds = 3 max_iter = 20 results = [] def evaluate_model_and_store(name, pipe): result = cross_validate( pipe, X, y, scoring="neg_root_mean_squared_error", cv=n_cv_folds, return_train_score=True, ) rmse_test_score = -result["test_score"] rmse_train_score = -result["train_score"] results.append( { "preprocessor": name, "rmse_test_mean": rmse_test_score.mean(), "rmse_test_std": rmse_train_score.std(), "rmse_train_mean": rmse_train_score.mean(), "rmse_train_std": rmse_train_score.std(), } ) for name, categorical_preprocessor in categorical_preprocessors: preprocessor = ColumnTransformer( [ ("numerical", "passthrough", numerical_features), ("categorical", categorical_preprocessor, categorical_features), ] ) pipe = make_pipeline( preprocessor, HistGradientBoostingRegressor(random_state=0, max_iter=max_iter) ) evaluate_model_and_store(name, pipe) # %% # Native Categorical Feature Support # ================================== # In this section, we build and evaluate a pipeline that uses native categorical # feature support in :class:`~sklearn.ensemble.HistGradientBoostingRegressor`, # which only supports up to 255 unique categories. 
In our dataset, the most of # the categorical features have more than 255 unique categories: n_unique_categories = df[categorical_features].nunique().sort_values(ascending=False) n_unique_categories # %% # To workaround the limitation above, we group the categorical features into # low cardinality and high cardinality features. The high cardinality features # will be target encoded and the low cardinality features will use the native # categorical feature in gradient boosting. high_cardinality_features = n_unique_categories[n_unique_categories > 255].index low_cardinality_features = n_unique_categories[n_unique_categories <= 255].index mixed_encoded_preprocessor = ColumnTransformer( [ ("numerical", "passthrough", numerical_features), ( "high_cardinality", TargetEncoder(target_type="continuous"), high_cardinality_features, ), ( "low_cardinality", OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1), low_cardinality_features, ), ], verbose_feature_names_out=False, ) # The output of the of the preprocessor must be set to pandas so the # gradient boosting model can detect the low cardinality features. 
mixed_encoded_preprocessor.set_output(transform="pandas") mixed_pipe = make_pipeline( mixed_encoded_preprocessor, HistGradientBoostingRegressor( random_state=0, max_iter=max_iter, categorical_features=low_cardinality_features ), ) mixed_pipe # %% # Finally, we evaluate the pipeline using cross validation and record the results: evaluate_model_and_store("mixed_target", mixed_pipe) # %% # Plotting the Results # ==================== # In this section, we display the results by plotting the test and train scores: import matplotlib.pyplot as plt import pandas as pd results_df = ( pd.DataFrame(results).set_index("preprocessor").sort_values("rmse_test_mean") ) fig, (ax1, ax2) = plt.subplots( 1, 2, figsize=(12, 8), sharey=True, constrained_layout=True ) xticks = range(len(results_df)) name_to_color = dict( zip((r["preprocessor"] for r in results), ["C0", "C1", "C2", "C3", "C4"]) ) for subset, ax in zip(["test", "train"], [ax1, ax2]): mean, std = f"rmse_{subset}_mean", f"rmse_{subset}_std" data = results_df[[mean, std]].sort_values(mean) ax.bar( x=xticks, height=data[mean], yerr=data[std], width=0.9, color=[name_to_color[name] for name in data.index], ) ax.set( title=f"RMSE ({subset.title()})", xlabel="Encoding Scheme", xticks=xticks, xticklabels=data.index, ) # %% # When evaluating the predictive performance on the test set, dropping the # categories perform the worst and the target encoders performs the best. This # can be explained as follows: # # - Dropping the categorical features makes the pipeline less expressive and # underfitting as a result; # - Due to the high cardinality and to reduce the training time, the one-hot # encoding scheme uses `max_categories=20` which prevents the features from # expanding too much, which can result in underfitting. 
# - If we had not set `max_categories=20`, the one-hot encoding scheme would have # likely made the pipeline overfitting as the number of features explodes with rare # category occurrences that are correlated with the target by chance (on the training # set only); # - The ordinal encoding imposes an arbitrary order to the features which are then # treated as numerical values by the # :class:`~sklearn.ensemble.HistGradientBoostingRegressor`. Since this # model groups numerical features in 256 bins per feature, many unrelated categories # can be grouped together and as a result overall pipeline can underfit; # - When using the target encoder, the same binning happens, but since the encoded # values are statistically ordered by marginal association with the target variable, # the binning use by the :class:`~sklearn.ensemble.HistGradientBoostingRegressor` # makes sense and leads to good results: the combination of smoothed target # encoding and binning works as a good regularizing strategy against # overfitting while not limiting the expressiveness of the pipeline too much.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/preprocessing/plot_discretization_classification.py
examples/preprocessing/plot_discretization_classification.py
""" ====================== Feature discretization ====================== A demonstration of feature discretization on synthetic classification datasets. Feature discretization decomposes each feature into a set of bins, here equally distributed in width. The discrete values are then one-hot encoded, and given to a linear classifier. This preprocessing enables a non-linear behavior even though the classifier is linear. On this example, the first two rows represent linearly non-separable datasets (moons and concentric circles) while the third is approximately linearly separable. On the two linearly non-separable datasets, feature discretization largely increases the performance of linear classifiers. On the linearly separable dataset, feature discretization decreases the performance of linear classifiers. Two non-linear classifiers are also shown for comparison. This example should be taken with a grain of salt, as the intuition conveyed does not necessarily carry over to real datasets. Particularly in high-dimensional spaces, data can more easily be separated linearly. Moreover, using feature discretization and one-hot encoding increases the number of features, which easily lead to overfitting when the number of samples is small. The plots show training points in solid colors and testing points semi-transparent. The lower right shows the classification accuracy on the test set. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from matplotlib.colors import ListedColormap from sklearn.datasets import make_circles, make_classification, make_moons from sklearn.ensemble import GradientBoostingClassifier from sklearn.exceptions import ConvergenceWarning from sklearn.linear_model import LogisticRegression from sklearn.model_selection import GridSearchCV, train_test_split from sklearn.pipeline import make_pipeline from sklearn.preprocessing import KBinsDiscretizer, StandardScaler from sklearn.svm import SVC, LinearSVC from sklearn.utils._testing import ignore_warnings h = 0.02 # step size in the mesh def get_name(estimator): name = estimator.__class__.__name__ if name == "Pipeline": name = [get_name(est[1]) for est in estimator.steps] name = " + ".join(name) return name # list of (estimator, param_grid), where param_grid is used in GridSearchCV # The parameter spaces in this example are limited to a narrow band to reduce # its runtime. In a real use case, a broader search space for the algorithms # should be used. 
classifiers = [ ( make_pipeline(StandardScaler(), LogisticRegression(random_state=0)), {"logisticregression__C": np.logspace(-1, 1, 3)}, ), ( make_pipeline(StandardScaler(), LinearSVC(random_state=0)), {"linearsvc__C": np.logspace(-1, 1, 3)}, ), ( make_pipeline( StandardScaler(), KBinsDiscretizer( encode="onehot", quantile_method="averaged_inverted_cdf", random_state=0 ), LogisticRegression(random_state=0), ), { "kbinsdiscretizer__n_bins": np.arange(5, 8), "logisticregression__C": np.logspace(-1, 1, 3), }, ), ( make_pipeline( StandardScaler(), KBinsDiscretizer( encode="onehot", quantile_method="averaged_inverted_cdf", random_state=0 ), LinearSVC(random_state=0), ), { "kbinsdiscretizer__n_bins": np.arange(5, 8), "linearsvc__C": np.logspace(-1, 1, 3), }, ), ( make_pipeline( StandardScaler(), GradientBoostingClassifier(n_estimators=5, random_state=0) ), {"gradientboostingclassifier__learning_rate": np.logspace(-2, 0, 5)}, ), ( make_pipeline(StandardScaler(), SVC(random_state=0)), {"svc__C": np.logspace(-1, 1, 3)}, ), ] names = [get_name(e).replace("StandardScaler + ", "") for e, _ in classifiers] n_samples = 100 datasets = [ make_moons(n_samples=n_samples, noise=0.2, random_state=0), make_circles(n_samples=n_samples, noise=0.2, factor=0.5, random_state=1), make_classification( n_samples=n_samples, n_features=2, n_redundant=0, n_informative=2, random_state=2, n_clusters_per_class=1, ), ] fig, axes = plt.subplots( nrows=len(datasets), ncols=len(classifiers) + 1, figsize=(21, 9) ) cm_piyg = plt.cm.PiYG cm_bright = ListedColormap(["#b30065", "#178000"]) # iterate over datasets for ds_cnt, (X, y) in enumerate(datasets): print(f"\ndataset {ds_cnt}\n---------") # split into training and test part X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.5, random_state=42 ) # create the grid for background colors x_min, x_max = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5 y_min, y_max = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5 xx, yy = np.meshgrid(np.arange(x_min, 
x_max, h), np.arange(y_min, y_max, h)) # plot the dataset first ax = axes[ds_cnt, 0] if ds_cnt == 0: ax.set_title("Input data") # plot the training points ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright, edgecolors="k") # and testing points ax.scatter( X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6, edgecolors="k" ) ax.set_xlim(xx.min(), xx.max()) ax.set_ylim(yy.min(), yy.max()) ax.set_xticks(()) ax.set_yticks(()) # iterate over classifiers for est_idx, (name, (estimator, param_grid)) in enumerate(zip(names, classifiers)): ax = axes[ds_cnt, est_idx + 1] clf = GridSearchCV(estimator=estimator, param_grid=param_grid) with ignore_warnings(category=ConvergenceWarning): clf.fit(X_train, y_train) score = clf.score(X_test, y_test) print(f"{name}: {score:.2f}") # plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, x_max]*[y_min, y_max]. if hasattr(clf, "decision_function"): Z = clf.decision_function(np.column_stack([xx.ravel(), yy.ravel()])) else: Z = clf.predict_proba(np.column_stack([xx.ravel(), yy.ravel()]))[:, 1] # put the result into a color plot Z = Z.reshape(xx.shape) ax.contourf(xx, yy, Z, cmap=cm_piyg, alpha=0.8) # plot the training points ax.scatter( X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright, edgecolors="k" ) # and testing points ax.scatter( X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, edgecolors="k", alpha=0.6, ) ax.set_xlim(xx.min(), xx.max()) ax.set_ylim(yy.min(), yy.max()) ax.set_xticks(()) ax.set_yticks(()) if ds_cnt == 0: ax.set_title(name.replace(" + ", "\n")) ax.text( 0.95, 0.06, (f"{score:.2f}").lstrip("0"), size=15, bbox=dict(boxstyle="round", alpha=0.8, facecolor="white"), transform=ax.transAxes, horizontalalignment="right", ) plt.tight_layout() # Add suptitles above the figure plt.subplots_adjust(top=0.90) suptitles = [ "Linear classifiers", "Feature discretization and linear classifiers", "Non-linear classifiers", ] for i, suptitle in zip([1, 3, 
5], suptitles): ax = axes[0, i] ax.text( 1.05, 1.25, suptitle, transform=ax.transAxes, horizontalalignment="center", size="x-large", ) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/preprocessing/plot_discretization_strategies.py
examples/preprocessing/plot_discretization_strategies.py
""" ========================================================== Demonstrating the different strategies of KBinsDiscretizer ========================================================== This example presents the different strategies implemented in KBinsDiscretizer: - 'uniform': The discretization is uniform in each feature, which means that the bin widths are constant in each dimension. - 'quantile': The discretization is done on the quantiled values, which means that each bin has approximately the same number of samples. - 'kmeans': The discretization is based on the centroids of a KMeans clustering procedure. The plot shows the regions where the discretized encoding is constant. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn.datasets import make_blobs from sklearn.preprocessing import KBinsDiscretizer strategies = ["uniform", "quantile", "kmeans"] n_samples = 200 centers_0 = np.array([[0, 0], [0, 5], [2, 4], [8, 8]]) centers_1 = np.array([[0, 0], [3, 1]]) # construct the datasets random_state = 42 X_list = [ np.random.RandomState(random_state).uniform(-3, 3, size=(n_samples, 2)), make_blobs( n_samples=[ n_samples // 10, n_samples * 4 // 10, n_samples // 10, n_samples * 4 // 10, ], cluster_std=0.5, centers=centers_0, random_state=random_state, )[0], make_blobs( n_samples=[n_samples // 5, n_samples * 4 // 5], cluster_std=0.5, centers=centers_1, random_state=random_state, )[0], ] figure = plt.figure(figsize=(14, 9)) i = 1 for ds_cnt, X in enumerate(X_list): ax = plt.subplot(len(X_list), len(strategies) + 1, i) ax.scatter(X[:, 0], X[:, 1], edgecolors="k") if ds_cnt == 0: ax.set_title("Input data", size=14) xx, yy = np.meshgrid( np.linspace(X[:, 0].min(), X[:, 0].max(), 300), np.linspace(X[:, 1].min(), X[:, 1].max(), 300), ) grid = np.c_[xx.ravel(), yy.ravel()] ax.set_xlim(xx.min(), xx.max()) ax.set_ylim(yy.min(), yy.max()) ax.set_xticks(()) ax.set_yticks(()) i += 1 # 
transform the dataset with KBinsDiscretizer for strategy in strategies: enc = KBinsDiscretizer( n_bins=4, encode="ordinal", quantile_method="averaged_inverted_cdf", strategy=strategy, ) enc.fit(X) grid_encoded = enc.transform(grid) ax = plt.subplot(len(X_list), len(strategies) + 1, i) # horizontal stripes horizontal = grid_encoded[:, 0].reshape(xx.shape) ax.contourf(xx, yy, horizontal, alpha=0.5) # vertical stripes vertical = grid_encoded[:, 1].reshape(xx.shape) ax.contourf(xx, yy, vertical, alpha=0.5) ax.scatter(X[:, 0], X[:, 1], edgecolors="k") ax.set_xlim(xx.min(), xx.max()) ax.set_ylim(yy.min(), yy.max()) ax.set_xticks(()) ax.set_yticks(()) if ds_cnt == 0: ax.set_title("strategy='%s'" % (strategy,), size=14) i += 1 plt.tight_layout() plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/preprocessing/plot_discretization.py
examples/preprocessing/plot_discretization.py
""" ================================================================ Using KBinsDiscretizer to discretize continuous features ================================================================ The example compares prediction result of linear regression (linear model) and decision tree (tree based model) with and without discretization of real-valued features. As is shown in the result before discretization, linear model is fast to build and relatively straightforward to interpret, but can only model linear relationships, while decision tree can build a much more complex model of the data. One way to make linear model more powerful on continuous data is to use discretization (also known as binning). In the example, we discretize the feature and one-hot encode the transformed data. Note that if the bins are not reasonably wide, there would appear to be a substantially increased risk of overfitting, so the discretizer parameters should usually be tuned under cross validation. After discretization, linear regression and decision tree make exactly the same prediction. As features are constant within each bin, any model must predict the same value for all points within a bin. Compared with the result before discretization, linear model become much more flexible while decision tree gets much less flexible. Note that binning features generally has no beneficial effect for tree-based models, as these models can learn to split up the data anywhere. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn.linear_model import LinearRegression from sklearn.preprocessing import KBinsDiscretizer from sklearn.tree import DecisionTreeRegressor # construct the dataset rnd = np.random.RandomState(42) X = rnd.uniform(-3, 3, size=100) y = np.sin(X) + rnd.normal(size=len(X)) / 3 X = X.reshape(-1, 1) # transform the dataset with KBinsDiscretizer enc = KBinsDiscretizer( n_bins=10, encode="onehot", quantile_method="averaged_inverted_cdf" ) X_binned = enc.fit_transform(X) # predict with original dataset fig, (ax1, ax2) = plt.subplots(ncols=2, sharey=True, figsize=(10, 4)) line = np.linspace(-3, 3, 1000, endpoint=False).reshape(-1, 1) reg = LinearRegression().fit(X, y) ax1.plot(line, reg.predict(line), linewidth=2, color="green", label="linear regression") reg = DecisionTreeRegressor(min_samples_split=3, random_state=0).fit(X, y) ax1.plot(line, reg.predict(line), linewidth=2, color="red", label="decision tree") ax1.plot(X[:, 0], y, "o", c="k") ax1.legend(loc="best") ax1.set_ylabel("Regression output") ax1.set_xlabel("Input feature") ax1.set_title("Result before discretization") # predict with transformed dataset line_binned = enc.transform(line) reg = LinearRegression().fit(X_binned, y) ax2.plot( line, reg.predict(line_binned), linewidth=2, color="green", linestyle="-", label="linear regression", ) reg = DecisionTreeRegressor(min_samples_split=3, random_state=0).fit(X_binned, y) ax2.plot( line, reg.predict(line_binned), linewidth=2, color="red", linestyle=":", label="decision tree", ) ax2.plot(X[:, 0], y, "o", c="k") ax2.vlines(enc.bin_edges_[0], *plt.gca().get_ylim(), linewidth=1, alpha=0.2) ax2.legend(loc="best") ax2.set_xlabel("Input feature") ax2.set_title("Result after discretization") plt.tight_layout() plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/preprocessing/plot_target_encoder_cross_val.py
examples/preprocessing/plot_target_encoder_cross_val.py
""" ======================================= Target Encoder's Internal Cross fitting ======================================= .. currentmodule:: sklearn.preprocessing The :class:`TargetEncoder` replaces each category of a categorical feature with the shrunk mean of the target variable for that category. This method is useful in cases where there is a strong relationship between the categorical feature and the target. To prevent overfitting, :meth:`TargetEncoder.fit_transform` uses an internal :term:`cross fitting` scheme to encode the training data to be used by a downstream model. This scheme involves splitting the data into *k* folds and encoding each fold using the encodings learnt using the *other k-1* folds. In this example, we demonstrate the importance of the cross fitting procedure to prevent overfitting. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Create Synthetic Dataset # ======================== # For this example, we build a dataset with three categorical features: # # * an informative feature with medium cardinality ("informative") # * an uninformative feature with medium cardinality ("shuffled") # * an uninformative feature with high cardinality ("near_unique") # # First, we generate the informative feature: import numpy as np from sklearn.preprocessing import KBinsDiscretizer n_samples = 50_000 rng = np.random.RandomState(42) y = rng.randn(n_samples) noise = 0.5 * rng.randn(n_samples) n_categories = 100 kbins = KBinsDiscretizer( n_bins=n_categories, encode="ordinal", strategy="uniform", random_state=rng, subsample=None, ) X_informative = kbins.fit_transform((y + noise).reshape(-1, 1)) # Remove the linear relationship between y and the bin index by permuting the # values of X_informative: permuted_categories = rng.permutation(n_categories) X_informative = permuted_categories[X_informative.astype(np.int32)] # %% # The uninformative feature with medium cardinality is generated by permuting the # informative 
feature and removing the relationship with the target: X_shuffled = rng.permutation(X_informative) # %% # The uninformative feature with high cardinality is generated so that it is # independent of the target variable. We will show that target encoding without # :term:`cross fitting` will cause catastrophic overfitting for the downstream # regressor. These high cardinality features are basically unique identifiers # for samples which should generally be removed from machine learning datasets. # In this example, we generate them to show how :class:`TargetEncoder`'s default # :term:`cross fitting` behavior mitigates the overfitting issue automatically. X_near_unique_categories = rng.choice( int(0.9 * n_samples), size=n_samples, replace=True ).reshape(-1, 1) # %% # Finally, we assemble the dataset and perform a train test split: import pandas as pd from sklearn.model_selection import train_test_split X = pd.DataFrame( np.concatenate( [X_informative, X_shuffled, X_near_unique_categories], axis=1, ), columns=["informative", "shuffled", "near_unique"], ) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) # %% # Training a Ridge Regressor # ========================== # In this section, we train a ridge regressor on the dataset with and without # encoding and explore the influence of target encoder with and without the # internal :term:`cross fitting`. First, we see the Ridge model trained on the # raw features will have low performance. 
This is because we permuted the order # of the informative feature meaning `X_informative` is not informative when # raw: import sklearn from sklearn.linear_model import Ridge # Configure transformers to always output DataFrames sklearn.set_config(transform_output="pandas") ridge = Ridge(alpha=1e-6, solver="lsqr", fit_intercept=False) raw_model = ridge.fit(X_train, y_train) print("Raw Model score on training set: ", raw_model.score(X_train, y_train)) print("Raw Model score on test set: ", raw_model.score(X_test, y_test)) # %% # Next, we create a pipeline with the target encoder and ridge model. The pipeline # uses :meth:`TargetEncoder.fit_transform` which uses :term:`cross fitting`. We # see that the model fits the data well and generalizes to the test set: from sklearn.pipeline import make_pipeline from sklearn.preprocessing import TargetEncoder model_with_cf = make_pipeline(TargetEncoder(random_state=0), ridge) model_with_cf.fit(X_train, y_train) print("Model with CF on train set: ", model_with_cf.score(X_train, y_train)) print("Model with CF on test set: ", model_with_cf.score(X_test, y_test)) # %% # The coefficients of the linear model shows that most of the weight is on the # feature at column index 0, which is the informative feature import matplotlib.pyplot as plt import pandas as pd plt.rcParams["figure.constrained_layout.use"] = True coefs_cf = pd.Series( model_with_cf[-1].coef_, index=model_with_cf[-1].feature_names_in_ ).sort_values() ax = coefs_cf.plot(kind="barh") _ = ax.set( title="Target encoded with cross fitting", xlabel="Ridge coefficient", ylabel="Feature", ) # %% # While :meth:`TargetEncoder.fit_transform` uses an internal # :term:`cross fitting` scheme to learn encodings for the training set, # :meth:`TargetEncoder.fit` followed by :meth:`TargetEncoder.transform` does not. # It uses the complete training set to learn encodings and to transform the # categorical features. 
Thus, we can use :meth:`TargetEncoder.fit` followed by # :meth:`TargetEncoder.transform` to disable the :term:`cross fitting`. This # encoding is then passed to the ridge model. target_encoder = TargetEncoder(random_state=0) target_encoder.fit(X_train, y_train) X_train_no_cf_encoding = target_encoder.transform(X_train) X_test_no_cf_encoding = target_encoder.transform(X_test) model_no_cf = ridge.fit(X_train_no_cf_encoding, y_train) # %% # We evaluate the model that did not use :term:`cross fitting` when encoding and # see that it overfits: print( "Model without CF on training set: ", model_no_cf.score(X_train_no_cf_encoding, y_train), ) print( "Model without CF on test set: ", model_no_cf.score( X_test_no_cf_encoding, y_test, ), ) # %% # The ridge model overfits because it assigns much more weight to the # uninformative extremely high cardinality ("near_unique") and medium # cardinality ("shuffled") features than when the model used # :term:`cross fitting` to encode the features. coefs_no_cf = pd.Series( model_no_cf.coef_, index=model_no_cf.feature_names_in_ ).sort_values() ax = coefs_no_cf.plot(kind="barh") _ = ax.set( title="Target encoded without cross fitting", xlabel="Ridge coefficient", ylabel="Feature", ) # %% # Conclusion # ========== # This example demonstrates the importance of :class:`TargetEncoder`'s internal # :term:`cross fitting`. It is important to use # :meth:`TargetEncoder.fit_transform` to encode training data before passing it # to a machine learning model. When a :class:`TargetEncoder` is a part of a # :class:`~sklearn.pipeline.Pipeline` and the pipeline is fitted, the pipeline # will correctly call :meth:`TargetEncoder.fit_transform` and use # :term:`cross fitting` when encoding the training data.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/preprocessing/plot_map_data_to_normal.py
examples/preprocessing/plot_map_data_to_normal.py
""" ================================= Map data to a normal distribution ================================= .. currentmodule:: sklearn.preprocessing This example demonstrates the use of the Box-Cox and Yeo-Johnson transforms through :class:`~PowerTransformer` to map data from various distributions to a normal distribution. The power transform is useful as a transformation in modeling problems where homoscedasticity and normality are desired. Below are examples of Box-Cox and Yeo-Johnwon applied to six different probability distributions: Lognormal, Chi-squared, Weibull, Gaussian, Uniform, and Bimodal. Note that the transformations successfully map the data to a normal distribution when applied to certain datasets, but are ineffective with others. This highlights the importance of visualizing the data before and after transformation. Also note that even though Box-Cox seems to perform better than Yeo-Johnson for lognormal and chi-squared distributions, keep in mind that Box-Cox does not support inputs with negative values. For comparison, we also add the output from :class:`~QuantileTransformer`. It can force any arbitrary distribution into a gaussian, provided that there are enough training samples (thousands). Because it is a non-parametric method, it is harder to interpret than the parametric ones (Box-Cox and Yeo-Johnson). On "small" datasets (less than a few hundred points), the quantile transformer is prone to overfitting. The use of the power transform is then recommended. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn.model_selection import train_test_split from sklearn.preprocessing import PowerTransformer, QuantileTransformer N_SAMPLES = 1000 FONT_SIZE = 6 BINS = 30 rng = np.random.RandomState(304) bc = PowerTransformer(method="box-cox") yj = PowerTransformer(method="yeo-johnson") # n_quantiles is set to the training set size rather than the default value # to avoid a warning being raised by this example qt = QuantileTransformer( n_quantiles=500, output_distribution="normal", random_state=rng ) size = (N_SAMPLES, 1) # lognormal distribution X_lognormal = rng.lognormal(size=size) # chi-squared distribution df = 3 X_chisq = rng.chisquare(df=df, size=size) # weibull distribution a = 50 X_weibull = rng.weibull(a=a, size=size) # gaussian distribution loc = 100 X_gaussian = rng.normal(loc=loc, size=size) # uniform distribution X_uniform = rng.uniform(low=0, high=1, size=size) # bimodal distribution loc_a, loc_b = 100, 105 X_a, X_b = rng.normal(loc=loc_a, size=size), rng.normal(loc=loc_b, size=size) X_bimodal = np.concatenate([X_a, X_b], axis=0) # create plots distributions = [ ("Lognormal", X_lognormal), ("Chi-squared", X_chisq), ("Weibull", X_weibull), ("Gaussian", X_gaussian), ("Uniform", X_uniform), ("Bimodal", X_bimodal), ] colors = ["#D81B60", "#0188FF", "#FFC107", "#B7A2FF", "#000000", "#2EC5AC"] fig, axes = plt.subplots(nrows=8, ncols=3, figsize=plt.figaspect(2)) axes = axes.flatten() axes_idxs = [ (0, 3, 6, 9), (1, 4, 7, 10), (2, 5, 8, 11), (12, 15, 18, 21), (13, 16, 19, 22), (14, 17, 20, 23), ] axes_list = [(axes[i], axes[j], axes[k], axes[l]) for (i, j, k, l) in axes_idxs] for distribution, color, axes in zip(distributions, colors, axes_list): name, X = distribution X_train, X_test = train_test_split(X, test_size=0.5) # perform power transforms and quantile transform X_trans_bc = bc.fit(X_train).transform(X_test) lmbda_bc = 
round(bc.lambdas_[0], 2) X_trans_yj = yj.fit(X_train).transform(X_test) lmbda_yj = round(yj.lambdas_[0], 2) X_trans_qt = qt.fit(X_train).transform(X_test) ax_original, ax_bc, ax_yj, ax_qt = axes ax_original.hist(X_train, color=color, bins=BINS) ax_original.set_title(name, fontsize=FONT_SIZE) ax_original.tick_params(axis="both", which="major", labelsize=FONT_SIZE) for ax, X_trans, meth_name, lmbda in zip( (ax_bc, ax_yj, ax_qt), (X_trans_bc, X_trans_yj, X_trans_qt), ("Box-Cox", "Yeo-Johnson", "Quantile transform"), (lmbda_bc, lmbda_yj, None), ): ax.hist(X_trans, color=color, bins=BINS) title = "After {}".format(meth_name) if lmbda is not None: title += "\n$\\lambda$ = {}".format(lmbda) ax.set_title(title, fontsize=FONT_SIZE) ax.tick_params(axis="both", which="major", labelsize=FONT_SIZE) ax.set_xlim([-3.5, 3.5]) plt.tight_layout() plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/classification/plot_classifier_comparison.py
examples/classification/plot_classifier_comparison.py
""" ===================== Classifier comparison ===================== A comparison of several classifiers in scikit-learn on synthetic datasets. The point of this example is to illustrate the nature of decision boundaries of different classifiers. This should be taken with a grain of salt, as the intuition conveyed by these examples does not necessarily carry over to real datasets. Particularly in high-dimensional spaces, data can more easily be separated linearly and the simplicity of classifiers such as naive Bayes and linear SVMs might lead to better generalization than is achieved by other classifiers. The plots show training points in solid colors and testing points semi-transparent. The lower right shows the classification accuracy on the test set. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from matplotlib.colors import ListedColormap from sklearn.datasets import make_circles, make_classification, make_moons from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier from sklearn.gaussian_process import GaussianProcessClassifier from sklearn.gaussian_process.kernels import RBF from sklearn.inspection import DecisionBoundaryDisplay from sklearn.model_selection import train_test_split from sklearn.naive_bayes import GaussianNB from sklearn.neighbors import KNeighborsClassifier from sklearn.neural_network import MLPClassifier from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC from sklearn.tree import DecisionTreeClassifier names = [ "Nearest Neighbors", "Linear SVM", "RBF SVM", "Gaussian Process", "Decision Tree", "Random Forest", "Neural Net", "AdaBoost", "Naive Bayes", "QDA", ] classifiers = [ KNeighborsClassifier(3), SVC(kernel="linear", C=0.025, random_state=42), SVC(gamma=2, C=1, random_state=42), 
GaussianProcessClassifier(1.0 * RBF(1.0), random_state=42), DecisionTreeClassifier(max_depth=5, random_state=42), RandomForestClassifier( max_depth=5, n_estimators=10, max_features=1, random_state=42 ), MLPClassifier(alpha=1, max_iter=1000, random_state=42), AdaBoostClassifier(random_state=42), GaussianNB(), QuadraticDiscriminantAnalysis(), ] X, y = make_classification( n_features=2, n_redundant=0, n_informative=2, random_state=1, n_clusters_per_class=1 ) rng = np.random.RandomState(2) X += 2 * rng.uniform(size=X.shape) linearly_separable = (X, y) datasets = [ make_moons(noise=0.3, random_state=0), make_circles(noise=0.2, factor=0.5, random_state=1), linearly_separable, ] figure = plt.figure(figsize=(27, 9)) i = 1 # iterate over datasets for ds_cnt, ds in enumerate(datasets): # preprocess dataset, split into training and test part X, y = ds X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.4, random_state=42 ) x_min, x_max = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5 y_min, y_max = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5 # just plot the dataset first cm = plt.cm.RdBu cm_bright = ListedColormap(["#FF0000", "#0000FF"]) ax = plt.subplot(len(datasets), len(classifiers) + 1, i) if ds_cnt == 0: ax.set_title("Input data") # Plot the training points ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright, edgecolors="k") # Plot the testing points ax.scatter( X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6, edgecolors="k" ) ax.set_xlim(x_min, x_max) ax.set_ylim(y_min, y_max) ax.set_xticks(()) ax.set_yticks(()) i += 1 # iterate over classifiers for name, clf in zip(names, classifiers): ax = plt.subplot(len(datasets), len(classifiers) + 1, i) clf = make_pipeline(StandardScaler(), clf) clf.fit(X_train, y_train) score = clf.score(X_test, y_test) DecisionBoundaryDisplay.from_estimator( clf, X, cmap=cm, alpha=0.8, ax=ax, eps=0.5 ) # Plot the training points ax.scatter( X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright, 
edgecolors="k" ) # Plot the testing points ax.scatter( X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, edgecolors="k", alpha=0.6, ) ax.set_xlim(x_min, x_max) ax.set_ylim(y_min, y_max) ax.set_xticks(()) ax.set_yticks(()) if ds_cnt == 0: ax.set_title(name) ax.text( x_max - 0.3, y_min + 0.3, ("%.2f" % score).lstrip("0"), size=15, horizontalalignment="right", ) i += 1 plt.tight_layout() plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/classification/plot_digits_classification.py
examples/classification/plot_digits_classification.py
""" ================================ Recognizing hand-written digits ================================ This example shows how scikit-learn can be used to recognize images of hand-written digits, from 0-9. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # Standard scientific Python imports import matplotlib.pyplot as plt # Import datasets, classifiers and performance metrics from sklearn import datasets, metrics, svm from sklearn.model_selection import train_test_split ############################################################################### # Digits dataset # -------------- # # The digits dataset consists of 8x8 # pixel images of digits. The ``images`` attribute of the dataset stores # 8x8 arrays of grayscale values for each image. We will use these arrays to # visualize the first 4 images. The ``target`` attribute of the dataset stores # the digit each image represents and this is included in the title of the 4 # plots below. # # Note: if we were working from image files (e.g., 'png' files), we would load # them using :func:`matplotlib.pyplot.imread`. digits = datasets.load_digits() _, axes = plt.subplots(nrows=1, ncols=4, figsize=(10, 3)) for ax, image, label in zip(axes, digits.images, digits.target): ax.set_axis_off() ax.imshow(image, cmap=plt.cm.gray_r, interpolation="nearest") ax.set_title("Training: %i" % label) ############################################################################### # Classification # -------------- # # To apply a classifier on this data, we need to flatten the images, turning # each 2-D array of grayscale values from shape ``(8, 8)`` into shape # ``(64,)``. Subsequently, the entire dataset will be of shape # ``(n_samples, n_features)``, where ``n_samples`` is the number of images and # ``n_features`` is the total number of pixels in each image. # # We can then split the data into train and test subsets and fit a support # vector classifier on the train samples. 
The fitted classifier can # subsequently be used to predict the value of the digit for the samples # in the test subset. # flatten the images n_samples = len(digits.images) data = digits.images.reshape((n_samples, -1)) # Create a classifier: a support vector classifier clf = svm.SVC(gamma=0.001) # Split data into 50% train and 50% test subsets X_train, X_test, y_train, y_test = train_test_split( data, digits.target, test_size=0.5, shuffle=False ) # Learn the digits on the train subset clf.fit(X_train, y_train) # Predict the value of the digit on the test subset predicted = clf.predict(X_test) ############################################################################### # Below we visualize the first 4 test samples and show their predicted # digit value in the title. _, axes = plt.subplots(nrows=1, ncols=4, figsize=(10, 3)) for ax, image, prediction in zip(axes, X_test, predicted): ax.set_axis_off() image = image.reshape(8, 8) ax.imshow(image, cmap=plt.cm.gray_r, interpolation="nearest") ax.set_title(f"Prediction: {prediction}") ############################################################################### # :func:`~sklearn.metrics.classification_report` builds a text report showing # the main classification metrics. print( f"Classification report for classifier {clf}:\n" f"{metrics.classification_report(y_test, predicted)}\n" ) ############################################################################### # We can also plot a :ref:`confusion matrix <confusion_matrix>` of the # true digit values and the predicted digit values. 
disp = metrics.ConfusionMatrixDisplay.from_predictions(y_test, predicted) disp.figure_.suptitle("Confusion Matrix") print(f"Confusion matrix:\n{disp.confusion_matrix}") plt.show() ############################################################################### # If the results from evaluating a classifier are stored in the form of a # :ref:`confusion matrix <confusion_matrix>` and not in terms of `y_true` and # `y_pred`, one can still build a :func:`~sklearn.metrics.classification_report` # as follows: # The ground truth and predicted lists y_true = [] y_pred = [] cm = disp.confusion_matrix # For each cell in the confusion matrix, add the corresponding ground truths # and predictions to the lists for gt in range(len(cm)): for pred in range(len(cm)): y_true += [gt] * cm[gt][pred] y_pred += [pred] * cm[gt][pred] print( "Classification report rebuilt from confusion matrix:\n" f"{metrics.classification_report(y_true, y_pred)}\n" )
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/classification/plot_classification_probability.py
examples/classification/plot_classification_probability.py
""" =============================== Plot classification probability =============================== This example illustrates the use of :class:`sklearn.inspection.DecisionBoundaryDisplay` to plot the predicted class probabilities of various classifiers in a 2D feature space, mostly for didactic purposes. The first three columns shows the predicted probability for varying values of the two features. Round markers represent the test data that was predicted to belong to that class. In the last column, all three classes are represented on each plot; the class with the highest predicted probability at each point is plotted. The round markers show the test data and are colored by their true label. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np import pandas as pd from matplotlib import cm from sklearn import datasets from sklearn.ensemble import HistGradientBoostingClassifier from sklearn.gaussian_process import GaussianProcessClassifier from sklearn.gaussian_process.kernels import RBF from sklearn.inspection import DecisionBoundaryDisplay from sklearn.kernel_approximation import Nystroem from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score, log_loss, roc_auc_score from sklearn.model_selection import train_test_split from sklearn.pipeline import make_pipeline from sklearn.preprocessing import ( KBinsDiscretizer, PolynomialFeatures, SplineTransformer, ) # %% # Data: 2D projection of the iris dataset # --------------------------------------- iris = datasets.load_iris() X = iris.data[:, 0:2] # we only take the first two features for visualization y = iris.target X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.5, random_state=42 ) # %% # Probabilistic classifiers # ------------------------- # # We will plot the decision boundaries of several classifiers that have a # `predict_proba` method. 
This will allow us to visualize the uncertainty of # the classifier in regions where it is not certain of its prediction. classifiers = { "Logistic regression\n(C=0.01)": LogisticRegression(C=0.1), "Logistic regression\n(C=1)": LogisticRegression(C=100), "Gaussian Process": GaussianProcessClassifier(kernel=1.0 * RBF([1.0, 1.0])), "Logistic regression\n(RBF features)": make_pipeline( Nystroem(kernel="rbf", gamma=5e-1, n_components=50, random_state=1), LogisticRegression(C=10), ), "Gradient Boosting": HistGradientBoostingClassifier(), "Logistic regression\n(binned features)": make_pipeline( KBinsDiscretizer(n_bins=5, quantile_method="averaged_inverted_cdf"), PolynomialFeatures(interaction_only=True), LogisticRegression(C=10), ), "Logistic regression\n(spline features)": make_pipeline( SplineTransformer(n_knots=5), PolynomialFeatures(interaction_only=True), LogisticRegression(C=10), ), } # %% # Plotting the decision boundaries # -------------------------------- # # For each classifier, we plot the per-class probabilities on the first three # columns and the probabilities of the most likely class on the last column. 
n_classifiers = len(classifiers) scatter_kwargs = { "s": 25, "marker": "o", "linewidths": 0.8, "edgecolor": "k", "alpha": 0.7, } y_unique = np.unique(y) # Ensure legend not cut off mpl.rcParams["savefig.bbox"] = "tight" fig, axes = plt.subplots( nrows=n_classifiers, ncols=len(iris.target_names) + 1, figsize=(4 * 2.2, n_classifiers * 2.2), ) evaluation_results = [] levels = 100 for classifier_idx, (name, classifier) in enumerate(classifiers.items()): y_pred = classifier.fit(X_train, y_train).predict(X_test) y_pred_proba = classifier.predict_proba(X_test) accuracy_test = accuracy_score(y_test, y_pred) roc_auc_test = roc_auc_score(y_test, y_pred_proba, multi_class="ovr") log_loss_test = log_loss(y_test, y_pred_proba) evaluation_results.append( { "name": name.replace("\n", " "), "accuracy": accuracy_test, "roc_auc": roc_auc_test, "log_loss": log_loss_test, } ) for label in y_unique: # plot the probability estimate provided by the classifier disp = DecisionBoundaryDisplay.from_estimator( classifier, X_train, response_method="predict_proba", class_of_interest=label, ax=axes[classifier_idx, label], vmin=0, vmax=1, cmap="Blues", levels=levels, ) axes[classifier_idx, label].set_title(f"Class {label}") # plot data predicted to belong to given class mask_y_pred = y_pred == label axes[classifier_idx, label].scatter( X_test[mask_y_pred, 0], X_test[mask_y_pred, 1], c="w", **scatter_kwargs ) axes[classifier_idx, label].set(xticks=(), yticks=()) # add column that shows all classes by plotting class with max 'predict_proba' max_class_disp = DecisionBoundaryDisplay.from_estimator( classifier, X_train, response_method="predict_proba", class_of_interest=None, ax=axes[classifier_idx, len(y_unique)], vmin=0, vmax=1, levels=levels, ) for label in y_unique: mask_label = y_test == label axes[classifier_idx, 3].scatter( X_test[mask_label, 0], X_test[mask_label, 1], c=max_class_disp.multiclass_colors_[[label], :], **scatter_kwargs, ) axes[classifier_idx, 3].set(xticks=(), yticks=()) 
axes[classifier_idx, 3].set_title("Max class") axes[classifier_idx, 0].set_ylabel(name) # colorbar for single class plots ax_single = fig.add_axes([0.15, 0.01, 0.5, 0.02]) plt.title("Probability") _ = plt.colorbar( cm.ScalarMappable(norm=None, cmap=disp.surface_.cmap), cax=ax_single, orientation="horizontal", ) # colorbars for max probability class column max_class_cmaps = [s.cmap for s in max_class_disp.surface_] for label in y_unique: ax_max = fig.add_axes([0.73, (0.06 - (label * 0.04)), 0.16, 0.015]) plt.title(f"Probability class {label}", fontsize=10) _ = plt.colorbar( cm.ScalarMappable(norm=None, cmap=max_class_cmaps[label]), cax=ax_max, orientation="horizontal", ) if label in (0, 1): ax_max.set(xticks=(), yticks=()) # %% # Quantitative evaluation # ----------------------- pd.DataFrame(evaluation_results).round(2) # %% # Analysis # -------- # # The two logistic regression models fitted on the original features display # linear decision boundaries as expected. For this particular problem, this # does not seem to be detrimental as both models are competitive with the # non-linear models when quantitatively evaluated on the test set. We can # observe that the amount of regularization influences the model confidence: # lighter colors for the strongly regularized model with a lower value of `C`. # Regularization also impacts the orientation of decision boundary leading to # slightly different ROC AUC. # # The log-loss on the other hand evaluates both sharpness and calibration and # as a result strongly favors the weakly regularized logistic-regression model, # probably because the strongly regularized model is under-confident. This # could be confirmed by looking at the calibration curve using # :class:`sklearn.calibration.CalibrationDisplay`. 
# # The logistic regression model with RBF features has a "blobby" decision # boundary that is non-linear in the original feature space and is quite # similar to the decision boundary of the Gaussian process classifier which is # configured to use an RBF kernel. # # The logistic regression model fitted on binned features with interactions has # a decision boundary that is non-linear in the original feature space and is # quite similar to the decision boundary of the gradient boosting classifier: # both models favor axis-aligned decisions when extrapolating to unseen region # of the feature space. # # The logistic regression model fitted on spline features with interactions # has a similar axis-aligned extrapolation behavior but a smoother decision # boundary in the dense region of the feature space than the two previous # models. # # To conclude, it is interesting to observe that feature engineering for # logistic regression models can be used to mimic some of the inductive bias of # various non-linear models. However, for this particular dataset, using the # raw features is enough to train a competitive model. This would not # necessarily the case for other datasets.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/classification/plot_lda_qda.py
examples/classification/plot_lda_qda.py
""" ==================================================================== Linear and Quadratic Discriminant Analysis with covariance ellipsoid ==================================================================== This example plots the covariance ellipsoids of each class and the decision boundary learned by :class:`~sklearn.discriminant_analysis.LinearDiscriminantAnalysis` (LDA) and :class:`~sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis` (QDA). The ellipsoids display the double standard deviation for each class. With LDA, the standard deviation is the same for all the classes, while each class has its own standard deviation with QDA. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Data generation # --------------- # # First, we define a function to generate synthetic data. It creates two blobs centered # at `(0, 0)` and `(1, 1)`. Each blob is assigned a specific class. The dispersion of # the blob is controlled by the parameters `cov_class_1` and `cov_class_2`, that are the # covariance matrices used when generating the samples from the Gaussian distributions. import numpy as np def make_data(n_samples, n_features, cov_class_1, cov_class_2, seed=0): rng = np.random.RandomState(seed) X = np.concatenate( [ rng.randn(n_samples, n_features) @ cov_class_1, rng.randn(n_samples, n_features) @ cov_class_2 + np.array([1, 1]), ] ) y = np.concatenate([np.zeros(n_samples), np.ones(n_samples)]) return X, y # %% # We generate three datasets. In the first dataset, the two classes share the same # covariance matrix, and this covariance matrix has the specificity of being spherical # (isotropic). The second dataset is similar to the first one but does not enforce the # covariance to be spherical. Finally, the third dataset has a non-spherical covariance # matrix for each class. 
covariance = np.array([[1, 0], [0, 1]]) X_isotropic_covariance, y_isotropic_covariance = make_data( n_samples=1_000, n_features=2, cov_class_1=covariance, cov_class_2=covariance, seed=0, ) covariance = np.array([[0.0, -0.23], [0.83, 0.23]]) X_shared_covariance, y_shared_covariance = make_data( n_samples=300, n_features=2, cov_class_1=covariance, cov_class_2=covariance, seed=0, ) cov_class_1 = np.array([[0.0, -1.0], [2.5, 0.7]]) * 2.0 cov_class_2 = cov_class_1.T X_different_covariance, y_different_covariance = make_data( n_samples=300, n_features=2, cov_class_1=cov_class_1, cov_class_2=cov_class_2, seed=0, ) # %% # Plotting Functions # ------------------ # # The code below is used to plot several pieces of information from the estimators used, # i.e., :class:`~sklearn.discriminant_analysis.LinearDiscriminantAnalysis` (LDA) and # :class:`~sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis` (QDA). The # displayed information includes: # # - the decision boundary based on the probability estimate of the estimator; # - a scatter plot with circles representing the well-classified samples; # - a scatter plot with crosses representing the misclassified samples; # - the mean of each class, estimated by the estimator, marked with a star; # - the estimated covariance represented by an ellipse at 2 standard deviations from the # mean. 
import matplotlib as mpl from matplotlib import colors from sklearn.inspection import DecisionBoundaryDisplay def plot_ellipse(mean, cov, color, ax): v, w = np.linalg.eigh(cov) u = w[0] / np.linalg.norm(w[0]) angle = np.arctan(u[1] / u[0]) angle = 180 * angle / np.pi # convert to degrees # filled Gaussian at 2 standard deviation ell = mpl.patches.Ellipse( mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5, angle=180 + angle, facecolor=color, edgecolor="black", linewidth=2, ) ell.set_clip_box(ax.bbox) ell.set_alpha(0.4) ax.add_artist(ell) def plot_result(estimator, X, y, ax): cmap = colors.ListedColormap(["tab:red", "tab:blue"]) DecisionBoundaryDisplay.from_estimator( estimator, X, response_method="predict_proba", plot_method="pcolormesh", ax=ax, cmap="RdBu", alpha=0.3, ) DecisionBoundaryDisplay.from_estimator( estimator, X, response_method="predict_proba", plot_method="contour", ax=ax, alpha=1.0, levels=[0.5], ) y_pred = estimator.predict(X) X_right, y_right = X[y == y_pred], y[y == y_pred] X_wrong, y_wrong = X[y != y_pred], y[y != y_pred] ax.scatter(X_right[:, 0], X_right[:, 1], c=y_right, s=20, cmap=cmap, alpha=0.5) ax.scatter( X_wrong[:, 0], X_wrong[:, 1], c=y_wrong, s=30, cmap=cmap, alpha=0.9, marker="x", ) ax.scatter( estimator.means_[:, 0], estimator.means_[:, 1], c="yellow", s=200, marker="*", edgecolor="black", ) if isinstance(estimator, LinearDiscriminantAnalysis): covariance = [estimator.covariance_] * 2 else: covariance = estimator.covariance_ plot_ellipse(estimator.means_[0], covariance[0], "tab:red", ax) plot_ellipse(estimator.means_[1], covariance[1], "tab:blue", ax) ax.set_box_aspect(1) ax.spines["top"].set_visible(False) ax.spines["bottom"].set_visible(False) ax.spines["left"].set_visible(False) ax.spines["right"].set_visible(False) ax.set(xticks=[], yticks=[]) # %% # Comparison of LDA and QDA # ------------------------- # # We compare the two estimators LDA and QDA on all three datasets. 
import matplotlib.pyplot as plt from sklearn.discriminant_analysis import ( LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis, ) fig, axs = plt.subplots(nrows=3, ncols=2, sharex="row", sharey="row", figsize=(8, 12)) lda = LinearDiscriminantAnalysis(solver="svd", store_covariance=True) qda = QuadraticDiscriminantAnalysis(solver="svd", store_covariance=True) for ax_row, X, y in zip( axs, (X_isotropic_covariance, X_shared_covariance, X_different_covariance), (y_isotropic_covariance, y_shared_covariance, y_different_covariance), ): lda.fit(X, y) plot_result(lda, X, y, ax_row[0]) qda.fit(X, y) plot_result(qda, X, y, ax_row[1]) axs[0, 0].set_title("Linear Discriminant Analysis") axs[0, 0].set_ylabel("Data with fixed and spherical covariance") axs[1, 0].set_ylabel("Data with fixed covariance") axs[0, 1].set_title("Quadratic Discriminant Analysis") axs[2, 0].set_ylabel("Data with varying covariances") fig.suptitle( "Linear Discriminant Analysis vs Quadratic Discriminant Analysis", y=0.94, fontsize=15, ) plt.show() # %% # The first important thing to notice is that LDA and QDA are equivalent for the # first and second datasets. Indeed, the major difference is that LDA assumes # that the covariance matrix of each class is equal, while QDA estimates a # covariance matrix per class. Since in these cases the data generative process # has the same covariance matrix for both classes, QDA estimates two covariance # matrices that are (almost) equal and therefore equivalent to the covariance # matrix estimated by LDA. # # In the first dataset the covariance matrix used to generate the dataset is # spherical, which results in a discriminant boundary that aligns with the # perpendicular bisector between the two means. This is no longer the case for # the second dataset. The discriminant boundary only passes through the middle # of the two means. # # Finally, in the third dataset, we observe the real difference between LDA and # QDA. 
QDA fits two covariance matrices and provides a non-linear discriminant # boundary, whereas LDA underfits since it assumes that both classes share a # single covariance matrix.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/classification/plot_lda.py
examples/classification/plot_lda.py
""" =========================================================================== Normal, Ledoit-Wolf and OAS Linear Discriminant Analysis for classification =========================================================================== This example illustrates how the Ledoit-Wolf and Oracle Approximating Shrinkage (OAS) estimators of covariance can improve classification. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn.covariance import OAS from sklearn.datasets import make_blobs from sklearn.discriminant_analysis import LinearDiscriminantAnalysis n_train = 20 # samples for training n_test = 200 # samples for testing n_averages = 50 # how often to repeat classification n_features_max = 75 # maximum number of features step = 4 # step size for the calculation def generate_data(n_samples, n_features): """Generate random blob-ish data with noisy features. This returns an array of input data with shape `(n_samples, n_features)` and an array of `n_samples` target labels. Only one feature contains discriminative information, the other features contain only noise. 
""" X, y = make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]]) # add non-discriminative features if n_features > 1: X = np.hstack([X, np.random.randn(n_samples, n_features - 1)]) return X, y acc_clf1, acc_clf2, acc_clf3 = [], [], [] n_features_range = range(1, n_features_max + 1, step) for n_features in n_features_range: score_clf1, score_clf2, score_clf3 = 0, 0, 0 for _ in range(n_averages): X, y = generate_data(n_train, n_features) clf1 = LinearDiscriminantAnalysis(solver="lsqr", shrinkage=None).fit(X, y) clf2 = LinearDiscriminantAnalysis(solver="lsqr", shrinkage="auto").fit(X, y) oa = OAS(store_precision=False, assume_centered=False) clf3 = LinearDiscriminantAnalysis(solver="lsqr", covariance_estimator=oa).fit( X, y ) X, y = generate_data(n_test, n_features) score_clf1 += clf1.score(X, y) score_clf2 += clf2.score(X, y) score_clf3 += clf3.score(X, y) acc_clf1.append(score_clf1 / n_averages) acc_clf2.append(score_clf2 / n_averages) acc_clf3.append(score_clf3 / n_averages) features_samples_ratio = np.array(n_features_range) / n_train plt.plot( features_samples_ratio, acc_clf1, linewidth=2, label="LDA", color="gold", linestyle="solid", ) plt.plot( features_samples_ratio, acc_clf2, linewidth=2, label="LDA with Ledoit Wolf", color="navy", linestyle="dashed", ) plt.plot( features_samples_ratio, acc_clf3, linewidth=2, label="LDA with OAS", color="red", linestyle="dotted", ) plt.xlabel("n_features / n_samples") plt.ylabel("Classification accuracy") plt.legend(loc="lower left") plt.ylim((0.65, 1.0)) plt.suptitle( "LDA (Linear Discriminant Analysis) vs." "\n" "LDA with Ledoit Wolf vs." "\n" "LDA with OAS (1 discriminative feature)" ) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/multioutput/plot_classifier_chain_yeast.py
examples/multioutput/plot_classifier_chain_yeast.py
""" ================================================== Multilabel classification using a classifier chain ================================================== This example shows how to use :class:`~sklearn.multioutput.ClassifierChain` to solve a multilabel classification problem. The most naive strategy to solve such a task is to independently train a binary classifier on each label (i.e. each column of the target variable). At prediction time, the ensemble of binary classifiers is used to assemble multitask prediction. This strategy does not allow to model relationship between different tasks. The :class:`~sklearn.multioutput.ClassifierChain` is the meta-estimator (i.e. an estimator taking an inner estimator) that implements a more advanced strategy. The ensemble of binary classifiers are used as a chain where the prediction of a classifier in the chain is used as a feature for training the next classifier on a new label. Therefore, these additional features allow each chain to exploit correlations among labels. The :ref:`Jaccard similarity <jaccard_similarity_score>` score for chain tends to be greater than that of the set independent base models. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Loading a dataset # ----------------- # For this example, we use the `yeast # <https://www.openml.org/d/40597>`_ dataset which contains # 2,417 datapoints each with 103 features and 14 possible labels. Each # data point has at least one label. As a baseline we first train a logistic # regression classifier for each of the 14 labels. To evaluate the performance of # these classifiers we predict on a held-out test set and calculate the # Jaccard similarity for each sample. 
import matplotlib.pyplot as plt import numpy as np from sklearn.datasets import fetch_openml from sklearn.model_selection import train_test_split # Load a multi-label dataset from https://www.openml.org/d/40597 X, Y = fetch_openml("yeast", version=4, return_X_y=True) Y = Y == "TRUE" X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0) # %% # Fit models # ---------- # We fit :class:`~sklearn.linear_model.LogisticRegression` wrapped by # :class:`~sklearn.multiclass.OneVsRestClassifier` and ensemble of multiple # :class:`~sklearn.multioutput.ClassifierChain`. # # LogisticRegression wrapped by OneVsRestClassifier # ************************************************** # Since by default :class:`~sklearn.linear_model.LogisticRegression` can't # handle data with multiple targets, we need to use # :class:`~sklearn.multiclass.OneVsRestClassifier`. # After fitting the model we calculate Jaccard similarity. from sklearn.linear_model import LogisticRegression from sklearn.metrics import jaccard_score from sklearn.multiclass import OneVsRestClassifier base_lr = LogisticRegression() ovr = OneVsRestClassifier(base_lr) ovr.fit(X_train, Y_train) Y_pred_ovr = ovr.predict(X_test) ovr_jaccard_score = jaccard_score(Y_test, Y_pred_ovr, average="samples") # %% # Chain of binary classifiers # *************************** # Because the models in each chain are arranged randomly there is significant # variation in performance among the chains. Presumably there is an optimal # ordering of the classes in a chain that will yield the best performance. # However, we do not know that ordering a priori. Instead, we can build a # voting ensemble of classifier chains by averaging the binary predictions of # the chains and apply a threshold of 0.5. The Jaccard similarity score of the # ensemble is greater than that of the independent models and tends to exceed # the score of each chain in the ensemble (although this is not guaranteed # with randomly ordered chains). 
from sklearn.multioutput import ClassifierChain chains = [ClassifierChain(base_lr, order="random", random_state=i) for i in range(10)] for chain in chains: chain.fit(X_train, Y_train) Y_pred_chains = np.array([chain.predict_proba(X_test) for chain in chains]) chain_jaccard_scores = [ jaccard_score(Y_test, Y_pred_chain >= 0.5, average="samples") for Y_pred_chain in Y_pred_chains ] Y_pred_ensemble = Y_pred_chains.mean(axis=0) ensemble_jaccard_score = jaccard_score( Y_test, Y_pred_ensemble >= 0.5, average="samples" ) # %% # Plot results # ------------ # Plot the Jaccard similarity scores for the independent model, each of the # chains, and the ensemble (note that the vertical axis on this plot does # not begin at 0). model_scores = [ovr_jaccard_score] + chain_jaccard_scores + [ensemble_jaccard_score] model_names = ( "Independent", "Chain 1", "Chain 2", "Chain 3", "Chain 4", "Chain 5", "Chain 6", "Chain 7", "Chain 8", "Chain 9", "Chain 10", "Ensemble", ) x_pos = np.arange(len(model_names)) fig, ax = plt.subplots(figsize=(7, 4)) ax.grid(True) ax.set_title("Classifier Chain Ensemble Performance Comparison") ax.set_xticks(x_pos) ax.set_xticklabels(model_names, rotation="vertical") ax.set_ylabel("Jaccard Similarity Score") ax.set_ylim([min(model_scores) * 0.9, max(model_scores) * 1.1]) colors = ["r"] + ["b"] * len(chain_jaccard_scores) + ["g"] ax.bar(x_pos, model_scores, alpha=0.5, color=colors) plt.tight_layout() plt.show() # %% # Results interpretation # ---------------------- # There are three main takeaways from this plot: # # - Independent model wrapped by :class:`~sklearn.multiclass.OneVsRestClassifier` # performs worse than the ensemble of classifier chains and some of individual chains. # This is caused by the fact that the logistic regression doesn't model relationship # between the labels. 
# - :class:`~sklearn.multioutput.ClassifierChain` takes advantage of correlation # among labels but due to random nature of labels ordering, it could yield worse # result than an independent model. # - An ensemble of chains performs better because it not only captures relationship # between labels but also does not make strong assumptions about their correct order.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/datasets/plot_random_multilabel_dataset.py
examples/datasets/plot_random_multilabel_dataset.py
""" ============================================== Plot randomly generated multilabel dataset ============================================== This illustrates the :func:`~sklearn.datasets.make_multilabel_classification` dataset generator. Each sample consists of counts of two features (up to 50 in total), which are differently distributed in each of two classes. Points are labeled as follows, where Y means the class is present: ===== ===== ===== ====== 1 2 3 Color ===== ===== ===== ====== Y N N Red N Y N Blue N N Y Yellow Y Y N Purple Y N Y Orange Y Y N Green Y Y Y Brown ===== ===== ===== ====== A star marks the expected sample for each class; its size reflects the probability of selecting that class label. The left and right examples highlight the ``n_labels`` parameter: more of the samples in the right plot have 2 or 3 labels. Note that this two-dimensional example is very degenerate: generally the number of features would be much greater than the "document length", while here we have much larger documents than vocabulary. Similarly, with ``n_classes > n_features``, it is much less likely that a feature distinguishes a particular class. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn.datasets import make_multilabel_classification as make_ml_clf COLORS = np.array( [ "!", "#FF3333", # red "#0198E1", # blue "#BF5FFF", # purple "#FCD116", # yellow "#FF7216", # orange "#4DBD33", # green "#87421F", # brown ] ) # Use same random seed for multiple calls to make_multilabel_classification to # ensure same distributions RANDOM_SEED = np.random.randint(2**10) def plot_2d(ax, n_labels=1, n_classes=3, length=50): X, Y, p_c, p_w_c = make_ml_clf( n_samples=150, n_features=2, n_classes=n_classes, n_labels=n_labels, length=length, allow_unlabeled=False, return_distributions=True, random_state=RANDOM_SEED, ) ax.scatter( X[:, 0], X[:, 1], color=COLORS.take((Y * [1, 2, 4]).sum(axis=1)), marker="." ) ax.scatter( p_w_c[0] * length, p_w_c[1] * length, marker="*", linewidth=0.5, edgecolor="black", s=20 + 1500 * p_c**2, color=COLORS.take([1, 2, 4]), ) ax.set_xlabel("Feature 0 count") return p_c, p_w_c _, (ax1, ax2) = plt.subplots(1, 2, sharex="row", sharey="row", figsize=(8, 4)) plt.subplots_adjust(bottom=0.15) p_c, p_w_c = plot_2d(ax1, n_labels=1) ax1.set_title("n_labels=1, length=50") ax1.set_ylabel("Feature 1 count") plot_2d(ax2, n_labels=3) ax2.set_title("n_labels=3, length=50") ax2.set_xlim(left=0, auto=True) ax2.set_ylim(bottom=0, auto=True) plt.show() print("The data was generated from (random_state=%d):" % RANDOM_SEED) print("Class", "P(C)", "P(w0|C)", "P(w1|C)", sep="\t") for k, p, p_w in zip(["red", "blue", "yellow"], p_c, p_w_c.T): print("%s\t%0.2f\t%0.2f\t%0.2f" % (k, p, p_w[0], p_w[1]))
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/applications/plot_face_recognition.py
examples/applications/plot_face_recognition.py
""" =================================================== Faces recognition example using eigenfaces and SVMs =================================================== The dataset used in this example is a preprocessed excerpt of the "Labeled Faces in the Wild", aka LFW: https://www.kaggle.com/datasets/jessicali9530/lfw-dataset """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% from time import time import matplotlib.pyplot as plt from scipy.stats import loguniform from sklearn.datasets import fetch_lfw_people from sklearn.decomposition import PCA from sklearn.metrics import ConfusionMatrixDisplay, classification_report from sklearn.model_selection import RandomizedSearchCV, train_test_split from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC # %% # Download the data, if not already on disk and load it as numpy arrays lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4) # introspect the images arrays to find the shapes (for plotting) n_samples, h, w = lfw_people.images.shape # for machine learning we use the 2 data directly (as relative pixel # positions info is ignored by this model) X = lfw_people.data n_features = X.shape[1] # the label to predict is the id of the person y = lfw_people.target target_names = lfw_people.target_names n_classes = target_names.shape[0] print("Total dataset size:") print("n_samples: %d" % n_samples) print("n_features: %d" % n_features) print("n_classes: %d" % n_classes) # %% # Split into a training set and a test and keep 25% of the data for testing. 
X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.25, random_state=42 ) scaler = StandardScaler() X_train = scaler.fit_transform(X_train) X_test = scaler.transform(X_test) # %% # Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled # dataset): unsupervised feature extraction / dimensionality reduction n_components = 150 print( "Extracting the top %d eigenfaces from %d faces" % (n_components, X_train.shape[0]) ) t0 = time() pca = PCA(n_components=n_components, svd_solver="randomized", whiten=True).fit(X_train) print("done in %0.3fs" % (time() - t0)) eigenfaces = pca.components_.reshape((n_components, h, w)) print("Projecting the input data on the eigenfaces orthonormal basis") t0 = time() X_train_pca = pca.transform(X_train) X_test_pca = pca.transform(X_test) print("done in %0.3fs" % (time() - t0)) # %% # Train an SVM classification model print("Fitting the classifier to the training set") t0 = time() param_grid = { "C": loguniform(1e3, 1e5), "gamma": loguniform(1e-4, 1e-1), } clf = RandomizedSearchCV( SVC(kernel="rbf", class_weight="balanced"), param_grid, n_iter=10 ) clf = clf.fit(X_train_pca, y_train) print("done in %0.3fs" % (time() - t0)) print("Best estimator found by grid search:") print(clf.best_estimator_) # %% # Quantitative evaluation of the model quality on the test set print("Predicting people's names on the test set") t0 = time() y_pred = clf.predict(X_test_pca) print("done in %0.3fs" % (time() - t0)) print(classification_report(y_test, y_pred, target_names=target_names)) ConfusionMatrixDisplay.from_estimator( clf, X_test_pca, y_test, display_labels=target_names, xticks_rotation="vertical" ) plt.tight_layout() plt.show() # %% # Qualitative evaluation of the predictions using matplotlib def plot_gallery(images, titles, h, w, n_row=3, n_col=4): """Helper function to plot a gallery of portraits""" plt.figure(figsize=(1.8 * n_col, 2.4 * n_row)) plt.subplots_adjust(bottom=0, left=0.01, right=0.99, top=0.90, hspace=0.35) 
for i in range(n_row * n_col): plt.subplot(n_row, n_col, i + 1) plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray) plt.title(titles[i], size=12) plt.xticks(()) plt.yticks(()) # %% # plot the result of the prediction on a portion of the test set def title(y_pred, y_test, target_names, i): pred_name = target_names[y_pred[i]].rsplit(" ", 1)[-1] true_name = target_names[y_test[i]].rsplit(" ", 1)[-1] return "predicted: %s\ntrue: %s" % (pred_name, true_name) prediction_titles = [ title(y_pred, y_test, target_names, i) for i in range(y_pred.shape[0]) ] plot_gallery(X_test, prediction_titles, h, w) # %% # plot the gallery of the most significative eigenfaces eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])] plot_gallery(eigenfaces, eigenface_titles, h, w) plt.show() # %% # Face recognition problem would be much more effectively solved by training # convolutional neural networks but this family of models is outside of the scope of # the scikit-learn library. Interested readers should instead try to use pytorch or # tensorflow to implement such models.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/applications/plot_prediction_latency.py
examples/applications/plot_prediction_latency.py
""" ================== Prediction Latency ================== This is an example showing the prediction latency of various scikit-learn estimators. The goal is to measure the latency one can expect when doing predictions either in bulk or atomic (i.e. one by one) mode. The plots represent the distribution of the prediction latency as a boxplot. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import gc import time from collections import defaultdict import matplotlib.pyplot as plt import numpy as np from sklearn.datasets import make_regression from sklearn.ensemble import RandomForestRegressor from sklearn.linear_model import Ridge, SGDRegressor from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.svm import SVR from sklearn.utils import shuffle def _not_in_sphinx(): # Hack to detect whether we are running by the sphinx builder return "__file__" in globals() # %% # Benchmark and plot helper functions # ----------------------------------- def atomic_benchmark_estimator(estimator, X_test, verbose=False): """Measure runtime prediction of each instance.""" n_instances = X_test.shape[0] runtimes = np.zeros(n_instances, dtype=float) for i in range(n_instances): instance = X_test[[i], :] start = time.time() estimator.predict(instance) runtimes[i] = time.time() - start if verbose: print( "atomic_benchmark runtimes:", min(runtimes), np.percentile(runtimes, 50), max(runtimes), ) return runtimes def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose): """Measure runtime prediction of the whole input.""" n_instances = X_test.shape[0] runtimes = np.zeros(n_bulk_repeats, dtype=float) for i in range(n_bulk_repeats): start = time.time() estimator.predict(X_test) runtimes[i] = time.time() - start runtimes = np.array(list(map(lambda x: x / float(n_instances), runtimes))) if verbose: print( "bulk_benchmark runtimes:", min(runtimes), np.percentile(runtimes, 50), 
max(runtimes), ) return runtimes def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False): """ Measure runtimes of prediction in both atomic and bulk mode. Parameters ---------- estimator : already trained estimator supporting `predict()` X_test : test input n_bulk_repeats : how many times to repeat when evaluating bulk mode Returns ------- atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the runtimes in seconds. """ atomic_runtimes = atomic_benchmark_estimator(estimator, X_test, verbose) bulk_runtimes = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose) return atomic_runtimes, bulk_runtimes def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False): """Generate a regression dataset with the given parameters.""" if verbose: print("generating dataset...") X, y, coef = make_regression( n_samples=n_train + n_test, n_features=n_features, noise=noise, coef=True ) random_seed = 13 X_train, X_test, y_train, y_test = train_test_split( X, y, train_size=n_train, test_size=n_test, random_state=random_seed ) X_train, y_train = shuffle(X_train, y_train, random_state=random_seed) X_scaler = StandardScaler() X_train = X_scaler.fit_transform(X_train) X_test = X_scaler.transform(X_test) y_scaler = StandardScaler() y_train = y_scaler.fit_transform(y_train[:, None])[:, 0] y_test = y_scaler.transform(y_test[:, None])[:, 0] gc.collect() if verbose: print("ok") return X_train, y_train, X_test, y_test def boxplot_runtimes(runtimes, pred_type, configuration): """ Plot a new `Figure` with boxplots of prediction runtimes. 
Parameters ---------- runtimes : list of `np.array` of latencies in micro-seconds cls_names : list of estimator class names that generated the runtimes pred_type : 'bulk' or 'atomic' """ fig, ax1 = plt.subplots(figsize=(10, 6)) bp = plt.boxplot( runtimes, ) cls_infos = [ "%s\n(%d %s)" % ( estimator_conf["name"], estimator_conf["complexity_computer"](estimator_conf["instance"]), estimator_conf["complexity_label"], ) for estimator_conf in configuration["estimators"] ] plt.setp(ax1, xticklabels=cls_infos) plt.setp(bp["boxes"], color="black") plt.setp(bp["whiskers"], color="black") plt.setp(bp["fliers"], color="red", marker="+") ax1.yaxis.grid(True, linestyle="-", which="major", color="lightgrey", alpha=0.5) ax1.set_axisbelow(True) ax1.set_title( "Prediction Time per Instance - %s, %d feats." % (pred_type.capitalize(), configuration["n_features"]) ) ax1.set_ylabel("Prediction Time (us)") plt.show() def benchmark(configuration): """Run the whole benchmark.""" X_train, y_train, X_test, y_test = generate_dataset( configuration["n_train"], configuration["n_test"], configuration["n_features"] ) stats = {} for estimator_conf in configuration["estimators"]: print("Benchmarking", estimator_conf["instance"]) estimator_conf["instance"].fit(X_train, y_train) gc.collect() a, b = benchmark_estimator(estimator_conf["instance"], X_test) stats[estimator_conf["name"]] = {"atomic": a, "bulk": b} cls_names = [ estimator_conf["name"] for estimator_conf in configuration["estimators"] ] runtimes = [1e6 * stats[clf_name]["atomic"] for clf_name in cls_names] boxplot_runtimes(runtimes, "atomic", configuration) runtimes = [1e6 * stats[clf_name]["bulk"] for clf_name in cls_names] boxplot_runtimes(runtimes, "bulk (%d)" % configuration["n_test"], configuration) def n_feature_influence(estimators, n_train, n_test, n_features, percentile): """ Estimate influence of the number of features on prediction time. 
Parameters ---------- estimators : dict of (name (str), estimator) to benchmark n_train : nber of training instances (int) n_test : nber of testing instances (int) n_features : list of feature-space dimensionality to test (int) percentile : percentile at which to measure the speed (int [0-100]) Returns: -------- percentiles : dict(estimator_name, dict(n_features, percentile_perf_in_us)) """ percentiles = defaultdict(defaultdict) for n in n_features: print("benchmarking with %d features" % n) X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n) for cls_name, estimator in estimators.items(): estimator.fit(X_train, y_train) gc.collect() runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False) percentiles[cls_name][n] = 1e6 * np.percentile(runtimes, percentile) return percentiles def plot_n_features_influence(percentiles, percentile): fig, ax1 = plt.subplots(figsize=(10, 6)) colors = ["r", "g", "b"] for i, cls_name in enumerate(percentiles.keys()): x = np.array(sorted(percentiles[cls_name].keys())) y = np.array([percentiles[cls_name][n] for n in x]) plt.plot( x, y, color=colors[i], ) ax1.yaxis.grid(True, linestyle="-", which="major", color="lightgrey", alpha=0.5) ax1.set_axisbelow(True) ax1.set_title("Evolution of Prediction Time with #Features") ax1.set_xlabel("#Features") ax1.set_ylabel("Prediction Time at %d%%-ile (us)" % percentile) plt.show() def benchmark_throughputs(configuration, duration_secs=0.1): """benchmark throughput for different estimators.""" X_train, y_train, X_test, y_test = generate_dataset( configuration["n_train"], configuration["n_test"], configuration["n_features"] ) throughputs = dict() for estimator_config in configuration["estimators"]: estimator_config["instance"].fit(X_train, y_train) start_time = time.time() n_predictions = 0 while (time.time() - start_time) < duration_secs: estimator_config["instance"].predict(X_test[[0]]) n_predictions += 1 throughputs[estimator_config["name"]] = n_predictions / duration_secs 
return throughputs def plot_benchmark_throughput(throughputs, configuration): fig, ax = plt.subplots(figsize=(10, 6)) colors = ["r", "g", "b"] cls_infos = [ "%s\n(%d %s)" % ( estimator_conf["name"], estimator_conf["complexity_computer"](estimator_conf["instance"]), estimator_conf["complexity_label"], ) for estimator_conf in configuration["estimators"] ] cls_values = [ throughputs[estimator_conf["name"]] for estimator_conf in configuration["estimators"] ] plt.bar(range(len(throughputs)), cls_values, width=0.5, color=colors) ax.set_xticks(np.linspace(0.25, len(throughputs) - 0.75, len(throughputs))) ax.set_xticklabels(cls_infos, fontsize=10) ymax = max(cls_values) * 1.2 ax.set_ylim((0, ymax)) ax.set_ylabel("Throughput (predictions/sec)") ax.set_title( "Prediction Throughput for different estimators (%d features)" % configuration["n_features"] ) plt.show() # %% # Benchmark bulk/atomic prediction speed for various regressors # ------------------------------------------------------------- configuration = { "n_train": int(1e3), "n_test": int(1e2), "n_features": int(1e2), "estimators": [ { "name": "Linear Model", "instance": SGDRegressor( penalty="elasticnet", alpha=0.01, l1_ratio=0.25, tol=1e-4 ), "complexity_label": "non-zero coefficients", "complexity_computer": lambda clf: np.count_nonzero(clf.coef_), }, { "name": "RandomForest", "instance": RandomForestRegressor(), "complexity_label": "estimators", "complexity_computer": lambda clf: clf.n_estimators, }, { "name": "SVR", "instance": SVR(kernel="rbf"), "complexity_label": "support vectors", "complexity_computer": lambda clf: len(clf.support_vectors_), }, ], } benchmark(configuration) # %% # Benchmark n_features influence on prediction speed # -------------------------------------------------- percentile = 90 percentiles = n_feature_influence( {"ridge": Ridge()}, configuration["n_train"], configuration["n_test"], [100, 250, 500], percentile, ) plot_n_features_influence(percentiles, percentile) # %% # Benchmark 
throughput # -------------------- throughputs = benchmark_throughputs(configuration) plot_benchmark_throughput(throughputs, configuration)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/applications/plot_cyclical_feature_engineering.py
examples/applications/plot_cyclical_feature_engineering.py
""" ================================ Time-related feature engineering ================================ This notebook introduces different strategies to leverage time-related features for a bike sharing demand regression task that is highly dependent on business cycles (days, weeks, months) and yearly season cycles. In the process, we introduce how to perform periodic feature engineering using the :class:`sklearn.preprocessing.SplineTransformer` class and its `extrapolation="periodic"` option. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Data exploration on the Bike Sharing Demand dataset # --------------------------------------------------- # # We start by loading the data from the OpenML repository. from sklearn.datasets import fetch_openml bike_sharing = fetch_openml("Bike_Sharing_Demand", version=2, as_frame=True) df = bike_sharing.frame # %% # To get a quick understanding of the periodic patterns of the data, let us # have a look at the average demand per hour during a week. # # Note that the week starts on a Sunday, during the weekend. 
We can clearly # distinguish the commute patterns in the morning and evenings of the work days # and the leisure use of the bikes on the weekends with a more spread peak # demand around the middle of the days: import matplotlib.pyplot as plt fig, ax = plt.subplots(figsize=(12, 4)) average_week_demand = df.groupby(["weekday", "hour"])["count"].mean() average_week_demand.plot(ax=ax) _ = ax.set( title="Average hourly bike demand during the week", xticks=[i * 24 for i in range(7)], xticklabels=["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"], xlabel="Time of the week", ylabel="Number of bike rentals", ) # %% # # The target of the prediction problem is the absolute count of bike rentals on # an hourly basis: df["count"].max() # %% # # Let us rescale the target variable (number of hourly bike rentals) to predict # a relative demand so that the mean absolute error is more easily interpreted # as a fraction of the maximum demand. # # .. note:: # # The fit method of the models used in this notebook all minimizes the # mean squared error to estimate the conditional mean. # The absolute error, however, would estimate the conditional median. # # Nevertheless, when reporting performance measures on the test set in # the discussion, we choose to focus on the mean absolute error instead # of the (root) mean squared error because it is more intuitive to # interpret. Note, however, that in this study the best models for one # metric are also the best ones in terms of the other metric. y = df["count"] / df["count"].max() # %% fig, ax = plt.subplots(figsize=(12, 4)) y.hist(bins=30, ax=ax) _ = ax.set( xlabel="Fraction of rented fleet demand", ylabel="Number of hours", ) # %% # The input feature data frame is a time annotated hourly log of variables # describing the weather conditions. It includes both numerical and categorical # variables. Note that the time information has already been expanded into # several complementary columns. # X = df.drop("count", axis="columns") X # %% # .. 
note:: # # If the time information was only present as a date or datetime column, we # could have expanded it into hour-in-the-day, day-in-the-week, # day-in-the-month, month-in-the-year using pandas: # https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#time-date-components # # We now introspect the distribution of the categorical variables, starting # with `"weather"`: # X["weather"].value_counts() # %% # Since there are only 3 `"heavy_rain"` events, we cannot use this category to # train machine learning models with cross validation. Instead, we simplify the # representation by collapsing those into the `"rain"` category. # X["weather"] = ( X["weather"] .astype(object) .replace(to_replace="heavy_rain", value="rain") .astype("category") ) # %% X["weather"].value_counts() # %% # As expected, the `"season"` variable is well balanced: # X["season"].value_counts() # %% # Time-based cross-validation # --------------------------- # # Since the dataset is a time-ordered event log (hourly demand), we will use a # time-sensitive cross-validation splitter to evaluate our demand forecasting # model as realistically as possible. We use a gap of 2 days between the train # and test side of the splits. We also limit the training set size to make the # performance of the CV folds more stable. # # 1000 test datapoints should be enough to quantify the performance of the # model. 
This represents a bit less than a month and a half of contiguous test # data: from sklearn.model_selection import TimeSeriesSplit ts_cv = TimeSeriesSplit( n_splits=5, gap=48, max_train_size=10000, test_size=1000, ) # %% # Let us manually inspect the various splits to check that the # `TimeSeriesSplit` works as we expect, starting with the first split: all_splits = list(ts_cv.split(X, y)) train_0, test_0 = all_splits[0] # %% X.iloc[test_0] # %% X.iloc[train_0] # %% # We now inspect the last split: train_4, test_4 = all_splits[4] # %% X.iloc[test_4] # %% X.iloc[train_4] # %% # All is well. We are now ready to do some predictive modeling! # # Gradient Boosting # ----------------- # # Gradient Boosting Regression with decision trees is often flexible enough to # efficiently handle heterogeneous tabular data with a mix of categorical and # numerical features as long as the number of samples is large enough. # # Here, we use the modern # :class:`~sklearn.ensemble.HistGradientBoostingRegressor` with native support # for categorical features. Therefore, we only need to set # `categorical_features="from_dtype"` such that features with categorical dtype # are considered categorical features. For reference, we extract the categorical # features from the dataframe based on the dtype. The internal trees use a dedicated # tree splitting rule for these features. 
# # The numerical variables need no preprocessing and, for the sake of simplicity, # we only try the default hyper-parameters for this model: from sklearn.compose import ColumnTransformer from sklearn.ensemble import HistGradientBoostingRegressor from sklearn.model_selection import cross_validate from sklearn.pipeline import make_pipeline gbrt = HistGradientBoostingRegressor(categorical_features="from_dtype", random_state=42) categorical_columns = X.columns[X.dtypes == "category"] print("Categorical features:", categorical_columns.tolist()) # %% # # Let's evaluate our gradient boosting model with the mean absolute error of the # relative demand averaged across our 5 time-based cross-validation splits: import numpy as np def evaluate(model, X, y, cv, model_prop=None, model_step=None): cv_results = cross_validate( model, X, y, cv=cv, scoring=["neg_mean_absolute_error", "neg_root_mean_squared_error"], return_estimator=model_prop is not None, ) if model_prop is not None: if model_step is not None: values = [ getattr(m[model_step], model_prop) for m in cv_results["estimator"] ] else: values = [getattr(m, model_prop) for m in cv_results["estimator"]] print(f"Mean model.{model_prop} = {np.mean(values)}") mae = -cv_results["test_neg_mean_absolute_error"] rmse = -cv_results["test_neg_root_mean_squared_error"] print( f"Mean Absolute Error: {mae.mean():.3f} +/- {mae.std():.3f}\n" f"Root Mean Squared Error: {rmse.mean():.3f} +/- {rmse.std():.3f}" ) evaluate(gbrt, X, y, cv=ts_cv, model_prop="n_iter_") # %% # We see that we set `max_iter` large enough such that early stopping took place. # # This model has an average error around 4 to 5% of the maximum demand. This is # quite good for a first trial without any hyper-parameter tuning! We just had # to make the categorical variables explicit. Note that the time related # features are passed as is, i.e. without processing them. 
But this is not much # of a problem for tree-based models as they can learn a non-monotonic # relationship between ordinal input features and the target. # # This is not the case for linear regression models as we will see in the # following. # # Naive linear regression # ----------------------- # # As usual for linear models, categorical variables need to be one-hot encoded. # For consistency, we scale the numerical features to the same 0-1 range using # :class:`~sklearn.preprocessing.MinMaxScaler`, although in this case it does not # impact the results much because they are already on comparable scales: from sklearn.linear_model import RidgeCV from sklearn.preprocessing import MinMaxScaler, OneHotEncoder one_hot_encoder = OneHotEncoder(handle_unknown="ignore", sparse_output=False) alphas = np.logspace(-6, 6, 25) naive_linear_pipeline = make_pipeline( ColumnTransformer( transformers=[ ("categorical", one_hot_encoder, categorical_columns), ], remainder=MinMaxScaler(), ), RidgeCV(alphas=alphas), ) evaluate( naive_linear_pipeline, X, y, cv=ts_cv, model_prop="alpha_", model_step="ridgecv" ) # %% # It is affirmative to see that the selected `alpha_` is in our specified # range. # # The performance is not good: the average error is around 14% of the maximum # demand. This is more than three times higher than the average error of the # gradient boosting model. We can suspect that the naive original encoding # (merely min-max scaled) of the periodic time-related features might prevent # the linear regression model to properly leverage the time information: linear # regression does not automatically model non-monotonic relationships between # the input features and the target. Non-linear terms have to be engineered in # the input. 
# # For example, the raw numerical encoding of the `"hour"` feature prevents the # linear model from recognizing that an increase of hour in the morning from 6 # to 8 should have a strong positive impact on the number of bike rentals while # an increase of similar magnitude in the evening from 18 to 20 should have a # strong negative impact on the predicted number of bike rentals. # # Time-steps as categories # ------------------------ # # Since the time features are encoded in a discrete manner using integers (24 # unique values in the "hours" feature), we could decide to treat those as # categorical variables using a one-hot encoding and thereby ignore any # assumption implied by the ordering of the hour values. # # Using one-hot encoding for the time features gives the linear model a lot # more flexibility as we introduce one additional feature per discrete time # level. one_hot_linear_pipeline = make_pipeline( ColumnTransformer( transformers=[ ("categorical", one_hot_encoder, categorical_columns), ("one_hot_time", one_hot_encoder, ["hour", "weekday", "month"]), ], remainder=MinMaxScaler(), ), RidgeCV(alphas=alphas), ) evaluate(one_hot_linear_pipeline, X, y, cv=ts_cv) # %% # The average error rate of this model is 10% which is much better than using # the original (ordinal) encoding of the time feature, confirming our intuition # that the linear regression model benefits from the added flexibility to not # treat time progression in a monotonic manner. # # However, this introduces a very large number of new features. If the time of # the day was represented in minutes since the start of the day instead of # hours, one-hot encoding would have introduced 1440 features instead of 24. # This could cause some significant overfitting. 
To avoid this we could use # :func:`sklearn.preprocessing.KBinsDiscretizer` instead to re-bin the number # of levels of fine-grained ordinal or numerical variables while still # benefitting from the non-monotonic expressivity advantages of one-hot # encoding. # # Finally, we also observe that one-hot encoding completely ignores the # ordering of the hour levels while this could be an interesting inductive bias # to preserve to some level. In the following we try to explore smooth, # non-monotonic encoding that locally preserves the relative ordering of time # features. # # Trigonometric features # ---------------------- # # As a first attempt, we can try to encode each of those periodic features # using a sine and cosine transformation with the matching period. # # Each ordinal time feature is transformed into 2 features that together encode # equivalent information in a non-monotonic way, and more importantly without # any jump between the first and the last value of the periodic range. from sklearn.preprocessing import FunctionTransformer def sin_transformer(period): return FunctionTransformer(lambda x: np.sin(x / period * 2 * np.pi)) def cos_transformer(period): return FunctionTransformer(lambda x: np.cos(x / period * 2 * np.pi)) # %% # # Let us visualize the effect of this feature expansion on some synthetic hour # data with a bit of extrapolation beyond hour=23: import pandas as pd hour_df = pd.DataFrame( np.arange(26).reshape(-1, 1), columns=["hour"], ) hour_df["hour_sin"] = sin_transformer(24).fit_transform(hour_df)["hour"] hour_df["hour_cos"] = cos_transformer(24).fit_transform(hour_df)["hour"] hour_df.plot(x="hour") _ = plt.title("Trigonometric encoding for the 'hour' feature") # %% # # Let's use a 2D scatter plot with the hours encoded as colors to better see # how this representation maps the 24 hours of the day to a 2D space, akin to # some sort of a 24 hour version of an analog clock. 
Note that the "25th" hour # is mapped back to the 1st hour because of the periodic nature of the # sine/cosine representation. fig, ax = plt.subplots(figsize=(7, 5)) sp = ax.scatter(hour_df["hour_sin"], hour_df["hour_cos"], c=hour_df["hour"]) ax.set( xlabel="sin(hour)", ylabel="cos(hour)", ) _ = fig.colorbar(sp) # %% # # We can now build a feature extraction pipeline using this strategy: cyclic_cossin_transformer = ColumnTransformer( transformers=[ ("categorical", one_hot_encoder, categorical_columns), ("month_sin", sin_transformer(12), ["month"]), ("month_cos", cos_transformer(12), ["month"]), ("weekday_sin", sin_transformer(7), ["weekday"]), ("weekday_cos", cos_transformer(7), ["weekday"]), ("hour_sin", sin_transformer(24), ["hour"]), ("hour_cos", cos_transformer(24), ["hour"]), ], remainder=MinMaxScaler(), ) cyclic_cossin_linear_pipeline = make_pipeline( cyclic_cossin_transformer, RidgeCV(alphas=alphas), ) evaluate(cyclic_cossin_linear_pipeline, X, y, cv=ts_cv) # %% # # The performance of our linear regression model with this simple feature # engineering is a bit better than using the original ordinal time features but # worse than using the one-hot encoded time features. We will further analyze # possible reasons for this disappointing outcome at the end of this notebook. 
# # Periodic spline features # ------------------------ # # We can try an alternative encoding of the periodic time-related features # using spline transformations with a large enough number of splines, and as a # result a larger number of expanded features compared to the sine/cosine # transformation: from sklearn.preprocessing import SplineTransformer def periodic_spline_transformer(period, n_splines=None, degree=3): if n_splines is None: n_splines = period n_knots = n_splines + 1 # periodic and include_bias is True return SplineTransformer( degree=degree, n_knots=n_knots, knots=np.linspace(0, period, n_knots).reshape(n_knots, 1), extrapolation="periodic", include_bias=True, ) # %% # # Again, let us visualize the effect of this feature expansion on some # synthetic hour data with a bit of extrapolation beyond hour=23: hour_df = pd.DataFrame( np.linspace(0, 26, 1000).reshape(-1, 1), columns=["hour"], ) splines = periodic_spline_transformer(24, n_splines=12).fit_transform(hour_df) splines_df = pd.DataFrame( splines, columns=[f"spline_{i}" for i in range(splines.shape[1])], ) pd.concat([hour_df, splines_df], axis="columns").plot(x="hour", cmap=plt.cm.tab20b) _ = plt.title("Periodic spline-based encoding for the 'hour' feature") # %% # Thanks to the use of the `extrapolation="periodic"` parameter, we observe # that the feature encoding stays smooth when extrapolating beyond midnight. # # We can now build a predictive pipeline using this alternative periodic # feature engineering strategy. # # It is possible to use fewer splines than discrete levels for those ordinal # values. 
This makes spline-based encoding more efficient than one-hot encoding # while preserving most of the expressivity: cyclic_spline_transformer = ColumnTransformer( transformers=[ ("categorical", one_hot_encoder, categorical_columns), ("cyclic_month", periodic_spline_transformer(12, n_splines=6), ["month"]), ("cyclic_weekday", periodic_spline_transformer(7, n_splines=3), ["weekday"]), ("cyclic_hour", periodic_spline_transformer(24, n_splines=12), ["hour"]), ], remainder=MinMaxScaler(), ) cyclic_spline_linear_pipeline = make_pipeline( cyclic_spline_transformer, RidgeCV(alphas=alphas), ) evaluate(cyclic_spline_linear_pipeline, X, y, cv=ts_cv) # %% # Spline features make it possible for the linear model to successfully # leverage the periodic time-related features and reduce the error from ~14% to # ~10% of the maximum demand, which is similar to what we observed with the # one-hot encoded features. # # Qualitative analysis of the impact of features on linear model predictions # -------------------------------------------------------------------------- # # Here, we want to visualize the impact of the feature engineering choices on # the time related shape of the predictions. # # To do so we consider an arbitrary time-based split to compare the predictions # on a range of held out data points. 
naive_linear_pipeline.fit(X.iloc[train_0], y.iloc[train_0]) naive_linear_predictions = naive_linear_pipeline.predict(X.iloc[test_0]) one_hot_linear_pipeline.fit(X.iloc[train_0], y.iloc[train_0]) one_hot_linear_predictions = one_hot_linear_pipeline.predict(X.iloc[test_0]) cyclic_cossin_linear_pipeline.fit(X.iloc[train_0], y.iloc[train_0]) cyclic_cossin_linear_predictions = cyclic_cossin_linear_pipeline.predict(X.iloc[test_0]) cyclic_spline_linear_pipeline.fit(X.iloc[train_0], y.iloc[train_0]) cyclic_spline_linear_predictions = cyclic_spline_linear_pipeline.predict(X.iloc[test_0]) # %% # We visualize those predictions by zooming on the last 96 hours (4 days) of # the test set to get some qualitative insights: last_hours = slice(-96, None) fig, ax = plt.subplots(figsize=(12, 4)) fig.suptitle("Predictions by linear models") ax.plot( y.iloc[test_0].values[last_hours], "x-", alpha=0.2, label="Actual demand", color="black", ) ax.plot(naive_linear_predictions[last_hours], "x-", label="Ordinal time features") ax.plot( cyclic_cossin_linear_predictions[last_hours], "x-", label="Trigonometric time features", ) ax.plot( cyclic_spline_linear_predictions[last_hours], "x-", label="Spline-based time features", ) ax.plot( one_hot_linear_predictions[last_hours], "x-", label="One-hot time features", ) _ = ax.legend() # %% # We can draw the following conclusions from the above plot: # # - The **raw ordinal time-related features** are problematic because they do # not capture the natural periodicity: we observe a big jump in the # predictions at the end of each day when the hour features goes from 23 back # to 0. We can expect similar artifacts at the end of each week or each year. # # - As expected, the **trigonometric features** (sine and cosine) do not have # these discontinuities at midnight, but the linear regression model fails to # leverage those features to properly model intra-day variations. 
# Using trigonometric features for higher harmonics or additional # trigonometric features for the natural period with different phases could # potentially fix this problem. # # - the **periodic spline-based features** fix those two problems at once: they # give more expressivity to the linear model by making it possible to focus # on specific hours thanks to the use of 12 splines. Furthermore the # `extrapolation="periodic"` option enforces a smooth representation between # `hour=23` and `hour=0`. # # - The **one-hot encoded features** behave similarly to the periodic # spline-based features but are more spiky: for instance they can better # model the morning peak during the week days since this peak lasts shorter # than an hour. However, we will see in the following that what can be an # advantage for linear models is not necessarily one for more expressive # models. # %% # We can also compare the number of features extracted by each feature # engineering pipeline: naive_linear_pipeline[:-1].transform(X).shape # %% one_hot_linear_pipeline[:-1].transform(X).shape # %% cyclic_cossin_linear_pipeline[:-1].transform(X).shape # %% cyclic_spline_linear_pipeline[:-1].transform(X).shape # %% # This confirms that the one-hot encoding and the spline encoding strategies # create a lot more features for the time representation than the alternatives, # which in turn gives the downstream linear model more flexibility (degrees of # freedom) to avoid underfitting. # # Finally, we observe that none of the linear models can approximate the true # bike rentals demand, especially for the peaks that can be very sharp at rush # hours during the working days but much flatter during the week-ends: the most # accurate linear models based on splines or one-hot encoding tend to forecast # peaks of commuting-related bike rentals even on the week-ends and # under-estimate the commuting-related events during the working days. 
# # These systematic prediction errors reveal a form of under-fitting and can be # explained by the lack of interactions terms between features, e.g. # "workingday" and features derived from "hours". This issue will be addressed # in the following section. # %% # Modeling pairwise interactions with splines and polynomial features # ------------------------------------------------------------------- # # Linear models do not automatically capture interaction effects between input # features. It does not help that some features are marginally non-linear as is # the case with features constructed by `SplineTransformer` (or one-hot # encoding or binning). # # However, it is possible to use the `PolynomialFeatures` class on coarse # grained spline encoded hours to model the "workingday"/"hours" interaction # explicitly without introducing too many new variables: from sklearn.pipeline import FeatureUnion from sklearn.preprocessing import PolynomialFeatures hour_workday_interaction = make_pipeline( ColumnTransformer( [ ("cyclic_hour", periodic_spline_transformer(24, n_splines=8), ["hour"]), ("workingday", FunctionTransformer(lambda x: x == "True"), ["workingday"]), ] ), PolynomialFeatures(degree=2, interaction_only=True, include_bias=False), ) # %% # Those features are then combined with the ones already computed in the # previous spline-base pipeline. We can observe a nice performance improvement # by modeling this pairwise interaction explicitly: cyclic_spline_interactions_pipeline = make_pipeline( FeatureUnion( [ ("marginal", cyclic_spline_transformer), ("interactions", hour_workday_interaction), ] ), RidgeCV(alphas=alphas), ) evaluate(cyclic_spline_interactions_pipeline, X, y, cv=ts_cv) # %% # Modeling non-linear feature interactions with kernels # ----------------------------------------------------- # # The previous analysis highlighted the need to model the interactions between # `"workingday"` and `"hours"`. 
Another example of such a non-linear
# interaction that we would like to model could be the impact of the rain that
# might not be the same during the working days and the week-ends and holidays
# for instance.
#
# To model all such interactions, we could either use a polynomial expansion on
# all marginal features at once, after their spline-based expansion. However,
# this would create a quadratic number of features which can cause overfitting
# and computational tractability issues.
#
# Alternatively, we can use the Nyström method to compute an approximate
# polynomial kernel expansion. Let us try the latter:
from sklearn.kernel_approximation import Nystroem

# Degree-2 polynomial kernel on top of the spline-expanded features,
# approximated with a rank-300 Nyström expansion so that the downstream ridge
# regression stays computationally tractable.
cyclic_spline_poly_pipeline = make_pipeline(
    cyclic_spline_transformer,
    Nystroem(kernel="poly", degree=2, n_components=300, random_state=0),
    RidgeCV(alphas=alphas),
)
evaluate(cyclic_spline_poly_pipeline, X, y, cv=ts_cv)

# %%
#
# We observe that this model can almost rival the performance of the gradient
# boosted trees with an average error around 5% of the maximum demand.
#
# Note that while the final step of this pipeline is a linear regression model,
# the intermediate steps such as the spline feature extraction and the Nyström
# kernel approximation are highly non-linear. As a result the compound pipeline
# is much more expressive than a simple linear regression model with raw features.
# # For the sake of completeness, we also evaluate the combination of one-hot # encoding and kernel approximation: one_hot_poly_pipeline = make_pipeline( ColumnTransformer( transformers=[ ("categorical", one_hot_encoder, categorical_columns), ("one_hot_time", one_hot_encoder, ["hour", "weekday", "month"]), ], remainder="passthrough", ), Nystroem(kernel="poly", degree=2, n_components=300, random_state=0), RidgeCV(alphas=alphas), ) evaluate(one_hot_poly_pipeline, X, y, cv=ts_cv) # %% # While one-hot encoded features were competitive with spline-based features # when using linear models, this is no longer the case when using a low-rank # approximation of a non-linear kernel: this can be explained by the fact that # spline features are smoother and allow the kernel approximation to find a # more expressive decision function. # # Let us now have a qualitative look at the predictions of the kernel models # and of the gradient boosted trees that should be able to better model # non-linear interactions between features: gbrt.fit(X.iloc[train_0], y.iloc[train_0]) gbrt_predictions = gbrt.predict(X.iloc[test_0]) one_hot_poly_pipeline.fit(X.iloc[train_0], y.iloc[train_0]) one_hot_poly_predictions = one_hot_poly_pipeline.predict(X.iloc[test_0]) cyclic_spline_poly_pipeline.fit(X.iloc[train_0], y.iloc[train_0]) cyclic_spline_poly_predictions = cyclic_spline_poly_pipeline.predict(X.iloc[test_0]) # %% # Again we zoom on the last 4 days of the test set: last_hours = slice(-96, None) fig, ax = plt.subplots(figsize=(12, 4)) fig.suptitle("Predictions by non-linear regression models") ax.plot( y.iloc[test_0].values[last_hours], "x-", alpha=0.2, label="Actual demand", color="black", ) ax.plot( gbrt_predictions[last_hours], "x-", label="Gradient Boosted Trees", ) ax.plot( one_hot_poly_predictions[last_hours], "x-", label="One-hot + polynomial kernel", ) ax.plot( cyclic_spline_poly_predictions[last_hours], "x-", label="Splines + polynomial kernel", ) _ = ax.legend() # %% # First, note that 
trees can naturally model non-linear feature interactions # since, by default, decision trees are allowed to grow beyond a depth of 2 # levels. # # Here, we can observe that the combinations of spline features and non-linear # kernels works quite well and can almost rival the accuracy of the gradient # boosting regression trees. # # On the contrary, one-hot encoded time features do not perform that well with # the low rank kernel model. In particular, they significantly over-estimate # the low demand hours more than the competing models. # # We also observe that none of the models can successfully predict some of the # peak rentals at the rush hours during the working days. It is possible that # access to additional features would be required to further improve the # accuracy of the predictions. For instance, it could be useful to have access # to the geographical repartition of the fleet at any point in time or the # fraction of bikes that are immobilized because they need servicing. # # Let us finally get a more quantitative look at the prediction errors of those # three models using the true vs predicted demand scatter plots: from sklearn.metrics import PredictionErrorDisplay fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(13, 7), sharex=True, sharey="row") fig.suptitle("Non-linear regression models", y=1.0) predictions = [ one_hot_poly_predictions, cyclic_spline_poly_predictions, gbrt_predictions, ] labels = [ "One hot +\npolynomial kernel", "Splines +\npolynomial kernel", "Gradient Boosted\nTrees", ] plot_kinds = ["actual_vs_predicted", "residual_vs_predicted"] for axis_idx, kind in enumerate(plot_kinds): for ax, pred, label in zip(axes[axis_idx], predictions, labels): disp = PredictionErrorDisplay.from_predictions( y_true=y.iloc[test_0], y_pred=pred, kind=kind, scatter_kwargs={"alpha": 0.3}, ax=ax, ) ax.set_xticks(np.linspace(0, 1, num=5)) if axis_idx == 0: ax.set_yticks(np.linspace(0, 1, num=5)) ax.legend( ["Best model", label], loc="upper center", 
bbox_to_anchor=(0.5, 1.3), ncol=2, ) ax.set_aspect("equal", adjustable="box") plt.show() # %% # This visualization confirms the conclusions we draw on the previous plot. # # All models under-estimate the high demand events (working day rush hours), # but gradient boosting a bit less so. The low demand events are well predicted # on average by gradient boosting while the one-hot polynomial regression # pipeline seems to systematically over-estimate demand in that regime. Overall # the predictions of the gradient boosted trees are closer to the diagonal than # for the kernel models. # # Concluding remarks # ------------------ # # We note that we could have obtained slightly better results for kernel models # by using more components (higher rank kernel approximation) at the cost of # longer fit and prediction durations. For large values of `n_components`, the # performance of the one-hot encoded features would even match the spline # features. # # The `Nystroem` + `RidgeCV` regressor could also have been replaced by # :class:`~sklearn.neural_network.MLPRegressor` with one or two hidden layers # and we would have obtained quite similar results. # # The dataset we used in this case study is sampled on an hourly basis. However # cyclic spline-based features could model time-within-day or time-within-week # very efficiently with finer-grained time resolutions (for instance with # measurements taken every minute instead of every hour) without introducing # more features. One-hot encoding time representations would not offer this # flexibility. # # Finally, in this notebook we used `RidgeCV` because it is very efficient from # a computational point of view. However, it models the target variable as a # Gaussian random variable with constant variance. For positive regression # problems, it is likely that using a Poisson or Gamma distribution would make # more sense. 
This could be achieved by using
# `GridSearchCV(TweedieRegressor(power=2), param_grid={"alpha": alphas})`
# instead of `RidgeCV`.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/applications/wikipedia_principal_eigenvector.py
examples/applications/wikipedia_principal_eigenvector.py
""" =============================== Wikipedia principal eigenvector =============================== A classical way to assert the relative importance of vertices in a graph is to compute the principal eigenvector of the adjacency matrix so as to assign to each vertex the values of the components of the first eigenvector as a centrality score: https://en.wikipedia.org/wiki/Eigenvector_centrality. On the graph of webpages and links those values are called the PageRank scores by Google. The goal of this example is to analyze the graph of links inside wikipedia articles to rank articles by relative importance according to this eigenvector centrality. The traditional way to compute the principal eigenvector is to use the `power iteration method <https://en.wikipedia.org/wiki/Power_iteration>`_. Here the computation is achieved thanks to Martinsson's Randomized SVD algorithm implemented in scikit-learn. The graph data is fetched from the DBpedia dumps. DBpedia is an extraction of the latent structured data of the Wikipedia content. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import os from bz2 import BZ2File from datetime import datetime from pprint import pprint from time import time from urllib.request import urlopen import numpy as np from scipy import sparse from sklearn.decomposition import randomized_svd # %% # Download data, if not already on disk # ------------------------------------- redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2" redirects_filename = redirects_url.rsplit("/", 1)[1] page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2" page_links_filename = page_links_url.rsplit("/", 1)[1] resources = [ (redirects_url, redirects_filename), (page_links_url, page_links_filename), ] for url, filename in resources: if not os.path.exists(filename): print("Downloading data from '%s', please wait..." 
% url) opener = urlopen(url) with open(filename, "wb") as f: f.write(opener.read()) print() # %% # Loading the redirect files # -------------------------- def index(redirects, index_map, k): """Find the index of an article name after redirect resolution""" k = redirects.get(k, k) return index_map.setdefault(k, len(index_map)) DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/") SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1) def short_name(nt_uri): """Remove the < and > URI markers and the common URI prefix""" return nt_uri[SHORTNAME_SLICE] def get_redirects(redirects_filename): """Parse the redirections and build a transitively closed map out of it""" redirects = {} print("Parsing the NT redirect file") for l, line in enumerate(BZ2File(redirects_filename)): split = line.split() if len(split) != 4: print("ignoring malformed line: " + line) continue redirects[short_name(split[0])] = short_name(split[2]) if l % 1000000 == 0: print("[%s] line: %08d" % (datetime.now().isoformat(), l)) # compute the transitive closure print("Computing the transitive closure of the redirect relation") for l, source in enumerate(redirects.keys()): transitive_target = None target = redirects[source] seen = {source} while True: transitive_target = target target = redirects.get(target) if target is None or target in seen: break seen.add(target) redirects[source] = transitive_target if l % 1000000 == 0: print("[%s] line: %08d" % (datetime.now().isoformat(), l)) return redirects # %% # Computing the Adjacency matrix # ------------------------------ def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None): """Extract the adjacency graph as a scipy sparse matrix Redirects are resolved first. Returns X, the scipy sparse adjacency matrix, redirects as python dict from article names to article names and index_map a python dict from article names to python int (article indexes). 
""" print("Computing the redirect map") redirects = get_redirects(redirects_filename) print("Computing the integer index map") index_map = dict() links = list() for l, line in enumerate(BZ2File(page_links_filename)): split = line.split() if len(split) != 4: print("ignoring malformed line: " + line) continue i = index(redirects, index_map, short_name(split[0])) j = index(redirects, index_map, short_name(split[2])) links.append((i, j)) if l % 1000000 == 0: print("[%s] line: %08d" % (datetime.now().isoformat(), l)) if limit is not None and l >= limit - 1: break print("Computing the adjacency matrix") X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32) for i, j in links: X[i, j] = 1.0 del links print("Converting to CSR representation") X = X.tocsr() print("CSR conversion done") return X, redirects, index_map # stop after 5M links to make it possible to work in RAM X, redirects, index_map = get_adjacency_matrix( redirects_filename, page_links_filename, limit=5000000 ) names = {i: name for name, i in index_map.items()} # %% # Computing Principal Singular Vector using Randomized SVD # -------------------------------------------------------- print("Computing the principal singular vectors using randomized_svd") t0 = time() U, s, V = randomized_svd(X, 5, n_iter=3) print("done in %0.3fs" % (time() - t0)) # print the names of the wikipedia related strongest components of the # principal singular vector which should be similar to the highest eigenvector print("Top wikipedia pages according to principal singular vectors") pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]]) pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]]) # %% # Computing Centrality scores # --------------------------- def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10): """Power iteration computation of the principal eigenvector This method is also known as Google PageRank and the implementation is based on the one from the NetworkX project (BSD licensed too) with 
copyrights by: Aric Hagberg <hagberg@lanl.gov> Dan Schult <dschult@colgate.edu> Pieter Swart <swart@lanl.gov> """ n = X.shape[0] X = X.copy() incoming_counts = np.asarray(X.sum(axis=1)).ravel() print("Normalizing the graph") for i in incoming_counts.nonzero()[0]: X.data[X.indptr[i] : X.indptr[i + 1]] *= 1.0 / incoming_counts[i] dangle = np.asarray(np.where(np.isclose(X.sum(axis=1), 0), 1.0 / n, 0)).ravel() scores = np.full(n, 1.0 / n, dtype=np.float32) # initial guess for i in range(max_iter): print("power iteration #%d" % i) prev_scores = scores scores = ( alpha * (scores * X + np.dot(dangle, prev_scores)) + (1 - alpha) * prev_scores.sum() / n ) # check convergence: normalized l_inf norm scores_max = np.abs(scores).max() if scores_max == 0.0: scores_max = 1.0 err = np.abs(scores - prev_scores).max() / scores_max print("error: %0.6f" % err) if err < n * tol: return scores return scores print("Computing principal eigenvector score using a power iteration method") t0 = time() scores = centrality_scores(X, max_iter=100) print("done in %0.3fs" % (time() - t0)) pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/applications/plot_model_complexity_influence.py
examples/applications/plot_model_complexity_influence.py
""" ========================== Model Complexity Influence ========================== Demonstrate how model complexity influences both prediction accuracy and computational performance. We will be using two datasets: - :ref:`diabetes_dataset` for regression. This dataset consists of 10 measurements taken from diabetes patients. The task is to predict disease progression; - :ref:`20newsgroups_dataset` for classification. This dataset consists of newsgroup posts. The task is to predict on which topic (out of 20 topics) the post is written about. We will model the complexity influence on three different estimators: - :class:`~sklearn.linear_model.SGDClassifier` (for classification data) which implements stochastic gradient descent learning; - :class:`~sklearn.svm.NuSVR` (for regression data) which implements Nu support vector regression; - :class:`~sklearn.ensemble.GradientBoostingRegressor` builds an additive model in a forward stage-wise fashion. Notice that :class:`~sklearn.ensemble.HistGradientBoostingRegressor` is much faster than :class:`~sklearn.ensemble.GradientBoostingRegressor` starting with intermediate datasets (`n_samples >= 10_000`), which is not the case for this example. We make the model complexity vary through the choice of relevant model parameters in each of our selected models. Next, we will measure the influence on both computational performance (latency) and predictive power (MSE or Hamming Loss). 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import time import matplotlib.pyplot as plt import numpy as np from sklearn import datasets from sklearn.ensemble import GradientBoostingRegressor from sklearn.linear_model import SGDClassifier from sklearn.metrics import hamming_loss, mean_squared_error from sklearn.model_selection import train_test_split from sklearn.svm import NuSVR # Initialize random generator np.random.seed(0) ############################################################################## # Load the data # ------------- # # First we load both datasets. # # .. note:: We are using # :func:`~sklearn.datasets.fetch_20newsgroups_vectorized` to download 20 # newsgroups dataset. It returns ready-to-use features. # # .. note:: ``X`` of the 20 newsgroups dataset is a sparse matrix while ``X`` # of diabetes dataset is a numpy array. # def generate_data(case): """Generate regression/classification data.""" if case == "regression": X, y = datasets.load_diabetes(return_X_y=True) train_size = 0.8 elif case == "classification": X, y = datasets.fetch_20newsgroups_vectorized(subset="all", return_X_y=True) train_size = 0.4 # to make the example run faster X_train, X_test, y_train, y_test = train_test_split( X, y, train_size=train_size, random_state=0 ) data = {"X_train": X_train, "X_test": X_test, "y_train": y_train, "y_test": y_test} return data regression_data = generate_data("regression") classification_data = generate_data("classification") ############################################################################## # Benchmark influence # ------------------- # Next, we can calculate the influence of the parameters on the given # estimator. In each round, we will set the estimator with the new value of # ``changing_param`` and we will be collecting the prediction times, prediction # performance and complexities to see how those changes affect the estimator. 
# We will calculate the complexity using ``complexity_computer`` passed as a # parameter. # def benchmark_influence(conf): """ Benchmark influence of `changing_param` on both MSE and latency. """ prediction_times = [] prediction_powers = [] complexities = [] for param_value in conf["changing_param_values"]: conf["tuned_params"][conf["changing_param"]] = param_value estimator = conf["estimator"](**conf["tuned_params"]) print("Benchmarking %s" % estimator) estimator.fit(conf["data"]["X_train"], conf["data"]["y_train"]) conf["postfit_hook"](estimator) complexity = conf["complexity_computer"](estimator) complexities.append(complexity) start_time = time.time() for _ in range(conf["n_samples"]): y_pred = estimator.predict(conf["data"]["X_test"]) elapsed_time = (time.time() - start_time) / float(conf["n_samples"]) prediction_times.append(elapsed_time) pred_score = conf["prediction_performance_computer"]( conf["data"]["y_test"], y_pred ) prediction_powers.append(pred_score) print( "Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % ( complexity, conf["prediction_performance_label"], pred_score, elapsed_time, ) ) return prediction_powers, prediction_times, complexities ############################################################################## # Choose parameters # ----------------- # # We choose the parameters for each of our estimators by making # a dictionary with all the necessary values. # ``changing_param`` is the name of the parameter which will vary in each # estimator. # Complexity will be defined by the ``complexity_label`` and calculated using # `complexity_computer`. # Also note that depending on the estimator type we are passing # different data. 
# def _count_nonzero_coefficients(estimator): a = estimator.coef_.toarray() return np.count_nonzero(a) configurations = [ { "estimator": SGDClassifier, "tuned_params": { "penalty": "elasticnet", "alpha": 0.001, "loss": "modified_huber", "fit_intercept": True, "tol": 1e-1, "n_iter_no_change": 2, }, "changing_param": "l1_ratio", "changing_param_values": [0.25, 0.5, 0.75, 0.9], "complexity_label": "non_zero coefficients", "complexity_computer": _count_nonzero_coefficients, "prediction_performance_computer": hamming_loss, "prediction_performance_label": "Hamming Loss (Misclassification Ratio)", "postfit_hook": lambda x: x.sparsify(), "data": classification_data, "n_samples": 5, }, { "estimator": NuSVR, "tuned_params": {"C": 1e3, "gamma": 2**-15}, "changing_param": "nu", "changing_param_values": [0.05, 0.1, 0.2, 0.35, 0.5], "complexity_label": "n_support_vectors", "complexity_computer": lambda x: len(x.support_vectors_), "data": regression_data, "postfit_hook": lambda x: x, "prediction_performance_computer": mean_squared_error, "prediction_performance_label": "MSE", "n_samples": 15, }, { "estimator": GradientBoostingRegressor, "tuned_params": { "loss": "squared_error", "learning_rate": 0.05, "max_depth": 2, }, "changing_param": "n_estimators", "changing_param_values": [10, 25, 50, 75, 100], "complexity_label": "n_trees", "complexity_computer": lambda x: x.n_estimators, "data": regression_data, "postfit_hook": lambda x: x, "prediction_performance_computer": mean_squared_error, "prediction_performance_label": "MSE", "n_samples": 15, }, ] ############################################################################## # Run the code and plot the results # --------------------------------- # # We defined all the functions required to run our benchmark. Now, we will loop # over the different configurations that we defined previously. 
Subsequently, # we can analyze the plots obtained from the benchmark: # Relaxing the `L1` penalty in the SGD classifier reduces the prediction error # but leads to an increase in the training time. # We can draw a similar analysis regarding the training time which increases # with the number of support vectors with a Nu-SVR. However, we observed that # there is an optimal number of support vectors which reduces the prediction # error. Indeed, too few support vectors lead to an under-fitted model while # too many support vectors lead to an over-fitted model. # The exact same conclusion can be drawn for the gradient-boosting model. The # only the difference with the Nu-SVR is that having too many trees in the # ensemble is not as detrimental. # def plot_influence(conf, mse_values, prediction_times, complexities): """ Plot influence of model complexity on both accuracy and latency. """ fig = plt.figure() fig.subplots_adjust(right=0.75) # first axes (prediction error) ax1 = fig.add_subplot(111) line1 = ax1.plot(complexities, mse_values, c="tab:blue", ls="-")[0] ax1.set_xlabel("Model Complexity (%s)" % conf["complexity_label"]) y1_label = conf["prediction_performance_label"] ax1.set_ylabel(y1_label) ax1.spines["left"].set_color(line1.get_color()) ax1.yaxis.label.set_color(line1.get_color()) ax1.tick_params(axis="y", colors=line1.get_color()) # second axes (latency) ax2 = fig.add_subplot(111, sharex=ax1, frameon=False) line2 = ax2.plot(complexities, prediction_times, c="tab:orange", ls="-")[0] ax2.yaxis.tick_right() ax2.yaxis.set_label_position("right") y2_label = "Time (s)" ax2.set_ylabel(y2_label) ax1.spines["right"].set_color(line2.get_color()) ax2.yaxis.label.set_color(line2.get_color()) ax2.tick_params(axis="y", colors=line2.get_color()) plt.legend( (line1, line2), ("prediction error", "prediction latency"), loc="upper center" ) plt.title( "Influence of varying '%s' on %s" % (conf["changing_param"], conf["estimator"].__name__) ) for conf in configurations: 
prediction_performances, prediction_times, complexities = benchmark_influence(conf) plot_influence(conf, prediction_performances, prediction_times, complexities) plt.show() ############################################################################## # Conclusion # ---------- # # As a conclusion, we can deduce the following insights: # # * a model which is more complex (or expressive) will require a larger # training time; # * a more complex model does not guarantee to reduce the prediction error. # # These aspects are related to model generalization and avoiding model # under-fitting or over-fitting.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/applications/plot_time_series_lagged_features.py
examples/applications/plot_time_series_lagged_features.py
""" =========================================== Lagged features for time series forecasting =========================================== This example demonstrates how Polars-engineered lagged features can be used for time series forecasting with :class:`~sklearn.ensemble.HistGradientBoostingRegressor` on the Bike Sharing Demand dataset. See the example on :ref:`sphx_glr_auto_examples_applications_plot_cyclical_feature_engineering.py` for some data exploration on this dataset and a demo on periodic feature engineering. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Analyzing the Bike Sharing Demand dataset # ----------------------------------------- # # We start by loading the data from the OpenML repository as a raw parquet file # to illustrate how to work with an arbitrary parquet file instead of hiding this # step in a convenience tool such as `sklearn.datasets.fetch_openml`. # # The URL of the parquet file can be found in the JSON description of the # Bike Sharing Demand dataset with id 44063 on openml.org # (https://openml.org/search?type=data&status=active&id=44063). # # The `sha256` hash of the file is also provided to ensure the integrity of the # downloaded file. import numpy as np import polars as pl from sklearn.datasets import fetch_file pl.Config.set_fmt_str_lengths(20) bike_sharing_data_file = fetch_file( "https://data.openml.org/datasets/0004/44063/dataset_44063.pq", sha256="d120af76829af0d256338dc6dd4be5df4fd1f35bf3a283cab66a51c1c6abd06a", ) bike_sharing_data_file # %% # We load the parquet file with Polars for feature engineering. Polars # automatically caches common subexpressions which are reused in multiple # expressions (like `pl.col("count").shift(1)` below). See # https://docs.pola.rs/user-guide/lazy/optimizations/ for more information. 
df = pl.read_parquet(bike_sharing_data_file) # %% # Next, we take a look at the statistical summary of the dataset # so that we can better understand the data that we are working with. import polars.selectors as cs summary = df.select(cs.numeric()).describe() summary # %% # Let us look at the count of the seasons `"fall"`, `"spring"`, `"summer"` # and `"winter"` present in the dataset to confirm they are balanced. import matplotlib.pyplot as plt df["season"].value_counts() # %% # Generating Polars-engineered lagged features # -------------------------------------------- # Let's consider the problem of predicting the demand at the # next hour given past demands. Since the demand is a continuous # variable, one could intuitively use any regression model. However, we do # not have the usual `(X_train, y_train)` dataset. Instead, we just have # the `y_train` demand data sequentially organized by time. lagged_df = df.select( "count", *[pl.col("count").shift(i).alias(f"lagged_count_{i}h") for i in [1, 2, 3]], lagged_count_1d=pl.col("count").shift(24), lagged_count_1d_1h=pl.col("count").shift(24 + 1), lagged_count_7d=pl.col("count").shift(7 * 24), lagged_count_7d_1h=pl.col("count").shift(7 * 24 + 1), lagged_mean_24h=pl.col("count").shift(1).rolling_mean(24), lagged_max_24h=pl.col("count").shift(1).rolling_max(24), lagged_min_24h=pl.col("count").shift(1).rolling_min(24), lagged_mean_7d=pl.col("count").shift(1).rolling_mean(7 * 24), lagged_max_7d=pl.col("count").shift(1).rolling_max(7 * 24), lagged_min_7d=pl.col("count").shift(1).rolling_min(7 * 24), ) lagged_df.tail(10) # %% # Watch out however, the first lines have undefined values because their own # past is unknown. This depends on how much lag we used: lagged_df.head(10) # %% # We can now separate the lagged features in a matrix `X` and the target variable # (the counts to predict) in an array of the same first dimension `y`. 
lagged_df = lagged_df.drop_nulls() X = lagged_df.drop("count") y = lagged_df["count"] print("X shape: {}\ny shape: {}".format(X.shape, y.shape)) # %% # Naive evaluation of the next hour bike demand regression # -------------------------------------------------------- # Let's randomly split our tabularized dataset to train a gradient # boosting regression tree (GBRT) model and evaluate it using Mean # Absolute Percentage Error (MAPE). If our model is aimed at forecasting # (i.e., predicting future data from past data), we should not use training # data that are ulterior to the testing data. In time series machine learning # the "i.i.d" (independent and identically distributed) assumption does not # hold true as the data points are not independent and have a temporal # relationship. from sklearn.ensemble import HistGradientBoostingRegressor from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=42 ) model = HistGradientBoostingRegressor().fit(X_train, y_train) # %% # Taking a look at the performance of the model. from sklearn.metrics import mean_absolute_percentage_error y_pred = model.predict(X_test) mean_absolute_percentage_error(y_test, y_pred) # %% # Proper next hour forecasting evaluation # --------------------------------------- # Let's use a proper evaluation splitting strategies that takes into account # the temporal structure of the dataset to evaluate our model's ability to # predict data points in the future (to avoid cheating by reading values from # the lagged features in the training set). 
from sklearn.model_selection import TimeSeriesSplit ts_cv = TimeSeriesSplit( n_splits=3, # to keep the notebook fast enough on common laptops gap=48, # 2 days data gap between train and test max_train_size=10000, # keep train sets of comparable sizes test_size=3000, # for 2 or 3 digits of precision in scores ) all_splits = list(ts_cv.split(X, y)) # %% # Training the model and evaluating its performance based on MAPE. train_idx, test_idx = all_splits[0] X_train, X_test = X[train_idx, :], X[test_idx, :] y_train, y_test = y[train_idx], y[test_idx] model = HistGradientBoostingRegressor().fit(X_train, y_train) y_pred = model.predict(X_test) mean_absolute_percentage_error(y_test, y_pred) # %% # The generalization error measured via a shuffled trained test split # is too optimistic. The generalization via a time-based split is likely to # be more representative of the true performance of the regression model. # Let's assess this variability of our error evaluation with proper # cross-validation: from sklearn.model_selection import cross_val_score cv_mape_scores = -cross_val_score( model, X, y, cv=ts_cv, scoring="neg_mean_absolute_percentage_error" ) cv_mape_scores # %% # The variability across splits is quite large! In a real life setting # it would be advised to use more splits to better assess the variability. # Let's report the mean CV scores and their standard deviation from now on. print(f"CV MAPE: {cv_mape_scores.mean():.3f} ± {cv_mape_scores.std():.3f}") # %% # We can compute several combinations of evaluation metrics and loss functions, # which are reported a bit below. 
from collections import defaultdict from sklearn.metrics import ( make_scorer, mean_absolute_error, mean_pinball_loss, root_mean_squared_error, ) from sklearn.model_selection import cross_validate def consolidate_scores(cv_results, scores, metric): if metric == "MAPE": scores[metric].append(f"{value.mean():.2f} ± {value.std():.2f}") else: scores[metric].append(f"{value.mean():.1f} ± {value.std():.1f}") return scores scoring = { "MAPE": make_scorer(mean_absolute_percentage_error), "RMSE": make_scorer(root_mean_squared_error), "MAE": make_scorer(mean_absolute_error), "pinball_loss_05": make_scorer(mean_pinball_loss, alpha=0.05), "pinball_loss_50": make_scorer(mean_pinball_loss, alpha=0.50), "pinball_loss_95": make_scorer(mean_pinball_loss, alpha=0.95), } loss_functions = ["squared_error", "poisson", "absolute_error"] scores = defaultdict(list) for loss_func in loss_functions: model = HistGradientBoostingRegressor(loss=loss_func) cv_results = cross_validate( model, X, y, cv=ts_cv, scoring=scoring, n_jobs=2, ) time = cv_results["fit_time"] scores["loss"].append(loss_func) scores["fit_time"].append(f"{time.mean():.2f} ± {time.std():.2f} s") for key, value in cv_results.items(): if key.startswith("test_"): metric = key.split("test_")[1] scores = consolidate_scores(cv_results, scores, metric) # %% # Modeling predictive uncertainty via quantile regression # ------------------------------------------------------- # Instead of modeling the expected value of the distribution of # :math:`Y|X` like the least squares and Poisson losses do, one could try to # estimate quantiles of the conditional distribution. # # :math:`Y|X=x_i` is expected to be a random variable for a given data point # :math:`x_i` because we expect that the number of rentals cannot be 100% # accurately predicted from the features. It can be influenced by other # variables not properly captured by the existing lagged features. 
For # instance whether or not it will rain in the next hour cannot be fully # anticipated from the past hours bike rental data. This is what we # call aleatoric uncertainty. # # Quantile regression makes it possible to give a finer description of that # distribution without making strong assumptions on its shape. quantile_list = [0.05, 0.5, 0.95] for quantile in quantile_list: model = HistGradientBoostingRegressor(loss="quantile", quantile=quantile) cv_results = cross_validate( model, X, y, cv=ts_cv, scoring=scoring, n_jobs=2, ) time = cv_results["fit_time"] scores["fit_time"].append(f"{time.mean():.2f} ± {time.std():.2f} s") scores["loss"].append(f"quantile {int(quantile * 100)}") for key, value in cv_results.items(): if key.startswith("test_"): metric = key.split("test_")[1] scores = consolidate_scores(cv_results, scores, metric) scores_df = pl.DataFrame(scores) scores_df # %% # Let us take a look at the losses that minimise each metric. def min_arg(col): col_split = pl.col(col).str.split(" ") return pl.arg_sort_by( col_split.list.get(0).cast(pl.Float64), col_split.list.get(2).cast(pl.Float64), ).first() scores_df.select( pl.col("loss").get(min_arg(col_name)).alias(col_name) for col_name in scores_df.columns if col_name != "loss" ) # %% # Even if the score distributions overlap due to the variance in the dataset, # it is true that the average RMSE is lower when `loss="squared_error"`, whereas # the average MAPE is lower when `loss="absolute_error"` as expected. That is # also the case for the Mean Pinball Loss with the quantiles 5 and 95. The score # corresponding to the 50 quantile loss is overlapping with the score obtained # by minimizing other loss functions, which is also the case for the MAE. 
# # A qualitative look at the predictions # ------------------------------------- # We can now visualize the performance of the model with regards # to the 5th percentile, median and the 95th percentile: all_splits = list(ts_cv.split(X, y)) train_idx, test_idx = all_splits[0] X_train, X_test = X[train_idx, :], X[test_idx, :] y_train, y_test = y[train_idx], y[test_idx] max_iter = 50 gbrt_mean_poisson = HistGradientBoostingRegressor(loss="poisson", max_iter=max_iter) gbrt_mean_poisson.fit(X_train, y_train) mean_predictions = gbrt_mean_poisson.predict(X_test) gbrt_median = HistGradientBoostingRegressor( loss="quantile", quantile=0.5, max_iter=max_iter ) gbrt_median.fit(X_train, y_train) median_predictions = gbrt_median.predict(X_test) gbrt_percentile_5 = HistGradientBoostingRegressor( loss="quantile", quantile=0.05, max_iter=max_iter ) gbrt_percentile_5.fit(X_train, y_train) percentile_5_predictions = gbrt_percentile_5.predict(X_test) gbrt_percentile_95 = HistGradientBoostingRegressor( loss="quantile", quantile=0.95, max_iter=max_iter ) gbrt_percentile_95.fit(X_train, y_train) percentile_95_predictions = gbrt_percentile_95.predict(X_test) # %% # We can now take a look at the predictions made by the regression models: last_hours = slice(-96, None) fig, ax = plt.subplots(figsize=(15, 7)) plt.title("Predictions by regression models") ax.plot( y_test[last_hours], "x-", alpha=0.2, label="Actual demand", color="black", ) ax.plot( median_predictions[last_hours], "^-", label="GBRT median", ) ax.plot( mean_predictions[last_hours], "x-", label="GBRT mean (Poisson)", ) ax.fill_between( np.arange(96), percentile_5_predictions[last_hours], percentile_95_predictions[last_hours], alpha=0.3, label="GBRT 90% interval", ) _ = ax.legend() # %% # Here it's interesting to notice that the blue area between the 5% and 95% # percentile estimators has a width that varies with the time of the day: # # - At night, the blue band is much narrower: the pair of models is quite # certain that there 
will be a small number of bike rentals. And furthermore # these seem correct in the sense that the actual demand stays in that blue # band. # - During the day, the blue band is much wider: the uncertainty grows, probably # because of the variability of the weather that can have a very large impact, # especially on week-ends. # - We can also see that during week-days, the commute pattern is still visible in # the 5% and 95% estimations. # - Finally, it is expected that 10% of the time, the actual demand does not lie # between the 5% and 95% percentile estimates. On this test span, the actual # demand seems to be higher, especially during the rush hours. It might reveal that # our 95% percentile estimator underestimates the demand peaks. This could be be # quantitatively confirmed by computing empirical coverage numbers as done in # the :ref:`calibration of confidence intervals <calibration-section>`. # # Looking at the performance of non-linear regression models vs # the best models: from sklearn.metrics import PredictionErrorDisplay fig, axes = plt.subplots(ncols=3, figsize=(15, 6), sharey=True) fig.suptitle("Non-linear regression models") predictions = [ median_predictions, percentile_5_predictions, percentile_95_predictions, ] labels = [ "Median", "5th percentile", "95th percentile", ] for ax, pred, label in zip(axes, predictions, labels): PredictionErrorDisplay.from_predictions( y_true=y_test, y_pred=pred, kind="residual_vs_predicted", scatter_kwargs={"alpha": 0.3}, ax=ax, ) ax.set(xlabel="Predicted demand", ylabel="True demand") ax.legend(["Best model", label]) plt.show() # %% # Conclusion # ---------- # Through this example we explored time series forecasting using lagged # features. We compared a naive regression (using the standardized # :class:`~sklearn.model_selection.train_test_split`) with a proper time # series evaluation strategy using # :class:`~sklearn.model_selection.TimeSeriesSplit`. 
We observed that the # model trained using :class:`~sklearn.model_selection.train_test_split`, # having a default value of `shuffle` set to `True` produced an overly # optimistic Mean Average Percentage Error (MAPE). The results # produced from the time-based split better represent the performance # of our time-series regression model. We also analyzed the predictive uncertainty # of our model via Quantile Regression. Predictions based on the 5th and # 95th percentile using `loss="quantile"` provide us with a quantitative estimate # of the uncertainty of the forecasts made by our time series regression model. # Uncertainty estimation can also be performed # using `MAPIE <https://mapie.readthedocs.io/en/latest/index.html>`_, # that provides an implementation based on recent work on conformal prediction # methods and estimates both aleatoric and epistemic uncertainty at the same time. # Furthermore, functionalities provided # by `sktime <https://www.sktime.net/en/latest/users.html>`_ # can be used to extend scikit-learn estimators by making use of recursive time # series forecasting, that enables dynamic predictions of future values.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/applications/plot_outlier_detection_wine.py
examples/applications/plot_outlier_detection_wine.py
""" ==================================== Outlier detection on a real data set ==================================== This example illustrates the need for robust covariance estimation on a real data set. It is useful both for outlier detection and for a better understanding of the data structure. We selected two sets of two variables from the Wine data set as an illustration of what kind of analysis can be done with several outlier detection tools. For the purpose of visualization, we are working with two-dimensional examples, but one should be aware that things are not so trivial in high-dimension, as it will be pointed out. In both examples below, the main result is that the empirical covariance estimate, as a non-robust one, is highly influenced by the heterogeneous structure of the observations. Although the robust covariance estimate is able to focus on the main mode of the data distribution, it sticks to the assumption that the data should be Gaussian distributed, yielding some biased estimation of the data structure, but yet accurate to some extent. The One-Class SVM does not assume any parametric form of the data distribution and can therefore model the complex shape of the data much better. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # First example # ------------- # # The first example illustrates how the Minimum Covariance Determinant # robust estimator can help concentrate on a relevant cluster when outlying # points exist. Here the empirical covariance estimation is skewed by points # outside of the main cluster. Of course, some screening tools would have pointed # out the presence of two clusters (Support Vector Machines, Gaussian Mixture # Models, univariate outlier detection, ...). But had it been a high-dimensional # example, none of these could be applied that easily. 
from sklearn.covariance import EllipticEnvelope from sklearn.inspection import DecisionBoundaryDisplay from sklearn.svm import OneClassSVM estimators = { "Empirical Covariance": EllipticEnvelope(support_fraction=1.0, contamination=0.25), "Robust Covariance (Minimum Covariance Determinant)": EllipticEnvelope( contamination=0.25 ), "OCSVM": OneClassSVM(nu=0.25, gamma=0.35), } # %% import matplotlib.lines as mlines import matplotlib.pyplot as plt from sklearn.datasets import load_wine X = load_wine()["data"][:, [1, 2]] # two clusters fig, ax = plt.subplots() colors = ["tab:blue", "tab:orange", "tab:red"] # Learn a frontier for outlier detection with several classifiers legend_lines = [] for color, (name, estimator) in zip(colors, estimators.items()): estimator.fit(X) DecisionBoundaryDisplay.from_estimator( estimator, X, response_method="decision_function", plot_method="contour", levels=[0], colors=color, ax=ax, ) legend_lines.append(mlines.Line2D([], [], color=color, label=name)) ax.scatter(X[:, 0], X[:, 1], color="black") bbox_args = dict(boxstyle="round", fc="0.8") arrow_args = dict(arrowstyle="->") ax.annotate( "outlying points", xy=(4, 2), xycoords="data", textcoords="data", xytext=(3, 1.25), bbox=bbox_args, arrowprops=arrow_args, ) ax.legend(handles=legend_lines, loc="upper center") _ = ax.set( xlabel="ash", ylabel="malic_acid", title="Outlier detection on a real data set (wine recognition)", ) # %% # Second example # -------------- # # The second example shows the ability of the Minimum Covariance Determinant # robust estimator of covariance to concentrate on the main mode of the data # distribution: the location seems to be well estimated, although the # covariance is hard to estimate due to the banana-shaped distribution. Anyway, # we can get rid of some outlying observations. 
The One-Class SVM is able to # capture the real data structure, but the difficulty is to adjust its kernel # bandwidth parameter so as to obtain a good compromise between the shape of # the data scatter matrix and the risk of over-fitting the data. X = load_wine()["data"][:, [6, 9]] # "banana"-shaped fig, ax = plt.subplots() colors = ["tab:blue", "tab:orange", "tab:red"] # Learn a frontier for outlier detection with several classifiers legend_lines = [] for color, (name, estimator) in zip(colors, estimators.items()): estimator.fit(X) DecisionBoundaryDisplay.from_estimator( estimator, X, response_method="decision_function", plot_method="contour", levels=[0], colors=color, ax=ax, ) legend_lines.append(mlines.Line2D([], [], color=color, label=name)) ax.scatter(X[:, 0], X[:, 1], color="black") ax.legend(handles=legend_lines, loc="upper center") ax.set( xlabel="flavanoids", ylabel="color_intensity", title="Outlier detection on a real data set (wine recognition)", ) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/applications/plot_tomography_l1_reconstruction.py
examples/applications/plot_tomography_l1_reconstruction.py
""" ====================================================================== Compressive sensing: tomography reconstruction with L1 prior (Lasso) ====================================================================== This example shows the reconstruction of an image from a set of parallel projections, acquired along different angles. Such a dataset is acquired in **computed tomography** (CT). Without any prior information on the sample, the number of projections required to reconstruct the image is of the order of the linear size ``l`` of the image (in pixels). For simplicity we consider here a sparse image, where only pixels on the boundary of objects have a non-zero value. Such data could correspond for example to a cellular material. Note however that most images are sparse in a different basis, such as the Haar wavelets. Only ``l/7`` projections are acquired, therefore it is necessary to use prior information available on the sample (its sparsity): this is an example of **compressive sensing**. The tomography projection operation is a linear transformation. In addition to the data-fidelity term corresponding to a linear regression, we penalize the L1 norm of the image to account for its sparsity. The resulting optimization problem is called the :ref:`lasso`. We use the class :class:`~sklearn.linear_model.Lasso`, that uses the coordinate descent algorithm. Importantly, this implementation is more computationally efficient on a sparse matrix, than the projection operator used here. The reconstruction with L1 penalization gives a result with zero error (all pixels are successfully labeled with 0 or 1), even if noise was added to the projections. In comparison, an L2 penalization (:class:`~sklearn.linear_model.Ridge`) produces a large number of labeling errors for the pixels. Important artifacts are observed on the reconstructed image, contrary to the L1 penalization. 
Note in particular the circular artifact separating the pixels in the corners, that have contributed to fewer projections than the central disk. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from scipy import ndimage, sparse from sklearn.linear_model import Lasso, Ridge def _weights(x, dx=1, orig=0): x = np.ravel(x) floor_x = np.floor((x - orig) / dx).astype(np.int64) alpha = (x - orig - floor_x * dx) / dx return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha)) def _generate_center_coordinates(l_x): X, Y = np.mgrid[:l_x, :l_x].astype(np.float64) center = l_x / 2.0 X += 0.5 - center Y += 0.5 - center return X, Y def build_projection_operator(l_x, n_dir): """Compute the tomography design matrix. Parameters ---------- l_x : int linear size of image array n_dir : int number of angles at which projections are acquired. Returns ------- p : sparse matrix of shape (n_dir l_x, l_x**2) """ X, Y = _generate_center_coordinates(l_x) angles = np.linspace(0, np.pi, n_dir, endpoint=False) data_inds, weights, camera_inds = [], [], [] data_unravel_indices = np.arange(l_x**2) data_unravel_indices = np.hstack((data_unravel_indices, data_unravel_indices)) for i, angle in enumerate(angles): Xrot = np.cos(angle) * X - np.sin(angle) * Y inds, w = _weights(Xrot, dx=1, orig=X.min()) mask = np.logical_and(inds >= 0, inds < l_x) weights += list(w[mask]) camera_inds += list(inds[mask] + i * l_x) data_inds += list(data_unravel_indices[mask]) proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds))) return proj_operator def generate_synthetic_data(): """Synthetic binary data""" rs = np.random.RandomState(0) n_pts = 36 x, y = np.ogrid[0:l, 0:l] mask_outer = (x - l / 2.0) ** 2 + (y - l / 2.0) ** 2 < (l / 2.0) ** 2 mask = np.zeros((l, l)) points = l * rs.rand(2, n_pts) mask[(points[0]).astype(int), (points[1]).astype(int)] = 1 mask = ndimage.gaussian_filter(mask, sigma=l / n_pts) res 
= np.logical_and(mask > mask.mean(), mask_outer) return np.logical_xor(res, ndimage.binary_erosion(res)) # Generate synthetic images, and projections l = 128 proj_operator = build_projection_operator(l, l // 7) data = generate_synthetic_data() proj = proj_operator @ data.ravel()[:, np.newaxis] proj += 0.15 * np.random.randn(*proj.shape) # Reconstruction with L2 (Ridge) penalization rgr_ridge = Ridge(alpha=0.2) rgr_ridge.fit(proj_operator, proj.ravel()) rec_l2 = rgr_ridge.coef_.reshape(l, l) # Reconstruction with L1 (Lasso) penalization # the best value of alpha was determined using cross validation # with LassoCV rgr_lasso = Lasso(alpha=0.001) rgr_lasso.fit(proj_operator, proj.ravel()) rec_l1 = rgr_lasso.coef_.reshape(l, l) plt.figure(figsize=(8, 3.3)) plt.subplot(131) plt.imshow(data, cmap=plt.cm.gray, interpolation="nearest") plt.axis("off") plt.title("original image") plt.subplot(132) plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation="nearest") plt.title("L2 penalization") plt.axis("off") plt.subplot(133) plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation="nearest") plt.title("L1 penalization") plt.axis("off") plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0, right=1) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/applications/plot_out_of_core_classification.py
examples/applications/plot_out_of_core_classification.py
""" ====================================================== Out-of-core classification of text documents ====================================================== This is an example showing how scikit-learn can be used for classification using an out-of-core approach: learning from data that doesn't fit into main memory. We make use of an online classifier, i.e., one that supports the partial_fit method, that will be fed with batches of examples. To guarantee that the features space remains the same over time we leverage a HashingVectorizer that will project each example into the same feature space. This is especially useful in the case of text classification where new features (words) may appear in each batch. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import itertools import re import sys import tarfile import time from hashlib import sha256 from html.parser import HTMLParser from pathlib import Path from urllib.request import urlretrieve import matplotlib.pyplot as plt import numpy as np from matplotlib import rcParams from sklearn.datasets import get_data_home from sklearn.feature_extraction.text import HashingVectorizer from sklearn.linear_model import Perceptron, SGDClassifier from sklearn.naive_bayes import MultinomialNB def _not_in_sphinx(): # Hack to detect whether we are running by the sphinx builder return "__file__" in globals() # %% # Reuters Dataset related routines # -------------------------------- # # The dataset used in this example is Reuters-21578 as provided by the UCI ML # repository. It will be automatically downloaded and uncompressed on first # run. 
class ReutersParser(HTMLParser): """Utility class to parse a SGML file and yield documents one at a time.""" def __init__(self, encoding="latin-1"): HTMLParser.__init__(self) self._reset() self.encoding = encoding def handle_starttag(self, tag, attrs): method = "start_" + tag getattr(self, method, lambda x: None)(attrs) def handle_endtag(self, tag): method = "end_" + tag getattr(self, method, lambda: None)() def _reset(self): self.in_title = 0 self.in_body = 0 self.in_topics = 0 self.in_topic_d = 0 self.title = "" self.body = "" self.topics = [] self.topic_d = "" def parse(self, fd): self.docs = [] for chunk in fd: self.feed(chunk.decode(self.encoding)) for doc in self.docs: yield doc self.docs = [] self.close() def handle_data(self, data): if self.in_body: self.body += data elif self.in_title: self.title += data elif self.in_topic_d: self.topic_d += data def start_reuters(self, attributes): pass def end_reuters(self): self.body = re.sub(r"\s+", r" ", self.body) self.docs.append( {"title": self.title, "body": self.body, "topics": self.topics} ) self._reset() def start_title(self, attributes): self.in_title = 1 def end_title(self): self.in_title = 0 def start_body(self, attributes): self.in_body = 1 def end_body(self): self.in_body = 0 def start_topics(self, attributes): self.in_topics = 1 def end_topics(self): self.in_topics = 0 def start_d(self, attributes): self.in_topic_d = 1 def end_d(self): self.in_topic_d = 0 self.topics.append(self.topic_d) self.topic_d = "" def stream_reuters_documents(data_path=None): """Iterate over documents of the Reuters dataset. The Reuters archive will automatically be downloaded and uncompressed if the `data_path` directory does not exist. Documents are represented as dictionaries with 'body' (str), 'title' (str), 'topics' (list(str)) keys. 
""" DOWNLOAD_URL = "https://kdd.ics.uci.edu/databases/reuters21578/reuters21578.tar.gz" ARCHIVE_SHA256 = "3bae43c9b14e387f76a61b6d82bf98a4fb5d3ef99ef7e7075ff2ccbcf59f9d30" ARCHIVE_FILENAME = "reuters21578.tar.gz" if data_path is None: data_path = Path(get_data_home()) / "reuters" else: data_path = Path(data_path) if not data_path.exists(): """Download the dataset.""" print("downloading dataset (once and for all) into %s" % data_path) data_path.mkdir(parents=True, exist_ok=True) def progress(blocknum, bs, size): total_sz_mb = "%.2f MB" % (size / 1e6) current_sz_mb = "%.2f MB" % ((blocknum * bs) / 1e6) if _not_in_sphinx(): sys.stdout.write("\rdownloaded %s / %s" % (current_sz_mb, total_sz_mb)) archive_path = data_path / ARCHIVE_FILENAME urlretrieve(DOWNLOAD_URL, filename=archive_path, reporthook=progress) if _not_in_sphinx(): sys.stdout.write("\r") # Check that the archive was not tampered: assert sha256(archive_path.read_bytes()).hexdigest() == ARCHIVE_SHA256 print("untarring Reuters dataset...") with tarfile.open(archive_path, "r:gz") as fp: fp.extractall(data_path, filter="data") print("done.") parser = ReutersParser() for filename in data_path.glob("*.sgm"): for doc in parser.parse(open(filename, "rb")): yield doc # %% # Main # ---- # # Create the vectorizer and limit the number of features to a reasonable # maximum vectorizer = HashingVectorizer( decode_error="ignore", n_features=2**18, alternate_sign=False ) # Iterator over parsed Reuters SGML files. data_stream = stream_reuters_documents() # We learn a binary classification between the "acq" class and all the others. # "acq" was chosen as it is more or less evenly distributed in the Reuters # files. For other datasets, one should take care of creating a test set with # a realistic portion of positive instances. 
all_classes = np.array([0, 1]) positive_class = "acq" # Here are some classifiers that support the `partial_fit` method partial_fit_classifiers = { "SGD": SGDClassifier(max_iter=5), "Perceptron": Perceptron(), "NB Multinomial": MultinomialNB(alpha=0.01), "Passive-Aggressive": SGDClassifier( loss="hinge", penalty=None, learning_rate="pa1", eta0=1.0 ), } def get_minibatch(doc_iter, size, pos_class=positive_class): """Extract a minibatch of examples, return a tuple X_text, y. Note: size is before excluding invalid docs with no topics assigned. """ data = [ ("{title}\n\n{body}".format(**doc), pos_class in doc["topics"]) for doc in itertools.islice(doc_iter, size) if doc["topics"] ] if not len(data): return np.asarray([], dtype=int), np.asarray([], dtype=int) X_text, y = zip(*data) return X_text, np.asarray(y, dtype=int) def iter_minibatches(doc_iter, minibatch_size): """Generator of minibatches.""" X_text, y = get_minibatch(doc_iter, minibatch_size) while len(X_text): yield X_text, y X_text, y = get_minibatch(doc_iter, minibatch_size) # test data statistics test_stats = {"n_test": 0, "n_test_pos": 0} # First we hold out a number of examples to estimate accuracy n_test_documents = 1000 tick = time.time() X_test_text, y_test = get_minibatch(data_stream, 1000) parsing_time = time.time() - tick tick = time.time() X_test = vectorizer.transform(X_test_text) vectorizing_time = time.time() - tick test_stats["n_test"] += len(y_test) test_stats["n_test_pos"] += sum(y_test) print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test))) def progress(cls_name, stats): """Report progress information, return a string.""" duration = time.time() - stats["t0"] s = "%20s classifier : \t" % cls_name s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats s += "accuracy: %(accuracy).3f " % stats s += "in %.2fs (%5d docs/s)" % (duration, stats["n_train"] / duration) return s cls_stats = {} for 
cls_name in partial_fit_classifiers: stats = { "n_train": 0, "n_train_pos": 0, "accuracy": 0.0, "accuracy_history": [(0, 0)], "t0": time.time(), "runtime_history": [(0, 0)], "total_fit_time": 0.0, } cls_stats[cls_name] = stats get_minibatch(data_stream, n_test_documents) # Discard test set # We will feed the classifier with mini-batches of 1000 documents; this means # we have at most 1000 docs in memory at any time. The smaller the document # batch, the bigger the relative overhead of the partial fit methods. minibatch_size = 1000 # Create the data_stream that parses Reuters SGML files and iterates on # documents as a stream. minibatch_iterators = iter_minibatches(data_stream, minibatch_size) total_vect_time = 0.0 # Main loop : iterate on mini-batches of examples for i, (X_train_text, y_train) in enumerate(minibatch_iterators): tick = time.time() X_train = vectorizer.transform(X_train_text) total_vect_time += time.time() - tick for cls_name, cls in partial_fit_classifiers.items(): tick = time.time() # update estimator with examples in the current mini-batch cls.partial_fit(X_train, y_train, classes=all_classes) # accumulate test accuracy stats cls_stats[cls_name]["total_fit_time"] += time.time() - tick cls_stats[cls_name]["n_train"] += X_train.shape[0] cls_stats[cls_name]["n_train_pos"] += sum(y_train) tick = time.time() cls_stats[cls_name]["accuracy"] = cls.score(X_test, y_test) cls_stats[cls_name]["prediction_time"] = time.time() - tick acc_history = (cls_stats[cls_name]["accuracy"], cls_stats[cls_name]["n_train"]) cls_stats[cls_name]["accuracy_history"].append(acc_history) run_history = ( cls_stats[cls_name]["accuracy"], total_vect_time + cls_stats[cls_name]["total_fit_time"], ) cls_stats[cls_name]["runtime_history"].append(run_history) if i % 3 == 0: print(progress(cls_name, cls_stats[cls_name])) if i % 3 == 0: print("\n") # %% # Plot results # ------------ # # The plot represents the learning curve of the classifier: the evolution # of classification accuracy 
over the course of the mini-batches. Accuracy is # measured on the first 1000 samples, held out as a validation set. # # To limit the memory consumption, we queue examples up to a fixed amount # before feeding them to the learner. def plot_accuracy(x, y, x_legend): """Plot accuracy as a function of x.""" x = np.array(x) y = np.array(y) plt.title("Classification accuracy as a function of %s" % x_legend) plt.xlabel("%s" % x_legend) plt.ylabel("Accuracy") plt.grid(True) plt.plot(x, y) rcParams["legend.fontsize"] = 10 cls_names = list(sorted(cls_stats.keys())) # Plot accuracy evolution plt.figure() for _, stats in sorted(cls_stats.items()): # Plot accuracy evolution with #examples accuracy, n_examples = zip(*stats["accuracy_history"]) plot_accuracy(n_examples, accuracy, "training examples (#)") ax = plt.gca() ax.set_ylim((0.8, 1)) plt.legend(cls_names, loc="best") plt.figure() for _, stats in sorted(cls_stats.items()): # Plot accuracy evolution with runtime accuracy, runtime = zip(*stats["runtime_history"]) plot_accuracy(runtime, accuracy, "runtime (s)") ax = plt.gca() ax.set_ylim((0.8, 1)) plt.legend(cls_names, loc="best") # Plot fitting times plt.figure() fig = plt.gcf() cls_runtime = [stats["total_fit_time"] for cls_name, stats in sorted(cls_stats.items())] cls_runtime.append(total_vect_time) cls_names.append("Vectorization") bar_colors = ["b", "g", "r", "c", "m", "y"] ax = plt.subplot(111) rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5, color=bar_colors) ax.set_xticks(np.linspace(0, len(cls_names) - 1, len(cls_names))) ax.set_xticklabels(cls_names, fontsize=10) ymax = max(cls_runtime) * 1.2 ax.set_ylim((0, ymax)) ax.set_ylabel("runtime (s)") ax.set_title("Training Times") def autolabel(rectangles): """attach some text vi autolabel on rectangles.""" for rect in rectangles: height = rect.get_height() ax.text( rect.get_x() + rect.get_width() / 2.0, 1.05 * height, "%.4f" % height, ha="center", va="bottom", ) plt.setp(plt.xticks()[1], rotation=30) 
autolabel(rectangles) plt.tight_layout() plt.show() # Plot prediction times plt.figure() cls_runtime = [] cls_names = list(sorted(cls_stats.keys())) for cls_name, stats in sorted(cls_stats.items()): cls_runtime.append(stats["prediction_time"]) cls_runtime.append(parsing_time) cls_names.append("Read/Parse\n+Feat.Extr.") cls_runtime.append(vectorizing_time) cls_names.append("Hashing\n+Vect.") ax = plt.subplot(111) rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5, color=bar_colors) ax.set_xticks(np.linspace(0, len(cls_names) - 1, len(cls_names))) ax.set_xticklabels(cls_names, fontsize=8) plt.setp(plt.xticks()[1], rotation=30) ymax = max(cls_runtime) * 1.2 ax.set_ylim((0, ymax)) ax.set_ylabel("runtime (s)") ax.set_title("Prediction Times (%d instances)" % n_test_documents) autolabel(rectangles) plt.tight_layout() plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/applications/plot_topics_extraction_with_nmf_lda.py
examples/applications/plot_topics_extraction_with_nmf_lda.py
""" ======================================================================================= Topic extraction with Non-negative Matrix Factorization and Latent Dirichlet Allocation ======================================================================================= This is an example of applying :class:`~sklearn.decomposition.NMF` and :class:`~sklearn.decomposition.LatentDirichletAllocation` on a corpus of documents and extract additive models of the topic structure of the corpus. The output is a plot of topics, each represented as bar plot using top few words based on weights. Non-negative Matrix Factorization is applied with two different objective functions: the Frobenius norm, and the generalized Kullback-Leibler divergence. The latter is equivalent to Probabilistic Latent Semantic Indexing. The default parameters (n_samples / n_features / n_components) should make the example runnable in a couple of tens of seconds. You can try to increase the dimensions of the problem, but be aware that the time complexity is polynomial in NMF. In LDA, the time complexity is proportional to (n_samples * iterations). 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from time import time import matplotlib.pyplot as plt from sklearn.datasets import fetch_20newsgroups from sklearn.decomposition import NMF, LatentDirichletAllocation, MiniBatchNMF from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer n_samples = 2000 n_features = 1000 n_components = 10 n_top_words = 20 batch_size = 128 init = "nndsvda" def plot_top_words(model, feature_names, n_top_words, title): fig, axes = plt.subplots(2, 5, figsize=(30, 15), sharex=True) axes = axes.flatten() for topic_idx, topic in enumerate(model.components_): top_features_ind = topic.argsort()[-n_top_words:] top_features = feature_names[top_features_ind] weights = topic[top_features_ind] ax = axes[topic_idx] ax.barh(top_features, weights, height=0.7) ax.set_title(f"Topic {topic_idx + 1}", fontdict={"fontsize": 30}) ax.tick_params(axis="both", which="major", labelsize=20) for i in "top right left".split(): ax.spines[i].set_visible(False) fig.suptitle(title, fontsize=40) plt.subplots_adjust(top=0.90, bottom=0.05, wspace=0.90, hspace=0.3) plt.show() # Load the 20 newsgroups dataset and vectorize it. We use a few heuristics # to filter out useless terms early on: the posts are stripped of headers, # footers and quoted replies, and common English words, words occurring in # only one document or in at least 95% of the documents are removed. print("Loading dataset...") t0 = time() data, _ = fetch_20newsgroups( shuffle=True, random_state=1, remove=("headers", "footers", "quotes"), return_X_y=True, ) data_samples = data[:n_samples] print("done in %0.3fs." % (time() - t0)) # Use tf-idf features for NMF. print("Extracting tf-idf features for NMF...") tfidf_vectorizer = TfidfVectorizer( max_df=0.95, min_df=2, max_features=n_features, stop_words="english" ) t0 = time() tfidf = tfidf_vectorizer.fit_transform(data_samples) print("done in %0.3fs." 
% (time() - t0)) # Use tf (raw term count) features for LDA. print("Extracting tf features for LDA...") tf_vectorizer = CountVectorizer( max_df=0.95, min_df=2, max_features=n_features, stop_words="english" ) t0 = time() tf = tf_vectorizer.fit_transform(data_samples) print("done in %0.3fs." % (time() - t0)) print() # Fit the NMF model print( "Fitting the NMF model (Frobenius norm) with tf-idf features, " "n_samples=%d and n_features=%d..." % (n_samples, n_features) ) t0 = time() nmf = NMF( n_components=n_components, random_state=1, init=init, beta_loss="frobenius", alpha_W=0.00005, alpha_H=0.00005, l1_ratio=1, ).fit(tfidf) print("done in %0.3fs." % (time() - t0)) tfidf_feature_names = tfidf_vectorizer.get_feature_names_out() plot_top_words( nmf, tfidf_feature_names, n_top_words, "Topics in NMF model (Frobenius norm)" ) # Fit the NMF model print( "\n" * 2, "Fitting the NMF model (generalized Kullback-Leibler " "divergence) with tf-idf features, n_samples=%d and n_features=%d..." % (n_samples, n_features), ) t0 = time() nmf = NMF( n_components=n_components, random_state=1, init=init, beta_loss="kullback-leibler", solver="mu", max_iter=1000, alpha_W=0.00005, alpha_H=0.00005, l1_ratio=0.5, ).fit(tfidf) print("done in %0.3fs." % (time() - t0)) tfidf_feature_names = tfidf_vectorizer.get_feature_names_out() plot_top_words( nmf, tfidf_feature_names, n_top_words, "Topics in NMF model (generalized Kullback-Leibler divergence)", ) # Fit the MiniBatchNMF model print( "\n" * 2, "Fitting the MiniBatchNMF model (Frobenius norm) with tf-idf " "features, n_samples=%d and n_features=%d, batch_size=%d..." % (n_samples, n_features, batch_size), ) t0 = time() mbnmf = MiniBatchNMF( n_components=n_components, random_state=1, batch_size=batch_size, init=init, beta_loss="frobenius", alpha_W=0.00005, alpha_H=0.00005, l1_ratio=0.5, ).fit(tfidf) print("done in %0.3fs." 
% (time() - t0)) tfidf_feature_names = tfidf_vectorizer.get_feature_names_out() plot_top_words( mbnmf, tfidf_feature_names, n_top_words, "Topics in MiniBatchNMF model (Frobenius norm)", ) # Fit the MiniBatchNMF model print( "\n" * 2, "Fitting the MiniBatchNMF model (generalized Kullback-Leibler " "divergence) with tf-idf features, n_samples=%d and n_features=%d, " "batch_size=%d..." % (n_samples, n_features, batch_size), ) t0 = time() mbnmf = MiniBatchNMF( n_components=n_components, random_state=1, batch_size=batch_size, init=init, beta_loss="kullback-leibler", alpha_W=0.00005, alpha_H=0.00005, l1_ratio=0.5, ).fit(tfidf) print("done in %0.3fs." % (time() - t0)) tfidf_feature_names = tfidf_vectorizer.get_feature_names_out() plot_top_words( mbnmf, tfidf_feature_names, n_top_words, "Topics in MiniBatchNMF model (generalized Kullback-Leibler divergence)", ) print( "\n" * 2, "Fitting LDA models with tf features, n_samples=%d and n_features=%d..." % (n_samples, n_features), ) lda = LatentDirichletAllocation( n_components=n_components, max_iter=5, learning_method="online", learning_offset=50.0, random_state=0, ) t0 = time() lda.fit(tf) print("done in %0.3fs." % (time() - t0)) tf_feature_names = tf_vectorizer.get_feature_names_out() plot_top_words(lda, tf_feature_names, n_top_words, "Topics in LDA model")
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/applications/plot_species_distribution_modeling.py
examples/applications/plot_species_distribution_modeling.py
""" ============================= Species distribution modeling ============================= Modeling species' geographic distributions is an important problem in conservation biology. In this example, we model the geographic distribution of two South American mammals given past observations and 14 environmental variables. Since we have only positive examples (there are no unsuccessful observations), we cast this problem as a density estimation problem and use the :class:`~sklearn.svm.OneClassSVM` as our modeling tool. The dataset is provided by Phillips et. al. (2006). If available, the example uses `basemap <https://matplotlib.org/basemap/>`_ to plot the coast lines and national boundaries of South America. The two species are: - `Bradypus variegatus <http://www.iucnredlist.org/details/3038/0>`_, the brown-throated sloth. - `Microryzomys minutus <http://www.iucnredlist.org/details/13408/0>`_, also known as the forest small rice rat, a rodent that lives in Peru, Colombia, Ecuador, Peru, and Venezuela. References ---------- - `"Maximum entropy modeling of species geographic distributions" <http://rob.schapire.net/papers/ecolmod.pdf>`_ S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling, 190:231-259, 2006. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from time import time import matplotlib.pyplot as plt import numpy as np from sklearn import metrics, svm from sklearn.datasets import fetch_species_distributions from sklearn.utils import Bunch # if basemap is available, we'll use it. # otherwise, we'll improvise later... 
try: from mpl_toolkits.basemap import Basemap basemap = True except ImportError: basemap = False def construct_grids(batch): """Construct the map grid from the batch object Parameters ---------- batch : Batch object The object returned by :func:`fetch_species_distributions` Returns ------- (xgrid, ygrid) : 1-D arrays The grid corresponding to the values in batch.coverages """ # x,y coordinates for corner cells xmin = batch.x_left_lower_corner + batch.grid_size xmax = xmin + (batch.Nx * batch.grid_size) ymin = batch.y_left_lower_corner + batch.grid_size ymax = ymin + (batch.Ny * batch.grid_size) # x coordinates of the grid cells xgrid = np.arange(xmin, xmax, batch.grid_size) # y coordinates of the grid cells ygrid = np.arange(ymin, ymax, batch.grid_size) return (xgrid, ygrid) def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid): """Create a bunch with information about a particular organism This will use the test/train record arrays to extract the data specific to the given species name. """ bunch = Bunch(name=" ".join(species_name.split("_")[:2])) species_name = species_name.encode("ascii") points = dict(test=test, train=train) for label, pts in points.items(): # choose points associated with the desired species pts = pts[pts["species"] == species_name] bunch["pts_%s" % label] = pts # determine coverage values for each of the training & testing points ix = np.searchsorted(xgrid, pts["dd long"]) iy = np.searchsorted(ygrid, pts["dd lat"]) bunch["cov_%s" % label] = coverages[:, -iy, ix].T return bunch def plot_species_distribution( species=("bradypus_variegatus_0", "microryzomys_minutus_0"), ): """ Plot the species distribution. 
""" if len(species) > 2: print( "Note: when more than two species are provided," " only the first two will be used" ) t0 = time() # Load the compressed data data = fetch_species_distributions() # Set up the data grid xgrid, ygrid = construct_grids(data) # The grid in x,y coordinates X, Y = np.meshgrid(xgrid, ygrid[::-1]) # create a bunch for each species BV_bunch = create_species_bunch( species[0], data.train, data.test, data.coverages, xgrid, ygrid ) MM_bunch = create_species_bunch( species[1], data.train, data.test, data.coverages, xgrid, ygrid ) # background points (grid coordinates) for evaluation np.random.seed(13) background_points = np.c_[ np.random.randint(low=0, high=data.Ny, size=10000), np.random.randint(low=0, high=data.Nx, size=10000), ].T # We'll make use of the fact that coverages[6] has measurements at all # land points. This will help us decide between land and water. land_reference = data.coverages[6] # Fit, predict, and plot for each species. for i, species in enumerate([BV_bunch, MM_bunch]): print("_" * 80) print("Modeling distribution of species '%s'" % species.name) # Standardize features mean = species.cov_train.mean(axis=0) std = species.cov_train.std(axis=0) train_cover_std = (species.cov_train - mean) / std # Fit OneClassSVM print(" - fit OneClassSVM ... 
", end="") clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5) clf.fit(train_cover_std) print("done.") # Plot map of South America plt.subplot(1, 2, i + 1) if basemap: print(" - plot coastlines using basemap") m = Basemap( projection="cyl", llcrnrlat=Y.min(), urcrnrlat=Y.max(), llcrnrlon=X.min(), urcrnrlon=X.max(), resolution="c", ) m.drawcoastlines() m.drawcountries() else: print(" - plot coastlines from coverage") plt.contour( X, Y, land_reference, levels=[-9998], colors="k", linestyles="solid" ) plt.xticks([]) plt.yticks([]) print(" - predict species distribution") # Predict species distribution using the training data Z = np.ones((data.Ny, data.Nx), dtype=np.float64) # We'll predict only for the land points. idx = (land_reference > -9999).nonzero() coverages_land = data.coverages[:, idx[0], idx[1]].T pred = clf.decision_function((coverages_land - mean) / std) Z *= pred.min() Z[idx[0], idx[1]] = pred levels = np.linspace(Z.min(), Z.max(), 25) Z[land_reference == -9999] = -9999 # plot contours of the prediction plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds) plt.colorbar(format="%.2f") # scatter training/testing points plt.scatter( species.pts_train["dd long"], species.pts_train["dd lat"], s=2**2, c="black", marker="^", label="train", ) plt.scatter( species.pts_test["dd long"], species.pts_test["dd lat"], s=2**2, c="black", marker="x", label="test", ) plt.legend() plt.title(species.name) plt.axis("equal") # Compute AUC with regards to background points pred_background = Z[background_points[0], background_points[1]] pred_test = clf.decision_function((species.cov_test - mean) / std) scores = np.r_[pred_test, pred_background] y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)] fpr, tpr, thresholds = metrics.roc_curve(y, scores) roc_auc = metrics.auc(fpr, tpr) plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right") print("\n Area under the ROC curve : %f" % roc_auc) print("\ntime elapsed: %.2fs" % (time() - t0)) plot_species_distribution() 
plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/applications/plot_digits_denoising.py
examples/applications/plot_digits_denoising.py
""" ================================ Image denoising using kernel PCA ================================ This example shows how to use :class:`~sklearn.decomposition.KernelPCA` to denoise images. In short, we take advantage of the approximation function learned during `fit` to reconstruct the original image. We will compare the results with an exact reconstruction using :class:`~sklearn.decomposition.PCA`. We will use USPS digits dataset to reproduce presented in Sect. 4 of [1]_. .. rubric:: References .. [1] `Bakır, Gökhan H., Jason Weston, and Bernhard Schölkopf. "Learning to find pre-images." Advances in neural information processing systems 16 (2004): 449-456. <https://papers.nips.cc/paper/2003/file/ac1ad983e08ad3304a97e147f522747e-Paper.pdf>`_ """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Load the dataset via OpenML # --------------------------- # # The USPS digits datasets is available in OpenML. We use # :func:`~sklearn.datasets.fetch_openml` to get this dataset. In addition, we # normalize the dataset such that all pixel values are in the range (0, 1). import numpy as np from sklearn.datasets import fetch_openml from sklearn.model_selection import train_test_split from sklearn.preprocessing import MinMaxScaler X, y = fetch_openml(data_id=41082, as_frame=False, return_X_y=True) X = MinMaxScaler().fit_transform(X) # %% # The idea will be to learn a PCA basis (with and without a kernel) on # noisy images and then use these models to reconstruct and denoise these # images. # # Thus, we split our dataset into a training and testing set composed of 1,000 # samples for the training and 100 samples for testing. These images are # noise-free and we will use them to evaluate the efficiency of the denoising # approaches. In addition, we create a copy of the original dataset and add a # Gaussian noise. 
# # The idea of this application, is to show that we can denoise corrupted images # by learning a PCA basis on some uncorrupted images. We will use both a PCA # and a kernel-based PCA to solve this problem. X_train, X_test, y_train, y_test = train_test_split( X, y, stratify=y, random_state=0, train_size=1_000, test_size=100 ) rng = np.random.RandomState(0) noise = rng.normal(scale=0.25, size=X_test.shape) X_test_noisy = X_test + noise noise = rng.normal(scale=0.25, size=X_train.shape) X_train_noisy = X_train + noise # %% # In addition, we will create a helper function to qualitatively assess the # image reconstruction by plotting the test images. import matplotlib.pyplot as plt def plot_digits(X, title): """Small helper function to plot 100 digits.""" fig, axs = plt.subplots(nrows=10, ncols=10, figsize=(8, 8)) for img, ax in zip(X, axs.ravel()): ax.imshow(img.reshape((16, 16)), cmap="Greys") ax.axis("off") fig.suptitle(title, fontsize=24) # %% # In addition, we will use the mean squared error (MSE) to quantitatively # assess the image reconstruction. # # Let's first have a look to see the difference between noise-free and noisy # images. We will check the test set in this regard. plot_digits(X_test, "Uncorrupted test images") plot_digits( X_test_noisy, f"Noisy test images\nMSE: {np.mean((X_test - X_test_noisy) ** 2):.2f}" ) # %% # Learn the `PCA` basis # --------------------- # # We can now learn our PCA basis using both a linear PCA and a kernel PCA that # uses a radial basis function (RBF) kernel. from sklearn.decomposition import PCA, KernelPCA pca = PCA(n_components=32, random_state=42) kernel_pca = KernelPCA( n_components=400, kernel="rbf", gamma=1e-3, fit_inverse_transform=True, alpha=5e-3, random_state=42, ) pca.fit(X_train_noisy) _ = kernel_pca.fit(X_train_noisy) # %% # Reconstruct and denoise test images # ----------------------------------- # # Now, we can transform and reconstruct the noisy test set. 
Since we used less # components than the number of original features, we will get an approximation # of the original set. Indeed, by dropping the components explaining variance # in PCA the least, we hope to remove noise. Similar thinking happens in kernel # PCA; however, we expect a better reconstruction because we use a non-linear # kernel to learn the PCA basis and a kernel ridge to learn the mapping # function. X_reconstructed_kernel_pca = kernel_pca.inverse_transform( kernel_pca.transform(X_test_noisy) ) X_reconstructed_pca = pca.inverse_transform(pca.transform(X_test_noisy)) # %% plot_digits(X_test, "Uncorrupted test images") plot_digits( X_reconstructed_pca, f"PCA reconstruction\nMSE: {np.mean((X_test - X_reconstructed_pca) ** 2):.2f}", ) plot_digits( X_reconstructed_kernel_pca, ( "Kernel PCA reconstruction\n" f"MSE: {np.mean((X_test - X_reconstructed_kernel_pca) ** 2):.2f}" ), ) # %% # PCA has a lower MSE than kernel PCA. However, the qualitative analysis might # not favor PCA instead of kernel PCA. We observe that kernel PCA is able to # remove background noise and provide a smoother image. # # However, it should be noted that the results of the denoising with kernel PCA # will depend of the parameters `n_components`, `gamma`, and `alpha`.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/applications/plot_stock_market.py
examples/applications/plot_stock_market.py
""" ======================================= Visualizing the stock market structure ======================================= This example employs several unsupervised learning techniques to extract the stock market structure from variations in historical quotes. The quantity that we use is the daily variation in quote price: quotes that are linked tend to fluctuate in relation to each other during a day. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Retrieve the data from Internet # ------------------------------- # # The data is from 2003 - 2008. This is reasonably calm: not too long ago so # that we get high-tech firms, and before the 2008 crash. This kind of # historical data can be obtained from APIs like the # `data.nasdaq.com <https://data.nasdaq.com/>`_ and # `alphavantage.co <https://www.alphavantage.co/>`_. import sys import numpy as np import pandas as pd symbol_dict = { "TOT": "Total", "XOM": "Exxon", "CVX": "Chevron", "COP": "ConocoPhillips", "VLO": "Valero Energy", "MSFT": "Microsoft", "IBM": "IBM", "TWX": "Time Warner", "CMCSA": "Comcast", "CVC": "Cablevision", "YHOO": "Yahoo", "DELL": "Dell", "HPQ": "HP", "AMZN": "Amazon", "TM": "Toyota", "CAJ": "Canon", "SNE": "Sony", "F": "Ford", "HMC": "Honda", "NAV": "Navistar", "NOC": "Northrop Grumman", "BA": "Boeing", "KO": "Coca Cola", "MMM": "3M", "MCD": "McDonald's", "PEP": "Pepsi", "K": "Kellogg", "UN": "Unilever", "MAR": "Marriott", "PG": "Procter Gamble", "CL": "Colgate-Palmolive", "GE": "General Electrics", "WFC": "Wells Fargo", "JPM": "JPMorgan Chase", "AIG": "AIG", "AXP": "American express", "BAC": "Bank of America", "GS": "Goldman Sachs", "AAPL": "Apple", "SAP": "SAP", "CSCO": "Cisco", "TXN": "Texas Instruments", "XRX": "Xerox", "WMT": "Wal-Mart", "HD": "Home Depot", "GSK": "GlaxoSmithKline", "PFE": "Pfizer", "SNY": "Sanofi-Aventis", "NVS": "Novartis", "KMB": "Kimberly-Clark", "R": "Ryder", "GD": "General Dynamics", "RTN": "Raytheon", "CVS": "CVS", "CAT": 
"Caterpillar", "DD": "DuPont de Nemours", } symbols, names = np.array(sorted(symbol_dict.items())).T quotes = [] for symbol in symbols: print("Fetching quote history for %r" % symbol, file=sys.stderr) url = ( "https://raw.githubusercontent.com/scikit-learn/examples-data/" "master/financial-data/{}.csv" ) quotes.append(pd.read_csv(url.format(symbol))) close_prices = np.vstack([q["close"] for q in quotes]) open_prices = np.vstack([q["open"] for q in quotes]) # The daily variations of the quotes are what carry the most information variation = close_prices - open_prices # %% # .. _stock_market: # # Learning a graph structure # -------------------------- # # We use sparse inverse covariance estimation to find which quotes are # correlated conditionally on the others. Specifically, sparse inverse # covariance gives us a graph, that is a list of connections. For each # symbol, the symbols that it is connected to are those useful to explain # its fluctuations. from sklearn import covariance alphas = np.logspace(-1.5, 1, num=10) edge_model = covariance.GraphicalLassoCV(alphas=alphas) # standardize the time series: using correlations rather than covariance # former is more efficient for structure recovery X = variation.copy().T X /= X.std(axis=0) edge_model.fit(X) # %% # Clustering using affinity propagation # ------------------------------------- # # We use clustering to group together quotes that behave similarly. Here, # amongst the :ref:`various clustering techniques <clustering>` available # in the scikit-learn, we use :ref:`affinity_propagation` as it does # not enforce equal-size clusters, and it can choose automatically the # number of clusters from the data. # # Note that this gives us a different indication than the graph, as the # graph reflects conditional relations between variables, while the # clustering reflects marginal properties: variables clustered together can # be considered as having a similar impact at the level of the full stock # market. 
from sklearn import cluster _, labels = cluster.affinity_propagation(edge_model.covariance_, random_state=0) n_labels = labels.max() for i in range(n_labels + 1): print(f"Cluster {i + 1}: {', '.join(names[labels == i])}") # %% # Embedding in 2D space # --------------------- # # For visualization purposes, we need to lay out the different symbols on a # 2D canvas. For this, we use :ref:`manifold` techniques to retrieve 2D # embedding. # We use a dense ``eigen_solver`` to achieve reproducibility (arpack is initiated # with the random vectors that we do not control). In addition, we use a large # number of neighbors to capture the large-scale structure. # Finding a low-dimension embedding for visualization: find the best position of # the nodes (the stocks) on a 2D plane from sklearn import manifold node_position_model = manifold.LocallyLinearEmbedding( n_components=2, eigen_solver="dense", n_neighbors=6 ) embedding = node_position_model.fit_transform(X.T).T # %% # Visualization # ------------- # # The output of the 3 models are combined in a 2D graph where nodes # represent the stocks and edges the connections (partial correlations): # # - cluster labels are used to define the color of the nodes # - the sparse covariance model is used to display the strength of the edges # - the 2D embedding is used to position the nodes in the plan # # This example has a fair amount of visualization-related code, as # visualization is crucial here to display the graph. One of the challenges # is to position the labels minimizing overlap. For this, we use an # heuristic based on the direction of the nearest neighbor along each # axis. 
import matplotlib.pyplot as plt from matplotlib.collections import LineCollection plt.figure(1, facecolor="w", figsize=(10, 8)) plt.clf() ax = plt.axes([0.0, 0.0, 1.0, 1.0]) plt.axis("off") # Plot the graph of partial correlations partial_correlations = edge_model.precision_.copy() d = 1 / np.sqrt(np.diag(partial_correlations)) partial_correlations *= d partial_correlations *= d[:, np.newaxis] non_zero = np.abs(np.triu(partial_correlations, k=1)) > 0.02 # Plot the nodes using the coordinates of our embedding plt.scatter( embedding[0], embedding[1], s=100 * d**2, c=labels, cmap=plt.cm.nipy_spectral ) # Plot the edges start_idx, end_idx = non_zero.nonzero() # a sequence of (*line0*, *line1*, *line2*), where:: # linen = (x0, y0), (x1, y1), ... (xm, ym) segments = [ [embedding[:, start], embedding[:, stop]] for start, stop in zip(start_idx, end_idx) ] values = np.abs(partial_correlations[non_zero]) lc = LineCollection( segments, zorder=0, cmap=plt.cm.hot_r, norm=plt.Normalize(0, 0.7 * values.max()) ) lc.set_array(values) lc.set_linewidths(15 * values) ax.add_collection(lc) # Add a label to each node. 
The challenge here is that we want to # position the labels to avoid overlap with other labels for index, (name, label, (x, y)) in enumerate(zip(names, labels, embedding.T)): dx = x - embedding[0] dx[index] = 1 dy = y - embedding[1] dy[index] = 1 this_dx = dx[np.argmin(np.abs(dy))] this_dy = dy[np.argmin(np.abs(dx))] if this_dx > 0: horizontalalignment = "left" x = x + 0.002 else: horizontalalignment = "right" x = x - 0.002 if this_dy > 0: verticalalignment = "bottom" y = y + 0.002 else: verticalalignment = "top" y = y - 0.002 plt.text( x, y, name, size=10, horizontalalignment=horizontalalignment, verticalalignment=verticalalignment, bbox=dict( facecolor="w", edgecolor=plt.cm.nipy_spectral(label / float(n_labels)), alpha=0.6, ), ) plt.xlim( embedding[0].min() - 0.15 * np.ptp(embedding[0]), embedding[0].max() + 0.10 * np.ptp(embedding[0]), ) plt.ylim( embedding[1].min() - 0.03 * np.ptp(embedding[1]), embedding[1].max() + 0.03 * np.ptp(embedding[1]), ) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/semi_supervised/plot_self_training_varying_threshold.py
examples/semi_supervised/plot_self_training_varying_threshold.py
""" ============================================= Effect of varying threshold for self-training ============================================= This example illustrates the effect of a varying threshold on self-training. The `breast_cancer` dataset is loaded, and labels are deleted such that only 50 out of 569 samples have labels. A `SelfTrainingClassifier` is fitted on this dataset, with varying thresholds. The upper graph shows the amount of labeled samples that the classifier has available by the end of fit, and the accuracy of the classifier. The lower graph shows the last iteration in which a sample was labeled. All values are cross validated with 3 folds. At low thresholds (in [0.4, 0.5]), the classifier learns from samples that were labeled with a low confidence. These low-confidence samples are likely have incorrect predicted labels, and as a result, fitting on these incorrect labels produces a poor accuracy. Note that the classifier labels almost all of the samples, and only takes one iteration. For very high thresholds (in [0.9, 1)) we observe that the classifier does not augment its dataset (the amount of self-labeled samples is 0). As a result, the accuracy achieved with a threshold of 0.9999 is the same as a normal supervised classifier would achieve. The optimal accuracy lies in between both of these extremes at a threshold of around 0.7. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn import datasets from sklearn.metrics import accuracy_score from sklearn.model_selection import StratifiedKFold from sklearn.semi_supervised import SelfTrainingClassifier from sklearn.svm import SVC from sklearn.utils import shuffle n_splits = 3 X, y = datasets.load_breast_cancer(return_X_y=True) X, y = shuffle(X, y, random_state=42) y_true = y.copy() y[50:] = -1 total_samples = y.shape[0] base_classifier = SVC(probability=True, gamma=0.001, random_state=42) x_values = np.arange(0.4, 1.05, 0.05) x_values = np.append(x_values, 0.99999) scores = np.empty((x_values.shape[0], n_splits)) amount_labeled = np.empty((x_values.shape[0], n_splits)) amount_iterations = np.empty((x_values.shape[0], n_splits)) for i, threshold in enumerate(x_values): self_training_clf = SelfTrainingClassifier(base_classifier, threshold=threshold) # We need manual cross validation so that we don't treat -1 as a separate # class when computing accuracy skfolds = StratifiedKFold(n_splits=n_splits) for fold, (train_index, test_index) in enumerate(skfolds.split(X, y)): X_train = X[train_index] y_train = y[train_index] X_test = X[test_index] y_test = y[test_index] y_test_true = y_true[test_index] self_training_clf.fit(X_train, y_train) # The amount of labeled samples that at the end of fitting amount_labeled[i, fold] = ( total_samples - np.unique(self_training_clf.labeled_iter_, return_counts=True)[1][0] ) # The last iteration the classifier labeled a sample in amount_iterations[i, fold] = np.max(self_training_clf.labeled_iter_) y_pred = self_training_clf.predict(X_test) scores[i, fold] = accuracy_score(y_test_true, y_pred) ax1 = plt.subplot(211) ax1.errorbar( x_values, scores.mean(axis=1), yerr=scores.std(axis=1), capsize=2, color="b" ) ax1.set_ylabel("Accuracy", color="b") ax1.tick_params("y", colors="b") ax2 = ax1.twinx() ax2.errorbar( x_values, 
amount_labeled.mean(axis=1), yerr=amount_labeled.std(axis=1), capsize=2, color="g", ) ax2.set_ylim(bottom=0) ax2.set_ylabel("Amount of labeled samples", color="g") ax2.tick_params("y", colors="g") ax3 = plt.subplot(212, sharex=ax1) ax3.errorbar( x_values, amount_iterations.mean(axis=1), yerr=amount_iterations.std(axis=1), capsize=2, color="b", ) ax3.set_ylim(bottom=0) ax3.set_ylabel("Amount of iterations") ax3.set_xlabel("Threshold") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/semi_supervised/plot_semi_supervised_versus_svm_iris.py
examples/semi_supervised/plot_semi_supervised_versus_svm_iris.py
""" =============================================================================== Decision boundary of semi-supervised classifiers versus SVM on the Iris dataset =============================================================================== This example compares decision boundaries learned by two semi-supervised methods, namely :class:`~sklearn.semi_supervised.LabelSpreading` and :class:`~sklearn.semi_supervised.SelfTrainingClassifier`, while varying the proportion of labeled training data from small fractions up to the full dataset. Both methods rely on RBF kernels: :class:`~sklearn.semi_supervised.LabelSpreading` uses it by default, and :class:`~sklearn.semi_supervised.SelfTrainingClassifier` is paired here with :class:`~sklearn.svm.SVC` as base estimator (also RBF-based by default) to allow a fair comparison. With 100% labeled data, :class:`~sklearn.semi_supervised.SelfTrainingClassifier` reduces to a fully supervised :class:`~sklearn.svm.SVC`, since there are no unlabeled points left to pseudo-label. In a second section, we explain how `predict_proba` is computed in :class:`~sklearn.semi_supervised.LabelSpreading` and :class:`~sklearn.semi_supervised.SelfTrainingClassifier`. See :ref:`sphx_glr_auto_examples_semi_supervised_plot_semi_supervised_newsgroups.py` for a comparison of `LabelSpreading` and `SelfTrainingClassifier` in terms of performance. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% import matplotlib.patches as mpatches import matplotlib.pyplot as plt import numpy as np from sklearn.datasets import load_iris from sklearn.inspection import DecisionBoundaryDisplay from sklearn.semi_supervised import LabelSpreading, SelfTrainingClassifier from sklearn.svm import SVC iris = load_iris() X = iris.data[:, :2] y = iris.target rng = np.random.RandomState(42) y_rand = rng.rand(y.shape[0]) y_10 = np.copy(y) y_10[y_rand > 0.1] = -1 # set random samples to be unlabeled y_30 = np.copy(y) y_30[y_rand > 0.3] = -1 ls10 = (LabelSpreading().fit(X, y_10), y_10, "LabelSpreading with 10% labeled data") ls30 = (LabelSpreading().fit(X, y_30), y_30, "LabelSpreading with 30% labeled data") ls100 = (LabelSpreading().fit(X, y), y, "LabelSpreading with 100% labeled data") base_classifier = SVC(gamma=0.5, probability=True, random_state=42) st10 = ( SelfTrainingClassifier(base_classifier).fit(X, y_10), y_10, "Self-training with 10% labeled data", ) st30 = ( SelfTrainingClassifier(base_classifier).fit(X, y_30), y_30, "Self-training with 30% labeled data", ) rbf_svc = ( base_classifier.fit(X, y), y, "SVC with rbf kernel\n(equivalent to Self-training with 100% labeled data)", ) tab10 = plt.get_cmap("tab10") color_map = {cls: tab10(cls) for cls in np.unique(y)} color_map[-1] = (1, 1, 1) classifiers = (ls10, st10, ls30, st30, ls100, rbf_svc) fig, axes = plt.subplots(nrows=3, ncols=2, sharex="col", sharey="row", figsize=(10, 12)) axes = axes.ravel() handles = [ mpatches.Patch(facecolor=tab10(i), edgecolor="black", label=iris.target_names[i]) for i in np.unique(y) ] handles.append(mpatches.Patch(facecolor="white", edgecolor="black", label="Unlabeled")) for ax, (clf, y_train, title) in zip(axes, classifiers): DecisionBoundaryDisplay.from_estimator( clf, X, response_method="predict_proba", plot_method="contourf", ax=ax, ) colors = [color_map[label] for label in y_train] ax.scatter(X[:, 0], X[:, 
1], c=colors, edgecolor="black") ax.set_title(title) fig.suptitle( "Semi-supervised decision boundaries with varying fractions of labeled data", y=1 ) fig.legend( handles=handles, loc="lower center", ncol=len(handles), bbox_to_anchor=(0.5, 0.0) ) fig.tight_layout(rect=[0, 0.03, 1, 1]) plt.show() # %% # We observe that the decision boundaries are already quite similar to those # using the full labeled data available for training, even when using a very # small subset of the labels. # # Interpretation of `predict_proba` # ================================= # # `predict_proba` in `LabelSpreading` # ----------------------------------- # # :class:`~sklearn.semi_supervised.LabelSpreading` constructs a similarity graph # from the data, by default using an RBF kernel. This means each sample is # connected to every other with a weight that decays with their squared # Euclidean distance, scaled by a parameter `gamma`. # # Once we have that weighted graph, labels are propagated along the graph # edges. Each sample gradually takes on a soft label distribution that reflects # a weighted average of the labels of its neighbors until the process converges. # These per-sample distributions are stored in `label_distributions_`. # # `predict_proba` computes the class probabilities for a new point by taking a # weighted average of the rows in `label_distributions_`, where the weights come # from the RBF kernel similarities between the new point and the training # samples. The averaged values are then renormalized so that they sum to one. # # Just keep in mind that these "probabilities" are graph-based scores, not # calibrated posteriors. Don't over-interpret their absolute values. 
from sklearn.metrics.pairwise import rbf_kernel ls = ls100[0] # fitted LabelSpreading instance x_query = np.array([[3.5, 1.5]]) # point in the soft blue region # Step 1: similarities between query and all training samples W = rbf_kernel(x_query, X, gamma=ls.gamma) # `gamma=20` by default # Step 2: weighted average of label distributions probs = np.dot(W, ls.label_distributions_) # Step 3: normalize to sum to 1 probs /= probs.sum(axis=1, keepdims=True) print("Manual:", probs) print("API :", ls.predict_proba(x_query)) # %% # `predict_proba` in `SelfTrainingClassifier` # ---------------------------------------------- # # :class:`~sklearn.semi_supervised.SelfTrainingClassifier` works by repeatedly # fitting its base estimator on the currently labeled data, then adding # pseudo-labels for unlabeled points whose predicted probabilities exceed a # confidence threshold. This process continues until no new points can be # labeled, at which point the classifier has a final fitted base estimator # stored in the attribute `estimator_`. # # When you call `predict_proba` on the `SelfTrainingClassifier`, it simply # delegates to this final estimator. st = st10[0] print("Manual:", st.estimator_.predict_proba(x_query)) print("API :", st.predict_proba(x_query)) # %% # In both methods, semi-supervised learning can be understood as constructing a # categorical distribution over classes for each sample. # :class:`~sklearn.semi_supervised.LabelSpreading` keeps these distributions soft and # updates them through graph-based propagation. # Predictions (including `predict_proba`) remain tied to the training set, which # must be stored for inference. 
# # :class:`~sklearn.semi_supervised.SelfTrainingClassifier` instead uses these # distributions internally to decide which unlabeled points to assign pseudo-labels # during training, but at prediction time the returned probabilities come directly from # the final fitted estimator, and therefore the decision rule does not require storing # the training data.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/semi_supervised/plot_label_propagation_digits_active_learning.py
examples/semi_supervised/plot_label_propagation_digits_active_learning.py
""" ========================================= Label Propagation digits: Active learning ========================================= Demonstrates an active learning technique to learn handwritten digits using label propagation. We start by training a label propagation model with only 10 labeled points, then we select the top five most uncertain points to label. Next, we train with 15 labeled points (original 10 + 5 new ones). We repeat this process four times to have a model trained with 30 labeled examples. Note you can increase this to label more than 30 by changing `max_iterations`. Labeling more than 30 can be useful to get a sense for the speed of convergence of this active learning technique. A plot will appear showing the top 5 most uncertain digits for each iteration of training. These may or may not contain mistakes, but we will train the next model with their true labels. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from scipy import stats from sklearn import datasets from sklearn.metrics import classification_report, confusion_matrix from sklearn.semi_supervised import LabelSpreading digits = datasets.load_digits() rng = np.random.RandomState(0) indices = np.arange(len(digits.data)) rng.shuffle(indices) X = digits.data[indices[:330]] y = digits.target[indices[:330]] images = digits.images[indices[:330]] n_total_samples = len(y) n_labeled_points = 40 max_iterations = 5 unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:] f = plt.figure() for i in range(max_iterations): if len(unlabeled_indices) == 0: print("No unlabeled items left to label.") break y_train = np.copy(y) y_train[unlabeled_indices] = -1 lp_model = LabelSpreading(gamma=0.25, max_iter=20) lp_model.fit(X, y_train) predicted_labels = lp_model.transduction_[unlabeled_indices] true_labels = y[unlabeled_indices] cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_) 
print("Iteration %i %s" % (i, 70 * "_")) print( "Label Spreading model: %d labeled & %d unlabeled (%d total)" % (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples) ) print(classification_report(true_labels, predicted_labels)) print("Confusion matrix") print(cm) # compute the entropies of transduced label distributions pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T) # select up to 5 digit examples that the classifier is most uncertain about uncertainty_index = np.argsort(pred_entropies)[::-1] uncertainty_index = uncertainty_index[ np.isin(uncertainty_index, unlabeled_indices) ][:5] # keep track of indices that we get labels for delete_indices = np.array([], dtype=int) # for more than 5 iterations, visualize the gain only on the first 5 if i < 5: f.text( 0.05, (1 - (i + 1) * 0.183), "model %d\n\nfit with\n%d labels" % ((i + 1), i * 5 + 10), size=10, ) for index, image_index in enumerate(uncertainty_index): image = images[image_index] # for more than 5 iterations, visualize the gain only on the first 5 if i < 5: sub = f.add_subplot(5, 5, index + 1 + (5 * i)) sub.imshow(image, cmap=plt.cm.gray_r, interpolation="none") sub.set_title( "predict: %i\ntrue: %i" % (lp_model.transduction_[image_index], y[image_index]), size=10, ) sub.axis("off") # labeling 5 points, remote from labeled set (delete_index,) = (unlabeled_indices == image_index).nonzero() delete_indices = np.concatenate((delete_indices, delete_index)) unlabeled_indices = np.delete(unlabeled_indices, delete_indices) n_labeled_points += len(uncertainty_index) f.suptitle( ( "Active learning with Label Propagation.\nRows show 5 most " "uncertain labels to learn with the next model." ), y=1.15, ) plt.subplots_adjust(left=0.2, bottom=0.03, right=0.9, top=0.9, wspace=0.2, hspace=0.85) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/semi_supervised/plot_semi_supervised_newsgroups.py
examples/semi_supervised/plot_semi_supervised_newsgroups.py
""" ================================================ Semi-supervised Classification on a Text Dataset ================================================ This example demonstrates the effectiveness of semi-supervised learning for text classification on :class:`TF-IDF <sklearn.feature_extraction.text.TfidfTransformer>` features when labeled data is scarce. For such purpose we compare four different approaches: 1. Supervised learning using 100% of labels in the training set (best-case scenario) - Uses :class:`~sklearn.linear_model.SGDClassifier` with full supervision - Represents the best possible performance when labeled data is abundant 2. Supervised learning using 20% of labels in the training set (baseline) - Same model as the best-case scenario but trained on a random 20% subset of the labeled training data - Shows the performance degradation of a fully supervised model due to limited labeled data 3. :class:`~sklearn.semi_supervised.SelfTrainingClassifier` (semi-supervised) - Uses 20% labeled data + 80% unlabeled data for training - Iteratively predicts labels for unlabeled data - Demonstrates how self-training can improve performance 4. :class:`~sklearn.semi_supervised.LabelSpreading` (semi-supervised) - Uses 20% labeled data + 80% unlabeled data for training - Propagates labels through the data manifold - Shows how graph-based methods can leverage unlabeled data The example uses the 20 newsgroups dataset, focusing on five categories. The results demonstrate how semi-supervised methods can achieve better performance than supervised learning with limited labeled data by effectively utilizing unlabeled samples. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% from sklearn.datasets import fetch_20newsgroups from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer from sklearn.linear_model import SGDClassifier from sklearn.metrics import f1_score from sklearn.model_selection import train_test_split from sklearn.pipeline import Pipeline from sklearn.semi_supervised import LabelSpreading, SelfTrainingClassifier # Loading dataset containing first five categories data = fetch_20newsgroups( subset="train", categories=[ "alt.atheism", "comp.graphics", "comp.os.ms-windows.misc", "comp.sys.ibm.pc.hardware", "comp.sys.mac.hardware", ], ) # Parameters sdg_params = dict(alpha=1e-5, penalty="l2", loss="log_loss") vectorizer_params = dict(ngram_range=(1, 2), min_df=5, max_df=0.8) # Supervised Pipeline pipeline = Pipeline( [ ("vect", CountVectorizer(**vectorizer_params)), ("tfidf", TfidfTransformer()), ("clf", SGDClassifier(**sdg_params)), ] ) # SelfTraining Pipeline st_pipeline = Pipeline( [ ("vect", CountVectorizer(**vectorizer_params)), ("tfidf", TfidfTransformer()), ("clf", SelfTrainingClassifier(SGDClassifier(**sdg_params))), ] ) # LabelSpreading Pipeline ls_pipeline = Pipeline( [ ("vect", CountVectorizer(**vectorizer_params)), ("tfidf", TfidfTransformer()), ("clf", LabelSpreading()), ] ) def eval_and_get_f1(clf, X_train, y_train, X_test, y_test): """Evaluate model performance and return F1 score""" print(f" Number of training samples: {len(X_train)}") print(f" Unlabeled samples in training set: {sum(1 for x in y_train if x == -1)}") clf.fit(X_train, y_train) y_pred = clf.predict(X_test) f1 = f1_score(y_test, y_pred, average="micro") print(f" Micro-averaged F1 score on test set: {f1:.3f}") print("\n") return f1 X, y = data.data, data.target X_train, X_test, y_train, y_test = train_test_split(X, y) # %% # 1. Evaluate a supervised SGDClassifier using 100% of the (labeled) training set. 
# This represents the best-case performance when the model has full access to all # labeled examples. f1_scores = {} print("1. Supervised SGDClassifier on 100% of the data:") f1_scores["Supervised (100%)"] = eval_and_get_f1( pipeline, X_train, y_train, X_test, y_test ) # %% # 2. Evaluate a supervised SGDClassifier trained on only 20% of the data. # This serves as a baseline to illustrate the performance drop caused by limiting # the training samples. import numpy as np print("2. Supervised SGDClassifier on 20% of the training data:") rng = np.random.default_rng(42) y_mask = rng.random(len(y_train)) < 0.2 # X_20 and y_20 are the subset of the train dataset indicated by the mask X_20, y_20 = map(list, zip(*((x, y) for x, y, m in zip(X_train, y_train, y_mask) if m))) f1_scores["Supervised (20%)"] = eval_and_get_f1(pipeline, X_20, y_20, X_test, y_test) # %% # 3. Evaluate a semi-supervised SelfTrainingClassifier using 20% labeled and 80% # unlabeled data. # The remaining 80% of the training labels are masked as unlabeled (-1), # allowing the model to iteratively label and learn from them. print( "3. SelfTrainingClassifier (semi-supervised) using 20% labeled " "+ 80% unlabeled data):" ) y_train_semi = y_train.copy() y_train_semi[~y_mask] = -1 f1_scores["SelfTraining"] = eval_and_get_f1( st_pipeline, X_train, y_train_semi, X_test, y_test ) # %% # 4. Evaluate a semi-supervised LabelSpreading model using 20% labeled and 80% # unlabeled data. # Like SelfTraining, the model infers labels for the unlabeled portion of the data # to enhance performance. print("4. LabelSpreading (semi-supervised) using 20% labeled + 80% unlabeled data:") f1_scores["LabelSpreading"] = eval_and_get_f1( ls_pipeline, X_train, y_train_semi, X_test, y_test ) # %% # Plot results # ------------ # Visualize the performance of different classification approaches using a bar chart. # This helps to compare how each method performs based on the # micro-averaged :func:`~sklearn.metrics.f1_score`. 
# Micro-averaging computes metrics globally across all classes, # which gives a single overall measure of performance and allows fair comparison # between the different approaches, even in the presence of class imbalance. import matplotlib.pyplot as plt plt.figure(figsize=(10, 6)) models = list(f1_scores.keys()) scores = list(f1_scores.values()) colors = ["royalblue", "royalblue", "forestgreen", "royalblue"] bars = plt.bar(models, scores, color=colors) plt.title("Comparison of Classification Approaches") plt.ylabel("Micro-averaged F1 Score on test set") plt.xticks() for bar in bars: height = bar.get_height() plt.text( bar.get_x() + bar.get_width() / 2.0, height, f"{height:.2f}", ha="center", va="bottom", ) plt.figtext( 0.5, 0.02, "SelfTraining classifier shows improved performance over " "supervised learning with limited data", ha="center", va="bottom", fontsize=10, style="italic", ) plt.tight_layout() plt.subplots_adjust(bottom=0.15) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/semi_supervised/plot_label_propagation_digits.py
examples/semi_supervised/plot_label_propagation_digits.py
""" =================================================== Label Propagation digits: Demonstrating performance =================================================== This example demonstrates the power of semisupervised learning by training a Label Spreading model to classify handwritten digits with sets of very few labels. The handwritten digit dataset has 1797 total points. The model will be trained using all points, but only 30 will be labeled. Results in the form of a confusion matrix and a series of metrics over each class will be very good. At the end, the top 10 most uncertain predictions will be shown. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Data generation # --------------- # # We use the digits dataset. We only use a subset of randomly selected samples. import numpy as np from sklearn import datasets digits = datasets.load_digits() rng = np.random.RandomState(2) indices = np.arange(len(digits.data)) rng.shuffle(indices) # %% # # We selected 340 samples of which only 40 will be associated with a known label. # Therefore, we store the indices of the 300 other samples for which we are not # supposed to know their labels. X = digits.data[indices[:340]] y = digits.target[indices[:340]] images = digits.images[indices[:340]] n_total_samples = len(y) n_labeled_points = 40 indices = np.arange(n_total_samples) unlabeled_set = indices[n_labeled_points:] # %% # Shuffle everything around y_train = np.copy(y) y_train[unlabeled_set] = -1 # %% # Semi-supervised learning # ------------------------ # # We fit a :class:`~sklearn.semi_supervised.LabelSpreading` and use it to predict # the unknown labels. 
from sklearn.metrics import classification_report from sklearn.semi_supervised import LabelSpreading lp_model = LabelSpreading(gamma=0.25, max_iter=20) lp_model.fit(X, y_train) predicted_labels = lp_model.transduction_[unlabeled_set] true_labels = y[unlabeled_set] print( "Label Spreading model: %d labeled & %d unlabeled points (%d total)" % (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples) ) # %% # Classification report print(classification_report(true_labels, predicted_labels)) # %% # Confusion matrix from sklearn.metrics import ConfusionMatrixDisplay ConfusionMatrixDisplay.from_predictions( true_labels, predicted_labels, labels=lp_model.classes_ ) # %% # Plot the most uncertain predictions # ----------------------------------- # # Here, we will pick and show the 10 most uncertain predictions. from scipy import stats pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T) # %% # Pick the top 10 most uncertain labels uncertainty_index = np.argsort(pred_entropies)[-10:] # %% # Plot import matplotlib.pyplot as plt f = plt.figure(figsize=(7, 5)) for index, image_index in enumerate(uncertainty_index): image = images[image_index] sub = f.add_subplot(2, 5, index + 1) sub.imshow(image, cmap=plt.cm.gray_r) plt.xticks([]) plt.yticks([]) sub.set_title( "predict: %i\ntrue: %i" % (lp_model.transduction_[image_index], y[image_index]) ) f.suptitle("Learning with small amount of labeled data") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/semi_supervised/plot_label_propagation_structure.py
examples/semi_supervised/plot_label_propagation_structure.py
""" ======================================================= Label Propagation circles: Learning a complex structure ======================================================= Example of LabelPropagation learning a complex internal structure to demonstrate "manifold learning". The outer circle should be labeled "red" and the inner circle "blue". Because both label groups lie inside their own distinct shape, we can see that the labels propagate correctly around the circle. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # We generate a dataset with two concentric circles. In addition, a label # is associated with each sample of the dataset that is: 0 (belonging to # the outer circle), 1 (belonging to the inner circle), and -1 (unknown). # Here, all labels but two are tagged as unknown. import numpy as np from sklearn.datasets import make_circles n_samples = 200 X, y = make_circles(n_samples=n_samples, shuffle=False) outer, inner = 0, 1 labels = np.full(n_samples, -1.0) labels[0] = outer labels[-1] = inner # %% # Plot raw data import matplotlib.pyplot as plt plt.figure(figsize=(4, 4)) plt.scatter( X[labels == outer, 0], X[labels == outer, 1], color="navy", marker="s", lw=0, label="outer labeled", s=10, ) plt.scatter( X[labels == inner, 0], X[labels == inner, 1], color="c", marker="s", lw=0, label="inner labeled", s=10, ) plt.scatter( X[labels == -1, 0], X[labels == -1, 1], color="darkorange", marker=".", label="unlabeled", ) plt.legend(scatterpoints=1, shadow=False, loc="center") _ = plt.title("Raw data (2 classes=outer and inner)") # %% # # The aim of :class:`~sklearn.semi_supervised.LabelSpreading` is to associate # a label to sample where the label is initially unknown. from sklearn.semi_supervised import LabelSpreading label_spread = LabelSpreading(kernel="knn", alpha=0.8) label_spread.fit(X, labels) # %% # Now, we can check which labels have been associated with each sample # when the label was unknown. 
output_labels = label_spread.transduction_ output_label_array = np.asarray(output_labels) outer_numbers = (output_label_array == outer).nonzero()[0] inner_numbers = (output_label_array == inner).nonzero()[0] plt.figure(figsize=(4, 4)) plt.scatter( X[outer_numbers, 0], X[outer_numbers, 1], color="navy", marker="s", lw=0, s=10, label="outer learned", ) plt.scatter( X[inner_numbers, 0], X[inner_numbers, 1], color="c", marker="s", lw=0, s=10, label="inner learned", ) plt.legend(scatterpoints=1, shadow=False, loc="center") plt.title("Labels learned with Label Spreading (KNN)") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/cross_decomposition/plot_pcr_vs_pls.py
examples/cross_decomposition/plot_pcr_vs_pls.py
""" ================================================================== Principal Component Regression vs Partial Least Squares Regression ================================================================== This example compares `Principal Component Regression <https://en.wikipedia.org/wiki/Principal_component_regression>`_ (PCR) and `Partial Least Squares Regression <https://en.wikipedia.org/wiki/Partial_least_squares_regression>`_ (PLS) on a toy dataset. Our goal is to illustrate how PLS can outperform PCR when the target is strongly correlated with some directions in the data that have a low variance. PCR is a regressor composed of two steps: first, :class:`~sklearn.decomposition.PCA` is applied to the training data, possibly performing dimensionality reduction; then, a regressor (e.g. a linear regressor) is trained on the transformed samples. In :class:`~sklearn.decomposition.PCA`, the transformation is purely unsupervised, meaning that no information about the targets is used. As a result, PCR may perform poorly in some datasets where the target is strongly correlated with *directions* that have low variance. Indeed, the dimensionality reduction of PCA projects the data into a lower dimensional space where the variance of the projected data is greedily maximized along each axis. Despite them having the most predictive power on the target, the directions with a lower variance will be dropped, and the final regressor will not be able to leverage them. PLS is both a transformer and a regressor, and it is quite similar to PCR: it also applies a dimensionality reduction to the samples before applying a linear regressor to the transformed data. The main difference with PCR is that the PLS transformation is supervised. Therefore, as we will see in this example, it does not suffer from the issue we just mentioned. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # The data # -------- # # We start by creating a simple dataset with two features. Before we even dive # into PCR and PLS, we fit a PCA estimator to display the two principal # components of this dataset, i.e. the two directions that explain the most # variance in the data. import matplotlib.pyplot as plt import numpy as np from sklearn.decomposition import PCA rng = np.random.RandomState(0) n_samples = 500 cov = [[3, 3], [3, 4]] X = rng.multivariate_normal(mean=[0, 0], cov=cov, size=n_samples) pca = PCA(n_components=2).fit(X) plt.scatter(X[:, 0], X[:, 1], alpha=0.3, label="samples") for i, (comp, var) in enumerate(zip(pca.components_, pca.explained_variance_)): comp = comp * var # scale component by its variance explanation power plt.plot( [0, comp[0]], [0, comp[1]], label=f"Component {i}", linewidth=5, color=f"C{i + 2}", ) plt.gca().set( aspect="equal", title="2-dimensional dataset with principal components", xlabel="first feature", ylabel="second feature", ) plt.legend() plt.show() # %% # For the purpose of this example, we now define the target `y` such that it is # strongly correlated with a direction that has a small variance. To this end, # we will project `X` onto the second component, and add some noise to it. y = X.dot(pca.components_[1]) + rng.normal(size=n_samples) / 2 fig, axes = plt.subplots(1, 2, figsize=(10, 3)) axes[0].scatter(X.dot(pca.components_[0]), y, alpha=0.3) axes[0].set(xlabel="Projected data onto first PCA component", ylabel="y") axes[1].scatter(X.dot(pca.components_[1]), y, alpha=0.3) axes[1].set(xlabel="Projected data onto second PCA component", ylabel="y") plt.tight_layout() plt.show() # %% # Projection on one component and predictive power # ------------------------------------------------ # # We now create two regressors: PCR and PLS, and for our illustration purposes # we set the number of components to 1. 
Before feeding the data to the PCA step # of PCR, we first standardize it, as recommended by good practice. The PLS # estimator has built-in scaling capabilities. # # For both models, we plot the projected data onto the first component against # the target. In both cases, this projected data is what the regressors will # use as training data. from sklearn.cross_decomposition import PLSRegression from sklearn.decomposition import PCA from sklearn.linear_model import LinearRegression from sklearn.model_selection import train_test_split from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=rng) pcr = make_pipeline(StandardScaler(), PCA(n_components=1), LinearRegression()) pcr.fit(X_train, y_train) pca = pcr.named_steps["pca"] # retrieve the PCA step of the pipeline pls = PLSRegression(n_components=1) pls.fit(X_train, y_train) fig, axes = plt.subplots(1, 2, figsize=(10, 3)) axes[0].scatter(pca.transform(X_test), y_test, alpha=0.3, label="ground truth") axes[0].scatter( pca.transform(X_test), pcr.predict(X_test), alpha=0.3, label="predictions" ) axes[0].set( xlabel="Projected data onto first PCA component", ylabel="y", title="PCR / PCA" ) axes[0].legend() axes[1].scatter(pls.transform(X_test), y_test, alpha=0.3, label="ground truth") axes[1].scatter( pls.transform(X_test), pls.predict(X_test), alpha=0.3, label="predictions" ) axes[1].set(xlabel="Projected data onto first PLS component", ylabel="y", title="PLS") axes[1].legend() plt.tight_layout() plt.show() # %% # As expected, the unsupervised PCA transformation of PCR has dropped the # second component, i.e. the direction with the lowest variance, despite # it being the most predictive direction. This is because PCA is a completely # unsupervised transformation, and results in the projected data having a low # predictive power on the target. 
# # On the other hand, the PLS regressor manages to capture the effect of the # direction with the lowest variance, thanks to its use of target information # during the transformation: it can recognize that this direction is actually # the most predictive. We note that the first PLS component is negatively # correlated with the target, which comes from the fact that the signs of # eigenvectors are arbitrary. # # We also print the R-squared scores of both estimators, which further confirms # that PLS is a better alternative than PCR in this case. A negative R-squared # indicates that PCR performs worse than a regressor that would simply predict # the mean of the target. print(f"PCR r-squared {pcr.score(X_test, y_test):.3f}") print(f"PLS r-squared {pls.score(X_test, y_test):.3f}") # %% # As a final remark, we note that PCR with 2 components performs as well as # PLS: this is because in this case, PCR was able to leverage the second # component which has the most preditive power on the target. pca_2 = make_pipeline(PCA(n_components=2), LinearRegression()) pca_2.fit(X_train, y_train) print(f"PCR r-squared with 2 components {pca_2.score(X_test, y_test):.3f}")
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/cross_decomposition/plot_compare_cross_decomposition.py
examples/cross_decomposition/plot_compare_cross_decomposition.py
""" =================================== Compare cross decomposition methods =================================== Simple usage of various cross decomposition algorithms: - PLSCanonical - PLSRegression, with multivariate response, a.k.a. PLS2 - PLSRegression, with univariate response, a.k.a. PLS1 - CCA Given 2 multivariate covarying two-dimensional datasets, X, and Y, PLS extracts the 'directions of covariance', i.e. the components of each datasets that explain the most shared variance between both datasets. This is apparent on the **scatterplot matrix** display: components 1 in dataset X and dataset Y are maximally correlated (points lie around the first diagonal). This is also true for components 2 in both dataset, however, the correlation across datasets for different components is weak: the point cloud is very spherical. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Dataset based latent variables model # ------------------------------------ import numpy as np from sklearn.model_selection import train_test_split rng = np.random.default_rng(42) n = 500 # 2 latents vars: l1 = rng.normal(size=n) l2 = rng.normal(size=n) latents = np.array([l1, l1, l2, l2]).T X = latents + rng.normal(size=(n, 4)) Y = latents + rng.normal(size=(n, 4)) X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.5, shuffle=False) print("Corr(X)") print(np.round(np.corrcoef(X.T), 2)) print("Corr(Y)") print(np.round(np.corrcoef(Y.T), 2)) # %% # Canonical (symmetric) PLS # ------------------------- # # Transform data # ~~~~~~~~~~~~~~ from sklearn.cross_decomposition import PLSCanonical plsca = PLSCanonical(n_components=2) plsca.fit(X_train, Y_train) X_train_r, Y_train_r = plsca.transform(X_train, Y_train) X_test_r, Y_test_r = plsca.transform(X_test, Y_test) # %% # Scatter plot of scores # ~~~~~~~~~~~~~~~~~~~~~~ import matplotlib.pyplot as plt # On diagonal plot X vs Y scores on each components plt.figure(figsize=(12, 8)) plt.subplot(221) 
plt.scatter(X_train_r[:, 0], Y_train_r[:, 0], label="train", marker="o", s=25) plt.scatter(X_test_r[:, 0], Y_test_r[:, 0], label="test", marker="o", s=25) plt.xlabel("x scores") plt.ylabel("y scores") plt.title( "Comp. 1: X vs Y (test corr = %.2f)" % np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1] ) plt.xticks(()) plt.yticks(()) plt.legend(loc="best") plt.subplot(224) plt.scatter(X_train_r[:, 1], Y_train_r[:, 1], label="train", marker="o", s=25) plt.scatter(X_test_r[:, 1], Y_test_r[:, 1], label="test", marker="o", s=25) plt.xlabel("x scores") plt.ylabel("y scores") plt.title( "Comp. 2: X vs Y (test corr = %.2f)" % np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1] ) plt.xticks(()) plt.yticks(()) plt.legend(loc="best") # Off diagonal plot components 1 vs 2 for X and Y plt.subplot(222) plt.scatter(X_train_r[:, 0], X_train_r[:, 1], label="train", marker="*", s=50) plt.scatter(X_test_r[:, 0], X_test_r[:, 1], label="test", marker="*", s=50) plt.xlabel("X comp. 1") plt.ylabel("X comp. 2") plt.title( "X comp. 1 vs X comp. 2 (test corr = %.2f)" % np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1] ) plt.legend(loc="best") plt.xticks(()) plt.yticks(()) plt.subplot(223) plt.scatter(Y_train_r[:, 0], Y_train_r[:, 1], label="train", marker="*", s=50) plt.scatter(Y_test_r[:, 0], Y_test_r[:, 1], label="test", marker="*", s=50) plt.xlabel("Y comp. 1") plt.ylabel("Y comp. 2") plt.title( "Y comp. 1 vs Y comp. 2 , (test corr = %.2f)" % np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1] ) plt.legend(loc="best") plt.xticks(()) plt.yticks(()) plt.show() # %% # PLS regression, with multivariate response, a.k.a. 
PLS2 # ------------------------------------------------------- from sklearn.cross_decomposition import PLSRegression n = 1000 q = 3 p = 10 X = rng.normal(size=(n, p)) B = np.array([[1, 2] + [0] * (p - 2)] * q).T # each Yj = 1*X1 + 2*X2 + noize Y = np.dot(X, B) + rng.normal(size=(n, q)) + 5 pls2 = PLSRegression(n_components=3) pls2.fit(X, Y) print("True B (such that: Y = XB + Err)") print(B) # compare pls2.coef_ with B print("Estimated B") print(np.round(pls2.coef_, 1)) pls2.predict(X) # %% # PLS regression, with univariate response, a.k.a. PLS1 # ----------------------------------------------------- n = 1000 p = 10 X = rng.normal(size=(n, p)) y = X[:, 0] + 2 * X[:, 1] + rng.normal(size=n) + 5 pls1 = PLSRegression(n_components=3) pls1.fit(X, y) # note that the number of components exceeds 1 (the dimension of y) print("Estimated betas") print(np.round(pls1.coef_, 1)) # %% # CCA (PLS mode B with symmetric deflation) # ----------------------------------------- from sklearn.cross_decomposition import CCA cca = CCA(n_components=2) cca.fit(X_train, Y_train) X_train_r, Y_train_r = cca.transform(X_train, Y_train) X_test_r, Y_test_r = cca.transform(X_test, Y_test)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/linear_model/plot_lasso_lasso_lars_elasticnet_path.py
examples/linear_model/plot_lasso_lasso_lars_elasticnet_path.py
""" ======================================== Lasso, Lasso-LARS, and Elastic Net paths ======================================== This example shows how to compute the "paths" of coefficients along the Lasso, Lasso-LARS, and Elastic Net regularization paths. In other words, it shows the relationship between the regularization parameter (alpha) and the coefficients. Lasso and Lasso-LARS impose a sparsity constraint on the coefficients, encouraging some of them to be zero. Elastic Net is a generalization of Lasso that adds an L2 penalty term to the L1 penalty term. This allows for some coefficients to be non-zero while still encouraging sparsity. Lasso and Elastic Net use a coordinate descent method to compute the paths, while Lasso-LARS uses the LARS algorithm to compute the paths. The paths are computed using :func:`~sklearn.linear_model.lasso_path`, :func:`~sklearn.linear_model.lars_path`, and :func:`~sklearn.linear_model.enet_path`. The results show different comparison plots: - Compare Lasso and Lasso-LARS - Compare Lasso and Elastic Net - Compare Lasso with positive Lasso - Compare LARS and Positive LARS - Compare Elastic Net and positive Elastic Net Each plot shows how the model coefficients vary as the regularization strength changes, offering insight into the behavior of these models under different constraints. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from itertools import cycle import matplotlib.pyplot as plt from sklearn.datasets import load_diabetes from sklearn.linear_model import enet_path, lars_path, lasso_path X, y = load_diabetes(return_X_y=True) X /= X.std(axis=0) # Standardize data (easier to set the l1_ratio parameter) # Compute paths eps = 5e-3 # the smaller it is the longer is the path print("Computing regularization path using the lasso...") alphas_lasso, coefs_lasso, _ = lasso_path(X, y, eps=eps) print("Computing regularization path using the positive lasso...") alphas_positive_lasso, coefs_positive_lasso, _ = lasso_path( X, y, eps=eps, positive=True ) print("Computing regularization path using the LARS...") alphas_lars, _, coefs_lars = lars_path(X, y, method="lasso") print("Computing regularization path using the positive LARS...") alphas_positive_lars, _, coefs_positive_lars = lars_path( X, y, method="lasso", positive=True ) print("Computing regularization path using the elastic net...") alphas_enet, coefs_enet, _ = enet_path(X, y, eps=eps, l1_ratio=0.8) print("Computing regularization path using the positive elastic net...") alphas_positive_enet, coefs_positive_enet, _ = enet_path( X, y, eps=eps, l1_ratio=0.8, positive=True ) # Display results plt.figure(1) colors = cycle(["b", "r", "g", "c", "k"]) for coef_lasso, coef_lars, c in zip(coefs_lasso, coefs_lars, colors): l1 = plt.semilogx(alphas_lasso, coef_lasso, c=c) l2 = plt.semilogx(alphas_lars, coef_lars, linestyle="--", c=c) plt.xlabel("alpha") plt.ylabel("coefficients") plt.title("Lasso and LARS Paths") plt.legend((l1[-1], l2[-1]), ("Lasso", "LARS"), loc="lower right") plt.axis("tight") plt.figure(2) colors = cycle(["b", "r", "g", "c", "k"]) for coef_l, coef_e, c in zip(coefs_lasso, coefs_enet, colors): l1 = plt.semilogx(alphas_lasso, coef_l, c=c) l2 = plt.semilogx(alphas_enet, coef_e, linestyle="--", c=c) plt.xlabel("alpha") plt.ylabel("coefficients") 
plt.title("Lasso and Elastic-Net Paths") plt.legend((l1[-1], l2[-1]), ("Lasso", "Elastic-Net"), loc="lower right") plt.axis("tight") plt.figure(3) for coef_l, coef_pl, c in zip(coefs_lasso, coefs_positive_lasso, colors): l1 = plt.semilogy(alphas_lasso, coef_l, c=c) l2 = plt.semilogy(alphas_positive_lasso, coef_pl, linestyle="--", c=c) plt.xlabel("alpha") plt.ylabel("coefficients") plt.title("Lasso and positive Lasso") plt.legend((l1[-1], l2[-1]), ("Lasso", "positive Lasso"), loc="lower right") plt.axis("tight") plt.figure(4) colors = cycle(["b", "r", "g", "c", "k"]) for coef_lars, coef_positive_lars, c in zip(coefs_lars, coefs_positive_lars, colors): l1 = plt.semilogx(alphas_lars, coef_lars, c=c) l2 = plt.semilogx(alphas_positive_lars, coef_positive_lars, linestyle="--", c=c) plt.xlabel("alpha") plt.ylabel("coefficients") plt.title("LARS and Positive LARS") plt.legend((l1[-1], l2[-1]), ("LARS", "Positive LARS"), loc="lower right") plt.axis("tight") plt.figure(5) for coef_e, coef_pe, c in zip(coefs_enet, coefs_positive_enet, colors): l1 = plt.semilogx(alphas_enet, coef_e, c=c) l2 = plt.semilogx(alphas_positive_enet, coef_pe, linestyle="--", c=c) plt.xlabel("alpha") plt.ylabel("coefficients") plt.title("Elastic-Net and positive Elastic-Net") plt.legend((l1[-1], l2[-1]), ("Elastic-Net", "positive Elastic-Net"), loc="lower right") plt.axis("tight") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/linear_model/plot_omp.py
examples/linear_model/plot_omp.py
""" =========================== Orthogonal Matching Pursuit =========================== Using orthogonal matching pursuit for recovering a sparse signal from a noisy measurement encoded with a dictionary """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn.datasets import make_sparse_coded_signal from sklearn.linear_model import OrthogonalMatchingPursuit, OrthogonalMatchingPursuitCV n_components, n_features = 512, 100 n_nonzero_coefs = 17 # generate the data # y = Xw # |x|_0 = n_nonzero_coefs y, X, w = make_sparse_coded_signal( n_samples=1, n_components=n_components, n_features=n_features, n_nonzero_coefs=n_nonzero_coefs, random_state=0, ) X = X.T (idx,) = w.nonzero() # distort the clean signal y_noisy = y + 0.05 * np.random.randn(len(y)) # plot the sparse signal plt.figure(figsize=(7, 7)) plt.subplot(4, 1, 1) plt.xlim(0, 512) plt.title("Sparse signal") plt.stem(idx, w[idx]) # plot the noise-free reconstruction omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs) omp.fit(X, y) coef = omp.coef_ (idx_r,) = coef.nonzero() plt.subplot(4, 1, 2) plt.xlim(0, 512) plt.title("Recovered signal from noise-free measurements") plt.stem(idx_r, coef[idx_r]) # plot the noisy reconstruction omp.fit(X, y_noisy) coef = omp.coef_ (idx_r,) = coef.nonzero() plt.subplot(4, 1, 3) plt.xlim(0, 512) plt.title("Recovered signal from noisy measurements") plt.stem(idx_r, coef[idx_r]) # plot the noisy reconstruction with number of non-zeros set by CV omp_cv = OrthogonalMatchingPursuitCV() omp_cv.fit(X, y_noisy) coef = omp_cv.coef_ (idx_r,) = coef.nonzero() plt.subplot(4, 1, 4) plt.xlim(0, 512) plt.title("Recovered signal from noisy measurements with CV") plt.stem(idx_r, coef[idx_r]) plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38) plt.suptitle("Sparse signal recovery with Orthogonal Matching Pursuit", fontsize=16) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/linear_model/plot_sgd_iris.py
examples/linear_model/plot_sgd_iris.py
""" ======================================== Plot multi-class SGD on the iris dataset ======================================== Plot decision surface of multi-class SGD on iris dataset. The hyperplanes corresponding to the three one-versus-all (OVA) classifiers are represented by the dashed lines. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn import datasets from sklearn.inspection import DecisionBoundaryDisplay from sklearn.linear_model import SGDClassifier # import some data to play with iris = datasets.load_iris() # we only take the first two features. We could # avoid this ugly slicing by using a two-dim dataset X = iris.data[:, :2] y = iris.target colors = "bry" # shuffle idx = np.arange(X.shape[0]) np.random.seed(13) np.random.shuffle(idx) X = X[idx] y = y[idx] # standardize mean = X.mean(axis=0) std = X.std(axis=0) X = (X - mean) / std clf = SGDClassifier(alpha=0.001, max_iter=100).fit(X, y) ax = plt.gca() DecisionBoundaryDisplay.from_estimator( clf, X, cmap=plt.cm.Paired, ax=ax, response_method="predict", xlabel=iris.feature_names[0], ylabel=iris.feature_names[1], ) plt.axis("tight") # Plot also the training points for i, color in zip(clf.classes_, colors): idx = (y == i).nonzero() plt.scatter( X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i], edgecolor="black", s=20, ) plt.title("Decision surface of multi-class SGD") plt.axis("tight") # Plot the three one-against-all classifiers xmin, xmax = plt.xlim() ymin, ymax = plt.ylim() coef = clf.coef_ intercept = clf.intercept_ def plot_hyperplane(c, color): def line(x0): return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1] plt.plot([xmin, xmax], [line(xmin), line(xmax)], ls="--", color=color) for i, color in zip(clf.classes_, colors): plot_hyperplane(i, color) plt.legend() plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/linear_model/plot_sparse_logistic_regression_mnist.py
examples/linear_model/plot_sparse_logistic_regression_mnist.py
""" ===================================================== MNIST classification using multinomial logistic + L1 ===================================================== Here we fit a multinomial logistic regression with L1 penalty on a subset of the MNIST digits classification task. We use the SAGA algorithm for this purpose: this a solver that is fast when the number of samples is significantly larger than the number of features and is able to finely optimize non-smooth objective functions which is the case with the l1-penalty. Test accuracy reaches > 0.8, while weight vectors remains *sparse* and therefore more easily *interpretable*. Note that this accuracy of this l1-penalized linear model is significantly below what can be reached by an l2-penalized linear model or a non-linear multi-layer perceptron model on this dataset. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import time import matplotlib.pyplot as plt import numpy as np from sklearn.datasets import fetch_openml from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.utils import check_random_state # Turn down for faster convergence t0 = time.time() train_samples = 5000 # Load data from https://www.openml.org/d/554 X, y = fetch_openml("mnist_784", version=1, return_X_y=True, as_frame=False) random_state = check_random_state(0) permutation = random_state.permutation(X.shape[0]) X = X[permutation] y = y[permutation] X = X.reshape((X.shape[0], -1)) X_train, X_test, y_train, y_test = train_test_split( X, y, train_size=train_samples, test_size=10000 ) scaler = StandardScaler() X_train = scaler.fit_transform(X_train) X_test = scaler.transform(X_test) # Turn up tolerance for faster convergence clf = LogisticRegression(C=50.0 / train_samples, l1_ratio=1, solver="saga", tol=0.1) clf.fit(X_train, y_train) sparsity = np.mean(clf.coef_ == 0) * 100 score = 
clf.score(X_test, y_test) # print('Best C % .4f' % clf.C_) print("Sparsity with L1 penalty: %.2f%%" % sparsity) print("Test score with L1 penalty: %.4f" % score) coef = clf.coef_.copy() plt.figure(figsize=(10, 5)) scale = np.abs(coef).max() for i in range(10): l1_plot = plt.subplot(2, 5, i + 1) l1_plot.imshow( coef[i].reshape(28, 28), interpolation="nearest", cmap=plt.cm.RdBu, vmin=-scale, vmax=scale, ) l1_plot.set_xticks(()) l1_plot.set_yticks(()) l1_plot.set_xlabel(f"Class {i}") plt.suptitle("Classification vector for...") run_time = time.time() - t0 print("Example run in %.3f s" % run_time) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/linear_model/plot_lasso_dense_vs_sparse_data.py
examples/linear_model/plot_lasso_dense_vs_sparse_data.py
""" ============================== Lasso on dense and sparse data ============================== We show that linear_model.Lasso provides the same results for dense and sparse data and that in the case of sparse data the speed is improved. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from time import time from scipy import linalg, sparse from sklearn.datasets import make_regression from sklearn.linear_model import Lasso # %% # Comparing the two Lasso implementations on Dense data # ----------------------------------------------------- # # We create a linear regression problem that is suitable for the Lasso, # that is to say, with more features than samples. We then store the data # matrix in both dense (the usual) and sparse format, and train a Lasso on # each. We compute the runtime of both and check that they learned the # same model by computing the Euclidean norm of the difference between the # coefficients they learned. Because the data is dense, we expect better # runtime with a dense data format. X, y = make_regression(n_samples=200, n_features=5000, random_state=0) # create a copy of X in sparse format X_sp = sparse.coo_matrix(X) alpha = 1 sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000) dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000) t0 = time() sparse_lasso.fit(X_sp, y) print(f"Sparse Lasso done in {(time() - t0):.3f}s") t0 = time() dense_lasso.fit(X, y) print(f"Dense Lasso done in {(time() - t0):.3f}s") # compare the regression coefficients coeff_diff = linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_) print(f"Distance between coefficients : {coeff_diff:.2e}") # # %% # Comparing the two Lasso implementations on Sparse data # ------------------------------------------------------ # # We make the previous problem sparse by replacing all small values with 0 # and run the same comparisons as above. 
Because the data is now sparse, we # expect the implementation that uses the sparse data format to be faster. # make a copy of the previous data Xs = X.copy() # make Xs sparse by replacing the values lower than 2.5 with 0s Xs[Xs < 2.5] = 0.0 # create a copy of Xs in sparse format Xs_sp = sparse.coo_matrix(Xs) Xs_sp = Xs_sp.tocsc() # compute the proportion of non-zero coefficient in the data matrix print(f"Matrix density : {(Xs_sp.nnz / float(X.size) * 100):.3f}%") alpha = 0.1 sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000) dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000) t0 = time() sparse_lasso.fit(Xs_sp, y) print(f"Sparse Lasso done in {(time() - t0):.3f}s") t0 = time() dense_lasso.fit(Xs, y) print(f"Dense Lasso done in {(time() - t0):.3f}s") # compare the regression coefficients coeff_diff = linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_) print(f"Distance between coefficients : {coeff_diff:.2e}") # %%
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/linear_model/plot_lasso_model_selection.py
examples/linear_model/plot_lasso_model_selection.py
""" ================================================= Lasso model selection: AIC-BIC / cross-validation ================================================= This example focuses on model selection for Lasso models that are linear models with an L1 penalty for regression problems. Indeed, several strategies can be used to select the value of the regularization parameter: via cross-validation or using an information criterion, namely AIC or BIC. In what follows, we will discuss in details the different strategies. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Dataset # ------- # In this example, we will use the diabetes dataset. from sklearn.datasets import load_diabetes X, y = load_diabetes(return_X_y=True, as_frame=True) X.head() # %% # In addition, we add some random features to the original data to # better illustrate the feature selection performed by the Lasso model. import numpy as np import pandas as pd rng = np.random.RandomState(42) n_random_features = 14 X_random = pd.DataFrame( rng.randn(X.shape[0], n_random_features), columns=[f"random_{i:02d}" for i in range(n_random_features)], ) X = pd.concat([X, X_random], axis=1) # Show only a subset of the columns X[X.columns[::3]].head() # %% # Selecting Lasso via an information criterion # -------------------------------------------- # :class:`~sklearn.linear_model.LassoLarsIC` provides a Lasso estimator that # uses the Akaike information criterion (AIC) or the Bayes information # criterion (BIC) to select the optimal value of the regularization # parameter alpha. # # Before fitting the model, we will standardize the data with a # :class:`~sklearn.preprocessing.StandardScaler`. In addition, we will # measure the time to fit and tune the hyperparameter alpha in order to # compare with the cross-validation strategy. # # We will first fit a Lasso model with the AIC criterion. 
import time from sklearn.linear_model import LassoLarsIC from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler start_time = time.time() lasso_lars_ic = make_pipeline(StandardScaler(), LassoLarsIC(criterion="aic")).fit(X, y) fit_time = time.time() - start_time # %% # We store the AIC metric for each value of alpha used during `fit`. results = pd.DataFrame( { "alphas": lasso_lars_ic[-1].alphas_, "AIC criterion": lasso_lars_ic[-1].criterion_, } ).set_index("alphas") alpha_aic = lasso_lars_ic[-1].alpha_ # %% # Now, we perform the same analysis using the BIC criterion. lasso_lars_ic.set_params(lassolarsic__criterion="bic").fit(X, y) results["BIC criterion"] = lasso_lars_ic[-1].criterion_ alpha_bic = lasso_lars_ic[-1].alpha_ # %% # We can check which value of `alpha` leads to the minimum AIC and BIC. def highlight_min(x): x_min = x.min() return ["font-weight: bold" if v == x_min else "" for v in x] results.style.apply(highlight_min) # %% # Finally, we can plot the AIC and BIC values for the different alpha values. # The vertical lines in the plot correspond to the alpha chosen for each # criterion. The selected alpha corresponds to the minimum of the AIC or BIC # criterion. ax = results.plot() ax.vlines( alpha_aic, results["AIC criterion"].min(), results["AIC criterion"].max(), label="alpha: AIC estimate", linestyles="--", color="tab:blue", ) ax.vlines( alpha_bic, results["BIC criterion"].min(), results["BIC criterion"].max(), label="alpha: BIC estimate", linestyle="--", color="tab:orange", ) ax.set_xlabel(r"$\alpha$") ax.set_ylabel("criterion") ax.set_xscale("log") ax.legend() _ = ax.set_title( f"Information-criterion for model selection (training time {fit_time:.2f}s)" ) # %% # Model selection with an information-criterion is very fast. It relies on # computing the criterion on the in-sample set provided to `fit`. 
Both criteria # estimate the model generalization error based on the training set error and # penalize this overly optimistic error. However, this penalty relies on a # proper estimation of the degrees of freedom and the noise variance. Both are # derived for large samples (asymptotic results) and assume the model is # correct, i.e. that the data are actually generated by this model. # # These models also tend to break when the problem is badly conditioned (more # features than samples). It is then required to provide an estimate of the # noise variance. # # Selecting Lasso via cross-validation # ------------------------------------ # The Lasso estimator can be implemented with different solvers: coordinate # descent and least angle regression. They differ with regards to their # execution speed and sources of numerical errors. # # In scikit-learn, two different estimators are available with integrated # cross-validation: :class:`~sklearn.linear_model.LassoCV` and # :class:`~sklearn.linear_model.LassoLarsCV` that respectively solve the # problem with coordinate descent and least angle regression. # # In the remainder of this section, we will present both approaches. For both # algorithms, we will use a 20-fold cross-validation strategy. # # Lasso via coordinate descent # ............................ # Let's start by making the hyperparameter tuning using # :class:`~sklearn.linear_model.LassoCV`. 
from sklearn.linear_model import LassoCV start_time = time.time() model = make_pipeline(StandardScaler(), LassoCV(cv=20)).fit(X, y) fit_time = time.time() - start_time # %% import matplotlib.pyplot as plt ymin, ymax = 2300, 3800 lasso = model[-1] plt.semilogx(lasso.alphas_, lasso.mse_path_, linestyle=":") plt.plot( lasso.alphas_, lasso.mse_path_.mean(axis=-1), color="black", label="Average across the folds", linewidth=2, ) plt.axvline(lasso.alpha_, linestyle="--", color="black", label="alpha: CV estimate") plt.ylim(ymin, ymax) plt.xlabel(r"$\alpha$") plt.ylabel("Mean square error") plt.legend() _ = plt.title( f"Mean square error on each fold: coordinate descent (train time: {fit_time:.2f}s)" ) # %% # Lasso via least angle regression # ................................ # Let's start by making the hyperparameter tuning using # :class:`~sklearn.linear_model.LassoLarsCV`. from sklearn.linear_model import LassoLarsCV start_time = time.time() model = make_pipeline(StandardScaler(), LassoLarsCV(cv=20)).fit(X, y) fit_time = time.time() - start_time # %% lasso = model[-1] plt.semilogx(lasso.cv_alphas_, lasso.mse_path_, ":") plt.semilogx( lasso.cv_alphas_, lasso.mse_path_.mean(axis=-1), color="black", label="Average across the folds", linewidth=2, ) plt.axvline(lasso.alpha_, linestyle="--", color="black", label="alpha CV") plt.ylim(ymin, ymax) plt.xlabel(r"$\alpha$") plt.ylabel("Mean square error") plt.legend() _ = plt.title(f"Mean square error on each fold: Lars (train time: {fit_time:.2f}s)") # %% # Summary of cross-validation approach # .................................... # Both algorithms give roughly the same results. # # Lars computes a solution path only for each kink in the path. As a result, it # is very efficient when there are only of few kinks, which is the case if # there are few features or samples. Also, it is able to compute the full path # without setting any hyperparameter. 
On the opposite, coordinate descent # computes the path points on a pre-specified grid (here we use the default). # Thus it is more efficient if the number of grid points is smaller than the # number of kinks in the path. Such a strategy can be interesting if the number # of features is really large and there are enough samples to be selected in # each of the cross-validation fold. In terms of numerical errors, for heavily # correlated variables, Lars will accumulate more errors, while the coordinate # descent algorithm will only sample the path on a grid. # # Note how the optimal value of alpha varies for each fold. This illustrates # why nested-cross validation is a good strategy when trying to evaluate the # performance of a method for which a parameter is chosen by cross-validation: # this choice of parameter may not be optimal for a final evaluation on # unseen test set only. # # Conclusion # ---------- # In this tutorial, we presented two approaches for selecting the best # hyperparameter `alpha`: one strategy finds the optimal value of `alpha` # by only using the training set and some information criterion, and another # strategy is based on cross-validation. # # In this example, both approaches are working similarly. The in-sample # hyperparameter selection even shows its efficacy in terms of computational # performance. However, it can only be used when the number of samples is large # enough compared to the number of features. # # That's why hyperparameter optimization via cross-validation is a safe # strategy: it works in different settings.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/linear_model/plot_polynomial_interpolation.py
examples/linear_model/plot_polynomial_interpolation.py
""" =================================== Polynomial and Spline interpolation =================================== This example demonstrates how to approximate a function with polynomials up to degree ``degree`` by using ridge regression. We show two different ways given ``n_samples`` of 1d points ``x_i``: - :class:`~sklearn.preprocessing.PolynomialFeatures` generates all monomials up to ``degree``. This gives us the so called Vandermonde matrix with ``n_samples`` rows and ``degree + 1`` columns:: [[1, x_0, x_0 ** 2, x_0 ** 3, ..., x_0 ** degree], [1, x_1, x_1 ** 2, x_1 ** 3, ..., x_1 ** degree], ...] Intuitively, this matrix can be interpreted as a matrix of pseudo features (the points raised to some power). The matrix is akin to (but different from) the matrix induced by a polynomial kernel. - :class:`~sklearn.preprocessing.SplineTransformer` generates B-spline basis functions. A basis function of a B-spline is a piece-wise polynomial function of degree ``degree`` that is non-zero only between ``degree+1`` consecutive knots. Given ``n_knots`` number of knots, this results in matrix of ``n_samples`` rows and ``n_knots + degree - 1`` columns:: [[basis_1(x_0), basis_2(x_0), ...], [basis_1(x_1), basis_2(x_1), ...], ...] This example shows that these two transformers are well suited to model non-linear effects with a linear model, using a pipeline to add non-linear features. Kernel methods extend this idea and can induce very high (even infinite) dimensional feature spaces. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn.linear_model import Ridge from sklearn.pipeline import make_pipeline from sklearn.preprocessing import PolynomialFeatures, SplineTransformer # %% # We start by defining a function that we intend to approximate and prepare # plotting it. 
def f(x): """Function to be approximated by polynomial interpolation.""" return x * np.sin(x) # whole range we want to plot x_plot = np.linspace(-1, 11, 100) # %% # To make it interesting, we only give a small subset of points to train on. x_train = np.linspace(0, 10, 100) rng = np.random.RandomState(0) x_train = np.sort(rng.choice(x_train, size=20, replace=False)) y_train = f(x_train) # create 2D-array versions of these arrays to feed to transformers X_train = x_train[:, np.newaxis] X_plot = x_plot[:, np.newaxis] # %% # Now we are ready to create polynomial features and splines, fit on the # training points and show how well they interpolate. # plot function lw = 2 fig, ax = plt.subplots() ax.set_prop_cycle( color=["black", "teal", "yellowgreen", "gold", "darkorange", "tomato"] ) ax.plot(x_plot, f(x_plot), linewidth=lw, label="ground truth") # plot training points ax.scatter(x_train, y_train, label="training points") # polynomial features for degree in [3, 4, 5]: model = make_pipeline(PolynomialFeatures(degree), Ridge(alpha=1e-3)) model.fit(X_train, y_train) y_plot = model.predict(X_plot) ax.plot(x_plot, y_plot, label=f"degree {degree}") # B-spline with 4 + 3 - 1 = 6 basis functions model = make_pipeline(SplineTransformer(n_knots=4, degree=3), Ridge(alpha=1e-3)) model.fit(X_train, y_train) y_plot = model.predict(X_plot) ax.plot(x_plot, y_plot, label="B-spline") ax.legend(loc="lower center") ax.set_ylim(-20, 10) plt.show() # %% # This shows nicely that higher degree polynomials can fit the data better. But # at the same time, too high powers can show unwanted oscillatory behaviour # and are particularly dangerous for extrapolation beyond the range of fitted # data. This is an advantage of B-splines. They usually fit the data as well as # polynomials and show very nice and smooth behaviour. They have also good # options to control the extrapolation, which defaults to continue with a # constant. 
Note that most often, you would rather increase the number of knots # but keep ``degree=3``. # # In order to give more insights into the generated feature bases, we plot all # columns of both transformers separately. fig, axes = plt.subplots(ncols=2, figsize=(16, 5)) pft = PolynomialFeatures(degree=3).fit(X_train) axes[0].plot(x_plot, pft.transform(X_plot)) axes[0].legend(axes[0].lines, [f"degree {n}" for n in range(4)]) axes[0].set_title("PolynomialFeatures") splt = SplineTransformer(n_knots=4, degree=3).fit(X_train) axes[1].plot(x_plot, splt.transform(X_plot)) axes[1].legend(axes[1].lines, [f"spline {n}" for n in range(6)]) axes[1].set_title("SplineTransformer") # plot knots of spline knots = splt.bsplines_[0].t axes[1].vlines(knots[3:-3], ymin=0, ymax=0.8, linestyles="dashed") plt.show() # %% # In the left plot, we recognize the lines corresponding to simple monomials # from ``x**0`` to ``x**3``. In the right figure, we see the six B-spline # basis functions of ``degree=3`` and also the four knot positions that were # chosen during ``fit``. Note that there are ``degree`` number of additional # knots each to the left and to the right of the fitted interval. These are # there for technical reasons, so we refrain from showing them. Every basis # function has local support and is continued as a constant beyond the fitted # range. This extrapolating behaviour could be changed by the argument # ``extrapolation``. # %% # Periodic Splines # ---------------- # In the previous example we saw the limitations of polynomials and splines for # extrapolation beyond the range of the training observations. In some # settings, e.g. with seasonal effects, we expect a periodic continuation of # the underlying signal. Such effects can be modelled using periodic splines, # which have equal function value and equal derivatives at the first and last # knot. 
In the following case we show how periodic splines provide a better fit # both within and outside of the range of training data given the additional # information of periodicity. The splines period is the distance between # the first and last knot, which we specify manually. # # Periodic splines can also be useful for naturally periodic features (such as # day of the year), as the smoothness at the boundary knots prevents a jump in # the transformed values (e.g. from Dec 31st to Jan 1st). For such naturally # periodic features or more generally features where the period is known, it is # advised to explicitly pass this information to the `SplineTransformer` by # setting the knots manually. # %% def g(x): """Function to be approximated by periodic spline interpolation.""" return np.sin(x) - 0.7 * np.cos(x * 3) y_train = g(x_train) # Extend the test data into the future: x_plot_ext = np.linspace(-1, 21, 200) X_plot_ext = x_plot_ext[:, np.newaxis] lw = 2 fig, ax = plt.subplots() ax.set_prop_cycle(color=["black", "tomato", "teal"]) ax.plot(x_plot_ext, g(x_plot_ext), linewidth=lw, label="ground truth") ax.scatter(x_train, y_train, label="training points") for transformer, label in [ (SplineTransformer(degree=3, n_knots=10), "spline"), ( SplineTransformer( degree=3, knots=np.linspace(0, 2 * np.pi, 10)[:, None], extrapolation="periodic", ), "periodic spline", ), ]: model = make_pipeline(transformer, Ridge(alpha=1e-3)) model.fit(X_train, y_train) y_plot_ext = model.predict(X_plot_ext) ax.plot(x_plot_ext, y_plot_ext, label=label) ax.legend() fig.show() # %% We again plot the underlying splines. fig, ax = plt.subplots() knots = np.linspace(0, 2 * np.pi, 4) splt = SplineTransformer(knots=knots[:, None], degree=3, extrapolation="periodic").fit( X_train ) ax.plot(x_plot_ext, splt.transform(X_plot_ext)) ax.legend(ax.lines, [f"spline {n}" for n in range(3)]) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/linear_model/plot_bayesian_ridge_curvefit.py
examples/linear_model/plot_bayesian_ridge_curvefit.py
""" ============================================ Curve Fitting with Bayesian Ridge Regression ============================================ Computes a Bayesian Ridge Regression of Sinusoids. See :ref:`bayesian_ridge_regression` for more information on the regressor. In general, when fitting a curve with a polynomial by Bayesian ridge regression, the selection of initial values of the regularization parameters (alpha, lambda) may be important. This is because the regularization parameters are determined by an iterative procedure that depends on initial values. In this example, the sinusoid is approximated by a polynomial using different pairs of initial values. When starting from the default values (alpha_init = 1.90, lambda_init = 1.), the bias of the resulting curve is large, and the variance is small. So, lambda_init should be relatively small (1.e-3) so as to reduce the bias. Also, by evaluating log marginal likelihood (L) of these models, we can determine which one is better. It can be concluded that the model with larger L is more likely. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Generate sinusoidal data with noise # ----------------------------------- import numpy as np def func(x): return np.sin(2 * np.pi * x) size = 25 rng = np.random.RandomState(1234) x_train = rng.uniform(0.0, 1.0, size) y_train = func(x_train) + rng.normal(scale=0.1, size=size) x_test = np.linspace(0.0, 1.0, 100) # %% # Fit by cubic polynomial # ----------------------- from sklearn.linear_model import BayesianRidge n_order = 3 X_train = np.vander(x_train, n_order + 1, increasing=True) X_test = np.vander(x_test, n_order + 1, increasing=True) reg = BayesianRidge(tol=1e-6, fit_intercept=False, compute_score=True) # %% # Plot the true and predicted curves with log marginal likelihood (L) # ------------------------------------------------------------------- import matplotlib.pyplot as plt fig, axes = plt.subplots(1, 2, figsize=(8, 4)) for i, ax in enumerate(axes): # Bayesian ridge regression with different initial value pairs if i == 0: init = [1 / np.var(y_train), 1.0] # Default values elif i == 1: init = [1.0, 1e-3] reg.set_params(alpha_init=init[0], lambda_init=init[1]) reg.fit(X_train, y_train) ymean, ystd = reg.predict(X_test, return_std=True) ax.plot(x_test, func(x_test), color="blue", label="sin($2\\pi x$)") ax.scatter(x_train, y_train, s=50, alpha=0.5, label="observation") ax.plot(x_test, ymean, color="red", label="predict mean") ax.fill_between( x_test, ymean - ystd, ymean + ystd, color="pink", alpha=0.5, label="predict std" ) ax.set_ylim(-1.3, 1.3) ax.legend() title = "$\\alpha$_init$={:.2f},\\ \\lambda$_init$={}$".format(init[0], init[1]) if i == 0: title += " (Default)" ax.set_title(title, fontsize=12) text = "$\\alpha={:.1f}$\n$\\lambda={:.3f}$\n$L={:.1f}$".format( reg.alpha_, reg.lambda_, reg.scores_[-1] ) ax.text(0.05, -1.0, text, fontsize=12) plt.tight_layout() plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/linear_model/plot_sgd_early_stopping.py
examples/linear_model/plot_sgd_early_stopping.py
""" ============================================= Early stopping of Stochastic Gradient Descent ============================================= Stochastic Gradient Descent is an optimization technique which minimizes a loss function in a stochastic fashion, performing a gradient descent step sample by sample. In particular, it is a very efficient method to fit linear models. As a stochastic method, the loss function is not necessarily decreasing at each iteration, and convergence is only guaranteed in expectation. For this reason, monitoring the convergence on the loss function can be difficult. Another approach is to monitor convergence on a validation score. In this case, the input data is split into a training set and a validation set. The model is then fitted on the training set and the stopping criterion is based on the prediction score computed on the validation set. This enables us to find the least number of iterations which is sufficient to build a model that generalizes well to unseen data and reduces the chance of over-fitting the training data. This early stopping strategy is activated if ``early_stopping=True``; otherwise the stopping criterion only uses the training loss on the entire input data. To better control the early stopping strategy, we can specify a parameter ``validation_fraction`` which set the fraction of the input dataset that we keep aside to compute the validation score. The optimization will continue until the validation score did not improve by at least ``tol`` during the last ``n_iter_no_change`` iterations. The actual number of iterations is available at the attribute ``n_iter_``. This example illustrates how the early stopping can used in the :class:`~sklearn.linear_model.SGDClassifier` model to achieve almost the same accuracy as compared to a model built without early stopping. This can significantly reduce training time. 
Note that scores differ between the stopping criteria even from early iterations because some of the training data is held out with the validation stopping criterion. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import sys import time import matplotlib.pyplot as plt import numpy as np import pandas as pd from sklearn import linear_model from sklearn.datasets import fetch_openml from sklearn.exceptions import ConvergenceWarning from sklearn.model_selection import train_test_split from sklearn.utils import shuffle from sklearn.utils._testing import ignore_warnings def load_mnist(n_samples=None, class_0="0", class_1="8"): """Load MNIST, select two classes, shuffle and return only n_samples.""" # Load data from http://openml.org/d/554 mnist = fetch_openml("mnist_784", version=1, as_frame=False) # take only two classes for binary classification mask = np.logical_or(mnist.target == class_0, mnist.target == class_1) X, y = shuffle(mnist.data[mask], mnist.target[mask], random_state=42) if n_samples is not None: X, y = X[:n_samples], y[:n_samples] return X, y @ignore_warnings(category=ConvergenceWarning) def fit_and_score(estimator, max_iter, X_train, X_test, y_train, y_test): """Fit the estimator on the train set and score it on both sets""" estimator.set_params(max_iter=max_iter) estimator.set_params(random_state=0) start = time.time() estimator.fit(X_train, y_train) fit_time = time.time() - start n_iter = estimator.n_iter_ train_score = estimator.score(X_train, y_train) test_score = estimator.score(X_test, y_test) return fit_time, n_iter, train_score, test_score # Define the estimators to compare estimator_dict = { "No stopping criterion": linear_model.SGDClassifier(n_iter_no_change=3), "Training loss": linear_model.SGDClassifier( early_stopping=False, n_iter_no_change=3, tol=0.1 ), "Validation score": linear_model.SGDClassifier( early_stopping=True, n_iter_no_change=3, tol=0.0001, validation_fraction=0.2 ), } # Load the dataset X, y 
= load_mnist(n_samples=10000) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0) results = [] for estimator_name, estimator in estimator_dict.items(): print(estimator_name + ": ", end="") for max_iter in range(1, 50): print(".", end="") sys.stdout.flush() fit_time, n_iter, train_score, test_score = fit_and_score( estimator, max_iter, X_train, X_test, y_train, y_test ) results.append( (estimator_name, max_iter, fit_time, n_iter, train_score, test_score) ) print("") # Transform the results in a pandas dataframe for easy plotting columns = [ "Stopping criterion", "max_iter", "Fit time (sec)", "n_iter_", "Train score", "Test score", ] results_df = pd.DataFrame(results, columns=columns) # Define what to plot lines = "Stopping criterion" x_axis = "max_iter" styles = ["-.", "--", "-"] # First plot: train and test scores fig, axes = plt.subplots(nrows=1, ncols=2, sharey=True, figsize=(12, 4)) for ax, y_axis in zip(axes, ["Train score", "Test score"]): for style, (criterion, group_df) in zip(styles, results_df.groupby(lines)): group_df.plot(x=x_axis, y=y_axis, label=criterion, ax=ax, style=style) ax.set_title(y_axis) ax.legend(title=lines) fig.tight_layout() # Second plot: n_iter and fit time fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12, 4)) for ax, y_axis in zip(axes, ["n_iter_", "Fit time (sec)"]): for style, (criterion, group_df) in zip(styles, results_df.groupby(lines)): group_df.plot(x=x_axis, y=y_axis, label=criterion, ax=ax, style=style) ax.set_title(y_axis) ax.legend(title=lines) fig.tight_layout() plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/linear_model/plot_poisson_regression_non_normal_loss.py
examples/linear_model/plot_poisson_regression_non_normal_loss.py
# Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause """ ====================================== Poisson regression and non-normal loss ====================================== This example illustrates the use of log-linear Poisson regression on the `French Motor Third-Party Liability Claims dataset <https://www.openml.org/d/41214>`_ from [1]_ and compares it with a linear model fitted with the usual least squared error and a non-linear GBRT model fitted with the Poisson loss (and a log-link). A few definitions: - A **policy** is a contract between an insurance company and an individual: the **policyholder**, that is, the vehicle driver in this case. - A **claim** is the request made by a policyholder to the insurer to compensate for a loss covered by the insurance. - The **exposure** is the duration of the insurance coverage of a given policy, in years. - The claim **frequency** is the number of claims divided by the exposure, typically measured in number of claims per year. In this dataset, each sample corresponds to an insurance policy. Available features include driver age, vehicle age, vehicle power, etc. Our goal is to predict the expected frequency of claims following car accidents for a new policyholder given the historical data over a population of policyholders. .. [1] A. Noll, R. Salzmann and M.V. Wuthrich, Case Study: French Motor Third-Party Liability Claims (November 8, 2018). 
`doi:10.2139/ssrn.3164764 <https://doi.org/10.2139/ssrn.3164764>`_ """ import matplotlib.pyplot as plt import numpy as np import pandas as pd ############################################################################## # The French Motor Third-Party Liability Claims dataset # ----------------------------------------------------- # # Let's load the motor claim dataset from OpenML: # https://www.openml.org/d/41214 from sklearn.datasets import fetch_openml df = fetch_openml(data_id=41214, as_frame=True).frame df # %% # The number of claims (``ClaimNb``) is a positive integer that can be modeled # as a Poisson distribution. It is then assumed to be the number of discrete # events occurring with a constant rate in a given time interval (``Exposure``, # in units of years). # # Here we want to model the frequency ``y = ClaimNb / Exposure`` conditionally # on ``X`` via a (scaled) Poisson distribution, and use ``Exposure`` as # ``sample_weight``. df["Frequency"] = df["ClaimNb"] / df["Exposure"] print( "Average Frequency = {}".format(np.average(df["Frequency"], weights=df["Exposure"])) ) print( "Fraction of exposure with zero claims = {0:.1%}".format( df.loc[df["ClaimNb"] == 0, "Exposure"].sum() / df["Exposure"].sum() ) ) fig, (ax0, ax1, ax2) = plt.subplots(ncols=3, figsize=(16, 4)) ax0.set_title("Number of claims") _ = df["ClaimNb"].hist(bins=30, log=True, ax=ax0) ax1.set_title("Exposure in years") _ = df["Exposure"].hist(bins=30, log=True, ax=ax1) ax2.set_title("Frequency (number of claims per year)") _ = df["Frequency"].hist(bins=30, log=True, ax=ax2) # %% # The remaining columns can be used to predict the frequency of claim events. # Those columns are very heterogeneous with a mix of categorical and numeric # variables with different scales, possibly very unevenly distributed. 
# # In order to fit linear models with those predictors it is therefore # necessary to perform standard feature transformations as follows: from sklearn.compose import ColumnTransformer from sklearn.pipeline import make_pipeline from sklearn.preprocessing import ( FunctionTransformer, KBinsDiscretizer, OneHotEncoder, StandardScaler, ) log_scale_transformer = make_pipeline( FunctionTransformer(np.log, validate=False), StandardScaler() ) linear_model_preprocessor = ColumnTransformer( [ ("passthrough_numeric", "passthrough", ["BonusMalus"]), ( "binned_numeric", KBinsDiscretizer( n_bins=10, quantile_method="averaged_inverted_cdf", random_state=0 ), ["VehAge", "DrivAge"], ), ("log_scaled_numeric", log_scale_transformer, ["Density"]), ( "onehot_categorical", OneHotEncoder(), ["VehBrand", "VehPower", "VehGas", "Region", "Area"], ), ], remainder="drop", ) # %% # A constant prediction baseline # ------------------------------ # # It is worth noting that more than 93% of policyholders have zero claims. If # we were to convert this problem into a binary classification task, it would # be significantly imbalanced, and even a simplistic model that would only # predict mean can achieve an accuracy of 93%. # # To evaluate the pertinence of the used metrics, we will consider as a # baseline a "dummy" estimator that constantly predicts the mean frequency of # the training sample. 
from sklearn.dummy import DummyRegressor from sklearn.model_selection import train_test_split from sklearn.pipeline import Pipeline df_train, df_test = train_test_split(df, test_size=0.33, random_state=0) dummy = Pipeline( [ ("preprocessor", linear_model_preprocessor), ("regressor", DummyRegressor(strategy="mean")), ] ).fit(df_train, df_train["Frequency"], regressor__sample_weight=df_train["Exposure"]) ############################################################################## # Let's compute the performance of this constant prediction baseline with 3 # different regression metrics: from sklearn.metrics import ( mean_absolute_error, mean_poisson_deviance, mean_squared_error, ) def score_estimator(estimator, df_test): """Score an estimator on the test set.""" y_pred = estimator.predict(df_test) print( "MSE: %.3f" % mean_squared_error( df_test["Frequency"], y_pred, sample_weight=df_test["Exposure"] ) ) print( "MAE: %.3f" % mean_absolute_error( df_test["Frequency"], y_pred, sample_weight=df_test["Exposure"] ) ) # Ignore non-positive predictions, as they are invalid for # the Poisson deviance. mask = y_pred > 0 if (~mask).any(): n_masked, n_samples = (~mask).sum(), mask.shape[0] print( "WARNING: Estimator yields invalid, non-positive predictions " f" for {n_masked} samples out of {n_samples}. These predictions " "are ignored when computing the Poisson deviance." ) print( "mean Poisson deviance: %.3f" % mean_poisson_deviance( df_test["Frequency"][mask], y_pred[mask], sample_weight=df_test["Exposure"][mask], ) ) print("Constant mean frequency evaluation:") score_estimator(dummy, df_test) # %% # (Generalized) linear models # --------------------------- # # We start by modeling the target variable with the (l2 penalized) least # squares linear regression model, more commonly known as Ridge regression. We # use a low penalization `alpha`, as we expect such a linear model to under-fit # on such a large dataset. 
from sklearn.linear_model import Ridge ridge_glm = Pipeline( [ ("preprocessor", linear_model_preprocessor), ("regressor", Ridge(alpha=1e-6)), ] ).fit(df_train, df_train["Frequency"], regressor__sample_weight=df_train["Exposure"]) # %% # The Poisson deviance cannot be computed on non-positive values predicted by # the model. For models that do return a few non-positive predictions (e.g. # :class:`~sklearn.linear_model.Ridge`) we ignore the corresponding samples, # meaning that the obtained Poisson deviance is approximate. An alternative # approach could be to use :class:`~sklearn.compose.TransformedTargetRegressor` # meta-estimator to map ``y_pred`` to a strictly positive domain. print("Ridge evaluation:") score_estimator(ridge_glm, df_test) # %% # Next we fit the Poisson regressor on the target variable. We set the # regularization strength ``alpha`` to approximately 1e-6 over number of # samples (i.e. `1e-12`) in order to mimic the Ridge regressor whose L2 penalty # term scales differently with the number of samples. # # Since the Poisson regressor internally models the log of the expected target # value instead of the expected value directly (log vs identity link function), # the relationship between X and y is not exactly linear anymore. Therefore the # Poisson regressor is called a Generalized Linear Model (GLM) rather than a # vanilla linear model as is the case for Ridge regression. 
from sklearn.linear_model import PoissonRegressor n_samples = df_train.shape[0] poisson_glm = Pipeline( [ ("preprocessor", linear_model_preprocessor), ("regressor", PoissonRegressor(alpha=1e-12, solver="newton-cholesky")), ] ) poisson_glm.fit( df_train, df_train["Frequency"], regressor__sample_weight=df_train["Exposure"] ) print("PoissonRegressor evaluation:") score_estimator(poisson_glm, df_test) # %% # Gradient Boosting Regression Trees for Poisson regression # --------------------------------------------------------- # # Finally, we will consider a non-linear model, namely Gradient Boosting # Regression Trees. Tree-based models do not require the categorical data to be # one-hot encoded: instead, we can encode each category label with an arbitrary # integer using :class:`~sklearn.preprocessing.OrdinalEncoder`. With this # encoding, the trees will treat the categorical features as ordered features, # which might not be always a desired behavior. However this effect is limited # for deep enough trees which are able to recover the categorical nature of the # features. The main advantage of the # :class:`~sklearn.preprocessing.OrdinalEncoder` over the # :class:`~sklearn.preprocessing.OneHotEncoder` is that it will make training # faster. # # Gradient Boosting also gives the possibility to fit the trees with a Poisson # loss (with an implicit log-link function) instead of the default # least-squares loss. Here we only fit trees with the Poisson loss to keep this # example concise. 
from sklearn.ensemble import HistGradientBoostingRegressor from sklearn.preprocessing import OrdinalEncoder tree_preprocessor = ColumnTransformer( [ ( "categorical", OrdinalEncoder(), ["VehBrand", "VehPower", "VehGas", "Region", "Area"], ), ("numeric", "passthrough", ["VehAge", "DrivAge", "BonusMalus", "Density"]), ], remainder="drop", ) poisson_gbrt = Pipeline( [ ("preprocessor", tree_preprocessor), ( "regressor", HistGradientBoostingRegressor(loss="poisson", max_leaf_nodes=128), ), ] ) poisson_gbrt.fit( df_train, df_train["Frequency"], regressor__sample_weight=df_train["Exposure"] ) print("Poisson Gradient Boosted Trees evaluation:") score_estimator(poisson_gbrt, df_test) # %% # Like the Poisson GLM above, the gradient boosted trees model minimizes # the Poisson deviance. However, because of a higher predictive power, # it reaches lower values of Poisson deviance. # # Evaluating models with a single train / test split is prone to random # fluctuations. If computing resources allow, it should be verified that # cross-validated performance metrics would lead to similar conclusions. 
# # The qualitative difference between these models can also be visualized by # comparing the histogram of observed target values with that of predicted # values: fig, axes = plt.subplots(nrows=2, ncols=4, figsize=(16, 6), sharey=True) fig.subplots_adjust(bottom=0.2) n_bins = 20 for row_idx, label, df in zip(range(2), ["train", "test"], [df_train, df_test]): df["Frequency"].hist(bins=np.linspace(-1, 30, n_bins), ax=axes[row_idx, 0]) axes[row_idx, 0].set_title("Data") axes[row_idx, 0].set_yscale("log") axes[row_idx, 0].set_xlabel("y (observed Frequency)") axes[row_idx, 0].set_ylim([1e1, 5e5]) axes[row_idx, 0].set_ylabel(label + " samples") for idx, model in enumerate([ridge_glm, poisson_glm, poisson_gbrt]): y_pred = model.predict(df) pd.Series(y_pred).hist( bins=np.linspace(-1, 4, n_bins), ax=axes[row_idx, idx + 1] ) axes[row_idx, idx + 1].set( title=model[-1].__class__.__name__, yscale="log", xlabel="y_pred (predicted expected Frequency)", ) plt.tight_layout() # %% # The experimental data presents a long tail distribution for ``y``. In all # models, we predict the expected frequency of a random variable, so we will # have necessarily fewer extreme values than for the observed realizations of # that random variable. This explains that the mode of the histograms of model # predictions doesn't necessarily correspond to the smallest value. # Additionally, the normal distribution used in ``Ridge`` has a constant # variance, while for the Poisson distribution used in ``PoissonRegressor`` and # ``HistGradientBoostingRegressor``, the variance is proportional to the # predicted expected value. # # Thus, among the considered estimators, ``PoissonRegressor`` and # ``HistGradientBoostingRegressor`` are a-priori better suited for modeling the # long tail distribution of the non-negative data as compared to the ``Ridge`` # model which makes a wrong assumption on the distribution of the target # variable. 
# # The ``HistGradientBoostingRegressor`` estimator has the most flexibility and # is able to predict higher expected values. # # Note that we could have used the least squares loss for the # ``HistGradientBoostingRegressor`` model. This would wrongly assume a normal # distributed response variable as does the `Ridge` model, and possibly # also lead to slightly negative predictions. However the gradient boosted # trees would still perform relatively well and in particular better than # ``PoissonRegressor`` thanks to the flexibility of the trees combined with the # large number of training samples. # # Evaluation of the calibration of predictions # -------------------------------------------- # # To ensure that estimators yield reasonable predictions for different # policyholder types, we can bin test samples according to ``y_pred`` returned # by each model. Then for each bin, we compare the mean predicted ``y_pred``, # with the mean observed target: from sklearn.utils import gen_even_slices def _mean_frequency_by_risk_group(y_true, y_pred, sample_weight=None, n_bins=100): """Compare predictions and observations for bins ordered by y_pred. We order the samples by ``y_pred`` and split it in bins. In each bin the observed mean is compared with the predicted mean. Parameters ---------- y_true: array-like of shape (n_samples,) Ground truth (correct) target values. y_pred: array-like of shape (n_samples,) Estimated target values. sample_weight : array-like of shape (n_samples,) Sample weights. n_bins: int Number of bins to use. 
Returns ------- bin_centers: ndarray of shape (n_bins,) bin centers y_true_bin: ndarray of shape (n_bins,) average y_pred for each bin y_pred_bin: ndarray of shape (n_bins,) average y_pred for each bin """ idx_sort = np.argsort(y_pred) bin_centers = np.arange(0, 1, 1 / n_bins) + 0.5 / n_bins y_pred_bin = np.zeros(n_bins) y_true_bin = np.zeros(n_bins) for n, sl in enumerate(gen_even_slices(len(y_true), n_bins)): weights = sample_weight[idx_sort][sl] y_pred_bin[n] = np.average(y_pred[idx_sort][sl], weights=weights) y_true_bin[n] = np.average(y_true[idx_sort][sl], weights=weights) return bin_centers, y_true_bin, y_pred_bin print(f"Actual number of claims: {df_test['ClaimNb'].sum()}") fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(12, 8)) plt.subplots_adjust(wspace=0.3) for axi, model in zip(ax.ravel(), [ridge_glm, poisson_glm, poisson_gbrt, dummy]): y_pred = model.predict(df_test) y_true = df_test["Frequency"].values exposure = df_test["Exposure"].values q, y_true_seg, y_pred_seg = _mean_frequency_by_risk_group( y_true, y_pred, sample_weight=exposure, n_bins=10 ) # Name of the model after the estimator used in the last step of the # pipeline. print(f"Predicted number of claims by {model[-1]}: {np.sum(y_pred * exposure):.1f}") axi.plot(q, y_pred_seg, marker="x", linestyle="--", label="predictions") axi.plot(q, y_true_seg, marker="o", linestyle="--", label="observations") axi.set_xlim(0, 1.0) axi.set_ylim(0, 0.5) axi.set( title=model[-1], xlabel="Fraction of samples sorted by y_pred", ylabel="Mean Frequency (y_pred)", ) axi.legend() plt.tight_layout() # %% # The dummy regression model predicts a constant frequency. This model does not # attribute the same tied rank to all samples but is none-the-less globally # well calibrated (to estimate the mean frequency of the entire population). # # The ``Ridge`` regression model can predict very low expected frequencies that # do not match the data. It can therefore severely under-estimate the risk for # some policyholders. 
# # ``PoissonRegressor`` and ``HistGradientBoostingRegressor`` show better # consistency between predicted and observed targets, especially for low # predicted target values. # # The sum of all predictions also confirms the calibration issue of the # ``Ridge`` model: it under-estimates by more than 3% the total number of # claims in the test set while the other three models can approximately recover # the total number of claims of the test portfolio. # # Evaluation of the ranking power # ------------------------------- # # For some business applications, we are interested in the ability of the model # to rank the riskiest from the safest policyholders, irrespective of the # absolute value of the prediction. In this case, the model evaluation would # cast the problem as a ranking problem rather than a regression problem. # # To compare the 3 models from this perspective, one can plot the cumulative # proportion of claims vs the cumulative proportion of exposure for the test # samples order by the model predictions, from safest to riskiest according to # each model. 
# # This plot is called a Lorenz curve and can be summarized by the Gini index: from sklearn.metrics import auc def lorenz_curve(y_true, y_pred, exposure): y_true, y_pred = np.asarray(y_true), np.asarray(y_pred) exposure = np.asarray(exposure) # order samples by increasing predicted risk: ranking = np.argsort(y_pred) ranked_frequencies = y_true[ranking] ranked_exposure = exposure[ranking] cumulated_claims = np.cumsum(ranked_frequencies * ranked_exposure) cumulated_claims /= cumulated_claims[-1] cumulated_exposure = np.cumsum(ranked_exposure) cumulated_exposure /= cumulated_exposure[-1] return cumulated_exposure, cumulated_claims fig, ax = plt.subplots(figsize=(8, 8)) for model in [dummy, ridge_glm, poisson_glm, poisson_gbrt]: y_pred = model.predict(df_test) cum_exposure, cum_claims = lorenz_curve( df_test["Frequency"], y_pred, df_test["Exposure"] ) gini = 1 - 2 * auc(cum_exposure, cum_claims) label = "{} (Gini: {:.2f})".format(model[-1], gini) ax.plot(cum_exposure, cum_claims, linestyle="-", label=label) # Oracle model: y_pred == y_test cum_exposure, cum_claims = lorenz_curve( df_test["Frequency"], df_test["Frequency"], df_test["Exposure"] ) gini = 1 - 2 * auc(cum_exposure, cum_claims) label = "Oracle (Gini: {:.2f})".format(gini) ax.plot(cum_exposure, cum_claims, linestyle="-.", color="gray", label=label) # Random Baseline ax.plot([0, 1], [0, 1], linestyle="--", color="black", label="Random baseline") ax.set( title="Lorenz curves by model", xlabel="Cumulative proportion of exposure (from safest to riskiest)", ylabel="Cumulative proportion of claims", ) ax.legend(loc="upper left") # %% # As expected, the dummy regressor is unable to correctly rank the samples and # therefore performs the worst on this plot. # # The tree-based model is significantly better at ranking policyholders by risk # while the two linear models perform similarly. # # All three models are significantly better than chance but also very far from # making perfect predictions. 
# # This last point is expected due to the nature of the problem: the occurrence # of accidents is mostly dominated by circumstantial causes that are not # captured in the columns of the dataset and can indeed be considered as purely # random. # # The linear models assume no interactions between the input variables which # likely causes under-fitting. Inserting a polynomial feature extractor # (:func:`~sklearn.preprocessing.PolynomialFeatures`) indeed increases their # discrimative power by 2 points of Gini index. In particular it improves the # ability of the models to identify the top 5% riskiest profiles. # # Main takeaways # -------------- # # - The performance of the models can be evaluated by their ability to yield # well-calibrated predictions and a good ranking. # # - The calibration of the model can be assessed by plotting the mean observed # value vs the mean predicted value on groups of test samples binned by # predicted risk. # # - The least squares loss (along with the implicit use of the identity link # function) of the Ridge regression model seems to cause this model to be # badly calibrated. In particular, it tends to underestimate the risk and can # even predict invalid negative frequencies. # # - Using the Poisson loss with a log-link can correct these problems and lead # to a well-calibrated linear model. # # - The Gini index reflects the ability of a model to rank predictions # irrespective of their absolute values, and therefore only assess their # ranking power. # # - Despite the improvement in calibration, the ranking power of both linear # models are comparable and well below the ranking power of the Gradient # Boosting Regression Trees. # # - The Poisson deviance computed as an evaluation metric reflects both the # calibration and the ranking power of the model. It also makes a linear # assumption on the ideal relationship between the expected value and the # variance of the response variable. 
For the sake of conciseness we did not # check whether this assumption holds. # # - Traditional regression metrics such as Mean Squared Error and Mean Absolute # Error are hard to meaningfully interpret on count values with many zeros. plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/linear_model/plot_ridge_path.py
examples/linear_model/plot_ridge_path.py
""" =========================================================== Plot Ridge coefficients as a function of the regularization =========================================================== Shows the effect of collinearity in the coefficients of an estimator. .. currentmodule:: sklearn.linear_model :class:`Ridge` Regression is the estimator used in this example. Each color represents a different feature of the coefficient vector, and this is displayed as a function of the regularization parameter. This example also shows the usefulness of applying Ridge regression to highly ill-conditioned matrices. For such matrices, a slight change in the target variable can cause huge variances in the calculated weights. In such cases, it is useful to set a certain regularization (alpha) to reduce this variation (noise). When alpha is very large, the regularization effect dominates the squared loss function and the coefficients tend to zero. At the end of the path, as alpha tends toward zero and the solution tends towards the ordinary least squares, coefficients exhibit big oscillations. In practice it is necessary to tune alpha in such a way that a balance is maintained between both. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn import linear_model # X is the 10x10 Hilbert matrix X = 1.0 / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis]) y = np.ones(10) # %% # Compute paths # ------------- n_alphas = 200 alphas = np.logspace(-10, -2, n_alphas) coefs = [] for a in alphas: ridge = linear_model.Ridge(alpha=a, fit_intercept=False) ridge.fit(X, y) coefs.append(ridge.coef_) # %% # Display results # --------------- ax = plt.gca() ax.plot(alphas, coefs) ax.set_xscale("log") ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis plt.xlabel("alpha") plt.ylabel("weights") plt.title("Ridge Coefficients vs Regularization Strength (alpha)") plt.axis("tight") plt.legend( [f"Feature {i + 1}" for i in range(X.shape[1])], loc="best", fontsize="small" ) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/linear_model/plot_sparse_logistic_regression_20newsgroups.py
examples/linear_model/plot_sparse_logistic_regression_20newsgroups.py
""" ==================================================== Multiclass sparse logistic regression on 20newgroups ==================================================== Comparison of multinomial logistic L1 vs one-versus-rest L1 logistic regression to classify documents from the newgroups20 dataset. Multinomial logistic regression yields more accurate results and is faster to train on the larger scale dataset. Here we use the l1 sparsity that trims the weights of not informative features to zero. This is good if the goal is to extract the strongly discriminative vocabulary of each class. If the goal is to get the best predictive accuracy, it is better to use the non sparsity-inducing l2 penalty instead. A more traditional (and possibly better) way to predict on a sparse subset of input features would be to use univariate feature selection followed by a traditional (l2-penalised) logistic regression model. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import timeit import warnings import matplotlib.pyplot as plt import numpy as np from sklearn.datasets import fetch_20newsgroups_vectorized from sklearn.exceptions import ConvergenceWarning from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.multiclass import OneVsRestClassifier warnings.filterwarnings("ignore", category=ConvergenceWarning, module="sklearn") t0 = timeit.default_timer() # We use SAGA solver solver = "saga" # Turn down for faster run time n_samples = 5000 X, y = fetch_20newsgroups_vectorized(subset="all", return_X_y=True) X = X[:n_samples] y = y[:n_samples] X_train, X_test, y_train, y_test = train_test_split( X, y, random_state=42, stratify=y, test_size=0.1 ) train_samples, n_features = X_train.shape n_classes = np.unique(y).shape[0] print( "Dataset 20newsgroup, train_samples=%i, n_features=%i, n_classes=%i" % (train_samples, n_features, n_classes) ) models = { "ovr": {"name": "One versus Rest", "iters": [1, 
2, 3]}, "multinomial": {"name": "Multinomial", "iters": [1, 2, 5]}, } for model in models: # Add initial chance-level values for plotting purpose accuracies = [1 / n_classes] times = [0] densities = [1] model_params = models[model] # Small number of epochs for fast runtime for this_max_iter in model_params["iters"]: print( "[model=%s, solver=%s] Number of epochs: %s" % (model_params["name"], solver, this_max_iter) ) clf = LogisticRegression( l1_ratio=1, solver=solver, max_iter=this_max_iter, random_state=42, ) if model == "ovr": clf = OneVsRestClassifier(clf) t1 = timeit.default_timer() clf.fit(X_train, y_train) train_time = timeit.default_timer() - t1 y_pred = clf.predict(X_test) accuracy = np.sum(y_pred == y_test) / y_test.shape[0] if model == "ovr": coef = np.concatenate([est.coef_ for est in clf.estimators_]) else: coef = clf.coef_ density = np.mean(coef != 0, axis=1) * 100 accuracies.append(accuracy) densities.append(density) times.append(train_time) models[model]["times"] = times models[model]["densities"] = densities models[model]["accuracies"] = accuracies print("Test accuracy for model %s: %.4f" % (model, accuracies[-1])) print( "%% non-zero coefficients for model %s, per class:\n %s" % (model, densities[-1]) ) print( "Run time (%i epochs) for model %s:%.2f" % (model_params["iters"][-1], model, times[-1]) ) fig = plt.figure() ax = fig.add_subplot(111) for model in models: name = models[model]["name"] times = models[model]["times"] accuracies = models[model]["accuracies"] ax.plot(times, accuracies, marker="o", label="Model: %s" % name) ax.set_xlabel("Train time (s)") ax.set_ylabel("Test accuracy") ax.legend() fig.suptitle("Multinomial vs One-vs-Rest Logistic L1\nDataset %s" % "20newsgroups") fig.tight_layout() fig.subplots_adjust(top=0.85) run_time = timeit.default_timer() - t0 print("Example run in %.3f s" % run_time) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/linear_model/plot_robust_fit.py
examples/linear_model/plot_robust_fit.py
""" Robust linear estimator fitting =============================== Here a sine function is fit with a polynomial of order 3, for values close to zero. Robust fitting is demonstrated in different situations: - No measurement errors, only modelling errors (fitting a sine with a polynomial) - Measurement errors in X - Measurement errors in y The median absolute deviation to non corrupt new data is used to judge the quality of the prediction. What we can see that: - RANSAC is good for strong outliers in the y direction - TheilSen is good for small outliers, both in direction X and y, but has a break point above which it performs worse than OLS. - The scores of HuberRegressor may not be compared directly to both TheilSen and RANSAC because it does not attempt to completely filter the outliers but lessen their effect. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import numpy as np from matplotlib import pyplot as plt from sklearn.linear_model import ( HuberRegressor, LinearRegression, RANSACRegressor, TheilSenRegressor, ) from sklearn.metrics import mean_squared_error from sklearn.pipeline import make_pipeline from sklearn.preprocessing import PolynomialFeatures np.random.seed(42) X = np.random.normal(size=400) y = np.sin(X) # Make sure that it X is 2D X = X[:, np.newaxis] X_test = np.random.normal(size=200) y_test = np.sin(X_test) X_test = X_test[:, np.newaxis] y_errors = y.copy() y_errors[::3] = 3 X_errors = X.copy() X_errors[::3] = 3 y_errors_large = y.copy() y_errors_large[::3] = 10 X_errors_large = X.copy() X_errors_large[::3] = 10 estimators = [ ("OLS", LinearRegression()), ("Theil-Sen", TheilSenRegressor(random_state=42)), ("RANSAC", RANSACRegressor(random_state=42)), ("HuberRegressor", HuberRegressor()), ] colors = { "OLS": "turquoise", "Theil-Sen": "gold", "RANSAC": "lightgreen", "HuberRegressor": "black", } linestyle = {"OLS": "-", "Theil-Sen": "-.", "RANSAC": "--", "HuberRegressor": "--"} lw = 3 x_plot = 
np.linspace(X.min(), X.max()) for title, this_X, this_y in [ ("Modeling Errors Only", X, y), ("Corrupt X, Small Deviants", X_errors, y), ("Corrupt y, Small Deviants", X, y_errors), ("Corrupt X, Large Deviants", X_errors_large, y), ("Corrupt y, Large Deviants", X, y_errors_large), ]: plt.figure(figsize=(5, 4)) plt.plot(this_X[:, 0], this_y, "b+") for name, estimator in estimators: model = make_pipeline(PolynomialFeatures(3), estimator) model.fit(this_X, this_y) mse = mean_squared_error(model.predict(X_test), y_test) y_plot = model.predict(x_plot[:, np.newaxis]) plt.plot( x_plot, y_plot, color=colors[name], linestyle=linestyle[name], linewidth=lw, label="%s: error = %.3f" % (name, mse), ) legend_title = "Error of Mean\nAbsolute Deviation\nto Non-corrupt Data" legend = plt.legend( loc="upper right", frameon=False, title=legend_title, prop=dict(size="x-small") ) plt.xlim(-4, 10.2) plt.ylim(-2, 10.2) plt.title(title) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/linear_model/plot_logistic_multinomial.py
examples/linear_model/plot_logistic_multinomial.py
""" ====================================================================== Decision Boundaries of Multinomial and One-vs-Rest Logistic Regression ====================================================================== This example compares decision boundaries of multinomial and one-vs-rest logistic regression on a 2D dataset with three classes. We make a comparison of the decision boundaries of both methods that is equivalent to call the method `predict`. In addition, we plot the hyperplanes that correspond to the line when the probability estimate for a class is of 0.5. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Dataset Generation # ------------------ # # We generate a synthetic dataset using :func:`~sklearn.datasets.make_blobs` function. # The dataset consists of 1,000 samples from three different classes, # centered around [-5, 0], [0, 1.5], and [5, -1]. After generation, we apply a linear # transformation to introduce some correlation between features and make the problem # more challenging. This results in a 2D dataset with three overlapping classes, # suitable for demonstrating the differences between multinomial and one-vs-rest # logistic regression. import matplotlib.pyplot as plt import numpy as np from sklearn.datasets import make_blobs centers = [[-5, 0], [0, 1.5], [5, -1]] X, y = make_blobs(n_samples=1_000, centers=centers, random_state=40) transformation = [[0.4, 0.2], [-0.4, 1.2]] X = np.dot(X, transformation) fig, ax = plt.subplots(figsize=(6, 4)) scatter = ax.scatter(X[:, 0], X[:, 1], c=y, edgecolor="black") ax.set(title="Synthetic Dataset", xlabel="Feature 1", ylabel="Feature 2") _ = ax.legend(*scatter.legend_elements(), title="Classes") # %% # Classifier Training # ------------------- # # We train two different logistic regression classifiers: multinomial and one-vs-rest. 
# The multinomial classifier handles all classes simultaneously, while the one-vs-rest # approach trains a binary classifier for each class against all others. from sklearn.linear_model import LogisticRegression from sklearn.multiclass import OneVsRestClassifier logistic_regression_multinomial = LogisticRegression().fit(X, y) logistic_regression_ovr = OneVsRestClassifier(LogisticRegression()).fit(X, y) accuracy_multinomial = logistic_regression_multinomial.score(X, y) accuracy_ovr = logistic_regression_ovr.score(X, y) # %% # Decision Boundaries Visualization # --------------------------------- # # Let's visualize the decision boundaries of both models that is provided by the # method `predict` of the classifiers. from sklearn.inspection import DecisionBoundaryDisplay fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5), sharex=True, sharey=True) for model, title, ax in [ ( logistic_regression_multinomial, f"Multinomial Logistic Regression\n(Accuracy: {accuracy_multinomial:.3f})", ax1, ), ( logistic_regression_ovr, f"One-vs-Rest Logistic Regression\n(Accuracy: {accuracy_ovr:.3f})", ax2, ), ]: DecisionBoundaryDisplay.from_estimator( model, X, ax=ax, response_method="predict", alpha=0.8, ) scatter = ax.scatter(X[:, 0], X[:, 1], c=y, edgecolor="k") legend = ax.legend(*scatter.legend_elements(), title="Classes") ax.add_artist(legend) ax.set_title(title) # %% # We see that the decision boundaries are different. This difference stems from their # approaches: # # - Multinomial logistic regression considers all classes simultaneously during # optimization. # - One-vs-rest logistic regression fits each class independently against all others. # # These distinct strategies can lead to varying decision boundaries, especially in # complex multi-class problems. # # Hyperplanes Visualization # -------------------------- # # We also visualize the hyperplanes that correspond to the line when the probability # estimate for a class is of 0.5. 
def plot_hyperplanes(classifier, X, ax): xmin, xmax = X[:, 0].min(), X[:, 0].max() ymin, ymax = X[:, 1].min(), X[:, 1].max() ax.set(xlim=(xmin, xmax), ylim=(ymin, ymax)) if isinstance(classifier, OneVsRestClassifier): coef = np.concatenate([est.coef_ for est in classifier.estimators_]) intercept = np.concatenate([est.intercept_ for est in classifier.estimators_]) else: coef = classifier.coef_ intercept = classifier.intercept_ for i in range(coef.shape[0]): w = coef[i] a = -w[0] / w[1] xx = np.linspace(xmin, xmax) yy = a * xx - (intercept[i]) / w[1] ax.plot(xx, yy, "--", linewidth=3, label=f"Class {i}") return ax.get_legend_handles_labels() # %% fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5), sharex=True, sharey=True) for model, title, ax in [ ( logistic_regression_multinomial, "Multinomial Logistic Regression Hyperplanes", ax1, ), (logistic_regression_ovr, "One-vs-Rest Logistic Regression Hyperplanes", ax2), ]: hyperplane_handles, hyperplane_labels = plot_hyperplanes(model, X, ax) scatter = ax.scatter(X[:, 0], X[:, 1], c=y, edgecolor="k") scatter_handles, scatter_labels = scatter.legend_elements() all_handles = hyperplane_handles + scatter_handles all_labels = hyperplane_labels + scatter_labels ax.legend(all_handles, all_labels, title="Classes") ax.set_title(title) plt.show() # %% # While the hyperplanes for classes 0 and 2 are quite similar between the two methods, # we observe that the hyperplane for class 1 is notably different. This difference stems # from the fundamental approaches of one-vs-rest and multinomial logistic regression: # # For one-vs-rest logistic regression: # # - Each hyperplane is determined independently by considering one class against all # others. # - For class 1, the hyperplane represents the decision boundary that best separates # class 1 from the combined classes 0 and 2. # - This binary approach can lead to simpler decision boundaries but may not capture # complex relationships between all classes simultaneously. 
# - There is no possible interpretation of the conditional class probabilities. # # For multinomial logistic regression: # # - All hyperplanes are determined simultaneously, considering the relationships between # all classes at once. # - The loss minimized by the model is a proper scoring rule, which means that the model # is optimized to estimate the conditional class probabilities that are, therefore, # meaningful. # - Each hyperplane represents the decision boundary where the probability of one class # becomes higher than the others, based on the overall probability distribution. # - This approach can capture more nuanced relationships between classes, potentially # leading to more accurate classification in multi-class problems. # # The difference in hyperplanes, especially for class 1, highlights how these methods # can produce different decision boundaries despite similar overall accuracy. # # In practice, using multinomial logistic regression is recommended since it minimizes a # well-formulated loss function, leading to better-calibrated class probabilities and # thus more interpretable results. When it comes to decision boundaries, one should # formulate a utility function to transform the class probabilities into a meaningful # quantity for the problem at hand. One-vs-rest allows for different decision boundaries # but does not allow for fine-grained control over the trade-off between the classes as # a utility function would.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/linear_model/plot_elastic_net_precomputed_gram_matrix_with_weighted_samples.py
examples/linear_model/plot_elastic_net_precomputed_gram_matrix_with_weighted_samples.py
""" ========================================================================== Fitting an Elastic Net with a precomputed Gram Matrix and Weighted Samples ========================================================================== The following example shows how to precompute the gram matrix while using weighted samples with an :class:`~sklearn.linear_model.ElasticNet`. If weighted samples are used, the design matrix must be centered and then rescaled by the square root of the weight vector before the gram matrix is computed. .. note:: `sample_weight` vector is also rescaled to sum to `n_samples`, see the documentation for the `sample_weight` parameter to :meth:`~sklearn.linear_model.ElasticNet.fit`. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Let's start by loading the dataset and creating some sample weights. import numpy as np from sklearn.datasets import make_regression rng = np.random.RandomState(0) n_samples = int(1e5) X, y = make_regression(n_samples=n_samples, noise=0.5, random_state=rng) sample_weight = rng.lognormal(size=n_samples) # normalize the sample weights normalized_weights = sample_weight * (n_samples / (sample_weight.sum())) # %% # To fit the elastic net using the `precompute` option together with the sample # weights, we must first center the design matrix, and rescale it by the # normalized weights prior to computing the gram matrix. X_offset = np.average(X, axis=0, weights=normalized_weights) X_centered = X - np.average(X, axis=0, weights=normalized_weights) X_scaled = X_centered * np.sqrt(normalized_weights)[:, np.newaxis] gram = np.dot(X_scaled.T, X_scaled) # %% # We can now proceed with fitting. We must passed the centered design matrix to # `fit` otherwise the elastic net estimator will detect that it is uncentered # and discard the gram matrix we passed. However, if we pass the scaled design # matrix, the preprocessing code will incorrectly rescale it a second time. 
from sklearn.linear_model import ElasticNet lm = ElasticNet(alpha=0.01, precompute=gram) lm.fit(X_centered, y, sample_weight=normalized_weights)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/linear_model/plot_lasso_and_elasticnet.py
examples/linear_model/plot_lasso_and_elasticnet.py
""" ================================== L1-based models for Sparse Signals ================================== The present example compares three l1-based regression models on a synthetic signal obtained from sparse and correlated features that are further corrupted with additive Gaussian noise: - a :ref:`lasso`; - an :ref:`automatic_relevance_determination`; - an :ref:`elastic_net`. It is known that the Lasso estimates turn to be close to the model selection estimates when the data dimensions grow, given that the irrelevant variables are not too correlated with the relevant ones. In the presence of correlated features, Lasso itself cannot select the correct sparsity pattern [1]_. Here we compare the performance of the three models in terms of the :math:`R^2` score, the fitting time and the sparsity of the estimated coefficients when compared with the ground-truth. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Generate synthetic dataset # -------------------------- # # We generate a dataset where the number of samples is lower than the total # number of features. This leads to an underdetermined system, i.e. the solution # is not unique, and thus we cannot apply an :ref:`ordinary_least_squares` by # itself. Regularization introduces a penalty term to the objective function, # which modifies the optimization problem and can help alleviate the # underdetermined nature of the system. # # The target `y` is a linear combination with alternating signs of sinusoidal # signals. Only the 10 lowest out of the 100 frequencies in `X` are used to # generate `y`, while the rest of the features are not informative. This results # in a high dimensional sparse feature space, where some degree of # l1-penalization is necessary. 
import numpy as np rng = np.random.RandomState(0) n_samples, n_features, n_informative = 50, 100, 10 time_step = np.linspace(-2, 2, n_samples) freqs = 2 * np.pi * np.sort(rng.rand(n_features)) / 0.01 X = np.zeros((n_samples, n_features)) for i in range(n_features): X[:, i] = np.sin(freqs[i] * time_step) idx = np.arange(n_features) true_coef = (-1) ** idx * np.exp(-idx / 10) true_coef[n_informative:] = 0 # sparsify coef y = np.dot(X, true_coef) # %% # Some of the informative features have close frequencies to induce # (anti-)correlations. freqs[:n_informative] # %% # A random phase is introduced using :func:`numpy.random.random_sample` # and some Gaussian noise (implemented by :func:`numpy.random.normal`) # is added to both the features and the target. for i in range(n_features): X[:, i] = np.sin(freqs[i] * time_step + 2 * (rng.random_sample() - 0.5)) X[:, i] += 0.2 * rng.normal(0, 1, n_samples) y += 0.2 * rng.normal(0, 1, n_samples) # %% # Such sparse, noisy and correlated features can be obtained, for instance, from # sensor nodes monitoring some environmental variables, as they typically register # similar values depending on their positions (spatial correlations). # We can visualize the target. import matplotlib.pyplot as plt plt.plot(time_step, y) plt.ylabel("target signal") plt.xlabel("time") _ = plt.title("Superposition of sinusoidal signals") # %% # We split the data into train and test sets for simplicity. In practice one # should use a :class:`~sklearn.model_selection.TimeSeriesSplit` # cross-validation to estimate the variance of the test score. Here we set # `shuffle="False"` as we must not use training data that succeed the testing # data when dealing with data that have a temporal relationship. 
from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, shuffle=False) # %% # In the following, we compute the performance of three l1-based models in terms # of the goodness of fit :math:`R^2` score and the fitting time. Then we make a # plot to compare the sparsity of the estimated coefficients with respect to the # ground-truth coefficients and finally we analyze the previous results. # # Lasso # ----- # # In this example, we demo a :class:`~sklearn.linear_model.Lasso` with a fixed # value of the regularization parameter `alpha`. In practice, the optimal # parameter `alpha` should be selected by passing a # :class:`~sklearn.model_selection.TimeSeriesSplit` cross-validation strategy to a # :class:`~sklearn.linear_model.LassoCV`. To keep the example simple and fast to # execute, we directly set the optimal value for alpha here. from time import time from sklearn.linear_model import Lasso from sklearn.metrics import r2_score t0 = time() lasso = Lasso(alpha=0.14).fit(X_train, y_train) print(f"Lasso fit done in {(time() - t0):.3f}s") y_pred_lasso = lasso.predict(X_test) r2_score_lasso = r2_score(y_test, y_pred_lasso) print(f"Lasso r^2 on test data : {r2_score_lasso:.3f}") # %% # Automatic Relevance Determination (ARD) # --------------------------------------- # # An ARD regression is the Bayesian version of the Lasso. It can produce # interval estimates for all of the parameters, including the error variance, if # required. It is a suitable option when the signals have Gaussian noise. See # the example :ref:`sphx_glr_auto_examples_linear_model_plot_ard.py` for a # comparison of :class:`~sklearn.linear_model.ARDRegression` and # :class:`~sklearn.linear_model.BayesianRidge` regressors. 
from sklearn.linear_model import ARDRegression t0 = time() ard = ARDRegression().fit(X_train, y_train) print(f"ARD fit done in {(time() - t0):.3f}s") y_pred_ard = ard.predict(X_test) r2_score_ard = r2_score(y_test, y_pred_ard) print(f"ARD r^2 on test data : {r2_score_ard:.3f}") # %% # ElasticNet # ---------- # # :class:`~sklearn.linear_model.ElasticNet` is a middle ground between # :class:`~sklearn.linear_model.Lasso` and :class:`~sklearn.linear_model.Ridge`, # as it combines a L1 and a L2-penalty. The amount of regularization is # controlled by the two hyperparameters `l1_ratio` and `alpha`. For `l1_ratio = # 0` the penalty is pure L2 and the model is equivalent to a # :class:`~sklearn.linear_model.Ridge`. Similarly, `l1_ratio = 1` is a pure L1 # penalty and the model is equivalent to a :class:`~sklearn.linear_model.Lasso`. # For `0 < l1_ratio < 1`, the penalty is a combination of L1 and L2. # # As done before, we train the model with fix values for `alpha` and `l1_ratio`. # To select their optimal value we used an # :class:`~sklearn.linear_model.ElasticNetCV`, not shown here to keep the # example simple. from sklearn.linear_model import ElasticNet t0 = time() enet = ElasticNet(alpha=0.08, l1_ratio=0.5).fit(X_train, y_train) print(f"ElasticNet fit done in {(time() - t0):.3f}s") y_pred_enet = enet.predict(X_test) r2_score_enet = r2_score(y_test, y_pred_enet) print(f"ElasticNet r^2 on test data : {r2_score_enet:.3f}") # %% # Plot and analysis of the results # -------------------------------- # # In this section, we use a heatmap to visualize the sparsity of the true # and estimated coefficients of the respective linear models. 
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns from matplotlib.colors import SymLogNorm df = pd.DataFrame( { "True coefficients": true_coef, "Lasso": lasso.coef_, "ARDRegression": ard.coef_, "ElasticNet": enet.coef_, } ) plt.figure(figsize=(10, 6)) ax = sns.heatmap( df.T, norm=SymLogNorm(linthresh=10e-4, vmin=-1, vmax=1), cbar_kws={"label": "coefficients' values"}, cmap="seismic_r", ) plt.ylabel("linear model") plt.xlabel("coefficients") plt.title( f"Models' coefficients\nLasso $R^2$: {r2_score_lasso:.3f}, " f"ARD $R^2$: {r2_score_ard:.3f}, " f"ElasticNet $R^2$: {r2_score_enet:.3f}" ) plt.tight_layout() # %% # In the present example :class:`~sklearn.linear_model.ElasticNet` yields the # best score and captures the most of the predictive features, yet still fails # at finding all the true components. Notice that both # :class:`~sklearn.linear_model.ElasticNet` and # :class:`~sklearn.linear_model.ARDRegression` result in a less sparse model # than a :class:`~sklearn.linear_model.Lasso`. # # Conclusions # ----------- # # :class:`~sklearn.linear_model.Lasso` is known to recover sparse data # effectively but does not perform well with highly correlated features. Indeed, # if several correlated features contribute to the target, # :class:`~sklearn.linear_model.Lasso` would end up selecting a single one of # them. In the case of sparse yet non-correlated features, a # :class:`~sklearn.linear_model.Lasso` model would be more suitable. # # :class:`~sklearn.linear_model.ElasticNet` introduces some sparsity on the # coefficients and shrinks their values to zero. Thus, in the presence of # correlated features that contribute to the target, the model is still able to # reduce their weights without setting them exactly to zero. This results in a # less sparse model than a pure :class:`~sklearn.linear_model.Lasso` and may # capture non-predictive features as well. 
# # :class:`~sklearn.linear_model.ARDRegression` is better when handling Gaussian # noise, but is still unable to handle correlated features and requires a larger # amount of time due to fitting a prior. # # References # ---------- # # .. [1] :doi:`"Lasso-type recovery of sparse representations for # high-dimensional data" N. Meinshausen, B. Yu - The Annals of Statistics # 2009, Vol. 37, No. 1, 246-270 <10.1214/07-AOS582>`
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/linear_model/plot_sgd_weighted_samples.py
examples/linear_model/plot_sgd_weighted_samples.py
""" ===================== SGD: Weighted samples ===================== Plot decision function of a weighted dataset, where the size of points is proportional to its weight. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn import linear_model # we create 20 points np.random.seed(0) X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)] y = [1] * 10 + [-1] * 10 sample_weight = 100 * np.abs(np.random.randn(20)) # and assign a bigger weight to the last 10 samples sample_weight[:10] *= 10 # plot the weighted data points xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500)) fig, ax = plt.subplots() ax.scatter( X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9, cmap=plt.cm.bone, edgecolor="black", ) # fit the unweighted model clf = linear_model.SGDClassifier(alpha=0.01, max_iter=100) clf.fit(X, y) Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) no_weights = ax.contour(xx, yy, Z, levels=[0], linestyles=["solid"]) # fit the weighted model clf = linear_model.SGDClassifier(alpha=0.01, max_iter=100) clf.fit(X, y, sample_weight=sample_weight) Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) samples_weights = ax.contour(xx, yy, Z, levels=[0], linestyles=["dashed"]) no_weights_handles, _ = no_weights.legend_elements() weights_handles, _ = samples_weights.legend_elements() ax.legend( [no_weights_handles[0], weights_handles[0]], ["no weights", "with weights"], loc="lower left", ) ax.set(xticks=(), yticks=()) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/linear_model/plot_ridge_coeffs.py
examples/linear_model/plot_ridge_coeffs.py
""" ========================================================= Ridge coefficients as a function of the L2 Regularization ========================================================= A model that overfits learns the training data too well, capturing both the underlying patterns and the noise in the data. However, when applied to unseen data, the learned associations may not hold. We normally detect this when we apply our trained predictions to the test data and see the statistical performance drop significantly compared to the training data. One way to overcome overfitting is through regularization, which can be done by penalizing large weights (coefficients) in linear models, forcing the model to shrink all coefficients. Regularization reduces a model's reliance on specific information obtained from the training samples. This example illustrates how L2 regularization in a :class:`~sklearn.linear_model.Ridge` regression affects a model's performance by adding a penalty term to the loss that increases with the coefficients :math:`\\beta`. The regularized loss function is given by: :math:`\\mathcal{L}(X, y, \\beta) = \\| y - X \\beta \\|^{2}_{2} + \\alpha \\| \\beta \\|^{2}_{2}` where :math:`X` is the input data, :math:`y` is the target variable, :math:`\\beta` is the vector of coefficients associated with the features, and :math:`\\alpha` is the regularization strength. The regularized loss function aims to balance the trade-off between accurately predicting the training set and to prevent overfitting. In this regularized loss, the left-hand side (e.g. :math:`\\|y - X\\beta\\|^{2}_{2}`) measures the squared difference between the actual target variable, :math:`y`, and the predicted values. Minimizing this term alone could lead to overfitting, as the model may become too complex and sensitive to noise in the training data. To address overfitting, Ridge regularization adds a constraint, called a penalty term, (:math:`\\alpha \\| \\beta\\|^{2}_{2}`) to the loss function. 
This penalty term is the sum of the squares of the model's coefficients, multiplied by the regularization strength :math:`\\alpha`. By introducing this constraint, Ridge regularization discourages any single coefficient :math:`\\beta_{i}` from taking an excessively large value and encourages smaller and more evenly distributed coefficients. Higher values of :math:`\\alpha` force the coefficients towards zero. However, an excessively high :math:`\\alpha` can result in an underfit model that fails to capture important patterns in the data. Therefore, the regularized loss function combines the prediction accuracy term and the penalty term. By adjusting the regularization strength, practitioners can fine-tune the degree of constraint imposed on the weights, training a model capable of generalizing well to unseen data while avoiding overfitting. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Purpose of this example # ----------------------- # For the purpose of showing how Ridge regularization works, we will create a # non-noisy data set. Then we will train a regularized model on a range of # regularization strengths (:math:`\alpha`) and plot how the trained # coefficients and the mean squared error between those and the original values # behave as functions of the regularization strength. # # Creating a non-noisy data set # ***************************** # We make a toy data set with 100 samples and 10 features, that's suitable to # detect regression. Out of the 10 features, 8 are informative and contribute to # the regression, while the remaining 2 features do not have any effect on the # target variable (their true coefficients are 0). Please note that in this # example the data is non-noisy, hence we can expect our regression model to # recover exactly the true coefficients w. 
from sklearn.datasets import make_regression X, y, w = make_regression( n_samples=100, n_features=10, n_informative=8, coef=True, random_state=1 ) # Obtain the true coefficients print(f"The true coefficient of this regression problem are:\n{w}") # %% # Training the Ridge Regressor # **************************** # We use :class:`~sklearn.linear_model.Ridge`, a linear model with L2 # regularization. We train several models, each with a different value for the # model parameter `alpha`, which is a positive constant that multiplies the # penalty term, controlling the regularization strength. For each trained model # we then compute the error between the true coefficients `w` and the # coefficients found by the model `clf`. We store the identified coefficients # and the calculated errors for the corresponding coefficients in lists, which # makes it convenient for us to plot them. import numpy as np from sklearn.linear_model import Ridge from sklearn.metrics import mean_squared_error clf = Ridge() # Generate values for `alpha` that are evenly distributed on a logarithmic scale alphas = np.logspace(-3, 4, 200) coefs = [] errors_coefs = [] # Train the model with different regularisation strengths for a in alphas: clf.set_params(alpha=a).fit(X, y) coefs.append(clf.coef_) errors_coefs.append(mean_squared_error(clf.coef_, w)) # %% # Plotting trained Coefficients and Mean Squared Errors # ***************************************************** # We now plot the 10 different regularized coefficients as a function of the # regularization parameter `alpha` where each color represents a different # coefficient. # # On the right-hand-side, we plot how the errors of the coefficients from the # estimator change as a function of regularization. 
import matplotlib.pyplot as plt import pandas as pd alphas = pd.Index(alphas, name="alpha") coefs = pd.DataFrame(coefs, index=alphas, columns=[f"Feature {i}" for i in range(10)]) errors = pd.Series(errors_coefs, index=alphas, name="Mean squared error") fig, axs = plt.subplots(1, 2, figsize=(20, 6)) coefs.plot( ax=axs[0], logx=True, title="Ridge coefficients as a function of the regularization strength", ) axs[0].set_ylabel("Ridge coefficient values") errors.plot( ax=axs[1], logx=True, title="Coefficient error as a function of the regularization strength", ) _ = axs[1].set_ylabel("Mean squared error") # %% # Interpreting the plots # ********************** # The plot on the left-hand side shows how the regularization strength (`alpha`) # affects the Ridge regression coefficients. Smaller values of `alpha` (weak # regularization), allow the coefficients to closely resemble the true # coefficients (`w`) used to generate the data set. This is because no # additional noise was added to our artificial data set. As `alpha` increases, # the coefficients shrink towards zero, gradually reducing the impact of the # features that were formerly more significant. # # The right-hand side plot shows the mean squared error (MSE) between the # coefficients found by the model and the true coefficients (`w`). It provides a # measure that relates to how exact our ridge model is in comparison to the true # generative model. A low error means that it found coefficients closer to the # ones of the true generative model. In this case, since our toy data set was # non-noisy, we can see that the least regularized model retrieves coefficients # closest to the true coefficients (`w`) (error is close to 0). # # When `alpha` is small, the model captures the intricate details of the # training data, whether those were caused by noise or by actual information. 
As # `alpha` increases, the highest coefficients shrink more rapidly, rendering # their corresponding features less influential in the training process. This # can enhance a model's ability to generalize to unseen data (if there was a lot # of noise to capture), but it also poses the risk of losing performance if the # regularization becomes too strong compared to the amount of noise the data # contained (as in this example). # # In real-world scenarios where data typically includes noise, selecting an # appropriate `alpha` value becomes crucial in striking a balance between an # overfitting and an underfitting model. # # Here, we saw that :class:`~sklearn.linear_model.Ridge` adds a penalty to the # coefficients to fight overfitting. Another problem that occurs is linked to # the presence of outliers in the training dataset. An outlier is a data point # that differs significantly from other observations. Concretely, these outliers # impact the left-hand side term of the loss function that we showed earlier. # Some other linear models are formulated to be robust to outliers such as the # :class:`~sklearn.linear_model.HuberRegressor`. You can learn more about it in # the :ref:`sphx_glr_auto_examples_linear_model_plot_huber_vs_ridge.py` example.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/linear_model/plot_ransac.py
examples/linear_model/plot_ransac.py
""" =========================================== Robust linear model estimation using RANSAC =========================================== In this example, we see how to robustly fit a linear model to faulty data using the :ref:`RANSAC <ransac_regression>` algorithm. The ordinary linear regressor is sensitive to outliers, and the fitted line can easily be skewed away from the true underlying relationship of data. The RANSAC regressor automatically splits the data into inliers and outliers, and the fitted line is determined only by the identified inliers. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import numpy as np from matplotlib import pyplot as plt from sklearn import datasets, linear_model n_samples = 1000 n_outliers = 50 X, y, coef = datasets.make_regression( n_samples=n_samples, n_features=1, n_informative=1, noise=10, coef=True, random_state=0, ) # Add outlier data np.random.seed(0) X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1)) y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers) # Fit line using all data lr = linear_model.LinearRegression() lr.fit(X, y) # Robustly fit linear model with RANSAC algorithm ransac = linear_model.RANSACRegressor() ransac.fit(X, y) inlier_mask = ransac.inlier_mask_ outlier_mask = np.logical_not(inlier_mask) # Predict data of estimated models line_X = np.arange(X.min(), X.max())[:, np.newaxis] line_y = lr.predict(line_X) line_y_ransac = ransac.predict(line_X) # Compare estimated coefficients print("Estimated coefficients (true, linear regression, RANSAC):") print(coef, lr.coef_, ransac.estimator_.coef_) lw = 2 plt.scatter( X[inlier_mask], y[inlier_mask], color="yellowgreen", marker=".", label="Inliers" ) plt.scatter( X[outlier_mask], y[outlier_mask], color="gold", marker=".", label="Outliers" ) plt.plot(line_X, line_y, color="navy", linewidth=lw, label="Linear regressor") plt.plot( line_X, line_y_ransac, color="cornflowerblue", linewidth=lw, label="RANSAC regressor", 
) plt.legend(loc="lower right") plt.xlabel("Input") plt.ylabel("Response") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/linear_model/plot_sgdocsvm_vs_ocsvm.py
examples/linear_model/plot_sgdocsvm_vs_ocsvm.py
""" ==================================================================== One-Class SVM versus One-Class SVM using Stochastic Gradient Descent ==================================================================== This example shows how to approximate the solution of :class:`sklearn.svm.OneClassSVM` in the case of an RBF kernel with :class:`sklearn.linear_model.SGDOneClassSVM`, a Stochastic Gradient Descent (SGD) version of the One-Class SVM. A kernel approximation is first used in order to apply :class:`sklearn.linear_model.SGDOneClassSVM` which implements a linear One-Class SVM using SGD. Note that :class:`sklearn.linear_model.SGDOneClassSVM` scales linearly with the number of samples whereas the complexity of a kernelized :class:`sklearn.svm.OneClassSVM` is at best quadratic with respect to the number of samples. It is not the purpose of this example to illustrate the benefits of such an approximation in terms of computation time but rather to show that we obtain similar results on a toy dataset. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% import matplotlib import matplotlib.lines as mlines import matplotlib.pyplot as plt import numpy as np from sklearn.kernel_approximation import Nystroem from sklearn.linear_model import SGDOneClassSVM from sklearn.pipeline import make_pipeline from sklearn.svm import OneClassSVM font = {"weight": "normal", "size": 15} matplotlib.rc("font", **font) random_state = 42 rng = np.random.RandomState(random_state) # Generate train data X = 0.3 * rng.randn(500, 2) X_train = np.r_[X + 2, X - 2] # Generate some regular novel observations X = 0.3 * rng.randn(20, 2) X_test = np.r_[X + 2, X - 2] # Generate some abnormal novel observations X_outliers = rng.uniform(low=-4, high=4, size=(20, 2)) # OCSVM hyperparameters nu = 0.05 gamma = 2.0 # Fit the One-Class SVM clf = OneClassSVM(gamma=gamma, kernel="rbf", nu=nu) clf.fit(X_train) y_pred_train = clf.predict(X_train) y_pred_test = clf.predict(X_test) y_pred_outliers = clf.predict(X_outliers) n_error_train = y_pred_train[y_pred_train == -1].size n_error_test = y_pred_test[y_pred_test == -1].size n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size # Fit the One-Class SVM using a kernel approximation and SGD transform = Nystroem(gamma=gamma, random_state=random_state) clf_sgd = SGDOneClassSVM( nu=nu, shuffle=True, fit_intercept=True, random_state=random_state, tol=1e-4 ) pipe_sgd = make_pipeline(transform, clf_sgd) pipe_sgd.fit(X_train) y_pred_train_sgd = pipe_sgd.predict(X_train) y_pred_test_sgd = pipe_sgd.predict(X_test) y_pred_outliers_sgd = pipe_sgd.predict(X_outliers) n_error_train_sgd = y_pred_train_sgd[y_pred_train_sgd == -1].size n_error_test_sgd = y_pred_test_sgd[y_pred_test_sgd == -1].size n_error_outliers_sgd = y_pred_outliers_sgd[y_pred_outliers_sgd == 1].size # %% from sklearn.inspection import DecisionBoundaryDisplay _, ax = plt.subplots(figsize=(9, 6)) xx, yy = np.meshgrid(np.linspace(-4.5, 4.5, 50), np.linspace(-4.5, 4.5, 
50)) X = np.concatenate([xx.ravel().reshape(-1, 1), yy.ravel().reshape(-1, 1)], axis=1) DecisionBoundaryDisplay.from_estimator( clf, X, response_method="decision_function", plot_method="contourf", ax=ax, cmap="PuBu", ) DecisionBoundaryDisplay.from_estimator( clf, X, response_method="decision_function", plot_method="contour", ax=ax, linewidths=2, colors="darkred", levels=[0], ) DecisionBoundaryDisplay.from_estimator( clf, X, response_method="decision_function", plot_method="contourf", ax=ax, colors="palevioletred", levels=[0, clf.decision_function(X).max()], ) s = 20 b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c="white", s=s, edgecolors="k") b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c="blueviolet", s=s, edgecolors="k") c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c="gold", s=s, edgecolors="k") ax.set( title="One-Class SVM", xlim=(-4.5, 4.5), ylim=(-4.5, 4.5), xlabel=( f"error train: {n_error_train}/{X_train.shape[0]}; " f"errors novel regular: {n_error_test}/{X_test.shape[0]}; " f"errors novel abnormal: {n_error_outliers}/{X_outliers.shape[0]}" ), ) _ = ax.legend( [mlines.Line2D([], [], color="darkred", label="learned frontier"), b1, b2, c], [ "learned frontier", "training observations", "new regular observations", "new abnormal observations", ], loc="upper left", ) # %% _, ax = plt.subplots(figsize=(9, 6)) xx, yy = np.meshgrid(np.linspace(-4.5, 4.5, 50), np.linspace(-4.5, 4.5, 50)) X = np.concatenate([xx.ravel().reshape(-1, 1), yy.ravel().reshape(-1, 1)], axis=1) DecisionBoundaryDisplay.from_estimator( pipe_sgd, X, response_method="decision_function", plot_method="contourf", ax=ax, cmap="PuBu", ) DecisionBoundaryDisplay.from_estimator( pipe_sgd, X, response_method="decision_function", plot_method="contour", ax=ax, linewidths=2, colors="darkred", levels=[0], ) DecisionBoundaryDisplay.from_estimator( pipe_sgd, X, response_method="decision_function", plot_method="contourf", ax=ax, colors="palevioletred", levels=[0, pipe_sgd.decision_function(X).max()], ) 
s = 20 b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c="white", s=s, edgecolors="k") b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c="blueviolet", s=s, edgecolors="k") c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c="gold", s=s, edgecolors="k") ax.set( title="Online One-Class SVM", xlim=(-4.5, 4.5), ylim=(-4.5, 4.5), xlabel=( f"error train: {n_error_train_sgd}/{X_train.shape[0]}; " f"errors novel regular: {n_error_test_sgd}/{X_test.shape[0]}; " f"errors novel abnormal: {n_error_outliers_sgd}/{X_outliers.shape[0]}" ), ) ax.legend( [mlines.Line2D([], [], color="darkred", label="learned frontier"), b1, b2, c], [ "learned frontier", "training observations", "new regular observations", "new abnormal observations", ], loc="upper left", ) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/linear_model/plot_nnls.py
examples/linear_model/plot_nnls.py
""" ========================== Non-negative least squares ========================== In this example, we fit a linear model with positive constraints on the regression coefficients and compare the estimated coefficients to a classic linear regression. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn.metrics import r2_score # %% # Generate some random data np.random.seed(42) n_samples, n_features = 200, 50 X = np.random.randn(n_samples, n_features) true_coef = 3 * np.random.randn(n_features) # Threshold coefficients to render them non-negative true_coef[true_coef < 0] = 0 y = np.dot(X, true_coef) # Add some noise y += 5 * np.random.normal(size=(n_samples,)) # %% # Split the data in train set and test set from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5) # %% # Fit the Non-Negative least squares. from sklearn.linear_model import LinearRegression reg_nnls = LinearRegression(positive=True) y_pred_nnls = reg_nnls.fit(X_train, y_train).predict(X_test) r2_score_nnls = r2_score(y_test, y_pred_nnls) print("NNLS R2 score", r2_score_nnls) # %% # Fit an OLS. reg_ols = LinearRegression() y_pred_ols = reg_ols.fit(X_train, y_train).predict(X_test) r2_score_ols = r2_score(y_test, y_pred_ols) print("OLS R2 score", r2_score_ols) # %% # Comparing the regression coefficients between OLS and NNLS, we can observe # they are highly correlated (the dashed line is the identity relation), # but the non-negative constraint shrinks some to 0. # The Non-Negative Least squares inherently yield sparse results. 
fig, ax = plt.subplots() ax.plot(reg_ols.coef_, reg_nnls.coef_, linewidth=0, marker=".") low_x, high_x = ax.get_xlim() low_y, high_y = ax.get_ylim() low = max(low_x, low_y) high = min(high_x, high_y) ax.plot([low, high], [low, high], ls="--", c=".3", alpha=0.5) ax.set_xlabel("OLS regression coefficients", fontweight="bold") ax.set_ylabel("NNLS regression coefficients", fontweight="bold")
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/linear_model/plot_theilsen.py
examples/linear_model/plot_theilsen.py
""" ==================== Theil-Sen Regression ==================== Computes a Theil-Sen Regression on a synthetic dataset. See :ref:`theil_sen_regression` for more information on the regressor. Compared to the OLS (ordinary least squares) estimator, the Theil-Sen estimator is robust against outliers. It has a breakdown point of about 29.3% in case of a simple linear regression which means that it can tolerate arbitrary corrupted data (outliers) of up to 29.3% in the two-dimensional case. The estimation of the model is done by calculating the slopes and intercepts of a subpopulation of all possible combinations of p subsample points. If an intercept is fitted, p must be greater than or equal to n_features + 1. The final slope and intercept is then defined as the spatial median of these slopes and intercepts. In certain cases Theil-Sen performs better than :ref:`RANSAC <ransac_regression>` which is also a robust method. This is illustrated in the second example below where outliers with respect to the x-axis perturb RANSAC. Tuning the ``residual_threshold`` parameter of RANSAC remedies this but in general a priori knowledge about the data and the nature of the outliers is needed. Due to the computational complexity of Theil-Sen it is recommended to use it only for small problems in terms of number of samples and features. For larger problems the ``max_subpopulation`` parameter restricts the magnitude of all possible combinations of p subsample points to a randomly chosen subset and therefore also limits the runtime. Therefore, Theil-Sen is applicable to larger problems with the drawback of losing some of its mathematical properties since it then works on a random subset. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import time import matplotlib.pyplot as plt import numpy as np from sklearn.linear_model import LinearRegression, RANSACRegressor, TheilSenRegressor estimators = [ ("OLS", LinearRegression()), ("Theil-Sen", TheilSenRegressor(random_state=42)), ("RANSAC", RANSACRegressor(random_state=42)), ] colors = {"OLS": "turquoise", "Theil-Sen": "gold", "RANSAC": "lightgreen"} lw = 2 # %% # Outliers only in the y direction # -------------------------------- np.random.seed(0) n_samples = 200 # Linear model y = 3*x + N(2, 0.1**2) x = np.random.randn(n_samples) w = 3.0 c = 2.0 noise = 0.1 * np.random.randn(n_samples) y = w * x + c + noise # 10% outliers y[-20:] += -20 * x[-20:] X = x[:, np.newaxis] plt.scatter(x, y, color="indigo", marker="x", s=40) line_x = np.array([-3, 3]) for name, estimator in estimators: t0 = time.time() estimator.fit(X, y) elapsed_time = time.time() - t0 y_pred = estimator.predict(line_x.reshape(2, 1)) plt.plot( line_x, y_pred, color=colors[name], linewidth=lw, label="%s (fit time: %.2fs)" % (name, elapsed_time), ) plt.axis("tight") plt.legend(loc="upper right") _ = plt.title("Corrupt y") # %% # Outliers in the X direction # --------------------------- np.random.seed(0) # Linear model y = 3*x + N(2, 0.1**2) x = np.random.randn(n_samples) noise = 0.1 * np.random.randn(n_samples) y = 3 * x + 2 + noise # 10% outliers x[-20:] = 9.9 y[-20:] += 22 X = x[:, np.newaxis] plt.figure() plt.scatter(x, y, color="indigo", marker="x", s=40) line_x = np.array([-3, 10]) for name, estimator in estimators: t0 = time.time() estimator.fit(X, y) elapsed_time = time.time() - t0 y_pred = estimator.predict(line_x.reshape(2, 1)) plt.plot( line_x, y_pred, color=colors[name], linewidth=lw, label="%s (fit time: %.2fs)" % (name, elapsed_time), ) plt.axis("tight") plt.legend(loc="upper left") plt.title("Corrupt x") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/linear_model/plot_lasso_lars_ic.py
examples/linear_model/plot_lasso_lars_ic.py
""" ============================================== Lasso model selection via information criteria ============================================== This example reproduces the example of Fig. 2 of [ZHT2007]_. A :class:`~sklearn.linear_model.LassoLarsIC` estimator is fit on a diabetes dataset and the AIC and the BIC criteria are used to select the best model. .. note:: It is important to note that the optimization to find `alpha` with :class:`~sklearn.linear_model.LassoLarsIC` relies on the AIC or BIC criteria that are computed in-sample, thus on the training set directly. This approach differs from the cross-validation procedure. For a comparison of the two approaches, you can refer to the following example: :ref:`sphx_glr_auto_examples_linear_model_plot_lasso_model_selection.py`. .. rubric:: References .. [ZHT2007] :arxiv:`Zou, Hui, Trevor Hastie, and Robert Tibshirani. "On the degrees of freedom of the lasso." The Annals of Statistics 35.5 (2007): 2173-2192. <0712.0881>` """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # We will use the diabetes dataset. from sklearn.datasets import load_diabetes X, y = load_diabetes(return_X_y=True, as_frame=True) n_samples = X.shape[0] X.head() # %% # Scikit-learn provides an estimator called # :class:`~sklearn.linear_model.LassoLarsIC` that uses either Akaike's # information criterion (AIC) or the Bayesian information criterion (BIC) to # select the best model. Before fitting # this model, we will scale the dataset. # # In the following, we are going to fit two models to compare the values # reported by AIC and BIC. from sklearn.linear_model import LassoLarsIC from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler lasso_lars_ic = make_pipeline(StandardScaler(), LassoLarsIC(criterion="aic")).fit(X, y) # %% # To be in line with the definition in [ZHT2007]_, we need to rescale the # AIC and the BIC. Indeed, Zou et al. 
are ignoring some constant terms # compared to the original definition of AIC derived from the maximum # log-likelihood of a linear model. You can refer to # :ref:`mathematical detail section for the User Guide <lasso_lars_ic>`. def zou_et_al_criterion_rescaling(criterion, n_samples, noise_variance): """Rescale the information criterion to follow the definition of Zou et al.""" return criterion - n_samples * np.log(2 * np.pi * noise_variance) - n_samples # %% import numpy as np aic_criterion = zou_et_al_criterion_rescaling( lasso_lars_ic[-1].criterion_, n_samples, lasso_lars_ic[-1].noise_variance_, ) index_alpha_path_aic = np.flatnonzero( lasso_lars_ic[-1].alphas_ == lasso_lars_ic[-1].alpha_ )[0] # %% lasso_lars_ic.set_params(lassolarsic__criterion="bic").fit(X, y) bic_criterion = zou_et_al_criterion_rescaling( lasso_lars_ic[-1].criterion_, n_samples, lasso_lars_ic[-1].noise_variance_, ) index_alpha_path_bic = np.flatnonzero( lasso_lars_ic[-1].alphas_ == lasso_lars_ic[-1].alpha_ )[0] # %% # Now that we collected the AIC and BIC, we can as well check that the minima # of both criteria happen at the same alpha. Then, we can simplify the # following plot. index_alpha_path_aic == index_alpha_path_bic # %% # Finally, we can plot the AIC and BIC criterion and the subsequent selected # regularization parameter. import matplotlib.pyplot as plt plt.plot(aic_criterion, color="tab:blue", marker="o", label="AIC criterion") plt.plot(bic_criterion, color="tab:orange", marker="o", label="BIC criterion") plt.vlines( index_alpha_path_bic, aic_criterion.min(), aic_criterion.max(), color="black", linestyle="--", label="Selected alpha", ) plt.legend() plt.ylabel("Information criterion") plt.xlabel("Lasso model sequence") _ = plt.title("Lasso model selection via AIC and BIC")
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/linear_model/plot_tweedie_regression_insurance_claims.py
examples/linear_model/plot_tweedie_regression_insurance_claims.py
# Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause """ ====================================== Tweedie regression on insurance claims ====================================== This example illustrates the use of Poisson, Gamma and Tweedie regression on the `French Motor Third-Party Liability Claims dataset <https://www.openml.org/d/41214>`_, and is inspired by an R tutorial [1]_. In this dataset, each sample corresponds to an insurance policy, i.e. a contract within an insurance company and an individual (policyholder). Available features include driver age, vehicle age, vehicle power, etc. A few definitions: a *claim* is the request made by a policyholder to the insurer to compensate for a loss covered by the insurance. The *claim amount* is the amount of money that the insurer must pay. The *exposure* is the duration of the insurance coverage of a given policy, in years. Here our goal is to predict the expected value, i.e. the mean, of the total claim amount per exposure unit also referred to as the pure premium. There are several possibilities to do that, two of which are: 1. Model the number of claims with a Poisson distribution, and the average claim amount per claim, also known as severity, as a Gamma distribution and multiply the predictions of both in order to get the total claim amount. 2. Model the total claim amount per exposure directly, typically with a Tweedie distribution of Tweedie power :math:`p \\in (1, 2)`. In this example we will illustrate both approaches. We start by defining a few helper functions for loading the data and visualizing results. .. [1] A. Noll, R. Salzmann and M.V. Wuthrich, Case Study: French Motor Third-Party Liability Claims (November 8, 2018). 
`doi:10.2139/ssrn.3164764 <https://doi.org/10.2139/ssrn.3164764>`_ """ # %% from functools import partial import matplotlib.pyplot as plt import numpy as np import pandas as pd from sklearn.datasets import fetch_openml from sklearn.metrics import ( mean_absolute_error, mean_squared_error, mean_tweedie_deviance, ) def load_mtpl2(n_samples=None): """Fetch the French Motor Third-Party Liability Claims dataset. Parameters ---------- n_samples: int, default=None number of samples to select (for faster run time). Full dataset has 678013 samples. """ # freMTPL2freq dataset from https://www.openml.org/d/41214 df_freq = fetch_openml(data_id=41214, as_frame=True).data df_freq["IDpol"] = df_freq["IDpol"].astype(int) df_freq.set_index("IDpol", inplace=True) # freMTPL2sev dataset from https://www.openml.org/d/41215 df_sev = fetch_openml(data_id=41215, as_frame=True).data # sum ClaimAmount over identical IDs df_sev = df_sev.groupby("IDpol").sum() df = df_freq.join(df_sev, how="left") df["ClaimAmount"] = df["ClaimAmount"].fillna(0) # unquote string fields for column_name in df.columns[[t is object for t in df.dtypes.values]]: df[column_name] = df[column_name].str.strip("'") return df.iloc[:n_samples] def plot_obs_pred( df, feature, weight, observed, predicted, y_label=None, title=None, ax=None, fill_legend=False, ): """Plot observed and predicted - aggregated per feature level. 
Parameters ---------- df : DataFrame input data feature: str a column name of df for the feature to be plotted weight : str column name of df with the values of weights or exposure observed : str a column name of df with the observed target predicted : DataFrame a dataframe, with the same index as df, with the predicted target fill_legend : bool, default=False whether to show fill_between legend """ # aggregate observed and predicted variables by feature level df_ = df.loc[:, [feature, weight]].copy() df_["observed"] = df[observed] * df[weight] df_["predicted"] = predicted * df[weight] df_ = ( df_.groupby([feature])[[weight, "observed", "predicted"]] .sum() .assign(observed=lambda x: x["observed"] / x[weight]) .assign(predicted=lambda x: x["predicted"] / x[weight]) ) ax = df_.loc[:, ["observed", "predicted"]].plot(style=".", ax=ax) y_max = df_.loc[:, ["observed", "predicted"]].values.max() * 0.8 p2 = ax.fill_between( df_.index, 0, y_max * df_[weight] / df_[weight].values.max(), color="g", alpha=0.1, ) if fill_legend: ax.legend([p2], ["{} distribution".format(feature)]) ax.set( ylabel=y_label if y_label is not None else None, title=title if title is not None else "Train: Observed vs Predicted", ) def score_estimator( estimator, X_train, X_test, df_train, df_test, target, weights, tweedie_powers=None, ): """Evaluate an estimator on train and test sets with different metrics""" metrics = [ ("D² explained", None), # Use default scorer if it exists ("mean abs. 
error", mean_absolute_error), ("mean squared error", mean_squared_error), ] if tweedie_powers: metrics += [ ( "mean Tweedie dev p={:.4f}".format(power), partial(mean_tweedie_deviance, power=power), ) for power in tweedie_powers ] res = [] for subset_label, X, df in [ ("train", X_train, df_train), ("test", X_test, df_test), ]: y, _weights = df[target], df[weights] for score_label, metric in metrics: if isinstance(estimator, tuple) and len(estimator) == 2: # Score the model consisting of the product of frequency and # severity models. est_freq, est_sev = estimator y_pred = est_freq.predict(X) * est_sev.predict(X) else: y_pred = estimator.predict(X) if metric is None: if not hasattr(estimator, "score"): continue score = estimator.score(X, y, sample_weight=_weights) else: score = metric(y, y_pred, sample_weight=_weights) res.append({"subset": subset_label, "metric": score_label, "score": score}) res = ( pd.DataFrame(res) .set_index(["metric", "subset"]) .score.unstack(-1) .round(4) .loc[:, ["train", "test"]] ) return res # %% # Loading datasets, basic feature extraction and target definitions # ----------------------------------------------------------------- # # We construct the freMTPL2 dataset by joining the freMTPL2freq table, # containing the number of claims (``ClaimNb``), with the freMTPL2sev table, # containing the claim amount (``ClaimAmount``) for the same policy ids # (``IDpol``). from sklearn.compose import ColumnTransformer from sklearn.pipeline import make_pipeline from sklearn.preprocessing import ( FunctionTransformer, KBinsDiscretizer, OneHotEncoder, StandardScaler, ) df = load_mtpl2() # Correct for unreasonable observations (that might be data error) # and a few exceptionally large claim amounts df["ClaimNb"] = df["ClaimNb"].clip(upper=4) df["Exposure"] = df["Exposure"].clip(upper=1) df["ClaimAmount"] = df["ClaimAmount"].clip(upper=200000) # If the claim amount is 0, then we do not count it as a claim. 
The loss function # used by the severity model needs strictly positive claim amounts. This way # frequency and severity are more consistent with each other. df.loc[(df["ClaimAmount"] == 0) & (df["ClaimNb"] >= 1), "ClaimNb"] = 0 log_scale_transformer = make_pipeline( FunctionTransformer(func=np.log), StandardScaler() ) column_trans = ColumnTransformer( [ ( "binned_numeric", KBinsDiscretizer( n_bins=10, quantile_method="averaged_inverted_cdf", random_state=0 ), ["VehAge", "DrivAge"], ), ( "onehot_categorical", OneHotEncoder(), ["VehBrand", "VehPower", "VehGas", "Region", "Area"], ), ("passthrough_numeric", "passthrough", ["BonusMalus"]), ("log_scaled_numeric", log_scale_transformer, ["Density"]), ], remainder="drop", ) X = column_trans.fit_transform(df) # Insurances companies are interested in modeling the Pure Premium, that is # the expected total claim amount per unit of exposure for each policyholder # in their portfolio: df["PurePremium"] = df["ClaimAmount"] / df["Exposure"] # This can be indirectly approximated by a 2-step modeling: the product of the # Frequency times the average claim amount per claim: df["Frequency"] = df["ClaimNb"] / df["Exposure"] df["AvgClaimAmount"] = df["ClaimAmount"] / np.fmax(df["ClaimNb"], 1) with pd.option_context("display.max_columns", 15): print(df[df.ClaimAmount > 0].head()) # %% # # Frequency model -- Poisson distribution # --------------------------------------- # # The number of claims (``ClaimNb``) is a positive integer (0 included). # Thus, this target can be modelled by a Poisson distribution. # It is then assumed to be the number of discrete events occurring with a # constant rate in a given time interval (``Exposure``, in units of years). # Here we model the frequency ``y = ClaimNb / Exposure``, which is still a # (scaled) Poisson distribution, and use ``Exposure`` as `sample_weight`. 
from sklearn.linear_model import PoissonRegressor from sklearn.model_selection import train_test_split df_train, df_test, X_train, X_test = train_test_split(df, X, random_state=0) # %% # # Let us keep in mind that despite the seemingly large number of data points in # this dataset, the number of evaluation points where the claim amount is # non-zero is quite small: len(df_test) # %% len(df_test[df_test["ClaimAmount"] > 0]) # %% # # As a consequence, we expect a significant variability in our # evaluation upon random resampling of the train test split. # # The parameters of the model are estimated by minimizing the Poisson deviance # on the training set via a Newton solver. Some of the features are collinear # (e.g. because we did not drop any categorical level in the `OneHotEncoder`), # we use a weak L2 penalization to avoid numerical issues. glm_freq = PoissonRegressor(alpha=1e-4, solver="newton-cholesky") glm_freq.fit(X_train, df_train["Frequency"], sample_weight=df_train["Exposure"]) scores = score_estimator( glm_freq, X_train, X_test, df_train, df_test, target="Frequency", weights="Exposure", ) print("Evaluation of PoissonRegressor on target Frequency") print(scores) # %% # # Note that the score measured on the test set is surprisingly better than on # the training set. This might be specific to this random train-test split. # Proper cross-validation could help us to assess the sampling variability of # these results. # # We can visually compare observed and predicted values, aggregated by the # drivers age (``DrivAge``), vehicle age (``VehAge``) and the insurance # bonus/malus (``BonusMalus``). 
fig, ax = plt.subplots(ncols=2, nrows=2, figsize=(16, 8)) fig.subplots_adjust(hspace=0.3, wspace=0.2) plot_obs_pred( df=df_train, feature="DrivAge", weight="Exposure", observed="Frequency", predicted=glm_freq.predict(X_train), y_label="Claim Frequency", title="train data", ax=ax[0, 0], ) plot_obs_pred( df=df_test, feature="DrivAge", weight="Exposure", observed="Frequency", predicted=glm_freq.predict(X_test), y_label="Claim Frequency", title="test data", ax=ax[0, 1], fill_legend=True, ) plot_obs_pred( df=df_test, feature="VehAge", weight="Exposure", observed="Frequency", predicted=glm_freq.predict(X_test), y_label="Claim Frequency", title="test data", ax=ax[1, 0], fill_legend=True, ) plot_obs_pred( df=df_test, feature="BonusMalus", weight="Exposure", observed="Frequency", predicted=glm_freq.predict(X_test), y_label="Claim Frequency", title="test data", ax=ax[1, 1], fill_legend=True, ) # %% # According to the observed data, the frequency of accidents is higher for # drivers younger than 30 years old, and is positively correlated with the # `BonusMalus` variable. Our model is able to mostly correctly model this # behaviour. # # Severity Model - Gamma distribution # ------------------------------------ # The mean claim amount or severity (`AvgClaimAmount`) can be empirically # shown to follow approximately a Gamma distribution. We fit a GLM model for # the severity with the same features as the frequency model. # # Note: # # - We filter out ``ClaimAmount == 0`` as the Gamma distribution has support # on :math:`(0, \infty)`, not :math:`[0, \infty)`. # - We use ``ClaimNb`` as `sample_weight` to account for policies that contain # more than one claim. 
from sklearn.linear_model import GammaRegressor mask_train = df_train["ClaimAmount"] > 0 mask_test = df_test["ClaimAmount"] > 0 glm_sev = GammaRegressor(alpha=10.0, solver="newton-cholesky") glm_sev.fit( X_train[mask_train.values], df_train.loc[mask_train, "AvgClaimAmount"], sample_weight=df_train.loc[mask_train, "ClaimNb"], ) scores = score_estimator( glm_sev, X_train[mask_train.values], X_test[mask_test.values], df_train[mask_train], df_test[mask_test], target="AvgClaimAmount", weights="ClaimNb", ) print("Evaluation of GammaRegressor on target AvgClaimAmount") print(scores) # %% # # Those values of the metrics are not necessarily easy to interpret. It can be # insightful to compare them with a model that does not use any input # features and always predicts a constant value, i.e. the average claim # amount, in the same setting: from sklearn.dummy import DummyRegressor dummy_sev = DummyRegressor(strategy="mean") dummy_sev.fit( X_train[mask_train.values], df_train.loc[mask_train, "AvgClaimAmount"], sample_weight=df_train.loc[mask_train, "ClaimNb"], ) scores = score_estimator( dummy_sev, X_train[mask_train.values], X_test[mask_test.values], df_train[mask_train], df_test[mask_test], target="AvgClaimAmount", weights="ClaimNb", ) print("Evaluation of a mean predictor on target AvgClaimAmount") print(scores) # %% # # We conclude that the claim amount is very challenging to predict. Still, the # :class:`~sklearn.linear_model.GammaRegressor` is able to leverage some # information from the input features to slightly improve upon the mean # baseline in terms of D². # # Note that the resulting model is the average claim amount per claim. As such, # it is conditional on having at least one claim, and cannot be used to predict # the average claim amount per policy. For this, it needs to be combined with # a claims frequency model. 
print( "Mean AvgClaim Amount per policy: %.2f " % df_train["AvgClaimAmount"].mean() ) print( "Mean AvgClaim Amount | NbClaim > 0: %.2f" % df_train["AvgClaimAmount"][df_train["AvgClaimAmount"] > 0].mean() ) print( "Predicted Mean AvgClaim Amount | NbClaim > 0: %.2f" % glm_sev.predict(X_train).mean() ) print( "Predicted Mean AvgClaim Amount (dummy) | NbClaim > 0: %.2f" % dummy_sev.predict(X_train).mean() ) # %% # We can visually compare observed and predicted values, aggregated for # the drivers age (``DrivAge``). fig, ax = plt.subplots(ncols=1, nrows=2, figsize=(16, 6)) plot_obs_pred( df=df_train.loc[mask_train], feature="DrivAge", weight="Exposure", observed="AvgClaimAmount", predicted=glm_sev.predict(X_train[mask_train.values]), y_label="Average Claim Severity", title="train data", ax=ax[0], ) plot_obs_pred( df=df_test.loc[mask_test], feature="DrivAge", weight="Exposure", observed="AvgClaimAmount", predicted=glm_sev.predict(X_test[mask_test.values]), y_label="Average Claim Severity", title="test data", ax=ax[1], fill_legend=True, ) plt.tight_layout() # %% # Overall, the drivers age (``DrivAge``) has a weak impact on the claim # severity, both in observed and predicted data. # # Pure Premium Modeling via a Product Model vs single TweedieRegressor # -------------------------------------------------------------------- # As mentioned in the introduction, the total claim amount per unit of # exposure can be modeled as the product of the prediction of the # frequency model by the prediction of the severity model. # # Alternatively, one can directly model the total loss with a unique # Compound Poisson Gamma generalized linear model (with a log link function). # This model is a special case of the Tweedie GLM with a "power" parameter # :math:`p \in (1, 2)`. Here, we fix apriori the `power` parameter of the # Tweedie model to some arbitrary value (1.9) in the valid range. 
Ideally one # would select this value via grid-search by minimizing the negative # log-likelihood of the Tweedie model, but unfortunately the current # implementation does not allow for this (yet). # # We will compare the performance of both approaches. # To quantify the performance of both models, one can compute # the mean deviance of the train and test data assuming a Compound # Poisson-Gamma distribution of the total claim amount. This is equivalent to # a Tweedie distribution with a `power` parameter between 1 and 2. # # The :func:`sklearn.metrics.mean_tweedie_deviance` depends on a `power` # parameter. As we do not know the true value of the `power` parameter, we here # compute the mean deviances for a grid of possible values, and compare the # models side by side, i.e. we compare them at identical values of `power`. # Ideally, we hope that one model will be consistently better than the other, # regardless of `power`. from sklearn.linear_model import TweedieRegressor glm_pure_premium = TweedieRegressor(power=1.9, alpha=0.1, solver="newton-cholesky") glm_pure_premium.fit( X_train, df_train["PurePremium"], sample_weight=df_train["Exposure"] ) tweedie_powers = [1.5, 1.7, 1.8, 1.9, 1.99, 1.999, 1.9999] scores_product_model = score_estimator( (glm_freq, glm_sev), X_train, X_test, df_train, df_test, target="PurePremium", weights="Exposure", tweedie_powers=tweedie_powers, ) scores_glm_pure_premium = score_estimator( glm_pure_premium, X_train, X_test, df_train, df_test, target="PurePremium", weights="Exposure", tweedie_powers=tweedie_powers, ) scores = pd.concat( [scores_product_model, scores_glm_pure_premium], axis=1, sort=True, keys=("Product Model", "TweedieRegressor"), ) print("Evaluation of the Product Model and the Tweedie Regressor on target PurePremium") with pd.option_context("display.expand_frame_repr", False): print(scores) # %% # In this example, both modeling approaches yield comparable performance # metrics. 
For implementation reasons, the percentage of explained variance # :math:`D^2` is not available for the product model. # # We can additionally validate these models by comparing observed and # predicted total claim amount over the test and train subsets. We see that, # on average, both model tend to underestimate the total claim (but this # behavior depends on the amount of regularization). res = [] for subset_label, X, df in [ ("train", X_train, df_train), ("test", X_test, df_test), ]: exposure = df["Exposure"].values res.append( { "subset": subset_label, "observed": df["ClaimAmount"].values.sum(), "predicted, frequency*severity model": np.sum( exposure * glm_freq.predict(X) * glm_sev.predict(X) ), "predicted, tweedie, power=%.2f" % glm_pure_premium.power: np.sum( exposure * glm_pure_premium.predict(X) ), } ) print(pd.DataFrame(res).set_index("subset").T) # %% # # Finally, we can compare the two models using a plot of cumulative claims: for # each model, the policyholders are ranked from safest to riskiest based on the # model predictions and the cumulative proportion of claim amounts is plotted # against the cumulative proportion of exposure. This plot is often called # the ordered Lorenz curve of the model. # # The Gini coefficient (based on the area between the curve and the diagonal) # can be used as a model selection metric to quantify the ability of the model # to rank policyholders. Note that this metric does not reflect the ability of # the models to make accurate predictions in terms of absolute value of total # claim amounts but only in terms of relative amounts as a ranking metric. The # Gini coefficient is upper bounded by 1.0 but even an oracle model that ranks # the policyholders by the observed claim amounts cannot reach a score of 1.0. 
# # We observe that both models are able to rank policyholders by riskiness # significantly better than chance although they are also both far from the # oracle model due to the natural difficulty of the prediction problem from a # few features: most accidents are not predictable and can be caused by # environmental circumstances that are not described at all by the input # features of the models. # # Note that the Gini index only characterizes the ranking performance of the # model but not its calibration: any monotonic transformation of the predictions # leaves the Gini index of the model unchanged. # # Finally one should highlight that the Compound Poisson Gamma model that is # directly fit on the pure premium is operationally simpler to develop and # maintain as it consists of a single scikit-learn estimator instead of a pair # of models, each with its own set of hyperparameters. from sklearn.metrics import auc def lorenz_curve(y_true, y_pred, exposure): y_true, y_pred = np.asarray(y_true), np.asarray(y_pred) exposure = np.asarray(exposure) # order samples by increasing predicted risk: ranking = np.argsort(y_pred) ranked_exposure = exposure[ranking] ranked_pure_premium = y_true[ranking] cumulative_claim_amount = np.cumsum(ranked_pure_premium * ranked_exposure) cumulative_claim_amount /= cumulative_claim_amount[-1] cumulative_exposure = np.cumsum(ranked_exposure) cumulative_exposure /= cumulative_exposure[-1] return cumulative_exposure, cumulative_claim_amount fig, ax = plt.subplots(figsize=(8, 8)) y_pred_product = glm_freq.predict(X_test) * glm_sev.predict(X_test) y_pred_total = glm_pure_premium.predict(X_test) for label, y_pred in [ ("Frequency * Severity model", y_pred_product), ("Compound Poisson Gamma", y_pred_total), ]: cum_exposure, cum_claims = lorenz_curve( df_test["PurePremium"], y_pred, df_test["Exposure"] ) gini = 1 - 2 * auc(cum_exposure, cum_claims) label += " (Gini index: {:.3f})".format(gini) ax.plot(cum_exposure, cum_claims, linestyle="-", 
label=label) # Oracle model: y_pred == y_test cum_exposure, cum_claims = lorenz_curve( df_test["PurePremium"], df_test["PurePremium"], df_test["Exposure"] ) gini = 1 - 2 * auc(cum_exposure, cum_claims) label = "Oracle (Gini index: {:.3f})".format(gini) ax.plot(cum_exposure, cum_claims, linestyle="-.", color="gray", label=label) # Random baseline ax.plot([0, 1], [0, 1], linestyle="--", color="black", label="Random baseline") ax.set( title="Lorenz Curves", xlabel=( "Cumulative proportion of exposure\n(ordered by model from safest to riskiest)" ), ylabel="Cumulative proportion of claim amounts", ) ax.legend(loc="upper left") plt.plot()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/linear_model/plot_sgd_separating_hyperplane.py
examples/linear_model/plot_sgd_separating_hyperplane.py
""" ========================================= SGD: Maximum margin separating hyperplane ========================================= Plot the maximum margin separating hyperplane within a two-class separable dataset using a linear Support Vector Machines classifier trained using SGD. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn.datasets import make_blobs from sklearn.linear_model import SGDClassifier # we create 50 separable points X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60) # fit the model clf = SGDClassifier(loss="hinge", alpha=0.01, max_iter=200) clf.fit(X, Y) # plot the line, the points, and the nearest vectors to the plane xx = np.linspace(-1, 5, 10) yy = np.linspace(-1, 5, 10) X1, X2 = np.meshgrid(xx, yy) Z = np.empty(X1.shape) for (i, j), val in np.ndenumerate(X1): x1 = val x2 = X2[i, j] p = clf.decision_function([[x1, x2]]) Z[i, j] = p[0] levels = [-1.0, 0.0, 1.0] linestyles = ["dashed", "solid", "dashed"] colors = "k" plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles) plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired, edgecolor="black", s=20) plt.axis("tight") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/linear_model/plot_logistic_path.py
examples/linear_model/plot_logistic_path.py
""" ============================================== Regularization path of L1- Logistic Regression ============================================== Train l1-penalized logistic regression models on a binary classification problem derived from the Iris dataset. The models are ordered from strongest regularized to least regularized. The 4 coefficients of the models are collected and plotted as a "regularization path": on the left-hand side of the figure (strong regularizers), all the coefficients are exactly 0. When regularization gets progressively looser, coefficients can get non-zero values one after the other. Here we choose the liblinear solver because it can efficiently optimize for the Logistic Regression loss with a non-smooth, sparsity inducing l1 penalty. Also note that we set a low value for the tolerance to make sure that the model has converged before collecting the coefficients. We also use warm_start=True which means that the coefficients of the models are reused to initialize the next model fit to speed-up the computation of the full-path. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Load data # --------- from sklearn import datasets iris = datasets.load_iris() X = iris.data y = iris.target feature_names = iris.feature_names # %% # Here we remove the third class to make the problem a binary classification X = X[y != 2] y = y[y != 2] # %% # Compute regularization path # --------------------------- import numpy as np from sklearn.linear_model import LogisticRegression from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler from sklearn.svm import l1_min_c cs = l1_min_c(X, y, loss="log") * np.logspace(0, 1, 16) # %% # Create a pipeline with `StandardScaler` and `LogisticRegression`, to normalize # the data before fitting a linear model, in order to speed-up convergence and # make the coefficients comparable. 
Also, as a side effect, since the data is now # centered around 0, we don't need to fit an intercept. clf = make_pipeline( StandardScaler(), LogisticRegression( l1_ratio=1, solver="liblinear", tol=1e-6, max_iter=int(1e6), warm_start=True, fit_intercept=False, ), ) coefs_ = [] for c in cs: clf.set_params(logisticregression__C=c) clf.fit(X, y) coefs_.append(clf["logisticregression"].coef_.ravel().copy()) coefs_ = np.array(coefs_) # %% # Plot regularization path # ------------------------ import matplotlib.pyplot as plt # Colorblind-friendly palette (IBM Color Blind Safe palette) colors = ["#648FFF", "#785EF0", "#DC267F", "#FE6100"] plt.figure(figsize=(10, 6)) for i in range(coefs_.shape[1]): plt.semilogx(cs, coefs_[:, i], marker="o", color=colors[i], label=feature_names[i]) ymin, ymax = plt.ylim() plt.xlabel("C") plt.ylabel("Coefficients") plt.title("Logistic Regression Path") plt.legend() plt.axis("tight") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/linear_model/plot_quantile_regression.py
examples/linear_model/plot_quantile_regression.py
""" =================== Quantile regression =================== This example illustrates how quantile regression can predict non-trivial conditional quantiles. The left figure shows the case when the error distribution is normal, but has non-constant variance, i.e. with heteroscedasticity. The right figure shows an example of an asymmetric error distribution, namely the Pareto distribution. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Dataset generation # ------------------ # # To illustrate the behaviour of quantile regression, we will generate two # synthetic datasets. The true generative random processes for both datasets # will be composed by the same expected value with a linear relationship with a # single feature `x`. import numpy as np rng = np.random.RandomState(42) x = np.linspace(start=0, stop=10, num=100) X = x[:, np.newaxis] y_true_mean = 10 + 0.5 * x # %% # We will create two subsequent problems by changing the distribution of the # target `y` while keeping the same expected value: # # - in the first case, a heteroscedastic Normal noise is added; # - in the second case, an asymmetric Pareto noise is added. y_normal = y_true_mean + rng.normal(loc=0, scale=0.5 + 0.5 * x, size=x.shape[0]) a = 5 y_pareto = y_true_mean + 10 * (rng.pareto(a, size=x.shape[0]) - 1 / (a - 1)) # %% # Let's first visualize the datasets as well as the distribution of the # residuals `y - mean(y)`. 
import matplotlib.pyplot as plt _, axs = plt.subplots(nrows=2, ncols=2, figsize=(15, 11), sharex="row", sharey="row") axs[0, 0].plot(x, y_true_mean, label="True mean") axs[0, 0].scatter(x, y_normal, color="black", alpha=0.5, label="Observations") axs[1, 0].hist(y_true_mean - y_normal, edgecolor="black") axs[0, 1].plot(x, y_true_mean, label="True mean") axs[0, 1].scatter(x, y_pareto, color="black", alpha=0.5, label="Observations") axs[1, 1].hist(y_true_mean - y_pareto, edgecolor="black") axs[0, 0].set_title("Dataset with heteroscedastic Normal distributed targets") axs[0, 1].set_title("Dataset with asymmetric Pareto distributed target") axs[1, 0].set_title( "Residuals distribution for heteroscedastic Normal distributed targets" ) axs[1, 1].set_title("Residuals distribution for asymmetric Pareto distributed target") axs[0, 0].legend() axs[0, 1].legend() axs[0, 0].set_ylabel("y") axs[1, 0].set_ylabel("Counts") axs[0, 1].set_xlabel("x") axs[0, 0].set_xlabel("x") axs[1, 0].set_xlabel("Residuals") _ = axs[1, 1].set_xlabel("Residuals") # %% # With the heteroscedastic Normal distributed target, we observe that the # variance of the noise is increasing when the value of the feature `x` is # increasing. # # With the asymmetric Pareto distributed target, we observe that the positive # residuals are bounded. # # These types of noisy targets make the estimation via # :class:`~sklearn.linear_model.LinearRegression` less efficient, i.e. we need # more data to get stable results and, in addition, large outliers can have a # huge impact on the fitted coefficients. (Stated otherwise: in a setting with # constant variance, ordinary least squares estimators converge much faster to # the *true* coefficients with increasing sample size.) # # In this asymmetric setting, the median or different quantiles give additional # insights. On top of that, median estimation is much more robust to outliers # and heavy tailed distributions. 
But note that extreme quantiles are estimated # by very few data points. 95% quantile are more or less estimated by the 5% # largest values and thus also a bit sensitive outliers. # # In the remainder of this tutorial, we will show how # :class:`~sklearn.linear_model.QuantileRegressor` can be used in practice and # give the intuition into the properties of the fitted models. Finally, # we will compare the both :class:`~sklearn.linear_model.QuantileRegressor` # and :class:`~sklearn.linear_model.LinearRegression`. # # Fitting a `QuantileRegressor` # ----------------------------- # # In this section, we want to estimate the conditional median as well as # a low and high quantile fixed at 5% and 95%, respectively. Thus, we will get # three linear models, one for each quantile. # # We will use the quantiles at 5% and 95% to find the outliers in the training # sample beyond the central 90% interval. # %% from sklearn.linear_model import QuantileRegressor quantiles = [0.05, 0.5, 0.95] predictions = {} out_bounds_predictions = np.zeros_like(y_true_mean, dtype=np.bool_) for quantile in quantiles: qr = QuantileRegressor(quantile=quantile, alpha=0) y_pred = qr.fit(X, y_normal).predict(X) predictions[quantile] = y_pred if quantile == min(quantiles): out_bounds_predictions = np.logical_or( out_bounds_predictions, y_pred >= y_normal ) elif quantile == max(quantiles): out_bounds_predictions = np.logical_or( out_bounds_predictions, y_pred <= y_normal ) # %% # Now, we can plot the three linear models and the distinguished samples that # are within the central 90% interval from samples that are outside this # interval. 
plt.plot(X, y_true_mean, color="black", linestyle="dashed", label="True mean") for quantile, y_pred in predictions.items(): plt.plot(X, y_pred, label=f"Quantile: {quantile}") plt.scatter( x[out_bounds_predictions], y_normal[out_bounds_predictions], color="black", marker="+", alpha=0.5, label="Outside interval", ) plt.scatter( x[~out_bounds_predictions], y_normal[~out_bounds_predictions], color="black", alpha=0.5, label="Inside interval", ) plt.legend() plt.xlabel("x") plt.ylabel("y") _ = plt.title("Quantiles of heteroscedastic Normal distributed target") # %% # Since the noise is still Normally distributed, in particular is symmetric, # the true conditional mean and the true conditional median coincide. Indeed, # we see that the estimated median almost hits the true mean. We observe the # effect of having an increasing noise variance on the 5% and 95% quantiles: # the slopes of those quantiles are very different and the interval between # them becomes wider with increasing `x`. # # To get an additional intuition regarding the meaning of the 5% and 95% # quantiles estimators, one can count the number of samples above and below the # predicted quantiles (represented by a cross on the above plot), considering # that we have a total of 100 samples. # # We can repeat the same experiment using the asymmetric Pareto distributed # target. 
quantiles = [0.05, 0.5, 0.95] predictions = {} out_bounds_predictions = np.zeros_like(y_true_mean, dtype=np.bool_) for quantile in quantiles: qr = QuantileRegressor(quantile=quantile, alpha=0) y_pred = qr.fit(X, y_pareto).predict(X) predictions[quantile] = y_pred if quantile == min(quantiles): out_bounds_predictions = np.logical_or( out_bounds_predictions, y_pred >= y_pareto ) elif quantile == max(quantiles): out_bounds_predictions = np.logical_or( out_bounds_predictions, y_pred <= y_pareto ) # %% plt.plot(X, y_true_mean, color="black", linestyle="dashed", label="True mean") for quantile, y_pred in predictions.items(): plt.plot(X, y_pred, label=f"Quantile: {quantile}") plt.scatter( x[out_bounds_predictions], y_pareto[out_bounds_predictions], color="black", marker="+", alpha=0.5, label="Outside interval", ) plt.scatter( x[~out_bounds_predictions], y_pareto[~out_bounds_predictions], color="black", alpha=0.5, label="Inside interval", ) plt.legend() plt.xlabel("x") plt.ylabel("y") _ = plt.title("Quantiles of asymmetric Pareto distributed target") # %% # Due to the asymmetry of the distribution of the noise, we observe that the # true mean and estimated conditional median are different. We also observe # that each quantile model has different parameters to better fit the desired # quantile. Note that ideally, all quantiles would be parallel in this case, # which would become more visible with more data points or less extreme # quantiles, e.g. 10% and 90%. # # Comparing `QuantileRegressor` and `LinearRegression` # ---------------------------------------------------- # # In this section, we will linger on the difference regarding the loss functions that # :class:`~sklearn.linear_model.QuantileRegressor` and # :class:`~sklearn.linear_model.LinearRegression` are minimizing. # # Indeed, :class:`~sklearn.linear_model.LinearRegression` is a least squares # approach minimizing the mean squared error (MSE) between the training and # predicted targets. 
In contrast, # :class:`~sklearn.linear_model.QuantileRegressor` with `quantile=0.5` # minimizes the mean absolute error (MAE) instead. # # Why does it matter? The loss functions specify what exactly the model is aiming # to predict, see # :ref:`user guide on the choice of scoring function<which_scoring_function>`. # In short, a model minimizing MSE predicts the mean (expectation) and a model # minimizing MAE predicts the median. # # Let's compute the training errors of such models in terms of mean # squared error and mean absolute error. We will use the asymmetric Pareto # distributed target to make it more interesting as mean and median are not # equal. from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_absolute_error, mean_squared_error linear_regression = LinearRegression() quantile_regression = QuantileRegressor(quantile=0.5, alpha=0) y_pred_lr = linear_regression.fit(X, y_pareto).predict(X) y_pred_qr = quantile_regression.fit(X, y_pareto).predict(X) print( "Training error (in-sample performance)\n" f"{'model':<20} MAE MSE\n" f"{linear_regression.__class__.__name__:<20} " f"{mean_absolute_error(y_pareto, y_pred_lr):5.3f} " f"{mean_squared_error(y_pareto, y_pred_lr):5.3f}\n" f"{quantile_regression.__class__.__name__:<20} " f"{mean_absolute_error(y_pareto, y_pred_qr):5.3f} " f"{mean_squared_error(y_pareto, y_pred_qr):5.3f}" ) # %% # On the training set, we see that MAE is lower for # :class:`~sklearn.linear_model.QuantileRegressor` than # :class:`~sklearn.linear_model.LinearRegression`. In contrast to that, MSE is # lower for :class:`~sklearn.linear_model.LinearRegression` than # :class:`~sklearn.linear_model.QuantileRegressor`. These results confirms that # MAE is the loss minimized by :class:`~sklearn.linear_model.QuantileRegressor` # while MSE is the loss minimized # :class:`~sklearn.linear_model.LinearRegression`. # # We can make a similar evaluation by looking at the test error obtained by # cross-validation. 
from sklearn.model_selection import cross_validate cv_results_lr = cross_validate( linear_regression, X, y_pareto, cv=3, scoring=["neg_mean_absolute_error", "neg_mean_squared_error"], ) cv_results_qr = cross_validate( quantile_regression, X, y_pareto, cv=3, scoring=["neg_mean_absolute_error", "neg_mean_squared_error"], ) print( "Test error (cross-validated performance)\n" f"{'model':<20} MAE MSE\n" f"{linear_regression.__class__.__name__:<20} " f"{-cv_results_lr['test_neg_mean_absolute_error'].mean():5.3f} " f"{-cv_results_lr['test_neg_mean_squared_error'].mean():5.3f}\n" f"{quantile_regression.__class__.__name__:<20} " f"{-cv_results_qr['test_neg_mean_absolute_error'].mean():5.3f} " f"{-cv_results_qr['test_neg_mean_squared_error'].mean():5.3f}" ) # %% # We reach similar conclusions on the out-of-sample evaluation.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/linear_model/plot_multi_task_lasso_support.py
examples/linear_model/plot_multi_task_lasso_support.py
""" ============================================= Joint feature selection with multi-task Lasso ============================================= The multi-task lasso allows to fit multiple regression problems jointly enforcing the selected features to be the same across tasks. This example simulates sequential measurements, each task is a time instant, and the relevant features vary in amplitude over time while being the same. The multi-task lasso imposes that features that are selected at one time point are select for all time point. This makes feature selection by the Lasso more stable. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Generate data # ------------- import numpy as np rng = np.random.RandomState(42) # Generate some 2D coefficients with sine waves with random frequency and phase n_samples, n_features, n_tasks = 100, 30, 40 n_relevant_features = 5 coef = np.zeros((n_tasks, n_features)) times = np.linspace(0, 2 * np.pi, n_tasks) for k in range(n_relevant_features): coef[:, k] = np.sin((1.0 + rng.randn(1)) * times + 3 * rng.randn(1)) X = rng.randn(n_samples, n_features) Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks) # %% # Fit models # ---------- from sklearn.linear_model import Lasso, MultiTaskLasso coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T]) coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.0).fit(X, Y).coef_ # %% # Plot support and time series # ---------------------------- import matplotlib.pyplot as plt fig = plt.figure(figsize=(8, 5)) plt.subplot(1, 2, 1) plt.spy(coef_lasso_) plt.xlabel("Feature") plt.ylabel("Time (or Task)") plt.text(10, 5, "Lasso") plt.subplot(1, 2, 2) plt.spy(coef_multi_task_lasso_) plt.xlabel("Feature") plt.ylabel("Time (or Task)") plt.text(10, 5, "MultiTaskLasso") fig.suptitle("Coefficient non-zero location") feature_to_plot = 0 plt.figure() lw = 2 plt.plot(coef[:, feature_to_plot], color="seagreen", linewidth=lw, label="Ground truth") plt.plot( 
coef_lasso_[:, feature_to_plot], color="cornflowerblue", linewidth=lw, label="Lasso" ) plt.plot( coef_multi_task_lasso_[:, feature_to_plot], color="gold", linewidth=lw, label="MultiTaskLasso", ) plt.legend(loc="upper center") plt.axis("tight") plt.ylim([-1.1, 1.1]) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/linear_model/plot_ard.py
examples/linear_model/plot_ard.py
""" ==================================== Comparing Linear Bayesian Regressors ==================================== This example compares two different bayesian regressors: - an :ref:`automatic_relevance_determination` - a :ref:`bayesian_ridge_regression` In the first part, we use an :ref:`ordinary_least_squares` (OLS) model as a baseline for comparing the models' coefficients with respect to the true coefficients. Thereafter, we show that the estimation of such models is done by iteratively maximizing the marginal log-likelihood of the observations. In the last section we plot predictions and uncertainties for the ARD and the Bayesian Ridge regressions using a polynomial feature expansion to fit a non-linear relationship between `X` and `y`. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Models robustness to recover the ground truth weights # ===================================================== # # Generate synthetic dataset # -------------------------- # # We generate a dataset where `X` and `y` are linearly linked: 10 of the # features of `X` will be used to generate `y`. The other features are not # useful at predicting `y`. In addition, we generate a dataset where `n_samples # == n_features`. Such a setting is challenging for an OLS model and leads # potentially to arbitrary large weights. Having a prior on the weights and a # penalty alleviates the problem. Finally, gaussian noise is added. from sklearn.datasets import make_regression X, y, true_weights = make_regression( n_samples=100, n_features=100, n_informative=10, noise=8, coef=True, random_state=42, ) # %% # Fit the regressors # ------------------ # # We now fit both Bayesian models and the OLS to later compare the models' # coefficients. 
import pandas as pd from sklearn.linear_model import ARDRegression, BayesianRidge, LinearRegression olr = LinearRegression().fit(X, y) brr = BayesianRidge(compute_score=True, max_iter=30).fit(X, y) ard = ARDRegression(compute_score=True, max_iter=30).fit(X, y) df = pd.DataFrame( { "Weights of true generative process": true_weights, "ARDRegression": ard.coef_, "BayesianRidge": brr.coef_, "LinearRegression": olr.coef_, } ) # %% # Plot the true and estimated coefficients # ---------------------------------------- # # Now we compare the coefficients of each model with the weights of # the true generative model. import matplotlib.pyplot as plt import seaborn as sns from matplotlib.colors import SymLogNorm plt.figure(figsize=(10, 6)) ax = sns.heatmap( df.T, norm=SymLogNorm(linthresh=10e-4, vmin=-80, vmax=80), cbar_kws={"label": "coefficients' values"}, cmap="seismic_r", ) plt.ylabel("linear model") plt.xlabel("coefficients") plt.tight_layout(rect=(0, 0, 1, 0.95)) _ = plt.title("Models' coefficients") # %% # Due to the added noise, none of the models recover the true weights. Indeed, # all models always have more than 10 non-zero coefficients. Compared to the OLS # estimator, the coefficients using a Bayesian Ridge regression are slightly # shifted toward zero, which stabilises them. The ARD regression provides a # sparser solution: some of the non-informative coefficients are set exactly to # zero, while shifting others closer to zero. Some non-informative coefficients # are still present and retain large values. 
# %% # Plot the marginal log-likelihood # -------------------------------- import numpy as np ard_scores = -np.array(ard.scores_) brr_scores = -np.array(brr.scores_) plt.plot(ard_scores, color="navy", label="ARD") plt.plot(brr_scores, color="red", label="BayesianRidge") plt.ylabel("Log-likelihood") plt.xlabel("Iterations") plt.xlim(1, 30) plt.legend() _ = plt.title("Models log-likelihood") # %% # Indeed, both models minimize the log-likelihood up to an arbitrary cutoff # defined by the `max_iter` parameter. # # Bayesian regressions with polynomial feature expansion # ====================================================== # Generate synthetic dataset # -------------------------- # We create a target that is a non-linear function of the input feature. # Noise following a standard uniform distribution is added. from sklearn.pipeline import make_pipeline from sklearn.preprocessing import PolynomialFeatures, StandardScaler rng = np.random.RandomState(0) n_samples = 110 # sort the data to make plotting easier later X = np.sort(-10 * rng.rand(n_samples) + 10) noise = rng.normal(0, 1, n_samples) * 1.35 y = np.sqrt(X) * np.sin(X) + noise full_data = pd.DataFrame({"input_feature": X, "target": y}) X = X.reshape((-1, 1)) # extrapolation X_plot = np.linspace(10, 10.4, 10) y_plot = np.sqrt(X_plot) * np.sin(X_plot) X_plot = np.concatenate((X, X_plot.reshape((-1, 1)))) y_plot = np.concatenate((y - noise, y_plot)) # %% # Fit the regressors # ------------------ # # Here we try a degree 10 polynomial to potentially overfit, though the bayesian # linear models regularize the size of the polynomial coefficients. As # `fit_intercept=True` by default for # :class:`~sklearn.linear_model.ARDRegression` and # :class:`~sklearn.linear_model.BayesianRidge`, then # :class:`~sklearn.preprocessing.PolynomialFeatures` should not introduce an # additional bias feature. 
By setting `return_std=True`, the bayesian regressors # return the standard deviation of the posterior distribution for the model # parameters. ard_poly = make_pipeline( PolynomialFeatures(degree=10, include_bias=False), StandardScaler(), ARDRegression(), ).fit(X, y) brr_poly = make_pipeline( PolynomialFeatures(degree=10, include_bias=False), StandardScaler(), BayesianRidge(), ).fit(X, y) y_ard, y_ard_std = ard_poly.predict(X_plot, return_std=True) y_brr, y_brr_std = brr_poly.predict(X_plot, return_std=True) # %% # Plotting polynomial regressions with std errors of the scores # ------------------------------------------------------------- ax = sns.scatterplot( data=full_data, x="input_feature", y="target", color="black", alpha=0.75 ) ax.plot(X_plot, y_plot, color="black", label="Ground Truth") ax.plot(X_plot, y_brr, color="red", label="BayesianRidge with polynomial features") ax.plot(X_plot, y_ard, color="navy", label="ARD with polynomial features") ax.fill_between( X_plot.ravel(), y_ard - y_ard_std, y_ard + y_ard_std, color="navy", alpha=0.3, ) ax.fill_between( X_plot.ravel(), y_brr - y_brr_std, y_brr + y_brr_std, color="red", alpha=0.3, ) ax.legend() _ = ax.set_title("Polynomial fit of a non-linear feature") # %% # The error bars represent one standard deviation of the predicted gaussian # distribution of the query points. Notice that the ARD regression captures the # ground truth the best when using the default parameters in both models, but # further reducing the `lambda_init` hyperparameter of the Bayesian Ridge can # reduce its bias (see example # :ref:`sphx_glr_auto_examples_linear_model_plot_bayesian_ridge_curvefit.py`). # Finally, due to the intrinsic limitations of a polynomial regression, both # models fail when extrapolating.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/linear_model/plot_huber_vs_ridge.py
examples/linear_model/plot_huber_vs_ridge.py
""" ======================================================= HuberRegressor vs Ridge on dataset with strong outliers ======================================================= Fit Ridge and HuberRegressor on a dataset with outliers. The example shows that the predictions in ridge are strongly influenced by the outliers present in the dataset. The Huber regressor is less influenced by the outliers since the model uses the linear loss for these. As the parameter epsilon is increased for the Huber regressor, the decision function approaches that of the ridge. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn.datasets import make_regression from sklearn.linear_model import HuberRegressor, Ridge # Generate toy data. rng = np.random.RandomState(0) X, y = make_regression( n_samples=20, n_features=1, random_state=0, noise=4.0, bias=100.0 ) # Add four strong outliers to the dataset. X_outliers = rng.normal(0, 0.5, size=(4, 1)) y_outliers = rng.normal(0, 2.0, size=4) X_outliers[:2, :] += X.max() + X.mean() / 4.0 X_outliers[2:, :] += X.min() - X.mean() / 4.0 y_outliers[:2] += y.min() - y.mean() / 4.0 y_outliers[2:] += y.max() + y.mean() / 4.0 X = np.vstack((X, X_outliers)) y = np.concatenate((y, y_outliers)) plt.plot(X, y, "b.") # Fit the huber regressor over a series of epsilon values. colors = ["r-", "b-", "y-", "m-"] x = np.linspace(X.min(), X.max(), 7) epsilon_values = [1, 1.5, 1.75, 1.9] for k, epsilon in enumerate(epsilon_values): huber = HuberRegressor(alpha=0.0, epsilon=epsilon) huber.fit(X, y) coef_ = huber.coef_ * x + huber.intercept_ plt.plot(x, coef_, colors[k], label="huber loss, %s" % epsilon) # Fit a ridge regressor to compare it to huber regressor. 
ridge = Ridge(alpha=0.0, random_state=0) ridge.fit(X, y) coef_ridge = ridge.coef_ coef_ = ridge.coef_ * x + ridge.intercept_ plt.plot(x, coef_, "g-", label="ridge regression") plt.title("Comparison of HuberRegressor vs Ridge") plt.xlabel("X") plt.ylabel("y") plt.legend(loc=0) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/linear_model/plot_logistic_l1_l2_sparsity.py
examples/linear_model/plot_logistic_l1_l2_sparsity.py
""" ============================================== L1 Penalty and Sparsity in Logistic Regression ============================================== Comparison of the sparsity (percentage of zero coefficients) of solutions when L1, L2 and Elastic-Net penalty are used for different values of C. We can see that large values of C give more freedom to the model. Conversely, smaller values of C constrain the model more. In the L1 penalty case, this leads to sparser solutions. As expected, the Elastic-Net penalty sparsity is between that of L1 and L2. We classify 8x8 images of digits into two classes: 0-4 against 5-9. The visualization shows coefficients of the models for varying C. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn import datasets from sklearn.linear_model import LogisticRegression from sklearn.preprocessing import StandardScaler X, y = datasets.load_digits(return_X_y=True) X = StandardScaler().fit_transform(X) # classify small against large digits y = (y > 4).astype(int) l1_ratio = 0.5 # L1 weight in the Elastic-Net regularization fig, axes = plt.subplots(3, 3) # Set regularization parameter for i, (C, axes_row) in enumerate(zip((1, 0.1, 0.01), axes)): # Increase tolerance for short training time clf_l1_LR = LogisticRegression(C=C, l1_ratio=1, tol=0.01, solver="saga") clf_l2_LR = LogisticRegression(C=C, l1_ratio=0, tol=0.01, solver="saga") clf_en_LR = LogisticRegression(C=C, l1_ratio=l1_ratio, tol=0.01, solver="saga") clf_l1_LR.fit(X, y) clf_l2_LR.fit(X, y) clf_en_LR.fit(X, y) coef_l1_LR = clf_l1_LR.coef_.ravel() coef_l2_LR = clf_l2_LR.coef_.ravel() coef_en_LR = clf_en_LR.coef_.ravel() # coef_l1_LR contains zeros due to the # L1 sparsity inducing norm sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100 sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100 sparsity_en_LR = np.mean(coef_en_LR == 0) * 100 print(f"C={C:.2f}") print(f"{'Sparsity with L1 penalty:':<40} 
{sparsity_l1_LR:.2f}%") print(f"{'Sparsity with Elastic-Net penalty:':<40} {sparsity_en_LR:.2f}%") print(f"{'Sparsity with L2 penalty:':<40} {sparsity_l2_LR:.2f}%") print(f"{'Score with L1 penalty:':<40} {clf_l1_LR.score(X, y):.2f}") print(f"{'Score with Elastic-Net penalty:':<40} {clf_en_LR.score(X, y):.2f}") print(f"{'Score with L2 penalty:':<40} {clf_l2_LR.score(X, y):.2f}") if i == 0: axes_row[0].set_title("L1 penalty") axes_row[1].set_title("Elastic-Net\nl1_ratio = %s" % l1_ratio) axes_row[2].set_title("L2 penalty") for ax, coefs in zip(axes_row, [coef_l1_LR, coef_en_LR, coef_l2_LR]): ax.imshow( np.abs(coefs.reshape(8, 8)), interpolation="nearest", cmap="binary", vmax=1, vmin=0, ) ax.set_xticks(()) ax.set_yticks(()) axes_row[0].set_ylabel(f"C = {C}") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/linear_model/plot_sgd_penalties.py
examples/linear_model/plot_sgd_penalties.py
""" ============== SGD: Penalties ============== Contours of where the penalty is equal to 1 for the three penalties L1, L2 and elastic-net. All of the above are supported by :class:`~sklearn.linear_model.SGDClassifier` and :class:`~sklearn.linear_model.SGDRegressor`. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np l1_color = "navy" l2_color = "c" elastic_net_color = "darkorange" line = np.linspace(-1.5, 1.5, 1001) xx, yy = np.meshgrid(line, line) l2 = xx**2 + yy**2 l1 = np.abs(xx) + np.abs(yy) rho = 0.5 elastic_net = rho * l1 + (1 - rho) * l2 plt.figure(figsize=(10, 10), dpi=100) ax = plt.gca() elastic_net_contour = plt.contour( xx, yy, elastic_net, levels=[1], colors=elastic_net_color ) l2_contour = plt.contour(xx, yy, l2, levels=[1], colors=l2_color) l1_contour = plt.contour(xx, yy, l1, levels=[1], colors=l1_color) ax.set_aspect("equal") ax.spines["left"].set_position("center") ax.spines["right"].set_color("none") ax.spines["bottom"].set_position("center") ax.spines["top"].set_color("none") plt.clabel( elastic_net_contour, inline=1, fontsize=18, fmt={1.0: "elastic-net"}, manual=[(-1, -1)], ) plt.clabel(l2_contour, inline=1, fontsize=18, fmt={1.0: "L2"}, manual=[(-1, -1)]) plt.clabel(l1_contour, inline=1, fontsize=18, fmt={1.0: "L1"}, manual=[(-1, -1)]) plt.tight_layout() plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/linear_model/plot_ols_ridge.py
examples/linear_model/plot_ols_ridge.py
""" =========================================== Ordinary Least Squares and Ridge Regression =========================================== 1. Ordinary Least Squares: We illustrate how to use the ordinary least squares (OLS) model, :class:`~sklearn.linear_model.LinearRegression`, on a single feature of the diabetes dataset. We train on a subset of the data, evaluate on a test set, and visualize the predictions. 2. Ordinary Least Squares and Ridge Regression Variance: We then show how OLS can have high variance when the data is sparse or noisy, by fitting on a very small synthetic sample repeatedly. Ridge regression, :class:`~sklearn.linear_model.Ridge`, reduces this variance by penalizing (shrinking) the coefficients, leading to more stable predictions. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Data Loading and Preparation # ---------------------------- # # Load the diabetes dataset. For simplicity, we only keep a single feature in the data. # Then, we split the data and target into training and test sets. from sklearn.datasets import load_diabetes from sklearn.model_selection import train_test_split X, y = load_diabetes(return_X_y=True) X = X[:, [2]] # Use only one feature X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=20, shuffle=False) # %% # Linear regression model # ----------------------- # # We create a linear regression model and fit it on the training data. Note that by # default, an intercept is added to the model. We can control this behavior by setting # the `fit_intercept` parameter. from sklearn.linear_model import LinearRegression regressor = LinearRegression().fit(X_train, y_train) # %% # Model evaluation # ---------------- # # We evaluate the model's performance on the test set using the mean squared error # and the coefficient of determination. 
from sklearn.metrics import mean_squared_error, r2_score y_pred = regressor.predict(X_test) print(f"Mean squared error: {mean_squared_error(y_test, y_pred):.2f}") print(f"Coefficient of determination: {r2_score(y_test, y_pred):.2f}") # %% # Plotting the results # -------------------- # # Finally, we visualize the results on the train and test data. import matplotlib.pyplot as plt fig, ax = plt.subplots(ncols=2, figsize=(10, 5), sharex=True, sharey=True) ax[0].scatter(X_train, y_train, label="Train data points") ax[0].plot( X_train, regressor.predict(X_train), linewidth=3, color="tab:orange", label="Model predictions", ) ax[0].set(xlabel="Feature", ylabel="Target", title="Train set") ax[0].legend() ax[1].scatter(X_test, y_test, label="Test data points") ax[1].plot(X_test, y_pred, linewidth=3, color="tab:orange", label="Model predictions") ax[1].set(xlabel="Feature", ylabel="Target", title="Test set") ax[1].legend() fig.suptitle("Linear Regression") plt.show() # %% # # OLS on this single-feature subset learns a linear function that minimizes # the mean squared error on the training data. We can see how well (or poorly) # it generalizes by looking at the R^2 score and mean squared error on the # test set. In higher dimensions, pure OLS often overfits, especially if the # data is noisy. Regularization techniques (like Ridge or Lasso) can help # reduce that. # %% # Ordinary Least Squares and Ridge Regression Variance # ---------------------------------------------------------- # # Next, we illustrate the problem of high variance more clearly by using # a tiny synthetic dataset. We sample only two data points, then repeatedly # add small Gaussian noise to them and refit both OLS and Ridge. We plot # each new line to see how much OLS can jump around, whereas Ridge remains # more stable thanks to its penalty term. 
import matplotlib.pyplot as plt import numpy as np from sklearn import linear_model X_train = np.c_[0.5, 1].T y_train = [0.5, 1] X_test = np.c_[0, 2].T np.random.seed(0) classifiers = dict( ols=linear_model.LinearRegression(), ridge=linear_model.Ridge(alpha=0.1) ) for name, clf in classifiers.items(): fig, ax = plt.subplots(figsize=(4, 3)) for _ in range(6): this_X = 0.1 * np.random.normal(size=(2, 1)) + X_train clf.fit(this_X, y_train) ax.plot(X_test, clf.predict(X_test), color="gray") ax.scatter(this_X, y_train, s=3, c="gray", marker="o", zorder=10) clf.fit(X_train, y_train) ax.plot(X_test, clf.predict(X_test), linewidth=2, color="blue") ax.scatter(X_train, y_train, s=30, c="red", marker="+", zorder=10) ax.set_title(name) ax.set_xlim(0, 2) ax.set_ylim((0, 1.6)) ax.set_xlabel("X") ax.set_ylabel("y") fig.tight_layout() plt.show() # %% # Conclusion # ---------- # # - In the first example, we applied OLS to a real dataset, showing # how a plain linear model can fit the data by minimizing the squared error # on the training set. # # - In the second example, OLS lines varied drastically each time noise # was added, reflecting its high variance when data is sparse or noisy. By # contrast, **Ridge** regression introduces a regularization term that shrinks # the coefficients, stabilizing predictions. # # Techniques like :class:`~sklearn.linear_model.Ridge` or # :class:`~sklearn.linear_model.Lasso` (which applies an L1 penalty) are both # common ways to improve generalization and reduce overfitting. A well-tuned # Ridge or Lasso often outperforms pure OLS when features are correlated, data # is noisy, or sample size is small.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/linear_model/plot_sgd_loss_functions.py
examples/linear_model/plot_sgd_loss_functions.py
""" ========================== SGD: convex loss functions ========================== A plot that compares the various convex loss functions supported by :class:`~sklearn.linear_model.SGDClassifier` . """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np def modified_huber_loss(y_true, y_pred): z = y_pred * y_true loss = -4 * z loss[z >= -1] = (1 - z[z >= -1]) ** 2 loss[z >= 1.0] = 0 return loss xmin, xmax = -4, 4 xx = np.linspace(xmin, xmax, 100) lw = 2 plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], color="gold", lw=lw, label="Zero-one loss") plt.plot(xx, np.where(xx < 1, 1 - xx, 0), color="teal", lw=lw, label="Hinge loss") plt.plot(xx, -np.minimum(xx, 0), color="yellowgreen", lw=lw, label="Perceptron loss") plt.plot(xx, np.log2(1 + np.exp(-xx)), color="cornflowerblue", lw=lw, label="Log loss") plt.plot( xx, np.where(xx < 1, 1 - xx, 0) ** 2, color="orange", lw=lw, label="Squared hinge loss", ) plt.plot( xx, modified_huber_loss(xx, 1), color="darkorchid", lw=lw, linestyle="--", label="Modified Huber loss", ) plt.ylim((0, 8)) plt.legend(loc="upper right") plt.xlabel(r"Decision function $f(x)$") plt.ylabel("$L(y=1, f(x))$") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/compose/plot_column_transformer.py
examples/compose/plot_column_transformer.py
""" ================================================== Column Transformer with Heterogeneous Data Sources ================================================== Datasets can often contain components that require different feature extraction and processing pipelines. This scenario might occur when: 1. your dataset consists of heterogeneous data types (e.g. raster images and text captions), 2. your dataset is stored in a :class:`pandas.DataFrame` and different columns require different processing pipelines. This example demonstrates how to use :class:`~sklearn.compose.ColumnTransformer` on a dataset containing different types of features. The choice of features is not particularly helpful, but serves to illustrate the technique. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import numpy as np from sklearn.compose import ColumnTransformer from sklearn.datasets import fetch_20newsgroups from sklearn.decomposition import PCA from sklearn.feature_extraction import DictVectorizer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics import classification_report from sklearn.pipeline import Pipeline from sklearn.preprocessing import FunctionTransformer from sklearn.svm import LinearSVC ############################################################################## # 20 newsgroups dataset # --------------------- # # We will use the :ref:`20 newsgroups dataset <20newsgroups_dataset>`, which # comprises posts from newsgroups on 20 topics. This dataset is split # into train and test subsets based on messages posted before and after # a specific date. We will only use posts from 2 categories to speed up running # time. 
categories = ["sci.med", "sci.space"] X_train, y_train = fetch_20newsgroups( random_state=1, subset="train", categories=categories, remove=("footers", "quotes"), return_X_y=True, ) X_test, y_test = fetch_20newsgroups( random_state=1, subset="test", categories=categories, remove=("footers", "quotes"), return_X_y=True, ) ############################################################################## # Each feature comprises meta information about that post, such as the subject, # and the body of the news post. print(X_train[0]) ############################################################################## # Creating transformers # --------------------- # # First, we would like a transformer that extracts the subject and # body of each post. Since this is a stateless transformation (does not # require state information from training data), we can define a function that # performs the data transformation then use # :class:`~sklearn.preprocessing.FunctionTransformer` to create a scikit-learn # transformer. def subject_body_extractor(posts): # construct object dtype array with two columns # first column = 'subject' and second column = 'body' features = np.empty(shape=(len(posts), 2), dtype=object) for i, text in enumerate(posts): # temporary variable `_` stores '\n\n' headers, _, body = text.partition("\n\n") # store body text in second column features[i, 1] = body prefix = "Subject:" sub = "" # save text after 'Subject:' in first column for line in headers.split("\n"): if line.startswith(prefix): sub = line[len(prefix) :] break features[i, 0] = sub return features subject_body_transformer = FunctionTransformer(subject_body_extractor) ############################################################################## # We will also create a transformer that extracts the # length of the text and the number of sentences. 
def text_stats(posts): return [{"length": len(text), "num_sentences": text.count(".")} for text in posts] text_stats_transformer = FunctionTransformer(text_stats) ############################################################################## # Classification pipeline # ----------------------- # # The pipeline below extracts the subject and body from each post using # ``SubjectBodyExtractor``, producing a (n_samples, 2) array. This array is # then used to compute standard bag-of-words features for the subject and body # as well as text length and number of sentences on the body, using # ``ColumnTransformer``. We combine them, with weights, then train a # classifier on the combined set of features. pipeline = Pipeline( [ # Extract subject & body ("subjectbody", subject_body_transformer), # Use ColumnTransformer to combine the subject and body features ( "union", ColumnTransformer( [ # bag-of-words for subject (col 0) ("subject", TfidfVectorizer(min_df=50), 0), # bag-of-words with decomposition for body (col 1) ( "body_bow", Pipeline( [ ("tfidf", TfidfVectorizer()), ("best", PCA(n_components=50, svd_solver="arpack")), ] ), 1, ), # Pipeline for pulling text stats from post's body ( "body_stats", Pipeline( [ ( "stats", text_stats_transformer, ), # returns a list of dicts ( "vect", DictVectorizer(), ), # list of dicts -> feature matrix ] ), 1, ), ], # weight above ColumnTransformer features transformer_weights={ "subject": 0.8, "body_bow": 0.5, "body_stats": 1.0, }, ), ), # Use an SVC classifier on the combined features ("svc", LinearSVC(dual=False)), ], verbose=True, ) ############################################################################## # Finally, we fit our pipeline on the training data and use it to predict # topics for ``X_test``. Performance metrics of our pipeline are then printed. pipeline.fit(X_train, y_train) y_pred = pipeline.predict(X_test) print("Classification report:\n\n{}".format(classification_report(y_test, y_pred)))
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/compose/plot_compare_reduction.py
examples/compose/plot_compare_reduction.py
""" ================================================================= Selecting dimensionality reduction with Pipeline and GridSearchCV ================================================================= This example constructs a pipeline that does dimensionality reduction followed by prediction with a support vector classifier. It demonstrates the use of ``GridSearchCV`` and ``Pipeline`` to optimize over different classes of estimators in a single CV run -- unsupervised ``PCA`` and ``NMF`` dimensionality reductions are compared to univariate feature selection during the grid search. Additionally, ``Pipeline`` can be instantiated with the ``memory`` argument to memoize the transformers within the pipeline, avoiding to fit again the same transformers over and over. Note that the use of ``memory`` to enable caching becomes interesting when the fitting of a transformer is costly. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Illustration of ``Pipeline`` and ``GridSearchCV`` ############################################################################### import matplotlib.pyplot as plt import numpy as np from sklearn.datasets import load_digits from sklearn.decomposition import NMF, PCA from sklearn.feature_selection import SelectKBest, mutual_info_classif from sklearn.model_selection import GridSearchCV from sklearn.pipeline import Pipeline from sklearn.preprocessing import MinMaxScaler from sklearn.svm import LinearSVC X, y = load_digits(return_X_y=True) pipe = Pipeline( [ ("scaling", MinMaxScaler()), # the reduce_dim stage is populated by the param_grid ("reduce_dim", "passthrough"), ("classify", LinearSVC(dual=False, max_iter=10000)), ] ) N_FEATURES_OPTIONS = [2, 4, 8] C_OPTIONS = [1, 10, 100, 1000] param_grid = [ { "reduce_dim": [PCA(iterated_power=7), NMF(max_iter=1_000)], "reduce_dim__n_components": N_FEATURES_OPTIONS, "classify__C": C_OPTIONS, }, { "reduce_dim": [SelectKBest(mutual_info_classif)], "reduce_dim__k": 
N_FEATURES_OPTIONS, "classify__C": C_OPTIONS, }, ] reducer_labels = ["PCA", "NMF", "KBest(mutual_info_classif)"] grid = GridSearchCV(pipe, n_jobs=1, param_grid=param_grid) grid.fit(X, y) # %% import pandas as pd mean_scores = np.array(grid.cv_results_["mean_test_score"]) # scores are in the order of param_grid iteration, which is alphabetical mean_scores = mean_scores.reshape(len(C_OPTIONS), -1, len(N_FEATURES_OPTIONS)) # select score for best C mean_scores = mean_scores.max(axis=0) # create a dataframe to ease plotting mean_scores = pd.DataFrame( mean_scores.T, index=N_FEATURES_OPTIONS, columns=reducer_labels ) ax = mean_scores.plot.bar() ax.set_title("Comparing feature reduction techniques") ax.set_xlabel("Reduced number of features") ax.set_ylabel("Digit classification accuracy") ax.set_ylim((0, 1)) ax.legend(loc="upper left") plt.show() # %% # Caching transformers within a ``Pipeline`` # ########################################## # # It is sometimes worthwhile storing the state of a specific transformer # since it could be used again. Using a pipeline in ``GridSearchCV`` triggers # such situations. Therefore, we use the argument ``memory`` to enable caching. # # .. warning:: # Note that this example is, however, only an illustration since for this # specific case fitting PCA is not necessarily slower than loading the # cache. Hence, use the ``memory`` constructor parameter when the fitting # of a transformer is costly. 
from shutil import rmtree from joblib import Memory # Create a temporary folder to store the transformers of the pipeline location = "cachedir" memory = Memory(location=location, verbose=10) cached_pipe = Pipeline( [("reduce_dim", PCA()), ("classify", LinearSVC(dual=False, max_iter=10000))], memory=memory, ) # This time, a cached pipeline will be used within the grid search # Delete the temporary cache before exiting memory.clear(warn=False) rmtree(location) # %% # The ``PCA`` fitting is only computed at the evaluation of the first # configuration of the ``C`` parameter of the ``LinearSVC`` classifier. The # other configurations of ``C`` will trigger the loading of the cached ``PCA`` # estimator data, leading to save processing time. Therefore, the use of # caching the pipeline using ``memory`` is highly beneficial when fitting # a transformer is costly.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/compose/plot_feature_union.py
examples/compose/plot_feature_union.py
""" ================================================= Concatenating multiple feature extraction methods ================================================= In many real-world examples, there are many ways to extract features from a dataset. Often it is beneficial to combine several methods to obtain good performance. This example shows how to use ``FeatureUnion`` to combine features obtained by PCA and univariate selection. Combining features using this transformer has the benefit that it allows cross validation and grid searches over the whole process. The combination used in this example is not particularly helpful on this dataset and is only used to illustrate the usage of FeatureUnion. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from sklearn.datasets import load_iris from sklearn.decomposition import PCA from sklearn.feature_selection import SelectKBest from sklearn.model_selection import GridSearchCV from sklearn.pipeline import FeatureUnion, Pipeline from sklearn.svm import SVC iris = load_iris() X, y = iris.data, iris.target # This dataset is way too high-dimensional. Better do PCA: pca = PCA(n_components=2) # Maybe some original features were good, too? selection = SelectKBest(k=1) # Build estimator from PCA and Univariate selection: combined_features = FeatureUnion([("pca", pca), ("univ_select", selection)]) # Use combined features to transform dataset: X_features = combined_features.fit(X, y).transform(X) print("Combined space has", X_features.shape[1], "features") svm = SVC(kernel="linear") # Do grid search over k, n_components and C: pipeline = Pipeline([("features", combined_features), ("svm", svm)]) param_grid = dict( features__pca__n_components=[1, 2, 3], features__univ_select__k=[1, 2], svm__C=[0.1, 1, 10], ) grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=10) grid_search.fit(X, y) print(grid_search.best_estimator_)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/compose/plot_transformed_target.py
examples/compose/plot_transformed_target.py
""" ====================================================== Effect of transforming the targets in regression model ====================================================== In this example, we give an overview of :class:`~sklearn.compose.TransformedTargetRegressor`. We use two examples to illustrate the benefit of transforming the targets before learning a linear regression model. The first example uses synthetic data while the second example is based on the Ames housing data set. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Synthetic example # ################# # # A synthetic random regression dataset is generated. The targets ``y`` are # modified by: # # 1. translating all targets such that all entries are # non-negative (by adding the absolute value of the lowest ``y``) and # 2. applying an exponential function to obtain non-linear # targets which cannot be fitted using a simple linear model. # # Therefore, a logarithmic (`np.log1p`) and an exponential function # (`np.expm1`) will be used to transform the targets before training a linear # regression model and using it for prediction. import numpy as np from sklearn.datasets import make_regression X, y = make_regression(n_samples=10_000, noise=100, random_state=0) y = np.expm1((y + abs(y.min())) / 200) y_trans = np.log1p(y) # %% # Below we plot the probability density functions of the target # before and after applying the logarithmic functions. 
import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split f, (ax0, ax1) = plt.subplots(1, 2) ax0.hist(y, bins=100, density=True) ax0.set_xlim([0, 2000]) ax0.set_ylabel("Probability") ax0.set_xlabel("Target") ax0.set_title("Target distribution") ax1.hist(y_trans, bins=100, density=True) ax1.set_ylabel("Probability") ax1.set_xlabel("Target") ax1.set_title("Transformed target distribution") f.suptitle("Synthetic data", y=1.05) plt.tight_layout() X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) # %% # At first, a linear model will be applied on the original targets. Due to the # non-linearity, the model trained will not be precise during # prediction. Subsequently, a logarithmic function is used to linearize the # targets, allowing better prediction even with a similar linear model as # reported by the median absolute error (MedAE). from sklearn.metrics import median_absolute_error, r2_score def compute_score(y_true, y_pred): return { "R2": f"{r2_score(y_true, y_pred):.3f}", "MedAE": f"{median_absolute_error(y_true, y_pred):.3f}", } # %% from sklearn.compose import TransformedTargetRegressor from sklearn.linear_model import RidgeCV from sklearn.metrics import PredictionErrorDisplay f, (ax0, ax1) = plt.subplots(1, 2, sharey=True) ridge_cv = RidgeCV().fit(X_train, y_train) y_pred_ridge = ridge_cv.predict(X_test) ridge_cv_with_trans_target = TransformedTargetRegressor( regressor=RidgeCV(), func=np.log1p, inverse_func=np.expm1 ).fit(X_train, y_train) y_pred_ridge_with_trans_target = ridge_cv_with_trans_target.predict(X_test) PredictionErrorDisplay.from_predictions( y_test, y_pred_ridge, kind="actual_vs_predicted", ax=ax0, scatter_kwargs={"alpha": 0.5}, ) PredictionErrorDisplay.from_predictions( y_test, y_pred_ridge_with_trans_target, kind="actual_vs_predicted", ax=ax1, scatter_kwargs={"alpha": 0.5}, ) # Add the score in the legend of each axis for ax, y_pred in zip([ax0, ax1], [y_pred_ridge, 
y_pred_ridge_with_trans_target]): for name, score in compute_score(y_test, y_pred).items(): ax.plot([], [], " ", label=f"{name}={score}") ax.legend(loc="upper left") ax0.set_title("Ridge regression \n without target transformation") ax1.set_title("Ridge regression \n with target transformation") f.suptitle("Synthetic data", y=1.05) plt.tight_layout() # %% # Real-world data set # ################### # # In a similar manner, the Ames housing data set is used to show the impact # of transforming the targets before learning a model. In this example, the # target to be predicted is the selling price of each house. from sklearn.datasets import fetch_openml from sklearn.preprocessing import quantile_transform ames = fetch_openml(name="house_prices", as_frame=True) # Keep only numeric columns X = ames.data.select_dtypes(np.number) # Remove columns with NaN or Inf values X = X.drop(columns=["LotFrontage", "GarageYrBlt", "MasVnrArea"]) # Let the price be in k$ y = ames.target / 1000 y_trans = quantile_transform( y.to_frame(), n_quantiles=900, output_distribution="normal", copy=True ).squeeze() # %% # A :class:`~sklearn.preprocessing.QuantileTransformer` is used to normalize # the target distribution before applying a # :class:`~sklearn.linear_model.RidgeCV` model. f, (ax0, ax1) = plt.subplots(1, 2) ax0.hist(y, bins=100, density=True) ax0.set_ylabel("Probability") ax0.set_xlabel("Target") ax0.set_title("Target distribution") ax1.hist(y_trans, bins=100, density=True) ax1.set_ylabel("Probability") ax1.set_xlabel("Target") ax1.set_title("Transformed target distribution") f.suptitle("Ames housing data: selling price", y=1.05) plt.tight_layout() # %% X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1) # %% # The effect of the transformer is weaker than on the synthetic data. However, # the transformation results in an increase in :math:`R^2` and large decrease # of the MedAE. 
The residual plot (predicted target - true target vs predicted # target) without target transformation takes on a curved, 'reverse smile' # shape due to residual values that vary depending on the value of predicted # target. With target transformation, the shape is more linear indicating # better model fit. from sklearn.preprocessing import QuantileTransformer f, (ax0, ax1) = plt.subplots(2, 2, sharey="row", figsize=(6.5, 8)) ridge_cv = RidgeCV().fit(X_train, y_train) y_pred_ridge = ridge_cv.predict(X_test) ridge_cv_with_trans_target = TransformedTargetRegressor( regressor=RidgeCV(), transformer=QuantileTransformer(n_quantiles=900, output_distribution="normal"), ).fit(X_train, y_train) y_pred_ridge_with_trans_target = ridge_cv_with_trans_target.predict(X_test) # plot the actual vs predicted values PredictionErrorDisplay.from_predictions( y_test, y_pred_ridge, kind="actual_vs_predicted", ax=ax0[0], scatter_kwargs={"alpha": 0.5}, ) PredictionErrorDisplay.from_predictions( y_test, y_pred_ridge_with_trans_target, kind="actual_vs_predicted", ax=ax0[1], scatter_kwargs={"alpha": 0.5}, ) # Add the score in the legend of each axis for ax, y_pred in zip([ax0[0], ax0[1]], [y_pred_ridge, y_pred_ridge_with_trans_target]): for name, score in compute_score(y_test, y_pred).items(): ax.plot([], [], " ", label=f"{name}={score}") ax.legend(loc="upper left") ax0[0].set_title("Ridge regression \n without target transformation") ax0[1].set_title("Ridge regression \n with target transformation") # plot the residuals vs the predicted values PredictionErrorDisplay.from_predictions( y_test, y_pred_ridge, kind="residual_vs_predicted", ax=ax1[0], scatter_kwargs={"alpha": 0.5}, ) PredictionErrorDisplay.from_predictions( y_test, y_pred_ridge_with_trans_target, kind="residual_vs_predicted", ax=ax1[1], scatter_kwargs={"alpha": 0.5}, ) ax1[0].set_title("Ridge regression \n without target transformation") ax1[1].set_title("Ridge regression \n with target transformation") f.suptitle("Ames housing 
data: selling price", y=1.05) plt.tight_layout() plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/compose/plot_digits_pipe.py
examples/compose/plot_digits_pipe.py
""" ========================================================= Pipelining: chaining a PCA and a logistic regression ========================================================= The PCA does an unsupervised dimensionality reduction, while the logistic regression does the prediction. We use a GridSearchCV to set the dimensionality of the PCA """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np import polars as pl from sklearn import datasets from sklearn.decomposition import PCA from sklearn.linear_model import LogisticRegression from sklearn.model_selection import GridSearchCV from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler # Define a pipeline to search for the best combination of PCA truncation # and classifier regularization. pca = PCA() # Define a Standard Scaler to normalize inputs scaler = StandardScaler() # set the tolerance to a large value to make the example faster logistic = LogisticRegression(max_iter=10000, tol=0.1) pipe = Pipeline(steps=[("scaler", scaler), ("pca", pca), ("logistic", logistic)]) X_digits, y_digits = datasets.load_digits(return_X_y=True) # Parameters of pipelines can be set using '__' separated parameter names: param_grid = { "pca__n_components": [5, 15, 30, 45, 60], "logistic__C": np.logspace(-4, 4, 4), } search = GridSearchCV(pipe, param_grid, n_jobs=2) search.fit(X_digits, y_digits) print("Best parameter (CV score=%0.3f):" % search.best_score_) print(search.best_params_) # Plot the PCA spectrum pca.fit(X_digits) fig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True, figsize=(6, 6)) ax0.plot( np.arange(1, pca.n_components_ + 1), pca.explained_variance_ratio_, "+", linewidth=2 ) ax0.set_ylabel("PCA explained variance ratio") ax0.axvline( search.best_estimator_.named_steps["pca"].n_components, linestyle=":", label="n_components chosen", ) ax0.legend(prop=dict(size=12)) # For each number of components, find the best classifier 
results components_col = "param_pca__n_components" is_max_test_score = pl.col("mean_test_score") == pl.col("mean_test_score").max() best_clfs = ( pl.LazyFrame(search.cv_results_) .filter(is_max_test_score.over(components_col)) .unique(components_col) .sort(components_col) .collect() ) ax1.errorbar( best_clfs[components_col], best_clfs["mean_test_score"], yerr=best_clfs["std_test_score"], ) ax1.set_ylabel("Classification accuracy (val)") ax1.set_xlabel("n_components") plt.xlim(-1, 70) plt.tight_layout() plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/compose/plot_column_transformer_mixed_types.py
examples/compose/plot_column_transformer_mixed_types.py
""" =================================== Column Transformer with Mixed Types =================================== .. currentmodule:: sklearn This example illustrates how to apply different preprocessing and feature extraction pipelines to different subsets of features, using :class:`~compose.ColumnTransformer`. This is particularly handy for the case of datasets that contain heterogeneous data types, since we may want to scale the numeric features and one-hot encode the categorical ones. In this example, the numeric data is standard-scaled after mean-imputation. The categorical data is one-hot encoded via ``OneHotEncoder``, which creates a new category for missing values. We further reduce the dimensionality by selecting categories using a chi-squared test. In addition, we show two different ways to dispatch the columns to the particular pre-processor: by column names and by column data types. Finally, the preprocessing pipeline is integrated in a full prediction pipeline using :class:`~pipeline.Pipeline`, together with a simple classification model. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% import numpy as np from sklearn.compose import ColumnTransformer from sklearn.datasets import fetch_openml from sklearn.feature_selection import SelectPercentile, chi2 from sklearn.impute import SimpleImputer from sklearn.linear_model import LogisticRegression from sklearn.model_selection import RandomizedSearchCV, train_test_split from sklearn.pipeline import Pipeline from sklearn.preprocessing import OneHotEncoder, StandardScaler np.random.seed(0) # %% # Load data from https://www.openml.org/d/40945 X, y = fetch_openml("titanic", version=1, as_frame=True, return_X_y=True) # Alternatively X and y can be obtained directly from the frame attribute: # X = titanic.frame.drop('survived', axis=1) # y = titanic.frame['survived'] # %% # Use ``ColumnTransformer`` by selecting column by names # # We will train our classifier with the following features: # # Numeric Features: # # * ``age``: float; # * ``fare``: float. # # Categorical Features: # # * ``embarked``: categories encoded as strings ``{'C', 'S', 'Q'}``; # * ``sex``: categories encoded as strings ``{'female', 'male'}``; # * ``pclass``: ordinal integers ``{1, 2, 3}``. # # We create the preprocessing pipelines for both numeric and categorical data. # Note that ``pclass`` could either be treated as a categorical or numeric # feature. numeric_features = ["age", "fare"] numeric_transformer = Pipeline( steps=[("imputer", SimpleImputer(strategy="median")), ("scaler", StandardScaler())] ) categorical_features = ["embarked", "sex", "pclass"] categorical_transformer = Pipeline( steps=[ ("encoder", OneHotEncoder(handle_unknown="ignore")), ("selector", SelectPercentile(chi2, percentile=50)), ] ) preprocessor = ColumnTransformer( transformers=[ ("num", numeric_transformer, numeric_features), ("cat", categorical_transformer, categorical_features), ] ) # %% # Append classifier to preprocessing pipeline. # Now we have a full prediction pipeline. 
clf = Pipeline( steps=[("preprocessor", preprocessor), ("classifier", LogisticRegression())] ) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0) clf.fit(X_train, y_train) print("model score: %.3f" % clf.score(X_test, y_test)) # %% # HTML representation of ``Pipeline`` (display diagram) # # When the ``Pipeline`` is printed out in a jupyter notebook an HTML # representation of the estimator is displayed: clf # %% # Use ``ColumnTransformer`` by selecting column by data types # # When dealing with a cleaned dataset, the preprocessing can be automatic by # using the data types of the column to decide whether to treat a column as a # numerical or categorical feature. # :func:`sklearn.compose.make_column_selector` gives this possibility. # First, let's only select a subset of columns to simplify our # example. subset_feature = ["embarked", "sex", "pclass", "age", "fare"] X_train, X_test = X_train[subset_feature], X_test[subset_feature] # %% # Then, we introspect the information regarding each column data type. X_train.info() # %% # We can observe that the `embarked` and `sex` columns were tagged as # `category` columns when loading the data with ``fetch_openml``. Therefore, we # can use this information to dispatch the categorical columns to the # ``categorical_transformer`` and the remaining columns to the # ``numerical_transformer``. # %% # .. note:: In practice, you will have to handle yourself the column data type. # If you want some columns to be considered as `category`, you will have to # convert them into categorical columns. If you are using pandas, you can # refer to their documentation regarding `Categorical data # <https://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html>`_. 
from sklearn.compose import make_column_selector as selector preprocessor = ColumnTransformer( transformers=[ ("num", numeric_transformer, selector(dtype_exclude="category")), ("cat", categorical_transformer, selector(dtype_include="category")), ] ) clf = Pipeline( steps=[("preprocessor", preprocessor), ("classifier", LogisticRegression())] ) clf.fit(X_train, y_train) print("model score: %.3f" % clf.score(X_test, y_test)) clf # %% # The resulting score is not exactly the same as the one from the previous # pipeline because the dtype-based selector treats the ``pclass`` column as # a numeric feature instead of a categorical feature as previously: selector(dtype_exclude="category")(X_train) # %% selector(dtype_include="category")(X_train) # %% # Using the prediction pipeline in a grid search # # Grid search can also be performed on the different preprocessing steps # defined in the ``ColumnTransformer`` object, together with the classifier's # hyperparameters as part of the ``Pipeline``. # We will search for both the imputer strategy of the numeric preprocessing # and the regularization parameter of the logistic regression using # :class:`~sklearn.model_selection.RandomizedSearchCV`. This # hyperparameter search randomly selects a fixed number of parameter # settings configured by `n_iter`. Alternatively, one can use # :class:`~sklearn.model_selection.GridSearchCV` but the cartesian product of # the parameter space will be evaluated. 
param_grid = { "preprocessor__num__imputer__strategy": ["mean", "median"], "preprocessor__cat__selector__percentile": [10, 30, 50, 70], "classifier__C": [0.1, 1.0, 10, 100], } search_cv = RandomizedSearchCV(clf, param_grid, n_iter=10, random_state=0) search_cv # %% # Calling 'fit' triggers the cross-validated search for the best # hyper-parameters combination: # search_cv.fit(X_train, y_train) print("Best params:") print(search_cv.best_params_) # %% # The internal cross-validation scores obtained by those parameters is: print(f"Internal CV score: {search_cv.best_score_:.3f}") # %% # We can also introspect the top grid search results as a pandas dataframe: import pandas as pd cv_results = pd.DataFrame(search_cv.cv_results_) cv_results = cv_results.sort_values("mean_test_score", ascending=False) cv_results[ [ "mean_test_score", "std_test_score", "param_preprocessor__num__imputer__strategy", "param_preprocessor__cat__selector__percentile", "param_classifier__C", ] ].head(5) # %% # The best hyper-parameters have be used to re-fit a final model on the full # training set. We can evaluate that final model on held out test data that was # not used for hyperparameter tuning. # print( "accuracy of the best model from randomized search: " f"{search_cv.score(X_test, y_test):.3f}" )
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/inspection/plot_permutation_importance_multicollinear.py
examples/inspection/plot_permutation_importance_multicollinear.py
""" ================================================================= Permutation Importance with Multicollinear or Correlated Features ================================================================= In this example, we compute the :func:`~sklearn.inspection.permutation_importance` of the features to a trained :class:`~sklearn.ensemble.RandomForestClassifier` using the :ref:`breast_cancer_dataset`. The model can easily get about 97% accuracy on a test dataset. Because this dataset contains multicollinear features, the permutation importance shows that none of the features are important, in contradiction with the high test accuracy. We demo a possible approach to handling multicollinearity, which consists of hierarchical clustering on the features' Spearman rank-order correlations, picking a threshold, and keeping a single feature from each cluster. .. note:: See also :ref:`sphx_glr_auto_examples_inspection_plot_permutation_importance.py` """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Random Forest Feature Importance on Breast Cancer Data # ------------------------------------------------------ # # First, we define a function to ease the plotting: import matplotlib from sklearn.inspection import permutation_importance from sklearn.utils.fixes import parse_version def plot_permutation_importance(clf, X, y, ax): result = permutation_importance(clf, X, y, n_repeats=10, random_state=42, n_jobs=2) perm_sorted_idx = result.importances_mean.argsort() # `labels` argument in boxplot is deprecated in matplotlib 3.9 and has been # renamed to `tick_labels`. The following code handles this, but as a # scikit-learn user you probably can write simpler code by using `labels=...` # (matplotlib < 3.9) or `tick_labels=...` (matplotlib >= 3.9). 
tick_labels_parameter_name = ( "tick_labels" if parse_version(matplotlib.__version__) >= parse_version("3.9") else "labels" ) tick_labels_dict = {tick_labels_parameter_name: X.columns[perm_sorted_idx]} ax.boxplot(result.importances[perm_sorted_idx].T, vert=False, **tick_labels_dict) ax.axvline(x=0, color="k", linestyle="--") return ax # %% # We then train a :class:`~sklearn.ensemble.RandomForestClassifier` on the # :ref:`breast_cancer_dataset` and evaluate its accuracy on a test set: from sklearn.datasets import load_breast_cancer from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import train_test_split X, y = load_breast_cancer(return_X_y=True, as_frame=True) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) clf = RandomForestClassifier(n_estimators=100, random_state=42) clf.fit(X_train, y_train) print(f"Baseline accuracy on test data: {clf.score(X_test, y_test):.2}") # %% # Next, we plot the tree based feature importance and the permutation # importance. The permutation importance is calculated on the training set to # show how much the model relies on each feature during training. import matplotlib.pyplot as plt import numpy as np import pandas as pd mdi_importances = pd.Series(clf.feature_importances_, index=X_train.columns) tree_importance_sorted_idx = np.argsort(clf.feature_importances_) fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 8)) mdi_importances.sort_values().plot.barh(ax=ax1) ax1.set_xlabel("Gini importance") plot_permutation_importance(clf, X_train, y_train, ax2) ax2.set_xlabel("Decrease in accuracy score") fig.suptitle( "Impurity-based vs. permutation importances on multicollinear features (train set)" ) _ = fig.tight_layout() # %% # The plot on the left shows the Gini importance of the model. 
As the # scikit-learn implementation of # :class:`~sklearn.ensemble.RandomForestClassifier` uses a random subsets of # :math:`\sqrt{n_\text{features}}` features at each split, it is able to dilute # the dominance of any single correlated feature. As a result, the individual # feature importance may be distributed more evenly among the correlated # features. Since the features have large cardinality and the classifier is # non-overfitted, we can relatively trust those values. # # The permutation importance on the right plot shows that permuting a feature # drops the accuracy by at most `0.012`, which would suggest that none of the # features are important. This is in contradiction with the high test accuracy # computed as baseline: some feature must be important. # # Similarly, the change in accuracy score computed on the test set appears to be # driven by chance: fig, ax = plt.subplots(figsize=(7, 6)) plot_permutation_importance(clf, X_test, y_test, ax) ax.set_title("Permutation Importances on multicollinear features\n(test set)") ax.set_xlabel("Decrease in accuracy score") _ = ax.figure.tight_layout() # %% # Nevertheless, one can still compute a meaningful permutation importance in the # presence of correlated features, as demonstrated in the following section. # # Handling Multicollinear Features # -------------------------------- # When features are collinear, permuting one feature has little effect on the # models performance because it can get the same information from a correlated # feature. Note that this is not the case for all predictive models and depends # on their underlying implementation. # # One way to handle multicollinear features is by performing hierarchical # clustering on the Spearman rank-order correlations, picking a threshold, and # keeping a single feature from each cluster. 
First, we plot a heatmap of the # correlated features: from scipy.cluster import hierarchy from scipy.spatial.distance import squareform from scipy.stats import spearmanr fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 8)) corr = spearmanr(X).correlation # Ensure the correlation matrix is symmetric corr = (corr + corr.T) / 2 np.fill_diagonal(corr, 1) # We convert the correlation matrix to a distance matrix before performing # hierarchical clustering using Ward's linkage. distance_matrix = 1 - np.abs(corr) dist_linkage = hierarchy.ward(squareform(distance_matrix)) dendro = hierarchy.dendrogram( dist_linkage, labels=X.columns.to_list(), ax=ax1, leaf_rotation=90 ) dendro_idx = np.arange(0, len(dendro["ivl"])) ax2.imshow(corr[dendro["leaves"], :][:, dendro["leaves"]]) ax2.set_xticks(dendro_idx) ax2.set_yticks(dendro_idx) ax2.set_xticklabels(dendro["ivl"], rotation="vertical") ax2.set_yticklabels(dendro["ivl"]) _ = fig.tight_layout() # %% # Next, we manually pick a threshold by visual inspection of the dendrogram to # group our features into clusters and choose a feature from each cluster to # keep, select those features from our dataset, and train a new random forest. # The test accuracy of the new random forest did not change much compared to the # random forest trained on the complete dataset. 
from collections import defaultdict cluster_ids = hierarchy.fcluster(dist_linkage, 1, criterion="distance") cluster_id_to_feature_ids = defaultdict(list) for idx, cluster_id in enumerate(cluster_ids): cluster_id_to_feature_ids[cluster_id].append(idx) selected_features = [v[0] for v in cluster_id_to_feature_ids.values()] selected_features_names = X.columns[selected_features] X_train_sel = X_train[selected_features_names] X_test_sel = X_test[selected_features_names] clf_sel = RandomForestClassifier(n_estimators=100, random_state=42) clf_sel.fit(X_train_sel, y_train) print( "Baseline accuracy on test data with features removed:" f" {clf_sel.score(X_test_sel, y_test):.2}" ) # %% # We can finally explore the permutation importance of the selected subset of # features: fig, ax = plt.subplots(figsize=(7, 6)) plot_permutation_importance(clf_sel, X_test_sel, y_test, ax) ax.set_title("Permutation Importances on selected subset of features\n(test set)") ax.set_xlabel("Decrease in accuracy score") ax.figure.tight_layout() plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/inspection/plot_linear_model_coefficient_interpretation.py
examples/inspection/plot_linear_model_coefficient_interpretation.py
""" ====================================================================== Common pitfalls in the interpretation of coefficients of linear models ====================================================================== In linear models, the target value is modeled as a linear combination of the features (see the :ref:`linear_model` User Guide section for a description of a set of linear models available in scikit-learn). Coefficients in multiple linear models represent the relationship between the given feature, :math:`X_i` and the target, :math:`y`, assuming that all the other features remain constant (`conditional dependence <https://en.wikipedia.org/wiki/Conditional_dependence>`_). This is different from plotting :math:`X_i` versus :math:`y` and fitting a linear relationship: in that case all possible values of the other features are taken into account in the estimation (marginal dependence). This example will provide some hints in interpreting coefficient in linear models, pointing at problems that arise when either the linear model is not appropriate to describe the dataset, or when features are correlated. .. note:: Keep in mind that the features :math:`X` and the outcome :math:`y` are in general the result of a data generating process that is unknown to us. Machine learning models are trained to approximate the unobserved mathematical function that links :math:`X` to :math:`y` from sample data. As a result, any interpretation made about a model may not necessarily generalize to the true data generating process. This is especially true when the model is of bad quality or when the sample data is not representative of the population. We will use data from the `"Current Population Survey" <https://www.openml.org/d/534>`_ from 1985 to predict wage as a function of various features such as experience, age, or education. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% import matplotlib.pyplot as plt import numpy as np import pandas as pd import scipy as sp import seaborn as sns # %% # The dataset: wages # ------------------ # # We fetch the data from `OpenML <http://openml.org/>`_. # Note that setting the parameter `as_frame` to True will retrieve the data # as a pandas dataframe. from sklearn.datasets import fetch_openml survey = fetch_openml(data_id=534, as_frame=True) # %% # Then, we identify features `X` and target `y`: the column WAGE is our # target variable (i.e. the variable which we want to predict). X = survey.data[survey.feature_names] X.describe(include="all") # %% # Note that the dataset contains categorical and numerical variables. # We will need to take this into account when preprocessing the dataset # thereafter. X.head() # %% # Our target for prediction: the wage. # Wages are described as floating-point number in dollars per hour. # %% y = survey.target.values.ravel() survey.target.head() # %% # We split the sample into a train and a test dataset. # Only the train dataset will be used in the following exploratory analysis. # This is a way to emulate a real situation where predictions are performed on # an unknown target, and we don't want our analysis and decisions to be biased # by our knowledge of the test data. from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42) # %% # First, let's get some insights by looking at the variables' distributions and # at the pairwise relationships between them. Only numerical # variables will be used. In the following plot, each dot represents a sample. # # .. _marginal_dependencies: train_dataset = X_train.copy() train_dataset.insert(0, "WAGE", y_train) _ = sns.pairplot(train_dataset, kind="reg", diag_kind="kde") # %% # Looking closely at the WAGE distribution reveals that it has a # long tail. 
For this reason, we should take its logarithm # to turn it approximately into a normal distribution (linear models such # as ridge or lasso work best for a normal distribution of error). # # The WAGE is increasing when EDUCATION is increasing. # Note that the dependence between WAGE and EDUCATION # represented here is a marginal dependence, i.e. it describes the behavior # of a specific variable without keeping the others fixed. # # Also, the EXPERIENCE and AGE are strongly linearly correlated. # # .. _the-pipeline: # # The machine-learning pipeline # ----------------------------- # # To design our machine-learning pipeline, we first manually # check the type of data that we are dealing with: survey.data.info() # %% # As seen previously, the dataset contains columns with different data types # and we need to apply a specific preprocessing for each data types. # In particular categorical variables cannot be included in linear model if not # coded as integers first. In addition, to avoid categorical features to be # treated as ordered values, we need to one-hot-encode them. # Our pre-processor will: # # - one-hot encode (i.e., generate a column by category) the categorical # columns, only for non-binary categorical variables; # - as a first approach (we will see after how the normalisation of numerical # values will affect our discussion), keep numerical values as they are. from sklearn.compose import make_column_transformer from sklearn.preprocessing import OneHotEncoder categorical_columns = ["RACE", "OCCUPATION", "SECTOR", "MARR", "UNION", "SEX", "SOUTH"] numerical_columns = ["EDUCATION", "EXPERIENCE", "AGE"] preprocessor = make_column_transformer( (OneHotEncoder(drop="if_binary"), categorical_columns), remainder="passthrough", verbose_feature_names_out=False, # avoid to prepend the preprocessor names ) # %% # We use a ridge regressor # with a very small regularization to model the logarithm of the WAGE. 
from sklearn.compose import TransformedTargetRegressor from sklearn.linear_model import Ridge from sklearn.pipeline import make_pipeline model = make_pipeline( preprocessor, TransformedTargetRegressor( regressor=Ridge(alpha=1e-10), func=np.log10, inverse_func=sp.special.exp10 ), ) # %% # Processing the dataset # ---------------------- # # First, we fit the model. model.fit(X_train, y_train) # %% # Then we check the performance of the computed model by plotting its predictions # against the actual values on the test set, and by computing # the median absolute error. from sklearn.metrics import PredictionErrorDisplay, median_absolute_error mae_train = median_absolute_error(y_train, model.predict(X_train)) y_pred = model.predict(X_test) mae_test = median_absolute_error(y_test, y_pred) scores = { "MedAE on training set": f"{mae_train:.2f} $/hour", "MedAE on testing set": f"{mae_test:.2f} $/hour", } # %% _, ax = plt.subplots(figsize=(5, 5)) display = PredictionErrorDisplay.from_predictions( y_test, y_pred, kind="actual_vs_predicted", ax=ax, scatter_kwargs={"alpha": 0.5} ) ax.set_title("Ridge model, small regularization") for name, score in scores.items(): ax.plot([], [], " ", label=f"{name}: {score}") ax.legend(loc="upper left") plt.tight_layout() # %% # The model learnt is far from being a good model making accurate predictions: # this is obvious when looking at the plot above, where good predictions # should lie on the black dashed line. # # In the following section, we will interpret the coefficients of the model. # While we do so, we should keep in mind that any conclusion we draw is # about the model that we build, rather than about the true (real-world) # generative process of the data. # # Interpreting coefficients: scale matters # ---------------------------------------- # # First of all, we can take a look to the values of the coefficients of the # regressor we have fitted. 
feature_names = model[:-1].get_feature_names_out() coefs = pd.DataFrame( model[-1].regressor_.coef_, columns=["Coefficients"], index=feature_names, ) coefs # %% # The AGE coefficient is expressed in "dollars/hour per living years" while the # EDUCATION one is expressed in "dollars/hour per years of education". This # representation of the coefficients has the benefit of making clear the # practical predictions of the model: an increase of :math:`1` year in AGE # means a decrease of :math:`0.030867` dollars/hour, while an increase of # :math:`1` year in EDUCATION means an increase of :math:`0.054699` # dollars/hour. On the other hand, categorical variables (as UNION or SEX) are # adimensional numbers taking either the value 0 or 1. Their coefficients # are expressed in dollars/hour. Then, we cannot compare the magnitude of # different coefficients since the features have different natural scales, and # hence value ranges, because of their different unit of measure. This is more # visible if we plot the coefficients. coefs.plot.barh(figsize=(9, 7)) plt.title("Ridge model, small regularization") plt.axvline(x=0, color=".5") plt.xlabel("Raw coefficient values") plt.subplots_adjust(left=0.3) # %% # Indeed, from the plot above the most important factor in determining WAGE # appears to be the # variable UNION, even if our intuition might tell us that variables # like EXPERIENCE should have more impact. # # Looking at the coefficient plot to gauge feature importance can be # misleading as some of them vary on a small scale, while others, like AGE, # varies a lot more, several decades. # # This is visible if we compare the standard deviations of different # features. X_train_preprocessed = pd.DataFrame( model[:-1].transform(X_train), columns=feature_names ) X_train_preprocessed.std(axis=0).plot.barh(figsize=(9, 7)) plt.title("Feature ranges") plt.xlabel("Std. dev. 
of feature values") plt.subplots_adjust(left=0.3) # %% # Multiplying the coefficients by the standard deviation of the related # feature would reduce all the coefficients to the same unit of measure. # As we will see :ref:`after<scaling_num>` this is equivalent to normalize # numerical variables to their standard deviation, # as :math:`y = \sum{coef_i \times X_i} = # \sum{(coef_i \times std_i) \times (X_i / std_i)}`. # # In that way, we emphasize that the # greater the variance of a feature, the larger the weight of the corresponding # coefficient on the output, all else being equal. coefs = pd.DataFrame( model[-1].regressor_.coef_ * X_train_preprocessed.std(axis=0), columns=["Coefficient importance"], index=feature_names, ) coefs.plot(kind="barh", figsize=(9, 7)) plt.xlabel("Coefficient values corrected by the feature's std. dev.") plt.title("Ridge model, small regularization") plt.axvline(x=0, color=".5") plt.subplots_adjust(left=0.3) # %% # Now that the coefficients have been scaled, we can safely compare them. # # .. note:: # # Why does the plot above suggest that an increase in age leads to a # decrease in wage? Why is the :ref:`initial pairplot # <marginal_dependencies>` telling the opposite? # This difference is the difference between marginal and conditional dependence. # # The plot above tells us about dependencies between a specific feature and # the target when all other features remain constant, i.e., **conditional # dependencies**. An increase of the AGE will induce a decrease # of the WAGE when all other features remain constant. On the contrary, an # increase of the EXPERIENCE will induce an increase of the WAGE when all # other features remain constant. # Also, AGE, EXPERIENCE and EDUCATION are the three variables that most # influence the model. 
# # Interpreting coefficients: being cautious about causality # --------------------------------------------------------- # # Linear models are a great tool for measuring statistical association, but we # should be cautious when making statements about causality, after all # correlation doesn't always imply causation. This is particularly difficult in # the social sciences because the variables we observe only function as proxies # for the underlying causal process. # # In our particular case we can think of the EDUCATION of an individual as a # proxy for their professional aptitude, the real variable we're interested in # but can't observe. We'd certainly like to think that staying in school for # longer would increase technical competency, but it's also quite possible that # causality goes the other way too. That is, those who are technically # competent tend to stay in school for longer. # # An employer is unlikely to care which case it is (or if it's a mix of both), # as long as they remain convinced that a person with more EDUCATION is better # suited for the job, they will be happy to pay out a higher WAGE. # # This confounding of effects becomes problematic when thinking about some # form of intervention e.g. government subsidies of university degrees or # promotional material encouraging individuals to take up higher education. # The usefulness of these measures could end up being overstated, especially if # the degree of confounding is strong. Our model predicts a :math:`0.054699` # increase in hourly wage for each year of education. The actual causal effect # might be lower because of this confounding. # # Checking the variability of the coefficients # -------------------------------------------- # # We can check the coefficient variability through cross-validation: # it is a form of data perturbation (related to # `resampling <https://en.wikipedia.org/wiki/Resampling_(statistics)>`_). 
# # If coefficients vary significantly when changing the input dataset # their robustness is not guaranteed, and they should probably be interpreted # with caution. from sklearn.model_selection import RepeatedKFold, cross_validate cv = RepeatedKFold(n_splits=5, n_repeats=5, random_state=0) cv_model = cross_validate( model, X, y, cv=cv, return_estimator=True, n_jobs=2, ) coefs = pd.DataFrame( [ est[-1].regressor_.coef_ * est[:-1].transform(X.iloc[train_idx]).std(axis=0) for est, (train_idx, _) in zip(cv_model["estimator"], cv.split(X, y)) ], columns=feature_names, ) # %% plt.figure(figsize=(9, 7)) sns.stripplot(data=coefs, orient="h", palette="dark:k", alpha=0.5) sns.boxplot(data=coefs, orient="h", color="cyan", saturation=0.5, whis=10) plt.axvline(x=0, color=".5") plt.xlabel("Coefficient importance") plt.title("Coefficient importance and its variability") plt.suptitle("Ridge model, small regularization") plt.subplots_adjust(left=0.3) # %% # The problem of correlated variables # ----------------------------------- # # The AGE and EXPERIENCE coefficients are affected by strong variability which # might be due to the collinearity between the 2 features: as AGE and # EXPERIENCE vary together in the data, their effect is difficult to tease # apart. # # To verify this interpretation we plot the variability of the AGE and # EXPERIENCE coefficient. # # .. _covariation: plt.xlabel("Age coefficient") plt.ylabel("Experience coefficient") plt.grid(True) plt.xlim(-0.4, 0.5) plt.ylim(-0.4, 0.5) plt.scatter(coefs["AGE"], coefs["EXPERIENCE"]) _ = plt.title("Co-variations of coefficients for AGE and EXPERIENCE across folds") # %% # Two regions are populated: when the EXPERIENCE coefficient is # positive the AGE one is negative and vice-versa. # # To go further we remove one of the two features, AGE, and check what is the impact # on the model stability. 
column_to_drop = ["AGE"] cv_model = cross_validate( model, X.drop(columns=column_to_drop), y, cv=cv, return_estimator=True, n_jobs=2, ) coefs = pd.DataFrame( [ est[-1].regressor_.coef_ * est[:-1].transform(X.drop(columns=column_to_drop).iloc[train_idx]).std(axis=0) for est, (train_idx, _) in zip(cv_model["estimator"], cv.split(X, y)) ], columns=feature_names[:-1], ) # %% plt.figure(figsize=(9, 7)) sns.stripplot(data=coefs, orient="h", palette="dark:k", alpha=0.5) sns.boxplot(data=coefs, orient="h", color="cyan", saturation=0.5) plt.axvline(x=0, color=".5") plt.title("Coefficient importance and its variability") plt.xlabel("Coefficient importance") plt.suptitle("Ridge model, small regularization, AGE dropped") plt.subplots_adjust(left=0.3) # %% # The estimation of the EXPERIENCE coefficient now shows a much reduced # variability. EXPERIENCE remains important for all models trained during # cross-validation. # # .. _scaling_num: # # Preprocessing numerical variables # --------------------------------- # # As said above (see ":ref:`the-pipeline`"), we could also choose to scale # numerical values before training the model. # This can be useful when we apply a similar amount of regularization to all of them # in the ridge. # The preprocessor is redefined in order to subtract the mean and scale # variables to unit variance. from sklearn.preprocessing import StandardScaler preprocessor = make_column_transformer( (OneHotEncoder(drop="if_binary"), categorical_columns), (StandardScaler(), numerical_columns), ) # %% # The model will stay unchanged. model = make_pipeline( preprocessor, TransformedTargetRegressor( regressor=Ridge(alpha=1e-10), func=np.log10, inverse_func=sp.special.exp10 ), ) model.fit(X_train, y_train) # %% # Again, we check the performance of the computed # model using the median absolute error. 
mae_train = median_absolute_error(y_train, model.predict(X_train)) y_pred = model.predict(X_test) mae_test = median_absolute_error(y_test, y_pred) scores = { "MedAE on training set": f"{mae_train:.2f} $/hour", "MedAE on testing set": f"{mae_test:.2f} $/hour", } _, ax = plt.subplots(figsize=(5, 5)) display = PredictionErrorDisplay.from_predictions( y_test, y_pred, kind="actual_vs_predicted", ax=ax, scatter_kwargs={"alpha": 0.5} ) ax.set_title("Ridge model, small regularization") for name, score in scores.items(): ax.plot([], [], " ", label=f"{name}: {score}") ax.legend(loc="upper left") plt.tight_layout() # %% # For the coefficient analysis, scaling is not needed this time because it # was performed during the preprocessing step. coefs = pd.DataFrame( model[-1].regressor_.coef_, columns=["Coefficients importance"], index=feature_names, ) coefs.plot.barh(figsize=(9, 7)) plt.title("Ridge model, small regularization, normalized variables") plt.xlabel("Raw coefficient values") plt.axvline(x=0, color=".5") plt.subplots_adjust(left=0.3) # %% # We now inspect the coefficients across several cross-validation folds. cv_model = cross_validate( model, X, y, cv=cv, return_estimator=True, n_jobs=2, ) coefs = pd.DataFrame( [est[-1].regressor_.coef_ for est in cv_model["estimator"]], columns=feature_names ) # %% plt.figure(figsize=(9, 7)) sns.stripplot(data=coefs, orient="h", palette="dark:k", alpha=0.5) sns.boxplot(data=coefs, orient="h", color="cyan", saturation=0.5, whis=10) plt.axvline(x=0, color=".5") plt.title("Coefficient variability") plt.subplots_adjust(left=0.3) # %% # The result is quite similar to the non-normalized case. # # Linear models with regularization # --------------------------------- # # In machine-learning practice, ridge regression is more often used with # non-negligible regularization. # # Above, we limited this regularization to a very little amount. Regularization # improves the conditioning of the problem and reduces the variance of the # estimates. 
:class:`~sklearn.linear_model.RidgeCV` applies cross validation # in order to determine which value of the regularization parameter (`alpha`) # is best suited for prediction. from sklearn.linear_model import RidgeCV alphas = np.logspace(-10, 10, 21) # alpha values to be chosen from by cross-validation model = make_pipeline( preprocessor, TransformedTargetRegressor( regressor=RidgeCV(alphas=alphas), func=np.log10, inverse_func=sp.special.exp10, ), ) model.fit(X_train, y_train) # %% # First we check which value of :math:`\alpha` has been selected. model[-1].regressor_.alpha_ # %% # Then we check the quality of the predictions. mae_train = median_absolute_error(y_train, model.predict(X_train)) y_pred = model.predict(X_test) mae_test = median_absolute_error(y_test, y_pred) scores = { "MedAE on training set": f"{mae_train:.2f} $/hour", "MedAE on testing set": f"{mae_test:.2f} $/hour", } _, ax = plt.subplots(figsize=(5, 5)) display = PredictionErrorDisplay.from_predictions( y_test, y_pred, kind="actual_vs_predicted", ax=ax, scatter_kwargs={"alpha": 0.5} ) ax.set_title("Ridge model, optimum regularization") for name, score in scores.items(): ax.plot([], [], " ", label=f"{name}: {score}") ax.legend(loc="upper left") plt.tight_layout() # %% # The ability to reproduce the data of the regularized model is similar to # the one of the non-regularized model. coefs = pd.DataFrame( model[-1].regressor_.coef_, columns=["Coefficients importance"], index=feature_names, ) coefs.plot.barh(figsize=(9, 7)) plt.title("Ridge model, with regularization, normalized variables") plt.xlabel("Raw coefficient values") plt.axvline(x=0, color=".5") plt.subplots_adjust(left=0.3) # %% # The coefficients are significantly different. # AGE and EXPERIENCE coefficients are both positive but they now have less # influence on the prediction. 
# # The regularization reduces the influence of correlated # variables on the model because the weight is shared between the two # predictive variables, so neither alone would have strong weights. # # On the other hand, the weights obtained with regularization are more # stable (see the :ref:`ridge_regression` User Guide section). This # increased stability is visible from the plot, obtained from data # perturbations, in a cross-validation. This plot can be compared with # the :ref:`previous one<covariation>`. cv_model = cross_validate( model, X, y, cv=cv, return_estimator=True, n_jobs=2, ) coefs = pd.DataFrame( [est[-1].regressor_.coef_ for est in cv_model["estimator"]], columns=feature_names ) # %% plt.xlabel("Age coefficient") plt.ylabel("Experience coefficient") plt.grid(True) plt.xlim(-0.4, 0.5) plt.ylim(-0.4, 0.5) plt.scatter(coefs["AGE"], coefs["EXPERIENCE"]) _ = plt.title("Co-variations of coefficients for AGE and EXPERIENCE across folds") # %% # Linear models with sparse coefficients # -------------------------------------- # # Another possibility to take into account correlated variables in the dataset, # is to estimate sparse coefficients. In some way we already did it manually # when we dropped the AGE column in a previous ridge estimation. # # Lasso models (see the :ref:`lasso` User Guide section) estimates sparse # coefficients. :class:`~sklearn.linear_model.LassoCV` applies cross # validation in order to determine which value of the regularization parameter # (`alpha`) is best suited for the model estimation. from sklearn.linear_model import LassoCV alphas = np.logspace(-10, 10, 21) # alpha values to be chosen from by cross-validation model = make_pipeline( preprocessor, TransformedTargetRegressor( regressor=LassoCV(alphas=alphas, max_iter=100_000), func=np.log10, inverse_func=sp.special.exp10, ), ) _ = model.fit(X_train, y_train) # %% # First we verify which value of :math:`\alpha` has been selected. 
model[-1].regressor_.alpha_ # %% # Then we check the quality of the predictions. mae_train = median_absolute_error(y_train, model.predict(X_train)) y_pred = model.predict(X_test) mae_test = median_absolute_error(y_test, y_pred) scores = { "MedAE on training set": f"{mae_train:.2f} $/hour", "MedAE on testing set": f"{mae_test:.2f} $/hour", } _, ax = plt.subplots(figsize=(6, 6)) display = PredictionErrorDisplay.from_predictions( y_test, y_pred, kind="actual_vs_predicted", ax=ax, scatter_kwargs={"alpha": 0.5} ) ax.set_title("Lasso model, optimum regularization") for name, score in scores.items(): ax.plot([], [], " ", label=f"{name}: {score}") ax.legend(loc="upper left") plt.tight_layout() # %% # For our dataset, again the model is not very predictive. coefs = pd.DataFrame( model[-1].regressor_.coef_, columns=["Coefficients importance"], index=feature_names, ) coefs.plot(kind="barh", figsize=(9, 7)) plt.title("Lasso model, optimum regularization, normalized variables") plt.axvline(x=0, color=".5") plt.subplots_adjust(left=0.3) # %% # A Lasso model identifies the correlation between # AGE and EXPERIENCE and suppresses one of them for the sake of the prediction. # # It is important to keep in mind that the coefficients that have been # dropped may still be related to the outcome by themselves: the model # chose to suppress them because they bring little or no additional # information on top of the other features. Additionally, this selection # is unstable for correlated features, and should be interpreted with # caution. # # Indeed, we can check the variability of the coefficients across folds. 
cv_model = cross_validate( model, X, y, cv=cv, return_estimator=True, n_jobs=2, ) coefs = pd.DataFrame( [est[-1].regressor_.coef_ for est in cv_model["estimator"]], columns=feature_names ) # %% plt.figure(figsize=(9, 7)) sns.stripplot(data=coefs, orient="h", palette="dark:k", alpha=0.5) sns.boxplot(data=coefs, orient="h", color="cyan", saturation=0.5, whis=100) plt.axvline(x=0, color=".5") plt.title("Coefficient variability") plt.subplots_adjust(left=0.3) # %% # We observe that the AGE and EXPERIENCE coefficients are varying a lot # depending of the fold. # # Wrong causal interpretation # --------------------------- # # Policy makers might want to know the effect of education on wage to assess # whether or not a certain policy designed to entice people to pursue more # education would make economic sense. While Machine Learning models are great # for measuring statistical associations, they are generally unable to infer # causal effects. # # It might be tempting to look at the coefficient of education on wage from our # last model (or any model for that matter) and conclude that it captures the # true effect of a change in the standardized education variable on wages. # # Unfortunately there are likely unobserved confounding variables that either # inflate or deflate that coefficient. A confounding variable is a variable that # causes both EDUCATION and WAGE. One example of such variable is ability. # Presumably, more able people are more likely to pursue education while at the # same time being more likely to earn a higher hourly wage at any level of # education. In this case, ability induces a positive `Omitted Variable Bias # <https://en.wikipedia.org/wiki/Omitted-variable_bias>`_ (OVB) on the EDUCATION # coefficient, thereby exaggerating the effect of education on wages. # # See the :ref:`sphx_glr_auto_examples_inspection_plot_causal_interpretation.py` # for a simulated case of ability OVB. 
# # Lessons learned # --------------- # # * Coefficients must be scaled to the same unit of measure to retrieve # feature importance. Scaling them with the standard-deviation of the # feature is a useful proxy. # * Coefficients in multivariate linear models represent the dependency # between a given feature and the target, **conditional** on the other # features. # * Correlated features induce instabilities in the coefficients of linear # models and their effects cannot be well teased apart. # * Different linear models respond differently to feature correlation and # coefficients could significantly vary from one another. # * Inspecting coefficients across the folds of a cross-validation loop # gives an idea of their stability. # * Interpreting causality is difficult when there are confounding effects. If # the relationship between two variables is also affected by something # unobserved, we should be careful when making conclusions about causality.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/inspection/plot_partial_dependence.py
examples/inspection/plot_partial_dependence.py
""" =============================================================== Partial Dependence and Individual Conditional Expectation Plots =============================================================== Partial dependence plots show the dependence between the target function [2]_ and a set of features of interest, marginalizing over the values of all other features (the complement features). Due to the limits of human perception, the size of the set of features of interest must be small (usually, one or two) thus they are usually chosen among the most important features. Similarly, an individual conditional expectation (ICE) plot [3]_ shows the dependence between the target function and a feature of interest. However, unlike partial dependence plots, which show the average effect of the features of interest, ICE plots visualize the dependence of the prediction on a feature for each :term:`sample` separately, with one line per sample. Only one feature of interest is supported for ICE plots. This example shows how to obtain partial dependence and ICE plots from a :class:`~sklearn.neural_network.MLPRegressor` and a :class:`~sklearn.ensemble.HistGradientBoostingRegressor` trained on the bike sharing dataset. The example is inspired by [1]_. .. [1] `Molnar, Christoph. "Interpretable machine learning. A Guide for Making Black Box Models Explainable", 2019. <https://christophm.github.io/interpretable-ml-book/>`_ .. [2] For classification you can think of it as the regression score before the link function. .. [3] :arxiv:`Goldstein, A., Kapelner, A., Bleich, J., and Pitkin, E. (2015). "Peeking Inside the Black Box: Visualizing Statistical Learning With Plots of Individual Conditional Expectation". Journal of Computational and Graphical Statistics, 24(1): 44-65 <1309.6392>` """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Bike sharing dataset preprocessing # ---------------------------------- # # We will use the bike sharing dataset. 
The goal is to predict the number of bike # rentals using weather and season data as well as the datetime information. from sklearn.datasets import fetch_openml bikes = fetch_openml("Bike_Sharing_Demand", version=2, as_frame=True) # Make an explicit copy to avoid "SettingWithCopyWarning" from pandas X, y = bikes.data.copy(), bikes.target # We use only a subset of the data to speed up the example. X = X.iloc[::5, :] y = y[::5] # %% # The feature `"weather"` has a particularity: the category `"heavy_rain"` is a rare # category. X["weather"].value_counts() # %% # Because of this rare category, we collapse it into `"rain"`. X["weather"] = ( X["weather"] .astype(object) .replace(to_replace="heavy_rain", value="rain") .astype("category") ) # %% # We now have a closer look at the `"year"` feature: X["year"].value_counts() # %% # We see that we have data from two years. We use the first year to train the # model and the second year to test the model. mask_training = X["year"] == 0.0 X = X.drop(columns=["year"]) X_train, y_train = X[mask_training], y[mask_training] X_test, y_test = X[~mask_training], y[~mask_training] # %% # We can check the dataset information to see that we have heterogeneous data types. We # have to preprocess the different columns accordingly. X_train.info() # %% # From the previous information, we will consider the `category` columns as nominal # categorical features. In addition, we will consider the date and time information as # categorical features as well. # # We manually define the columns containing numerical and categorical # features. 
numerical_features = [ "temp", "feel_temp", "humidity", "windspeed", ] categorical_features = X_train.columns.drop(numerical_features) # %% # Before we go into the details regarding the preprocessing of the different machine # learning pipelines, we will try to get some additional intuition regarding the dataset # that will be helpful to understand the model's statistical performance and results of # the partial dependence analysis. # # We plot the average number of bike rentals by grouping the data by season and # by year. from itertools import product import matplotlib.pyplot as plt import numpy as np days = ("Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat") hours = tuple(range(24)) xticklabels = [f"{day}\n{hour}:00" for day, hour in product(days, hours)] xtick_start, xtick_period = 6, 12 fig, axs = plt.subplots(nrows=2, figsize=(8, 6), sharey=True, sharex=True) average_bike_rentals = bikes.frame.groupby( ["year", "season", "weekday", "hour"], observed=True ).mean(numeric_only=True)["count"] for ax, (idx, df) in zip(axs, average_bike_rentals.groupby("year")): df.groupby("season", observed=True).plot(ax=ax, legend=True) # decorate the plot ax.set_xticks( np.linspace( start=xtick_start, stop=len(xticklabels), num=len(xticklabels) // xtick_period, ) ) ax.set_xticklabels(xticklabels[xtick_start::xtick_period]) ax.set_xlabel("") ax.set_ylabel("Average number of bike rentals") ax.set_title( f"Bike rental for {'2010 (train set)' if idx == 0.0 else '2011 (test set)'}" ) ax.set_ylim(0, 1_000) ax.set_xlim(0, len(xticklabels)) ax.legend(loc=2) # %% # The first striking difference between the train and test set is that the number of # bike rentals is higher in the test set. For this reason, it will not be surprising to # get a machine learning model that underestimates the number of bike rentals. We # also observe that the number of bike rentals is lower during the spring season. 
In # addition, we see that during working days, there is a specific pattern around 6-7 # am and 5-6 pm with some peaks of bike rentals. We can keep in mind these different # insights and use them to understand the partial dependence plot. # # Preprocessor for machine-learning models # ---------------------------------------- # # Since we later use two different models, a # :class:`~sklearn.neural_network.MLPRegressor` and a # :class:`~sklearn.ensemble.HistGradientBoostingRegressor`, we create two different # preprocessors, specific for each model. # # Preprocessor for the neural network model # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # We will use a :class:`~sklearn.preprocessing.QuantileTransformer` to scale the # numerical features and encode the categorical features with a # :class:`~sklearn.preprocessing.OneHotEncoder`. from sklearn.compose import ColumnTransformer from sklearn.preprocessing import OneHotEncoder, QuantileTransformer mlp_preprocessor = ColumnTransformer( transformers=[ ("num", QuantileTransformer(n_quantiles=100), numerical_features), ("cat", OneHotEncoder(handle_unknown="ignore"), categorical_features), ] ) mlp_preprocessor # %% # Preprocessor for the gradient boosting model # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # For the gradient boosting model, we leave the numerical features as-is and only # encode the categorical features using a # :class:`~sklearn.preprocessing.OrdinalEncoder`. 
from sklearn.preprocessing import OrdinalEncoder hgbdt_preprocessor = ColumnTransformer( transformers=[ ("cat", OrdinalEncoder(), categorical_features), ("num", "passthrough", numerical_features), ], sparse_threshold=1, verbose_feature_names_out=False, ).set_output(transform="pandas") hgbdt_preprocessor # %% # 1-way partial dependence with different models # ---------------------------------------------- # # In this section, we will compute 1-way partial dependence with two different # machine-learning models: (i) a multi-layer perceptron and (ii) a # gradient-boosting model. With these two models, we illustrate how to compute and # interpret both partial dependence plot (PDP) for both numerical and categorical # features and individual conditional expectation (ICE). # # Multi-layer perceptron # ~~~~~~~~~~~~~~~~~~~~~~ # # Let's fit a :class:`~sklearn.neural_network.MLPRegressor` and compute # single-variable partial dependence plots. from time import time from sklearn.neural_network import MLPRegressor from sklearn.pipeline import make_pipeline print("Training MLPRegressor...") tic = time() mlp_model = make_pipeline( mlp_preprocessor, MLPRegressor( hidden_layer_sizes=(30, 15), learning_rate_init=0.01, early_stopping=True, random_state=0, ), ) mlp_model.fit(X_train, y_train) print(f"done in {time() - tic:.3f}s") print(f"Test R2 score: {mlp_model.score(X_test, y_test):.2f}") # %% # We configured a pipeline using the preprocessor that we created specifically for the # neural network and tuned the neural network size and learning rate to get a reasonable # compromise between training time and predictive performance on a test set. # # Importantly, this tabular dataset has very different dynamic ranges for its # features. Neural networks tend to be very sensitive to features with varying # scales and forgetting to preprocess the numeric feature would lead to a very # poor model. 
# # It would be possible to get even higher predictive performance with a larger # neural network but the training would also be significantly more expensive. # # Note that it is important to check that the model is accurate enough on a # test set before plotting the partial dependence since there would be little # use in explaining the impact of a given feature on the prediction function of # a model with poor predictive performance. In this regard, our MLP model works # reasonably well. # # We will plot the averaged partial dependence. import matplotlib.pyplot as plt from sklearn.inspection import PartialDependenceDisplay common_params = { "subsample": 50, "n_jobs": 2, "grid_resolution": 20, "random_state": 0, } print("Computing partial dependence plots...") features_info = { # features of interest "features": ["temp", "humidity", "windspeed", "season", "weather", "hour"], # type of partial dependence plot "kind": "average", # information regarding categorical features "categorical_features": categorical_features, } tic = time() _, ax = plt.subplots(ncols=3, nrows=2, figsize=(9, 8), constrained_layout=True) display = PartialDependenceDisplay.from_estimator( mlp_model, X_train, **features_info, ax=ax, **common_params, ) print(f"done in {time() - tic:.3f}s") _ = display.figure_.suptitle( ( "Partial dependence of the number of bike rentals\n" "for the bike rental dataset with an MLPRegressor" ), fontsize=16, ) # %% # Gradient boosting # ~~~~~~~~~~~~~~~~~ # # Let's now fit a :class:`~sklearn.ensemble.HistGradientBoostingRegressor` and # compute the partial dependence on the same features. We also use the # specific preprocessor we created for this model. 
from sklearn.ensemble import HistGradientBoostingRegressor print("Training HistGradientBoostingRegressor...") tic = time() hgbdt_model = make_pipeline( hgbdt_preprocessor, HistGradientBoostingRegressor( categorical_features=categorical_features, random_state=0, max_iter=50, ), ) hgbdt_model.fit(X_train, y_train) print(f"done in {time() - tic:.3f}s") print(f"Test R2 score: {hgbdt_model.score(X_test, y_test):.2f}") # %% # Here, we used the default hyperparameters for the gradient boosting model # without any preprocessing as tree-based models are naturally robust to # monotonic transformations of numerical features. # # Note that on this tabular dataset, Gradient Boosting Machines are both # significantly faster to train and more accurate than neural networks. It is # also significantly cheaper to tune their hyperparameters (the defaults tend # to work well while this is not often the case for neural networks). # # We will plot the partial dependence for some of the numerical and categorical # features. print("Computing partial dependence plots...") tic = time() _, ax = plt.subplots(ncols=3, nrows=2, figsize=(9, 8), constrained_layout=True) display = PartialDependenceDisplay.from_estimator( hgbdt_model, X_train, **features_info, ax=ax, **common_params, ) print(f"done in {time() - tic:.3f}s") _ = display.figure_.suptitle( ( "Partial dependence of the number of bike rentals\n" "for the bike rental dataset with a gradient boosting" ), fontsize=16, ) # %% # Analysis of the plots # ~~~~~~~~~~~~~~~~~~~~~ # # We will first look at the PDPs for the numerical features. For both models, the # general trend of the PDP of the temperature is that the number of bike rentals is # increasing with temperature. We can make a similar analysis but with the opposite # trend for the humidity features. The number of bike rentals is decreasing when the # humidity increases. Finally, we see the same trend for the wind speed feature. 
The # number of bike rentals is decreasing when the wind speed is increasing for both # models. We also observe that :class:`~sklearn.neural_network.MLPRegressor` has much # smoother predictions than :class:`~sklearn.ensemble.HistGradientBoostingRegressor`. # # Now, we will look at the partial dependence plots for the categorical features. # # We observe that the spring season is the lowest bar for the season feature. With the # weather feature, the rain category is the lowest bar. Regarding the hour feature, # we see two peaks around the 7 am and 6 pm. These findings are in line with the # the observations we made earlier on the dataset. # # However, it is worth noting that we are creating potential meaningless # synthetic samples if features are correlated. # # .. _ice-vs-pdp: # # ICE vs. PDP # ~~~~~~~~~~~ # # PDP is an average of the marginal effects of the features. We are averaging the # response of all samples of the provided set. Thus, some effects could be hidden. In # this regard, it is possible to plot each individual response. This representation is # called the Individual Effect Plot (ICE). In the plot below, we plot 50 randomly # selected ICEs for the temperature and humidity features. print("Computing partial dependence plots and individual conditional expectation...") tic = time() _, ax = plt.subplots(ncols=2, figsize=(6, 4), sharey=True, constrained_layout=True) features_info = { "features": ["temp", "humidity"], "kind": "both", "centered": True, } display = PartialDependenceDisplay.from_estimator( hgbdt_model, X_train, **features_info, ax=ax, **common_params, ) print(f"done in {time() - tic:.3f}s") _ = display.figure_.suptitle("ICE and PDP representations", fontsize=16) # %% # We see that the ICE for the temperature feature gives us some additional information: # Some of the ICE lines are flat while some others show a decrease of the dependence # for temperature above 35 degrees Celsius. 
We observe a similar pattern for the # humidity feature: some of the ICEs lines show a sharp decrease when the humidity is # above 80%. # # Not all ICE lines are parallel, this indicates that the model finds # interactions between features. We can repeat the experiment by constraining the # gradient boosting model to not use any interactions between features using the # parameter `interaction_cst`: from sklearn.base import clone interaction_cst = [[i] for i in range(X_train.shape[1])] hgbdt_model_without_interactions = ( clone(hgbdt_model) .set_params(histgradientboostingregressor__interaction_cst=interaction_cst) .fit(X_train, y_train) ) print(f"Test R2 score: {hgbdt_model_without_interactions.score(X_test, y_test):.2f}") # %% _, ax = plt.subplots(ncols=2, figsize=(6, 4), sharey=True, constrained_layout=True) features_info["centered"] = False display = PartialDependenceDisplay.from_estimator( hgbdt_model_without_interactions, X_train, **features_info, ax=ax, **common_params, ) _ = display.figure_.suptitle("ICE and PDP representations", fontsize=16) # %% # 2D interaction plots # -------------------- # # PDPs with two features of interest enable us to visualize interactions among them. # However, ICEs cannot be plotted in an easy manner and thus interpreted. We will show # the representation of available in # :meth:`~sklearn.inspection.PartialDependenceDisplay.from_estimator` that is a 2D # heatmap. 
print("Computing partial dependence plots...") features_info = { "features": ["temp", "humidity", ("temp", "humidity")], "kind": "average", } _, ax = plt.subplots(ncols=3, figsize=(10, 4), constrained_layout=True) tic = time() display = PartialDependenceDisplay.from_estimator( hgbdt_model, X_train, **features_info, ax=ax, **common_params, ) print(f"done in {time() - tic:.3f}s") _ = display.figure_.suptitle( "1-way vs 2-way of numerical PDP using gradient boosting", fontsize=16 ) # %% # The two-way partial dependence plot shows the dependence of the number of bike rentals # on joint values of temperature and humidity. # We clearly see an interaction between the two features. For a temperature higher than # 20 degrees Celsius, the humidity has an impact on the number of bike rentals # that seems independent on the temperature. # # On the other hand, for temperatures lower than 20 degrees Celsius, both the # temperature and humidity continuously impact the number of bike rentals. # # Furthermore, the slope of the of the impact ridge of the 20 degrees Celsius # threshold is very dependent on the humidity level: the ridge is steep under # dry conditions but much smoother under wetter conditions above 70% of humidity. # # We now contrast those results with the same plots computed for the model # constrained to learn a prediction function that does not depend on such # non-linear feature interactions. 
print("Computing partial dependence plots...") features_info = { "features": ["temp", "humidity", ("temp", "humidity")], "kind": "average", } _, ax = plt.subplots(ncols=3, figsize=(10, 4), constrained_layout=True) tic = time() display = PartialDependenceDisplay.from_estimator( hgbdt_model_without_interactions, X_train, **features_info, ax=ax, **common_params, ) print(f"done in {time() - tic:.3f}s") _ = display.figure_.suptitle( "1-way vs 2-way of numerical PDP using gradient boosting", fontsize=16 ) # %% # The 1D partial dependence plots for the model constrained to not model feature # interactions show local spikes for each features individually, in particular for # for the "humidity" feature. Those spikes might be reflecting a degraded behavior # of the model that attempts to somehow compensate for the forbidden interactions # by overfitting particular training points. Note that the predictive performance # of this model as measured on the test set is significantly worse than that of # the original, unconstrained model. # # Also note that the number of local spikes visible on those plots is depends on # the grid resolution parameter of the PD plot itself. # # Those local spikes result in a noisily gridded 2D PD plot. It is quite # challenging to tell whether or not there are no interaction between those # features because of the high frequency oscillations in the humidity feature. # However it can clearly be seen that the simple interaction effect observed when # the temperature crosses the 20 degrees boundary is no longer visible for this # model. # # The partial dependence between categorical features will provide a discrete # representation that can be shown as a heatmap. 
For instance the interaction between # the season, the weather, and the target would be as follow: print("Computing partial dependence plots...") features_info = { "features": ["season", "weather", ("season", "weather")], "kind": "average", "categorical_features": categorical_features, } _, ax = plt.subplots(ncols=3, figsize=(14, 6), constrained_layout=True) tic = time() display = PartialDependenceDisplay.from_estimator( hgbdt_model, X_train, **features_info, ax=ax, **common_params, ) print(f"done in {time() - tic:.3f}s") _ = display.figure_.suptitle( "1-way vs 2-way PDP of categorical features using gradient boosting", fontsize=16 ) # %% # 3D representation # ~~~~~~~~~~~~~~~~~ # # Let's make the same partial dependence plot for the 2 features interaction, # this time in 3 dimensions. # unused but required import for doing 3d projections with matplotlib < 3.2 import mpl_toolkits.mplot3d # noqa: F401 import numpy as np from sklearn.inspection import partial_dependence fig = plt.figure(figsize=(5.5, 5)) features = ("temp", "humidity") pdp = partial_dependence( hgbdt_model, X_train, features=features, kind="average", grid_resolution=10 ) XX, YY = np.meshgrid(pdp["grid_values"][0], pdp["grid_values"][1]) Z = pdp.average[0].T ax = fig.add_subplot(projection="3d") fig.add_axes(ax) surf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=plt.cm.BuPu, edgecolor="k") ax.set_xlabel(features[0]) ax.set_ylabel(features[1]) fig.suptitle( "PD of number of bike rentals on\nthe temperature and humidity GBDT model", fontsize=16, ) # pretty init view ax.view_init(elev=22, azim=122) clb = plt.colorbar(surf, pad=0.08, shrink=0.6, aspect=10) clb.ax.set_title("Partial\ndependence") plt.show() # %% # .. _plt_partial_dependence_custom_values: # # Custom Inspection Points # ~~~~~~~~~~~~~~~~~~~~~~~~ # # None of the examples so far specify _which_ points are evaluated to create the # partial dependence plots. By default we use percentiles defined by the input dataset. 
# In some cases it can be helpful to specify the exact points where you would like the # model evaluated. For instance, if a user wants to test the model behavior on # out-of-distribution data or compare two models that were fit on slightly different # data. The `custom_values` parameter allows the user to pass in the values that they # want the model to be evaluated on. This overrides the `grid_resolution` and # `percentiles` parameters. Let's return to our gradient boosting example above # but with custom values print("Computing partial dependence plots with custom evaluation values...") tic = time() _, ax = plt.subplots(ncols=2, figsize=(6, 4), sharey=True, constrained_layout=True) features_info = { "features": ["temp", "humidity"], "kind": "both", } display = PartialDependenceDisplay.from_estimator( hgbdt_model, X_train, **features_info, ax=ax, **common_params, # we set custom values for temp feature - # all other features are evaluated based on the data custom_values={"temp": np.linspace(0, 40, 10)}, ) print(f"done in {time() - tic:.3f}s") _ = display.figure_.suptitle( ( "Partial dependence of the number of bike rentals\n" "for the bike rental dataset with a gradient boosting" ), fontsize=16, )
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/inspection/plot_permutation_importance.py
examples/inspection/plot_permutation_importance.py
""" ================================================================ Permutation Importance vs Random Forest Feature Importance (MDI) ================================================================ In this example, we will compare the impurity-based feature importance of :class:`~sklearn.ensemble.RandomForestClassifier` with the permutation importance on the titanic dataset using :func:`~sklearn.inspection.permutation_importance`. We will show that the impurity-based feature importance can inflate the importance of numerical features. Furthermore, the impurity-based feature importance of random forests suffers from being computed on statistics derived from the training dataset: the importances can be high even for features that are not predictive of the target variable, as long as the model has the capacity to use them to overfit. This example shows how to use Permutation Importances as an alternative that can mitigate those limitations. .. rubric:: References * :doi:`L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001. <10.1023/A:1010933404324>` """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Data Loading and Feature Engineering # ------------------------------------ # Let's use pandas to load a copy of the titanic dataset. The following shows # how to apply separate preprocessing on numerical and categorical features. # # We further include two random variables that are not correlated in any way # with the target variable (``survived``): # # - ``random_num`` is a high cardinality numerical variable (as many unique # values as records). # - ``random_cat`` is a low cardinality categorical variable (3 possible # values). 
import numpy as np from sklearn.datasets import fetch_openml from sklearn.model_selection import train_test_split X, y = fetch_openml("titanic", version=1, as_frame=True, return_X_y=True) rng = np.random.RandomState(seed=42) X["random_cat"] = rng.randint(3, size=X.shape[0]) X["random_num"] = rng.randn(X.shape[0]) categorical_columns = ["pclass", "sex", "embarked", "random_cat"] numerical_columns = ["age", "sibsp", "parch", "fare", "random_num"] X = X[categorical_columns + numerical_columns] X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42) # %% # We define a predictive model based on a random forest. Therefore, we will make # the following preprocessing steps: # # - use :class:`~sklearn.preprocessing.OrdinalEncoder` to encode the # categorical features; # - use :class:`~sklearn.impute.SimpleImputer` to fill missing values for # numerical features using a mean strategy. from sklearn.compose import ColumnTransformer from sklearn.ensemble import RandomForestClassifier from sklearn.impute import SimpleImputer from sklearn.pipeline import Pipeline from sklearn.preprocessing import OrdinalEncoder categorical_encoder = OrdinalEncoder( handle_unknown="use_encoded_value", unknown_value=-1, encoded_missing_value=-1 ) numerical_pipe = SimpleImputer(strategy="mean") preprocessing = ColumnTransformer( [ ("cat", categorical_encoder, categorical_columns), ("num", numerical_pipe, numerical_columns), ], verbose_feature_names_out=False, ) rf = Pipeline( [ ("preprocess", preprocessing), ("classifier", RandomForestClassifier(random_state=42)), ] ) rf.fit(X_train, y_train) # %% # Accuracy of the Model # --------------------- # Before inspecting the feature importances, it is important to check that # the model predictive performance is high enough. Indeed, there would be little # interest in inspecting the important features of a non-predictive model. 
print(f"RF train accuracy: {rf.score(X_train, y_train):.3f}") print(f"RF test accuracy: {rf.score(X_test, y_test):.3f}") # %% # Here, one can observe that the train accuracy is very high (the forest model # has enough capacity to completely memorize the training set) but it can still # generalize well enough to the test set thanks to the built-in bagging of # random forests. # # It might be possible to trade some accuracy on the training set for a # slightly better accuracy on the test set by limiting the capacity of the # trees (for instance by setting ``min_samples_leaf=5`` or # ``min_samples_leaf=10``) so as to limit overfitting while not introducing too # much underfitting. # # However, let us keep our high capacity random forest model for now so that we can # illustrate some pitfalls about feature importance on variables with many # unique values. # %% # Tree's Feature Importance from Mean Decrease in Impurity (MDI) # -------------------------------------------------------------- # The impurity-based feature importance ranks the numerical features to be the # most important features. As a result, the non-predictive ``random_num`` # variable is ranked as one of the most important features! # # This problem stems from two limitations of impurity-based feature # importances: # # - impurity-based importances are biased towards high cardinality features; # - impurity-based importances are computed on training set statistics and # therefore do not reflect the ability of feature to be useful to make # predictions that generalize to the test set (when the model has enough # capacity). # # The bias towards high cardinality features explains why the `random_num` has # a really large importance in comparison with `random_cat` while we would # expect that both random features have a null importance. # # The fact that we use training set statistics explains why both the # `random_num` and `random_cat` features have a non-null importance. 
import pandas as pd feature_names = rf[:-1].get_feature_names_out() mdi_importances = pd.Series( rf[-1].feature_importances_, index=feature_names ).sort_values(ascending=True) # %% ax = mdi_importances.plot.barh() ax.set_title("Random Forest Feature Importances (MDI)") ax.figure.tight_layout() # %% # As an alternative, the permutation importances of ``rf`` are computed on a # held out test set. This shows that the low cardinality categorical feature, # `sex` and `pclass` are the most important features. Indeed, permuting the # values of these features will lead to the most decrease in accuracy score of the # model on the test set. # # Also, note that both random features have very low importances (close to 0) as # expected. from sklearn.inspection import permutation_importance result = permutation_importance( rf, X_test, y_test, n_repeats=10, random_state=42, n_jobs=2 ) sorted_importances_idx = result.importances_mean.argsort() importances = pd.DataFrame( result.importances[sorted_importances_idx].T, columns=X.columns[sorted_importances_idx], ) ax = importances.plot.box(vert=False, whis=10) ax.set_title("Permutation Importances (test set)") ax.axvline(x=0, color="k", linestyle="--") ax.set_xlabel("Decrease in accuracy score") ax.figure.tight_layout() # %% # It is also possible to compute the permutation importances on the training # set. This reveals that `random_num` and `random_cat` get a significantly # higher importance ranking than when computed on the test set. The difference # between those two plots is a confirmation that the RF model has enough # capacity to use that random numerical and categorical features to overfit. 
result = permutation_importance( rf, X_train, y_train, n_repeats=10, random_state=42, n_jobs=2 ) sorted_importances_idx = result.importances_mean.argsort() importances = pd.DataFrame( result.importances[sorted_importances_idx].T, columns=X.columns[sorted_importances_idx], ) ax = importances.plot.box(vert=False, whis=10) ax.set_title("Permutation Importances (train set)") ax.axvline(x=0, color="k", linestyle="--") ax.set_xlabel("Decrease in accuracy score") ax.figure.tight_layout() # %% # We can further retry the experiment by limiting the capacity of the trees # to overfit by setting `min_samples_leaf` at 20 data points. rf.set_params(classifier__min_samples_leaf=20).fit(X_train, y_train) # %% # Observing the accuracy score on the training and testing set, we observe that # the two metrics are very similar now. Therefore, our model is not overfitting # anymore. We can then check the permutation importances with this new model. print(f"RF train accuracy: {rf.score(X_train, y_train):.3f}") print(f"RF test accuracy: {rf.score(X_test, y_test):.3f}") # %% train_result = permutation_importance( rf, X_train, y_train, n_repeats=10, random_state=42, n_jobs=2 ) test_results = permutation_importance( rf, X_test, y_test, n_repeats=10, random_state=42, n_jobs=2 ) sorted_importances_idx = train_result.importances_mean.argsort() # %% train_importances = pd.DataFrame( train_result.importances[sorted_importances_idx].T, columns=X.columns[sorted_importances_idx], ) test_importances = pd.DataFrame( test_results.importances[sorted_importances_idx].T, columns=X.columns[sorted_importances_idx], ) # %% for name, importances in zip(["train", "test"], [train_importances, test_importances]): ax = importances.plot.box(vert=False, whis=10) ax.set_title(f"Permutation Importances ({name} set)") ax.set_xlabel("Decrease in accuracy score") ax.axvline(x=0, color="k", linestyle="--") ax.figure.tight_layout() # %% # Now, we can observe that on both sets, the `random_num` and `random_cat` # features 
have a lower importance compared to the overfitting random forest. # However, the conclusions regarding the importance of the other features are # still valid.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/inspection/plot_causal_interpretation.py
examples/inspection/plot_causal_interpretation.py
""" =================================================== Failure of Machine Learning to infer causal effects =================================================== Machine Learning models are great for measuring statistical associations. Unfortunately, unless we're willing to make strong assumptions about the data, those models are unable to infer causal effects. To illustrate this, we will simulate a situation in which we try to answer one of the most important questions in economics of education: **what is the causal effect of earning a college degree on hourly wages?** Although the answer to this question is crucial to policy makers, `Omitted-Variable Biases <https://en.wikipedia.org/wiki/Omitted-variable_bias>`_ (OVB) prevent us from identifying that causal effect. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # The dataset: simulated hourly wages # ----------------------------------- # # The data generating process is laid out in the code below. Work experience in # years and a measure of ability are drawn from Normal distributions; the # hourly wage of one of the parents is drawn from Beta distribution. We then # create an indicator of college degree which is positively impacted by ability # and parental hourly wage. Finally, we model hourly wages as a linear function # of all the previous variables and a random component. Note that all variables # have a positive effect on hourly wages. 
import numpy as np import pandas as pd n_samples = 10_000 rng = np.random.RandomState(32) experiences = rng.normal(20, 10, size=n_samples).astype(int) experiences[experiences < 0] = 0 abilities = rng.normal(0, 0.15, size=n_samples) parent_hourly_wages = 50 * rng.beta(2, 8, size=n_samples) parent_hourly_wages[parent_hourly_wages < 0] = 0 college_degrees = ( 9 * abilities + 0.02 * parent_hourly_wages + rng.randn(n_samples) > 0.7 ).astype(int) true_coef = pd.Series( { "college degree": 2.0, "ability": 5.0, "experience": 0.2, "parent hourly wage": 1.0, } ) hourly_wages = ( true_coef["experience"] * experiences + true_coef["parent hourly wage"] * parent_hourly_wages + true_coef["college degree"] * college_degrees + true_coef["ability"] * abilities + rng.normal(0, 1, size=n_samples) ) hourly_wages[hourly_wages < 0] = 0 # %% # Description of the simulated data # --------------------------------- # # The following plot shows the distribution of each variable, and pairwise # scatter plots. Key to our OVB story is the positive relationship between # ability and college degree. import seaborn as sns df = pd.DataFrame( { "college degree": college_degrees, "ability": abilities, "hourly wage": hourly_wages, "experience": experiences, "parent hourly wage": parent_hourly_wages, } ) grid = sns.pairplot(df, diag_kind="kde", corner=True) # %% # In the next section, we train predictive models and we therefore split the # target column from over features and we split the data into a training and a # testing set. from sklearn.model_selection import train_test_split target_name = "hourly wage" X, y = df.drop(columns=target_name), df[target_name] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0) # %% # Income prediction with fully observed variables # ----------------------------------------------- # # First, we train a predictive model, a # :class:`~sklearn.linear_model.LinearRegression` model. 
In this experiment, # we assume that all variables used by the true generative model are available. from sklearn.linear_model import LinearRegression from sklearn.metrics import r2_score features_names = ["experience", "parent hourly wage", "college degree", "ability"] regressor_with_ability = LinearRegression() regressor_with_ability.fit(X_train[features_names], y_train) y_pred_with_ability = regressor_with_ability.predict(X_test[features_names]) R2_with_ability = r2_score(y_test, y_pred_with_ability) print(f"R2 score with ability: {R2_with_ability:.3f}") # %% # This model predicts well the hourly wages as shown by the high R2 score. We # plot the model coefficients to show that we exactly recover the values of # the true generative model. import matplotlib.pyplot as plt model_coef = pd.Series(regressor_with_ability.coef_, index=features_names) coef = pd.concat( [true_coef[features_names], model_coef], keys=["Coefficients of true generative model", "Model coefficients"], axis=1, ) ax = coef.plot.barh() ax.set_xlabel("Coefficient values") ax.set_title("Coefficients of the linear regression including the ability features") _ = plt.tight_layout() # %% # Income prediction with partial observations # ------------------------------------------- # # In practice, intellectual abilities are not observed or are only estimated # from proxies that inadvertently measure education as well (e.g. by IQ tests). # But omitting the "ability" feature from a linear model inflates the estimate # via a positive OVB. 
features_names = ["experience", "parent hourly wage", "college degree"] regressor_without_ability = LinearRegression() regressor_without_ability.fit(X_train[features_names], y_train) y_pred_without_ability = regressor_without_ability.predict(X_test[features_names]) R2_without_ability = r2_score(y_test, y_pred_without_ability) print(f"R2 score without ability: {R2_without_ability:.3f}") # %% # The predictive power of our model is similar when we omit the ability feature # in terms of R2 score. We now check if the coefficient of the model are # different from the true generative model. model_coef = pd.Series(regressor_without_ability.coef_, index=features_names) coef = pd.concat( [true_coef[features_names], model_coef], keys=["Coefficients of true generative model", "Model coefficients"], axis=1, ) ax = coef.plot.barh() ax.set_xlabel("Coefficient values") _ = ax.set_title("Coefficients of the linear regression excluding the ability feature") plt.tight_layout() plt.show() # %% # To compensate for the omitted variable, the model inflates the coefficient of # the college degree feature. Therefore, interpreting this coefficient value # as a causal effect of the true generative model is incorrect. # # Lessons learned # --------------- # # Machine learning models are not designed for the estimation of causal # effects. While we showed this with a linear model, OVB can affect any type of # model. # # Whenever interpreting a coefficient or a change in predictions brought about # by a change in one of the features, it is important to keep in mind # potentially unobserved variables that could be correlated with both the # feature in question and the target variable. Such variables are called # `Confounding Variables <https://en.wikipedia.org/wiki/Confounding>`_. In # order to still estimate causal effect in the presence of confounding, # researchers usually conduct experiments in which the treatment variable (e.g. # college degree) is randomized. 
When an experiment is prohibitively expensive # or unethical, researchers can sometimes use other causal inference techniques # such as `Instrumental Variables # <https://en.wikipedia.org/wiki/Instrumental_variables_estimation>`_ (IV) # estimations.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/cluster/plot_coin_ward_segmentation.py
examples/cluster/plot_coin_ward_segmentation.py
""" ====================================================================== A demo of structured Ward hierarchical clustering on an image of coins ====================================================================== Compute the segmentation of a 2D image with Ward hierarchical clustering. The clustering is spatially constrained in order for each segmented region to be in one piece. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Generate data # ------------- from skimage.data import coins orig_coins = coins() # %% # Resize it to 20% of the original size to speed up the processing # Applying a Gaussian filter for smoothing prior to down-scaling # reduces aliasing artifacts. import numpy as np from scipy.ndimage import gaussian_filter from skimage.transform import rescale smoothened_coins = gaussian_filter(orig_coins, sigma=2) rescaled_coins = rescale( smoothened_coins, 0.2, mode="reflect", anti_aliasing=False, ) X = np.reshape(rescaled_coins, (-1, 1)) # %% # Define structure of the data # ---------------------------- # # Pixels are connected to their neighbors. 
from sklearn.feature_extraction.image import grid_to_graph connectivity = grid_to_graph(*rescaled_coins.shape) # %% # Compute clustering # ------------------ import time as time from sklearn.cluster import AgglomerativeClustering print("Compute structured hierarchical clustering...") st = time.time() n_clusters = 27 # number of regions ward = AgglomerativeClustering( n_clusters=n_clusters, linkage="ward", connectivity=connectivity ) ward.fit(X) label = np.reshape(ward.labels_, rescaled_coins.shape) print(f"Elapsed time: {time.time() - st:.3f}s") print(f"Number of pixels: {label.size}") print(f"Number of clusters: {np.unique(label).size}") # %% # Plot the results on an image # ---------------------------- # # Agglomerative clustering is able to segment each coin however, we have had to # use a ``n_cluster`` larger than the number of coins because the segmentation # is finding a large in the background. import matplotlib.pyplot as plt plt.figure(figsize=(5, 5)) plt.imshow(rescaled_coins, cmap=plt.cm.gray) for l in range(n_clusters): plt.contour( label == l, colors=[ plt.cm.nipy_spectral(l / float(n_clusters)), ], ) plt.axis("off") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/cluster/plot_digits_agglomeration.py
examples/cluster/plot_digits_agglomeration.py
""" ========================================================= Feature agglomeration ========================================================= These images show how similar features are merged together using feature agglomeration. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn import cluster, datasets from sklearn.feature_extraction.image import grid_to_graph digits = datasets.load_digits() images = digits.images X = np.reshape(images, (len(images), -1)) connectivity = grid_to_graph(*images[0].shape) agglo = cluster.FeatureAgglomeration(connectivity=connectivity, n_clusters=32) agglo.fit(X) X_reduced = agglo.transform(X) X_restored = agglo.inverse_transform(X_reduced) images_restored = np.reshape(X_restored, images.shape) plt.figure(1, figsize=(4, 3.5)) plt.clf() plt.subplots_adjust(left=0.01, right=0.99, bottom=0.01, top=0.91) for i in range(4): plt.subplot(3, 4, i + 1) plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation="nearest") plt.xticks(()) plt.yticks(()) if i == 1: plt.title("Original data") plt.subplot(3, 4, 4 + i + 1) plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16, interpolation="nearest") if i == 1: plt.title("Agglomerated data") plt.xticks(()) plt.yticks(()) plt.subplot(3, 4, 10) plt.imshow( np.reshape(agglo.labels_, images[0].shape), interpolation="nearest", cmap=plt.cm.nipy_spectral, ) plt.xticks(()) plt.yticks(()) plt.title("Labels") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/cluster/plot_bisect_kmeans.py
examples/cluster/plot_bisect_kmeans.py
""" ============================================================= Bisecting K-Means and Regular K-Means Performance Comparison ============================================================= This example shows differences between Regular K-Means algorithm and Bisecting K-Means. While K-Means clusterings are different when increasing n_clusters, Bisecting K-Means clustering builds on top of the previous ones. As a result, it tends to create clusters that have a more regular large-scale structure. This difference can be visually observed: for all numbers of clusters, there is a dividing line cutting the overall data cloud in two for BisectingKMeans, which is not present for regular K-Means. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt from sklearn.cluster import BisectingKMeans, KMeans from sklearn.datasets import make_blobs # Generate sample data n_samples = 10000 random_state = 0 X, _ = make_blobs(n_samples=n_samples, centers=2, random_state=random_state) # Number of cluster centers for KMeans and BisectingKMeans n_clusters_list = [4, 8, 16] # Algorithms to compare clustering_algorithms = { "Bisecting K-Means": BisectingKMeans, "K-Means": KMeans, } # Make subplots for each variant fig, axs = plt.subplots( len(clustering_algorithms), len(n_clusters_list), figsize=(12, 5) ) axs = axs.T for i, (algorithm_name, Algorithm) in enumerate(clustering_algorithms.items()): for j, n_clusters in enumerate(n_clusters_list): algo = Algorithm(n_clusters=n_clusters, random_state=random_state, n_init=3) algo.fit(X) centers = algo.cluster_centers_ axs[j, i].scatter(X[:, 0], X[:, 1], s=10, c=algo.labels_) axs[j, i].scatter(centers[:, 0], centers[:, 1], c="r", s=20) axs[j, i].set_title(f"{algorithm_name} : {n_clusters} clusters") # Hide x labels and tick labels for top plots and y ticks for right plots. for ax in axs.flat: ax.label_outer() ax.set_xticks([]) ax.set_yticks([]) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/cluster/plot_agglomerative_dendrogram.py
examples/cluster/plot_agglomerative_dendrogram.py
# Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause """ ========================================= Plot Hierarchical Clustering Dendrogram ========================================= This example plots the corresponding dendrogram of a hierarchical clustering using AgglomerativeClustering and the dendrogram method available in scipy. """ import numpy as np from matplotlib import pyplot as plt from scipy.cluster.hierarchy import dendrogram from sklearn.cluster import AgglomerativeClustering from sklearn.datasets import load_iris def plot_dendrogram(model, **kwargs): # Create linkage matrix and then plot the dendrogram # create the counts of samples under each node counts = np.zeros(model.children_.shape[0]) n_samples = len(model.labels_) for i, merge in enumerate(model.children_): current_count = 0 for child_idx in merge: if child_idx < n_samples: current_count += 1 # leaf node else: current_count += counts[child_idx - n_samples] counts[i] = current_count linkage_matrix = np.column_stack( [model.children_, model.distances_, counts] ).astype(float) # Plot the corresponding dendrogram dendrogram(linkage_matrix, **kwargs) iris = load_iris() X = iris.data # setting distance_threshold=0 ensures we compute the full tree. model = AgglomerativeClustering(distance_threshold=0, n_clusters=None) model = model.fit(X) plt.title("Hierarchical Clustering Dendrogram") # plot the top three levels of the dendrogram plot_dendrogram(model, truncate_mode="level", p=3) plt.xlabel("Number of points in node (or index of point if no parenthesis).") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/cluster/plot_kmeans_silhouette_analysis.py
examples/cluster/plot_kmeans_silhouette_analysis.py
""" =============================================================================== Selecting the number of clusters with silhouette analysis on KMeans clustering =============================================================================== Silhouette analysis can be used to study the separation distance between the resulting clusters. The silhouette plot displays a measure of how close each point in one cluster is to points in the neighboring clusters and thus provides a way to assess parameters like number of clusters visually. This measure has a range of [-1, 1]. Silhouette coefficients (as these values are referred to as) near +1 indicate that the sample is far away from the neighboring clusters. A value of 0 indicates that the sample is on or very close to the decision boundary between two neighboring clusters and negative values indicate that those samples might have been assigned to the wrong cluster. In this example the silhouette analysis is used to choose an optimal value for ``n_clusters``. The silhouette plot shows that the ``n_clusters`` value of 3, 5 and 6 are a bad pick for the given data due to the presence of clusters with below average silhouette scores and also due to wide fluctuations in the size of the silhouette plots. Silhouette analysis is more ambivalent in deciding between 2 and 4. Also from the thickness of the silhouette plot the cluster size can be visualized. The silhouette plot for cluster 0 when ``n_clusters`` is equal to 2, is bigger in size owing to the grouping of the 3 sub clusters into one big cluster. However when the ``n_clusters`` is equal to 4, all the plots are more or less of similar thickness and hence are of similar sizes as can be also verified from the labelled scatter plot on the right. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.cm as cm import matplotlib.pyplot as plt import numpy as np from sklearn.cluster import KMeans from sklearn.datasets import make_blobs from sklearn.metrics import silhouette_samples, silhouette_score # Generating the sample data from make_blobs # This particular setting has one distinct cluster and 3 clusters placed close # together. X, y = make_blobs( n_samples=500, n_features=2, centers=4, cluster_std=1, center_box=(-10.0, 10.0), shuffle=True, random_state=1, ) # For reproducibility range_n_clusters = [2, 3, 4, 5, 6] for n_clusters in range_n_clusters: # Create a subplot with 1 row and 2 columns fig, (ax1, ax2) = plt.subplots(1, 2) fig.set_size_inches(18, 7) # The 1st subplot is the silhouette plot # The silhouette coefficient can range from -1, 1 but in this example all # lie within [-0.1, 1] ax1.set_xlim([-0.1, 1]) # The (n_clusters+1)*10 is for inserting blank space between silhouette # plots of individual clusters, to demarcate them clearly. ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10]) # Initialize the clusterer with n_clusters value and a random generator # seed of 10 for reproducibility. clusterer = KMeans(n_clusters=n_clusters, random_state=10) cluster_labels = clusterer.fit_predict(X) # The silhouette_score gives the average value for all the samples. 
# This gives a perspective into the density and separation of the formed # clusters silhouette_avg = silhouette_score(X, cluster_labels) print( "For n_clusters =", n_clusters, "The average silhouette_score is :", silhouette_avg, ) # Compute the silhouette scores for each sample sample_silhouette_values = silhouette_samples(X, cluster_labels) y_lower = 10 for i in range(n_clusters): # Aggregate the silhouette scores for samples belonging to # cluster i, and sort them ith_cluster_silhouette_values = sample_silhouette_values[cluster_labels == i] ith_cluster_silhouette_values.sort() size_cluster_i = ith_cluster_silhouette_values.shape[0] y_upper = y_lower + size_cluster_i color = cm.nipy_spectral(float(i) / n_clusters) ax1.fill_betweenx( np.arange(y_lower, y_upper), 0, ith_cluster_silhouette_values, facecolor=color, edgecolor=color, alpha=0.7, ) # Label the silhouette plots with their cluster numbers at the middle ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i)) # Compute the new y_lower for next plot y_lower = y_upper + 10 # 10 for the 0 samples ax1.set_title("The silhouette plot for the various clusters.") ax1.set_xlabel("The silhouette coefficient values") ax1.set_ylabel("Cluster label") # The vertical line for average silhouette score of all the values ax1.axvline(x=silhouette_avg, color="red", linestyle="--") ax1.set_yticks([]) # Clear the yaxis labels / ticks ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1]) # 2nd Plot showing the actual clusters formed colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters) ax2.scatter( X[:, 0], X[:, 1], marker=".", s=30, lw=0, alpha=0.7, c=colors, edgecolor="k" ) # Labeling the clusters centers = clusterer.cluster_centers_ # Draw white circles at cluster centers ax2.scatter( centers[:, 0], centers[:, 1], marker="o", c="white", alpha=1, s=200, edgecolor="k", ) for i, c in enumerate(centers): ax2.scatter(c[0], c[1], marker="$%d$" % i, alpha=1, s=50, edgecolor="k") ax2.set_title("The visualization of the 
clustered data.") ax2.set_xlabel("Feature space for the 1st feature") ax2.set_ylabel("Feature space for the 2nd feature") plt.suptitle( "Silhouette analysis for KMeans clustering on sample data with n_clusters = %d" % n_clusters, fontsize=14, fontweight="bold", ) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/cluster/plot_linkage_comparison.py
examples/cluster/plot_linkage_comparison.py
""" ================================================================ Comparing different hierarchical linkage methods on toy datasets ================================================================ This example shows characteristics of different linkage methods for hierarchical clustering on datasets that are "interesting" but still in 2D. The main observations to make are: - single linkage is fast, and can perform well on non-globular data, but it performs poorly in the presence of noise. - average and complete linkage perform well on cleanly separated globular clusters, but have mixed results otherwise. - Ward is the most effective method for noisy data. While these examples give some intuition about the algorithms, this intuition might not apply to very high dimensional data. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import time import warnings from itertools import cycle, islice import matplotlib.pyplot as plt import numpy as np from sklearn import cluster, datasets from sklearn.preprocessing import StandardScaler # %% # Generate datasets. 
We choose the size big enough to see the scalability # of the algorithms, but not too big to avoid too long running times n_samples = 1500 noisy_circles = datasets.make_circles( n_samples=n_samples, factor=0.5, noise=0.05, random_state=170 ) noisy_moons = datasets.make_moons(n_samples=n_samples, noise=0.05, random_state=170) blobs = datasets.make_blobs(n_samples=n_samples, random_state=170) rng = np.random.RandomState(170) no_structure = rng.rand(n_samples, 2), None # Anisotropicly distributed data X, y = datasets.make_blobs(n_samples=n_samples, random_state=170) transformation = [[0.6, -0.6], [-0.4, 0.8]] X_aniso = np.dot(X, transformation) aniso = (X_aniso, y) # blobs with varied variances varied = datasets.make_blobs( n_samples=n_samples, cluster_std=[1.0, 2.5, 0.5], random_state=170 ) # %% # Run the clustering and plot # Set up cluster parameters plt.figure(figsize=(9 * 1.3 + 2, 14.5)) plt.subplots_adjust( left=0.02, right=0.98, bottom=0.001, top=0.96, wspace=0.05, hspace=0.01 ) plot_num = 1 default_base = {"n_neighbors": 10, "n_clusters": 3} datasets = [ (noisy_circles, {"n_clusters": 2}), (noisy_moons, {"n_clusters": 2}), (varied, {"n_neighbors": 2}), (aniso, {"n_neighbors": 2}), (blobs, {}), (no_structure, {}), ] for i_dataset, (dataset, algo_params) in enumerate(datasets): # update parameters with dataset-specific values params = default_base.copy() params.update(algo_params) X, y = dataset # normalize dataset for easier parameter selection X = StandardScaler().fit_transform(X) # ============ # Create cluster objects # ============ ward = cluster.AgglomerativeClustering( n_clusters=params["n_clusters"], linkage="ward" ) complete = cluster.AgglomerativeClustering( n_clusters=params["n_clusters"], linkage="complete" ) average = cluster.AgglomerativeClustering( n_clusters=params["n_clusters"], linkage="average" ) single = cluster.AgglomerativeClustering( n_clusters=params["n_clusters"], linkage="single" ) clustering_algorithms = ( ("Single Linkage", single), 
("Average Linkage", average), ("Complete Linkage", complete), ("Ward Linkage", ward), ) for name, algorithm in clustering_algorithms: t0 = time.time() # catch warnings related to kneighbors_graph with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message="the number of connected components of the " "connectivity matrix is [0-9]{1,2}" " > 1. Completing it to avoid stopping the tree early.", category=UserWarning, ) algorithm.fit(X) t1 = time.time() if hasattr(algorithm, "labels_"): y_pred = algorithm.labels_.astype(int) else: y_pred = algorithm.predict(X) plt.subplot(len(datasets), len(clustering_algorithms), plot_num) if i_dataset == 0: plt.title(name, size=18) colors = np.array( list( islice( cycle( [ "#377eb8", "#ff7f00", "#4daf4a", "#f781bf", "#a65628", "#984ea3", "#999999", "#e41a1c", "#dede00", ] ), int(max(y_pred) + 1), ) ) ) plt.scatter(X[:, 0], X[:, 1], s=10, color=colors[y_pred]) plt.xlim(-2.5, 2.5) plt.ylim(-2.5, 2.5) plt.xticks(()) plt.yticks(()) plt.text( 0.99, 0.01, ("%.2fs" % (t1 - t0)).lstrip("0"), transform=plt.gca().transAxes, size=15, horizontalalignment="right", ) plot_num += 1 plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/cluster/plot_segmentation_toy.py
examples/cluster/plot_segmentation_toy.py
""" =========================================== Spectral clustering for image segmentation =========================================== In this example, an image with connected circles is generated and spectral clustering is used to separate the circles. In these settings, the :ref:`spectral_clustering` approach solves the problem know as 'normalized graph cuts': the image is seen as a graph of connected voxels, and the spectral clustering algorithm amounts to choosing graph cuts defining regions while minimizing the ratio of the gradient along the cut, and the volume of the region. As the algorithm tries to balance the volume (ie balance the region sizes), if we take circles with different sizes, the segmentation fails. In addition, as there is no useful information in the intensity of the image, or its gradient, we choose to perform the spectral clustering on a graph that is only weakly informed by the gradient. This is close to performing a Voronoi partition of the graph. In addition, we use the mask of the objects to restrict the graph to the outline of the objects. In this example, we are interested in separating the objects one from the other, and not from the background. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Generate the data # ----------------- import numpy as np l = 100 x, y = np.indices((l, l)) center1 = (28, 24) center2 = (40, 50) center3 = (67, 58) center4 = (24, 70) radius1, radius2, radius3, radius4 = 16, 14, 15, 14 circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1**2 circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2**2 circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3**2 circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4**2 # %% # Plotting four circles # --------------------- img = circle1 + circle2 + circle3 + circle4 # We use a mask that limits to the foreground: the problem that we are # interested in here is not separating the objects from the background, # but separating them one from the other. mask = img.astype(bool) img = img.astype(float) img += 1 + 0.2 * np.random.randn(*img.shape) # %% # Convert the image into a graph with the value of the gradient on the # edges. from sklearn.feature_extraction import image graph = image.img_to_graph(img, mask=mask) # %% # Take a decreasing function of the gradient resulting in a segmentation # that is close to a Voronoi partition graph.data = np.exp(-graph.data / graph.data.std()) # %% # Here we perform spectral clustering using the arpack solver since amg is # numerically unstable on this example. We then plot the results. import matplotlib.pyplot as plt from sklearn.cluster import spectral_clustering labels = spectral_clustering(graph, n_clusters=4, eigen_solver="arpack") label_im = np.full(mask.shape, -1.0) label_im[mask] = labels fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(10, 5)) axs[0].matshow(img) axs[1].matshow(label_im) plt.show() # %% # Plotting two circles # -------------------- # Here we repeat the above process but only consider the first two circles # we generated. 
Note that this results in a cleaner separation between the # circles as the region sizes are easier to balance in this case. img = circle1 + circle2 mask = img.astype(bool) img = img.astype(float) img += 1 + 0.2 * np.random.randn(*img.shape) graph = image.img_to_graph(img, mask=mask) graph.data = np.exp(-graph.data / graph.data.std()) labels = spectral_clustering(graph, n_clusters=2, eigen_solver="arpack") label_im = np.full(mask.shape, -1.0) label_im[mask] = labels fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(10, 5)) axs[0].matshow(img) axs[1].matshow(label_im) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/cluster/plot_ward_structured_vs_unstructured.py
examples/cluster/plot_ward_structured_vs_unstructured.py
""" =================================================== Hierarchical clustering with and without structure =================================================== This example demonstrates hierarchical clustering with and without connectivity constraints. It shows the effect of imposing a connectivity graph to capture local structure in the data. Without connectivity constraints, the clustering is based purely on distance, while with constraints, the clustering respects local structure. For more information, see :ref:`hierarchical_clustering`. There are two advantages of imposing connectivity. First, clustering with sparse connectivity matrices is faster in general. Second, when using a connectivity matrix, single, average and complete linkage are unstable and tend to create a few clusters that grow very quickly. Indeed, average and complete linkage fight this percolation behavior by considering all the distances between two clusters when merging them (while single linkage exaggerates the behaviour by considering only the shortest distance between clusters). The connectivity graph breaks this mechanism for average and complete linkage, making them resemble the more brittle single linkage. This effect is more pronounced for very sparse graphs (try decreasing the number of neighbors in `kneighbors_graph`) and with complete linkage. In particular, having a very small number of neighbors in the graph, imposes a geometry that is close to that of single linkage, which is well known to have this percolation instability. The effect of imposing connectivity is illustrated on two different but similar datasets which show a spiral structure. In the first example we build a Swiss roll dataset and run hierarchical clustering on the position of the data. Here, we compare unstructured Ward clustering with a structured variant that enforces k-Nearest Neighbors connectivity. 
In the second example we include the effects of applying a such a connectivity graph to single, average and complete linkage. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Generate the Swiss Roll dataset. # -------------------------------- import time from sklearn.cluster import AgglomerativeClustering from sklearn.datasets import make_swiss_roll n_samples = 1500 noise = 0.05 X1, _ = make_swiss_roll(n_samples, noise=noise) X1[:, 1] *= 0.5 # Make the roll thinner # %% # Compute clustering without connectivity constraints # --------------------------------------------------- print("Compute unstructured hierarchical clustering...") st = time.time() ward_unstructured = AgglomerativeClustering(n_clusters=6, linkage="ward").fit(X1) elapsed_time_unstructured = time.time() - st label_unstructured = ward_unstructured.labels_ print(f"Elapsed time: {elapsed_time_unstructured:.2f}s") print(f"Number of points: {label_unstructured.size}") # %% # Plot unstructured clustering result import matplotlib.pyplot as plt import numpy as np fig1 = plt.figure() ax1 = fig1.add_subplot(111, projection="3d", elev=7, azim=-80) ax1.set_position([0, 0, 0.95, 1]) for l in np.unique(label_unstructured): ax1.scatter( X1[label_unstructured == l, 0], X1[label_unstructured == l, 1], X1[label_unstructured == l, 2], color=plt.cm.jet(float(l) / np.max(label_unstructured + 1)), s=20, edgecolor="k", ) _ = fig1.suptitle( f"Without connectivity constraints (time {elapsed_time_unstructured:.2f}s)" ) # %% # Compute clustering with connectivity constraints # ------------------------------------------------ from sklearn.neighbors import kneighbors_graph connectivity = kneighbors_graph(X1, n_neighbors=10, include_self=False) print("Compute structured hierarchical clustering...") st = time.time() ward_structured = AgglomerativeClustering( n_clusters=6, connectivity=connectivity, linkage="ward" ).fit(X1) elapsed_time_structured = time.time() - st label_structured = 
ward_structured.labels_ print(f"Elapsed time: {elapsed_time_structured:.2f}s") print(f"Number of points: {label_structured.size}") # %% # Plot structured clustering result fig2 = plt.figure() ax2 = fig2.add_subplot(111, projection="3d", elev=7, azim=-80) ax2.set_position([0, 0, 0.95, 1]) for l in np.unique(label_structured): ax2.scatter( X1[label_structured == l, 0], X1[label_structured == l, 1], X1[label_structured == l, 2], color=plt.cm.jet(float(l) / np.max(label_structured + 1)), s=20, edgecolor="k", ) _ = fig2.suptitle( f"With connectivity constraints (time {elapsed_time_structured:.2f}s)" ) # %% # Generate 2D spiral dataset. # --------------------------- n_samples = 1500 np.random.seed(0) t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples)) x = t * np.cos(t) y = t * np.sin(t) X2 = np.concatenate((x, y)) X2 += 0.7 * np.random.randn(2, n_samples) X2 = X2.T # %% # Capture local connectivity using a graph # ---------------------------------------- # Larger number of neighbors will give more homogeneous clusters to # the cost of computation time. A very large number of neighbors gives # more evenly distributed cluster sizes, but may not impose the local # manifold structure of the data. 
knn_graph = kneighbors_graph(X2, 30, include_self=False) # %% # Plot clustering with and without structure # ****************************************** fig3 = plt.figure(figsize=(8, 12)) subfigs = fig3.subfigures(4, 1) params = [ (None, 30), (None, 3), (knn_graph, 30), (knn_graph, 3), ] for subfig, (connectivity, n_clusters) in zip(subfigs, params): axs = subfig.subplots(1, 4, sharey=True) for index, linkage in enumerate(("average", "complete", "ward", "single")): model = AgglomerativeClustering( linkage=linkage, connectivity=connectivity, n_clusters=n_clusters ) t0 = time.time() model.fit(X2) elapsed_time = time.time() - t0 axs[index].scatter( X2[:, 0], X2[:, 1], c=model.labels_, cmap=plt.cm.nipy_spectral ) axs[index].set_title( "linkage=%s\n(time %.2fs)" % (linkage, elapsed_time), fontdict=dict(verticalalignment="top"), ) axs[index].set_aspect("equal") axs[index].axis("off") subfig.subplots_adjust(bottom=0, top=0.83, wspace=0, left=0, right=1) subfig.suptitle( "n_cluster=%i, connectivity=%r" % (n_clusters, connectivity is not None), size=17, ) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/cluster/plot_digits_linkage.py
examples/cluster/plot_digits_linkage.py
""" ============================================================================= Various Agglomerative Clustering on a 2D embedding of digits ============================================================================= An illustration of various linkage option for agglomerative clustering on a 2D embedding of the digits dataset. The goal of this example is to show intuitively how the metrics behave, and not to find good clusters for the digits. This is why the example works on a 2D embedding. What this example shows us is the behavior "rich getting richer" of agglomerative clustering that tends to create uneven cluster sizes. This behavior is pronounced for the average linkage strategy, that ends up with a couple of clusters with few datapoints. The case of single linkage is even more pathologic with a very large cluster covering most digits, an intermediate size (clean) cluster with most zero digits and all other clusters being drawn from noise points around the fringes. The other linkage strategies lead to more evenly distributed clusters that are therefore likely to be less sensible to a random resampling of the dataset. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from time import time import numpy as np from matplotlib import pyplot as plt from sklearn import datasets, manifold digits = datasets.load_digits() X, y = digits.data, digits.target n_samples, n_features = X.shape np.random.seed(0) # ---------------------------------------------------------------------- # Visualize the clustering def plot_clustering(X_red, labels, title=None): x_min, x_max = np.min(X_red, axis=0), np.max(X_red, axis=0) X_red = (X_red - x_min) / (x_max - x_min) plt.figure(figsize=(6, 4)) for digit in digits.target_names: plt.scatter( *X_red[y == digit].T, marker=f"${digit}$", s=50, c=plt.cm.nipy_spectral(labels[y == digit] / 10), alpha=0.5, ) plt.xticks([]) plt.yticks([]) if title is not None: plt.title(title, size=17) plt.axis("off") plt.tight_layout(rect=[0, 0.03, 1, 0.95]) # ---------------------------------------------------------------------- # 2D embedding of the digits dataset print("Computing embedding") X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X) print("Done.") from sklearn.cluster import AgglomerativeClustering for linkage in ("ward", "average", "complete", "single"): clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10) t0 = time() clustering.fit(X_red) print("%s :\t%.2fs" % (linkage, time() - t0)) plot_clustering(X_red, clustering.labels_, "%s linkage" % linkage) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/cluster/plot_hdbscan.py
examples/cluster/plot_hdbscan.py
# -*- coding: utf-8 -*- """ ==================================== Demo of HDBSCAN clustering algorithm ==================================== .. currentmodule:: sklearn In this demo we will take a look at :class:`cluster.HDBSCAN` from the perspective of generalizing the :class:`cluster.DBSCAN` algorithm. We'll compare both algorithms on specific datasets. Finally we'll evaluate HDBSCAN's sensitivity to certain hyperparameters. We first define a couple utility functions for convenience. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% import matplotlib.pyplot as plt import numpy as np from sklearn.cluster import DBSCAN, HDBSCAN from sklearn.datasets import make_blobs def plot(X, labels, probabilities=None, parameters=None, ground_truth=False, ax=None): if ax is None: _, ax = plt.subplots(figsize=(10, 4)) labels = labels if labels is not None else np.ones(X.shape[0]) probabilities = probabilities if probabilities is not None else np.ones(X.shape[0]) # Black removed and is used for noise instead. unique_labels = set(labels) colors = [plt.cm.Spectral(each) for each in np.linspace(0, 1, len(unique_labels))] # The probability of a point belonging to its labeled cluster determines # the size of its marker proba_map = {idx: probabilities[idx] for idx in range(len(labels))} for k, col in zip(unique_labels, colors): if k == -1: # Black used for noise. 
col = [0, 0, 0, 1] class_index = (labels == k).nonzero()[0] for ci in class_index: ax.plot( X[ci, 0], X[ci, 1], "x" if k == -1 else "o", markerfacecolor=tuple(col), markeredgecolor="k", markersize=4 if k == -1 else 1 + 5 * proba_map[ci], ) n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0) preamble = "True" if ground_truth else "Estimated" title = f"{preamble} number of clusters: {n_clusters_}" if parameters is not None: parameters_str = ", ".join(f"{k}={v}" for k, v in parameters.items()) title += f" | {parameters_str}" ax.set_title(title) plt.tight_layout() # %% # Generate sample data # -------------------- # One of the greatest advantages of HDBSCAN over DBSCAN is its out-of-the-box # robustness. It's especially remarkable on heterogeneous mixtures of data. # Like DBSCAN, it can model arbitrary shapes and distributions, however unlike # DBSCAN it does not require specification of an arbitrary and sensitive # `eps` hyperparameter. # # For example, below we generate a dataset from a mixture of three bi-dimensional # and isotropic Gaussian distributions. centers = [[1, 1], [-1, -1], [1.5, -1.5]] X, labels_true = make_blobs( n_samples=750, centers=centers, cluster_std=[0.4, 0.1, 0.75], random_state=0 ) plot(X, labels=labels_true, ground_truth=True) # %% # Scale Invariance # ----------------- # It's worth remembering that, while DBSCAN provides a default value for `eps` # parameter, it hardly has a proper default value and must be tuned for the # specific dataset at use. # # As a simple demonstration, consider the clustering for a `eps` value tuned # for one dataset, and clustering obtained with the same value but applied to # rescaled versions of the dataset. 
fig, axes = plt.subplots(3, 1, figsize=(10, 12)) dbs = DBSCAN(eps=0.3) for idx, scale in enumerate([1, 0.5, 3]): dbs.fit(X * scale) plot(X * scale, dbs.labels_, parameters={"scale": scale, "eps": 0.3}, ax=axes[idx]) # %% # Indeed, in order to maintain the same results we would have to scale `eps` by # the same factor. fig, axis = plt.subplots(1, 1, figsize=(12, 5)) dbs = DBSCAN(eps=0.9).fit(3 * X) plot(3 * X, dbs.labels_, parameters={"scale": 3, "eps": 0.9}, ax=axis) # %% # While standardizing data (e.g. using # :class:`sklearn.preprocessing.StandardScaler`) helps mitigate this problem, # great care must be taken to select the appropriate value for `eps`. # # HDBSCAN is much more robust in this sense: HDBSCAN can be seen as # clustering over all possible values of `eps` and extracting the best # clusters from all possible clusters (see :ref:`User Guide <HDBSCAN>`). # One immediate advantage is that HDBSCAN is scale-invariant. fig, axes = plt.subplots(3, 1, figsize=(10, 12)) hdb = HDBSCAN(copy=True) for idx, scale in enumerate([1, 0.5, 3]): hdb.fit(X * scale) plot( X * scale, hdb.labels_, hdb.probabilities_, ax=axes[idx], parameters={"scale": scale}, ) # %% # Multi-Scale Clustering # ---------------------- # HDBSCAN is much more than scale invariant though -- it is capable of # multi-scale clustering, which accounts for clusters with varying density. # Traditional DBSCAN assumes that any potential clusters are homogeneous in # density. HDBSCAN is free from such constraints. 
To demonstrate this we # consider the following dataset centers = [[-0.85, -0.85], [-0.85, 0.85], [3, 3], [3, -3]] X, labels_true = make_blobs( n_samples=750, centers=centers, cluster_std=[0.2, 0.35, 1.35, 1.35], random_state=0 ) plot(X, labels=labels_true, ground_truth=True) # %% # This dataset is more difficult for DBSCAN due to the varying densities and # spatial separation: # # - If `eps` is too large then we risk falsely clustering the two dense # clusters as one since their mutual reachability will extend # clusters. # - If `eps` is too small, then we risk fragmenting the sparser clusters # into many false clusters. # # Not to mention this requires manually tuning choices of `eps` until we # find a tradeoff that we are comfortable with. fig, axes = plt.subplots(2, 1, figsize=(10, 8)) params = {"eps": 0.7} dbs = DBSCAN(**params).fit(X) plot(X, dbs.labels_, parameters=params, ax=axes[0]) params = {"eps": 0.3} dbs = DBSCAN(**params).fit(X) plot(X, dbs.labels_, parameters=params, ax=axes[1]) # %% # To properly cluster the two dense clusters, we would need a smaller value of # epsilon, however at `eps=0.3` we are already fragmenting the sparse clusters, # which would only become more severe as we decrease epsilon. Indeed it seems # that DBSCAN is incapable of simultaneously separating the two dense clusters # while preventing the sparse clusters from fragmenting. Let's compare with # HDBSCAN. hdb = HDBSCAN(copy=True).fit(X) plot(X, hdb.labels_, hdb.probabilities_) # %% # HDBSCAN is able to adapt to the multi-scale structure of the dataset without # requiring parameter tuning. While any sufficiently interesting dataset will # require tuning, this case demonstrates that HDBSCAN can yield qualitatively # better classes of clusterings without users' intervention which are # inaccessible via DBSCAN. 
# %% # Hyperparameter Robustness # ------------------------- # Ultimately tuning will be an important step in any real world application, so # let's take a look at some of the most important hyperparameters for HDBSCAN. # While HDBSCAN is free from the `eps` parameter of DBSCAN, it does still have # some hyperparameters like `min_cluster_size` and `min_samples` which tune its # results regarding density. We will however see that HDBSCAN is relatively robust # to various real world examples thanks to those parameters whose clear meaning # helps tuning them. # # `min_cluster_size` # ^^^^^^^^^^^^^^^^^^ # `min_cluster_size` is the minimum number of samples in a group for that # group to be considered a cluster. # # Clusters smaller than the ones of this size will be left as noise. # The default value is 5. This parameter is generally tuned to # larger values as needed. Smaller values will likely to lead to results with # fewer points labeled as noise. However values which too small will lead to # false sub-clusters being picked up and preferred. Larger values tend to be # more robust with respect to noisy datasets, e.g. high-variance clusters with # significant overlap. PARAM = ({"min_cluster_size": 5}, {"min_cluster_size": 3}, {"min_cluster_size": 25}) fig, axes = plt.subplots(3, 1, figsize=(10, 12)) for i, param in enumerate(PARAM): hdb = HDBSCAN(copy=True, **param).fit(X) labels = hdb.labels_ plot(X, labels, hdb.probabilities_, param, ax=axes[i]) # %% # `min_samples` # ^^^^^^^^^^^^^ # `min_samples` is the number of samples in a neighborhood for a point to # be considered as a core point, including the point itself. # `min_samples` defaults to `min_cluster_size`. # Similarly to `min_cluster_size`, larger values for `min_samples` increase # the model's robustness to noise, but risks ignoring or discarding # potentially valid but small clusters. # `min_samples` better be tuned after finding a good value for `min_cluster_size`. 
PARAM = ( {"min_cluster_size": 20, "min_samples": 5}, {"min_cluster_size": 20, "min_samples": 3}, {"min_cluster_size": 20, "min_samples": 25}, ) fig, axes = plt.subplots(3, 1, figsize=(10, 12)) for i, param in enumerate(PARAM): hdb = HDBSCAN(copy=True, **param).fit(X) labels = hdb.labels_ plot(X, labels, hdb.probabilities_, param, ax=axes[i]) # %% # `dbscan_clustering` # ^^^^^^^^^^^^^^^^^^^ # During `fit`, `HDBSCAN` builds a single-linkage tree which encodes the # clustering of all points across all values of :class:`~cluster.DBSCAN`'s # `eps` parameter. # We can thus plot and evaluate these clusterings efficiently without fully # recomputing intermediate values such as core-distances, mutual-reachability, # and the minimum spanning tree. All we need to do is specify the `cut_distance` # (equivalent to `eps`) we want to cluster with. PARAM = ( {"cut_distance": 0.1}, {"cut_distance": 0.5}, {"cut_distance": 1.0}, ) hdb = HDBSCAN(copy=True) hdb.fit(X) fig, axes = plt.subplots(len(PARAM), 1, figsize=(10, 12)) for i, param in enumerate(PARAM): labels = hdb.dbscan_clustering(**param) plot(X, labels, hdb.probabilities_, param, ax=axes[i])
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/cluster/plot_mean_shift.py
examples/cluster/plot_mean_shift.py
""" ============================================= A demo of the mean-shift clustering algorithm ============================================= Reference: Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward feature space analysis". IEEE Transactions on Pattern Analysis and Machine Intelligence. 2002. pp. 603-619. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import numpy as np from sklearn.cluster import MeanShift, estimate_bandwidth from sklearn.datasets import make_blobs # %% # Generate sample data # -------------------- centers = [[1, 1], [-1, -1], [1, -1]] X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6) # %% # Compute clustering with MeanShift # --------------------------------- # The following bandwidth can be automatically detected using bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500) ms = MeanShift(bandwidth=bandwidth, bin_seeding=True) ms.fit(X) labels = ms.labels_ cluster_centers = ms.cluster_centers_ labels_unique = np.unique(labels) n_clusters_ = len(labels_unique) print("number of estimated clusters : %d" % n_clusters_) # %% # Plot result # ----------- import matplotlib.pyplot as plt plt.figure(1) plt.clf() colors = ["#dede00", "#377eb8", "#f781bf"] markers = ["x", "o", "^"] for k, col in zip(range(n_clusters_), colors): my_members = labels == k cluster_center = cluster_centers[k] plt.plot(X[my_members, 0], X[my_members, 1], markers[k], color=col) plt.plot( cluster_center[0], cluster_center[1], markers[k], markerfacecolor=col, markeredgecolor="k", markersize=14, ) plt.title("Estimated number of clusters: %d" % n_clusters_) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/cluster/plot_dict_face_patches.py
examples/cluster/plot_dict_face_patches.py
""" Online learning of a dictionary of parts of faces ================================================= This example uses a large dataset of faces to learn a set of 20 x 20 images patches that constitute faces. From the programming standpoint, it is interesting because it shows how to use the online API of the scikit-learn to process a very large dataset by chunks. The way we proceed is that we load an image at a time and extract randomly 50 patches from this image. Once we have accumulated 500 of these patches (using 10 images), we run the :func:`~sklearn.cluster.MiniBatchKMeans.partial_fit` method of the online KMeans object, MiniBatchKMeans. The verbose setting on the MiniBatchKMeans enables us to see that some clusters are reassigned during the successive calls to partial-fit. This is because the number of patches that they represent has become too low, and it is better to choose a random new cluster. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Load the data # ------------- from sklearn import datasets faces = datasets.fetch_olivetti_faces() # %% # Learn the dictionary of images # ------------------------------ import time import numpy as np from sklearn.cluster import MiniBatchKMeans from sklearn.feature_extraction.image import extract_patches_2d print("Learning the dictionary... 
") rng = np.random.RandomState(0) kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True, n_init=3) patch_size = (20, 20) buffer = [] t0 = time.time() # The online learning part: cycle over the whole dataset 6 times index = 0 for _ in range(6): for img in faces.images: data = extract_patches_2d(img, patch_size, max_patches=50, random_state=rng) data = np.reshape(data, (len(data), -1)) buffer.append(data) index += 1 if index % 10 == 0: data = np.concatenate(buffer, axis=0) data -= np.mean(data, axis=0) data /= np.std(data, axis=0) kmeans.partial_fit(data) buffer = [] if index % 100 == 0: print("Partial fit of %4i out of %i" % (index, 6 * len(faces.images))) dt = time.time() - t0 print("done in %.2fs." % dt) # %% # Plot the results # ---------------- import matplotlib.pyplot as plt plt.figure(figsize=(4.2, 4)) for i, patch in enumerate(kmeans.cluster_centers_): plt.subplot(9, 9, i + 1) plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray, interpolation="nearest") plt.xticks(()) plt.yticks(()) plt.suptitle( "Patches of faces\nTrain time %.1fs on %d patches" % (dt, 8 * len(faces.images)), fontsize=16, ) plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/cluster/plot_optics.py
examples/cluster/plot_optics.py
""" =================================== Demo of OPTICS clustering algorithm =================================== .. currentmodule:: sklearn Finds core samples of high density and expands clusters from them. This example uses data that is generated so that the clusters have different densities. The :class:`~cluster.OPTICS` is first used with its Xi cluster detection method, and then setting specific thresholds on the reachability, which corresponds to :class:`~cluster.DBSCAN`. We can see that the different clusters of OPTICS's Xi method can be recovered with different choices of thresholds in DBSCAN. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.gridspec as gridspec import matplotlib.pyplot as plt import numpy as np from sklearn.cluster import OPTICS, cluster_optics_dbscan # Generate sample data np.random.seed(0) n_points_per_cluster = 250 C1 = [-5, -2] + 0.8 * np.random.randn(n_points_per_cluster, 2) C2 = [4, -1] + 0.1 * np.random.randn(n_points_per_cluster, 2) C3 = [1, -2] + 0.2 * np.random.randn(n_points_per_cluster, 2) C4 = [-2, 3] + 0.3 * np.random.randn(n_points_per_cluster, 2) C5 = [3, -2] + 1.6 * np.random.randn(n_points_per_cluster, 2) C6 = [5, 6] + 2 * np.random.randn(n_points_per_cluster, 2) X = np.vstack((C1, C2, C3, C4, C5, C6)) clust = OPTICS(min_samples=50, xi=0.05, min_cluster_size=0.05) # Run the fit clust.fit(X) labels_050 = cluster_optics_dbscan( reachability=clust.reachability_, core_distances=clust.core_distances_, ordering=clust.ordering_, eps=0.5, ) labels_200 = cluster_optics_dbscan( reachability=clust.reachability_, core_distances=clust.core_distances_, ordering=clust.ordering_, eps=2, ) space = np.arange(len(X)) reachability = clust.reachability_[clust.ordering_] labels = clust.labels_[clust.ordering_] plt.figure(figsize=(10, 7)) G = gridspec.GridSpec(2, 3) ax1 = plt.subplot(G[0, :]) ax2 = plt.subplot(G[1, 0]) ax3 = plt.subplot(G[1, 1]) ax4 = plt.subplot(G[1, 2]) # Reachability plot 
colors = ["g.", "r.", "b.", "y.", "c."] for klass, color in enumerate(colors): Xk = space[labels == klass] Rk = reachability[labels == klass] ax1.plot(Xk, Rk, color, alpha=0.3) ax1.plot(space[labels == -1], reachability[labels == -1], "k.", alpha=0.3) ax1.plot(space, np.full_like(space, 2.0, dtype=float), "k-", alpha=0.5) ax1.plot(space, np.full_like(space, 0.5, dtype=float), "k-.", alpha=0.5) ax1.set_ylabel("Reachability (epsilon distance)") ax1.set_title("Reachability Plot") # OPTICS colors = ["g.", "r.", "b.", "y.", "c."] for klass, color in enumerate(colors): Xk = X[clust.labels_ == klass] ax2.plot(Xk[:, 0], Xk[:, 1], color, alpha=0.3) ax2.plot(X[clust.labels_ == -1, 0], X[clust.labels_ == -1, 1], "k+", alpha=0.1) ax2.set_title("Automatic Clustering\nOPTICS") # DBSCAN at 0.5 colors = ["g.", "r.", "b.", "c."] for klass, color in enumerate(colors): Xk = X[labels_050 == klass] ax3.plot(Xk[:, 0], Xk[:, 1], color, alpha=0.3) ax3.plot(X[labels_050 == -1, 0], X[labels_050 == -1, 1], "k+", alpha=0.1) ax3.set_title("Clustering at 0.5 epsilon cut\nDBSCAN") # DBSCAN at 2. colors = ["g.", "m.", "y.", "c."] for klass, color in enumerate(colors): Xk = X[labels_200 == klass] ax4.plot(Xk[:, 0], Xk[:, 1], color, alpha=0.3) ax4.plot(X[labels_200 == -1, 0], X[labels_200 == -1, 1], "k+", alpha=0.1) ax4.set_title("Clustering at 2.0 epsilon cut\nDBSCAN") plt.tight_layout() plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/cluster/plot_inductive_clustering.py
examples/cluster/plot_inductive_clustering.py
""" ==================== Inductive Clustering ==================== Clustering can be expensive, especially when our dataset contains millions of datapoints. Many clustering algorithms are not :term:`inductive` and so cannot be directly applied to new data samples without recomputing the clustering, which may be intractable. Instead, we can use clustering to then learn an inductive model with a classifier, which has several benefits: - it allows the clusters to scale and apply to new data - unlike re-fitting the clusters to new samples, it makes sure the labelling procedure is consistent over time - it allows us to use the inferential capabilities of the classifier to describe or explain the clusters This example illustrates a generic implementation of a meta-estimator which extends clustering by inducing a classifier from the cluster labels. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt from sklearn.base import BaseEstimator, clone from sklearn.cluster import AgglomerativeClustering from sklearn.datasets import make_blobs from sklearn.ensemble import RandomForestClassifier from sklearn.inspection import DecisionBoundaryDisplay from sklearn.utils.metaestimators import available_if from sklearn.utils.validation import check_is_fitted N_SAMPLES = 5000 RANDOM_STATE = 42 def _classifier_has(attr): """Check if we can delegate a method to the underlying classifier. First, we check the first fitted classifier if available, otherwise we check the unfitted classifier. 
""" return lambda estimator: ( hasattr(estimator.classifier_, attr) if hasattr(estimator, "classifier_") else hasattr(estimator.classifier, attr) ) class InductiveClusterer(BaseEstimator): def __init__(self, clusterer, classifier): self.clusterer = clusterer self.classifier = classifier def fit(self, X, y=None): self.clusterer_ = clone(self.clusterer) self.classifier_ = clone(self.classifier) y = self.clusterer_.fit_predict(X) self.classifier_.fit(X, y) return self @available_if(_classifier_has("predict")) def predict(self, X): check_is_fitted(self) return self.classifier_.predict(X) @available_if(_classifier_has("decision_function")) def decision_function(self, X): check_is_fitted(self) return self.classifier_.decision_function(X) def plot_scatter(X, color, alpha=0.5): return plt.scatter(X[:, 0], X[:, 1], c=color, alpha=alpha, edgecolor="k") # Generate some training data from clustering X, y = make_blobs( n_samples=N_SAMPLES, cluster_std=[1.0, 1.0, 0.5], centers=[(-5, -5), (0, 0), (5, 5)], random_state=RANDOM_STATE, ) # Train a clustering algorithm on the training data and get the cluster labels clusterer = AgglomerativeClustering(n_clusters=3) cluster_labels = clusterer.fit_predict(X) plt.figure(figsize=(12, 4)) plt.subplot(131) plot_scatter(X, cluster_labels) plt.title("Ward Linkage") # Generate new samples and plot them along with the original dataset X_new, y_new = make_blobs( n_samples=10, centers=[(-7, -1), (-2, 4), (3, 6)], random_state=RANDOM_STATE ) plt.subplot(132) plot_scatter(X, cluster_labels) plot_scatter(X_new, "black", 1) plt.title("Unknown instances") # Declare the inductive learning model that it will be used to # predict cluster membership for unknown instances classifier = RandomForestClassifier(random_state=RANDOM_STATE) inductive_learner = InductiveClusterer(clusterer, classifier).fit(X) probable_clusters = inductive_learner.predict(X_new) ax = plt.subplot(133) plot_scatter(X, cluster_labels) plot_scatter(X_new, probable_clusters) # Plotting 
decision regions DecisionBoundaryDisplay.from_estimator( inductive_learner, X, response_method="predict", alpha=0.4, ax=ax ) plt.title("Classify unknown instances") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/cluster/plot_kmeans_plusplus.py
examples/cluster/plot_kmeans_plusplus.py
""" =========================================================== An example of K-Means++ initialization =========================================================== An example to show the output of the :func:`sklearn.cluster.kmeans_plusplus` function for generating initial seeds for clustering. K-Means++ is used as the default initialization for :ref:`k_means`. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt from sklearn.cluster import kmeans_plusplus from sklearn.datasets import make_blobs # Generate sample data n_samples = 4000 n_components = 4 X, y_true = make_blobs( n_samples=n_samples, centers=n_components, cluster_std=0.60, random_state=0 ) X = X[:, ::-1] # Calculate seeds from k-means++ centers_init, indices = kmeans_plusplus(X, n_clusters=4, random_state=0) # Plot init seeds along side sample data plt.figure(1) colors = ["#4EACC5", "#FF9C34", "#4E9A06", "m"] for k, col in enumerate(colors): cluster_data = y_true == k plt.scatter(X[cluster_data, 0], X[cluster_data, 1], c=col, marker=".", s=10) plt.scatter(centers_init[:, 0], centers_init[:, 1], c="b", s=50) plt.title("K-Means++ Initialization") plt.xticks([]) plt.yticks([]) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/cluster/plot_kmeans_stability_low_dim_dense.py
examples/cluster/plot_kmeans_stability_low_dim_dense.py
""" ============================================================ Empirical evaluation of the impact of k-means initialization ============================================================ Evaluate the ability of k-means initializations strategies to make the algorithm convergence robust, as measured by the relative standard deviation of the inertia of the clustering (i.e. the sum of squared distances to the nearest cluster center). The first plot shows the best inertia reached for each combination of the model (``KMeans`` or ``MiniBatchKMeans``), and the init method (``init="random"`` or ``init="k-means++"``) for increasing values of the ``n_init`` parameter that controls the number of initializations. The second plot demonstrates one single run of the ``MiniBatchKMeans`` estimator using a ``init="random"`` and ``n_init=1``. This run leads to a bad convergence (local optimum), with estimated centers stuck between ground truth clusters. The dataset used for evaluation is a 2D grid of isotropic Gaussian clusters widely spaced. 
""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.cm as cm import matplotlib.pyplot as plt import numpy as np from sklearn.cluster import KMeans, MiniBatchKMeans from sklearn.utils import check_random_state, shuffle random_state = np.random.RandomState(0) # Number of run (with randomly generated dataset) for each strategy so as # to be able to compute an estimate of the standard deviation n_runs = 5 # k-means models can do several random inits so as to be able to trade # CPU time for convergence robustness n_init_range = np.array([1, 5, 10, 15, 20]) # Datasets generation parameters n_samples_per_center = 100 grid_size = 3 scale = 0.1 n_clusters = grid_size**2 def make_data(random_state, n_samples_per_center, grid_size, scale): random_state = check_random_state(random_state) centers = np.array([[i, j] for i in range(grid_size) for j in range(grid_size)]) n_clusters_true, n_features = centers.shape noise = random_state.normal( scale=scale, size=(n_samples_per_center, centers.shape[1]) ) X = np.concatenate([c + noise for c in centers]) y = np.concatenate([[i] * n_samples_per_center for i in range(n_clusters_true)]) return shuffle(X, y, random_state=random_state) # Part 1: Quantitative evaluation of various init methods plt.figure() plots = [] legends = [] cases = [ (KMeans, "k-means++", {}, "^-"), (KMeans, "random", {}, "o-"), (MiniBatchKMeans, "k-means++", {"max_no_improvement": 3}, "x-"), (MiniBatchKMeans, "random", {"max_no_improvement": 3, "init_size": 500}, "d-"), ] for factory, init, params, format in cases: print("Evaluation of %s with %s init" % (factory.__name__, init)) inertia = np.empty((len(n_init_range), n_runs)) for run_id in range(n_runs): X, y = make_data(run_id, n_samples_per_center, grid_size, scale) for i, n_init in enumerate(n_init_range): km = factory( n_clusters=n_clusters, init=init, random_state=run_id, n_init=n_init, **params, ).fit(X) inertia[i, run_id] = km.inertia_ p = plt.errorbar( 
n_init_range, inertia.mean(axis=1), inertia.std(axis=1), fmt=format ) plots.append(p[0]) legends.append("%s with %s init" % (factory.__name__, init)) plt.xlabel("n_init") plt.ylabel("inertia") plt.legend(plots, legends) plt.title("Mean inertia for various k-means init across %d runs" % n_runs) # Part 2: Qualitative visual inspection of the convergence X, y = make_data(random_state, n_samples_per_center, grid_size, scale) km = MiniBatchKMeans( n_clusters=n_clusters, init="random", n_init=1, random_state=random_state ).fit(X) plt.figure() for k in range(n_clusters): my_members = km.labels_ == k color = cm.nipy_spectral(float(k) / n_clusters, 1) plt.plot(X[my_members, 0], X[my_members, 1], ".", c=color) cluster_center = km.cluster_centers_[k] plt.plot( cluster_center[0], cluster_center[1], "o", markerfacecolor=color, markeredgecolor="k", markersize=6, ) plt.title( "Example cluster allocation with a single random init\nwith MiniBatchKMeans" ) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/cluster/plot_mini_batch_kmeans.py
examples/cluster/plot_mini_batch_kmeans.py
"""
====================================================================
Comparison of the K-Means and MiniBatchKMeans clustering algorithms
====================================================================

We want to compare the performance of the MiniBatchKMeans and KMeans:
the MiniBatchKMeans is faster, but gives slightly different results (see
:ref:`mini_batch_kmeans`).

We will cluster a set of data, first with KMeans and then with
MiniBatchKMeans, and plot the results.
We will also plot the points that are labelled differently between the two
algorithms.
"""

# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause

# %%
# Generate the data
# -----------------
#
# We start by generating the blobs of data to be clustered.
import numpy as np

from sklearn.datasets import make_blobs

np.random.seed(0)

batch_size = 45
centers = [[1, 1], [-1, -1], [1, -1]]
n_clusters = len(centers)
# ``labels_true`` is the ground-truth assignment; it is kept for reference
# even though this comparison only uses the labels fitted by each model.
X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)

# %%
# Compute clustering with KMeans
# ------------------------------

import time

from sklearn.cluster import KMeans

k_means = KMeans(init="k-means++", n_clusters=3, n_init=10)
# ``perf_counter`` is a monotonic clock and the recommended way to measure
# short durations (``time.time`` can jump if the wall clock is adjusted).
t0 = time.perf_counter()
k_means.fit(X)
t_batch = time.perf_counter() - t0

# %%
# Compute clustering with MiniBatchKMeans
# ---------------------------------------

from sklearn.cluster import MiniBatchKMeans

mbk = MiniBatchKMeans(
    init="k-means++",
    n_clusters=3,
    batch_size=batch_size,
    n_init=10,
    max_no_improvement=10,
    verbose=0,
)
t0 = time.perf_counter()
mbk.fit(X)
t_mini_batch = time.perf_counter() - t0

# %%
# Establishing parity between clusters
# ------------------------------------
#
# We want to have the same color for the same cluster from both the
# MiniBatchKMeans and the KMeans algorithm. Let's pair the cluster centers per
# closest one.
from sklearn.metrics.pairwise import pairwise_distances_argmin

k_means_cluster_centers = k_means.cluster_centers_
order = pairwise_distances_argmin(k_means.cluster_centers_, mbk.cluster_centers_)
mbk_means_cluster_centers = mbk.cluster_centers_[order]

k_means_labels = pairwise_distances_argmin(X, k_means_cluster_centers)
mbk_means_labels = pairwise_distances_argmin(X, mbk_means_cluster_centers)

# %%
# Plotting the results
# --------------------

import matplotlib.pyplot as plt

fig = plt.figure(figsize=(8, 3))
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
colors = ["#4EACC5", "#FF9C34", "#4E9A06"]

# KMeans
ax = fig.add_subplot(1, 3, 1)
for k, col in zip(range(n_clusters), colors):
    my_members = k_means_labels == k
    cluster_center = k_means_cluster_centers[k]
    ax.plot(X[my_members, 0], X[my_members, 1], "w", markerfacecolor=col, marker=".")
    ax.plot(
        cluster_center[0],
        cluster_center[1],
        "o",
        markerfacecolor=col,
        markeredgecolor="k",
        markersize=6,
    )
ax.set_title("KMeans")
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, "train time: %.2fs\ninertia: %f" % (t_batch, k_means.inertia_))

# MiniBatchKMeans
ax = fig.add_subplot(1, 3, 2)
for k, col in zip(range(n_clusters), colors):
    my_members = mbk_means_labels == k
    cluster_center = mbk_means_cluster_centers[k]
    ax.plot(X[my_members, 0], X[my_members, 1], "w", markerfacecolor=col, marker=".")
    ax.plot(
        cluster_center[0],
        cluster_center[1],
        "o",
        markerfacecolor=col,
        markeredgecolor="k",
        markersize=6,
    )
ax.set_title("MiniBatchKMeans")
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, "train time: %.2fs\ninertia: %f" % (t_mini_batch, mbk.inertia_))

# Mask of points labelled differently by the two algorithms.
# NOTE: this was previously initialized as ``mbk_means_labels == 4``, which is
# all-False only because label 4 can never occur with 3 clusters. An explicit
# boolean array states the intent directly and stays correct if ``n_clusters``
# changes.
different = np.zeros(mbk_means_labels.shape, dtype=bool)

ax = fig.add_subplot(1, 3, 3)

for k in range(n_clusters):
    # ``+=`` on boolean arrays is an element-wise logical OR.
    different += (k_means_labels == k) != (mbk_means_labels == k)

identical = np.logical_not(different)
ax.plot(X[identical, 0], X[identical, 1], "w", markerfacecolor="#bbbbbb", marker=".")
ax.plot(X[different, 0], X[different, 1], "w", markerfacecolor="m", marker=".")
ax.set_title("Difference")
ax.set_xticks(())
ax.set_yticks(())

plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/cluster/plot_affinity_propagation.py
examples/cluster/plot_affinity_propagation.py
"""
=================================================
Demo of affinity propagation clustering algorithm
=================================================

Reference:
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""

# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause

import numpy as np

from sklearn import metrics
from sklearn.cluster import AffinityPropagation
from sklearn.datasets import make_blobs

# %%
# Generate sample data
# --------------------
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(
    n_samples=300, centers=centers, cluster_std=0.5, random_state=0
)

# %%
# Compute Affinity Propagation
# ----------------------------
# Fit the model; the exemplars (cluster centers) are actual samples of ``X``,
# identified by their row indices.
estimator = AffinityPropagation(preference=-50, random_state=0).fit(X)
exemplar_indices = estimator.cluster_centers_indices_
predicted = estimator.labels_

n_clusters_ = len(exemplar_indices)

# Report the number of clusters found plus supervised and unsupervised
# clustering quality scores.
print("Estimated number of clusters: %d" % n_clusters_)
homogeneity = metrics.homogeneity_score(labels_true, predicted)
print("Homogeneity: %0.3f" % homogeneity)
completeness = metrics.completeness_score(labels_true, predicted)
print("Completeness: %0.3f" % completeness)
v_measure = metrics.v_measure_score(labels_true, predicted)
print("V-measure: %0.3f" % v_measure)
rand_index = metrics.adjusted_rand_score(labels_true, predicted)
print("Adjusted Rand Index: %0.3f" % rand_index)
mutual_info = metrics.adjusted_mutual_info_score(labels_true, predicted)
print("Adjusted Mutual Information: %0.3f" % mutual_info)
silhouette = metrics.silhouette_score(X, predicted, metric="sqeuclidean")
print("Silhouette Coefficient: %0.3f" % silhouette)

# %%
# Plot result
# -----------
import matplotlib.pyplot as plt

plt.close("all")
plt.figure(1)
plt.clf()

palette = plt.cm.viridis(np.linspace(0, 1, 4))
colors = plt.cycler("color", palette)

for cluster_id, cycle_entry in zip(range(n_clusters_), colors):
    rgba = cycle_entry["color"]
    member_mask = predicted == cluster_id
    exemplar = X[exemplar_indices[cluster_id]]
    # Members of this cluster, then the exemplar on top of them.
    plt.scatter(X[member_mask, 0], X[member_mask, 1], color=rgba, marker=".")
    plt.scatter(exemplar[0], exemplar[1], s=14, color=rgba, marker="o")
    # Draw a segment from every member to its exemplar.
    for point in X[member_mask]:
        plt.plot([exemplar[0], point[0]], [exemplar[1], point[1]], color=rgba)

plt.title("Estimated number of clusters: %d" % n_clusters_)
plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false