Dataset schema (column name, dtype, observed range or number of distinct values):

  repo          stringlengths   7 to 90 characters
  file_url      stringlengths   81 to 315 characters
  file_path     stringlengths   4 to 228 characters
  content       stringlengths   0 to 32.8k characters
  language      stringclasses   1 value
  license       stringclasses   7 values
  commit_sha    stringlengths   40 to 40 characters
  retrieved_at  stringdate      2026-01-04 14:38:15 to 2026-01-05 02:33:18
  truncated     bool            2 classes
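Rows following this schema are listed below. As a minimal sketch of how such records could be consumed, assuming the dump is published as a Hugging Face dataset (the identifier "org/sklearn-examples" is a placeholder, not taken from this file), one might filter on the metadata columns before touching the `content` field:

# Hedged sketch: load and filter records with the schema above.
# "org/sklearn-examples" is a hypothetical dataset id; substitute the real one.
from datasets import load_dataset

ds = load_dataset("org/sklearn-examples", split="train")

# Keep complete Python files released under the BSD-3-Clause license.
subset = ds.filter(
    lambda row: row["language"] == "python"
    and row["license"] == "BSD-3-Clause"
    and not row["truncated"]
)
print(subset[0]["file_path"], len(subset[0]["content"]), "characters")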
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/cluster/plot_kmeans_assumptions.py
examples/cluster/plot_kmeans_assumptions.py
""" ==================================== Demonstration of k-means assumptions ==================================== This example is meant to illustrate situations where k-means produces unintuitive and possibly undesirable clusters. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Data generation # --------------- # # The function :func:`~sklearn.datasets.make_blobs` generates isotropic # (spherical) gaussian blobs. To obtain anisotropic (elliptical) gaussian blobs # one has to define a linear `transformation`. import numpy as np from sklearn.datasets import make_blobs n_samples = 1500 random_state = 170 transformation = [[0.60834549, -0.63667341], [-0.40887718, 0.85253229]] X, y = make_blobs(n_samples=n_samples, random_state=random_state) X_aniso = np.dot(X, transformation) # Anisotropic blobs X_varied, y_varied = make_blobs( n_samples=n_samples, cluster_std=[1.0, 2.5, 0.5], random_state=random_state ) # Unequal variance X_filtered = np.vstack( (X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]) ) # Unevenly sized blobs y_filtered = [0] * 500 + [1] * 100 + [2] * 10 # %% # We can visualize the resulting data: import matplotlib.pyplot as plt fig, axs = plt.subplots(nrows=2, ncols=2, figsize=(12, 12)) axs[0, 0].scatter(X[:, 0], X[:, 1], c=y) axs[0, 0].set_title("Mixture of Gaussian Blobs") axs[0, 1].scatter(X_aniso[:, 0], X_aniso[:, 1], c=y) axs[0, 1].set_title("Anisotropically Distributed Blobs") axs[1, 0].scatter(X_varied[:, 0], X_varied[:, 1], c=y_varied) axs[1, 0].set_title("Unequal Variance") axs[1, 1].scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_filtered) axs[1, 1].set_title("Unevenly Sized Blobs") plt.suptitle("Ground truth clusters").set_y(0.95) plt.show() # %% # Fit models and plot results # --------------------------- # # The previously generated data is now used to show how # :class:`~sklearn.cluster.KMeans` behaves in the following scenarios: # # - Non-optimal number of clusters: in a real setting there is no uniquely # defined **true** number of clusters. An appropriate number of clusters has # to be decided from data-based criteria and knowledge of the intended goal. # - Anisotropically distributed blobs: k-means consists of minimizing sample's # euclidean distances to the centroid of the cluster they are assigned to. As # a consequence, k-means is more appropriate for clusters that are isotropic # and normally distributed (i.e. spherical gaussians). # - Unequal variance: k-means is equivalent to taking the maximum likelihood # estimator for a "mixture" of k gaussian distributions with the same # variances but with possibly different means. # - Unevenly sized blobs: there is no theoretical result about k-means that # states that it requires similar cluster sizes to perform well, yet # minimizing euclidean distances does mean that the more sparse and # high-dimensional the problem is, the higher is the need to run the algorithm # with different centroid seeds to ensure a global minimal inertia. 
from sklearn.cluster import KMeans common_params = { "n_init": "auto", "random_state": random_state, } fig, axs = plt.subplots(nrows=2, ncols=2, figsize=(12, 12)) y_pred = KMeans(n_clusters=2, **common_params).fit_predict(X) axs[0, 0].scatter(X[:, 0], X[:, 1], c=y_pred) axs[0, 0].set_title("Non-optimal Number of Clusters") y_pred = KMeans(n_clusters=3, **common_params).fit_predict(X_aniso) axs[0, 1].scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred) axs[0, 1].set_title("Anisotropically Distributed Blobs") y_pred = KMeans(n_clusters=3, **common_params).fit_predict(X_varied) axs[1, 0].scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred) axs[1, 0].set_title("Unequal Variance") y_pred = KMeans(n_clusters=3, **common_params).fit_predict(X_filtered) axs[1, 1].scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred) axs[1, 1].set_title("Unevenly Sized Blobs") plt.suptitle("Unexpected KMeans clusters").set_y(0.95) plt.show() # %% # Possible solutions # ------------------ # # For an example on how to find a correct number of blobs, see # :ref:`sphx_glr_auto_examples_cluster_plot_kmeans_silhouette_analysis.py`. # In this case it suffices to set `n_clusters=3`. y_pred = KMeans(n_clusters=3, **common_params).fit_predict(X) plt.scatter(X[:, 0], X[:, 1], c=y_pred) plt.title("Optimal Number of Clusters") plt.show() # %% # To deal with unevenly sized blobs one can increase the number of random # initializations. In this case we set `n_init=10` to avoid finding a # sub-optimal local minimum. For more details see :ref:`kmeans_sparse_high_dim`. y_pred = KMeans(n_clusters=3, n_init=10, random_state=random_state).fit_predict( X_filtered ) plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred) plt.title("Unevenly Sized Blobs \nwith several initializations") plt.show() # %% # As anisotropic and unequal variances are real limitations of the k-means # algorithm, here we propose instead the use of # :class:`~sklearn.mixture.GaussianMixture`, which also assumes gaussian # clusters but does not impose any constraints on their variances. Notice that # one still has to find the correct number of blobs (see # :ref:`sphx_glr_auto_examples_mixture_plot_gmm_selection.py`). # # For an example on how other clustering methods deal with anisotropic or # unequal variance blobs, see the example # :ref:`sphx_glr_auto_examples_cluster_plot_cluster_comparison.py`. from sklearn.mixture import GaussianMixture fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(12, 6)) y_pred = GaussianMixture(n_components=3).fit_predict(X_aniso) ax1.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred) ax1.set_title("Anisotropically Distributed Blobs") y_pred = GaussianMixture(n_components=3).fit_predict(X_varied) ax2.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred) ax2.set_title("Unequal Variance") plt.suptitle("Gaussian mixture clusters").set_y(0.95) plt.show() # %% # Final remarks # ------------- # # In high-dimensional spaces, Euclidean distances tend to become inflated # (not shown in this example). Running a dimensionality reduction algorithm # prior to k-means clustering can alleviate this problem and speed up the # computations (see the example # :ref:`sphx_glr_auto_examples_text_plot_document_clustering.py`). # # In the case where clusters are known to be isotropic, have similar variance # and are not too sparse, the k-means algorithm is quite effective and is one of # the fastest clustering algorithms available. This advantage is lost if one has # to restart it several times to avoid convergence to a local minimum.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
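The remark in the file above about running k-means with several centroid seeds to reach a low inertia can be made concrete with a small sketch (synthetic blobs, not part of the file itself): with `n_init=10` the reported `inertia_` is the best of ten runs, so it can only match or beat a single run.

# Minimal sketch: more random initializations can only improve the best inertia kept.
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=1500, centers=3, random_state=170)

for n_init in (1, 10):
    km = KMeans(n_clusters=3, init="random", n_init=n_init, random_state=0).fit(X)
    print(f"n_init={n_init:2d}  inertia={km.inertia_:.1f}")

On easy, well-separated blobs both settings may converge to the same solution; the gap shows up on harder layouts such as the unevenly sized blobs used above.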
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/cluster/plot_kmeans_digits.py
examples/cluster/plot_kmeans_digits.py
""" =========================================================== A demo of K-Means clustering on the handwritten digits data =========================================================== In this example we compare the various initialization strategies for K-means in terms of runtime and quality of the results. As the ground truth is known here, we also apply different cluster quality metrics to judge the goodness of fit of the cluster labels to the ground truth. Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for definitions and discussions of the metrics): =========== ======================================================== Shorthand full name =========== ======================================================== homo homogeneity score compl completeness score v-meas V measure ARI adjusted Rand index AMI adjusted mutual information silhouette silhouette coefficient =========== ======================================================== """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Load the dataset # ---------------- # # We will start by loading the `digits` dataset. This dataset contains # handwritten digits from 0 to 9. In the context of clustering, one would like # to group images such that the handwritten digits on the image are the same. import numpy as np from sklearn.datasets import load_digits data, labels = load_digits(return_X_y=True) (n_samples, n_features), n_digits = data.shape, np.unique(labels).size print(f"# digits: {n_digits}; # samples: {n_samples}; # features {n_features}") # %% # Define our evaluation benchmark # ------------------------------- # # We will first our evaluation benchmark. During this benchmark, we intend to # compare different initialization methods for KMeans. Our benchmark will: # # * create a pipeline which will scale the data using a # :class:`~sklearn.preprocessing.StandardScaler`; # * train and time the pipeline fitting; # * measure the performance of the clustering obtained via different metrics. from time import time from sklearn import metrics from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler def bench_k_means(kmeans, name, data, labels): """Benchmark to evaluate the KMeans initialization methods. Parameters ---------- kmeans : KMeans instance A :class:`~sklearn.cluster.KMeans` instance with the initialization already set. name : str Name given to the strategy. It will be used to show the results in a table. data : ndarray of shape (n_samples, n_features) The data to cluster. labels : ndarray of shape (n_samples,) The labels used to compute the clustering metrics which requires some supervision. 
""" t0 = time() estimator = make_pipeline(StandardScaler(), kmeans).fit(data) fit_time = time() - t0 results = [name, fit_time, estimator[-1].inertia_] # Define the metrics which require only the true labels and estimator # labels clustering_metrics = [ metrics.homogeneity_score, metrics.completeness_score, metrics.v_measure_score, metrics.adjusted_rand_score, metrics.adjusted_mutual_info_score, ] results += [m(labels, estimator[-1].labels_) for m in clustering_metrics] # The silhouette score requires the full dataset results += [ metrics.silhouette_score( data, estimator[-1].labels_, metric="euclidean", sample_size=300, ) ] # Show the results formatter_result = ( "{:9s}\t{:.3f}s\t{:.0f}\t{:.3f}\t{:.3f}\t{:.3f}\t{:.3f}\t{:.3f}\t{:.3f}" ) print(formatter_result.format(*results)) # %% # Run the benchmark # ----------------- # # We will compare three approaches: # # * an initialization using `k-means++`. This method is stochastic and we will # run the initialization 4 times; # * a random initialization. This method is stochastic as well and we will run # the initialization 4 times; # * an initialization based on a :class:`~sklearn.decomposition.PCA` # projection. Indeed, we will use the components of the # :class:`~sklearn.decomposition.PCA` to initialize KMeans. This method is # deterministic and a single initialization suffice. from sklearn.cluster import KMeans from sklearn.decomposition import PCA print(82 * "_") print("init\t\ttime\tinertia\thomo\tcompl\tv-meas\tARI\tAMI\tsilhouette") kmeans = KMeans(init="k-means++", n_clusters=n_digits, n_init=4, random_state=0) bench_k_means(kmeans=kmeans, name="k-means++", data=data, labels=labels) kmeans = KMeans(init="random", n_clusters=n_digits, n_init=4, random_state=0) bench_k_means(kmeans=kmeans, name="random", data=data, labels=labels) pca = PCA(n_components=n_digits).fit(data) kmeans = KMeans(init=pca.components_, n_clusters=n_digits, n_init=1) bench_k_means(kmeans=kmeans, name="PCA-based", data=data, labels=labels) print(82 * "_") # %% # Visualize the results on PCA-reduced data # ----------------------------------------- # # :class:`~sklearn.decomposition.PCA` allows to project the data from the # original 64-dimensional space into a lower dimensional space. Subsequently, # we can use :class:`~sklearn.decomposition.PCA` to project into a # 2-dimensional space and plot the data and the clusters in this new space. import matplotlib.pyplot as plt reduced_data = PCA(n_components=2).fit_transform(data) kmeans = KMeans(init="k-means++", n_clusters=n_digits, n_init=4) kmeans.fit(reduced_data) # Step size of the mesh. Decrease to increase the quality of the VQ. h = 0.02 # point in the mesh [x_min, x_max]x[y_min, y_max]. # Plot the decision boundary. For that, we will assign a color to each x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1 y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) # Obtain labels for each point in mesh. Use last trained model. 
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.figure(1) plt.clf() plt.imshow( Z, interpolation="nearest", extent=(xx.min(), xx.max(), yy.min(), yy.max()), cmap=plt.cm.Paired, aspect="auto", origin="lower", ) plt.plot(reduced_data[:, 0], reduced_data[:, 1], "k.", markersize=2) # Plot the centroids as a white X centroids = kmeans.cluster_centers_ plt.scatter( centroids[:, 0], centroids[:, 1], marker="x", s=169, linewidths=3, color="w", zorder=10, ) plt.title( "K-means clustering on the digits dataset (PCA-reduced data)\n" "Centroids are marked with white cross" ) plt.xlim(x_min, x_max) plt.ylim(y_min, y_max) plt.xticks(()) plt.yticks(()) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
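The shorthand metrics listed in the table at the top of that file (homo, compl, v-meas, ARI, AMI) are all plain functions of two label vectors; a tiny hand-made example, separate from the benchmark itself, shows the calling convention:

# Minimal sketch: the supervised clustering metrics compare two label assignments.
from sklearn import metrics

labels_true = [0, 0, 0, 1, 1, 1]
labels_pred = [1, 1, 0, 0, 0, 0]  # same grouping up to renaming, with one mistake

print("homogeneity :", metrics.homogeneity_score(labels_true, labels_pred))
print("completeness:", metrics.completeness_score(labels_true, labels_pred))
print("v-measure   :", metrics.v_measure_score(labels_true, labels_pred))
print("ARI         :", metrics.adjusted_rand_score(labels_true, labels_pred))
print("AMI         :", metrics.adjusted_mutual_info_score(labels_true, labels_pred))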
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/cluster/plot_birch_vs_minibatchkmeans.py
examples/cluster/plot_birch_vs_minibatchkmeans.py
""" ================================= Compare BIRCH and MiniBatchKMeans ================================= This example compares the timing of BIRCH (with and without the global clustering step) and MiniBatchKMeans on a synthetic dataset having 25,000 samples and 2 features generated using make_blobs. Both ``MiniBatchKMeans`` and ``BIRCH`` are very scalable algorithms and could run efficiently on hundreds of thousands or even millions of datapoints. We chose to limit the dataset size of this example in the interest of keeping our Continuous Integration resource usage reasonable but the interested reader might enjoy editing this script to rerun it with a larger value for `n_samples`. If ``n_clusters`` is set to None, the data is reduced from 25,000 samples to a set of 158 clusters. This can be viewed as a preprocessing step before the final (global) clustering step that further reduces these 158 clusters to 100 clusters. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from itertools import cycle from time import time import matplotlib.colors as colors import matplotlib.pyplot as plt import numpy as np from joblib import cpu_count from sklearn.cluster import Birch, MiniBatchKMeans from sklearn.datasets import make_blobs # Generate centers for the blobs so that it forms a 10 X 10 grid. xx = np.linspace(-22, 22, 10) yy = np.linspace(-22, 22, 10) xx, yy = np.meshgrid(xx, yy) n_centers = np.hstack((np.ravel(xx)[:, np.newaxis], np.ravel(yy)[:, np.newaxis])) # Generate blobs to do a comparison between MiniBatchKMeans and BIRCH. X, y = make_blobs(n_samples=25000, centers=n_centers, random_state=0) # Use all colors that matplotlib provides by default. colors_ = cycle(colors.cnames.keys()) fig = plt.figure(figsize=(12, 4)) fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9) # Compute clustering with BIRCH with and without the final clustering step # and plot. birch_models = [ Birch(threshold=1.7, n_clusters=None), Birch(threshold=1.7, n_clusters=100), ] final_step = ["without global clustering", "with global clustering"] for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)): t = time() birch_model.fit(X) print("BIRCH %s as the final step took %0.2f seconds" % (info, (time() - t))) # Plot result labels = birch_model.labels_ centroids = birch_model.subcluster_centers_ n_clusters = np.unique(labels).size print("n_clusters : %d" % n_clusters) ax = fig.add_subplot(1, 3, ind + 1) for this_centroid, k, col in zip(centroids, range(n_clusters), colors_): mask = labels == k ax.scatter(X[mask, 0], X[mask, 1], c="w", edgecolor=col, marker=".", alpha=0.5) if birch_model.n_clusters is None: ax.scatter(this_centroid[0], this_centroid[1], marker="+", c="k", s=25) ax.set_ylim([-25, 25]) ax.set_xlim([-25, 25]) ax.set_autoscaley_on(False) ax.set_title("BIRCH %s" % info) # Compute clustering with MiniBatchKMeans. 
mbk = MiniBatchKMeans( init="k-means++", n_clusters=100, batch_size=256 * cpu_count(), n_init=10, max_no_improvement=10, verbose=0, random_state=0, ) t0 = time() mbk.fit(X) t_mini_batch = time() - t0 print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch) mbk_means_labels_unique = np.unique(mbk.labels_) ax = fig.add_subplot(1, 3, 3) for this_centroid, k, col in zip(mbk.cluster_centers_, range(n_clusters), colors_): mask = mbk.labels_ == k ax.scatter(X[mask, 0], X[mask, 1], marker=".", c="w", edgecolor=col, alpha=0.5) ax.scatter(this_centroid[0], this_centroid[1], marker="+", c="k", s=25) ax.set_xlim([-25, 25]) ax.set_ylim([-25, 25]) ax.set_title("MiniBatchKMeans") ax.set_autoscaley_on(False) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
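Both estimators compared in that script also expose `partial_fit`, which is one way to realize the scalability its docstring mentions when the data arrives in chunks. This is a sketch of the API only; an in-memory array is split purely for illustration.

# Minimal sketch: feed chunks to Birch / MiniBatchKMeans with partial_fit.
import numpy as np
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=10_000, centers=10, random_state=0)

birch = Birch(threshold=1.7, n_clusters=None)
mbk = MiniBatchKMeans(n_clusters=10, batch_size=1024, n_init="auto", random_state=0)

for chunk in np.array_split(X, 20):
    birch.partial_fit(chunk)
    mbk.partial_fit(chunk)

print("BIRCH subclusters found:", birch.subcluster_centers_.shape[0])
print("MiniBatchKMeans centers:", mbk.cluster_centers_.shape[0])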
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/cluster/plot_face_compress.py
examples/cluster/plot_face_compress.py
""" =========================== Vector Quantization Example =========================== This example shows how one can use :class:`~sklearn.preprocessing.KBinsDiscretizer` to perform vector quantization on a set of toy image, the raccoon face. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Original image # -------------- # # We start by loading the raccoon face image from SciPy. We will additionally check # a couple of information regarding the image, such as the shape and data type used # to store the image. # from scipy.datasets import face raccoon_face = face(gray=True) print(f"The dimension of the image is {raccoon_face.shape}") print(f"The data used to encode the image is of type {raccoon_face.dtype}") print(f"The number of bytes taken in RAM is {raccoon_face.nbytes}") # %% # Thus the image is a 2D array of 768 pixels in height and 1024 pixels in width. Each # value is a 8-bit unsigned integer, which means that the image is encoded using 8 # bits per pixel. The total memory usage of the image is 786 kilobytes (1 byte equals # 8 bits). # # Using 8-bit unsigned integer means that the image is encoded using 256 different # shades of gray, at most. We can check the distribution of these values. import matplotlib.pyplot as plt fig, ax = plt.subplots(ncols=2, figsize=(12, 4)) ax[0].imshow(raccoon_face, cmap=plt.cm.gray) ax[0].axis("off") ax[0].set_title("Rendering of the image") ax[1].hist(raccoon_face.ravel(), bins=256) ax[1].set_xlabel("Pixel value") ax[1].set_ylabel("Count of pixels") ax[1].set_title("Distribution of the pixel values") _ = fig.suptitle("Original image of a raccoon face") # %% # Compression via vector quantization # ----------------------------------- # # The idea behind compression via vector quantization is to reduce the number of # gray levels to represent an image. For instance, we can use 8 values instead # of 256 values. Therefore, it means that we could efficiently use 3 bits instead # of 8 bits to encode a single pixel and therefore reduce the memory usage by a # factor of approximately 2.5. We will later discuss about this memory usage. # # Encoding strategy # """"""""""""""""" # # The compression can be done using a # :class:`~sklearn.preprocessing.KBinsDiscretizer`. We need to choose a strategy # to define the 8 gray values to sub-sample. The simplest strategy is to define # them equally spaced, which correspond to setting `strategy="uniform"`. From # the previous histogram, we know that this strategy is certainly not optimal. from sklearn.preprocessing import KBinsDiscretizer n_bins = 8 encoder = KBinsDiscretizer( n_bins=n_bins, encode="ordinal", strategy="uniform", random_state=0, ) compressed_raccoon_uniform = encoder.fit_transform(raccoon_face.reshape(-1, 1)).reshape( raccoon_face.shape ) fig, ax = plt.subplots(ncols=2, figsize=(12, 4)) ax[0].imshow(compressed_raccoon_uniform, cmap=plt.cm.gray) ax[0].axis("off") ax[0].set_title("Rendering of the image") ax[1].hist(compressed_raccoon_uniform.ravel(), bins=256) ax[1].set_xlabel("Pixel value") ax[1].set_ylabel("Count of pixels") ax[1].set_title("Sub-sampled distribution of the pixel values") _ = fig.suptitle("Raccoon face compressed using 3 bits and a uniform strategy") # %% # Qualitatively, we can spot some small regions where we see the effect of the # compression (e.g. leaves on the bottom right corner). But after all, the resulting # image is still looking good. # # We observe that the distribution of pixels values have been mapped to 8 # different values. 
We can check the correspondence between such values and the # original pixel values. bin_edges = encoder.bin_edges_[0] bin_center = bin_edges[:-1] + (bin_edges[1:] - bin_edges[:-1]) / 2 bin_center # %% _, ax = plt.subplots() ax.hist(raccoon_face.ravel(), bins=256) color = "tab:orange" for center in bin_center: ax.axvline(center, color=color) ax.text(center - 10, ax.get_ybound()[1] + 100, f"{center:.1f}", color=color) # %% # As previously stated, the uniform sampling strategy is not optimal. Notice for # instance that the pixels mapped to the value 7 will encode a rather small # amount of information, whereas the mapped value 3 will represent a large # amount of counts. We can instead use a clustering strategy such as k-means to # find a more optimal mapping. encoder = KBinsDiscretizer( n_bins=n_bins, encode="ordinal", strategy="kmeans", random_state=0, ) compressed_raccoon_kmeans = encoder.fit_transform(raccoon_face.reshape(-1, 1)).reshape( raccoon_face.shape ) fig, ax = plt.subplots(ncols=2, figsize=(12, 4)) ax[0].imshow(compressed_raccoon_kmeans, cmap=plt.cm.gray) ax[0].axis("off") ax[0].set_title("Rendering of the image") ax[1].hist(compressed_raccoon_kmeans.ravel(), bins=256) ax[1].set_xlabel("Pixel value") ax[1].set_ylabel("Number of pixels") ax[1].set_title("Distribution of the pixel values") _ = fig.suptitle("Raccoon face compressed using 3 bits and a K-means strategy") # %% bin_edges = encoder.bin_edges_[0] bin_center = bin_edges[:-1] + (bin_edges[1:] - bin_edges[:-1]) / 2 bin_center # %% _, ax = plt.subplots() ax.hist(raccoon_face.ravel(), bins=256) color = "tab:orange" for center in bin_center: ax.axvline(center, color=color) ax.text(center - 10, ax.get_ybound()[1] + 100, f"{center:.1f}", color=color) # %% # The counts in the bins are now more balanced and their centers are no longer # equally spaced. Note that we could enforce the same number of pixels per bin # by using the `strategy="quantile"` instead of `strategy="kmeans"`. # # Memory footprint # """""""""""""""" # # We previously stated that we should save 8 times less memory. Let's verify it. print(f"The number of bytes taken in RAM is {compressed_raccoon_kmeans.nbytes}") print(f"Compression ratio: {compressed_raccoon_kmeans.nbytes / raccoon_face.nbytes}") # %% # It is quite surprising to see that our compressed image is taking x8 more # memory than the original image. This is indeed the opposite of what we # expected. The reason is mainly due to the type of data used to encode the # image. print(f"Type of the compressed image: {compressed_raccoon_kmeans.dtype}") # %% # Indeed, the output of the :class:`~sklearn.preprocessing.KBinsDiscretizer` is # an array of 64-bit float. It means that it takes x8 more memory. However, we # use this 64-bit float representation to encode 8 values. Indeed, we will save # memory only if we cast the compressed image into an array of 3-bits integers. We # could use the method `numpy.ndarray.astype`. However, a 3-bits integer # representation does not exist and to encode the 8 values, we would need to use # the 8-bit unsigned integer representation as well. # # In practice, observing a memory gain would require the original image to be in # a 64-bit float representation.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
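The memory discussion at the end of that file can be verified in a couple of lines: the ordinal output of `KBinsDiscretizer` is float64 (8 bytes per pixel), and only a cast back to an 8-bit unsigned integer recovers the original footprint, since NumPy has no 3-bit dtype. A small sketch on the same raccoon image:

# Minimal sketch: the 8-level image only saves memory after casting back to uint8.
import numpy as np
from scipy.datasets import face
from sklearn.preprocessing import KBinsDiscretizer

raccoon_face = face(gray=True)  # uint8, shape (768, 1024)

encoder = KBinsDiscretizer(n_bins=8, encode="ordinal", strategy="uniform", random_state=0)
compressed = encoder.fit_transform(raccoon_face.reshape(-1, 1)).reshape(raccoon_face.shape)

print(compressed.dtype, compressed.nbytes / raccoon_face.nbytes)        # float64, 8.0
compressed_u8 = compressed.astype(np.uint8)
print(compressed_u8.dtype, compressed_u8.nbytes / raccoon_face.nbytes)  # uint8, 1.0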
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/cluster/plot_agglomerative_clustering_metrics.py
examples/cluster/plot_agglomerative_clustering_metrics.py
""" Agglomerative clustering with different metrics =============================================== Demonstrates the effect of different metrics on the hierarchical clustering. The example is engineered to show the effect of the choice of different metrics. It is applied to waveforms, which can be seen as high-dimensional vector. Indeed, the difference between metrics is usually more pronounced in high dimension (in particular for euclidean and cityblock). We generate data from three groups of waveforms. Two of the waveforms (waveform 1 and waveform 2) are proportional one to the other. The cosine distance is invariant to a scaling of the data, as a result, it cannot distinguish these two waveforms. Thus even with no noise, clustering using this distance will not separate out waveform 1 and 2. We add observation noise to these waveforms. We generate very sparse noise: only 6% of the time points contain noise. As a result, the l1 norm of this noise (ie "cityblock" distance) is much smaller than its l2 norm ("euclidean" distance). This can be seen on the inter-class distance matrices: the values on the diagonal, that characterize the spread of the class, are much bigger for the Euclidean distance than for the cityblock distance. When we apply clustering to the data, we find that the clustering reflects what was in the distance matrices. Indeed, for the Euclidean distance, the classes are ill-separated because of the noise, and thus the clustering does not separate the waveforms. For the cityblock distance, the separation is good and the waveform classes are recovered. Finally, the cosine distance does not separate at all waveform 1 and 2, thus the clustering puts them in the same cluster. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.patheffects as PathEffects import matplotlib.pyplot as plt import numpy as np from sklearn.cluster import AgglomerativeClustering from sklearn.metrics import pairwise_distances np.random.seed(0) # Generate waveform data n_features = 2000 t = np.pi * np.linspace(0, 1, n_features) def sqr(x): return np.sign(np.cos(x)) X = list() y = list() for i, (phi, a) in enumerate([(0.5, 0.15), (0.5, 0.6), (0.3, 0.2)]): for _ in range(30): phase_noise = 0.01 * np.random.normal() amplitude_noise = 0.04 * np.random.normal() additional_noise = 1 - 2 * np.random.rand(n_features) # Make the noise sparse additional_noise[np.abs(additional_noise) < 0.997] = 0 X.append( 12 * ( (a + amplitude_noise) * (sqr(6 * (t + phi + phase_noise))) + additional_noise ) ) y.append(i) X = np.array(X) y = np.array(y) n_clusters = 3 labels = ("Waveform 1", "Waveform 2", "Waveform 3") colors = ["#f7bd01", "#377eb8", "#f781bf"] # Plot the ground-truth labelling plt.figure() plt.axes([0, 0, 1, 1]) for l, color, n in zip(range(n_clusters), colors, labels): lines = plt.plot(X[y == l].T, c=color, alpha=0.5) lines[0].set_label(n) plt.legend(loc="best") plt.axis("tight") plt.axis("off") plt.suptitle("Ground truth", size=20, y=1) # Plot the distances for index, metric in enumerate(["cosine", "euclidean", "cityblock"]): avg_dist = np.zeros((n_clusters, n_clusters)) plt.figure(figsize=(5, 4.5)) for i in range(n_clusters): for j in range(n_clusters): avg_dist[i, j] = pairwise_distances( X[y == i], X[y == j], metric=metric ).mean() avg_dist /= avg_dist.max() for i in range(n_clusters): for j in range(n_clusters): t = plt.text( i, j, "%5.3f" % avg_dist[i, j], verticalalignment="center", horizontalalignment="center", ) t.set_path_effects( 
[PathEffects.withStroke(linewidth=5, foreground="w", alpha=0.5)] ) plt.imshow(avg_dist, interpolation="nearest", cmap="cividis", vmin=0) plt.xticks(range(n_clusters), labels, rotation=45) plt.yticks(range(n_clusters), labels) plt.colorbar() plt.suptitle("Interclass %s distances" % metric, size=18, y=1) plt.tight_layout() # Plot clustering results for index, metric in enumerate(["cosine", "euclidean", "cityblock"]): model = AgglomerativeClustering( n_clusters=n_clusters, linkage="average", metric=metric ) model.fit(X) plt.figure() plt.axes([0, 0, 1, 1]) for l, color in zip(np.arange(model.n_clusters), colors): plt.plot(X[model.labels_ == l].T, c=color, alpha=0.5) plt.axis("tight") plt.axis("off") plt.suptitle("AgglomerativeClustering(metric=%s)" % metric, size=20, y=1) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
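The claim in that file that the cosine distance cannot separate two waveforms that are proportional to one another, while the euclidean distance can, is easy to check on a toy pair of vectors (an aside, not part of the example):

# Minimal sketch: cosine distance is invariant to rescaling, euclidean distance is not.
import numpy as np
from sklearn.metrics.pairwise import cosine_distances, euclidean_distances

w1 = np.random.RandomState(0).rand(1, 2000)
w2 = 4 * w1  # proportional to w1, like waveforms 1 and 2 above

print("cosine   :", cosine_distances(w1, w2)[0, 0])     # ~0.0
print("euclidean:", euclidean_distances(w1, w2)[0, 0])  # clearly non-zero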
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/cluster/plot_dbscan.py
examples/cluster/plot_dbscan.py
""" =================================== Demo of DBSCAN clustering algorithm =================================== DBSCAN (Density-Based Spatial Clustering of Applications with Noise) finds core samples in regions of high density and expands clusters from them. This algorithm is good for data which contains clusters of similar density. See the :ref:`sphx_glr_auto_examples_cluster_plot_cluster_comparison.py` example for a demo of different clustering algorithms on 2D datasets. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Data generation # --------------- # # We use :class:`~sklearn.datasets.make_blobs` to create 3 synthetic clusters. from sklearn.datasets import make_blobs from sklearn.preprocessing import StandardScaler centers = [[1, 1], [-1, -1], [1, -1]] X, labels_true = make_blobs( n_samples=750, centers=centers, cluster_std=0.4, random_state=0 ) X = StandardScaler().fit_transform(X) # %% # We can visualize the resulting data: import matplotlib.pyplot as plt plt.scatter(X[:, 0], X[:, 1]) plt.show() # %% # Compute DBSCAN # -------------- # # One can access the labels assigned by :class:`~sklearn.cluster.DBSCAN` using # the `labels_` attribute. Noisy samples are given the label :math:`-1`. import numpy as np from sklearn import metrics from sklearn.cluster import DBSCAN db = DBSCAN(eps=0.3, min_samples=10).fit(X) labels = db.labels_ # Number of clusters in labels, ignoring noise if present. n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0) n_noise_ = list(labels).count(-1) print("Estimated number of clusters: %d" % n_clusters_) print("Estimated number of noise points: %d" % n_noise_) # %% # Clustering algorithms are fundamentally unsupervised learning methods. # However, since :class:`~sklearn.datasets.make_blobs` gives access to the true # labels of the synthetic clusters, it is possible to use evaluation metrics # that leverage this "supervised" ground truth information to quantify the # quality of the resulting clusters. Examples of such metrics are the # homogeneity, completeness, V-measure, Rand-Index, Adjusted Rand-Index and # Adjusted Mutual Information (AMI). # # If the ground truth labels are not known, evaluation can only be performed # using the model results itself. In that case, the Silhouette Coefficient comes # in handy. # # For more information, see the # :ref:`sphx_glr_auto_examples_cluster_plot_adjusted_for_chance_measures.py` # example or the :ref:`clustering_evaluation` module. print(f"Homogeneity: {metrics.homogeneity_score(labels_true, labels):.3f}") print(f"Completeness: {metrics.completeness_score(labels_true, labels):.3f}") print(f"V-measure: {metrics.v_measure_score(labels_true, labels):.3f}") print(f"Adjusted Rand Index: {metrics.adjusted_rand_score(labels_true, labels):.3f}") print( "Adjusted Mutual Information:" f" {metrics.adjusted_mutual_info_score(labels_true, labels):.3f}" ) print(f"Silhouette Coefficient: {metrics.silhouette_score(X, labels):.3f}") # %% # Plot results # ------------ # # Core samples (large dots) and non-core samples (small dots) are color-coded # according to the assigned cluster. Samples tagged as noise are represented in # black. unique_labels = set(labels) core_samples_mask = np.zeros_like(labels, dtype=bool) core_samples_mask[db.core_sample_indices_] = True colors = [plt.cm.Spectral(each) for each in np.linspace(0, 1, len(unique_labels))] for k, col in zip(unique_labels, colors): if k == -1: # Black used for noise. 
col = [0, 0, 0, 1] class_member_mask = labels == k xy = X[class_member_mask & core_samples_mask] plt.plot( xy[:, 0], xy[:, 1], "o", markerfacecolor=tuple(col), markeredgecolor="k", markersize=14, ) xy = X[class_member_mask & ~core_samples_mask] plt.plot( xy[:, 0], xy[:, 1], "o", markerfacecolor=tuple(col), markeredgecolor="k", markersize=6, ) plt.title(f"Estimated number of clusters: {n_clusters_}") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
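The script above hard-codes `eps=0.3`. A common heuristic for choosing `eps`, not used in the example itself and offered here only as an assumption-laden sketch, is to sort the distance of every point to its `min_samples`-th nearest neighbour and look for the elbow of that curve:

# Minimal sketch of the k-distance heuristic for picking DBSCAN's eps.
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import StandardScaler

centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=750, centers=centers, cluster_std=0.4, random_state=0)
X = StandardScaler().fit_transform(X)

min_samples = 10
distances, _ = NearestNeighbors(n_neighbors=min_samples).fit(X).kneighbors(X)
k_distances = np.sort(distances[:, -1])

# Normally one plots k_distances and reads off the elbow; a high quantile is a
# crude stand-in for that visual inspection.
print("candidate eps near:", np.quantile(k_distances, 0.95))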
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/cluster/plot_adjusted_for_chance_measures.py
examples/cluster/plot_adjusted_for_chance_measures.py
""" ========================================================== Adjustment for chance in clustering performance evaluation ========================================================== This notebook explores the impact of uniformly-distributed random labeling on the behavior of some clustering evaluation metrics. For such purpose, the metrics are computed with a fixed number of samples and as a function of the number of clusters assigned by the estimator. The example is divided into two experiments: - a first experiment with fixed "ground truth labels" (and therefore fixed number of classes) and randomly "predicted labels"; - a second experiment with varying "ground truth labels", randomly "predicted labels". The "predicted labels" have the same number of classes and clusters as the "ground truth labels". """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Defining the list of metrics to evaluate # ---------------------------------------- # # Clustering algorithms are fundamentally unsupervised learning methods. # However, since we assign class labels for the synthetic clusters in this # example, it is possible to use evaluation metrics that leverage this # "supervised" ground truth information to quantify the quality of the resulting # clusters. Examples of such metrics are the following: # # - V-measure, the harmonic mean of completeness and homogeneity; # # - Rand index, which measures how frequently pairs of data points are grouped # consistently according to the result of the clustering algorithm and the # ground truth class assignment; # # - Adjusted Rand index (ARI), a chance-adjusted Rand index such that a random # cluster assignment has an ARI of 0.0 in expectation; # # - Mutual Information (MI) is an information theoretic measure that quantifies # how dependent are the two labelings. Note that the maximum value of MI for # perfect labelings depends on the number of clusters and samples; # # - Normalized Mutual Information (NMI), a Mutual Information defined between 0 # (no mutual information) in the limit of large number of data points and 1 # (perfectly matching label assignments, up to a permutation of the labels). # It is not adjusted for chance: then the number of clustered data points is # not large enough, the expected values of MI or NMI for random labelings can # be significantly non-zero; # # - Adjusted Mutual Information (AMI), a chance-adjusted Mutual Information. # Similarly to ARI, random cluster assignment has an AMI of 0.0 in # expectation. # # For more information, see the :ref:`clustering_evaluation` module. from sklearn import metrics score_funcs = [ ("V-measure", metrics.v_measure_score), ("Rand index", metrics.rand_score), ("ARI", metrics.adjusted_rand_score), ("MI", metrics.mutual_info_score), ("NMI", metrics.normalized_mutual_info_score), ("AMI", metrics.adjusted_mutual_info_score), ] # %% # First experiment: fixed ground truth labels and growing number of clusters # -------------------------------------------------------------------------- # # We first define a function that creates uniformly-distributed random labeling. 
import numpy as np rng = np.random.RandomState(0) def random_labels(n_samples, n_classes): return rng.randint(low=0, high=n_classes, size=n_samples) # %% # Another function will use the `random_labels` function to create a fixed set # of ground truth labels (`labels_a`) distributed in `n_classes` and then score # several sets of randomly "predicted" labels (`labels_b`) to assess the # variability of a given metric at a given `n_clusters`. def fixed_classes_uniform_labelings_scores( score_func, n_samples, n_clusters_range, n_classes, n_runs=5 ): scores = np.zeros((len(n_clusters_range), n_runs)) labels_a = random_labels(n_samples=n_samples, n_classes=n_classes) for i, n_clusters in enumerate(n_clusters_range): for j in range(n_runs): labels_b = random_labels(n_samples=n_samples, n_classes=n_clusters) scores[i, j] = score_func(labels_a, labels_b) return scores # %% # In this first example we set the number of classes (true number of clusters) to # `n_classes=10`. The number of clusters varies over the values provided by # `n_clusters_range`. import matplotlib.pyplot as plt import seaborn as sns n_samples = 1000 n_classes = 10 n_clusters_range = np.linspace(2, 100, 10).astype(int) plots = [] names = [] sns.color_palette("colorblind") plt.figure(1) for marker, (score_name, score_func) in zip("d^vx.,", score_funcs): scores = fixed_classes_uniform_labelings_scores( score_func, n_samples, n_clusters_range, n_classes=n_classes ) plots.append( plt.errorbar( n_clusters_range, scores.mean(axis=1), scores.std(axis=1), alpha=0.8, linewidth=1, marker=marker, )[0] ) names.append(score_name) plt.title( "Clustering measures for random uniform labeling\n" f"against reference assignment with {n_classes} classes" ) plt.xlabel(f"Number of clusters (Number of samples is fixed to {n_samples})") plt.ylabel("Score value") plt.ylim(bottom=-0.05, top=1.05) plt.legend(plots, names, bbox_to_anchor=(0.5, 0.5)) plt.show() # %% # The Rand index saturates for `n_clusters` > `n_classes`. Other non-adjusted # measures such as the V-Measure show a linear dependency between the number of # clusters and the number of samples. # # Adjusted for chance measure, such as ARI and AMI, display some random # variations centered around a mean score of 0.0, independently of the number of # samples and clusters. # # Second experiment: varying number of classes and clusters # --------------------------------------------------------- # # In this section we define a similar function that uses several metrics to # score 2 uniformly-distributed random labelings. In this case, the number of # classes and assigned number of clusters are matched for each possible value in # `n_clusters_range`. def uniform_labelings_scores(score_func, n_samples, n_clusters_range, n_runs=5): scores = np.zeros((len(n_clusters_range), n_runs)) for i, n_clusters in enumerate(n_clusters_range): for j in range(n_runs): labels_a = random_labels(n_samples=n_samples, n_classes=n_clusters) labels_b = random_labels(n_samples=n_samples, n_classes=n_clusters) scores[i, j] = score_func(labels_a, labels_b) return scores # %% # In this case, we use `n_samples=100` to show the effect of having a number of # clusters similar or equal to the number of samples. 
n_samples = 100 n_clusters_range = np.linspace(2, n_samples, 10).astype(int) plt.figure(2) plots = [] names = [] for marker, (score_name, score_func) in zip("d^vx.,", score_funcs): scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range) plots.append( plt.errorbar( n_clusters_range, np.median(scores, axis=1), scores.std(axis=1), alpha=0.8, linewidth=2, marker=marker, )[0] ) names.append(score_name) plt.title( "Clustering measures for 2 random uniform labelings\nwith equal number of clusters" ) plt.xlabel(f"Number of clusters (Number of samples is fixed to {n_samples})") plt.ylabel("Score value") plt.legend(plots, names) plt.ylim(bottom=-0.05, top=1.05) plt.show() # %% # We observe results similar to the first experiment: adjusted-for-chance # metrics stay consistently near zero while other metrics tend to get larger with # finer-grained labelings. The mean V-measure of random labeling increases # significantly as the number of clusters gets closer to the total number of # samples used to compute the measure. Furthermore, raw Mutual Information is # unbounded from above and its scale depends on the dimensions of the clustering # problem and the cardinality of the ground truth classes. This is why the # curve goes off the chart. # # Only adjusted measures can hence be safely used as a consensus index to # evaluate the average stability of clustering algorithms for a given value of k # on various overlapping sub-samples of the dataset. # # Non-adjusted clustering evaluation metrics can therefore be misleading: because they # output large values for fine-grained labelings, one could be led to think # that the labeling has captured meaningful groups while it may be totally # random. In particular, such non-adjusted metrics should not be used to compare # the results of different clustering algorithms that output a different number # of clusters.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
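The headline result of that notebook, that only chance-adjusted scores stay near zero for unrelated labelings, fits in a few lines (a compact sanity check, not a replacement for the full experiment):

# Minimal sketch: the raw Rand index is optimistic on random labels, ARI/AMI are not.
import numpy as np
from sklearn import metrics

rng = np.random.RandomState(0)
a = rng.randint(0, 10, size=1000)
b = rng.randint(0, 10, size=1000)  # drawn independently of a

print("Rand index:", metrics.rand_score(a, b))                  # roughly 0.8
print("ARI       :", metrics.adjusted_rand_score(a, b))         # close to 0.0
print("AMI       :", metrics.adjusted_mutual_info_score(a, b))  # close to 0.0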
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/cluster/plot_coin_segmentation.py
examples/cluster/plot_coin_segmentation.py
""" ================================================ Segmenting the picture of greek coins in regions ================================================ This example uses :ref:`spectral_clustering` on a graph created from voxel-to-voxel difference on an image to break this image into multiple partly-homogeneous regions. This procedure (spectral clustering on an image) is an efficient approximate solution for finding normalized graph cuts. There are three options to assign labels: * 'kmeans' spectral clustering clusters samples in the embedding space using a kmeans algorithm * 'discrete' iteratively searches for the closest partition space to the embedding space of spectral clustering. * 'cluster_qr' assigns labels using the QR factorization with pivoting that directly determines the partition in the embedding space. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import time import matplotlib.pyplot as plt import numpy as np from scipy.ndimage import gaussian_filter from skimage.data import coins from skimage.transform import rescale from sklearn.cluster import spectral_clustering from sklearn.feature_extraction import image # load the coins as a numpy array orig_coins = coins() # Resize it to 20% of the original size to speed up the processing # Applying a Gaussian filter for smoothing prior to down-scaling # reduces aliasing artifacts. smoothened_coins = gaussian_filter(orig_coins, sigma=2) rescaled_coins = rescale(smoothened_coins, 0.2, mode="reflect", anti_aliasing=False) # Convert the image into a graph with the value of the gradient on the # edges. graph = image.img_to_graph(rescaled_coins) # Take a decreasing function of the gradient: an exponential # The smaller beta is, the more independent the segmentation is of the # actual image. For beta=1, the segmentation is close to a voronoi beta = 10 eps = 1e-6 graph.data = np.exp(-beta * graph.data / graph.data.std()) + eps # The number of segmented regions to display needs to be chosen manually. # The current version of 'spectral_clustering' does not support determining # the number of good quality clusters automatically. n_regions = 26 # %% # Compute and visualize the resulting regions # Computing a few extra eigenvectors may speed up the eigen_solver. # The spectral clustering quality may also benefit from requesting # extra regions for segmentation. n_regions_plus = 3 # Apply spectral clustering using the default eigen_solver='arpack'. # Any implemented solver can be used: eigen_solver='arpack', 'lobpcg', or 'amg'. # Choosing eigen_solver='amg' requires an extra package called 'pyamg'. # The quality of segmentation and the speed of calculations is mostly determined # by the choice of the solver and the value of the tolerance 'eigen_tol'. # TODO: varying eigen_tol seems to have no effect for 'lobpcg' and 'amg' #21243. 
for assign_labels in ("kmeans", "discretize", "cluster_qr"): t0 = time.time() labels = spectral_clustering( graph, n_clusters=(n_regions + n_regions_plus), eigen_tol=1e-7, assign_labels=assign_labels, random_state=42, ) t1 = time.time() labels = labels.reshape(rescaled_coins.shape) plt.figure(figsize=(5, 5)) plt.imshow(rescaled_coins, cmap=plt.cm.gray) plt.xticks(()) plt.yticks(()) title = "Spectral clustering: %s, %.2fs" % (assign_labels, (t1 - t0)) print(title) plt.title(title) for l in range(n_regions): colors = [plt.cm.nipy_spectral((l + 4) / float(n_regions + 4))] plt.contour(labels == l, colors=colors) # To view the individual segments as they appear, uncomment plt.pause(0.5) here plt.show() # TODO: After #21194 is merged and #21243 is fixed, check which eigen_solver # is the best and set eigen_solver='arpack', 'lobpcg', or 'amg' and eigen_tol # explicitly in this example.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
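The graph construction in that script (pixel gradients on the edges, followed by an exponential re-weighting controlled by `beta`) can be inspected on a tiny array before worrying about the coins image; the 4x4 array below is made up purely for illustration:

# Minimal sketch: img_to_graph builds a pixel-adjacency graph; its data vector is
# then rescaled into affinities exactly as the coin example does.
import numpy as np
from sklearn.feature_extraction import image

tiny = np.arange(16, dtype=float).reshape(4, 4)
graph = image.img_to_graph(tiny)
print(graph.shape)     # (16, 16): one node per pixel
print(graph.data[:5])  # raw gradient-based weights

beta, eps = 10, 1e-6
graph.data = np.exp(-beta * graph.data / graph.data.std()) + eps
print(graph.data[:5])  # small gradients now map to affinities near 1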
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/cluster/plot_cluster_comparison.py
examples/cluster/plot_cluster_comparison.py
""" ========================================================= Comparing different clustering algorithms on toy datasets ========================================================= This example shows characteristics of different clustering algorithms on datasets that are "interesting" but still in 2D. With the exception of the last dataset, the parameters of each of these dataset-algorithm pairs has been tuned to produce good clustering results. Some algorithms are more sensitive to parameter values than others. The last dataset is an example of a 'null' situation for clustering: the data is homogeneous, and there is no good clustering. For this example, the null dataset uses the same parameters as the dataset in the row above it, which represents a mismatch in the parameter values and the data structure. While these examples give some intuition about the algorithms, this intuition might not apply to very high dimensional data. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import time import warnings from itertools import cycle, islice import matplotlib.pyplot as plt import numpy as np from sklearn import cluster, datasets, mixture from sklearn.neighbors import kneighbors_graph from sklearn.preprocessing import StandardScaler # ============ # Generate datasets. We choose the size big enough to see the scalability # of the algorithms, but not too big to avoid too long running times # ============ n_samples = 500 seed = 30 noisy_circles = datasets.make_circles( n_samples=n_samples, factor=0.5, noise=0.05, random_state=seed ) noisy_moons = datasets.make_moons(n_samples=n_samples, noise=0.05, random_state=seed) blobs = datasets.make_blobs(n_samples=n_samples, random_state=seed) rng = np.random.RandomState(seed) no_structure = rng.rand(n_samples, 2), None # Anisotropicly distributed data random_state = 170 X, y = datasets.make_blobs(n_samples=n_samples, random_state=random_state) transformation = [[0.6, -0.6], [-0.4, 0.8]] X_aniso = np.dot(X, transformation) aniso = (X_aniso, y) # blobs with varied variances varied = datasets.make_blobs( n_samples=n_samples, cluster_std=[1.0, 2.5, 0.5], random_state=random_state ) # ============ # Set up cluster parameters # ============ plt.figure(figsize=(9 * 2 + 3, 13)) plt.subplots_adjust( left=0.02, right=0.98, bottom=0.001, top=0.95, wspace=0.05, hspace=0.01 ) plot_num = 1 default_base = { "quantile": 0.3, "eps": 0.3, "damping": 0.9, "preference": -200, "n_neighbors": 3, "n_clusters": 3, "min_samples": 7, "xi": 0.05, "min_cluster_size": 0.1, "allow_single_cluster": True, "hdbscan_min_cluster_size": 15, "hdbscan_min_samples": 3, "random_state": 42, } datasets = [ ( noisy_circles, { "damping": 0.77, "preference": -240, "quantile": 0.2, "n_clusters": 2, "min_samples": 7, "xi": 0.08, }, ), ( noisy_moons, { "damping": 0.75, "preference": -220, "n_clusters": 2, "min_samples": 7, "xi": 0.1, }, ), ( varied, { "eps": 0.18, "n_neighbors": 2, "min_samples": 7, "xi": 0.01, "min_cluster_size": 0.2, }, ), ( aniso, { "eps": 0.15, "n_neighbors": 2, "min_samples": 7, "xi": 0.1, "min_cluster_size": 0.2, }, ), (blobs, {"min_samples": 7, "xi": 0.1, "min_cluster_size": 0.2}), (no_structure, {}), ] for i_dataset, (dataset, algo_params) in enumerate(datasets): # update parameters with dataset-specific values params = default_base.copy() params.update(algo_params) X, y = dataset # normalize dataset for easier parameter selection X = StandardScaler().fit_transform(X) # estimate bandwidth for mean shift bandwidth = cluster.estimate_bandwidth(X, 
quantile=params["quantile"]) # connectivity matrix for structured Ward connectivity = kneighbors_graph( X, n_neighbors=params["n_neighbors"], include_self=False ) # make connectivity symmetric connectivity = 0.5 * (connectivity + connectivity.T) # ============ # Create cluster objects # ============ ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True) two_means = cluster.MiniBatchKMeans( n_clusters=params["n_clusters"], random_state=params["random_state"], ) ward = cluster.AgglomerativeClustering( n_clusters=params["n_clusters"], linkage="ward", connectivity=connectivity ) spectral = cluster.SpectralClustering( n_clusters=params["n_clusters"], eigen_solver="arpack", affinity="nearest_neighbors", random_state=params["random_state"], ) dbscan = cluster.DBSCAN(eps=params["eps"]) hdbscan = cluster.HDBSCAN( min_samples=params["hdbscan_min_samples"], min_cluster_size=params["hdbscan_min_cluster_size"], allow_single_cluster=params["allow_single_cluster"], copy=True, ) optics = cluster.OPTICS( min_samples=params["min_samples"], xi=params["xi"], min_cluster_size=params["min_cluster_size"], ) affinity_propagation = cluster.AffinityPropagation( damping=params["damping"], preference=params["preference"], random_state=params["random_state"], ) average_linkage = cluster.AgglomerativeClustering( linkage="average", metric="cityblock", n_clusters=params["n_clusters"], connectivity=connectivity, ) birch = cluster.Birch(n_clusters=params["n_clusters"]) gmm = mixture.GaussianMixture( n_components=params["n_clusters"], covariance_type="full", random_state=params["random_state"], ) clustering_algorithms = ( ("MiniBatch\nKMeans", two_means), ("Affinity\nPropagation", affinity_propagation), ("MeanShift", ms), ("Spectral\nClustering", spectral), ("Ward", ward), ("Agglomerative\nClustering", average_linkage), ("DBSCAN", dbscan), ("HDBSCAN", hdbscan), ("OPTICS", optics), ("BIRCH", birch), ("Gaussian\nMixture", gmm), ) for name, algorithm in clustering_algorithms: t0 = time.time() # catch warnings related to kneighbors_graph with warnings.catch_warnings(): warnings.filterwarnings( "ignore", message="the number of connected components of the " "connectivity matrix is [0-9]{1,2}" " > 1. Completing it to avoid stopping the tree early.", category=UserWarning, ) warnings.filterwarnings( "ignore", message="Graph is not fully connected, spectral embedding" " may not work as expected.", category=UserWarning, ) algorithm.fit(X) t1 = time.time() if hasattr(algorithm, "labels_"): y_pred = algorithm.labels_.astype(int) else: y_pred = algorithm.predict(X) plt.subplot(len(datasets), len(clustering_algorithms), plot_num) if i_dataset == 0: plt.title(name, size=18) colors = np.array( list( islice( cycle( [ "#377eb8", "#ff7f00", "#4daf4a", "#f781bf", "#a65628", "#984ea3", "#999999", "#e41a1c", "#dede00", ] ), int(max(y_pred) + 1), ) ) ) # add black color for outliers (if any) colors = np.append(colors, ["#000000"]) plt.scatter(X[:, 0], X[:, 1], s=10, color=colors[y_pred]) plt.xlim(-2.5, 2.5) plt.ylim(-2.5, 2.5) plt.xticks(()) plt.yticks(()) plt.text( 0.99, 0.01, ("%.2fs" % (t1 - t0)).lstrip("0"), transform=plt.gca().transAxes, size=15, horizontalalignment="right", ) plot_num += 1 plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
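One step of that comparison script worth isolating is the connectivity matrix handed to the structured agglomerative models: `kneighbors_graph` is directed (not symmetric), which is why the script averages it with its transpose. A minimal sketch of just that step:

# Minimal sketch: symmetrize a k-nearest-neighbours graph before agglomerative clustering.
from sklearn.datasets import make_moons
from sklearn.neighbors import kneighbors_graph

X, _ = make_moons(n_samples=200, noise=0.05, random_state=0)
connectivity = kneighbors_graph(X, n_neighbors=3, include_self=False)

print("symmetric before:", (connectivity != connectivity.T).nnz == 0)
connectivity = 0.5 * (connectivity + connectivity.T)
print("symmetric after :", (connectivity != connectivity.T).nnz == 0)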
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py
examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py
""" ============================================== Feature agglomeration vs. univariate selection ============================================== This example compares 2 dimensionality reduction strategies: - univariate feature selection with Anova - feature agglomeration with Ward hierarchical clustering Both methods are compared in a regression problem using a BayesianRidge as supervised estimator. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% import shutil import tempfile import matplotlib.pyplot as plt import numpy as np from joblib import Memory from scipy import linalg, ndimage from sklearn import feature_selection from sklearn.cluster import FeatureAgglomeration from sklearn.feature_extraction.image import grid_to_graph from sklearn.linear_model import BayesianRidge from sklearn.model_selection import GridSearchCV, KFold from sklearn.pipeline import Pipeline # %% # Set parameters n_samples = 200 size = 40 # image size roi_size = 15 snr = 5.0 np.random.seed(0) # %% # Generate data coef = np.zeros((size, size)) coef[0:roi_size, 0:roi_size] = -1.0 coef[-roi_size:, -roi_size:] = 1.0 X = np.random.randn(n_samples, size**2) for x in X: # smooth data x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel() X -= X.mean(axis=0) X /= X.std(axis=0) y = np.dot(X, coef.ravel()) # %% # add noise noise = np.random.randn(y.shape[0]) noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.0)) / linalg.norm(noise, 2) y += noise_coef * noise # %% # Compute the coefs of a Bayesian Ridge with GridSearch cv = KFold(2) # cross-validation generator for model selection ridge = BayesianRidge() cachedir = tempfile.mkdtemp() mem = Memory(location=cachedir, verbose=1) # %% # Ward agglomeration followed by BayesianRidge connectivity = grid_to_graph(n_x=size, n_y=size) ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity, memory=mem) clf = Pipeline([("ward", ward), ("ridge", ridge)]) # Select the optimal number of parcels with grid search clf = GridSearchCV(clf, {"ward__n_clusters": [10, 20, 30]}, n_jobs=1, cv=cv) clf.fit(X, y) # set the best parameters coef_ = clf.best_estimator_.steps[-1][1].coef_ coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_) coef_agglomeration_ = coef_.reshape(size, size) # %% # Anova univariate feature selection followed by BayesianRidge f_regression = mem.cache(feature_selection.f_regression) # caching function anova = feature_selection.SelectPercentile(f_regression) clf = Pipeline([("anova", anova), ("ridge", ridge)]) # Select the optimal percentage of features with grid search clf = GridSearchCV(clf, {"anova__percentile": [5, 10, 20]}, cv=cv) clf.fit(X, y) # set the best parameters coef_ = clf.best_estimator_.steps[-1][1].coef_ coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_.reshape(1, -1)) coef_selection_ = coef_.reshape(size, size) # %% # Inverse the transformation to plot the results on an image plt.close("all") plt.figure(figsize=(7.3, 2.7)) plt.subplot(1, 3, 1) plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r) plt.title("True weights") plt.subplot(1, 3, 2) plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r) plt.title("Feature Selection") plt.subplot(1, 3, 3) plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r) plt.title("Feature Agglomeration") plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26) plt.show() # %% # Attempt to remove the temporary cachedir, but don't worry if it fails shutil.rmtree(cachedir, ignore_errors=True)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/decomposition/plot_kernel_pca.py
examples/decomposition/plot_kernel_pca.py
""" ========== Kernel PCA ========== This example shows the difference between the Principal Components Analysis (:class:`~sklearn.decomposition.PCA`) and its kernelized version (:class:`~sklearn.decomposition.KernelPCA`). On the one hand, we show that :class:`~sklearn.decomposition.KernelPCA` is able to find a projection of the data which linearly separates them while it is not the case with :class:`~sklearn.decomposition.PCA`. Finally, we show that inverting this projection is an approximation with :class:`~sklearn.decomposition.KernelPCA`, while it is exact with :class:`~sklearn.decomposition.PCA`. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Projecting data: `PCA` vs. `KernelPCA` # -------------------------------------- # # In this section, we show the advantages of using a kernel when # projecting data using a Principal Component Analysis (PCA). We create a # dataset made of two nested circles. from sklearn.datasets import make_circles from sklearn.model_selection import train_test_split X, y = make_circles(n_samples=1_000, factor=0.3, noise=0.05, random_state=0) X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=0) # %% # Let's have a quick first look at the generated dataset. import matplotlib.pyplot as plt _, (train_ax, test_ax) = plt.subplots(ncols=2, sharex=True, sharey=True, figsize=(8, 4)) train_ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train) train_ax.set_ylabel("Feature #1") train_ax.set_xlabel("Feature #0") train_ax.set_title("Training data") test_ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test) test_ax.set_xlabel("Feature #0") _ = test_ax.set_title("Testing data") # %% # The samples from each class cannot be linearly separated: there is no # straight line that can split the samples of the inner set from the outer # set. # # Now, we will use PCA with and without a kernel to see what is the effect of # using such a kernel. The kernel used here is a radial basis function (RBF) # kernel. from sklearn.decomposition import PCA, KernelPCA pca = PCA(n_components=2) kernel_pca = KernelPCA( n_components=None, kernel="rbf", gamma=10, fit_inverse_transform=True, alpha=0.1 ) X_test_pca = pca.fit(X_train).transform(X_test) X_test_kernel_pca = kernel_pca.fit(X_train).transform(X_test) # %% fig, (orig_data_ax, pca_proj_ax, kernel_pca_proj_ax) = plt.subplots( ncols=3, figsize=(14, 4) ) orig_data_ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test) orig_data_ax.set_ylabel("Feature #1") orig_data_ax.set_xlabel("Feature #0") orig_data_ax.set_title("Testing data") pca_proj_ax.scatter(X_test_pca[:, 0], X_test_pca[:, 1], c=y_test) pca_proj_ax.set_ylabel("Principal component #1") pca_proj_ax.set_xlabel("Principal component #0") pca_proj_ax.set_title("Projection of testing data\n using PCA") kernel_pca_proj_ax.scatter(X_test_kernel_pca[:, 0], X_test_kernel_pca[:, 1], c=y_test) kernel_pca_proj_ax.set_ylabel("Principal component #1") kernel_pca_proj_ax.set_xlabel("Principal component #0") _ = kernel_pca_proj_ax.set_title("Projection of testing data\n using KernelPCA") # %% # We recall that PCA transforms the data linearly. Intuitively, it means that # the coordinate system will be centered, rescaled on each component # with respected to its variance and finally be rotated. # The obtained data from this transformation is isotropic and can now be # projected on its *principal components*. # # Thus, looking at the projection made using PCA (i.e. 
the middle figure), we see that there is no change regarding the scaling;
# indeed, the data being two concentric circles centered in zero, the original
# data is already isotropic. However, we can see that the data have been
# rotated. As a conclusion, we see that such a projection would not help if we
# defined a linear classifier to distinguish samples from both classes.
#
# Using a kernel allows making a non-linear projection. Here, by using an RBF
# kernel, we expect that the projection will unfold the dataset while
# approximately preserving the relative distances of pairs of data points that
# are close to one another in the original space.
#
# We observe such behaviour in the figure on the right: the samples of a given
# class are closer to each other than the samples from the opposite class,
# untangling both sample sets. Now, we can use a linear classifier to separate
# the samples from the two classes.
#
# Projecting into the original feature space
# ------------------------------------------
#
# One particularity to have in mind when using
# :class:`~sklearn.decomposition.KernelPCA` is related to the reconstruction
# (i.e. the back projection in the original feature space). With
# :class:`~sklearn.decomposition.PCA`, the reconstruction will be exact if
# `n_components` is the same as the number of original features.
# This is the case in this example.
#
# We can investigate if we get the original dataset when back projecting with
# :class:`~sklearn.decomposition.KernelPCA`.
X_reconstructed_pca = pca.inverse_transform(pca.transform(X_test))
X_reconstructed_kernel_pca = kernel_pca.inverse_transform(kernel_pca.transform(X_test))

# %%
fig, (orig_data_ax, pca_back_proj_ax, kernel_pca_back_proj_ax) = plt.subplots(
    ncols=3, sharex=True, sharey=True, figsize=(13, 4)
)

orig_data_ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test)
orig_data_ax.set_ylabel("Feature #1")
orig_data_ax.set_xlabel("Feature #0")
orig_data_ax.set_title("Original test data")

pca_back_proj_ax.scatter(X_reconstructed_pca[:, 0], X_reconstructed_pca[:, 1], c=y_test)
pca_back_proj_ax.set_xlabel("Feature #0")
pca_back_proj_ax.set_title("Reconstruction via PCA")

kernel_pca_back_proj_ax.scatter(
    X_reconstructed_kernel_pca[:, 0], X_reconstructed_kernel_pca[:, 1], c=y_test
)
kernel_pca_back_proj_ax.set_xlabel("Feature #0")
_ = kernel_pca_back_proj_ax.set_title("Reconstruction via KernelPCA")

# %%
# While we see a perfect reconstruction with
# :class:`~sklearn.decomposition.PCA`, we observe a different result for
# :class:`~sklearn.decomposition.KernelPCA`.
#
# Indeed, :meth:`~sklearn.decomposition.KernelPCA.inverse_transform` cannot
# rely on an analytical back-projection and thus an exact reconstruction.
# Instead, a :class:`~sklearn.kernel_ridge.KernelRidge` is internally trained
# to learn a mapping from the kernelized PCA basis to the original feature
# space. This method therefore comes with an approximation introducing small
# differences when back projecting in the original feature space.
#
# To improve the reconstruction using
# :meth:`~sklearn.decomposition.KernelPCA.inverse_transform`, one can tune
# `alpha` in :class:`~sklearn.decomposition.KernelPCA`, the regularization term
# which controls the reliance on the training data during the training of
# the mapping.
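# %%
# As a hedged sketch (not part of the original example, and reusing `X_train`,
# `X_test` and `KernelPCA` from above), one possible way to pick `alpha` is to
# refit the model for a few candidate values and keep the one with the lowest
# reconstruction error on the held-out split. The candidate grid below is
# illustrative only.
from sklearn.decomposition import KernelPCA
from sklearn.metrics import mean_squared_error

for candidate_alpha in (1e-3, 1e-2, 1e-1, 1.0):
    kpca_candidate = KernelPCA(
        n_components=None,
        kernel="rbf",
        gamma=10,
        fit_inverse_transform=True,
        alpha=candidate_alpha,
    ).fit(X_train)
    X_test_back = kpca_candidate.inverse_transform(kpca_candidate.transform(X_test))
    print(
        f"alpha={candidate_alpha:g}: "
        f"reconstruction MSE={mean_squared_error(X_test, X_test_back):.5f}"
    )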
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/decomposition/plot_sparse_coding.py
examples/decomposition/plot_sparse_coding.py
""" =========================================== Sparse coding with a precomputed dictionary =========================================== Transform a signal as a sparse combination of Ricker wavelets. This example visually compares different sparse coding methods using the :class:`~sklearn.decomposition.SparseCoder` estimator. The Ricker (also known as Mexican hat or the second derivative of a Gaussian) is not a particularly good kernel to represent piecewise constant signals like this one. It can therefore be seen how much adding different widths of atoms matters and it therefore motivates learning the dictionary to best fit your type of signals. The richer dictionary on the right is not larger in size, heavier subsampling is performed in order to stay on the same order of magnitude. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn.decomposition import SparseCoder def ricker_function(resolution, center, width): """Discrete sub-sampled Ricker (Mexican hat) wavelet""" x = np.linspace(0, resolution - 1, resolution) x = ( (2 / (np.sqrt(3 * width) * np.pi**0.25)) * (1 - (x - center) ** 2 / width**2) * np.exp(-((x - center) ** 2) / (2 * width**2)) ) return x def ricker_matrix(width, resolution, n_components): """Dictionary of Ricker (Mexican hat) wavelets""" centers = np.linspace(0, resolution - 1, n_components) D = np.empty((n_components, resolution)) for i, center in enumerate(centers): D[i] = ricker_function(resolution, center, width) D /= np.sqrt(np.sum(D**2, axis=1))[:, np.newaxis] return D resolution = 1024 subsampling = 3 # subsampling factor width = 100 n_components = resolution // subsampling # Compute a wavelet dictionary D_fixed = ricker_matrix(width=width, resolution=resolution, n_components=n_components) D_multi = np.r_[ tuple( ricker_matrix(width=w, resolution=resolution, n_components=n_components // 5) for w in (10, 50, 100, 500, 1000) ) ] # Generate a signal y = np.linspace(0, resolution - 1, resolution) first_quarter = y < resolution / 4 y[first_quarter] = 3.0 y[np.logical_not(first_quarter)] = -1.0 # List the different sparse coding methods in the following format: # (title, transform_algorithm, transform_alpha, # transform_n_nozero_coefs, color) estimators = [ ("OMP", "omp", None, 15, "navy"), ("Lasso", "lasso_lars", 2, None, "turquoise"), ] lw = 2 plt.figure(figsize=(13, 6)) for subplot, (D, title) in enumerate( zip((D_fixed, D_multi), ("fixed width", "multiple widths")) ): plt.subplot(1, 2, subplot + 1) plt.title("Sparse coding against %s dictionary" % title) plt.plot(y, lw=lw, linestyle="--", label="Original signal") # Do a wavelet approximation for title, algo, alpha, n_nonzero, color in estimators: coder = SparseCoder( dictionary=D, transform_n_nonzero_coefs=n_nonzero, transform_alpha=alpha, transform_algorithm=algo, ) x = coder.transform(y.reshape(1, -1)) density = len(np.flatnonzero(x)) x = np.ravel(np.dot(x, D)) squared_error = np.sum((y - x) ** 2) plt.plot( x, color=color, lw=lw, label="%s: %s nonzero coefs,\n%.2f error" % (title, density, squared_error), ) # Soft thresholding debiasing coder = SparseCoder( dictionary=D, transform_algorithm="threshold", transform_alpha=20 ) x = coder.transform(y.reshape(1, -1)) _, idx = (x != 0).nonzero() x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y, rcond=None) x = np.ravel(np.dot(x, D)) squared_error = np.sum((y - x) ** 2) plt.plot( x, color="darkorange", lw=lw, label="Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error" % 
(len(idx), squared_error), ) plt.axis("tight") plt.legend(shadow=False, loc="best") plt.subplots_adjust(0.04, 0.07, 0.97, 0.90, 0.09, 0.2) plt.show()
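# %%
# As a tiny standalone sketch (separate from the example above), the snippet
# below shows the bare :class:`~sklearn.decomposition.SparseCoder` interface on
# a toy dictionary: rows of ``D_toy`` are unit-norm atoms, and ``transform``
# returns one row of codes per input signal. Names and sizes are illustrative.
import numpy as np

from sklearn.decomposition import SparseCoder

rng_toy = np.random.RandomState(0)
D_toy = rng_toy.randn(8, 32)                            # 8 atoms of length 32
D_toy /= np.linalg.norm(D_toy, axis=1, keepdims=True)   # unit-norm atoms

signal = 0.7 * D_toy[1] + 0.3 * D_toy[5]                # sparse mix of two atoms
coder_toy = SparseCoder(
    dictionary=D_toy, transform_algorithm="omp", transform_n_nonzero_coefs=2
)
codes = coder_toy.transform(signal.reshape(1, -1))
print(np.flatnonzero(codes))  # expected to recover atoms 1 and 5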
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/decomposition/plot_pca_vs_fa_model_selection.py
examples/decomposition/plot_pca_vs_fa_model_selection.py
""" =============================================================== Model selection with Probabilistic PCA and Factor Analysis (FA) =============================================================== Probabilistic PCA and Factor Analysis are probabilistic models. The consequence is that the likelihood of new data can be used for model selection and covariance estimation. Here we compare PCA and FA with cross-validation on low rank data corrupted with homoscedastic noise (noise variance is the same for each feature) or heteroscedastic noise (noise variance is the different for each feature). In a second step we compare the model likelihood to the likelihoods obtained from shrinkage covariance estimators. One can observe that with homoscedastic noise both FA and PCA succeed in recovering the size of the low rank subspace. The likelihood with PCA is higher than FA in this case. However PCA fails and overestimates the rank when heteroscedastic noise is present. Under appropriate circumstances (choice of the number of components), the held-out data is more likely for low rank models than for shrinkage models. The automatic estimation from Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604 by Thomas P. Minka is also compared. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Create the data # --------------- import numpy as np from scipy import linalg n_samples, n_features, rank = 500, 25, 5 sigma = 1.0 rng = np.random.RandomState(42) U, _, _ = linalg.svd(rng.randn(n_features, n_features)) X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T) # Adding homoscedastic noise X_homo = X + sigma * rng.randn(n_samples, n_features) # Adding heteroscedastic noise sigmas = sigma * rng.rand(n_features) + sigma / 2.0 X_hetero = X + rng.randn(n_samples, n_features) * sigmas # %% # Fit the models # -------------- import matplotlib.pyplot as plt from sklearn.covariance import LedoitWolf, ShrunkCovariance from sklearn.decomposition import PCA, FactorAnalysis from sklearn.model_selection import GridSearchCV, cross_val_score n_components = np.arange(0, n_features, 5) # options for n_components def compute_scores(X): pca = PCA(svd_solver="full") fa = FactorAnalysis() pca_scores, fa_scores = [], [] for n in n_components: pca.n_components = n fa.n_components = n pca_scores.append(np.mean(cross_val_score(pca, X))) fa_scores.append(np.mean(cross_val_score(fa, X))) return pca_scores, fa_scores def shrunk_cov_score(X): shrinkages = np.logspace(-2, 0, 30) cv = GridSearchCV(ShrunkCovariance(), {"shrinkage": shrinkages}) return np.mean(cross_val_score(cv.fit(X).best_estimator_, X)) def lw_score(X): return np.mean(cross_val_score(LedoitWolf(), X)) for X, title in [(X_homo, "Homoscedastic Noise"), (X_hetero, "Heteroscedastic Noise")]: pca_scores, fa_scores = compute_scores(X) n_components_pca = n_components[np.argmax(pca_scores)] n_components_fa = n_components[np.argmax(fa_scores)] pca = PCA(svd_solver="full", n_components="mle") pca.fit(X) n_components_pca_mle = pca.n_components_ print("best n_components by PCA CV = %d" % n_components_pca) print("best n_components by FactorAnalysis CV = %d" % n_components_fa) print("best n_components by PCA MLE = %d" % n_components_pca_mle) plt.figure() plt.plot(n_components, pca_scores, "b", label="PCA scores") plt.plot(n_components, fa_scores, "r", label="FA scores") plt.axvline(rank, color="g", label="TRUTH: %d" % rank, linestyle="-") plt.axvline( n_components_pca, color="b", label="PCA CV: %d" % n_components_pca, linestyle="--", ) 
    plt.axvline(
        n_components_fa,
        color="r",
        label="FactorAnalysis CV: %d" % n_components_fa,
        linestyle="--",
    )
    plt.axvline(
        n_components_pca_mle,
        color="k",
        label="PCA MLE: %d" % n_components_pca_mle,
        linestyle="--",
    )

    # compare with other covariance estimators
    plt.axhline(
        shrunk_cov_score(X),
        color="violet",
        label="Shrunk Covariance MLE",
        linestyle="-.",
    )
    plt.axhline(
        lw_score(X),
        color="orange",
        label="LedoitWolf MLE",
        linestyle="-.",
    )

    plt.xlabel("nb of components")
    plt.ylabel("CV scores")
    plt.legend(loc="lower right")
    plt.title(title)

plt.show()
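# %%
# As a short hedged sketch (reusing ``X_homo`` and ``rank`` from above, not
# part of the original example): for these probabilistic models the default
# ``score`` is the average log-likelihood of held-out samples, which is what
# ``cross_val_score`` aggregates inside ``compute_scores``. The number of CV
# folds below is illustrative.
from sklearn.decomposition import FactorAnalysis
from sklearn.model_selection import cross_val_score

fa_demo = FactorAnalysis(n_components=rank)
loglik_per_fold = cross_val_score(fa_demo, X_homo, cv=5)
print("mean held-out log-likelihood: %.2f" % loglik_per_fold.mean())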
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/decomposition/plot_image_denoising.py
examples/decomposition/plot_image_denoising.py
""" ========================================= Image denoising using dictionary learning ========================================= An example comparing the effect of reconstructing noisy fragments of a raccoon face image using firstly online :ref:`DictionaryLearning` and various transform methods. The dictionary is fitted on the distorted left half of the image, and subsequently used to reconstruct the right half. Note that even better performance could be achieved by fitting to an undistorted (i.e. noiseless) image, but here we start from the assumption that it is not available. A common practice for evaluating the results of image denoising is by looking at the difference between the reconstruction and the original image. If the reconstruction is perfect this will look like Gaussian noise. It can be seen from the plots that the results of :ref:`omp` with two non-zero coefficients is a bit less biased than when keeping only one (the edges look less prominent). It is in addition closer from the ground truth in Frobenius norm. The result of :ref:`least_angle_regression` is much more strongly biased: the difference is reminiscent of the local intensity value of the original image. Thresholding is clearly not useful for denoising, but it is here to show that it can produce a suggestive output with very high speed, and thus be useful for other tasks such as object classification, where performance is not necessarily related to visualisation. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Generate distorted image # ------------------------ import numpy as np from scipy.datasets import face raccoon_face = face(gray=True) # Convert from uint8 representation with values between 0 and 255 to # a floating point representation with values between 0 and 1. raccoon_face = raccoon_face / 255.0 # downsample for higher speed raccoon_face = ( raccoon_face[::4, ::4] + raccoon_face[1::4, ::4] + raccoon_face[::4, 1::4] + raccoon_face[1::4, 1::4] ) raccoon_face /= 4.0 height, width = raccoon_face.shape # Distort the right half of the image print("Distorting image...") distorted = raccoon_face.copy() distorted[:, width // 2 :] += 0.075 * np.random.randn(height, width // 2) # %% # Display the distorted image # --------------------------- import matplotlib.pyplot as plt def show_with_diff(image, reference, title): """Helper function to display denoising""" plt.figure(figsize=(5, 3.3)) plt.subplot(1, 2, 1) plt.title("Image") plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray, interpolation="nearest") plt.xticks(()) plt.yticks(()) plt.subplot(1, 2, 2) difference = image - reference plt.title("Difference (norm: %.2f)" % np.sqrt(np.sum(difference**2))) plt.imshow( difference, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr, interpolation="nearest" ) plt.xticks(()) plt.yticks(()) plt.suptitle(title, size=16) plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2) show_with_diff(distorted, raccoon_face, "Distorted image") # %% # Extract reference patches # ---------------------------- from time import time from sklearn.feature_extraction.image import extract_patches_2d # Extract all reference patches from the left half of the image print("Extracting reference patches...") t0 = time() patch_size = (7, 7) data = extract_patches_2d(distorted[:, : width // 2], patch_size) data = data.reshape(data.shape[0], -1) data -= np.mean(data, axis=0) data /= np.std(data, axis=0) print(f"{data.shape[0]} patches extracted in %.2fs." 
% (time() - t0)) # %% # Learn the dictionary from reference patches # ------------------------------------------- from sklearn.decomposition import MiniBatchDictionaryLearning print("Learning the dictionary...") t0 = time() dico = MiniBatchDictionaryLearning( # increase to 300 for higher quality results at the cost of slower # training times. n_components=50, batch_size=200, alpha=1.0, max_iter=10, ) V = dico.fit(data).components_ dt = time() - t0 print(f"{dico.n_iter_} iterations / {dico.n_steps_} steps in {dt:.2f}.") plt.figure(figsize=(4.2, 4)) for i, comp in enumerate(V[:100]): plt.subplot(10, 10, i + 1) plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r, interpolation="nearest") plt.xticks(()) plt.yticks(()) plt.suptitle( "Dictionary learned from face patches\n" + "Train time %.1fs on %d patches" % (dt, len(data)), fontsize=16, ) plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23) # %% # Extract noisy patches and reconstruct them using the dictionary # --------------------------------------------------------------- from sklearn.feature_extraction.image import reconstruct_from_patches_2d print("Extracting noisy patches... ") t0 = time() data = extract_patches_2d(distorted[:, width // 2 :], patch_size) data = data.reshape(data.shape[0], -1) intercept = np.mean(data, axis=0) data -= intercept print("done in %.2fs." % (time() - t0)) transform_algorithms = [ ("Orthogonal Matching Pursuit\n1 atom", "omp", {"transform_n_nonzero_coefs": 1}), ("Orthogonal Matching Pursuit\n2 atoms", "omp", {"transform_n_nonzero_coefs": 2}), ("Least-angle regression\n4 atoms", "lars", {"transform_n_nonzero_coefs": 4}), ("Thresholding\n alpha=0.1", "threshold", {"transform_alpha": 0.1}), ] reconstructions = {} for title, transform_algorithm, kwargs in transform_algorithms: print(title + "...") reconstructions[title] = raccoon_face.copy() t0 = time() dico.set_params(transform_algorithm=transform_algorithm, **kwargs) code = dico.transform(data) patches = np.dot(code, V) patches += intercept patches = patches.reshape(len(data), *patch_size) if transform_algorithm == "threshold": patches -= patches.min() patches /= patches.max() reconstructions[title][:, width // 2 :] = reconstruct_from_patches_2d( patches, (height, width // 2) ) dt = time() - t0 print("done in %.2fs." % dt) show_with_diff(reconstructions[title], raccoon_face, title + " (time: %.1fs)" % dt) plt.show()
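# %%
# As a standalone hedged sketch (not part of the example above), the snippet
# below isolates the patch round trip used for the reconstruction:
# ``extract_patches_2d`` slides a window over the image and
# ``reconstruct_from_patches_2d`` averages the overlapping patches back into an
# image. The toy image size is illustrative.
import numpy as np

from sklearn.feature_extraction.image import (
    extract_patches_2d,
    reconstruct_from_patches_2d,
)

toy_image = np.arange(100, dtype=float).reshape(10, 10)
toy_patches = extract_patches_2d(toy_image, (3, 3))
print(toy_patches.shape)  # (64, 3, 3): one patch per valid window position

toy_rebuilt = reconstruct_from_patches_2d(toy_patches, (10, 10))
print(np.allclose(toy_image, toy_rebuilt))  # True: unmodified patches round-trip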
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/decomposition/plot_varimax_fa.py
examples/decomposition/plot_varimax_fa.py
""" =============================================================== Factor Analysis (with rotation) to visualize patterns =============================================================== Investigating the Iris dataset, we see that sepal length, petal length and petal width are highly correlated. Sepal width is less redundant. Matrix decomposition techniques can uncover these latent patterns. Applying rotations to the resulting components does not inherently improve the predictive value of the derived latent space, but can help visualise their structure; here, for example, the varimax rotation, which is found by maximizing the squared variances of the weights, finds a structure where the second component only loads positively on sepal width. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn.datasets import load_iris from sklearn.decomposition import PCA, FactorAnalysis from sklearn.preprocessing import StandardScaler # %% # Load Iris data data = load_iris() X = StandardScaler().fit_transform(data["data"]) feature_names = data["feature_names"] # %% # Plot covariance of Iris features ax = plt.axes() im = ax.imshow(np.corrcoef(X.T), cmap="RdBu_r", vmin=-1, vmax=1) ax.set_xticks([0, 1, 2, 3]) ax.set_xticklabels(list(feature_names), rotation=90) ax.set_yticks([0, 1, 2, 3]) ax.set_yticklabels(list(feature_names)) plt.colorbar(im).ax.set_ylabel("$r$", rotation=0) ax.set_title("Iris feature correlation matrix") plt.tight_layout() # %% # Run factor analysis with Varimax rotation n_comps = 2 methods = [ ("PCA", PCA()), ("Unrotated FA", FactorAnalysis()), ("Varimax FA", FactorAnalysis(rotation="varimax")), ] fig, axes = plt.subplots(ncols=len(methods), figsize=(10, 8), sharey=True) for ax, (method, fa) in zip(axes, methods): fa.set_params(n_components=n_comps) fa.fit(X) components = fa.components_.T print("\n\n %s :\n" % method) print(components) vmax = np.abs(components).max() ax.imshow(components, cmap="RdBu_r", vmax=vmax, vmin=-vmax) ax.set_yticks(np.arange(len(feature_names))) ax.set_yticklabels(feature_names) ax.set_title(str(method)) ax.set_xticks([0, 1]) ax.set_xticklabels(["Comp. 1", "Comp. 2"]) fig.suptitle("Factors") plt.tight_layout() plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/decomposition/plot_faces_decomposition.py
examples/decomposition/plot_faces_decomposition.py
""" ============================ Faces dataset decompositions ============================ This example applies to :ref:`olivetti_faces_dataset` different unsupervised matrix decomposition (dimension reduction) methods from the module :mod:`sklearn.decomposition` (see the documentation chapter :ref:`decompositions`). """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Dataset preparation # ------------------- # # Loading and preprocessing the Olivetti faces dataset. import logging import matplotlib.pyplot as plt from numpy.random import RandomState from sklearn import cluster, decomposition from sklearn.datasets import fetch_olivetti_faces rng = RandomState(0) # Display progress logs on stdout logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)s %(message)s") faces, _ = fetch_olivetti_faces(return_X_y=True, shuffle=True, random_state=rng) n_samples, n_features = faces.shape # Global centering (focus on one feature, centering all samples) faces_centered = faces - faces.mean(axis=0) # Local centering (focus on one sample, centering all features) faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1) print("Dataset consists of %d faces" % n_samples) # %% # Define a base function to plot the gallery of faces. n_row, n_col = 2, 3 n_components = n_row * n_col image_shape = (64, 64) def plot_gallery(title, images, n_col=n_col, n_row=n_row, cmap=plt.cm.gray): fig, axs = plt.subplots( nrows=n_row, ncols=n_col, figsize=(2.0 * n_col, 2.3 * n_row), facecolor="white", constrained_layout=True, ) fig.get_layout_engine().set(w_pad=0.01, h_pad=0.02, hspace=0, wspace=0) fig.set_edgecolor("black") fig.suptitle(title, size=16) for ax, vec in zip(axs.flat, images): vmax = max(vec.max(), -vec.min()) im = ax.imshow( vec.reshape(image_shape), cmap=cmap, interpolation="nearest", vmin=-vmax, vmax=vmax, ) ax.axis("off") fig.colorbar(im, ax=axs, orientation="horizontal", shrink=0.99, aspect=40, pad=0.01) plt.show() # %% # Let's take a look at our data. Gray color indicates negative values, # white indicates positive values. plot_gallery("Faces from dataset", faces_centered[:n_components]) # %% # Decomposition # ------------- # # Initialise different estimators for decomposition and fit each # of them on all images and plot some results. Each estimator extracts # 6 components as vectors :math:`h \in \mathbb{R}^{4096}`. # We just displayed these vectors in human-friendly visualisation as 64x64 pixel images. # # Read more in the :ref:`User Guide <decompositions>`. # %% # Eigenfaces - PCA using randomized SVD # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # Linear dimensionality reduction using Singular Value Decomposition (SVD) of the data # to project it to a lower dimensional space. # # # .. note:: # # The Eigenfaces estimator, via the :py:mod:`sklearn.decomposition.PCA`, # also provides a scalar `noise_variance_` (the mean of pixelwise variance) # that cannot be displayed as an image. # %% pca_estimator = decomposition.PCA( n_components=n_components, svd_solver="randomized", whiten=True ) pca_estimator.fit(faces_centered) plot_gallery( "Eigenfaces - PCA using randomized SVD", pca_estimator.components_[:n_components] ) # %% # Non-negative components - NMF # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # # Estimate non-negative original data as production of two non-negative matrices. 
# %% nmf_estimator = decomposition.NMF(n_components=n_components, tol=5e-3) nmf_estimator.fit(faces) # original non- negative dataset plot_gallery("Non-negative components - NMF", nmf_estimator.components_[:n_components]) # %% # Independent components - FastICA # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # Independent component analysis separates a multivariate vectors into additive # subcomponents that are maximally independent. # %% ica_estimator = decomposition.FastICA( n_components=n_components, max_iter=400, whiten="arbitrary-variance", tol=15e-5 ) ica_estimator.fit(faces_centered) plot_gallery( "Independent components - FastICA", ica_estimator.components_[:n_components] ) # %% # Sparse components - MiniBatchSparsePCA # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # # Mini-batch sparse PCA (:class:`~sklearn.decomposition.MiniBatchSparsePCA`) # extracts the set of sparse components that best reconstruct the data. This # variant is faster but less accurate than the similar # :class:`~sklearn.decomposition.SparsePCA`. # %% batch_pca_estimator = decomposition.MiniBatchSparsePCA( n_components=n_components, alpha=0.1, max_iter=100, batch_size=3, random_state=rng ) batch_pca_estimator.fit(faces_centered) plot_gallery( "Sparse components - MiniBatchSparsePCA", batch_pca_estimator.components_[:n_components], ) # %% # Dictionary learning # ^^^^^^^^^^^^^^^^^^^ # # By default, :class:`~sklearn.decomposition.MiniBatchDictionaryLearning` # divides the data into mini-batches and optimizes in an online manner by # cycling over the mini-batches for the specified number of iterations. # %% batch_dict_estimator = decomposition.MiniBatchDictionaryLearning( n_components=n_components, alpha=0.1, max_iter=50, batch_size=3, random_state=rng ) batch_dict_estimator.fit(faces_centered) plot_gallery("Dictionary learning", batch_dict_estimator.components_[:n_components]) # %% # Cluster centers - MiniBatchKMeans # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # # :class:`sklearn.cluster.MiniBatchKMeans` is computationally efficient and # implements on-line learning with a # :meth:`~sklearn.cluster.MiniBatchKMeans.partial_fit` method. That is # why it could be beneficial to enhance some time-consuming algorithms with # :class:`~sklearn.cluster.MiniBatchKMeans`. # %% kmeans_estimator = cluster.MiniBatchKMeans( n_clusters=n_components, tol=1e-3, batch_size=20, max_iter=50, random_state=rng, ) kmeans_estimator.fit(faces_centered) plot_gallery( "Cluster centers - MiniBatchKMeans", kmeans_estimator.cluster_centers_[:n_components], ) # %% # Factor Analysis components - FA # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # # :class:`~sklearn.decomposition.FactorAnalysis` is similar to # :class:`~sklearn.decomposition.PCA` but has the advantage of modelling the # variance in every direction of the input space independently (heteroscedastic # noise). Read more in the :ref:`User Guide <FA>`. 
# %% fa_estimator = decomposition.FactorAnalysis(n_components=n_components, max_iter=20) fa_estimator.fit(faces_centered) plot_gallery("Factor Analysis (FA)", fa_estimator.components_[:n_components]) # --- Pixelwise variance plt.figure(figsize=(3.2, 3.6), facecolor="white", tight_layout=True) vec = fa_estimator.noise_variance_ vmax = max(vec.max(), -vec.min()) plt.imshow( vec.reshape(image_shape), cmap=plt.cm.gray, interpolation="nearest", vmin=-vmax, vmax=vmax, ) plt.axis("off") plt.title("Pixelwise variance from \n Factor Analysis (FA)", size=16, wrap=True) plt.colorbar(orientation="horizontal", shrink=0.8, pad=0.03) plt.show() # %% # Decomposition: Dictionary learning # ---------------------------------- # # In the further section, let's consider :ref:`DictionaryLearning` more precisely. # Dictionary learning is a problem that amounts to finding a sparse representation # of the input data as a combination of simple elements. These simple elements form # a dictionary. It is possible to constrain the dictionary and/or coding coefficients # to be positive to match constraints that may be present in the data. # # :class:`~sklearn.decomposition.MiniBatchDictionaryLearning` implements a # faster, but less accurate version of the dictionary learning algorithm that # is better suited for large datasets. Read more in the :ref:`User Guide # <MiniBatchDictionaryLearning>`. # %% # Plot the same samples from our dataset but with another colormap. # Red indicates negative values, blue indicates positive values, # and white represents zeros. plot_gallery("Faces from dataset", faces_centered[:n_components], cmap=plt.cm.RdBu) # %% # Similar to the previous examples, we change parameters and train # :class:`~sklearn.decomposition.MiniBatchDictionaryLearning` estimator on all # images. Generally, the dictionary learning and sparse encoding decompose # input data into the dictionary and the coding coefficients matrices. :math:`X # \approx UV`, where :math:`X = [x_1, . . . , x_n]`, :math:`X \in # \mathbb{R}^{m×n}`, dictionary :math:`U \in \mathbb{R}^{m×k}`, coding # coefficients :math:`V \in \mathbb{R}^{k×n}`. # # Also below are the results when the dictionary and coding # coefficients are positively constrained. # %% # Dictionary learning - positive dictionary # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # # In the following section we enforce positivity when finding the dictionary. # %% dict_pos_dict_estimator = decomposition.MiniBatchDictionaryLearning( n_components=n_components, alpha=0.1, max_iter=50, batch_size=3, random_state=rng, positive_dict=True, ) dict_pos_dict_estimator.fit(faces_centered) plot_gallery( "Dictionary learning - positive dictionary", dict_pos_dict_estimator.components_[:n_components], cmap=plt.cm.RdBu, ) # %% # Dictionary learning - positive code # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # # Below we constrain the coding coefficients as a positive matrix. # %% dict_pos_code_estimator = decomposition.MiniBatchDictionaryLearning( n_components=n_components, alpha=0.1, max_iter=50, batch_size=3, fit_algorithm="cd", random_state=rng, positive_code=True, ) dict_pos_code_estimator.fit(faces_centered) plot_gallery( "Dictionary learning - positive code", dict_pos_code_estimator.components_[:n_components], cmap=plt.cm.RdBu, ) # %% # Dictionary learning - positive dictionary & code # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # # Also below are the results if the dictionary values and coding # coefficients are positively constrained. 
# %% dict_pos_estimator = decomposition.MiniBatchDictionaryLearning( n_components=n_components, alpha=0.1, max_iter=50, batch_size=3, fit_algorithm="cd", random_state=rng, positive_dict=True, positive_code=True, ) dict_pos_estimator.fit(faces_centered) plot_gallery( "Dictionary learning - positive dictionary & code", dict_pos_estimator.components_[:n_components], cmap=plt.cm.RdBu, )
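# %%
# As a rough hedged sketch (reusing ``faces`` and the fitted ``nmf_estimator``
# from above, not part of the original example), a single face can be
# approximated from a learned decomposition by projecting it onto the
# components and recombining the components with those codes. For estimators
# that whiten or rotate the data, ``inverse_transform`` (when available) is the
# safer route.
code_demo = nmf_estimator.transform(faces[:1])        # codes for the first face
face_approx = code_demo @ nmf_estimator.components_  # linear recombination
print(face_approx.shape)  # (1, 4096), i.e. one 64x64 image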
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/decomposition/plot_ica_vs_pca.py
examples/decomposition/plot_ica_vs_pca.py
""" ========================== FastICA on 2D point clouds ========================== This example illustrates visually in the feature space a comparison by results using two different component analysis techniques. :ref:`ICA` vs :ref:`PCA`. Representing ICA in the feature space gives the view of 'geometric ICA': ICA is an algorithm that finds directions in the feature space corresponding to projections with high non-Gaussianity. These directions need not be orthogonal in the original feature space, but they are orthogonal in the whitened feature space, in which all directions correspond to the same variance. PCA, on the other hand, finds orthogonal directions in the raw feature space that correspond to directions accounting for maximum variance. Here we simulate independent sources using a highly non-Gaussian process, 2 student T with a low number of degrees of freedom (top left figure). We mix them to create observations (top right figure). In this raw observation space, directions identified by PCA are represented by orange vectors. We represent the signal in the PCA space, after whitening by the variance corresponding to the PCA vectors (lower left). Running ICA corresponds to finding a rotation in this space to identify the directions of largest non-Gaussianity (lower right). """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Generate sample data # -------------------- import numpy as np from sklearn.decomposition import PCA, FastICA rng = np.random.RandomState(42) S = rng.standard_t(1.5, size=(20000, 2)) S[:, 0] *= 2.0 # Mix data A = np.array([[1, 1], [0, 2]]) # Mixing matrix X = np.dot(S, A.T) # Generate observations pca = PCA() S_pca_ = pca.fit(X).transform(X) ica = FastICA(random_state=rng, whiten="arbitrary-variance") S_ica_ = ica.fit(X).transform(X) # Estimate the sources # %% # Plot results # ------------ import matplotlib.pyplot as plt def plot_samples(S, axis_list=None): plt.scatter( S[:, 0], S[:, 1], s=2, marker="o", zorder=10, color="steelblue", alpha=0.5 ) if axis_list is not None: for axis, color, label in axis_list: x_axis, y_axis = axis / axis.std() plt.quiver( (0, 0), (0, 0), x_axis, y_axis, zorder=11, width=0.01, scale=6, color=color, label=label, ) plt.hlines(0, -5, 5, color="black", linewidth=0.5) plt.vlines(0, -3, 3, color="black", linewidth=0.5) plt.xlim(-5, 5) plt.ylim(-3, 3) plt.gca().set_aspect("equal") plt.xlabel("x") plt.ylabel("y") plt.figure() plt.subplot(2, 2, 1) plot_samples(S / S.std()) plt.title("True Independent Sources") axis_list = [(pca.components_.T, "orange", "PCA"), (ica.mixing_, "red", "ICA")] plt.subplot(2, 2, 2) plot_samples(X / np.std(X), axis_list=axis_list) legend = plt.legend(loc="upper left") legend.set_zorder(100) plt.title("Observations") plt.subplot(2, 2, 3) plot_samples(S_pca_ / np.std(S_pca_)) plt.title("PCA recovered signals") plt.subplot(2, 2, 4) plot_samples(S_ica_ / np.std(S_ica_)) plt.title("ICA recovered signals") plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36) plt.tight_layout() plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/decomposition/plot_pca_vs_lda.py
examples/decomposition/plot_pca_vs_lda.py
""" ======================================================= Comparison of LDA and PCA 2D projection of Iris dataset ======================================================= The Iris dataset represents 3 kind of Iris flowers (Setosa, Versicolour and Virginica) with 4 attributes: sepal length, sepal width, petal length and petal width. Principal Component Analysis (PCA) applied to this data identifies the combination of attributes (principal components, or directions in the feature space) that account for the most variance in the data. Here we plot the different samples on the 2 first principal components. Linear Discriminant Analysis (LDA) tries to identify attributes that account for the most variance *between classes*. In particular, LDA, in contrast to PCA, is a supervised method, using known class labels. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt from sklearn import datasets from sklearn.decomposition import PCA from sklearn.discriminant_analysis import LinearDiscriminantAnalysis iris = datasets.load_iris() X = iris.data y = iris.target target_names = iris.target_names pca = PCA(n_components=2) X_r = pca.fit(X).transform(X) lda = LinearDiscriminantAnalysis(n_components=2) X_r2 = lda.fit(X, y).transform(X) # Percentage of variance explained for each components print( "explained variance ratio (first two components): %s" % str(pca.explained_variance_ratio_) ) plt.figure() colors = ["navy", "turquoise", "darkorange"] lw = 2 for color, i, target_name in zip(colors, [0, 1, 2], target_names): plt.scatter( X_r[y == i, 0], X_r[y == i, 1], color=color, alpha=0.8, lw=lw, label=target_name ) plt.legend(loc="best", shadow=False, scatterpoints=1) plt.title("PCA of IRIS dataset") plt.figure() for color, i, target_name in zip(colors, [0, 1, 2], target_names): plt.scatter( X_r2[y == i, 0], X_r2[y == i, 1], alpha=0.8, color=color, label=target_name ) plt.legend(loc="best", shadow=False, scatterpoints=1) plt.title("LDA of IRIS dataset") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/decomposition/plot_pca_iris.py
examples/decomposition/plot_pca_iris.py
""" ================================================== Principal Component Analysis (PCA) on Iris Dataset ================================================== This example shows a well known decomposition technique known as Principal Component Analysis (PCA) on the `Iris dataset <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_. This dataset is made of 4 features: sepal length, sepal width, petal length, petal width. We use PCA to project this 4 feature space into a 3-dimensional space. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Loading the Iris dataset # ------------------------ # # The Iris dataset is directly available as part of scikit-learn. It can be loaded # using the :func:`~sklearn.datasets.load_iris` function. With the default parameters, # a :class:`~sklearn.utils.Bunch` object is returned, containing the data, the # target values, the feature names, and the target names. from sklearn.datasets import load_iris iris = load_iris(as_frame=True) print(iris.keys()) # %% # Plot of pairs of features of the Iris dataset # --------------------------------------------- # # Let's first plot the pairs of features of the Iris dataset. import seaborn as sns # Rename classes using the iris target names iris.frame["target"] = iris.target_names[iris.target] _ = sns.pairplot(iris.frame, hue="target") # %% # Each data point on each scatter plot refers to one of the 150 iris flowers # in the dataset, with the color indicating their respective type # (Setosa, Versicolor, and Virginica). # # You can already see a pattern regarding the Setosa type, which is # easily identifiable based on its short and wide sepal. Only # considering these two dimensions, sepal width and length, there's still # overlap between the Versicolor and Virginica types. # # The diagonal of the plot shows the distribution of each feature. We observe # that the petal width and the petal length are the most discriminant features # for the three types. # # Plot a PCA representation # ------------------------- # Let's apply a Principal Component Analysis (PCA) to the iris dataset # and then plot the irises across the first three principal components. # This will allow us to better differentiate among the three types! import matplotlib.pyplot as plt # unused but required import for doing 3d projections with matplotlib < 3.2 import mpl_toolkits.mplot3d # noqa: F401 from sklearn.decomposition import PCA fig = plt.figure(1, figsize=(8, 6)) ax = fig.add_subplot(111, projection="3d", elev=-150, azim=110) X_reduced = PCA(n_components=3).fit_transform(iris.data) scatter = ax.scatter( X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=iris.target, s=40, ) ax.set( title="First three principal components", xlabel="1st Principal Component", ylabel="2nd Principal Component", zlabel="3rd Principal Component", ) ax.xaxis.set_ticklabels([]) ax.yaxis.set_ticklabels([]) ax.zaxis.set_ticklabels([]) # Add a legend legend1 = ax.legend( scatter.legend_elements()[0], iris.target_names.tolist(), loc="upper right", title="Classes", ) ax.add_artist(legend1) plt.show() # %% # PCA will create 3 new features that are a linear combination of the 4 original # features. In addition, this transformation maximizes the variance. With this # transformation, we can identify each species using only the first principal component.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/decomposition/plot_incremental_pca.py
examples/decomposition/plot_incremental_pca.py
""" =============== Incremental PCA =============== Incremental principal component analysis (IPCA) is typically used as a replacement for principal component analysis (PCA) when the dataset to be decomposed is too large to fit in memory. IPCA builds a low-rank approximation for the input data using an amount of memory which is independent of the number of input data samples. It is still dependent on the input data features, but changing the batch size allows for control of memory usage. This example serves as a visual check that IPCA is able to find a similar projection of the data to PCA (to a sign flip), while only processing a few samples at a time. This can be considered a "toy example", as IPCA is intended for large datasets which do not fit in main memory, requiring incremental approaches. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn.datasets import load_iris from sklearn.decomposition import PCA, IncrementalPCA iris = load_iris() X = iris.data y = iris.target n_components = 2 ipca = IncrementalPCA(n_components=n_components, batch_size=10) X_ipca = ipca.fit_transform(X) pca = PCA(n_components=n_components) X_pca = pca.fit_transform(X) colors = ["navy", "turquoise", "darkorange"] for X_transformed, title in [(X_ipca, "Incremental PCA"), (X_pca, "PCA")]: plt.figure(figsize=(8, 8)) for color, i, target_name in zip(colors, [0, 1, 2], iris.target_names): plt.scatter( X_transformed[y == i, 0], X_transformed[y == i, 1], color=color, lw=2, label=target_name, ) if "Incremental" in title: err = np.abs(np.abs(X_pca) - np.abs(X_ipca)).mean() plt.title(title + " of iris dataset\nMean absolute unsigned error %.6f" % err) else: plt.title(title + " of iris dataset") plt.legend(loc="best", shadow=False, scatterpoints=1) plt.axis([-4, 4, -1.5, 1.5]) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/decomposition/plot_ica_blind_source_separation.py
examples/decomposition/plot_ica_blind_source_separation.py
""" ===================================== Blind source separation using FastICA ===================================== An example of estimating sources from noisy data. :ref:`ICA` is used to estimate sources given noisy measurements. Imagine 3 instruments playing simultaneously and 3 microphones recording the mixed signals. ICA is used to recover the sources ie. what is played by each instrument. Importantly, PCA fails at recovering our `instruments` since the related signals reflect non-Gaussian processes. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Generate sample data # -------------------- import numpy as np from scipy import signal np.random.seed(0) n_samples = 2000 time = np.linspace(0, 8, n_samples) s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal S = np.c_[s1, s2, s3] S += 0.2 * np.random.normal(size=S.shape) # Add noise S /= S.std(axis=0) # Standardize data # Mix data A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]]) # Mixing matrix X = np.dot(S, A.T) # Generate observations # %% # Fit ICA and PCA models # ---------------------- from sklearn.decomposition import PCA, FastICA # Compute ICA ica = FastICA(n_components=3, whiten="arbitrary-variance") S_ = ica.fit_transform(X) # Reconstruct signals A_ = ica.mixing_ # Get estimated mixing matrix # We can `prove` that the ICA model applies by reverting the unmixing. assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_) # For comparison, compute PCA pca = PCA(n_components=3) H = pca.fit_transform(X) # Reconstruct signals based on orthogonal components # %% # Plot results # ------------ import matplotlib.pyplot as plt plt.figure() models = [X, S, S_, H] names = [ "Observations (mixed signal)", "True Sources", "ICA recovered signals", "PCA recovered signals", ] colors = ["red", "steelblue", "orange"] for ii, (model, name) in enumerate(zip(models, names), 1): plt.subplot(4, 1, ii) plt.title(name) for sig, color in zip(model.T, colors): plt.plot(sig, color=color) plt.tight_layout() plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/bicluster/plot_spectral_biclustering.py
examples/bicluster/plot_spectral_biclustering.py
""" ============================================= A demo of the Spectral Biclustering algorithm ============================================= This example demonstrates how to generate a checkerboard dataset and bicluster it using the :class:`~sklearn.cluster.SpectralBiclustering` algorithm. The spectral biclustering algorithm is specifically designed to cluster data by simultaneously considering both the rows (samples) and columns (features) of a matrix. It aims to identify patterns not only between samples but also within subsets of samples, allowing for the detection of localized structure within the data. This makes spectral biclustering particularly well-suited for datasets where the order or arrangement of features is fixed, such as in images, time series, or genomes. The data is generated, then shuffled and passed to the spectral biclustering algorithm. The rows and columns of the shuffled matrix are then rearranged to plot the biclusters found. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Generate sample data # -------------------- # We generate the sample data using the # :func:`~sklearn.datasets.make_checkerboard` function. Each pixel within # `shape=(300, 300)` represents with its color a value from a uniform # distribution. The noise is added from a normal distribution, where the value # chosen for `noise` is the standard deviation. # # As you can see, the data is distributed over 12 cluster cells and is # relatively well distinguishable. from matplotlib import pyplot as plt from sklearn.datasets import make_checkerboard n_clusters = (4, 3) data, rows, columns = make_checkerboard( shape=(300, 300), n_clusters=n_clusters, noise=10, shuffle=False, random_state=42 ) plt.matshow(data, cmap=plt.cm.Blues) plt.title("Original dataset") plt.show() # %% # We shuffle the data and the goal is to reconstruct it afterwards using # :class:`~sklearn.cluster.SpectralBiclustering`. import numpy as np # Creating lists of shuffled row and column indices rng = np.random.RandomState(0) row_idx_shuffled = rng.permutation(data.shape[0]) col_idx_shuffled = rng.permutation(data.shape[1]) # %% # We redefine the shuffled data and plot it. We observe that we lost the # structure of original data matrix. data = data[row_idx_shuffled][:, col_idx_shuffled] plt.matshow(data, cmap=plt.cm.Blues) plt.title("Shuffled dataset") plt.show() # %% # Fitting `SpectralBiclustering` # ------------------------------ # We fit the model and compare the obtained clusters with the ground truth. Note # that when creating the model we specify the same number of clusters that we # used to create the dataset (`n_clusters = (4, 3)`), which will contribute to # obtain a good result. from sklearn.cluster import SpectralBiclustering from sklearn.metrics import consensus_score model = SpectralBiclustering(n_clusters=n_clusters, method="log", random_state=0) model.fit(data) # Compute the similarity of two sets of biclusters score = consensus_score( model.biclusters_, (rows[:, row_idx_shuffled], columns[:, col_idx_shuffled]) ) print(f"consensus score: {score:.1f}") # %% # The score is between 0 and 1, where 1 corresponds to a perfect matching. It # shows the quality of the biclustering. # %% # Plotting results # ---------------- # Now, we rearrange the data based on the row and column labels assigned by the # :class:`~sklearn.cluster.SpectralBiclustering` model in ascending order and # plot again. 
The `row_labels_` range from 0 to 3, while the `column_labels_` # range from 0 to 2, representing a total of 4 clusters per row and 3 clusters # per column. # Reordering first the rows and then the columns. reordered_rows = data[np.argsort(model.row_labels_)] reordered_data = reordered_rows[:, np.argsort(model.column_labels_)] plt.matshow(reordered_data, cmap=plt.cm.Blues) plt.title("After biclustering; rearranged to show biclusters") plt.show() # %% # As a last step, we want to demonstrate the relationships between the row # and column labels assigned by the model. Therefore, we create a grid with # :func:`numpy.outer`, which takes the sorted `row_labels_` and `column_labels_` # and adds 1 to each to ensure that the labels start from 1 instead of 0 for # better visualization. plt.matshow( np.outer(np.sort(model.row_labels_) + 1, np.sort(model.column_labels_) + 1), cmap=plt.cm.Blues, ) plt.title("Checkerboard structure of rearranged data") plt.show() # %% # The outer product of the row and column label vectors shows a representation # of the checkerboard structure, where different combinations of row and column # labels are represented by different shades of blue.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/bicluster/plot_bicluster_newsgroups.py
examples/bicluster/plot_bicluster_newsgroups.py
""" ================================================================ Biclustering documents with the Spectral Co-clustering algorithm ================================================================ This example demonstrates the Spectral Co-clustering algorithm on the twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is excluded because it contains many posts containing nothing but data. The TF-IDF vectorized posts form a word frequency matrix, which is then biclustered using Dhillon's Spectral Co-Clustering algorithm. The resulting document-word biclusters indicate subsets words used more often in those subsets documents. For a few of the best biclusters, its most common document categories and its ten most important words get printed. The best biclusters are determined by their normalized cut. The best words are determined by comparing their sums inside and outside the bicluster. For comparison, the documents are also clustered using MiniBatchKMeans. The document clusters derived from the biclusters achieve a better V-measure than clusters found by MiniBatchKMeans. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from collections import Counter from time import time import numpy as np from sklearn.cluster import MiniBatchKMeans, SpectralCoclustering from sklearn.datasets import fetch_20newsgroups from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics.cluster import v_measure_score def number_normalizer(tokens): """Map all numeric tokens to a placeholder. For many applications, tokens that begin with a number are not directly useful, but the fact that such a token exists can be relevant. By applying this form of dimensionality reduction, some methods may perform better. """ return ("#NUMBER" if token[0].isdigit() else token for token in tokens) class NumberNormalizingVectorizer(TfidfVectorizer): def build_tokenizer(self): tokenize = super().build_tokenizer() return lambda doc: list(number_normalizer(tokenize(doc))) # exclude 'comp.os.ms-windows.misc' categories = [ "alt.atheism", "comp.graphics", "comp.sys.ibm.pc.hardware", "comp.sys.mac.hardware", "comp.windows.x", "misc.forsale", "rec.autos", "rec.motorcycles", "rec.sport.baseball", "rec.sport.hockey", "sci.crypt", "sci.electronics", "sci.med", "sci.space", "soc.religion.christian", "talk.politics.guns", "talk.politics.mideast", "talk.politics.misc", "talk.religion.misc", ] newsgroups = fetch_20newsgroups(categories=categories) y_true = newsgroups.target vectorizer = NumberNormalizingVectorizer(stop_words="english", min_df=5) cocluster = SpectralCoclustering( n_clusters=len(categories), svd_method="arpack", random_state=0 ) kmeans = MiniBatchKMeans( n_clusters=len(categories), batch_size=20000, random_state=0, n_init=3 ) print("Vectorizing...") X = vectorizer.fit_transform(newsgroups.data) print("Coclustering...") start_time = time() cocluster.fit(X) y_cocluster = cocluster.row_labels_ print( f"Done in {time() - start_time:.2f}s. V-measure: \ {v_measure_score(y_cocluster, y_true):.4f}" ) print("MiniBatchKMeans...") start_time = time() y_kmeans = kmeans.fit_predict(X) print( f"Done in {time() - start_time:.2f}s. 
V-measure: \ {v_measure_score(y_kmeans, y_true):.4f}" ) feature_names = vectorizer.get_feature_names_out() document_names = list(newsgroups.target_names[i] for i in newsgroups.target) def bicluster_ncut(i): rows, cols = cocluster.get_indices(i) if not (np.any(rows) and np.any(cols)): import sys return sys.float_info.max row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0] col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0] # Note: the following is identical to X[rows[:, np.newaxis], # cols].sum() but much faster in scipy <= 0.16 weight = X[rows][:, cols].sum() cut = X[row_complement][:, cols].sum() + X[rows][:, col_complement].sum() return cut / weight bicluster_ncuts = list(bicluster_ncut(i) for i in range(len(newsgroups.target_names))) best_idx = np.argsort(bicluster_ncuts)[:5] print() print("Best biclusters:") print("----------------") for idx, cluster in enumerate(best_idx): n_rows, n_cols = cocluster.get_shape(cluster) cluster_docs, cluster_words = cocluster.get_indices(cluster) if not len(cluster_docs) or not len(cluster_words): continue # categories counter = Counter(document_names[doc] for doc in cluster_docs) cat_string = ", ".join( f"{(c / n_rows * 100):.0f}% {name}" for name, c in counter.most_common(3) ) # words out_of_cluster_docs = cocluster.row_labels_ != cluster out_of_cluster_docs = out_of_cluster_docs.nonzero()[0] word_col = X[:, cluster_words] word_scores = np.array( word_col[cluster_docs, :].sum(axis=0) - word_col[out_of_cluster_docs, :].sum(axis=0) ) word_scores = word_scores.ravel() important_words = list( feature_names[cluster_words[i]] for i in word_scores.argsort()[:-11:-1] ) print(f"bicluster {idx} : {n_rows} documents, {n_cols} words") print(f"categories : {cat_string}") print(f"words : {', '.join(important_words)}\n")
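# %%
# The cell below is not part of the original example; it is a small,
# self-contained sketch of the cut / weight ratio computed by
# ``bicluster_ncut`` above, on a tiny dense matrix. The names ``toy_X``,
# ``toy_rows`` and ``toy_cols`` are illustrative only: a small ratio means
# that most of the matrix weight falls inside the bicluster.
import numpy as np

toy_X = np.array(
    [
        [5.0, 4.0, 0.1],
        [6.0, 5.0, 0.2],
        [0.1, 0.3, 7.0],
    ]
)
toy_rows, toy_cols = np.array([0, 1]), np.array([0, 1])
row_comp = np.setdiff1d(np.arange(toy_X.shape[0]), toy_rows)
col_comp = np.setdiff1d(np.arange(toy_X.shape[1]), toy_cols)
weight = toy_X[np.ix_(toy_rows, toy_cols)].sum()
cut = toy_X[np.ix_(row_comp, toy_cols)].sum() + toy_X[np.ix_(toy_rows, col_comp)].sum()
print(f"normalized cut proxy: {cut / weight:.3f}")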
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/bicluster/plot_spectral_coclustering.py
examples/bicluster/plot_spectral_coclustering.py
""" ============================================== A demo of the Spectral Co-Clustering algorithm ============================================== This example demonstrates how to generate a dataset and bicluster it using the Spectral Co-Clustering algorithm. The dataset is generated using the ``make_biclusters`` function, which creates a matrix of small values and implants bicluster with large values. The rows and columns are then shuffled and passed to the Spectral Co-Clustering algorithm. Rearranging the shuffled matrix to make biclusters contiguous shows how accurately the algorithm found the biclusters. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import numpy as np from matplotlib import pyplot as plt from sklearn.cluster import SpectralCoclustering from sklearn.datasets import make_biclusters from sklearn.metrics import consensus_score data, rows, columns = make_biclusters( shape=(300, 300), n_clusters=5, noise=5, shuffle=False, random_state=0 ) plt.matshow(data, cmap=plt.cm.Blues) plt.title("Original dataset") # shuffle clusters rng = np.random.RandomState(0) row_idx = rng.permutation(data.shape[0]) col_idx = rng.permutation(data.shape[1]) data = data[row_idx][:, col_idx] plt.matshow(data, cmap=plt.cm.Blues) plt.title("Shuffled dataset") model = SpectralCoclustering(n_clusters=5, random_state=0) model.fit(data) score = consensus_score(model.biclusters_, (rows[:, row_idx], columns[:, col_idx])) print("consensus score: {:.3f}".format(score)) fit_data = data[np.argsort(model.row_labels_)] fit_data = fit_data[:, np.argsort(model.column_labels_)] plt.matshow(fit_data, cmap=plt.cm.Blues) plt.title("After biclustering; rearranged to show biclusters") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/developing_estimators/sklearn_is_fitted.py
examples/developing_estimators/sklearn_is_fitted.py
""" ======================================== `__sklearn_is_fitted__` as Developer API ======================================== The `__sklearn_is_fitted__` method is a convention used in scikit-learn for checking whether an estimator object has been fitted or not. This method is typically implemented in custom estimator classes that are built on top of scikit-learn's base classes like `BaseEstimator` or its subclasses. Developers should use :func:`~sklearn.utils.validation.check_is_fitted` at the beginning of all methods except `fit`. If they need to customize or speed-up the check, they can implement the `__sklearn_is_fitted__` method as shown below. In this example the custom estimator showcases the usage of the `__sklearn_is_fitted__` method and the `check_is_fitted` utility function as developer APIs. The `__sklearn_is_fitted__` method checks fitted status by verifying the presence of the `_is_fitted` attribute. """ # %% # An example custom estimator implementing a simple classifier # ------------------------------------------------------------ # This code snippet defines a custom estimator class called `CustomEstimator` # that extends both the `BaseEstimator` and `ClassifierMixin` classes from # scikit-learn and showcases the usage of the `__sklearn_is_fitted__` method # and the `check_is_fitted` utility function. # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from sklearn.base import BaseEstimator, ClassifierMixin from sklearn.utils.validation import check_is_fitted class CustomEstimator(BaseEstimator, ClassifierMixin): def __init__(self, parameter=1): self.parameter = parameter def fit(self, X, y): """ Fit the estimator to the training data. """ self.classes_ = sorted(set(y)) # Custom attribute to track if the estimator is fitted self._is_fitted = True return self def predict(self, X): """ Perform Predictions If the estimator is not fitted, then raise NotFittedError """ check_is_fitted(self) # Perform prediction logic predictions = [self.classes_[0]] * len(X) return predictions def score(self, X, y): """ Calculate Score If the estimator is not fitted, then raise NotFittedError """ check_is_fitted(self) # Perform scoring logic return 0.5 def __sklearn_is_fitted__(self): """ Check fitted status and return a Boolean value. """ return hasattr(self, "_is_fitted") and self._is_fitted
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/multiclass/plot_multiclass_overview.py
examples/multiclass/plot_multiclass_overview.py
""" =============================================== Overview of multiclass training meta-estimators =============================================== In this example, we discuss the problem of classification when the target variable is composed of more than two classes. This is called multiclass classification. In scikit-learn, all estimators support multiclass classification out of the box: the most sensible strategy was implemented for the end-user. The :mod:`sklearn.multiclass` module implements various strategies that one can use for experimenting or developing third-party estimators that only support binary classification. :mod:`sklearn.multiclass` includes OvO/OvR strategies used to train a multiclass classifier by fitting a set of binary classifiers (the :class:`~sklearn.multiclass.OneVsOneClassifier` and :class:`~sklearn.multiclass.OneVsRestClassifier` meta-estimators). This example will review them. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # The Yeast UCI dataset # --------------------- # # In this example, we use a UCI dataset [1]_, generally referred as the Yeast # dataset. We use the :func:`sklearn.datasets.fetch_openml` function to load # the dataset from OpenML. from sklearn.datasets import fetch_openml X, y = fetch_openml(data_id=181, as_frame=True, return_X_y=True) # %% # To know the type of data science problem we are dealing with, we can check # the target for which we want to build a predictive model. y.value_counts().sort_index() # %% # We see that the target is discrete and composed of 10 classes. We therefore # deal with a multiclass classification problem. # # Strategies comparison # --------------------- # # In the following experiment, we use a # :class:`~sklearn.tree.DecisionTreeClassifier` and a # :class:`~sklearn.model_selection.RepeatedStratifiedKFold` cross-validation # with 3 splits and 5 repetitions. # # We compare the following strategies: # # * :class:`~sklearn.tree.DecisionTreeClassifier` can handle multiclass # classification without needing any special adjustments. It works by breaking # down the training data into smaller subsets and focusing on the most common # class in each subset. By repeating this process, the model can accurately # classify input data into multiple different classes. # * :class:`~sklearn.multiclass.OneVsOneClassifier` trains a set of binary # classifiers where each classifier is trained to distinguish between # two classes. # * :class:`~sklearn.multiclass.OneVsRestClassifier`: trains a set of binary # classifiers where each classifier is trained to distinguish between # one class and the rest of the classes. # * :class:`~sklearn.multiclass.OutputCodeClassifier`: trains a set of binary # classifiers where each classifier is trained to distinguish between # a set of classes from the rest of the classes. The set of classes is # defined by a codebook, which is randomly generated in scikit-learn. This # method exposes a parameter `code_size` to control the size of the codebook. # We set it above one since we are not interested in compressing the class # representation. 
import pandas as pd from sklearn.model_selection import RepeatedStratifiedKFold, cross_validate from sklearn.multiclass import ( OneVsOneClassifier, OneVsRestClassifier, OutputCodeClassifier, ) from sklearn.tree import DecisionTreeClassifier cv = RepeatedStratifiedKFold(n_splits=3, n_repeats=5, random_state=0) tree = DecisionTreeClassifier(random_state=0) ovo_tree = OneVsOneClassifier(tree) ovr_tree = OneVsRestClassifier(tree) ecoc = OutputCodeClassifier(tree, code_size=2) cv_results_tree = cross_validate(tree, X, y, cv=cv, n_jobs=2) cv_results_ovo = cross_validate(ovo_tree, X, y, cv=cv, n_jobs=2) cv_results_ovr = cross_validate(ovr_tree, X, y, cv=cv, n_jobs=2) cv_results_ecoc = cross_validate(ecoc, X, y, cv=cv, n_jobs=2) # %% # We can now compare the statistical performance of the different strategies. # We plot the score distribution of the different strategies. from matplotlib import pyplot as plt scores = pd.DataFrame( { "DecisionTreeClassifier": cv_results_tree["test_score"], "OneVsOneClassifier": cv_results_ovo["test_score"], "OneVsRestClassifier": cv_results_ovr["test_score"], "OutputCodeClassifier": cv_results_ecoc["test_score"], } ) ax = scores.plot.kde(legend=True) ax.set_xlabel("Accuracy score") ax.set_xlim([0, 0.7]) _ = ax.set_title( "Density of the accuracy scores for the different multiclass strategies" ) # %% # At a first glance, we can see that the built-in strategy of the decision # tree classifier is working quite well. One-vs-one and the error-correcting # output code strategies are working even better. However, the # one-vs-rest strategy is not working as well as the other strategies. # # Indeed, these results reproduce something reported in the literature # as in [2]_. However, the story is not as simple as it seems. # # The importance of hyperparameters search # ---------------------------------------- # # It was later shown in [3]_ that the multiclass strategies would show similar # scores if the hyperparameters of the base classifiers are first optimized. # # Here we try to reproduce such result by at least optimizing the depth of the # base decision tree. from sklearn.model_selection import GridSearchCV param_grid = {"max_depth": [3, 5, 8]} tree_optimized = GridSearchCV(tree, param_grid=param_grid, cv=3) ovo_tree = OneVsOneClassifier(tree_optimized) ovr_tree = OneVsRestClassifier(tree_optimized) ecoc = OutputCodeClassifier(tree_optimized, code_size=2) cv_results_tree = cross_validate(tree_optimized, X, y, cv=cv, n_jobs=2) cv_results_ovo = cross_validate(ovo_tree, X, y, cv=cv, n_jobs=2) cv_results_ovr = cross_validate(ovr_tree, X, y, cv=cv, n_jobs=2) cv_results_ecoc = cross_validate(ecoc, X, y, cv=cv, n_jobs=2) scores = pd.DataFrame( { "DecisionTreeClassifier": cv_results_tree["test_score"], "OneVsOneClassifier": cv_results_ovo["test_score"], "OneVsRestClassifier": cv_results_ovr["test_score"], "OutputCodeClassifier": cv_results_ecoc["test_score"], } ) ax = scores.plot.kde(legend=True) ax.set_xlabel("Accuracy score") ax.set_xlim([0, 0.7]) _ = ax.set_title( "Density of the accuracy scores for the different multiclass strategies" ) plt.show() # %% # We can see that once the hyperparameters are optimized, all multiclass # strategies have similar performance as discussed in [3]_. # # Conclusion # ---------- # # We can get some intuition behind those results. # # First, the reason for which one-vs-one and error-correcting output code are # outperforming the tree when the hyperparameters are not optimized relies on # fact that they ensemble a larger number of classifiers. 
The ensembling # improves the generalization performance. This is somewhat similar to why a bagging # classifier generally performs better than a single decision tree if no care # is taken to optimize the hyperparameters. # # Then, we see the importance of optimizing the hyperparameters: hyperparameter # search should be part of the routine when developing predictive models, even # if techniques such as ensembling help to reduce its impact. # # Finally, it is important to recall that the estimators in scikit-learn # are developed with a specific strategy to handle multiclass classification # out of the box, so for these estimators there is no need to use a different # strategy. These strategies are mainly useful for third-party estimators that # only support binary classification. In all cases, we also showed # that the hyperparameters should be optimized. # # References # ---------- # # .. [1] https://archive.ics.uci.edu/ml/datasets/Yeast # # .. [2] `"Reducing multiclass to binary: A unifying approach for margin classifiers." # Allwein, Erin L., Robert E. Schapire, and Yoram Singer. # Journal of Machine Learning Research. 1 Dec (2000): 113-141. # <https://www.jmlr.org/papers/volume1/allwein00a/allwein00a.pdf>`_ # # .. [3] `"In defense of one-vs-all classification." # Rifkin, Ryan, and Aldebaro Klautau. # Journal of Machine Learning Research. 5 Jan (2004): 101-141. # <https://www.jmlr.org/papers/volume5/rifkin04a/rifkin04a.pdf>`_
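# %%
# Not part of the original example: a small sketch making the "larger number
# of classifiers" argument above concrete for the 10 classes of the Yeast
# dataset. One-vs-one fits k * (k - 1) / 2 binary problems, one-vs-rest fits
# k, and the output-code strategy fits roughly ``code_size * k``.
n_classes = y.nunique()
print(f"number of classes: {n_classes}")
print(f"one-vs-one binary problems:  {n_classes * (n_classes - 1) // 2}")
print(f"one-vs-rest binary problems: {n_classes}")
print(f"output-code binary problems (code_size=2): {int(2 * n_classes)}")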
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/manifold/plot_compare_methods.py
examples/manifold/plot_compare_methods.py
""" ========================================= Comparison of Manifold Learning methods ========================================= An illustration of dimensionality reduction on the S-curve dataset with various manifold learning methods. For a discussion and comparison of these algorithms, see the :ref:`manifold module page <manifold>` For a similar example, where the methods are applied to a sphere dataset, see :ref:`sphx_glr_auto_examples_manifold_plot_manifold_sphere.py` Note that the purpose of the MDS is to find a low-dimensional representation of the data (here 2D) in which the distances respect well the distances in the original high-dimensional space, unlike other manifold-learning algorithms, it does not seeks an isotropic representation of the data in the low-dimensional space. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Dataset preparation # ------------------- # # We start by generating the S-curve dataset. import matplotlib.pyplot as plt # unused but required import for doing 3d projections with matplotlib < 3.2 import mpl_toolkits.mplot3d # noqa: F401 from matplotlib import ticker from sklearn import datasets, manifold n_samples = 1500 S_points, S_color = datasets.make_s_curve(n_samples, random_state=0) # %% # Let's look at the original data. Also define some helping # functions, which we will use further on. def plot_3d(points, points_color, title): x, y, z = points.T fig, ax = plt.subplots( figsize=(6, 6), facecolor="white", tight_layout=True, subplot_kw={"projection": "3d"}, ) fig.suptitle(title, size=16) col = ax.scatter(x, y, z, c=points_color, s=50, alpha=0.8) ax.view_init(azim=-60, elev=9) ax.xaxis.set_major_locator(ticker.MultipleLocator(1)) ax.yaxis.set_major_locator(ticker.MultipleLocator(1)) ax.zaxis.set_major_locator(ticker.MultipleLocator(1)) fig.colorbar(col, ax=ax, orientation="horizontal", shrink=0.6, aspect=60, pad=0.01) plt.show() def plot_2d(points, points_color, title): fig, ax = plt.subplots(figsize=(3, 3), facecolor="white", constrained_layout=True) fig.suptitle(title, size=16) add_2d_scatter(ax, points, points_color) plt.show() def add_2d_scatter(ax, points, points_color, title=None): x, y = points.T ax.scatter(x, y, c=points_color, s=50, alpha=0.8) ax.set_title(title) ax.xaxis.set_major_formatter(ticker.NullFormatter()) ax.yaxis.set_major_formatter(ticker.NullFormatter()) plot_3d(S_points, S_color, "Original S-curve samples") # %% # Define algorithms for the manifold learning # ------------------------------------------- # # Manifold learning is an approach to non-linear dimensionality reduction. # Algorithms for this task are based on the idea that the dimensionality of # many data sets is only artificially high. # # Read more in the :ref:`User Guide <manifold>`. n_neighbors = 12 # neighborhood which is used to recover the locally linear structure n_components = 2 # number of coordinates for the manifold # %% # Locally Linear Embeddings # ^^^^^^^^^^^^^^^^^^^^^^^^^ # # Locally linear embedding (LLE) can be thought of as a series of local # Principal Component Analyses which are globally compared to find the # best non-linear embedding. # Read more in the :ref:`User Guide <locally_linear_embedding>`. 
params = { "n_neighbors": n_neighbors, "n_components": n_components, "eigen_solver": "auto", "random_state": 0, } lle_standard = manifold.LocallyLinearEmbedding(method="standard", **params) S_standard = lle_standard.fit_transform(S_points) lle_ltsa = manifold.LocallyLinearEmbedding(method="ltsa", **params) S_ltsa = lle_ltsa.fit_transform(S_points) lle_hessian = manifold.LocallyLinearEmbedding(method="hessian", **params) S_hessian = lle_hessian.fit_transform(S_points) lle_mod = manifold.LocallyLinearEmbedding(method="modified", **params) S_mod = lle_mod.fit_transform(S_points) # %% fig, axs = plt.subplots( nrows=2, ncols=2, figsize=(7, 7), facecolor="white", constrained_layout=True ) fig.suptitle("Locally Linear Embeddings", size=16) lle_methods = [ ("Standard locally linear embedding", S_standard), ("Local tangent space alignment", S_ltsa), ("Hessian eigenmap", S_hessian), ("Modified locally linear embedding", S_mod), ] for ax, method in zip(axs.flat, lle_methods): name, points = method add_2d_scatter(ax, points, S_color, name) plt.show() # %% # Isomap Embedding # ^^^^^^^^^^^^^^^^ # # Non-linear dimensionality reduction through Isometric Mapping. # Isomap seeks a lower-dimensional embedding which maintains geodesic # distances between all points. Read more in the :ref:`User Guide <isomap>`. isomap = manifold.Isomap(n_neighbors=n_neighbors, n_components=n_components, p=1) S_isomap = isomap.fit_transform(S_points) plot_2d(S_isomap, S_color, "Isomap Embedding") # %% # Multidimensional scaling # ^^^^^^^^^^^^^^^^^^^^^^^^ # # Multidimensional scaling (MDS) seeks a low-dimensional representation # of the data in which the distances respect well the distances in the # original high-dimensional space. # Read more in the :ref:`User Guide <multidimensional_scaling>`. md_scaling = manifold.MDS( n_components=n_components, max_iter=50, n_init=1, random_state=0, init="classical_mds", normalized_stress=False, ) S_scaling_metric = md_scaling.fit_transform(S_points) md_scaling_nonmetric = manifold.MDS( n_components=n_components, max_iter=50, n_init=1, random_state=0, normalized_stress=False, metric_mds=False, init="classical_mds", ) S_scaling_nonmetric = md_scaling_nonmetric.fit_transform(S_points) md_scaling_classical = manifold.ClassicalMDS(n_components=n_components) S_scaling_classical = md_scaling_classical.fit_transform(S_points) # %% fig, axs = plt.subplots( nrows=1, ncols=3, figsize=(7, 3.5), facecolor="white", constrained_layout=True ) fig.suptitle("Multidimensional scaling", size=16) mds_methods = [ ("Metric MDS", S_scaling_metric), ("Non-metric MDS", S_scaling_nonmetric), ("Classical MDS", S_scaling_classical), ] for ax, method in zip(axs.flat, mds_methods): name, points = method add_2d_scatter(ax, points, S_color, name) plt.show() # %% # Spectral embedding for non-linear dimensionality reduction # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # # This implementation uses Laplacian Eigenmaps, which finds a low dimensional # representation of the data using a spectral decomposition of the graph Laplacian. # Read more in the :ref:`User Guide <spectral_embedding>`. 
spectral = manifold.SpectralEmbedding( n_components=n_components, n_neighbors=n_neighbors, random_state=42 ) S_spectral = spectral.fit_transform(S_points) plot_2d(S_spectral, S_color, "Spectral Embedding") # %% # T-distributed Stochastic Neighbor Embedding # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # # It converts similarities between data points to joint probabilities and # tries to minimize the Kullback-Leibler divergence between the joint probabilities # of the low-dimensional embedding and the high-dimensional data. t-SNE has a cost # function that is not convex, i.e. with different initializations we can get # different results. Read more in the :ref:`User Guide <t_sne>`. t_sne = manifold.TSNE( n_components=n_components, perplexity=30, init="random", max_iter=250, random_state=0, ) S_t_sne = t_sne.fit_transform(S_points) plot_2d(S_t_sne, S_color, "T-distributed Stochastic \n Neighbor Embedding") # %%
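# %%
# Not part of the original example: since the t-SNE cost function is not
# convex, two runs with different random initializations generally converge to
# different embeddings. This small sketch re-runs t-SNE on ``S_points`` with
# two seeds and checks that the coordinates differ.
import numpy as np

emb_a = manifold.TSNE(
    n_components=2, perplexity=30, init="random", max_iter=250, random_state=0
).fit_transform(S_points)
emb_b = manifold.TSNE(
    n_components=2, perplexity=30, init="random", max_iter=250, random_state=1
).fit_transform(S_points)
print("identical embeddings:", np.allclose(emb_a, emb_b))  # expected: False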
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/manifold/plot_mds.py
examples/manifold/plot_mds.py
""" ========================= Multi-dimensional scaling ========================= An illustration of the metric and non-metric MDS on generated noisy data. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Dataset preparation # ------------------- # # We start by uniformly generating 20 points in a 2D space. import numpy as np from matplotlib import pyplot as plt from matplotlib.collections import LineCollection from sklearn import manifold from sklearn.decomposition import PCA from sklearn.metrics import euclidean_distances # Generate the data EPSILON = np.finfo(np.float32).eps n_samples = 20 rng = np.random.RandomState(seed=3) X_true = rng.randint(0, 20, 2 * n_samples).astype(float) X_true = X_true.reshape((n_samples, 2)) # Center the data X_true -= X_true.mean() # %% # Now we compute pairwise distances between all points and add # a small amount of noise to the distance matrix. We make sure # to keep the noisy distance matrix symmetric. # Compute pairwise Euclidean distances distances = euclidean_distances(X_true) # Add noise to the distances noise = rng.rand(n_samples, n_samples) noise = noise + noise.T np.fill_diagonal(noise, 0) distances += noise # %% # Here we compute metric, non-metric, and classical MDS of the noisy distance matrix. mds = manifold.MDS( n_components=2, max_iter=3000, eps=1e-9, n_init=1, random_state=42, metric="precomputed", n_jobs=1, init="classical_mds", ) X_mds = mds.fit(distances).embedding_ nmds = manifold.MDS( n_components=2, metric_mds=False, max_iter=3000, eps=1e-12, metric="precomputed", random_state=42, n_jobs=1, n_init=1, init="classical_mds", ) X_nmds = nmds.fit_transform(distances) cmds = manifold.ClassicalMDS( n_components=2, metric="precomputed", ) X_cmds = cmds.fit_transform(distances) # %% # Rescaling the non-metric MDS solution to match the spread of the original data. X_nmds *= np.sqrt((X_true**2).sum()) / np.sqrt((X_nmds**2).sum()) # %% # To make the visual comparisons easier, we rotate the original data and all MDS # solutions to their PCA axes. And flip horizontal and vertical MDS axes, if needed, # to match the original data orientation. # Rotate the data (CMDS does not need to be rotated, it is inherently PCA-aligned) pca = PCA(n_components=2) X_true = pca.fit_transform(X_true) X_mds = pca.fit_transform(X_mds) X_nmds = pca.fit_transform(X_nmds) # Align the sign of PCs for i in [0, 1]: if np.corrcoef(X_mds[:, i], X_true[:, i])[0, 1] < 0: X_mds[:, i] *= -1 if np.corrcoef(X_nmds[:, i], X_true[:, i])[0, 1] < 0: X_nmds[:, i] *= -1 if np.corrcoef(X_cmds[:, i], X_true[:, i])[0, 1] < 0: X_cmds[:, i] *= -1 # %% # Finally, we plot the original data and all MDS reconstructions. fig = plt.figure(1) ax = plt.axes([0.0, 0.0, 1.0, 1.0]) s = 100 plt.scatter(X_true[:, 0], X_true[:, 1], color="navy", s=s, lw=0, label="True Position") plt.scatter(X_mds[:, 0], X_mds[:, 1], color="turquoise", s=s, lw=0, label="MDS") plt.scatter( X_nmds[:, 0], X_nmds[:, 1], color="darkorange", s=s, lw=0, label="Non-metric MDS" ) plt.scatter( X_cmds[:, 0], X_cmds[:, 1], color="lightcoral", s=s, lw=0, label="Classical MDS" ) plt.legend(scatterpoints=1, loc="best", shadow=False) # Plot the edges start_idx, end_idx = X_mds.nonzero() # a sequence of (*line0*, *line1*, *line2*), where:: # linen = (x0, y0), (x1, y1), ... 
(xm, ym) segments = [ [X_true[i, :], X_true[j, :]] for i in range(len(X_true)) for j in range(len(X_true)) ] edges = distances.max() / (distances + EPSILON) * 100 np.fill_diagonal(edges, 0) edges = np.abs(edges) lc = LineCollection( segments, zorder=0, cmap=plt.cm.Blues, norm=plt.Normalize(0, edges.max()) ) lc.set_array(edges.flatten()) lc.set_linewidths(np.full(len(segments), 0.5)) ax.add_collection(lc) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/manifold/plot_t_sne_perplexity.py
examples/manifold/plot_t_sne_perplexity.py
""" ============================================================================= t-SNE: The effect of various perplexity values on the shape ============================================================================= An illustration of t-SNE on the two concentric circles and the S-curve datasets for different perplexity values. We observe a tendency towards clearer shapes as the perplexity value increases. The size, the distance and the shape of clusters may vary upon initialization, perplexity values and does not always convey a meaning. As shown below, t-SNE for higher perplexities finds meaningful topology of two concentric circles, however the size and the distance of the circles varies slightly from the original. Contrary to the two circles dataset, the shapes visually diverge from S-curve topology on the S-curve dataset even for larger perplexity values. For further details, "How to Use t-SNE Effectively" https://distill.pub/2016/misread-tsne/ provides a good discussion of the effects of various parameters, as well as interactive plots to explore those effects. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from time import time import matplotlib.pyplot as plt import numpy as np from matplotlib.ticker import NullFormatter from sklearn import datasets, manifold n_samples = 150 n_components = 2 (fig, subplots) = plt.subplots(3, 5, figsize=(15, 8)) perplexities = [5, 30, 50, 100] X, y = datasets.make_circles( n_samples=n_samples, factor=0.5, noise=0.05, random_state=0 ) red = y == 0 green = y == 1 ax = subplots[0][0] ax.scatter(X[red, 0], X[red, 1], c="r") ax.scatter(X[green, 0], X[green, 1], c="g") ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) plt.axis("tight") for i, perplexity in enumerate(perplexities): ax = subplots[0][i + 1] t0 = time() tsne = manifold.TSNE( n_components=n_components, init="random", random_state=0, perplexity=perplexity, max_iter=300, ) Y = tsne.fit_transform(X) t1 = time() print("circles, perplexity=%d in %.2g sec" % (perplexity, t1 - t0)) ax.set_title("Perplexity=%d" % perplexity) ax.scatter(Y[red, 0], Y[red, 1], c="r") ax.scatter(Y[green, 0], Y[green, 1], c="g") ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) ax.axis("tight") # Another example using s-curve X, color = datasets.make_s_curve(n_samples, random_state=0) ax = subplots[1][0] ax.scatter(X[:, 0], X[:, 2], c=color) ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) for i, perplexity in enumerate(perplexities): ax = subplots[1][i + 1] t0 = time() tsne = manifold.TSNE( n_components=n_components, init="random", random_state=0, perplexity=perplexity, learning_rate="auto", max_iter=300, ) Y = tsne.fit_transform(X) t1 = time() print("S-curve, perplexity=%d in %.2g sec" % (perplexity, t1 - t0)) ax.set_title("Perplexity=%d" % perplexity) ax.scatter(Y[:, 0], Y[:, 1], c=color) ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) ax.axis("tight") # Another example using a 2D uniform grid x = np.linspace(0, 1, int(np.sqrt(n_samples))) xx, yy = np.meshgrid(x, x) X = np.hstack( [ xx.ravel().reshape(-1, 1), yy.ravel().reshape(-1, 1), ] ) color = xx.ravel() ax = subplots[2][0] ax.scatter(X[:, 0], X[:, 1], c=color) ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) for i, perplexity in enumerate(perplexities): ax = subplots[2][i + 1] t0 = time() tsne = manifold.TSNE( 
n_components=n_components, init="random", random_state=0, perplexity=perplexity, max_iter=400, ) Y = tsne.fit_transform(X) t1 = time() print("uniform grid, perplexity=%d in %.2g sec" % (perplexity, t1 - t0)) ax.set_title("Perplexity=%d" % perplexity) ax.scatter(Y[:, 0], Y[:, 1], c=color) ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) ax.axis("tight") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/manifold/plot_lle_digits.py
examples/manifold/plot_lle_digits.py
""" ============================================================================= Manifold learning on handwritten digits: Locally Linear Embedding, Isomap... ============================================================================= We illustrate various embedding techniques on the digits dataset. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Load digits dataset # ------------------- # We will load the digits dataset and only use six first of the ten available classes. from sklearn.datasets import load_digits digits = load_digits(n_class=6) X, y = digits.data, digits.target n_samples, n_features = X.shape n_neighbors = 30 # %% # We can plot the first hundred digits from this data set. import matplotlib.pyplot as plt fig, axs = plt.subplots(nrows=10, ncols=10, figsize=(6, 6)) for idx, ax in enumerate(axs.ravel()): ax.imshow(X[idx].reshape((8, 8)), cmap=plt.cm.binary) ax.axis("off") _ = fig.suptitle("A selection from the 64-dimensional digits dataset", fontsize=16) # %% # Helper function to plot embedding # --------------------------------- # Below, we will use different techniques to embed the digits dataset. We will plot # the projection of the original data onto each embedding. It will allow us to # check whether or digits are grouped together in the embedding space, or # scattered across it. import numpy as np from matplotlib import offsetbox from sklearn.preprocessing import MinMaxScaler def plot_embedding(X, title): _, ax = plt.subplots() X = MinMaxScaler().fit_transform(X) for digit in digits.target_names: ax.scatter( *X[y == digit].T, marker=f"${digit}$", s=60, color=plt.cm.Dark2(digit), alpha=0.425, zorder=2, ) shown_images = np.array([[1.0, 1.0]]) # just something big for i in range(X.shape[0]): # plot every digit on the embedding # show an annotation box for a group of digits dist = np.sum((X[i] - shown_images) ** 2, 1) if np.min(dist) < 4e-3: # don't show points that are too close continue shown_images = np.concatenate([shown_images, [X[i]]], axis=0) imagebox = offsetbox.AnnotationBbox( offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r), X[i] ) imagebox.set(zorder=1) ax.add_artist(imagebox) ax.set_title(title) ax.axis("off") # %% # Embedding techniques comparison # ------------------------------- # # Below, we compare different techniques. However, there are a couple of things # to note: # # * the :class:`~sklearn.ensemble.RandomTreesEmbedding` is not # technically a manifold embedding method, as it learn a high-dimensional # representation on which we apply a dimensionality reduction method. # However, it is often useful to cast a dataset into a representation in # which the classes are linearly-separable. # * the :class:`~sklearn.discriminant_analysis.LinearDiscriminantAnalysis` and # the :class:`~sklearn.neighbors.NeighborhoodComponentsAnalysis`, are supervised # dimensionality reduction method, i.e. they make use of the provided labels, # contrary to other methods. # * the :class:`~sklearn.manifold.TSNE` is initialized with the embedding that is # generated by PCA in this example. It ensures global stability of the embedding, # i.e., the embedding does not depend on random initialization. 
from sklearn.decomposition import TruncatedSVD from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.ensemble import RandomTreesEmbedding from sklearn.manifold import ( MDS, TSNE, ClassicalMDS, Isomap, LocallyLinearEmbedding, SpectralEmbedding, ) from sklearn.neighbors import NeighborhoodComponentsAnalysis from sklearn.pipeline import make_pipeline from sklearn.random_projection import SparseRandomProjection embeddings = { "Random projection embedding": SparseRandomProjection( n_components=2, random_state=42 ), "Truncated SVD embedding": TruncatedSVD(n_components=2), "Linear Discriminant Analysis embedding": LinearDiscriminantAnalysis( n_components=2 ), "Isomap embedding": Isomap(n_neighbors=n_neighbors, n_components=2), "Standard LLE embedding": LocallyLinearEmbedding( n_neighbors=n_neighbors, n_components=2, method="standard" ), "Modified LLE embedding": LocallyLinearEmbedding( n_neighbors=n_neighbors, n_components=2, method="modified" ), "Hessian LLE embedding": LocallyLinearEmbedding( n_neighbors=n_neighbors, n_components=2, method="hessian" ), "LTSA LLE embedding": LocallyLinearEmbedding( n_neighbors=n_neighbors, n_components=2, method="ltsa" ), "Metric MDS embedding": MDS(n_components=2, n_init=1, init="classical_mds"), "Non-metric MDS embedding": MDS( n_components=2, n_init=1, init="classical_mds", metric_mds=False ), "Classical MDS embedding": ClassicalMDS(n_components=2), "Random Trees embedding": make_pipeline( RandomTreesEmbedding(n_estimators=200, max_depth=5, random_state=0), TruncatedSVD(n_components=2), ), "Spectral embedding": SpectralEmbedding( n_components=2, random_state=0, eigen_solver="arpack" ), "t-SNE embedding": TSNE( n_components=2, max_iter=500, n_iter_without_progress=150, n_jobs=2, random_state=0, ), "NCA embedding": NeighborhoodComponentsAnalysis( n_components=2, init="pca", random_state=0 ), } # %% # Once we declared all the methods of interest, we can run and perform the projection # of the original data. We will store the projected data as well as the computational # time needed to perform each projection. from time import time projections, timing = {}, {} for name, transformer in embeddings.items(): if name.startswith("Linear Discriminant Analysis"): data = X.copy() data.flat[:: X.shape[1] + 1] += 0.01 # Make X invertible else: data = X print(f"Computing {name}...") start_time = time() projections[name] = transformer.fit_transform(data, y) timing[name] = time() - start_time # %% # Finally, we can plot the resulting projection given by each method. for name in timing: title = f"{name} (time {timing[name]:.3f}s)" plot_embedding(projections[name], title) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/manifold/plot_swissroll.py
examples/manifold/plot_swissroll.py
""" =================================== Swiss Roll And Swiss-Hole Reduction =================================== This notebook seeks to compare two popular non-linear dimensionality techniques, T-distributed Stochastic Neighbor Embedding (t-SNE) and Locally Linear Embedding (LLE), on the classic Swiss Roll dataset. Then, we will explore how they both deal with the addition of a hole in the data. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Swiss Roll # --------------------------------------------------- # # We start by generating the Swiss Roll dataset. import matplotlib.pyplot as plt from sklearn import datasets, manifold sr_points, sr_color = datasets.make_swiss_roll(n_samples=1500, random_state=0) # %% # Now, let's take a look at our data: fig = plt.figure(figsize=(8, 6)) ax = fig.add_subplot(111, projection="3d") fig.add_axes(ax) ax.scatter( sr_points[:, 0], sr_points[:, 1], sr_points[:, 2], c=sr_color, s=50, alpha=0.8 ) ax.set_title("Swiss Roll in Ambient Space") ax.view_init(azim=-66, elev=12) _ = ax.text2D(0.8, 0.05, s="n_samples=1500", transform=ax.transAxes) # %% # Computing the LLE and t-SNE embeddings, we find that LLE seems to unroll the # Swiss Roll pretty effectively. t-SNE on the other hand, is able # to preserve the general structure of the data, but, poorly represents the # continuous nature of our original data. Instead, it seems to unnecessarily # clump sections of points together. sr_lle, sr_err = manifold.locally_linear_embedding( sr_points, n_neighbors=12, n_components=2 ) sr_tsne = manifold.TSNE(n_components=2, perplexity=40, random_state=0).fit_transform( sr_points ) fig, axs = plt.subplots(figsize=(8, 8), nrows=2) axs[0].scatter(sr_lle[:, 0], sr_lle[:, 1], c=sr_color) axs[0].set_title("LLE Embedding of Swiss Roll") axs[1].scatter(sr_tsne[:, 0], sr_tsne[:, 1], c=sr_color) _ = axs[1].set_title("t-SNE Embedding of Swiss Roll") # %% # .. note:: # # LLE seems to be stretching the points from the center (purple) # of the swiss roll. However, we observe that this is simply a byproduct # of how the data was generated. There is a higher density of points near the # center of the roll, which ultimately affects how LLE reconstructs the # data in a lower dimension. # %% # Swiss-Hole # --------------------------------------------------- # # Now let's take a look at how both algorithms deal with us adding a hole to # the data. First, we generate the Swiss-Hole dataset and plot it: sh_points, sh_color = datasets.make_swiss_roll( n_samples=1500, hole=True, random_state=0 ) fig = plt.figure(figsize=(8, 6)) ax = fig.add_subplot(111, projection="3d") fig.add_axes(ax) ax.scatter( sh_points[:, 0], sh_points[:, 1], sh_points[:, 2], c=sh_color, s=50, alpha=0.8 ) ax.set_title("Swiss-Hole in Ambient Space") ax.view_init(azim=-66, elev=12) _ = ax.text2D(0.8, 0.05, s="n_samples=1500", transform=ax.transAxes) # %% # Computing the LLE and t-SNE embeddings, we obtain similar results to the # Swiss Roll. LLE very capably unrolls the data and even preserves # the hole. t-SNE, again seems to clump sections of points together, but, we # note that it preserves the general topology of the original data. 
sh_lle, sh_err = manifold.locally_linear_embedding( sh_points, n_neighbors=12, n_components=2 ) sh_tsne = manifold.TSNE( n_components=2, perplexity=40, init="random", random_state=0 ).fit_transform(sh_points) fig, axs = plt.subplots(figsize=(8, 8), nrows=2) axs[0].scatter(sh_lle[:, 0], sh_lle[:, 1], c=sh_color) axs[0].set_title("LLE Embedding of Swiss-Hole") axs[1].scatter(sh_tsne[:, 0], sh_tsne[:, 1], c=sh_color) _ = axs[1].set_title("t-SNE Embedding of Swiss-Hole") # %% # # Concluding remarks # ------------------ # # We note that t-SNE is sensitive to its parameters; better results could # probably have been obtained by tuning them more carefully. # # We observe that, as seen in the "Manifold learning on # handwritten digits" example, t-SNE generally performs better than LLE # on real-world data.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/manifold/plot_manifold_sphere.py
examples/manifold/plot_manifold_sphere.py
""" ============================================= Manifold Learning methods on a severed sphere ============================================= An application of the different :ref:`manifold` techniques on a spherical data-set. Here one can see the use of dimensionality reduction in order to gain some intuition regarding the manifold learning methods. Regarding the dataset, the poles are cut from the sphere, as well as a thin slice down its side. This enables the manifold learning techniques to 'spread it open' whilst projecting it onto two dimensions. For a similar example, where the methods are applied to the S-curve dataset, see :ref:`sphx_glr_auto_examples_manifold_plot_compare_methods.py`. Note that the purpose of the :ref:`MDS <multidimensional_scaling>` is to find a low-dimensional representation of the data (here 2D) in which the distances respect well the distances in the original high-dimensional space, unlike other manifold-learning algorithms, it does not seeks an isotropic representation of the data in the low-dimensional space. Here the manifold problem matches fairly that of representing a flat map of the Earth, as with `map projection <https://en.wikipedia.org/wiki/Map_projection>`_. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from time import time import matplotlib.pyplot as plt # Unused but required import for doing 3d projections with matplotlib < 3.2 import mpl_toolkits.mplot3d # noqa: F401 import numpy as np from matplotlib.ticker import NullFormatter from sklearn import manifold from sklearn.utils import check_random_state # Variables for manifold learning. n_neighbors = 10 n_samples = 1000 # Create our sphere. random_state = check_random_state(0) p = random_state.rand(n_samples) * (2 * np.pi - 0.55) t = random_state.rand(n_samples) * np.pi # Sever the poles from the sphere. indices = (t < (np.pi - (np.pi / 8))) & (t > (np.pi / 8)) colors = p[indices] x, y, z = ( np.sin(t[indices]) * np.cos(p[indices]), np.sin(t[indices]) * np.sin(p[indices]), np.cos(t[indices]), ) # Plot our dataset. fig = plt.figure(figsize=(15, 12)) plt.suptitle( "Manifold Learning with %i points, %i neighbors" % (1000, n_neighbors), fontsize=14 ) ax = fig.add_subplot(351, projection="3d") ax.scatter(x, y, z, c=p[indices], cmap=plt.cm.rainbow) ax.view_init(40, -10) sphere_data = np.array([x, y, z]).T # Perform Locally Linear Embedding Manifold learning methods = ["standard", "ltsa", "hessian", "modified"] labels = ["LLE", "LTSA", "Hessian LLE", "Modified LLE"] for i, method in enumerate(methods): t0 = time() trans_data = ( manifold.LocallyLinearEmbedding( n_neighbors=n_neighbors, n_components=2, method=method, random_state=42 ) .fit_transform(sphere_data) .T ) t1 = time() print("%s: %.2g sec" % (methods[i], t1 - t0)) ax = fig.add_subplot(352 + i) plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow) plt.title("%s (%.2g sec)" % (labels[i], t1 - t0)) ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) plt.axis("tight") # Perform Isomap Manifold learning. 
t0 = time() trans_data = ( manifold.Isomap(n_neighbors=n_neighbors, n_components=2) .fit_transform(sphere_data) .T ) t1 = time() print("%s: %.2g sec" % ("ISO", t1 - t0)) ax = fig.add_subplot(357) plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow) plt.title("%s (%.2g sec)" % ("Isomap", t1 - t0)) ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) plt.axis("tight") # Perform Multi-dimensional scaling. t0 = time() mds = manifold.MDS(2, n_init=1, random_state=42, init="classical_mds") trans_data = mds.fit_transform(sphere_data).T t1 = time() print("MDS: %.2g sec" % (t1 - t0)) ax = fig.add_subplot(358) plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow) plt.title("MDS (%.2g sec)" % (t1 - t0)) ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) plt.axis("tight") t0 = time() mds = manifold.MDS(2, n_init=1, random_state=42, metric_mds=False, init="classical_mds") trans_data = mds.fit_transform(sphere_data).T t1 = time() print("Non-metric MDS: %.2g sec" % (t1 - t0)) ax = fig.add_subplot(359) plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow) plt.title("Non-metric MDS (%.2g sec)" % (t1 - t0)) ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) plt.axis("tight") t0 = time() mds = manifold.ClassicalMDS(2) trans_data = mds.fit_transform(sphere_data).T t1 = time() print("Classical MDS: %.2g sec" % (t1 - t0)) ax = fig.add_subplot(3, 5, 10) plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow) plt.title("Classical MDS (%.2g sec)" % (t1 - t0)) ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) plt.axis("tight") # Perform Spectral Embedding. t0 = time() se = manifold.SpectralEmbedding( n_components=2, n_neighbors=n_neighbors, random_state=42 ) trans_data = se.fit_transform(sphere_data).T t1 = time() print("Spectral Embedding: %.2g sec" % (t1 - t0)) ax = fig.add_subplot(3, 5, 12) plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow) plt.title("Spectral Embedding (%.2g sec)" % (t1 - t0)) ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) plt.axis("tight") # Perform t-distributed stochastic neighbor embedding. t0 = time() tsne = manifold.TSNE(n_components=2, random_state=0) trans_data = tsne.fit_transform(sphere_data).T t1 = time() print("t-SNE: %.2g sec" % (t1 - t0)) ax = fig.add_subplot(3, 5, 13) plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow) plt.title("t-SNE (%.2g sec)" % (t1 - t0)) ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) plt.axis("tight") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/text/plot_document_clustering.py
examples/text/plot_document_clustering.py
""" ======================================= Clustering text documents using k-means ======================================= This is an example showing how the scikit-learn API can be used to cluster documents by topics using a `Bag of Words approach <https://en.wikipedia.org/wiki/Bag-of-words_model>`_. Two algorithms are demonstrated, namely :class:`~sklearn.cluster.KMeans` and its more scalable variant, :class:`~sklearn.cluster.MiniBatchKMeans`. Additionally, latent semantic analysis is used to reduce dimensionality and discover latent patterns in the data. This example uses two different text vectorizers: a :class:`~sklearn.feature_extraction.text.TfidfVectorizer` and a :class:`~sklearn.feature_extraction.text.HashingVectorizer`. See the example notebook :ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py` for more information on vectorizers and a comparison of their processing times. For document analysis via a supervised learning approach, see the example script :ref:`sphx_glr_auto_examples_text_plot_document_classification_20newsgroups.py`. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Loading text data # ================= # # We load data from :ref:`20newsgroups_dataset`, which comprises around 18,000 # newsgroups posts on 20 topics. For illustrative purposes and to reduce the # computational cost, we select a subset of 4 topics only accounting for around # 3,400 documents. See the example # :ref:`sphx_glr_auto_examples_text_plot_document_classification_20newsgroups.py` # to gain intuition on the overlap of such topics. # # Notice that, by default, the text samples contain some message metadata such # as `"headers"`, `"footers"` (signatures) and `"quotes"` to other posts. We use # the `remove` parameter from :func:`~sklearn.datasets.fetch_20newsgroups` to # strip those features and have a more sensible clustering problem. import numpy as np from sklearn.datasets import fetch_20newsgroups categories = [ "alt.atheism", "talk.religion.misc", "comp.graphics", "sci.space", ] dataset = fetch_20newsgroups( remove=("headers", "footers", "quotes"), subset="all", categories=categories, shuffle=True, random_state=42, ) labels = dataset.target unique_labels, category_sizes = np.unique(labels, return_counts=True) true_k = unique_labels.shape[0] print(f"{len(dataset.data)} documents - {true_k} categories") # %% # Quantifying the quality of clustering results # ============================================= # # In this section we define a function to score different clustering pipelines # using several metrics. # # Clustering algorithms are fundamentally unsupervised learning methods. # However, since we happen to have class labels for this specific dataset, it is # possible to use evaluation metrics that leverage this "supervised" ground # truth information to quantify the quality of the resulting clusters. Examples # of such metrics are the following: # # - homogeneity, which quantifies how much clusters contain only members of a # single class; # # - completeness, which quantifies how much members of a given class are # assigned to the same clusters; # # - V-measure, the harmonic mean of completeness and homogeneity; # # - Rand-Index, which measures how frequently pairs of data points are grouped # consistently according to the result of the clustering algorithm and the # ground truth class assignment; # # - Adjusted Rand-Index, a chance-adjusted Rand-Index such that random cluster # assignment have an ARI of 0.0 in expectation. 
# # If the ground truth labels are not known, evaluation can only be performed # using the model results itself. In that case, the Silhouette Coefficient comes in # handy. See :ref:`sphx_glr_auto_examples_cluster_plot_kmeans_silhouette_analysis.py` # for an example on how to do it. # # For more reference, see :ref:`clustering_evaluation`. from collections import defaultdict from time import time from sklearn import metrics evaluations = [] evaluations_std = [] def fit_and_evaluate(km, X, name=None, n_runs=5): name = km.__class__.__name__ if name is None else name train_times = [] scores = defaultdict(list) for seed in range(n_runs): km.set_params(random_state=seed) t0 = time() km.fit(X) train_times.append(time() - t0) scores["Homogeneity"].append(metrics.homogeneity_score(labels, km.labels_)) scores["Completeness"].append(metrics.completeness_score(labels, km.labels_)) scores["V-measure"].append(metrics.v_measure_score(labels, km.labels_)) scores["Adjusted Rand-Index"].append( metrics.adjusted_rand_score(labels, km.labels_) ) scores["Silhouette Coefficient"].append( metrics.silhouette_score(X, km.labels_, sample_size=2000) ) train_times = np.asarray(train_times) print(f"clustering done in {train_times.mean():.2f} ± {train_times.std():.2f} s ") evaluation = { "estimator": name, "train_time": train_times.mean(), } evaluation_std = { "estimator": name, "train_time": train_times.std(), } for score_name, score_values in scores.items(): mean_score, std_score = np.mean(score_values), np.std(score_values) print(f"{score_name}: {mean_score:.3f} ± {std_score:.3f}") evaluation[score_name] = mean_score evaluation_std[score_name] = std_score evaluations.append(evaluation) evaluations_std.append(evaluation_std) # %% # K-means clustering on text features # =================================== # # Two feature extraction methods are used in this example: # # - :class:`~sklearn.feature_extraction.text.TfidfVectorizer` uses an in-memory # vocabulary (a Python dict) to map the most frequent words to features # indices and hence compute a word occurrence frequency (sparse) matrix. The # word frequencies are then reweighted using the Inverse Document Frequency # (IDF) vector collected feature-wise over the corpus. # # - :class:`~sklearn.feature_extraction.text.HashingVectorizer` hashes word # occurrences to a fixed dimensional space, possibly with collisions. The word # count vectors are then normalized to each have l2-norm equal to one # (projected to the euclidean unit-sphere) which seems to be important for # k-means to work in high dimensional space. # # Furthermore it is possible to post-process those extracted features using # dimensionality reduction. We will explore the impact of those choices on the # clustering quality in the following. # # Feature Extraction using TfidfVectorizer # ---------------------------------------- # # We first benchmark the estimators using a dictionary vectorizer along with an # IDF normalization as provided by # :class:`~sklearn.feature_extraction.text.TfidfVectorizer`. 
from sklearn.feature_extraction.text import TfidfVectorizer vectorizer = TfidfVectorizer( max_df=0.5, min_df=5, stop_words="english", ) t0 = time() X_tfidf = vectorizer.fit_transform(dataset.data) print(f"vectorization done in {time() - t0:.3f} s") print(f"n_samples: {X_tfidf.shape[0]}, n_features: {X_tfidf.shape[1]}") # %% # After ignoring terms that appear in more than 50% of the documents (as set by # `max_df=0.5`) and terms that are not present in at least 5 documents (set by # `min_df=5`), the resulting number of unique terms `n_features` is around # 8,000. We can additionally quantify the sparsity of the `X_tfidf` matrix as # the fraction of non-zero entries divided by the total number of elements. print(f"{X_tfidf.nnz / np.prod(X_tfidf.shape):.3f}") # %% # We find that around 0.7% of the entries of the `X_tfidf` matrix are non-zero. # # .. _kmeans_sparse_high_dim: # # Clustering sparse data with k-means # ----------------------------------- # # As both :class:`~sklearn.cluster.KMeans` and # :class:`~sklearn.cluster.MiniBatchKMeans` optimize a non-convex objective # function, their clustering is not guaranteed to be optimal for a given random # init. Even further, on sparse high-dimensional data such as text vectorized # using the Bag of Words approach, k-means can initialize centroids on extremely # isolated data points. Those data points can stay their own centroids all # along. # # The following code illustrates how the previous phenomenon can sometimes lead # to highly imbalanced clusters, depending on the random initialization: from sklearn.cluster import KMeans for seed in range(5): kmeans = KMeans( n_clusters=true_k, max_iter=100, n_init=1, random_state=seed, ).fit(X_tfidf) cluster_ids, cluster_sizes = np.unique(kmeans.labels_, return_counts=True) print(f"Number of elements assigned to each cluster: {cluster_sizes}") print() print( "True number of documents in each category according to the class labels: " f"{category_sizes}" ) # %% # To avoid this problem, one possibility is to increase the number of runs with # independent random initiations `n_init`. In such case the clustering with the # best inertia (objective function of k-means) is chosen. kmeans = KMeans( n_clusters=true_k, max_iter=100, n_init=5, ) fit_and_evaluate(kmeans, X_tfidf, name="KMeans\non tf-idf vectors") # %% # All those clustering evaluation metrics have a maximum value of 1.0 (for a # perfect clustering result). Higher values are better. Values of the Adjusted # Rand-Index close to 0.0 correspond to a random labeling. Notice from the # scores above that the cluster assignment is indeed well above chance level, # but the overall quality can certainly improve. # # Keep in mind that the class labels may not reflect accurately the document # topics and therefore metrics that use labels are not necessarily the best to # evaluate the quality of our clustering pipeline. # # Performing dimensionality reduction using LSA # --------------------------------------------- # # A `n_init=1` can still be used as long as the dimension of the vectorized # space is reduced first to make k-means more stable. For such purpose we use # :class:`~sklearn.decomposition.TruncatedSVD`, which works on term count/tf-idf # matrices. Since SVD results are not normalized, we redo the normalization to # improve the :class:`~sklearn.cluster.KMeans` result. 
Using SVD to reduce the # dimensionality of TF-IDF document vectors is often known as `latent semantic # analysis <https://en.wikipedia.org/wiki/Latent_semantic_analysis>`_ (LSA) in # the information retrieval and text mining literature. from sklearn.decomposition import TruncatedSVD from sklearn.pipeline import make_pipeline from sklearn.preprocessing import Normalizer lsa = make_pipeline(TruncatedSVD(n_components=100), Normalizer(copy=False)) t0 = time() X_lsa = lsa.fit_transform(X_tfidf) explained_variance = lsa[0].explained_variance_ratio_.sum() print(f"LSA done in {time() - t0:.3f} s") print(f"Explained variance of the SVD step: {explained_variance * 100:.1f}%") # %% # Using a single initialization means the processing time will be reduced for # both :class:`~sklearn.cluster.KMeans` and # :class:`~sklearn.cluster.MiniBatchKMeans`. kmeans = KMeans( n_clusters=true_k, max_iter=100, n_init=1, ) fit_and_evaluate(kmeans, X_lsa, name="KMeans\nwith LSA on tf-idf vectors") # %% # We can observe that clustering on the LSA representation of the document is # significantly faster (both because of `n_init=1` and because the # dimensionality of the LSA feature space is much smaller). Furthermore, all the # clustering evaluation metrics have improved. We repeat the experiment with # :class:`~sklearn.cluster.MiniBatchKMeans`. from sklearn.cluster import MiniBatchKMeans minibatch_kmeans = MiniBatchKMeans( n_clusters=true_k, n_init=1, init_size=1000, batch_size=1000, ) fit_and_evaluate( minibatch_kmeans, X_lsa, name="MiniBatchKMeans\nwith LSA on tf-idf vectors", ) # %% # Top terms per cluster # --------------------- # # Since :class:`~sklearn.feature_extraction.text.TfidfVectorizer` can be # inverted we can identify the cluster centers, which provide an intuition of # the most influential words **for each cluster**. See the example script # :ref:`sphx_glr_auto_examples_text_plot_document_classification_20newsgroups.py` # for a comparison with the most predictive words **for each target class**. original_space_centroids = lsa[0].inverse_transform(kmeans.cluster_centers_) order_centroids = original_space_centroids.argsort()[:, ::-1] terms = vectorizer.get_feature_names_out() for i in range(true_k): print(f"Cluster {i}: ", end="") for ind in order_centroids[i, :10]: print(f"{terms[ind]} ", end="") print() # %% # HashingVectorizer # ----------------- # An alternative vectorization can be done using a # :class:`~sklearn.feature_extraction.text.HashingVectorizer` instance, which # does not provide IDF weighting as this is a stateless model (the fit method # does nothing). When IDF weighting is needed it can be added by pipelining the # :class:`~sklearn.feature_extraction.text.HashingVectorizer` output to a # :class:`~sklearn.feature_extraction.text.TfidfTransformer` instance. In this # case we also add LSA to the pipeline to reduce the dimension and sparcity of # the hashed vector space. from sklearn.feature_extraction.text import HashingVectorizer, TfidfTransformer lsa_vectorizer = make_pipeline( HashingVectorizer(stop_words="english", n_features=50_000), TfidfTransformer(), TruncatedSVD(n_components=100, random_state=0), Normalizer(copy=False), ) t0 = time() X_hashed_lsa = lsa_vectorizer.fit_transform(dataset.data) print(f"vectorization done in {time() - t0:.3f} s") # %% # One can observe that the LSA step takes a relatively long time to fit, # especially with hashed vectors. The reason is that a hashed space is typically # large (set to `n_features=50_000` in this example). 
One can try lowering the # number of features at the expense of having a larger fraction of features with # hash collisions as shown in the example notebook # :ref:`sphx_glr_auto_examples_text_plot_hashing_vs_dict_vectorizer.py`. # # We now fit and evaluate the `kmeans` and `minibatch_kmeans` instances on this # hashed-lsa-reduced data: fit_and_evaluate(kmeans, X_hashed_lsa, name="KMeans\nwith LSA on hashed vectors") # %% fit_and_evaluate( minibatch_kmeans, X_hashed_lsa, name="MiniBatchKMeans\nwith LSA on hashed vectors", ) # %% # Both methods lead to good results that are similar to running the same models # on the traditional LSA vectors (without hashing). # # Clustering evaluation summary # ============================== import matplotlib.pyplot as plt import pandas as pd fig, (ax0, ax1) = plt.subplots(ncols=2, figsize=(16, 6), sharey=True) df = pd.DataFrame(evaluations[::-1]).set_index("estimator") df_std = pd.DataFrame(evaluations_std[::-1]).set_index("estimator") df.drop( ["train_time"], axis="columns", ).plot.barh(ax=ax0, xerr=df_std) ax0.set_xlabel("Clustering scores") ax0.set_ylabel("") df["train_time"].plot.barh(ax=ax1, xerr=df_std["train_time"]) ax1.set_xlabel("Clustering time (s)") plt.tight_layout() # %% # :class:`~sklearn.cluster.KMeans` and :class:`~sklearn.cluster.MiniBatchKMeans` # suffer from the phenomenon called the `Curse of Dimensionality # <https://en.wikipedia.org/wiki/Curse_of_dimensionality>`_ for high dimensional # datasets such as text data. That is the reason why the overall scores improve # when using LSA. Using LSA reduced data also improves the stability and # requires lower clustering time, though keep in mind that the LSA step itself # takes a long time, especially with hashed vectors. # # The Silhouette Coefficient is defined between 0 and 1. In all cases we obtain # values close to 0 (even if they improve a bit after using LSA) because its # definition requires measuring distances, in contrast with other evaluation # metrics such as the V-measure and the Adjusted Rand Index which are only based # on cluster assignments rather than distances. Notice that strictly speaking, # one should not compare the Silhouette Coefficient between spaces of different # dimension, due to the different notions of distance they imply. # # The homogeneity, completeness and hence v-measure metrics do not yield a # baseline with regards to random labeling: this means that depending on the # number of samples, clusters and ground truth classes, a completely random # labeling will not always yield the same values. In particular random labeling # won't yield zero scores, especially when the number of clusters is large. This # problem can safely be ignored when the number of samples is more than a # thousand and the number of clusters is less than 10, which is the case of the # present example. For smaller sample sizes or larger number of clusters it is # safer to use an adjusted index such as the Adjusted Rand Index (ARI). See the # example # :ref:`sphx_glr_auto_examples_cluster_plot_adjusted_for_chance_measures.py` for # a demo on the effect of random labeling. # # The size of the error bars show that :class:`~sklearn.cluster.MiniBatchKMeans` # is less stable than :class:`~sklearn.cluster.KMeans` for this relatively small # dataset. It is more interesting to use when the number of samples is much # bigger, but it can come at the expense of a small degradation in clustering # quality compared to the traditional k-means algorithm.
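# %%
# As a quick illustration of the remark above on chance-level baselines, we
# can score two completely random labelings against each other: the Adjusted
# Rand-Index stays close to zero while the V-measure does not. The number of
# samples and clusters below are arbitrary choices for this sketch.

import numpy as np

from sklearn.metrics import adjusted_rand_score, v_measure_score

rng = np.random.RandomState(0)
n_samples_sketch, n_clusters_sketch = 200, 10
random_labels_a = rng.randint(n_clusters_sketch, size=n_samples_sketch)
random_labels_b = rng.randint(n_clusters_sketch, size=n_samples_sketch)

print(f"V-measure: {v_measure_score(random_labels_a, random_labels_b):.3f}")
print(f"Adjusted Rand-Index: {adjusted_rand_score(random_labels_a, random_labels_b):.3f}")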
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/text/plot_hashing_vs_dict_vectorizer.py
examples/text/plot_hashing_vs_dict_vectorizer.py
""" =========================================== FeatureHasher and DictVectorizer Comparison =========================================== In this example we illustrate text vectorization, which is the process of representing non-numerical input data (such as dictionaries or text documents) as vectors of real numbers. We first compare :func:`~sklearn.feature_extraction.FeatureHasher` and :func:`~sklearn.feature_extraction.DictVectorizer` by using both methods to vectorize text documents that are preprocessed (tokenized) with the help of a custom Python function. Later we introduce and analyze the text-specific vectorizers :func:`~sklearn.feature_extraction.text.HashingVectorizer`, :func:`~sklearn.feature_extraction.text.CountVectorizer` and :func:`~sklearn.feature_extraction.text.TfidfVectorizer` that handle both the tokenization and the assembling of the feature matrix within a single class. The objective of the example is to demonstrate the usage of text vectorization API and to compare their processing time. See the example scripts :ref:`sphx_glr_auto_examples_text_plot_document_classification_20newsgroups.py` and :ref:`sphx_glr_auto_examples_text_plot_document_clustering.py` for actual learning on text documents. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Load Data # --------- # # We load data from :ref:`20newsgroups_dataset`, which comprises around # 18000 newsgroups posts on 20 topics split in two subsets: one for training and # one for testing. For the sake of simplicity and reducing the computational # cost, we select a subset of 7 topics and use the training set only. from sklearn.datasets import fetch_20newsgroups categories = [ "alt.atheism", "comp.graphics", "comp.sys.ibm.pc.hardware", "misc.forsale", "rec.autos", "sci.space", "talk.religion.misc", ] print("Loading 20 newsgroups training data") raw_data, _ = fetch_20newsgroups(subset="train", categories=categories, return_X_y=True) data_size_mb = sum(len(s.encode("utf-8")) for s in raw_data) / 1e6 print(f"{len(raw_data)} documents - {data_size_mb:.3f}MB") # %% # Define preprocessing functions # ------------------------------ # # A token may be a word, part of a word or anything comprised between spaces or # symbols in a string. Here we define a function that extracts the tokens using # a simple regular expression (regex) that matches Unicode word characters. This # includes most characters that can be part of a word in any language, as well # as numbers and the underscore: import re def tokenize(doc): """Extract tokens from doc. This uses a simple regex that matches word characters to break strings into tokens. For a more principled approach, see CountVectorizer or TfidfVectorizer. """ return (tok.lower() for tok in re.findall(r"\w+", doc)) list(tokenize("This is a simple example, isn't it?")) # %% # We define an additional function that counts the (frequency of) occurrence of # each token in a given document. It returns a frequency dictionary to be used # by the vectorizers. from collections import defaultdict def token_freqs(doc): """Extract a dict mapping tokens from doc to their occurrences.""" freq = defaultdict(int) for tok in tokenize(doc): freq[tok] += 1 return freq token_freqs("That is one example, but this is another one") # %% # Observe in particular that the repeated token `"is"` is counted twice for # instance. 
# # Breaking a text document into word tokens, potentially losing the order # information between the words in a sentence is often called a `Bag of Words # representation <https://en.wikipedia.org/wiki/Bag-of-words_model>`_. # %% # DictVectorizer # -------------- # # First we benchmark the :func:`~sklearn.feature_extraction.DictVectorizer`, # then we compare it to :func:`~sklearn.feature_extraction.FeatureHasher` as # both of them receive dictionaries as input. from time import time from sklearn.feature_extraction import DictVectorizer dict_count_vectorizers = defaultdict(list) t0 = time() vectorizer = DictVectorizer() vectorizer.fit_transform(token_freqs(d) for d in raw_data) duration = time() - t0 dict_count_vectorizers["vectorizer"].append( vectorizer.__class__.__name__ + "\non freq dicts" ) dict_count_vectorizers["speed"].append(data_size_mb / duration) print(f"done in {duration:.3f} s at {data_size_mb / duration:.1f} MB/s") print(f"Found {len(vectorizer.get_feature_names_out())} unique terms") # %% # The actual mapping from text token to column index is explicitly stored in # the `.vocabulary_` attribute which is a potentially very large Python # dictionary: type(vectorizer.vocabulary_) # %% len(vectorizer.vocabulary_) # %% vectorizer.vocabulary_["example"] # %% # FeatureHasher # ------------- # # Dictionaries take up a large amount of storage space and grow in size as the # training set grows. Instead of growing the vectors along with a dictionary, # feature hashing builds a vector of pre-defined length by applying a hash # function `h` to the features (e.g., tokens), then using the hash values # directly as feature indices and updating the resulting vector at those # indices. When the feature space is not large enough, hashing functions tend to # map distinct values to the same hash code (hash collisions). As a result, it # is impossible to determine what object generated any particular hash code. # # Because of the above it is impossible to recover the original tokens from the # feature matrix and the best approach to estimate the number of unique terms in # the original dictionary is to count the number of active columns in the # encoded feature matrix. For such a purpose we define the following function: import numpy as np def n_nonzero_columns(X): """Number of columns with at least one non-zero value in a CSR matrix. This is useful to count the number of features columns that are effectively active when using the FeatureHasher. """ return len(np.unique(X.nonzero()[1])) # %% # The default number of features for the # :func:`~sklearn.feature_extraction.FeatureHasher` is 2**20. Here we set # `n_features = 2**18` to illustrate hash collisions. # # **FeatureHasher on frequency dictionaries** from sklearn.feature_extraction import FeatureHasher t0 = time() hasher = FeatureHasher(n_features=2**18) X = hasher.transform(token_freqs(d) for d in raw_data) duration = time() - t0 dict_count_vectorizers["vectorizer"].append( hasher.__class__.__name__ + "\non freq dicts" ) dict_count_vectorizers["speed"].append(data_size_mb / duration) print(f"done in {duration:.3f} s at {data_size_mb / duration:.1f} MB/s") print(f"Found {n_nonzero_columns(X)} unique tokens") # %% # The number of unique tokens when using the # :func:`~sklearn.feature_extraction.FeatureHasher` is lower than those obtained # using the :func:`~sklearn.feature_extraction.DictVectorizer`. This is due to # hash collisions. # # The number of collisions can be reduced by increasing the feature space. 
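# %%
# As a small illustration of such collisions, we can hash more distinct tokens
# than there are available columns: by the pigeonhole principle, some tokens
# must then share a column. The tiny `n_features=8` below is chosen only to
# make collisions unavoidable and is not a realistic setting; the helper
# `n_nonzero_columns` is the one defined above.

from sklearn.feature_extraction import FeatureHasher

tiny_hasher = FeatureHasher(n_features=8, input_type="string")
distinct_tokens = [f"token{i}" for i in range(20)]
X_tiny = tiny_hasher.transform([distinct_tokens])
print(f"{len(distinct_tokens)} distinct tokens hashed into {X_tiny.shape[1]} columns")
print(f"active columns: {n_nonzero_columns(X_tiny)}")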
# Notice that the speed of the vectorizer does not change significantly when # setting a large number of features, though it causes larger coefficient # dimensions and then requires more memory usage to store them, even if a # majority of them is inactive. t0 = time() hasher = FeatureHasher(n_features=2**22) X = hasher.transform(token_freqs(d) for d in raw_data) duration = time() - t0 print(f"done in {duration:.3f} s at {data_size_mb / duration:.1f} MB/s") print(f"Found {n_nonzero_columns(X)} unique tokens") # %% # We confirm that the number of unique tokens gets closer to the number of # unique terms found by the :func:`~sklearn.feature_extraction.DictVectorizer`. # # **FeatureHasher on raw tokens** # # Alternatively, one can set `input_type="string"` in the # :func:`~sklearn.feature_extraction.FeatureHasher` to vectorize the strings # output directly from the customized `tokenize` function. This is equivalent to # passing a dictionary with an implied frequency of 1 for each feature name. t0 = time() hasher = FeatureHasher(n_features=2**18, input_type="string") X = hasher.transform(tokenize(d) for d in raw_data) duration = time() - t0 dict_count_vectorizers["vectorizer"].append( hasher.__class__.__name__ + "\non raw tokens" ) dict_count_vectorizers["speed"].append(data_size_mb / duration) print(f"done in {duration:.3f} s at {data_size_mb / duration:.1f} MB/s") print(f"Found {n_nonzero_columns(X)} unique tokens") # %% # We now plot the speed of the above methods for vectorizing. import matplotlib.pyplot as plt fig, ax = plt.subplots(figsize=(12, 6)) y_pos = np.arange(len(dict_count_vectorizers["vectorizer"])) ax.barh(y_pos, dict_count_vectorizers["speed"], align="center") ax.set_yticks(y_pos) ax.set_yticklabels(dict_count_vectorizers["vectorizer"]) ax.invert_yaxis() _ = ax.set_xlabel("speed (MB/s)") # %% # In both cases :func:`~sklearn.feature_extraction.FeatureHasher` is # approximately twice as fast as # :func:`~sklearn.feature_extraction.DictVectorizer`. This is handy when dealing # with large amounts of data, with the downside of losing the invertibility of # the transformation, which in turn makes the interpretation of a model a more # complex task. # # The `FeatureHeasher` with `input_type="string"` is slightly faster than the # variant that works on frequency dict because it does not count repeated # tokens: each token is implicitly counted once, even if it was repeated. # Depending on the downstream machine learning task, it can be a limitation or # not. # # Comparison with special purpose text vectorizers # ------------------------------------------------ # # :func:`~sklearn.feature_extraction.text.CountVectorizer` accepts raw data as # it internally implements tokenization and occurrence counting. It is similar # to the :func:`~sklearn.feature_extraction.DictVectorizer` when used along with # the customized function `token_freqs` as done in the previous section. The # difference being that :func:`~sklearn.feature_extraction.text.CountVectorizer` # is more flexible. In particular it accepts various regex patterns through the # `token_pattern` parameter. 
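# %%
# As a short illustration of that flexibility, the `token_pattern` below is
# changed so that single-character tokens are kept, which the default pattern
# discards. The toy sentence is made up for this sketch; the benchmark that
# follows uses the default settings.

from sklearn.feature_extraction.text import CountVectorizer

toy_sentence = ["a tiny example shows a difference"]
default_tokens = CountVectorizer().fit(toy_sentence).get_feature_names_out()
keep_short_tokens = (
    CountVectorizer(token_pattern=r"(?u)\b\w+\b")
    .fit(toy_sentence)
    .get_feature_names_out()
)
print(f"default pattern: {default_tokens}")
print(f"custom pattern:  {keep_short_tokens}")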
from sklearn.feature_extraction.text import CountVectorizer t0 = time() vectorizer = CountVectorizer() vectorizer.fit_transform(raw_data) duration = time() - t0 dict_count_vectorizers["vectorizer"].append(vectorizer.__class__.__name__) dict_count_vectorizers["speed"].append(data_size_mb / duration) print(f"done in {duration:.3f} s at {data_size_mb / duration:.1f} MB/s") print(f"Found {len(vectorizer.get_feature_names_out())} unique terms") # %% # We see that using the :func:`~sklearn.feature_extraction.text.CountVectorizer` # implementation is approximately twice as fast as using the # :func:`~sklearn.feature_extraction.DictVectorizer` along with the simple # function we defined for mapping the tokens. The reason is that # :func:`~sklearn.feature_extraction.text.CountVectorizer` is optimized by # reusing a compiled regular expression for the full training set instead of # creating one per document as done in our naive tokenize function. # # Now we make a similar experiment with the # :func:`~sklearn.feature_extraction.text.HashingVectorizer`, which is # equivalent to combining the "hashing trick" implemented by the # :func:`~sklearn.feature_extraction.FeatureHasher` class and the text # preprocessing and tokenization of the # :func:`~sklearn.feature_extraction.text.CountVectorizer`. from sklearn.feature_extraction.text import HashingVectorizer t0 = time() vectorizer = HashingVectorizer(n_features=2**18) vectorizer.fit_transform(raw_data) duration = time() - t0 dict_count_vectorizers["vectorizer"].append(vectorizer.__class__.__name__) dict_count_vectorizers["speed"].append(data_size_mb / duration) print(f"done in {duration:.3f} s at {data_size_mb / duration:.1f} MB/s") # %% # We can observe that this is the fastest text tokenization strategy so far, # assuming that the downstream machine learning task can tolerate a few # collisions. # # TfidfVectorizer # --------------- # # In a large text corpus, some words appear with higher frequency (e.g. "the", # "a", "is" in English) and do not carry meaningful information about the actual # contents of a document. If we were to feed the word count data directly to a # classifier, those very common terms would shadow the frequencies of rarer yet # more informative terms. In order to re-weight the count features into floating # point values suitable for usage by a classifier it is very common to use the # tf-idf transform as implemented by the # :func:`~sklearn.feature_extraction.text.TfidfTransformer`. TF stands for # "term-frequency" while "tf-idf" means term-frequency times inverse # document-frequency. # # We now benchmark the :func:`~sklearn.feature_extraction.text.TfidfVectorizer`, # which is equivalent to combining the tokenization and occurrence counting of # the :func:`~sklearn.feature_extraction.text.CountVectorizer` along with the # normalizing and weighting from a # :func:`~sklearn.feature_extraction.text.TfidfTransformer`. 
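# %%
# That equivalence can be checked on a small toy corpus before running the
# benchmark: a CountVectorizer followed by a TfidfTransformer should produce
# the same matrix as a TfidfVectorizer with matching parameters. The two toy
# documents below are made up for this sketch.

import numpy as np

from sklearn.feature_extraction.text import (
    CountVectorizer,
    TfidfTransformer,
    TfidfVectorizer,
)
from sklearn.pipeline import make_pipeline

toy_corpus = ["the quick brown fox", "the lazy dog sleeps"]
X_two_steps = make_pipeline(CountVectorizer(), TfidfTransformer()).fit_transform(toy_corpus)
X_one_step = TfidfVectorizer().fit_transform(toy_corpus)
print(f"identical matrices: {np.allclose(X_two_steps.toarray(), X_one_step.toarray())}")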
from sklearn.feature_extraction.text import TfidfVectorizer t0 = time() vectorizer = TfidfVectorizer() vectorizer.fit_transform(raw_data) duration = time() - t0 dict_count_vectorizers["vectorizer"].append(vectorizer.__class__.__name__) dict_count_vectorizers["speed"].append(data_size_mb / duration) print(f"done in {duration:.3f} s at {data_size_mb / duration:.1f} MB/s") print(f"Found {len(vectorizer.get_feature_names_out())} unique terms") # %% # Summary # ------- # Let's conclude this notebook by summarizing all the recorded processing speeds # in a single plot: fig, ax = plt.subplots(figsize=(12, 6)) y_pos = np.arange(len(dict_count_vectorizers["vectorizer"])) ax.barh(y_pos, dict_count_vectorizers["speed"], align="center") ax.set_yticks(y_pos) ax.set_yticklabels(dict_count_vectorizers["vectorizer"]) ax.invert_yaxis() _ = ax.set_xlabel("speed (MB/s)") # %% # Notice from the plot that # :func:`~sklearn.feature_extraction.text.TfidfVectorizer` is slightly slower # than :func:`~sklearn.feature_extraction.text.CountVectorizer` because of the # extra operation induced by the # :func:`~sklearn.feature_extraction.text.TfidfTransformer`. # # Also notice that, by setting the number of features `n_features = 2**18`, the # :func:`~sklearn.feature_extraction.text.HashingVectorizer` performs better # than the :func:`~sklearn.feature_extraction.text.CountVectorizer` at the # expense of inversibility of the transformation due to hash collisions. # # We highlight that :func:`~sklearn.feature_extraction.text.CountVectorizer` and # :func:`~sklearn.feature_extraction.text.HashingVectorizer` perform better than # their equivalent :func:`~sklearn.feature_extraction.DictVectorizer` and # :func:`~sklearn.feature_extraction.FeatureHasher` on manually tokenized # documents since the internal tokenization step of the former vectorizers # compiles a regular expression once and then reuses it for all the documents.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/text/plot_document_classification_20newsgroups.py
examples/text/plot_document_classification_20newsgroups.py
""" ====================================================== Classification of text documents using sparse features ====================================================== This is an example showing how scikit-learn can be used to classify documents by topics using a `Bag of Words approach <https://en.wikipedia.org/wiki/Bag-of-words_model>`_. This example uses a Tf-idf-weighted document-term sparse matrix to encode the features and demonstrates various classifiers that can efficiently handle sparse matrices. For document analysis via an unsupervised learning approach, see the example script :ref:`sphx_glr_auto_examples_text_plot_document_clustering.py`. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Loading and vectorizing the 20 newsgroups text dataset # ====================================================== # # We define a function to load data from :ref:`20newsgroups_dataset`, which # comprises around 18,000 newsgroups posts on 20 topics split in two subsets: # one for training (or development) and the other one for testing (or for # performance evaluation). Note that, by default, the text samples contain some # message metadata such as `'headers'`, `'footers'` (signatures) and `'quotes'` # to other posts. The `fetch_20newsgroups` function therefore accepts a # parameter named `remove` to attempt stripping such information that can make # the classification problem "too easy". This is achieved using simple # heuristics that are neither perfect nor standard, hence disabled by default. from time import time from sklearn.datasets import fetch_20newsgroups from sklearn.feature_extraction.text import TfidfVectorizer categories = [ "alt.atheism", "talk.religion.misc", "comp.graphics", "sci.space", ] def size_mb(docs): return sum(len(s.encode("utf-8")) for s in docs) / 1e6 def load_dataset(verbose=False, remove=()): """Load and vectorize the 20 newsgroups dataset.""" data_train = fetch_20newsgroups( subset="train", categories=categories, shuffle=True, random_state=42, remove=remove, ) data_test = fetch_20newsgroups( subset="test", categories=categories, shuffle=True, random_state=42, remove=remove, ) # order of labels in `target_names` can be different from `categories` target_names = data_train.target_names # split target in a training set and a test set y_train, y_test = data_train.target, data_test.target # Extracting features from the training data using a sparse vectorizer t0 = time() vectorizer = TfidfVectorizer( sublinear_tf=True, max_df=0.5, min_df=5, stop_words="english" ) X_train = vectorizer.fit_transform(data_train.data) duration_train = time() - t0 # Extracting features from the test data using the same vectorizer t0 = time() X_test = vectorizer.transform(data_test.data) duration_test = time() - t0 feature_names = vectorizer.get_feature_names_out() if verbose: # compute size of loaded data data_train_size_mb = size_mb(data_train.data) data_test_size_mb = size_mb(data_test.data) print( f"{len(data_train.data)} documents - " f"{data_train_size_mb:.2f}MB (training set)" ) print(f"{len(data_test.data)} documents - {data_test_size_mb:.2f}MB (test set)") print(f"{len(target_names)} categories") print( f"vectorize training done in {duration_train:.3f}s " f"at {data_train_size_mb / duration_train:.3f}MB/s" ) print(f"n_samples: {X_train.shape[0]}, n_features: {X_train.shape[1]}") print( f"vectorize testing done in {duration_test:.3f}s " f"at {data_test_size_mb / duration_test:.3f}MB/s" ) print(f"n_samples: {X_test.shape[0]}, n_features: 
{X_test.shape[1]}") return X_train, X_test, y_train, y_test, feature_names, target_names # %% # Analysis of a bag-of-words document classifier # ============================================== # # We will now train a classifier twice, once on the text samples including # metadata and once after stripping the metadata. For both cases we will analyze # the classification errors on a test set using a confusion matrix and inspect # the coefficients that define the classification function of the trained # models. # # Model without metadata stripping # -------------------------------- # # We start by using the custom function `load_dataset` to load the data without # metadata stripping. X_train, X_test, y_train, y_test, feature_names, target_names = load_dataset( verbose=True ) # %% # Our first model is an instance of the # :class:`~sklearn.linear_model.RidgeClassifier` class. This is a linear # classification model that uses the mean squared error on {-1, 1} encoded # targets, one for each possible class. Contrary to # :class:`~sklearn.linear_model.LogisticRegression`, # :class:`~sklearn.linear_model.RidgeClassifier` does not # provide probabilistic predictions (no `predict_proba` method), # but it is often faster to train. from sklearn.linear_model import RidgeClassifier clf = RidgeClassifier(tol=1e-2, solver="sparse_cg") clf.fit(X_train, y_train) pred = clf.predict(X_test) # %% # We plot the confusion matrix of this classifier to find if there is a pattern # in the classification errors. import matplotlib.pyplot as plt from sklearn.metrics import ConfusionMatrixDisplay fig, ax = plt.subplots(figsize=(10, 5)) ConfusionMatrixDisplay.from_predictions(y_test, pred, ax=ax) ax.xaxis.set_ticklabels(target_names) ax.yaxis.set_ticklabels(target_names) _ = ax.set_title( f"Confusion Matrix for {clf.__class__.__name__}\non the original documents" ) # %% # The confusion matrix highlights that documents of the `alt.atheism` class are # often confused with documents with the class `talk.religion.misc` class and # vice-versa which is expected since the topics are semantically related. # # We also observe that some documents of the `sci.space` class can be misclassified as # `comp.graphics` while the converse is much rarer. A manual inspection of those # badly classified documents would be required to get some insights on this # asymmetry. It could be the case that the vocabulary of the space topic could # be more specific than the vocabulary for computer graphics. 
# # We can gain a deeper understanding of how this classifier makes its decisions # by looking at the words with the highest average feature effects: import numpy as np import pandas as pd def plot_feature_effects(): # learned coefficients weighted by frequency of appearance average_feature_effects = clf.coef_ * np.asarray(X_train.mean(axis=0)).ravel() for i, label in enumerate(target_names): top5 = np.argsort(average_feature_effects[i])[-5:][::-1] if i == 0: top = pd.DataFrame(feature_names[top5], columns=[label]) top_indices = top5 else: top[label] = feature_names[top5] top_indices = np.concatenate((top_indices, top5), axis=None) top_indices = np.unique(top_indices) predictive_words = feature_names[top_indices] # plot feature effects bar_size = 0.25 padding = 0.75 y_locs = np.arange(len(top_indices)) * (4 * bar_size + padding) fig, ax = plt.subplots(figsize=(10, 8)) for i, label in enumerate(target_names): ax.barh( y_locs + (i - 2) * bar_size, average_feature_effects[i, top_indices], height=bar_size, label=label, ) ax.set( yticks=y_locs, yticklabels=predictive_words, ylim=[ 0 - 4 * bar_size, len(top_indices) * (4 * bar_size + padding) - 4 * bar_size, ], ) ax.legend(loc="lower right") print("top 5 keywords per class:") print(top) return ax _ = plot_feature_effects().set_title("Average feature effect on the original data") # %% # We can observe that the most predictive words are often strongly positively # associated with a single class and negatively associated with all the other # classes. Most of those positive associations are quite easy to interpret. # However, some words such as `"god"` and `"people"` are positively associated to # both `"talk.misc.religion"` and `"alt.atheism"` as those two classes expectedly # share some common vocabulary. Notice however that there are also words such as # `"christian"` and `"morality"` that are only positively associated with # `"talk.misc.religion"`. Furthermore, in this version of the dataset, the word # `"caltech"` is one of the top predictive features for atheism due to pollution # in the dataset coming from some sort of metadata such as the email addresses # of the sender of previous emails in the discussion as can be seen below: data_train = fetch_20newsgroups( subset="train", categories=categories, shuffle=True, random_state=42 ) for doc in data_train.data: if "caltech" in doc: print(doc) break # %% # Such headers, signature footers (and quoted metadata from previous messages) # can be considered side information that artificially reveals the newsgroup by # identifying the registered members and one would rather want our text # classifier to only learn from the "main content" of each text document instead # of relying on the leaked identity of the writers. # # Model with metadata stripping # ----------------------------- # # The `remove` option of the 20 newsgroups dataset loader in scikit-learn allows # to heuristically attempt to filter out some of this unwanted metadata that # makes the classification problem artificially easier. Be aware that such # filtering of the text contents is far from perfect. 
# # Let us try to leverage this option to train a text classifier that does not # rely too much on this kind of metadata to make its decisions: ( X_train, X_test, y_train, y_test, feature_names, target_names, ) = load_dataset(remove=("headers", "footers", "quotes")) clf = RidgeClassifier(tol=1e-2, solver="sparse_cg") clf.fit(X_train, y_train) pred = clf.predict(X_test) fig, ax = plt.subplots(figsize=(10, 5)) ConfusionMatrixDisplay.from_predictions(y_test, pred, ax=ax) ax.xaxis.set_ticklabels(target_names) ax.yaxis.set_ticklabels(target_names) _ = ax.set_title( f"Confusion Matrix for {clf.__class__.__name__}\non filtered documents" ) # %% # By looking at the confusion matrix, it is more evident that the scores of the # model trained with metadata were over-optimistic. The classification problem # without access to the metadata is less accurate but more representative of the # intended text classification problem. _ = plot_feature_effects().set_title("Average feature effects on filtered documents") # %% # In the next section we keep the dataset without metadata to compare several # classifiers. # %% # Benchmarking classifiers # ======================== # # Scikit-learn provides many different kinds of classification algorithms. In # this section we will train a selection of those classifiers on the same text # classification problem and measure both their generalization performance # (accuracy on the test set) and their computation performance (speed), both at # training time and testing time. For such purpose we define the following # benchmarking utilities: from sklearn import metrics from sklearn.utils.extmath import density def benchmark(clf, custom_name=False): print("_" * 80) print("Training: ") print(clf) t0 = time() clf.fit(X_train, y_train) train_time = time() - t0 print(f"train time: {train_time:.3}s") t0 = time() pred = clf.predict(X_test) test_time = time() - t0 print(f"test time: {test_time:.3}s") score = metrics.accuracy_score(y_test, pred) print(f"accuracy: {score:.3}") if hasattr(clf, "coef_"): print(f"dimensionality: {clf.coef_.shape[1]}") print(f"density: {density(clf.coef_)}") print() print() if custom_name: clf_descr = str(custom_name) else: clf_descr = clf.__class__.__name__ return clf_descr, score, train_time, test_time # %% # We now train and test the datasets with 8 different classification models and # get performance results for each model. The goal of this study is to highlight # the computation/accuracy tradeoffs of different types of classifiers for # such a multi-class text classification problem. # # Notice that the most important hyperparameters values were tuned using a grid # search procedure not shown in this notebook for the sake of simplicity. See # the example script # :ref:`sphx_glr_auto_examples_model_selection_plot_grid_search_text_feature_extraction.py` # noqa: E501 # for a demo on how such tuning can be done. 
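# %%
# As a minimal sketch of what such a tuning step could look like for a single
# model, a small grid search over the regularization strength of the
# RidgeClassifier is shown below. The `alpha` values are illustrative choices
# rather than the grid used for the benchmark, and the search reuses `X_train`
# and `y_train` from the cells above.

from sklearn.model_selection import GridSearchCV

ridge_search = GridSearchCV(
    RidgeClassifier(solver="sparse_cg"),
    param_grid={"alpha": [0.01, 0.1, 1.0, 10.0]},
    n_jobs=-1,
)
ridge_search.fit(X_train, y_train)
print(f"best alpha: {ridge_search.best_params_['alpha']}")
print(f"best cross-validated accuracy: {ridge_search.best_score_:.3f}")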
from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression, SGDClassifier from sklearn.naive_bayes import ComplementNB from sklearn.neighbors import KNeighborsClassifier, NearestCentroid from sklearn.svm import LinearSVC results = [] for clf, name in ( (LogisticRegression(C=5, max_iter=1000), "Logistic Regression"), (RidgeClassifier(alpha=1.0, solver="sparse_cg"), "Ridge Classifier"), (KNeighborsClassifier(n_neighbors=100), "kNN"), (RandomForestClassifier(), "Random Forest"), # L2 penalty Linear SVC (LinearSVC(C=0.1, dual=False, max_iter=1000), "Linear SVC"), # L2 penalty Linear SGD ( SGDClassifier( loss="log_loss", alpha=1e-4, n_iter_no_change=3, early_stopping=True ), "log-loss SGD", ), # NearestCentroid (aka Rocchio classifier) (NearestCentroid(), "NearestCentroid"), # Sparse naive Bayes classifier (ComplementNB(alpha=0.1), "Complement naive Bayes"), ): print("=" * 80) print(name) results.append(benchmark(clf, name)) # %% # Plot accuracy, training and test time of each classifier # ======================================================== # # The scatter plots show the trade-off between the test accuracy and the # training and testing time of each classifier. indices = np.arange(len(results)) results = [[x[i] for x in results] for i in range(4)] clf_names, score, training_time, test_time = results training_time = np.array(training_time) test_time = np.array(test_time) fig, ax1 = plt.subplots(figsize=(10, 8)) ax1.scatter(score, training_time, s=60) ax1.set( title="Score-training time trade-off", yscale="log", xlabel="test accuracy", ylabel="training time (s)", ) fig, ax2 = plt.subplots(figsize=(10, 8)) ax2.scatter(score, test_time, s=60) ax2.set( title="Score-test time trade-off", yscale="log", xlabel="test accuracy", ylabel="test time (s)", ) for i, txt in enumerate(clf_names): ax1.annotate(txt, (score[i], training_time[i])) ax2.annotate(txt, (score[i], test_time[i])) # %% # The naive Bayes model has the best trade-off between score and # training/testing time, while Random Forest is both slow to train, expensive to # predict and has a comparatively bad accuracy. This is expected: for # high-dimensional prediction problems, linear models are often better suited as # most problems become linearly separable when the feature space has 10,000 # dimensions or more. # # The difference in training speed and accuracy of the linear models can be # explained by the choice of the loss function they optimize and the kind of # regularization they use. Be aware that some linear models with the same loss # but a different solver or regularization configuration may yield different # fitting times and test accuracy. We can observe on the second plot that once # trained, all linear models have approximately the same prediction speed which # is expected because they all implement the same prediction function. # # KNeighborsClassifier has a relatively low accuracy and has the highest testing # time. The long prediction time is also expected: for each prediction the model # has to compute the pairwise distances between the testing sample and each # document in the training set, which is computationally expensive. Furthermore, # the "curse of dimensionality" harms the ability of this model to yield # competitive accuracy in the high dimensional feature space of text # classification problems.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/svm/plot_custom_kernel.py
examples/svm/plot_custom_kernel.py
""" ====================== SVM with custom kernel ====================== Simple usage of Support Vector Machines to classify a sample. It will plot the decision surface and the support vectors. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn import datasets, svm from sklearn.inspection import DecisionBoundaryDisplay # import some data to play with iris = datasets.load_iris() X = iris.data[:, :2] # we only take the first two features. We could # avoid this ugly slicing by using a two-dim dataset Y = iris.target def my_kernel(X, Y): """ We create a custom kernel: (2 0) k(X, Y) = X ( ) Y.T (0 1) """ M = np.array([[2, 0], [0, 1.0]]) return np.dot(np.dot(X, M), Y.T) h = 0.02 # step size in the mesh # we create an instance of SVM and fit out data. clf = svm.SVC(kernel=my_kernel) clf.fit(X, Y) ax = plt.gca() DecisionBoundaryDisplay.from_estimator( clf, X, cmap=plt.cm.Paired, ax=ax, response_method="predict", plot_method="pcolormesh", shading="auto", ) # Plot also the training points plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired, edgecolors="k") plt.title("3-Class classification using Support Vector Machine with custom kernel") plt.axis("tight") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/svm/plot_svm_anova.py
examples/svm/plot_svm_anova.py
""" ================================================= SVM-Anova: SVM with univariate feature selection ================================================= This example shows how to perform univariate feature selection before running a SVC (support vector classifier) to improve the classification scores. We use the iris dataset (4 features) and add 36 non-informative features. We can find that our model achieves best performance when we select around 10% of features. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Load some data to play with # --------------------------- import numpy as np from sklearn.datasets import load_iris X, y = load_iris(return_X_y=True) # Add non-informative features rng = np.random.RandomState(0) X = np.hstack((X, 2 * rng.random((X.shape[0], 36)))) # %% # Create the pipeline # ------------------- from sklearn.feature_selection import SelectPercentile, f_classif from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC # Create a feature-selection transform, a scaler and an instance of SVM that we # combine together to have a full-blown estimator clf = Pipeline( [ ("anova", SelectPercentile(f_classif)), ("scaler", StandardScaler()), ("svc", SVC(gamma="auto")), ] ) # %% # Plot the cross-validation score as a function of percentile of features # ----------------------------------------------------------------------- import matplotlib.pyplot as plt from sklearn.model_selection import cross_val_score score_means = list() score_stds = list() percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100) for percentile in percentiles: clf.set_params(anova__percentile=percentile) this_scores = cross_val_score(clf, X, y) score_means.append(this_scores.mean()) score_stds.append(this_scores.std()) plt.errorbar(percentiles, score_means, np.array(score_stds)) plt.title("Performance of the SVM-Anova varying the percentile of features selected") plt.xticks(np.linspace(0, 100, 11, endpoint=True)) plt.xlabel("Percentile") plt.ylabel("Accuracy Score") plt.axis("tight") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/svm/plot_separating_hyperplane_unbalanced.py
examples/svm/plot_separating_hyperplane_unbalanced.py
""" ================================================= SVM: Separating hyperplane for unbalanced classes ================================================= Find the optimal separating hyperplane using an SVC for classes that are unbalanced. We first find the separating plane with a plain SVC and then plot (dashed) the separating hyperplane with automatically correction for unbalanced classes. .. currentmodule:: sklearn.linear_model .. note:: This example will also work by replacing ``SVC(kernel="linear")`` with ``SGDClassifier(loss="hinge")``. Setting the ``loss`` parameter of the :class:`SGDClassifier` equal to ``hinge`` will yield behaviour such as that of an SVC with a linear kernel. For example try instead of the ``SVC``:: clf = SGDClassifier(n_iter=100, alpha=0.01) """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.lines as mlines import matplotlib.pyplot as plt from sklearn import svm from sklearn.datasets import make_blobs from sklearn.inspection import DecisionBoundaryDisplay # we create two clusters of random points n_samples_1 = 1000 n_samples_2 = 100 centers = [[0.0, 0.0], [2.0, 2.0]] clusters_std = [1.5, 0.5] X, y = make_blobs( n_samples=[n_samples_1, n_samples_2], centers=centers, cluster_std=clusters_std, random_state=0, shuffle=False, ) # fit the model and get the separating hyperplane clf = svm.SVC(kernel="linear", C=1.0) clf.fit(X, y) # fit the model and get the separating hyperplane using weighted classes wclf = svm.SVC(kernel="linear", class_weight={1: 10}) wclf.fit(X, y) # plot the samples plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired, edgecolors="k") # plot the decision functions for both classifiers ax = plt.gca() disp = DecisionBoundaryDisplay.from_estimator( clf, X, plot_method="contour", colors="k", levels=[0], alpha=0.5, linestyles=["-"], ax=ax, ) # plot decision boundary and margins for weighted classes wdisp = DecisionBoundaryDisplay.from_estimator( wclf, X, plot_method="contour", colors="r", levels=[0], alpha=0.5, linestyles=["-"], ax=ax, ) plt.legend( [ mlines.Line2D([], [], color="k", label="non weighted"), mlines.Line2D([], [], color="r", label="weighted"), ], ["non weighted", "weighted"], loc="upper right", ) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/svm/plot_rbf_parameters.py
examples/svm/plot_rbf_parameters.py
""" ================== RBF SVM parameters ================== This example illustrates the effect of the parameters ``gamma`` and ``C`` of the Radial Basis Function (RBF) kernel SVM. Intuitively, the ``gamma`` parameter defines how far the influence of a single training example reaches, with low values meaning 'far' and high values meaning 'close'. The ``gamma`` parameters can be seen as the inverse of the radius of influence of samples selected by the model as support vectors. The ``C`` parameter trades off correct classification of training examples against maximization of the decision function's margin. For larger values of ``C``, a smaller margin will be accepted if the decision function is better at classifying all training points correctly. A lower ``C`` will encourage a larger margin, therefore a simpler decision function, at the cost of training accuracy. In other words ``C`` behaves as a regularization parameter in the SVM. The first plot is a visualization of the decision function for a variety of parameter values on a simplified classification problem involving only 2 input features and 2 possible target classes (binary classification). Note that this kind of plot is not possible to do for problems with more features or target classes. The second plot is a heatmap of the classifier's cross-validation accuracy as a function of ``C`` and ``gamma``. For this example we explore a relatively large grid for illustration purposes. In practice, a logarithmic grid from :math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters lie on the boundaries of the grid, it can be extended in that direction in a subsequent search. Note that the heat map plot has a special colorbar with a midpoint value close to the score values of the best performing models so as to make it easy to tell them apart in the blink of an eye. The behavior of the model is very sensitive to the ``gamma`` parameter. If ``gamma`` is too large, the radius of the area of influence of the support vectors only includes the support vector itself and no amount of regularization with ``C`` will be able to prevent overfitting. When ``gamma`` is very small, the model is too constrained and cannot capture the complexity or "shape" of the data. The region of influence of any selected support vector would include the whole training set. The resulting model will behave similarly to a linear model with a set of hyperplanes that separate the centers of high density of any pair of two classes. For intermediate values, we can see on the second plot that good models can be found on a diagonal of ``C`` and ``gamma``. Smooth models (lower ``gamma`` values) can be made more complex by increasing the importance of classifying each point correctly (larger ``C`` values) hence the diagonal of good performing models. Finally, one can also observe that for some intermediate values of ``gamma`` we get equally performing models when ``C`` becomes very large. This suggests that the set of support vectors does not change anymore. The radius of the RBF kernel alone acts as a good structural regularizer. Increasing ``C`` further doesn't help, likely because there are no more training points in violation (inside the margin or wrongly classified), or at least no better solution can be found. Scores being equal, it may make sense to use the smaller ``C`` values, since very high ``C`` values typically increase fitting time. On the other hand, lower ``C`` values generally lead to more support vectors, which may increase prediction time. 
Therefore, lowering the value of ``C`` involves a trade-off between fitting
time and prediction time.

We should also note that small differences in scores result from the random
splits of the cross-validation procedure. Those spurious variations can be
smoothed out by increasing the number of CV iterations ``n_splits`` at the
expense of compute time. Increasing the number of steps in ``C_range`` and
``gamma_range`` will increase the resolution of the hyper-parameter heat map.

"""

# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause

# %%
# Utility class to move the midpoint of a colormap to be around
# the values of interest.

import numpy as np
from matplotlib.colors import Normalize


class MidpointNormalize(Normalize):
    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        self.midpoint = midpoint
        Normalize.__init__(self, vmin, vmax, clip)

    def __call__(self, value, clip=None):
        x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
        return np.ma.masked_array(np.interp(value, x, y))


# %%
# Load and prepare data set
# -------------------------
#
# dataset for grid search

from sklearn.datasets import load_iris

iris = load_iris()
X = iris.data
y = iris.target

# %%
# Dataset for decision function visualization: we only keep the first two
# features in X and sub-sample the dataset to keep only 2 classes and
# make it a binary classification problem.

X_2d = X[:, :2]
X_2d = X_2d[y > 0]
y_2d = y[y > 0]
y_2d -= 1

# %%
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.

from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)

# %%
# Train classifiers
# -----------------
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
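# %%
# The search below uses a base-10 grid. For reference, a finer base-2 grid for
# ``C`` covering a similar range could be built as follows; the bounds and the
# number of steps are illustrative choices and this grid is not used in the
# search below.

import numpy as np

C_range_base2 = np.logspace(-7, 33, num=41, base=2)
print(
    f"{len(C_range_base2)} candidate values "
    f"from {C_range_base2[0]:g} to {C_range_base2[-1]:g}"
)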
from sklearn.model_selection import GridSearchCV, StratifiedShuffleSplit from sklearn.svm import SVC C_range = np.logspace(-2, 10, 13) gamma_range = np.logspace(-9, 3, 13) param_grid = dict(gamma=gamma_range, C=C_range) cv = StratifiedShuffleSplit(n_splits=5, test_size=0.2, random_state=42) grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv) grid.fit(X, y) print( "The best parameters are %s with a score of %0.2f" % (grid.best_params_, grid.best_score_) ) # %% # Now we need to fit a classifier for all parameters in the 2d version # (we use a smaller set of parameters here because it takes a while to train) C_2d_range = [1e-2, 1, 1e2] gamma_2d_range = [1e-1, 1, 1e1] classifiers = [] for C in C_2d_range: for gamma in gamma_2d_range: clf = SVC(C=C, gamma=gamma) clf.fit(X_2d, y_2d) classifiers.append((C, gamma, clf)) # %% # Visualization # ------------- # # draw visualization of parameter effects import matplotlib.pyplot as plt plt.figure(figsize=(8, 6)) xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200)) for k, (C, gamma, clf) in enumerate(classifiers): # evaluate decision function in a grid Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) # visualize decision function for these parameters plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1) plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)), size="medium") # visualize parameter's effect on decision function plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu) plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r, edgecolors="k") plt.xticks(()) plt.yticks(()) plt.axis("tight") scores = grid.cv_results_["mean_test_score"].reshape(len(C_range), len(gamma_range)) # %% # Draw heatmap of the validation accuracy as a function of gamma and C # # The score are encoded as colors with the hot colormap which varies from dark # red to bright yellow. As the most interesting scores are all located in the # 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so # as to make it easier to visualize the small variations of score values in the # interesting range while not brutally collapsing all the low score values to # the same color. plt.figure(figsize=(8, 6)) plt.subplots_adjust(left=0.2, right=0.95, bottom=0.15, top=0.95) plt.imshow( scores, interpolation="nearest", cmap=plt.cm.hot, norm=MidpointNormalize(vmin=0.2, midpoint=0.92), ) plt.xlabel("gamma") plt.ylabel("C") plt.colorbar() plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45) plt.yticks(np.arange(len(C_range)), C_range) plt.title("Validation accuracy") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/svm/plot_oneclass.py
examples/svm/plot_oneclass.py
""" ========================================== One-class SVM with non-linear kernel (RBF) ========================================== An example using a one-class SVM for novelty detection. :ref:`One-class SVM <svm_outlier_detection>` is an unsupervised algorithm that learns a decision function for novelty detection: classifying new data as similar or different to the training set. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% import numpy as np from sklearn import svm # Generate train data X = 0.3 * np.random.randn(100, 2) X_train = np.r_[X + 2, X - 2] # Generate some regular novel observations X = 0.3 * np.random.randn(20, 2) X_test = np.r_[X + 2, X - 2] # Generate some abnormal novel observations X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2)) # fit the model clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1) clf.fit(X_train) y_pred_train = clf.predict(X_train) y_pred_test = clf.predict(X_test) y_pred_outliers = clf.predict(X_outliers) n_error_train = y_pred_train[y_pred_train == -1].size n_error_test = y_pred_test[y_pred_test == -1].size n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size # %% import matplotlib.font_manager import matplotlib.lines as mlines import matplotlib.pyplot as plt from sklearn.inspection import DecisionBoundaryDisplay _, ax = plt.subplots() # generate grid for the boundary display xx, yy = np.meshgrid(np.linspace(-5, 5, 10), np.linspace(-5, 5, 10)) X = np.concatenate([xx.reshape(-1, 1), yy.reshape(-1, 1)], axis=1) DecisionBoundaryDisplay.from_estimator( clf, X, response_method="decision_function", plot_method="contourf", ax=ax, cmap="PuBu", ) DecisionBoundaryDisplay.from_estimator( clf, X, response_method="decision_function", plot_method="contourf", ax=ax, levels=[0, 10000], colors="palevioletred", ) DecisionBoundaryDisplay.from_estimator( clf, X, response_method="decision_function", plot_method="contour", ax=ax, levels=[0], colors="darkred", linewidths=2, ) s = 40 b1 = ax.scatter(X_train[:, 0], X_train[:, 1], c="white", s=s, edgecolors="k") b2 = ax.scatter(X_test[:, 0], X_test[:, 1], c="blueviolet", s=s, edgecolors="k") c = ax.scatter(X_outliers[:, 0], X_outliers[:, 1], c="gold", s=s, edgecolors="k") plt.legend( [mlines.Line2D([], [], color="darkred"), b1, b2, c], [ "learned frontier", "training observations", "new regular observations", "new abnormal observations", ], loc="upper left", prop=matplotlib.font_manager.FontProperties(size=11), ) ax.set( xlabel=( f"error train: {n_error_train}/200 ; errors novel regular: {n_error_test}/40 ;" f" errors novel abnormal: {n_error_outliers}/40" ), title="Novelty Detection", xlim=(-5, 5), ylim=(-5, 5), ) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/svm/plot_weighted_samples.py
examples/svm/plot_weighted_samples.py
""" ===================== SVM: Weighted samples ===================== Plot decision function of a weighted dataset, where the size of points is proportional to its weight. The sample weighting rescales the C parameter, which means that the classifier puts more emphasis on getting these points right. The effect might often be subtle. To emphasize the effect here, we particularly increase the weight of the positive class, making the deformation of the decision boundary more visible. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn.datasets import make_classification from sklearn.inspection import DecisionBoundaryDisplay from sklearn.svm import SVC X, y = make_classification( n_samples=1_000, n_features=2, n_informative=2, n_redundant=0, n_clusters_per_class=1, class_sep=1.1, weights=[0.9, 0.1], random_state=0, ) # down-sample for plotting rng = np.random.RandomState(0) plot_indices = rng.choice(np.arange(X.shape[0]), size=100, replace=True) X_plot, y_plot = X[plot_indices], y[plot_indices] def plot_decision_function(classifier, sample_weight, axis, title): """Plot the synthetic data and the classifier decision function. Points with larger sample_weight are mapped to larger circles in the scatter plot.""" axis.scatter( X_plot[:, 0], X_plot[:, 1], c=y_plot, s=100 * sample_weight[plot_indices], alpha=0.9, cmap=plt.cm.bone, edgecolors="black", ) DecisionBoundaryDisplay.from_estimator( classifier, X_plot, response_method="decision_function", alpha=0.75, ax=axis, cmap=plt.cm.bone, ) axis.axis("off") axis.set_title(title) # we define constant weights as expected by the plotting function sample_weight_constant = np.ones(len(X)) # assign random weights to all points sample_weight_modified = abs(rng.randn(len(X))) # assign bigger weights to the positive class positive_class_indices = np.asarray(y == 1).nonzero()[0] sample_weight_modified[positive_class_indices] *= 15 # This model does not include sample weights. clf_no_weights = SVC(gamma=1) clf_no_weights.fit(X, y) # This other model includes sample weights. clf_weights = SVC(gamma=1) clf_weights.fit(X, y, sample_weight=sample_weight_modified) fig, axes = plt.subplots(1, 2, figsize=(14, 6)) plot_decision_function( clf_no_weights, sample_weight_constant, axes[0], "Constant weights" ) plot_decision_function(clf_weights, sample_weight_modified, axes[1], "Modified weights") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
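A supplementary sketch to make the statement "the sample weighting rescales the C parameter" concrete: in the libsvm-based :class:`~sklearn.svm.SVC`, the effective penalty for sample i is `C * sample_weight[i]`, so doubling every weight should give essentially the same model as doubling `C`. This is an illustration on fresh synthetic data, not part of the original example; any remaining difference only reflects solver precision.

import numpy as np
from sklearn.datasets import make_classification
from sklearn.svm import SVC

X, y = make_classification(
    n_samples=200, n_features=2, n_informative=2, n_redundant=0, random_state=0
)

# uniform weight of 2 with C=1 ...
clf_weighted = SVC(gamma=1, C=1).fit(X, y, sample_weight=2 * np.ones(len(X)))
# ... should match no weights with C=2
clf_scaled_c = SVC(gamma=1, C=2).fit(X, y)

diff = np.abs(
    clf_weighted.decision_function(X) - clf_scaled_c.decision_function(X)
).max()
print(f"largest difference between the two decision functions: {diff:.2e}")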
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/svm/plot_svm_tie_breaking.py
examples/svm/plot_svm_tie_breaking.py
""" ========================================================= SVM Tie Breaking Example ========================================================= Tie breaking is costly if ``decision_function_shape='ovr'``, and therefore it is not enabled by default. This example illustrates the effect of the ``break_ties`` parameter for a multiclass classification problem and ``decision_function_shape='ovr'``. The two plots differ only in the area in the middle where the classes are tied. If ``break_ties=False``, all input in that area would be classified as one class, whereas if ``break_ties=True``, the tie-breaking mechanism will create a non-convex decision boundary in that area. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn.datasets import make_blobs from sklearn.svm import SVC X, y = make_blobs(random_state=27) fig, sub = plt.subplots(2, 1, figsize=(5, 8)) titles = ("break_ties = False", "break_ties = True") for break_ties, title, ax in zip((False, True), titles, sub.flatten()): svm = SVC( kernel="linear", C=1, break_ties=break_ties, decision_function_shape="ovr" ).fit(X, y) xlim = [X[:, 0].min(), X[:, 0].max()] ylim = [X[:, 1].min(), X[:, 1].max()] xs = np.linspace(xlim[0], xlim[1], 1000) ys = np.linspace(ylim[0], ylim[1], 1000) xx, yy = np.meshgrid(xs, ys) pred = svm.predict(np.c_[xx.ravel(), yy.ravel()]) colors = [plt.cm.Accent(i) for i in [0, 4, 7]] points = ax.scatter(X[:, 0], X[:, 1], c=y, cmap="Accent") classes = [(0, 1), (0, 2), (1, 2)] line = np.linspace(X[:, 1].min() - 5, X[:, 1].max() + 5) ax.imshow( pred.reshape(xx.shape), cmap="Accent", alpha=0.2, extent=(xlim[0], xlim[1], ylim[1], ylim[0]), ) for coef, intercept, col in zip(svm.coef_, svm.intercept_, classes): line2 = -(line * coef[1] + intercept) / coef[0] ax.plot(line2, line, "-", c=colors[col[0]]) ax.plot(line2, line, "--", c=colors[col[1]]) ax.set_xlim(xlim) ax.set_ylim(ylim) ax.set_title(title) ax.set_aspect("equal") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
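A supplementary sketch quantifying what the two panels show: it counts how many points of a dense grid change their predicted class when `break_ties` is flipped; only the ambiguous middle region should be affected. The grid resolution is an arbitrary choice.

import numpy as np
from sklearn.datasets import make_blobs
from sklearn.svm import SVC

X, y = make_blobs(random_state=27)
xx, yy = np.meshgrid(
    np.linspace(X[:, 0].min(), X[:, 0].max(), 200),
    np.linspace(X[:, 1].min(), X[:, 1].max(), 200),
)
grid = np.c_[xx.ravel(), yy.ravel()]

preds = {}
for break_ties in (False, True):
    clf = SVC(
        kernel="linear", C=1, break_ties=break_ties, decision_function_shape="ovr"
    ).fit(X, y)
    preds[break_ties] = clf.predict(grid)

changed = (preds[False] != preds[True]).mean()
print(f"fraction of grid points whose predicted class changes: {changed:.3f}")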
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/svm/plot_svm_margin.py
examples/svm/plot_svm_margin.py
""" ========================================================= SVM Margins Example ========================================================= The plots below illustrate the effect the parameter `C` has on the separation line. A large value of `C` basically tells our model that we do not have that much faith in our data's distribution, and will only consider points close to line of separation. A small value of `C` includes more/all the observations, allowing the margins to be calculated using all the data in the area. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn import svm # we create 40 separable points np.random.seed(0) X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]] Y = [0] * 20 + [1] * 20 # figure number fignum = 1 # fit the model for name, penalty in (("unreg", 1), ("reg", 0.05)): clf = svm.SVC(kernel="linear", C=penalty) clf.fit(X, Y) # get the separating hyperplane w = clf.coef_[0] a = -w[0] / w[1] xx = np.linspace(-5, 5) yy = a * xx - (clf.intercept_[0]) / w[1] # plot the parallels to the separating hyperplane that pass through the # support vectors (margin away from hyperplane in direction # perpendicular to hyperplane). This is sqrt(1+a^2) away vertically in # 2-d. margin = 1 / np.sqrt(np.sum(clf.coef_**2)) yy_down = yy - np.sqrt(1 + a**2) * margin yy_up = yy + np.sqrt(1 + a**2) * margin # plot the line, the points, and the nearest vectors to the plane plt.figure(fignum, figsize=(4, 3)) plt.clf() plt.plot(xx, yy, "k-") plt.plot(xx, yy_down, "k--") plt.plot(xx, yy_up, "k--") plt.scatter( clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80, facecolors="none", zorder=10, edgecolors="k", ) plt.scatter( X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.get_cmap("RdBu"), edgecolors="k" ) plt.axis("tight") x_min = -4.8 x_max = 4.2 y_min = -6 y_max = 6 YY, XX = np.meshgrid(yy, xx) xy = np.vstack([XX.ravel(), YY.ravel()]).T Z = clf.decision_function(xy).reshape(XX.shape) # Put the result into a contour plot plt.contourf(XX, YY, Z, cmap=plt.get_cmap("RdBu"), alpha=0.5, linestyles=["-"]) plt.xlim(x_min, x_max) plt.ylim(y_min, y_max) plt.xticks(()) plt.yticks(()) fignum = fignum + 1 plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
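A supplementary sketch reporting the quantities behind the two figures: the margin width `1 / ||w||` (the offset used for the dashed lines) and the number of support vectors for each value of `C`. It recreates the same 40 separable points so that it stands alone.

import numpy as np
from sklearn import svm

np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20

for name, penalty in (("unreg", 1), ("reg", 0.05)):
    clf = svm.SVC(kernel="linear", C=penalty).fit(X, Y)
    margin = 1 / np.sqrt(np.sum(clf.coef_**2))
    print(
        f"{name}: C={penalty}  margin={margin:.3f}  "
        f"support vectors={clf.support_vectors_.shape[0]}"
    )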
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/svm/plot_svm_kernels.py
examples/svm/plot_svm_kernels.py
""" ========================================================= Plot classification boundaries with different SVM Kernels ========================================================= This example shows how different kernels in a :class:`~sklearn.svm.SVC` (Support Vector Classifier) influence the classification boundaries in a binary, two-dimensional classification problem. SVCs aim to find a hyperplane that effectively separates the classes in their training data by maximizing the margin between the outermost data points of each class. This is achieved by finding the best weight vector :math:`w` that defines the decision boundary hyperplane and minimizes the sum of hinge losses for misclassified samples, as measured by the :func:`~sklearn.metrics.hinge_loss` function. By default, regularization is applied with the parameter `C=1`, which allows for a certain degree of misclassification tolerance. If the data is not linearly separable in the original feature space, a non-linear kernel parameter can be set. Depending on the kernel, the process involves adding new features or transforming existing features to enrich and potentially add meaning to the data. When a kernel other than `"linear"` is set, the SVC applies the `kernel trick <https://en.wikipedia.org/wiki/Kernel_method#Mathematics:_the_kernel_trick>`__, which computes the similarity between pairs of data points using the kernel function without explicitly transforming the entire dataset. The kernel trick surpasses the otherwise necessary matrix transformation of the whole dataset by only considering the relations between all pairs of data points. The kernel function maps two vectors (each pair of observations) to their similarity using their dot product. The hyperplane can then be calculated using the kernel function as if the dataset were represented in a higher-dimensional space. Using a kernel function instead of an explicit matrix transformation improves performance, as the kernel function has a time complexity of :math:`O({n}^2)`, whereas matrix transformation scales according to the specific transformation being applied. In this example, we compare the most common kernel types of Support Vector Machines: the linear kernel (`"linear"`), the polynomial kernel (`"poly"`), the radial basis function kernel (`"rbf"`) and the sigmoid kernel (`"sigmoid"`). """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Creating a dataset # ------------------ # We create a two-dimensional classification dataset with 16 samples and two classes. We # plot the samples with the colors matching their respective targets. import matplotlib.pyplot as plt import numpy as np X = np.array( [ [0.4, -0.7], [-1.5, -1.0], [-1.4, -0.9], [-1.3, -1.2], [-1.1, -0.2], [-1.2, -0.4], [-0.5, 1.2], [-1.5, 2.1], [1.0, 1.0], [1.3, 0.8], [1.2, 0.5], [0.2, -2.0], [0.5, -2.4], [0.2, -2.3], [0.0, -2.7], [1.3, 2.1], ] ) y = np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1]) # Plotting settings fig, ax = plt.subplots(figsize=(4, 3)) x_min, x_max, y_min, y_max = -3, 3, -3, 3 ax.set(xlim=(x_min, x_max), ylim=(y_min, y_max)) # Plot samples by color and add legend scatter = ax.scatter(X[:, 0], X[:, 1], s=150, c=y, label=y, edgecolors="k") ax.legend(*scatter.legend_elements(), loc="upper right", title="Classes") ax.set_title("Samples in two-dimensional feature space") plt.show() # %% # We can see that the samples are not clearly separable by a straight line. 
# # Training SVC model and plotting decision boundaries # --------------------------------------------------- # We define a function that fits a :class:`~sklearn.svm.SVC` classifier, # allowing the `kernel` parameter as an input, and then plots the decision # boundaries learned by the model using # :class:`~sklearn.inspection.DecisionBoundaryDisplay`. # # Notice that for the sake of simplicity, the `C` parameter is set to its # default value (`C=1`) in this example and the `gamma` parameter is set to # `gamma=2` across all kernels, although it is automatically ignored for the # linear kernel. In a real classification task, where performance matters, # parameter tuning (by using :class:`~sklearn.model_selection.GridSearchCV` for # instance) is highly recommended to capture different structures within the # data. # # Setting `response_method="predict"` in # :class:`~sklearn.inspection.DecisionBoundaryDisplay` colors the areas based # on their predicted class. Using `response_method="decision_function"` allows # us to also plot the decision boundary and the margins to both sides of it. # Finally the support vectors used during training (which always lay on the # margins) are identified by means of the `support_vectors_` attribute of # the trained SVCs, and plotted as well. from sklearn import svm from sklearn.inspection import DecisionBoundaryDisplay def plot_training_data_with_decision_boundary( kernel, ax=None, long_title=True, support_vectors=True ): # Train the SVC clf = svm.SVC(kernel=kernel, gamma=2).fit(X, y) # Settings for plotting if ax is None: _, ax = plt.subplots(figsize=(4, 3)) x_min, x_max, y_min, y_max = -3, 3, -3, 3 ax.set(xlim=(x_min, x_max), ylim=(y_min, y_max)) # Plot decision boundary and margins common_params = {"estimator": clf, "X": X, "ax": ax} DecisionBoundaryDisplay.from_estimator( **common_params, response_method="predict", plot_method="pcolormesh", alpha=0.3, ) DecisionBoundaryDisplay.from_estimator( **common_params, response_method="decision_function", plot_method="contour", levels=[-1, 0, 1], colors=["k", "k", "k"], linestyles=["--", "-", "--"], ) if support_vectors: # Plot bigger circles around samples that serve as support vectors ax.scatter( clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=150, facecolors="none", edgecolors="k", ) # Plot samples by color and add legend ax.scatter(X[:, 0], X[:, 1], c=y, s=30, edgecolors="k") ax.legend(*scatter.legend_elements(), loc="upper right", title="Classes") if long_title: ax.set_title(f" Decision boundaries of {kernel} kernel in SVC") else: ax.set_title(kernel) if ax is None: plt.show() # %% # Linear kernel # ************* # Linear kernel is the dot product of the input samples: # # .. math:: K(\mathbf{x}_1, \mathbf{x}_2) = \mathbf{x}_1^\top \mathbf{x}_2 # # It is then applied to any combination of two data points (samples) in the # dataset. The dot product of the two points determines the # :func:`~sklearn.metrics.pairwise.cosine_similarity` between both points. The # higher the value, the more similar the points are. plot_training_data_with_decision_boundary("linear") # %% # Training a :class:`~sklearn.svm.SVC` on a linear kernel results in an # untransformed feature space, where the hyperplane and the margins are # straight lines. Due to the lack of expressivity of the linear kernel, the # trained classes do not perfectly capture the training data. # # Polynomial kernel # ***************** # The polynomial kernel changes the notion of similarity. The kernel function # is defined as: # # .. 
math:: # K(\mathbf{x}_1, \mathbf{x}_2) = (\gamma \cdot \ # \mathbf{x}_1^\top\mathbf{x}_2 + r)^d # # where :math:`{d}` is the degree (`degree`) of the polynomial, :math:`{\gamma}` # (`gamma`) controls the influence of each individual training sample on the # decision boundary and :math:`{r}` is the bias term (`coef0`) that shifts the # data up or down. Here, we use the default value for the degree of the # polynomial in the kernel function (`degree=3`). When `coef0=0` (the default), # the data is only transformed, but no additional dimension is added. Using a # polynomial kernel is equivalent to creating # :class:`~sklearn.preprocessing.PolynomialFeatures` and then fitting a # :class:`~sklearn.svm.SVC` with a linear kernel on the transformed data, # although this alternative approach would be computationally expensive for most # datasets. plot_training_data_with_decision_boundary("poly") # %% # The polynomial kernel with `gamma=2` adapts well to the training data, # causing the margins on both sides of the hyperplane to bend accordingly. # # RBF kernel # ********** # The radial basis function (RBF) kernel, also known as the Gaussian kernel, is # the default kernel for Support Vector Machines in scikit-learn. It measures # similarity between two data points in infinite dimensions and then approaches # classification by majority vote. The kernel function is defined as: # # .. math:: # K(\mathbf{x}_1, \mathbf{x}_2) = \exp\left(-\gamma \cdot # {\|\mathbf{x}_1 - \mathbf{x}_2\|^2}\right) # # where :math:`{\gamma}` (`gamma`) controls the influence of each individual # training sample on the decision boundary. # # The larger the euclidean distance between two points # :math:`\|\mathbf{x}_1 - \mathbf{x}_2\|^2` # the closer the kernel function is to zero. This means that two points far away # are more likely to be dissimilar. plot_training_data_with_decision_boundary("rbf") # %% # In the plot we can see how the decision boundaries tend to contract around # data points that are close to each other. # # Sigmoid kernel # ************** # The sigmoid kernel function is defined as: # # .. math:: # K(\mathbf{x}_1, \mathbf{x}_2) = \tanh(\gamma \cdot # \mathbf{x}_1^\top\mathbf{x}_2 + r) # # where the kernel coefficient :math:`{\gamma}` (`gamma`) controls the influence # of each individual training sample on the decision boundary and :math:`{r}` is # the bias term (`coef0`) that shifts the data up or down. # # In the sigmoid kernel, the similarity between two data points is computed # using the hyperbolic tangent function (:math:`\tanh`). The kernel function # scales and possibly shifts the dot product of the two points # (:math:`\mathbf{x}_1` and :math:`\mathbf{x}_2`). plot_training_data_with_decision_boundary("sigmoid") # %% # We can see that the decision boundaries obtained with the sigmoid kernel # appear curved and irregular. The decision boundary tries to separate the # classes by fitting a sigmoid-shaped curve, resulting in a complex boundary # that may not generalize well to unseen data. From this example it becomes # obvious, that the sigmoid kernel has very specific use cases, when dealing # with data that exhibits a sigmoidal shape. In this example, careful fine # tuning might find more generalizable decision boundaries. Because of its # specificity, the sigmoid kernel is less commonly used in practice compared to # other kernels. # # Conclusion # ---------- # In this example, we have visualized the decision boundaries trained with the # provided dataset. 
The plots serve as an intuitive demonstration of how # different kernels utilize the training data to determine the classification # boundaries. # # The hyperplanes and margins, although computed indirectly, can be imagined as # planes in the transformed feature space. However, in the plots, they are # represented relative to the original feature space, resulting in curved # decision boundaries for the polynomial, RBF, and sigmoid kernels. # # Please note that the plots do not evaluate the individual kernel's accuracy or # quality. They are intended to provide a visual understanding of how the # different kernels use the training data. # # For a comprehensive evaluation, fine-tuning of :class:`~sklearn.svm.SVC` # parameters using techniques such as # :class:`~sklearn.model_selection.GridSearchCV` is recommended to capture the # underlying structures within the data. # %% # XOR dataset # ----------- # A classical example of a dataset which is not linearly separable is the XOR # pattern. Here we demonstrate how different kernels work on such a dataset. xx, yy = np.meshgrid(np.linspace(-3, 3, 500), np.linspace(-3, 3, 500)) np.random.seed(0) X = np.random.randn(300, 2) y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0) _, ax = plt.subplots(2, 2, figsize=(8, 8)) args = dict(long_title=False, support_vectors=False) plot_training_data_with_decision_boundary("linear", ax[0, 0], **args) plot_training_data_with_decision_boundary("poly", ax[0, 1], **args) plot_training_data_with_decision_boundary("rbf", ax[1, 0], **args) plot_training_data_with_decision_boundary("sigmoid", ax[1, 1], **args) plt.show() # %% # As you can see from the plots above, only the `rbf` kernel can find a # reasonable decision boundary for the above dataset.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
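A supplementary sketch making the kernel-trick paragraph concrete: passing a precomputed RBF Gram matrix (via :func:`~sklearn.metrics.pairwise.rbf_kernel`) to `SVC(kernel="precomputed")` reproduces the predictions of `SVC(kernel="rbf")` with the same `gamma`. The toy data below is an arbitrary choice, not the 16-point dataset of the example.

import numpy as np
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.svm import SVC

rng = np.random.RandomState(0)
X = rng.randn(40, 2)
y = (X[:, 0] * X[:, 1] > 0).astype(int)  # not linearly separable
gamma = 2

# kernel evaluated implicitly inside the solver
clf_rbf = SVC(kernel="rbf", gamma=gamma).fit(X, y)

# the same kernel evaluated explicitly as a Gram matrix
K = rbf_kernel(X, X, gamma=gamma)
clf_precomputed = SVC(kernel="precomputed").fit(K, y)

print(np.array_equal(clf_rbf.predict(X), clf_precomputed.predict(K)))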
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/svm/plot_svm_scale_c.py
examples/svm/plot_svm_scale_c.py
r""" ============================================== Scaling the regularization parameter for SVCs ============================================== The following example illustrates the effect of scaling the regularization parameter when using :ref:`svm` for :ref:`classification <svm_classification>`. For SVC classification, we are interested in a risk minimization for the equation: .. math:: C \sum_{i=1, n} \mathcal{L} (f(x_i), y_i) + \Omega (w) where - :math:`C` is used to set the amount of regularization - :math:`\mathcal{L}` is a `loss` function of our samples and our model parameters. - :math:`\Omega` is a `penalty` function of our model parameters If we consider the loss function to be the individual error per sample, then the data-fit term, or the sum of the error for each sample, increases as we add more samples. The penalization term, however, does not increase. When using, for example, :ref:`cross validation <cross_validation>`, to set the amount of regularization with `C`, there would be a different amount of samples between the main problem and the smaller problems within the folds of the cross validation. Since the loss function dependens on the amount of samples, the latter influences the selected value of `C`. The question that arises is "How do we optimally adjust C to account for the different amount of training samples?" """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Data generation # --------------- # # In this example we investigate the effect of reparametrizing the regularization # parameter `C` to account for the number of samples when using either L1 or L2 # penalty. For such purpose we create a synthetic dataset with a large number of # features, out of which only a few are informative. We therefore expect the # regularization to shrink the coefficients towards zero (L2 penalty) or exactly # zero (L1 penalty). from sklearn.datasets import make_classification n_samples, n_features = 100, 300 X, y = make_classification( n_samples=n_samples, n_features=n_features, n_informative=5, random_state=1 ) # %% # L1-penalty case # --------------- # In the L1 case, theory says that provided a strong regularization, the # estimator cannot predict as well as a model knowing the true distribution # (even in the limit where the sample size grows to infinity) as it may set some # weights of otherwise predictive features to zero, which induces a bias. It does # say, however, that it is possible to find the right set of non-zero parameters # as well as their signs by tuning `C`. # # We define a linear SVC with the L1 penalty. from sklearn.svm import LinearSVC model_l1 = LinearSVC(penalty="l1", loss="squared_hinge", dual=False, tol=1e-3) # %% # We compute the mean test score for different values of `C` via # cross-validation. 
import numpy as np import pandas as pd from sklearn.model_selection import ShuffleSplit, validation_curve Cs = np.logspace(-2.3, -1.3, 10) train_sizes = np.linspace(0.3, 0.7, 3) labels = [f"fraction: {train_size}" for train_size in train_sizes] shuffle_params = { "test_size": 0.3, "n_splits": 150, "random_state": 1, } results = {"C": Cs} for label, train_size in zip(labels, train_sizes): cv = ShuffleSplit(train_size=train_size, **shuffle_params) train_scores, test_scores = validation_curve( model_l1, X, y, param_name="C", param_range=Cs, cv=cv, n_jobs=2, ) results[label] = test_scores.mean(axis=1) results = pd.DataFrame(results) # %% import matplotlib.pyplot as plt fig, axes = plt.subplots(nrows=1, ncols=2, sharey=True, figsize=(12, 6)) # plot results without scaling C results.plot(x="C", ax=axes[0], logx=True) axes[0].set_ylabel("CV score") axes[0].set_title("No scaling") for label in labels: best_C = results.loc[results[label].idxmax(), "C"] axes[0].axvline(x=best_C, linestyle="--", color="grey", alpha=0.7) # plot results by scaling C for train_size_idx, label in enumerate(labels): train_size = train_sizes[train_size_idx] results_scaled = results[[label]].assign( C_scaled=Cs * float(n_samples * np.sqrt(train_size)) ) results_scaled.plot(x="C_scaled", ax=axes[1], logx=True, label=label) best_C_scaled = results_scaled["C_scaled"].loc[results[label].idxmax()] axes[1].axvline(x=best_C_scaled, linestyle="--", color="grey", alpha=0.7) axes[1].set_title("Scaling C by sqrt(1 / n_samples)") _ = fig.suptitle("Effect of scaling C with L1 penalty") # %% # In the region of small `C` (strong regularization) all the coefficients # learned by the models are zero, leading to severe underfitting. Indeed, the # accuracy in this region is at the chance level. # # Using the default scale results in a somewhat stable optimal value of `C`, # whereas the transition out of the underfitting region depends on the number of # training samples. The reparametrization leads to even more stable results. # # See e.g. theorem 3 of :arxiv:`On the prediction performance of the Lasso # <1402.1700>` or :arxiv:`Simultaneous analysis of Lasso and Dantzig selector # <0801.1095>` where the regularization parameter is always assumed to be # proportional to 1 / sqrt(n_samples). # # L2-penalty case # --------------- # We can do a similar experiment with the L2 penalty. In this case, the # theory says that in order to achieve prediction consistency, the penalty # parameter should be kept constant as the number of samples grow. 
model_l2 = LinearSVC(penalty="l2", loss="squared_hinge", dual=True) Cs = np.logspace(-8, 4, 11) labels = [f"fraction: {train_size}" for train_size in train_sizes] results = {"C": Cs} for label, train_size in zip(labels, train_sizes): cv = ShuffleSplit(train_size=train_size, **shuffle_params) train_scores, test_scores = validation_curve( model_l2, X, y, param_name="C", param_range=Cs, cv=cv, n_jobs=2, ) results[label] = test_scores.mean(axis=1) results = pd.DataFrame(results) # %% import matplotlib.pyplot as plt fig, axes = plt.subplots(nrows=1, ncols=2, sharey=True, figsize=(12, 6)) # plot results without scaling C results.plot(x="C", ax=axes[0], logx=True) axes[0].set_ylabel("CV score") axes[0].set_title("No scaling") for label in labels: best_C = results.loc[results[label].idxmax(), "C"] axes[0].axvline(x=best_C, linestyle="--", color="grey", alpha=0.8) # plot results by scaling C for train_size_idx, label in enumerate(labels): results_scaled = results[[label]].assign( C_scaled=Cs * float(n_samples * np.sqrt(train_sizes[train_size_idx])) ) results_scaled.plot(x="C_scaled", ax=axes[1], logx=True, label=label) best_C_scaled = results_scaled["C_scaled"].loc[results[label].idxmax()] axes[1].axvline(x=best_C_scaled, linestyle="--", color="grey", alpha=0.8) axes[1].set_title("Scaling C by sqrt(1 / n_samples)") fig.suptitle("Effect of scaling C with L2 penalty") plt.show() # %% # For the L2 penalty case, the reparametrization seems to have a smaller impact # on the stability of the optimal value for the regularization. The transition # out of the overfitting region occurs in a more spread range and the accuracy # does not seem to be degraded up to chance level. # # Try increasing the value to `n_splits=1_000` for better results in the L2 # case, which is not shown here due to the limitations on the documentation # builder.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
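A supplementary, condensed sketch of how the best `C` for a single training fraction can be extracted with :func:`~sklearn.model_selection.validation_curve`, using the same L1 model and grid as above; `n_splits` is reduced here to keep the run short, and the 0.5 training fraction is an arbitrary choice.

import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import ShuffleSplit, validation_curve
from sklearn.svm import LinearSVC

X, y = make_classification(n_samples=100, n_features=300, n_informative=5, random_state=1)
model_l1 = LinearSVC(penalty="l1", loss="squared_hinge", dual=False, tol=1e-3)
Cs = np.logspace(-2.3, -1.3, 10)

cv = ShuffleSplit(train_size=0.5, test_size=0.3, n_splits=20, random_state=1)
train_scores, test_scores = validation_curve(
    model_l1, X, y, param_name="C", param_range=Cs, cv=cv
)
best_C = Cs[test_scores.mean(axis=1).argmax()]
print(f"best C for a training fraction of 0.5: {best_C:.4f}")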
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/svm/plot_separating_hyperplane.py
examples/svm/plot_separating_hyperplane.py
""" ========================================= SVM: Maximum margin separating hyperplane ========================================= Plot the maximum margin separating hyperplane within a two-class separable dataset using a Support Vector Machine classifier with linear kernel. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt from sklearn import svm from sklearn.datasets import make_blobs from sklearn.inspection import DecisionBoundaryDisplay # we create 40 separable points X, y = make_blobs(n_samples=40, centers=2, random_state=6) # fit the model, don't regularize for illustration purposes clf = svm.SVC(kernel="linear", C=1000) clf.fit(X, y) plt.scatter(X[:, 0], X[:, 1], c=y, s=30, cmap=plt.cm.Paired) # plot the decision function ax = plt.gca() DecisionBoundaryDisplay.from_estimator( clf, X, plot_method="contour", colors="k", levels=[-1, 0, 1], alpha=0.5, linestyles=["--", "-", "--"], ax=ax, ) # plot support vectors ax.scatter( clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=100, linewidth=1, facecolors="none", edgecolors="k", ) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
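A supplementary sketch extracting the numbers behind the plot: the fitted hyperplane `w·x + b = 0`, the margin width `2 / ||w||`, and the fact that the support vectors sit at a decision value of roughly ±1. It refits the same model so that the snippet is self-contained.

import numpy as np
from sklearn import svm
from sklearn.datasets import make_blobs

X, y = make_blobs(n_samples=40, centers=2, random_state=6)
clf = svm.SVC(kernel="linear", C=1000).fit(X, y)

w, b = clf.coef_[0], clf.intercept_[0]
print("w =", np.round(w, 3), " b =", round(float(b), 3))
print("margin width =", 2 / np.linalg.norm(w))
# support vectors lie on the margin, i.e. their decision value is close to +/- 1
print(np.round(clf.decision_function(clf.support_vectors_), 3))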
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/svm/plot_svm_regression.py
examples/svm/plot_svm_regression.py
""" =================================================================== Support Vector Regression (SVR) using linear and non-linear kernels =================================================================== Toy example of 1D regression using linear, polynomial and RBF kernels. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn.svm import SVR # %% # Generate sample data # -------------------- X = np.sort(5 * np.random.rand(40, 1), axis=0) y = np.sin(X).ravel() # add noise to targets y[::5] += 3 * (0.5 - np.random.rand(8)) # %% # Fit regression model # -------------------- svr_rbf = SVR(kernel="rbf", C=100, gamma=0.1, epsilon=0.1) svr_lin = SVR(kernel="linear", C=100, gamma="auto") svr_poly = SVR(kernel="poly", C=100, gamma="auto", degree=3, epsilon=0.1, coef0=1) # %% # Look at the results # ------------------- lw = 2 svrs = [svr_rbf, svr_lin, svr_poly] kernel_label = ["RBF", "Linear", "Polynomial"] model_color = ["m", "c", "g"] fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(15, 10), sharey=True) for ix, svr in enumerate(svrs): axes[ix].plot( X, svr.fit(X, y).predict(X), color=model_color[ix], lw=lw, label="{} model".format(kernel_label[ix]), ) axes[ix].scatter( X[svr.support_], y[svr.support_], facecolor="none", edgecolor=model_color[ix], s=50, label="{} support vectors".format(kernel_label[ix]), ) axes[ix].scatter( X[np.setdiff1d(np.arange(len(X)), svr.support_)], y[np.setdiff1d(np.arange(len(X)), svr.support_)], facecolor="none", edgecolor="k", s=50, label="other training data", ) axes[ix].legend( loc="upper center", bbox_to_anchor=(0.5, 1.1), ncol=1, fancybox=True, shadow=True, ) fig.text(0.5, 0.04, "data", ha="center", va="center") fig.text(0.06, 0.5, "target", ha="center", va="center", rotation="vertical") fig.suptitle("Support Vector Regression", fontsize=14) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
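A supplementary sketch illustrating the role of `epsilon` that the example leaves implicit: training points lying inside the epsilon-tube around the prediction do not become support vectors, so widening the tube shrinks the support set. It uses the same kind of noisy sine data; the epsilon grid is an arbitrary choice.

import numpy as np
from sklearn.svm import SVR

rng = np.random.RandomState(0)
X = np.sort(5 * rng.rand(40, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(8))

for epsilon in (0.05, 0.1, 0.5):
    svr = SVR(kernel="rbf", C=100, gamma=0.1, epsilon=epsilon).fit(X, y)
    print(f"epsilon={epsilon}: {len(svr.support_)} support vectors out of {len(X)}")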
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/svm/plot_linearsvc_support_vectors.py
examples/svm/plot_linearsvc_support_vectors.py
""" ===================================== Plot the support vectors in LinearSVC ===================================== Unlike SVC (based on LIBSVM), LinearSVC (based on LIBLINEAR) does not provide the support vectors. This example demonstrates how to obtain the support vectors in LinearSVC. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt import numpy as np from sklearn.datasets import make_blobs from sklearn.inspection import DecisionBoundaryDisplay from sklearn.svm import LinearSVC X, y = make_blobs(n_samples=40, centers=2, random_state=0) plt.figure(figsize=(10, 5)) for i, C in enumerate([1, 100]): # "hinge" is the standard SVM loss clf = LinearSVC(C=C, loss="hinge", random_state=42).fit(X, y) # obtain the support vectors through the decision function decision_function = clf.decision_function(X) # we can also calculate the decision function manually # decision_function = np.dot(X, clf.coef_[0]) + clf.intercept_[0] # The support vectors are the samples that lie within the margin # boundaries, whose size is conventionally constrained to 1 support_vector_indices = (np.abs(decision_function) <= 1 + 1e-15).nonzero()[0] support_vectors = X[support_vector_indices] plt.subplot(1, 2, i + 1) plt.scatter(X[:, 0], X[:, 1], c=y, s=30, cmap=plt.cm.Paired) ax = plt.gca() DecisionBoundaryDisplay.from_estimator( clf, X, ax=ax, grid_resolution=50, plot_method="contour", colors="k", levels=[-1, 0, 1], alpha=0.5, linestyles=["--", "-", "--"], ) plt.scatter( support_vectors[:, 0], support_vectors[:, 1], s=100, linewidth=1, facecolors="none", edgecolors="k", ) plt.title("C=" + str(C)) plt.tight_layout() plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
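A supplementary, hedged sanity check: the margin-based rule above can be compared with the `support_` attribute of an :class:`~sklearn.svm.SVC` fitted with a linear kernel and the same `C`. The two solvers optimize related but not identical problems (and handle the intercept differently), so a large overlap is expected rather than exact equality.

import numpy as np
from sklearn.datasets import make_blobs
from sklearn.svm import SVC, LinearSVC

X, y = make_blobs(n_samples=40, centers=2, random_state=0)
C = 1

lin = LinearSVC(C=C, loss="hinge", random_state=42).fit(X, y)
margin_based = set(np.flatnonzero(np.abs(lin.decision_function(X)) <= 1 + 1e-15))

svc = SVC(kernel="linear", C=C).fit(X, y)
libsvm_based = set(svc.support_)

print("LIBLINEAR margin rule:", sorted(margin_based))
print("LIBSVM support_:      ", sorted(libsvm_based))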
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/svm/plot_iris_svc.py
examples/svm/plot_iris_svc.py
""" ================================================== Plot different SVM classifiers in the iris dataset ================================================== Comparison of different linear SVM classifiers on a 2D projection of the iris dataset. We only consider the first 2 features of this dataset: - Sepal length - Sepal width This example shows how to plot the decision surface for four SVM classifiers with different kernels. The linear models ``LinearSVC()`` and ``SVC(kernel='linear')`` yield slightly different decision boundaries. This can be a consequence of the following differences: - ``LinearSVC`` minimizes the squared hinge loss while ``SVC`` minimizes the regular hinge loss. - ``LinearSVC`` uses the One-vs-All (also known as One-vs-Rest) multiclass reduction while ``SVC`` uses the One-vs-One multiclass reduction. Both linear models have linear decision boundaries (intersecting hyperplanes) while the non-linear kernel models (polynomial or Gaussian RBF) have more flexible non-linear decision boundaries with shapes that depend on the kind of kernel and its parameters. .. NOTE:: while plotting the decision function of classifiers for toy 2D datasets can help get an intuitive understanding of their respective expressive power, be aware that those intuitions don't always generalize to more realistic high-dimensional problems. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt from sklearn import datasets, svm from sklearn.inspection import DecisionBoundaryDisplay # import some data to play with iris = datasets.load_iris() # Take the first two features. We could avoid this by using a two-dim dataset X = iris.data[:, :2] y = iris.target # we create an instance of SVM and fit out data. We do not scale our # data since we want to plot the support vectors C = 1.0 # SVM regularization parameter models = ( svm.SVC(kernel="linear", C=C), svm.LinearSVC(C=C, max_iter=10000), svm.SVC(kernel="rbf", gamma=0.7, C=C), svm.SVC(kernel="poly", degree=3, gamma="auto", C=C), ) models = (clf.fit(X, y) for clf in models) # title for the plots titles = ( "SVC with linear kernel", "LinearSVC (linear kernel)", "SVC with RBF kernel", "SVC with polynomial (degree 3) kernel", ) # Set-up 2x2 grid for plotting. fig, sub = plt.subplots(2, 2) plt.subplots_adjust(wspace=0.4, hspace=0.4) X0, X1 = X[:, 0], X[:, 1] for clf, title, ax in zip(models, titles, sub.flatten()): disp = DecisionBoundaryDisplay.from_estimator( clf, X, response_method="predict", cmap=plt.cm.coolwarm, alpha=0.8, ax=ax, xlabel=iris.feature_names[0], ylabel=iris.feature_names[1], ) ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors="k") ax.set_xticks(()) ax.set_yticks(()) ax.set_title(title) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
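A supplementary sketch measuring how much the two linear models actually disagree on these two iris features, given the hinge vs squared-hinge and one-vs-one vs one-vs-rest differences listed above; the exact number depends on the solver and is only illustrative.

import numpy as np
from sklearn import datasets, svm

iris = datasets.load_iris()
X, y = iris.data[:, :2], iris.target
C = 1.0

svc_linear = svm.SVC(kernel="linear", C=C).fit(X, y)
linear_svc = svm.LinearSVC(C=C, max_iter=10000).fit(X, y)

agreement = np.mean(svc_linear.predict(X) == linear_svc.predict(X))
print(f"fraction of identical predictions on the training data: {agreement:.3f}")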
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/tree/plot_tree_regression.py
examples/tree/plot_tree_regression.py
""" ======================== Decision Tree Regression ======================== In this example, we demonstrate the effect of changing the maximum depth of a decision tree on how it fits to the data. We perform this once on a 1D regression task and once on a multi-output regression task. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Decision Tree on a 1D Regression Task # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # Here we fit a tree on a 1D regression task. # # The :ref:`decision trees <tree>` is # used to fit a sine curve with addition noisy observation. As a result, it # learns local linear regressions approximating the sine curve. # # We can see that if the maximum depth of the tree (controlled by the # `max_depth` parameter) is set too high, the decision trees learn too fine # details of the training data and learn from the noise, i.e. they overfit. # # Create a random 1D dataset # -------------------------- import numpy as np rng = np.random.RandomState(1) X = np.sort(5 * rng.rand(80, 1), axis=0) y = np.sin(X).ravel() y[::5] += 3 * (0.5 - rng.rand(16)) # %% # Fit regression model # -------------------- # Here we fit two models with different maximum depths from sklearn.tree import DecisionTreeRegressor regr_1 = DecisionTreeRegressor(max_depth=2) regr_2 = DecisionTreeRegressor(max_depth=5) regr_1.fit(X, y) regr_2.fit(X, y) # %% # Predict # ------- # Get predictions on the test set X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis] y_1 = regr_1.predict(X_test) y_2 = regr_2.predict(X_test) # %% # Plot the results # ---------------- import matplotlib.pyplot as plt plt.figure() plt.scatter(X, y, s=20, edgecolor="black", c="darkorange", label="data") plt.plot(X_test, y_1, color="cornflowerblue", label="max_depth=2", linewidth=2) plt.plot(X_test, y_2, color="yellowgreen", label="max_depth=5", linewidth=2) plt.xlabel("data") plt.ylabel("target") plt.title("Decision Tree Regression") plt.legend() plt.show() # %% # As you can see, the model with a depth of 5 (yellow) learns the details of the # training data to the point that it overfits to the noise. On the other hand, # the model with a depth of 2 (blue) learns the major tendencies in the data well # and does not overfit. In real use cases, you need to make sure that the tree # is not overfitting the training data, which can be done using cross-validation. # %% # Decision Tree Regression with Multi-Output Targets # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # Here the :ref:`decision trees <tree>` # is used to predict simultaneously the noisy `x` and `y` observations of a circle # given a single underlying feature. As a result, it learns local linear # regressions approximating the circle. # # We can see that if the maximum depth of the tree (controlled by the # `max_depth` parameter) is set too high, the decision trees learn too fine # details of the training data and learn from the noise, i.e. they overfit. 
# %% # Create a random dataset # ----------------------- rng = np.random.RandomState(1) X = np.sort(200 * rng.rand(100, 1) - 100, axis=0) y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T y[::5, :] += 0.5 - rng.rand(20, 2) # %% # Fit regression model # -------------------- regr_1 = DecisionTreeRegressor(max_depth=2) regr_2 = DecisionTreeRegressor(max_depth=5) regr_3 = DecisionTreeRegressor(max_depth=8) regr_1.fit(X, y) regr_2.fit(X, y) regr_3.fit(X, y) # %% # Predict # ------- # Get predictions on the test set X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis] y_1 = regr_1.predict(X_test) y_2 = regr_2.predict(X_test) y_3 = regr_3.predict(X_test) # %% # Plot the results # ---------------- plt.figure() s = 25 plt.scatter(y[:, 0], y[:, 1], c="yellow", s=s, edgecolor="black", label="data") plt.scatter( y_1[:, 0], y_1[:, 1], c="cornflowerblue", s=s, edgecolor="black", label="max_depth=2", ) plt.scatter(y_2[:, 0], y_2[:, 1], c="red", s=s, edgecolor="black", label="max_depth=5") plt.scatter(y_3[:, 0], y_3[:, 1], c="blue", s=s, edgecolor="black", label="max_depth=8") plt.xlim([-6, 6]) plt.ylim([-6, 6]) plt.xlabel("target 1") plt.ylabel("target 2") plt.title("Multi-output Decision Tree Regression") plt.legend(loc="best") plt.show() # %% # As you can see, the higher the value of `max_depth`, the more details of the data # are caught by the model. However, the model also overfits to the data and is # influenced by the noise.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
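A supplementary sketch of the cross-validation suggested in the closing remark of the 1D section: scoring a small grid of `max_depth` values with :func:`~sklearn.model_selection.cross_val_score` on the same noisy sine data; the depth grid is an arbitrary choice.

import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeRegressor

rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))

for max_depth in (2, 3, 5, 8):
    scores = cross_val_score(DecisionTreeRegressor(max_depth=max_depth), X, y, cv=5)
    print(f"max_depth={max_depth}: mean cross-validated R^2 = {scores.mean():.3f}")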
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/tree/plot_iris_dtc.py
examples/tree/plot_iris_dtc.py
""" ======================================================================= Plot the decision surface of decision trees trained on the iris dataset ======================================================================= Plot the decision surface of a decision tree trained on pairs of features of the iris dataset. See :ref:`decision tree <tree>` for more information on the estimator. For each pair of iris features, the decision tree learns decision boundaries made of combinations of simple thresholding rules inferred from the training samples. We also show the tree structure of a model built on all of the features. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # First load the copy of the Iris dataset shipped with scikit-learn: from sklearn.datasets import load_iris iris = load_iris() # %% # Display the decision functions of trees trained on all pairs of features. import matplotlib.pyplot as plt import numpy as np from sklearn.datasets import load_iris from sklearn.inspection import DecisionBoundaryDisplay from sklearn.tree import DecisionTreeClassifier # Parameters n_classes = 3 plot_colors = "ryb" plot_step = 0.02 for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3], [1, 2], [1, 3], [2, 3]]): # We only take the two corresponding features X = iris.data[:, pair] y = iris.target # Train clf = DecisionTreeClassifier().fit(X, y) # Plot the decision boundary ax = plt.subplot(2, 3, pairidx + 1) plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5) DecisionBoundaryDisplay.from_estimator( clf, X, cmap=plt.cm.RdYlBu, response_method="predict", ax=ax, xlabel=iris.feature_names[pair[0]], ylabel=iris.feature_names[pair[1]], ) # Plot the training points for i, color in zip(range(n_classes), plot_colors): idx = np.asarray(y == i).nonzero() plt.scatter( X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i], edgecolor="black", s=15, ) plt.suptitle("Decision surface of decision trees trained on pairs of features") plt.legend(loc="lower right", borderpad=0, handletextpad=0) _ = plt.axis("tight") # %% # Display the structure of a single decision tree trained on all the features # together. from sklearn.tree import plot_tree plt.figure() clf = DecisionTreeClassifier().fit(iris.data, iris.target) plot_tree(clf, filled=True) plt.title("Decision tree trained on all the iris features") plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
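A supplementary sketch: besides :func:`~sklearn.tree.plot_tree`, the thresholding rules can be printed as plain text with :func:`~sklearn.tree.export_text`, which is often easier to scan than the figure. `max_depth=2` below is only there to keep the output short.

from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier, export_text

iris = load_iris()
clf = DecisionTreeClassifier(max_depth=2, random_state=0).fit(iris.data, iris.target)
print(export_text(clf, feature_names=iris.feature_names))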
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/tree/plot_cost_complexity_pruning.py
examples/tree/plot_cost_complexity_pruning.py
""" ======================================================== Post pruning decision trees with cost complexity pruning ======================================================== .. currentmodule:: sklearn.tree The :class:`DecisionTreeClassifier` provides parameters such as ``min_samples_leaf`` and ``max_depth`` to prevent a tree from overfitting. Cost complexity pruning provides another option to control the size of a tree. In :class:`DecisionTreeClassifier`, this pruning technique is parameterized by the cost complexity parameter, ``ccp_alpha``. Greater values of ``ccp_alpha`` increase the number of nodes pruned. Here we only show the effect of ``ccp_alpha`` on regularizing the trees and how to choose a ``ccp_alpha`` based on validation scores. See also :ref:`minimal_cost_complexity_pruning` for details on pruning. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import matplotlib.pyplot as plt from sklearn.datasets import load_breast_cancer from sklearn.model_selection import train_test_split from sklearn.tree import DecisionTreeClassifier # %% # Total impurity of leaves vs effective alphas of pruned tree # --------------------------------------------------------------- # Minimal cost complexity pruning recursively finds the node with the "weakest # link". The weakest link is characterized by an effective alpha, where the # nodes with the smallest effective alpha are pruned first. To get an idea of # what values of ``ccp_alpha`` could be appropriate, scikit-learn provides # :func:`DecisionTreeClassifier.cost_complexity_pruning_path` that returns the # effective alphas and the corresponding total leaf impurities at each step of # the pruning process. As alpha increases, more of the tree is pruned, which # increases the total impurity of its leaves. X, y = load_breast_cancer(return_X_y=True) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) clf = DecisionTreeClassifier(random_state=0) path = clf.cost_complexity_pruning_path(X_train, y_train) ccp_alphas, impurities = path.ccp_alphas, path.impurities # %% # In the following plot, the maximum effective alpha value is removed, because # it is the trivial tree with only one node. fig, ax = plt.subplots() ax.plot(ccp_alphas[:-1], impurities[:-1], marker="o", drawstyle="steps-post") ax.set_xlabel("effective alpha") ax.set_ylabel("total impurity of leaves") ax.set_title("Total Impurity vs effective alpha for training set") # %% # Next, we train a decision tree using the effective alphas. The last value # in ``ccp_alphas`` is the alpha value that prunes the whole tree, # leaving the tree, ``clfs[-1]``, with one node. clfs = [] for ccp_alpha in ccp_alphas: clf = DecisionTreeClassifier(random_state=0, ccp_alpha=ccp_alpha) clf.fit(X_train, y_train) clfs.append(clf) print( "Number of nodes in the last tree is: {} with ccp_alpha: {}".format( clfs[-1].tree_.node_count, ccp_alphas[-1] ) ) # %% # For the remainder of this example, we remove the last element in # ``clfs`` and ``ccp_alphas``, because it is the trivial tree with only one # node. Here we show that the number of nodes and tree depth decreases as alpha # increases. 
clfs = clfs[:-1] ccp_alphas = ccp_alphas[:-1] node_counts = [clf.tree_.node_count for clf in clfs] depth = [clf.tree_.max_depth for clf in clfs] fig, ax = plt.subplots(2, 1) ax[0].plot(ccp_alphas, node_counts, marker="o", drawstyle="steps-post") ax[0].set_xlabel("alpha") ax[0].set_ylabel("number of nodes") ax[0].set_title("Number of nodes vs alpha") ax[1].plot(ccp_alphas, depth, marker="o", drawstyle="steps-post") ax[1].set_xlabel("alpha") ax[1].set_ylabel("depth of tree") ax[1].set_title("Depth vs alpha") fig.tight_layout() # %% # Accuracy vs alpha for training and testing sets # ---------------------------------------------------- # When ``ccp_alpha`` is set to zero and keeping the other default parameters # of :class:`DecisionTreeClassifier`, the tree overfits, leading to # a 100% training accuracy and 88% testing accuracy. As alpha increases, more # of the tree is pruned, thus creating a decision tree that generalizes better. # In this example, setting ``ccp_alpha=0.015`` maximizes the testing accuracy. train_scores = [clf.score(X_train, y_train) for clf in clfs] test_scores = [clf.score(X_test, y_test) for clf in clfs] fig, ax = plt.subplots() ax.set_xlabel("alpha") ax.set_ylabel("accuracy") ax.set_title("Accuracy vs alpha for training and testing sets") ax.plot(ccp_alphas, train_scores, marker="o", label="train", drawstyle="steps-post") ax.plot(ccp_alphas, test_scores, marker="o", label="test", drawstyle="steps-post") ax.legend() plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
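A supplementary sketch: instead of reading the best `ccp_alpha` off the test curve (which would use the test set for model selection), the candidate alphas returned by `cost_complexity_pruning_path` can be cross-validated with :class:`~sklearn.model_selection.GridSearchCV`; `cv=5` is an arbitrary choice.

import numpy as np
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.tree import DecisionTreeClassifier

X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

path = DecisionTreeClassifier(random_state=0).cost_complexity_pruning_path(X_train, y_train)
# drop the last alpha (the trivial single-node tree) and guard against tiny
# negative values caused by floating point round-off
ccp_alphas = np.clip(path.ccp_alphas[:-1], 0, None)

grid = GridSearchCV(
    DecisionTreeClassifier(random_state=0),
    param_grid={"ccp_alpha": ccp_alphas},
    cv=5,
).fit(X_train, y_train)

print("best ccp_alpha:", grid.best_params_["ccp_alpha"])
print("test accuracy:", grid.score(X_test, y_test))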
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/tree/plot_unveil_tree_structure.py
examples/tree/plot_unveil_tree_structure.py
""" ========================================= Understanding the decision tree structure ========================================= The decision tree structure can be analysed to gain further insight on the relation between the features and the target to predict. In this example, we show how to retrieve: - the binary tree structure; - the depth of each node and whether or not it's a leaf; - the nodes that were reached by a sample using the ``decision_path`` method; - the leaf that was reached by a sample using the apply method; - the rules that were used to predict a sample; - the decision path shared by a group of samples. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import numpy as np from matplotlib import pyplot as plt from sklearn import tree from sklearn.datasets import load_iris from sklearn.model_selection import train_test_split from sklearn.tree import DecisionTreeClassifier ############################################################################## # Train tree classifier # --------------------- # First, we fit a :class:`~sklearn.tree.DecisionTreeClassifier` using the # :func:`~sklearn.datasets.load_iris` dataset. iris = load_iris() X = iris.data y = iris.target X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) clf = DecisionTreeClassifier(max_leaf_nodes=3, random_state=0) clf.fit(X_train, y_train) ############################################################################## # Tree structure # -------------- # # The decision classifier has an attribute called ``tree_`` which allows access # to low level attributes such as ``node_count``, the total number of nodes, # and ``max_depth``, the maximal depth of the tree. The # ``tree_.compute_node_depths()`` method computes the depth of each node in the # tree. `tree_` also stores the entire binary tree structure, represented as a # number of parallel arrays. The i-th element of each array holds information # about the node ``i``. Node 0 is the tree's root. Some of the arrays only # apply to either leaves or split nodes. In this case the values of the nodes # of the other type is arbitrary. For example, the arrays ``feature`` and # ``threshold`` only apply to split nodes. The values for leaf nodes in these # arrays are therefore arbitrary. # # Among these arrays, we have: # # - ``children_left[i]``: id of the left child of node ``i`` or -1 if leaf node # - ``children_right[i]``: id of the right child of node ``i`` or -1 if leaf node # - ``feature[i]``: feature used for splitting node ``i`` # - ``threshold[i]``: threshold value at node ``i`` # - ``n_node_samples[i]``: the number of training samples reaching node ``i`` # - ``impurity[i]``: the impurity at node ``i`` # - ``weighted_n_node_samples[i]``: the weighted number of training samples # reaching node ``i`` # - ``value[i, j, k]``: the summary of the training samples that reached node i for # output j and class k (for regression tree, class is set to 1). See below # for more information about ``value``. # # Using the arrays, we can traverse the tree structure to compute various # properties. Below, we will compute the depth of each node and whether or not # it is a leaf. 
n_nodes = clf.tree_.node_count children_left = clf.tree_.children_left children_right = clf.tree_.children_right feature = clf.tree_.feature threshold = clf.tree_.threshold values = clf.tree_.value node_depth = np.zeros(shape=n_nodes, dtype=np.int64) is_leaves = np.zeros(shape=n_nodes, dtype=bool) stack = [(0, 0)] # start with the root node id (0) and its depth (0) while len(stack) > 0: # `pop` ensures each node is only visited once node_id, depth = stack.pop() node_depth[node_id] = depth # If the left and right child of a node is not the same we have a split # node is_split_node = children_left[node_id] != children_right[node_id] # If a split node, append left and right children and depth to `stack` # so we can loop through them if is_split_node: stack.append((children_left[node_id], depth + 1)) stack.append((children_right[node_id], depth + 1)) else: is_leaves[node_id] = True print( "The binary tree structure has {n} nodes and has " "the following tree structure:\n".format(n=n_nodes) ) for i in range(n_nodes): if is_leaves[i]: print( "{space}node={node} is a leaf node with value={value}.".format( space=node_depth[i] * "\t", node=i, value=np.around(values[i], 3) ) ) else: print( "{space}node={node} is a split node with value={value}: " "go to node {left} if X[:, {feature}] <= {threshold} " "else to node {right}.".format( space=node_depth[i] * "\t", node=i, left=children_left[i], feature=feature[i], threshold=threshold[i], right=children_right[i], value=np.around(values[i], 3), ) ) # %% # What is the values array used here? # ----------------------------------- # The `tree_.value` array is a 3D array of shape # [``n_nodes``, ``n_classes``, ``n_outputs``] which provides the proportion of samples # reaching a node for each class and for each output. # Each node has a ``value`` array which is the proportion of weighted samples reaching # this node for each output and class with respect to the parent node. # # One could convert this to the absolute weighted number of samples reaching a node, # by multiplying this number by `tree_.weighted_n_node_samples[node_idx]` for the # given node. Note sample weights are not used in this example, so the weighted # number of samples is the number of samples reaching the node because each sample # has a weight of 1 by default. # # For example, in the above tree built on the iris dataset, the root node has # ``value = [0.33, 0.304, 0.366]`` indicating there are 33% of class 0 samples, # 30.4% of class 1 samples, and 36.6% of class 2 samples at the root node. One can # convert this to the absolute number of samples by multiplying by the number of # samples reaching the root node, which is `tree_.weighted_n_node_samples[0]`. # Then the root node has ``value = [37, 34, 41]``, indicating there are 37 samples # of class 0, 34 samples of class 1, and 41 samples of class 2 at the root node. # # Traversing the tree, the samples are split and as a result, the ``value`` array # reaching each node changes. The left child of the root node has ``value = [1., 0, 0]`` # (or ``value = [37, 0, 0]`` when converted to the absolute number of samples) # because all 37 samples in the left child node are from class 0. # # Note: In this example, `n_outputs=1`, but the tree classifier can also handle # multi-output problems. The `value` array at each node would just be a 2D # array instead. ############################################################################## # We can compare the above output to the plot of the decision tree. 
# Here, we show the proportions of samples of each class that reach each # node corresponding to the actual elements of `tree_.value` array. tree.plot_tree(clf, proportion=True) plt.show() ############################################################################## # Decision path # ------------- # # We can also retrieve the decision path of samples of interest. The # ``decision_path`` method outputs an indicator matrix that allows us to # retrieve the nodes the samples of interest traverse through. A non zero # element in the indicator matrix at position ``(i, j)`` indicates that # the sample ``i`` goes through the node ``j``. Or, for one sample ``i``, the # positions of the non zero elements in row ``i`` of the indicator matrix # designate the ids of the nodes that sample goes through. # # The leaf ids reached by samples of interest can be obtained with the # ``apply`` method. This returns an array of the node ids of the leaves # reached by each sample of interest. Using the leaf ids and the # ``decision_path`` we can obtain the splitting conditions that were used to # predict a sample or a group of samples. First, let's do it for one sample. # Note that ``node_index`` is a sparse matrix. node_indicator = clf.decision_path(X_test) leaf_id = clf.apply(X_test) sample_id = 0 # obtain ids of the nodes `sample_id` goes through, i.e., row `sample_id` node_index = node_indicator.indices[ node_indicator.indptr[sample_id] : node_indicator.indptr[sample_id + 1] ] print("Rules used to predict sample {id}:\n".format(id=sample_id)) for node_id in node_index: # continue to the next node if it is a leaf node if leaf_id[sample_id] == node_id: continue # check if value of the split feature for sample 0 is below threshold if X_test[sample_id, feature[node_id]] <= threshold[node_id]: threshold_sign = "<=" else: threshold_sign = ">" print( "decision node {node} : (X_test[{sample}, {feature}] = {value}) " "{inequality} {threshold})".format( node=node_id, sample=sample_id, feature=feature[node_id], value=X_test[sample_id, feature[node_id]], inequality=threshold_sign, threshold=threshold[node_id], ) ) ############################################################################## # For a group of samples, we can determine the common nodes the samples go # through. sample_ids = [0, 1] # boolean array indicating the nodes both samples go through common_nodes = node_indicator.toarray()[sample_ids].sum(axis=0) == len(sample_ids) # obtain node ids using position in array common_node_id = np.arange(n_nodes)[common_nodes] print( "\nThe following samples {samples} share the node(s) {nodes} in the tree.".format( samples=sample_ids, nodes=common_node_id ) ) print("This is {prop}% of all nodes.".format(prop=100 * len(common_node_id) / n_nodes))
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
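A supplementary sketch verifying the relation between proportions and counts described above: multiplying `tree_.value[node_id]` by `tree_.weighted_n_node_samples[node_id]` recovers the (weighted) number of training samples per class at each node. The tree is refitted here so that the snippet stands alone.

import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

iris = load_iris()
X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state=0)
clf = DecisionTreeClassifier(max_leaf_nodes=3, random_state=0).fit(X_train, y_train)

tree = clf.tree_
for node_id in range(tree.node_count):
    proportions = tree.value[node_id]
    counts = proportions * tree.weighted_n_node_samples[node_id]
    print(
        f"node {node_id}: proportions={np.round(proportions, 3)}, "
        f"counts={np.round(counts).astype(int)}"
    )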
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/frozen/plot_frozen_examples.py
examples/frozen/plot_frozen_examples.py
""" =================================== Examples of Using `FrozenEstimator` =================================== This example showcases some use cases of :class:`~sklearn.frozen.FrozenEstimator`. :class:`~sklearn.frozen.FrozenEstimator` is a utility class that allows to freeze a fitted estimator. This is useful, for instance, when we want to pass a fitted estimator to a meta-estimator, such as :class:`~sklearn.model_selection.FixedThresholdClassifier` without letting the meta-estimator refit the estimator. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Setting a decision threshold for a pre-fitted classifier # -------------------------------------------------------- # Fitted classifiers in scikit-learn use an arbitrary decision threshold to decide # which class the given sample belongs to. The decision threshold is either `0.0` on the # value returned by :term:`decision_function`, or `0.5` on the probability returned by # :term:`predict_proba`. # # However, one might want to set a custom decision threshold. We can do this by # using :class:`~sklearn.model_selection.FixedThresholdClassifier` and wrapping the # classifier with :class:`~sklearn.frozen.FrozenEstimator`. from sklearn.datasets import make_classification from sklearn.frozen import FrozenEstimator from sklearn.linear_model import LogisticRegression from sklearn.model_selection import FixedThresholdClassifier, train_test_split X, y = make_classification(n_samples=1000, random_state=0) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) classifier = LogisticRegression().fit(X_train, y_train) print( "Probability estimates for three data points:\n" f"{classifier.predict_proba(X_test[-3:]).round(3)}" ) print( "Predicted class for the same three data points:\n" f"{classifier.predict(X_test[-3:])}" ) # %% # Now imagine you'd want to set a different decision threshold on the probability # estimates. We can do this by wrapping the classifier with # :class:`~sklearn.frozen.FrozenEstimator` and passing it to # :class:`~sklearn.model_selection.FixedThresholdClassifier`. threshold_classifier = FixedThresholdClassifier( estimator=FrozenEstimator(classifier), threshold=0.9 ) # %% # Note that in the above piece of code, calling `fit` on # :class:`~sklearn.model_selection.FixedThresholdClassifier` does not refit the # underlying classifier. # # Now, let's see how the predictions changed with respect to the probability # threshold. print( "Probability estimates for three data points with FixedThresholdClassifier:\n" f"{threshold_classifier.predict_proba(X_test[-3:]).round(3)}" ) print( "Predicted class for the same three data points with FixedThresholdClassifier:\n" f"{threshold_classifier.predict(X_test[-3:])}" ) # %% # We see that the probability estimates stay the same, but since a different decision # threshold is used, the predicted classes are different. # # Please refer to # :ref:`sphx_glr_auto_examples_model_selection_plot_cost_sensitive_learning.py` # to learn about cost-sensitive learning and decision threshold tuning. # %% # Calibration of a pre-fitted classifier # -------------------------------------- # You can use :class:`~sklearn.frozen.FrozenEstimator` to calibrate a pre-fitted # classifier using :class:`~sklearn.calibration.CalibratedClassifierCV`. 
from sklearn.calibration import CalibratedClassifierCV from sklearn.metrics import brier_score_loss calibrated_classifier = CalibratedClassifierCV( estimator=FrozenEstimator(classifier) ).fit(X_train, y_train) prob_pos_clf = classifier.predict_proba(X_test)[:, 1] clf_score = brier_score_loss(y_test, prob_pos_clf) print(f"No calibration: {clf_score:.3f}") prob_pos_calibrated = calibrated_classifier.predict_proba(X_test)[:, 1] calibrated_score = brier_score_loss(y_test, prob_pos_calibrated) print(f"With calibration: {calibrated_score:.3f}")
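# %%
# A quick sanity check (a sketch added here, not part of the original example) of
# the "no refit" behaviour described above: since
# :class:`~sklearn.frozen.FrozenEstimator` turns `fit` into a no-op and is
# returned as-is when cloned, fitting the threshold wrapper should leave the
# original classifier's learned coefficients untouched.
import numpy as np

coef_before = classifier.coef_.copy()
threshold_classifier.fit(X_train, y_train)  # expected to be a no-op on the frozen model
print(np.allclose(coef_before, classifier.coef_))  # expected: True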
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/kernel_approximation/plot_scalable_poly_kernels.py
examples/kernel_approximation/plot_scalable_poly_kernels.py
""" ====================================================== Scalable learning with polynomial kernel approximation ====================================================== .. currentmodule:: sklearn.kernel_approximation This example illustrates the use of :class:`PolynomialCountSketch` to efficiently generate polynomial kernel feature-space approximations. This is used to train linear classifiers that approximate the accuracy of kernelized ones. We use the Covtype dataset [2]_, trying to reproduce the experiments on the original paper of Tensor Sketch [1]_, i.e. the algorithm implemented by :class:`PolynomialCountSketch`. First, we compute the accuracy of a linear classifier on the original features. Then, we train linear classifiers on different numbers of features (`n_components`) generated by :class:`PolynomialCountSketch`, approximating the accuracy of a kernelized classifier in a scalable manner. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Preparing the data # ------------------ # # Load the Covtype dataset, which contains 581,012 samples # with 54 features each, distributed among 6 classes. The goal of this dataset # is to predict forest cover type from cartographic variables only # (no remotely sensed data). After loading, we transform it into a binary # classification problem to match the version of the dataset in the # LIBSVM webpage [2]_, which was the one used in [1]_. from sklearn.datasets import fetch_covtype X, y = fetch_covtype(return_X_y=True) y[y != 2] = 0 y[y == 2] = 1 # We will try to separate class 2 from the other 6 classes. # %% # Partitioning the data # --------------------- # # Here we select 5,000 samples for training and 10,000 for testing. # To actually reproduce the results in the original Tensor Sketch paper, # select 100,000 for training. from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split( X, y, train_size=5_000, test_size=10_000, random_state=42 ) # %% # Feature normalization # --------------------- # # Now scale features to the range [0, 1] to match the format of the dataset in # the LIBSVM webpage, and then normalize to unit length as done in the # original Tensor Sketch paper [1]_. from sklearn.pipeline import make_pipeline from sklearn.preprocessing import MinMaxScaler, Normalizer mm = make_pipeline(MinMaxScaler(), Normalizer()) X_train = mm.fit_transform(X_train) X_test = mm.transform(X_test) # %% # Establishing a baseline model # ----------------------------- # # As a baseline, train a linear SVM on the original features and print the # accuracy. We also measure and store accuracies and training times to # plot them later. import time from sklearn.svm import LinearSVC results = {} lsvm = LinearSVC() start = time.time() lsvm.fit(X_train, y_train) lsvm_time = time.time() - start lsvm_score = 100 * lsvm.score(X_test, y_test) results["LSVM"] = {"time": lsvm_time, "score": lsvm_score} print(f"Linear SVM score on raw features: {lsvm_score:.2f}%") # %% # Establishing the kernel approximation model # ------------------------------------------- # # Then we train linear SVMs on the features generated by # :class:`PolynomialCountSketch` with different values for `n_components`, # showing that these kernel feature approximations improve the accuracy # of linear classification. 
In typical application scenarios, `n_components` # should be larger than the number of features in the input representation # in order to achieve an improvement with respect to linear classification. # As a rule of thumb, the optimum of evaluation score / run time cost is # typically achieved at around `n_components` = 10 * `n_features`, though this # might depend on the specific dataset being handled. Note that, since the # original samples have 54 features, the explicit feature map of the # polynomial kernel of degree four would have approximately 8.5 million # features (precisely, 54^4). Thanks to :class:`PolynomialCountSketch`, we can # condense most of the discriminative information of that feature space into a # much more compact representation. While we run the experiment only a single time # (`n_runs` = 1) in this example, in practice one should repeat the experiment several # times to compensate for the stochastic nature of :class:`PolynomialCountSketch`. from sklearn.kernel_approximation import PolynomialCountSketch n_runs = 1 N_COMPONENTS = [250, 500, 1000, 2000] for n_components in N_COMPONENTS: ps_lsvm_time = 0 ps_lsvm_score = 0 for _ in range(n_runs): pipeline = make_pipeline( PolynomialCountSketch(n_components=n_components, degree=4), LinearSVC(), ) start = time.time() pipeline.fit(X_train, y_train) ps_lsvm_time += time.time() - start ps_lsvm_score += 100 * pipeline.score(X_test, y_test) ps_lsvm_time /= n_runs ps_lsvm_score /= n_runs results[f"LSVM + PS({n_components})"] = { "time": ps_lsvm_time, "score": ps_lsvm_score, } print( f"Linear SVM score on {n_components} PolynomialCountSketch " f"features: {ps_lsvm_score:.2f}%" ) # %% # Establishing the kernelized SVM model # ------------------------------------- # # Train a kernelized SVM to see how well :class:`PolynomialCountSketch` # is approximating the performance of the kernel. This, of course, may take # some time, as the SVC class has a relatively poor scalability. This is the # reason why kernel approximators are so useful: from sklearn.svm import SVC ksvm = SVC(C=500.0, kernel="poly", degree=4, coef0=0, gamma=1.0) start = time.time() ksvm.fit(X_train, y_train) ksvm_time = time.time() - start ksvm_score = 100 * ksvm.score(X_test, y_test) results["KSVM"] = {"time": ksvm_time, "score": ksvm_score} print(f"Kernel-SVM score on raw features: {ksvm_score:.2f}%") # %% # Comparing the results # --------------------- # # Finally, plot the results of the different methods against their training # times. As we can see, the kernelized SVM achieves a higher accuracy, # but its training time is much larger and, most importantly, will grow # much faster if the number of training samples increases. 
import matplotlib.pyplot as plt fig, ax = plt.subplots(figsize=(7, 7)) ax.scatter( [ results["LSVM"]["time"], ], [ results["LSVM"]["score"], ], label="Linear SVM", c="green", marker="^", ) ax.scatter( [ results["LSVM + PS(250)"]["time"], ], [ results["LSVM + PS(250)"]["score"], ], label="Linear SVM + PolynomialCountSketch", c="blue", ) for n_components in N_COMPONENTS: ax.scatter( [ results[f"LSVM + PS({n_components})"]["time"], ], [ results[f"LSVM + PS({n_components})"]["score"], ], c="blue", ) ax.annotate( f"n_comp.={n_components}", ( results[f"LSVM + PS({n_components})"]["time"], results[f"LSVM + PS({n_components})"]["score"], ), xytext=(-30, 10), textcoords="offset pixels", ) ax.scatter( [ results["KSVM"]["time"], ], [ results["KSVM"]["score"], ], label="Kernel SVM", c="red", marker="x", ) ax.set_xlabel("Training time (s)") ax.set_ylabel("Accuracy (%)") ax.legend() plt.show() # %% # References # ========== # # .. [1] Pham, Ninh and Rasmus Pagh. "Fast and scalable polynomial kernels via # explicit feature maps." KDD '13 (2013). # https://doi.org/10.1145/2487575.2487591 # # .. [2] LIBSVM binary datasets repository # https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary.html
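# %%
# As a closing aside (a sketch added here, not part of the original script) on the
# dimensionality argument made earlier: the exact degree-4 feature map of the 54
# Covtype features would have 54**4 = 8,503,056 dimensions, whereas
# :class:`PolynomialCountSketch` compresses it into a few thousand components.
ps = PolynomialCountSketch(n_components=1_000, degree=4, random_state=42)
print("Exact degree-4 feature space size:", 54**4)
print("Sketched feature space shape:", ps.fit_transform(X_train).shape)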
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/neural_networks/plot_mlp_training_curves.py
examples/neural_networks/plot_mlp_training_curves.py
""" ======================================================== Compare Stochastic learning strategies for MLPClassifier ======================================================== This example visualizes some training loss curves for different stochastic learning strategies, including SGD and Adam. Because of time-constraints, we use several small datasets, for which L-BFGS might be more suitable. The general trend shown in these examples seems to carry over to larger datasets, however. Note that those results can be highly dependent on the value of ``learning_rate_init``. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import warnings import matplotlib.pyplot as plt from sklearn import datasets from sklearn.exceptions import ConvergenceWarning from sklearn.neural_network import MLPClassifier from sklearn.preprocessing import MinMaxScaler # different learning rate schedules and momentum parameters params = [ { "solver": "sgd", "learning_rate": "constant", "momentum": 0, "learning_rate_init": 0.2, }, { "solver": "sgd", "learning_rate": "constant", "momentum": 0.9, "nesterovs_momentum": False, "learning_rate_init": 0.2, }, { "solver": "sgd", "learning_rate": "constant", "momentum": 0.9, "nesterovs_momentum": True, "learning_rate_init": 0.2, }, { "solver": "sgd", "learning_rate": "invscaling", "momentum": 0, "learning_rate_init": 0.2, }, { "solver": "sgd", "learning_rate": "invscaling", "momentum": 0.9, "nesterovs_momentum": False, "learning_rate_init": 0.2, }, { "solver": "sgd", "learning_rate": "invscaling", "momentum": 0.9, "nesterovs_momentum": True, "learning_rate_init": 0.2, }, {"solver": "adam", "learning_rate_init": 0.01}, ] labels = [ "constant learning-rate", "constant with momentum", "constant with Nesterov's momentum", "inv-scaling learning-rate", "inv-scaling with momentum", "inv-scaling with Nesterov's momentum", "adam", ] plot_args = [ {"c": "red", "linestyle": "-"}, {"c": "green", "linestyle": "-"}, {"c": "blue", "linestyle": "-"}, {"c": "red", "linestyle": "--"}, {"c": "green", "linestyle": "--"}, {"c": "blue", "linestyle": "--"}, {"c": "black", "linestyle": "-"}, ] def plot_on_dataset(X, y, ax, name): # for each dataset, plot learning for each learning strategy print("\nlearning on dataset %s" % name) ax.set_title(name) X = MinMaxScaler().fit_transform(X) mlps = [] if name == "digits": # digits is larger but converges fairly quickly max_iter = 15 else: max_iter = 400 for label, param in zip(labels, params): print("training: %s" % label) mlp = MLPClassifier(random_state=0, max_iter=max_iter, **param) # some parameter combinations will not converge as can be seen on the # plots so they are ignored here with warnings.catch_warnings(): warnings.filterwarnings( "ignore", category=ConvergenceWarning, module="sklearn" ) mlp.fit(X, y) mlps.append(mlp) print("Training set score: %f" % mlp.score(X, y)) print("Training set loss: %f" % mlp.loss_) for mlp, label, args in zip(mlps, labels, plot_args): ax.plot(mlp.loss_curve_, label=label, **args) fig, axes = plt.subplots(2, 2, figsize=(15, 10)) # load / generate some toy datasets iris = datasets.load_iris() X_digits, y_digits = datasets.load_digits(return_X_y=True) data_sets = [ (iris.data, iris.target), (X_digits, y_digits), datasets.make_circles(noise=0.2, factor=0.5, random_state=1), datasets.make_moons(noise=0.3, random_state=0), ] for ax, data, name in zip( axes.ravel(), data_sets, ["iris", "digits", "circles", "moons"] ): plot_on_dataset(*data, ax=ax, name=name) fig.legend(ax.get_lines(), labels, 
ncol=3, loc="upper center") plt.show()
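# %%
# A minimal follow-up sketch (not part of the original example) of the note above
# that results depend strongly on ``learning_rate_init``: refit the "adam"
# configuration on the digits data with a few initial learning rates and compare
# the final training loss. The chosen values are illustrative only.
import warnings

from sklearn import datasets
from sklearn.exceptions import ConvergenceWarning
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import MinMaxScaler

X_d, y_d = datasets.load_digits(return_X_y=True)
X_d = MinMaxScaler().fit_transform(X_d)
with warnings.catch_warnings():
    warnings.simplefilter("ignore", ConvergenceWarning)
    for lr_init in (0.001, 0.01, 0.1):
        mlp = MLPClassifier(
            solver="adam", learning_rate_init=lr_init, max_iter=15, random_state=0
        ).fit(X_d, y_d)
        print(f"learning_rate_init={lr_init}: final training loss {mlp.loss_:.3f}")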
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/neural_networks/plot_mnist_filters.py
examples/neural_networks/plot_mnist_filters.py
""" ===================================== Visualization of MLP weights on MNIST ===================================== Sometimes looking at the learned coefficients of a neural network can provide insight into the learning behavior. For example if weights look unstructured, maybe some were not used at all, or if very large coefficients exist, maybe regularization was too low or the learning rate too high. This example shows how to plot some of the first layer weights in a MLPClassifier trained on the MNIST dataset. The input data consists of 28x28 pixel handwritten digits, leading to 784 features in the dataset. Therefore the first layer weight matrix has the shape (784, hidden_layer_sizes[0]). We can therefore visualize a single column of the weight matrix as a 28x28 pixel image. To make the example run faster, we use very few hidden units, and train only for a very short time. Training longer would result in weights with a much smoother spatial appearance. The example will throw a warning because it doesn't converge, in this case this is what we want because of resource usage constraints on our Continuous Integration infrastructure that is used to build this documentation on a regular basis. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import warnings import matplotlib.pyplot as plt from sklearn.datasets import fetch_openml from sklearn.exceptions import ConvergenceWarning from sklearn.model_selection import train_test_split from sklearn.neural_network import MLPClassifier # Load data from https://www.openml.org/d/554 X, y = fetch_openml("mnist_784", version=1, return_X_y=True, as_frame=False) X = X / 255.0 # Split data into train partition and test partition X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0, test_size=0.7) mlp = MLPClassifier( hidden_layer_sizes=(40,), max_iter=8, alpha=1e-4, solver="sgd", verbose=10, random_state=1, learning_rate_init=0.2, ) # this example won't converge because of resource usage constraints on # our Continuous Integration infrastructure, so we catch the warning and # ignore it here with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=ConvergenceWarning, module="sklearn") mlp.fit(X_train, y_train) print("Training set score: %f" % mlp.score(X_train, y_train)) print("Test set score: %f" % mlp.score(X_test, y_test)) fig, axes = plt.subplots(4, 4) # use global min / max to ensure all weights are shown on the same scale vmin, vmax = mlp.coefs_[0].min(), mlp.coefs_[0].max() for coef, ax in zip(mlp.coefs_[0].T, axes.ravel()): ax.matshow(coef.reshape(28, 28), cmap=plt.cm.gray, vmin=0.5 * vmin, vmax=0.5 * vmax) ax.set_xticks(()) ax.set_yticks(()) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/neural_networks/plot_rbm_logistic_classification.py
examples/neural_networks/plot_rbm_logistic_classification.py
""" ============================================================== Restricted Boltzmann Machine features for digit classification ============================================================== For greyscale image data where pixel values can be interpreted as degrees of blackness on a white background, like handwritten digit recognition, the Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM <sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear feature extraction. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Generate data # ------------- # # In order to learn good latent representations from a small dataset, we # artificially generate more labeled data by perturbing the training data with # linear shifts of 1 pixel in each direction. import numpy as np from scipy.ndimage import convolve from sklearn import datasets from sklearn.model_selection import train_test_split from sklearn.preprocessing import minmax_scale def nudge_dataset(X, Y): """ This produces a dataset 5 times bigger than the original one, by moving the 8x8 images in X around by 1px to left, right, down, up """ direction_vectors = [ [[0, 1, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [1, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 1], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 1, 0]], ] def shift(x, w): return convolve(x.reshape((8, 8)), mode="constant", weights=w).ravel() X = np.concatenate( [X] + [np.apply_along_axis(shift, 1, X, vector) for vector in direction_vectors] ) Y = np.concatenate([Y for _ in range(5)], axis=0) return X, Y X, y = datasets.load_digits(return_X_y=True) X = np.asarray(X, "float32") X, Y = nudge_dataset(X, y) X = minmax_scale(X, feature_range=(0, 1)) # 0-1 scaling X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0) # %% # Models definition # ----------------- # # We build a classification pipeline with a BernoulliRBM feature extractor and # a :class:`LogisticRegression <sklearn.linear_model.LogisticRegression>` # classifier. from sklearn import linear_model from sklearn.neural_network import BernoulliRBM from sklearn.pipeline import Pipeline logistic = linear_model.LogisticRegression(solver="newton-cg", tol=1) rbm = BernoulliRBM(random_state=0, verbose=True) rbm_features_classifier = Pipeline(steps=[("rbm", rbm), ("logistic", logistic)]) # %% # Training # -------- # # The hyperparameters of the entire model (learning rate, hidden layer size, # regularization) were optimized by grid search, but the search is not # reproduced here because of runtime constraints. from sklearn.base import clone # Hyper-parameters. These were set by cross-validation, # using a GridSearchCV. Here we are not performing cross-validation to # save time. 
rbm.learning_rate = 0.06 rbm.n_iter = 10 # More components tend to give better prediction performance, but larger # fitting time rbm.n_components = 100 logistic.C = 6000 # Training RBM-Logistic Pipeline rbm_features_classifier.fit(X_train, Y_train) # Training the Logistic regression classifier directly on the pixel raw_pixel_classifier = clone(logistic) raw_pixel_classifier.C = 100.0 raw_pixel_classifier.fit(X_train, Y_train) # %% # Evaluation # ---------- from sklearn import metrics Y_pred = rbm_features_classifier.predict(X_test) print( "Logistic regression using RBM features:\n%s\n" % (metrics.classification_report(Y_test, Y_pred)) ) # %% Y_pred = raw_pixel_classifier.predict(X_test) print( "Logistic regression using raw pixel features:\n%s\n" % (metrics.classification_report(Y_test, Y_pred)) ) # %% # The features extracted by the BernoulliRBM help improve the classification # accuracy with respect to the logistic regression on raw pixels. # %% # Plotting # -------- import matplotlib.pyplot as plt plt.figure(figsize=(4.2, 4)) for i, comp in enumerate(rbm.components_): plt.subplot(10, 10, i + 1) plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r, interpolation="nearest") plt.xticks(()) plt.yticks(()) plt.suptitle("100 components extracted by RBM", fontsize=16) plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23) plt.show()
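# %%
# For reference, a hypothetical sketch of the kind of grid search alluded to
# above (added here; the grids and values are illustrative assumptions, not the
# search actually run by the authors). It is left commented out because it is
# far too slow for this example.
from sklearn.model_selection import GridSearchCV

param_grid = {
    "rbm__learning_rate": [0.01, 0.06, 0.1],
    "rbm__n_components": [50, 100, 200],
    "logistic__C": [1_000, 6_000, 10_000],
}
# search = GridSearchCV(rbm_features_classifier, param_grid, cv=3, n_jobs=-1)
# search.fit(X_train, Y_train)
# print(search.best_params_)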
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/neural_networks/plot_mlp_alpha.py
examples/neural_networks/plot_mlp_alpha.py
""" ================================================ Varying regularization in Multi-layer Perceptron ================================================ A comparison of different values for regularization parameter 'alpha' on synthetic datasets. The plot shows that different alphas yield different decision functions. Alpha is a parameter for regularization term, aka penalty term, that combats overfitting by constraining the size of the weights. Increasing alpha may fix high variance (a sign of overfitting) by encouraging smaller weights, resulting in a decision boundary plot that appears with lesser curvatures. Similarly, decreasing alpha may fix high bias (a sign of underfitting) by encouraging larger weights, potentially resulting in a more complicated decision boundary. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import numpy as np from matplotlib import pyplot as plt from matplotlib.colors import ListedColormap from sklearn.datasets import make_circles, make_classification, make_moons from sklearn.model_selection import train_test_split from sklearn.neural_network import MLPClassifier from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler h = 0.02 # step size in the mesh alphas = np.logspace(-1, 1, 5) classifiers = [] names = [] for alpha in alphas: classifiers.append( make_pipeline( StandardScaler(), MLPClassifier( solver="lbfgs", alpha=alpha, random_state=1, max_iter=2000, early_stopping=True, hidden_layer_sizes=[10, 10], ), ) ) names.append(f"alpha {alpha:.2f}") X, y = make_classification( n_features=2, n_redundant=0, n_informative=2, random_state=0, n_clusters_per_class=1 ) rng = np.random.RandomState(2) X += 2 * rng.uniform(size=X.shape) linearly_separable = (X, y) datasets = [ make_moons(noise=0.3, random_state=0), make_circles(noise=0.2, factor=0.5, random_state=1), linearly_separable, ] figure = plt.figure(figsize=(17, 9)) i = 1 # iterate over datasets for X, y in datasets: # split into training and test part X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.4, random_state=42 ) x_min, x_max = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5 y_min, y_max = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) # just plot the dataset first cm = plt.cm.RdBu cm_bright = ListedColormap(["#FF0000", "#0000FF"]) ax = plt.subplot(len(datasets), len(classifiers) + 1, i) # Plot the training points ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright) # and testing points ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6) ax.set_xlim(xx.min(), xx.max()) ax.set_ylim(yy.min(), yy.max()) ax.set_xticks(()) ax.set_yticks(()) i += 1 # iterate over classifiers for name, clf in zip(names, classifiers): ax = plt.subplot(len(datasets), len(classifiers) + 1, i) clf.fit(X_train, y_train) score = clf.score(X_test, y_test) # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, x_max] x [y_min, y_max]. 
if hasattr(clf, "decision_function"): Z = clf.decision_function(np.column_stack([xx.ravel(), yy.ravel()])) else: Z = clf.predict_proba(np.column_stack([xx.ravel(), yy.ravel()]))[:, 1] # Put the result into a color plot Z = Z.reshape(xx.shape) ax.contourf(xx, yy, Z, cmap=cm, alpha=0.8) # Plot also the training points ax.scatter( X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright, edgecolors="black", s=25, ) # and testing points ax.scatter( X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6, edgecolors="black", s=25, ) ax.set_xlim(xx.min(), xx.max()) ax.set_ylim(yy.min(), yy.max()) ax.set_xticks(()) ax.set_yticks(()) ax.set_title(name) ax.text( xx.max() - 0.3, yy.min() + 0.3, f"{score:.3f}".lstrip("0"), size=15, horizontalalignment="right", ) i += 1 figure.subplots_adjust(left=0.02, right=0.98) plt.show()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/calibration/plot_calibration_multiclass.py
examples/calibration/plot_calibration_multiclass.py
""" ================================================== Probability Calibration for 3-class classification ================================================== This example illustrates how sigmoid :ref:`calibration <calibration>` changes predicted probabilities for a 3-class classification problem. Illustrated is the standard 2-simplex, where the three corners correspond to the three classes. Arrows point from the probability vectors predicted by an uncalibrated classifier to the probability vectors predicted by the same classifier after sigmoid calibration on a hold-out validation set. Colors indicate the true class of an instance (red: class 1, green: class 2, blue: class 3). """ # %% # Data # ---- # Below, we generate a classification dataset with 2000 samples, 2 features # and 3 target classes. We then split the data as follows: # # * train: 600 samples (for training the classifier) # * valid: 400 samples (for calibrating predicted probabilities) # * test: 1000 samples # # Note that we also create `X_train_valid` and `y_train_valid`, which consists # of both the train and valid subsets. This is used when we only want to train # the classifier but not calibrate the predicted probabilities. # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import numpy as np from sklearn.datasets import make_blobs np.random.seed(0) X, y = make_blobs( n_samples=2000, n_features=2, centers=3, random_state=42, cluster_std=5.0 ) X_train, y_train = X[:600], y[:600] X_valid, y_valid = X[600:1000], y[600:1000] X_train_valid, y_train_valid = X[:1000], y[:1000] X_test, y_test = X[1000:], y[1000:] # %% # Fitting and calibration # ----------------------- # # First, we will train a :class:`~sklearn.ensemble.RandomForestClassifier` # with 25 base estimators (trees) on the concatenated train and validation # data (1000 samples). This is the uncalibrated classifier. from sklearn.ensemble import RandomForestClassifier clf = RandomForestClassifier(n_estimators=25) clf.fit(X_train_valid, y_train_valid) # %% # To train the calibrated classifier, we start with the same # :class:`~sklearn.ensemble.RandomForestClassifier` but train it using only # the train data subset (600 samples) then calibrate, with `method='sigmoid'`, # using the valid data subset (400 samples) in a 2-stage process. from sklearn.calibration import CalibratedClassifierCV from sklearn.frozen import FrozenEstimator clf = RandomForestClassifier(n_estimators=25) clf.fit(X_train, y_train) cal_clf = CalibratedClassifierCV(FrozenEstimator(clf), method="sigmoid") cal_clf.fit(X_valid, y_valid) # %% # Compare probabilities # --------------------- # Below we plot a 2-simplex with arrows showing the change in predicted # probabilities of the test samples. 
import matplotlib.pyplot as plt plt.figure(figsize=(10, 10)) colors = ["r", "g", "b"] clf_probs = clf.predict_proba(X_test) cal_clf_probs = cal_clf.predict_proba(X_test) # Plot arrows for i in range(clf_probs.shape[0]): plt.arrow( clf_probs[i, 0], clf_probs[i, 1], cal_clf_probs[i, 0] - clf_probs[i, 0], cal_clf_probs[i, 1] - clf_probs[i, 1], color=colors[y_test[i]], head_width=1e-2, ) # Plot perfect predictions, at each vertex plt.plot([1.0], [0.0], "ro", ms=20, label="Class 1") plt.plot([0.0], [1.0], "go", ms=20, label="Class 2") plt.plot([0.0], [0.0], "bo", ms=20, label="Class 3") # Plot boundaries of unit simplex plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], "k", label="Simplex") # Annotate points 6 points around the simplex, and mid point inside simplex plt.annotate( r"($\frac{1}{3}$, $\frac{1}{3}$, $\frac{1}{3}$)", xy=(1.0 / 3, 1.0 / 3), xytext=(1.0 / 3, 0.23), xycoords="data", arrowprops=dict(facecolor="black", shrink=0.05), horizontalalignment="center", verticalalignment="center", ) plt.plot([1.0 / 3], [1.0 / 3], "ko", ms=5) plt.annotate( r"($\frac{1}{2}$, $0$, $\frac{1}{2}$)", xy=(0.5, 0.0), xytext=(0.5, 0.1), xycoords="data", arrowprops=dict(facecolor="black", shrink=0.05), horizontalalignment="center", verticalalignment="center", ) plt.annotate( r"($0$, $\frac{1}{2}$, $\frac{1}{2}$)", xy=(0.0, 0.5), xytext=(0.1, 0.5), xycoords="data", arrowprops=dict(facecolor="black", shrink=0.05), horizontalalignment="center", verticalalignment="center", ) plt.annotate( r"($\frac{1}{2}$, $\frac{1}{2}$, $0$)", xy=(0.5, 0.5), xytext=(0.6, 0.6), xycoords="data", arrowprops=dict(facecolor="black", shrink=0.05), horizontalalignment="center", verticalalignment="center", ) plt.annotate( r"($0$, $0$, $1$)", xy=(0, 0), xytext=(0.1, 0.1), xycoords="data", arrowprops=dict(facecolor="black", shrink=0.05), horizontalalignment="center", verticalalignment="center", ) plt.annotate( r"($1$, $0$, $0$)", xy=(1, 0), xytext=(1, 0.1), xycoords="data", arrowprops=dict(facecolor="black", shrink=0.05), horizontalalignment="center", verticalalignment="center", ) plt.annotate( r"($0$, $1$, $0$)", xy=(0, 1), xytext=(0.1, 1), xycoords="data", arrowprops=dict(facecolor="black", shrink=0.05), horizontalalignment="center", verticalalignment="center", ) # Add grid plt.grid(False) for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]: plt.plot([0, x], [x, 0], "k", alpha=0.2) plt.plot([0, 0 + (1 - x) / 2], [x, x + (1 - x) / 2], "k", alpha=0.2) plt.plot([x, x + (1 - x) / 2], [0, 0 + (1 - x) / 2], "k", alpha=0.2) plt.title("Change of predicted probabilities on test samples after sigmoid calibration") plt.xlabel("Probability class 1") plt.ylabel("Probability class 2") plt.xlim(-0.05, 1.05) plt.ylim(-0.05, 1.05) _ = plt.legend(loc="best") # %% # In the figure above, each vertex of the simplex represents # a perfectly predicted class (e.g., 1, 0, 0). The mid point # inside the simplex represents predicting the three classes with equal # probability (i.e., 1/3, 1/3, 1/3). Each arrow starts at the # uncalibrated probabilities and end with the arrow head at the calibrated # probability. The color of the arrow represents the true class of that test # sample. # # The uncalibrated classifier is overly confident in its predictions and # incurs a large :ref:`log loss <log_loss>`. The calibrated classifier incurs # a lower :ref:`log loss <log_loss>` due to two factors. First, notice in the # figure above that the arrows generally point away from the edges of the # simplex, where the probability of one class is 0. 
Second, a large proportion # of the arrows point towards the true class, e.g., green arrows (samples where # the true class is 'green') generally point towards the green vertex. This # results in fewer over-confident predicted probabilities of 0 and at the same # time an increase in the predicted probabilities of the correct class. # Thus, the calibrated classifier produces more accurate predicted probabilities # that incur a lower :ref:`log loss <log_loss>`. # # We can show this objectively by comparing the :ref:`log loss <log_loss>` of # the uncalibrated and calibrated classifiers on the predictions of the 1000 # test samples. Note that an alternative would have been to increase the number # of base estimators (trees) of the # :class:`~sklearn.ensemble.RandomForestClassifier` which would have resulted # in a similar decrease in :ref:`log loss <log_loss>`. from sklearn.metrics import log_loss loss = log_loss(y_test, clf_probs) cal_loss = log_loss(y_test, cal_clf_probs) print("Log-loss of:") print(f" - uncalibrated classifier: {loss:.3f}") print(f" - calibrated classifier: {cal_loss:.3f}") # %% # We can also assess calibration with the Brier score for probabilistic predictions # (lower is better, possible range is [0, 2]): from sklearn.metrics import brier_score_loss loss = brier_score_loss(y_test, clf_probs) cal_loss = brier_score_loss(y_test, cal_clf_probs) print("Brier score of:") print(f" - uncalibrated classifier: {loss:.3f}") print(f" - calibrated classifier: {cal_loss:.3f}") # %% # According to the Brier score, the calibrated classifier is not better than # the original model. # # Finally, we generate a grid of possible uncalibrated probabilities over # the 2-simplex, compute the corresponding calibrated probabilities and # plot arrows for each. The arrows are colored according to the highest # uncalibrated probability. This illustrates the learned calibration map: plt.figure(figsize=(10, 10)) # Generate grid of probability values p1d = np.linspace(0, 1, 20) p0, p1 = np.meshgrid(p1d, p1d) p2 = 1 - p0 - p1 p = np.c_[p0.ravel(), p1.ravel(), p2.ravel()] p = p[p[:, 2] >= 0] # Use the three class-wise calibrators to compute calibrated probabilities calibrated_classifier = cal_clf.calibrated_classifiers_[0] prediction = np.vstack( [ calibrator.predict(this_p) for calibrator, this_p in zip(calibrated_classifier.calibrators, p.T) ] ).T # Re-normalize the calibrated predictions to make sure they stay inside the # simplex. This same renormalization step is performed internally by the # predict method of CalibratedClassifierCV on multiclass problems. 
prediction /= prediction.sum(axis=1)[:, None] # Plot changes in predicted probabilities induced by the calibrators for i in range(prediction.shape[0]): plt.arrow( p[i, 0], p[i, 1], prediction[i, 0] - p[i, 0], prediction[i, 1] - p[i, 1], head_width=1e-2, color=colors[np.argmax(p[i])], ) # Plot the boundaries of the unit simplex plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], "k", label="Simplex") plt.grid(False) for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]: plt.plot([0, x], [x, 0], "k", alpha=0.2) plt.plot([0, 0 + (1 - x) / 2], [x, x + (1 - x) / 2], "k", alpha=0.2) plt.plot([x, x + (1 - x) / 2], [0, 0 + (1 - x) / 2], "k", alpha=0.2) plt.title("Learned sigmoid calibration map") plt.xlabel("Probability class 1") plt.ylabel("Probability class 2") plt.xlim(-0.05, 1.05) plt.ylim(-0.05, 1.05) plt.show() # %% # One can observe that, on average, the calibrator is pushing highly confident # predictions away from the boundaries of the simplex while simultaneously # moving uncertain predictions towards one of three modes, one for each class. # We can also observe that the mapping is not symmetric. Furthermore some # arrows seem to cross class assignment boundaries which is not necessarily # what one would expect from a calibration map as it means that some predicted # classes will change after calibration. # # All in all, the One-vs-Rest multiclass-calibration strategy implemented in # `CalibratedClassifierCV` should not be trusted blindly.
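# %%
# A quick check (a sketch added here, not part of the original example) of the
# earlier remark that simply increasing the number of trees of the uncalibrated
# :class:`~sklearn.ensemble.RandomForestClassifier` also lowers the log loss.
# The choice of 250 trees and the random seed are illustrative assumptions.
clf_large = RandomForestClassifier(n_estimators=250, random_state=0)
clf_large.fit(X_train_valid, y_train_valid)
print(
    "Log-loss of a 250-tree uncalibrated forest: "
    f"{log_loss(y_test, clf_large.predict_proba(X_test)):.3f}"
)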
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/calibration/plot_compare_calibration.py
examples/calibration/plot_compare_calibration.py
""" ======================================== Comparison of Calibration of Classifiers ======================================== Well calibrated classifiers are probabilistic classifiers for which the output of :term:`predict_proba` can be directly interpreted as a confidence level. For instance, a well calibrated (binary) classifier should classify the samples such that for the samples to which it gave a :term:`predict_proba` value close to 0.8, approximately 80% actually belong to the positive class. In this example we will compare the calibration of four different models: :ref:`Logistic_regression`, :ref:`gaussian_naive_bayes`, :ref:`Random Forest Classifier <forest>` and :ref:`Linear SVM <svm_classification>`. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Dataset # ------- # # We will use a synthetic binary classification dataset with 100,000 samples # and 20 features. Of the 20 features, only 2 are informative, 2 are # redundant (random combinations of the informative features) and the # remaining 16 are uninformative (random numbers). # # Of the 100,000 samples, 100 will be used for model fitting and the remaining # for testing. Note that this split is quite unusual: the goal is to obtain # stable calibration curve estimates for models that are potentially prone to # overfitting. In practice, one should rather use cross-validation with more # balanced splits but this would make the code of this example more complicated # to follow. from sklearn.datasets import make_classification from sklearn.model_selection import train_test_split X, y = make_classification( n_samples=100_000, n_features=20, n_informative=2, n_redundant=2, random_state=42 ) train_samples = 100 # Samples used for training the models X_train, X_test, y_train, y_test = train_test_split( X, y, shuffle=False, test_size=100_000 - train_samples, ) # %% # Calibration curves # ------------------ # # Below, we train each of the four models with the small training dataset, then # plot calibration curves (also known as reliability diagrams) using # predicted probabilities of the test dataset. Calibration curves are created # by binning predicted probabilities, then plotting the mean predicted # probability in each bin against the observed frequency ('fraction of # positives'). Below the calibration curve, we plot a histogram showing # the distribution of the predicted probabilities or more specifically, # the number of samples in each predicted probability bin. import numpy as np from sklearn.svm import LinearSVC class NaivelyCalibratedLinearSVC(LinearSVC): """LinearSVC with `predict_proba` method that naively scales `decision_function` output.""" def fit(self, X, y): super().fit(X, y) df = self.decision_function(X) self.df_min_ = df.min() self.df_max_ = df.max() def predict_proba(self, X): """Min-max scale output of `decision_function` to [0,1].""" df = self.decision_function(X) calibrated_df = (df - self.df_min_) / (self.df_max_ - self.df_min_) proba_pos_class = np.clip(calibrated_df, 0, 1) proba_neg_class = 1 - proba_pos_class proba = np.c_[proba_neg_class, proba_pos_class] return proba # %% from sklearn.calibration import CalibrationDisplay from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegressionCV from sklearn.naive_bayes import GaussianNB # Define the classifiers to be compared in the study. # # Note that we use a variant of the logistic regression model that can # automatically tune its regularization parameter. 
# # For a fair comparison, we should run a hyper-parameter search for all the # classifiers but we don't do it here for the sake of keeping the example code # concise and fast to execute. lr = LogisticRegressionCV( Cs=np.logspace(-6, 6, 101), cv=10, l1_ratios=(0,), scoring="neg_log_loss", max_iter=1_000, use_legacy_attributes=False, ) gnb = GaussianNB() svc = NaivelyCalibratedLinearSVC(C=1.0) rfc = RandomForestClassifier(random_state=42) clf_list = [ (lr, "Logistic Regression"), (gnb, "Naive Bayes"), (svc, "SVC"), (rfc, "Random forest"), ] # %% import matplotlib.pyplot as plt from matplotlib.gridspec import GridSpec fig = plt.figure(figsize=(10, 10)) gs = GridSpec(4, 2) colors = plt.get_cmap("Dark2") ax_calibration_curve = fig.add_subplot(gs[:2, :2]) calibration_displays = {} markers = ["^", "v", "s", "o"] for i, (clf, name) in enumerate(clf_list): clf.fit(X_train, y_train) display = CalibrationDisplay.from_estimator( clf, X_test, y_test, n_bins=10, name=name, ax=ax_calibration_curve, color=colors(i), marker=markers[i], ) calibration_displays[name] = display ax_calibration_curve.grid() ax_calibration_curve.set_title("Calibration plots") # Add histogram grid_positions = [(2, 0), (2, 1), (3, 0), (3, 1)] for i, (_, name) in enumerate(clf_list): row, col = grid_positions[i] ax = fig.add_subplot(gs[row, col]) ax.hist( calibration_displays[name].y_prob, range=(0, 1), bins=10, label=name, color=colors(i), ) ax.set(title=name, xlabel="Mean predicted probability", ylabel="Count") plt.tight_layout() plt.show() # %% # # Analysis of the results # ----------------------- # # :class:`~sklearn.linear_model.LogisticRegressionCV` returns reasonably well # calibrated predictions despite the small training set size: its reliability # curve is the closest to the diagonal among the four models. # # Logistic regression is trained by minimizing the log-loss which is a strictly # proper scoring rule: in the limit of infinite training data, strictly proper # scoring rules are minimized by the model that predicts the true conditional # probabilities. That (hypothetical) model would therefore be perfectly # calibrated. However, using a proper scoring rule as training objective is not # sufficient to guarantee a well-calibrated model by itself: even with a very # large training set, logistic regression could still be poorly calibrated, if # it was too strongly regularized or if the choice and preprocessing of input # features made this model mis-specified (e.g. if the true decision boundary of # the dataset is a highly non-linear function of the input features). # # In this example the training set was intentionally kept very small. In this # setting, optimizing the log-loss can still lead to poorly calibrated models # because of overfitting. To mitigate this, the # :class:`~sklearn.linear_model.LogisticRegressionCV` class was configured to # tune the `C` regularization parameter to also minimize the log-loss via inner # cross-validation so as to find the best compromise for this model in the # small training set setting. # # Because of the finite training set size and the lack of guarantee for # well-specification, we observe that the calibration curve of the logistic # regression model is close but not perfectly on the diagonal. The shape of the # calibration curve of this model can be interpreted as slightly # under-confident: the predicted probabilities are a bit too close to 0.5 # compared to the true fraction of positive samples. 
# # The other methods all output less well calibrated probabilities: # # * :class:`~sklearn.naive_bayes.GaussianNB` tends to push probabilities to 0 # or 1 (see histogram) on this particular dataset (over-confidence). This is # mainly because the naive Bayes equation only provides correct estimates of # probabilities when the assumption that features are conditionally # independent holds [2]_. However, features can be correlated and this is the case # with this dataset, which contains 2 features generated as random linear # combinations of the informative features. These correlated features are # effectively being 'counted twice', resulting in pushing the predicted # probabilities towards 0 and 1 [3]_. Note, however, that changing the seed # used to generate the dataset can lead to widely varying results for the # naive Bayes estimator. # # * :class:`~sklearn.svm.LinearSVC` is not a natural probabilistic classifier. # In order to interpret its prediction as such, we naively scaled the output # of the :term:`decision_function` into [0, 1] by applying min-max scaling in # the `NaivelyCalibratedLinearSVC` wrapper class defined above. This # estimator shows a typical sigmoid-shaped calibration curve on this data: # predictions larger than 0.5 correspond to samples with an even larger # effective positive class fraction (above the diagonal), while predictions # below 0.5 correspond to even lower positive class fractions (below the # diagonal). Such under-confident predictions are typical for maximum-margin # methods [1]_. # # * :class:`~sklearn.ensemble.RandomForestClassifier`'s prediction histogram # shows peaks at approx. 0.2 and 0.9 probability, while probabilities close to # 0 or 1 are very rare. An explanation for this is given by [1]_: # "Methods such as bagging and random forests that average # predictions from a base set of models can have difficulty making # predictions near 0 and 1 because variance in the underlying base models # will bias predictions that should be near zero or one away from these # values. Because predictions are restricted to the interval [0, 1], errors # caused by variance tend to be one-sided near zero and one. For example, if # a model should predict p = 0 for a case, the only way bagging can achieve # this is if all bagged trees predict zero. If we add noise to the trees that # bagging is averaging over, this noise will cause some trees to predict # values larger than 0 for this case, thus moving the average prediction of # the bagged ensemble away from 0. We observe this effect most strongly with # random forests because the base-level trees trained with random forests # have relatively high variance due to feature subsetting." This effect can # make random forests under-confident. Despite this possible bias, note that # the trees themselves are fit by minimizing either the Gini or Entropy # criterion, both of which lead to splits that minimize proper scoring rules: # the Brier score or the log-loss, respectively. See :ref:`the user guide # <tree_mathematical_formulation>` for more details. This can explain why # this model shows a good enough calibration curve on this particular example # dataset. Indeed, the Random Forest model is not significantly more # under-confident than the Logistic Regression model. # # Feel free to re-run this example with different random seeds and other # dataset generation parameters to see how different the calibration plots can # look. 
In general, Logistic Regression and Random Forest will tend to be the # best calibrated classifiers, while SVC will often display the typical # under-confident miscalibration. The naive Bayes model is also often poorly # calibrated but the general shape of its calibration curve can vary widely # depending on the dataset. # # Finally, note that for some dataset seeds, all models are poorly calibrated, # even when tuning the regularization parameter as above. This is bound to # happen when the training size is too small or when the model is severely # misspecified. # # References # ---------- # # .. [1] `Predicting Good Probabilities with Supervised Learning # <https://dl.acm.org/doi/pdf/10.1145/1102351.1102430>`_, A. # Niculescu-Mizil & R. Caruana, ICML 2005 # # .. [2] `Beyond independence: Conditions for the optimality of the simple # Bayesian classifier # <https://www.ics.uci.edu/~pazzani/Publications/mlc96-pedro.pdf>`_ # Domingos, P., & Pazzani, M., Proc. 13th Intl. Conf. Machine Learning. # 1996. # # .. [3] `Obtaining calibrated probability estimates from decision trees and # naive Bayesian classifiers # <https://cseweb.ucsd.edu/~elkan/calibrated.pdf>`_ # Zadrozny, Bianca, and Charles Elkan. Icml. Vol. 1. 2001.
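# %%
# As a small quantitative companion to the qualitative analysis above (a sketch
# added here, not part of the original example), the two proper scoring rules can
# be computed for the four fitted models; lower values indicate better
# probabilistic predictions.
from sklearn.metrics import brier_score_loss, log_loss

for clf, name in clf_list:
    proba_pos = clf.predict_proba(X_test)[:, 1]
    print(
        f"{name:20s} Brier: {brier_score_loss(y_test, proba_pos):.3f}  "
        f"log-loss: {log_loss(y_test, proba_pos):.3f}"
    )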
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/calibration/plot_calibration_curve.py
examples/calibration/plot_calibration_curve.py
""" ============================== Probability Calibration curves ============================== When performing classification one often wants to predict not only the class label, but also the associated probability. This probability gives some kind of confidence on the prediction. This example demonstrates how to visualize how well calibrated the predicted probabilities are using calibration curves, also known as reliability diagrams. Calibration of an uncalibrated classifier will also be demonstrated. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Dataset # ------- # # We will use a synthetic binary classification dataset with 100,000 samples # and 20 features. Of the 20 features, only 2 are informative, 10 are # redundant (random combinations of the informative features) and the # remaining 8 are uninformative (random numbers). Of the 100,000 samples, 1,000 # will be used for model fitting and the rest for testing. from sklearn.datasets import make_classification from sklearn.model_selection import train_test_split X, y = make_classification( n_samples=100_000, n_features=20, n_informative=2, n_redundant=10, random_state=42 ) X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.99, random_state=42 ) # %% # Calibration curves # ------------------ # # Gaussian Naive Bayes # ^^^^^^^^^^^^^^^^^^^^ # # First, we will compare: # # * :class:`~sklearn.linear_model.LogisticRegression` (used as baseline # since very often, properly regularized logistic regression is well # calibrated by default thanks to the use of the log-loss) # * Uncalibrated :class:`~sklearn.naive_bayes.GaussianNB` # * :class:`~sklearn.naive_bayes.GaussianNB` with isotonic and sigmoid # calibration (see :ref:`User Guide <calibration>`) # # Calibration curves for all 4 conditions are plotted below, with the average # predicted probability for each bin on the x-axis and the fraction of positive # classes in each bin on the y-axis. 
import matplotlib.pyplot as plt from matplotlib.gridspec import GridSpec from sklearn.calibration import CalibratedClassifierCV, CalibrationDisplay from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB lr = LogisticRegression(C=1.0) gnb = GaussianNB() gnb_isotonic = CalibratedClassifierCV(gnb, cv=2, method="isotonic") gnb_sigmoid = CalibratedClassifierCV(gnb, cv=2, method="sigmoid") clf_list = [ (lr, "Logistic"), (gnb, "Naive Bayes"), (gnb_isotonic, "Naive Bayes + Isotonic"), (gnb_sigmoid, "Naive Bayes + Sigmoid"), ] # %% fig = plt.figure(figsize=(10, 10)) gs = GridSpec(4, 2) colors = plt.get_cmap("Dark2") ax_calibration_curve = fig.add_subplot(gs[:2, :2]) calibration_displays = {} for i, (clf, name) in enumerate(clf_list): clf.fit(X_train, y_train) display = CalibrationDisplay.from_estimator( clf, X_test, y_test, n_bins=10, name=name, ax=ax_calibration_curve, color=colors(i), ) calibration_displays[name] = display ax_calibration_curve.grid() ax_calibration_curve.set_title("Calibration plots (Naive Bayes)") # Add histogram grid_positions = [(2, 0), (2, 1), (3, 0), (3, 1)] for i, (_, name) in enumerate(clf_list): row, col = grid_positions[i] ax = fig.add_subplot(gs[row, col]) ax.hist( calibration_displays[name].y_prob, range=(0, 1), bins=10, label=name, color=colors(i), ) ax.set(title=name, xlabel="Mean predicted probability", ylabel="Count") plt.tight_layout() plt.show() # %% # Uncalibrated :class:`~sklearn.naive_bayes.GaussianNB` is poorly calibrated # because of # the redundant features which violate the assumption of feature-independence # and result in an overly confident classifier, which is indicated by the # typical transposed-sigmoid curve. Calibration of the probabilities of # :class:`~sklearn.naive_bayes.GaussianNB` with :ref:`isotonic` can fix # this issue as can be seen from the nearly diagonal calibration curve. # :ref:`Sigmoid regression <sigmoid_regressor>` also improves calibration # slightly, # albeit not as strongly as the non-parametric isotonic regression. This can be # attributed to the fact that we have plenty of calibration data such that the # greater flexibility of the non-parametric model can be exploited. # # Below we will make a quantitative analysis considering several classification # metrics: :ref:`brier_score_loss`, :ref:`log_loss`, # :ref:`precision, recall, F1 score <precision_recall_f_measure_metrics>` and # :ref:`ROC AUC <roc_metrics>`. 
from collections import defaultdict import pandas as pd from sklearn.metrics import ( brier_score_loss, f1_score, log_loss, precision_score, recall_score, roc_auc_score, ) scores = defaultdict(list) for i, (clf, name) in enumerate(clf_list): clf.fit(X_train, y_train) y_prob = clf.predict_proba(X_test) y_pred = clf.predict(X_test) scores["Classifier"].append(name) for metric in [brier_score_loss, log_loss, roc_auc_score]: score_name = metric.__name__.replace("_", " ").replace("score", "").capitalize() scores[score_name].append(metric(y_test, y_prob[:, 1])) for metric in [precision_score, recall_score, f1_score]: score_name = metric.__name__.replace("_", " ").replace("score", "").capitalize() scores[score_name].append(metric(y_test, y_pred)) score_df = pd.DataFrame(scores).set_index("Classifier") score_df.round(decimals=3) score_df # %% # Notice that although calibration improves the :ref:`brier_score_loss` (a # metric composed of a calibration term and a refinement term) and :ref:`log_loss`, it does not # significantly alter the prediction accuracy measures (precision, recall and # F1 score). # This is because calibration should not significantly change prediction # probabilities at the location of the decision threshold (at x = 0.5 on the # graph). Calibration should, however, make the predicted probabilities more # accurate and thus more useful for making allocation decisions under # uncertainty. # Further, ROC AUC should not change at all because calibration is a # monotonic transformation. Indeed, no rank metrics are affected by # calibration. # # Linear support vector classifier # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # Next, we will compare: # # * :class:`~sklearn.linear_model.LogisticRegression` (baseline) # * Uncalibrated :class:`~sklearn.svm.LinearSVC`. Since SVC does not output # probabilities by default, we naively scale the output of the # :term:`decision_function` into [0, 1] by applying min-max scaling. 
# * :class:`~sklearn.svm.LinearSVC` with isotonic and sigmoid # calibration (see :ref:`User Guide <calibration>`) import numpy as np from sklearn.svm import LinearSVC class NaivelyCalibratedLinearSVC(LinearSVC): """LinearSVC with `predict_proba` method that naively scales `decision_function` output for binary classification.""" def fit(self, X, y): super().fit(X, y) df = self.decision_function(X) self.df_min_ = df.min() self.df_max_ = df.max() def predict_proba(self, X): """Min-max scale output of `decision_function` to [0, 1].""" df = self.decision_function(X) calibrated_df = (df - self.df_min_) / (self.df_max_ - self.df_min_) proba_pos_class = np.clip(calibrated_df, 0, 1) proba_neg_class = 1 - proba_pos_class proba = np.c_[proba_neg_class, proba_pos_class] return proba # %% lr = LogisticRegression(C=1.0) svc = NaivelyCalibratedLinearSVC(max_iter=10_000) svc_isotonic = CalibratedClassifierCV(svc, cv=2, method="isotonic") svc_sigmoid = CalibratedClassifierCV(svc, cv=2, method="sigmoid") clf_list = [ (lr, "Logistic"), (svc, "SVC"), (svc_isotonic, "SVC + Isotonic"), (svc_sigmoid, "SVC + Sigmoid"), ] # %% fig = plt.figure(figsize=(10, 10)) gs = GridSpec(4, 2) ax_calibration_curve = fig.add_subplot(gs[:2, :2]) calibration_displays = {} for i, (clf, name) in enumerate(clf_list): clf.fit(X_train, y_train) display = CalibrationDisplay.from_estimator( clf, X_test, y_test, n_bins=10, name=name, ax=ax_calibration_curve, color=colors(i), ) calibration_displays[name] = display ax_calibration_curve.grid() ax_calibration_curve.set_title("Calibration plots (SVC)") # Add histogram grid_positions = [(2, 0), (2, 1), (3, 0), (3, 1)] for i, (_, name) in enumerate(clf_list): row, col = grid_positions[i] ax = fig.add_subplot(gs[row, col]) ax.hist( calibration_displays[name].y_prob, range=(0, 1), bins=10, label=name, color=colors(i), ) ax.set(title=name, xlabel="Mean predicted probability", ylabel="Count") plt.tight_layout() plt.show() # %% # :class:`~sklearn.svm.LinearSVC` shows the opposite # behavior to :class:`~sklearn.naive_bayes.GaussianNB`; the calibration # curve has a sigmoid shape, which is typical for an under-confident # classifier. In the case of :class:`~sklearn.svm.LinearSVC`, this is caused # by the margin property of the hinge loss, which focuses on samples that are # close to the decision boundary (support vectors). Samples that are far # away from the decision boundary do not impact the hinge loss. It thus makes # sense that :class:`~sklearn.svm.LinearSVC` does not try to separate samples # in the high confidence regions. This leads to flatter calibration # curves near 0 and 1 and is empirically shown with a variety of datasets # in Niculescu-Mizil & Caruana [1]_. # # Both kinds of calibration (sigmoid and isotonic) can fix this issue and # yield similar results. # # As before, we show the :ref:`brier_score_loss`, :ref:`log_loss`, # :ref:`precision, recall, F1 score <precision_recall_f_measure_metrics>` and # :ref:`ROC AUC <roc_metrics>`. 
scores = defaultdict(list) for i, (clf, name) in enumerate(clf_list): clf.fit(X_train, y_train) y_prob = clf.predict_proba(X_test) y_pred = clf.predict(X_test) scores["Classifier"].append(name) for metric in [brier_score_loss, log_loss, roc_auc_score]: score_name = metric.__name__.replace("_", " ").replace("score", "").capitalize() scores[score_name].append(metric(y_test, y_prob[:, 1])) for metric in [precision_score, recall_score, f1_score]: score_name = metric.__name__.replace("_", " ").replace("score", "").capitalize() scores[score_name].append(metric(y_test, y_pred)) score_df = pd.DataFrame(scores).set_index("Classifier") score_df.round(decimals=3) score_df # %% # As with :class:`~sklearn.naive_bayes.GaussianNB` above, calibration improves # both :ref:`brier_score_loss` and :ref:`log_loss` but does not alter the # prediction accuracy measures (precision, recall and F1 score) much. # # Summary # ------- # # Parametric sigmoid calibration can deal with situations where the calibration # curve of the base classifier is sigmoid (e.g., for # :class:`~sklearn.svm.LinearSVC`) but not where it is transposed-sigmoid # (e.g., :class:`~sklearn.naive_bayes.GaussianNB`). Non-parametric # isotonic calibration can deal with both situations but may require more # data to produce good results. # # References # ---------- # # .. [1] `Predicting Good Probabilities with Supervised Learning # <https://dl.acm.org/doi/pdf/10.1145/1102351.1102430>`_, # A. Niculescu-Mizil & R. Caruana, ICML 2005
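# %%
# For intuition on the summary above, a standalone sketch (added here, not part
# of the original example) of the parametric form fitted by sigmoid (Platt)
# calibration: p(y=1 | f) = 1 / (1 + exp(A * f + B)), where f is the classifier
# output and A, B are fitted on calibration data. The A and B values below are
# made-up for illustration; being monotone and sigmoid-shaped, this map can undo
# a sigmoid-shaped miscalibration but not a transposed-sigmoid one.
import numpy as np


def platt_sigmoid(f, A, B):
    return 1.0 / (1.0 + np.exp(A * f + B))


print(platt_sigmoid(np.array([-2.0, 0.0, 2.0]), A=-1.5, B=0.1).round(3))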
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/calibration/plot_calibration.py
examples/calibration/plot_calibration.py
""" ====================================== Probability calibration of classifiers ====================================== When performing classification you often want to predict not only the class label, but also the associated probability. This probability gives you some kind of confidence on the prediction. However, not all classifiers provide well-calibrated probabilities, some being over-confident while others being under-confident. Thus, a separate calibration of predicted probabilities is often desirable as a postprocessing. This example illustrates two different methods for this calibration and evaluates the quality of the returned probabilities using Brier's score (see https://en.wikipedia.org/wiki/Brier_score). Compared are the estimated probability using a Gaussian naive Bayes classifier without calibration, with a sigmoid calibration, and with a non-parametric isotonic calibration. One can observe that only the non-parametric model is able to provide a probability calibration that returns probabilities close to the expected 0.5 for most of the samples belonging to the middle cluster with heterogeneous labels. This results in a significantly improved Brier score. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Generate synthetic dataset # -------------------------- import numpy as np from sklearn.datasets import make_blobs from sklearn.model_selection import train_test_split n_samples = 50000 # Generate 3 blobs with 2 classes where the second blob contains # half positive samples and half negative samples. Probability in this # blob is therefore 0.5. centers = [(-5, -5), (0, 0), (5, 5)] X, y = make_blobs(n_samples=n_samples, centers=centers, shuffle=False, random_state=42) y[: n_samples // 2] = 0 y[n_samples // 2 :] = 1 sample_weight = np.random.RandomState(42).rand(y.shape[0]) # split train, test for calibration X_train, X_test, y_train, y_test, sw_train, sw_test = train_test_split( X, y, sample_weight, test_size=0.9, random_state=42 ) # %% # Gaussian Naive-Bayes # -------------------- from sklearn.calibration import CalibratedClassifierCV from sklearn.metrics import brier_score_loss from sklearn.naive_bayes import GaussianNB # With no calibration clf = GaussianNB() clf.fit(X_train, y_train) # GaussianNB itself does not support sample-weights prob_pos_clf = clf.predict_proba(X_test)[:, 1] # With isotonic calibration clf_isotonic = CalibratedClassifierCV(clf, cv=2, method="isotonic") clf_isotonic.fit(X_train, y_train, sample_weight=sw_train) prob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1] # With sigmoid calibration clf_sigmoid = CalibratedClassifierCV(clf, cv=2, method="sigmoid") clf_sigmoid.fit(X_train, y_train, sample_weight=sw_train) prob_pos_sigmoid = clf_sigmoid.predict_proba(X_test)[:, 1] print("Brier score losses: (the smaller the better)") clf_score = brier_score_loss(y_test, prob_pos_clf, sample_weight=sw_test) print("No calibration: %1.3f" % clf_score) clf_isotonic_score = brier_score_loss(y_test, prob_pos_isotonic, sample_weight=sw_test) print("With isotonic calibration: %1.3f" % clf_isotonic_score) clf_sigmoid_score = brier_score_loss(y_test, prob_pos_sigmoid, sample_weight=sw_test) print("With sigmoid calibration: %1.3f" % clf_sigmoid_score) # %% # Plot data and the predicted probabilities # ----------------------------------------- import matplotlib.pyplot as plt from matplotlib import cm plt.figure() y_unique = np.unique(y) colors = cm.rainbow(np.linspace(0.0, 1.0, y_unique.size)) for this_y, color in 
zip(y_unique, colors): this_X = X_train[y_train == this_y] this_sw = sw_train[y_train == this_y] plt.scatter( this_X[:, 0], this_X[:, 1], s=this_sw * 50, c=color[np.newaxis, :], alpha=0.5, edgecolor="k", label="Class %s" % this_y, ) plt.legend(loc="best") plt.title("Data") plt.figure() order = np.lexsort((prob_pos_clf,)) plt.plot(prob_pos_clf[order], "r", label="No calibration (%1.3f)" % clf_score) plt.plot( prob_pos_isotonic[order], "g", linewidth=3, label="Isotonic calibration (%1.3f)" % clf_isotonic_score, ) plt.plot( prob_pos_sigmoid[order], "b", linewidth=3, label="Sigmoid calibration (%1.3f)" % clf_sigmoid_score, ) plt.plot( np.linspace(0, y_test.size, 51)[1::2], y_test[order].reshape(25, -1).mean(1), "k", linewidth=3, label=r"Empirical", ) plt.ylim([-0.05, 1.05]) plt.xlabel("Instances sorted according to predicted probability (uncalibrated GNB)") plt.ylabel("P(y=1)") plt.legend(loc="upper left") plt.title("Gaussian naive Bayes probabilities") plt.show()
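# %%
# Appendix (editor's sketch)
# --------------------------
# As a quick numeric complement to the plot above, one can bin the predicted
# probabilities with :func:`~sklearn.calibration.calibration_curve` and compare
# the mean predicted probability to the observed fraction of positives in each
# bin. This cell is an illustrative addition, not part of the original example;
# the loop variable names are ad hoc.
from sklearn.calibration import calibration_curve

for label, prob_pos in [
    ("No calibration", prob_pos_clf),
    ("Isotonic calibration", prob_pos_isotonic),
    ("Sigmoid calibration", prob_pos_sigmoid),
]:
    frac_pos, mean_pred = calibration_curve(y_test, prob_pos, n_bins=10)
    # For a well-calibrated model the two columns stay close to each other.
    print(label)
    print(np.column_stack([mean_pred, frac_pos]).round(3))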
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/doc/api_reference.py
doc/api_reference.py
"""Configuration for the API reference documentation.""" def _get_guide(*refs, is_developer=False): """Get the rst to refer to user/developer guide. `refs` is several references that can be used in the :ref:`...` directive. """ if len(refs) == 1: ref_desc = f":ref:`{refs[0]}` section" elif len(refs) == 2: ref_desc = f":ref:`{refs[0]}` and :ref:`{refs[1]}` sections" else: ref_desc = ", ".join(f":ref:`{ref}`" for ref in refs[:-1]) ref_desc += f", and :ref:`{refs[-1]}` sections" guide_name = "Developer" if is_developer else "User" return f"**{guide_name} guide.** See the {ref_desc} for further details." def _get_submodule(module_name, submodule_name): """Get the submodule docstring and automatically add the hook. `module_name` is e.g. `sklearn.feature_extraction`, and `submodule_name` is e.g. `image`, so we get the docstring and hook for `sklearn.feature_extraction.image` submodule. `module_name` is used to reset the current module because autosummary automatically changes the current module. """ lines = [ f".. automodule:: {module_name}.{submodule_name}", f".. currentmodule:: {module_name}", ] return "\n\n".join(lines) """ CONFIGURING API_REFERENCE ========================= API_REFERENCE maps each module name to a dictionary that consists of the following components: short_summary (required) The text to be printed on the index page; it has nothing to do the API reference page of each module. description (required, `None` if not needed) The additional description for the module to be placed under the module docstring, before the sections start. sections (required) A list of sections, each of which consists of: - title (required, `None` if not needed): the section title, commonly it should not be `None` except for the first section of a module, - description (optional): the optional additional description for the section, - autosummary (required): an autosummary block, assuming current module is the current module name. Essentially, the rendered page would look like the following: |---------------------------------------------------------------------------------| | {{ module_name }} | | ================= | | {{ module_docstring }} | | {{ description }} | | | | {{ section_title_1 }} <-------------- Optional if one wants the first | | --------------------- section to directly follow | | {{ section_description_1 }} without a second-level heading. | | {{ section_autosummary_1 }} | | | | {{ section_title_2 }} | | --------------------- | | {{ section_description_2 }} | | {{ section_autosummary_2 }} | | | | More sections... | |---------------------------------------------------------------------------------| Hooks will be automatically generated for each module and each section. For a module, e.g., `sklearn.feature_extraction`, the hook would be `feature_extraction_ref`; for a section, e.g., "From text" under `sklearn.feature_extraction`, the hook would be `feature_extraction_ref-from-text`. However, note that a better way is to refer using the :mod: directive, e.g., :mod:`sklearn.feature_extraction` for the module and :mod:`sklearn.feature_extraction.text` for the section. Only in case that a section is not a particular submodule does the hook become useful, e.g., the "Loaders" section under `sklearn.datasets`. 
""" API_REFERENCE = { "sklearn": { "short_summary": "Settings and information tools.", "description": None, "sections": [ { "title": None, "autosummary": [ "config_context", "get_config", "set_config", "show_versions", ], }, ], }, "sklearn.base": { "short_summary": "Base classes and utility functions.", "description": None, "sections": [ { "title": None, "autosummary": [ "BaseEstimator", "BiclusterMixin", "ClassNamePrefixFeaturesOutMixin", "ClassifierMixin", "ClusterMixin", "DensityMixin", "MetaEstimatorMixin", "OneToOneFeatureMixin", "OutlierMixin", "RegressorMixin", "TransformerMixin", "clone", "is_classifier", "is_clusterer", "is_regressor", "is_outlier_detector", ], } ], }, "sklearn.calibration": { "short_summary": "Probability calibration.", "description": _get_guide("calibration"), "sections": [ { "title": None, "autosummary": ["CalibratedClassifierCV", "calibration_curve"], }, { "title": "Visualization", "autosummary": ["CalibrationDisplay"], }, ], }, "sklearn.cluster": { "short_summary": "Clustering.", "description": _get_guide("clustering", "biclustering"), "sections": [ { "title": None, "autosummary": [ "AffinityPropagation", "AgglomerativeClustering", "Birch", "BisectingKMeans", "DBSCAN", "FeatureAgglomeration", "HDBSCAN", "KMeans", "MeanShift", "MiniBatchKMeans", "OPTICS", "SpectralBiclustering", "SpectralClustering", "SpectralCoclustering", "affinity_propagation", "cluster_optics_dbscan", "cluster_optics_xi", "compute_optics_graph", "dbscan", "estimate_bandwidth", "k_means", "kmeans_plusplus", "mean_shift", "spectral_clustering", "ward_tree", ], }, ], }, "sklearn.compose": { "short_summary": "Composite estimators.", "description": _get_guide("combining_estimators"), "sections": [ { "title": None, "autosummary": [ "ColumnTransformer", "TransformedTargetRegressor", "make_column_selector", "make_column_transformer", ], }, ], }, "sklearn.covariance": { "short_summary": "Covariance estimation.", "description": _get_guide("covariance"), "sections": [ { "title": None, "autosummary": [ "EllipticEnvelope", "EmpiricalCovariance", "GraphicalLasso", "GraphicalLassoCV", "LedoitWolf", "MinCovDet", "OAS", "ShrunkCovariance", "empirical_covariance", "graphical_lasso", "ledoit_wolf", "ledoit_wolf_shrinkage", "oas", "shrunk_covariance", ], }, ], }, "sklearn.cross_decomposition": { "short_summary": "Cross decomposition.", "description": _get_guide("cross_decomposition"), "sections": [ { "title": None, "autosummary": ["CCA", "PLSCanonical", "PLSRegression", "PLSSVD"], }, ], }, "sklearn.datasets": { "short_summary": "Datasets.", "description": _get_guide("datasets"), "sections": [ { "title": "Loaders", "autosummary": [ "clear_data_home", "dump_svmlight_file", "fetch_20newsgroups", "fetch_20newsgroups_vectorized", "fetch_california_housing", "fetch_covtype", "fetch_file", "fetch_kddcup99", "fetch_lfw_pairs", "fetch_lfw_people", "fetch_olivetti_faces", "fetch_openml", "fetch_rcv1", "fetch_species_distributions", "get_data_home", "load_breast_cancer", "load_diabetes", "load_digits", "load_files", "load_iris", "load_linnerud", "load_sample_image", "load_sample_images", "load_svmlight_file", "load_svmlight_files", "load_wine", ], }, { "title": "Sample generators", "autosummary": [ "make_biclusters", "make_blobs", "make_checkerboard", "make_circles", "make_classification", "make_friedman1", "make_friedman2", "make_friedman3", "make_gaussian_quantiles", "make_hastie_10_2", "make_low_rank_matrix", "make_moons", "make_multilabel_classification", "make_regression", "make_s_curve", 
"make_sparse_coded_signal", "make_sparse_spd_matrix", "make_sparse_uncorrelated", "make_spd_matrix", "make_swiss_roll", ], }, ], }, "sklearn.decomposition": { "short_summary": "Matrix decomposition.", "description": _get_guide("decompositions"), "sections": [ { "title": None, "autosummary": [ "DictionaryLearning", "FactorAnalysis", "FastICA", "IncrementalPCA", "KernelPCA", "LatentDirichletAllocation", "MiniBatchDictionaryLearning", "MiniBatchNMF", "MiniBatchSparsePCA", "NMF", "PCA", "SparseCoder", "SparsePCA", "TruncatedSVD", "dict_learning", "dict_learning_online", "fastica", "non_negative_factorization", "sparse_encode", ], }, ], }, "sklearn.discriminant_analysis": { "short_summary": "Discriminant analysis.", "description": _get_guide("lda_qda"), "sections": [ { "title": None, "autosummary": [ "LinearDiscriminantAnalysis", "QuadraticDiscriminantAnalysis", ], }, ], }, "sklearn.dummy": { "short_summary": "Dummy estimators.", "description": _get_guide("model_evaluation"), "sections": [ { "title": None, "autosummary": ["DummyClassifier", "DummyRegressor"], }, ], }, "sklearn.ensemble": { "short_summary": "Ensemble methods.", "description": _get_guide("ensemble"), "sections": [ { "title": None, "autosummary": [ "AdaBoostClassifier", "AdaBoostRegressor", "BaggingClassifier", "BaggingRegressor", "ExtraTreesClassifier", "ExtraTreesRegressor", "GradientBoostingClassifier", "GradientBoostingRegressor", "HistGradientBoostingClassifier", "HistGradientBoostingRegressor", "IsolationForest", "RandomForestClassifier", "RandomForestRegressor", "RandomTreesEmbedding", "StackingClassifier", "StackingRegressor", "VotingClassifier", "VotingRegressor", ], }, ], }, "sklearn.exceptions": { "short_summary": "Exceptions and warnings.", "description": None, "sections": [ { "title": None, "autosummary": [ "ConvergenceWarning", "DataConversionWarning", "DataDimensionalityWarning", "EfficiencyWarning", "FitFailedWarning", "InconsistentVersionWarning", "NotFittedError", "UndefinedMetricWarning", "EstimatorCheckFailedWarning", ], }, ], }, "sklearn.experimental": { "short_summary": "Experimental tools.", "description": None, "sections": [ { "title": None, "autosummary": ["enable_halving_search_cv", "enable_iterative_imputer"], }, ], }, "sklearn.feature_extraction": { "short_summary": "Feature extraction.", "description": _get_guide("feature_extraction"), "sections": [ { "title": None, "autosummary": ["DictVectorizer", "FeatureHasher"], }, { "title": "From images", "description": _get_submodule("sklearn.feature_extraction", "image"), "autosummary": [ "image.PatchExtractor", "image.extract_patches_2d", "image.grid_to_graph", "image.img_to_graph", "image.reconstruct_from_patches_2d", ], }, { "title": "From text", "description": _get_submodule("sklearn.feature_extraction", "text"), "autosummary": [ "text.CountVectorizer", "text.HashingVectorizer", "text.TfidfTransformer", "text.TfidfVectorizer", ], }, ], }, "sklearn.feature_selection": { "short_summary": "Feature selection.", "description": _get_guide("feature_selection"), "sections": [ { "title": None, "autosummary": [ "GenericUnivariateSelect", "RFE", "RFECV", "SelectFdr", "SelectFpr", "SelectFromModel", "SelectFwe", "SelectKBest", "SelectPercentile", "SelectorMixin", "SequentialFeatureSelector", "VarianceThreshold", "chi2", "f_classif", "f_regression", "mutual_info_classif", "mutual_info_regression", "r_regression", ], }, ], }, "sklearn.frozen": { "short_summary": "Frozen estimators.", "description": None, "sections": [ { "title": None, "autosummary": ["FrozenEstimator"], 
}, ], }, "sklearn.gaussian_process": { "short_summary": "Gaussian processes.", "description": _get_guide("gaussian_process"), "sections": [ { "title": None, "autosummary": [ "GaussianProcessClassifier", "GaussianProcessRegressor", ], }, { "title": "Kernels", "description": _get_submodule("sklearn.gaussian_process", "kernels"), "autosummary": [ "kernels.CompoundKernel", "kernels.ConstantKernel", "kernels.DotProduct", "kernels.ExpSineSquared", "kernels.Exponentiation", "kernels.Hyperparameter", "kernels.Kernel", "kernels.Matern", "kernels.PairwiseKernel", "kernels.Product", "kernels.RBF", "kernels.RationalQuadratic", "kernels.Sum", "kernels.WhiteKernel", ], }, ], }, "sklearn.impute": { "short_summary": "Imputation.", "description": _get_guide("impute"), "sections": [ { "title": None, "autosummary": [ "IterativeImputer", "KNNImputer", "MissingIndicator", "SimpleImputer", ], }, ], }, "sklearn.inspection": { "short_summary": "Inspection.", "description": _get_guide("inspection"), "sections": [ { "title": None, "autosummary": ["partial_dependence", "permutation_importance"], }, { "title": "Plotting", "autosummary": ["DecisionBoundaryDisplay", "PartialDependenceDisplay"], }, ], }, "sklearn.isotonic": { "short_summary": "Isotonic regression.", "description": _get_guide("isotonic"), "sections": [ { "title": None, "autosummary": [ "IsotonicRegression", "check_increasing", "isotonic_regression", ], }, ], }, "sklearn.kernel_approximation": { "short_summary": "Kernel approximation.", "description": _get_guide("kernel_approximation"), "sections": [ { "title": None, "autosummary": [ "AdditiveChi2Sampler", "Nystroem", "PolynomialCountSketch", "RBFSampler", "SkewedChi2Sampler", ], }, ], }, "sklearn.kernel_ridge": { "short_summary": "Kernel ridge regression.", "description": _get_guide("kernel_ridge"), "sections": [ { "title": None, "autosummary": ["KernelRidge"], }, ], }, "sklearn.linear_model": { "short_summary": "Generalized linear models.", "description": ( _get_guide("linear_model") + "\n\nThe following subsections are only rough guidelines: the same " "estimator can fall into multiple categories, depending on its parameters." ), "sections": [ { "title": "Linear classifiers", "autosummary": [ "LogisticRegression", "LogisticRegressionCV", "PassiveAggressiveClassifier", # TODO(1.10): remove "Perceptron", "RidgeClassifier", "RidgeClassifierCV", "SGDClassifier", "SGDOneClassSVM", ], }, { "title": "Classical linear regressors", "autosummary": ["LinearRegression", "Ridge", "RidgeCV", "SGDRegressor"], }, { "title": "Regressors with variable selection", "description": ( "The following estimators have built-in variable selection fitting " "procedures, but any estimator using a L1 or elastic-net penalty " "also performs variable selection: typically " ":class:`~linear_model.SGDRegressor` or " ":class:`~sklearn.linear_model.SGDClassifier` with an appropriate " "penalty." ), "autosummary": [ "ElasticNet", "ElasticNetCV", "Lars", "LarsCV", "Lasso", "LassoCV", "LassoLars", "LassoLarsCV", "LassoLarsIC", "OrthogonalMatchingPursuit", "OrthogonalMatchingPursuitCV", ], }, { "title": "Bayesian regressors", "autosummary": ["ARDRegression", "BayesianRidge"], }, { "title": "Multi-task linear regressors with variable selection", "description": ( "These estimators fit multiple regression problems (or tasks)" " jointly, while inducing sparse coefficients. While the inferred" " coefficients may differ between the tasks, they are constrained" " to agree on the features that are selected (non-zero" " coefficients)." 
), "autosummary": [ "MultiTaskElasticNet", "MultiTaskElasticNetCV", "MultiTaskLasso", "MultiTaskLassoCV", ], }, { "title": "Outlier-robust regressors", "description": ( "Any estimator using the Huber loss would also be robust to " "outliers, e.g., :class:`~linear_model.SGDRegressor` with " "``loss='huber'``." ), "autosummary": [ "HuberRegressor", "QuantileRegressor", "RANSACRegressor", "TheilSenRegressor", ], }, { "title": "Generalized linear models (GLM) for regression", "description": ( "These models allow for response variables to have error " "distributions other than a normal distribution." ), "autosummary": [ "GammaRegressor", "PoissonRegressor", "TweedieRegressor", ], }, { "title": "Miscellaneous", "autosummary": [ "PassiveAggressiveRegressor", # TODO(1.10): remove "enet_path", "lars_path", "lars_path_gram", "lasso_path", "orthogonal_mp", "orthogonal_mp_gram", "ridge_regression", ], }, ], }, "sklearn.manifold": { "short_summary": "Manifold learning.", "description": _get_guide("manifold"), "sections": [ { "title": None, "autosummary": [ "ClassicalMDS", "Isomap", "LocallyLinearEmbedding", "MDS", "SpectralEmbedding", "TSNE", "locally_linear_embedding", "smacof", "spectral_embedding", "trustworthiness", ], }, ], }, "sklearn.metrics": { "short_summary": "Metrics.", "description": _get_guide("model_evaluation", "metrics"), "sections": [ { "title": "Model selection interface", "description": _get_guide("scoring_parameter"), "autosummary": [ "check_scoring", "get_scorer", "get_scorer_names", "make_scorer", ], }, { "title": "Classification metrics", "description": _get_guide("classification_metrics"), "autosummary": [ "accuracy_score", "auc", "average_precision_score", "balanced_accuracy_score", "brier_score_loss", "class_likelihood_ratios", "classification_report", "cohen_kappa_score", "confusion_matrix", "confusion_matrix_at_thresholds", "d2_brier_score", "d2_log_loss_score", "dcg_score", "det_curve", "f1_score", "fbeta_score", "hamming_loss", "hinge_loss", "jaccard_score", "log_loss", "matthews_corrcoef", "multilabel_confusion_matrix", "ndcg_score", "precision_recall_curve", "precision_recall_fscore_support", "precision_score", "recall_score", "roc_auc_score", "roc_curve", "top_k_accuracy_score", "zero_one_loss", ], }, { "title": "Regression metrics", "description": _get_guide("regression_metrics"), "autosummary": [ "d2_absolute_error_score", "d2_pinball_score", "d2_tweedie_score", "explained_variance_score", "max_error", "mean_absolute_error", "mean_absolute_percentage_error", "mean_gamma_deviance", "mean_pinball_loss", "mean_poisson_deviance", "mean_squared_error", "mean_squared_log_error", "mean_tweedie_deviance", "median_absolute_error", "r2_score", "root_mean_squared_error", "root_mean_squared_log_error", ], }, { "title": "Multilabel ranking metrics", "description": _get_guide("multilabel_ranking_metrics"), "autosummary": [ "coverage_error", "label_ranking_average_precision_score", "label_ranking_loss", ], }, { "title": "Clustering metrics", "description": ( _get_submodule("sklearn.metrics", "cluster") + "\n\n" + _get_guide("clustering_evaluation") ), "autosummary": [ "adjusted_mutual_info_score", "adjusted_rand_score", "calinski_harabasz_score", "cluster.contingency_matrix", "cluster.pair_confusion_matrix", "completeness_score", "davies_bouldin_score", "fowlkes_mallows_score", "homogeneity_completeness_v_measure", "homogeneity_score", "mutual_info_score", "normalized_mutual_info_score", "rand_score", "silhouette_samples", "silhouette_score", "v_measure_score", ], }, { "title": 
"Biclustering metrics", "description": _get_guide("biclustering_evaluation"), "autosummary": ["consensus_score"], }, { "title": "Distance metrics", "autosummary": ["DistanceMetric"], }, { "title": "Pairwise metrics", "description": ( _get_submodule("sklearn.metrics", "pairwise") + "\n\n" + _get_guide("metrics") ), "autosummary": [ "pairwise.additive_chi2_kernel", "pairwise.chi2_kernel", "pairwise.cosine_distances", "pairwise.cosine_similarity", "pairwise.distance_metrics", "pairwise.euclidean_distances", "pairwise.haversine_distances", "pairwise.kernel_metrics", "pairwise.laplacian_kernel", "pairwise.linear_kernel", "pairwise.manhattan_distances", "pairwise.nan_euclidean_distances", "pairwise.paired_cosine_distances", "pairwise.paired_distances", "pairwise.paired_euclidean_distances", "pairwise.paired_manhattan_distances", "pairwise.pairwise_kernels", "pairwise.polynomial_kernel", "pairwise.rbf_kernel", "pairwise.sigmoid_kernel", "pairwise_distances", "pairwise_distances_argmin", "pairwise_distances_argmin_min", "pairwise_distances_chunked", ], }, { "title": "Plotting", "description": _get_guide("visualizations"), "autosummary": [ "ConfusionMatrixDisplay", "DetCurveDisplay", "PrecisionRecallDisplay", "PredictionErrorDisplay", "RocCurveDisplay", ], }, ], }, "sklearn.mixture": { "short_summary": "Gaussian mixture models.", "description": _get_guide("mixture"), "sections": [ { "title": None, "autosummary": ["BayesianGaussianMixture", "GaussianMixture"], }, ], }, "sklearn.model_selection": { "short_summary": "Model selection.", "description": _get_guide("cross_validation", "grid_search", "learning_curve"), "sections": [ { "title": "Splitters", "autosummary": [ "GroupKFold", "GroupShuffleSplit", "KFold", "LeaveOneGroupOut", "LeaveOneOut",
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
true
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/doc/conftest.py
doc/conftest.py
import os
from os import environ
from os.path import exists, join

import pytest
from _pytest.doctest import DoctestItem

from sklearn.datasets import get_data_home
from sklearn.datasets._base import _pkl_filepath
from sklearn.datasets._twenty_newsgroups import CACHE_NAME
from sklearn.utils._testing import SkipTest, check_skip_network
from sklearn.utils.fixes import np_base_version, parse_version, sp_version


def setup_labeled_faces():
    data_home = get_data_home()
    if not exists(join(data_home, "lfw_home")):
        raise SkipTest("Skipping dataset loading doctests")


def setup_rcv1():
    check_skip_network()
    # skip the test in rcv1.rst if the dataset is not already loaded
    rcv1_dir = join(get_data_home(), "RCV1")
    if not exists(rcv1_dir):
        raise SkipTest("Download RCV1 dataset to run this test.")


def setup_twenty_newsgroups():
    cache_path = _pkl_filepath(get_data_home(), CACHE_NAME)
    if not exists(cache_path):
        raise SkipTest("Skipping dataset loading doctests")


def setup_working_with_text_data():
    check_skip_network()
    cache_path = _pkl_filepath(get_data_home(), CACHE_NAME)
    if not exists(cache_path):
        raise SkipTest("Skipping dataset loading doctests")


def setup_loading_other_datasets():
    try:
        import pandas  # noqa: F401
    except ImportError:
        raise SkipTest("Skipping loading_other_datasets.rst, pandas not installed")

    # checks SKLEARN_SKIP_NETWORK_TESTS to see if test should run
    run_network_tests = environ.get("SKLEARN_SKIP_NETWORK_TESTS", "1") == "0"
    if not run_network_tests:
        raise SkipTest(
            "Skipping loading_other_datasets.rst, tests can be "
            "enabled by setting SKLEARN_SKIP_NETWORK_TESTS=0"
        )


def setup_compose():
    try:
        import pandas  # noqa: F401
    except ImportError:
        raise SkipTest("Skipping compose.rst, pandas not installed")


def setup_impute():
    try:
        import pandas  # noqa: F401
    except ImportError:
        raise SkipTest("Skipping impute.rst, pandas not installed")


def setup_grid_search():
    try:
        import pandas  # noqa: F401
    except ImportError:
        raise SkipTest("Skipping grid_search.rst, pandas not installed")


def setup_preprocessing():
    try:
        import pandas  # noqa: F401
    except ImportError:
        raise SkipTest("Skipping preprocessing.rst, pandas not installed")


def skip_if_matplotlib_not_installed(fname):
    try:
        import matplotlib  # noqa: F401
    except ImportError:
        basename = os.path.basename(fname)
        raise SkipTest(f"Skipping doctests for {basename}, matplotlib not installed")


def skip_if_cupy_not_installed(fname):
    try:
        import cupy  # noqa: F401
    except ImportError:
        basename = os.path.basename(fname)
        raise SkipTest(f"Skipping doctests for {basename}, cupy not installed")


def pytest_runtest_setup(item):
    fname = item.fspath.strpath
    # normalize filename to use forward slashes on Windows for easier handling
    # later
    fname = fname.replace(os.sep, "/")

    is_index = fname.endswith("datasets/index.rst")
    if fname.endswith("datasets/labeled_faces.rst") or is_index:
        setup_labeled_faces()
    elif fname.endswith("datasets/rcv1.rst") or is_index:
        setup_rcv1()
    elif fname.endswith("datasets/twenty_newsgroups.rst") or is_index:
        setup_twenty_newsgroups()
    elif fname.endswith("modules/compose.rst") or is_index:
        setup_compose()
    elif fname.endswith("datasets/loading_other_datasets.rst"):
        setup_loading_other_datasets()
    elif fname.endswith("modules/impute.rst"):
        setup_impute()
    elif fname.endswith("modules/grid_search.rst"):
        setup_grid_search()
    elif fname.endswith("modules/preprocessing.rst"):
        setup_preprocessing()

    rst_files_requiring_matplotlib = [
        "modules/partial_dependence.rst",
        "modules/tree.rst",
    ]
    for each in rst_files_requiring_matplotlib:
        if fname.endswith(each):
            skip_if_matplotlib_not_installed(fname)

    if fname.endswith("array_api.rst"):
        skip_if_cupy_not_installed(fname)


def pytest_configure(config):
    # Use matplotlib agg backend during the tests including doctests
    try:
        import matplotlib

        matplotlib.use("agg")
    except ImportError:
        pass


def pytest_collection_modifyitems(config, items):
    """Called after collect is completed.

    Parameters
    ----------
    config : pytest config
    items : list of collected items
    """
    skip_doctests = False
    if np_base_version < parse_version("2"):
        # TODO: configure numpy to output scalar arrays as regular Python scalars
        # once possible to improve readability of the tests docstrings.
        # https://numpy.org/neps/nep-0051-scalar-representation.html#implementation
        reason = "Due to NEP 51 numpy scalar repr has changed in numpy 2"
        skip_doctests = True

    if sp_version < parse_version("1.14"):
        reason = "Scipy sparse matrix repr has changed in scipy 1.14"
        skip_doctests = True

    # Normally doctest has the entire module's scope. Here we set globs to an empty
    # dict to remove the module's scope:
    # https://docs.python.org/3/library/doctest.html#what-s-the-execution-context
    for item in items:
        if isinstance(item, DoctestItem):
            item.dtest.globs = {}

    if skip_doctests:
        skip_marker = pytest.mark.skip(reason=reason)

        for item in items:
            if isinstance(item, DoctestItem):
                item.add_marker(skip_marker)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/doc/conf.py
doc/conf.py
# scikit-learn documentation build configuration file, created by # sphinx-quickstart on Fri Jan 8 09:13:42 2010. # # This file is execfile()d with the current directory set to its containing # dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import json import os import re import sys import warnings from datetime import datetime from pathlib import Path from urllib.request import urlopen from sklearn.externals._packaging.version import parse from sklearn.utils._testing import turn_warnings_into_errors # If extensions (or modules to document with autodoc) are in another # directory, add these directories to sys.path here. If the directory # is relative to the documentation root, use os.path.abspath to make it # absolute, like shown here. sys.path.insert(0, os.path.abspath(".")) sys.path.insert(0, os.path.abspath("sphinxext")) import jinja2 import sphinx_gallery from github_link import make_linkcode_resolve from sphinx.util.logging import getLogger from sphinx_gallery.notebook import add_code_cell, add_markdown_cell from sphinx_gallery.sorting import ExampleTitleSortKey logger = getLogger(__name__) try: # Configure plotly to integrate its output into the HTML pages generated by # sphinx-gallery. import plotly.io as pio pio.renderers.default = "sphinx_gallery" except ImportError: # Make it possible to render the doc when not running the examples # that need plotly. pass # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ "sphinx.ext.autodoc", "sphinx.ext.autosummary", "numpydoc", "sphinx.ext.linkcode", "sphinx.ext.doctest", "sphinx.ext.intersphinx", "sphinx.ext.imgconverter", "sphinx_gallery.gen_gallery", "sphinx-prompt", "sphinx_copybutton", "sphinxext.opengraph", "matplotlib.sphinxext.plot_directive", "sphinxcontrib.sass", "sphinx_remove_toctrees", "sphinx_design", # See sphinxext/ "allow_nan_estimators", "autoshortsummary", "doi_role", "dropdown_anchors", "override_pst_pagetoc", "sphinx_issues", ] # Specify how to identify the prompt when copying code snippets copybutton_prompt_text = r">>> |\.\.\. " copybutton_prompt_is_regexp = True copybutton_exclude = "style" try: import jupyterlite_sphinx # noqa: F401 extensions.append("jupyterlite_sphinx") with_jupyterlite = True except ImportError: # In some cases we don't want to require jupyterlite_sphinx to be installed, # e.g. the doc-min-dependencies build warnings.warn( "jupyterlite_sphinx is not installed, you need to install it " "if you want JupyterLite links to appear in each example" ) with_jupyterlite = False # Produce `plot::` directives for examples that contain `import matplotlib` or # `from matplotlib import`. 
numpydoc_use_plots = True # Options for the `::plot` directive: # https://matplotlib.org/stable/api/sphinxext_plot_directive_api.html plot_formats = ["png"] plot_include_source = True plot_html_show_formats = False plot_html_show_source_link = False # We do not need the table of class members because `sphinxext/override_pst_pagetoc.py` # will show them in the secondary sidebar numpydoc_show_class_members = False numpydoc_show_inherited_class_members = False # We want in-page toc of class members instead of a separate page for each entry numpydoc_class_members_toctree = False # For maths, use mathjax by default and svg if NO_MATHJAX env variable is set # (useful for viewing the doc offline) if os.environ.get("NO_MATHJAX"): extensions.append("sphinx.ext.imgmath") imgmath_image_format = "svg" mathjax_path = "" else: extensions.append("sphinx.ext.mathjax") mathjax_path = "https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-chtml.js" # Add any paths that contain templates here, relative to this directory. templates_path = ["templates"] # generate autosummary even if no references autosummary_generate = True # The suffix of source filenames. source_suffix = ".rst" # The encoding of source files. source_encoding = "utf-8" # The main toctree document. root_doc = "index" # General information about the project. project = "scikit-learn" copyright = f"2007 - {datetime.now().year}, scikit-learn developers (BSD License)" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. import sklearn parsed_version = parse(sklearn.__version__) version = ".".join(parsed_version.base_version.split(".")[:2]) # The full version, including alpha/beta/rc tags. # Removes post from release name if parsed_version.is_postrelease: release = parsed_version.base_version else: release = sklearn.__version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [ "_build", "templates", "includes", "**/sg_execution_times.rst", "whats_new/upcoming_changes", ] # The reST default role (used for this markup: `text`) to use for all # documents. default_role = "literal" # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = False # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. html_theme = "pydata_sphinx_theme" # This config option is used to generate the canonical links in the header # of every page. The canonical link is needed to prevent search engines from # returning results pointing to old scikit-learn versions. 
html_baseurl = "https://scikit-learn.org/stable/" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { # -- General configuration ------------------------------------------------ "sidebar_includehidden": True, "use_edit_page_button": True, "external_links": [], "icon_links_label": "Icon Links", "icon_links": [ { "name": "GitHub", "url": "https://github.com/scikit-learn/scikit-learn", "icon": "fa-brands fa-square-github", "type": "fontawesome", }, ], "analytics": { "plausible_analytics_domain": "scikit-learn.org", "plausible_analytics_url": "https://views.scientific-python.org/js/script.js", }, # If "prev-next" is included in article_footer_items, then setting show_prev_next # to True would repeat prev and next links. See # https://github.com/pydata/pydata-sphinx-theme/blob/b731dc230bc26a3d1d1bb039c56c977a9b3d25d8/src/pydata_sphinx_theme/theme/pydata_sphinx_theme/layout.html#L118-L129 "show_prev_next": False, "search_bar_text": "Search the docs ...", "navigation_with_keys": False, "collapse_navigation": False, "navigation_depth": 2, "show_nav_level": 1, "show_toc_level": 1, "navbar_align": "left", "header_links_before_dropdown": 5, "header_dropdown_text": "More", # The switcher requires a JSON file with the list of documentation versions, which # is generated by the script `build_tools/circle/list_versions.py` and placed under # the `js/` static directory; it will then be copied to the `_static` directory in # the built documentation "switcher": { "json_url": "https://scikit-learn.org/dev/_static/versions.json", "version_match": release, }, # check_switcher may be set to False if docbuild pipeline fails. See # https://pydata-sphinx-theme.readthedocs.io/en/stable/user_guide/version-dropdown.html#configure-switcher-json-url "check_switcher": True, "pygments_light_style": "tango", "pygments_dark_style": "monokai", "logo": { "alt_text": "scikit-learn homepage", "image_relative": "logos/scikit-learn-logo-without-subtitle.svg", "image_light": "logos/scikit-learn-logo-without-subtitle.svg", "image_dark": "logos/scikit-learn-logo-without-subtitle.svg", }, "surface_warnings": True, # -- Template placement in theme layouts ---------------------------------- "navbar_start": ["navbar-logo"], # Note that the alignment of navbar_center is controlled by navbar_align "navbar_center": ["navbar-nav"], "navbar_end": ["theme-switcher", "navbar-icon-links", "version-switcher"], # navbar_persistent is persistent right (even when on mobiles) "navbar_persistent": ["search-button"], "article_header_start": ["breadcrumbs"], "article_header_end": [], "article_footer_items": ["prev-next"], "content_footer_items": [], # Use html_sidebars that map page patterns to list of sidebar templates "primary_sidebar_end": [], "footer_start": ["copyright"], "footer_center": [], "footer_end": [], # When specified as a dictionary, the keys should follow glob-style patterns, as in # https://www.sphinx-doc.org/en/master/usage/configuration.html#confval-exclude_patterns # In particular, "**" specifies the default for all pages # Use :html_theme.sidebar_secondary.remove: for file-wide removal "secondary_sidebar_items": { "**": [ "page-toc", "sourcelink", # Sphinx-Gallery-specific sidebar components # https://sphinx-gallery.github.io/stable/advanced.html#using-sphinx-gallery-sidebar-components "sg_download_links", "sg_launcher_links", ], }, "show_version_warning_banner": True, "announcement": None, } # Add 
any paths that contain custom themes here, relative to this directory. # html_theme_path = ["themes"] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. html_short_title = "scikit-learn" # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. html_favicon = "logos/favicon.ico" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["images", "css", "js"] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # Custom sidebar templates, maps document names to template names. # Workaround for removing the left sidebar on pages without TOC # A better solution would be to follow the merge of: # https://github.com/pydata/pydata-sphinx-theme/pull/1682 html_sidebars = { "install": [], "getting_started": [], "glossary": [], "faq": [], "support": [], "related_projects": [], "roadmap": [], "governance": [], "about": [], } # Additional templates that should be rendered to pages, maps page names to # template names. html_additional_pages = {"index": "index.html"} # Additional files to copy # html_extra_path = [] # Additional JS files html_js_files = [ "scripts/dropdown.js", "scripts/version-switcher.js", "scripts/sg_plotly_resize.js", "scripts/theme-observer.js", ] # Compile scss files into css files using sphinxcontrib-sass sass_src_dir, sass_out_dir = "scss", "css/styles" sass_targets = { f"{file.stem}.scss": f"{file.stem}.css" for file in Path(sass_src_dir).glob("*.scss") } # Additional CSS files, should be subset of the values of `sass_targets` html_css_files = ["styles/colors.css", "styles/custom.css"] def add_js_css_files(app, pagename, templatename, context, doctree): """Load additional JS and CSS files only for certain pages. Note that `html_js_files` and `html_css_files` are included in all pages and should be used for the ones that are used by multiple pages. All page-specific JS and CSS files should be added here instead. """ if pagename == "api/index": # External: jQuery and DataTables app.add_js_file("https://code.jquery.com/jquery-3.7.0.js") app.add_js_file("https://cdn.datatables.net/2.0.0/js/dataTables.min.js") app.add_css_file( "https://cdn.datatables.net/2.0.0/css/dataTables.dataTables.min.css" ) # Internal: API search initialization and styling app.add_js_file("scripts/api-search.js") app.add_css_file("styles/api-search.css") elif pagename == "index": app.add_css_file("styles/index.css") elif pagename.startswith("modules/generated/"): app.add_css_file("styles/api.css") # If false, no module index is generated. html_domain_indices = False # If false, no index is generated. html_use_index = False # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. 
# html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = "scikit-learndoc" # If true, the reST sources are included in the HTML build as _sources/name. html_copy_source = True # Adds variables into templates html_context = {} # finds latest release highlights and places it into HTML context for # index.html release_highlights_dir = Path("..") / "examples" / "release_highlights" # Finds the highlight with the latest version number latest_highlights = sorted(release_highlights_dir.glob("plot_release_highlights_*.py"))[ -1 ] latest_highlights = latest_highlights.with_suffix("").name html_context["release_highlights"] = ( f"auto_examples/release_highlights/{latest_highlights}" ) # get version from highlight name assuming highlights have the form # plot_release_highlights_0_22_0 highlight_version = ".".join(latest_highlights.split("_")[-3:-1]) html_context["release_highlights_version"] = highlight_version # redirects dictionary maps from old links to new links redirects = { "documentation": "index", "contents": "index", "preface": "index", "modules/classes": "api/index", "tutorial/machine_learning_map/index": "machine_learning_map", "auto_examples/feature_selection/plot_permutation_test_for_classification": ( "auto_examples/model_selection/plot_permutation_tests_for_classification" ), "modules/model_persistence": "model_persistence", "auto_examples/linear_model/plot_bayesian_ridge": ( "auto_examples/linear_model/plot_ard" ), "auto_examples/model_selection/grid_search_text_feature_extraction": ( "auto_examples/model_selection/plot_grid_search_text_feature_extraction" ), "auto_examples/model_selection/plot_validation_curve": ( "auto_examples/model_selection/plot_train_error_vs_test_error" ), "auto_examples/datasets/plot_digits_last_image": ( "auto_examples/exercises/plot_digits_classification_exercises" ), "auto_examples/datasets/plot_random_dataset": ( "auto_examples/classification/plot_classifier_comparison" ), "auto_examples/miscellaneous/plot_changed_only_pprint_parameter": ( "auto_examples/miscellaneous/plot_estimator_representation" ), "auto_examples/decomposition/plot_beta_divergence": ( "auto_examples/applications/plot_topics_extraction_with_nmf_lda" ), "auto_examples/svm/plot_svm_nonlinear": "auto_examples/svm/plot_svm_kernels", "auto_examples/ensemble/plot_adaboost_hastie_10_2": ( "auto_examples/ensemble/plot_adaboost_multiclass" ), "auto_examples/decomposition/plot_pca_3d": ( "auto_examples/decomposition/plot_pca_iris" ), "auto_examples/exercises/plot_cv_digits": ( "auto_examples/model_selection/plot_nested_cross_validation_iris" ), "auto_examples/linear_model/plot_lasso_lars": ( "auto_examples/linear_model/plot_lasso_lasso_lars_elasticnet_path" ), "auto_examples/linear_model/plot_lasso_coordinate_descent_path": ( "auto_examples/linear_model/plot_lasso_lasso_lars_elasticnet_path" ), "auto_examples/cluster/plot_color_quantization": ( "auto_examples/cluster/plot_face_compress" ), "auto_examples/cluster/plot_cluster_iris": ( "auto_examples/cluster/plot_kmeans_assumptions" ), "auto_examples/ensemble/plot_forest_importances_faces": ( "auto_examples/ensemble/plot_forest_importances" ), "auto_examples/ensemble/plot_voting_probas": ( "auto_examples/ensemble/plot_voting_decision_regions" ), "auto_examples/datasets/plot_iris_dataset": ( "auto_examples/decomposition/plot_pca_iris" ), "auto_examples/linear_model/plot_iris_logistic": ( 
"auto_examples/linear_model/plot_logistic_multinomial" ), "auto_examples/linear_model/plot_logistic": ( "auto_examples/calibration/plot_calibration_curve" ), "auto_examples/linear_model/plot_ols_3d": ("auto_examples/linear_model/plot_ols"), "auto_examples/linear_model/plot_ols": "auto_examples/linear_model/plot_ols_ridge", "auto_examples/linear_model/plot_ols_ridge_variance": ( "auto_examples/linear_model/plot_ols_ridge" ), "auto_examples/cluster/plot_agglomerative_clustering.html": ( "auto_examples/cluster/plot_ward_structured_vs_unstructured.html" ), "auto_examples/linear_model/plot_sgd_comparison": ( "auto_examples/linear_model/plot_sgd_loss_functions" ), } html_context["redirects"] = redirects for old_link in redirects: html_additional_pages[old_link] = "redirects.html" # See https://github.com/scikit-learn/scikit-learn/pull/22550 html_context["is_devrelease"] = parsed_version.is_devrelease # -- Options for LaTeX output ------------------------------------------------ latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. "preamble": r""" \usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm} \usepackage{morefloats}\usepackage{enumitem} \setlistdepth{10} \let\oldhref\href \renewcommand{\href}[2]{\oldhref{#1}{\hbox{#2}}} """ } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ( "contents", "user_guide.tex", "scikit-learn user guide", "scikit-learn developers", "manual", ), ] # The name of an image file (relative to this directory) to place at the top of # the title page. latex_logo = "logos/scikit-learn-logo.png" # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. latex_domain_indices = False trim_doctests_flags = True # intersphinx configuration intersphinx_mapping = { "python": ("https://docs.python.org/{.major}".format(sys.version_info), None), "numpy": ("https://numpy.org/doc/stable", None), "scipy": ("https://docs.scipy.org/doc/scipy/", None), "matplotlib": ("https://matplotlib.org/", None), "pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None), "joblib": ("https://joblib.readthedocs.io/en/latest/", None), "seaborn": ("https://seaborn.pydata.org/", None), "skops": ("https://skops.readthedocs.io/en/stable/", None), } v = parse(release) if v.release is None: raise ValueError( "Ill-formed version: {!r}. Version should follow PEP440".format(version) ) if v.is_devrelease: binder_branch = "main" else: major, minor = v.release[:2] binder_branch = "{}.{}.X".format(major, minor) class SubSectionTitleOrder: """Sort example gallery by title of subsection. Assumes README.txt exists for all subsections and uses the subsection with dashes, '---', as the adornment. 
""" def __init__(self, src_dir): self.src_dir = src_dir self.regex = re.compile(r"^([\w ]+)\n-", re.MULTILINE) def __repr__(self): return "<%s>" % (self.__class__.__name__,) def __call__(self, directory): src_path = os.path.normpath(os.path.join(self.src_dir, directory)) # Forces Release Highlights to the top if os.path.basename(src_path) == "release_highlights": return "0" readme = os.path.join(src_path, "README.txt") try: with open(readme, "r") as f: content = f.read() except FileNotFoundError: return directory title_match = self.regex.search(content) if title_match is not None: return title_match.group(1) return directory class SKExampleTitleSortKey(ExampleTitleSortKey): """Sorts release highlights based on version number.""" def __call__(self, filename): title = super().__call__(filename) prefix = "plot_release_highlights_" # Use title to sort if not a release highlight if not str(filename).startswith(prefix): return title major_minor = filename[len(prefix) :].split("_")[:2] version_float = float(".".join(major_minor)) # negate to place the newest version highlights first return -version_float def notebook_modification_function(notebook_content, notebook_filename): notebook_content_str = str(notebook_content) warning_template = "\n".join( [ "<div class='alert alert-{message_class}'>", "", "# JupyterLite warning", "", "{message}", "</div>", ] ) message_class = "warning" message = ( "Running the scikit-learn examples in JupyterLite is experimental and you may" " encounter some unexpected behavior.\n\nThe main difference is that imports" " will take a lot longer than usual, for example the first `import sklearn` can" " take roughly 10-20s.\n\nIf you notice problems, feel free to open an" " [issue](https://github.com/scikit-learn/scikit-learn/issues/new/choose)" " about it." ) markdown = warning_template.format(message_class=message_class, message=message) dummy_notebook_content = {"cells": []} add_markdown_cell(dummy_notebook_content, markdown) code_lines = [] if "seaborn" in notebook_content_str: code_lines.append("%pip install seaborn") if "plotly.express" in notebook_content_str: code_lines.append("%pip install plotly nbformat") if "skimage" in notebook_content_str: code_lines.append("%pip install scikit-image") if "polars" in notebook_content_str: code_lines.append("%pip install polars") if "fetch_" in notebook_content_str: code_lines.extend( [ "%pip install pyodide-http", "import pyodide_http", "pyodide_http.patch_all()", ] ) # always import matplotlib and pandas to avoid Pyodide limitation with # imports inside functions code_lines.extend(["import matplotlib", "import pandas"]) # Work around https://github.com/jupyterlite/pyodide-kernel/issues/166 # and https://github.com/pyodide/micropip/issues/223 by installing the # dependencies first, and then scikit-learn from Anaconda.org. 
if "dev" in release: dev_docs_specific_code = [ "import piplite", "import joblib", "import threadpoolctl", "import scipy", "await piplite.install(\n" f" 'scikit-learn=={release}',\n" " index_urls='https://pypi.anaconda.org/scientific-python-nightly-wheels/simple',\n" ")", ] code_lines.extend(dev_docs_specific_code) if code_lines: code_lines = ["# JupyterLite-specific code"] + code_lines code = "\n".join(code_lines) add_code_cell(dummy_notebook_content, code) notebook_content["cells"] = ( dummy_notebook_content["cells"] + notebook_content["cells"] ) default_global_config = sklearn.get_config() def reset_sklearn_config(gallery_conf, fname): """Reset sklearn config to default values.""" sklearn.set_config(**default_global_config) sg_examples_dir = "../examples" sg_gallery_dir = "auto_examples" sphinx_gallery_conf = { "doc_module": "sklearn", "backreferences_dir": os.path.join("modules", "generated"), "show_memory": False, "reference_url": {"sklearn": None}, "examples_dirs": [sg_examples_dir], "gallery_dirs": [sg_gallery_dir], "subsection_order": SubSectionTitleOrder(sg_examples_dir), "within_subsection_order": SKExampleTitleSortKey, "binder": { "org": "scikit-learn", "repo": "scikit-learn", "binderhub_url": "https://mybinder.org", "branch": binder_branch, "dependencies": "./binder/requirements.txt", "use_jupyter_lab": True, }, # avoid generating too many cross links "inspect_global_variables": False, "remove_config_comments": True, "plot_gallery": "True", "recommender": {"enable": True, "n_examples": 4, "min_df": 12}, "reset_modules": ("matplotlib", "seaborn", reset_sklearn_config), } if with_jupyterlite: sphinx_gallery_conf["jupyterlite"] = { "notebook_modification_function": notebook_modification_function } # For the index page of the gallery and each nested section, we hide the secondary # sidebar by specifying an empty list (no components), because there is no meaningful # in-page toc for these pages, and they are generated so "sourcelink" is not useful # either. html_theme_options["secondary_sidebar_items"][f"{sg_gallery_dir}/index"] = [] for sub_sg_dir in (Path(".") / sg_examples_dir).iterdir(): if sub_sg_dir.is_dir(): html_theme_options["secondary_sidebar_items"][ f"{sg_gallery_dir}/{sub_sg_dir.name}/index" ] = [] # The following dictionary contains the information used to create the # thumbnails for the front page of the scikit-learn home page. 
# key: first image in set # values: (number of plot in set, height of thumbnail) carousel_thumbs = {"sphx_glr_plot_classifier_comparison_001.png": 600} # enable experimental module so that experimental estimators can be # discovered properly by sphinx from sklearn.experimental import ( # noqa: F401 enable_halving_search_cv, enable_iterative_imputer, ) def make_carousel_thumbs(app, exception): """produces the final resized carousel images""" if exception is not None: return print("Preparing carousel images") image_dir = os.path.join(app.builder.outdir, "_images") for glr_plot, max_width in carousel_thumbs.items(): image = os.path.join(image_dir, glr_plot) if os.path.exists(image): c_thumb = os.path.join(image_dir, glr_plot[:-4] + "_carousel.png") sphinx_gallery.gen_rst.scale_image(image, c_thumb, max_width, 190) def filter_search_index(app, exception): if exception is not None: return # searchindex only exist when generating html if app.builder.name != "html": return print("Removing methods from search index") searchindex_path = os.path.join(app.builder.outdir, "searchindex.js") with open(searchindex_path, "r") as f: searchindex_text = f.read() searchindex_text = re.sub(r"{__init__.+?}", "{}", searchindex_text) searchindex_text = re.sub(r"{__call__.+?}", "{}", searchindex_text) with open(searchindex_path, "w") as f: f.write(searchindex_text) # Config for sphinx_issues # we use the issues path for PRs since the issues URL will forward issues_github_path = "scikit-learn/scikit-learn" def disable_plot_gallery_for_linkcheck(app): if app.builder.name == "linkcheck": sphinx_gallery_conf["plot_gallery"] = "False" def skip_properties(app, what, name, obj, skip, options): """Skip properties that are fitted attributes""" if isinstance(obj, property): if name.endswith("_") and not name.startswith("_"): return True return skip def setup(app): # do not run the examples when using linkcheck by using a small priority # (default priority is 500 and sphinx-gallery using builder-inited event too) app.connect("builder-inited", disable_plot_gallery_for_linkcheck, priority=50) # triggered just before the HTML for an individual page is created app.connect("html-page-context", add_js_css_files) # to hide/show the prompt in code examples app.connect("build-finished", make_carousel_thumbs) app.connect("build-finished", filter_search_index) app.connect("autodoc-skip-member", skip_properties) # The following is used by sphinx.ext.linkcode to provide links to github linkcode_resolve = make_linkcode_resolve( "sklearn", ( "https://github.com/scikit-learn/" "scikit-learn/blob/{revision}/" "{package}/{path}#L{lineno}" ), ) warnings.filterwarnings( "ignore", category=UserWarning, message=( "Matplotlib is currently using agg, which is a" " non-GUI backend, so cannot show the figure." 
    ),
)

# TODO(1.10): remove PassiveAggressive
warnings.filterwarnings("ignore", category=FutureWarning, message="PassiveAggressive")

if os.environ.get("SKLEARN_WARNINGS_AS_ERRORS", "0") != "0":
    turn_warnings_into_errors()

# maps functions with a class name that is indistinguishable when case is
# ignored to another filename
autosummary_filename_map = {
    "sklearn.cluster.dbscan": "dbscan-function",
    "sklearn.covariance.oas": "oas-function",
    "sklearn.decomposition.fastica": "fastica-function",
}

# Config for sphinxext.opengraph
ogp_site_url = "https://scikit-learn.org/stable/"
ogp_image = "https://scikit-learn.org/stable/_static/scikit-learn-logo-notext.png"
ogp_use_first_image = True
ogp_site_name = "scikit-learn"

# Config for linkcheck that checks the documentation for broken links

# ignore all links in 'whats_new' to avoid doing many github requests and
# hitting the github rate threshold that makes linkcheck take a lot of time
linkcheck_exclude_documents = [r"whats_new/.*"]

# default timeout to make some sites links fail faster
linkcheck_timeout = 10

# Allow redirects from doi.org
linkcheck_allowed_redirects = {r"https://doi.org/.+": r".*"}

linkcheck_ignore = [
    # ignore links to local html files e.g. in image directive :target: field
    r"^..?/",
    # ignore links to specific pdf pages because linkcheck does not handle them
    # ('utf-8' codec can't decode byte error)
    r"http://www.utstat.toronto.edu/~rsalakhu/sta4273/notes/Lecture2.pdf#page=.*",
    (
        "https://www.fordfoundation.org/media/2976/roads-and-bridges"
        "-the-unseen-labor-behind-our-digital-infrastructure.pdf#page=.*"
    ),
    # links falsely flagged as broken
    (
        "https://www.researchgate.net/publication/"
        "233096619_A_Dendrite_Method_for_Cluster_Analysis"
    ),
    (
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
true
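The `skip_properties` hook wired up in the `setup()` function of the `conf.py` above filters autodoc members purely by a naming convention: any property whose name ends with an underscore but does not start with one is treated as a fitted attribute and skipped. A minimal standalone sketch, using a hypothetical `ToyEstimator` class (not part of scikit-learn), of which names that rule catches:

class ToyEstimator:
    @property
    def coef_(self):  # trailing underscore: looks like a fitted attribute
        return [1.0]

    @property
    def shape(self):  # ordinary property: kept in the rendered API docs
        return (1,)


def would_be_skipped(name, obj):
    # Same predicate as skip_properties, minus the Sphinx plumbing
    return isinstance(obj, property) and name.endswith("_") and not name.startswith("_")


for name in ("coef_", "shape"):
    print(name, would_be_skipped(name, vars(ToyEstimator)[name]))
# coef_ True
# shape False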
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/doc/sphinxext/allow_nan_estimators.py
doc/sphinxext/allow_nan_estimators.py
from contextlib import suppress from docutils import nodes from docutils.parsers.rst import Directive from sklearn.utils import all_estimators from sklearn.utils._test_common.instance_generator import _construct_instances from sklearn.utils._testing import SkipTest class AllowNanEstimators(Directive): @staticmethod def make_paragraph_for_estimator_type(estimator_type): intro = nodes.list_item() intro += nodes.strong(text="Estimators that allow NaN values for type ") intro += nodes.literal(text=f"{estimator_type}") intro += nodes.strong(text=":\n") exists = False lst = nodes.bullet_list() for name, est_class in all_estimators(type_filter=estimator_type): with suppress(SkipTest): # Here we generate the text only for one instance. This directive # should not be used for meta-estimators where tags depend on the # sub-estimator. est = next(_construct_instances(est_class)) if est.__sklearn_tags__().input_tags.allow_nan: module_name = ".".join(est_class.__module__.split(".")[:2]) class_title = f"{est_class.__name__}" class_url = f"./generated/{module_name}.{class_title}.html" item = nodes.list_item() para = nodes.paragraph() para += nodes.reference( class_title, text=class_title, internal=False, refuri=class_url ) exists = True item += para lst += item intro += lst return [intro] if exists else None def run(self): lst = nodes.bullet_list() for i in ["cluster", "regressor", "classifier", "transformer"]: item = self.make_paragraph_for_estimator_type(i) if item is not None: lst += item return [lst] def setup(app): app.add_directive("allow_nan_estimators", AllowNanEstimators) return { "version": "0.1", "parallel_read_safe": True, "parallel_write_safe": True, }
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
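Outside of a Sphinx build, the check that the `allow_nan_estimators` directive above performs can be run as a plain script. This sketch reuses the same private helpers the directive imports (`_construct_instances`, `SkipTest`), so it is only illustrative and may need adjusting between scikit-learn versions:

from contextlib import suppress

from sklearn.utils import all_estimators
from sklearn.utils._test_common.instance_generator import _construct_instances
from sklearn.utils._testing import SkipTest

nan_friendly = []
for name, est_class in all_estimators(type_filter="classifier"):
    with suppress(SkipTest):
        # Only the first constructed instance is inspected, as in the directive
        est = next(_construct_instances(est_class))
        if est.__sklearn_tags__().input_tags.allow_nan:
            nan_friendly.append(name)

print(nan_friendly)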
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/doc/sphinxext/sphinx_issues.py
doc/sphinxext/sphinx_issues.py
"""A Sphinx extension for linking to your project's issue tracker. Copyright 2014 Steven Loria Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import re from docutils import nodes, utils from sphinx.util.nodes import split_explicit_title __version__ = "1.2.0" __author__ = "Steven Loria" __license__ = "MIT" def user_role(name, rawtext, text, lineno, inliner, options=None, content=None): """Sphinx role for linking to a user profile. Defaults to linking to Github profiles, but the profile URIS can be configured via the ``issues_user_uri`` config value. Examples: :: :user:`sloria` Anchor text also works: :: :user:`Steven Loria <sloria>` """ options = options or {} content = content or [] has_explicit_title, title, target = split_explicit_title(text) target = utils.unescape(target).strip() title = utils.unescape(title).strip() config = inliner.document.settings.env.app.config if config.issues_user_uri: ref = config.issues_user_uri.format(user=target) else: ref = "https://github.com/{0}".format(target) if has_explicit_title: text = title else: text = "@{0}".format(target) link = nodes.reference(text=text, refuri=ref, **options) return [link], [] def cve_role(name, rawtext, text, lineno, inliner, options=None, content=None): """Sphinx role for linking to a CVE on https://cve.mitre.org. 
Examples: :: :cve:`CVE-2018-17175` """ options = options or {} content = content or [] has_explicit_title, title, target = split_explicit_title(text) target = utils.unescape(target).strip() title = utils.unescape(title).strip() ref = "https://cve.mitre.org/cgi-bin/cvename.cgi?name={0}".format(target) text = title if has_explicit_title else target link = nodes.reference(text=text, refuri=ref, **options) return [link], [] class IssueRole(object): EXTERNAL_REPO_REGEX = re.compile(r"^(\w+)/(.+)([#@])([\w]+)$") def __init__( self, uri_config_option, format_kwarg, github_uri_template, format_text=None ): self.uri_config_option = uri_config_option self.format_kwarg = format_kwarg self.github_uri_template = github_uri_template self.format_text = format_text or self.default_format_text @staticmethod def default_format_text(issue_no): return "#{0}".format(issue_no) def make_node(self, name, issue_no, config, options=None): name_map = {"pr": "pull", "issue": "issues", "commit": "commit"} options = options or {} repo_match = self.EXTERNAL_REPO_REGEX.match(issue_no) if repo_match: # External repo username, repo, symbol, issue = repo_match.groups() if name not in name_map: raise ValueError( "External repo linking not supported for :{}:".format(name) ) path = name_map.get(name) ref = "https://github.com/{issues_github_path}/{path}/{n}".format( issues_github_path="{}/{}".format(username, repo), path=path, n=issue ) formatted_issue = self.format_text(issue).lstrip("#") text = "{username}/{repo}{symbol}{formatted_issue}".format(**locals()) link = nodes.reference(text=text, refuri=ref, **options) return link if issue_no not in ("-", "0"): uri_template = getattr(config, self.uri_config_option, None) if uri_template: ref = uri_template.format(**{self.format_kwarg: issue_no}) elif config.issues_github_path: ref = self.github_uri_template.format( issues_github_path=config.issues_github_path, n=issue_no ) else: raise ValueError( "Neither {} nor issues_github_path is set".format( self.uri_config_option ) ) issue_text = self.format_text(issue_no) link = nodes.reference(text=issue_text, refuri=ref, **options) else: link = None return link def __call__( self, name, rawtext, text, lineno, inliner, options=None, content=None ): options = options or {} content = content or [] issue_nos = [each.strip() for each in utils.unescape(text).split(",")] config = inliner.document.settings.env.app.config ret = [] for i, issue_no in enumerate(issue_nos): node = self.make_node(name, issue_no, config, options=options) ret.append(node) if i != len(issue_nos) - 1: sep = nodes.raw(text=", ", format="html") ret.append(sep) return ret, [] """Sphinx role for linking to an issue. Must have `issues_uri` or `issues_github_path` configured in ``conf.py``. Examples: :: :issue:`123` :issue:`42,45` :issue:`sloria/konch#123` """ issue_role = IssueRole( uri_config_option="issues_uri", format_kwarg="issue", github_uri_template="https://github.com/{issues_github_path}/issues/{n}", ) """Sphinx role for linking to a pull request. Must have `issues_pr_uri` or `issues_github_path` configured in ``conf.py``. Examples: :: :pr:`123` :pr:`42,45` :pr:`sloria/konch#43` """ pr_role = IssueRole( uri_config_option="issues_pr_uri", format_kwarg="pr", github_uri_template="https://github.com/{issues_github_path}/pull/{n}", ) def format_commit_text(sha): return sha[:7] """Sphinx role for linking to a commit. Must have `issues_pr_uri` or `issues_github_path` configured in ``conf.py``. 
Examples: :: :commit:`123abc456def` :commit:`sloria/konch@123abc456def` """ commit_role = IssueRole( uri_config_option="issues_commit_uri", format_kwarg="commit", github_uri_template="https://github.com/{issues_github_path}/commit/{n}", format_text=format_commit_text, ) def setup(app): # Format template for issues URI # e.g. 'https://github.com/sloria/marshmallow/issues/{issue} app.add_config_value("issues_uri", default=None, rebuild="html") # Format template for PR URI # e.g. 'https://github.com/sloria/marshmallow/pull/{issue} app.add_config_value("issues_pr_uri", default=None, rebuild="html") # Format template for commit URI # e.g. 'https://github.com/sloria/marshmallow/commits/{commit} app.add_config_value("issues_commit_uri", default=None, rebuild="html") # Shortcut for Github, e.g. 'sloria/marshmallow' app.add_config_value("issues_github_path", default=None, rebuild="html") # Format template for user profile URI # e.g. 'https://github.com/{user}' app.add_config_value("issues_user_uri", default=None, rebuild="html") app.add_role("issue", issue_role) app.add_role("pr", pr_role) app.add_role("user", user_role) app.add_role("commit", commit_role) app.add_role("cve", cve_role) return { "version": __version__, "parallel_read_safe": True, "parallel_write_safe": True, }
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
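The external-repository form of these roles (for example `:pr:`sloria/konch#123``) is parsed by the single `EXTERNAL_REPO_REGEX` above before the GitHub URL is assembled. A small standalone illustration of that parsing step:

import re

EXTERNAL_REPO_REGEX = re.compile(r"^(\w+)/(.+)([#@])([\w]+)$")

username, repo, symbol, number = EXTERNAL_REPO_REGEX.match("sloria/konch#123").groups()
print(username, repo, symbol, number)  # sloria konch # 123
# For a :pr: role the link then points at the "pull" path of that repository
print(f"https://github.com/{username}/{repo}/pull/{number}")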
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/doc/sphinxext/autoshortsummary.py
doc/sphinxext/autoshortsummary.py
from sphinx.ext.autodoc import ModuleLevelDocumenter class ShortSummaryDocumenter(ModuleLevelDocumenter): """An autodocumenter that only renders the short summary of the object.""" # Defines the usage: .. autoshortsummary:: {{ object }} objtype = "shortsummary" # Disable content indentation content_indent = "" # Avoid being selected as the default documenter for some objects, because we are # returning `can_document_member` as True for all objects priority = -99 @classmethod def can_document_member(cls, member, membername, isattr, parent): """Allow documenting any object.""" return True def get_object_members(self, want_all): """Document no members.""" return (False, []) def add_directive_header(self, sig): """Override default behavior to add no directive header or options.""" pass def add_content(self, more_content): """Override default behavior to add only the first line of the docstring. Modified based on the part of processing docstrings in the original implementation of this method. https://github.com/sphinx-doc/sphinx/blob/faa33a53a389f6f8bc1f6ae97d6015fa92393c4a/sphinx/ext/autodoc/__init__.py#L609-L622 """ sourcename = self.get_sourcename() docstrings = self.get_doc() if docstrings is not None: if not docstrings: docstrings.append([]) # Get the first non-empty line of the processed docstring; this could lead # to unexpected results if the object does not have a short summary line. short_summary = next( (s for s in self.process_doc(docstrings) if s), "<no summary>" ) self.add_line(short_summary, sourcename, 0) def setup(app): app.add_autodocumenter(ShortSummaryDocumenter)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
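Stripped of the autodoc machinery, what `ShortSummaryDocumenter` renders is essentially the first non-empty line of an object's docstring. A Sphinx-free approximation (`short_summary` is a made-up helper here, and the real documenter additionally runs the docstring through `process_doc`):

import inspect


def short_summary(obj):
    doc = inspect.getdoc(obj) or ""
    return next((line for line in doc.splitlines() if line.strip()), "<no summary>")


def undocumented():
    pass


print(short_summary(inspect.getdoc))  # first line of inspect.getdoc's docstring
print(short_summary(undocumented))    # "<no summary>"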
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/doc/sphinxext/override_pst_pagetoc.py
doc/sphinxext/override_pst_pagetoc.py
from functools import cache from sphinx.util.logging import getLogger logger = getLogger(__name__) def override_pst_pagetoc(app, pagename, templatename, context, doctree): """Overrides the `generate_toc_html` function of pydata-sphinx-theme for API.""" @cache def generate_api_toc_html(kind="html"): """Generate the in-page toc for an API page. This relies on the `generate_toc_html` function added by pydata-sphinx-theme into the context. We save the original function into `pst_generate_toc_html` and override `generate_toc_html` with this function for generated API pages. The pagetoc of an API page would look like the following: <ul class="visible ..."> <-- Unwrap <li class="toc-h1 ..."> <-- Unwrap <a class="..." href="#">{{obj}}</a> <-- Decompose <ul class="visible ..."> <li class="toc-h2 ..."> ...object <ul class="..."> <-- Set visible if exists <li class="toc-h3 ...">...method 1</li> <-- Shorten <li class="toc-h3 ...">...method 2</li> <-- Shorten ...more methods <-- Shorten </ul> </li> <li class="toc-h2 ...">...gallery examples</li> </ul> </li> <-- Unwrapped </ul> <-- Unwrapped """ soup = context["pst_generate_toc_html"](kind="soup") try: # Unwrap the outermost level soup.ul.unwrap() soup.li.unwrap() soup.a.decompose() # Get all toc-h2 level entries, where the first one should be the function # or class, and the second one, if exists, should be the examples; there # should be no more than two entries at this level for generated API pages lis = soup.ul.select("li.toc-h2") main_li = lis[0] meth_list = main_li.ul if meth_list is not None: # This is a class API page, we remove the class name from the method # names to make them better fit into the secondary sidebar; also we # make the toc-h3 level entries always visible to more easily navigate # through the methods meth_list["class"].append("visible") for meth in meth_list.find_all("li", {"class": "toc-h3"}): target = meth.a.code.span target.string = target.string.split(".", 1)[1] # This corresponds to the behavior of `generate_toc_html` return str(soup) if kind == "html" else soup except Exception as e: # Upon any failure we return the original pagetoc logger.warning( f"Failed to generate API pagetoc for {pagename}: {e}; falling back" ) return context["pst_generate_toc_html"](kind=kind) # Override the pydata-sphinx-theme implementation for generate API pages if pagename.startswith("modules/generated/"): context["pst_generate_toc_html"] = context["generate_toc_html"] context["generate_toc_html"] = generate_api_toc_html def setup(app): # Need to be triggered after `pydata_sphinx_theme.toctree.add_toctree_functions`, # and since default priority is 500 we set 900 for safety app.connect("html-page-context", override_pst_pagetoc, priority=900)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
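The method-name shortening described in the docstring above boils down to a small BeautifulSoup manipulation: inside each `toc-h3` entry, the `ClassName.` prefix is dropped. A rough standalone sketch (requires `beautifulsoup4`; the HTML snippet is invented for illustration):

from bs4 import BeautifulSoup

html = (
    '<ul><li class="toc-h3">'
    '<a href="#Estimator.fit"><code><span>Estimator.fit</span></code></a>'
    "</li></ul>"
)
soup = BeautifulSoup(html, "html.parser")
for meth in soup.find_all("li", {"class": "toc-h3"}):
    target = meth.a.code.span
    target.string = target.string.split(".", 1)[1]  # "Estimator.fit" -> "fit"
print(soup)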
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/doc/sphinxext/github_link.py
doc/sphinxext/github_link.py
import inspect import os import subprocess import sys from functools import partial from operator import attrgetter REVISION_CMD = "git rev-parse --short HEAD" def _get_git_revision(): try: revision = subprocess.check_output(REVISION_CMD.split()).strip() except (subprocess.CalledProcessError, OSError): print("Failed to execute git to get revision") return None return revision.decode("utf-8") def _linkcode_resolve(domain, info, package, url_fmt, revision): """Determine a link to online source for a class/method/function This is called by sphinx.ext.linkcode An example with a long-untouched module that everyone has >>> _linkcode_resolve('py', {'module': 'tty', ... 'fullname': 'setraw'}, ... package='tty', ... url_fmt='https://hg.python.org/cpython/file/' ... '{revision}/Lib/{package}/{path}#L{lineno}', ... revision='xxxx') 'https://hg.python.org/cpython/file/xxxx/Lib/tty/tty.py#L18' """ if revision is None: return if domain not in ("py", "pyx"): return if not info.get("module") or not info.get("fullname"): return class_name = info["fullname"].split(".")[0] module = __import__(info["module"], fromlist=[class_name]) obj = attrgetter(info["fullname"])(module) # Unwrap the object to get the correct source # file in case that is wrapped by a decorator obj = inspect.unwrap(obj) try: fn = inspect.getsourcefile(obj) except Exception: fn = None if not fn: try: fn = inspect.getsourcefile(sys.modules[obj.__module__]) except Exception: fn = None if not fn: return try: fn = os.path.relpath(fn, start=os.path.dirname(__import__(package).__file__)) except ValueError: return None try: lineno = inspect.getsourcelines(obj)[1] except Exception: lineno = "" return url_fmt.format(revision=revision, package=package, path=fn, lineno=lineno) def make_linkcode_resolve(package, url_fmt): """Returns a linkcode_resolve function for the given URL format revision is a git commit reference (hash or name) package is the name of the root module of the package url_fmt is along the lines of ('https://github.com/USER/PROJECT/' 'blob/{revision}/{package}/' '{path}#L{lineno}') """ revision = _get_git_revision() return partial( _linkcode_resolve, revision=revision, package=package, url_fmt=url_fmt )
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
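The essential steps of `_linkcode_resolve` above can be reproduced in a few lines: unwrap the object, locate its source file relative to the package, and fill the GitHub URL template used in `conf.py`. In this sketch the revision is a placeholder ("main") rather than the short hash returned by `_get_git_revision`:

import inspect
import os

import sklearn
from sklearn.isotonic import isotonic_regression

obj = inspect.unwrap(isotonic_regression)  # undo decorators, as the resolver does
fn = inspect.getsourcefile(obj)
path = os.path.relpath(fn, start=os.path.dirname(sklearn.__file__))
lineno = inspect.getsourcelines(obj)[1]
url_fmt = (
    "https://github.com/scikit-learn/scikit-learn/blob/{revision}/{package}/{path}#L{lineno}"
)
print(url_fmt.format(revision="main", package="sklearn", path=path, lineno=lineno))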
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/doc/sphinxext/doi_role.py
doc/sphinxext/doi_role.py
""" doilinks ~~~~~~~~ Extension to add links to DOIs. With this extension you can use e.g. :doi:`10.1016/S0022-2836(05)80360-2` in your documents. This will create a link to a DOI resolver (``https://doi.org/10.1016/S0022-2836(05)80360-2``). The link caption will be the raw DOI. You can also give an explicit caption, e.g. :doi:`Basic local alignment search tool <10.1016/S0022-2836(05)80360-2>`. :copyright: Copyright 2015 Jon Lund Steffensen. Based on extlinks by the Sphinx team. :license: BSD. """ from docutils import nodes, utils from sphinx.util.nodes import split_explicit_title def reference_role(typ, rawtext, text, lineno, inliner, options={}, content=[]): text = utils.unescape(text) has_explicit_title, title, part = split_explicit_title(text) if typ in ["arXiv", "arxiv"]: full_url = "https://arxiv.org/abs/" + part if not has_explicit_title: title = "arXiv:" + part pnode = nodes.reference(title, title, internal=False, refuri=full_url) return [pnode], [] if typ in ["doi", "DOI"]: full_url = "https://doi.org/" + part if not has_explicit_title: title = "DOI:" + part pnode = nodes.reference(title, title, internal=False, refuri=full_url) return [pnode], [] def setup_link_role(app): app.add_role("arxiv", reference_role, override=True) app.add_role("arXiv", reference_role, override=True) app.add_role("doi", reference_role, override=True) app.add_role("DOI", reference_role, override=True) def setup(app): app.connect("builder-inited", setup_link_role) return {"version": "0.1", "parallel_read_safe": True}
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
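Ignoring the docutils plumbing, the `:doi:` and `:arxiv:` roles above simply prepend a resolver URL to the target text. A plain-Python sketch of that mapping (`reference_url` is a made-up name):

def reference_url(role, target):
    if role in ("arxiv", "arXiv"):
        return "https://arxiv.org/abs/" + target
    if role in ("doi", "DOI"):
        return "https://doi.org/" + target
    raise ValueError(f"unsupported role: {role}")


print(reference_url("doi", "10.1016/S0022-2836(05)80360-2"))
print(reference_url("arxiv", "1706.03762"))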
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/doc/sphinxext/dropdown_anchors.py
doc/sphinxext/dropdown_anchors.py
import re from docutils import nodes from sphinx.transforms.post_transforms import SphinxPostTransform from sphinx_design.dropdown import dropdown_main class DropdownAnchorAdder(SphinxPostTransform): """Insert anchor links to the sphinx-design dropdowns. Some of the dropdowns were originally headers that had automatic anchors, so we need to make sure that the old anchors still work. See the original implementation (in JS): https://github.com/scikit-learn/scikit-learn/pull/27409 The anchor links are inserted at the end of the node with class "sd-summary-text" which includes only the title text part of the dropdown (no icon, markers, etc). """ default_priority = 9999 # Apply later than everything else formats = ["html"] def run(self): """Run the post transformation.""" # Counter to store the duplicated summary text to add it as a suffix in the # anchor ID anchor_id_counters = {} for sd_dropdown in self.document.findall(dropdown_main): # Grab the summary text node sd_summary_text = sd_dropdown.next_node( lambda node: "sd-summary-text" in node.get("classes", []) ) # Concatenate the text of relevant nodes as the title text title_text = "".join(node.astext() for node in sd_summary_text.children) # The ID uses the first line, lowercased, with spaces replaced by dashes; # suffix the anchor ID with a counter if it already exists anchor_id = re.sub(r"\s+", "-", title_text.strip().split("\n")[0]).lower() if anchor_id in anchor_id_counters: anchor_id_counters[anchor_id] += 1 anchor_id = f"{anchor_id}-{anchor_id_counters[anchor_id]}" else: anchor_id_counters[anchor_id] = 1 sd_dropdown["ids"].append(anchor_id) # Create the anchor element and insert after the title text; we do this # directly with raw HTML anchor_html = ( f'<a class="headerlink" href="#{anchor_id}" ' 'title="Link to this dropdown">#</a>' ) anchor_node = nodes.raw("", anchor_html, format="html") sd_summary_text.append(anchor_node) def setup(app): app.add_post_transform(DropdownAnchorAdder)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
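The anchor IDs generated by the transform above follow a simple scheme: first line of the dropdown title, lowercased, whitespace collapsed to dashes, plus a numeric suffix when the same title occurs more than once. A standalone sketch of that scheme (`make_anchor_ids` is a made-up helper):

import re


def make_anchor_ids(titles):
    counters, ids = {}, []
    for title in titles:
        anchor_id = re.sub(r"\s+", "-", title.strip().split("\n")[0]).lower()
        if anchor_id in counters:
            counters[anchor_id] += 1
            anchor_id = f"{anchor_id}-{counters[anchor_id]}"
        else:
            counters[anchor_id] = 1
        ids.append(anchor_id)
    return ids


print(make_anchor_ids(["Mathematical details", "Mathematical details"]))
# ['mathematical-details', 'mathematical-details-2']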
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/.spin/cmds.py
.spin/cmds.py
import shutil import sys import click from spin.cmds import util @click.command() def clean(): """🪥 Clean build folder. Very rarely needed since meson-python recompiles as needed when sklearn is imported. One known use case where "spin clean" is useful: avoid compilation errors when switching from numpy<2 to numpy>=2 in the same conda environment or virtualenv. """ util.run([sys.executable, "-m", "pip", "uninstall", "scikit-learn", "-y"]) default_meson_build_dir = ( f"build/cp{sys.version_info.major}{sys.version_info.minor}" ) click.secho( f"removing default Meson build dir: {default_meson_build_dir}", bold=True, fg="bright_blue", ) shutil.rmtree(default_meson_build_dir, ignore_errors=True)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
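Besides uninstalling the package, `spin clean` above removes the default Meson build directory, whose name is derived from the running interpreter version. A tiny sketch of that naming:

import sys

default_meson_build_dir = f"build/cp{sys.version_info.major}{sys.version_info.minor}"
print(default_meson_build_dir)  # e.g. "build/cp312" on Python 3.12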
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/isotonic.py
sklearn/isotonic.py
"""Isotonic regression for obtaining monotonic fit to data.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import math import warnings from numbers import Real import numpy as np from scipy import interpolate, optimize from scipy.stats import spearmanr from sklearn._isotonic import _inplace_contiguous_isotonic_regression, _make_unique from sklearn.base import BaseEstimator, RegressorMixin, TransformerMixin, _fit_context from sklearn.utils import check_array, check_consistent_length, metadata_routing from sklearn.utils._param_validation import Interval, StrOptions, validate_params from sklearn.utils.fixes import parse_version, sp_base_version from sklearn.utils.validation import _check_sample_weight, check_is_fitted __all__ = ["IsotonicRegression", "check_increasing", "isotonic_regression"] @validate_params( { "x": ["array-like"], "y": ["array-like"], }, prefer_skip_nested_validation=True, ) def check_increasing(x, y): """Determine whether y is monotonically correlated with x. y is found increasing or decreasing with respect to x based on a Spearman correlation test. Parameters ---------- x : array-like of shape (n_samples,) Training data. y : array-like of shape (n_samples,) Training target. Returns ------- increasing_bool : boolean Whether the relationship is increasing or decreasing. Notes ----- The Spearman correlation coefficient is estimated from the data, and the sign of the resulting estimate is used as the result. In the event that the 95% confidence interval based on Fisher transform spans zero, a warning is raised. References ---------- Fisher transformation. Wikipedia. https://en.wikipedia.org/wiki/Fisher_transformation Examples -------- >>> from sklearn.isotonic import check_increasing >>> x, y = [1, 2, 3, 4, 5], [2, 4, 6, 8, 10] >>> check_increasing(x, y) np.True_ >>> y = [10, 8, 6, 4, 2] >>> check_increasing(x, y) np.False_ """ # Calculate Spearman rho estimate and set return accordingly. rho, _ = spearmanr(x, y) increasing_bool = rho >= 0 # Run Fisher transform to get the rho CI, but handle rho=+/-1 if rho not in [-1.0, 1.0] and len(x) > 3: F = 0.5 * math.log((1.0 + rho) / (1.0 - rho)) F_se = 1 / math.sqrt(len(x) - 3) # Use a 95% CI, i.e., +/-1.96 S.E. # https://en.wikipedia.org/wiki/Fisher_transformation rho_0 = math.tanh(F - 1.96 * F_se) rho_1 = math.tanh(F + 1.96 * F_se) # Warn if the CI spans zero. if np.sign(rho_0) != np.sign(rho_1): warnings.warn( "Confidence interval of the Spearman " "correlation coefficient spans zero. " "Determination of ``increasing`` may be " "suspect." ) return increasing_bool @validate_params( { "y": ["array-like"], "sample_weight": ["array-like", None], "y_min": [Interval(Real, None, None, closed="both"), None], "y_max": [Interval(Real, None, None, closed="both"), None], "increasing": ["boolean"], }, prefer_skip_nested_validation=True, ) def isotonic_regression( y, *, sample_weight=None, y_min=None, y_max=None, increasing=True ): """Solve the isotonic regression model. Read more in the :ref:`User Guide <isotonic>`. Parameters ---------- y : array-like of shape (n_samples,) The data. sample_weight : array-like of shape (n_samples,), default=None Weights on each point of the regression. If None, weight is set to 1 (equal weights). y_min : float, default=None Lower bound on the lowest predicted value (the minimum value may still be higher). If not set, defaults to -inf. y_max : float, default=None Upper bound on the highest predicted value (the maximum may still be lower). If not set, defaults to +inf. 
increasing : bool, default=True Whether to compute ``y_`` is increasing (if set to True) or decreasing (if set to False). Returns ------- y_ : ndarray of shape (n_samples,) Isotonic fit of y. References ---------- "Active set algorithms for isotonic regression; A unifying framework" by Michael J. Best and Nilotpal Chakravarti, section 3. Examples -------- >>> from sklearn.isotonic import isotonic_regression >>> isotonic_regression([5, 3, 1, 2, 8, 10, 7, 9, 6, 4]) array([2.75 , 2.75 , 2.75 , 2.75 , 7.33, 7.33, 7.33, 7.33, 7.33, 7.33]) """ y = check_array(y, ensure_2d=False, input_name="y", dtype=[np.float64, np.float32]) if sp_base_version >= parse_version("1.12.0"): res = optimize.isotonic_regression( y=y, weights=sample_weight, increasing=increasing ) y = np.asarray(res.x, dtype=y.dtype) else: # TODO: remove this branch when Scipy 1.12 is the minimum supported version # Also remove _inplace_contiguous_isotonic_regression. order = np.s_[:] if increasing else np.s_[::-1] y = np.array(y[order], dtype=y.dtype) sample_weight = _check_sample_weight(sample_weight, y, dtype=y.dtype, copy=True) sample_weight = np.ascontiguousarray(sample_weight[order]) _inplace_contiguous_isotonic_regression(y, sample_weight) y = y[order] if y_min is not None or y_max is not None: # Older versions of np.clip don't accept None as a bound, so use np.inf if y_min is None: y_min = -np.inf if y_max is None: y_max = np.inf np.clip(y, y_min, y_max, y) return y class IsotonicRegression(RegressorMixin, TransformerMixin, BaseEstimator): """Isotonic regression model. Read more in the :ref:`User Guide <isotonic>`. .. versionadded:: 0.13 Parameters ---------- y_min : float, default=None Lower bound on the lowest predicted value (the minimum value may still be higher). If not set, defaults to -inf. y_max : float, default=None Upper bound on the highest predicted value (the maximum may still be lower). If not set, defaults to +inf. increasing : bool or 'auto', default=True Determines whether the predictions should be constrained to increase or decrease with `X`. 'auto' will decide based on the Spearman correlation estimate's sign. out_of_bounds : {'nan', 'clip', 'raise'}, default='nan' Handles how `X` values outside of the training domain are handled during prediction. - 'nan', predictions will be NaN. - 'clip', predictions will be set to the value corresponding to the nearest train interval endpoint. - 'raise', a `ValueError` is raised. Attributes ---------- X_min_ : float Minimum value of input array `X_` for left bound. X_max_ : float Maximum value of input array `X_` for right bound. X_thresholds_ : ndarray of shape (n_thresholds,) Unique ascending `X` values used to interpolate the y = f(X) monotonic function. .. versionadded:: 0.24 y_thresholds_ : ndarray of shape (n_thresholds,) De-duplicated `y` values suitable to interpolate the y = f(X) monotonic function. .. versionadded:: 0.24 f_ : function The stepwise interpolating function that covers the input domain ``X``. increasing_ : bool Inferred value for ``increasing``. See Also -------- sklearn.linear_model.LinearRegression : Ordinary least squares Linear Regression. sklearn.ensemble.HistGradientBoostingRegressor : Gradient boosting that is a non-parametric model accepting monotonicity constraints. isotonic_regression : Function to solve the isotonic regression model. Notes ----- Ties are broken using the secondary method from de Leeuw, 1977. 
References ---------- Isotonic Median Regression: A Linear Programming Approach Nilotpal Chakravarti Mathematics of Operations Research Vol. 14, No. 2 (May, 1989), pp. 303-308 Isotone Optimization in R : Pool-Adjacent-Violators Algorithm (PAVA) and Active Set Methods de Leeuw, Hornik, Mair Journal of Statistical Software 2009 Correctness of Kruskal's algorithms for monotone regression with ties de Leeuw, Psychometrica, 1977 Examples -------- >>> from sklearn.datasets import make_regression >>> from sklearn.isotonic import IsotonicRegression >>> X, y = make_regression(n_samples=10, n_features=1, random_state=41) >>> iso_reg = IsotonicRegression().fit(X, y) >>> iso_reg.predict([.1, .2]) array([1.8628, 3.7256]) """ # T should have been called X __metadata_request__predict = {"T": metadata_routing.UNUSED} __metadata_request__transform = {"T": metadata_routing.UNUSED} _parameter_constraints: dict = { "y_min": [Interval(Real, None, None, closed="both"), None], "y_max": [Interval(Real, None, None, closed="both"), None], "increasing": ["boolean", StrOptions({"auto"})], "out_of_bounds": [StrOptions({"nan", "clip", "raise"})], } def __init__(self, *, y_min=None, y_max=None, increasing=True, out_of_bounds="nan"): self.y_min = y_min self.y_max = y_max self.increasing = increasing self.out_of_bounds = out_of_bounds def _check_input_data_shape(self, X): if not (X.ndim == 1 or (X.ndim == 2 and X.shape[1] == 1)): msg = ( "Isotonic regression input X should be a 1d array or " "2d array with 1 feature" ) raise ValueError(msg) def _build_f(self, X, y): """Build the f_ interp1d function.""" bounds_error = self.out_of_bounds == "raise" if len(y) == 1: # single y, constant prediction self.f_ = lambda x: y.repeat(x.shape) else: self.f_ = interpolate.interp1d( X, y, kind="linear", bounds_error=bounds_error ) def _build_y(self, X, y, sample_weight, trim_duplicates=True): """Build the y_ IsotonicRegression.""" self._check_input_data_shape(X) X = X.reshape(-1) # use 1d view # Determine increasing if auto-determination requested if self.increasing == "auto": self.increasing_ = check_increasing(X, y) else: self.increasing_ = self.increasing # If sample_weights is passed, removed zero-weight values and clean # order sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) mask = sample_weight > 0 X, y, sample_weight = X[mask], y[mask], sample_weight[mask] order = np.lexsort((y, X)) X, y, sample_weight = [array[order] for array in [X, y, sample_weight]] unique_X, unique_y, unique_sample_weight = _make_unique(X, y, sample_weight) X = unique_X y = isotonic_regression( unique_y, sample_weight=unique_sample_weight, y_min=self.y_min, y_max=self.y_max, increasing=self.increasing_, ) # Handle the left and right bounds on X self.X_min_, self.X_max_ = np.min(X), np.max(X) if trim_duplicates: # Remove unnecessary points for faster prediction keep_data = np.ones((len(y),), dtype=bool) # Aside from the 1st and last point, remove points whose y values # are equal to both the point before and the point after it. keep_data[1:-1] = np.logical_or( np.not_equal(y[1:-1], y[:-2]), np.not_equal(y[1:-1], y[2:]) ) return X[keep_data], y[keep_data] else: # The ability to turn off trim_duplicates is only used to it make # easier to unit test that removing duplicates in y does not have # any impact the resulting interpolation function (besides # prediction speed). return X, y @_fit_context(prefer_skip_nested_validation=True) def fit(self, X, y, sample_weight=None): """Fit the model using X, y as training data. 
Parameters ---------- X : array-like of shape (n_samples,) or (n_samples, 1) Training data. .. versionchanged:: 0.24 Also accepts 2d array with 1 feature. y : array-like of shape (n_samples,) Training target. sample_weight : array-like of shape (n_samples,), default=None Weights. If set to None, all weights will be set to 1 (equal weights). Returns ------- self : object Returns an instance of self. Notes ----- X is stored for future use, as :meth:`transform` needs X to interpolate new input data. """ check_params = dict(accept_sparse=False, ensure_2d=False) X = check_array( X, input_name="X", dtype=[np.float64, np.float32], **check_params ) y = check_array(y, input_name="y", dtype=X.dtype, **check_params) check_consistent_length(X, y, sample_weight) # Transform y by running the isotonic regression algorithm and # transform X accordingly. X, y = self._build_y(X, y, sample_weight) # It is necessary to store the non-redundant part of the training set # on the model to make it possible to support model persistence via # the pickle module as the object built by scipy.interp1d is not # picklable directly. self.X_thresholds_, self.y_thresholds_ = X, y # Build the interpolation function self._build_f(X, y) return self def _transform(self, T): """`_transform` is called by both `transform` and `predict` methods. Since `transform` is wrapped to output arrays of specific types (e.g. NumPy arrays, pandas DataFrame), we cannot make `predict` call `transform` directly. The above behaviour could be changed in the future, if we decide to output other type of arrays when calling `predict`. """ if hasattr(self, "X_thresholds_"): dtype = self.X_thresholds_.dtype else: dtype = np.float64 T = check_array(T, dtype=dtype, ensure_2d=False) self._check_input_data_shape(T) T = T.reshape(-1) # use 1d view if self.out_of_bounds == "clip": T = np.clip(T, self.X_min_, self.X_max_) res = self.f_(T) # on scipy 0.17, interp1d up-casts to float64, so we cast back res = res.astype(T.dtype) return res def transform(self, T): """Transform new data by linear interpolation. Parameters ---------- T : array-like of shape (n_samples,) or (n_samples, 1) Data to transform. .. versionchanged:: 0.24 Also accepts 2d array with 1 feature. Returns ------- y_pred : ndarray of shape (n_samples,) The transformed data. """ return self._transform(T) def predict(self, T): """Predict new data by linear interpolation. Parameters ---------- T : array-like of shape (n_samples,) or (n_samples, 1) Data to transform. Returns ------- y_pred : ndarray of shape (n_samples,) Transformed data. """ return self._transform(T) # We implement get_feature_names_out here instead of using # `ClassNamePrefixFeaturesOutMixin`` because `input_features` are ignored. # `input_features` are ignored because `IsotonicRegression` accepts 1d # arrays and the semantics of `feature_names_in_` are not clear for 1d arrays. def get_feature_names_out(self, input_features=None): """Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Ignored. Returns ------- feature_names_out : ndarray of str objects An ndarray with one string i.e. ["isotonicregression0"]. 
""" check_is_fitted(self, "f_") class_name = self.__class__.__name__.lower() return np.asarray([f"{class_name}0"], dtype=object) def __getstate__(self): """Pickle-protocol - return state of the estimator.""" state = super().__getstate__() # remove interpolation method state.pop("f_", None) return state def __setstate__(self, state): """Pickle-protocol - set state of the estimator. We need to rebuild the interpolation function. """ super().__setstate__(state) if hasattr(self, "X_thresholds_") and hasattr(self, "y_thresholds_"): self._build_f(self.X_thresholds_, self.y_thresholds_) def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.input_tags.one_d_array = True tags.input_tags.two_d_array = False return tags
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
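A short usage sketch of the estimator above, contrasting the `out_of_bounds='clip'` and `out_of_bounds='nan'` behaviours on points outside the training range (toy data, for illustration only):

import numpy as np

from sklearn.isotonic import IsotonicRegression

X = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
y = np.array([1.0, 3.0, 2.0, 6.0, 7.0])

clipping = IsotonicRegression(out_of_bounds="clip").fit(X, y)
nan_padding = IsotonicRegression(out_of_bounds="nan").fit(X, y)

T = [0.0, 2.5, 10.0]
print(clipping.predict(T))     # ends are clamped to the fitted [X_min_, X_max_] range
print(nan_padding.predict(T))  # values outside the training range become NaN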
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/discriminant_analysis.py
sklearn/discriminant_analysis.py
"""Linear and quadratic discriminant analysis.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import warnings from numbers import Integral, Real import numpy as np import scipy.linalg from scipy import linalg from sklearn.base import ( BaseEstimator, ClassifierMixin, ClassNamePrefixFeaturesOutMixin, TransformerMixin, _fit_context, ) from sklearn.covariance import empirical_covariance, ledoit_wolf, shrunk_covariance from sklearn.linear_model._base import LinearClassifierMixin from sklearn.preprocessing import StandardScaler from sklearn.utils._array_api import _expit, device, get_namespace, size from sklearn.utils._param_validation import HasMethods, Interval, StrOptions from sklearn.utils.extmath import softmax from sklearn.utils.multiclass import check_classification_targets, unique_labels from sklearn.utils.validation import check_is_fitted, validate_data __all__ = ["LinearDiscriminantAnalysis", "QuadraticDiscriminantAnalysis"] def _cov(X, shrinkage=None, covariance_estimator=None): """Estimate covariance matrix (using optional covariance_estimator). Parameters ---------- X : array-like of shape (n_samples, n_features) Input data. shrinkage : {'empirical', 'auto'} or float, default=None Shrinkage parameter, possible values: - None or 'empirical': no shrinkage (default). - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. - float between 0 and 1: fixed shrinkage parameter. Shrinkage parameter is ignored if `covariance_estimator` is not None. covariance_estimator : estimator, default=None If not None, `covariance_estimator` is used to estimate the covariance matrices instead of relying on the empirical covariance estimator (with potential shrinkage). The object should have a fit method and a ``covariance_`` attribute like the estimators in :mod:`sklearn.covariance``. If None the shrinkage parameter drives the estimate. .. versionadded:: 0.24 Returns ------- s : ndarray of shape (n_features, n_features) Estimated covariance matrix. """ if covariance_estimator is None: shrinkage = "empirical" if shrinkage is None else shrinkage if isinstance(shrinkage, str): if shrinkage == "auto": sc = StandardScaler() # standardize features X = sc.fit_transform(X) s = ledoit_wolf(X)[0] # rescale s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :] elif shrinkage == "empirical": s = empirical_covariance(X) elif isinstance(shrinkage, Real): s = shrunk_covariance(empirical_covariance(X), shrinkage) else: if shrinkage is not None and shrinkage != 0: raise ValueError( "covariance_estimator and shrinkage parameters " "are not None. Only one of the two can be set." ) covariance_estimator.fit(X) if not hasattr(covariance_estimator, "covariance_"): raise ValueError( "%s does not have a covariance_ attribute" % covariance_estimator.__class__.__name__ ) s = covariance_estimator.covariance_ return s def _class_means(X, y): """Compute class means. Parameters ---------- X : array-like of shape (n_samples, n_features) Input data. y : array-like of shape (n_samples,) or (n_samples, n_targets) Target values. Returns ------- means : array-like of shape (n_classes, n_features) Class means. 
""" xp, is_array_api_compliant = get_namespace(X) classes, y = xp.unique_inverse(y) means = xp.zeros((classes.shape[0], X.shape[1]), device=device(X), dtype=X.dtype) if is_array_api_compliant: for i in range(classes.shape[0]): means[i, :] = xp.mean(X[y == i], axis=0) else: # TODO: Explore the choice of using bincount + add.at as it seems sub optimal # from a performance-wise cnt = np.bincount(y) np.add.at(means, y, X) means /= cnt[:, None] return means def _class_cov(X, y, priors, shrinkage=None, covariance_estimator=None): """Compute weighted within-class covariance matrix. The per-class covariance are weighted by the class priors. Parameters ---------- X : array-like of shape (n_samples, n_features) Input data. y : array-like of shape (n_samples,) or (n_samples, n_targets) Target values. priors : array-like of shape (n_classes,) Class priors. shrinkage : 'auto' or float, default=None Shrinkage parameter, possible values: - None: no shrinkage (default). - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. - float between 0 and 1: fixed shrinkage parameter. Shrinkage parameter is ignored if `covariance_estimator` is not None. covariance_estimator : estimator, default=None If not None, `covariance_estimator` is used to estimate the covariance matrices instead of relying the empirical covariance estimator (with potential shrinkage). The object should have a fit method and a ``covariance_`` attribute like the estimators in sklearn.covariance. If None, the shrinkage parameter drives the estimate. .. versionadded:: 0.24 Returns ------- cov : array-like of shape (n_features, n_features) Weighted within-class covariance matrix """ classes = np.unique(y) cov = np.zeros(shape=(X.shape[1], X.shape[1])) for idx, group in enumerate(classes): Xg = X[y == group, :] cov += priors[idx] * np.atleast_2d(_cov(Xg, shrinkage, covariance_estimator)) return cov class DiscriminantAnalysisPredictionMixin: """Mixin class for QuadraticDiscriminantAnalysis and NearestCentroid.""" def decision_function(self, X): """Apply decision function to an array of samples. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Array of samples (test vectors). Returns ------- y_scores : ndarray of shape (n_samples,) or (n_samples, n_classes) Decision function values related to each class, per sample. In the two-class case, the shape is `(n_samples,)`, giving the log likelihood ratio of the positive class. """ y_scores = self._decision_function(X) if len(self.classes_) == 2: return y_scores[:, 1] - y_scores[:, 0] return y_scores def predict(self, X): """Perform classification on an array of vectors `X`. Returns the class label for each sample. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Input vectors, where `n_samples` is the number of samples and `n_features` is the number of features. Returns ------- y_pred : ndarray of shape (n_samples,) Class label for each sample. """ scores = self._decision_function(X) return self.classes_.take(scores.argmax(axis=1)) def predict_proba(self, X): """Estimate class probabilities. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Input data. Returns ------- y_proba : ndarray of shape (n_samples, n_classes) Probability estimate of the sample for each class in the model, where classes are ordered as they are in `self.classes_`. """ return np.exp(self.predict_log_proba(X)) def predict_log_proba(self, X): """Estimate log class probabilities. 
Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Input data. Returns ------- y_log_proba : ndarray of shape (n_samples, n_classes) Estimated log probabilities. """ scores = self._decision_function(X) log_likelihood = scores - scores.max(axis=1)[:, np.newaxis] return log_likelihood - np.log( np.exp(log_likelihood).sum(axis=1)[:, np.newaxis] ) class LinearDiscriminantAnalysis( ClassNamePrefixFeaturesOutMixin, LinearClassifierMixin, TransformerMixin, BaseEstimator, ): """Linear Discriminant Analysis. A classifier with a linear decision boundary, generated by fitting class conditional densities to the data and using Bayes' rule. The model fits a Gaussian density to each class, assuming that all classes share the same covariance matrix. The fitted model can also be used to reduce the dimensionality of the input by projecting it to the most discriminative directions, using the `transform` method. .. versionadded:: 0.17 For a comparison between :class:`~sklearn.discriminant_analysis.LinearDiscriminantAnalysis` and :class:`~sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis`, see :ref:`sphx_glr_auto_examples_classification_plot_lda_qda.py`. Read more in the :ref:`User Guide <lda_qda>`. Parameters ---------- solver : {'svd', 'lsqr', 'eigen'}, default='svd' Solver to use, possible values: - 'svd': Singular value decomposition (default). Does not compute the covariance matrix, therefore this solver is recommended for data with a large number of features. - 'lsqr': Least squares solution. Can be combined with shrinkage or custom covariance estimator. - 'eigen': Eigenvalue decomposition. Can be combined with shrinkage or custom covariance estimator. .. versionchanged:: 1.2 `solver="svd"` now has experimental Array API support. See the :ref:`Array API User Guide <array_api>` for more details. shrinkage : 'auto' or float, default=None Shrinkage parameter, possible values: - None: no shrinkage (default). - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. - float between 0 and 1: fixed shrinkage parameter. This should be left to None if `covariance_estimator` is used. Note that shrinkage works only with 'lsqr' and 'eigen' solvers. For a usage example, see :ref:`sphx_glr_auto_examples_classification_plot_lda.py`. priors : array-like of shape (n_classes,), default=None The class prior probabilities. By default, the class proportions are inferred from the training data. n_components : int, default=None Number of components (<= min(n_classes - 1, n_features)) for dimensionality reduction. If None, will be set to min(n_classes - 1, n_features). This parameter only affects the `transform` method. For a usage example, see :ref:`sphx_glr_auto_examples_decomposition_plot_pca_vs_lda.py`. store_covariance : bool, default=False If True, explicitly compute the weighted within-class covariance matrix when solver is 'svd'. The matrix is always computed and stored for the other solvers. .. versionadded:: 0.17 tol : float, default=1.0e-4 Absolute threshold for a singular value of X to be considered significant, used to estimate the rank of X. Dimensions whose singular values are non-significant are discarded. Only used if solver is 'svd'. .. versionadded:: 0.17 covariance_estimator : covariance estimator, default=None If not None, `covariance_estimator` is used to estimate the covariance matrices instead of relying on the empirical covariance estimator (with potential shrinkage). 
The object should have a fit method and a ``covariance_`` attribute like the estimators in :mod:`sklearn.covariance`. if None the shrinkage parameter drives the estimate. This should be left to None if `shrinkage` is used. Note that `covariance_estimator` works only with 'lsqr' and 'eigen' solvers. .. versionadded:: 0.24 Attributes ---------- coef_ : ndarray of shape (n_features,) or (n_classes, n_features) Weight vector(s). intercept_ : ndarray of shape (n_classes,) Intercept term. covariance_ : array-like of shape (n_features, n_features) Weighted within-class covariance matrix. It corresponds to `sum_k prior_k * C_k` where `C_k` is the covariance matrix of the samples in class `k`. The `C_k` are estimated using the (potentially shrunk) biased estimator of covariance. If solver is 'svd', only exists when `store_covariance` is True. explained_variance_ratio_ : ndarray of shape (n_components,) Percentage of variance explained by each of the selected components. If ``n_components`` is not set then all components are stored and the sum of explained variances is equal to 1.0. Only available when eigen or svd solver is used. means_ : array-like of shape (n_classes, n_features) Class-wise means. priors_ : array-like of shape (n_classes,) Class priors (sum to 1). scalings_ : array-like of shape (rank, n_classes - 1) Scaling of the features in the space spanned by the class centroids. Only available for 'svd' and 'eigen' solvers. xbar_ : array-like of shape (n_features,) Overall mean. Only present if solver is 'svd'. classes_ : array-like of shape (n_classes,) Unique class labels. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- QuadraticDiscriminantAnalysis : Quadratic Discriminant Analysis. Examples -------- >>> import numpy as np >>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> y = np.array([1, 1, 1, 2, 2, 2]) >>> clf = LinearDiscriminantAnalysis() >>> clf.fit(X, y) LinearDiscriminantAnalysis() >>> print(clf.predict([[-0.8, -1]])) [1] """ _parameter_constraints: dict = { "solver": [StrOptions({"svd", "lsqr", "eigen"})], "shrinkage": [StrOptions({"auto"}), Interval(Real, 0, 1, closed="both"), None], "n_components": [Interval(Integral, 1, None, closed="left"), None], "priors": ["array-like", None], "store_covariance": ["boolean"], "tol": [Interval(Real, 0, None, closed="left")], "covariance_estimator": [HasMethods("fit"), None], } def __init__( self, solver="svd", shrinkage=None, priors=None, n_components=None, store_covariance=False, tol=1e-4, covariance_estimator=None, ): self.solver = solver self.shrinkage = shrinkage self.priors = priors self.n_components = n_components self.store_covariance = store_covariance # used only in svd solver self.tol = tol # used only in svd solver self.covariance_estimator = covariance_estimator def _solve_lstsq(self, X, y, shrinkage, covariance_estimator): """Least squares solver. The least squares solver computes a straightforward solution of the optimal decision rule based directly on the discriminant functions. It can only be used for classification (with any covariance estimator), because estimation of eigenvectors is not performed. Therefore, dimensionality reduction with the transform is not supported. 
Parameters ---------- X : array-like of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) or (n_samples, n_classes) Target values. shrinkage : 'auto', float or None Shrinkage parameter, possible values: - None: no shrinkage. - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. - float between 0 and 1: fixed shrinkage parameter. Shrinkage parameter is ignored if `covariance_estimator` is not None covariance_estimator : estimator, default=None If not None, `covariance_estimator` is used to estimate the covariance matrices instead of relying the empirical covariance estimator (with potential shrinkage). The object should have a fit method and a ``covariance_`` attribute like the estimators in sklearn.covariance. if None the shrinkage parameter drives the estimate. .. versionadded:: 0.24 Notes ----- This solver is based on [1]_, section 2.6.2, pp. 39-41. References ---------- .. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification (Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN 0-471-05669-3. """ self.means_ = _class_means(X, y) self.covariance_ = _class_cov( X, y, self.priors_, shrinkage, covariance_estimator ) self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T self.intercept_ = -0.5 * np.diag(np.dot(self.means_, self.coef_.T)) + np.log( self.priors_ ) def _solve_eigen(self, X, y, shrinkage, covariance_estimator): """Eigenvalue solver. The eigenvalue solver computes the optimal solution of the Rayleigh coefficient (basically the ratio of between class scatter to within class scatter). This solver supports both classification and dimensionality reduction (with any covariance estimator). Parameters ---------- X : array-like of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) or (n_samples, n_targets) Target values. shrinkage : 'auto', float or None Shrinkage parameter, possible values: - None: no shrinkage. - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. - float between 0 and 1: fixed shrinkage constant. Shrinkage parameter is ignored if `covariance_estimator` is not None covariance_estimator : estimator, default=None If not None, `covariance_estimator` is used to estimate the covariance matrices instead of relying the empirical covariance estimator (with potential shrinkage). The object should have a fit method and a ``covariance_`` attribute like the estimators in sklearn.covariance. if None the shrinkage parameter drives the estimate. .. versionadded:: 0.24 Notes ----- This solver is based on [1]_, section 3.8.3, pp. 121-124. References ---------- .. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification (Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN 0-471-05669-3. """ self.means_ = _class_means(X, y) self.covariance_ = _class_cov( X, y, self.priors_, shrinkage, covariance_estimator ) Sw = self.covariance_ # within scatter St = _cov(X, shrinkage, covariance_estimator) # total scatter Sb = St - Sw # between scatter evals, evecs = linalg.eigh(Sb, Sw) self.explained_variance_ratio_ = np.sort(evals / np.sum(evals))[::-1][ : self._max_components ] evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors self.scalings_ = evecs self.coef_ = np.dot(self.means_, evecs).dot(evecs.T) self.intercept_ = -0.5 * np.diag(np.dot(self.means_, self.coef_.T)) + np.log( self.priors_ ) def _solve_svd(self, X, y): """SVD solver. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data. 
y : array-like of shape (n_samples,) or (n_samples, n_targets) Target values. """ xp, is_array_api_compliant = get_namespace(X) if is_array_api_compliant: svd = xp.linalg.svd else: svd = scipy.linalg.svd n_samples, _ = X.shape n_classes = self.classes_.shape[0] self.means_ = _class_means(X, y) if self.store_covariance: self.covariance_ = _class_cov(X, y, self.priors_) Xc = [] for idx, group in enumerate(self.classes_): Xg = X[y == group] Xc.append(Xg - self.means_[idx, :]) self.xbar_ = self.priors_ @ self.means_ Xc = xp.concat(Xc, axis=0) # 1) within (univariate) scaling by with classes std-dev std = xp.std(Xc, axis=0) # avoid division by zero in normalization std[std == 0] = 1.0 fac = xp.asarray(1.0 / (n_samples - n_classes), dtype=X.dtype, device=device(X)) # 2) Within variance scaling X = xp.sqrt(fac) * (Xc / std) # SVD of centered (within)scaled data _, S, Vt = svd(X, full_matrices=False) rank = xp.sum(xp.astype(S > self.tol, xp.int32)) # Scaling of within covariance is: V' 1/S scalings = (Vt[:rank, :] / std).T / S[:rank] fac = 1.0 if n_classes == 1 else 1.0 / (n_classes - 1) # 3) Between variance scaling # Scale weighted centers X = ( (xp.sqrt((n_samples * self.priors_) * fac)) * (self.means_ - self.xbar_).T ).T @ scalings # Centers are living in a space with n_classes-1 dim (maximum) # Use SVD to find projection in the space spanned by the # (n_classes) centers _, S, Vt = svd(X, full_matrices=False) if self._max_components == 0: self.explained_variance_ratio_ = xp.empty((0,), dtype=S.dtype) else: self.explained_variance_ratio_ = (S**2 / xp.sum(S**2))[ : self._max_components ] rank = xp.sum(xp.astype(S > self.tol * S[0], xp.int32)) self.scalings_ = scalings @ Vt.T[:, :rank] coef = (self.means_ - self.xbar_) @ self.scalings_ self.intercept_ = -0.5 * xp.sum(coef**2, axis=1) + xp.log(self.priors_) self.coef_ = coef @ self.scalings_.T self.intercept_ -= self.xbar_ @ self.coef_.T @_fit_context( # LinearDiscriminantAnalysis.covariance_estimator is not validated yet prefer_skip_nested_validation=False ) def fit(self, X, y): """Fit the Linear Discriminant Analysis model. .. versionchanged:: 0.19 `store_covariance` and `tol` has been moved to main constructor. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) Target values. Returns ------- self : object Fitted estimator. """ xp, _ = get_namespace(X) X, y = validate_data( self, X, y, ensure_min_samples=2, dtype=[xp.float64, xp.float32] ) self.classes_ = unique_labels(y) n_samples, n_features = X.shape n_classes = self.classes_.shape[0] if n_samples == n_classes: raise ValueError( "The number of samples must be more than the number of classes." ) if self.priors is None: # estimate priors from sample _, cnts = xp.unique_counts(y) # non-negative ints self.priors_ = xp.astype(cnts, X.dtype) / float(n_samples) else: self.priors_ = xp.asarray(self.priors, dtype=X.dtype) if xp.any(self.priors_ < 0): raise ValueError("priors must be non-negative") if xp.abs(xp.sum(self.priors_) - 1.0) > 1e-5: warnings.warn("The priors do not sum to 1. Renormalizing", UserWarning) self.priors_ = self.priors_ / self.priors_.sum() # Maximum number of components no matter what n_components is # specified: max_components = min(n_classes - 1, n_features) if self.n_components is None: self._max_components = max_components else: if self.n_components > max_components: raise ValueError( "n_components cannot be larger than min(n_features, n_classes - 1)." 
) self._max_components = self.n_components if self.solver == "svd": if self.shrinkage is not None: raise NotImplementedError("shrinkage not supported with 'svd' solver.") if self.covariance_estimator is not None: raise ValueError( "covariance estimator " "is not supported " "with svd solver. Try another solver" ) self._solve_svd(X, y) elif self.solver == "lsqr": self._solve_lstsq( X, y, shrinkage=self.shrinkage, covariance_estimator=self.covariance_estimator, ) elif self.solver == "eigen": self._solve_eigen( X, y, shrinkage=self.shrinkage, covariance_estimator=self.covariance_estimator, ) if size(self.classes_) == 2: # treat binary case as a special case coef_ = xp.asarray(self.coef_[1, :] - self.coef_[0, :], dtype=X.dtype) self.coef_ = xp.reshape(coef_, (1, -1)) intercept_ = xp.asarray( self.intercept_[1] - self.intercept_[0], dtype=X.dtype ) self.intercept_ = xp.reshape(intercept_, (1,)) self._n_features_out = self._max_components return self def transform(self, X): """Project data to maximize class separation. Parameters ---------- X : array-like of shape (n_samples, n_features) Input data. Returns ------- X_new : ndarray of shape (n_samples, n_components) or \ (n_samples, min(rank, n_components)) Transformed data. In the case of the 'svd' solver, the shape is (n_samples, min(rank, n_components)). """ if self.solver == "lsqr": raise NotImplementedError( "transform not implemented for 'lsqr' solver (use 'svd' or 'eigen')." ) check_is_fitted(self) X = validate_data(self, X, reset=False) if self.solver == "svd": X_new = (X - self.xbar_) @ self.scalings_ elif self.solver == "eigen": X_new = X @ self.scalings_ return X_new[:, : self._max_components] def predict_proba(self, X): """Estimate probability. Parameters ---------- X : array-like of shape (n_samples, n_features) Input data. Returns ------- C : ndarray of shape (n_samples, n_classes) Estimated probabilities. """ check_is_fitted(self) xp, _ = get_namespace(X) decision = self.decision_function(X) if size(self.classes_) == 2: proba = _expit(decision, xp) return xp.stack([1 - proba, proba], axis=1) else: return softmax(decision) def predict_log_proba(self, X): """Estimate log probability. Parameters ---------- X : array-like of shape (n_samples, n_features) Input data. Returns ------- C : ndarray of shape (n_samples, n_classes) Estimated log probabilities. """ xp, _ = get_namespace(X) prediction = self.predict_proba(X) smallest_normal = xp.finfo(prediction.dtype).smallest_normal prediction[prediction == 0.0] += smallest_normal return xp.log(prediction) def decision_function(self, X): """Apply decision function to an array of samples. The decision function is equal (up to a constant factor) to the log-posterior of the model, i.e. `log p(y = k | x)`. In a binary classification setting this instead corresponds to the difference `log p(y = 1 | x) - log p(y = 0 | x)`. See :ref:`lda_qda_math`. Parameters ---------- X : array-like of shape (n_samples, n_features) Array of samples (test vectors). Returns ------- y_scores : ndarray of shape (n_samples,) or (n_samples, n_classes) Decision function values related to each class, per sample. In the two-class case, the shape is `(n_samples,)`, giving the log likelihood ratio of the positive class. """ # Only overrides for the docstring. 
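        # Added note (not in the original source): the inherited
        # LinearClassifierMixin implementation computes the linear scores
        # X @ self.coef_.T + self.intercept_, which are the (constant-shifted)
        # class log-posteriors described above; with two classes the result is
        # a single 1d array of log likelihood ratios for the positive class.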
return super().decision_function(X) def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.array_api_support = True return tags class QuadraticDiscriminantAnalysis( DiscriminantAnalysisPredictionMixin, ClassifierMixin, BaseEstimator ): """Quadratic Discriminant Analysis. A classifier with a quadratic decision boundary, generated by fitting class conditional densities to the data and using Bayes' rule. The model fits a Gaussian density to each class. .. versionadded:: 0.17 For a comparison between :class:`~sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis` and :class:`~sklearn.discriminant_analysis.LinearDiscriminantAnalysis`, see :ref:`sphx_glr_auto_examples_classification_plot_lda_qda.py`. Read more in the :ref:`User Guide <lda_qda>`. Parameters ---------- solver : {'svd', 'eigen'}, default='svd' Solver to use, possible values: - 'svd': Singular value decomposition (default). Does not compute the covariance matrix, therefore this solver is recommended for data with a large number of features. - 'eigen': Eigenvalue decomposition. Can be combined with shrinkage or custom covariance estimator. shrinkage : 'auto' or float, default=None Shrinkage parameter, possible values: - None: no shrinkage (default). - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. - float between 0 and 1: fixed shrinkage parameter. Enabling shrinkage is expected to improve the model when some classes have a relatively small number of training data points compared to the number of features by mitigating overfitting during the covariance estimation step. This should be left to `None` if `covariance_estimator` is used. Note that shrinkage works only with 'eigen' solver. priors : array-like of shape (n_classes,), default=None Class priors. By default, the class proportions are inferred from the training data. reg_param : float, default=0.0 Regularizes the per-class covariance estimates by transforming S2 as ``S2 = (1 - reg_param) * S2 + reg_param * np.eye(n_features)``, where S2 corresponds to the `scaling_` attribute of a given class. store_covariance : bool, default=False If True, the class covariance matrices are explicitly computed and stored in the `self.covariance_` attribute. .. versionadded:: 0.17 tol : float, default=1.0e-4 Absolute threshold for the covariance matrix to be considered rank deficient after applying some regularization (see `reg_param`) to each `Sk` where `Sk` represents covariance matrix for k-th class. This parameter does not affect the predictions. It controls when a warning is raised if the covariance matrix is not full rank. .. versionadded:: 0.17 covariance_estimator : covariance estimator, default=None If not None, `covariance_estimator` is used to estimate the covariance
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
true
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/calibration.py
sklearn/calibration.py
"""Methods for calibrating predicted probabilities.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import warnings from functools import partial from inspect import signature from math import log from numbers import Integral, Real import numpy as np from scipy.optimize import minimize, minimize_scalar from scipy.special import expit from sklearn._loss import HalfBinomialLoss, HalfMultinomialLoss from sklearn.base import ( BaseEstimator, ClassifierMixin, MetaEstimatorMixin, RegressorMixin, _fit_context, clone, ) from sklearn.externals import array_api_extra as xpx from sklearn.frozen import FrozenEstimator from sklearn.isotonic import IsotonicRegression from sklearn.model_selection import LeaveOneOut, check_cv, cross_val_predict from sklearn.preprocessing import LabelEncoder, label_binarize from sklearn.svm import LinearSVC from sklearn.utils import Bunch, _safe_indexing, column_or_1d, get_tags, indexable from sklearn.utils._array_api import ( _convert_to_numpy, _half_multinomial_loss, _is_numpy_namespace, get_namespace, get_namespace_and_device, move_to, ) from sklearn.utils._param_validation import ( HasMethods, Interval, StrOptions, validate_params, ) from sklearn.utils._plotting import ( _BinaryClassifierCurveDisplayMixin, _validate_style_kwargs, ) from sklearn.utils._response import _get_response_values, _process_predict_proba from sklearn.utils.extmath import softmax from sklearn.utils.metadata_routing import ( MetadataRouter, MethodMapping, _routing_enabled, process_routing, ) from sklearn.utils.multiclass import check_classification_targets from sklearn.utils.parallel import Parallel, delayed from sklearn.utils.validation import ( _check_method_params, _check_pos_label_consistency, _check_response_method, _check_sample_weight, _num_samples, check_array, check_consistent_length, check_is_fitted, ) class CalibratedClassifierCV(ClassifierMixin, MetaEstimatorMixin, BaseEstimator): """Calibrate probabilities using isotonic, sigmoid, or temperature scaling. This class uses cross-validation to both estimate the parameters of a classifier and subsequently calibrate a classifier. With `ensemble=True`, for each cv split it fits a copy of the base estimator to the training subset, and calibrates it using the testing subset. For prediction, predicted probabilities are averaged across these individual calibrated classifiers. When `ensemble=False`, cross-validation is used to obtain unbiased predictions, via :func:`~sklearn.model_selection.cross_val_predict`, which are then used for calibration. For prediction, the base estimator, trained using all the data, is used. This is the prediction method implemented when `probabilities=True` for :class:`~sklearn.svm.SVC` and :class:`~sklearn.svm.NuSVC` estimators (see :ref:`User Guide <scores_probabilities>` for details). Already fitted classifiers can be calibrated by wrapping the model in a :class:`~sklearn.frozen.FrozenEstimator`. In this case all provided data is used for calibration. The user has to take care manually that data for model fitting and calibration are disjoint. The calibration is based on the :term:`decision_function` method of the `estimator` if it exists, else on :term:`predict_proba`. Read more in the :ref:`User Guide <calibration>`. 
In order to learn more on the CalibratedClassifierCV class, see the following calibration examples: :ref:`sphx_glr_auto_examples_calibration_plot_calibration.py`, :ref:`sphx_glr_auto_examples_calibration_plot_calibration_curve.py`, and :ref:`sphx_glr_auto_examples_calibration_plot_calibration_multiclass.py`. Parameters ---------- estimator : estimator instance, default=None The classifier whose output need to be calibrated to provide more accurate `predict_proba` outputs. The default classifier is a :class:`~sklearn.svm.LinearSVC`. .. versionadded:: 1.2 method : {'sigmoid', 'isotonic', 'temperature'}, default='sigmoid' The method to use for calibration. Can be: - 'sigmoid', which corresponds to Platt's method (i.e. a binary logistic regression model). - 'isotonic', which is a non-parametric approach. - 'temperature', temperature scaling. Sigmoid and isotonic calibration methods natively support only binary classifiers and extend to multi-class classification using a One-vs-Rest (OvR) strategy with post-hoc renormalization, i.e., adjusting the probabilities after calibration to ensure they sum up to 1. In contrast, temperature scaling naturally supports multi-class calibration by applying `softmax(classifier_logits/T)` with a value of `T` (temperature) that optimizes the log loss. For very uncalibrated classifiers on very imbalanced datasets, sigmoid calibration might be preferred because it fits an additional intercept parameter. This helps shift decision boundaries appropriately when the classifier being calibrated is biased towards the majority class. Isotonic calibration is not recommended when the number of calibration samples is too low ``(≪1000)`` since it then tends to overfit. .. versionchanged:: 1.8 Added option 'temperature'. cv : int, cross-validation generator, or iterable, default=None Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 5-fold cross-validation, - integer, to specify the number of folds. - :term:`CV splitter`, - An iterable yielding (train, test) splits as arrays of indices. For integer/None inputs, if ``y`` is binary or multiclass, :class:`~sklearn.model_selection.StratifiedKFold` is used. If ``y`` is neither binary nor multiclass, :class:`~sklearn.model_selection.KFold` is used. Refer to the :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. .. versionchanged:: 0.22 ``cv`` default value if None changed from 3-fold to 5-fold. n_jobs : int, default=None Number of jobs to run in parallel. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. Base estimator clones are fitted in parallel across cross-validation iterations. See :term:`Glossary <n_jobs>` for more details. .. versionadded:: 0.24 ensemble : bool, or "auto", default="auto" Determines how the calibrator is fitted. "auto" will use `False` if the `estimator` is a :class:`~sklearn.frozen.FrozenEstimator`, and `True` otherwise. If `True`, the `estimator` is fitted using training data, and calibrated using testing data, for each `cv` fold. The final estimator is an ensemble of `n_cv` fitted classifier and calibrator pairs, where `n_cv` is the number of cross-validation folds. The output is the average predicted probabilities of all pairs. If `False`, `cv` is used to compute unbiased predictions, via :func:`~sklearn.model_selection.cross_val_predict`, which are then used for calibration. 
At prediction time, the classifier used is the `estimator` trained on all the data. Note that this method is also internally implemented in :mod:`sklearn.svm` estimators with the `probabilities=True` parameter. .. versionadded:: 0.24 .. versionchanged:: 1.6 `"auto"` option is added and is the default. Attributes ---------- classes_ : ndarray of shape (n_classes,) The class labels. n_features_in_ : int Number of features seen during :term:`fit`. Only defined if the underlying estimator exposes such an attribute when fit. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Only defined if the underlying estimator exposes such an attribute when fit. .. versionadded:: 1.0 calibrated_classifiers_ : list (len() equal to cv or 1 if `ensemble=False`) The list of classifier and calibrator pairs. - When `ensemble=True`, `n_cv` fitted `estimator` and calibrator pairs. `n_cv` is the number of cross-validation folds. - When `ensemble=False`, the `estimator`, fitted on all the data, and fitted calibrator. .. versionchanged:: 0.24 Single calibrated classifier case when `ensemble=False`. See Also -------- calibration_curve : Compute true and predicted probabilities for a calibration curve. References ---------- .. [1] B. Zadrozny & C. Elkan. `Obtaining calibrated probability estimates from decision trees and naive Bayesian classifiers <https://cseweb.ucsd.edu/~elkan/calibrated.pdf>`_, ICML 2001. .. [2] B. Zadrozny & C. Elkan. `Transforming Classifier Scores into Accurate Multiclass Probability Estimates <https://web.archive.org/web/20060720141520id_/http://www.research.ibm.com:80/people/z/zadrozny/kdd2002-Transf.pdf>`_, KDD 2002. .. [3] J. Platt. `Probabilistic Outputs for Support Vector Machines and Comparisons to Regularized Likelihood Methods <https://www.researchgate.net/profile/John-Platt-2/publication/2594015_Probabilistic_Outputs_for_Support_Vector_Machines_and_Comparisons_to_Regularized_Likelihood_Methods/links/004635154cff5262d6000000/Probabilistic-Outputs-for-Support-Vector-Machines-and-Comparisons-to-Regularized-Likelihood-Methods.pdf>`_, 1999. .. [4] A. Niculescu-Mizil & R. Caruana. `Predicting Good Probabilities with Supervised Learning <https://www.cs.cornell.edu/~alexn/papers/calibration.icml05.crc.rev3.pdf>`_, ICML 2005. .. [5] Chuan Guo, Geoff Pleiss, Yu Sun, Kilian Q. Weinberger. :doi:`On Calibration of Modern Neural Networks<10.48550/arXiv.1706.04599>`. Proceedings of the 34th International Conference on Machine Learning, PMLR 70:1321-1330, 2017. Examples -------- >>> from sklearn.datasets import make_classification >>> from sklearn.naive_bayes import GaussianNB >>> from sklearn.calibration import CalibratedClassifierCV >>> X, y = make_classification(n_samples=100, n_features=2, ... n_redundant=0, random_state=42) >>> base_clf = GaussianNB() >>> calibrated_clf = CalibratedClassifierCV(base_clf, cv=3) >>> calibrated_clf.fit(X, y) CalibratedClassifierCV(...) >>> len(calibrated_clf.calibrated_classifiers_) 3 >>> calibrated_clf.predict_proba(X)[:5, :] array([[0.110, 0.889], [0.072, 0.927], [0.928, 0.072], [0.928, 0.072], [0.072, 0.928]]) >>> from sklearn.model_selection import train_test_split >>> X, y = make_classification(n_samples=100, n_features=2, ... n_redundant=0, random_state=42) >>> X_train, X_calib, y_train, y_calib = train_test_split( ... X, y, random_state=42 ... 
) >>> base_clf = GaussianNB() >>> base_clf.fit(X_train, y_train) GaussianNB() >>> from sklearn.frozen import FrozenEstimator >>> calibrated_clf = CalibratedClassifierCV(FrozenEstimator(base_clf)) >>> calibrated_clf.fit(X_calib, y_calib) CalibratedClassifierCV(...) >>> len(calibrated_clf.calibrated_classifiers_) 1 >>> calibrated_clf.predict_proba([[-0.5, 0.5]]) array([[0.936, 0.063]]) """ _parameter_constraints: dict = { "estimator": [ HasMethods(["fit", "predict_proba"]), HasMethods(["fit", "decision_function"]), None, ], "method": [StrOptions({"isotonic", "sigmoid", "temperature"})], "cv": ["cv_object"], "n_jobs": [Integral, None], "ensemble": ["boolean", StrOptions({"auto"})], } def __init__( self, estimator=None, *, method="sigmoid", cv=None, n_jobs=None, ensemble="auto", ): self.estimator = estimator self.method = method self.cv = cv self.n_jobs = n_jobs self.ensemble = ensemble def _get_estimator(self): """Resolve which estimator to return (default is LinearSVC)""" if self.estimator is None: # we want all classifiers that don't expose a random_state # to be deterministic (and we don't want to expose this one). estimator = LinearSVC(random_state=0) if _routing_enabled(): estimator.set_fit_request(sample_weight=True) else: estimator = self.estimator return estimator @_fit_context( # CalibratedClassifierCV.estimator is not validated yet prefer_skip_nested_validation=False ) def fit(self, X, y, sample_weight=None, **fit_params): """Fit the calibrated model. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) Target values. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If None, then samples are equally weighted. **fit_params : dict Parameters to pass to the `fit` method of the underlying classifier. Returns ------- self : object Returns an instance of self. """ check_classification_targets(y) X, y = indexable(X, y) estimator = self._get_estimator() _ensemble = self.ensemble if _ensemble == "auto": _ensemble = not isinstance(estimator, FrozenEstimator) self.calibrated_classifiers_ = [] # Set `classes_` using all `y` label_encoder_ = LabelEncoder().fit(y) self.classes_ = label_encoder_.classes_ if self.method == "temperature" and isinstance(y[0], str): # for temperature scaling if `y` contains strings then encode it # right here to avoid fitting LabelEncoder again within the # `_fit_calibrator` function. y = label_encoder_.transform(y=y) if _routing_enabled(): routed_params = process_routing( self, "fit", sample_weight=sample_weight, **fit_params, ) else: # sample_weight checks fit_parameters = signature(estimator.fit).parameters supports_sw = "sample_weight" in fit_parameters if sample_weight is not None and not supports_sw: estimator_name = type(estimator).__name__ warnings.warn( f"Since {estimator_name} does not appear to accept" " sample_weight, sample weights will only be used for the" " calibration itself. This can be caused by a limitation of" " the current scikit-learn API. See the following issue for" " more details:" " https://github.com/scikit-learn/scikit-learn/issues/21134." " Be warned that the result of the calibration is likely to be" " incorrect." 
) routed_params = Bunch() routed_params.splitter = Bunch(split={}) # no routing for splitter routed_params.estimator = Bunch(fit=fit_params) if sample_weight is not None and supports_sw: routed_params.estimator.fit["sample_weight"] = sample_weight xp, is_array_api, device_ = get_namespace_and_device(X) if is_array_api: y, sample_weight = move_to(y, sample_weight, xp=xp, device=device_) # Check that each cross-validation fold can have at least one # example per class if isinstance(self.cv, int): n_folds = self.cv elif hasattr(self.cv, "n_splits"): n_folds = self.cv.n_splits else: n_folds = None if n_folds and xp.any(xp.unique_counts(y)[1] < n_folds): raise ValueError( f"Requesting {n_folds}-fold " "cross-validation but provided less than " f"{n_folds} examples for at least one class." ) if isinstance(self.cv, LeaveOneOut): raise ValueError( "LeaveOneOut cross-validation does not allow" "all classes to be present in test splits. " "Please use a cross-validation generator that allows " "all classes to appear in every test and train split." ) cv = check_cv(self.cv, y, classifier=True) if _ensemble: parallel = Parallel(n_jobs=self.n_jobs) self.calibrated_classifiers_ = parallel( delayed(_fit_classifier_calibrator_pair)( clone(estimator), X, y, train=train, test=test, method=self.method, classes=self.classes_, xp=xp, sample_weight=sample_weight, fit_params=routed_params.estimator.fit, ) for train, test in cv.split(X, y, **routed_params.splitter.split) ) else: this_estimator = clone(estimator) method_name = _check_response_method( this_estimator, ["decision_function", "predict_proba"], ).__name__ predictions = cross_val_predict( estimator=this_estimator, X=X, y=y, cv=cv, method=method_name, n_jobs=self.n_jobs, params=routed_params.estimator.fit, ) if self.classes_.shape[0] == 2: # Ensure shape (n_samples, 1) in the binary case if method_name == "predict_proba": # Select the probability column of the positive class predictions = _process_predict_proba( y_pred=predictions, target_type="binary", classes=self.classes_, pos_label=self.classes_[1], ) predictions = predictions.reshape(-1, 1) if sample_weight is not None: # Check that the sample_weight dtype is consistent with the # predictions to avoid unintentional upcasts. sample_weight = _check_sample_weight( sample_weight, predictions, dtype=predictions.dtype ) this_estimator.fit(X, y, **routed_params.estimator.fit) # Note: Here we don't pass on fit_params because the supported # calibrators don't support fit_params anyway calibrated_classifier = _fit_calibrator( this_estimator, predictions, y, self.classes_, self.method, xp=xp, sample_weight=sample_weight, ) self.calibrated_classifiers_.append(calibrated_classifier) first_clf = self.calibrated_classifiers_[0].estimator if hasattr(first_clf, "n_features_in_"): self.n_features_in_ = first_clf.n_features_in_ if hasattr(first_clf, "feature_names_in_"): self.feature_names_in_ = first_clf.feature_names_in_ return self def predict_proba(self, X): """Calibrated probabilities of classification. This function returns calibrated probabilities of classification according to each class on an array of test vectors X. Parameters ---------- X : array-like of shape (n_samples, n_features) The samples, as accepted by `estimator.predict_proba`. Returns ------- C : ndarray of shape (n_samples, n_classes) The predicted probas. 
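        A rough illustration (an added sketch, not part of the original
        docstring; the toy data is an assumption):

        >>> import numpy as np
        >>> from sklearn.datasets import make_classification
        >>> from sklearn.naive_bayes import GaussianNB
        >>> from sklearn.calibration import CalibratedClassifierCV
        >>> X, y = make_classification(n_samples=100, random_state=0)
        >>> clf = CalibratedClassifierCV(GaussianNB(), cv=3).fit(X, y)
        >>> proba = clf.predict_proba(X[:5])
        >>> proba.shape
        (5, 2)
        >>> bool(np.allclose(proba.sum(axis=1), 1.0))
        True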
""" check_is_fitted(self) # Compute the arithmetic mean of the predictions of the calibrated # classifiers xp, _, device_ = get_namespace_and_device(X) mean_proba = xp.zeros((_num_samples(X), self.classes_.shape[0]), device=device_) for calibrated_classifier in self.calibrated_classifiers_: proba = calibrated_classifier.predict_proba(X) mean_proba += proba mean_proba /= len(self.calibrated_classifiers_) return mean_proba def predict(self, X): """Predict the target of new samples. The predicted class is the class that has the highest probability, and can thus be different from the prediction of the uncalibrated classifier. Parameters ---------- X : array-like of shape (n_samples, n_features) The samples, as accepted by `estimator.predict`. Returns ------- C : ndarray of shape (n_samples,) The predicted class. """ xp, _ = get_namespace(X) check_is_fitted(self) class_indices = xp.argmax(self.predict_proba(X), axis=1) if isinstance(self.classes_[0], str): class_indices = _convert_to_numpy(class_indices, xp=xp) return self.classes_[class_indices] def get_metadata_routing(self): """Get metadata routing of this object. Please check :ref:`User Guide <metadata_routing>` on how the routing mechanism works. Returns ------- routing : MetadataRouter A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating routing information. """ router = ( MetadataRouter(owner=self) .add_self_request(self) .add( estimator=self._get_estimator(), method_mapping=MethodMapping().add(caller="fit", callee="fit"), ) .add( splitter=self.cv, method_mapping=MethodMapping().add(caller="fit", callee="split"), ) ) return router def __sklearn_tags__(self): tags = super().__sklearn_tags__() estimator_tags = get_tags(self._get_estimator()) tags.input_tags.sparse = estimator_tags.input_tags.sparse tags.array_api_support = ( estimator_tags.array_api_support and self.method == "temperature" ) return tags def _fit_classifier_calibrator_pair( estimator, X, y, train, test, method, classes, xp, sample_weight=None, fit_params=None, ): """Fit a classifier/calibration pair on a given train/test split. Fit the classifier on the train set, compute its predictions on the test set and use the predictions as input to fit the calibrator along with the test labels. Parameters ---------- estimator : estimator instance Cloned base estimator. X : array-like, shape (n_samples, n_features) Sample data. y : array-like, shape (n_samples,) Targets. train : ndarray, shape (n_train_indices,) Indices of the training subset. test : ndarray, shape (n_test_indices,) Indices of the testing subset. method : {'sigmoid', 'isotonic', 'temperature'} Method to use for calibration. classes : ndarray, shape (n_classes,) The target classes. xp : namespace Array API namespace. sample_weight : array-like, default=None Sample weights for `X`. fit_params : dict, default=None Parameters to pass to the `fit` method of the underlying classifier. 
Returns ------- calibrated_classifier : _CalibratedClassifier instance """ fit_params_train = _check_method_params(X, params=fit_params, indices=train) X_train, y_train = _safe_indexing(X, train), _safe_indexing(y, train) X_test, y_test = _safe_indexing(X, test), _safe_indexing(y, test) estimator.fit(X_train, y_train, **fit_params_train) predictions, _ = _get_response_values( estimator, X_test, response_method=["decision_function", "predict_proba"], ) if predictions.ndim == 1: # Reshape binary output from `(n_samples,)` to `(n_samples, 1)` predictions = predictions.reshape(-1, 1) if sample_weight is not None: # Check that the sample_weight dtype is consistent with the predictions # to avoid unintentional upcasts. sample_weight = _check_sample_weight(sample_weight, X, dtype=predictions.dtype) sw_test = _safe_indexing(sample_weight, test) else: sw_test = None calibrated_classifier = _fit_calibrator( estimator, predictions, y_test, classes, method, xp=xp, sample_weight=sw_test, ) return calibrated_classifier def _fit_calibrator(clf, predictions, y, classes, method, xp, sample_weight=None): """Fit calibrator(s) and return a `_CalibratedClassifier` instance. A separate calibrator is fitted for each of the `n_classes` (i.e. `len(clf.classes_)`). However, if `n_classes` is 2 or if `method` is 'temperature', only one calibrator is fitted. Parameters ---------- clf : estimator instance Fitted classifier. predictions : array-like, shape (n_samples, n_classes) or (n_samples, 1) \ when binary. Raw predictions returned by the un-calibrated base classifier. y : array-like, shape (n_samples,) The targets. For `method="temperature"`, `y` needs to be label encoded. classes : ndarray, shape (n_classes,) All the prediction classes. method : {'sigmoid', 'isotonic', 'temperature'} The method to use for calibration. xp : namespace Array API namespace. sample_weight : ndarray, shape (n_samples,), default=None Sample weights. If None, then samples are equally weighted. Returns ------- pipeline : _CalibratedClassifier instance """ calibrators = [] if method in ("isotonic", "sigmoid"): Y = label_binarize(y, classes=classes) label_encoder = LabelEncoder().fit(classes) pos_class_indices = label_encoder.transform(clf.classes_) for class_idx, this_pred in zip(pos_class_indices, predictions.T): if method == "isotonic": calibrator = IsotonicRegression(out_of_bounds="clip") else: # "sigmoid" calibrator = _SigmoidCalibration() calibrator.fit(this_pred, Y[:, class_idx], sample_weight) calibrators.append(calibrator) elif method == "temperature": if classes.shape[0] == 2 and predictions.shape[-1] == 1: response_method_name = _check_response_method( clf, ["decision_function", "predict_proba"], ).__name__ if response_method_name == "predict_proba": predictions = xp.concat([1 - predictions, predictions], axis=1) calibrator = _TemperatureScaling() calibrator.fit(predictions, y, sample_weight) calibrators.append(calibrator) pipeline = _CalibratedClassifier(clf, calibrators, method=method, classes=classes) return pipeline class _CalibratedClassifier: """Pipeline-like chaining a fitted classifier and its fitted calibrators. Parameters ---------- estimator : estimator instance Fitted classifier. calibrators : list of fitted estimator instances List of fitted calibrators (either 'IsotonicRegression' or '_SigmoidCalibration'). The number of calibrators equals the number of classes. However, if there are 2 classes, the list contains only one fitted calibrator. classes : array-like of shape (n_classes,) All the prediction classes. 
method : {'sigmoid', 'isotonic'}, default='sigmoid' The method to use for calibration. Can be 'sigmoid' which corresponds to Platt's method or 'isotonic' which is a non-parametric approach based on isotonic regression. """ def __init__(self, estimator, calibrators, *, classes, method="sigmoid"): self.estimator = estimator self.calibrators = calibrators self.classes = classes self.method = method def predict_proba(self, X): """Calculate calibrated probabilities. Calculates classification calibrated probabilities for each class, in a one-vs-all manner, for `X`. Parameters ---------- X : ndarray of shape (n_samples, n_features) The sample data. Returns ------- proba : array, shape (n_samples, n_classes) The predicted probabilities. Can be exact zeros. """ predictions, _ = _get_response_values( self.estimator, X, response_method=["decision_function", "predict_proba"], ) if predictions.ndim == 1: # Reshape binary output from `(n_samples,)` to `(n_samples, 1)` predictions = predictions.reshape(-1, 1) n_classes = self.classes.shape[0] proba = np.zeros((_num_samples(X), n_classes)) if self.method in ("sigmoid", "isotonic"): label_encoder = LabelEncoder().fit(self.classes) pos_class_indices = label_encoder.transform(self.estimator.classes_) for class_idx, this_pred, calibrator in zip( pos_class_indices, predictions.T, self.calibrators ): if n_classes == 2: # When binary, `predictions` consists only of predictions for # clf.classes_[1] but `pos_class_indices` = 0 class_idx += 1 proba[:, class_idx] = calibrator.predict(this_pred) # Normalize the probabilities if n_classes == 2: proba[:, 0] = 1.0 - proba[:, 1] else: denominator = np.sum(proba, axis=1)[:, np.newaxis] # In the edge case where for each class calibrator returns a zero # probability for a given sample, use the uniform distribution # instead. uniform_proba = np.full_like(proba, 1 / n_classes) proba = np.divide( proba, denominator, out=uniform_proba, where=denominator != 0 ) elif self.method == "temperature": xp, _ = get_namespace(predictions) if n_classes == 2 and predictions.shape[-1] == 1: response_method_name = _check_response_method( self.estimator, ["decision_function", "predict_proba"], ).__name__ if response_method_name == "predict_proba": predictions = xp.concat([1 - predictions, predictions], axis=1) proba = self.calibrators[0].predict(predictions) # Deal with cases where the predicted probability minimally exceeds 1.0 proba[(1.0 < proba) & (proba <= 1.0 + 1e-5)] = 1.0 return proba # The max_abs_prediction_threshold was approximated using # logit(np.finfo(np.float64).eps) which is about -36 def _sigmoid_calibration( predictions, y, sample_weight=None, max_abs_prediction_threshold=30 ): """Probability Calibration with sigmoid method (Platt 2000) Parameters ---------- predictions : ndarray of shape (n_samples,) The decision function or predict proba for the samples. y : ndarray of shape (n_samples,) The targets. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If None, then samples are equally weighted. Returns ------- a : float The slope. b : float The intercept. References ---------- Platt, "Probabilistic Outputs for Support Vector Machines" """ predictions = column_or_1d(predictions) y = column_or_1d(y)
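    # Clarifying note (added; not from the original source): Platt (2000) fits
    # a sigmoid map from the raw scores f to probabilities,
    #     P(y = 1 | f) = 1 / (1 + exp(a * f + b)),
    # where the slope `a` and intercept `b` documented in the Returns section
    # above are chosen to minimize the (optionally sample-weighted) log loss
    # on the calibration data.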
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
true
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/kernel_approximation.py
sklearn/kernel_approximation.py
"""Approximate kernel feature maps based on Fourier transforms and count sketches.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import warnings from numbers import Integral, Real import numpy as np import scipy.sparse as sp from scipy.fft import fft, ifft from scipy.linalg import svd from sklearn.base import ( BaseEstimator, ClassNamePrefixFeaturesOutMixin, TransformerMixin, _fit_context, ) from sklearn.metrics.pairwise import ( KERNEL_PARAMS, PAIRWISE_KERNEL_FUNCTIONS, pairwise_kernels, ) from sklearn.utils import check_random_state from sklearn.utils._param_validation import Interval, StrOptions from sklearn.utils.extmath import safe_sparse_dot from sklearn.utils.validation import ( _check_feature_names_in, check_is_fitted, validate_data, ) class PolynomialCountSketch( ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator ): """Polynomial kernel approximation via Tensor Sketch. Implements Tensor Sketch, which approximates the feature map of the polynomial kernel:: K(X, Y) = (gamma * <X, Y> + coef0)^degree by efficiently computing a Count Sketch of the outer product of a vector with itself using Fast Fourier Transforms (FFT). Read more in the :ref:`User Guide <polynomial_kernel_approx>`. .. versionadded:: 0.24 Parameters ---------- gamma : float, default=1.0 Parameter of the polynomial kernel whose feature map will be approximated. degree : int, default=2 Degree of the polynomial kernel whose feature map will be approximated. coef0 : int, default=0 Constant term of the polynomial kernel whose feature map will be approximated. n_components : int, default=100 Dimensionality of the output feature space. Usually, `n_components` should be greater than the number of features in input samples in order to achieve good performance. The optimal score / run time balance is typically achieved around `n_components` = 10 * `n_features`, but this depends on the specific dataset being used. random_state : int, RandomState instance, default=None Determines random number generation for indexHash and bitHash initialization. Pass an int for reproducible results across multiple function calls. See :term:`Glossary <random_state>`. Attributes ---------- indexHash_ : ndarray of shape (degree, n_features), dtype=int64 Array of indexes in range [0, n_components) used to represent the 2-wise independent hash functions for Count Sketch computation. bitHash_ : ndarray of shape (degree, n_features), dtype=float32 Array with random entries in {+1, -1}, used to represent the 2-wise independent hash functions for Count Sketch computation. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- AdditiveChi2Sampler : Approximate feature map for additive chi2 kernel. Nystroem : Approximate a kernel map using a subset of the training data. RBFSampler : Approximate a RBF kernel feature map using random Fourier features. SkewedChi2Sampler : Approximate feature map for "skewed chi-squared" kernel. sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels. 
Examples -------- >>> from sklearn.kernel_approximation import PolynomialCountSketch >>> from sklearn.linear_model import SGDClassifier >>> X = [[0, 0], [1, 1], [1, 0], [0, 1]] >>> y = [0, 0, 1, 1] >>> ps = PolynomialCountSketch(degree=3, random_state=1) >>> X_features = ps.fit_transform(X) >>> clf = SGDClassifier(max_iter=10, tol=1e-3) >>> clf.fit(X_features, y) SGDClassifier(max_iter=10) >>> clf.score(X_features, y) 1.0 For a more detailed example of usage, see :ref:`sphx_glr_auto_examples_kernel_approximation_plot_scalable_poly_kernels.py` """ _parameter_constraints: dict = { "gamma": [Interval(Real, 0, None, closed="left")], "degree": [Interval(Integral, 1, None, closed="left")], "coef0": [Interval(Real, None, None, closed="neither")], "n_components": [Interval(Integral, 1, None, closed="left")], "random_state": ["random_state"], } def __init__( self, *, gamma=1.0, degree=2, coef0=0, n_components=100, random_state=None ): self.gamma = gamma self.degree = degree self.coef0 = coef0 self.n_components = n_components self.random_state = random_state @_fit_context(prefer_skip_nested_validation=True) def fit(self, X, y=None): """Fit the model with X. Initializes the internal variables. The method needs no information about the distribution of data, so we only care about n_features in X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like of shape (n_samples,) or (n_samples, n_outputs), \ default=None Target values (None for unsupervised transformations). Returns ------- self : object Returns the instance itself. """ X = validate_data(self, X, accept_sparse="csc") random_state = check_random_state(self.random_state) n_features = X.shape[1] if self.coef0 != 0: n_features += 1 self.indexHash_ = random_state.randint( 0, high=self.n_components, size=(self.degree, n_features) ) self.bitHash_ = random_state.choice(a=[-1, 1], size=(self.degree, n_features)) self._n_features_out = self.n_components return self def transform(self, X): """Generate the feature map approximation for X. Parameters ---------- X : {array-like}, shape (n_samples, n_features) New data, where `n_samples` is the number of samples and `n_features` is the number of features. Returns ------- X_new : array-like, shape (n_samples, n_components) Returns the instance itself. """ check_is_fitted(self) X = validate_data(self, X, accept_sparse="csc", reset=False) X_gamma = np.sqrt(self.gamma) * X if sp.issparse(X_gamma) and self.coef0 != 0: X_gamma = sp.hstack( [X_gamma, np.sqrt(self.coef0) * np.ones((X_gamma.shape[0], 1))], format="csc", ) elif not sp.issparse(X_gamma) and self.coef0 != 0: X_gamma = np.hstack( [X_gamma, np.sqrt(self.coef0) * np.ones((X_gamma.shape[0], 1))] ) if X_gamma.shape[1] != self.indexHash_.shape[1]: raise ValueError( "Number of features of test samples does not" " match that of training samples." 
) count_sketches = np.zeros((X_gamma.shape[0], self.degree, self.n_components)) if sp.issparse(X_gamma): for j in range(X_gamma.shape[1]): for d in range(self.degree): iHashIndex = self.indexHash_[d, j] iHashBit = self.bitHash_[d, j] count_sketches[:, d, iHashIndex] += ( (iHashBit * X_gamma[:, [j]]).toarray().ravel() ) else: for j in range(X_gamma.shape[1]): for d in range(self.degree): iHashIndex = self.indexHash_[d, j] iHashBit = self.bitHash_[d, j] count_sketches[:, d, iHashIndex] += iHashBit * X_gamma[:, j] # For each same, compute a count sketch of phi(x) using the polynomial # multiplication (via FFT) of p count sketches of x. count_sketches_fft = fft(count_sketches, axis=2, overwrite_x=True) count_sketches_fft_prod = np.prod(count_sketches_fft, axis=1) data_sketch = np.real(ifft(count_sketches_fft_prod, overwrite_x=True)) return data_sketch def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.input_tags.sparse = True return tags class RBFSampler(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator): """Approximate a RBF kernel feature map using random Fourier features. It implements a variant of Random Kitchen Sinks.[1] Read more in the :ref:`User Guide <rbf_kernel_approx>`. Parameters ---------- gamma : 'scale' or float, default=1.0 Parameter of RBF kernel: exp(-gamma * x^2). If ``gamma='scale'`` is passed then it uses 1 / (n_features * X.var()) as value of gamma. .. versionadded:: 1.2 The option `"scale"` was added in 1.2. n_components : int, default=100 Number of Monte Carlo samples per original feature. Equals the dimensionality of the computed feature space. random_state : int, RandomState instance or None, default=None Pseudo-random number generator to control the generation of the random weights and random offset when fitting the training data. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Attributes ---------- random_offset_ : ndarray of shape (n_components,), dtype={np.float64, np.float32} Random offset used to compute the projection in the `n_components` dimensions of the feature space. random_weights_ : ndarray of shape (n_features, n_components),\ dtype={np.float64, np.float32} Random projection directions drawn from the Fourier transform of the RBF kernel. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- AdditiveChi2Sampler : Approximate feature map for additive chi2 kernel. Nystroem : Approximate a kernel map using a subset of the training data. PolynomialCountSketch : Polynomial kernel approximation via Tensor Sketch. SkewedChi2Sampler : Approximate feature map for "skewed chi-squared" kernel. sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels. Notes ----- See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and Benjamin Recht. [1] "Weighted Sums of Random Kitchen Sinks: Replacing minimization with randomization in learning" by A. Rahimi and Benjamin Recht. 
(https://people.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf) Examples -------- >>> from sklearn.kernel_approximation import RBFSampler >>> from sklearn.linear_model import SGDClassifier >>> X = [[0, 0], [1, 1], [1, 0], [0, 1]] >>> y = [0, 0, 1, 1] >>> rbf_feature = RBFSampler(gamma=1, random_state=1) >>> X_features = rbf_feature.fit_transform(X) >>> clf = SGDClassifier(max_iter=5, tol=1e-3) >>> clf.fit(X_features, y) SGDClassifier(max_iter=5) >>> clf.score(X_features, y) 1.0 """ _parameter_constraints: dict = { "gamma": [ StrOptions({"scale"}), Interval(Real, 0.0, None, closed="left"), ], "n_components": [Interval(Integral, 1, None, closed="left")], "random_state": ["random_state"], } def __init__(self, *, gamma=1.0, n_components=100, random_state=None): self.gamma = gamma self.n_components = n_components self.random_state = random_state @_fit_context(prefer_skip_nested_validation=True) def fit(self, X, y=None): """Fit the model with X. Samples random projection according to n_features. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like, shape (n_samples,) or (n_samples, n_outputs), \ default=None Target values (None for unsupervised transformations). Returns ------- self : object Returns the instance itself. """ X = validate_data(self, X, accept_sparse="csr") random_state = check_random_state(self.random_state) n_features = X.shape[1] sparse = sp.issparse(X) if self.gamma == "scale": # var = E[X^2] - E[X]^2 if sparse X_var = (X.multiply(X)).mean() - (X.mean()) ** 2 if sparse else X.var() self._gamma = 1.0 / (n_features * X_var) if X_var != 0 else 1.0 else: self._gamma = self.gamma self.random_weights_ = (2.0 * self._gamma) ** 0.5 * random_state.normal( size=(n_features, self.n_components) ) self.random_offset_ = random_state.uniform(0, 2 * np.pi, size=self.n_components) if X.dtype == np.float32: # Setting the data type of the fitted attribute will ensure the # output data type during `transform`. self.random_weights_ = self.random_weights_.astype(X.dtype, copy=False) self.random_offset_ = self.random_offset_.astype(X.dtype, copy=False) self._n_features_out = self.n_components return self def transform(self, X): """Apply the approximate feature map to X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) New data, where `n_samples` is the number of samples and `n_features` is the number of features. Returns ------- X_new : array-like, shape (n_samples, n_components) Returns the instance itself. """ check_is_fitted(self) X = validate_data(self, X, accept_sparse="csr", reset=False) projection = safe_sparse_dot(X, self.random_weights_) projection += self.random_offset_ np.cos(projection, projection) projection *= (2.0 / self.n_components) ** 0.5 return projection def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.input_tags.sparse = True tags.transformer_tags.preserves_dtype = ["float64", "float32"] return tags class SkewedChi2Sampler( ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator ): """Approximate feature map for "skewed chi-squared" kernel. Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`. Parameters ---------- skewedness : float, default=1.0 "skewedness" parameter of the kernel. Needs to be cross-validated. n_components : int, default=100 Number of Monte Carlo samples per original feature. Equals the dimensionality of the computed feature space. 
random_state : int, RandomState instance or None, default=None Pseudo-random number generator to control the generation of the random weights and random offset when fitting the training data. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Attributes ---------- random_weights_ : ndarray of shape (n_features, n_components) Weight array, sampled from a secant hyperbolic distribution, which will be used to linearly transform the log of the data. random_offset_ : ndarray of shape (n_features, n_components) Bias term, which will be added to the data. It is uniformly distributed between 0 and 2*pi. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- AdditiveChi2Sampler : Approximate feature map for additive chi2 kernel. Nystroem : Approximate a kernel map using a subset of the training data. RBFSampler : Approximate a RBF kernel feature map using random Fourier features. SkewedChi2Sampler : Approximate feature map for "skewed chi-squared" kernel. sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel. sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels. References ---------- See "Random Fourier Approximations for Skewed Multiplicative Histogram Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu. Examples -------- >>> from sklearn.kernel_approximation import SkewedChi2Sampler >>> from sklearn.linear_model import SGDClassifier >>> X = [[0, 0], [1, 1], [1, 0], [0, 1]] >>> y = [0, 0, 1, 1] >>> chi2_feature = SkewedChi2Sampler(skewedness=.01, ... n_components=10, ... random_state=0) >>> X_features = chi2_feature.fit_transform(X, y) >>> clf = SGDClassifier(max_iter=10, tol=1e-3) >>> clf.fit(X_features, y) SGDClassifier(max_iter=10) >>> clf.score(X_features, y) 1.0 """ _parameter_constraints: dict = { "skewedness": [Interval(Real, None, None, closed="neither")], "n_components": [Interval(Integral, 1, None, closed="left")], "random_state": ["random_state"], } def __init__(self, *, skewedness=1.0, n_components=100, random_state=None): self.skewedness = skewedness self.n_components = n_components self.random_state = random_state @_fit_context(prefer_skip_nested_validation=True) def fit(self, X, y=None): """Fit the model with X. Samples random projection according to n_features. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like, shape (n_samples,) or (n_samples, n_outputs), \ default=None Target values (None for unsupervised transformations). Returns ------- self : object Returns the instance itself. """ X = validate_data(self, X) random_state = check_random_state(self.random_state) n_features = X.shape[1] uniform = random_state.uniform(size=(n_features, self.n_components)) # transform by inverse CDF of sech self.random_weights_ = 1.0 / np.pi * np.log(np.tan(np.pi / 2.0 * uniform)) self.random_offset_ = random_state.uniform(0, 2 * np.pi, size=self.n_components) if X.dtype == np.float32: # Setting the data type of the fitted attribute will ensure the # output data type during `transform`. 
self.random_weights_ = self.random_weights_.astype(X.dtype, copy=False) self.random_offset_ = self.random_offset_.astype(X.dtype, copy=False) self._n_features_out = self.n_components return self def transform(self, X): """Apply the approximate feature map to X. Parameters ---------- X : array-like, shape (n_samples, n_features) New data, where `n_samples` is the number of samples and `n_features` is the number of features. All values of X must be strictly greater than "-skewedness". Returns ------- X_new : array-like, shape (n_samples, n_components) Returns the instance itself. """ check_is_fitted(self) X = validate_data( self, X, copy=True, dtype=[np.float64, np.float32], reset=False ) if (X <= -self.skewedness).any(): raise ValueError("X may not contain entries smaller than -skewedness.") X += self.skewedness np.log(X, X) projection = safe_sparse_dot(X, self.random_weights_) projection += self.random_offset_ np.cos(projection, projection) projection *= np.sqrt(2.0) / np.sqrt(self.n_components) return projection def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.transformer_tags.preserves_dtype = ["float64", "float32"] return tags class AdditiveChi2Sampler(TransformerMixin, BaseEstimator): """Approximate feature map for additive chi2 kernel. Uses sampling the fourier transform of the kernel characteristic at regular intervals. Since the kernel that is to be approximated is additive, the components of the input vectors can be treated separately. Each entry in the original space is transformed into 2*sample_steps-1 features, where sample_steps is a parameter of the method. Typical values of sample_steps include 1, 2 and 3. Optimal choices for the sampling interval for certain data ranges can be computed (see the reference). The default values should be reasonable. Read more in the :ref:`User Guide <additive_chi_kernel_approx>`. Parameters ---------- sample_steps : int, default=2 Gives the number of (complex) sampling points. sample_interval : float, default=None Sampling interval. Must be specified when sample_steps not in {1,2,3}. Attributes ---------- n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of the chi squared kernel. sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel. sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi squared kernel. Notes ----- This estimator approximates a slightly different version of the additive chi squared kernel then ``metric.additive_chi2`` computes. This estimator is stateless and does not need to be fitted. However, we recommend to call :meth:`fit_transform` instead of :meth:`transform`, as parameter validation is only performed in :meth:`fit`. References ---------- See `"Efficient additive kernels via explicit feature maps" <http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_ A. Vedaldi and A. 
Zisserman, Pattern Analysis and Machine Intelligence, 2011 Examples -------- >>> from sklearn.datasets import load_digits >>> from sklearn.linear_model import SGDClassifier >>> from sklearn.kernel_approximation import AdditiveChi2Sampler >>> X, y = load_digits(return_X_y=True) >>> chi2sampler = AdditiveChi2Sampler(sample_steps=2) >>> X_transformed = chi2sampler.fit_transform(X, y) >>> clf = SGDClassifier(max_iter=5, random_state=0, tol=1e-3) >>> clf.fit(X_transformed, y) SGDClassifier(max_iter=5, random_state=0) >>> clf.score(X_transformed, y) 0.9499... """ _parameter_constraints: dict = { "sample_steps": [Interval(Integral, 1, None, closed="left")], "sample_interval": [Interval(Real, 0, None, closed="left"), None], } def __init__(self, *, sample_steps=2, sample_interval=None): self.sample_steps = sample_steps self.sample_interval = sample_interval @_fit_context(prefer_skip_nested_validation=True) def fit(self, X, y=None): """Only validates estimator's parameters. This method allows to: (i) validate the estimator's parameters and (ii) be consistent with the scikit-learn transformer API. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like, shape (n_samples,) or (n_samples, n_outputs), \ default=None Target values (None for unsupervised transformations). Returns ------- self : object Returns the transformer. """ X = validate_data(self, X, accept_sparse="csr", ensure_non_negative=True) if self.sample_interval is None and self.sample_steps not in (1, 2, 3): raise ValueError( "If sample_steps is not in [1, 2, 3]," " you need to provide sample_interval" ) return self def transform(self, X): """Apply approximate feature map to X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data, where `n_samples` is the number of samples and `n_features` is the number of features. Returns ------- X_new : {ndarray, sparse matrix}, \ shape = (n_samples, n_features * (2*sample_steps - 1)) Whether the return value is an array or sparse matrix depends on the type of the input X. """ X = validate_data( self, X, accept_sparse="csr", reset=False, ensure_non_negative=True ) sparse = sp.issparse(X) if self.sample_interval is None: # See figure 2 c) of "Efficient additive kernels via explicit feature maps" # <http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf> # A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence, # 2011 if self.sample_steps == 1: sample_interval = 0.8 elif self.sample_steps == 2: sample_interval = 0.5 elif self.sample_steps == 3: sample_interval = 0.4 else: raise ValueError( "If sample_steps is not in [1, 2, 3]," " you need to provide sample_interval" ) else: sample_interval = self.sample_interval # zeroth component # 1/cosh = sech # cosh(0) = 1.0 transf = self._transform_sparse if sparse else self._transform_dense return transf(X, self.sample_steps, sample_interval) def get_feature_names_out(self, input_features=None): """Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Only used to validate feature names with the names seen in :meth:`fit`. Returns ------- feature_names_out : ndarray of str objects Transformed feature names. """ # Note that passing attributes="n_features_in_" forces check_is_fitted # to check if the attribute is present. 
Otherwise it will pass on this # stateless estimator (requires_fit=False) check_is_fitted(self, attributes="n_features_in_") input_features = _check_feature_names_in( self, input_features, generate_names=True ) est_name = self.__class__.__name__.lower() names_list = [f"{est_name}_{name}_sqrt" for name in input_features] for j in range(1, self.sample_steps): cos_names = [f"{est_name}_{name}_cos{j}" for name in input_features] sin_names = [f"{est_name}_{name}_sin{j}" for name in input_features] names_list.extend(cos_names + sin_names) return np.asarray(names_list, dtype=object) @staticmethod def _transform_dense(X, sample_steps, sample_interval): non_zero = X != 0.0 X_nz = X[non_zero] X_step = np.zeros_like(X) X_step[non_zero] = np.sqrt(X_nz * sample_interval) X_new = [X_step] log_step_nz = sample_interval * np.log(X_nz) step_nz = 2 * X_nz * sample_interval for j in range(1, sample_steps): factor_nz = np.sqrt(step_nz / np.cosh(np.pi * j * sample_interval)) X_step = np.zeros_like(X) X_step[non_zero] = factor_nz * np.cos(j * log_step_nz) X_new.append(X_step) X_step = np.zeros_like(X) X_step[non_zero] = factor_nz * np.sin(j * log_step_nz) X_new.append(X_step) return np.hstack(X_new) @staticmethod def _transform_sparse(X, sample_steps, sample_interval): indices = X.indices.copy() indptr = X.indptr.copy() data_step = np.sqrt(X.data * sample_interval) X_step = sp.csr_matrix( (data_step, indices, indptr), shape=X.shape, dtype=X.dtype, copy=False ) X_new = [X_step] log_step_nz = sample_interval * np.log(X.data) step_nz = 2 * X.data * sample_interval for j in range(1, sample_steps): factor_nz = np.sqrt(step_nz / np.cosh(np.pi * j * sample_interval)) data_step = factor_nz * np.cos(j * log_step_nz) X_step = sp.csr_matrix( (data_step, indices, indptr), shape=X.shape, dtype=X.dtype, copy=False ) X_new.append(X_step) data_step = factor_nz * np.sin(j * log_step_nz) X_step = sp.csr_matrix( (data_step, indices, indptr), shape=X.shape, dtype=X.dtype, copy=False ) X_new.append(X_step) return sp.hstack(X_new) def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.requires_fit = False tags.input_tags.positive_only = True tags.input_tags.sparse = True return tags class Nystroem(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator): """Approximate a kernel map using a subset of the training data. Constructs an approximate feature map for an arbitrary kernel using a subset of the data as basis. Read more in the :ref:`User Guide <nystroem_kernel_approx>`. .. versionadded:: 0.13 Parameters ---------- kernel : str or callable, default='rbf' Kernel map to be approximated. A callable should accept two arguments and the keyword arguments passed to this object as `kernel_params`, and should return a floating point number. gamma : float, default=None Gamma parameter for the RBF, laplacian, polynomial, exponential chi2 and sigmoid kernels. Interpretation of the default value is left to the kernel; see the documentation for sklearn.metrics.pairwise. Ignored by other kernels. coef0 : float, default=None Zero coefficient for polynomial and sigmoid kernels. Ignored by other kernels. degree : float, default=None Degree of the polynomial kernel. Ignored by other kernels. kernel_params : dict, default=None Additional parameters (keyword arguments) for kernel function passed as callable object. n_components : int, default=100 Number of features to construct. How many data points will be used to construct the mapping. 
random_state : int, RandomState instance or None, default=None Pseudo-random number generator to control the uniform sampling without replacement of `n_components` of the training data to construct the basis kernel. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. n_jobs : int, default=None The number of jobs to use for the computation. This works by breaking down the kernel matrix into `n_jobs` even slices and computing them in parallel. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. .. versionadded:: 0.24 Attributes ---------- components_ : ndarray of shape (n_components, n_features)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
true
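The row above documents AdditiveChi2Sampler, including the width of its output map. The following is a minimal usage sketch, not part of the dumped file, checking the documented shape n_features * (2 * sample_steps - 1) and the generated feature names on a small non-negative array.

# Minimal sketch (assumed usage, not from the dumped file).
import numpy as np
from sklearn.kernel_approximation import AdditiveChi2Sampler

rng = np.random.RandomState(0)
X = rng.rand(5, 4)  # non-negative input, as required by the chi-squared map

sampler = AdditiveChi2Sampler(sample_steps=3)
X_t = sampler.fit_transform(X)
print(X_t.shape)  # (5, 20) == (5, 4 * (2 * 3 - 1))
print(sampler.get_feature_names_out()[:2])
# first names are the 'sqrt' components, e.g. 'additivechi2sampler_x0_sqrt'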
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/dummy.py
sklearn/dummy.py
"""Dummy estimators that implement simple rules of thumb.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import warnings from numbers import Integral, Real import numpy as np import scipy.sparse as sp from sklearn.base import ( BaseEstimator, ClassifierMixin, MultiOutputMixin, RegressorMixin, _fit_context, ) from sklearn.utils import check_random_state from sklearn.utils._param_validation import Interval, StrOptions from sklearn.utils.multiclass import class_distribution from sklearn.utils.random import _random_choice_csc from sklearn.utils.stats import _weighted_percentile from sklearn.utils.validation import ( _check_sample_weight, _num_samples, check_array, check_consistent_length, check_is_fitted, validate_data, ) class DummyClassifier(MultiOutputMixin, ClassifierMixin, BaseEstimator): """DummyClassifier makes predictions that ignore the input features. This classifier serves as a simple baseline to compare against other more complex classifiers. The specific behavior of the baseline is selected with the `strategy` parameter. All strategies make predictions that ignore the input feature values passed as the `X` argument to `fit` and `predict`. The predictions, however, typically depend on values observed in the `y` parameter passed to `fit`. Note that the "stratified" and "uniform" strategies lead to non-deterministic predictions that can be rendered deterministic by setting the `random_state` parameter if needed. The other strategies are naturally deterministic and, once fit, always return the same constant prediction for any value of `X`. Read more in the :ref:`User Guide <dummy_estimators>`. .. versionadded:: 0.13 Parameters ---------- strategy : {"most_frequent", "prior", "stratified", "uniform", \ "constant"}, default="prior" Strategy to use to generate predictions. * "most_frequent": the `predict` method always returns the most frequent class label in the observed `y` argument passed to `fit`. The `predict_proba` method returns the matching one-hot encoded vector. * "prior": the `predict` method always returns the most frequent class label in the observed `y` argument passed to `fit` (like "most_frequent"). ``predict_proba`` always returns the empirical class distribution of `y` also known as the empirical class prior distribution. * "stratified": the `predict_proba` method randomly samples one-hot vectors from a multinomial distribution parametrized by the empirical class prior probabilities. The `predict` method returns the class label which got probability one in the one-hot vector of `predict_proba`. Each sampled row of both methods is therefore independent and identically distributed. * "uniform": generates predictions uniformly at random from the list of unique classes observed in `y`, i.e. each class has equal probability. * "constant": always predicts a constant label that is provided by the user. This is useful for metrics that evaluate a non-majority class. .. versionchanged:: 0.24 The default value of `strategy` has changed to "prior" in version 0.24. random_state : int, RandomState instance or None, default=None Controls the randomness to generate the predictions when ``strategy='stratified'`` or ``strategy='uniform'``. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. constant : int or str or array-like of shape (n_outputs,), default=None The explicit constant as predicted by the "constant" strategy. This parameter is useful only for the "constant" strategy. 
Attributes ---------- classes_ : ndarray of shape (n_classes,) or list of such arrays Unique class labels observed in `y`. For multi-output classification problems, this attribute is a list of arrays as each output has an independent set of possible classes. n_classes_ : int or list of int Number of label for each output. class_prior_ : ndarray of shape (n_classes,) or list of such arrays Frequency of each class observed in `y`. For multioutput classification problems, this is computed independently for each output. n_features_in_ : int Number of features seen during :term:`fit`. feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. n_outputs_ : int Number of outputs. sparse_output_ : bool True if the array returned from predict is to be in sparse CSC format. Is automatically set to True if the input `y` is passed in sparse format. See Also -------- DummyRegressor : Regressor that makes predictions using simple rules. Examples -------- >>> import numpy as np >>> from sklearn.dummy import DummyClassifier >>> X = np.array([-1, 1, 1, 1]) >>> y = np.array([0, 1, 1, 1]) >>> dummy_clf = DummyClassifier(strategy="most_frequent") >>> dummy_clf.fit(X, y) DummyClassifier(strategy='most_frequent') >>> dummy_clf.predict(X) array([1, 1, 1, 1]) >>> dummy_clf.score(X, y) 0.75 """ _parameter_constraints: dict = { "strategy": [ StrOptions({"most_frequent", "prior", "stratified", "uniform", "constant"}) ], "random_state": ["random_state"], "constant": [Integral, str, "array-like", None], } def __init__(self, *, strategy="prior", random_state=None, constant=None): self.strategy = strategy self.random_state = random_state self.constant = constant @_fit_context(prefer_skip_nested_validation=True) def fit(self, X, y, sample_weight=None): """Fit the baseline classifier. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) or (n_samples, n_outputs) Target values. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- self : object Returns the instance itself. """ validate_data(self, X, skip_check_array=True) self._strategy = self.strategy if self._strategy == "uniform" and sp.issparse(y): y = y.toarray() warnings.warn( ( "A local copy of the target data has been converted " "to a numpy array. Predicting on sparse target data " "with the uniform strategy would not save memory " "and would be slower." ), UserWarning, ) self.sparse_output_ = sp.issparse(y) if not self.sparse_output_: y = np.asarray(y) y = np.atleast_1d(y) if y.ndim == 1: y = np.reshape(y, (-1, 1)) self.n_outputs_ = y.shape[1] check_consistent_length(X, y) if sample_weight is not None: sample_weight = _check_sample_weight(sample_weight, X) if self._strategy == "constant": if self.constant is None: raise ValueError( "Constant target value has to be specified " "when the constant strategy is used." ) else: constant = np.reshape(np.atleast_1d(self.constant), (-1, 1)) if constant.shape[0] != self.n_outputs_: raise ValueError( "Constant target value should have shape (%d, 1)." % self.n_outputs_ ) (self.classes_, self.n_classes_, self.class_prior_) = class_distribution( y, sample_weight ) if self._strategy == "constant": for k in range(self.n_outputs_): if not any(constant[k][0] == c for c in self.classes_[k]): # Checking in case of constant strategy if the constant # provided by the user is in y. 
err_msg = ( "The constant target value must be present in " "the training data. You provided constant={}. " "Possible values are: {}.".format( self.constant, self.classes_[k].tolist() ) ) raise ValueError(err_msg) if self.n_outputs_ == 1: self.n_classes_ = self.n_classes_[0] self.classes_ = self.classes_[0] self.class_prior_ = self.class_prior_[0] return self def predict(self, X): """Perform classification on test vectors X. Parameters ---------- X : array-like of shape (n_samples, n_features) Test data. Returns ------- y : array-like of shape (n_samples,) or (n_samples, n_outputs) Predicted target values for X. """ check_is_fitted(self) # numpy random_state expects Python int and not long as size argument # under Windows n_samples = _num_samples(X) rs = check_random_state(self.random_state) n_classes_ = self.n_classes_ classes_ = self.classes_ class_prior_ = self.class_prior_ constant = self.constant if self.n_outputs_ == 1: # Get same type even for self.n_outputs_ == 1 n_classes_ = [n_classes_] classes_ = [classes_] class_prior_ = [class_prior_] constant = [constant] # Compute probability only once if self._strategy == "stratified": proba = self.predict_proba(X) if self.n_outputs_ == 1: proba = [proba] if self.sparse_output_: class_prob = None if self._strategy in ("most_frequent", "prior"): classes_ = [np.array([cp.argmax()]) for cp in class_prior_] elif self._strategy == "stratified": class_prob = class_prior_ elif self._strategy == "uniform": raise ValueError( "Sparse target prediction is not " "supported with the uniform strategy" ) elif self._strategy == "constant": classes_ = [np.array([c]) for c in constant] y = _random_choice_csc(n_samples, classes_, class_prob, self.random_state) else: if self._strategy in ("most_frequent", "prior"): y = np.tile( [ classes_[k][class_prior_[k].argmax()] for k in range(self.n_outputs_) ], [n_samples, 1], ) elif self._strategy == "stratified": y = np.vstack( [ classes_[k][proba[k].argmax(axis=1)] for k in range(self.n_outputs_) ] ).T elif self._strategy == "uniform": ret = [ classes_[k][rs.randint(n_classes_[k], size=n_samples)] for k in range(self.n_outputs_) ] y = np.vstack(ret).T elif self._strategy == "constant": y = np.tile(self.constant, (n_samples, 1)) if self.n_outputs_ == 1: y = np.ravel(y) return y def predict_proba(self, X): """ Return probability estimates for the test vectors X. Parameters ---------- X : array-like of shape (n_samples, n_features) Test data. Returns ------- P : ndarray of shape (n_samples, n_classes) or list of such arrays Returns the probability of the sample for each class in the model, where classes are ordered arithmetically, for each output. 
""" check_is_fitted(self) # numpy random_state expects Python int and not long as size argument # under Windows n_samples = _num_samples(X) rs = check_random_state(self.random_state) n_classes_ = self.n_classes_ classes_ = self.classes_ class_prior_ = self.class_prior_ constant = self.constant if self.n_outputs_ == 1: # Get same type even for self.n_outputs_ == 1 n_classes_ = [n_classes_] classes_ = [classes_] class_prior_ = [class_prior_] constant = [constant] P = [] for k in range(self.n_outputs_): if self._strategy == "most_frequent": ind = class_prior_[k].argmax() out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64) out[:, ind] = 1.0 elif self._strategy == "prior": out = np.ones((n_samples, 1)) * class_prior_[k] elif self._strategy == "stratified": out = rs.multinomial(1, class_prior_[k], size=n_samples) out = out.astype(np.float64) elif self._strategy == "uniform": out = np.ones((n_samples, n_classes_[k]), dtype=np.float64) out /= n_classes_[k] elif self._strategy == "constant": ind = np.where(classes_[k] == constant[k]) out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64) out[:, ind] = 1.0 P.append(out) if self.n_outputs_ == 1: P = P[0] return P def predict_log_proba(self, X): """ Return log probability estimates for the test vectors X. Parameters ---------- X : {array-like, object with finite length or shape} Training data. Returns ------- P : ndarray of shape (n_samples, n_classes) or list of such arrays Returns the log probability of the sample for each class in the model, where classes are ordered arithmetically for each output. """ proba = self.predict_proba(X) if self.n_outputs_ == 1: return np.log(proba) else: return [np.log(p) for p in proba] def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.input_tags.sparse = True tags.classifier_tags.poor_score = True tags.no_validation = True return tags def score(self, X, y, sample_weight=None): """Return the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters ---------- X : None or array-like of shape (n_samples, n_features) Test samples. Passing None as test samples gives the same result as passing real test samples, since DummyClassifier operates independently of the sampled observations. y : array-like of shape (n_samples,) or (n_samples, n_outputs) True labels for X. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- score : float Mean accuracy of self.predict(X) w.r.t. y. """ if X is None: X = np.zeros(shape=(len(y), 1)) return super().score(X, y, sample_weight) class DummyRegressor(MultiOutputMixin, RegressorMixin, BaseEstimator): """Regressor that makes predictions using simple rules. This regressor is useful as a simple baseline to compare with other (real) regressors. Do not use it for real problems. Read more in the :ref:`User Guide <dummy_estimators>`. .. versionadded:: 0.13 Parameters ---------- strategy : {"mean", "median", "quantile", "constant"}, default="mean" Strategy to use to generate predictions. * "mean": always predicts the mean of the training set * "median": always predicts the median of the training set * "quantile": always predicts a specified quantile of the training set, provided with the quantile parameter. * "constant": always predicts a constant value that is provided by the user. 
constant : int or float or array-like of shape (n_outputs,), default=None The explicit constant as predicted by the "constant" strategy. This parameter is useful only for the "constant" strategy. quantile : float in [0.0, 1.0], default=None The quantile to predict using the "quantile" strategy. A quantile of 0.5 corresponds to the median, while 0.0 to the minimum and 1.0 to the maximum. Attributes ---------- constant_ : ndarray of shape (1, n_outputs) Mean or median or quantile of the training targets or constant value given by the user. n_features_in_ : int Number of features seen during :term:`fit`. feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. n_outputs_ : int Number of outputs. See Also -------- DummyClassifier: Classifier that makes predictions using simple rules. Examples -------- >>> import numpy as np >>> from sklearn.dummy import DummyRegressor >>> X = np.array([1.0, 2.0, 3.0, 4.0]) >>> y = np.array([2.0, 3.0, 5.0, 10.0]) >>> dummy_regr = DummyRegressor(strategy="mean") >>> dummy_regr.fit(X, y) DummyRegressor() >>> dummy_regr.predict(X) array([5., 5., 5., 5.]) >>> dummy_regr.score(X, y) 0.0 """ _parameter_constraints: dict = { "strategy": [StrOptions({"mean", "median", "quantile", "constant"})], "quantile": [Interval(Real, 0.0, 1.0, closed="both"), None], "constant": [ Interval(Real, None, None, closed="neither"), "array-like", None, ], } def __init__(self, *, strategy="mean", constant=None, quantile=None): self.strategy = strategy self.constant = constant self.quantile = quantile @_fit_context(prefer_skip_nested_validation=True) def fit(self, X, y, sample_weight=None): """Fit the baseline regressor. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data. y : array-like of shape (n_samples,) or (n_samples, n_outputs) Target values. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- self : object Fitted estimator. """ validate_data(self, X, skip_check_array=True) y = check_array(y, ensure_2d=False, input_name="y") if len(y) == 0: raise ValueError("y must not be empty.") if y.ndim == 1: y = np.reshape(y, (-1, 1)) self.n_outputs_ = y.shape[1] check_consistent_length(X, y, sample_weight) if sample_weight is not None: sample_weight = _check_sample_weight(sample_weight, X) if self.strategy == "mean": self.constant_ = np.average(y, axis=0, weights=sample_weight) elif self.strategy == "median": if sample_weight is None: self.constant_ = np.median(y, axis=0) else: self.constant_ = _weighted_percentile( y, sample_weight, percentile_rank=50.0 ) elif self.strategy == "quantile": if self.quantile is None: raise ValueError( "When using `strategy='quantile', you have to specify the desired " "quantile in the range [0, 1]." ) percentile_rank = self.quantile * 100.0 if sample_weight is None: self.constant_ = np.percentile(y, axis=0, q=percentile_rank) else: self.constant_ = _weighted_percentile( y, sample_weight, percentile_rank=percentile_rank ) elif self.strategy == "constant": if self.constant is None: raise TypeError( "Constant target value has to be specified " "when the constant strategy is used." ) self.constant_ = check_array( self.constant, accept_sparse=["csr", "csc", "coo"], ensure_2d=False, ensure_min_samples=0, ) if self.n_outputs_ != 1 and self.constant_.shape[0] != y.shape[1]: raise ValueError( "Constant target value should have shape (%d, 1)." 
% y.shape[1] ) self.constant_ = np.reshape(self.constant_, (1, -1)) return self def predict(self, X, return_std=False): """Perform classification on test vectors X. Parameters ---------- X : array-like of shape (n_samples, n_features) Test data. return_std : bool, default=False Whether to return the standard deviation of posterior prediction. All zeros in this case. .. versionadded:: 0.20 Returns ------- y : array-like of shape (n_samples,) or (n_samples, n_outputs) Predicted target values for X. y_std : array-like of shape (n_samples,) or (n_samples, n_outputs) Standard deviation of predictive distribution of query points. """ check_is_fitted(self) n_samples = _num_samples(X) y = np.full( (n_samples, self.n_outputs_), self.constant_, dtype=np.array(self.constant_).dtype, ) y_std = np.zeros((n_samples, self.n_outputs_)) if self.n_outputs_ == 1: y = np.ravel(y) y_std = np.ravel(y_std) return (y, y_std) if return_std else y def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.input_tags.sparse = True tags.regressor_tags.poor_score = True tags.no_validation = True return tags def score(self, X, y, sample_weight=None): """Return the coefficient of determination R^2 of the prediction. The coefficient R^2 is defined as `(1 - u/v)`, where `u` is the residual sum of squares `((y_true - y_pred) ** 2).sum()` and `v` is the total sum of squares `((y_true - y_true.mean()) ** 2).sum()`. The best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of y, disregarding the input features, would get a R^2 score of 0.0. Parameters ---------- X : None or array-like of shape (n_samples, n_features) Test samples. Passing None as test samples gives the same result as passing real test samples, since `DummyRegressor` operates independently of the sampled observations. y : array-like of shape (n_samples,) or (n_samples, n_outputs) True values for X. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- score : float R^2 of `self.predict(X)` w.r.t. y. """ if X is None: X = np.zeros(shape=(len(y), 1)) return super().score(X, y, sample_weight)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
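A short sketch (assumed, not part of the dump) of the strategies documented in the DummyClassifier/DummyRegressor docstrings above: "prior" always predicts the majority class while predict_proba returns the empirical class distribution, and DummyRegressor with strategy="quantile" predicts a constant percentile of the training targets.

import numpy as np
from sklearn.dummy import DummyClassifier, DummyRegressor

X = np.zeros((6, 1))                    # features are ignored by dummy estimators
y = np.array([0, 0, 0, 1, 1, 2])

clf = DummyClassifier(strategy="prior").fit(X, y)
print(clf.predict(X))                   # [0 0 0 0 0 0] -> most frequent class
print(clf.predict_proba(X)[0])          # empirical prior, here [0.5 0.333... 0.166...]

reg = DummyRegressor(strategy="quantile", quantile=0.9).fit(X, np.arange(6.0))
print(reg.predict(X))                   # constant 90th percentile of the training targets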
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/_min_dependencies.py
sklearn/_min_dependencies.py
"""All minimum dependencies for scikit-learn.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import argparse from collections import defaultdict # scipy and cython should by in sync with pyproject.toml NUMPY_MIN_VERSION = "1.24.1" SCIPY_MIN_VERSION = "1.10.0" JOBLIB_MIN_VERSION = "1.3.0" THREADPOOLCTL_MIN_VERSION = "3.2.0" PYTEST_MIN_VERSION = "7.1.2" CYTHON_MIN_VERSION = "3.1.2" # 'build' and 'install' is included to have structured metadata for CI. # It will NOT be included in setup's extras_require # The values are (version_spec, comma separated tags) dependent_packages = { "numpy": (NUMPY_MIN_VERSION, "build, install"), "scipy": (SCIPY_MIN_VERSION, "build, install"), "joblib": (JOBLIB_MIN_VERSION, "install"), "threadpoolctl": (THREADPOOLCTL_MIN_VERSION, "install"), "cython": (CYTHON_MIN_VERSION, "build"), "meson-python": ("0.17.1", "build"), "matplotlib": ("3.6.1", "benchmark, docs, examples, tests"), "scikit-image": ("0.22.0", "docs, examples"), "pandas": ("1.5.0", "benchmark, docs, examples, tests"), "seaborn": ("0.13.0", "docs, examples"), "memory_profiler": ("0.57.0", "benchmark, docs"), "pytest": (PYTEST_MIN_VERSION, "tests"), "pytest-cov": ("2.9.0", "tests"), "ruff": ("0.12.2", "tests"), "mypy": ("1.15", "tests"), "pyamg": ("5.0.0", "tests"), "polars": ("0.20.30", "docs, tests"), "pyarrow": ("12.0.0", "tests"), "sphinx": ("7.3.7", "docs"), "sphinx-copybutton": ("0.5.2", "docs"), "sphinx-gallery": ("0.17.1", "docs"), "numpydoc": ("1.2.0", "docs, tests"), "Pillow": ("10.1.0", "docs"), "pooch": ("1.8.0", "docs, examples, tests"), "sphinx-prompt": ("1.4.0", "docs"), "sphinxext-opengraph": ("0.9.1", "docs"), "plotly": ("5.18.0", "docs, examples"), "sphinxcontrib-sass": ("0.3.4", "docs"), "sphinx-remove-toctrees": ("1.0.0.post1", "docs"), "sphinx-design": ("0.6.0", "docs"), "pydata-sphinx-theme": ("0.15.3", "docs"), "towncrier": ("24.8.0", "docs"), # XXX: Pin conda-lock to the latest released version (needs manual update # from time to time) "conda-lock": ("3.0.1", "maintenance"), } # create inverse mapping for setuptools tag_to_packages: dict = defaultdict(list) for package, (min_version, extras) in dependent_packages.items(): for extra in extras.split(", "): tag_to_packages[extra].append("{}>={}".format(package, min_version)) # Used by CI to get the min dependencies if __name__ == "__main__": parser = argparse.ArgumentParser(description="Get min dependencies for a package") parser.add_argument("package", choices=dependent_packages) args = parser.parse_args() min_version = dependent_packages[args.package][0] print(min_version)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
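A small sketch of how the structures defined in the row above can be queried. Note that sklearn._min_dependencies is a private module, so importing it directly is an assumption that may break between releases; the values shown in the comments are those listed in the file itself.

# Assumes the private module sklearn._min_dependencies is importable as-is.
from sklearn._min_dependencies import dependent_packages, tag_to_packages

print(dependent_packages["numpy"])   # ('1.24.1', 'build, install')
print(tag_to_packages["build"][:2])  # ['numpy>=1.24.1', 'scipy>=1.10.0']

# The same lookup is exposed on the command line for CI, e.g.:
#   python sklearn/_min_dependencies.py numpy   -> prints 1.24.1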
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/random_projection.py
sklearn/random_projection.py
"""Random projection transformers. Random projections are a simple and computationally efficient way to reduce the dimensionality of the data by trading a controlled amount of accuracy (as additional variance) for faster processing times and smaller model sizes. The dimensions and distribution of random projections matrices are controlled so as to preserve the pairwise distances between any two samples of the dataset. The main theoretical result behind the efficiency of random projection is the `Johnson-Lindenstrauss lemma (quoting Wikipedia) <https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma>`_: In mathematics, the Johnson-Lindenstrauss lemma is a result concerning low-distortion embeddings of points from high-dimensional into low-dimensional Euclidean space. The lemma states that a small set of points in a high-dimensional space can be embedded into a space of much lower dimension in such a way that distances between the points are nearly preserved. The map used for the embedding is at least Lipschitz, and can even be taken to be an orthogonal projection. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import warnings from abc import ABCMeta, abstractmethod from numbers import Integral, Real import numpy as np import scipy.sparse as sp from scipy import linalg from sklearn.base import ( BaseEstimator, ClassNamePrefixFeaturesOutMixin, TransformerMixin, _fit_context, ) from sklearn.exceptions import DataDimensionalityWarning from sklearn.utils import check_random_state from sklearn.utils._param_validation import Interval, StrOptions, validate_params from sklearn.utils.extmath import safe_sparse_dot from sklearn.utils.random import sample_without_replacement from sklearn.utils.validation import check_array, check_is_fitted, validate_data __all__ = [ "GaussianRandomProjection", "SparseRandomProjection", "johnson_lindenstrauss_min_dim", ] @validate_params( { "n_samples": ["array-like", Interval(Real, 1, None, closed="left")], "eps": ["array-like", Interval(Real, 0, 1, closed="neither")], }, prefer_skip_nested_validation=True, ) def johnson_lindenstrauss_min_dim(n_samples, *, eps=0.1): """Find a 'safe' number of components to randomly project to. The distortion introduced by a random projection `p` only changes the distance between two points by a factor (1 +- eps) in a euclidean space with good probability. The projection `p` is an eps-embedding as defined by: .. code-block:: text (1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2 Where u and v are any rows taken from a dataset of shape (n_samples, n_features), eps is in ]0, 1[ and p is a projection by a random Gaussian N(0, 1) matrix of shape (n_components, n_features) (or a sparse Achlioptas matrix). The minimum number of components to guarantee the eps-embedding is given by: .. code-block:: text n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3) Note that the number of dimensions is independent of the original number of features but instead depends on the size of the dataset: the larger the dataset, the higher is the minimal dimensionality of an eps-embedding. Read more in the :ref:`User Guide <johnson_lindenstrauss>`. Parameters ---------- n_samples : int or array-like of int Number of samples that should be an integer greater than 0. If an array is given, it will compute a safe number of components array-wise. 
eps : float or array-like of shape (n_components,), dtype=float, \ default=0.1 Maximum distortion rate in the range (0, 1) as defined by the Johnson-Lindenstrauss lemma. If an array is given, it will compute a safe number of components array-wise. Returns ------- n_components : int or ndarray of int The minimal number of components to guarantee with good probability an eps-embedding with n_samples. References ---------- .. [1] https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma .. [2] `Sanjoy Dasgupta and Anupam Gupta, 1999, "An elementary proof of the Johnson-Lindenstrauss Lemma." <https://citeseerx.ist.psu.edu/doc_view/pid/95cd464d27c25c9c8690b378b894d337cdf021f9>`_ Examples -------- >>> from sklearn.random_projection import johnson_lindenstrauss_min_dim >>> johnson_lindenstrauss_min_dim(1e6, eps=0.5) np.int64(663) >>> johnson_lindenstrauss_min_dim(1e6, eps=[0.5, 0.1, 0.01]) array([ 663, 11841, 1112658]) >>> johnson_lindenstrauss_min_dim([1e4, 1e5, 1e6], eps=0.1) array([ 7894, 9868, 11841]) """ eps = np.asarray(eps) n_samples = np.asarray(n_samples) if np.any(eps <= 0.0) or np.any(eps >= 1): raise ValueError("The JL bound is defined for eps in ]0, 1[, got %r" % eps) if np.any(n_samples <= 0): raise ValueError( "The JL bound is defined for n_samples greater than zero, got %r" % n_samples ) denominator = (eps**2 / 2) - (eps**3 / 3) return (4 * np.log(n_samples) / denominator).astype(np.int64) def _check_density(density, n_features): """Factorize density check according to Li et al.""" if density == "auto": density = 1 / np.sqrt(n_features) elif density <= 0 or density > 1: raise ValueError("Expected density in range ]0, 1], got: %r" % density) return density def _check_input_size(n_components, n_features): """Factorize argument checking for random matrix generation.""" if n_components <= 0: raise ValueError( "n_components must be strictly positive, got %d" % n_components ) if n_features <= 0: raise ValueError("n_features must be strictly positive, got %d" % n_features) def _gaussian_random_matrix(n_components, n_features, random_state=None): """Generate a dense Gaussian random matrix. The components of the random matrix are drawn from N(0, 1.0 / n_components). Read more in the :ref:`User Guide <gaussian_random_matrix>`. Parameters ---------- n_components : int, Dimensionality of the target projection space. n_features : int, Dimensionality of the original source space. random_state : int, RandomState instance or None, default=None Controls the pseudo random number generator used to generate the matrix at fit time. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- components : ndarray of shape (n_components, n_features) The generated Gaussian random matrix. See Also -------- GaussianRandomProjection """ _check_input_size(n_components, n_features) rng = check_random_state(random_state) components = rng.normal( loc=0.0, scale=1.0 / np.sqrt(n_components), size=(n_components, n_features) ) return components def _sparse_random_matrix(n_components, n_features, density="auto", random_state=None): """Generalized Achlioptas random sparse matrix for random projection. Setting density to 1 / 3 will yield the original matrix by Dimitris Achlioptas while setting a lower value will yield the generalization by Ping Li et al. 
If we note :math:`s = 1 / density`, the components of the random matrix are drawn from: - -sqrt(s) / sqrt(n_components) with probability 1 / 2s - 0 with probability 1 - 1 / s - +sqrt(s) / sqrt(n_components) with probability 1 / 2s Read more in the :ref:`User Guide <sparse_random_matrix>`. Parameters ---------- n_components : int, Dimensionality of the target projection space. n_features : int, Dimensionality of the original source space. density : float or 'auto', default='auto' Ratio of non-zero component in the random projection matrix in the range `(0, 1]` If density = 'auto', the value is set to the minimum density as recommended by Ping Li et al.: 1 / sqrt(n_features). Use density = 1 / 3.0 if you want to reproduce the results from Achlioptas, 2001. random_state : int, RandomState instance or None, default=None Controls the pseudo random number generator used to generate the matrix at fit time. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Returns ------- components : {ndarray, sparse matrix} of shape (n_components, n_features) The generated Gaussian random matrix. Sparse matrix will be of CSR format. See Also -------- SparseRandomProjection References ---------- .. [1] Ping Li, T. Hastie and K. W. Church, 2006, "Very Sparse Random Projections". https://web.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf .. [2] D. Achlioptas, 2001, "Database-friendly random projections", https://cgi.di.uoa.gr/~optas/papers/jl.pdf """ _check_input_size(n_components, n_features) density = _check_density(density, n_features) rng = check_random_state(random_state) if density == 1: # skip index generation if totally dense components = rng.binomial(1, 0.5, (n_components, n_features)) * 2 - 1 return 1 / np.sqrt(n_components) * components else: # Generate location of non zero elements indices = [] offset = 0 indptr = [offset] for _ in range(n_components): # find the indices of the non-zero components for row i n_nonzero_i = rng.binomial(n_features, density) indices_i = sample_without_replacement( n_features, n_nonzero_i, random_state=rng ) indices.append(indices_i) offset += n_nonzero_i indptr.append(offset) indices = np.concatenate(indices) # Among non zero components the probability of the sign is 50%/50% data = rng.binomial(1, 0.5, size=np.size(indices)) * 2 - 1 # build the CSR structure by concatenating the rows components = sp.csr_matrix( (data, indices, indptr), shape=(n_components, n_features) ) return np.sqrt(1 / density) / np.sqrt(n_components) * components class BaseRandomProjection( ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator, metaclass=ABCMeta ): """Base class for random projections. Warning: This class should not be used directly. Use derived classes instead. """ _parameter_constraints: dict = { "n_components": [ Interval(Integral, 1, None, closed="left"), StrOptions({"auto"}), ], "eps": [Interval(Real, 0, None, closed="neither")], "compute_inverse_components": ["boolean"], "random_state": ["random_state"], } @abstractmethod def __init__( self, n_components="auto", *, eps=0.1, compute_inverse_components=False, random_state=None, ): self.n_components = n_components self.eps = eps self.compute_inverse_components = compute_inverse_components self.random_state = random_state @abstractmethod def _make_random_matrix(self, n_components, n_features): """Generate the random projection matrix. Parameters ---------- n_components : int, Dimensionality of the target projection space. 
n_features : int, Dimensionality of the original source space. Returns ------- components : {ndarray, sparse matrix} of shape (n_components, n_features) The generated random matrix. Sparse matrix will be of CSR format. """ def _compute_inverse_components(self): """Compute the pseudo-inverse of the (densified) components.""" components = self.components_ if sp.issparse(components): components = components.toarray() return linalg.pinv(components, check_finite=False) @_fit_context(prefer_skip_nested_validation=True) def fit(self, X, y=None): """Generate a sparse random projection matrix. Parameters ---------- X : {ndarray, sparse matrix} of shape (n_samples, n_features) Training set: only the shape is used to find optimal random matrix dimensions based on the theory referenced in the afore mentioned papers. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object BaseRandomProjection class instance. """ X = validate_data( self, X, accept_sparse=["csr", "csc"], dtype=[np.float64, np.float32] ) n_samples, n_features = X.shape if self.n_components == "auto": self.n_components_ = johnson_lindenstrauss_min_dim( n_samples=n_samples, eps=self.eps ) if self.n_components_ <= 0: raise ValueError( "eps=%f and n_samples=%d lead to a target dimension of " "%d which is invalid" % (self.eps, n_samples, self.n_components_) ) elif self.n_components_ > n_features: raise ValueError( "eps=%f and n_samples=%d lead to a target dimension of " "%d which is larger than the original space with " "n_features=%d" % (self.eps, n_samples, self.n_components_, n_features) ) else: if self.n_components > n_features: warnings.warn( "The number of components is higher than the number of" " features: n_features < n_components (%s < %s)." "The dimensionality of the problem will not be reduced." % (n_features, self.n_components), DataDimensionalityWarning, ) self.n_components_ = self.n_components # Generate a projection matrix of size [n_components, n_features] self.components_ = self._make_random_matrix( self.n_components_, n_features ).astype(X.dtype, copy=False) if self.compute_inverse_components: self.inverse_components_ = self._compute_inverse_components() # Required by ClassNamePrefixFeaturesOutMixin.get_feature_names_out. self._n_features_out = self.n_components return self def inverse_transform(self, X): """Project data back to its original space. Returns an array X_original whose transform would be X. Note that even if X is sparse, X_original is dense: this may use a lot of RAM. If `compute_inverse_components` is False, the inverse of the components is computed during each call to `inverse_transform` which can be costly. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_components) Data to be transformed back. Returns ------- X_original : ndarray of shape (n_samples, n_features) Reconstructed data. """ check_is_fitted(self) X = check_array(X, dtype=[np.float64, np.float32], accept_sparse=("csr", "csc")) if self.compute_inverse_components: return X @ self.inverse_components_.T inverse_components = self._compute_inverse_components() return X @ inverse_components.T def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.transformer_tags.preserves_dtype = ["float64", "float32"] tags.input_tags.sparse = True return tags class GaussianRandomProjection(BaseRandomProjection): """Reduce dimensionality through Gaussian random projection. The components of the random matrix are drawn from N(0, 1 / n_components). 
Read more in the :ref:`User Guide <gaussian_random_matrix>`. .. versionadded:: 0.13 Parameters ---------- n_components : int or 'auto', default='auto' Dimensionality of the target projection space. n_components can be automatically adjusted according to the number of samples in the dataset and the bound given by the Johnson-Lindenstrauss lemma. In that case the quality of the embedding is controlled by the ``eps`` parameter. It should be noted that Johnson-Lindenstrauss lemma can yield very conservative estimated of the required number of components as it makes no assumption on the structure of the dataset. eps : float, default=0.1 Parameter to control the quality of the embedding according to the Johnson-Lindenstrauss lemma when `n_components` is set to 'auto'. The value should be strictly positive. Smaller values lead to better embedding and higher number of dimensions (n_components) in the target projection space. compute_inverse_components : bool, default=False Learn the inverse transform by computing the pseudo-inverse of the components during fit. Note that computing the pseudo-inverse does not scale well to large matrices. random_state : int, RandomState instance or None, default=None Controls the pseudo random number generator used to generate the projection matrix at fit time. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Attributes ---------- n_components_ : int Concrete number of components computed when n_components="auto". components_ : ndarray of shape (n_components, n_features) Random matrix used for the projection. inverse_components_ : ndarray of shape (n_features, n_components) Pseudo-inverse of the components, only computed if `compute_inverse_components` is True. .. versionadded:: 1.1 n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- SparseRandomProjection : Reduce dimensionality through sparse random projection. Examples -------- >>> import numpy as np >>> from sklearn.random_projection import GaussianRandomProjection >>> rng = np.random.RandomState(42) >>> X = rng.rand(25, 3000) >>> transformer = GaussianRandomProjection(random_state=rng) >>> X_new = transformer.fit_transform(X) >>> X_new.shape (25, 2759) """ def __init__( self, n_components="auto", *, eps=0.1, compute_inverse_components=False, random_state=None, ): super().__init__( n_components=n_components, eps=eps, compute_inverse_components=compute_inverse_components, random_state=random_state, ) def _make_random_matrix(self, n_components, n_features): """Generate the random projection matrix. Parameters ---------- n_components : int, Dimensionality of the target projection space. n_features : int, Dimensionality of the original source space. Returns ------- components : ndarray of shape (n_components, n_features) The generated random matrix. """ random_state = check_random_state(self.random_state) return _gaussian_random_matrix( n_components, n_features, random_state=random_state ) def transform(self, X): """Project the data by using matrix product with the random matrix. Parameters ---------- X : {ndarray, sparse matrix} of shape (n_samples, n_features) The input data to project into a smaller dimensional space. Returns ------- X_new : ndarray of shape (n_samples, n_components) Projected array. 
""" check_is_fitted(self) X = validate_data( self, X, accept_sparse=["csr", "csc"], reset=False, dtype=[np.float64, np.float32], ) return X @ self.components_.T class SparseRandomProjection(BaseRandomProjection): """Reduce dimensionality through sparse random projection. Sparse random matrix is an alternative to dense random projection matrix that guarantees similar embedding quality while being much more memory efficient and allowing faster computation of the projected data. If we note `s = 1 / density` the components of the random matrix are drawn from: .. code-block:: text -sqrt(s) / sqrt(n_components) with probability 1 / 2s 0 with probability 1 - 1 / s +sqrt(s) / sqrt(n_components) with probability 1 / 2s Read more in the :ref:`User Guide <sparse_random_matrix>`. .. versionadded:: 0.13 Parameters ---------- n_components : int or 'auto', default='auto' Dimensionality of the target projection space. n_components can be automatically adjusted according to the number of samples in the dataset and the bound given by the Johnson-Lindenstrauss lemma. In that case the quality of the embedding is controlled by the ``eps`` parameter. It should be noted that Johnson-Lindenstrauss lemma can yield very conservative estimated of the required number of components as it makes no assumption on the structure of the dataset. density : float or 'auto', default='auto' Ratio in the range (0, 1] of non-zero component in the random projection matrix. If density = 'auto', the value is set to the minimum density as recommended by Ping Li et al.: 1 / sqrt(n_features). Use density = 1 / 3.0 if you want to reproduce the results from Achlioptas, 2001. eps : float, default=0.1 Parameter to control the quality of the embedding according to the Johnson-Lindenstrauss lemma when n_components is set to 'auto'. This value should be strictly positive. Smaller values lead to better embedding and higher number of dimensions (n_components) in the target projection space. dense_output : bool, default=False If True, ensure that the output of the random projection is a dense numpy array even if the input and random projection matrix are both sparse. In practice, if the number of components is small the number of zero components in the projected data will be very small and it will be more CPU and memory efficient to use a dense representation. If False, the projected data uses a sparse representation if the input is sparse. compute_inverse_components : bool, default=False Learn the inverse transform by computing the pseudo-inverse of the components during fit. Note that the pseudo-inverse is always a dense array, even if the training data was sparse. This means that it might be necessary to call `inverse_transform` on a small batch of samples at a time to avoid exhausting the available memory on the host. Moreover, computing the pseudo-inverse does not scale well to large matrices. random_state : int, RandomState instance or None, default=None Controls the pseudo random number generator used to generate the projection matrix at fit time. Pass an int for reproducible output across multiple function calls. See :term:`Glossary <random_state>`. Attributes ---------- n_components_ : int Concrete number of components computed when n_components="auto". components_ : sparse matrix of shape (n_components, n_features) Random matrix used for the projection. Sparse matrix will be of CSR format. 
inverse_components_ : ndarray of shape (n_features, n_components) Pseudo-inverse of the components, only computed if `compute_inverse_components` is True. .. versionadded:: 1.1 density_ : float in range 0.0 - 1.0 Concrete density computed from when density = "auto". n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- GaussianRandomProjection : Reduce dimensionality through Gaussian random projection. References ---------- .. [1] Ping Li, T. Hastie and K. W. Church, 2006, "Very Sparse Random Projections". https://web.stanford.edu/~hastie/Papers/Ping/KDD06_rp.pdf .. [2] D. Achlioptas, 2001, "Database-friendly random projections", https://cgi.di.uoa.gr/~optas/papers/jl.pdf Examples -------- >>> import numpy as np >>> from sklearn.random_projection import SparseRandomProjection >>> rng = np.random.RandomState(42) >>> X = rng.rand(25, 3000) >>> transformer = SparseRandomProjection(random_state=rng) >>> X_new = transformer.fit_transform(X) >>> X_new.shape (25, 2759) >>> # very few components are non-zero >>> np.mean(transformer.components_ != 0) np.float64(0.0182) """ _parameter_constraints: dict = { **BaseRandomProjection._parameter_constraints, "density": [Interval(Real, 0.0, 1.0, closed="right"), StrOptions({"auto"})], "dense_output": ["boolean"], } def __init__( self, n_components="auto", *, density="auto", eps=0.1, dense_output=False, compute_inverse_components=False, random_state=None, ): super().__init__( n_components=n_components, eps=eps, compute_inverse_components=compute_inverse_components, random_state=random_state, ) self.dense_output = dense_output self.density = density def _make_random_matrix(self, n_components, n_features): """Generate the random projection matrix Parameters ---------- n_components : int Dimensionality of the target projection space. n_features : int Dimensionality of the original source space. Returns ------- components : sparse matrix of shape (n_components, n_features) The generated random matrix in CSR format. """ random_state = check_random_state(self.random_state) self.density_ = _check_density(self.density, n_features) return _sparse_random_matrix( n_components, n_features, density=self.density_, random_state=random_state ) def transform(self, X): """Project the data by using matrix product with the random matrix. Parameters ---------- X : {ndarray, sparse matrix} of shape (n_samples, n_features) The input data to project into a smaller dimensional space. Returns ------- X_new : {ndarray, sparse matrix} of shape (n_samples, n_components) Projected array. It is a sparse matrix only when the input is sparse and `dense_output = False`. """ check_is_fitted(self) X = validate_data( self, X, accept_sparse=["csr", "csc"], reset=False, dtype=[np.float64, np.float32], ) return safe_sparse_dot(X, self.components_.T, dense_output=self.dense_output)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
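A minimal sketch (assumed, not part of the dump) tying the pieces of the row above together: johnson_lindenstrauss_min_dim gives the target dimensionality implied by n_components='auto', and compute_inverse_components=True enables inverse_transform back to the original feature space.

import numpy as np
from sklearn.random_projection import (
    GaussianRandomProjection,
    johnson_lindenstrauss_min_dim,
)

rng = np.random.RandomState(0)
X = rng.rand(50, 5_000)

# Number of components implied by n_components='auto' for this eps.
n_comp = johnson_lindenstrauss_min_dim(n_samples=X.shape[0], eps=0.5)

proj = GaussianRandomProjection(
    eps=0.5, compute_inverse_components=True, random_state=0
)
X_new = proj.fit_transform(X)
X_back = proj.inverse_transform(X_new)

print(proj.n_components_ == n_comp)   # True
print(X_new.shape, X_back.shape)      # (50, n_comp) and (50, 5000)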
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/kernel_ridge.py
sklearn/kernel_ridge.py
"""Kernel ridge regression.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from numbers import Real import numpy as np from sklearn.base import BaseEstimator, MultiOutputMixin, RegressorMixin, _fit_context from sklearn.linear_model._ridge import _solve_cholesky_kernel from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS, pairwise_kernels from sklearn.utils._param_validation import Interval, StrOptions from sklearn.utils.validation import ( _check_sample_weight, check_is_fitted, validate_data, ) class KernelRidge(MultiOutputMixin, RegressorMixin, BaseEstimator): """Kernel ridge regression. Kernel ridge regression (KRR) combines ridge regression (linear least squares with l2-norm regularization) with the kernel trick. It thus learns a linear function in the space induced by the respective kernel and the data. For non-linear kernels, this corresponds to a non-linear function in the original space. The form of the model learned by KRR is identical to support vector regression (SVR). However, different loss functions are used: KRR uses squared error loss while support vector regression uses epsilon-insensitive loss, both combined with l2 regularization. In contrast to SVR, fitting a KRR model can be done in closed-form and is typically faster for medium-sized datasets. On the other hand, the learned model is non-sparse and thus slower than SVR, which learns a sparse model for epsilon > 0, at prediction-time. This estimator has built-in support for multi-variate regression (i.e., when y is a 2d-array of shape [n_samples, n_targets]). Read more in the :ref:`User Guide <kernel_ridge>`. Parameters ---------- alpha : float or array-like of shape (n_targets,), default=1.0 Regularization strength; must be a positive float. Regularization improves the conditioning of the problem and reduces the variance of the estimates. Larger values specify stronger regularization. Alpha corresponds to ``1 / (2C)`` in other linear models such as :class:`~sklearn.linear_model.LogisticRegression` or :class:`~sklearn.svm.LinearSVC`. If an array is passed, penalties are assumed to be specific to the targets. Hence they must correspond in number. See :ref:`ridge_regression` for formula. kernel : str or callable, default="linear" Kernel mapping used internally. This parameter is directly passed to :class:`~sklearn.metrics.pairwise.pairwise_kernels`. If `kernel` is a string, it must be one of the metrics in `pairwise.PAIRWISE_KERNEL_FUNCTIONS` or "precomputed". If `kernel` is "precomputed", X is assumed to be a kernel matrix. Alternatively, if `kernel` is a callable function, it is called on each pair of instances (rows) and the resulting value recorded. The callable should take two rows from X as input and return the corresponding kernel value as a single number. This means that callables from :mod:`sklearn.metrics.pairwise` are not allowed, as they operate on matrices, not single samples. Use the string identifying the kernel instead. gamma : float, default=None Gamma parameter for the RBF, laplacian, polynomial, exponential chi2 and sigmoid kernels. Interpretation of the default value is left to the kernel; see the documentation for sklearn.metrics.pairwise. Ignored by other kernels. degree : float, default=3 Degree of the polynomial kernel. Ignored by other kernels. coef0 : float, default=1 Zero coefficient for polynomial and sigmoid kernels. Ignored by other kernels. 
kernel_params : dict, default=None Additional parameters (keyword arguments) for kernel function passed as callable object. Attributes ---------- dual_coef_ : ndarray of shape (n_samples,) or (n_samples, n_targets) Representation of weight vector(s) in kernel space X_fit_ : {ndarray, sparse matrix} of shape (n_samples, n_features) Training data, which is also required for prediction. If kernel == "precomputed" this is instead the precomputed training matrix, of shape (n_samples, n_samples). n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- sklearn.gaussian_process.GaussianProcessRegressor : Gaussian Process regressor providing automatic kernel hyperparameters tuning and predictions uncertainty. sklearn.linear_model.Ridge : Linear ridge regression. sklearn.linear_model.RidgeCV : Ridge regression with built-in cross-validation. sklearn.svm.SVR : Support Vector Regression accepting a large variety of kernels. References ---------- * Kevin P. Murphy "Machine Learning: A Probabilistic Perspective", The MIT Press chapter 14.4.3, pp. 492-493 Examples -------- >>> from sklearn.kernel_ridge import KernelRidge >>> import numpy as np >>> n_samples, n_features = 10, 5 >>> rng = np.random.RandomState(0) >>> y = rng.randn(n_samples) >>> X = rng.randn(n_samples, n_features) >>> krr = KernelRidge(alpha=1.0) >>> krr.fit(X, y) KernelRidge(alpha=1.0) """ _parameter_constraints: dict = { "alpha": [Interval(Real, 0, None, closed="left"), "array-like"], "kernel": [ StrOptions(set(PAIRWISE_KERNEL_FUNCTIONS.keys()) | {"precomputed"}), callable, ], "gamma": [Interval(Real, 0, None, closed="left"), None], "degree": [Interval(Real, 0, None, closed="left")], "coef0": [Interval(Real, None, None, closed="neither")], "kernel_params": [dict, None], } def __init__( self, alpha=1, *, kernel="linear", gamma=None, degree=3, coef0=1, kernel_params=None, ): self.alpha = alpha self.kernel = kernel self.gamma = gamma self.degree = degree self.coef0 = coef0 self.kernel_params = kernel_params def _get_kernel(self, X, Y=None): if callable(self.kernel): params = self.kernel_params or {} else: params = {"gamma": self.gamma, "degree": self.degree, "coef0": self.coef0} return pairwise_kernels(X, Y, metric=self.kernel, filter_params=True, **params) def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.input_tags.sparse = True tags.input_tags.pairwise = self.kernel == "precomputed" return tags @_fit_context(prefer_skip_nested_validation=True) def fit(self, X, y, sample_weight=None): """Fit Kernel Ridge regression model. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. If kernel == "precomputed" this is instead a precomputed kernel matrix, of shape (n_samples, n_samples). y : array-like of shape (n_samples,) or (n_samples, n_targets) Target values. sample_weight : float or array-like of shape (n_samples,), default=None Individual weights for each sample, ignored if None is passed. Returns ------- self : object Returns the instance itself. 
""" # Convert data X, y = validate_data( self, X, y, accept_sparse=("csr", "csc"), multi_output=True, y_numeric=True ) if sample_weight is not None and not isinstance(sample_weight, float): sample_weight = _check_sample_weight(sample_weight, X) K = self._get_kernel(X) alpha = np.atleast_1d(self.alpha) ravel = False if len(y.shape) == 1: y = y.reshape(-1, 1) ravel = True copy = self.kernel == "precomputed" self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha, sample_weight, copy) if ravel: self.dual_coef_ = self.dual_coef_.ravel() self.X_fit_ = X return self def predict(self, X): """Predict using the kernel ridge model. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Samples. If kernel == "precomputed" this is instead a precomputed kernel matrix, shape = [n_samples, n_samples_fitted], where n_samples_fitted is the number of samples used in the fitting for this estimator. Returns ------- C : ndarray of shape (n_samples,) or (n_samples, n_targets) Returns predicted values. """ check_is_fitted(self) X = validate_data(self, X, accept_sparse=("csr", "csc"), reset=False) K = self._get_kernel(X, self.X_fit_) return np.dot(K, self.dual_coef_)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
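The KernelRidge docstring above mentions the "precomputed" kernel option; the following sketch (assumed usage, not from the dumped file) shows that passing a precomputed RBF Gram matrix gives the same predictions as letting the estimator compute the kernel internally.

import numpy as np
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics.pairwise import rbf_kernel

rng = np.random.RandomState(0)
X = rng.randn(40, 3)
y = np.sin(X[:, 0]) + 0.1 * rng.randn(40)

# Fitting with the built-in RBF kernel ...
krr = KernelRidge(alpha=0.1, kernel="rbf", gamma=0.5).fit(X, y)

# ... is equivalent to passing the Gram matrix with kernel="precomputed".
K = rbf_kernel(X, X, gamma=0.5)
krr_pre = KernelRidge(alpha=0.1, kernel="precomputed").fit(K, y)

print(np.allclose(krr.predict(X), krr_pre.predict(K)))  # True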
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/exceptions.py
sklearn/exceptions.py
"""Custom warnings and errors used across scikit-learn.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause __all__ = [ "ConvergenceWarning", "DataConversionWarning", "DataDimensionalityWarning", "EfficiencyWarning", "EstimatorCheckFailedWarning", "FitFailedWarning", "NotFittedError", "PositiveSpectrumWarning", "SkipTestWarning", "UndefinedMetricWarning", "UnsetMetadataPassedError", ] class UnsetMetadataPassedError(ValueError): """Exception class to raise if a metadata is passed which is not explicitly \ requested (metadata=True) or not requested (metadata=False). .. versionadded:: 1.3 Parameters ---------- message : str The message unrequested_params : dict A dictionary of parameters and their values which are provided but not requested. routed_params : dict A dictionary of routed parameters. """ def __init__(self, *, message, unrequested_params, routed_params): super().__init__(message) self.unrequested_params = unrequested_params self.routed_params = routed_params class NotFittedError(ValueError, AttributeError): """Exception class to raise if estimator is used before fitting. This class inherits from both ValueError and AttributeError to help with exception handling and backward compatibility. Examples -------- >>> from sklearn.svm import LinearSVC >>> from sklearn.exceptions import NotFittedError >>> try: ... LinearSVC().predict([[1, 2], [2, 3], [3, 4]]) ... except NotFittedError as e: ... print(repr(e)) NotFittedError("This LinearSVC instance is not fitted yet. Call 'fit' with appropriate arguments before using this estimator."...) .. versionchanged:: 0.18 Moved from sklearn.utils.validation. """ class ConvergenceWarning(UserWarning): """Custom warning to capture convergence problems .. versionchanged:: 0.18 Moved from sklearn.utils. """ class DataConversionWarning(UserWarning): """Warning used to notify implicit data conversions happening in the code. This warning occurs when some input data needs to be converted or interpreted in a way that may not match the user's expectations. For example, this warning may occur when the user - passes an integer array to a function which expects float input and will convert the input - requests a non-copying operation, but a copy is required to meet the implementation's data-type expectations; - passes an input whose shape can be interpreted ambiguously. .. versionchanged:: 0.18 Moved from sklearn.utils.validation. """ class DataDimensionalityWarning(UserWarning): """Custom warning to notify potential issues with data dimensionality. For example, in random projection, this warning is raised when the number of components, which quantifies the dimensionality of the target projection space, is higher than the number of features, which quantifies the dimensionality of the original source space, to imply that the dimensionality of the problem will not be reduced. .. versionchanged:: 0.18 Moved from sklearn.utils. """ class EfficiencyWarning(UserWarning): """Warning used to notify the user of inefficient computation. This warning notifies the user that the efficiency may not be optimal due to some reason which may be included as a part of the warning message. This may be subclassed into a more specific Warning class. .. versionadded:: 0.18 """ class FitFailedWarning(RuntimeWarning): """Warning class used if there is an error while fitting the estimator. 
    This Warning is used in meta estimators GridSearchCV and RandomizedSearchCV
    and the cross-validation helper function cross_val_score to warn when there
    is an error while fitting the estimator.

    .. versionchanged:: 0.18
       Moved from sklearn.cross_validation.
    """


class SkipTestWarning(UserWarning):
    """Warning class used to notify the user of a test that was skipped.

    For example, one of the estimator checks requires a pandas import.
    If the pandas package cannot be imported, the test will be skipped rather
    than register as a failure.
    """


class UndefinedMetricWarning(UserWarning):
    """Warning used when the metric is invalid

    .. versionchanged:: 0.18
       Moved from sklearn.base.
    """


class PositiveSpectrumWarning(UserWarning):
    """Warning raised when the eigenvalues of a PSD matrix have issues

    This warning is typically raised by ``_check_psd_eigenvalues`` when the
    eigenvalues of a positive semidefinite (PSD) matrix such as a gram matrix
    (kernel) present significant negative eigenvalues, or bad conditioning i.e.
    very small non-zero eigenvalues compared to the largest eigenvalue.

    .. versionadded:: 0.22
    """


class InconsistentVersionWarning(UserWarning):
    """Warning raised when an estimator is unpickled with an inconsistent version.

    Parameters
    ----------
    estimator_name : str
        Estimator name.

    current_sklearn_version : str
        Current scikit-learn version.

    original_sklearn_version : str
        Original scikit-learn version.
    """

    def __init__(
        self, *, estimator_name, current_sklearn_version, original_sklearn_version
    ):
        self.estimator_name = estimator_name
        self.current_sklearn_version = current_sklearn_version
        self.original_sklearn_version = original_sklearn_version

    def __str__(self):
        return (
            f"Trying to unpickle estimator {self.estimator_name} from version"
            f" {self.original_sklearn_version} when "
            f"using version {self.current_sklearn_version}. This might lead to breaking"
            " code or "
            "invalid results. Use at your own risk. "
            "For more info please refer to:\n"
            "https://scikit-learn.org/stable/model_persistence.html"
            "#security-maintainability-limitations"
        )


class EstimatorCheckFailedWarning(UserWarning):
    """Warning raised when an estimator check from the common tests fails.

    Parameters
    ----------
    estimator : estimator object
        Estimator instance for which the test failed.

    check_name : str
        Name of the check that failed.

    exception : Exception
        Exception raised by the failed check.

    status : str
        Status of the check.

    expected_to_fail : bool
        Whether the check was expected to fail.

    expected_to_fail_reason : str
        Reason for the expected failure.
    """

    def __init__(
        self,
        *,
        estimator,
        check_name: str,
        exception: Exception,
        status: str,
        expected_to_fail: bool,
        expected_to_fail_reason: str,
    ):
        self.estimator = estimator
        self.check_name = check_name
        self.exception = exception
        self.status = status
        self.expected_to_fail = expected_to_fail
        self.expected_to_fail_reason = expected_to_fail_reason

    def __repr__(self):
        expected_to_fail_str = (
            f"Expected to fail: {self.expected_to_fail_reason}"
            if self.expected_to_fail
            else "Not expected to fail"
        )
        return (
            f"Test {self.check_name} failed for estimator {self.estimator!r}.\n"
            f"Expected to fail reason: {expected_to_fail_str}\n"
            f"Exception: {self.exception}"
        )

    def __str__(self):
        return self.__repr__()
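The classes defined in this file are ordinary Python warnings and exceptions, so they compose with the standard `warnings` machinery. The sketch below is not part of the module and is only illustrative: whether `ConvergenceWarning` actually fires depends on the data and the solver, so escalating it to an error is shown as a way to surface such a failure explicitly rather than as a guaranteed outcome.

import warnings

import numpy as np

from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model import LogisticRegression

rng = np.random.RandomState(0)
X = rng.normal(size=(100, 20))
y = rng.randint(0, 2, size=100)

with warnings.catch_warnings():
    # Escalate convergence problems from a warning to an exception so that an
    # under-iterated fit cannot pass silently.
    warnings.simplefilter("error", category=ConvergenceWarning)
    try:
        LogisticRegression(max_iter=1).fit(X, y)
    except ConvergenceWarning as exc:
        print(f"caught convergence problem: {exc}")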
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/_config.py
sklearn/_config.py
"""Global configuration state and functions for management""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import os import threading from contextlib import contextmanager as contextmanager _global_config = { "assume_finite": bool(os.environ.get("SKLEARN_ASSUME_FINITE", False)), "working_memory": int(os.environ.get("SKLEARN_WORKING_MEMORY", 1024)), "print_changed_only": True, "display": "diagram", "pairwise_dist_chunk_size": int( os.environ.get("SKLEARN_PAIRWISE_DIST_CHUNK_SIZE", 256) ), "enable_cython_pairwise_dist": True, "array_api_dispatch": False, "transform_output": "default", "enable_metadata_routing": False, "skip_parameter_validation": False, } _threadlocal = threading.local() def _get_threadlocal_config(): """Get a threadlocal **mutable** configuration. If the configuration does not exist, copy the default global configuration.""" if not hasattr(_threadlocal, "global_config"): _threadlocal.global_config = _global_config.copy() return _threadlocal.global_config def get_config(): """Retrieve the current scikit-learn configuration. This reflects the effective global configurations as established by default upon library import, or modified via :func:`set_config` or :func:`config_context`. Returns ------- config : dict Keys are parameter names that can be passed to :func:`set_config`. See Also -------- config_context : Context manager for global scikit-learn configuration. set_config : Set global scikit-learn configuration. Examples -------- >>> import sklearn >>> config = sklearn.get_config() >>> config.keys() dict_keys([...]) """ # Return a copy of the threadlocal configuration so that users will # not be able to modify the configuration with the returned dict. return _get_threadlocal_config().copy() def set_config( assume_finite=None, working_memory=None, print_changed_only=None, display=None, pairwise_dist_chunk_size=None, enable_cython_pairwise_dist=None, array_api_dispatch=None, transform_output=None, enable_metadata_routing=None, skip_parameter_validation=None, ): """Set global scikit-learn configuration. These settings control the behaviour of scikit-learn functions during a library usage session. Global configuration defaults (as described in the parameter list below) take effect when scikit-learn is imported. This function can be used to modify the global scikit-learn configuration at runtime. Passing `None` as an argument (the default) leaves the corresponding setting unchanged. This allows users to selectively update the global configuration values without affecting the others. .. versionadded:: 0.19 Parameters ---------- assume_finite : bool, default=None If True, validation for finiteness will be skipped, saving time, but leading to potential crashes. If False, validation for finiteness will be performed, avoiding error. Global default: False. .. versionadded:: 0.19 working_memory : int, default=None If set, scikit-learn will attempt to limit the size of temporary arrays to this number of MiB (per job when parallelised), often saving both computation time and memory on expensive operations that can be performed in chunks. Global default: 1024. .. versionadded:: 0.20 print_changed_only : bool, default=None If True, only the parameters that were set to non-default values will be printed when printing an estimator. For example, ``print(SVC())`` while True will only print 'SVC()' while the default behaviour would be to print 'SVC(C=1.0, cache_size=200, ...)' with all the non-changed parameters. Global default: True. .. versionadded:: 0.21 .. 
versionchanged:: 0.23 Global default configuration changed from False to True. display : {'text', 'diagram'}, default=None If 'diagram', estimators will be displayed as a diagram in a Jupyter lab or notebook context. If 'text', estimators will be displayed as text. Global default: 'diagram'. .. versionadded:: 0.23 pairwise_dist_chunk_size : int, default=None The number of row vectors per chunk for the accelerated pairwise- distances reduction backend. Global default: 256 (suitable for most of modern laptops' caches and architectures). Intended for easier benchmarking and testing of scikit-learn internals. End users are not expected to benefit from customizing this configuration setting. .. versionadded:: 1.1 enable_cython_pairwise_dist : bool, default=None Use the accelerated pairwise-distances reduction backend when possible. Global default: True. Intended for easier benchmarking and testing of scikit-learn internals. End users are not expected to benefit from customizing this configuration setting. .. versionadded:: 1.1 array_api_dispatch : bool, default=None Use Array API dispatching when inputs follow the Array API standard. Global default: False. See the :ref:`User Guide <array_api>` for more details. .. versionadded:: 1.2 transform_output : str, default=None Configure output of `transform` and `fit_transform`. See :ref:`sphx_glr_auto_examples_miscellaneous_plot_set_output.py` for an example on how to use the API. - `"default"`: Default output format of a transformer - `"pandas"`: DataFrame output - `"polars"`: Polars output - `None`: Transform configuration is unchanged Global default: "default". .. versionadded:: 1.2 .. versionadded:: 1.4 `"polars"` option was added. enable_metadata_routing : bool, default=None Enable metadata routing. By default this feature is disabled. Refer to :ref:`metadata routing user guide <metadata_routing>` for more details. - `True`: Metadata routing is enabled - `False`: Metadata routing is disabled, use the old syntax. - `None`: Configuration is unchanged Global default: False. .. versionadded:: 1.3 skip_parameter_validation : bool, default=None If `True`, disable the validation of the hyper-parameters' types and values in the fit method of estimators and for arguments passed to public helper functions. It can save time in some situations but can lead to low level crashes and exceptions with confusing error messages. Global default: False. Note that for data parameters, such as `X` and `y`, only type validation is skipped but validation with `check_array` will continue to run. .. versionadded:: 1.3 See Also -------- config_context : Context manager for global scikit-learn configuration. get_config : Retrieve current values of the global configuration. 
Examples -------- >>> from sklearn import set_config >>> set_config(display='diagram') # doctest: +SKIP """ local_config = _get_threadlocal_config() if assume_finite is not None: local_config["assume_finite"] = assume_finite if working_memory is not None: local_config["working_memory"] = working_memory if print_changed_only is not None: local_config["print_changed_only"] = print_changed_only if display is not None: local_config["display"] = display if pairwise_dist_chunk_size is not None: local_config["pairwise_dist_chunk_size"] = pairwise_dist_chunk_size if enable_cython_pairwise_dist is not None: local_config["enable_cython_pairwise_dist"] = enable_cython_pairwise_dist if array_api_dispatch is not None: from sklearn.utils._array_api import _check_array_api_dispatch _check_array_api_dispatch(array_api_dispatch) local_config["array_api_dispatch"] = array_api_dispatch if transform_output is not None: local_config["transform_output"] = transform_output if enable_metadata_routing is not None: local_config["enable_metadata_routing"] = enable_metadata_routing if skip_parameter_validation is not None: local_config["skip_parameter_validation"] = skip_parameter_validation @contextmanager def config_context( *, assume_finite=None, working_memory=None, print_changed_only=None, display=None, pairwise_dist_chunk_size=None, enable_cython_pairwise_dist=None, array_api_dispatch=None, transform_output=None, enable_metadata_routing=None, skip_parameter_validation=None, ): """Context manager to temporarily change the global scikit-learn configuration. This context manager can be used to apply scikit-learn configuration changes within the scope of the with statement. Once the context exits, the global configuration is restored again. The default global configurations (which take effect when scikit-learn is imported) are defined below in the parameter list. Parameters ---------- assume_finite : bool, default=None If True, validation for finiteness will be skipped, saving time, but leading to potential crashes. If False, validation for finiteness will be performed, avoiding error. If None, the existing configuration won't change. Global default: False. working_memory : int, default=None If set, scikit-learn will attempt to limit the size of temporary arrays to this number of MiB (per job when parallelised), often saving both computation time and memory on expensive operations that can be performed in chunks. If None, the existing configuration won't change. Global default: 1024. print_changed_only : bool, default=None If True, only the parameters that were set to non-default values will be printed when printing an estimator. For example, ``print(SVC())`` while True will only print 'SVC()', but would print 'SVC(C=1.0, cache_size=200, ...)' with all the non-changed parameters when False. If None, the existing configuration won't change. Global default: True. .. versionchanged:: 0.23 Global default configuration changed from False to True. display : {'text', 'diagram'}, default=None If 'diagram', estimators will be displayed as a diagram in a Jupyter lab or notebook context. If 'text', estimators will be displayed as text. If None, the existing configuration won't change. Global default: 'diagram'. .. versionadded:: 0.23 pairwise_dist_chunk_size : int, default=None The number of row vectors per chunk for the accelerated pairwise- distances reduction backend. Global default: 256 (suitable for most of modern laptops' caches and architectures). Intended for easier benchmarking and testing of scikit-learn internals. 
        End users are not expected to benefit from customizing this
        configuration setting.

        .. versionadded:: 1.1

    enable_cython_pairwise_dist : bool, default=None
        Use the accelerated pairwise-distances reduction backend when
        possible. Global default: True.

        Intended for easier benchmarking and testing of scikit-learn
        internals. End users are not expected to benefit from customizing
        this configuration setting.

        .. versionadded:: 1.1

    array_api_dispatch : bool, default=None
        Use Array API dispatching when inputs follow the Array API standard.
        Global default: False.

        See the :ref:`User Guide <array_api>` for more details.

        .. versionadded:: 1.2

    transform_output : str, default=None
        Configure output of `transform` and `fit_transform`.

        See :ref:`sphx_glr_auto_examples_miscellaneous_plot_set_output.py`
        for an example on how to use the API.

        - `"default"`: Default output format of a transformer
        - `"pandas"`: DataFrame output
        - `"polars"`: Polars output
        - `None`: Transform configuration is unchanged

        Global default: "default".

        .. versionadded:: 1.2
        .. versionadded:: 1.4
            `"polars"` option was added.

    enable_metadata_routing : bool, default=None
        Enable metadata routing. By default this feature is disabled.

        Refer to :ref:`metadata routing user guide <metadata_routing>` for more
        details.

        - `True`: Metadata routing is enabled
        - `False`: Metadata routing is disabled, use the old syntax.
        - `None`: Configuration is unchanged

        Global default: False.

        .. versionadded:: 1.3

    skip_parameter_validation : bool, default=None
        If `True`, disable the validation of the hyper-parameters' types and
        values in the fit method of estimators and for arguments passed to
        public helper functions. It can save time in some situations but can
        lead to low level crashes and exceptions with confusing error
        messages. Global default: False.

        Note that for data parameters, such as `X` and `y`, only type
        validation is skipped but validation with `check_array` will continue
        to run.

        .. versionadded:: 1.3

    Yields
    ------
    None.

    See Also
    --------
    set_config : Set global scikit-learn configuration.
    get_config : Retrieve current values of the global configuration.

    Notes
    -----
    All settings, not just those presently modified, will be returned to
    their previous values when the context manager is exited.

    Examples
    --------
    >>> import sklearn
    >>> from sklearn.utils.validation import assert_all_finite
    >>> with sklearn.config_context(assume_finite=True):
    ...     assert_all_finite([float('nan')])
    >>> with sklearn.config_context(assume_finite=True):
    ...     with sklearn.config_context(assume_finite=False):
    ...         assert_all_finite([float('nan')])
    Traceback (most recent call last):
    ...
    ValueError: Input contains NaN...
    """
    old_config = get_config()
    set_config(
        assume_finite=assume_finite,
        working_memory=working_memory,
        print_changed_only=print_changed_only,
        display=display,
        pairwise_dist_chunk_size=pairwise_dist_chunk_size,
        enable_cython_pairwise_dist=enable_cython_pairwise_dist,
        array_api_dispatch=array_api_dispatch,
        transform_output=transform_output,
        enable_metadata_routing=enable_metadata_routing,
        skip_parameter_validation=skip_parameter_validation,
    )

    try:
        yield
    finally:
        set_config(**old_config)
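As a small usage sketch (not part of this module): because the configuration store is thread-local and `config_context` restores the previous values in a `finally` block, settings changed inside the `with` block do not leak out of it, even if an error is raised.

import sklearn

print(sklearn.get_config()["assume_finite"])      # False by default

with sklearn.config_context(assume_finite=True):
    # Estimators fitted in this block skip the finiteness validation.
    print(sklearn.get_config()["assume_finite"])  # True

print(sklearn.get_config()["assume_finite"])      # restored to False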
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/multiclass.py
sklearn/multiclass.py
"""Multiclass learning algorithms. - one-vs-the-rest / one-vs-all - one-vs-one - error correcting output codes The estimators provided in this module are meta-estimators: they require a base estimator to be provided in their constructor. For example, it is possible to use these estimators to turn a binary classifier or a regressor into a multiclass classifier. It is also possible to use these estimators with multiclass estimators in the hope that their accuracy or runtime performance improves. All classifiers in scikit-learn implement multiclass classification; you only need to use this module if you want to experiment with custom multiclass strategies. The one-vs-the-rest meta-classifier also implements a `predict_proba` method, so long as such a method is implemented by the base classifier. This method returns probabilities of class membership in both the single label and multilabel case. Note that in the multilabel case, probabilities are the marginal probability that a given sample falls in the given class. As such, in the multilabel case the sum of these probabilities over all possible labels for a given sample *will not* sum to unity, as they do in the single label case. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import array import itertools import warnings from numbers import Integral, Real import numpy as np import scipy.sparse as sp from sklearn.base import ( BaseEstimator, ClassifierMixin, MetaEstimatorMixin, MultiOutputMixin, _fit_context, clone, is_classifier, is_regressor, ) from sklearn.metrics.pairwise import pairwise_distances_argmin from sklearn.preprocessing import LabelBinarizer from sklearn.utils import check_random_state from sklearn.utils._param_validation import HasMethods, Interval from sklearn.utils._tags import get_tags from sklearn.utils.metadata_routing import ( MetadataRouter, MethodMapping, _raise_for_params, process_routing, ) from sklearn.utils.metaestimators import _safe_split, available_if from sklearn.utils.multiclass import ( _check_partial_fit_first_call, _ovr_decision_function, check_classification_targets, ) from sklearn.utils.parallel import Parallel, delayed from sklearn.utils.validation import ( _check_method_params, _num_samples, check_is_fitted, validate_data, ) __all__ = [ "OneVsOneClassifier", "OneVsRestClassifier", "OutputCodeClassifier", ] def _fit_binary(estimator, X, y, fit_params, classes=None): """Fit a single binary estimator.""" unique_y = np.unique(y) if len(unique_y) == 1: if classes is not None: if y[0] == -1: c = 0 else: c = y[0] warnings.warn( "Label %s is present in all training examples." 
% str(classes[c]) ) estimator = _ConstantPredictor().fit(X, unique_y) else: estimator = clone(estimator) estimator.fit(X, y, **fit_params) return estimator def _partial_fit_binary(estimator, X, y, partial_fit_params): """Partially fit a single binary estimator.""" estimator.partial_fit(X, y, classes=np.array((0, 1)), **partial_fit_params) return estimator def _predict_binary(estimator, X): """Make predictions using a single binary estimator.""" if is_regressor(estimator): return estimator.predict(X) try: score = np.ravel(estimator.decision_function(X)) except (AttributeError, NotImplementedError): # probabilities of the positive class score = estimator.predict_proba(X)[:, 1] return score def _threshold_for_binary_predict(estimator): """Threshold for predictions from binary estimator.""" if hasattr(estimator, "decision_function") and is_classifier(estimator): return 0.0 else: # predict_proba threshold return 0.5 class _ConstantPredictor(BaseEstimator): """Helper predictor to be used when only one class is present.""" def fit(self, X, y): check_params = dict( ensure_all_finite=False, dtype=None, ensure_2d=False, accept_sparse=True ) validate_data( self, X, y, reset=True, validate_separately=(check_params, check_params) ) self.y_ = y return self def predict(self, X): check_is_fitted(self) validate_data( self, X, ensure_all_finite=False, dtype=None, accept_sparse=True, ensure_2d=False, reset=False, ) return np.repeat(self.y_, _num_samples(X)) def decision_function(self, X): check_is_fitted(self) validate_data( self, X, ensure_all_finite=False, dtype=None, accept_sparse=True, ensure_2d=False, reset=False, ) return np.repeat(self.y_, _num_samples(X)) def predict_proba(self, X): check_is_fitted(self) validate_data( self, X, ensure_all_finite=False, dtype=None, accept_sparse=True, ensure_2d=False, reset=False, ) y_ = self.y_.astype(np.float64) return np.repeat([np.hstack([1 - y_, y_])], _num_samples(X), axis=0) def _estimators_has(attr): """Check if self.estimator or self.estimators_[0] has attr. If `self.estimators_[0]` has the attr, then its safe to assume that other estimators have it too. We raise the original `AttributeError` if `attr` does not exist. This function is used together with `available_if`. """ def check(self): if hasattr(self, "estimators_"): getattr(self.estimators_[0], attr) else: getattr(self.estimator, attr) return True return check class OneVsRestClassifier( MultiOutputMixin, ClassifierMixin, MetaEstimatorMixin, BaseEstimator, ): """One-vs-the-rest (OvR) multiclass strategy. Also known as one-vs-all, this strategy consists in fitting one classifier per class. For each classifier, the class is fitted against all the other classes. In addition to its computational efficiency (only `n_classes` classifiers are needed), one advantage of this approach is its interpretability. Since each class is represented by one and one classifier only, it is possible to gain knowledge about the class by inspecting its corresponding classifier. This is the most commonly used strategy for multiclass classification and is a fair default choice. OneVsRestClassifier can also be used for multilabel classification. To use this feature, provide an indicator matrix for the target `y` when calling `.fit`. In other words, the target labels should be formatted as a 2D binary (0/1) matrix, where [i, j] == 1 indicates the presence of label j in sample i. 
This estimator uses the binary relevance method to perform multilabel classification, which involves training one binary classifier independently for each label. Read more in the :ref:`User Guide <ovr_classification>`. Parameters ---------- estimator : estimator object A regressor or a classifier that implements :term:`fit`. When a classifier is passed, :term:`decision_function` will be used in priority and it will fallback to :term:`predict_proba` if it is not available. When a regressor is passed, :term:`predict` is used. n_jobs : int, default=None The number of jobs to use for the computation: the `n_classes` one-vs-rest problems are computed in parallel. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. .. versionchanged:: 0.20 `n_jobs` default changed from 1 to None verbose : int, default=0 The verbosity level, if non zero, progress messages are printed. Below 50, the output is sent to stderr. Otherwise, the output is sent to stdout. The frequency of the messages increases with the verbosity level, reporting all iterations at 10. See :class:`joblib.Parallel` for more details. .. versionadded:: 1.1 Attributes ---------- estimators_ : list of `n_classes` estimators Estimators used for predictions. classes_ : array, shape = [`n_classes`] Class labels. n_classes_ : int Number of classes. label_binarizer_ : LabelBinarizer object Object used to transform multiclass labels to binary labels and vice-versa. multilabel_ : boolean Whether a OneVsRestClassifier is a multilabel classifier. n_features_in_ : int Number of features seen during :term:`fit`. Only defined if the underlying estimator exposes such an attribute when fit. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Only defined if the underlying estimator exposes such an attribute when fit. .. versionadded:: 1.0 See Also -------- OneVsOneClassifier : One-vs-one multiclass strategy. OutputCodeClassifier : (Error-Correcting) Output-Code multiclass strategy. sklearn.multioutput.MultiOutputClassifier : Alternate way of extending an estimator for multilabel classification. sklearn.preprocessing.MultiLabelBinarizer : Transform iterable of iterables to binary indicator matrix. Examples -------- >>> import numpy as np >>> from sklearn.multiclass import OneVsRestClassifier >>> from sklearn.svm import SVC >>> X = np.array([ ... [10, 10], ... [8, 10], ... [-5, 5.5], ... [-5.4, 5.5], ... [-20, -20], ... [-15, -20] ... ]) >>> y = np.array([0, 0, 1, 1, 2, 2]) >>> clf = OneVsRestClassifier(SVC()).fit(X, y) >>> clf.predict([[-19, -20], [9, 9], [-5, 5]]) array([2, 0, 1]) """ _parameter_constraints = { "estimator": [HasMethods(["fit"])], "n_jobs": [Integral, None], "verbose": ["verbose"], } def __init__(self, estimator, *, n_jobs=None, verbose=0): self.estimator = estimator self.n_jobs = n_jobs self.verbose = verbose @_fit_context( # OneVsRestClassifier.estimator is not validated yet prefer_skip_nested_validation=False ) def fit(self, X, y, **fit_params): """Fit underlying estimators. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Data. y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_classes) Multi-class targets. An indicator matrix turns on multilabel classification. **fit_params : dict Parameters passed to the ``estimator.fit`` method of each sub-estimator. .. 
versionadded:: 1.4 Only available if `enable_metadata_routing=True`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Instance of fitted estimator. """ _raise_for_params(fit_params, self, "fit") routed_params = process_routing( self, "fit", **fit_params, ) # A sparse LabelBinarizer, with sparse_output=True, has been shown to # outperform or match a dense label binarizer in all cases and has also # resulted in less or equal memory consumption in the fit_ovr function # overall. self.label_binarizer_ = LabelBinarizer(sparse_output=True) Y = self.label_binarizer_.fit_transform(y) Y = Y.tocsc() self.classes_ = self.label_binarizer_.classes_ columns = (col.toarray().ravel() for col in Y.T) # In cases where individual estimators are very fast to train setting # n_jobs > 1 in can results in slower performance due to the overhead # of spawning threads. See joblib issue #112. self.estimators_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)( delayed(_fit_binary)( self.estimator, X, column, fit_params=routed_params.estimator.fit, classes=[ "not %s" % self.label_binarizer_.classes_[i], self.label_binarizer_.classes_[i], ], ) for i, column in enumerate(columns) ) if hasattr(self.estimators_[0], "n_features_in_"): self.n_features_in_ = self.estimators_[0].n_features_in_ if hasattr(self.estimators_[0], "feature_names_in_"): self.feature_names_in_ = self.estimators_[0].feature_names_in_ return self @available_if(_estimators_has("partial_fit")) @_fit_context( # OneVsRestClassifier.estimator is not validated yet prefer_skip_nested_validation=False ) def partial_fit(self, X, y, classes=None, **partial_fit_params): """Partially fit underlying estimators. Should be used when memory is inefficient to train all data. Chunks of data can be passed in several iterations. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Data. y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_classes) Multi-class targets. An indicator matrix turns on multilabel classification. classes : array, shape (n_classes, ) Classes across all calls to partial_fit. Can be obtained via `np.unique(y_all)`, where y_all is the target vector of the entire dataset. This argument is only required in the first call of partial_fit and can be omitted in the subsequent calls. **partial_fit_params : dict Parameters passed to the ``estimator.partial_fit`` method of each sub-estimator. .. versionadded:: 1.4 Only available if `enable_metadata_routing=True`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Instance of partially fitted estimator. """ _raise_for_params(partial_fit_params, self, "partial_fit") routed_params = process_routing( self, "partial_fit", **partial_fit_params, ) if _check_partial_fit_first_call(self, classes): self.estimators_ = [clone(self.estimator) for _ in range(self.n_classes_)] # A sparse LabelBinarizer, with sparse_output=True, has been # shown to outperform or match a dense label binarizer in all # cases and has also resulted in less or equal memory consumption # in the fit_ovr function overall. 
self.label_binarizer_ = LabelBinarizer(sparse_output=True) self.label_binarizer_.fit(self.classes_) if len(np.setdiff1d(y, self.classes_)): raise ValueError( ( "Mini-batch contains {0} while classes " + "must be subset of {1}" ).format(np.unique(y), self.classes_) ) Y = self.label_binarizer_.transform(y) Y = Y.tocsc() columns = (col.toarray().ravel() for col in Y.T) self.estimators_ = Parallel(n_jobs=self.n_jobs)( delayed(_partial_fit_binary)( estimator, X, column, partial_fit_params=routed_params.estimator.partial_fit, ) for estimator, column in zip(self.estimators_, columns) ) if hasattr(self.estimators_[0], "n_features_in_"): self.n_features_in_ = self.estimators_[0].n_features_in_ return self def predict(self, X): """Predict multi-class targets using underlying estimators. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Data. Returns ------- y : {array-like, sparse matrix} of shape (n_samples,) or (n_samples, n_classes) Predicted multi-class targets. """ check_is_fitted(self) n_samples = _num_samples(X) if self.label_binarizer_.y_type_ == "multiclass": maxima = np.empty(n_samples, dtype=float) maxima.fill(-np.inf) argmaxima = np.zeros(n_samples, dtype=int) n_classes = len(self.estimators_) # Iterate in reverse order to match np.argmax tie-breaking behavior for i, e in enumerate(reversed(self.estimators_)): pred = _predict_binary(e, X) np.maximum(maxima, pred, out=maxima) argmaxima[maxima == pred] = n_classes - i - 1 return self.classes_[argmaxima] else: thresh = _threshold_for_binary_predict(self.estimators_[0]) indices = array.array("i") indptr = array.array("i", [0]) for e in self.estimators_: indices.extend(np.where(_predict_binary(e, X) > thresh)[0]) indptr.append(len(indices)) data = np.ones(len(indices), dtype=int) indicator = sp.csc_matrix( (data, indices, indptr), shape=(n_samples, len(self.estimators_)) ) return self.label_binarizer_.inverse_transform(indicator) @available_if(_estimators_has("predict_proba")) def predict_proba(self, X): """Probability estimates. The returned estimates for all classes are ordered by label of classes. Note that in the multilabel case, each sample can have any number of labels. This returns the marginal probability that the given sample has the label in question. For example, it is entirely consistent that two labels both have a 90% probability of applying to a given sample. In the single label multiclass case, the rows of the returned matrix sum to 1. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Input data. Returns ------- T : array-like of shape (n_samples, n_classes) Returns the probability of the sample for each class in the model, where classes are ordered as they are in `self.classes_`. """ check_is_fitted(self) # Y[i, j] gives the probability that sample i has the label j. # In the multi-label case, these are not disjoint. Y = np.array([e.predict_proba(X)[:, 1] for e in self.estimators_]).T if len(self.estimators_) == 1: # Only one estimator, but we still want to return probabilities # for two classes. Y = np.concatenate(((1 - Y), Y), axis=1) if not self.multilabel_: # Then, (nonzero) sample probability distributions should be normalized. row_sums = np.sum(Y, axis=1)[:, np.newaxis] np.divide(Y, row_sums, out=Y, where=row_sums != 0) return Y @available_if(_estimators_has("decision_function")) def decision_function(self, X): """Decision function for the OneVsRestClassifier. Return the distance of each sample from the decision boundary for each class. 
This can only be used with estimators which implement the `decision_function` method. Parameters ---------- X : array-like of shape (n_samples, n_features) Input data. Returns ------- T : array-like of shape (n_samples, n_classes) or (n_samples,) for \ binary classification. Result of calling `decision_function` on the final estimator. .. versionchanged:: 0.19 output shape changed to ``(n_samples,)`` to conform to scikit-learn conventions for binary classification. """ check_is_fitted(self) if len(self.estimators_) == 1: return self.estimators_[0].decision_function(X) return np.array( [est.decision_function(X).ravel() for est in self.estimators_] ).T @property def multilabel_(self): """Whether this is a multilabel classifier.""" return self.label_binarizer_.y_type_.startswith("multilabel") @property def n_classes_(self): """Number of classes.""" return len(self.classes_) def __sklearn_tags__(self): """Indicate if wrapped estimator is using a precomputed Gram matrix""" tags = super().__sklearn_tags__() tags.input_tags.pairwise = get_tags(self.estimator).input_tags.pairwise tags.input_tags.sparse = get_tags(self.estimator).input_tags.sparse return tags def get_metadata_routing(self): """Get metadata routing of this object. Please check :ref:`User Guide <metadata_routing>` on how the routing mechanism works. .. versionadded:: 1.4 Returns ------- routing : MetadataRouter A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating routing information. """ router = ( MetadataRouter(owner=self) .add_self_request(self) .add( estimator=self.estimator, method_mapping=MethodMapping() .add(caller="fit", callee="fit") .add(caller="partial_fit", callee="partial_fit"), ) ) return router def _fit_ovo_binary(estimator, X, y, i, j, fit_params): """Fit a single binary estimator (one-vs-one).""" cond = np.logical_or(y == i, y == j) y = y[cond] y_binary = np.empty(y.shape, int) y_binary[y == i] = 0 y_binary[y == j] = 1 indcond = np.arange(_num_samples(X))[cond] fit_params_subset = _check_method_params(X, params=fit_params, indices=indcond) return ( _fit_binary( estimator, _safe_split(estimator, X, None, indices=indcond)[0], y_binary, fit_params=fit_params_subset, classes=[i, j], ), indcond, ) def _partial_fit_ovo_binary(estimator, X, y, i, j, partial_fit_params): """Partially fit a single binary estimator(one-vs-one).""" cond = np.logical_or(y == i, y == j) y = y[cond] if len(y) != 0: y_binary = np.zeros_like(y) y_binary[y == j] = 1 partial_fit_params_subset = _check_method_params( X, params=partial_fit_params, indices=cond ) return _partial_fit_binary( estimator, X[cond], y_binary, partial_fit_params=partial_fit_params_subset ) return estimator class OneVsOneClassifier(MetaEstimatorMixin, ClassifierMixin, BaseEstimator): """One-vs-one multiclass strategy. This strategy consists in fitting one classifier per class pair. At prediction time, the class which received the most votes is selected. Since it requires to fit `n_classes * (n_classes - 1) / 2` classifiers, this method is usually slower than one-vs-the-rest, due to its O(n_classes^2) complexity. However, this method may be advantageous for algorithms such as kernel algorithms which don't scale well with `n_samples`. This is because each individual learning problem only involves a small subset of the data whereas, with one-vs-the-rest, the complete dataset is used `n_classes` times. Read more in the :ref:`User Guide <ovo_classification>`. Parameters ---------- estimator : estimator object A regressor or a classifier that implements :term:`fit`. 
When a classifier is passed, :term:`decision_function` will be used in priority and it will fallback to :term:`predict_proba` if it is not available. When a regressor is passed, :term:`predict` is used. n_jobs : int, default=None The number of jobs to use for the computation: the `n_classes * ( n_classes - 1) / 2` OVO problems are computed in parallel. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. Attributes ---------- estimators_ : list of ``n_classes * (n_classes - 1) / 2`` estimators Estimators used for predictions. classes_ : numpy array of shape [n_classes] Array containing labels. n_classes_ : int Number of classes. pairwise_indices_ : list, length = ``len(estimators_)``, or ``None`` Indices of samples used when training the estimators. ``None`` when ``estimator``'s `pairwise` tag is False. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- OneVsRestClassifier : One-vs-all multiclass strategy. OutputCodeClassifier : (Error-Correcting) Output-Code multiclass strategy. Examples -------- >>> from sklearn.datasets import load_iris >>> from sklearn.model_selection import train_test_split >>> from sklearn.multiclass import OneVsOneClassifier >>> from sklearn.svm import LinearSVC >>> X, y = load_iris(return_X_y=True) >>> X_train, X_test, y_train, y_test = train_test_split( ... X, y, test_size=0.33, shuffle=True, random_state=0) >>> clf = OneVsOneClassifier( ... LinearSVC(random_state=0)).fit(X_train, y_train) >>> clf.predict(X_test[:10]) array([2, 1, 0, 2, 0, 2, 0, 1, 1, 1]) """ _parameter_constraints: dict = { "estimator": [HasMethods(["fit"])], "n_jobs": [Integral, None], } def __init__(self, estimator, *, n_jobs=None): self.estimator = estimator self.n_jobs = n_jobs @_fit_context( # OneVsOneClassifier.estimator is not validated yet prefer_skip_nested_validation=False ) def fit(self, X, y, **fit_params): """Fit underlying estimators. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Data. y : array-like of shape (n_samples,) Multi-class targets. **fit_params : dict Parameters passed to the ``estimator.fit`` method of each sub-estimator. .. versionadded:: 1.4 Only available if `enable_metadata_routing=True`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object The fitted underlying estimator. """ _raise_for_params(fit_params, self, "fit") routed_params = process_routing( self, "fit", **fit_params, ) # We need to validate the data because we do a safe_indexing later. X, y = validate_data( self, X, y, accept_sparse=["csr", "csc"], ensure_all_finite=False ) check_classification_targets(y) self.classes_ = np.unique(y) if len(self.classes_) == 1: raise ValueError( "OneVsOneClassifier can not be fit when only one class is present." 
) n_classes = self.classes_.shape[0] estimators_indices = list( zip( *( Parallel(n_jobs=self.n_jobs)( delayed(_fit_ovo_binary)( self.estimator, X, y, self.classes_[i], self.classes_[j], fit_params=routed_params.estimator.fit, ) for i in range(n_classes) for j in range(i + 1, n_classes) ) ) ) ) self.estimators_ = estimators_indices[0] pairwise = self.__sklearn_tags__().input_tags.pairwise self.pairwise_indices_ = estimators_indices[1] if pairwise else None return self @available_if(_estimators_has("partial_fit")) @_fit_context( # OneVsOneClassifier.estimator is not validated yet prefer_skip_nested_validation=False ) def partial_fit(self, X, y, classes=None, **partial_fit_params): """Partially fit underlying estimators. Should be used when memory is inefficient to train all data. Chunks of data can be passed in several iteration, where the first call should have an array of all target variables. Parameters ---------- X : {array-like, sparse matrix) of shape (n_samples, n_features) Data. y : array-like of shape (n_samples,) Multi-class targets. classes : array, shape (n_classes, ) Classes across all calls to partial_fit. Can be obtained via `np.unique(y_all)`, where y_all is the target vector of the entire dataset. This argument is only required in the first call of partial_fit and can be omitted in the subsequent calls. **partial_fit_params : dict Parameters passed to the ``estimator.partial_fit`` method of each sub-estimator. .. versionadded:: 1.4 Only available if `enable_metadata_routing=True`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object The partially fitted underlying estimator. """ _raise_for_params(partial_fit_params, self, "partial_fit") routed_params = process_routing( self, "partial_fit", **partial_fit_params, ) first_call = _check_partial_fit_first_call(self, classes) if first_call: self.estimators_ = [ clone(self.estimator) for _ in range(self.n_classes_ * (self.n_classes_ - 1) // 2) ] if len(np.setdiff1d(y, self.classes_)): raise ValueError( "Mini-batch contains {0} while it must be subset of {1}".format( np.unique(y), self.classes_ ) ) X, y = validate_data( self, X, y, accept_sparse=["csr", "csc"], ensure_all_finite=False, reset=first_call, ) check_classification_targets(y) combinations = itertools.combinations(range(self.n_classes_), 2) self.estimators_ = Parallel(n_jobs=self.n_jobs)( delayed(_partial_fit_ovo_binary)( estimator, X, y, self.classes_[i], self.classes_[j], partial_fit_params=routed_params.estimator.partial_fit, ) for estimator, (i, j) in zip(self.estimators_, (combinations)) ) self.pairwise_indices_ = None if hasattr(self.estimators_[0], "n_features_in_"): self.n_features_in_ = self.estimators_[0].n_features_in_ return self def predict(self, X): """Estimate the best class label for each sample in X. This is implemented as ``argmax(decision_function(X), axis=1)`` which will return the label of the class with most votes by estimators predicting the outcome of a decision for each possible class pair. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Data. Returns ------- y : numpy array of shape [n_samples] Predicted multi-class targets. """ Y = self.decision_function(X) if self.n_classes_ == 2:
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
true
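The multilabel mode described in the `OneVsRestClassifier` docstring of `sklearn/multiclass.py` above is triggered by passing a binary indicator matrix as `y`. A minimal illustrative sketch follows; the toy data and the choice of `LogisticRegression` as base estimator are assumptions, not taken from the source.

import numpy as np

from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier

X = np.array([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0], [0.0, 0.0]])
# Y[i, j] == 1 means label j applies to sample i; a row may carry zero,
# one, or several labels.
Y = np.array([[1, 0], [0, 1], [1, 1], [0, 0]])

clf = OneVsRestClassifier(LogisticRegression()).fit(X, Y)
print(clf.multilabel_)       # True: indicator targets switch on multilabel mode
print(clf.predict(X).shape)  # (4, 2): one binary column per label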
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/conftest.py
sklearn/conftest.py
# Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import builtins import platform import sys from contextlib import suppress from functools import wraps from os import environ from unittest import SkipTest import joblib import numpy as np import pytest from _pytest.doctest import DoctestItem from scipy.datasets import face from threadpoolctl import threadpool_limits from sklearn._min_dependencies import PYTEST_MIN_VERSION from sklearn.datasets import ( fetch_20newsgroups, fetch_20newsgroups_vectorized, fetch_california_housing, fetch_covtype, fetch_kddcup99, fetch_lfw_pairs, fetch_lfw_people, fetch_olivetti_faces, fetch_rcv1, fetch_species_distributions, ) from sklearn.utils._testing import get_pytest_filterwarning_lines from sklearn.utils.fixes import ( _IS_32BIT, np_base_version, parse_version, sp_version, ) try: import pytest_run_parallel # noqa:F401 PARALLEL_RUN_AVAILABLE = True except ImportError: PARALLEL_RUN_AVAILABLE = False try: from scipy_doctest.conftest import dt_config except ModuleNotFoundError: dt_config = None if parse_version(pytest.__version__) < parse_version(PYTEST_MIN_VERSION): raise ImportError( f"Your version of pytest is too old. Got version {pytest.__version__}, you" f" should have pytest >= {PYTEST_MIN_VERSION} installed." ) def raccoon_face_or_skip(): # SciPy requires network access to get data run_network_tests = environ.get("SKLEARN_SKIP_NETWORK_TESTS", "1") == "0" if not run_network_tests: raise SkipTest("test is enabled when SKLEARN_SKIP_NETWORK_TESTS=0") try: import pooch # noqa: F401 except ImportError: raise SkipTest("test requires pooch to be installed") return face(gray=True) dataset_fetchers = { "fetch_20newsgroups_fxt": fetch_20newsgroups, "fetch_20newsgroups_vectorized_fxt": fetch_20newsgroups_vectorized, "fetch_california_housing_fxt": fetch_california_housing, "fetch_covtype_fxt": fetch_covtype, "fetch_kddcup99_fxt": fetch_kddcup99, "fetch_lfw_pairs_fxt": fetch_lfw_pairs, "fetch_lfw_people_fxt": fetch_lfw_people, "fetch_olivetti_faces_fxt": fetch_olivetti_faces, "fetch_rcv1_fxt": fetch_rcv1, "fetch_species_distributions_fxt": fetch_species_distributions, } dataset_fetchers["raccoon_face_fxt"] = raccoon_face_or_skip _SKIP32_MARK = pytest.mark.skipif( environ.get("SKLEARN_RUN_FLOAT32_TESTS", "0") != "1", reason="Set SKLEARN_RUN_FLOAT32_TESTS=1 to run float32 dtype tests", ) # Global fixtures @pytest.fixture(params=[pytest.param(np.float32, marks=_SKIP32_MARK), np.float64]) def global_dtype(request): yield request.param def _fetch_fixture(f): """Fetch dataset (download if missing and requested by environment).""" download_if_missing = environ.get("SKLEARN_SKIP_NETWORK_TESTS", "1") == "0" @wraps(f) def wrapped(*args, **kwargs): kwargs["download_if_missing"] = download_if_missing try: return f(*args, **kwargs) except OSError as e: if str(e) != "Data not found and `download_if_missing` is False": raise pytest.skip("test is enabled when SKLEARN_SKIP_NETWORK_TESTS=0") return pytest.fixture(lambda: wrapped) # Adds fixtures for fetching data fetch_20newsgroups_fxt = _fetch_fixture(fetch_20newsgroups) fetch_20newsgroups_vectorized_fxt = _fetch_fixture(fetch_20newsgroups_vectorized) fetch_california_housing_fxt = _fetch_fixture(fetch_california_housing) fetch_covtype_fxt = _fetch_fixture(fetch_covtype) fetch_kddcup99_fxt = _fetch_fixture(fetch_kddcup99) fetch_lfw_pairs_fxt = _fetch_fixture(fetch_lfw_pairs) fetch_lfw_people_fxt = _fetch_fixture(fetch_lfw_people) fetch_olivetti_faces_fxt = _fetch_fixture(fetch_olivetti_faces) 
fetch_rcv1_fxt = _fetch_fixture(fetch_rcv1) fetch_species_distributions_fxt = _fetch_fixture(fetch_species_distributions) raccoon_face_fxt = pytest.fixture(raccoon_face_or_skip) def pytest_collection_modifyitems(config, items): """Called after collect is completed. Parameters ---------- config : pytest config items : list of collected items """ run_network_tests = environ.get("SKLEARN_SKIP_NETWORK_TESTS", "1") == "0" skip_network = pytest.mark.skip( reason="test is enabled when SKLEARN_SKIP_NETWORK_TESTS=0" ) # download datasets during collection to avoid thread unsafe behavior # when running pytest in parallel with pytest-xdist dataset_features_set = set(dataset_fetchers) datasets_to_download = set() for item in items: if isinstance(item, DoctestItem) and "fetch_" in item.name: fetcher_function_name = item.name.split(".")[-1] dataset_fetchers_key = f"{fetcher_function_name}_fxt" dataset_to_fetch = set([dataset_fetchers_key]) & dataset_features_set elif not hasattr(item, "fixturenames"): continue else: item_fixtures = set(item.fixturenames) dataset_to_fetch = item_fixtures & dataset_features_set if not dataset_to_fetch: continue if run_network_tests: datasets_to_download |= dataset_to_fetch else: # network tests are skipped item.add_marker(skip_network) # Only download datasets on the first worker spawned by pytest-xdist # to avoid thread unsafe behavior. If pytest-xdist is not used, we still # download before tests run. worker_id = environ.get("PYTEST_XDIST_WORKER", "gw0") if worker_id == "gw0" and run_network_tests: for name in datasets_to_download: with suppress(SkipTest): dataset_fetchers[name]() for item in items: # Known failure on with GradientBoostingClassifier on ARM64 if ( item.name.endswith("GradientBoostingClassifier") and platform.machine() == "aarch64" ): marker = pytest.mark.xfail( reason=( "know failure. See " "https://github.com/scikit-learn/scikit-learn/issues/17797" ) ) item.add_marker(marker) skip_doctests = False try: import matplotlib # noqa: F401 except ImportError: skip_doctests = True reason = "matplotlib is required to run the doctests" if _IS_32BIT: reason = "doctest are only run when the default numpy int is 64 bits." skip_doctests = True elif sys.platform.startswith("win32"): reason = ( "doctests are not run for Windows because numpy arrays " "repr is inconsistent across platforms." ) skip_doctests = True if np_base_version < parse_version("2"): # TODO: configure numpy to output scalar arrays as regular Python scalars # once possible to improve readability of the tests docstrings. # https://numpy.org/neps/nep-0051-scalar-representation.html#implementation reason = "Due to NEP 51 numpy scalar repr has changed in numpy 2" skip_doctests = True if sp_version < parse_version("1.14"): reason = "Scipy sparse matrix repr has changed in scipy 1.14" skip_doctests = True # Normally doctest has the entire module's scope. Here we set globs to an empty dict # to remove the module's scope: # https://docs.python.org/3/library/doctest.html#what-s-the-execution-context for item in items: if isinstance(item, DoctestItem): item.dtest.globs = {} if skip_doctests: skip_marker = pytest.mark.skip(reason=reason) for item in items: if isinstance(item, DoctestItem): # work-around an internal error with pytest if adding a skip # mark to a doctest in a contextmanager, see # https://github.com/pytest-dev/pytest/issues/8796 for more # details. 
if item.name != "sklearn._config.config_context": item.add_marker(skip_marker) try: import PIL # noqa: F401 pillow_installed = True except ImportError: pillow_installed = False if not pillow_installed: skip_marker = pytest.mark.skip(reason="pillow (or PIL) not installed!") for item in items: if item.name in [ "sklearn.feature_extraction.image.PatchExtractor", "sklearn.feature_extraction.image.extract_patches_2d", ]: item.add_marker(skip_marker) @pytest.fixture(scope="function") def pyplot(): """Setup and teardown fixture for matplotlib. This fixture checks if we can import matplotlib. If not, the tests will be skipped. Otherwise, we close the figures before and after running the functions. Returns ------- pyplot : module The ``matplotlib.pyplot`` module. """ pyplot = pytest.importorskip("matplotlib.pyplot") pyplot.close("all") yield pyplot pyplot.close("all") def pytest_generate_tests(metafunc): """Parametrization of global_random_seed fixture based on the SKLEARN_TESTS_GLOBAL_RANDOM_SEED environment variable. The goal of this fixture is to prevent tests that use it to be sensitive to a specific seed value while still being deterministic by default. See the documentation for the SKLEARN_TESTS_GLOBAL_RANDOM_SEED variable for instructions on how to use this fixture. https://scikit-learn.org/dev/computing/parallelism.html#sklearn-tests-global-random-seed """ # When using pytest-xdist this function is called in the xdist workers. # We rely on SKLEARN_TESTS_GLOBAL_RANDOM_SEED environment variable which is # set in before running pytest and is available in xdist workers since they # are subprocesses. RANDOM_SEED_RANGE = list(range(100)) # All seeds in [0, 99] should be valid. random_seed_var = environ.get("SKLEARN_TESTS_GLOBAL_RANDOM_SEED") default_random_seeds = [42] if random_seed_var is None: random_seeds = default_random_seeds elif random_seed_var == "all": random_seeds = RANDOM_SEED_RANGE else: if "-" in random_seed_var: start, stop = random_seed_var.split("-") random_seeds = list(range(int(start), int(stop) + 1)) else: random_seeds = [int(random_seed_var)] if min(random_seeds) < 0 or max(random_seeds) > 99: raise ValueError( "The value(s) of the environment variable " "SKLEARN_TESTS_GLOBAL_RANDOM_SEED must be in the range [0, 99] " f"(or 'all'), got: {random_seed_var}" ) if "global_random_seed" in metafunc.fixturenames: metafunc.parametrize("global_random_seed", random_seeds) def pytest_addoption(parser, pluginmanager): if not PARALLEL_RUN_AVAILABLE: parser.addini("thread_unsafe_fixtures", "list of stuff") def pytest_configure(config): # Use matplotlib agg backend during the tests including doctests try: import matplotlib matplotlib.use("agg") except ImportError: pass allowed_parallelism = joblib.cpu_count(only_physical_cores=True) xdist_worker_count = environ.get("PYTEST_XDIST_WORKER_COUNT") if xdist_worker_count is not None: # Set the number of OpenMP and BLAS threads based on the number of workers # xdist is using to prevent oversubscription. allowed_parallelism = max(allowed_parallelism // int(xdist_worker_count), 1) threadpool_limits(allowed_parallelism) if environ.get("SKLEARN_WARNINGS_AS_ERRORS", "0") != "0": # This seems like the only way to programmatically change the config # filterwarnings. 
This was suggested in # https://github.com/pytest-dev/pytest/issues/3311#issuecomment-373177592 for line in get_pytest_filterwarning_lines(): config.addinivalue_line("filterwarnings", line) if not PARALLEL_RUN_AVAILABLE: config.addinivalue_line( "markers", "parallel_threads(n): run the given test function in parallel " "using `n` threads.", ) config.addinivalue_line( "markers", "thread_unsafe: mark the test function as single-threaded", ) config.addinivalue_line( "markers", "iterations(n): run the given test function `n` times in each thread", ) config.addinivalue_line( "markers", "iterations(n): run the given test function `n` times in each thread", ) @pytest.fixture def hide_available_pandas(monkeypatch): """Pretend pandas was not installed.""" import_orig = builtins.__import__ def mocked_import(name, *args, **kwargs): if name == "pandas": raise ImportError() return import_orig(name, *args, **kwargs) monkeypatch.setattr(builtins, "__import__", mocked_import) if dt_config is not None: # Strict mode to differentiate between 3.14 and np.float64(3.14) dt_config.strict_check = True # dt_config.rtol = 0.01
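A hypothetical test consuming the `global_random_seed` fixture parametrized by `pytest_generate_tests` above might look like the sketch below (not part of this conftest). The point is that the assertion has to hold for every seed selected via `SKLEARN_TESTS_GLOBAL_RANDOM_SEED` in the [0, 99] range, not only for the default seed of 42.

import numpy as np


def test_sample_mean_is_seed_insensitive(global_random_seed):
    # One parametrized run per selected seed; the statistic should stay close
    # to its expected value regardless of which seed is drawn.
    rng = np.random.RandomState(global_random_seed)
    x = rng.normal(size=10_000)
    assert abs(x.mean()) < 0.1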
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/pipeline.py
sklearn/pipeline.py
"""Utilities to build a composite estimator as a chain of transforms and estimators.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from collections import Counter, defaultdict from copy import deepcopy from itertools import chain, islice import numpy as np from scipy import sparse from sklearn.base import TransformerMixin, _fit_context, clone from sklearn.exceptions import NotFittedError from sklearn.preprocessing import FunctionTransformer from sklearn.utils import Bunch from sklearn.utils._metadata_requests import METHODS from sklearn.utils._param_validation import HasMethods, Hidden from sklearn.utils._repr_html.estimator import _VisualBlock from sklearn.utils._set_output import _get_container_adapter, _safe_set_output from sklearn.utils._tags import get_tags from sklearn.utils._user_interface import _print_elapsed_time from sklearn.utils.metadata_routing import ( MetadataRouter, MethodMapping, _raise_for_params, _routing_enabled, get_routing_for_object, process_routing, ) from sklearn.utils.metaestimators import _BaseComposition, available_if from sklearn.utils.parallel import Parallel, delayed from sklearn.utils.validation import check_is_fitted, check_memory __all__ = ["FeatureUnion", "Pipeline", "make_pipeline", "make_union"] def _final_estimator_has(attr): """Check that final_estimator has `attr`. Used together with `available_if` in `Pipeline`.""" def check(self): # raise original `AttributeError` if `attr` does not exist getattr(self._final_estimator, attr) return True return check def _cached_transform( sub_pipeline, *, cache, param_name, param_value, transform_params ): """Transform a parameter value using a sub-pipeline and cache the result. Parameters ---------- sub_pipeline : Pipeline The sub-pipeline to be used for transformation. cache : dict The cache dictionary to store the transformed values. param_name : str The name of the parameter to be transformed. param_value : object The value of the parameter to be transformed. transform_params : dict The metadata to be used for transformation. This passed to the `transform` method of the sub-pipeline. Returns ------- transformed_value : object The transformed value of the parameter. """ if param_name not in cache: # If the parameter is a tuple, transform each element of the # tuple. This is needed to support the pattern present in # `lightgbm` and `xgboost` where users can pass multiple # validation sets. if isinstance(param_value, tuple): cache[param_name] = tuple( sub_pipeline.transform(element, **transform_params) for element in param_value ) else: cache[param_name] = sub_pipeline.transform(param_value, **transform_params) return cache[param_name] class Pipeline(_BaseComposition): """ A sequence of data transformers with an optional final predictor. `Pipeline` allows you to sequentially apply a list of transformers to preprocess the data and, if desired, conclude the sequence with a final :term:`predictor` for predictive modeling. Intermediate steps of the pipeline must be transformers, that is, they must implement `fit` and `transform` methods. The final :term:`estimator` only needs to implement `fit`. The transformers in the pipeline can be cached using ``memory`` argument. The purpose of the pipeline is to assemble several steps that can be cross-validated together while setting different parameters. For this, it enables setting parameters of the various steps using their names and the parameter name separated by a `'__'`, as in the example below. 
A step's estimator may be replaced entirely by setting the parameter with its name to another estimator, or a transformer removed by setting it to `'passthrough'` or `None`. For an example use case of `Pipeline` combined with :class:`~sklearn.model_selection.GridSearchCV`, refer to :ref:`sphx_glr_auto_examples_compose_plot_compare_reduction.py`. The example :ref:`sphx_glr_auto_examples_compose_plot_digits_pipe.py` shows how to grid search on a pipeline using `'__'` as a separator in the parameter names. Read more in the :ref:`User Guide <pipeline>`. .. versionadded:: 0.5 Parameters ---------- steps : list of tuples List of (name of step, estimator) tuples that are to be chained in sequential order. To be compatible with the scikit-learn API, all steps must define `fit`. All non-last steps must also define `transform`. See :ref:`Combining Estimators <combining_estimators>` for more details. transform_input : list of str, default=None The names of the :term:`metadata` parameters that should be transformed by the pipeline before passing it to the step consuming it. This enables transforming some input arguments to ``fit`` (other than ``X``) to be transformed by the steps of the pipeline up to the step which requires them. Requirement is defined via :ref:`metadata routing <metadata_routing>`. For instance, this can be used to pass a validation set through the pipeline. You can only set this if metadata routing is enabled, which you can enable using ``sklearn.set_config(enable_metadata_routing=True)``. .. versionadded:: 1.6 memory : str or object with the joblib.Memory interface, default=None Used to cache the fitted transformers of the pipeline. The last step will never be cached, even if it is a transformer. By default, no caching is performed. If a string is given, it is the path to the caching directory. Enabling caching triggers a clone of the transformers before fitting. Therefore, the transformer instance given to the pipeline cannot be inspected directly. Use the attribute ``named_steps`` or ``steps`` to inspect estimators within the pipeline. Caching the transformers is advantageous when fitting is time consuming. See :ref:`sphx_glr_auto_examples_neighbors_plot_caching_nearest_neighbors.py` for an example on how to enable caching. verbose : bool, default=False If True, the time elapsed while fitting each step will be printed as it is completed. Attributes ---------- named_steps : :class:`~sklearn.utils.Bunch` Dictionary-like object, with the following attributes. Read-only attribute to access any step parameter by user given name. Keys are step names and values are steps parameters. classes_ : ndarray of shape (n_classes,) The classes labels. Only exist if the last step of the pipeline is a classifier. n_features_in_ : int Number of features seen during :term:`fit`. Only defined if the underlying first estimator in `steps` exposes such an attribute when fit. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Only defined if the underlying estimator exposes such an attribute when fit. .. versionadded:: 1.0 See Also -------- make_pipeline : Convenience function for simplified pipeline construction. 
Examples -------- >>> from sklearn.svm import SVC >>> from sklearn.preprocessing import StandardScaler >>> from sklearn.datasets import make_classification >>> from sklearn.model_selection import train_test_split >>> from sklearn.pipeline import Pipeline >>> X, y = make_classification(random_state=0) >>> X_train, X_test, y_train, y_test = train_test_split(X, y, ... random_state=0) >>> pipe = Pipeline([('scaler', StandardScaler()), ('svc', SVC())]) >>> # The pipeline can be used as any other estimator >>> # and avoids leaking the test set into the train set >>> pipe.fit(X_train, y_train).score(X_test, y_test) 0.88 >>> # An estimator's parameter can be set using '__' syntax >>> pipe.set_params(svc__C=10).fit(X_train, y_train).score(X_test, y_test) 0.76 """ # BaseEstimator interface _parameter_constraints: dict = { "steps": [list, Hidden(tuple)], "transform_input": [list, None], "memory": [None, str, HasMethods(["cache"])], "verbose": ["boolean"], } def __init__(self, steps, *, transform_input=None, memory=None, verbose=False): self.steps = steps self.transform_input = transform_input self.memory = memory self.verbose = verbose def set_output(self, *, transform=None): """Set the output container when `"transform"` and `"fit_transform"` are called. Calling `set_output` will set the output of all estimators in `steps`. Parameters ---------- transform : {"default", "pandas", "polars"}, default=None Configure output of `transform` and `fit_transform`. - `"default"`: Default output format of a transformer - `"pandas"`: DataFrame output - `"polars"`: Polars output - `None`: Transform configuration is unchanged .. versionadded:: 1.4 `"polars"` option was added. Returns ------- self : estimator instance Estimator instance. """ for _, _, step in self._iter(): _safe_set_output(step, transform=transform) return self def get_params(self, deep=True): """Get parameters for this estimator. Returns the parameters given in the constructor as well as the estimators contained within the `steps` of the `Pipeline`. Parameters ---------- deep : bool, default=True If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- params : mapping of string to any Parameter names mapped to their values. """ return self._get_params("steps", deep=deep) def set_params(self, **kwargs): """Set the parameters of this estimator. Valid parameter keys can be listed with ``get_params()``. Note that you can directly set the parameters of the estimators contained in `steps`. Parameters ---------- **kwargs : dict Parameters of this estimator or parameters of estimators contained in `steps`. Parameters of the steps may be set using its name and the parameter name separated by a '__'. Returns ------- self : object Pipeline class instance. """ self._set_params("steps", **kwargs) return self def _validate_steps(self): if not self.steps: raise ValueError("The pipeline is empty. 
Please add steps.") names, estimators = zip(*self.steps) # validate names self._validate_names(names) # validate estimators self._check_estimators_are_instances(estimators) transformers = estimators[:-1] estimator = estimators[-1] for t in transformers: if t is None or t == "passthrough": continue if not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not hasattr( t, "transform" ): raise TypeError( "All intermediate steps should be " "transformers and implement fit and transform " "or be the string 'passthrough' " "'%s' (type %s) doesn't" % (t, type(t)) ) # We allow last estimator to be None as an identity transformation if ( estimator is not None and estimator != "passthrough" and not hasattr(estimator, "fit") ): raise TypeError( "Last step of Pipeline should implement fit " "or be the string 'passthrough'. " "'%s' (type %s) doesn't" % (estimator, type(estimator)) ) def _iter(self, with_final=True, filter_passthrough=True): """ Generate (idx, (name, trans)) tuples from self.steps When filter_passthrough is True, 'passthrough' and None transformers are filtered out. """ stop = len(self.steps) if not with_final: stop -= 1 for idx, (name, trans) in enumerate(islice(self.steps, 0, stop)): if not filter_passthrough: yield idx, name, trans elif trans is not None and trans != "passthrough": yield idx, name, trans def __len__(self): """ Returns the length of the Pipeline """ return len(self.steps) def __getitem__(self, ind): """Returns a sub-pipeline or a single estimator in the pipeline Indexing with an integer will return an estimator; using a slice returns another Pipeline instance which copies a slice of this Pipeline. This copy is shallow: modifying (or fitting) estimators in the sub-pipeline will affect the larger pipeline and vice-versa. However, replacing a value in `step` will not affect a copy. See :ref:`sphx_glr_auto_examples_feature_selection_plot_feature_selection_pipeline.py` for an example of how to use slicing to inspect part of a pipeline. """ if isinstance(ind, slice): if ind.step not in (1, None): raise ValueError("Pipeline slicing only supports a step of 1") return self.__class__( self.steps[ind], memory=self.memory, verbose=self.verbose ) try: name, est = self.steps[ind] except TypeError: # Not an int, try get step by name return self.named_steps[ind] return est @property def named_steps(self): """Access the steps by name. Read-only attribute to access any step by given name. Keys are steps names and values are the steps objects.""" # Use Bunch object to improve autocomplete return Bunch(**dict(self.steps)) @property def _final_estimator(self): try: estimator = self.steps[-1][1] return "passthrough" if estimator is None else estimator except (ValueError, AttributeError, TypeError): # This condition happens when a call to a method is first calling # `_available_if` and `fit` did not validate `steps` yet. We # return `None` and an `InvalidParameterError` will be raised # right after. 
return None def _log_message(self, step_idx): if not self.verbose: return None name, _ = self.steps[step_idx] return "(step %d of %d) Processing %s" % (step_idx + 1, len(self.steps), name) def _check_method_params(self, method, props, **kwargs): if _routing_enabled(): routed_params = process_routing(self, method, **props, **kwargs) return routed_params else: fit_params_steps = Bunch( **{ name: Bunch(**{method: {} for method in METHODS}) for name, step in self.steps if step is not None } ) for pname, pval in props.items(): if "__" not in pname: raise ValueError( "Pipeline.fit does not accept the {} parameter. " "You can pass parameters to specific steps of your " "pipeline using the stepname__parameter format, e.g. " "`Pipeline.fit(X, y, logisticregression__sample_weight" "=sample_weight)`.".format(pname) ) step, param = pname.split("__", 1) fit_params_steps[step]["fit"][param] = pval # without metadata routing, fit_transform and fit_predict # get all the same params and pass it to the last fit. fit_params_steps[step]["fit_transform"][param] = pval fit_params_steps[step]["fit_predict"][param] = pval return fit_params_steps def _get_metadata_for_step(self, *, step_idx, step_params, all_params): """Get params (metadata) for step `name`. This transforms the metadata up to this step if required, which is indicated by the `transform_input` parameter. If a param in `step_params` is included in the `transform_input` list, it will be transformed. Parameters ---------- step_idx : int Index of the step in the pipeline. step_params : dict Parameters specific to the step. These are routed parameters, e.g. `routed_params[name]`. If a parameter name here is included in the `pipeline.transform_input`, then it will be transformed. Note that these parameters are *after* routing, so the aliases are already resolved. all_params : dict All parameters passed by the user. Here this is used to call `transform` on the slice of the pipeline itself. Returns ------- dict Parameters to be passed to the step. The ones which should be transformed are transformed. """ if ( self.transform_input is None or not all_params or not step_params or step_idx == 0 ): # we only need to process step_params if transform_input is set # and metadata is given by the user. return step_params sub_pipeline = self[:step_idx] sub_metadata_routing = get_routing_for_object(sub_pipeline) # here we get the metadata required by sub_pipeline.transform transform_params = { key: value for key, value in all_params.items() if key in sub_metadata_routing.consumes( method="transform", params=all_params.keys() ) } transformed_params = dict() # this is to be returned transformed_cache = dict() # used to transform each param once # `step_params` is the output of `process_routing`, so it has a dict for each # method (e.g. fit, transform, predict), which are the args to be passed to # those methods. We need to transform the parameters which are in the # `transform_input`, before returning these dicts. for method, method_params in step_params.items(): transformed_params[method] = Bunch() for param_name, param_value in method_params.items(): # An example of `(param_name, param_value)` is # `('sample_weight', array([0.5, 0.5, ...]))` if param_name in self.transform_input: # This parameter now needs to be transformed by the sub_pipeline, to # this step. We cache these computations to avoid repeating them. 
transformed_params[method][param_name] = _cached_transform( sub_pipeline, cache=transformed_cache, param_name=param_name, param_value=param_value, transform_params=transform_params, ) else: transformed_params[method][param_name] = param_value return transformed_params # Estimator interface def _fit(self, X, y=None, routed_params=None, raw_params=None): """Fit the pipeline except the last step. routed_params is the output of `process_routing` raw_params is the parameters passed by the user, used when `transform_input` is set by the user, to transform metadata using a sub-pipeline. """ # shallow copy of steps - this should really be steps_ self.steps = list(self.steps) self._validate_steps() # Setup the memory memory = check_memory(self.memory) fit_transform_one_cached = memory.cache(_fit_transform_one) for step_idx, name, transformer in self._iter( with_final=False, filter_passthrough=False ): if transformer is None or transformer == "passthrough": with _print_elapsed_time("Pipeline", self._log_message(step_idx)): continue if hasattr(memory, "location") and memory.location is None: # we do not clone when caching is disabled to # preserve backward compatibility cloned_transformer = transformer else: cloned_transformer = clone(transformer) # Fit or load from cache the current transformer step_params = self._get_metadata_for_step( step_idx=step_idx, step_params=routed_params[name], all_params=raw_params, ) X, fitted_transformer = fit_transform_one_cached( cloned_transformer, X, y, weight=None, message_clsname="Pipeline", message=self._log_message(step_idx), params=step_params, ) # Replace the transformer of the step with the fitted # transformer. This is necessary when loading the transformer # from the cache. self.steps[step_idx] = (name, fitted_transformer) return X @_fit_context( # estimators in Pipeline.steps are not validated yet prefer_skip_nested_validation=False ) def fit(self, X, y=None, **params): """Fit the model. Fit all the transformers one after the other and sequentially transform the data. Finally, fit the transformed data using the final estimator. Parameters ---------- X : iterable Training data. Must fulfill input requirements of first step of the pipeline. y : iterable, default=None Training targets. Must fulfill label requirements for all steps of the pipeline. **params : dict of str -> object - If `enable_metadata_routing=False` (default): Parameters passed to the ``fit`` method of each step, where each parameter name is prefixed such that parameter ``p`` for step ``s`` has key ``s__p``. - If `enable_metadata_routing=True`: Parameters requested and accepted by steps. Each step must have requested certain metadata for these parameters to be forwarded to them. .. versionchanged:: 1.4 Parameters are now passed to the ``transform`` method of the intermediate steps as well, if requested, and if `enable_metadata_routing=True` is set via :func:`~sklearn.set_config`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- self : object Pipeline with fitted steps. """ if not _routing_enabled() and self.transform_input is not None: raise ValueError( "The `transform_input` parameter can only be set if metadata " "routing is enabled. You can enable metadata routing using " "`sklearn.set_config(enable_metadata_routing=True)`." 
) routed_params = self._check_method_params(method="fit", props=params) Xt = self._fit(X, y, routed_params, raw_params=params) with _print_elapsed_time("Pipeline", self._log_message(len(self.steps) - 1)): if self._final_estimator != "passthrough": last_step_params = self._get_metadata_for_step( step_idx=len(self) - 1, step_params=routed_params[self.steps[-1][0]], all_params=params, ) self._final_estimator.fit(Xt, y, **last_step_params["fit"]) return self def _can_fit_transform(self): return ( self._final_estimator == "passthrough" or hasattr(self._final_estimator, "transform") or hasattr(self._final_estimator, "fit_transform") ) @available_if(_can_fit_transform) @_fit_context( # estimators in Pipeline.steps are not validated yet prefer_skip_nested_validation=False ) def fit_transform(self, X, y=None, **params): """Fit the model and transform with the final estimator. Fit all the transformers one after the other and sequentially transform the data. Only valid if the final estimator either implements `fit_transform` or `fit` and `transform`. Parameters ---------- X : iterable Training data. Must fulfill input requirements of first step of the pipeline. y : iterable, default=None Training targets. Must fulfill label requirements for all steps of the pipeline. **params : dict of str -> object - If `enable_metadata_routing=False` (default): Parameters passed to the ``fit`` method of each step, where each parameter name is prefixed such that parameter ``p`` for step ``s`` has key ``s__p``. - If `enable_metadata_routing=True`: Parameters requested and accepted by steps. Each step must have requested certain metadata for these parameters to be forwarded to them. .. versionchanged:: 1.4 Parameters are now passed to the ``transform`` method of the intermediate steps as well, if requested, and if `enable_metadata_routing=True`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Returns ------- Xt : ndarray of shape (n_samples, n_transformed_features) Transformed samples. """ routed_params = self._check_method_params(method="fit_transform", props=params) Xt = self._fit(X, y, routed_params) last_step = self._final_estimator with _print_elapsed_time("Pipeline", self._log_message(len(self.steps) - 1)): if last_step == "passthrough": return Xt last_step_params = self._get_metadata_for_step( step_idx=len(self) - 1, step_params=routed_params[self.steps[-1][0]], all_params=params, ) if hasattr(last_step, "fit_transform"): return last_step.fit_transform( Xt, y, **last_step_params["fit_transform"] ) else: return last_step.fit(Xt, y, **last_step_params["fit"]).transform( Xt, **last_step_params["transform"] ) @available_if(_final_estimator_has("predict")) def predict(self, X, **params): """Transform the data, and apply `predict` with the final estimator. Call `transform` of each transformer in the pipeline. The transformed data are finally passed to the final estimator that calls `predict` method. Only valid if the final estimator implements `predict`. Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. **params : dict of str -> object - If `enable_metadata_routing=False` (default): Parameters to the ``predict`` called at the end of all transformations in the pipeline. - If `enable_metadata_routing=True`: Parameters requested and accepted by steps. Each step must have requested certain metadata for these parameters to be forwarded to them. .. versionadded:: 0.20 .. 
versionchanged:: 1.4 Parameters are now passed to the ``transform`` method of the intermediate steps as well, if requested, and if `enable_metadata_routing=True` is set via :func:`~sklearn.set_config`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Note that while this may be used to return uncertainties from some models with ``return_std`` or ``return_cov``, uncertainties that are generated by the transformations in the pipeline are not propagated to the final estimator. Returns ------- y_pred : ndarray Result of calling `predict` on the final estimator. """ check_is_fitted(self) Xt = X if not _routing_enabled(): for _, name, transform in self._iter(with_final=False): Xt = transform.transform(Xt) return self.steps[-1][1].predict(Xt, **params) # metadata routing enabled routed_params = process_routing(self, "predict", **params) for _, name, transform in self._iter(with_final=False): Xt = transform.transform(Xt, **routed_params[name].transform) return self.steps[-1][1].predict(Xt, **routed_params[self.steps[-1][0]].predict) @available_if(_final_estimator_has("fit_predict")) @_fit_context( # estimators in Pipeline.steps are not validated yet prefer_skip_nested_validation=False ) def fit_predict(self, X, y=None, **params): """Transform the data, and apply `fit_predict` with the final estimator. Call `fit_transform` of each transformer in the pipeline. The transformed data are finally passed to the final estimator that calls `fit_predict` method. Only valid if the final estimator implements `fit_predict`. Parameters ---------- X : iterable Training data. Must fulfill input requirements of first step of the pipeline. y : iterable, default=None Training targets. Must fulfill label requirements for all steps of the pipeline. **params : dict of str -> object - If `enable_metadata_routing=False` (default): Parameters to the ``predict`` called at the end of all transformations in the pipeline. - If `enable_metadata_routing=True`: Parameters requested and accepted by steps. Each step must have requested certain metadata for these parameters to be forwarded to them. .. versionadded:: 0.20 .. versionchanged:: 1.4 Parameters are now passed to the ``transform`` method of the intermediate steps as well, if requested, and if `enable_metadata_routing=True`. See :ref:`Metadata Routing User Guide <metadata_routing>` for more details. Note that while this may be used to return uncertainties from some models with ``return_std`` or ``return_cov``, uncertainties that are generated by the transformations in the pipeline are not propagated to the final estimator. Returns ------- y_pred : ndarray Result of calling `fit_predict` on the final estimator. """ routed_params = self._check_method_params(method="fit_predict", props=params) Xt = self._fit(X, y, routed_params) params_last_step = routed_params[self.steps[-1][0]] with _print_elapsed_time("Pipeline", self._log_message(len(self.steps) - 1)): y_pred = self.steps[-1][1].fit_predict( Xt, y, **params_last_step.get("fit_predict", {}) ) return y_pred @available_if(_final_estimator_has("predict_proba")) def predict_proba(self, X, **params): """Transform the data, and apply `predict_proba` with the final estimator.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
true
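A minimal usage sketch (separate from the file snapshot above, assuming scikit-learn and joblib are installed) of the behaviours the Pipeline docstring describes: the ``step__param`` syntax, replacing a whole step with ``'passthrough'``, and caching fitted transformers via ``memory``:

# Hedged illustration, not taken from the record above: names and parameter
# values here are arbitrary choices for demonstration only.
from shutil import rmtree
from tempfile import mkdtemp

from sklearn.datasets import make_classification
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

X, y = make_classification(n_samples=200, random_state=0)

cache_dir = mkdtemp()  # fitted transformers are cached here and re-used
pipe = Pipeline(
    [
        ("scaler", StandardScaler()),
        ("pca", PCA(n_components=5)),
        ("clf", LogisticRegression(max_iter=1000)),
    ],
    memory=cache_dir,
)

# Step parameters are addressed as "<step name>__<param name>"; a step can
# also be swapped out entirely, here by disabling PCA with 'passthrough'.
param_grid = {
    "pca": ["passthrough", PCA(n_components=5)],
    "clf__C": [0.1, 1.0, 10.0],
}
search = GridSearchCV(pipe, param_grid, cv=3).fit(X, y)
print(search.best_params_)

rmtree(cache_dir)  # remove the transformer cache when done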
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/__init__.py
sklearn/__init__.py
"""Configure global settings and get information about the working environment.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # Machine learning module for Python # ================================== # # sklearn is a Python module integrating classical machine # learning algorithms in the tightly-knit world of scientific Python # packages (numpy, scipy, matplotlib). # # It aims to provide simple and efficient solutions to learning problems # that are accessible to everybody and reusable in various contexts: # machine-learning as a versatile tool for science and engineering. # # See https://scikit-learn.org for complete documentation. import importlib as _importlib import logging import os import random from sklearn._config import config_context, get_config, set_config logger = logging.getLogger(__name__) # PEP0440 compatible formatted version, see: # https://www.python.org/dev/peps/pep-0440/ # # Generic release markers: # X.Y.0 # For first release after an increment in Y # X.Y.Z # For bugfix releases # # Admissible pre-release markers: # X.Y.ZaN # Alpha release # X.Y.ZbN # Beta release # X.Y.ZrcN # Release Candidate # X.Y.Z # Final release # # Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer. # 'X.Y.dev0' is the canonical version of 'X.Y.dev' # __version__ = "1.9.dev0" # On OSX, we can get a runtime error due to multiple OpenMP libraries loaded # simultaneously. This can happen for instance when calling BLAS inside a # prange. Setting the following environment variable allows multiple OpenMP # libraries to be loaded. It should not degrade performances since we manually # take care of potential over-subcription performance issues, in sections of # the code where nested OpenMP loops can happen, by dynamically reconfiguring # the inner OpenMP runtime to temporarily disable it while under the scope of # the outer OpenMP parallel section. os.environ.setdefault("KMP_DUPLICATE_LIB_OK", "True") # Workaround issue discovered in intel-openmp 2019.5: # https://github.com/ContinuumIO/anaconda-issues/issues/11294 os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE") # `_distributor_init` allows distributors to run custom init code. # For instance, for the Windows wheel, this is used to pre-load the # vcomp shared library runtime for OpenMP embedded in the sklearn/.libs # sub-folder. # It is necessary to do this prior to importing show_versions as the # later is linked to the OpenMP runtime to make it possible to introspect # it and importing it first would fail if the OpenMP dll cannot be found. 
from sklearn import __check_build, _distributor_init # noqa: E402 F401 from sklearn.base import clone # noqa: E402 from sklearn.utils._show_versions import show_versions # noqa: E402 _submodules = [ "calibration", "cluster", "covariance", "cross_decomposition", "datasets", "decomposition", "dummy", "ensemble", "exceptions", "experimental", "externals", "feature_extraction", "feature_selection", "frozen", "gaussian_process", "inspection", "isotonic", "kernel_approximation", "kernel_ridge", "linear_model", "manifold", "metrics", "mixture", "model_selection", "multiclass", "multioutput", "naive_bayes", "neighbors", "neural_network", "pipeline", "preprocessing", "random_projection", "semi_supervised", "svm", "tree", "discriminant_analysis", "impute", "compose", ] __all__ = _submodules + [ # Non-modules: "clone", "get_config", "set_config", "config_context", "show_versions", ] def __dir__(): return __all__ def __getattr__(name): if name in _submodules: return _importlib.import_module(f"sklearn.{name}") else: try: return globals()[name] except KeyError: raise AttributeError(f"Module 'sklearn' has no attribute '{name}'") def setup_module(module): """Fixture for the tests to assure globally controllable seeding of RNGs""" import numpy as np # Check if a random seed exists in the environment, if not create one. _random_seed = os.environ.get("SKLEARN_SEED", None) if _random_seed is None: _random_seed = np.random.uniform() * np.iinfo(np.int32).max _random_seed = int(_random_seed) print("I: Seeding RNGs with %r" % _random_seed) np.random.seed(_random_seed) random.seed(_random_seed)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
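A short sketch (separate from the file snapshot above, assuming scikit-learn is installed) of the public helpers this module re-exports: the global configuration functions and the lazily imported submodules listed in ``_submodules``:

# Hedged illustration, not taken from the record above.
import sklearn
from sklearn import config_context, get_config, set_config

print(sklearn.__version__)  # PEP 440 formatted version string

# set_config changes the global configuration; config_context scopes the
# change to the body of the ``with`` block only.
set_config(assume_finite=False)
with config_context(assume_finite=True):
    assert get_config()["assume_finite"] is True
assert get_config()["assume_finite"] is False

# Submodules are imported on first attribute access via module __getattr__.
pipeline_module = sklearn.pipeline
print(pipeline_module.__name__)  # "sklearn.pipeline"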
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/multioutput.py
sklearn/multioutput.py
"""Multioutput regression and classification. The estimators provided in this module are meta-estimators: they require a base estimator to be provided in their constructor. The meta-estimator extends single output estimators to multioutput estimators. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import warnings from abc import ABCMeta, abstractmethod from numbers import Integral import numpy as np import scipy.sparse as sp from sklearn.base import ( BaseEstimator, ClassifierMixin, MetaEstimatorMixin, RegressorMixin, _fit_context, clone, is_classifier, ) from sklearn.model_selection import cross_val_predict from sklearn.utils import Bunch, check_random_state, get_tags from sklearn.utils._param_validation import HasMethods, Hidden, StrOptions from sklearn.utils._response import _get_response_values from sklearn.utils._user_interface import _print_elapsed_time from sklearn.utils.metadata_routing import ( MetadataRouter, MethodMapping, _raise_for_params, _routing_enabled, process_routing, ) from sklearn.utils.metaestimators import available_if from sklearn.utils.multiclass import check_classification_targets from sklearn.utils.parallel import Parallel, delayed from sklearn.utils.validation import ( _check_method_params, _check_response_method, check_is_fitted, has_fit_parameter, validate_data, ) __all__ = [ "ClassifierChain", "MultiOutputClassifier", "MultiOutputRegressor", "RegressorChain", ] def _fit_estimator(estimator, X, y, sample_weight=None, **fit_params): estimator = clone(estimator) if sample_weight is not None: estimator.fit(X, y, sample_weight=sample_weight, **fit_params) else: estimator.fit(X, y, **fit_params) return estimator def _partial_fit_estimator( estimator, X, y, classes=None, partial_fit_params=None, first_time=True ): partial_fit_params = {} if partial_fit_params is None else partial_fit_params if first_time: estimator = clone(estimator) if classes is not None: estimator.partial_fit(X, y, classes=classes, **partial_fit_params) else: estimator.partial_fit(X, y, **partial_fit_params) return estimator def _available_if_estimator_has(attr): """Return a function to check if the sub-estimator(s) has(have) `attr`. Helper for Chain implementations. """ def _check(self): if hasattr(self, "estimators_"): return all(hasattr(est, attr) for est in self.estimators_) if hasattr(self.estimator, attr): return True return False return available_if(_check) class _MultiOutputEstimator(MetaEstimatorMixin, BaseEstimator, metaclass=ABCMeta): _parameter_constraints: dict = { "estimator": [HasMethods(["fit", "predict"])], "n_jobs": [Integral, None], } @abstractmethod def __init__(self, estimator, *, n_jobs=None): self.estimator = estimator self.n_jobs = n_jobs @_available_if_estimator_has("partial_fit") @_fit_context( # MultiOutput*.estimator is not validated yet prefer_skip_nested_validation=False ) def partial_fit(self, X, y, classes=None, sample_weight=None, **partial_fit_params): """Incrementally fit a separate model for each class output. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data. y : {array-like, sparse matrix} of shape (n_samples, n_outputs) Multi-output targets. classes : list of ndarray of shape (n_outputs,), default=None Each array is unique classes for one output in str/int. Can be obtained via ``[np.unique(y[:, i]) for i in range(y.shape[1])]``, where `y` is the target matrix of the entire dataset. 
This argument is required for the first call to partial_fit and can be omitted in the subsequent calls. Note that `y` doesn't need to contain all labels in `classes`. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If `None`, then samples are equally weighted. Only supported if the underlying regressor supports sample weights. **partial_fit_params : dict of str -> object Parameters passed to the ``estimator.partial_fit`` method of each sub-estimator. Only available if `enable_metadata_routing=True`. See the :ref:`User Guide <metadata_routing>`. .. versionadded:: 1.3 Returns ------- self : object Returns a fitted instance. """ _raise_for_params(partial_fit_params, self, "partial_fit") first_time = not hasattr(self, "estimators_") y = validate_data(self, X="no_validation", y=y, multi_output=True) if y.ndim == 1: raise ValueError( "y must have at least two dimensions for " "multi-output regression but has only one." ) if _routing_enabled(): if sample_weight is not None: partial_fit_params["sample_weight"] = sample_weight routed_params = process_routing( self, "partial_fit", **partial_fit_params, ) else: if sample_weight is not None and not has_fit_parameter( self.estimator, "sample_weight" ): raise ValueError( "Underlying estimator does not support sample weights." ) if sample_weight is not None: routed_params = Bunch( estimator=Bunch(partial_fit=Bunch(sample_weight=sample_weight)) ) else: routed_params = Bunch(estimator=Bunch(partial_fit=Bunch())) self.estimators_ = Parallel(n_jobs=self.n_jobs)( delayed(_partial_fit_estimator)( self.estimators_[i] if not first_time else self.estimator, X, y[:, i], classes[i] if classes is not None else None, partial_fit_params=routed_params.estimator.partial_fit, first_time=first_time, ) for i in range(y.shape[1]) ) if first_time and hasattr(self.estimators_[0], "n_features_in_"): self.n_features_in_ = self.estimators_[0].n_features_in_ if first_time and hasattr(self.estimators_[0], "feature_names_in_"): self.feature_names_in_ = self.estimators_[0].feature_names_in_ return self @_fit_context( # MultiOutput*.estimator is not validated yet prefer_skip_nested_validation=False ) def fit(self, X, y, sample_weight=None, **fit_params): """Fit the model to data, separately for each output variable. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data. y : {array-like, sparse matrix} of shape (n_samples, n_outputs) Multi-output targets. An indicator matrix turns on multilabel estimation. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If `None`, then samples are equally weighted. Only supported if the underlying regressor supports sample weights. **fit_params : dict of string -> object Parameters passed to the ``estimator.fit`` method of each step. .. versionadded:: 0.23 Returns ------- self : object Returns a fitted instance. """ if not hasattr(self.estimator, "fit"): raise ValueError("The base estimator should implement a fit method") y = validate_data(self, X="no_validation", y=y, multi_output=True) if is_classifier(self): check_classification_targets(y) if y.ndim == 1: raise ValueError( "y must have at least two dimensions for " "multi-output regression but has only one." 
) if _routing_enabled(): if sample_weight is not None: fit_params["sample_weight"] = sample_weight routed_params = process_routing( self, "fit", **fit_params, ) else: if sample_weight is not None and not has_fit_parameter( self.estimator, "sample_weight" ): raise ValueError( "Underlying estimator does not support sample weights." ) fit_params_validated = _check_method_params(X, params=fit_params) routed_params = Bunch(estimator=Bunch(fit=fit_params_validated)) if sample_weight is not None: routed_params.estimator.fit["sample_weight"] = sample_weight self.estimators_ = Parallel(n_jobs=self.n_jobs)( delayed(_fit_estimator)( self.estimator, X, y[:, i], **routed_params.estimator.fit ) for i in range(y.shape[1]) ) if hasattr(self.estimators_[0], "n_features_in_"): self.n_features_in_ = self.estimators_[0].n_features_in_ if hasattr(self.estimators_[0], "feature_names_in_"): self.feature_names_in_ = self.estimators_[0].feature_names_in_ return self def predict(self, X): """Predict multi-output variable using model for each target variable. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data. Returns ------- y : {array-like, sparse matrix} of shape (n_samples, n_outputs) Multi-output targets predicted across multiple predictors. Note: Separate models are generated for each predictor. """ check_is_fitted(self) if not hasattr(self.estimators_[0], "predict"): raise ValueError("The base estimator should implement a predict method") y = Parallel(n_jobs=self.n_jobs)( delayed(e.predict)(X) for e in self.estimators_ ) return np.asarray(y).T def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.input_tags.sparse = get_tags(self.estimator).input_tags.sparse tags.target_tags.single_output = False tags.target_tags.multi_output = True return tags def get_metadata_routing(self): """Get metadata routing of this object. Please check :ref:`User Guide <metadata_routing>` on how the routing mechanism works. .. versionadded:: 1.3 Returns ------- routing : MetadataRouter A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating routing information. """ router = MetadataRouter(owner=self).add( estimator=self.estimator, method_mapping=MethodMapping() .add(caller="partial_fit", callee="partial_fit") .add(caller="fit", callee="fit"), ) return router class MultiOutputRegressor(RegressorMixin, _MultiOutputEstimator): """Multi target regression. This strategy consists of fitting one regressor per target. This is a simple strategy for extending regressors that do not natively support multi-target regression. .. versionadded:: 0.18 Parameters ---------- estimator : estimator object An estimator object implementing :term:`fit` and :term:`predict`. n_jobs : int or None, optional (default=None) The number of jobs to run in parallel. :meth:`fit`, :meth:`predict` and :meth:`partial_fit` (if supported by the passed estimator) will be parallelized for each target. When individual estimators are fast to train or predict, using ``n_jobs > 1`` can result in slower performance due to the parallelism overhead. ``None`` means `1` unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all available processes / threads. See :term:`Glossary <n_jobs>` for more details. .. versionchanged:: 0.20 `n_jobs` default changed from `1` to `None`. Attributes ---------- estimators_ : list of ``n_output`` estimators Estimators used for predictions. n_features_in_ : int Number of features seen during :term:`fit`. 
Only defined if the underlying `estimator` exposes such an attribute when fit. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Only defined if the underlying estimators expose such an attribute when fit. .. versionadded:: 1.0 See Also -------- RegressorChain : A multi-label model that arranges regressions into a chain. MultiOutputClassifier : Classifies each output independently rather than chaining. Examples -------- >>> import numpy as np >>> from sklearn.datasets import load_linnerud >>> from sklearn.multioutput import MultiOutputRegressor >>> from sklearn.linear_model import Ridge >>> X, y = load_linnerud(return_X_y=True) >>> regr = MultiOutputRegressor(Ridge(random_state=123)).fit(X, y) >>> regr.predict(X[[0]]) array([[176, 35.1, 57.1]]) """ def __init__(self, estimator, *, n_jobs=None): super().__init__(estimator, n_jobs=n_jobs) @_available_if_estimator_has("partial_fit") def partial_fit(self, X, y, sample_weight=None, **partial_fit_params): """Incrementally fit the model to data, for each output variable. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data. y : {array-like, sparse matrix} of shape (n_samples, n_outputs) Multi-output targets. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If `None`, then samples are equally weighted. Only supported if the underlying regressor supports sample weights. **partial_fit_params : dict of str -> object Parameters passed to the ``estimator.partial_fit`` method of each sub-estimator. Only available if `enable_metadata_routing=True`. See the :ref:`User Guide <metadata_routing>`. .. versionadded:: 1.3 Returns ------- self : object Returns a fitted instance. """ super().partial_fit(X, y, sample_weight=sample_weight, **partial_fit_params) class MultiOutputClassifier(ClassifierMixin, _MultiOutputEstimator): """Multi target classification. This strategy consists of fitting one classifier per target. This is a simple strategy for extending classifiers that do not natively support multi-target classification. Parameters ---------- estimator : estimator object An estimator object implementing :term:`fit` and :term:`predict`. A :term:`predict_proba` method will be exposed only if `estimator` implements it. n_jobs : int or None, optional (default=None) The number of jobs to run in parallel. :meth:`fit`, :meth:`predict` and :meth:`partial_fit` (if supported by the passed estimator) will be parallelized for each target. When individual estimators are fast to train or predict, using ``n_jobs > 1`` can result in slower performance due to the parallelism overhead. ``None`` means `1` unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all available processes / threads. See :term:`Glossary <n_jobs>` for more details. .. versionchanged:: 0.20 `n_jobs` default changed from `1` to `None`. Attributes ---------- classes_ : ndarray of shape (n_classes,) Class labels. estimators_ : list of ``n_output`` estimators Estimators used for predictions. n_features_in_ : int Number of features seen during :term:`fit`. Only defined if the underlying `estimator` exposes such an attribute when fit. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Only defined if the underlying estimators expose such an attribute when fit. .. 
versionadded:: 1.0 See Also -------- ClassifierChain : A multi-label model that arranges binary classifiers into a chain. MultiOutputRegressor : Fits one regressor per target variable. Examples -------- >>> import numpy as np >>> from sklearn.datasets import make_multilabel_classification >>> from sklearn.multioutput import MultiOutputClassifier >>> from sklearn.linear_model import LogisticRegression >>> X, y = make_multilabel_classification(n_classes=3, random_state=0) >>> clf = MultiOutputClassifier(LogisticRegression()).fit(X, y) >>> clf.predict(X[-2:]) array([[1, 1, 1], [1, 0, 1]]) """ def __init__(self, estimator, *, n_jobs=None): super().__init__(estimator, n_jobs=n_jobs) def fit(self, X, Y, sample_weight=None, **fit_params): """Fit the model to data matrix X and targets Y. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data. Y : array-like of shape (n_samples, n_classes) The target values. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If `None`, then samples are equally weighted. Only supported if the underlying classifier supports sample weights. **fit_params : dict of string -> object Parameters passed to the ``estimator.fit`` method of each step. .. versionadded:: 0.23 Returns ------- self : object Returns a fitted instance. """ super().fit(X, Y, sample_weight=sample_weight, **fit_params) self.classes_ = [estimator.classes_ for estimator in self.estimators_] return self def _check_predict_proba(self): if hasattr(self, "estimators_"): # raise an AttributeError if `predict_proba` does not exist for # each estimator [getattr(est, "predict_proba") for est in self.estimators_] return True # raise an AttributeError if `predict_proba` does not exist for the # unfitted estimator getattr(self.estimator, "predict_proba") return True @available_if(_check_predict_proba) def predict_proba(self, X): """Return prediction probabilities for each class of each output. This method will raise a ``ValueError`` if any of the estimators do not have ``predict_proba``. Parameters ---------- X : array-like of shape (n_samples, n_features) The input data. Returns ------- p : array of shape (n_samples, n_classes), or a list of n_outputs \ such arrays if n_outputs > 1. The class probabilities of the input samples. The order of the classes corresponds to that in the attribute :term:`classes_`. .. versionchanged:: 0.19 This function now returns a list of arrays where the length of the list is ``n_outputs``, and each array is (``n_samples``, ``n_classes``) for that particular output. """ check_is_fitted(self) results = [estimator.predict_proba(X) for estimator in self.estimators_] return results def score(self, X, y): """Return the mean accuracy on the given test data and labels. Parameters ---------- X : array-like of shape (n_samples, n_features) Test samples. y : array-like of shape (n_samples, n_outputs) True values for X. Returns ------- scores : float Mean accuracy of predicted target versus true target. 
""" check_is_fitted(self) n_outputs_ = len(self.estimators_) if y.ndim == 1: raise ValueError( "y must have at least two dimensions for " "multi target classification but has only one" ) if y.shape[1] != n_outputs_: raise ValueError( "The number of outputs of Y for fit {0} and" " score {1} should be same".format(n_outputs_, y.shape[1]) ) y_pred = self.predict(X) return np.mean(np.all(y == y_pred, axis=1)) def __sklearn_tags__(self): tags = super().__sklearn_tags__() # FIXME tags._skip_test = True return tags def _available_if_base_estimator_has(attr): """Return a function to check if `base_estimator` or `estimators_` has `attr`. Helper for Chain implementations. """ def _check(self): return hasattr(self._get_estimator(), attr) or all( hasattr(est, attr) for est in self.estimators_ ) return available_if(_check) class _BaseChain(BaseEstimator, metaclass=ABCMeta): _parameter_constraints: dict = { "base_estimator": [ HasMethods(["fit", "predict"]), StrOptions({"deprecated"}), ], "estimator": [ HasMethods(["fit", "predict"]), Hidden(None), ], "order": ["array-like", StrOptions({"random"}), None], "cv": ["cv_object", StrOptions({"prefit"})], "random_state": ["random_state"], "verbose": ["boolean"], } # TODO(1.9): Remove base_estimator def __init__( self, estimator=None, *, order=None, cv=None, random_state=None, verbose=False, base_estimator="deprecated", ): self.estimator = estimator self.base_estimator = base_estimator self.order = order self.cv = cv self.random_state = random_state self.verbose = verbose # TODO(1.9): This is a temporary getter method to validate input wrt deprecation. # It was only included to avoid relying on the presence of self.estimator_ def _get_estimator(self): """Get and validate estimator.""" if self.estimator is not None and (self.base_estimator != "deprecated"): raise ValueError( "Both `estimator` and `base_estimator` are provided. You should only" " pass `estimator`. `base_estimator` as a parameter is deprecated in" " version 1.7, and will be removed in version 1.9." ) if self.base_estimator != "deprecated": warning_msg = ( "`base_estimator` as an argument was deprecated in 1.7 and will be" " removed in 1.9. Use `estimator` instead." 
) warnings.warn(warning_msg, FutureWarning) return self.base_estimator else: return self.estimator def _log_message(self, *, estimator_idx, n_estimators, processing_msg): if not self.verbose: return None return f"({estimator_idx} of {n_estimators}) {processing_msg}" def _get_predictions(self, X, *, output_method): """Get predictions for each model in the chain.""" check_is_fitted(self) X = validate_data(self, X, accept_sparse=True, reset=False) Y_output_chain = np.zeros((X.shape[0], len(self.estimators_))) Y_feature_chain = np.zeros((X.shape[0], len(self.estimators_))) # `RegressorChain` does not have a `chain_method_` parameter so we # default to "predict" chain_method = getattr(self, "chain_method_", "predict") hstack = sp.hstack if sp.issparse(X) else np.hstack for chain_idx, estimator in enumerate(self.estimators_): previous_predictions = Y_feature_chain[:, :chain_idx] # if `X` is a scipy sparse dok_array, we convert it to a sparse # coo_array format before hstacking, it's faster; see # https://github.com/scipy/scipy/issues/20060#issuecomment-1937007039: if sp.issparse(X) and not sp.isspmatrix(X) and X.format == "dok": X = sp.coo_array(X) X_aug = hstack((X, previous_predictions)) feature_predictions, _ = _get_response_values( estimator, X_aug, response_method=chain_method, ) Y_feature_chain[:, chain_idx] = feature_predictions output_predictions, _ = _get_response_values( estimator, X_aug, response_method=output_method, ) Y_output_chain[:, chain_idx] = output_predictions inv_order = np.empty_like(self.order_) inv_order[self.order_] = np.arange(len(self.order_)) Y_output = Y_output_chain[:, inv_order] return Y_output @abstractmethod def fit(self, X, Y, **fit_params): """Fit the model to data matrix X and targets Y. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data. Y : array-like of shape (n_samples, n_classes) The target values. **fit_params : dict of string -> object Parameters passed to the `fit` method of each step. .. versionadded:: 0.23 Returns ------- self : object Returns a fitted instance. 
""" X, Y = validate_data(self, X, Y, multi_output=True, accept_sparse=True) random_state = check_random_state(self.random_state) self.order_ = self.order if isinstance(self.order_, tuple): self.order_ = np.array(self.order_) if self.order_ is None: self.order_ = np.array(range(Y.shape[1])) elif isinstance(self.order_, str): if self.order_ == "random": self.order_ = random_state.permutation(Y.shape[1]) elif sorted(self.order_) != list(range(Y.shape[1])): raise ValueError("invalid order") self.estimators_ = [clone(self._get_estimator()) for _ in range(Y.shape[1])] if self.cv is None: Y_pred_chain = Y[:, self.order_] if sp.issparse(X): X_aug = sp.hstack((X, Y_pred_chain), format="lil") X_aug = X_aug.tocsr() else: X_aug = np.hstack((X, Y_pred_chain)) elif sp.issparse(X): # TODO: remove this condition check when the minimum supported scipy version # doesn't support sparse matrices anymore if not sp.isspmatrix(X): # if `X` is a scipy sparse dok_array, we convert it to a sparse # coo_array format before hstacking, it's faster; see # https://github.com/scipy/scipy/issues/20060#issuecomment-1937007039: if X.format == "dok": X = sp.coo_array(X) # in case that `X` is a sparse array we create `Y_pred_chain` as a # sparse array format: Y_pred_chain = sp.coo_array((X.shape[0], Y.shape[1])) else: Y_pred_chain = sp.coo_matrix((X.shape[0], Y.shape[1])) X_aug = sp.hstack((X, Y_pred_chain), format="lil") else: Y_pred_chain = np.zeros((X.shape[0], Y.shape[1])) X_aug = np.hstack((X, Y_pred_chain)) del Y_pred_chain if _routing_enabled(): routed_params = process_routing(self, "fit", **fit_params) else: routed_params = Bunch(estimator=Bunch(fit=fit_params)) if hasattr(self, "chain_method"): chain_method = _check_response_method( self._get_estimator(), self.chain_method, ).__name__ self.chain_method_ = chain_method else: # `RegressorChain` does not have a `chain_method` parameter chain_method = "predict" for chain_idx, estimator in enumerate(self.estimators_): message = self._log_message( estimator_idx=chain_idx + 1, n_estimators=len(self.estimators_), processing_msg=f"Processing order {self.order_[chain_idx]}", ) y = Y[:, self.order_[chain_idx]] with _print_elapsed_time("Chain", message): estimator.fit( X_aug[:, : (X.shape[1] + chain_idx)], y, **routed_params.estimator.fit, ) if self.cv is not None and chain_idx < len(self.estimators_) - 1: col_idx = X.shape[1] + chain_idx cv_result = cross_val_predict( self._get_estimator(), X_aug[:, :col_idx], y=y, cv=self.cv, method=chain_method, ) # `predict_proba` output is 2D, we use only output for classes[-1] if cv_result.ndim > 1: cv_result = cv_result[:, 1] if sp.issparse(X_aug): X_aug[:, col_idx] = np.expand_dims(cv_result, 1) else: X_aug[:, col_idx] = cv_result return self def predict(self, X): """Predict on the data matrix X using the ClassifierChain model. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data. Returns ------- Y_pred : array-like of shape (n_samples, n_classes) The predicted values. """ return self._get_predictions(X, output_method="predict") def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.input_tags.sparse = get_tags(self._get_estimator()).input_tags.sparse return tags class ClassifierChain(MetaEstimatorMixin, ClassifierMixin, _BaseChain): """A multi-label model that arranges binary classifiers into a chain. 
Each model makes a prediction in the order specified by the chain using all of the available features provided to the model plus the predictions of models that are earlier in the chain. For an example of how to use ``ClassifierChain`` and benefit from its ensemble, see :ref:`ClassifierChain on a yeast dataset <sphx_glr_auto_examples_multioutput_plot_classifier_chain_yeast.py>` example. Read more in the :ref:`User Guide <classifierchain>`. .. versionadded:: 0.19 Parameters ---------- estimator : estimator The base estimator from which the classifier chain is built. order : array-like of shape (n_outputs,) or 'random', default=None If `None`, the order will be determined by the order of columns in the label matrix Y.:: order = [0, 1, 2, ..., Y.shape[1] - 1] The order of the chain can be explicitly set by providing a list of integers. For example, for a chain of length 5.:: order = [1, 3, 2, 4, 0] means that the first model in the chain will make predictions for column 1 in the Y matrix, the second model will make predictions for column 3, etc. If order is `random` a random ordering will be used. cv : int, cross-validation generator or an iterable, default=None Determines whether to use cross validated predictions or true labels for the results of previous estimators in the chain. Possible inputs for cv are: - None, to use true labels when fitting, - integer, to specify the number of folds in a (Stratified)KFold, - :term:`CV splitter`, - An iterable yielding (train, test) splits as arrays of indices. chain_method : {'predict', 'predict_proba', 'predict_log_proba', \ 'decision_function'} or list of such str's, default='predict'
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
true
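A small sketch (separate from the file snapshot above, assuming scikit-learn is installed) contrasting the two multilabel strategies defined in this module: one independent classifier per output versus a classifier chain that feeds earlier predictions to later estimators:

# Hedged illustration, not taken from the record above: dataset sizes,
# the base estimator, and the metric are arbitrary choices.
from sklearn.datasets import make_multilabel_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import jaccard_score
from sklearn.model_selection import train_test_split
from sklearn.multioutput import ClassifierChain, MultiOutputClassifier

X, Y = make_multilabel_classification(n_classes=4, random_state=0)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, random_state=0)

base = LogisticRegression(max_iter=1000)

# One classifier per output column, fitted independently.
independent = MultiOutputClassifier(base).fit(X_train, Y_train)

# A chain in a random label order; earlier outputs become extra features
# for the estimators later in the chain.
chain = ClassifierChain(base, order="random", random_state=0).fit(X_train, Y_train)

for name, model in [("independent", independent), ("chain", chain)]:
    score = jaccard_score(Y_test, model.predict(X_test), average="samples")
    print(f"{name}: Jaccard similarity = {score:.3f}")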
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/base.py
sklearn/base.py
"""Base classes for all estimators and various utility functions.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import copy import functools import inspect import platform import re import warnings from collections import defaultdict import numpy as np from sklearn import __version__ from sklearn._config import config_context, get_config from sklearn.exceptions import InconsistentVersionWarning from sklearn.utils._metadata_requests import _MetadataRequester, _routing_enabled from sklearn.utils._missing import is_pandas_na, is_scalar_nan from sklearn.utils._param_validation import validate_parameter_constraints from sklearn.utils._repr_html.base import ReprHTMLMixin, _HTMLDocumentationLinkMixin from sklearn.utils._repr_html.estimator import estimator_html_repr from sklearn.utils._repr_html.params import ParamsDict from sklearn.utils._set_output import _SetOutputMixin from sklearn.utils._tags import ( ClassifierTags, RegressorTags, Tags, TargetTags, TransformerTags, get_tags, ) from sklearn.utils.fixes import _IS_32BIT from sklearn.utils.validation import ( _check_feature_names_in, _generate_get_feature_names_out, _is_fitted, check_array, check_is_fitted, ) def clone(estimator, *, safe=True): """Construct a new unfitted estimator with the same parameters. Clone does a deep copy of the model in an estimator without actually copying attached data. It returns a new estimator with the same parameters that has not been fitted on any data. .. versionchanged:: 1.3 Delegates to `estimator.__sklearn_clone__` if the method exists. Parameters ---------- estimator : {list, tuple, set} of estimator instance or a single \ estimator instance The estimator or group of estimators to be cloned. safe : bool, default=True If safe is False, clone will fall back to a deep copy on objects that are not estimators. Ignored if `estimator.__sklearn_clone__` exists. Returns ------- estimator : object The deep copy of the input, an estimator if input is an estimator. Notes ----- If the estimator's `random_state` parameter is an integer (or if the estimator doesn't have a `random_state` parameter), an *exact clone* is returned: the clone and the original estimator will give the exact same results. Otherwise, *statistical clone* is returned: the clone might return different results from the original estimator. More details can be found in :ref:`randomness`. Examples -------- >>> from sklearn.base import clone >>> from sklearn.linear_model import LogisticRegression >>> X = [[-1, 0], [0, 1], [0, -1], [1, 0]] >>> y = [0, 0, 1, 1] >>> classifier = LogisticRegression().fit(X, y) >>> cloned_classifier = clone(classifier) >>> hasattr(classifier, "classes_") True >>> hasattr(cloned_classifier, "classes_") False >>> classifier is cloned_classifier False """ if hasattr(estimator, "__sklearn_clone__") and not inspect.isclass(estimator): return estimator.__sklearn_clone__() return _clone_parametrized(estimator, safe=safe) def _clone_parametrized(estimator, *, safe=True): """Default implementation of clone. See :func:`sklearn.base.clone` for details.""" estimator_type = type(estimator) if estimator_type is dict: return {k: clone(v, safe=safe) for k, v in estimator.items()} elif estimator_type in (list, tuple, set, frozenset): return estimator_type([clone(e, safe=safe) for e in estimator]) elif not hasattr(estimator, "get_params") or isinstance(estimator, type): if not safe: return copy.deepcopy(estimator) else: if isinstance(estimator, type): raise TypeError( "Cannot clone object. 
" "You should provide an instance of " "scikit-learn estimator instead of a class." ) else: raise TypeError( "Cannot clone object '%s' (type %s): " "it does not seem to be a scikit-learn " "estimator as it does not implement a " "'get_params' method." % (repr(estimator), type(estimator)) ) klass = estimator.__class__ new_object_params = estimator.get_params(deep=False) for name, param in new_object_params.items(): new_object_params[name] = clone(param, safe=False) new_object = klass(**new_object_params) try: new_object._metadata_request = copy.deepcopy(estimator._metadata_request) except AttributeError: pass params_set = new_object.get_params(deep=False) # quick sanity check of the parameters of the clone for name in new_object_params: param1 = new_object_params[name] param2 = params_set[name] if param1 is not param2: raise RuntimeError( "Cannot clone object %s, as the constructor " "either does not set or modifies parameter %s" % (estimator, name) ) # _sklearn_output_config is used by `set_output` to configure the output # container of an estimator. if hasattr(estimator, "_sklearn_output_config"): new_object._sklearn_output_config = copy.deepcopy( estimator._sklearn_output_config ) return new_object class BaseEstimator(ReprHTMLMixin, _HTMLDocumentationLinkMixin, _MetadataRequester): """Base class for all estimators in scikit-learn. Inheriting from this class provides default implementations of: - setting and getting parameters used by `GridSearchCV` and friends; - textual and HTML representation displayed in terminals and IDEs; - estimator serialization; - parameters validation; - data validation; - feature names validation. Read more in the :ref:`User Guide <rolling_your_own_estimator>`. Notes ----- All estimators should specify all the parameters that can be set at the class level in their ``__init__`` as explicit keyword arguments (no ``*args`` or ``**kwargs``). Examples -------- >>> import numpy as np >>> from sklearn.base import BaseEstimator >>> class MyEstimator(BaseEstimator): ... def __init__(self, *, param=1): ... self.param = param ... def fit(self, X, y=None): ... self.is_fitted_ = True ... return self ... def predict(self, X): ... 
return np.full(shape=X.shape[0], fill_value=self.param) >>> estimator = MyEstimator(param=2) >>> estimator.get_params() {'param': 2} >>> X = np.array([[1, 2], [2, 3], [3, 4]]) >>> y = np.array([1, 0, 1]) >>> estimator.fit(X, y).predict(X) array([2, 2, 2]) >>> estimator.set_params(param=3).fit(X, y).predict(X) array([3, 3, 3]) """ def __dir__(self): # Filters conditional methods that should be hidden based # on the `available_if` decorator with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=FutureWarning) return [attr for attr in super().__dir__() if hasattr(self, attr)] _html_repr = estimator_html_repr @classmethod def _get_param_names(cls): """Get parameter names for the estimator""" # fetch the constructor or the original constructor before # deprecation wrapping if any init = getattr(cls.__init__, "deprecated_original", cls.__init__) if init is object.__init__: # No explicit constructor to introspect return [] # introspect the constructor arguments to find the model parameters # to represent init_signature = inspect.signature(init) # Consider the constructor parameters excluding 'self' parameters = [ p for p in init_signature.parameters.values() if p.name != "self" and p.kind != p.VAR_KEYWORD ] for p in parameters: if p.kind == p.VAR_POSITIONAL: raise RuntimeError( "scikit-learn estimators should always " "specify their parameters in the signature" " of their __init__ (no varargs)." " %s with constructor %s doesn't " " follow this convention." % (cls, init_signature) ) # Extract and sort argument names excluding 'self' return sorted([p.name for p in parameters]) def get_params(self, deep=True): """ Get parameters for this estimator. Parameters ---------- deep : bool, default=True If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- params : dict Parameter names mapped to their values. """ out = dict() for key in self._get_param_names(): value = getattr(self, key) if deep and hasattr(value, "get_params") and not isinstance(value, type): deep_items = value.get_params().items() out.update((key + "__" + k, val) for k, val in deep_items) out[key] = value return out def _get_params_html(self, deep=True, doc_link=""): """ Get parameters for this estimator with a specific HTML representation. Parameters ---------- deep : bool, default=True If True, will return the parameters for this estimator and contained subobjects that are estimators. doc_link : str URL to the estimator documentation. Used for linking to the estimator's parameters documentation available in HTML displays. Returns ------- params : ParamsDict Parameter names mapped to their values. We return a `ParamsDict` dictionary, which renders a specific HTML representation in table form. 
""" out = self.get_params(deep=deep) init_func = getattr(self.__init__, "deprecated_original", self.__init__) init_default_params = inspect.signature(init_func).parameters init_default_params = { name: param.default for name, param in init_default_params.items() } def is_non_default(param_name, param_value): """Finds the parameters that have been set by the user.""" if param_name not in init_default_params: # happens if k is part of a **kwargs return True if init_default_params[param_name] == inspect._empty: # k has no default value return True # avoid calling repr on nested estimators if isinstance(param_value, BaseEstimator) and type(param_value) is not type( init_default_params[param_name] ): return True if is_pandas_na(param_value) and not is_pandas_na( init_default_params[param_name] ): return True if not np.array_equal( param_value, init_default_params[param_name] ) and not ( is_scalar_nan(init_default_params[param_name]) and is_scalar_nan(param_value) ): return True return False # Sort parameters so non-default parameters are shown first unordered_params = { name: out[name] for name in init_default_params if name in out } unordered_params.update( { name: value for name, value in out.items() if name not in init_default_params } ) non_default_params, default_params = [], [] for name, value in unordered_params.items(): if is_non_default(name, value): non_default_params.append(name) else: default_params.append(name) params = {name: out[name] for name in non_default_params + default_params} return ParamsDict( params=params, non_default=tuple(non_default_params), estimator_class=self.__class__, doc_link=doc_link, ) def set_params(self, **params): """Set the parameters of this estimator. The method works on simple estimators as well as on nested objects (such as :class:`~sklearn.pipeline.Pipeline`). The latter have parameters of the form ``<component>__<parameter>`` so that it's possible to update each component of a nested object. Parameters ---------- **params : dict Estimator parameters. Returns ------- self : estimator instance Estimator instance. """ if not params: # Simple optimization to gain speed (inspect is slow) return self valid_params = self.get_params(deep=True) nested_params = defaultdict(dict) # grouped by prefix for key, value in params.items(): key, delim, sub_key = key.partition("__") if key not in valid_params: local_valid_params = self._get_param_names() raise ValueError( f"Invalid parameter {key!r} for estimator {self}. " f"Valid parameters are: {local_valid_params!r}." ) if delim: nested_params[key][sub_key] = value else: setattr(self, key, value) valid_params[key] = value for key, sub_params in nested_params.items(): valid_params[key].set_params(**sub_params) return self def __sklearn_clone__(self): return _clone_parametrized(self) def __repr__(self, N_CHAR_MAX=700): # N_CHAR_MAX is the (approximate) maximum number of non-blank # characters to render. We pass it as an optional parameter to ease # the tests. 
from sklearn.utils._pprint import _EstimatorPrettyPrinter N_MAX_ELEMENTS_TO_SHOW = 30 # number of elements to show in sequences # use ellipsis for sequences with a lot of elements pp = _EstimatorPrettyPrinter( compact=True, indent=1, indent_at_name=True, n_max_elements_to_show=N_MAX_ELEMENTS_TO_SHOW, ) repr_ = pp.pformat(self) # Use bruteforce ellipsis when there are a lot of non-blank characters n_nonblank = len("".join(repr_.split())) if n_nonblank > N_CHAR_MAX: lim = N_CHAR_MAX // 2 # apprx number of chars to keep on both ends regex = r"^(\s*\S){%d}" % lim # The regex '^(\s*\S){%d}' % n # matches from the start of the string until the nth non-blank # character: # - ^ matches the start of string # - (pattern){n} matches n repetitions of pattern # - \s*\S matches a non-blank char following zero or more blanks left_lim = re.match(regex, repr_).end() right_lim = re.match(regex, repr_[::-1]).end() if "\n" in repr_[left_lim:-right_lim]: # The left side and right side aren't on the same line. # To avoid weird cuts, e.g.: # categoric...ore', # we need to start the right side with an appropriate newline # character so that it renders properly as: # categoric... # handle_unknown='ignore', # so we add [^\n]*\n which matches until the next \n regex += r"[^\n]*\n" right_lim = re.match(regex, repr_[::-1]).end() ellipsis = "..." if left_lim + len(ellipsis) < len(repr_) - right_lim: # Only add ellipsis if it results in a shorter repr repr_ = repr_[:left_lim] + "..." + repr_[-right_lim:] return repr_ def __getstate__(self): if getattr(self, "__slots__", None): raise TypeError( "You cannot use `__slots__` in objects inheriting from " "`sklearn.base.BaseEstimator`." ) try: state = super().__getstate__() if state is None: # For Python 3.11+, empty instance (no `__slots__`, # and `__dict__`) will return a state equal to `None`. state = self.__dict__.copy() except AttributeError: # Python < 3.11 state = self.__dict__.copy() if type(self).__module__.startswith("sklearn."): return dict(state.items(), _sklearn_version=__version__) else: return state def __setstate__(self, state): if type(self).__module__.startswith("sklearn."): pickle_version = state.pop("_sklearn_version", "pre-0.18") if pickle_version != __version__: warnings.warn( InconsistentVersionWarning( estimator_name=self.__class__.__name__, current_sklearn_version=__version__, original_sklearn_version=pickle_version, ), ) try: super().__setstate__(state) except AttributeError: self.__dict__.update(state) def __sklearn_tags__(self): return Tags( estimator_type=None, target_tags=TargetTags(required=False), transformer_tags=None, regressor_tags=None, classifier_tags=None, ) def _validate_params(self): """Validate types and values of constructor parameters The expected type and values must be defined in the `_parameter_constraints` class attribute, which is a dictionary `param_name: list of constraints`. See the docstring of `validate_parameter_constraints` for a description of the accepted constraints. """ validate_parameter_constraints( self._parameter_constraints, self.get_params(deep=False), caller_name=self.__class__.__name__, ) class ClassifierMixin: """Mixin class for all classifiers in scikit-learn. This mixin defines the following functionality: - set estimator type to `"classifier"` through the `estimator_type` tag; - `score` method that default to :func:`~sklearn.metrics.accuracy_score`. - enforce that `fit` requires `y` to be passed through the `requires_y` tag, which is done by setting the classifier type tag. 
Read more in the :ref:`User Guide <rolling_your_own_estimator>`. Examples -------- >>> import numpy as np >>> from sklearn.base import BaseEstimator, ClassifierMixin >>> # Mixin classes should always be on the left-hand side for a correct MRO >>> class MyEstimator(ClassifierMixin, BaseEstimator): ... def __init__(self, *, param=1): ... self.param = param ... def fit(self, X, y=None): ... self.is_fitted_ = True ... return self ... def predict(self, X): ... return np.full(shape=X.shape[0], fill_value=self.param) >>> estimator = MyEstimator(param=1) >>> X = np.array([[1, 2], [2, 3], [3, 4]]) >>> y = np.array([1, 0, 1]) >>> estimator.fit(X, y).predict(X) array([1, 1, 1]) >>> estimator.score(X, y) 0.66... """ def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.estimator_type = "classifier" tags.classifier_tags = ClassifierTags() tags.target_tags.required = True return tags def score(self, X, y, sample_weight=None): """ Return :ref:`accuracy <accuracy_score>` on provided data and labels. In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. Parameters ---------- X : array-like of shape (n_samples, n_features) Test samples. y : array-like of shape (n_samples,) or (n_samples, n_outputs) True labels for `X`. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- score : float Mean accuracy of ``self.predict(X)`` w.r.t. `y`. """ from sklearn.metrics import accuracy_score return accuracy_score(y, self.predict(X), sample_weight=sample_weight) class RegressorMixin: """Mixin class for all regression estimators in scikit-learn. This mixin defines the following functionality: - set estimator type to `"regressor"` through the `estimator_type` tag; - `score` method that default to :func:`~sklearn.metrics.r2_score`. - enforce that `fit` requires `y` to be passed through the `requires_y` tag, which is done by setting the regressor type tag. Read more in the :ref:`User Guide <rolling_your_own_estimator>`. Examples -------- >>> import numpy as np >>> from sklearn.base import BaseEstimator, RegressorMixin >>> # Mixin classes should always be on the left-hand side for a correct MRO >>> class MyEstimator(RegressorMixin, BaseEstimator): ... def __init__(self, *, param=1): ... self.param = param ... def fit(self, X, y=None): ... self.is_fitted_ = True ... return self ... def predict(self, X): ... return np.full(shape=X.shape[0], fill_value=self.param) >>> estimator = MyEstimator(param=0) >>> X = np.array([[1, 2], [2, 3], [3, 4]]) >>> y = np.array([-1, 0, 1]) >>> estimator.fit(X, y).predict(X) array([0, 0, 0]) >>> estimator.score(X, y) 0.0 """ def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.estimator_type = "regressor" tags.regressor_tags = RegressorTags() tags.target_tags.required = True return tags def score(self, X, y, sample_weight=None): """Return :ref:`coefficient of determination <r2_score>` on test data. The coefficient of determination, :math:`R^2`, is defined as :math:`(1 - \\frac{u}{v})`, where :math:`u` is the residual sum of squares ``((y_true - y_pred)** 2).sum()`` and :math:`v` is the total sum of squares ``((y_true - y_true.mean()) ** 2).sum()``. The best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of `y`, disregarding the input features, would get a :math:`R^2` score of 0.0. 
Parameters ---------- X : array-like of shape (n_samples, n_features) Test samples. For some estimators this may be a precomputed kernel matrix or a list of generic objects instead with shape ``(n_samples, n_samples_fitted)``, where ``n_samples_fitted`` is the number of samples used in the fitting for the estimator. y : array-like of shape (n_samples,) or (n_samples, n_outputs) True values for `X`. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- score : float :math:`R^2` of ``self.predict(X)`` w.r.t. `y`. Notes ----- The :math:`R^2` score used when calling ``score`` on a regressor uses ``multioutput='uniform_average'`` from version 0.23 to keep consistent with default value of :func:`~sklearn.metrics.r2_score`. This influences the ``score`` method of all the multioutput regressors (except for :class:`~sklearn.multioutput.MultiOutputRegressor`). """ from sklearn.metrics import r2_score y_pred = self.predict(X) return r2_score(y, y_pred, sample_weight=sample_weight) class ClusterMixin: """Mixin class for all cluster estimators in scikit-learn. - set estimator type to `"clusterer"` through the `estimator_type` tag; - `fit_predict` method returning the cluster labels associated to each sample. Examples -------- >>> import numpy as np >>> from sklearn.base import BaseEstimator, ClusterMixin >>> class MyClusterer(ClusterMixin, BaseEstimator): ... def fit(self, X, y=None): ... self.labels_ = np.ones(shape=(len(X),), dtype=np.int64) ... return self >>> X = [[1, 2], [2, 3], [3, 4]] >>> MyClusterer().fit_predict(X) array([1, 1, 1]) """ def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.estimator_type = "clusterer" if tags.transformer_tags is not None: tags.transformer_tags.preserves_dtype = [] return tags def fit_predict(self, X, y=None, **kwargs): """ Perform clustering on `X` and returns cluster labels. Parameters ---------- X : array-like of shape (n_samples, n_features) Input data. y : Ignored Not used, present for API consistency by convention. **kwargs : dict Arguments to be passed to ``fit``. .. versionadded:: 1.4 Returns ------- labels : ndarray of shape (n_samples,), dtype=np.int64 Cluster labels. """ # non-optimized default implementation; override when a better # method is possible for a given clustering algorithm self.fit(X, **kwargs) return self.labels_ class BiclusterMixin: """Mixin class for all bicluster estimators in scikit-learn. This mixin defines the following functionality: - `biclusters_` property that returns the row and column indicators; - `get_indices` method that returns the row and column indices of a bicluster; - `get_shape` method that returns the shape of a bicluster; - `get_submatrix` method that returns the submatrix corresponding to a bicluster. Examples -------- >>> import numpy as np >>> from sklearn.base import BaseEstimator, BiclusterMixin >>> class DummyBiClustering(BiclusterMixin, BaseEstimator): ... def fit(self, X, y=None): ... self.rows_ = np.ones(shape=(1, X.shape[0]), dtype=bool) ... self.columns_ = np.ones(shape=(1, X.shape[1]), dtype=bool) ... return self >>> X = np.array([[1, 1], [2, 1], [1, 0], ... [4, 7], [3, 5], [3, 6]]) >>> bicluster = DummyBiClustering().fit(X) >>> hasattr(bicluster, "biclusters_") True >>> bicluster.get_indices(0) (array([0, 1, 2, 3, 4, 5]), array([0, 1])) """ @property def biclusters_(self): """Convenient way to get row and column indicators together. Returns the ``rows_`` and ``columns_`` members. 
""" return self.rows_, self.columns_ def get_indices(self, i): """Row and column indices of the `i`'th bicluster. Only works if ``rows_`` and ``columns_`` attributes exist. Parameters ---------- i : int The index of the cluster. Returns ------- row_ind : ndarray, dtype=np.intp Indices of rows in the dataset that belong to the bicluster. col_ind : ndarray, dtype=np.intp Indices of columns in the dataset that belong to the bicluster. """ rows = self.rows_[i] columns = self.columns_[i] return np.nonzero(rows)[0], np.nonzero(columns)[0] def get_shape(self, i): """Shape of the `i`'th bicluster. Parameters ---------- i : int The index of the cluster. Returns ------- n_rows : int Number of rows in the bicluster. n_cols : int Number of columns in the bicluster. """ indices = self.get_indices(i) return tuple(len(i) for i in indices) def get_submatrix(self, i, data): """Return the submatrix corresponding to bicluster `i`. Parameters ---------- i : int The index of the cluster. data : array-like of shape (n_samples, n_features) The data. Returns ------- submatrix : ndarray of shape (n_rows, n_cols) The submatrix corresponding to bicluster `i`. Notes ----- Works with sparse matrices. Only works if ``rows_`` and ``columns_`` attributes exist. """ data = check_array(data, accept_sparse="csr") row_ind, col_ind = self.get_indices(i) return data[row_ind[:, np.newaxis], col_ind] class TransformerMixin(_SetOutputMixin): """Mixin class for all transformers in scikit-learn. This mixin defines the following functionality: - a `fit_transform` method that delegates to `fit` and `transform`; - a `set_output` method to output `X` as a specific container type. If :term:`get_feature_names_out` is defined, then :class:`BaseEstimator` will automatically wrap `transform` and `fit_transform` to follow the `set_output` API. See the :ref:`developer_api_set_output` for details. :class:`OneToOneFeatureMixin` and :class:`ClassNamePrefixFeaturesOutMixin` are helpful mixins for defining :term:`get_feature_names_out`. Examples -------- >>> import numpy as np >>> from sklearn.base import BaseEstimator, TransformerMixin >>> class MyTransformer(TransformerMixin, BaseEstimator): ... def __init__(self, *, param=1): ... self.param = param ... def fit(self, X, y=None): ... return self ... def transform(self, X): ... return np.full(shape=len(X), fill_value=self.param) >>> transformer = MyTransformer() >>> X = [[1, 2], [2, 3], [3, 4]] >>> transformer.fit_transform(X) array([1, 1, 1]) """ def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.transformer_tags = TransformerTags() return tags def fit_transform(self, X, y=None, **fit_params): """ Fit to data, then transform it. Fits transformer to `X` and `y` with optional parameters `fit_params` and returns a transformed version of `X`. Parameters ---------- X : array-like of shape (n_samples, n_features) Input samples. y : array-like of shape (n_samples,) or (n_samples, n_outputs), \ default=None Target values (None for unsupervised transformations). **fit_params : dict Additional fit parameters. Pass only if the estimator accepts additional params in its `fit` method. Returns ------- X_new : ndarray array of shape (n_samples, n_features_new) Transformed array. """ # non-optimized default implementation; override when a better # method is possible for a given clustering algorithm # we do not route parameters here, since consumers don't route. 
But # since it's possible for a `transform` method to also consume # metadata, we check if that's the case, and we raise a warning telling # users that they should implement a custom `fit_transform` method # to forward metadata to `transform` as well. # # For that, we calculate routing and check if anything would be routed # to `transform` if we were to route them. if _routing_enabled(): transform_params = self.get_metadata_routing().consumes( method="transform", params=fit_params.keys() ) if transform_params: warnings.warn( ( f"This object ({self.__class__.__name__}) has a `transform`" " method which consumes metadata, but `fit_transform` does not" " forward metadata to `transform`. Please implement a custom" " `fit_transform` method to forward metadata to `transform` as" " well. Alternatively, you can explicitly do"
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
true
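The sklearn/base.py record above documents `clone` and the `get_params`/`set_params` parameter interface, including the nested `<component>__<parameter>` syntax. A minimal, hedged sketch of that behaviour follows; the choice of a Pipeline wrapping LogisticRegression is an arbitrary illustration, not taken from the record itself.

# Hedged illustration of clone / get_params / set_params from sklearn/base.py.
# The Pipeline + LogisticRegression combination is an assumed example setup.
from sklearn.base import clone
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

pipe = Pipeline([("scale", StandardScaler()), ("clf", LogisticRegression(C=0.5))])

# get_params(deep=True) exposes nested parameters with double-underscore keys.
print(pipe.get_params()["clf__C"])        # 0.5

# set_params routes "clf__C" to the nested LogisticRegression instance.
pipe.set_params(clf__C=2.0)

# clone returns a new, unfitted estimator carrying the same constructor parameters.
pipe_clone = clone(pipe)
print(pipe_clone.get_params()["clf__C"])  # 2.0
print(pipe_clone is pipe)                 # False

The double-underscore routing is what makes grid search over nested components possible, which is why `set_params` raises on unknown keys rather than silently ignoring them.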
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/naive_bayes.py
sklearn/naive_bayes.py
"""Naive Bayes algorithms. These are supervised learning methods based on applying Bayes' theorem with strong (naive) feature independence assumptions. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import warnings from abc import ABCMeta, abstractmethod from numbers import Integral, Real import numpy as np import sklearn.externals.array_api_extra as xpx from sklearn.base import BaseEstimator, ClassifierMixin, _fit_context from sklearn.preprocessing import LabelBinarizer, binarize, label_binarize from sklearn.utils._array_api import ( _average, _convert_to_numpy, _find_matching_floating_dtype, _isin, _logsumexp, get_namespace, get_namespace_and_device, size, ) from sklearn.utils._param_validation import Interval from sklearn.utils.extmath import safe_sparse_dot from sklearn.utils.multiclass import _check_partial_fit_first_call from sklearn.utils.validation import ( _check_n_features, _check_sample_weight, check_is_fitted, check_non_negative, validate_data, ) __all__ = [ "BernoulliNB", "CategoricalNB", "ComplementNB", "GaussianNB", "MultinomialNB", ] class _BaseNB(ClassifierMixin, BaseEstimator, metaclass=ABCMeta): """Abstract base class for naive Bayes estimators""" @abstractmethod def _joint_log_likelihood(self, X): """Compute the unnormalized posterior log probability of X I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of shape (n_samples, n_classes). Public methods predict, predict_proba, predict_log_proba, and predict_joint_log_proba pass the input through _check_X before handing it over to _joint_log_likelihood. The term "joint log likelihood" is used interchangibly with "joint log probability". """ @abstractmethod def _check_X(self, X): """To be overridden in subclasses with the actual checks. Only used in predict* methods. """ def predict_joint_log_proba(self, X): """Return joint log probability estimates for the test vector X. For each row x of X and class y, the joint log probability is given by ``log P(x, y) = log P(y) + log P(x|y),`` where ``log P(y)`` is the class prior probability and ``log P(x|y)`` is the class-conditional probability. Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Returns ------- C : ndarray of shape (n_samples, n_classes) Returns the joint log-probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute :term:`classes_`. """ check_is_fitted(self) X = self._check_X(X) return self._joint_log_likelihood(X) def predict(self, X): """ Perform classification on an array of test vectors X. Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Returns ------- C : ndarray of shape (n_samples,) Predicted target values for X. """ check_is_fitted(self) xp, _ = get_namespace(X) X = self._check_X(X) jll = self._joint_log_likelihood(X) pred_indices = xp.argmax(jll, axis=1) if isinstance(self.classes_[0], str): pred_indices = _convert_to_numpy(pred_indices, xp=xp) return self.classes_[pred_indices] def predict_log_proba(self, X): """ Return log-probability estimates for the test vector X. Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Returns ------- C : array-like of shape (n_samples, n_classes) Returns the log-probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute :term:`classes_`. 
""" check_is_fitted(self) xp, _ = get_namespace(X) X = self._check_X(X) jll = self._joint_log_likelihood(X) # normalize by P(x) = P(f_1, ..., f_n) log_prob_x = _logsumexp(jll, axis=1, xp=xp) return jll - xpx.atleast_nd(log_prob_x, ndim=2).T def predict_proba(self, X): """ Return probability estimates for the test vector X. Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Returns ------- C : array-like of shape (n_samples, n_classes) Returns the probability of the samples for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute :term:`classes_`. """ xp, _ = get_namespace(X) return xp.exp(self.predict_log_proba(X)) class GaussianNB(_BaseNB): """ Gaussian Naive Bayes (GaussianNB). Can perform online updates to model parameters via :meth:`partial_fit`. For details on algorithm used to update feature means and variance online, see `Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque <http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf>`_. Read more in the :ref:`User Guide <gaussian_naive_bayes>`. Parameters ---------- priors : array-like of shape (n_classes,), default=None Prior probabilities of the classes. If specified, the priors are not adjusted according to the data. var_smoothing : float, default=1e-9 Portion of the largest variance of all features that is added to variances for calculation stability. .. versionadded:: 0.20 Attributes ---------- class_count_ : ndarray of shape (n_classes,) number of training samples observed in each class. class_prior_ : ndarray of shape (n_classes,) probability of each class. classes_ : ndarray of shape (n_classes,) class labels known to the classifier. epsilon_ : float absolute additive value to variances. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 var_ : ndarray of shape (n_classes, n_features) Variance of each feature per class. .. versionadded:: 1.0 theta_ : ndarray of shape (n_classes, n_features) mean of each feature per class. See Also -------- BernoulliNB : Naive Bayes classifier for multivariate Bernoulli models. CategoricalNB : Naive Bayes classifier for categorical features. ComplementNB : Complement Naive Bayes classifier. MultinomialNB : Naive Bayes classifier for multinomial models. Examples -------- >>> import numpy as np >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> Y = np.array([1, 1, 1, 2, 2, 2]) >>> from sklearn.naive_bayes import GaussianNB >>> clf = GaussianNB() >>> clf.fit(X, Y) GaussianNB() >>> print(clf.predict([[-0.8, -1]])) [1] >>> clf_pf = GaussianNB() >>> clf_pf.partial_fit(X, Y, np.unique(Y)) GaussianNB() >>> print(clf_pf.predict([[-0.8, -1]])) [1] """ _parameter_constraints: dict = { "priors": ["array-like", None], "var_smoothing": [Interval(Real, 0, None, closed="left")], } def __init__(self, *, priors=None, var_smoothing=1e-9): self.priors = priors self.var_smoothing = var_smoothing @_fit_context(prefer_skip_nested_validation=True) def fit(self, X, y, sample_weight=None): """Fit Gaussian Naive Bayes according to X, y. Parameters ---------- X : array-like of shape (n_samples, n_features) Training vectors, where `n_samples` is the number of samples and `n_features` is the number of features. 
y : array-like of shape (n_samples,) Target values. sample_weight : array-like of shape (n_samples,), default=None Weights applied to individual samples (1. for unweighted). .. versionadded:: 0.17 Gaussian Naive Bayes supports fitting with *sample_weight*. Returns ------- self : object Returns the instance itself. """ y = validate_data(self, y=y) xp_y, _ = get_namespace(y) return self._partial_fit( X, y, xp_y.unique_values(y), _refit=True, sample_weight=sample_weight ) def _check_X(self, X): """Validate X, used only in predict* methods.""" return validate_data(self, X, reset=False) @staticmethod def _update_mean_variance(n_past, mu, var, X, sample_weight=None): """Compute online update of Gaussian mean and variance. Given starting sample count, mean, and variance, a new set of points X, and optionally sample weights, return the updated mean and variance. (NB - each dimension (column) in X is treated as independent -- you get variance, not covariance). Can take scalar mean and variance, or vector mean and variance to simultaneously update a number of independent Gaussians. See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque: http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf Parameters ---------- n_past : int Number of samples represented in old mean and variance. If sample weights were given, this should contain the sum of sample weights represented in old mean and variance. mu : array-like of shape (number of Gaussians,) Means for Gaussians in original set. var : array-like of shape (number of Gaussians,) Variances for Gaussians in original set. sample_weight : array-like of shape (n_samples,), default=None Weights applied to individual samples (1. for unweighted). Returns ------- total_mu : array-like of shape (number of Gaussians,) Updated mean for each Gaussian over the combined set. total_var : array-like of shape (number of Gaussians,) Updated variance for each Gaussian over the combined set. """ xp, _ = get_namespace(X) if X.shape[0] == 0: return mu, var # Compute (potentially weighted) mean and variance of new datapoints if sample_weight is not None: n_new = float(xp.sum(sample_weight)) if np.isclose(n_new, 0.0): return mu, var new_mu = _average(X, axis=0, weights=sample_weight, xp=xp) new_var = _average((X - new_mu) ** 2, axis=0, weights=sample_weight, xp=xp) else: n_new = X.shape[0] new_var = xp.var(X, axis=0) new_mu = xp.mean(X, axis=0) if n_past == 0: return new_mu, new_var n_total = float(n_past + n_new) # Combine mean of old and new data, taking into consideration # (weighted) number of observations total_mu = (n_new * new_mu + n_past * mu) / n_total # Combine variance of old and new data, taking into consideration # (weighted) number of observations. This is achieved by combining # the sum-of-squared-differences (ssd) old_ssd = n_past * var new_ssd = n_new * new_var total_ssd = old_ssd + new_ssd + (n_new * n_past / n_total) * (mu - new_mu) ** 2 total_var = total_ssd / n_total return total_mu, total_var @_fit_context(prefer_skip_nested_validation=True) def partial_fit(self, X, y, classes=None, sample_weight=None): """Incremental fit on a batch of samples. This method is expected to be called several times consecutively on different chunks of a dataset so as to implement out-of-core or online learning. This is especially useful when the whole dataset is too big to fit in memory at once. 
This method has some performance and numerical stability overhead, hence it is better to call partial_fit on chunks of data that are as large as possible (as long as fitting in the memory budget) to hide the overhead. Parameters ---------- X : array-like of shape (n_samples, n_features) Training vectors, where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like of shape (n_samples,) Target values. classes : array-like of shape (n_classes,), default=None List of all the classes that can possibly appear in the y vector. Must be provided at the first call to partial_fit, can be omitted in subsequent calls. sample_weight : array-like of shape (n_samples,), default=None Weights applied to individual samples (1. for unweighted). .. versionadded:: 0.17 Returns ------- self : object Returns the instance itself. """ return self._partial_fit( X, y, classes, _refit=False, sample_weight=sample_weight ) def _partial_fit(self, X, y, classes=None, _refit=False, sample_weight=None): """Actual implementation of Gaussian NB fitting. Parameters ---------- X : array-like of shape (n_samples, n_features) Training vectors, where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like of shape (n_samples,) Target values. classes : array-like of shape (n_classes,), default=None List of all the classes that can possibly appear in the y vector. Must be provided at the first call to partial_fit, can be omitted in subsequent calls. _refit : bool, default=False If true, act as though this were the first time we called _partial_fit (ie, throw away any past fitting and start over). sample_weight : array-like of shape (n_samples,), default=None Weights applied to individual samples (1. for unweighted). Returns ------- self : object """ if _refit: self.classes_ = None first_call = _check_partial_fit_first_call(self, classes) X, y = validate_data(self, X, y, reset=first_call) xp, _, device_ = get_namespace_and_device(X) float_dtype = _find_matching_floating_dtype(X, xp=xp) if sample_weight is not None: sample_weight = _check_sample_weight(sample_weight, X, dtype=float_dtype) xp_y, _ = get_namespace(y) # If the ratio of data variance between dimensions is too small, it # will cause numerical errors. To address this, we artificially # boost the variance by epsilon, a small fraction of the standard # deviation of the largest dimension. 
self.epsilon_ = self.var_smoothing * xp.max(xp.var(X, axis=0)) if first_call: # This is the first call to partial_fit: # initialize various cumulative counters n_features = X.shape[1] n_classes = self.classes_.shape[0] self.theta_ = xp.zeros( (n_classes, n_features), dtype=float_dtype, device=device_ ) self.var_ = xp.zeros( (n_classes, n_features), dtype=float_dtype, device=device_ ) self.class_count_ = xp.zeros(n_classes, dtype=float_dtype, device=device_) # Initialise the class prior # Take into account the priors if self.priors is not None: priors = xp.asarray(self.priors, dtype=float_dtype, device=device_) # Check that the provided prior matches the number of classes if priors.shape[0] != n_classes: raise ValueError("Number of priors must match number of classes.") # Check that the sum is 1 if not xpx.isclose(xp.sum(priors), 1.0): raise ValueError("The sum of the priors should be 1.") # Check that the priors are non-negative if xp.any(priors < 0): raise ValueError("Priors must be non-negative.") self.class_prior_ = priors else: # Initialize the priors to zeros for each class self.class_prior_ = xp.zeros( self.classes_.shape[0], dtype=float_dtype, device=device_ ) else: if X.shape[1] != self.theta_.shape[1]: msg = "Number of features %d does not match previous data %d." raise ValueError(msg % (X.shape[1], self.theta_.shape[1])) # Put epsilon back in each time self.var_[:, :] -= self.epsilon_ classes = self.classes_ unique_y = xp_y.unique_values(y) unique_y_in_classes = _isin(unique_y, classes, xp=xp_y) if not xp_y.all(unique_y_in_classes): raise ValueError( "The target label(s) %s in y do not exist in the initial classes %s" % (unique_y[~unique_y_in_classes], classes) ) for y_i in unique_y: i = int(xp_y.searchsorted(classes, y_i)) y_i_mask = xp.asarray(y == y_i, device=device_) X_i = X[y_i_mask] if sample_weight is not None: sw_i = sample_weight[y_i_mask] N_i = xp.sum(sw_i) else: sw_i = None N_i = X_i.shape[0] new_theta, new_sigma = self._update_mean_variance( self.class_count_[i], self.theta_[i, :], self.var_[i, :], X_i, sw_i ) self.theta_[i, :] = new_theta self.var_[i, :] = new_sigma self.class_count_[i] += N_i self.var_[:, :] += self.epsilon_ # Update if only no priors is provided if self.priors is None: # Empirical prior, with sample_weight taken into account self.class_prior_ = self.class_count_ / xp.sum(self.class_count_) return self def _joint_log_likelihood(self, X): xp, _ = get_namespace(X) joint_log_likelihood = [] for i in range(size(self.classes_)): jointi = xp.log(self.class_prior_[i]) n_ij = -0.5 * xp.sum(xp.log(2.0 * xp.pi * self.var_[i, :])) n_ij = n_ij - 0.5 * xp.sum( ((X - self.theta_[i, :]) ** 2) / (self.var_[i, :]), axis=1 ) joint_log_likelihood.append(jointi + n_ij) joint_log_likelihood = xp.stack(joint_log_likelihood).T return joint_log_likelihood def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.array_api_support = True return tags class _BaseDiscreteNB(_BaseNB): """Abstract base class for naive Bayes on discrete/categorical data Any estimator based on this class should provide: __init__ _joint_log_likelihood(X) as per _BaseNB _update_feature_log_prob(alpha) _count(X, Y) """ _parameter_constraints: dict = { "alpha": [Interval(Real, 0, None, closed="left"), "array-like"], "fit_prior": ["boolean"], "class_prior": ["array-like", None], "force_alpha": ["boolean"], } def __init__(self, alpha=1.0, fit_prior=True, class_prior=None, force_alpha=True): self.alpha = alpha self.fit_prior = fit_prior self.class_prior = class_prior self.force_alpha = 
force_alpha @abstractmethod def _count(self, X, Y): """Update counts that are used to calculate probabilities. The counts make up a sufficient statistic extracted from the data. Accordingly, this method is called each time `fit` or `partial_fit` update the model. `class_count_` and `feature_count_` must be updated here along with any model specific counts. Parameters ---------- X : {ndarray, sparse matrix} of shape (n_samples, n_features) The input samples. Y : ndarray of shape (n_samples, n_classes) Binarized class labels. """ @abstractmethod def _update_feature_log_prob(self, alpha): """Update feature log probabilities based on counts. This method is called each time `fit` or `partial_fit` update the model. Parameters ---------- alpha : float smoothing parameter. See :meth:`_check_alpha`. """ def _check_X(self, X): """Validate X, used only in predict* methods.""" return validate_data(self, X, accept_sparse="csr", reset=False) def _check_X_y(self, X, y, reset=True): """Validate X and y in fit methods.""" return validate_data(self, X, y, accept_sparse="csr", reset=reset) def _update_class_log_prior(self, class_prior=None): """Update class log priors. The class log priors are based on `class_prior`, class count or the number of classes. This method is called each time `fit` or `partial_fit` update the model. """ n_classes = len(self.classes_) if class_prior is not None: if len(class_prior) != n_classes: raise ValueError("Number of priors must match number of classes.") self.class_log_prior_ = np.log(class_prior) elif self.fit_prior: with warnings.catch_warnings(): # silence the warning when count is 0 because class was not yet # observed warnings.simplefilter("ignore", RuntimeWarning) log_class_count = np.log(self.class_count_) # empirical prior, with sample_weight taken into account self.class_log_prior_ = log_class_count - np.log(self.class_count_.sum()) else: self.class_log_prior_ = np.full(n_classes, -np.log(n_classes)) def _check_alpha(self): alpha = ( np.asarray(self.alpha) if not isinstance(self.alpha, Real) else self.alpha ) alpha_min = np.min(alpha) if isinstance(alpha, np.ndarray): if not alpha.shape[0] == self.n_features_in_: raise ValueError( "When alpha is an array, it should contains `n_features`. " f"Got {alpha.shape[0]} elements instead of {self.n_features_in_}." ) # check that all alpha are positive if alpha_min < 0: raise ValueError("All values in alpha must be greater than 0.") alpha_lower_bound = 1e-10 if alpha_min < alpha_lower_bound and not self.force_alpha: warnings.warn( "alpha too small will result in numeric errors, setting alpha =" f" {alpha_lower_bound:.1e}. Use `force_alpha=True` to keep alpha" " unchanged." ) return np.maximum(alpha, alpha_lower_bound) return alpha @_fit_context(prefer_skip_nested_validation=True) def partial_fit(self, X, y, classes=None, sample_weight=None): """Incremental fit on a batch of samples. This method is expected to be called several times consecutively on different chunks of a dataset so as to implement out-of-core or online learning. This is especially useful when the whole dataset is too big to fit in memory at once. This method has some performance overhead hence it is better to call partial_fit on chunks of data that are as large as possible (as long as fitting in the memory budget) to hide the overhead. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where `n_samples` is the number of samples and `n_features` is the number of features. 
y : array-like of shape (n_samples,) Target values. classes : array-like of shape (n_classes,), default=None List of all the classes that can possibly appear in the y vector. Must be provided at the first call to partial_fit, can be omitted in subsequent calls. sample_weight : array-like of shape (n_samples,), default=None Weights applied to individual samples (1. for unweighted). Returns ------- self : object Returns the instance itself. """ first_call = not hasattr(self, "classes_") X, y = self._check_X_y(X, y, reset=first_call) _, n_features = X.shape if _check_partial_fit_first_call(self, classes): # This is the first call to partial_fit: # initialize various cumulative counters n_classes = len(classes) self._init_counters(n_classes, n_features) Y = label_binarize(y, classes=self.classes_) if Y.shape[1] == 1: if len(self.classes_) == 2: Y = np.concatenate((1 - Y, Y), axis=1) else: # degenerate case: just one class Y = np.ones_like(Y) if X.shape[0] != Y.shape[0]: msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible." raise ValueError(msg % (X.shape[0], y.shape[0])) # label_binarize() returns arrays with dtype=np.int64. # We convert it to np.float64 to support sample_weight consistently Y = Y.astype(np.float64, copy=False) if sample_weight is not None: sample_weight = _check_sample_weight(sample_weight, X) sample_weight = np.atleast_2d(sample_weight) Y *= sample_weight.T class_prior = self.class_prior # Count raw events from data before updating the class log prior # and feature log probas self._count(X, Y) # XXX: OPTIM: we could introduce a public finalization method to # be called by the user explicitly just once after several consecutive # calls to partial_fit and prior any call to predict[_[log_]proba] # to avoid computing the smooth log probas at each call to partial fit alpha = self._check_alpha() self._update_feature_log_prob(alpha) self._update_class_log_prior(class_prior=class_prior) return self @_fit_context(prefer_skip_nested_validation=True) def fit(self, X, y, sample_weight=None): """Fit Naive Bayes classifier according to X, y. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where `n_samples` is the number of samples and `n_features` is the number of features. y : array-like of shape (n_samples,) Target values. sample_weight : array-like of shape (n_samples,), default=None Weights applied to individual samples (1. for unweighted). Returns ------- self : object Returns the instance itself. """ X, y = self._check_X_y(X, y) _, n_features = X.shape labelbin = LabelBinarizer() Y = labelbin.fit_transform(y) self.classes_ = labelbin.classes_ if Y.shape[1] == 1: if len(self.classes_) == 2: Y = np.concatenate((1 - Y, Y), axis=1) else: # degenerate case: just one class Y = np.ones_like(Y) # LabelBinarizer().fit_transform() returns arrays with dtype=np.int64. 
# We convert it to np.float64 to support sample_weight consistently; # this means we also don't have to cast X to floating point if sample_weight is not None: Y = Y.astype(np.float64, copy=False) sample_weight = _check_sample_weight(sample_weight, X) sample_weight = np.atleast_2d(sample_weight) Y *= sample_weight.T class_prior = self.class_prior # Count raw events from data before updating the class log prior # and feature log probas n_classes = Y.shape[1] self._init_counters(n_classes, n_features) self._count(X, Y) alpha = self._check_alpha() self._update_feature_log_prob(alpha) self._update_class_log_prior(class_prior=class_prior) return self def _init_counters(self, n_classes, n_features): self.class_count_ = np.zeros(n_classes, dtype=np.float64) self.feature_count_ = np.zeros((n_classes, n_features), dtype=np.float64) def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.input_tags.sparse = True tags.classifier_tags.poor_score = True return tags class MultinomialNB(_BaseDiscreteNB): """ Naive Bayes classifier for multinomial models. The multinomial Naive Bayes classifier is suitable for classification with discrete features (e.g., word counts for text classification). The multinomial distribution normally requires integer feature counts. However, in practice, fractional counts such as tf-idf may also work. Read more in the :ref:`User Guide <multinomial_naive_bayes>`. Parameters ---------- alpha : float or array-like of shape (n_features,), default=1.0 Additive (Laplace/Lidstone) smoothing parameter (set alpha=0 and force_alpha=True, for no smoothing). force_alpha : bool, default=True If False and alpha is less than 1e-10, it will set alpha to 1e-10. If True, alpha will remain unchanged. This may cause numerical errors if alpha is too close to 0. .. versionadded:: 1.2 .. versionchanged:: 1.4 The default value of `force_alpha` changed to `True`. fit_prior : bool, default=True Whether to learn class prior probabilities or not. If false, a uniform prior will be used. class_prior : array-like of shape (n_classes,), default=None Prior probabilities of the classes. If specified, the priors are not adjusted according to the data. Attributes ---------- class_count_ : ndarray of shape (n_classes,) Number of samples encountered for each class during fitting. This value is weighted by the sample weight when provided. class_log_prior_ : ndarray of shape (n_classes,) Smoothed empirical log probability for each class. classes_ : ndarray of shape (n_classes,) Class labels known to the classifier feature_count_ : ndarray of shape (n_classes, n_features) Number of samples encountered for each (class, feature) during fitting. This value is weighted by the sample weight when provided. feature_log_prob_ : ndarray of shape (n_classes, n_features) Empirical log probability of features given a class, ``P(x_i|y)``. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- BernoulliNB : Naive Bayes classifier for multivariate Bernoulli models. CategoricalNB : Naive Bayes classifier for categorical features. ComplementNB : Complement Naive Bayes classifier. GaussianNB : Gaussian Naive Bayes. References ---------- C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to Information Retrieval. Cambridge University Press, pp. 234-265. 
    https://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html

    Examples
    --------
    >>> import numpy as np
    >>> rng = np.random.RandomState(1)
    >>> X = rng.randint(5, size=(6, 100))
    >>> y = np.array([1, 2, 3, 4, 5, 6])
    >>> from sklearn.naive_bayes import MultinomialNB
    >>> clf = MultinomialNB()
    >>> clf.fit(X, y)
    MultinomialNB()
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
true
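The sklearn/naive_bayes.py record above explains that `GaussianNB.partial_fit` supports out-of-core learning by combining per-class means and variances with the Chan/Golub/LeVeque online update. A small, hedged sketch of that usage follows; the synthetic two-class data and the chunk size of 25 are assumptions made purely for illustration.

# Hedged sketch: incremental GaussianNB fitting over chunks should recover
# essentially the same per-class means as a single batch fit.
import numpy as np
from sklearn.naive_bayes import GaussianNB

rng = np.random.RandomState(0)
X = np.vstack([rng.normal(0, 1, size=(100, 3)), rng.normal(3, 1, size=(100, 3))])
y = np.array([0] * 100 + [1] * 100)

full = GaussianNB().fit(X, y)

online = GaussianNB()
for start in range(0, X.shape[0], 25):
    chunk = slice(start, start + 25)
    # classes must be supplied on the first call to partial_fit
    online.partial_fit(X[chunk], y[chunk], classes=np.array([0, 1]))

# The incrementally accumulated class means match the batch estimates.
print(np.allclose(full.theta_, online.theta_))  # True

As the docstring notes, larger chunks amortise the per-call validation and smoothing overhead, so chunk size should be as large as the memory budget allows.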
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/_distributor_init.py
sklearn/_distributor_init.py
"""Distributor init file Distributors: you can add custom code here to support particular distributions of scikit-learn. For example, this is a good place to put any checks for hardware requirements. The scikit-learn standard source distribution will not put code in this file, so you can safely replace this file with your own version. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
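The _distributor_init.py record above invites distributors to add environment or hardware checks. A hedged sketch of the kind of check a distributor might place there follows; the specific requirement (a 64-bit interpreter) is a hypothetical example, not something the file itself mandates.

# Hypothetical distributor check; the 64-bit requirement is an assumed example.
import platform
import warnings

if platform.architecture()[0] != "64bit":
    warnings.warn(
        "This scikit-learn build was packaged for 64-bit interpreters; "
        "some compiled extensions may not load correctly.",
        RuntimeWarning,
    )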
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/gaussian_process/_gpr.py
sklearn/gaussian_process/_gpr.py
"""Gaussian processes regression.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import warnings from numbers import Integral, Real from operator import itemgetter import numpy as np import scipy.optimize from scipy.linalg import cho_solve, cholesky, solve_triangular from sklearn.base import ( BaseEstimator, MultiOutputMixin, RegressorMixin, _fit_context, clone, ) from sklearn.gaussian_process.kernels import RBF, Kernel from sklearn.gaussian_process.kernels import ConstantKernel as C from sklearn.preprocessing._data import _handle_zeros_in_scale from sklearn.utils import check_random_state from sklearn.utils._param_validation import Interval, StrOptions from sklearn.utils.optimize import _check_optimize_result from sklearn.utils.validation import validate_data GPR_CHOLESKY_LOWER = True class GaussianProcessRegressor(MultiOutputMixin, RegressorMixin, BaseEstimator): """Gaussian process regression (GPR). The implementation is based on Algorithm 2.1 of [RW2006]_. In addition to standard scikit-learn estimator API, :class:`GaussianProcessRegressor`: * allows prediction without prior fitting (based on the GP prior) * provides an additional method `sample_y(X)`, which evaluates samples drawn from the GPR (prior or posterior) at given inputs * exposes a method `log_marginal_likelihood(theta)`, which can be used externally for other ways of selecting hyperparameters, e.g., via Markov chain Monte Carlo. To learn the difference between a point-estimate approach vs. a more Bayesian modelling approach, refer to the example entitled :ref:`sphx_glr_auto_examples_gaussian_process_plot_compare_gpr_krr.py`. Read more in the :ref:`User Guide <gaussian_process>`. .. versionadded:: 0.18 Parameters ---------- kernel : kernel instance, default=None The kernel specifying the covariance function of the GP. If None is passed, the kernel ``ConstantKernel(1.0, constant_value_bounds="fixed") * RBF(1.0, length_scale_bounds="fixed")`` is used as default. Note that the kernel hyperparameters are optimized during fitting unless the bounds are marked as "fixed". alpha : float or ndarray of shape (n_samples,), default=1e-10 Value added to the diagonal of the kernel matrix during fitting. This can prevent a potential numerical issue during fitting, by ensuring that the calculated values form a positive definite matrix. It can also be interpreted as the variance of additional Gaussian measurement noise on the training observations. Note that this is different from using a `WhiteKernel`. If an array is passed, it must have the same number of entries as the data used for fitting and is used as datapoint-dependent noise level. Allowing to specify the noise level directly as a parameter is mainly for convenience and for consistency with :class:`~sklearn.linear_model.Ridge`. For an example illustrating how the alpha parameter controls the noise variance in Gaussian Process Regression, see :ref:`sphx_glr_auto_examples_gaussian_process_plot_gpr_noisy_targets.py`. optimizer : "fmin_l_bfgs_b", callable or None, default="fmin_l_bfgs_b" Can either be one of the internally supported optimizers for optimizing the kernel's parameters, specified by a string, or an externally defined optimizer passed as a callable. 
If a callable is passed, it must have the signature:: def optimizer(obj_func, initial_theta, bounds): # * 'obj_func': the objective function to be minimized, which # takes the hyperparameters theta as a parameter and an # optional flag eval_gradient, which determines if the # gradient is returned additionally to the function value # * 'initial_theta': the initial value for theta, which can be # used by local optimizers # * 'bounds': the bounds on the values of theta .... # Returned are the best found hyperparameters theta and # the corresponding value of the target function. return theta_opt, func_min Per default, the L-BFGS-B algorithm from `scipy.optimize.minimize` is used. If None is passed, the kernel's parameters are kept fixed. Available internal optimizers are: `{'fmin_l_bfgs_b'}`. n_restarts_optimizer : int, default=0 The number of restarts of the optimizer for finding the kernel's parameters which maximize the log-marginal likelihood. The first run of the optimizer is performed from the kernel's initial parameters, the remaining ones (if any) from thetas sampled log-uniform randomly from the space of allowed theta-values. If greater than 0, all bounds must be finite. Note that `n_restarts_optimizer == 0` implies that one run is performed. normalize_y : bool, default=False Whether or not to normalize the target values `y` by removing the mean and scaling to unit-variance. This is recommended for cases where zero-mean, unit-variance priors are used. Note that, in this implementation, the normalisation is reversed before the GP predictions are reported. .. versionchanged:: 0.23 copy_X_train : bool, default=True If True, a persistent copy of the training data is stored in the object. Otherwise, just a reference to the training data is stored, which might cause predictions to change if the data is modified externally. n_targets : int, default=None The number of dimensions of the target values. Used to decide the number of outputs when sampling from the prior distributions (i.e. calling :meth:`sample_y` before :meth:`fit`). This parameter is ignored once :meth:`fit` has been called. .. versionadded:: 1.3 random_state : int, RandomState instance or None, default=None Determines random number generation used to initialize the centers. Pass an int for reproducible results across multiple function calls. See :term:`Glossary <random_state>`. Attributes ---------- X_train_ : array-like of shape (n_samples, n_features) or list of object Feature vectors or other representations of training data (also required for prediction). y_train_ : array-like of shape (n_samples,) or (n_samples, n_targets) Target values in training data (also required for prediction). kernel_ : kernel instance The kernel used for prediction. The structure of the kernel is the same as the one passed as parameter but with optimized hyperparameters. L_ : array-like of shape (n_samples, n_samples) Lower-triangular Cholesky decomposition of the kernel in ``X_train_``. alpha_ : array-like of shape (n_samples,) Dual coefficients of training data points in kernel space. log_marginal_likelihood_value_ : float The log-marginal-likelihood of ``self.kernel_.theta``. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. 
versionadded:: 1.0 See Also -------- GaussianProcessClassifier : Gaussian process classification (GPC) based on Laplace approximation. References ---------- .. [RW2006] `Carl E. Rasmussen and Christopher K.I. Williams, "Gaussian Processes for Machine Learning", MIT Press 2006 <https://www.gaussianprocess.org/gpml/chapters/RW.pdf>`_ Examples -------- >>> from sklearn.datasets import make_friedman2 >>> from sklearn.gaussian_process import GaussianProcessRegressor >>> from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0) >>> kernel = DotProduct() + WhiteKernel() >>> gpr = GaussianProcessRegressor(kernel=kernel, ... random_state=0).fit(X, y) >>> gpr.score(X, y) 0.3680... >>> gpr.predict(X[:2,:], return_std=True) (array([653.0, 592.1]), array([316.6, 316.6])) """ _parameter_constraints: dict = { "kernel": [None, Kernel], "alpha": [Interval(Real, 0, None, closed="left"), np.ndarray], "optimizer": [StrOptions({"fmin_l_bfgs_b"}), callable, None], "n_restarts_optimizer": [Interval(Integral, 0, None, closed="left")], "normalize_y": ["boolean"], "copy_X_train": ["boolean"], "n_targets": [Interval(Integral, 1, None, closed="left"), None], "random_state": ["random_state"], } def __init__( self, kernel=None, *, alpha=1e-10, optimizer="fmin_l_bfgs_b", n_restarts_optimizer=0, normalize_y=False, copy_X_train=True, n_targets=None, random_state=None, ): self.kernel = kernel self.alpha = alpha self.optimizer = optimizer self.n_restarts_optimizer = n_restarts_optimizer self.normalize_y = normalize_y self.copy_X_train = copy_X_train self.n_targets = n_targets self.random_state = random_state @_fit_context(prefer_skip_nested_validation=True) def fit(self, X, y): """Fit Gaussian process regression model. Parameters ---------- X : array-like of shape (n_samples, n_features) or list of object Feature vectors or other representations of training data. y : array-like of shape (n_samples,) or (n_samples, n_targets) Target values. Returns ------- self : object GaussianProcessRegressor class instance. """ if self.kernel is None: # Use an RBF kernel as default self.kernel_ = C(1.0, constant_value_bounds="fixed") * RBF( 1.0, length_scale_bounds="fixed" ) else: self.kernel_ = clone(self.kernel) self._rng = check_random_state(self.random_state) if self.kernel_.requires_vector_input: dtype, ensure_2d = "numeric", True else: dtype, ensure_2d = None, False X, y = validate_data( self, X, y, multi_output=True, y_numeric=True, ensure_2d=ensure_2d, dtype=dtype, ) n_targets_seen = y.shape[1] if y.ndim > 1 else 1 if self.n_targets is not None and n_targets_seen != self.n_targets: raise ValueError( "The number of targets seen in `y` is different from the parameter " f"`n_targets`. Got {n_targets_seen} != {self.n_targets}." ) # Normalize target value if self.normalize_y: self._y_train_mean = np.mean(y, axis=0) self._y_train_std = _handle_zeros_in_scale(np.std(y, axis=0), copy=False) # Remove mean and make unit variance y = (y - self._y_train_mean) / self._y_train_std else: shape_y_stats = (y.shape[1],) if y.ndim == 2 else 1 self._y_train_mean = np.zeros(shape=shape_y_stats) self._y_train_std = np.ones(shape=shape_y_stats) if np.iterable(self.alpha) and self.alpha.shape[0] != y.shape[0]: if self.alpha.shape[0] == 1: self.alpha = self.alpha[0] else: raise ValueError( "alpha must be a scalar or an array with same number of " f"entries as y. 
({self.alpha.shape[0]} != {y.shape[0]})" ) self.X_train_ = np.copy(X) if self.copy_X_train else X self.y_train_ = np.copy(y) if self.copy_X_train else y if self.optimizer is not None and self.kernel_.n_dims > 0: # Choose hyperparameters based on maximizing the log-marginal # likelihood (potentially starting from several initial values) def obj_func(theta, eval_gradient=True): if eval_gradient: lml, grad = self.log_marginal_likelihood( theta, eval_gradient=True, clone_kernel=False ) return -lml, -grad else: return -self.log_marginal_likelihood(theta, clone_kernel=False) # First optimize starting from theta specified in kernel optima = [ ( self._constrained_optimization( obj_func, self.kernel_.theta, self.kernel_.bounds ) ) ] # Additional runs are performed from log-uniform chosen initial # theta if self.n_restarts_optimizer > 0: if not np.isfinite(self.kernel_.bounds).all(): raise ValueError( "Multiple optimizer restarts (n_restarts_optimizer>0) " "requires that all bounds are finite." ) bounds = self.kernel_.bounds for iteration in range(self.n_restarts_optimizer): theta_initial = self._rng.uniform(bounds[:, 0], bounds[:, 1]) optima.append( self._constrained_optimization(obj_func, theta_initial, bounds) ) # Select result from run with minimal (negative) log-marginal # likelihood lml_values = list(map(itemgetter(1), optima)) self.kernel_.theta = optima[np.argmin(lml_values)][0] self.kernel_._check_bounds_params() self.log_marginal_likelihood_value_ = -np.min(lml_values) else: self.log_marginal_likelihood_value_ = self.log_marginal_likelihood( self.kernel_.theta, clone_kernel=False ) # Precompute quantities required for predictions which are independent # of actual query points # Alg. 2.1, page 19, line 2 -> L = cholesky(K + sigma^2 I) K = self.kernel_(self.X_train_) K[np.diag_indices_from(K)] += self.alpha try: self.L_ = cholesky(K, lower=GPR_CHOLESKY_LOWER, check_finite=False) except np.linalg.LinAlgError as exc: exc.args = ( ( f"The kernel, {self.kernel_}, is not returning a positive " "definite matrix. Try gradually increasing the 'alpha' " "parameter of your GaussianProcessRegressor estimator." ), ) + exc.args raise # Alg 2.1, page 19, line 3 -> alpha = L^T \ (L \ y) self.alpha_ = cho_solve( (self.L_, GPR_CHOLESKY_LOWER), self.y_train_, check_finite=False, ) return self def predict(self, X, return_std=False, return_cov=False): """Predict using the Gaussian process regression model. We can also predict based on an unfitted model by using the GP prior. In addition to the mean of the predictive distribution, optionally also returns its standard deviation (`return_std=True`) or covariance (`return_cov=True`). Note that at most one of the two can be requested. Parameters ---------- X : array-like of shape (n_samples, n_features) or list of object Query points where the GP is evaluated. return_std : bool, default=False If True, the standard-deviation of the predictive distribution at the query points is returned along with the mean. return_cov : bool, default=False If True, the covariance of the joint predictive distribution at the query points is returned along with the mean. Returns ------- y_mean : ndarray of shape (n_samples,) or (n_samples, n_targets) Mean of predictive distribution at query points. y_std : ndarray of shape (n_samples,) or (n_samples, n_targets), optional Standard deviation of predictive distribution at query points. Only returned when `return_std` is True. 
y_cov : ndarray of shape (n_samples, n_samples) or \ (n_samples, n_samples, n_targets), optional Covariance of joint predictive distribution at query points. Only returned when `return_cov` is True. """ if return_std and return_cov: raise RuntimeError( "At most one of return_std or return_cov can be requested." ) if self.kernel is None or self.kernel.requires_vector_input: dtype, ensure_2d = "numeric", True else: dtype, ensure_2d = None, False X = validate_data(self, X, ensure_2d=ensure_2d, dtype=dtype, reset=False) if not hasattr(self, "X_train_"): # Unfitted;predict based on GP prior if self.kernel is None: kernel = C(1.0, constant_value_bounds="fixed") * RBF( 1.0, length_scale_bounds="fixed" ) else: kernel = self.kernel n_targets = self.n_targets if self.n_targets is not None else 1 y_mean = np.zeros(shape=(X.shape[0], n_targets)).squeeze() if return_cov: y_cov = kernel(X) if n_targets > 1: y_cov = np.repeat( np.expand_dims(y_cov, -1), repeats=n_targets, axis=-1 ) return y_mean, y_cov elif return_std: y_var = kernel.diag(X) if n_targets > 1: y_var = np.repeat( np.expand_dims(y_var, -1), repeats=n_targets, axis=-1 ) return y_mean, np.sqrt(y_var) else: return y_mean else: # Predict based on GP posterior # Alg 2.1, page 19, line 4 -> f*_bar = K(X_test, X_train) . alpha K_trans = self.kernel_(X, self.X_train_) y_mean = K_trans @ self.alpha_ # undo normalisation y_mean = self._y_train_std * y_mean + self._y_train_mean # if y_mean has shape (n_samples, 1), reshape to (n_samples,) if y_mean.ndim > 1 and y_mean.shape[1] == 1: y_mean = np.squeeze(y_mean, axis=1) if not return_cov and not return_std: return y_mean # Alg 2.1, page 19, line 5 -> v = L \ K(X_test, X_train)^T V = solve_triangular( self.L_, K_trans.T, lower=GPR_CHOLESKY_LOWER, check_finite=False ) if return_cov: # Alg 2.1, page 19, line 6 -> K(X_test, X_test) - v^T. v y_cov = self.kernel_(X) - V.T @ V # undo normalisation y_cov = np.outer(y_cov, self._y_train_std**2).reshape(*y_cov.shape, -1) # if y_cov has shape (n_samples, n_samples, 1), reshape to # (n_samples, n_samples) if y_cov.shape[2] == 1: y_cov = np.squeeze(y_cov, axis=2) return y_mean, y_cov else: # return_std # Compute variance of predictive distribution # Use einsum to avoid explicitly forming the large matrix # V^T @ V just to extract its diagonal afterward. y_var = self.kernel_.diag(X).copy() y_var -= np.einsum("ij,ji->i", V.T, V) # Check if any of the variances is negative because of # numerical issues. If yes: set the variance to 0. y_var_negative = y_var < 0 if np.any(y_var_negative): warnings.warn( "Predicted variances smaller than 0. " "Setting those variances to 0." ) y_var[y_var_negative] = 0.0 # undo normalisation y_var = np.outer(y_var, self._y_train_std**2).reshape(*y_var.shape, -1) # if y_var has shape (n_samples, 1), reshape to (n_samples,) if y_var.shape[1] == 1: y_var = np.squeeze(y_var, axis=1) return y_mean, np.sqrt(y_var) def sample_y(self, X, n_samples=1, random_state=0): """Draw samples from Gaussian process and evaluate at X. Parameters ---------- X : array-like of shape (n_samples_X, n_features) or list of object Query points where the GP is evaluated. n_samples : int, default=1 Number of samples drawn from the Gaussian process per query point. random_state : int, RandomState instance or None, default=0 Determines random number generation to randomly draw samples. Pass an int for reproducible results across multiple function calls. See :term:`Glossary <random_state>`. 
Returns ------- y_samples : ndarray of shape (n_samples_X, n_samples), or \ (n_samples_X, n_targets, n_samples) Values of n_samples samples drawn from Gaussian process and evaluated at query points. """ rng = check_random_state(random_state) y_mean, y_cov = self.predict(X, return_cov=True) if y_mean.ndim == 1: y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T else: y_samples = [ rng.multivariate_normal( y_mean[:, target], y_cov[..., target], n_samples ).T[:, np.newaxis] for target in range(y_mean.shape[1]) ] y_samples = np.hstack(y_samples) return y_samples def log_marginal_likelihood( self, theta=None, eval_gradient=False, clone_kernel=True ): """Return log-marginal likelihood of theta for training data. Parameters ---------- theta : array-like of shape (n_kernel_params,) default=None Kernel hyperparameters for which the log-marginal likelihood is evaluated. If None, the precomputed log_marginal_likelihood of ``self.kernel_.theta`` is returned. eval_gradient : bool, default=False If True, the gradient of the log-marginal likelihood with respect to the kernel hyperparameters at position theta is returned additionally. If True, theta must not be None. clone_kernel : bool, default=True If True, the kernel attribute is copied. If False, the kernel attribute is modified, but may result in a performance improvement. Returns ------- log_likelihood : float Log-marginal likelihood of theta for training data. log_likelihood_gradient : ndarray of shape (n_kernel_params,), optional Gradient of the log-marginal likelihood with respect to the kernel hyperparameters at position theta. Only returned when eval_gradient is True. """ if theta is None: if eval_gradient: raise ValueError("Gradient can only be evaluated for theta!=None") return self.log_marginal_likelihood_value_ if clone_kernel: kernel = self.kernel_.clone_with_theta(theta) else: kernel = self.kernel_ kernel.theta = theta if eval_gradient: K, K_gradient = kernel(self.X_train_, eval_gradient=True) else: K = kernel(self.X_train_) # Alg. 2.1, page 19, line 2 -> L = cholesky(K + sigma^2 I) K[np.diag_indices_from(K)] += self.alpha try: L = cholesky(K, lower=GPR_CHOLESKY_LOWER, check_finite=False) except np.linalg.LinAlgError: return (-np.inf, np.zeros_like(theta)) if eval_gradient else -np.inf # Support multi-dimensional output of self.y_train_ y_train = self.y_train_ if y_train.ndim == 1: y_train = y_train[:, np.newaxis] # Alg 2.1, page 19, line 3 -> alpha = L^T \ (L \ y) alpha = cho_solve((L, GPR_CHOLESKY_LOWER), y_train, check_finite=False) # Alg 2.1, page 19, line 7 # -0.5 . y^T . alpha - sum(log(diag(L))) - n_samples / 2 log(2*pi) # y is originally thought to be a (1, n_samples) row vector. However, # in multioutputs, y is of shape (n_samples, 2) and we need to compute # y^T . alpha for each output, independently using einsum. Thus, it # is equivalent to: # for output_idx in range(n_outputs): # log_likelihood_dims[output_idx] = ( # y_train[:, [output_idx]] @ alpha[:, [output_idx]] # ) log_likelihood_dims = -0.5 * np.einsum("ik,ik->k", y_train, alpha) log_likelihood_dims -= np.log(np.diag(L)).sum() log_likelihood_dims -= K.shape[0] / 2 * np.log(2 * np.pi) # the log likelihood is sum-up across the outputs log_likelihood = log_likelihood_dims.sum(axis=-1) if eval_gradient: # Eq. 5.9, p. 114, and footnote 5 in p. 114 # 0.5 * trace((alpha . alpha^T - K^-1) . K_gradient) # alpha is supposed to be a vector of (n_samples,) elements. With # multioutputs, alpha is a matrix of size (n_samples, n_outputs). 
# Therefore, we want to construct a matrix of # (n_samples, n_samples, n_outputs) equivalent to # for output_idx in range(n_outputs): # output_alpha = alpha[:, [output_idx]] # inner_term[..., output_idx] = output_alpha @ output_alpha.T inner_term = np.einsum("ik,jk->ijk", alpha, alpha) # compute K^-1 of shape (n_samples, n_samples) K_inv = cho_solve( (L, GPR_CHOLESKY_LOWER), np.eye(K.shape[0]), check_finite=False ) # create a new axis to use broadcasting between inner_term and # K_inv inner_term -= K_inv[..., np.newaxis] # Since we are interested about the trace of # inner_term @ K_gradient, we don't explicitly compute the # matrix-by-matrix operation and instead use an einsum. Therefore # it is equivalent to: # for param_idx in range(n_kernel_params): # for output_idx in range(n_output): # log_likehood_gradient_dims[param_idx, output_idx] = ( # inner_term[..., output_idx] @ # K_gradient[..., param_idx] # ) log_likelihood_gradient_dims = 0.5 * np.einsum( "ijl,jik->kl", inner_term, K_gradient ) # the log likelihood gradient is the sum-up across the outputs log_likelihood_gradient = log_likelihood_gradient_dims.sum(axis=-1) if eval_gradient: return log_likelihood, log_likelihood_gradient else: return log_likelihood def _constrained_optimization(self, obj_func, initial_theta, bounds): if self.optimizer == "fmin_l_bfgs_b": opt_res = scipy.optimize.minimize( obj_func, initial_theta, method="L-BFGS-B", jac=True, bounds=bounds, ) _check_optimize_result("lbfgs", opt_res) theta_opt, func_min = opt_res.x, opt_res.fun elif callable(self.optimizer): theta_opt, func_min = self.optimizer(obj_func, initial_theta, bounds=bounds) else: raise ValueError(f"Unknown optimizer {self.optimizer}.") return theta_opt, func_min def __sklearn_tags__(self): tags = super().__sklearn_tags__() tags.requires_fit = False return tags
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
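A minimal usage sketch of the GaussianProcessRegressor API documented in the file above (fit, predict with return_std, sample_y, and the stored log-marginal likelihood). The 1-D sinusoidal toy data, the RBF + WhiteKernel choice, and all variable names are illustrative assumptions, not part of the source file.

import numpy as np

from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel

# Illustrative toy data: noisy samples of a sine function (an assumption).
rng = np.random.RandomState(0)
X_train = rng.uniform(0, 5, size=(30, 1))
y_train = np.sin(X_train).ravel() + rng.normal(scale=0.1, size=30)

# ConstantKernel * RBF models the signal, WhiteKernel the observation noise.
kernel = 1.0 * RBF(length_scale=1.0) + WhiteKernel(noise_level=0.1)
gpr = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=2, random_state=0)
gpr.fit(X_train, y_train)

X_test = np.linspace(0, 5, 100).reshape(-1, 1)
# Mean and standard deviation of the posterior predictive distribution.
y_mean, y_std = gpr.predict(X_test, return_std=True)
# Three functions drawn from the posterior at the query points.
y_samples = gpr.sample_y(X_test, n_samples=3, random_state=1)
# Optimized kernel and the corresponding log-marginal likelihood.
print(gpr.kernel_)
print(gpr.log_marginal_likelihood_value_)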
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/gaussian_process/__init__.py
sklearn/gaussian_process/__init__.py
"""Gaussian process based regression and classification.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from sklearn.gaussian_process import kernels from sklearn.gaussian_process._gpc import GaussianProcessClassifier from sklearn.gaussian_process._gpr import GaussianProcessRegressor __all__ = ["GaussianProcessClassifier", "GaussianProcessRegressor", "kernels"]
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
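The package __init__ above only re-exports the public entry points; a brief sketch of importing and using them is given below. The tiny arrays and the kernel choice are illustrative assumptions.

import numpy as np

from sklearn.gaussian_process import (
    GaussianProcessClassifier,
    GaussianProcessRegressor,
    kernels,
)

X = np.array([[0.0], [1.0], [2.0], [3.0]])
y_reg = np.array([0.0, 0.8, 0.9, 0.1])   # toy regression targets (assumption)
y_clf = np.array([0, 0, 1, 1])           # toy binary labels (assumption)

# The kernels submodule is re-exported alongside the two estimators.
rbf = kernels.RBF(length_scale=1.0)
print(GaussianProcessRegressor(kernel=rbf).fit(X, y_reg).predict(X))
print(GaussianProcessClassifier(kernel=rbf, random_state=0).fit(X, y_clf).predict(X))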
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/gaussian_process/_gpc.py
sklearn/gaussian_process/_gpc.py
"""Gaussian processes classification.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from numbers import Integral from operator import itemgetter import numpy as np import scipy.optimize from scipy.linalg import cho_solve, cholesky, solve from scipy.special import erf, expit from sklearn.base import BaseEstimator, ClassifierMixin, _fit_context, clone from sklearn.gaussian_process.kernels import RBF, CompoundKernel, Kernel from sklearn.gaussian_process.kernels import ConstantKernel as C from sklearn.multiclass import OneVsOneClassifier, OneVsRestClassifier from sklearn.preprocessing import LabelEncoder from sklearn.utils import check_random_state from sklearn.utils._param_validation import Interval, StrOptions from sklearn.utils.optimize import _check_optimize_result from sklearn.utils.validation import check_is_fitted, validate_data # Values required for approximating the logistic sigmoid by # error functions. coefs are obtained via: # x = np.array([0, 0.6, 2, 3.5, 4.5, np.inf]) # b = logistic(x) # A = (erf(np.dot(x, self.lambdas)) + 1) / 2 # coefs = lstsq(A, b)[0] LAMBDAS = np.array([0.41, 0.4, 0.37, 0.44, 0.39])[:, np.newaxis] COEFS = np.array( [-1854.8214151, 3516.89893646, 221.29346712, 128.12323805, -2010.49422654] )[:, np.newaxis] class _BinaryGaussianProcessClassifierLaplace(BaseEstimator): """Binary Gaussian process classification based on Laplace approximation. The implementation is based on Algorithm 3.1, 3.2, and 5.1 from [RW2006]_. Internally, the Laplace approximation is used for approximating the non-Gaussian posterior by a Gaussian. Currently, the implementation is restricted to using the logistic link function. .. versionadded:: 0.18 Parameters ---------- kernel : kernel instance, default=None The kernel specifying the covariance function of the GP. If None is passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that the kernel's hyperparameters are optimized during fitting. optimizer : 'fmin_l_bfgs_b' or callable, default='fmin_l_bfgs_b' Can either be one of the internally supported optimizers for optimizing the kernel's parameters, specified by a string, or an externally defined optimizer passed as a callable. If a callable is passed, it must have the signature:: def optimizer(obj_func, initial_theta, bounds): # * 'obj_func' is the objective function to be maximized, which # takes the hyperparameters theta as parameter and an # optional flag eval_gradient, which determines if the # gradient is returned additionally to the function value # * 'initial_theta': the initial value for theta, which can be # used by local optimizers # * 'bounds': the bounds on the values of theta .... # Returned are the best found hyperparameters theta and # the corresponding value of the target function. return theta_opt, func_min Per default, the 'L-BFGS-B' algorithm from scipy.optimize.minimize is used. If None is passed, the kernel's parameters are kept fixed. Available internal optimizers are:: 'fmin_l_bfgs_b' n_restarts_optimizer : int, default=0 The number of restarts of the optimizer for finding the kernel's parameters which maximize the log-marginal likelihood. The first run of the optimizer is performed from the kernel's initial parameters, the remaining ones (if any) from thetas sampled log-uniform randomly from the space of allowed theta-values. If greater than 0, all bounds must be finite. Note that n_restarts_optimizer=0 implies that one run is performed. 
max_iter_predict : int, default=100 The maximum number of iterations in Newton's method for approximating the posterior during predict. Smaller values will reduce computation time at the cost of worse results. warm_start : bool, default=False If warm-starts are enabled, the solution of the last Newton iteration on the Laplace approximation of the posterior mode is used as initialization for the next call of _posterior_mode(). This can speed up convergence when _posterior_mode is called several times on similar problems as in hyperparameter optimization. See :term:`the Glossary <warm_start>`. copy_X_train : bool, default=True If True, a persistent copy of the training data is stored in the object. Otherwise, just a reference to the training data is stored, which might cause predictions to change if the data is modified externally. random_state : int, RandomState instance or None, default=None Determines random number generation used to initialize the centers. Pass an int for reproducible results across multiple function calls. See :term:`Glossary <random_state>`. Attributes ---------- X_train_ : array-like of shape (n_samples, n_features) or list of object Feature vectors or other representations of training data (also required for prediction). y_train_ : array-like of shape (n_samples,) Target values in training data (also required for prediction) classes_ : array-like of shape (n_classes,) Unique class labels. kernel_ : kernl instance The kernel used for prediction. The structure of the kernel is the same as the one passed as parameter but with optimized hyperparameters L_ : array-like of shape (n_samples, n_samples) Lower-triangular Cholesky decomposition of the kernel in X_train_ pi_ : array-like of shape (n_samples,) The probabilities of the positive class for the training points X_train_ W_sr_ : array-like of shape (n_samples,) Square root of W, the Hessian of log-likelihood of the latent function values for the observed labels. Since W is diagonal, only the diagonal of sqrt(W) is stored. log_marginal_likelihood_value_ : float The log-marginal-likelihood of ``self.kernel_.theta`` References ---------- .. [RW2006] `Carl E. Rasmussen and Christopher K.I. Williams, "Gaussian Processes for Machine Learning", MIT Press 2006 <https://www.gaussianprocess.org/gpml/chapters/RW.pdf>`_ """ def __init__( self, kernel=None, *, optimizer="fmin_l_bfgs_b", n_restarts_optimizer=0, max_iter_predict=100, warm_start=False, copy_X_train=True, random_state=None, ): self.kernel = kernel self.optimizer = optimizer self.n_restarts_optimizer = n_restarts_optimizer self.max_iter_predict = max_iter_predict self.warm_start = warm_start self.copy_X_train = copy_X_train self.random_state = random_state def fit(self, X, y): """Fit Gaussian process classification model. Parameters ---------- X : array-like of shape (n_samples, n_features) or list of object Feature vectors or other representations of training data. y : array-like of shape (n_samples,) Target values, must be binary. Returns ------- self : returns an instance of self. 
""" if self.kernel is None: # Use an RBF kernel as default self.kernel_ = C(1.0, constant_value_bounds="fixed") * RBF( 1.0, length_scale_bounds="fixed" ) else: self.kernel_ = clone(self.kernel) self.rng = check_random_state(self.random_state) self.X_train_ = np.copy(X) if self.copy_X_train else X # Encode class labels and check that it is a binary classification # problem label_encoder = LabelEncoder() self.y_train_ = label_encoder.fit_transform(y) self.classes_ = label_encoder.classes_ if self.classes_.size > 2: raise ValueError( "%s supports only binary classification. y contains classes %s" % (self.__class__.__name__, self.classes_) ) elif self.classes_.size == 1: raise ValueError( "{0:s} requires 2 classes; got {1:d} class".format( self.__class__.__name__, self.classes_.size ) ) if self.optimizer is not None and self.kernel_.n_dims > 0: # Choose hyperparameters based on maximizing the log-marginal # likelihood (potentially starting from several initial values) def obj_func(theta, eval_gradient=True): if eval_gradient: lml, grad = self.log_marginal_likelihood( theta, eval_gradient=True, clone_kernel=False ) return -lml, -grad else: return -self.log_marginal_likelihood(theta, clone_kernel=False) # First optimize starting from theta specified in kernel optima = [ self._constrained_optimization( obj_func, self.kernel_.theta, self.kernel_.bounds ) ] # Additional runs are performed from log-uniform chosen initial # theta if self.n_restarts_optimizer > 0: if not np.isfinite(self.kernel_.bounds).all(): raise ValueError( "Multiple optimizer restarts (n_restarts_optimizer>0) " "requires that all bounds are finite." ) bounds = self.kernel_.bounds for iteration in range(self.n_restarts_optimizer): theta_initial = np.exp(self.rng.uniform(bounds[:, 0], bounds[:, 1])) optima.append( self._constrained_optimization(obj_func, theta_initial, bounds) ) # Select result from run with minimal (negative) log-marginal # likelihood lml_values = list(map(itemgetter(1), optima)) self.kernel_.theta = optima[np.argmin(lml_values)][0] self.kernel_._check_bounds_params() self.log_marginal_likelihood_value_ = -np.min(lml_values) else: self.log_marginal_likelihood_value_ = self.log_marginal_likelihood( self.kernel_.theta ) # Precompute quantities required for predictions which are independent # of actual query points K = self.kernel_(self.X_train_) _, (self.pi_, self.W_sr_, self.L_, _, _) = self._posterior_mode( K, return_temporaries=True ) return self def predict(self, X): """Perform classification on an array of test vectors X. Parameters ---------- X : array-like of shape (n_samples, n_features) or list of object Query points where the GP is evaluated for classification. Returns ------- C : ndarray of shape (n_samples,) Predicted target values for X, values are from ``classes_`` """ check_is_fitted(self) # As discussed on Section 3.4.2 of GPML, for making hard binary # decisions, it is enough to compute the MAP of the posterior and # pass it through the link function K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star) f_star = K_star.T.dot(self.y_train_ - self.pi_) # Algorithm 3.2,Line 4 return np.where(f_star > 0, self.classes_[1], self.classes_[0]) def predict_proba(self, X): """Return probability estimates for the test vector X. Parameters ---------- X : array-like of shape (n_samples, n_features) or list of object Query points where the GP is evaluated for classification. Returns ------- C : array-like of shape (n_samples, n_classes) Returns the probability of the samples for each class in the model. 
The columns correspond to the classes in sorted order, as they appear in the attribute ``classes_``. """ check_is_fitted(self) # Compute the mean and variance of the latent function # (Lines 4-6 of Algorithm 3.2 of GPML) latent_mean, latent_var = self.latent_mean_and_variance(X) # Line 7: # Approximate \int log(z) * N(z | f_star, var_f_star) # Approximation is due to Williams & Barber, "Bayesian Classification # with Gaussian Processes", Appendix A: Approximate the logistic # sigmoid by a linear combination of 5 error functions. # For information on how this integral can be computed see # blitiri.blogspot.de/2012/11/gaussian-integral-of-error-function.html alpha = 1 / (2 * latent_var) gamma = LAMBDAS * latent_mean integrals = ( np.sqrt(np.pi / alpha) * erf(gamma * np.sqrt(alpha / (alpha + LAMBDAS**2))) / (2 * np.sqrt(latent_var * 2 * np.pi)) ) pi_star = (COEFS * integrals).sum(axis=0) + 0.5 * COEFS.sum() return np.vstack((1 - pi_star, pi_star)).T def log_marginal_likelihood( self, theta=None, eval_gradient=False, clone_kernel=True ): """Returns log-marginal likelihood of theta for training data. Parameters ---------- theta : array-like of shape (n_kernel_params,), default=None Kernel hyperparameters for which the log-marginal likelihood is evaluated. If None, the precomputed log_marginal_likelihood of ``self.kernel_.theta`` is returned. eval_gradient : bool, default=False If True, the gradient of the log-marginal likelihood with respect to the kernel hyperparameters at position theta is returned additionally. If True, theta must not be None. clone_kernel : bool, default=True If True, the kernel attribute is copied. If False, the kernel attribute is modified, but may result in a performance improvement. Returns ------- log_likelihood : float Log-marginal likelihood of theta for training data. log_likelihood_gradient : ndarray of shape (n_kernel_params,), \ optional Gradient of the log-marginal likelihood with respect to the kernel hyperparameters at position theta. Only returned when `eval_gradient` is True. """ if theta is None: if eval_gradient: raise ValueError("Gradient can only be evaluated for theta!=None") return self.log_marginal_likelihood_value_ if clone_kernel: kernel = self.kernel_.clone_with_theta(theta) else: kernel = self.kernel_ kernel.theta = theta if eval_gradient: K, K_gradient = kernel(self.X_train_, eval_gradient=True) else: K = kernel(self.X_train_) # Compute log-marginal-likelihood Z and also store some temporaries # which can be reused for computing Z's gradient Z, (pi, W_sr, L, b, a) = self._posterior_mode(K, return_temporaries=True) if not eval_gradient: return Z # Compute gradient based on Algorithm 5.1 of GPML d_Z = np.empty(theta.shape[0]) # XXX: Get rid of the np.diag() in the next line R = W_sr[:, np.newaxis] * cho_solve((L, True), np.diag(W_sr)) # Line 7 C = solve(L, W_sr[:, np.newaxis] * K) # Line 8 # Line 9: (use einsum to compute np.diag(C.T.dot(C)))) s_2 = ( -0.5 * (np.diag(K) - np.einsum("ij, ij -> j", C, C)) * (pi * (1 - pi) * (1 - 2 * pi)) ) # third derivative for j in range(d_Z.shape[0]): C = K_gradient[:, :, j] # Line 11 # Line 12: (R.T.ravel().dot(C.ravel()) = np.trace(R.dot(C))) s_1 = 0.5 * a.T.dot(C).dot(a) - 0.5 * R.T.ravel().dot(C.ravel()) b = C.dot(self.y_train_ - pi) # Line 13 s_3 = b - K.dot(R.dot(b)) # Line 14 d_Z[j] = s_1 + s_2.T.dot(s_3) # Line 15 return Z, d_Z def latent_mean_and_variance(self, X): """Compute the mean and variance of the latent function values. 
Based on algorithm 3.2 of [RW2006]_, this function returns the latent mean (Line 4) and variance (Line 6) of the Gaussian process classification model. Note that this function is only supported for binary classification. Parameters ---------- X : array-like of shape (n_samples, n_features) or list of object Query points where the GP is evaluated for classification. Returns ------- latent_mean : array-like of shape (n_samples,) Mean of the latent function values at the query points. latent_var : array-like of shape (n_samples,) Variance of the latent function values at the query points. """ check_is_fitted(self) # Based on Algorithm 3.2 of GPML K_star = self.kernel_(self.X_train_, X) # K_star =k(x_star) latent_mean = K_star.T.dot(self.y_train_ - self.pi_) # Line 4 v = solve(self.L_, self.W_sr_[:, np.newaxis] * K_star) # Line 5 # Line 6 (compute np.diag(v.T.dot(v)) via einsum) latent_var = self.kernel_.diag(X) - np.einsum("ij,ij->j", v, v) return latent_mean, latent_var def _posterior_mode(self, K, return_temporaries=False): """Mode-finding for binary Laplace GPC and fixed kernel. This approximates the posterior of the latent function values for given inputs and target observations with a Gaussian approximation and uses Newton's iteration to find the mode of this approximation. """ # Based on Algorithm 3.1 of GPML # If warm_start are enabled, we reuse the last solution for the # posterior mode as initialization; otherwise, we initialize with 0 if ( self.warm_start and hasattr(self, "f_cached") and self.f_cached.shape == self.y_train_.shape ): f = self.f_cached else: f = np.zeros_like(self.y_train_, dtype=np.float64) # Use Newton's iteration method to find mode of Laplace approximation log_marginal_likelihood = -np.inf for _ in range(self.max_iter_predict): # Line 4 pi = expit(f) W = pi * (1 - pi) # Line 5 W_sr = np.sqrt(W) W_sr_K = W_sr[:, np.newaxis] * K B = np.eye(W.shape[0]) + W_sr_K * W_sr L = cholesky(B, lower=True) # Line 6 b = W * f + (self.y_train_ - pi) # Line 7 a = b - W_sr * cho_solve((L, True), W_sr_K.dot(b)) # Line 8 f = K.dot(a) # Line 10: Compute log marginal likelihood in loop and use as # convergence criterion lml = ( -0.5 * a.T.dot(f) - np.log1p(np.exp(-(self.y_train_ * 2 - 1) * f)).sum() - np.log(np.diag(L)).sum() ) # Check if we have converged (log marginal likelihood does # not decrease) # XXX: more complex convergence criterion if lml - log_marginal_likelihood < 1e-10: break log_marginal_likelihood = lml self.f_cached = f # Remember solution for later warm-starts if return_temporaries: return log_marginal_likelihood, (pi, W_sr, L, b, a) else: return log_marginal_likelihood def _constrained_optimization(self, obj_func, initial_theta, bounds): if self.optimizer == "fmin_l_bfgs_b": opt_res = scipy.optimize.minimize( obj_func, initial_theta, method="L-BFGS-B", jac=True, bounds=bounds ) _check_optimize_result("lbfgs", opt_res) theta_opt, func_min = opt_res.x, opt_res.fun elif callable(self.optimizer): theta_opt, func_min = self.optimizer(obj_func, initial_theta, bounds=bounds) else: raise ValueError("Unknown optimizer %s." % self.optimizer) return theta_opt, func_min class GaussianProcessClassifier(ClassifierMixin, BaseEstimator): """Gaussian process classification (GPC) based on Laplace approximation. The implementation is based on Algorithm 3.1, 3.2, and 5.1 from [RW2006]_. Internally, the Laplace approximation is used for approximating the non-Gaussian posterior by a Gaussian. Currently, the implementation is restricted to using the logistic link function. 
For multi-class classification, several binary one-versus rest classifiers are fitted. Note that this class thus does not implement a true multi-class Laplace approximation. Read more in the :ref:`User Guide <gaussian_process>`. .. versionadded:: 0.18 Parameters ---------- kernel : kernel instance, default=None The kernel specifying the covariance function of the GP. If None is passed, the kernel "1.0 * RBF(1.0)" is used as default. Note that the kernel's hyperparameters are optimized during fitting. Also kernel cannot be a `CompoundKernel`. optimizer : 'fmin_l_bfgs_b', callable or None, default='fmin_l_bfgs_b' Can either be one of the internally supported optimizers for optimizing the kernel's parameters, specified by a string, or an externally defined optimizer passed as a callable. If a callable is passed, it must have the signature:: def optimizer(obj_func, initial_theta, bounds): # * 'obj_func' is the objective function to be maximized, which # takes the hyperparameters theta as parameter and an # optional flag eval_gradient, which determines if the # gradient is returned additionally to the function value # * 'initial_theta': the initial value for theta, which can be # used by local optimizers # * 'bounds': the bounds on the values of theta .... # Returned are the best found hyperparameters theta and # the corresponding value of the target function. return theta_opt, func_min Per default, the 'L-BFGS-B' algorithm from scipy.optimize.minimize is used. If None is passed, the kernel's parameters are kept fixed. Available internal optimizers are:: 'fmin_l_bfgs_b' n_restarts_optimizer : int, default=0 The number of restarts of the optimizer for finding the kernel's parameters which maximize the log-marginal likelihood. The first run of the optimizer is performed from the kernel's initial parameters, the remaining ones (if any) from thetas sampled log-uniform randomly from the space of allowed theta-values. If greater than 0, all bounds must be finite. Note that n_restarts_optimizer=0 implies that one run is performed. max_iter_predict : int, default=100 The maximum number of iterations in Newton's method for approximating the posterior during predict. Smaller values will reduce computation time at the cost of worse results. warm_start : bool, default=False If warm-starts are enabled, the solution of the last Newton iteration on the Laplace approximation of the posterior mode is used as initialization for the next call of _posterior_mode(). This can speed up convergence when _posterior_mode is called several times on similar problems as in hyperparameter optimization. See :term:`the Glossary <warm_start>`. copy_X_train : bool, default=True If True, a persistent copy of the training data is stored in the object. Otherwise, just a reference to the training data is stored, which might cause predictions to change if the data is modified externally. random_state : int, RandomState instance or None, default=None Determines random number generation used to initialize the centers. Pass an int for reproducible results across multiple function calls. See :term:`Glossary <random_state>`. multi_class : {'one_vs_rest', 'one_vs_one'}, default='one_vs_rest' Specifies how multi-class classification problems are handled. Supported are 'one_vs_rest' and 'one_vs_one'. In 'one_vs_rest', one binary Gaussian process classifier is fitted for each class, which is trained to separate this class from the rest. 
In 'one_vs_one', one binary Gaussian process classifier is fitted for each pair of classes, which is trained to separate these two classes. The predictions of these binary predictors are combined into multi-class predictions. Note that 'one_vs_one' does not support predicting probability estimates. n_jobs : int, default=None The number of jobs to use for the computation: the specified multiclass problems are computed in parallel. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. Attributes ---------- base_estimator_ : ``Estimator`` instance The estimator instance that defines the likelihood function using the observed data. kernel_ : kernel instance The kernel used for prediction. In case of binary classification, the structure of the kernel is the same as the one passed as parameter but with optimized hyperparameters. In case of multi-class classification, a CompoundKernel is returned which consists of the different kernels used in the one-versus-rest classifiers. log_marginal_likelihood_value_ : float The log-marginal-likelihood of ``self.kernel_.theta`` classes_ : array-like of shape (n_classes,) Unique class labels. n_classes_ : int The number of classes in the training data n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- GaussianProcessRegressor : Gaussian process regression (GPR). References ---------- .. [RW2006] `Carl E. Rasmussen and Christopher K.I. Williams, "Gaussian Processes for Machine Learning", MIT Press 2006 <https://www.gaussianprocess.org/gpml/chapters/RW.pdf>`_ Examples -------- >>> from sklearn.datasets import load_iris >>> from sklearn.gaussian_process import GaussianProcessClassifier >>> from sklearn.gaussian_process.kernels import RBF >>> X, y = load_iris(return_X_y=True) >>> kernel = 1.0 * RBF(1.0) >>> gpc = GaussianProcessClassifier(kernel=kernel, ... random_state=0).fit(X, y) >>> gpc.score(X, y) 0.9866... >>> gpc.predict_proba(X[:2,:]) array([[0.83548752, 0.03228706, 0.13222543], [0.79064206, 0.06525643, 0.14410151]]) For a comparison of the GaussianProcessClassifier with other classifiers see: :ref:`sphx_glr_auto_examples_classification_plot_classification_probability.py`. """ _parameter_constraints: dict = { "kernel": [Kernel, None], "optimizer": [StrOptions({"fmin_l_bfgs_b"}), callable, None], "n_restarts_optimizer": [Interval(Integral, 0, None, closed="left")], "max_iter_predict": [Interval(Integral, 1, None, closed="left")], "warm_start": ["boolean"], "copy_X_train": ["boolean"], "random_state": ["random_state"], "multi_class": [StrOptions({"one_vs_rest", "one_vs_one"})], "n_jobs": [Integral, None], } def __init__( self, kernel=None, *, optimizer="fmin_l_bfgs_b", n_restarts_optimizer=0, max_iter_predict=100, warm_start=False, copy_X_train=True, random_state=None, multi_class="one_vs_rest", n_jobs=None, ): self.kernel = kernel self.optimizer = optimizer self.n_restarts_optimizer = n_restarts_optimizer self.max_iter_predict = max_iter_predict self.warm_start = warm_start self.copy_X_train = copy_X_train self.random_state = random_state self.multi_class = multi_class self.n_jobs = n_jobs @_fit_context(prefer_skip_nested_validation=True) def fit(self, X, y): """Fit Gaussian process classification model. 
Parameters ---------- X : array-like of shape (n_samples, n_features) or list of object Feature vectors or other representations of training data. y : array-like of shape (n_samples,) Target values, must be binary. Returns ------- self : object Returns an instance of self. """ if isinstance(self.kernel, CompoundKernel): raise ValueError("kernel cannot be a CompoundKernel") if self.kernel is None or self.kernel.requires_vector_input: X, y = validate_data( self, X, y, multi_output=False, ensure_2d=True, dtype="numeric" ) else: X, y = validate_data( self, X, y, multi_output=False, ensure_2d=False, dtype=None ) self.base_estimator_ = _BinaryGaussianProcessClassifierLaplace( kernel=self.kernel, optimizer=self.optimizer, n_restarts_optimizer=self.n_restarts_optimizer, max_iter_predict=self.max_iter_predict, warm_start=self.warm_start, copy_X_train=self.copy_X_train, random_state=self.random_state, ) self.classes_ = np.unique(y) self.n_classes_ = self.classes_.size if self.n_classes_ == 1: raise ValueError( "GaussianProcessClassifier requires 2 or more " "distinct classes; got %d class (only class %s " "is present)" % (self.n_classes_, self.classes_[0]) ) if self.n_classes_ > 2: if self.multi_class == "one_vs_rest": self.base_estimator_ = OneVsRestClassifier( self.base_estimator_, n_jobs=self.n_jobs ) elif self.multi_class == "one_vs_one": self.base_estimator_ = OneVsOneClassifier( self.base_estimator_, n_jobs=self.n_jobs ) else: raise ValueError("Unknown multi-class mode %s" % self.multi_class) self.base_estimator_.fit(X, y) if self.n_classes_ > 2: self.log_marginal_likelihood_value_ = np.mean( [ estimator.log_marginal_likelihood() for estimator in self.base_estimator_.estimators_ ] ) else: self.log_marginal_likelihood_value_ = ( self.base_estimator_.log_marginal_likelihood() ) return self def predict(self, X): """Perform classification on an array of test vectors X. Parameters ---------- X : array-like of shape (n_samples, n_features) or list of object Query points where the GP is evaluated for classification. Returns ------- C : ndarray of shape (n_samples,) Predicted target values for X, values are from ``classes_``. """ check_is_fitted(self) if self.kernel is None or self.kernel.requires_vector_input: X = validate_data(self, X, ensure_2d=True, dtype="numeric", reset=False) else: X = validate_data(self, X, ensure_2d=False, dtype=None, reset=False) return self.base_estimator_.predict(X) def predict_proba(self, X): """Return probability estimates for the test vector X.
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
true
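A minimal sketch of the multi-class GaussianProcessClassifier usage described in the (truncated) file above, using the documented one-vs-rest strategy. The iris data and the kernel choice are illustrative assumptions rather than part of the source.

from sklearn.datasets import load_iris
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF

X, y = load_iris(return_X_y=True)
gpc = GaussianProcessClassifier(
    kernel=1.0 * RBF(length_scale=1.0),
    multi_class="one_vs_rest",
    random_state=0,
)
gpc.fit(X, y)

# One binary Laplace-approximation GPC is fitted per class; the fitted kernel
# is reported as a CompoundKernel with one component per underlying estimator.
print(gpc.n_classes_)            # 3
print(gpc.kernel_)               # compound kernel of the per-class kernels
print(gpc.predict_proba(X[:2]))  # class membership probabilities, rows sum to 1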
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/gaussian_process/kernels.py
sklearn/gaussian_process/kernels.py
"""A set of kernels that can be combined by operators and used in Gaussian processes.""" # Kernels for Gaussian process regression and classification. # # The kernels in this module allow kernel-engineering, i.e., they can be # combined via the "+" and "*" operators or be exponentiated with a scalar # via "**". These sum and product expressions can also contain scalar values, # which are automatically converted to a constant kernel. # # All kernels allow (analytic) gradient-based hyperparameter optimization. # The space of hyperparameters can be specified by giving lower und upper # boundaries for the value of each hyperparameter (the search space is thus # rectangular). Instead of specifying bounds, hyperparameters can also be # declared to be "fixed", which causes these hyperparameters to be excluded from # optimization. # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # Note: this module is strongly inspired by the kernel module of the george # package. import math import warnings from abc import ABCMeta, abstractmethod from collections import namedtuple from inspect import signature import numpy as np from scipy.spatial.distance import cdist, pdist, squareform from scipy.special import gamma, kv from sklearn.base import clone from sklearn.exceptions import ConvergenceWarning from sklearn.metrics.pairwise import pairwise_kernels from sklearn.utils.validation import _num_samples def _check_length_scale(X, length_scale): length_scale = np.squeeze(length_scale).astype(float) if np.ndim(length_scale) > 1: raise ValueError("length_scale cannot be of dimension greater than 1") if np.ndim(length_scale) == 1 and X.shape[1] != length_scale.shape[0]: raise ValueError( "Anisotropic kernel must have the same number of " "dimensions as data (%d!=%d)" % (length_scale.shape[0], X.shape[1]) ) return length_scale class Hyperparameter( namedtuple( "Hyperparameter", ("name", "value_type", "bounds", "n_elements", "fixed") ) ): """A kernel hyperparameter's specification in form of a namedtuple. .. versionadded:: 0.18 Attributes ---------- name : str The name of the hyperparameter. Note that a kernel using a hyperparameter with name "x" must have the attributes self.x and self.x_bounds value_type : str The type of the hyperparameter. Currently, only "numeric" hyperparameters are supported. bounds : pair of floats >= 0 or "fixed" The lower and upper bound on the parameter. If n_elements>1, a pair of 1d array with n_elements each may be given alternatively. If the string "fixed" is passed as bounds, the hyperparameter's value cannot be changed. n_elements : int, default=1 The number of elements of the hyperparameter value. Defaults to 1, which corresponds to a scalar hyperparameter. n_elements > 1 corresponds to a hyperparameter which is vector-valued, such as, e.g., anisotropic length-scales. fixed : bool, default=None Whether the value of this hyperparameter is fixed, i.e., cannot be changed during hyperparameter tuning. If None is passed, the "fixed" is derived based on the given bounds. Examples -------- >>> from sklearn.gaussian_process.kernels import ConstantKernel >>> from sklearn.datasets import make_friedman2 >>> from sklearn.gaussian_process import GaussianProcessRegressor >>> from sklearn.gaussian_process.kernels import Hyperparameter >>> X, y = make_friedman2(n_samples=50, noise=0, random_state=0) >>> kernel = ConstantKernel(constant_value=1.0, ... 
constant_value_bounds=(0.0, 10.0)) We can access each hyperparameter: >>> for hyperparameter in kernel.hyperparameters: ... print(hyperparameter) Hyperparameter(name='constant_value', value_type='numeric', bounds=array([[ 0., 10.]]), n_elements=1, fixed=False) >>> params = kernel.get_params() >>> for key in sorted(params): print(f"{key} : {params[key]}") constant_value : 1.0 constant_value_bounds : (0.0, 10.0) """ # A raw namedtuple is very memory efficient as it packs the attributes # in a struct to get rid of the __dict__ of attributes in particular it # does not copy the string for the keys on each instance. # By deriving a namedtuple class just to introduce the __init__ method we # would also reintroduce the __dict__ on the instance. By telling the # Python interpreter that this subclass uses static __slots__ instead of # dynamic attributes. Furthermore we don't need any additional slot in the # subclass so we set __slots__ to the empty tuple. __slots__ = () def __new__(cls, name, value_type, bounds, n_elements=1, fixed=None): if not isinstance(bounds, str) or bounds != "fixed": bounds = np.atleast_2d(bounds) if n_elements > 1: # vector-valued parameter if bounds.shape[0] == 1: bounds = np.repeat(bounds, n_elements, 0) elif bounds.shape[0] != n_elements: raise ValueError( "Bounds on %s should have either 1 or " "%d dimensions. Given are %d" % (name, n_elements, bounds.shape[0]) ) if fixed is None: fixed = isinstance(bounds, str) and bounds == "fixed" return super().__new__(cls, name, value_type, bounds, n_elements, fixed) # This is mainly a testing utility to check that two hyperparameters # are equal. def __eq__(self, other): return ( self.name == other.name and self.value_type == other.value_type and np.all(self.bounds == other.bounds) and self.n_elements == other.n_elements and self.fixed == other.fixed ) class Kernel(metaclass=ABCMeta): """Base class for all kernels. .. versionadded:: 0.18 Examples -------- >>> from sklearn.gaussian_process.kernels import Kernel, RBF >>> import numpy as np >>> class CustomKernel(Kernel): ... def __init__(self, length_scale=1.0): ... self.length_scale = length_scale ... def __call__(self, X, Y=None): ... if Y is None: ... Y = X ... return np.inner(X, X if Y is None else Y) ** 2 ... def diag(self, X): ... return np.ones(X.shape[0]) ... def is_stationary(self): ... return True >>> kernel = CustomKernel(length_scale=2.0) >>> X = np.array([[1, 2], [3, 4]]) >>> print(kernel(X)) [[ 25 121] [121 625]] """ def get_params(self, deep=True): """Get parameters of this kernel. Parameters ---------- deep : bool, default=True If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- params : dict Parameter names mapped to their values. """ params = dict() # introspect the constructor arguments to find the model parameters # to represent cls = self.__class__ init = getattr(cls.__init__, "deprecated_original", cls.__init__) init_sign = signature(init) args, varargs = [], [] for parameter in init_sign.parameters.values(): if parameter.kind != parameter.VAR_KEYWORD and parameter.name != "self": args.append(parameter.name) if parameter.kind == parameter.VAR_POSITIONAL: varargs.append(parameter.name) if len(varargs) != 0: raise RuntimeError( "scikit-learn kernels should always " "specify their parameters in the signature" " of their __init__ (no varargs)." " %s doesn't follow this convention." 
% (cls,) ) for arg in args: params[arg] = getattr(self, arg) return params def set_params(self, **params): """Set the parameters of this kernel. The method works on simple kernels as well as on nested kernels. The latter have parameters of the form ``<component>__<parameter>`` so that it's possible to update each component of a nested object. Returns ------- self """ if not params: # Simple optimisation to gain speed (inspect is slow) return self valid_params = self.get_params(deep=True) for key, value in params.items(): split = key.split("__", 1) if len(split) > 1: # nested objects case name, sub_name = split if name not in valid_params: raise ValueError( "Invalid parameter %s for kernel %s. " "Check the list of available parameters " "with `kernel.get_params().keys()`." % (name, self) ) sub_object = valid_params[name] sub_object.set_params(**{sub_name: value}) else: # simple objects case if key not in valid_params: raise ValueError( "Invalid parameter %s for kernel %s. " "Check the list of available parameters " "with `kernel.get_params().keys()`." % (key, self.__class__.__name__) ) setattr(self, key, value) return self def clone_with_theta(self, theta): """Returns a clone of self with given hyperparameters theta. Parameters ---------- theta : ndarray of shape (n_dims,) The hyperparameters """ cloned = clone(self) cloned.theta = theta return cloned @property def n_dims(self): """Returns the number of non-fixed hyperparameters of the kernel.""" return self.theta.shape[0] @property def hyperparameters(self): """Returns a list of all hyperparameter specifications.""" r = [ getattr(self, attr) for attr in dir(self) if attr.startswith("hyperparameter_") ] return r @property def theta(self): """Returns the (flattened, log-transformed) non-fixed hyperparameters. Note that theta are typically the log-transformed values of the kernel's hyperparameters as this representation of the search space is more amenable for hyperparameter search, as hyperparameters like length-scales naturally live on a log-scale. Returns ------- theta : ndarray of shape (n_dims,) The non-fixed, log-transformed hyperparameters of the kernel """ theta = [] params = self.get_params() for hyperparameter in self.hyperparameters: if not hyperparameter.fixed: theta.append(params[hyperparameter.name]) if len(theta) > 0: return np.log(np.hstack(theta)) else: return np.array([]) @theta.setter def theta(self, theta): """Sets the (flattened, log-transformed) non-fixed hyperparameters. Parameters ---------- theta : ndarray of shape (n_dims,) The non-fixed, log-transformed hyperparameters of the kernel """ params = self.get_params() i = 0 for hyperparameter in self.hyperparameters: if hyperparameter.fixed: continue if hyperparameter.n_elements > 1: # vector-valued parameter params[hyperparameter.name] = np.exp( theta[i : i + hyperparameter.n_elements] ) i += hyperparameter.n_elements else: params[hyperparameter.name] = np.exp(theta[i]) i += 1 if i != len(theta): raise ValueError( "theta has not the correct number of entries." " Should be %d; given are %d" % (i, len(theta)) ) self.set_params(**params) @property def bounds(self): """Returns the log-transformed bounds on the theta. 
Returns ------- bounds : ndarray of shape (n_dims, 2) The log-transformed bounds on the kernel's hyperparameters theta """ bounds = [ hyperparameter.bounds for hyperparameter in self.hyperparameters if not hyperparameter.fixed ] if len(bounds) > 0: return np.log(np.vstack(bounds)) else: return np.array([]) def __add__(self, b): if not isinstance(b, Kernel): return Sum(self, ConstantKernel(b)) return Sum(self, b) def __radd__(self, b): if not isinstance(b, Kernel): return Sum(ConstantKernel(b), self) return Sum(b, self) def __mul__(self, b): if not isinstance(b, Kernel): return Product(self, ConstantKernel(b)) return Product(self, b) def __rmul__(self, b): if not isinstance(b, Kernel): return Product(ConstantKernel(b), self) return Product(b, self) def __pow__(self, b): return Exponentiation(self, b) def __eq__(self, b): if type(self) != type(b): return False params_a = self.get_params() params_b = b.get_params() for key in set(list(params_a.keys()) + list(params_b.keys())): if np.any(params_a.get(key, None) != params_b.get(key, None)): return False return True def __repr__(self): return "{0}({1})".format( self.__class__.__name__, ", ".join(map("{0:.3g}".format, self.theta)) ) @abstractmethod def __call__(self, X, Y=None, eval_gradient=False): """Evaluate the kernel.""" @abstractmethod def diag(self, X): """Returns the diagonal of the kernel k(X, X). The result of this method is identical to np.diag(self(X)); however, it can be evaluated more efficiently since only the diagonal is evaluated. Parameters ---------- X : array-like of shape (n_samples,) Left argument of the returned kernel k(X, Y) Returns ------- K_diag : ndarray of shape (n_samples_X,) Diagonal of kernel k(X, X) """ @abstractmethod def is_stationary(self): """Returns whether the kernel is stationary.""" @property def requires_vector_input(self): """Returns whether the kernel is defined on fixed-length feature vectors or generic objects. Defaults to True for backward compatibility.""" return True def _check_bounds_params(self): """Called after fitting to warn if bounds may have been too tight.""" list_close = np.isclose(self.bounds, np.atleast_2d(self.theta).T) idx = 0 for hyp in self.hyperparameters: if hyp.fixed: continue for dim in range(hyp.n_elements): if list_close[idx, 0]: warnings.warn( "The optimal value found for " "dimension %s of parameter %s is " "close to the specified lower " "bound %s. Decreasing the bound and" " calling fit again may find a " "better value." % (dim, hyp.name, hyp.bounds[dim][0]), ConvergenceWarning, ) elif list_close[idx, 1]: warnings.warn( "The optimal value found for " "dimension %s of parameter %s is " "close to the specified upper " "bound %s. Increasing the bound and" " calling fit again may find a " "better value." % (dim, hyp.name, hyp.bounds[dim][1]), ConvergenceWarning, ) idx += 1 class NormalizedKernelMixin: """Mixin for kernels which are normalized: k(X, X)=1. .. versionadded:: 0.18 """ def diag(self, X): """Returns the diagonal of the kernel k(X, X). The result of this method is identical to np.diag(self(X)); however, it can be evaluated more efficiently since only the diagonal is evaluated. Parameters ---------- X : ndarray of shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Returns ------- K_diag : ndarray of shape (n_samples_X,) Diagonal of kernel k(X, X) """ return np.ones(X.shape[0]) class StationaryKernelMixin: """Mixin for kernels which are stationary: k(X, Y)= f(X-Y). .. 
versionadded:: 0.18 """ def is_stationary(self): """Returns whether the kernel is stationary.""" return True class GenericKernelMixin: """Mixin for kernels which operate on generic objects such as variable- length sequences, trees, and graphs. .. versionadded:: 0.22 """ @property def requires_vector_input(self): """Whether the kernel works only on fixed-length feature vectors.""" return False class CompoundKernel(Kernel): """Kernel which is composed of a set of other kernels. .. versionadded:: 0.18 Parameters ---------- kernels : list of Kernels The other kernels Examples -------- >>> from sklearn.gaussian_process.kernels import WhiteKernel >>> from sklearn.gaussian_process.kernels import RBF >>> from sklearn.gaussian_process.kernels import CompoundKernel >>> kernel = CompoundKernel( ... [WhiteKernel(noise_level=3.0), RBF(length_scale=2.0)]) >>> print(kernel.bounds) [[-11.51292546 11.51292546] [-11.51292546 11.51292546]] >>> print(kernel.n_dims) 2 >>> print(kernel.theta) [1.09861229 0.69314718] """ def __init__(self, kernels): self.kernels = kernels def get_params(self, deep=True): """Get parameters of this kernel. Parameters ---------- deep : bool, default=True If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- params : dict Parameter names mapped to their values. """ return dict(kernels=self.kernels) @property def theta(self): """Returns the (flattened, log-transformed) non-fixed hyperparameters. Note that theta are typically the log-transformed values of the kernel's hyperparameters as this representation of the search space is more amenable for hyperparameter search, as hyperparameters like length-scales naturally live on a log-scale. Returns ------- theta : ndarray of shape (n_dims,) The non-fixed, log-transformed hyperparameters of the kernel """ return np.hstack([kernel.theta for kernel in self.kernels]) @theta.setter def theta(self, theta): """Sets the (flattened, log-transformed) non-fixed hyperparameters. Parameters ---------- theta : array of shape (n_dims,) The non-fixed, log-transformed hyperparameters of the kernel """ k_dims = self.k1.n_dims for i, kernel in enumerate(self.kernels): kernel.theta = theta[i * k_dims : (i + 1) * k_dims] @property def bounds(self): """Returns the log-transformed bounds on the theta. Returns ------- bounds : array of shape (n_dims, 2) The log-transformed bounds on the kernel's hyperparameters theta """ return np.vstack([kernel.bounds for kernel in self.kernels]) def __call__(self, X, Y=None, eval_gradient=False): """Return the kernel k(X, Y) and optionally its gradient. Note that this compound kernel returns the results of all simple kernel stacked along an additional axis. Parameters ---------- X : array-like of shape (n_samples_X, n_features) or list of object, \ default=None Left argument of the returned kernel k(X, Y) Y : array-like of shape (n_samples_X, n_features) or list of object, \ default=None Right argument of the returned kernel k(X, Y). If None, k(X, X) is evaluated instead. eval_gradient : bool, default=False Determines whether the gradient with respect to the log of the kernel hyperparameter is computed. Returns ------- K : ndarray of shape (n_samples_X, n_samples_Y, n_kernels) Kernel k(X, Y) K_gradient : ndarray of shape \ (n_samples_X, n_samples_X, n_dims, n_kernels), optional The gradient of the kernel k(X, X) with respect to the log of the hyperparameter of the kernel. Only returned when `eval_gradient` is True. 
""" if eval_gradient: K = [] K_grad = [] for kernel in self.kernels: K_single, K_grad_single = kernel(X, Y, eval_gradient) K.append(K_single) K_grad.append(K_grad_single[..., np.newaxis]) return np.dstack(K), np.concatenate(K_grad, 3) else: return np.dstack([kernel(X, Y, eval_gradient) for kernel in self.kernels]) def __eq__(self, b): if type(self) != type(b) or len(self.kernels) != len(b.kernels): return False return np.all( [self.kernels[i] == b.kernels[i] for i in range(len(self.kernels))] ) def is_stationary(self): """Returns whether the kernel is stationary.""" return np.all([kernel.is_stationary() for kernel in self.kernels]) @property def requires_vector_input(self): """Returns whether the kernel is defined on discrete structures.""" return np.any([kernel.requires_vector_input for kernel in self.kernels]) def diag(self, X): """Returns the diagonal of the kernel k(X, X). The result of this method is identical to `np.diag(self(X))`; however, it can be evaluated more efficiently since only the diagonal is evaluated. Parameters ---------- X : array-like of shape (n_samples_X, n_features) or list of object Argument to the kernel. Returns ------- K_diag : ndarray of shape (n_samples_X, n_kernels) Diagonal of kernel k(X, X) """ return np.vstack([kernel.diag(X) for kernel in self.kernels]).T class KernelOperator(Kernel): """Base class for all kernel operators. .. versionadded:: 0.18 """ def __init__(self, k1, k2): self.k1 = k1 self.k2 = k2 def get_params(self, deep=True): """Get parameters of this kernel. Parameters ---------- deep : bool, default=True If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- params : dict Parameter names mapped to their values. """ params = dict(k1=self.k1, k2=self.k2) if deep: deep_items = self.k1.get_params().items() params.update(("k1__" + k, val) for k, val in deep_items) deep_items = self.k2.get_params().items() params.update(("k2__" + k, val) for k, val in deep_items) return params @property def hyperparameters(self): """Returns a list of all hyperparameter.""" r = [ Hyperparameter( "k1__" + hyperparameter.name, hyperparameter.value_type, hyperparameter.bounds, hyperparameter.n_elements, ) for hyperparameter in self.k1.hyperparameters ] for hyperparameter in self.k2.hyperparameters: r.append( Hyperparameter( "k2__" + hyperparameter.name, hyperparameter.value_type, hyperparameter.bounds, hyperparameter.n_elements, ) ) return r @property def theta(self): """Returns the (flattened, log-transformed) non-fixed hyperparameters. Note that theta are typically the log-transformed values of the kernel's hyperparameters as this representation of the search space is more amenable for hyperparameter search, as hyperparameters like length-scales naturally live on a log-scale. Returns ------- theta : ndarray of shape (n_dims,) The non-fixed, log-transformed hyperparameters of the kernel """ return np.append(self.k1.theta, self.k2.theta) @theta.setter def theta(self, theta): """Sets the (flattened, log-transformed) non-fixed hyperparameters. Parameters ---------- theta : ndarray of shape (n_dims,) The non-fixed, log-transformed hyperparameters of the kernel """ k1_dims = self.k1.n_dims self.k1.theta = theta[:k1_dims] self.k2.theta = theta[k1_dims:] @property def bounds(self): """Returns the log-transformed bounds on the theta. 
Returns ------- bounds : ndarray of shape (n_dims, 2) The log-transformed bounds on the kernel's hyperparameters theta """ if self.k1.bounds.size == 0: return self.k2.bounds if self.k2.bounds.size == 0: return self.k1.bounds return np.vstack((self.k1.bounds, self.k2.bounds)) def __eq__(self, b): if type(self) != type(b): return False return (self.k1 == b.k1 and self.k2 == b.k2) or ( self.k1 == b.k2 and self.k2 == b.k1 ) def is_stationary(self): """Returns whether the kernel is stationary.""" return self.k1.is_stationary() and self.k2.is_stationary() @property def requires_vector_input(self): """Returns whether the kernel is stationary.""" return self.k1.requires_vector_input or self.k2.requires_vector_input class Sum(KernelOperator): """The `Sum` kernel takes two kernels :math:`k_1` and :math:`k_2` and combines them via .. math:: k_{sum}(X, Y) = k_1(X, Y) + k_2(X, Y) Note that the `__add__` magic method is overridden, so `Sum(RBF(), RBF())` is equivalent to using the + operator with `RBF() + RBF()`. Read more in the :ref:`User Guide <gp_kernels>`. .. versionadded:: 0.18 Parameters ---------- k1 : Kernel The first base-kernel of the sum-kernel k2 : Kernel The second base-kernel of the sum-kernel Examples -------- >>> from sklearn.datasets import make_friedman2 >>> from sklearn.gaussian_process import GaussianProcessRegressor >>> from sklearn.gaussian_process.kernels import RBF, Sum, ConstantKernel >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0) >>> kernel = Sum(ConstantKernel(2), RBF()) >>> gpr = GaussianProcessRegressor(kernel=kernel, ... random_state=0).fit(X, y) >>> gpr.score(X, y) 1.0 >>> kernel 1.41**2 + RBF(length_scale=1) """ def __call__(self, X, Y=None, eval_gradient=False): """Return the kernel k(X, Y) and optionally its gradient. Parameters ---------- X : array-like of shape (n_samples_X, n_features) or list of object Left argument of the returned kernel k(X, Y) Y : array-like of shape (n_samples_X, n_features) or list of object,\ default=None Right argument of the returned kernel k(X, Y). If None, k(X, X) is evaluated instead. eval_gradient : bool, default=False Determines whether the gradient with respect to the log of the kernel hyperparameter is computed. Returns ------- K : ndarray of shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\ optional The gradient of the kernel k(X, X) with respect to the log of the hyperparameter of the kernel. Only returned when `eval_gradient` is True. """ if eval_gradient: K1, K1_gradient = self.k1(X, Y, eval_gradient=True) K2, K2_gradient = self.k2(X, Y, eval_gradient=True) return K1 + K2, np.dstack((K1_gradient, K2_gradient)) else: return self.k1(X, Y) + self.k2(X, Y) def diag(self, X): """Returns the diagonal of the kernel k(X, X). The result of this method is identical to `np.diag(self(X))`; however, it can be evaluated more efficiently since only the diagonal is evaluated. Parameters ---------- X : array-like of shape (n_samples_X, n_features) or list of object Argument to the kernel. Returns ------- K_diag : ndarray of shape (n_samples_X,) Diagonal of kernel k(X, X) """ return self.k1.diag(X) + self.k2.diag(X) def __repr__(self): return "{0} + {1}".format(self.k1, self.k2) class Product(KernelOperator): """The `Product` kernel takes two kernels :math:`k_1` and :math:`k_2` and combines them via .. 
math:: k_{prod}(X, Y) = k_1(X, Y) * k_2(X, Y) Note that the `__mul__` magic method is overridden, so `Product(RBF(), RBF())` is equivalent to using the * operator with `RBF() * RBF()`. Read more in the :ref:`User Guide <gp_kernels>`. .. versionadded:: 0.18 Parameters ---------- k1 : Kernel The first base-kernel of the product-kernel k2 : Kernel The second base-kernel of the product-kernel Examples -------- >>> from sklearn.datasets import make_friedman2 >>> from sklearn.gaussian_process import GaussianProcessRegressor >>> from sklearn.gaussian_process.kernels import (RBF, Product, ... ConstantKernel) >>> X, y = make_friedman2(n_samples=500, noise=0, random_state=0) >>> kernel = Product(ConstantKernel(2), RBF()) >>> gpr = GaussianProcessRegressor(kernel=kernel, ... random_state=0).fit(X, y) >>> gpr.score(X, y) 1.0 >>> kernel 1.41**2 * RBF(length_scale=1) """ def __call__(self, X, Y=None, eval_gradient=False): """Return the kernel k(X, Y) and optionally its gradient. Parameters ---------- X : array-like of shape (n_samples_X, n_features) or list of object Left argument of the returned kernel k(X, Y) Y : array-like of shape (n_samples_Y, n_features) or list of object,\ default=None Right argument of the returned kernel k(X, Y). If None, k(X, X) is evaluated instead. eval_gradient : bool, default=False Determines whether the gradient with respect to the log of the kernel hyperparameter is computed. Returns ------- K : ndarray of shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), \ optional The gradient of the kernel k(X, X) with respect to the log of the hyperparameter of the kernel. Only returned when `eval_gradient` is True. """ if eval_gradient: K1, K1_gradient = self.k1(X, Y, eval_gradient=True) K2, K2_gradient = self.k2(X, Y, eval_gradient=True) return K1 * K2, np.dstack( (K1_gradient * K2[:, :, np.newaxis], K2_gradient * K1[:, :, np.newaxis]) ) else: return self.k1(X, Y) * self.k2(X, Y) def diag(self, X):
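The kernels.py excerpt above stops inside Product.diag (the record's truncated flag below is set to true). As a complement, here is a short, self-contained sketch of the operator overloading and gradient stacking defined in the excerpt; it is not part of the file itself, and names such as X_demo and k_prod are illustrative only.

import numpy as np
from sklearn.gaussian_process.kernels import RBF, ConstantKernel, Product, Sum

X_demo = np.random.RandomState(0).normal(size=(5, 2))

# "+" and "*" are overloaded on Kernel, so scalars are wrapped in a
# ConstantKernel and combined via Sum / Product.
k_prod = 2.0 * RBF(length_scale=1.0)   # Product(ConstantKernel(2.0), RBF(1.0))
k_sum = RBF(length_scale=1.0) + 1.0    # Sum(RBF(1.0), ConstantKernel(1.0))
assert isinstance(k_prod, Product) and isinstance(k_sum, Sum)

# With eval_gradient=True a kernel returns both k(X, X) and its gradient with
# respect to the log-hyperparameters, one slice per entry of kernel.theta.
K, K_gradient = k_prod(X_demo, eval_gradient=True)
assert K.shape == (5, 5)
assert K_gradient.shape == (5, 5, k_prod.n_dims)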
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
true
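A minimal sketch of the log-transformed hyperparameter representation exposed by the theta/bounds properties in the record above; the kernel choice is arbitrary and the output comments are approximate.

import numpy as np
from sklearn.gaussian_process.kernels import RBF, ConstantKernel

kernel = ConstantKernel(2.0) * RBF(length_scale=3.0)

# theta holds the log of the non-fixed hyperparameters; get_params() holds
# the original values, so the two differ by an exp/log.
print(kernel.theta)          # approx. [log(2.0), log(3.0)]
print(np.exp(kernel.theta))  # approx. [2.0, 3.0]
print(kernel.bounds)         # log-transformed (lower, upper) per dimension

# clone_with_theta returns a copy with new log-space hyperparameters and
# leaves the original kernel untouched.
kernel_scaled = kernel.clone_with_theta(kernel.theta + np.log(2.0))
print(np.exp(kernel_scaled.theta))  # approx. [4.0, 6.0]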
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/gaussian_process/tests/test_kernels.py
sklearn/gaussian_process/tests/test_kernels.py
"""Testing for kernels for Gaussian processes.""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause from inspect import signature import numpy as np import pytest from sklearn.base import clone from sklearn.gaussian_process.kernels import ( RBF, CompoundKernel, ConstantKernel, DotProduct, Exponentiation, ExpSineSquared, KernelOperator, Matern, PairwiseKernel, RationalQuadratic, WhiteKernel, _approx_fprime, ) from sklearn.metrics.pairwise import ( PAIRWISE_KERNEL_FUNCTIONS, euclidean_distances, pairwise_kernels, ) from sklearn.utils._testing import ( assert_allclose, assert_almost_equal, assert_array_almost_equal, assert_array_equal, ) X = np.random.RandomState(0).normal(0, 1, (5, 2)) Y = np.random.RandomState(0).normal(0, 1, (6, 2)) # Set shared test data as read-only to avoid unintentional in-place # modifications that would introduce side-effects between tests. X.flags.writeable = False Y.flags.writeable = False kernel_rbf_plus_white = RBF(length_scale=2.0) + WhiteKernel(noise_level=3.0) kernels = [ RBF(length_scale=2.0), RBF(length_scale_bounds=(0.5, 2.0)), ConstantKernel(constant_value=10.0), 2.0 * RBF(length_scale=0.33, length_scale_bounds="fixed"), 2.0 * RBF(length_scale=0.5), kernel_rbf_plus_white, 2.0 * RBF(length_scale=[0.5, 2.0]), 2.0 * Matern(length_scale=0.33, length_scale_bounds="fixed"), 2.0 * Matern(length_scale=0.5, nu=0.5), 2.0 * Matern(length_scale=1.5, nu=1.5), 2.0 * Matern(length_scale=2.5, nu=2.5), 2.0 * Matern(length_scale=[0.5, 2.0], nu=0.5), 3.0 * Matern(length_scale=[2.0, 0.5], nu=1.5), 4.0 * Matern(length_scale=[0.5, 0.5], nu=2.5), RationalQuadratic(length_scale=0.5, alpha=1.5), ExpSineSquared(length_scale=0.5, periodicity=1.5), DotProduct(sigma_0=2.0), DotProduct(sigma_0=2.0) ** 2, RBF(length_scale=[2.0]), Matern(length_scale=[2.0]), ] for metric in PAIRWISE_KERNEL_FUNCTIONS: if metric in ["additive_chi2", "chi2"]: continue kernels.append(PairwiseKernel(gamma=1.0, metric=metric)) @pytest.mark.parametrize("kernel", kernels) def test_kernel_gradient(kernel): # Compare analytic and numeric gradient of kernels. kernel = clone(kernel) # make tests independent of one-another K, K_gradient = kernel(X, eval_gradient=True) assert K_gradient.shape[0] == X.shape[0] assert K_gradient.shape[1] == X.shape[0] assert K_gradient.shape[2] == kernel.theta.shape[0] def eval_kernel_for_theta(theta): kernel_clone = kernel.clone_with_theta(theta) K = kernel_clone(X, eval_gradient=False) return K K_gradient_approx = _approx_fprime(kernel.theta, eval_kernel_for_theta, 1e-10) assert_almost_equal(K_gradient, K_gradient_approx, 4) @pytest.mark.parametrize( "kernel", [ kernel for kernel in kernels # skip non-basic kernels if not (isinstance(kernel, (KernelOperator, Exponentiation))) ], ) def test_kernel_theta(kernel): # Check that parameter vector theta of kernel is set correctly. 
kernel = clone(kernel) # make tests independent of one-another theta = kernel.theta _, K_gradient = kernel(X, eval_gradient=True) # Determine kernel parameters that contribute to theta init_sign = signature(kernel.__class__.__init__).parameters.values() args = [p.name for p in init_sign if p.name != "self"] theta_vars = map( lambda s: s[0 : -len("_bounds")], filter(lambda s: s.endswith("_bounds"), args) ) assert set(hyperparameter.name for hyperparameter in kernel.hyperparameters) == set( theta_vars ) # Check that values returned in theta are consistent with # hyperparameter values (being their logarithms) for i, hyperparameter in enumerate(kernel.hyperparameters): assert theta[i] == np.log(getattr(kernel, hyperparameter.name)) # Fixed kernel parameters must be excluded from theta and gradient. for i, hyperparameter in enumerate(kernel.hyperparameters): # create copy with certain hyperparameter fixed params = kernel.get_params() params[hyperparameter.name + "_bounds"] = "fixed" kernel_class = kernel.__class__ new_kernel = kernel_class(**params) # Check that theta and K_gradient are identical with the fixed # dimension left out _, K_gradient_new = new_kernel(X, eval_gradient=True) assert theta.shape[0] == new_kernel.theta.shape[0] + 1 assert K_gradient.shape[2] == K_gradient_new.shape[2] + 1 if i > 0: assert theta[:i] == new_kernel.theta[:i] assert_array_equal(K_gradient[..., :i], K_gradient_new[..., :i]) if i + 1 < len(kernel.hyperparameters): assert theta[i + 1 :] == new_kernel.theta[i:] assert_array_equal(K_gradient[..., i + 1 :], K_gradient_new[..., i:]) # Check that values of theta are modified correctly for i, hyperparameter in enumerate(kernel.hyperparameters): theta[i] = np.log(42) kernel.theta = theta assert_almost_equal(getattr(kernel, hyperparameter.name), 42) setattr(kernel, hyperparameter.name, 43) assert_almost_equal(kernel.theta[i], np.log(43)) @pytest.mark.parametrize( "kernel", [ kernel for kernel in kernels # Identity is not satisfied on diagonal if kernel != kernel_rbf_plus_white ], ) def test_auto_vs_cross(kernel): kernel = clone(kernel) # make tests independent of one-another # Auto-correlation and cross-correlation should be consistent. K_auto = kernel(X) K_cross = kernel(X, X) assert_almost_equal(K_auto, K_cross, 5) @pytest.mark.parametrize("kernel", kernels) def test_kernel_diag(kernel): kernel = clone(kernel) # make tests independent of one-another # Test that diag method of kernel returns consistent results. K_call_diag = np.diag(kernel(X)) K_diag = kernel.diag(X) assert_almost_equal(K_call_diag, K_diag, 5) def test_kernel_operator_commutative(): # Adding kernels and multiplying kernels should be commutative. # Check addition assert_almost_equal((RBF(2.0) + 1.0)(X), (1.0 + RBF(2.0))(X)) # Check multiplication assert_almost_equal((3.0 * RBF(2.0))(X), (RBF(2.0) * 3.0)(X)) def test_kernel_anisotropic(): # Anisotropic kernel should be consistent with isotropic kernels. 
kernel = 3.0 * RBF([0.5, 2.0]) K = kernel(X) X1 = X.copy() X1[:, 0] *= 4 K1 = 3.0 * RBF(2.0)(X1) assert_almost_equal(K, K1) X2 = X.copy() X2[:, 1] /= 4 K2 = 3.0 * RBF(0.5)(X2) assert_almost_equal(K, K2) # Check getting and setting via theta kernel.theta = kernel.theta + np.log(2) assert_array_equal(kernel.theta, np.log([6.0, 1.0, 4.0])) assert_array_equal(kernel.k2.length_scale, [1.0, 4.0]) @pytest.mark.parametrize( "kernel", [kernel for kernel in kernels if kernel.is_stationary()] ) def test_kernel_stationary(kernel): kernel = clone(kernel) # make tests independent of one-another # Test stationarity of kernels. K = kernel(X, X + 1) assert_almost_equal(K[0, 0], np.diag(K)) @pytest.mark.parametrize("kernel", kernels) def test_kernel_input_type(kernel): kernel = clone(kernel) # make tests independent of one-another # Test whether kernels is for vectors or structured data if isinstance(kernel, Exponentiation): assert kernel.requires_vector_input == kernel.kernel.requires_vector_input if isinstance(kernel, KernelOperator): assert kernel.requires_vector_input == ( kernel.k1.requires_vector_input or kernel.k2.requires_vector_input ) def test_compound_kernel_input_type(): kernel = CompoundKernel([WhiteKernel(noise_level=3.0)]) assert not kernel.requires_vector_input kernel = CompoundKernel([WhiteKernel(noise_level=3.0), RBF(length_scale=2.0)]) assert kernel.requires_vector_input def check_hyperparameters_equal(kernel1, kernel2): # Check that hyperparameters of two kernels are equal for attr in set(dir(kernel1) + dir(kernel2)): if attr.startswith("hyperparameter_"): attr_value1 = getattr(kernel1, attr) attr_value2 = getattr(kernel2, attr) assert attr_value1 == attr_value2 @pytest.mark.parametrize("kernel", kernels) def test_kernel_clone(kernel): kernel = clone(kernel) # make tests independent of one-another # Test that sklearn's clone works correctly on kernels. kernel_cloned = clone(kernel) # XXX: Should this be fixed? # This differs from the sklearn's estimators equality check. assert kernel == kernel_cloned assert id(kernel) != id(kernel_cloned) # Check that all constructor parameters are equal. assert kernel.get_params() == kernel_cloned.get_params() # Check that all hyperparameters are equal. check_hyperparameters_equal(kernel, kernel_cloned) @pytest.mark.parametrize("kernel", kernels) def test_kernel_clone_after_set_params(kernel): kernel = clone(kernel) # make tests independent of one-another # This test is to verify that using set_params does not # break clone on kernels. # This used to break because in kernels such as the RBF, non-trivial # logic that modified the length scale used to be in the constructor # See https://github.com/scikit-learn/scikit-learn/issues/6961 # for more details. bounds = (1e-5, 1e5) kernel_cloned = clone(kernel) params = kernel.get_params() # RationalQuadratic kernel is isotropic. 
isotropic_kernels = (ExpSineSquared, RationalQuadratic) if "length_scale" in params and not isinstance(kernel, isotropic_kernels): length_scale = params["length_scale"] if np.iterable(length_scale): # XXX unreached code as of v0.22 params["length_scale"] = length_scale[0] params["length_scale_bounds"] = bounds else: params["length_scale"] = [length_scale] * 2 params["length_scale_bounds"] = bounds * 2 kernel_cloned.set_params(**params) kernel_cloned_clone = clone(kernel_cloned) assert kernel_cloned_clone.get_params() == kernel_cloned.get_params() assert id(kernel_cloned_clone) != id(kernel_cloned) check_hyperparameters_equal(kernel_cloned, kernel_cloned_clone) def test_matern_kernel(): # Test consistency of Matern kernel for special values of nu. K = Matern(nu=1.5, length_scale=1.0)(X) # the diagonal elements of a matern kernel are 1 assert_array_almost_equal(np.diag(K), np.ones(X.shape[0])) # matern kernel for coef0==0.5 is equal to absolute exponential kernel K_absexp = np.exp(-euclidean_distances(X, X, squared=False)) K = Matern(nu=0.5, length_scale=1.0)(X) assert_array_almost_equal(K, K_absexp) # matern kernel with coef0==inf is equal to RBF kernel K_rbf = RBF(length_scale=1.0)(X) K = Matern(nu=np.inf, length_scale=1.0)(X) assert_array_almost_equal(K, K_rbf) assert_allclose(K, K_rbf) # test that special cases of matern kernel (coef0 in [0.5, 1.5, 2.5]) # result in nearly identical results as the general case for coef0 in # [0.5 + tiny, 1.5 + tiny, 2.5 + tiny] tiny = 1e-10 for nu in [0.5, 1.5, 2.5]: K1 = Matern(nu=nu, length_scale=1.0)(X) K2 = Matern(nu=nu + tiny, length_scale=1.0)(X) assert_array_almost_equal(K1, K2) # test that coef0==large is close to RBF large = 100 K1 = Matern(nu=large, length_scale=1.0)(X) K2 = RBF(length_scale=1.0)(X) assert_array_almost_equal(K1, K2, decimal=2) @pytest.mark.parametrize("kernel", kernels) def test_kernel_versus_pairwise(kernel): kernel = clone(kernel) # make tests independent of one-another # Check that GP kernels can also be used as pairwise kernels. # Test auto-kernel if kernel != kernel_rbf_plus_white: # For WhiteKernel: k(X) != k(X,X). This is assumed by # pairwise_kernels K1 = kernel(X) K2 = pairwise_kernels(X, metric=kernel) assert_array_almost_equal(K1, K2) # Test cross-kernel K1 = kernel(X, Y) K2 = pairwise_kernels(X, Y, metric=kernel) assert_array_almost_equal(K1, K2) @pytest.mark.parametrize("kernel", kernels) def test_set_get_params(kernel): kernel = clone(kernel) # make tests independent of one-another # Check that set_params()/get_params() is consistent with kernel.theta. 
    # Test get_params()
    index = 0
    params = kernel.get_params()
    for hyperparameter in kernel.hyperparameters:
        if isinstance("string", type(hyperparameter.bounds)):
            if hyperparameter.bounds == "fixed":
                continue
        size = hyperparameter.n_elements
        if size > 1:  # anisotropic kernels
            assert_almost_equal(
                np.exp(kernel.theta[index : index + size]), params[hyperparameter.name]
            )
            index += size
        else:
            assert_almost_equal(
                np.exp(kernel.theta[index]), params[hyperparameter.name]
            )
            index += 1

    # Test set_params()
    index = 0
    value = 10  # arbitrary value
    for hyperparameter in kernel.hyperparameters:
        if isinstance("string", type(hyperparameter.bounds)):
            if hyperparameter.bounds == "fixed":
                continue
        size = hyperparameter.n_elements
        if size > 1:  # anisotropic kernels
            kernel.set_params(**{hyperparameter.name: [value] * size})
            assert_almost_equal(
                np.exp(kernel.theta[index : index + size]), [value] * size
            )
            index += size
        else:
            kernel.set_params(**{hyperparameter.name: value})
            assert_almost_equal(np.exp(kernel.theta[index]), value)
            index += 1


@pytest.mark.parametrize("kernel", kernels)
def test_repr_kernels(kernel):
    kernel = clone(kernel)  # make tests independent of one-another
    # Smoke-test for repr in kernels.
    repr(kernel)


def test_rational_quadratic_kernel():
    kernel = RationalQuadratic(length_scale=[1.0, 1.0])
    message = (
        "RationalQuadratic kernel only supports isotropic "
        "version, please use a single "
        "scalar for length_scale"
    )
    with pytest.raises(AttributeError, match=message):
        kernel(X)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
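One identity the test file above relies on (test_kernel_anisotropic), written out as a standalone, illustrative check; X_demo is a made-up name and any small random dataset would do.

import numpy as np
from sklearn.gaussian_process.kernels import RBF

rng = np.random.RandomState(0)
X_demo = rng.normal(size=(5, 2))

# Anisotropic RBF with per-feature length scales (0.5, 2.0).
K_aniso = (3.0 * RBF(length_scale=[0.5, 2.0]))(X_demo)

# Rescaling the first feature by 4 turns its effective length scale from 0.5
# into 2.0, so an isotropic RBF reproduces the same kernel matrix.
X_scaled = X_demo.copy()
X_scaled[:, 0] *= 4
K_iso = (3.0 * RBF(length_scale=2.0))(X_scaled)

assert np.allclose(K_aniso, K_iso)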
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/gaussian_process/tests/test_gpc.py
sklearn/gaussian_process/tests/test_gpc.py
"""Testing for Gaussian process classification""" # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause import warnings import numpy as np import pytest from scipy.optimize import approx_fprime from sklearn.exceptions import ConvergenceWarning from sklearn.gaussian_process import GaussianProcessClassifier from sklearn.gaussian_process.kernels import ( RBF, CompoundKernel, WhiteKernel, ) from sklearn.gaussian_process.kernels import ( ConstantKernel as C, ) from sklearn.gaussian_process.tests._mini_sequence_kernel import MiniSeqKernel from sklearn.utils._testing import assert_almost_equal, assert_array_equal def f(x): return np.sin(x) X = np.atleast_2d(np.linspace(0, 10, 30)).T X2 = np.atleast_2d([2.0, 4.0, 5.5, 6.5, 7.5]).T y = np.array(f(X).ravel() > 0, dtype=int) fX = f(X).ravel() y_mc = np.empty(y.shape, dtype=int) # multi-class y_mc[fX < -0.35] = 0 y_mc[(fX >= -0.35) & (fX < 0.35)] = 1 y_mc[fX > 0.35] = 2 fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed") kernels = [ RBF(length_scale=0.1), fixed_kernel, RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)), C(1.0, (1e-2, 1e2)) * RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)), ] non_fixed_kernels = [kernel for kernel in kernels if kernel != fixed_kernel] @pytest.mark.parametrize("kernel", kernels) def test_predict_consistent(kernel): # Check binary predict decision has also predicted probability above 0.5. gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y) assert_array_equal(gpc.predict(X), gpc.predict_proba(X)[:, 1] >= 0.5) def test_predict_consistent_structured(): # Check binary predict decision has also predicted probability above 0.5. X = ["A", "AB", "B"] y = np.array([True, False, True]) kernel = MiniSeqKernel(baseline_similarity_bounds="fixed") gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y) assert_array_equal(gpc.predict(X), gpc.predict_proba(X)[:, 1] >= 0.5) @pytest.mark.parametrize("kernel", non_fixed_kernels) def test_lml_improving(kernel): # Test that hyperparameter-tuning improves log-marginal likelihood. gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y) assert gpc.log_marginal_likelihood(gpc.kernel_.theta) > gpc.log_marginal_likelihood( kernel.theta ) @pytest.mark.parametrize("kernel", kernels) def test_lml_precomputed(kernel): # Test that lml of optimized kernel is stored correctly. gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y) assert_almost_equal( gpc.log_marginal_likelihood(gpc.kernel_.theta), gpc.log_marginal_likelihood(), 7 ) @pytest.mark.parametrize("kernel", kernels) def test_lml_without_cloning_kernel(kernel): # Test that clone_kernel=False has side-effects of kernel.theta. gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y) input_theta = np.ones(gpc.kernel_.theta.shape, dtype=np.float64) gpc.log_marginal_likelihood(input_theta, clone_kernel=False) assert_almost_equal(gpc.kernel_.theta, input_theta, 7) @pytest.mark.parametrize("kernel", non_fixed_kernels) def test_converged_to_local_maximum(kernel): # Test that we are in local maximum after hyperparameter-optimization. gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y) lml, lml_gradient = gpc.log_marginal_likelihood(gpc.kernel_.theta, True) assert np.all( (np.abs(lml_gradient) < 1e-4) | (gpc.kernel_.theta == gpc.kernel_.bounds[:, 0]) | (gpc.kernel_.theta == gpc.kernel_.bounds[:, 1]) ) @pytest.mark.parametrize("kernel", kernels) def test_lml_gradient(kernel): # Compare analytic and numeric gradient of log marginal likelihood. 
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y) lml, lml_gradient = gpc.log_marginal_likelihood(kernel.theta, True) lml_gradient_approx = approx_fprime( kernel.theta, lambda theta: gpc.log_marginal_likelihood(theta, False), 1e-10 ) assert_almost_equal(lml_gradient, lml_gradient_approx, 3) def test_random_starts(global_random_seed): # Test that an increasing number of random-starts of GP fitting only # increases the log marginal likelihood of the chosen theta. n_samples, n_features = 25, 2 rng = np.random.RandomState(global_random_seed) X = rng.randn(n_samples, n_features) * 2 - 1 y = (np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)) > 0 kernel = C(1.0, (1e-2, 1e2)) * RBF( length_scale=[1e-3] * n_features, length_scale_bounds=[(1e-4, 1e2)] * n_features ) last_lml = -np.inf for n_restarts_optimizer in range(5): gp = GaussianProcessClassifier( kernel=kernel, n_restarts_optimizer=n_restarts_optimizer, random_state=global_random_seed, ).fit(X, y) lml = gp.log_marginal_likelihood(gp.kernel_.theta) assert lml > last_lml - np.finfo(np.float32).eps last_lml = lml @pytest.mark.parametrize("kernel", non_fixed_kernels) def test_custom_optimizer(kernel, global_random_seed): # Test that GPC can use externally defined optimizers. # Define a dummy optimizer that simply tests 10 random hyperparameters def optimizer(obj_func, initial_theta, bounds): rng = np.random.RandomState(global_random_seed) theta_opt, func_min = ( initial_theta, obj_func(initial_theta, eval_gradient=False), ) for _ in range(10): theta = np.atleast_1d( rng.uniform(np.maximum(-2, bounds[:, 0]), np.minimum(1, bounds[:, 1])) ) f = obj_func(theta, eval_gradient=False) if f < func_min: theta_opt, func_min = theta, f return theta_opt, func_min gpc = GaussianProcessClassifier(kernel=kernel, optimizer=optimizer) gpc.fit(X, y_mc) # Checks that optimizer improved marginal likelihood assert gpc.log_marginal_likelihood( gpc.kernel_.theta ) >= gpc.log_marginal_likelihood(kernel.theta) @pytest.mark.parametrize("kernel", kernels) def test_multi_class(kernel): # Test GPC for multi-class classification problems. gpc = GaussianProcessClassifier(kernel=kernel) gpc.fit(X, y_mc) y_prob = gpc.predict_proba(X2) assert_almost_equal(y_prob.sum(1), 1) y_pred = gpc.predict(X2) assert_array_equal(np.argmax(y_prob, 1), y_pred) @pytest.mark.parametrize("kernel", kernels) def test_multi_class_n_jobs(kernel): # Test that multi-class GPC produces identical results with n_jobs>1. gpc = GaussianProcessClassifier(kernel=kernel) gpc.fit(X, y_mc) gpc_2 = GaussianProcessClassifier(kernel=kernel, n_jobs=2) gpc_2.fit(X, y_mc) y_prob = gpc.predict_proba(X2) y_prob_2 = gpc_2.predict_proba(X2) assert_almost_equal(y_prob, y_prob_2) def test_warning_bounds(): kernel = RBF(length_scale_bounds=[1e-5, 1e-3]) gpc = GaussianProcessClassifier(kernel=kernel) warning_message = ( "The optimal value found for dimension 0 of parameter " "length_scale is close to the specified upper bound " "0.001. Increasing the bound and calling fit again may " "find a better value." 
) with pytest.warns(ConvergenceWarning, match=warning_message): gpc.fit(X, y) kernel_sum = WhiteKernel(noise_level_bounds=[1e-5, 1e-3]) + RBF( length_scale_bounds=[1e3, 1e5] ) gpc_sum = GaussianProcessClassifier(kernel=kernel_sum) with warnings.catch_warnings(record=True) as record: warnings.simplefilter("always") gpc_sum.fit(X, y) assert len(record) == 2 assert issubclass(record[0].category, ConvergenceWarning) assert ( record[0].message.args[0] == "The optimal value found for " "dimension 0 of parameter " "k1__noise_level is close to the " "specified upper bound 0.001. " "Increasing the bound and calling " "fit again may find a better value." ) assert issubclass(record[1].category, ConvergenceWarning) assert ( record[1].message.args[0] == "The optimal value found for " "dimension 0 of parameter " "k2__length_scale is close to the " "specified lower bound 1000.0. " "Decreasing the bound and calling " "fit again may find a better value." ) X_tile = np.tile(X, 2) kernel_dims = RBF(length_scale=[1.0, 2.0], length_scale_bounds=[1e1, 1e2]) gpc_dims = GaussianProcessClassifier(kernel=kernel_dims) with warnings.catch_warnings(record=True) as record: warnings.simplefilter("always") gpc_dims.fit(X_tile, y) assert len(record) == 2 assert issubclass(record[0].category, ConvergenceWarning) assert ( record[0].message.args[0] == "The optimal value found for " "dimension 0 of parameter " "length_scale is close to the " "specified upper bound 100.0. " "Increasing the bound and calling " "fit again may find a better value." ) assert issubclass(record[1].category, ConvergenceWarning) assert ( record[1].message.args[0] == "The optimal value found for " "dimension 1 of parameter " "length_scale is close to the " "specified upper bound 100.0. " "Increasing the bound and calling " "fit again may find a better value." ) @pytest.mark.parametrize( "params, error_type, err_msg", [ ( {"kernel": CompoundKernel(0)}, ValueError, "kernel cannot be a CompoundKernel", ) ], ) def test_gpc_fit_error(params, error_type, err_msg): """Check that expected error are raised during fit.""" gpc = GaussianProcessClassifier(**params) with pytest.raises(error_type, match=err_msg): gpc.fit(X, y) @pytest.mark.parametrize("kernel", kernels) def test_gpc_latent_mean_and_variance_shape(kernel): """Checks that the latent mean and variance have the right shape.""" gpc = GaussianProcessClassifier(kernel=kernel) gpc.fit(X, y) # Check that the latent mean and variance have the right shape latent_mean, latent_variance = gpc.latent_mean_and_variance(X) assert latent_mean.shape == (X.shape[0],) assert latent_variance.shape == (X.shape[0],) def test_gpc_latent_mean_and_variance_complain_on_more_than_2_classes(): """Checks that the latent mean and variance have the right shape.""" gpc = GaussianProcessClassifier(kernel=RBF()) gpc.fit(X, y_mc) # Check that the latent mean and variance have the right shape with pytest.raises( ValueError, match="Returning the mean and variance of the latent function f " "is only supported for binary classification", ): gpc.latent_mean_and_variance(X) def test_latent_mean_and_variance_works_on_structured_kernels(): X = ["A", "AB", "B"] y = np.array([True, False, True]) kernel = MiniSeqKernel(baseline_similarity_bounds="fixed") gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y) gpc.latent_mean_and_variance(X)
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
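A minimal end-to-end run of GaussianProcessClassifier of the kind the tests above exercise; illustrative only, with made-up toy data (X_demo, y_demo).

import numpy as np
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF

X_demo = np.linspace(0, 10, 30).reshape(-1, 1)
y_demo = (np.sin(X_demo).ravel() > 0).astype(int)

gpc = GaussianProcessClassifier(kernel=RBF(length_scale=1.0), random_state=0)
gpc.fit(X_demo, y_demo)

# Hard predictions agree with thresholding predict_proba at 0.5, which is
# what test_predict_consistent asserts.
proba = gpc.predict_proba(X_demo)
assert np.array_equal(gpc.predict(X_demo), (proba[:, 1] >= 0.5).astype(int))

# After fitting, kernel_ carries the optimized hyperparameters and
# log_marginal_likelihood() reports the LML at that optimum.
print(gpc.kernel_)
print(gpc.log_marginal_likelihood())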
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/gaussian_process/tests/_mini_sequence_kernel.py
sklearn/gaussian_process/tests/_mini_sequence_kernel.py
import numpy as np

from sklearn.base import clone
from sklearn.gaussian_process.kernels import (
    GenericKernelMixin,
    Hyperparameter,
    Kernel,
    StationaryKernelMixin,
)


class MiniSeqKernel(GenericKernelMixin, StationaryKernelMixin, Kernel):
    """
    A minimal (but valid) convolutional kernel for sequences of variable
    length.
    """

    def __init__(self, baseline_similarity=0.5, baseline_similarity_bounds=(1e-5, 1)):
        self.baseline_similarity = baseline_similarity
        self.baseline_similarity_bounds = baseline_similarity_bounds

    @property
    def hyperparameter_baseline_similarity(self):
        return Hyperparameter(
            "baseline_similarity", "numeric", self.baseline_similarity_bounds
        )

    def _f(self, s1, s2):
        return sum(
            [1.0 if c1 == c2 else self.baseline_similarity for c1 in s1 for c2 in s2]
        )

    def _g(self, s1, s2):
        return sum([0.0 if c1 == c2 else 1.0 for c1 in s1 for c2 in s2])

    def __call__(self, X, Y=None, eval_gradient=False):
        if Y is None:
            Y = X

        if eval_gradient:
            return (
                np.array([[self._f(x, y) for y in Y] for x in X]),
                np.array([[[self._g(x, y)] for y in Y] for x in X]),
            )
        else:
            return np.array([[self._f(x, y) for y in Y] for x in X])

    def diag(self, X):
        return np.array([self._f(x, x) for x in X])

    def clone_with_theta(self, theta):
        cloned = clone(self)
        cloned.theta = theta
        return cloned
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false
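MiniSeqKernel above is a test-only helper, but it illustrates how a GenericKernelMixin kernel handles non-vector inputs. Below is a sketch of its use; the import path assumes a scikit-learn source checkout, and in practice one would copy the class into their own module.

import numpy as np
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.tests._mini_sequence_kernel import MiniSeqKernel

X_seq = ["A", "AB", "B"]                  # variable-length "sequences"
y_seq = np.array([True, False, True])

kernel = MiniSeqKernel(baseline_similarity_bounds="fixed")
gpc = GaussianProcessClassifier(kernel=kernel).fit(X_seq, y_seq)

# Because the kernel reports requires_vector_input=False, the estimator
# accepts the list of strings directly instead of a 2-D numeric array.
print(gpc.predict(X_seq))
print(gpc.predict_proba(X_seq))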
scikit-learn/scikit-learn
https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/sklearn/gaussian_process/tests/__init__.py
sklearn/gaussian_process/tests/__init__.py
python
BSD-3-Clause
6dce55ebff962076625db46ab70b6b1c939f423b
2026-01-04T14:38:25.175347Z
false