repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
AntonOsika/gpt-engineer | https://github.com/AntonOsika/gpt-engineer/blob/a90fcd543eedcc0ff2c34561bc0785d2ba83c47e/gpt_engineer/core/linting.py | gpt_engineer/core/linting.py | import black
from gpt_engineer.core.files_dict import FilesDict
class Linting:
    """Dispatch file contents to per-extension linting/formatting functions."""

    def __init__(self):
        # Dictionary to hold linting methods for different file types.
        self.linters = {".py": self.lint_python}

    def lint_python(self, content, config):
        """Lint Python files using the `black` library, handling all exceptions silently and logging them.

        This function attempts to format the code and returns the formatted code if successful.
        If any error occurs during formatting, it logs the error and returns the original content.
        """
        try:
            # Try to format the content using black.
            linted_content = black.format_str(content, mode=black.FileMode(**config))
        except black.NothingChanged:
            # Nothing to reformat; keep the original content.
            print("\nInfo: No changes were made during formatting.\n")
            linted_content = content
        except Exception as error:
            # Any other failure (e.g. a syntax error in the content) is
            # logged and the original content is returned unchanged.
            print(f"\nError: Could not format due to {error}\n")
            linted_content = content
        return linted_content

    def lint_files(self, files_dict: FilesDict, config: dict = None) -> FilesDict:
        """
        Lints files based on their extension using registered linting functions.

        Parameters
        ----------
        files_dict : FilesDict
            The dictionary of file names to their respective source code content.
        config : dict, optional
            A dictionary of configuration options for the linting tools.

        Returns
        -------
        FilesDict
            The dictionary of file names to their respective source code content after linting.
        """
        if config is None:
            config = {}
        for filename, content in files_dict.items():
            # Lowercase the extension to make the lookup case-insensitive.
            extension = filename[filename.rfind("."):].lower()
            if extension in self.linters:
                original_content = content
                linted_content = self.linters[extension](content, config)
                if linted_content != original_content:
                    print(f"Linted {filename}.")
                else:
                    print(f"No changes made for {filename}.")
                files_dict[filename] = linted_content
            else:
                print(f"No linter registered for {filename}.")
        return files_dict
| python | MIT | a90fcd543eedcc0ff2c34561bc0785d2ba83c47e | 2026-01-04T14:39:15.137338Z | false |
AntonOsika/gpt-engineer | https://github.com/AntonOsika/gpt-engineer/blob/a90fcd543eedcc0ff2c34561bc0785d2ba83c47e/gpt_engineer/core/prompt.py | gpt_engineer/core/prompt.py | import json
from typing import Dict, Optional
class Prompt:
    """A user request plus optional image attachments and an entrypoint prompt."""

    def __init__(
        self,
        text: str,
        image_urls: Optional[Dict[str, str]] = None,
        entrypoint_prompt: str = "",
    ):
        # `text` carries the main request; `image_urls` maps a display
        # name to each image URL when vision input is attached.
        self.text = text
        self.image_urls = image_urls
        self.entrypoint_prompt = entrypoint_prompt

    def __repr__(self):
        return f"Prompt(text={self.text!r}, image_urls={self.image_urls!r})"

    def to_langchain_content(self):
        """Return the prompt as a LangChain content list: text part first, then images."""
        parts = [{"type": "text", "text": f"Request: {self.text}"}]
        for _name, url in (self.image_urls or {}).items():
            parts.append(
                {
                    "type": "image_url",
                    "image_url": {
                        "url": url,
                        "detail": "low",
                    },
                }
            )
        return parts

    def to_dict(self):
        """Return a plain-dict representation of this prompt."""
        return {
            "text": self.text,
            "image_urls": self.image_urls,
            "entrypoint_prompt": self.entrypoint_prompt,
        }

    def to_json(self):
        """Serialize this prompt to a JSON string."""
        return json.dumps(self.to_dict())
| python | MIT | a90fcd543eedcc0ff2c34561bc0785d2ba83c47e | 2026-01-04T14:39:15.137338Z | false |
AntonOsika/gpt-engineer | https://github.com/AntonOsika/gpt-engineer/blob/a90fcd543eedcc0ff2c34561bc0785d2ba83c47e/gpt_engineer/core/__init__.py | gpt_engineer/core/__init__.py | python | MIT | a90fcd543eedcc0ff2c34561bc0785d2ba83c47e | 2026-01-04T14:39:15.137338Z | false | |
AntonOsika/gpt-engineer | https://github.com/AntonOsika/gpt-engineer/blob/a90fcd543eedcc0ff2c34561bc0785d2ba83c47e/gpt_engineer/core/files_dict.py | gpt_engineer/core/files_dict.py | """
FilesDict Module
This module provides a FilesDict class which is a dictionary-based container for managing code files.
It extends the standard dictionary to enforce string keys and values, representing filenames and their
corresponding code content. It also provides methods to format its contents for chat-based interaction
with an AI agent and to enforce type checks on keys and values.
Classes:
FilesDict: A dictionary-based container for managing code files.
"""
from collections import OrderedDict
from pathlib import Path
from typing import Union
# class Code(MutableMapping[str | Path, str]):
# ToDo: implement as mutable mapping, potentially holding a dict instead of being a dict.
class FilesDict(dict):
    """
    A dictionary-based container for managing code files.

    Extends ``dict`` to enforce that keys are filenames (``str`` or ``Path``)
    and values are strings of code content, and offers helpers to render the
    stored files for chat-based interaction with an AI agent or for logging.
    """

    def __setitem__(self, key: Union[str, Path], value: str):
        """
        Store code content under a filename, validating both key and value.

        Parameters
        ----------
        key : Union[str, Path]
            The filename used as the key for the code content.
        value : str
            The code content to associate with the filename.

        Raises
        ------
        TypeError
            If the key is not a string or Path, or if the value is not a string.
        """
        # Guard clauses keep the type contract explicit before delegating.
        if not isinstance(key, (str, Path)):
            raise TypeError("Keys must be strings or Path's")
        if not isinstance(value, str):
            raise TypeError("Values must be strings")
        super().__setitem__(key, value)

    def to_chat(self):
        """
        Render all files, with 1-based line numbers, inside one fenced block
        suitable for chat display.

        Returns
        -------
        str
            A string representation of the files.
        """
        sections = []
        for name, content in self.items():
            numbered = file_to_lines_dict(content)
            body = "".join(f"{no} {line}\n" for no, line in numbered.items())
            sections.append(f"File: {name}\n{body}\n")
        return "```\n" + "".join(sections) + "```"

    def to_log(self):
        """
        Render all files as plain text suitable for log output.

        Returns
        -------
        str
            A string representation of the files.
        """
        return "".join(
            f"File: {name}\n{content}\n" for name, content in self.items()
        )
def file_to_lines_dict(file_content: str) -> dict:
    """
    Convert file content into an ordered mapping of line number to line content.

    Line numbers start at 1. An empty string yields a single empty line,
    mirroring the behavior of ``"".split("\\n")``.

    Parameters
    ----------
    file_content : str
        The content of the file.

    Returns
    -------
    dict
        An ``OrderedDict`` mapping 1-based line numbers to line contents.
    """
    # enumerate() already yields (line_number, line) pairs, so it can feed
    # OrderedDict directly without an intermediate dict comprehension.
    return OrderedDict(enumerate(file_content.split("\n"), 1))
| python | MIT | a90fcd543eedcc0ff2c34561bc0785d2ba83c47e | 2026-01-04T14:39:15.137338Z | false |
AntonOsika/gpt-engineer | https://github.com/AntonOsika/gpt-engineer/blob/a90fcd543eedcc0ff2c34561bc0785d2ba83c47e/gpt_engineer/core/ai.py | gpt_engineer/core/ai.py | """
AI Module
This module provides an AI class that interfaces with language models to perform various tasks such as
starting a conversation, advancing the conversation, and handling message serialization. It also includes
backoff strategies for handling rate limit errors from the OpenAI API.
Classes:
AI: A class that interfaces with language models for conversation management and message serialization.
Functions:
serialize_messages(messages: List[Message]) -> str
Serialize a list of messages to a JSON string.
"""
from __future__ import annotations
import json
import logging
import os
from pathlib import Path
from typing import Any, List, Optional, Union
import backoff
import openai
import pyperclip
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chat_models.base import BaseChatModel
from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage,
messages_from_dict,
messages_to_dict,
)
from langchain_anthropic import ChatAnthropic
from langchain_openai import AzureChatOpenAI, ChatOpenAI
from gpt_engineer.core.token_usage import TokenUsageLog
# Type hint for a chat message
Message = Union[AIMessage, HumanMessage, SystemMessage]
# Set up logging
logger = logging.getLogger(__name__)
class AI:
    """
    A class that interfaces with language models for conversation management and message serialization.

    This class provides methods to start and advance conversations, handle message serialization,
    and implement backoff strategies for rate limit errors when interacting with the OpenAI API.

    Attributes
    ----------
    temperature : float
        The temperature setting for the language model.
    azure_endpoint : str
        The endpoint URL for the Azure-hosted language model.
    model_name : str
        The name of the language model to use.
    streaming : bool
        A flag indicating whether to use streaming for the language model.
    llm : BaseChatModel
        The language model instance for conversation management.
    token_usage_log : TokenUsageLog
        A log for tracking token usage during conversations.

    Methods
    -------
    start(system: str, user: str, step_name: str) -> List[Message]
        Start the conversation with a system message and a user message.
    next(messages: List[Message], prompt: Optional[str], step_name: str) -> List[Message]
        Advances the conversation by sending message history to LLM and updating with the response.
    backoff_inference(messages: List[Message]) -> Any
        Perform inference using the language model with an exponential backoff strategy.
    serialize_messages(messages: List[Message]) -> str
        Serialize a list of messages to a JSON string.
    deserialize_messages(jsondictstr: str) -> List[Message]
        Deserialize a JSON string to a list of messages.
    _create_chat_model() -> BaseChatModel
        Create a chat model with the specified model name and temperature.
    """

    def __init__(
        self,
        model_name="gpt-4-turbo",
        temperature=0.1,
        azure_endpoint=None,
        streaming=True,
        vision=False,
    ):
        """
        Initialize the AI class.

        Parameters
        ----------
        model_name : str, optional
            The name of the model to use, by default "gpt-4-turbo".
        temperature : float, optional
            The temperature to use for the model, by default 0.1.
        azure_endpoint : str, optional
            Endpoint URL of an Azure-hosted deployment; when set, an Azure
            client is created instead of a plain OpenAI one.
        streaming : bool, optional
            Whether to stream tokens to stdout as they are generated.
        vision : bool, optional
            Accepted for interface compatibility but ignored: vision support
            is derived from ``model_name`` below.
        """
        self.temperature = temperature
        self.azure_endpoint = azure_endpoint
        self.model_name = model_name
        self.streaming = streaming
        # Vision support is inferred from the model name: gpt-4 vision
        # previews, non-preview gpt-4-turbo, and all Claude models.
        self.vision = (
            ("vision-preview" in model_name)
            or ("gpt-4-turbo" in model_name and "preview" not in model_name)
            or ("claude" in model_name)
        )
        self.llm = self._create_chat_model()
        self.token_usage_log = TokenUsageLog(model_name)
        logger.debug(f"Using model {self.model_name}")

    def start(self, system: str, user: Any, *, step_name: str) -> List[Message]:
        """
        Start the conversation with a system message and a user message.

        Parameters
        ----------
        system : str
            The content of the system message.
        user : Any
            The content of the user message (a plain string, or a LangChain
            content list when vision input is used).
        step_name : str
            The name of the step.

        Returns
        -------
        List[Message]
            The list of messages in the conversation.
        """
        messages: List[Message] = [
            SystemMessage(content=system),
            HumanMessage(content=user),
        ]
        return self.next(messages, step_name=step_name)

    def _extract_content(self, content):
        """
        Extracts text content from a message, supporting both string and list types.

        Parameters
        ----------
        content : Union[str, List[dict]]
            The content of a message, which could be a string or a list.

        Returns
        -------
        str
            The extracted text content; an empty string if no text is found.
        """
        if isinstance(content, str):
            return content
        elif isinstance(content, list) and content and "text" in content[0]:
            # Assuming the structure of list content is [{'type': 'text', 'text': 'Some text'}, ...]
            return content[0]["text"]
        else:
            return ""

    def _collapse_text_messages(self, messages: List[Message]):
        """
        Combine consecutive messages of the same type into a single message, where if the message content
        is a list type, the first text element's content is taken. This method keeps `combined_content` as a string.

        This method iterates through the list of messages, combining consecutive messages of the same type
        by joining their content with a newline character. If the content is a list, it extracts text from the first
        text element's content. This reduces the number of messages and simplifies the conversation for processing.

        Parameters
        ----------
        messages : List[Message]
            The list of messages to collapse.

        Returns
        -------
        List[Message]
            The list of messages after collapsing consecutive messages of the same type.
        """
        collapsed_messages = []
        if not messages:
            return collapsed_messages
        previous_message = messages[0]
        combined_content = self._extract_content(previous_message.content)
        for current_message in messages[1:]:
            if current_message.type == previous_message.type:
                # Same role as the previous message: merge the text.
                combined_content += "\n\n" + self._extract_content(
                    current_message.content
                )
            else:
                # Role changed: flush the accumulated message and restart.
                collapsed_messages.append(
                    previous_message.__class__(content=combined_content)
                )
                previous_message = current_message
                combined_content = self._extract_content(current_message.content)
        # Flush the final accumulated message.
        collapsed_messages.append(previous_message.__class__(content=combined_content))
        return collapsed_messages

    def next(
        self,
        messages: List[Message],
        prompt: Optional[str] = None,
        *,
        step_name: str,
    ) -> List[Message]:
        """
        Advances the conversation by sending message history
        to LLM and updating with the response.

        Parameters
        ----------
        messages : List[Message]
            The list of messages in the conversation.
        prompt : Optional[str], optional
            The prompt to use, by default None.
        step_name : str
            The name of the step.

        Returns
        -------
        List[Message]
            The updated list of messages in the conversation.
        """
        if prompt:
            messages.append(HumanMessage(content=prompt))
        logger.debug(
            "Creating a new chat completion: %s",
            "\n".join([m.pretty_repr() for m in messages]),
        )
        # Text-only models get consecutive same-role messages merged first.
        if not self.vision:
            messages = self._collapse_text_messages(messages)
        response = self.backoff_inference(messages)
        self.token_usage_log.update_log(
            messages=messages, answer=response.content, step_name=step_name
        )
        messages.append(response)
        logger.debug(f"Chat completion finished: {messages}")
        return messages

    @backoff.on_exception(backoff.expo, openai.RateLimitError, max_tries=7, max_time=45)
    def backoff_inference(self, messages):
        """
        Perform inference using the language model while implementing an exponential backoff strategy.

        This function will retry the inference in case of a rate limit error from the OpenAI API.
        It uses an exponential backoff strategy, meaning the wait time between retries increases
        exponentially. The function will attempt to retry up to 7 times within a span of 45 seconds.

        Parameters
        ----------
        messages : List[Message]
            A list of chat messages which will be passed to the language model for processing.

        Returns
        -------
        Any
            The output from the language model after processing the provided messages.

        Raises
        ------
        openai.RateLimitError
            If the number of retries exceeds the maximum or if the rate limit persists beyond the
            allotted time, the function will ultimately raise a RateLimitError.

        Example
        -------
        >>> messages = [SystemMessage(content="Hello"), HumanMessage(content="How's the weather?")]
        >>> response = backoff_inference(messages)
        """
        return self.llm.invoke(messages)  # type: ignore

    @staticmethod
    def serialize_messages(messages: List[Message]) -> str:
        """
        Serialize a list of messages to a JSON string.

        Parameters
        ----------
        messages : List[Message]
            The list of messages to serialize.

        Returns
        -------
        str
            The serialized messages as a JSON string.
        """
        return json.dumps(messages_to_dict(messages))

    @staticmethod
    def deserialize_messages(jsondictstr: str) -> List[Message]:
        """
        Deserialize a JSON string to a list of messages.

        Parameters
        ----------
        jsondictstr : str
            The JSON string to deserialize.

        Returns
        -------
        List[Message]
            The deserialized list of messages.
        """
        data = json.loads(jsondictstr)
        # Modify implicit is_chunk property to ALWAYS false
        # since Langchain's Message schema is stricter
        prevalidated_data = [
            {**item, "tools": {**item.get("tools", {}), "is_chunk": False}}
            for item in data
        ]
        return list(messages_from_dict(prevalidated_data))  # type: ignore

    def _create_chat_model(self) -> BaseChatModel:
        """
        Create a chat model from this instance's configuration
        (``model_name``, ``temperature``, ``azure_endpoint``, ``streaming``).

        Selection order: Azure endpoint if configured, then Anthropic for
        Claude models, then a vision-configured OpenAI client, then plain
        OpenAI.

        Returns
        -------
        BaseChatModel
            The created chat model.
        """
        if self.azure_endpoint:
            return AzureChatOpenAI(
                azure_endpoint=self.azure_endpoint,
                openai_api_version=os.getenv(
                    "OPENAI_API_VERSION", "2024-05-01-preview"
                ),
                deployment_name=self.model_name,
                openai_api_type="azure",
                streaming=self.streaming,
                callbacks=[StreamingStdOutCallbackHandler()],
            )
        elif "claude" in self.model_name:
            return ChatAnthropic(
                model=self.model_name,
                temperature=self.temperature,
                callbacks=[StreamingStdOutCallbackHandler()],
                streaming=self.streaming,
                max_tokens_to_sample=4096,
            )
        elif self.vision:
            return ChatOpenAI(
                model=self.model_name,
                temperature=self.temperature,
                streaming=self.streaming,
                callbacks=[StreamingStdOutCallbackHandler()],
                max_tokens=4096,  # vision models default to low max token limits
            )
        else:
            return ChatOpenAI(
                model=self.model_name,
                temperature=self.temperature,
                streaming=self.streaming,
                callbacks=[StreamingStdOutCallbackHandler()],
            )
def serialize_messages(messages: List[Message]) -> str:
    """Module-level convenience wrapper around :meth:`AI.serialize_messages`."""
    return AI.serialize_messages(messages)
class ClipboardAI(AI):
    """AI stand-in that exchanges messages via the clipboard instead of an API."""

    # Deliberately does not call the superclass __init__ (no LLM is created).
    def __init__(self, **_):  # type: ignore
        self.vision = False
        self.token_usage_log = TokenUsageLog("clipboard_llm")

    @staticmethod
    def serialize_messages(messages: List[Message]) -> str:
        """Render messages as plain text, one ``type:\\ncontent`` section each."""
        sections = [f"{m.type}:\n{m.content}" for m in messages]
        return "\n\n".join(sections)

    @staticmethod
    def multiline_input():
        """Read lines from stdin until EOF and return them joined by newlines."""
        print("Enter/Paste your content. Ctrl-D or Ctrl-Z ( windows ) to save it.")
        collected = []
        while True:
            try:
                collected.append(input())
            except EOFError:
                return "\n".join(collected)

    def next(
        self,
        messages: List[Message],
        prompt: Optional[str] = None,
        *,
        step_name: str,
    ) -> List[Message]:
        """
        Not yet fully supported
        """
        if prompt:
            messages.append(HumanMessage(content=prompt))
        logger.debug(f"Creating a new chat completion: {messages}")
        serialized = self.serialize_messages(messages)
        # Hand the conversation to the human via clipboard and a file...
        pyperclip.copy(serialized)
        Path("clipboard.txt").write_text(serialized)
        print(
            "Messages copied to clipboard and written to clipboard.txt,",
            len(serialized),
            "characters in total",
        )
        # ...and read the "model" reply back from stdin.
        reply = self.multiline_input()
        messages.append(AIMessage(content=reply))
        logger.debug(f"Chat completion finished: {messages}")
        return messages
| python | MIT | a90fcd543eedcc0ff2c34561bc0785d2ba83c47e | 2026-01-04T14:39:15.137338Z | false |
AntonOsika/gpt-engineer | https://github.com/AntonOsika/gpt-engineer/blob/a90fcd543eedcc0ff2c34561bc0785d2ba83c47e/gpt_engineer/core/default/steps.py | gpt_engineer/core/default/steps.py | """
Module for defining the steps involved in generating and improving code using AI.
This module provides functions that represent different steps in the process of generating
and improving code using an AI model. These steps include generating code from a prompt,
creating an entrypoint for the codebase, executing the entrypoint, and refining code edits.
Functions
---------
curr_fn : function
Returns the name of the current function.
setup_sys_prompt : function
Sets up the system prompt for generating code.
gen_code : function
Generates code from a prompt using AI and returns the generated files.
gen_entrypoint : function
Generates an entrypoint for the codebase and returns the entrypoint files.
execute_entrypoint : function
Executes the entrypoint of the codebase.
setup_sys_prompt_existing_code : function
Sets up the system prompt for improving existing code.
improve : function
Improves the code based on user input and returns the updated files.
"""
import inspect
import io
import re
import sys
import traceback
from pathlib import Path
from typing import List, MutableMapping, Union
from langchain.schema import HumanMessage, SystemMessage
from termcolor import colored
from gpt_engineer.core.ai import AI
from gpt_engineer.core.base_execution_env import BaseExecutionEnv
from gpt_engineer.core.base_memory import BaseMemory
from gpt_engineer.core.chat_to_files import apply_diffs, chat_to_files_dict, parse_diffs
from gpt_engineer.core.default.constants import MAX_EDIT_REFINEMENT_STEPS
from gpt_engineer.core.default.paths import (
CODE_GEN_LOG_FILE,
DEBUG_LOG_FILE,
DIFF_LOG_FILE,
ENTRYPOINT_FILE,
ENTRYPOINT_LOG_FILE,
IMPROVE_LOG_FILE,
)
from gpt_engineer.core.files_dict import FilesDict, file_to_lines_dict
from gpt_engineer.core.preprompts_holder import PrepromptsHolder
from gpt_engineer.core.prompt import Prompt
def curr_fn() -> str:
    """
    Returns the name of the current function.

    Returns
    -------
    str
        The name of the function that called this function.
    """
    # Frame 0 is curr_fn itself; frame 1 is the caller we want to name.
    caller = inspect.stack()[1]
    return caller.function
def setup_sys_prompt(preprompts: MutableMapping[Union[str, Path], str]) -> str:
    """
    Sets up the system prompt for generating code.

    Parameters
    ----------
    preprompts : MutableMapping[Union[str, Path], str]
        A mapping of preprompt messages to guide the AI model.

    Returns
    -------
    str
        The system prompt message for the AI model.
    """
    # Substitute the concrete file-format instructions into the template.
    generate = preprompts["generate"].replace("FILE_FORMAT", preprompts["file_format"])
    parts = [
        preprompts["roadmap"],
        generate,
        "\nUseful to know:\n",
        preprompts["philosophy"],
    ]
    return "".join(parts)
def setup_sys_prompt_existing_code(
    preprompts: MutableMapping[Union[str, Path], str]
) -> str:
    """
    Sets up the system prompt for improving existing code.

    Parameters
    ----------
    preprompts : MutableMapping[Union[str, Path], str]
        A mapping of preprompt messages to guide the AI model.

    Returns
    -------
    str
        The system prompt message for the AI model to improve existing code.
    """
    # The improve template expects the diff-based file format instructions.
    improve = preprompts["improve"].replace(
        "FILE_FORMAT", preprompts["file_format_diff"]
    )
    return "".join(
        [preprompts["roadmap"], improve, "\nUseful to know:\n", preprompts["philosophy"]]
    )
def gen_code(
    ai: AI, prompt: Prompt, memory: BaseMemory, preprompts_holder: PrepromptsHolder
) -> FilesDict:
    """
    Generates code from a prompt using AI and returns the generated files.

    Parameters
    ----------
    ai : AI
        The AI model used for generating code.
    prompt : Prompt
        The user prompt to generate code from.
    memory : BaseMemory
        The memory interface where the code and related data are stored.
    preprompts_holder : PrepromptsHolder
        The holder for preprompt messages that guide the AI model.

    Returns
    -------
    FilesDict
        A dictionary of file names to their respective source code content.
    """
    preprompts = preprompts_holder.get_preprompts()
    system_prompt = setup_sys_prompt(preprompts)
    messages = ai.start(
        system_prompt, prompt.to_langchain_content(), step_name=curr_fn()
    )
    # The final assistant message carries the generated code.
    chat = messages[-1].content.strip()
    memory.log(CODE_GEN_LOG_FILE, "\n\n".join(x.pretty_repr() for x in messages))
    return chat_to_files_dict(chat)
def gen_entrypoint(
    ai: AI,
    prompt: Prompt,
    files_dict: FilesDict,
    memory: BaseMemory,
    preprompts_holder: PrepromptsHolder,
) -> FilesDict:
    """
    Generates an entrypoint for the codebase and returns the entrypoint files.

    Parameters
    ----------
    ai : AI
        The AI model used for generating the entrypoint.
    prompt : Prompt
        The prompt whose ``entrypoint_prompt`` (if any) overrides the default.
    files_dict : FilesDict
        The dictionary of file names to their respective source code content.
    memory : BaseMemory
        The memory interface where the code and related data are stored.
    preprompts_holder : PrepromptsHolder
        The holder for preprompt messages that guide the AI model.

    Returns
    -------
    FilesDict
        A dictionary containing the entrypoint file.
    """
    user_prompt = prompt.entrypoint_prompt
    if not user_prompt:
        user_prompt = """
        Make a unix script that
        a) installs dependencies
        b) runs all necessary parts of the codebase (in parallel if necessary)
        """
    preprompts = preprompts_holder.get_preprompts()
    messages = ai.start(
        system=(preprompts["entrypoint"]),
        user=user_prompt
        + "\nInformation about the codebase:\n\n"
        + files_dict.to_chat(),
        step_name=curr_fn(),
    )
    print()
    chat = messages[-1].content.strip()
    # Pull every fenced code block out of the reply and join them into one script.
    code_blocks = re.findall(r"```\S*\n(.+?)```", chat, re.DOTALL)
    entrypoint_code = FilesDict({ENTRYPOINT_FILE: "\n".join(code_blocks)})
    memory.log(ENTRYPOINT_LOG_FILE, "\n\n".join(x.pretty_repr() for x in messages))
    return entrypoint_code
def execute_entrypoint(
    ai: AI,
    execution_env: BaseExecutionEnv,
    files_dict: FilesDict,
    prompt: Prompt = None,
    preprompts_holder: PrepromptsHolder = None,
    memory: BaseMemory = None,
) -> FilesDict:
    """
    Executes the entrypoint of the codebase after asking the user for confirmation.

    Parameters
    ----------
    ai : AI
        The AI model used for generating the entrypoint.
    execution_env : BaseExecutionEnv
        The execution environment in which the code is executed.
    files_dict : FilesDict
        The dictionary of file names to their respective source code content.
    preprompts_holder : PrepromptsHolder, optional
        The holder for preprompt messages that guide the AI model.

    Returns
    -------
    FilesDict
        The dictionary of file names to their respective source code content after execution.
    """
    if ENTRYPOINT_FILE not in files_dict:
        raise FileNotFoundError(
            "The required entrypoint "
            + ENTRYPOINT_FILE
            + " does not exist in the code."
        )
    command = files_dict[ENTRYPOINT_FILE]

    # Show the script and ask before running anything.
    print()
    print(colored("Do you want to execute this code? (Y/n)", "red"))
    print()
    print(command)
    print()
    answer = input("").lower()
    if answer not in ["", "y", "yes"]:
        print("Ok, not executing the code.")
        return files_dict

    print("Executing the code...")
    print()
    print(
        colored(
            "Note: If it does not work as expected, consider running the code"
            + " in another way than above.",
            "green",
        )
    )
    print()
    print("You can press ctrl+c *once* to stop the execution.")
    print()
    execution_env.upload(files_dict).run(f"bash {ENTRYPOINT_FILE}")
    return files_dict
def improve_fn(
    ai: AI,
    prompt: Prompt,
    files_dict: FilesDict,
    memory: BaseMemory,
    preprompts_holder: PrepromptsHolder,
    diff_timeout=3,
) -> FilesDict:
    """
    Improves the code based on user input and returns the updated files.

    Parameters
    ----------
    ai : AI
        The AI model used for improving code.
    prompt : Prompt
        The user prompt to improve the code.
    files_dict : FilesDict
        The dictionary of file names to their respective source code content.
    memory : BaseMemory
        The memory interface where the code and related data are stored.
    preprompts_holder : PrepromptsHolder
        The holder for preprompt messages that guide the AI model.
    diff_timeout : int, optional
        Timeout forwarded to diff parsing, by default 3.

    Returns
    -------
    FilesDict
        The dictionary of file names to their respective updated source code content.
    """
    preprompts = preprompts_holder.get_preprompts()
    # Conversation order: system prompt, current files, then the user request.
    messages = [
        SystemMessage(content=setup_sys_prompt_existing_code(preprompts)),
        HumanMessage(content=f"{files_dict.to_chat()}"),
        HumanMessage(content=prompt.to_langchain_content()),
    ]
    memory.log(
        DEBUG_LOG_FILE,
        "UPLOADED FILES:\n" + files_dict.to_log() + "\nPROMPT:\n" + prompt.text,
    )
    return _improve_loop(ai, files_dict, memory, messages, diff_timeout=diff_timeout)
def _improve_loop(
    ai: AI, files_dict: FilesDict, memory: BaseMemory, messages: List, diff_timeout=3
) -> FilesDict:
    """Run the improve conversation, retrying malformed diffs up to a fixed limit."""
    messages = ai.next(messages, step_name=curr_fn())
    files_dict, errors = salvage_correct_hunks(
        messages, files_dict, memory, diff_timeout=diff_timeout
    )
    # Ask the model to re-emit only the broken diffs, at most
    # MAX_EDIT_REFINEMENT_STEPS times.
    for _attempt in range(MAX_EDIT_REFINEMENT_STEPS):
        if not errors:
            break
        messages.append(
            HumanMessage(
                content="Some previously produced diffs were not on the requested format, or the code part was not found in the code. Details:\n"
                + "\n".join(errors)
                + "\n Only rewrite the problematic diffs, making sure that the failing ones are now on the correct format and can be found in the code. Make sure to not repeat past mistakes. \n"
            )
        )
        messages = ai.next(messages, step_name=curr_fn())
        files_dict, errors = salvage_correct_hunks(
            messages, files_dict, memory, diff_timeout
        )
    return files_dict
def salvage_correct_hunks(
    messages: List, files_dict: FilesDict, memory: BaseMemory, diff_timeout=3
) -> tuple[FilesDict, List[str]]:
    """Parse diffs from the last AI message, validate them, and apply them.

    Returns the updated files plus a list of validation error messages for
    hunks that could not be matched to the existing code.
    """
    error_messages: List[str] = []
    ai_response = messages[-1].content.strip()
    diffs = parse_diffs(ai_response, diff_timeout=diff_timeout)
    for diff in diffs.values():
        # New files need no validation against pre-existing content.
        if diff.is_new_file():
            continue
        error_messages.extend(
            diff.validate_and_correct(
                file_to_lines_dict(files_dict[diff.filename_pre])
            )
        )
    files_dict = apply_diffs(diffs, files_dict)
    memory.log(IMPROVE_LOG_FILE, "\n\n".join(x.pretty_repr() for x in messages))
    memory.log(DIFF_LOG_FILE, "\n\n".join(error_messages))
    return files_dict, error_messages
class Tee(object):
    """Duplicate writes across several file-like objects (like the unix ``tee``)."""

    def __init__(self, *files):
        self.files = files

    def write(self, obj):
        # Fan the written object out to every underlying stream.
        for stream in self.files:
            stream.write(obj)

    def flush(self):
        for stream in self.files:
            stream.flush()
def handle_improve_mode(prompt, agent, memory, files_dict, diff_timeout=3):
    """Run ``agent.improve`` while mirroring all console output into the debug log."""
    buffer = io.StringIO()
    saved_stdout = sys.stdout
    # Tee stdout so the user still sees output while it is being captured.
    sys.stdout = Tee(sys.stdout, buffer)
    try:
        files_dict = agent.improve(files_dict, prompt, diff_timeout=diff_timeout)
    except Exception as e:
        print(
            f"Error while improving the project: {e}\nCould you please upload the debug_log_file.txt in {memory.path}/logs folder to github?\nFULL STACK TRACE:\n"
        )
        # Emit the full stack trace through the tee so it is captured too.
        traceback.print_exc(file=sys.stdout)
    finally:
        # Restore stdout before reading back what was captured.
        sys.stdout = saved_stdout
        console_output = buffer.getvalue()
        print(console_output)
        memory.log(DEBUG_LOG_FILE, "\nCONSOLE OUTPUT:\n" + console_output)
    return files_dict
| python | MIT | a90fcd543eedcc0ff2c34561bc0785d2ba83c47e | 2026-01-04T14:39:15.137338Z | false |
AntonOsika/gpt-engineer | https://github.com/AntonOsika/gpt-engineer/blob/a90fcd543eedcc0ff2c34561bc0785d2ba83c47e/gpt_engineer/core/default/paths.py | gpt_engineer/core/default/paths.py | """
Module defining file system paths used by the application.
This module contains definitions of file system paths that are used throughout the
application to locate and manage various files and directories, such as logs, memory,
and preprompts.
Constants
---------
META_DATA_REL_PATH : str
The relative path to the directory where metadata is stored.
MEMORY_REL_PATH : str
The relative path to the directory where memory-related files are stored.
CODE_GEN_LOG_FILE : str
The filename for the log file that contains all output from code generation.
DEBUG_LOG_FILE : str
The filename for the log file that contains debug information.
ENTRYPOINT_FILE : str
The filename for the entrypoint script that is executed to run the application.
ENTRYPOINT_LOG_FILE : str
The filename for the log file that contains the chat related to entrypoint generation.
PREPROMPTS_PATH : Path
The file system path to the directory containing preprompt files.
Functions
---------
memory_path : function
Constructs the full path to the memory directory based on a given base path.
metadata_path : function
Constructs the full path to the metadata directory based on a given base path.
"""
import os
from pathlib import Path
# Relative directory that holds all gpt-engineer metadata for a project.
META_DATA_REL_PATH = ".gpteng"
# Relative directory (inside the metadata dir) for memory/log files.
MEMORY_REL_PATH = os.path.join(META_DATA_REL_PATH, "memory")
CODE_GEN_LOG_FILE = "all_output.txt"  # full chat from code generation
IMPROVE_LOG_FILE = "improve.txt"  # chat from the improve step
DIFF_LOG_FILE = "diff_errors.txt"  # diff validation errors
DEBUG_LOG_FILE = "debug_log_file.txt"  # captured debug/console output
# NOTE: ENTRYPOINT_FILE was previously assigned twice with the same value;
# the redundant duplicate assignment has been removed.
ENTRYPOINT_FILE = "run.sh"  # generated entrypoint script
ENTRYPOINT_LOG_FILE = "gen_entrypoint_chat.txt"  # chat from entrypoint generation
# Directory holding the preprompt text files shipped with the package.
PREPROMPTS_PATH = Path(__file__).parent.parent.parent / "preprompts"
def memory_path(path):
    """
    Build the full path to the memory directory under a given base path.

    Parameters
    ----------
    path : str
        Base path that the memory directory is appended to.

    Returns
    -------
    str
        The joined path to the memory directory.
    """
    full_path = os.path.join(path, MEMORY_REL_PATH)
    return full_path
def metadata_path(path):
    """
    Build the full path to the metadata directory under a given base path.

    Parameters
    ----------
    path : str
        Base path that the metadata directory is appended to.

    Returns
    -------
    str
        The joined path to the metadata directory.
    """
    full_path = os.path.join(path, META_DATA_REL_PATH)
    return full_path
| python | MIT | a90fcd543eedcc0ff2c34561bc0785d2ba83c47e | 2026-01-04T14:39:15.137338Z | false |
AntonOsika/gpt-engineer | https://github.com/AntonOsika/gpt-engineer/blob/a90fcd543eedcc0ff2c34561bc0785d2ba83c47e/gpt_engineer/core/default/disk_memory.py | gpt_engineer/core/default/disk_memory.py | """
Disk Memory Module
==================
This module provides a simple file-based key-value database system, where keys are
represented as filenames and values are the contents of these files. The `DiskMemory` class
is responsible for the CRUD operations on the database.
Attributes
----------
None
Functions
---------
None
Classes
-------
DiskMemory
A file-based key-value store where keys correspond to filenames and values to file contents.
"""
import base64
import json
import shutil
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, Iterator, Optional, Union
from gpt_engineer.core.base_memory import BaseMemory
from gpt_engineer.tools.supported_languages import SUPPORTED_LANGUAGES
# This class represents a simple database that stores its tools as files in a directory.
class DiskMemory(BaseMemory):
    """
    A file-based key-value store where keys correspond to filenames and values to file contents.

    This class provides an interface to a file-based database, leveraging file operations to
    facilitate CRUD-like interactions. It allows for quick checks on the existence of keys,
    retrieval of values based on keys, and setting new key-value pairs.

    Attributes
    ----------
    path : Path
        The directory path where the database files are stored.
    """

    def __init__(self, path: Union[str, Path]):
        """
        Initialize the store rooted at `path`, creating the directory if needed.

        Parameters
        ----------
        path : str or Path
            The path to the directory where the database files will be stored.
        """
        self.path: Path = Path(path).absolute()
        self.path.mkdir(parents=True, exist_ok=True)

    def __contains__(self, key: str) -> bool:
        """
        Return True if a file named `key` exists in the database.

        Parameters
        ----------
        key : str
            The key (filename) to check for existence.
        """
        return (self.path / key).is_file()

    def __getitem__(self, key: str) -> str:
        """
        Retrieve the content of the file corresponding to `key`.

        Images (.png/.jpeg/.jpg) are returned as a ``data:`` URI with
        Base64-encoded content; everything else is read as UTF-8 text.

        Raises
        ------
        KeyError
            If the file corresponding to the key does not exist.
        """
        full_path = self.path / key

        if not full_path.is_file():
            raise KeyError(f"File '{key}' could not be found in '{self.path}'")

        if full_path.suffix in [".png", ".jpeg", ".jpg"]:
            with full_path.open("rb") as image_file:
                encoded_string = base64.b64encode(image_file.read()).decode("utf-8")
            mime_type = "image/png" if full_path.suffix == ".png" else "image/jpeg"
            return f"data:{mime_type};base64,{encoded_string}"
        else:
            with full_path.open("r", encoding="utf-8") as f:
                return f.read()

    def get(self, key: str, default: Optional[Any] = None) -> Any:
        """
        Retrieve the content for `key`, or `default` if it cannot be read.

        Parameters
        ----------
        key : str
            The key (filename) whose content is to be retrieved.
        default : Any, optional
            The value to return if the file does not exist or cannot be read.

        Returns
        -------
        Any
            The file content, a new DiskMemory if `key` names a sub-directory,
            or `default`.
        """
        item_path = self.path / key
        try:
            if item_path.is_file():
                return self[key]
            elif item_path.is_dir():
                return DiskMemory(item_path)
            else:
                return default
        except Exception:
            # Fix: was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt. Read/decode failures still fall back to the
            # default, preserving the best-effort contract.
            return default

    def __setitem__(self, key: Union[str, Path], val: str) -> None:
        """
        Set or update the content of the file corresponding to `key`.

        Parameters
        ----------
        key : str or Path
            The key (filename) where the content is to be set.
        val : str
            The content to be written to the file.

        Raises
        ------
        ValueError
            If the key attempts to access a parent path.
        TypeError
            If the value is not a string.
        """
        # NOTE(review): this only rejects a literal "../" prefix; a key such as
        # "a/../../x" would still escape the root — TODO confirm callers never
        # pass such keys.
        if str(key).startswith("../"):
            raise ValueError(f"File name {key} attempted to access parent path.")

        if not isinstance(val, str):
            raise TypeError("val must be str")

        full_path = self.path / key
        full_path.parent.mkdir(parents=True, exist_ok=True)
        full_path.write_text(val, encoding="utf-8")

    def __delitem__(self, key: Union[str, Path]) -> None:
        """
        Delete the file or directory corresponding to `key`.

        Raises
        ------
        KeyError
            If the file or directory does not exist.
        """
        item_path = self.path / key
        if not item_path.exists():
            raise KeyError(f"Item '{key}' could not be found in '{self.path}'")

        if item_path.is_file():
            item_path.unlink()
        elif item_path.is_dir():
            shutil.rmtree(item_path)

    def __iter__(self) -> Iterator[str]:
        """
        Iterate over the keys (relative filenames) in sorted order.
        """
        # The outer sorted() on the relative path strings fully determines the
        # order, so a second sort of the rglob results is unnecessary.
        return iter(
            sorted(
                str(item.relative_to(self.path))
                for item in self.path.rglob("*")
                if item.is_file()
            )
        )

    def __len__(self) -> int:
        """
        Return the number of files in the database.
        """
        # Count without materializing the full key list.
        return sum(1 for _ in self)

    def _supported_files(self) -> str:
        # Newline-separated paths of files whose extension is in SUPPORTED_LANGUAGES.
        valid_extensions = {
            ext for lang in SUPPORTED_LANGUAGES for ext in lang["extensions"]
        }
        file_paths = [
            str(item)
            for item in self
            if Path(item).is_file() and Path(item).suffix in valid_extensions
        ]
        return "\n".join(file_paths)

    def _all_files(self) -> str:
        # Newline-separated paths of every file in the store.
        file_paths = [str(item) for item in self if Path(item).is_file()]
        return "\n".join(file_paths)

    def to_path_list_string(self, supported_code_files_only: bool = False) -> str:
        """
        Generate a newline-separated string of the file paths in the database.

        Parameters
        ----------
        supported_code_files_only : bool, optional
            If True, include only files with supported code extensions.
        """
        if supported_code_files_only:
            return self._supported_files()
        else:
            return self._all_files()

    def to_dict(self) -> Dict[Union[str, Path], str]:
        """
        Return the database contents as a {filename: content} dictionary.
        """
        return {file_path: self[file_path] for file_path in self}

    def to_json(self) -> str:
        """
        Serialize the database contents to a JSON string.
        """
        return json.dumps(self.to_dict())

    def log(self, key: Union[str, Path], val: str) -> None:
        """
        Append `val` (with a timestamp header) to a file under the logs/ directory,
        creating the file if it doesn't exist.

        Raises
        ------
        ValueError
            If the key attempts to access a parent path.
        TypeError
            If the value is not a string.
        """
        if str(key).startswith("../"):
            raise ValueError(f"File name {key} attempted to access parent path.")

        if not isinstance(val, str):
            raise TypeError("val must be str")

        full_path = self.path / "logs" / key
        full_path.parent.mkdir(parents=True, exist_ok=True)

        # Touch if it doesn't exist (open(..., "a") below would also create it).
        if not full_path.exists():
            full_path.touch()

        with open(full_path, "a", encoding="utf-8") as file:
            file.write(f"\n{datetime.now().isoformat()}\n")
            file.write(val + "\n")

    def archive_logs(self):
        """
        Move the logs/ directory to a timestamped archive directory.
        """
        if "logs" in self:
            archive_dir = (
                self.path / f"logs_{datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}"
            )
            shutil.move(self.path / "logs", archive_dir)
| python | MIT | a90fcd543eedcc0ff2c34561bc0785d2ba83c47e | 2026-01-04T14:39:15.137338Z | false |
AntonOsika/gpt-engineer | https://github.com/AntonOsika/gpt-engineer/blob/a90fcd543eedcc0ff2c34561bc0785d2ba83c47e/gpt_engineer/core/default/constants.py | gpt_engineer/core/default/constants.py | """
Module defining constants used throughout the application.
This module contains definitions of constants that are used across various
components of the application to maintain consistency and ease of configuration.
Constants
---------
MAX_EDIT_REFINEMENT_STEPS : int
The maximum number of refinement steps allowed when generating edit blocks.
"""
# Maximum number of refinement steps allowed when generating edit blocks.
MAX_EDIT_REFINEMENT_STEPS = 2
| python | MIT | a90fcd543eedcc0ff2c34561bc0785d2ba83c47e | 2026-01-04T14:39:15.137338Z | false |
AntonOsika/gpt-engineer | https://github.com/AntonOsika/gpt-engineer/blob/a90fcd543eedcc0ff2c34561bc0785d2ba83c47e/gpt_engineer/core/default/disk_execution_env.py | gpt_engineer/core/default/disk_execution_env.py | """
Module for managing the execution environment on the local disk.
This module provides a class that handles the execution of code stored on the local
file system. It includes methods for uploading files to the execution environment,
running commands, and capturing the output.
Classes
-------
DiskExecutionEnv
An execution environment that runs code on the local file system and captures
the output of the execution.
Imports
-------
- subprocess: For running shell commands.
- time: For timing the execution of commands.
- Path: For handling file system paths.
- Optional, Tuple, Union: For type annotations.
- BaseExecutionEnv: For inheriting the base execution environment interface.
- FileStore: For managing file storage.
- FilesDict: For handling collections of files.
"""
import subprocess
import time
from pathlib import Path
from typing import Optional, Tuple, Union
from gpt_engineer.core.base_execution_env import BaseExecutionEnv
from gpt_engineer.core.default.file_store import FileStore
from gpt_engineer.core.files_dict import FilesDict
class DiskExecutionEnv(BaseExecutionEnv):
    """
    An execution environment that runs code on the local file system and captures
    the output of the execution.

    This class is responsible for executing code that is stored on disk. It ensures that
    the necessary entrypoint file exists and then runs the code using a subprocess. If the
    execution is interrupted by the user, it handles the interruption gracefully.

    Attributes
    ----------
    files : FileStore
        Manages the storage of files in the execution environment.
    """

    def __init__(self, path: Union[str, Path, None] = None):
        # A None path lets FileStore create a fresh temporary working directory.
        self.files = FileStore(path)

    def upload(self, files: FilesDict) -> "DiskExecutionEnv":
        """Write `files` into the working directory; returns self for chaining."""
        self.files.push(files)
        return self

    def download(self) -> FilesDict:
        """Read the current working-directory contents back as a FilesDict."""
        return self.files.pull()

    def popen(self, command: str) -> subprocess.Popen:
        """Start `command` in the working directory without waiting for it."""
        p = subprocess.Popen(
            command,
            shell=True,
            cwd=self.files.working_dir,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        return p

    def run(self, command: str, timeout: Optional[int] = None) -> Tuple[str, str, int]:
        """
        Run `command` in the working directory, echoing its output live.

        Parameters
        ----------
        command : str
            Shell command to execute.
        timeout : int, optional
            Wall-clock limit in seconds; TimeoutError is raised when exceeded.

        Returns
        -------
        Tuple[str, str, int]
            Captured stdout, captured stderr, and the process return code.
        """
        start = time.time()
        print("\n--- Start of run ---")
        # while running, also print the stdout and stderr
        p = subprocess.Popen(
            command,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            cwd=self.files.working_dir,
            text=True,
            shell=True,
        )
        print("$", command)
        stdout_full, stderr_full = "", ""

        try:
            while p.poll() is None:
                assert p.stdout is not None
                assert p.stderr is not None
                stdout = p.stdout.readline()
                stderr = p.stderr.readline()
                if stdout:
                    print(stdout, end="")
                    stdout_full += stdout
                if stderr:
                    print(stderr, end="")
                    stderr_full += stderr
                if timeout and time.time() - start > timeout:
                    print("Timeout!")
                    p.kill()
                    raise TimeoutError()
            # Fix: the readline loop stops as soon as the process exits, which
            # previously dropped any output still buffered in the pipes; drain it.
            remaining_stdout, remaining_stderr = p.communicate()
            if remaining_stdout:
                print(remaining_stdout, end="")
                stdout_full += remaining_stdout
            if remaining_stderr:
                print(remaining_stderr, end="")
                stderr_full += remaining_stderr
        except KeyboardInterrupt:
            print()
            print("Stopping execution.")
            print("Execution stopped.")
            p.kill()
            # Fix: reap the killed process so p.returncode is populated
            # instead of being returned as None.
            p.wait()
            print()

        print("--- Finished run ---\n")
        return stdout_full, stderr_full, p.returncode
| python | MIT | a90fcd543eedcc0ff2c34561bc0785d2ba83c47e | 2026-01-04T14:39:15.137338Z | false |
AntonOsika/gpt-engineer | https://github.com/AntonOsika/gpt-engineer/blob/a90fcd543eedcc0ff2c34561bc0785d2ba83c47e/gpt_engineer/core/default/__init__.py | gpt_engineer/core/default/__init__.py | python | MIT | a90fcd543eedcc0ff2c34561bc0785d2ba83c47e | 2026-01-04T14:39:15.137338Z | false | |
AntonOsika/gpt-engineer | https://github.com/AntonOsika/gpt-engineer/blob/a90fcd543eedcc0ff2c34561bc0785d2ba83c47e/gpt_engineer/core/default/simple_agent.py | gpt_engineer/core/default/simple_agent.py | """
Module for defining a simple agent that uses AI to manage code generation and improvement.
This module provides a class that represents an agent capable of initializing and improving
a codebase using AI. It handles interactions with the AI model, memory, and execution
environment to generate and refine code based on user prompts.
"""
import tempfile
from typing import Optional
from gpt_engineer.core.ai import AI
from gpt_engineer.core.base_agent import BaseAgent
from gpt_engineer.core.base_execution_env import BaseExecutionEnv
from gpt_engineer.core.base_memory import BaseMemory
from gpt_engineer.core.default.disk_execution_env import DiskExecutionEnv
from gpt_engineer.core.default.disk_memory import DiskMemory
from gpt_engineer.core.default.paths import PREPROMPTS_PATH, memory_path
from gpt_engineer.core.default.steps import gen_code, gen_entrypoint, improve_fn
from gpt_engineer.core.files_dict import FilesDict
from gpt_engineer.core.preprompts_holder import PrepromptsHolder
from gpt_engineer.core.prompt import Prompt
class SimpleAgent(BaseAgent):
    """
    An agent that drives AI-based code generation and improvement.

    Given a user prompt it can either create a brand-new codebase (plus an
    entrypoint script) or refine an existing one, delegating the actual
    generation steps to the `steps` helpers while holding on to the memory,
    execution environment, AI model and preprompts used by those steps.

    Attributes
    ----------
    memory : BaseMemory
        Storage backend for code and related artifacts.
    execution_env : BaseExecutionEnv
        Environment used to execute generated code.
    ai : AI
        Model used for generation and improvement.
    preprompts_holder : PrepromptsHolder
        Supplier of the preprompt messages that guide the model.
    """

    def __init__(
        self,
        memory: BaseMemory,
        execution_env: BaseExecutionEnv,
        ai: AI = None,
        preprompts_holder: PrepromptsHolder = None,
    ):
        # Fall back to package defaults when the optional collaborators are omitted.
        self.memory = memory
        self.execution_env = execution_env
        self.ai = ai or AI()
        self.preprompts_holder = preprompts_holder or PrepromptsHolder(PREPROMPTS_PATH)

    @classmethod
    def with_default_config(
        cls, path: str, ai: AI = None, preprompts_holder: PrepromptsHolder = None
    ):
        """Build an agent backed by disk memory under `path` and a disk execution env."""
        return cls(
            memory=DiskMemory(memory_path(path)),
            execution_env=DiskExecutionEnv(),
            ai=ai,
            preprompts_holder=preprompts_holder or PrepromptsHolder(PREPROMPTS_PATH),
        )

    def init(self, prompt: Prompt) -> FilesDict:
        """Generate a fresh codebase and its entrypoint script from `prompt`."""
        code = gen_code(self.ai, prompt, self.memory, self.preprompts_holder)
        entrypoint = gen_entrypoint(
            self.ai, prompt, code, self.memory, self.preprompts_holder
        )
        # Merge generated code and entrypoint into a single FilesDict.
        return FilesDict({**code, **entrypoint})

    def improve(
        self,
        files_dict: FilesDict,
        prompt: Prompt,
        execution_command: Optional[str] = None,
    ) -> FilesDict:
        """Return `files_dict` improved according to `prompt`.

        `execution_command` is accepted for interface compatibility but is not
        used by this implementation.
        """
        return improve_fn(
            self.ai, prompt, files_dict, self.memory, self.preprompts_holder
        )
def default_config_agent():
    """
    Create a SimpleAgent with default configuration.

    Returns
    -------
    SimpleAgent
        An agent whose working files live under a newly created temporary directory.
    """
    workdir = tempfile.mkdtemp()
    return SimpleAgent.with_default_config(workdir)
| python | MIT | a90fcd543eedcc0ff2c34561bc0785d2ba83c47e | 2026-01-04T14:39:15.137338Z | false |
AntonOsika/gpt-engineer | https://github.com/AntonOsika/gpt-engineer/blob/a90fcd543eedcc0ff2c34561bc0785d2ba83c47e/gpt_engineer/core/default/file_store.py | gpt_engineer/core/default/file_store.py | import tempfile
from pathlib import Path
from typing import Union
from gpt_engineer.core.files_dict import FilesDict
from gpt_engineer.core.linting import Linting
class FileStore:
    """
    Manages file storage in a directory (a fresh temporary one by default).

    Provides methods for pushing a FilesDict to disk, pulling the directory
    contents back as a FilesDict, and linting a FilesDict.

    Attributes
    ----------
    working_dir : Path
        The directory backing the store.
    id : str
        Identifier derived from the last dash-separated token of the directory name.
    """

    def __init__(self, path: Union[str, Path, None] = None):
        if path is None:
            path = Path(tempfile.mkdtemp(prefix="gpt-engineer-"))
        self.working_dir = Path(path)
        self.working_dir.mkdir(parents=True, exist_ok=True)
        self.id = self.working_dir.name.split("-")[-1]

    def push(self, files: FilesDict):
        """Write every (name, content) pair under the working directory; returns self."""
        for name, content in files.items():
            path = self.working_dir / name
            path.parent.mkdir(parents=True, exist_ok=True)
            # Fix: write UTF-8 explicitly so content round-trips identically
            # regardless of the platform's locale encoding.
            with open(path, "w", encoding="utf-8") as f:
                f.write(content)
        return self

    def linting(self, files: FilesDict) -> FilesDict:
        """Run the project's linters over `files` and return the linted result."""
        # lint the code
        linting = Linting()
        return linting.lint_files(files)

    def pull(self) -> FilesDict:
        """Read all files under the working directory back into a FilesDict."""
        files = {}
        for path in self.working_dir.glob("**/*"):
            if path.is_file():
                # Fix: decode as UTF-8 to match push(); files that fail to
                # decode are recorded as binary placeholders, as before.
                with open(path, "r", encoding="utf-8") as f:
                    try:
                        content = f.read()
                    except UnicodeDecodeError:
                        content = "binary file"
                files[str(path.relative_to(self.working_dir))] = content
        return FilesDict(files)
| python | MIT | a90fcd543eedcc0ff2c34561bc0785d2ba83c47e | 2026-01-04T14:39:15.137338Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/.github/scripts/label_title_regex.py | .github/scripts/label_title_regex.py | """Labels PRs based on title. Must be run in a github action with the
pull_request_target event."""
import json
import os
import re
from github import Github
# The GitHub Actions context is injected as JSON through the CONTEXT_GITHUB env var.
context_dict = json.loads(os.getenv("CONTEXT_GITHUB"))
# "owner/name" slug of the repository the workflow ran in.
repo = context_dict["repository"]
g = Github(context_dict["token"])
# NOTE(review): `repo` is rebound here from the slug string to a Repository object.
repo = g.get_repo(repo)
pr_number = context_dict["event"]["number"]
# Pull requests are issues in the GitHub API, so labels are managed via the issue.
issue = repo.get_issue(number=pr_number)
title = issue.title
# Whole-word title keywords mapped to the labels they imply.
regex_to_labels = [(r"\bDOC\b", "Documentation"), (r"\bCI\b", "Build / CI")]
labels_to_add = [label for regex, label in regex_to_labels if re.search(regex, title)]
if labels_to_add:
    issue.add_to_labels(*labels_to_add)
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_glm.py | benchmarks/bench_glm.py | """
A comparison of different methods in GLM
Data comes from a random square matrix.
"""
from datetime import datetime
import numpy as np
from sklearn import linear_model
if __name__ == "__main__":
    import matplotlib.pyplot as plt

    # Number of increasing problem sizes to benchmark.
    n_iter = 40
    time_ridge = np.empty(n_iter)
    time_ols = np.empty(n_iter)
    time_lasso = np.empty(n_iter)
    # X-axis values for the plot.
    dimensions = 500 * np.arange(1, n_iter + 1)
    for i in range(n_iter):
        print("Iteration %s of %s" % (i, n_iter))
        # Square random problem whose size grows linearly with i.
        n_samples, n_features = 10 * i + 3, 10 * i + 3
        X = np.random.randn(n_samples, n_features)
        Y = np.random.randn(n_samples)
        # Time one fit of each estimator on the same data.
        start = datetime.now()
        ridge = linear_model.Ridge(alpha=1.0)
        ridge.fit(X, Y)
        time_ridge[i] = (datetime.now() - start).total_seconds()
        start = datetime.now()
        ols = linear_model.LinearRegression()
        ols.fit(X, Y)
        time_ols[i] = (datetime.now() - start).total_seconds()
        start = datetime.now()
        lasso = linear_model.LassoLars()
        lasso.fit(X, Y)
        time_lasso[i] = (datetime.now() - start).total_seconds()
    # One curve per estimator, fit time against problem size.
    plt.figure("scikit-learn GLM benchmark results")
    plt.xlabel("Dimensions")
    plt.ylabel("Time (s)")
    plt.plot(dimensions, time_ridge, color="r")
    plt.plot(dimensions, time_ols, color="g")
    plt.plot(dimensions, time_lasso, color="b")
    plt.legend(["Ridge", "OLS", "LassoLars"], loc="upper left")
    plt.axis("tight")
    plt.show()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_multilabel_metrics.py | benchmarks/bench_multilabel_metrics.py | #!/usr/bin/env python
"""
A comparison of multilabel target formats and metrics over them
"""
import argparse
import itertools
import sys
from functools import partial
from timeit import timeit
import matplotlib.pyplot as plt
import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import (
accuracy_score,
f1_score,
hamming_loss,
jaccard_similarity_score,
)
from sklearn.utils._testing import ignore_warnings
# Metric name -> callable(y_true, y_pred); the F1 variants are pre-bound to
# their averaging mode with functools.partial.
METRICS = {
    "f1": partial(f1_score, average="micro"),
    "f1-by-sample": partial(f1_score, average="samples"),
    "accuracy": accuracy_score,
    "hamming": hamming_loss,
    "jaccard": jaccard_similarity_score,
}
# Format name -> converter from a dense indicator matrix to that representation.
FORMATS = {
    "sequences": lambda y: [list(np.flatnonzero(s)) for s in y],
    "dense": lambda y: y,
    "csr": sp.csr_matrix,
    "csc": sp.csc_matrix,
}
@ignore_warnings
def benchmark(
    metrics=tuple(v for k, v in sorted(METRICS.items())),
    formats=tuple(v for k, v in sorted(FORMATS.items())),
    samples=1000,
    classes=4,
    density=0.2,
    n_times=5,
):
    """Times metric calculations for a number of inputs

    Parameters
    ----------
    metrics : array-like of callables (1d or 0d)
        The metric functions to time.
    formats : array-like of callables (1d or 0d)
        These may transform a dense indicator matrix into multilabel
        representation.
    samples : array-like of ints (1d or 0d)
        The number of samples to generate as input.
    classes : array-like of ints (1d or 0d)
        The number of classes in the input.
    density : array-like of ints (1d or 0d)
        The density of positive labels in the input.
    n_times : int
        Time calling the metric n_times times.

    Returns
    -------
    array of floats shaped like (metrics, formats, samples, classes, density)
        Time in seconds.
    """
    # Scalars are promoted to length-1 arrays so every parameter can be swept.
    metrics = np.atleast_1d(metrics)
    samples = np.atleast_1d(samples)
    classes = np.atleast_1d(classes)
    density = np.atleast_1d(density)
    formats = np.atleast_1d(formats)
    # Result axes: (metric, format, sample-size, class-count, density).
    out = np.zeros(
        (len(metrics), len(formats), len(samples), len(classes), len(density)),
        dtype=float,
    )
    # Cartesian grid of problem settings; `i` indexes its flattened order.
    it = itertools.product(samples, classes, density)
    for i, (s, c, d) in enumerate(it):
        _, y_true = make_multilabel_classification(
            n_samples=s, n_features=1, n_classes=c, n_labels=d * c, random_state=42
        )
        _, y_pred = make_multilabel_classification(
            n_samples=s, n_features=1, n_classes=c, n_labels=d * c, random_state=84
        )
        for j, f in enumerate(formats):
            # Convert once per format, then time every metric on the converted pair.
            f_true = f(y_true)
            f_pred = f(y_pred)
            for k, metric in enumerate(metrics):
                t = timeit(partial(metric, f_true, f_pred), number=n_times)
                # .flat[i] addresses the (samples, classes, density) cell in
                # the same flattened order produced by itertools.product above.
                out[k, j].flat[i] = t
    return out
def _tabulate(results, metrics, formats):
"""Prints results by metric and format
Uses the last ([-1]) value of other fields
"""
column_width = max(max(len(k) for k in formats) + 1, 8)
first_width = max(len(k) for k in metrics)
head_fmt = "{:<{fw}s}" + "{:>{cw}s}" * len(formats)
row_fmt = "{:<{fw}s}" + "{:>{cw}.3f}" * len(formats)
print(head_fmt.format("Metric", *formats, cw=column_width, fw=first_width))
for metric, row in zip(metrics, results[:, :, -1, -1, -1]):
print(row_fmt.format(metric, *row, cw=column_width, fw=first_width))
def _plot(
    results,
    metrics,
    formats,
    title,
    x_ticks,
    x_label,
    format_markers=("x", "|", "o", "+"),
    metric_colors=("c", "m", "y", "k", "g", "r", "b"),
):
    """
    Draw one timing curve per (metric, format) pair against x_ticks;
    color encodes the metric and marker encodes the format.
    """
    fig = plt.figure("scikit-learn multilabel metrics benchmarks")
    plt.title(title)
    ax = fig.add_subplot(111)
    for metric_idx, metric in enumerate(metrics):
        color = metric_colors[metric_idx % len(metric_colors)]
        for format_idx, format in enumerate(formats):
            ax.plot(
                x_ticks,
                results[metric_idx, format_idx].flat,
                label="{}, {}".format(metric, format),
                marker=format_markers[format_idx],
                color=color,
            )
    ax.set_xlabel(x_label)
    ax.set_ylabel("Time (s)")
    ax.legend()
    plt.show()
if __name__ == "__main__":
    ap = argparse.ArgumentParser()
    ap.add_argument(
        "metrics",
        nargs="*",
        default=sorted(METRICS),
        help="Specifies metrics to benchmark, defaults to all. Choices are: {}".format(
            sorted(METRICS)
        ),
    )
    ap.add_argument(
        "--formats",
        nargs="+",
        choices=sorted(FORMATS),
        help="Specifies multilabel formats to benchmark (defaults to all).",
    )
    ap.add_argument(
        "--samples", type=int, default=1000, help="The number of samples to generate"
    )
    ap.add_argument("--classes", type=int, default=10, help="The number of classes")
    ap.add_argument(
        "--density",
        type=float,
        default=0.2,
        help="The average density of labels per sample",
    )
    ap.add_argument(
        "--plot",
        choices=["classes", "density", "samples"],
        default=None,
        help=(
            "Plot time with respect to this parameter varying up to the specified value"
        ),
    )
    ap.add_argument(
        "--n-steps", default=10, type=int, help="Plot this many points for each metric"
    )
    ap.add_argument(
        "--n-times", default=5, type=int, help="Time performance over n_times trials"
    )
    args = ap.parse_args()
    if args.plot is not None:
        # Replace the swept parameter's scalar value with an array of steps
        # from a sensible minimum up to the user-specified maximum.
        max_val = getattr(args, args.plot)
        if args.plot in ("classes", "samples"):
            min_val = 2
        else:
            min_val = 0
        steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:]
        if args.plot in ("classes", "samples"):
            # Counts must be unique integers.
            steps = np.unique(np.round(steps).astype(int))
        setattr(args, args.plot, steps)
    # Fall back to all known metrics/formats when none were requested.
    if args.metrics is None:
        args.metrics = sorted(METRICS)
    if args.formats is None:
        args.formats = sorted(FORMATS)
    results = benchmark(
        [METRICS[k] for k in args.metrics],
        [FORMATS[k] for k in args.formats],
        args.samples,
        args.classes,
        args.density,
        args.n_times,
    )
    _tabulate(results, args.metrics, args.formats)
    if args.plot is not None:
        print("Displaying plot", file=sys.stderr)
        # Title lists the fixed parameters, omitting the swept one.
        title = "Multilabel metrics with %s" % ", ".join(
            "{0}={1}".format(field, getattr(args, field))
            for field in ["samples", "classes", "density"]
            if args.plot != field
        )
        _plot(results, args.metrics, args.formats, title, steps, args.plot)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_hist_gradient_boosting_higgsboson.py | benchmarks/bench_hist_gradient_boosting_higgsboson.py | import argparse
import os
from gzip import GzipFile
from time import time
from urllib.request import urlretrieve
import numpy as np
import pandas as pd
from joblib import Memory
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.ensemble._hist_gradient_boosting.utils import get_equivalent_estimator
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.model_selection import train_test_split
# Command-line configuration for the HIGGS benchmark.
parser = argparse.ArgumentParser()
parser.add_argument("--n-leaf-nodes", type=int, default=31)
parser.add_argument("--n-trees", type=int, default=10)
parser.add_argument("--lightgbm", action="store_true", default=False)
parser.add_argument("--xgboost", action="store_true", default=False)
parser.add_argument("--catboost", action="store_true", default=False)
parser.add_argument("--learning-rate", type=float, default=1.0)
parser.add_argument("--subsample", type=int, default=None)
parser.add_argument("--max-bins", type=int, default=255)
parser.add_argument("--no-predict", action="store_true", default=False)
parser.add_argument("--cache-loc", type=str, default="/tmp")
# Fix: `type=bool` parses any non-empty string (including "False") as True;
# use a store_true flag, consistent with the other boolean options above.
parser.add_argument("--no-interactions", action="store_true", default=False)
parser.add_argument("--max-features", type=float, default=1.0)
args = parser.parse_args()

HERE = os.path.dirname(__file__)
URL = "https://archive.ics.uci.edu/ml/machine-learning-databases/00280/HIGGS.csv.gz"
# Disk-backed memoization cache for the (expensive) data loading step.
m = Memory(location=args.cache_loc, mmap_mode="r")

n_leaf_nodes = args.n_leaf_nodes
n_trees = args.n_trees
subsample = args.subsample
lr = args.learning_rate
max_bins = args.max_bins
max_features = args.max_features
@m.cache
def load_data():
    """Download (if needed) and parse the HIGGS csv.gz into a float32 DataFrame.

    Memoized on disk via joblib.Memory, so the 2.6 GB download and the parse
    each happen at most once per cache location.
    """
    filename = os.path.join(HERE, URL.rsplit("/", 1)[-1])
    if not os.path.exists(filename):
        # Fix: these messages printed a literal "(unknown)" placeholder instead
        # of interpolating the actual target path.
        print(f"Downloading {URL} to {filename} (2.6 GB)...")
        urlretrieve(URL, filename)
        print("done.")

    print(f"Parsing {filename}...")
    tic = time()
    with GzipFile(filename) as f:
        df = pd.read_csv(f, header=None, dtype=np.float32)
    toc = time()
    print(f"Loaded {df.values.nbytes / 1e9:0.3f} GB in {toc - tic:0.3f}s")
    return df
def fit(est, data_train, target_train, libname):
    """Fit `est` on the training data, printing wall-clock time for `libname`."""
    print(f"Fitting a {libname} model...")
    t0 = time()
    est.fit(data_train, target_train)
    elapsed = time() - t0
    print(f"fitted in {elapsed:.3f}s")
def predict(est, data_test, target_test):
    """Score `est` on the test set, printing timing, ROC AUC and accuracy.

    Honors the --no-predict flag by returning immediately.
    """
    if args.no_predict:
        return
    t0 = time()
    predicted_test = est.predict(data_test)
    predicted_proba_test = est.predict_proba(data_test)
    elapsed = time() - t0
    roc_auc = roc_auc_score(target_test, predicted_proba_test[:, 1])
    acc = accuracy_score(target_test, predicted_test)
    print(f"predicted in {elapsed:.3f}s, ROC AUC: {roc_auc:.4f}, ACC: {acc:.4f}")
df = load_data()
# First column is the binary label, the rest are features.
target = df.values[:, 0]
data = np.ascontiguousarray(df.values[:, 1:])
data_train, data_test, target_train, target_test = train_test_split(
    data, target, test_size=0.2, random_state=0
)
n_classes = len(np.unique(target))
# Optionally truncate the training set for quicker runs.
if subsample is not None:
    data_train, target_train = data_train[:subsample], target_train[:subsample]
n_samples, n_features = data_train.shape
print(f"Training set with {n_samples} records with {n_features} features.")
# --no-interactions constrains every tree split to a single feature.
if args.no_interactions:
    interaction_cst = [[i] for i in range(n_features)]
else:
    interaction_cst = None
est = HistGradientBoostingClassifier(
    loss="log_loss",
    learning_rate=lr,
    max_iter=n_trees,
    max_bins=max_bins,
    max_leaf_nodes=n_leaf_nodes,
    early_stopping=False,
    random_state=0,
    verbose=1,
    interaction_cst=interaction_cst,
    max_features=max_features,
)
fit(est, data_train, target_train, "sklearn")
predict(est, data_test, target_test)
# Optionally benchmark third-party libraries with equivalent hyperparameters.
if args.lightgbm:
    est = get_equivalent_estimator(est, lib="lightgbm", n_classes=n_classes)
    fit(est, data_train, target_train, "lightgbm")
    predict(est, data_test, target_test)
if args.xgboost:
    est = get_equivalent_estimator(est, lib="xgboost", n_classes=n_classes)
    fit(est, data_train, target_train, "xgboost")
    predict(est, data_test, target_test)
if args.catboost:
    est = get_equivalent_estimator(est, lib="catboost", n_classes=n_classes)
    fit(est, data_train, target_train, "catboost")
    predict(est, data_test, target_test)
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_isotonic.py | benchmarks/bench_isotonic.py | """
Benchmarks of isotonic regression performance.
We generate a synthetic dataset of size 10^n, for n in [min, max], and
examine the time taken to run isotonic regression over the dataset.
The timings are then output to stdout, or visualized on a log-log scale
with matplotlib.
This allows the scaling of the algorithm with the problem size to be
visualized and understood.
"""
import argparse
import gc
from timeit import default_timer
import matplotlib.pyplot as plt
import numpy as np
from scipy.special import expit
from sklearn.isotonic import isotonic_regression
def generate_perturbed_logarithm_dataset(size):
    """Noisy increasing trend: 50*log(1 + i) plus integer noise in [-50, 50)."""
    noise = np.random.randint(-50, 50, size=size)
    trend = 50.0 * np.log(1 + np.arange(size))
    return noise + trend
def generate_logistic_dataset(size):
    """Boolean labels drawn with probability sigmoid(x) at sorted normal points."""
    X = np.sort(np.random.normal(size=size))
    probabilities = expit(X)
    return np.random.random(size=size) < probabilities
def generate_pathological_dataset(size):
    # Triggers O(n^2) complexity on the original implementation.
    ascending = np.arange(size)
    wide_sweep = np.arange(-(size - 1), size)
    closing_run = np.arange(-(size - 1), 1)
    return np.concatenate([ascending, wide_sweep, closing_run])
# Map CLI ``--dataset`` names to their generator functions.
DATASET_GENERATORS = {
    "perturbed_logarithm": generate_perturbed_logarithm_dataset,
    "logistic": generate_logistic_dataset,
    "pathological": generate_pathological_dataset,
}
def bench_isotonic_regression(Y):
    """Time a single isotonic regression fit over *Y*.

    Returns the elapsed wall-clock time in seconds.  Garbage is collected
    up front so collector pauses do not pollute the measurement.
    """
    gc.collect()
    started = default_timer()
    isotonic_regression(Y)
    return default_timer() - started
if __name__ == "__main__":
    # CLI: problem sizes are given as base-10 exponents, so each benchmarked
    # size is 10x the previous one.
    parser = argparse.ArgumentParser(description="Isotonic Regression benchmark tool")
    parser.add_argument("--seed", type=int, help="RNG seed")
    parser.add_argument(
        "--iterations",
        type=int,
        required=True,
        help="Number of iterations to average timings over for each problem size",
    )
    parser.add_argument(
        "--log_min_problem_size",
        type=int,
        required=True,
        help="Base 10 logarithm of the minimum problem size",
    )
    parser.add_argument(
        "--log_max_problem_size",
        type=int,
        required=True,
        help="Base 10 logarithm of the maximum problem size",
    )
    parser.add_argument(
        "--show_plot", action="store_true", help="Plot timing output with matplotlib"
    )
    parser.add_argument("--dataset", choices=DATASET_GENERATORS.keys(), required=True)

    args = parser.parse_args()

    np.random.seed(args.seed)

    timings = []
    # NOTE: range() upper bound is exclusive, so --log_max_problem_size
    # itself is never benchmarked.
    for exponent in range(args.log_min_problem_size, args.log_max_problem_size):
        n = 10**exponent
        Y = DATASET_GENERATORS[args.dataset](n)
        time_per_iteration = [
            bench_isotonic_regression(Y) for i in range(args.iterations)
        ]
        timing = (n, np.mean(time_per_iteration))
        timings.append(timing)

        # If we're not plotting, dump the timing to stdout
        if not args.show_plot:
            print(n, np.mean(time_per_iteration))

    if args.show_plot:
        # Log-log plot: the scaling exponent shows up as the slope.
        plt.plot(*zip(*timings))
        plt.title("Average time taken running isotonic regression")
        plt.xlabel("Number of observations")
        plt.ylabel("Time (s)")
        plt.axis("tight")
        plt.loglog()
        plt.show()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_plot_omp_lars.py | benchmarks/bench_plot_omp_lars.py | """Benchmarks of orthogonal matching pursuit (:ref:`OMP`) versus least angle
regression (:ref:`least_angle_regression`)
The input data is mostly low rank but has a fat, infinite tail.
"""
import gc
import sys
from time import time
import numpy as np
from sklearn.datasets import make_sparse_coded_signal
from sklearn.linear_model import lars_path, lars_path_gram, orthogonal_mp
def compute_bench(samples_range, features_range):
    """Time LARS vs. OMP, each with and without a precomputed Gram matrix.

    For every (n_samples, n_features) pair a sparse coded signal is
    generated and every solver is run once.  Returns a dict mapping a plot
    label to a 2-D array of runtime ratios, indexed [i_feature, i_sample].
    """
    it = 0

    results = dict()
    # One timing matrix per solver, shape (n_feature_sizes, n_sample_sizes).
    lars = np.empty((len(features_range), len(samples_range)))
    lars_gram = lars.copy()
    omp = lars.copy()
    omp_gram = lars.copy()

    max_it = len(samples_range) * len(features_range)
    for i_s, n_samples in enumerate(samples_range):
        for i_f, n_features in enumerate(features_range):
            it += 1
            n_informative = n_features // 10
            print("====================")
            print("Iteration %03d of %03d" % (it, max_it))
            print("====================")
            # dataset_kwargs = {
            #     'n_train_samples': n_samples,
            #     'n_test_samples': 2,
            #     'n_features': n_features,
            #     'n_informative': n_informative,
            #     'effective_rank': min(n_samples, n_features) / 10,
            #     #'effective_rank': None,
            #     'bias': 0.0,
            # }
            # NOTE(review): n_components/n_features are intentionally swapped
            # relative to the loop variables and the signal is transposed
            # below -- confirm against make_sparse_coded_signal's convention.
            dataset_kwargs = {
                "n_samples": 1,
                "n_components": n_features,
                "n_features": n_samples,
                "n_nonzero_coefs": n_informative,
                "random_state": 0,
            }
            print("n_samples: %d" % n_samples)
            print("n_features: %d" % n_features)
            y, X, _ = make_sparse_coded_signal(**dataset_kwargs)
            X = np.asfortranarray(X.T)

            gc.collect()
            print("benchmarking lars_path (with Gram):", end="")
            sys.stdout.flush()
            tstart = time()
            # NOTE(review): building G and Xy is included in this timing --
            # confirm that is the intended comparison.
            G = np.dot(X.T, X)  # precomputed Gram matrix
            Xy = np.dot(X.T, y)
            lars_path_gram(Xy=Xy, Gram=G, n_samples=y.size, max_iter=n_informative)
            delta = time() - tstart
            print("%0.3fs" % delta)
            lars_gram[i_f, i_s] = delta

            gc.collect()
            print("benchmarking lars_path (without Gram):", end="")
            sys.stdout.flush()
            tstart = time()
            lars_path(X, y, Gram=None, max_iter=n_informative)
            delta = time() - tstart
            print("%0.3fs" % delta)
            lars[i_f, i_s] = delta

            gc.collect()
            print("benchmarking orthogonal_mp (with Gram):", end="")
            sys.stdout.flush()
            tstart = time()
            orthogonal_mp(X, y, precompute=True, n_nonzero_coefs=n_informative)
            delta = time() - tstart
            print("%0.3fs" % delta)
            omp_gram[i_f, i_s] = delta

            gc.collect()
            print("benchmarking orthogonal_mp (without Gram):", end="")
            sys.stdout.flush()
            tstart = time()
            orthogonal_mp(X, y, precompute=False, n_nonzero_coefs=n_informative)
            delta = time() - tstart
            print("%0.3fs" % delta)
            omp[i_f, i_s] = delta

    # Report relative (ratio) timings rather than absolute durations.
    results["time(LARS) / time(OMP)\n (w/ Gram)"] = lars_gram / omp_gram
    results["time(LARS) / time(OMP)\n (w/o Gram)"] = lars / omp
    return results
if __name__ == "__main__":
    samples_range = np.linspace(1000, 5000, 5).astype(int)
    features_range = np.linspace(1000, 5000, 5).astype(int)
    results = compute_bench(samples_range, features_range)
    # NOTE(review): max_time is computed but never used below.
    max_time = max(np.max(t) for t in results.values())

    import matplotlib.pyplot as plt

    fig = plt.figure("scikit-learn OMP vs. LARS benchmark results")
    for i, (label, timings) in enumerate(sorted(results.items())):
        ax = fig.add_subplot(1, 2, i + 1)
        # Symmetric color range around a ratio of 1 (equal runtimes).
        vmax = max(1 - timings.min(), -1 + timings.max())
        plt.matshow(timings, fignum=False, vmin=1 - vmax, vmax=1 + vmax)
        ax.set_xticklabels([""] + [str(each) for each in samples_range])
        ax.set_yticklabels([""] + [str(each) for each in features_range])
        plt.xlabel("n_samples")
        plt.ylabel("n_features")
        plt.title(label)

    plt.subplots_adjust(0.1, 0.08, 0.96, 0.98, 0.4, 0.63)
    ax = plt.axes([0.1, 0.08, 0.8, 0.06])
    plt.colorbar(cax=ax, orientation="horizontal")
    plt.show()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_plot_nmf.py | benchmarks/bench_plot_nmf.py | """
Benchmarks of Non-Negative Matrix Factorization
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import numbers
import sys
import warnings
from time import time
import matplotlib.pyplot as plt
import numpy as np
import pandas
from joblib import Memory
from sklearn.decomposition import NMF
from sklearn.decomposition._nmf import _beta_divergence, _check_init, _initialize_nmf
from sklearn.exceptions import ConvergenceWarning
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.utils import check_array
from sklearn.utils._testing import ignore_warnings
from sklearn.utils.extmath import safe_sparse_dot, squared_norm
from sklearn.utils.validation import check_is_fitted, check_non_negative
# Disk cache for the expensive benchmark runs (see ``bench_one``).
# BUG FIX: joblib renamed ``cachedir`` to ``location`` and removed the old
# keyword (deprecated in joblib 0.12); ``cachedir`` raises a TypeError on
# any modern joblib.
mem = Memory(location=".", verbose=0)
###################
# Start of _PGNMF #
###################
# This class implements a projected gradient solver for the NMF.
# The projected gradient solver was removed from scikit-learn in version 0.19,
# and a simplified copy is used here for comparison purpose only.
# It is not tested, and it may change or disappear without notice.
def _norm(x):
    """Dot product-based Euclidean norm implementation

    See: https://fa.bianp.net/blog/2011/computing-the-vector-norm/
    """
    # sqrt of dot(x, x) as computed by sklearn's ``squared_norm`` helper.
    return np.sqrt(squared_norm(x))
def _nls_subproblem(
    X, W, H, tol, max_iter, alpha=0.0, l1_ratio=0.0, sigma=0.01, beta=0.1
):
    """Non-negative least square solver

    Solves a non-negative least squares subproblem using the projected
    gradient descent algorithm.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Constant matrix.
    W : array-like, shape (n_samples, n_components)
        Constant matrix.
    H : array-like, shape (n_components, n_features)
        Initial guess for the solution.
    tol : float
        Tolerance of the stopping condition.
    max_iter : int
        Maximum number of iterations before timing out.
    alpha : double, default: 0.
        Constant that multiplies the regularization terms. Set it to zero to
        have no regularization.
    l1_ratio : double, default: 0.
        The regularization mixing parameter, with 0 <= l1_ratio <= 1.
        For l1_ratio = 0 the penalty is an L2 penalty.
        For l1_ratio = 1 it is an L1 penalty.
        For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
    sigma : float
        Constant used in the sufficient decrease condition checked by the line
        search. Smaller values lead to a looser sufficient decrease condition,
        thus reducing the time taken by the line search, but potentially
        increasing the number of iterations of the projected gradient
        procedure. 0.01 is a commonly used value in the optimization
        literature.
    beta : float
        Factor by which the step size is decreased (resp. increased) until
        (resp. as long as) the sufficient decrease condition is satisfied.
        Larger values allow to find a better step size but lead to longer line
        search. 0.1 is a commonly used value in the optimization literature.

    Returns
    -------
    H : array-like, shape (n_components, n_features)
        Solution to the non-negative least squares problem.
    grad : array-like, shape (n_components, n_features)
        The gradient.
    n_iter : int
        The number of iterations done by the algorithm.

    References
    ----------
    C.-J. Lin. Projected gradient methods for non-negative matrix
    factorization. Neural Computation, 19(2007), 2756-2779.
    https://www.csie.ntu.edu.tw/~cjlin/nmf/
    """
    # These two products are constant across iterations, so hoist them.
    WtX = safe_sparse_dot(W.T, X)
    WtW = np.dot(W.T, W)

    # values justified in the paper (alpha is renamed gamma)
    gamma = 1
    for n_iter in range(1, max_iter + 1):
        # Gradient of the least-squares term plus the elastic-net penalty.
        grad = np.dot(WtW, H) - WtX
        if alpha > 0 and l1_ratio == 1.0:
            grad += alpha
        elif alpha > 0:
            grad += alpha * (l1_ratio + (1 - l1_ratio) * H)

        # The following multiplication with a boolean array is more than twice
        # as fast as indexing into grad.
        if _norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
            break

        Hp = H

        # Line search on the step size ``gamma``: either backtrack (shrink)
        # or advance (grow) depending on the first sufficient-decrease test.
        for inner_iter in range(20):
            # Gradient step.
            Hn = H - gamma * grad
            # Projection step.
            Hn *= Hn > 0
            d = Hn - H
            gradd = np.dot(grad.ravel(), d.ravel())
            dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
            suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
            if inner_iter == 0:
                # Direction of the search is fixed by the first probe.
                decr_gamma = not suff_decr

            if decr_gamma:
                if suff_decr:
                    H = Hn
                    break
                else:
                    gamma *= beta
            elif not suff_decr or (Hp == Hn).all():
                H = Hp
                break
            else:
                gamma /= beta
                Hp = Hn

    if n_iter == max_iter:
        warnings.warn("Iteration limit reached in nls subproblem.", ConvergenceWarning)

    return H, grad, n_iter
def _fit_projected_gradient(X, W, H, tol, max_iter, nls_max_iter, alpha, l1_ratio):
    """Alternate projected-gradient NLS updates of W and H.

    Stops when the projected gradient norm, relative to the initial
    gradient, drops below ``tol``; returns ``(W, H, n_iter)``.
    """
    gradW = np.dot(W, np.dot(H, H.T)) - safe_sparse_dot(X, H.T, dense_output=True)
    gradH = np.dot(np.dot(W.T, W), H) - safe_sparse_dot(W.T, X, dense_output=True)

    init_grad = squared_norm(gradW) + squared_norm(gradH.T)
    # max(0.001, tol) to force alternating minimizations of W and H
    tolW = max(0.001, tol) * np.sqrt(init_grad)
    tolH = tolW

    for n_iter in range(1, max_iter + 1):
        # stopping condition as discussed in paper
        proj_grad_W = squared_norm(gradW * np.logical_or(gradW < 0, W > 0))
        proj_grad_H = squared_norm(gradH * np.logical_or(gradH < 0, H > 0))

        if (proj_grad_W + proj_grad_H) / init_grad < tol**2:
            break

        # update W: solve the NLS subproblem on the transposed system
        Wt, gradWt, iterW = _nls_subproblem(
            X.T, H.T, W.T, tolW, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio
        )
        W, gradW = Wt.T, gradWt.T

        if iterW == 1:
            # Subproblem stopped after one pass -> tighten its tolerance.
            tolW = 0.1 * tolW

        # update H
        H, gradH, iterH = _nls_subproblem(
            X, W, H, tolH, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio
        )
        if iterH == 1:
            tolH = 0.1 * tolH

        H[H == 0] = 0  # fix up negative zeros

    if n_iter == max_iter:
        # One last refinement of W with the final tolerance.
        Wt, _, _ = _nls_subproblem(
            X.T, H.T, W.T, tolW, nls_max_iter, alpha=alpha, l1_ratio=l1_ratio
        )
        W = Wt.T

    return W, H, n_iter
class _PGNMF(NMF):
    """Non-Negative Matrix Factorization (NMF) with projected gradient solver.

    This class is private and for comparison purpose only.
    It may change or disappear without notice.
    """

    def __init__(
        self,
        n_components=None,
        solver="pg",
        init=None,
        tol=1e-4,
        max_iter=200,
        random_state=None,
        alpha=0.0,
        l1_ratio=0.0,
        nls_max_iter=10,
    ):
        # Map the single legacy ``alpha`` onto the parent's alpha_W/alpha_H.
        # NOTE(review): ``self.alpha`` itself is never set here, yet
        # ``_fit_transform`` reads it -- verify against the installed NMF.
        super().__init__(
            n_components=n_components,
            init=init,
            solver=solver,
            tol=tol,
            max_iter=max_iter,
            random_state=random_state,
            alpha_W=alpha,
            alpha_H=alpha,
            l1_ratio=l1_ratio,
        )
        self.nls_max_iter = nls_max_iter

    def fit(self, X, y=None, **params):
        """Learn factors W and H for ``X``; return self."""
        self.fit_transform(X, **params)
        return self

    def transform(self, X):
        """Project ``X`` onto the learned components (H held fixed)."""
        check_is_fitted(self)
        H = self.components_
        W, _, self.n_iter_ = self._fit_transform(X, H=H, update_H=False)
        return W

    def inverse_transform(self, W):
        """Reconstruct data from its factorized representation W @ H."""
        check_is_fitted(self)
        return np.dot(W, self.components_)

    def fit_transform(self, X, y=None, W=None, H=None):
        # NOTE(review): stores the count in ``self.n_iter`` while transform()
        # uses ``self.n_iter_`` -- confirm the missing underscore is intended.
        W, H, self.n_iter = self._fit_transform(X, W=W, H=H, update_H=True)
        self.components_ = H
        return W

    def _fit_transform(self, X, y=None, W=None, H=None, update_H=True):
        """Validate inputs, initialize W/H, and run the PG solver.

        With ``update_H=True`` both factors are optimized (fit path);
        otherwise H is fixed and only W is solved for (transform path).
        """
        X = check_array(X, accept_sparse=("csr", "csc"))
        check_non_negative(X, "NMF (input X)")

        n_samples, n_features = X.shape
        n_components = self.n_components
        if n_components is None:
            n_components = n_features

        if not isinstance(n_components, numbers.Integral) or n_components <= 0:
            raise ValueError(
                "Number of components must be a positive integer; got (n_components=%r)"
                % n_components
            )
        if not isinstance(self.max_iter, numbers.Integral) or self.max_iter < 0:
            raise ValueError(
                "Maximum number of iterations must be a positive "
                "integer; got (max_iter=%r)" % self.max_iter
            )
        if not isinstance(self.tol, numbers.Number) or self.tol < 0:
            raise ValueError(
                "Tolerance for stopping criteria must be positive; got (tol=%r)"
                % self.tol
            )

        # check W and H, or initialize them
        if self.init == "custom" and update_H:
            _check_init(H, (n_components, n_features), "NMF (input H)")
            _check_init(W, (n_samples, n_components), "NMF (input W)")
        elif not update_H:
            _check_init(H, (n_components, n_features), "NMF (input H)")
            W = np.zeros((n_samples, n_components))
        else:
            W, H = _initialize_nmf(
                X, n_components, init=self.init, random_state=self.random_state
            )

        if update_H:  # fit_transform
            W, H, n_iter = _fit_projected_gradient(
                X,
                W,
                H,
                self.tol,
                self.max_iter,
                self.nls_max_iter,
                self.alpha,
                self.l1_ratio,
            )
        else:  # transform
            Wt, _, n_iter = _nls_subproblem(
                X.T,
                H.T,
                W.T,
                self.tol,
                self.nls_max_iter,
                alpha=self.alpha,
                l1_ratio=self.l1_ratio,
            )
            W = Wt.T

        if n_iter == self.max_iter and self.tol > 0:
            warnings.warn(
                "Maximum number of iteration %d reached. Increase it"
                " to improve convergence." % self.max_iter,
                ConvergenceWarning,
            )

        return W, H, n_iter
#################
# End of _PGNMF #
#################
def plot_results(results_df, plot_name):
    """Plot loss-vs-time curves, one subplot per initialization scheme.

    ``results_df`` must carry the columns ``method``, ``loss``, ``time``
    and ``init``; the function is a no-op when it is None.
    """
    if results_df is None:
        return None

    plt.figure(figsize=(16, 6))
    colors = "bgr"
    markers = "ovs"
    ax = plt.subplot(1, 3, 1)
    for i, init in enumerate(np.unique(results_df["init"])):
        # Share both axes with the first panel so all inits are comparable.
        plt.subplot(1, 3, i + 1, sharex=ax, sharey=ax)
        for j, method in enumerate(np.unique(results_df["method"])):
            mask = np.logical_and(
                results_df["init"] == init, results_df["method"] == method
            )
            selected_items = results_df[mask]

            plt.plot(
                selected_items["time"],
                selected_items["loss"],
                color=colors[j % len(colors)],
                ls="-",
                marker=markers[j % len(markers)],
                label=method,
            )

        plt.legend(loc=0, fontsize="x-small")
        plt.xlabel("Time (s)")
        plt.ylabel("loss")
        plt.title("%s" % init)
    plt.suptitle(plot_name, fontsize=16)
@ignore_warnings(category=ConvergenceWarning)
# use joblib to cache the results.
# X_shape is specified in arguments for avoiding hashing X
@mem.cache(ignore=["X", "W0", "H0"])
def bench_one(
    name, X, W0, H0, X_shape, clf_type, clf_params, init, n_components, random_state
):
    """Fit one estimator from the (W0, H0) starting point.

    Returns ``(loss, duration)``; results are memoized on disk by joblib,
    keyed on every argument except the large arrays X, W0 and H0.
    """
    # Work on copies so the shared initialization is not mutated in place.
    W = W0.copy()
    H = H0.copy()

    clf = clf_type(**clf_params)
    st = time()
    W = clf.fit_transform(X, W=W, H=H)
    end = time()
    H = clf.components_

    # Frobenius (beta=2) divergence of the final factorization.
    this_loss = _beta_divergence(X, W, H, 2.0, True)
    duration = end - st
    return this_loss, duration
def run_bench(X, clfs, plot_name, n_components, tol, alpha, l1_ratio):
    """Benchmark every (solver, init, max_iter) combination on ``X``.

    Each point re-fits from scratch with a growing iteration budget so a
    loss-vs-time trajectory can be traced.  Returns a tidy DataFrame with
    columns ``method``, ``loss``, ``time`` and ``init``, and plots it.
    """
    start = time()
    results = []
    for name, clf_type, iter_range, clf_params in clfs:
        print("Training %s:" % name)
        for rs, init in enumerate(("nndsvd", "nndsvdar", "random")):
            print(" %s %s: " % (init, " " * (8 - len(init))), end="")
            # Shared starting point for all budgets of this (solver, init).
            W, H = _initialize_nmf(X, n_components, init, 1e-6, rs)

            for max_iter in iter_range:
                clf_params["alpha"] = alpha
                clf_params["l1_ratio"] = l1_ratio
                clf_params["max_iter"] = max_iter
                clf_params["tol"] = tol
                clf_params["random_state"] = rs
                clf_params["init"] = "custom"
                clf_params["n_components"] = n_components

                this_loss, duration = bench_one(
                    name, X, W, H, X.shape, clf_type, clf_params, init, n_components, rs
                )

                init_name = "init='%s'" % init
                results.append((name, this_loss, duration, init_name))
                # print("loss: %.6f, time: %.3f sec" % (this_loss, duration))
                print(".", end="")
                sys.stdout.flush()
            print(" ")

    # Use a panda dataframe to organize the results
    results_df = pandas.DataFrame(results, columns="method loss time init".split())
    print("Total time = %0.3f sec\n" % (time() - start))

    # plot the results
    plot_results(results_df, plot_name)
    return results_df
def load_20news():
    """Fetch the 20 newsgroups corpus and return its sparse tf-idf matrix."""
    print("Loading 20 newsgroups dataset")
    print("-----------------------------")
    from sklearn.datasets import fetch_20newsgroups

    corpus = fetch_20newsgroups(
        shuffle=True, random_state=1, remove=("headers", "footers", "quotes")
    )
    vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, stop_words="english")
    return vectorizer.fit_transform(corpus.data)
def load_faces():
    """Fetch the Olivetti faces dataset and return its dense data matrix."""
    print("Loading Olivetti face dataset")
    print("-----------------------------")
    from sklearn.datasets import fetch_olivetti_faces

    return fetch_olivetti_faces(shuffle=True).data
def build_clfs(cd_iters, pg_iters, mu_iters):
    """Pair each solver label and class with its iteration schedule."""
    cd = ("Coordinate Descent", NMF, cd_iters, {"solver": "cd"})
    pg = ("Projected Gradient", _PGNMF, pg_iters, {"solver": "pg"})
    mu = ("Multiplicative Update", NMF, mu_iters, {"solver": "mu"})
    return [cd, pg, mu]
if __name__ == "__main__":
    # Settings shared by both benchmark datasets.
    alpha = 0.0
    l1_ratio = 0.5
    n_components = 10
    tol = 1e-15

    # first benchmark on 20 newsgroup dataset: sparse, shape(11314, 39116)
    plot_name = "20 Newsgroups sparse dataset"
    cd_iters = np.arange(1, 30)
    pg_iters = np.arange(1, 6)
    mu_iters = np.arange(1, 30)
    clfs = build_clfs(cd_iters, pg_iters, mu_iters)
    X_20news = load_20news()
    run_bench(X_20news, clfs, plot_name, n_components, tol, alpha, l1_ratio)

    # second benchmark on Olivetti faces dataset: dense, shape(400, 4096)
    plot_name = "Olivetti Faces dense dataset"
    cd_iters = np.arange(1, 30)
    pg_iters = np.arange(1, 12)
    mu_iters = np.arange(1, 30)
    clfs = build_clfs(cd_iters, pg_iters, mu_iters)
    X_faces = load_faces()
    run_bench(
        X_faces,
        clfs,
        plot_name,
        n_components,
        tol,
        alpha,
        l1_ratio,
    )

    plt.show()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_hist_gradient_boosting_threading.py | benchmarks/bench_hist_gradient_boosting_threading.py | import argparse
import os
from pprint import pprint
from time import time
import numpy as np
from threadpoolctl import threadpool_limits
import sklearn
from sklearn.datasets import make_classification, make_regression
from sklearn.ensemble import (
HistGradientBoostingClassifier,
HistGradientBoostingRegressor,
)
from sklearn.ensemble._hist_gradient_boosting.utils import get_equivalent_estimator
from sklearn.model_selection import train_test_split
# ---------------------------------------------------------------------------
# CLI: dataset size/shape, which third-party libraries to benchmark against,
# and plotting options.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("--n-leaf-nodes", type=int, default=31)
parser.add_argument("--n-trees", type=int, default=10)
parser.add_argument(
    "--lightgbm", action="store_true", default=False, help="also benchmark lightgbm"
)
parser.add_argument(
    "--xgboost", action="store_true", default=False, help="also benchmark xgboost"
)
parser.add_argument(
    "--catboost", action="store_true", default=False, help="also benchmark catboost"
)
parser.add_argument("--learning-rate", type=float, default=0.1)
parser.add_argument(
    "--problem",
    type=str,
    default="classification",
    choices=["classification", "regression"],
)
parser.add_argument("--loss", type=str, default="default")
parser.add_argument("--missing-fraction", type=float, default=0)
parser.add_argument("--n-classes", type=int, default=2)
parser.add_argument("--n-samples", type=int, default=int(1e6))
parser.add_argument("--n-features", type=int, default=100)
parser.add_argument("--max-bins", type=int, default=255)
parser.add_argument("--print-params", action="store_true", default=False)
parser.add_argument(
    "--random-sample-weights",
    action="store_true",
    default=False,
    help="generate and use random sample weights",
)
parser.add_argument(
    "--plot", action="store_true", default=False, help="show a plot results"
)
parser.add_argument(
    "--plot-filename", default=None, help="filename to save the figure to disk"
)
args = parser.parse_args()

# Short module-level aliases for the frequently used options.
n_samples = args.n_samples
n_leaf_nodes = args.n_leaf_nodes
n_trees = args.n_trees
lr = args.learning_rate
max_bins = args.max_bins

print("Data size: %d samples train, %d samples test." % (n_samples, n_samples))
print(f"n_features: {args.n_features}")
def get_estimator_and_data():
    """Build the benchmark dataset and pick the matching estimator class.

    Returns
    -------
    X, y : ndarray
        ``2 * args.n_samples`` rows; half is later held out for testing.
    Estimator : type
        ``HistGradientBoostingClassifier`` or ``HistGradientBoostingRegressor``
        depending on ``--problem`` (argparse restricts it to these two).
    """
    if args.problem == "classification":
        X, y = make_classification(
            args.n_samples * 2,
            n_features=args.n_features,
            n_classes=args.n_classes,
            n_clusters_per_class=1,
            n_informative=args.n_features // 2,
            random_state=0,
        )
        return X, y, HistGradientBoostingClassifier
    elif args.problem == "regression":
        # BUG FIX: the parser only defines --n-samples (args.n_samples);
        # ``args.n_samples_max`` does not exist, so the regression path used
        # to crash with AttributeError.
        X, y = make_regression(
            args.n_samples * 2, n_features=args.n_features, random_state=0
        )
        return X, y, HistGradientBoostingRegressor
X, y, Estimator = get_estimator_and_data()

# Optionally knock out a random fraction of entries to exercise the
# native missing-value handling.
if args.missing_fraction:
    mask = np.random.binomial(1, args.missing_fraction, size=X.shape).astype(bool)
    X[mask] = np.nan

if args.random_sample_weights:
    sample_weight = np.random.rand(len(X)) * 10
else:
    sample_weight = None

if sample_weight is not None:
    (X_train_, X_test_, y_train_, y_test_, sample_weight_train_, _) = train_test_split(
        X, y, sample_weight, test_size=0.5, random_state=0
    )
else:
    X_train_, X_test_, y_train_, y_test_ = train_test_split(
        X, y, test_size=0.5, random_state=0
    )
    sample_weight_train_ = None

# Template estimator; it is cloned for every benchmarked thread count.
sklearn_est = Estimator(
    learning_rate=lr,
    max_iter=n_trees,
    max_bins=max_bins,
    max_leaf_nodes=n_leaf_nodes,
    early_stopping=False,
    random_state=0,
    verbose=0,
)
loss = args.loss
if args.problem == "classification":
    if loss == "default":
        # loss='auto' does not work with get_equivalent_estimator()
        loss = "log_loss"
else:
    # regression
    if loss == "default":
        loss = "squared_error"
sklearn_est.set_params(loss=loss)

if args.print_params:
    print("scikit-learn")
    pprint(sklearn_est.get_params())

    for libname in ["lightgbm", "xgboost", "catboost"]:
        if getattr(args, libname):
            print(libname)
            est = get_equivalent_estimator(
                sklearn_est, lib=libname, n_classes=args.n_classes
            )
            pprint(est.get_params())
def one_run(n_threads, n_samples):
    """Fit and score every enabled library with ``n_threads`` threads.

    Returns a 12-tuple of (score, fit_duration, score_duration) for
    sklearn, LightGBM, XGBoost and CatBoost; entries for disabled
    libraries are None.
    """
    X_train = X_train_[:n_samples]
    X_test = X_test_[:n_samples]
    y_train = y_train_[:n_samples]
    y_test = y_test_[:n_samples]
    if sample_weight is not None:
        sample_weight_train = sample_weight_train_[:n_samples]
    else:
        sample_weight_train = None
    assert X_train.shape[0] == n_samples
    assert X_test.shape[0] == n_samples

    print("Fitting a sklearn model...")
    tic = time()
    est = sklearn.base.clone(sklearn_est)

    # sklearn parallelism is OpenMP-based, so cap only that pool.
    with threadpool_limits(n_threads, user_api="openmp"):
        est.fit(X_train, y_train, sample_weight=sample_weight_train)
        sklearn_fit_duration = time() - tic
        tic = time()
        sklearn_score = est.score(X_test, y_test)
        sklearn_score_duration = time() - tic
        print("score: {:.4f}".format(sklearn_score))
        print("fit duration: {:.3f}s,".format(sklearn_fit_duration))
        print("score duration: {:.3f}s,".format(sklearn_score_duration))

    # Each third-party library takes its thread count as an estimator param.
    lightgbm_score = None
    lightgbm_fit_duration = None
    lightgbm_score_duration = None
    if args.lightgbm:
        print("Fitting a LightGBM model...")
        lightgbm_est = get_equivalent_estimator(
            est, lib="lightgbm", n_classes=args.n_classes
        )
        lightgbm_est.set_params(num_threads=n_threads)

        tic = time()
        lightgbm_est.fit(X_train, y_train, sample_weight=sample_weight_train)
        lightgbm_fit_duration = time() - tic
        tic = time()
        lightgbm_score = lightgbm_est.score(X_test, y_test)
        lightgbm_score_duration = time() - tic
        print("score: {:.4f}".format(lightgbm_score))
        print("fit duration: {:.3f}s,".format(lightgbm_fit_duration))
        print("score duration: {:.3f}s,".format(lightgbm_score_duration))

    xgb_score = None
    xgb_fit_duration = None
    xgb_score_duration = None
    if args.xgboost:
        print("Fitting an XGBoost model...")
        xgb_est = get_equivalent_estimator(est, lib="xgboost", n_classes=args.n_classes)
        xgb_est.set_params(nthread=n_threads)

        tic = time()
        xgb_est.fit(X_train, y_train, sample_weight=sample_weight_train)
        xgb_fit_duration = time() - tic
        tic = time()
        xgb_score = xgb_est.score(X_test, y_test)
        xgb_score_duration = time() - tic
        print("score: {:.4f}".format(xgb_score))
        print("fit duration: {:.3f}s,".format(xgb_fit_duration))
        print("score duration: {:.3f}s,".format(xgb_score_duration))

    cat_score = None
    cat_fit_duration = None
    cat_score_duration = None
    if args.catboost:
        print("Fitting a CatBoost model...")
        cat_est = get_equivalent_estimator(
            est, lib="catboost", n_classes=args.n_classes
        )
        cat_est.set_params(thread_count=n_threads)

        tic = time()
        cat_est.fit(X_train, y_train, sample_weight=sample_weight_train)
        cat_fit_duration = time() - tic
        tic = time()
        cat_score = cat_est.score(X_test, y_test)
        cat_score_duration = time() - tic
        print("score: {:.4f}".format(cat_score))
        print("fit duration: {:.3f}s,".format(cat_fit_duration))
        print("score duration: {:.3f}s,".format(cat_score_duration))

    return (
        sklearn_score,
        sklearn_fit_duration,
        sklearn_score_duration,
        lightgbm_score,
        lightgbm_fit_duration,
        lightgbm_score_duration,
        xgb_score,
        xgb_fit_duration,
        xgb_score_duration,
        cat_score,
        cat_fit_duration,
        cat_score_duration,
    )
max_threads = os.cpu_count()
# Powers of two strictly below the core count, plus the core count itself.
n_threads_list = [2**i for i in range(8) if (2**i) < max_threads]
n_threads_list.append(max_threads)

sklearn_scores = []
sklearn_fit_durations = []
sklearn_score_durations = []
lightgbm_scores = []
lightgbm_fit_durations = []
lightgbm_score_durations = []
xgb_scores = []
xgb_fit_durations = []
xgb_score_durations = []
cat_scores = []
cat_fit_durations = []
cat_score_durations = []

for n_threads in n_threads_list:
    print(f"n_threads: {n_threads}")
    (
        sklearn_score,
        sklearn_fit_duration,
        sklearn_score_duration,
        lightgbm_score,
        lightgbm_fit_duration,
        lightgbm_score_duration,
        xgb_score,
        xgb_fit_duration,
        xgb_score_duration,
        cat_score,
        cat_fit_duration,
        cat_score_duration,
    ) = one_run(n_threads, n_samples)

    # Accumulate each per-run value into its series for plotting.
    for scores, score in (
        (sklearn_scores, sklearn_score),
        (sklearn_fit_durations, sklearn_fit_duration),
        (sklearn_score_durations, sklearn_score_duration),
        (lightgbm_scores, lightgbm_score),
        (lightgbm_fit_durations, lightgbm_fit_duration),
        (lightgbm_score_durations, lightgbm_score_duration),
        (xgb_scores, xgb_score),
        (xgb_fit_durations, xgb_fit_duration),
        (xgb_score_durations, xgb_score_duration),
        (cat_scores, cat_score),
        (cat_fit_durations, cat_fit_duration),
        (cat_score_durations, cat_score_duration),
    ):
        scores.append(score)

if args.plot or args.plot_filename:
    import matplotlib
    import matplotlib.pyplot as plt

    fig, axs = plt.subplots(2, figsize=(12, 12))

    label = f"sklearn {sklearn.__version__}"
    axs[0].plot(n_threads_list, sklearn_fit_durations, label=label)
    axs[1].plot(n_threads_list, sklearn_score_durations, label=label)

    if args.lightgbm:
        import lightgbm

        label = f"LightGBM {lightgbm.__version__}"
        axs[0].plot(n_threads_list, lightgbm_fit_durations, label=label)
        axs[1].plot(n_threads_list, lightgbm_score_durations, label=label)

    if args.xgboost:
        import xgboost

        label = f"XGBoost {xgboost.__version__}"
        axs[0].plot(n_threads_list, xgb_fit_durations, label=label)
        axs[1].plot(n_threads_list, xgb_score_durations, label=label)

    if args.catboost:
        import catboost

        label = f"CatBoost {catboost.__version__}"
        axs[0].plot(n_threads_list, cat_fit_durations, label=label)
        axs[1].plot(n_threads_list, cat_score_durations, label=label)

    for ax in axs:
        # Log-scale x axis with plain (non-scientific) tick labels.
        ax.set_xscale("log")
        ax.set_xlabel("n_threads")
        ax.set_ylabel("duration (s)")
        ax.set_ylim(0, None)
        ax.set_xticks(n_threads_list)
        ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
        ax.legend(loc="best")

    axs[0].set_title("fit duration (s)")
    axs[1].set_title("score duration (s)")

    title = args.problem
    if args.problem == "classification":
        title += " n_classes = {}".format(args.n_classes)
    fig.suptitle(title)

    plt.tight_layout()

    if args.plot_filename:
        plt.savefig(args.plot_filename)

    if args.plot:
        plt.show()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_hist_gradient_boosting_categorical_only.py | benchmarks/bench_hist_gradient_boosting_categorical_only.py | import argparse
from time import time
from sklearn.datasets import make_classification
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.ensemble._hist_gradient_boosting.utils import get_equivalent_estimator
from sklearn.preprocessing import KBinsDiscretizer
# CLI for the categorical-features-only benchmark.
parser = argparse.ArgumentParser()
parser.add_argument("--n-leaf-nodes", type=int, default=31)
parser.add_argument("--n-trees", type=int, default=100)
parser.add_argument("--n-features", type=int, default=20)
parser.add_argument("--n-cats", type=int, default=20)
parser.add_argument("--n-samples", type=int, default=10_000)
parser.add_argument("--lightgbm", action="store_true", default=False)
parser.add_argument("--learning-rate", type=float, default=0.1)
parser.add_argument("--max-bins", type=int, default=255)
parser.add_argument("--no-predict", action="store_true", default=False)
parser.add_argument("--verbose", action="store_true", default=False)
args = parser.parse_args()

# Short aliases for the benchmark parameters.
n_leaf_nodes = args.n_leaf_nodes
n_features = args.n_features
n_categories = args.n_cats
n_samples = args.n_samples
n_trees = args.n_trees
lr = args.learning_rate
max_bins = args.max_bins
verbose = args.verbose
def fit(est, data_train, target_train, libname, **fit_params):
    """Fit *est* on the training data, printing the elapsed time."""
    print(f"Fitting a {libname} model...")
    start = time()
    est.fit(data_train, target_train, **fit_params)
    elapsed = time() - start
    print(f"fitted in {elapsed:.3f}s")
def predict(est, data_test):
    # We don't report accuracy or ROC because the dataset doesn't really make
    # sense: we treat ordered features as un-ordered categories.
    if args.no_predict:
        return
    start = time()
    est.predict(data_test)
    elapsed = time() - start
    print(f"predicted in {elapsed:.3f}s")
X, y = make_classification(n_samples=n_samples, n_features=n_features, random_state=0)

# Discretize every continuous feature into ordinal bins, then treat the bin
# ids as unordered categories.
X = KBinsDiscretizer(n_bins=n_categories, encode="ordinal").fit_transform(X)

print(f"Number of features: {n_features}")
print(f"Number of samples: {n_samples}")

is_categorical = [True] * n_features
est = HistGradientBoostingClassifier(
    loss="log_loss",
    learning_rate=lr,
    max_iter=n_trees,
    max_bins=max_bins,
    max_leaf_nodes=n_leaf_nodes,
    categorical_features=is_categorical,
    early_stopping=False,
    random_state=0,
    verbose=verbose,
)

fit(est, X, y, "sklearn")
predict(est, X)

if args.lightgbm:
    est = get_equivalent_estimator(est, lib="lightgbm", n_classes=2)
    est.set_params(max_cat_to_onehot=1)  # dont use OHE
    categorical_features = list(range(n_features))
    fit(est, X, y, "lightgbm", categorical_feature=categorical_features)
    predict(est, X)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_plot_fastkmeans.py | benchmarks/bench_plot_fastkmeans.py | from collections import defaultdict
from time import time
import numpy as np
from numpy import random as nr
from sklearn.cluster import KMeans, MiniBatchKMeans
def compute_bench(samples_range, features_range):
    """Benchmark full K-Means against MiniBatchKMeans on random integer data.

    For every (n_samples, n_features) pair in the Cartesian product of the
    two ranges, both estimators are fitted on the same random matrix; fit
    time and inertia are accumulated in a dict of lists keyed by method.
    """
    results = defaultdict(lambda: [])
    chunk = 100  # mini-batch size for MiniBatchKMeans
    grid = [(ns, nf) for ns in samples_range for nf in features_range]
    for it, (n_samples, n_features) in enumerate(grid, start=1):
        print("==============================")
        print("Iteration %03d of %03d" % (it, len(grid)))
        print("==============================")
        print()
        data = nr.randint(-50, 51, (n_samples, n_features))

        print("K-Means")
        tstart = time()
        kmeans = KMeans(init="k-means++", n_clusters=10).fit(data)
        delta = time() - tstart
        print("Speed: %0.3fs" % delta)
        print("Inertia: %0.5f" % kmeans.inertia_)
        print()
        results["kmeans_speed"].append(delta)
        results["kmeans_quality"].append(kmeans.inertia_)

        print("Fast K-Means")
        # let's prepare the data in small chunks
        mbkmeans = MiniBatchKMeans(init="k-means++", n_clusters=10, batch_size=chunk)
        tstart = time()
        mbkmeans.fit(data)
        delta = time() - tstart
        print("Speed: %0.3fs" % delta)
        print("Inertia: %f" % mbkmeans.inertia_)
        print()
        print()
        results["MiniBatchKMeans Speed"].append(delta)
        results["MiniBatchKMeans Quality"].append(mbkmeans.inertia_)
    return results
def compute_bench_2(chunks):
    """Benchmark MiniBatchKMeans for several batch sizes on a fixed blob mix.

    Draws one Gaussian cloud around each of eight fixed centers and fits a
    MiniBatchKMeans per requested batch size, recording fit time and inertia.
    """
    results = defaultdict(lambda: [])
    n_features = 50000  # number of points drawn around each center
    means = np.array(
        [
            [1, 1],
            [-1, -1],
            [1, -1],
            [-1, 1],
            [0.5, 0.5],
            [0.75, -0.5],
            [-1, 0.75],
            [1, 0],
        ]
    )
    # Stack one Gaussian cloud per center into a single (8 * n_features, 2) array.
    clouds = [center + 0.8 * np.random.randn(n_features, 2) for center in means]
    X = np.vstack(clouds)

    max_it = len(chunks)
    for it, chunk in enumerate(chunks, start=1):
        print("==============================")
        print("Iteration %03d of %03d" % (it, max_it))
        print("==============================")
        print()

        print("Fast K-Means")
        tstart = time()
        mbkmeans = MiniBatchKMeans(init="k-means++", n_clusters=8, batch_size=chunk)
        mbkmeans.fit(X)
        delta = time() - tstart
        print("Speed: %0.3fs" % delta)
        print("Inertia: %0.3fs" % mbkmeans.inertia_)
        print()

        results["MiniBatchKMeans Speed"].append(delta)
        results["MiniBatchKMeans Quality"].append(mbkmeans.inertia_)
    return results
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import axes3d  # register the 3d projection # noqa: F401

    samples_range = np.linspace(50, 150, 5).astype(int)
    features_range = np.linspace(150, 50000, 5).astype(int)
    chunks = np.linspace(500, 10000, 15).astype(int)

    results = compute_bench(samples_range, features_range)
    results_2 = compute_bench_2(chunks)

    # The result keys mix cases ("kmeans_speed" vs "MiniBatchKMeans Speed"),
    # so classify speed vs. quality series case-insensitively; a bare
    # `"speed" in label` would silently route the MiniBatchKMeans speed
    # curve to the inertia axes and exclude it from max_time.
    max_time = max(
        [
            max(i)
            for i in [t for (label, t) in results.items() if "speed" in label.lower()]
        ]
    )
    max_inertia = max(
        [
            max(i)
            for i in [
                t for (label, t) in results.items() if "speed" not in label.lower()
            ]
        ]
    )

    fig = plt.figure("scikit-learn K-Means benchmark results")
    # Top row: 3d surfaces of fit time (left) and inertia (right) vs. shape.
    for c, (label, timings) in zip("brcy", sorted(results.items())):
        if "speed" in label.lower():
            ax = fig.add_subplot(2, 2, 1, projection="3d")
            ax.set_zlim3d(0.0, max_time * 1.1)
        else:
            ax = fig.add_subplot(2, 2, 2, projection="3d")
            ax.set_zlim3d(0.0, max_inertia * 1.1)

        X, Y = np.meshgrid(samples_range, features_range)
        Z = np.asarray(timings).reshape(samples_range.shape[0], features_range.shape[0])
        ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.5)
        ax.set_xlabel("n_samples")
        ax.set_ylabel("n_features")

    # Bottom row: MiniBatchKMeans speed/quality as a function of batch size.
    i = 0
    for c, (label, timings) in zip("br", sorted(results_2.items())):
        i += 1
        ax = fig.add_subplot(2, 2, i + 2)
        y = np.asarray(timings)
        ax.plot(chunks, y, color=c, alpha=0.8)
        ax.set_xlabel("Chunks")
        ax.set_ylabel(label)

    plt.show()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_isolation_forest.py | benchmarks/bench_isolation_forest.py | """
==========================================
IsolationForest benchmark
==========================================
A test of IsolationForest on classical anomaly detection datasets.
The benchmark is run as follows:
1. The dataset is randomly split into a training set and a test set, both
assumed to contain outliers.
2. Isolation Forest is trained on the training set.
3. The ROC curve is computed on the test set using the knowledge of the labels.
Note that the smtp dataset contains a very small proportion of outliers.
Therefore, depending on the seed of the random number generator, randomly
splitting the data set might lead to a test set containing no outliers. In this
case a warning is raised when computing the ROC curve.
"""
from time import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import fetch_covtype, fetch_kddcup99, fetch_openml
from sklearn.ensemble import IsolationForest
from sklearn.metrics import auc, roc_curve
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import shuffle as sh
print(__doc__)
def print_outlier_ratio(y):
    """Print per-value counts of the target and the resulting outlier ratio.

    The outlier ratio is the relative frequency of the rarest label — a
    useful indicator for the datasets used in bench_isolation_forest.py.
    """
    values, counts = np.unique(y, return_counts=True)
    print("----- Target count values: ")
    for value, count in zip(values, counts):
        print("------ %s -> %d occurrences" % (str(value), count))
    print("----- Outlier ratio: %.5f" % (np.min(counts) / len(y)))
random_state = 1

fig_roc, ax_roc = plt.subplots(1, 1, figsize=(8, 5))

# Set this to true for plotting score histograms for each dataset:
with_decision_function_histograms = False

# datasets available = ['http', 'smtp', 'SA', 'SF', 'shuttle', 'forestcover']
datasets = ["http", "smtp", "SA", "SF", "shuttle", "forestcover"]

# Loop over all datasets for fitting and scoring the estimator:
for dat in datasets:
    # Loading and vectorizing the data:
    print("====== %s ======" % dat)
    print("--- Fetching data...")
    if dat in ["http", "smtp", "SF", "SA"]:
        dataset = fetch_kddcup99(
            subset=dat, shuffle=True, percent10=True, random_state=random_state
        )
        X = dataset.data
        y = dataset.target

    if dat == "shuttle":
        dataset = fetch_openml("shuttle", as_frame=False)
        X = dataset.data
        y = dataset.target.astype(np.int64)
        X, y = sh(X, y, random_state=random_state)
        # we remove data with label 4
        # normal data are then those of class 1
        s = y != 4
        X = X[s, :]
        y = y[s]
        y = (y != 1).astype(int)
        print("----- ")

    if dat == "forestcover":
        dataset = fetch_covtype(shuffle=True, random_state=random_state)
        X = dataset.data
        y = dataset.target
        # normal data are those with attribute 2
        # abnormal those with attribute 4
        s = (y == 2) + (y == 4)
        X = X[s, :]
        y = y[s]
        y = (y != 2).astype(int)
        print_outlier_ratio(y)

    print("--- Vectorizing data...")

    if dat == "SF":
        # Binarize the single categorical column into indicator features.
        lb = LabelBinarizer()
        x1 = lb.fit_transform(X[:, 1].astype(str))
        X = np.c_[X[:, :1], x1, X[:, 2:]]
        y = (y != b"normal.").astype(int)
        print_outlier_ratio(y)

    if dat == "SA":
        # Binarize the three categorical columns into indicator features.
        lb = LabelBinarizer()
        x1 = lb.fit_transform(X[:, 1].astype(str))
        x2 = lb.fit_transform(X[:, 2].astype(str))
        x3 = lb.fit_transform(X[:, 3].astype(str))
        X = np.c_[X[:, :1], x1, x2, x3, X[:, 4:]]
        y = (y != b"normal.").astype(int)
        print_outlier_ratio(y)

    if dat in ("http", "smtp"):
        y = (y != b"normal.").astype(int)
        print_outlier_ratio(y)

    n_samples, n_features = X.shape
    n_samples_train = n_samples // 2

    X = X.astype(float)
    # Split into train/test halves; both halves may contain outliers.
    X_train = X[:n_samples_train, :]
    X_test = X[n_samples_train:, :]
    y_train = y[:n_samples_train]
    y_test = y[n_samples_train:]

    print("--- Fitting the IsolationForest estimator...")
    model = IsolationForest(n_jobs=-1, random_state=random_state)
    tstart = time()
    model.fit(X_train)
    fit_time = time() - tstart
    tstart = time()

    scoring = -model.decision_function(X_test)  # the lower, the more abnormal

    print("--- Preparing the plot elements...")
    if with_decision_function_histograms:
        fig, ax = plt.subplots(3, sharex=True, sharey=True)
        bins = np.linspace(-0.5, 0.5, 200)
        ax[0].hist(scoring, bins, color="black")
        ax[0].set_title("Decision function for %s dataset" % dat)
        ax[1].hist(scoring[y_test == 0], bins, color="b", label="normal data")
        ax[1].legend(loc="lower right")
        ax[2].hist(scoring[y_test == 1], bins, color="r", label="outliers")
        ax[2].legend(loc="lower right")

    # Show ROC Curves
    predict_time = time() - tstart
    fpr, tpr, thresholds = roc_curve(y_test, scoring)
    auc_score = auc(fpr, tpr)
    label = "%s (AUC: %0.3f, train_time= %0.2fs, test_time= %0.2fs)" % (
        dat,
        auc_score,
        fit_time,
        predict_time,
    )
    # Print AUC score and train/test time:
    print(label)
    ax_roc.plot(fpr, tpr, lw=1, label=label)

ax_roc.set_xlim([-0.05, 1.05])
ax_roc.set_ylim([-0.05, 1.05])
ax_roc.set_xlabel("False Positive Rate")
ax_roc.set_ylabel("True Positive Rate")
ax_roc.set_title("Receiver operating characteristic (ROC) curves")
ax_roc.legend(loc="lower right")
fig_roc.tight_layout()
plt.show()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/plot_tsne_mnist.py | benchmarks/plot_tsne_mnist.py | import argparse
import os.path as op
import matplotlib.pyplot as plt
import numpy as np
LOG_DIR = "mnist_tsne_output"
if __name__ == "__main__":
parser = argparse.ArgumentParser("Plot benchmark results for t-SNE")
parser.add_argument(
"--labels",
type=str,
default=op.join(LOG_DIR, "mnist_original_labels_10000.npy"),
help="1D integer numpy array for labels",
)
parser.add_argument(
"--embedding",
type=str,
default=op.join(LOG_DIR, "mnist_sklearn_TSNE_10000.npy"),
help="2D float numpy array for embedded data",
)
args = parser.parse_args()
X = np.load(args.embedding)
y = np.load(args.labels)
for i in np.unique(y):
mask = y == i
plt.scatter(X[mask, 0], X[mask, 1], alpha=0.2, label=int(i))
plt.legend(loc="best")
plt.show()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_plot_svd.py | benchmarks/bench_plot_svd.py | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but is a fat infinite tail.
"""
import gc
from collections import defaultdict
from time import time
import numpy as np
from scipy.linalg import svd
from sklearn.datasets import make_low_rank_matrix
from sklearn.utils.extmath import randomized_svd
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
    """Time exact SciPy SVD against randomized_svd over a grid of shapes.

    Returns a dict mapping each method label to a list of timings, one
    entry per (n_samples, n_features) combination.
    """
    results = defaultdict(lambda: [])
    shapes = [(ns, nf) for ns in samples_range for nf in features_range]
    for it, (n_samples, n_features) in enumerate(shapes, start=1):
        print("====================")
        print("Iteration %03d of %03d" % (it, len(shapes)))
        print("====================")
        X = make_low_rank_matrix(
            n_samples, n_features, effective_rank=rank, tail_strength=0.2
        )

        gc.collect()
        print("benchmarking scipy svd: ")
        tstart = time()
        svd(X, full_matrices=False)
        results["scipy svd"].append(time() - tstart)

        gc.collect()
        print("benchmarking scikit-learn randomized_svd: n_iter=0")
        tstart = time()
        randomized_svd(X, rank, n_iter=0)
        results["scikit-learn randomized_svd (n_iter=0)"].append(time() - tstart)

        gc.collect()
        print("benchmarking scikit-learn randomized_svd: n_iter=%d " % n_iter)
        tstart = time()
        randomized_svd(X, rank, n_iter=n_iter)
        results["scikit-learn randomized_svd (n_iter=%d)" % n_iter].append(
            time() - tstart
        )
    return results
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import axes3d  # register the 3d projection # noqa: F401

    samples_range = np.linspace(2, 1000, 4).astype(int)
    features_range = np.linspace(2, 1000, 4).astype(int)
    results = compute_bench(samples_range, features_range)

    label = "scikit-learn singular value decomposition benchmark results"
    fig = plt.figure(label)
    # Figure.gca() stopped accepting keyword arguments in Matplotlib 3.6
    # (deprecated in 3.4), so create the 3d axes explicitly instead of
    # the former `fig.gca(projection="3d")`.
    ax = fig.add_subplot(projection="3d")
    for c, (label, timings) in zip("rbg", sorted(results.items())):
        X, Y = np.meshgrid(samples_range, features_range)
        Z = np.asarray(timings).reshape(samples_range.shape[0], features_range.shape[0])
        # plot the actual surface
        ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3, color=c)
        # dummy point plot to stick the legend to since surface plot do not
        # support legends (yet?)
        ax.plot([1], [1], [1], color=c, label=label)

    ax.set_xlabel("n_samples")
    ax.set_ylabel("n_features")
    ax.set_zlabel("Time (s)")
    ax.legend()
    plt.show()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_pca_solvers.py | benchmarks/bench_pca_solvers.py | # %%
#
# This benchmark compares the speed of PCA solvers on datasets of different
# sizes in order to determine the best solver to select by default via the
# "auto" heuristic.
#
# Note: we do not control for the accuracy of the solvers: we assume that all
# solvers yield transformed data with similar explained variance. This
# assumption is generally true, except for the randomized solver that might
# require more power iterations.
#
# We generate synthetic data with dimensions that are useful to plot:
# - time vs n_samples for a fixed n_features and,
# - time vs n_features for a fixed n_samples.
import itertools
from math import log10
from time import perf_counter
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn import config_context
from sklearn.decomposition import PCA
# Reference sizes for the dimension held fixed in each scaling curve.
REF_DIMS = [100, 1000, 10_000]
data_shapes = []
for ref_dim in REF_DIMS:
    # Vary n_features (powers of ten and 3x multiples) at fixed n_samples.
    data_shapes.extend([(ref_dim, 10**i) for i in range(1, 8 - int(log10(ref_dim)))])
    data_shapes.extend(
        [(ref_dim, 3 * 10**i) for i in range(1, 8 - int(log10(ref_dim)))]
    )

    # Vary n_samples at fixed n_features.
    data_shapes.extend([(10**i, ref_dim) for i in range(1, 8 - int(log10(ref_dim)))])
    data_shapes.extend(
        [(3 * 10**i, ref_dim) for i in range(1, 8 - int(log10(ref_dim)))]
    )

# Remove duplicates:
data_shapes = sorted(set(data_shapes))

print("Generating test datasets...")
rng = np.random.default_rng(0)
datasets = [rng.normal(size=shape) for shape in data_shapes]
# %%
def measure_one(data, n_components, solver, method_name="fit"):
    """Return the median wall-clock time of repeated PCA method calls.

    The requested method is invoked in a loop until at least 0.5 s of total
    runtime has accumulated; the median of the individual call durations is
    returned.
    """
    print(
        f"Benchmarking {solver=!r}, {n_components=}, {method_name=!r} on data with"
        f" shape {data.shape}"
    )
    pca = PCA(n_components=n_components, svd_solver=solver, random_state=0)
    method = getattr(pca, method_name)
    durations = []
    total = 0
    with config_context(assume_finite=True):
        while total < 0.5:
            t0 = perf_counter()
            method(data)
            dt = perf_counter() - t0
            durations.append(dt)
            total += dt
    return np.median(durations)
SOLVERS = ["full", "covariance_eigh", "arpack", "randomized", "auto"]
measurements = []
for data, n_components, method_name in itertools.product(
    datasets, [2, 50], ["fit", "fit_transform"]
):
    if n_components >= min(data.shape):
        # PCA needs n_components strictly smaller than both data dimensions.
        continue

    for solver in SOLVERS:
        if solver == "covariance_eigh" and data.shape[1] > 5000:
            # Too much memory and too slow.
            continue
        if solver in ["arpack", "full"] and log10(data.size) > 7:
            # Too slow, in particular for the full solver.
            continue
        time = measure_one(data, n_components, solver, method_name=method_name)
        measurements.append(
            {
                "n_components": n_components,
                "n_samples": data.shape[0],
                "n_features": data.shape[1],
                "time": time,
                "solver": solver,
                "method_name": method_name,
            }
        )

measurements = pd.DataFrame(measurements)
measurements.to_csv("bench_pca_solvers.csv", index=False)
# %%
all_method_names = measurements["method_name"].unique()
all_n_components = measurements["n_components"].unique()

# One figure per method: time vs. n_samples, with n_features fixed per row.
for method_name in all_method_names:
    fig, axes = plt.subplots(
        figsize=(16, 16),
        nrows=len(REF_DIMS),
        ncols=len(all_n_components),
        sharey=True,
        constrained_layout=True,
    )
    fig.suptitle(f"Benchmarks for PCA.{method_name}, varying n_samples", fontsize=16)

    for row_idx, ref_dim in enumerate(REF_DIMS):
        for n_components, ax in zip(all_n_components, axes[row_idx]):
            for solver in SOLVERS:
                # Highlight the "auto" heuristic with a dashed black line.
                if solver == "auto":
                    style_kwargs = dict(linewidth=2, color="black", style="--")
                else:
                    style_kwargs = dict(style="o-")
                ax.set(
                    title=f"n_components={n_components}, n_features={ref_dim}",
                    ylabel="time (s)",
                )
                measurements.query(
                    "n_components == @n_components and n_features == @ref_dim"
                    " and solver == @solver and method_name == @method_name"
                ).plot.line(
                    x="n_samples",
                    y="time",
                    label=solver,
                    logx=True,
                    logy=True,
                    ax=ax,
                    **style_kwargs,
                )

# %%
# One figure per method: time vs. n_features, with n_samples fixed per row.
for method_name in all_method_names:
    fig, axes = plt.subplots(
        figsize=(16, 16),
        nrows=len(REF_DIMS),
        ncols=len(all_n_components),
        sharey=True,
    )
    fig.suptitle(f"Benchmarks for PCA.{method_name}, varying n_features", fontsize=16)

    for row_idx, ref_dim in enumerate(REF_DIMS):
        for n_components, ax in zip(all_n_components, axes[row_idx]):
            for solver in SOLVERS:
                if solver == "auto":
                    style_kwargs = dict(linewidth=2, color="black", style="--")
                else:
                    style_kwargs = dict(style="o-")
                ax.set(
                    title=f"n_components={n_components}, n_samples={ref_dim}",
                    ylabel="time (s)",
                )
                measurements.query(
                    "n_components == @n_components and n_samples == @ref_dim "
                    " and solver == @solver and method_name == @method_name"
                ).plot.line(
                    x="n_features",
                    y="time",
                    label=solver,
                    logx=True,
                    logy=True,
                    ax=ax,
                    **style_kwargs,
                )

# %%
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_tsne_mnist.py | benchmarks/bench_tsne_mnist.py | """
=============================
MNIST dataset T-SNE benchmark
=============================
"""
# SPDX-License-Identifier: BSD-3-Clause
import argparse
import json
import os
import os.path as op
from time import time
import numpy as np
from joblib import Memory
from sklearn.utils._openmp_helpers import _openmp_effective_n_threads
from sklearn.datasets import fetch_openml
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.neighbors import NearestNeighbors
from sklearn.utils import check_array
from sklearn.utils import shuffle as _shuffle
# Directory where embeddings, labels and timing logs are written.
LOG_DIR = "mnist_tsne_output"
if not os.path.exists(LOG_DIR):
    os.mkdir(LOG_DIR)

# Cache the expensive dataset preparation, memory-mapped read-only.
memory = Memory(os.path.join(LOG_DIR, "mnist_tsne_benchmark_data"), mmap_mode="r")
@memory.cache
def load_data(dtype=np.float32, order="C", shuffle=True, seed=0):
    """Load MNIST, then cache and memmap the result.

    ``dtype``/``order`` are forwarded to ``check_array``; ``shuffle`` and
    ``seed`` control an optional deterministic shuffle. Pixel values are
    scaled to [0, 1].
    """
    print("Loading dataset...")
    data = fetch_openml("mnist_784", as_frame=True)

    X = check_array(data["data"], dtype=dtype, order=order)
    y = data["target"]

    if shuffle:
        X, y = _shuffle(X, y, random_state=seed)

    # Normalize features
    X /= 255
    return X, y
def nn_accuracy(X, X_embedded, k=1):
    """Fraction of points whose nearest neighbor is preserved by the embedding."""
    nn = NearestNeighbors(n_neighbors=1, n_jobs=-1)
    _, idx_original = nn.fit(X).kneighbors()
    _, idx_embedded = nn.fit(X_embedded).kneighbors()
    return np.mean(idx_original == idx_embedded)
def tsne_fit_transform(model, data):
    """Fit-transform *data* with *model* and return (embedding, n_iter_)."""
    embedding = model.fit_transform(data)
    return embedding, model.n_iter_
def sanitize(filename):
    """Replace path separators and spaces so *filename* is a safe file name."""
    return filename.translate(str.maketrans({"/": "-", " ": "_"}))
if __name__ == "__main__":
    parser = argparse.ArgumentParser("Benchmark for t-SNE")
    parser.add_argument(
        "--order", type=str, default="C", help="Order of the input data"
    )
    parser.add_argument("--perplexity", type=float, default=30)
    parser.add_argument(
        "--bhtsne",
        action="store_true",
        help=(
            "if set and the reference bhtsne code is "
            "correctly installed, run it in the benchmark."
        ),
    )
    parser.add_argument(
        "--all",
        action="store_true",
        help=(
            "if set, run the benchmark with the whole MNIST."
            "dataset. Note that it will take up to 1 hour."
        ),
    )
    parser.add_argument(
        "--profile",
        action="store_true",
        help="if set, run the benchmark with a memory profiler.",
    )
    parser.add_argument("--verbose", type=int, default=0)
    parser.add_argument(
        "--pca-components",
        type=int,
        default=50,
        help="Number of principal components for preprocessing.",
    )
    args = parser.parse_args()

    print("Used number of threads: {}".format(_openmp_effective_n_threads()))
    X, y = load_data(order=args.order)

    if args.pca_components > 0:
        t0 = time()
        X = PCA(n_components=args.pca_components).fit_transform(X)
        print(
            "PCA preprocessing down to {} dimensions took {:0.3f}s".format(
                args.pca_components, time() - t0
            )
        )

    methods = []

    # Put TSNE in methods
    # NOTE(review): TSNE's ``n_iter`` was renamed ``max_iter`` in
    # scikit-learn 1.5 — confirm the scikit-learn version targeted here.
    tsne = TSNE(
        n_components=2,
        init="pca",
        perplexity=args.perplexity,
        verbose=args.verbose,
        n_iter=1000,
    )
    methods.append(("sklearn TSNE", lambda data: tsne_fit_transform(tsne, data)))

    if args.bhtsne:
        try:
            from bhtsne.bhtsne import run_bh_tsne
        except ImportError as e:
            raise ImportError(
                """\
If you want comparison with the reference implementation, build the
binary from source (https://github.com/lvdmaaten/bhtsne) in the folder
benchmarks/bhtsne and add an empty `__init__.py` file in the folder:
$ git clone git@github.com:lvdmaaten/bhtsne.git
$ cd bhtsne
$ g++ sptree.cpp tsne.cpp tsne_main.cpp -o bh_tsne -O2
$ touch __init__.py
$ cd ..
"""
            ) from e

        def bhtsne(X):
            """Wrapper for the reference lvdmaaten/bhtsne implementation."""
            # PCA preprocessing is done elsewhere in the benchmark script
            n_iter = -1  # TODO find a way to report the number of iterations
            return (
                run_bh_tsne(
                    X,
                    use_pca=False,
                    perplexity=args.perplexity,
                    verbose=args.verbose > 0,
                ),
                n_iter,
            )

        methods.append(("lvdmaaten/bhtsne", bhtsne))

    if args.profile:
        try:
            from memory_profiler import profile
        except ImportError as e:
            raise ImportError(
                "To run the benchmark with `--profile`, you "
                "need to install `memory_profiler`. Please "
                "run `pip install memory_profiler`."
            ) from e
        methods = [(n, profile(m)) for n, m in methods]

    data_size = [100, 500, 1000, 5000, 10000]
    if args.all:
        data_size.append(70000)

    results = []
    basename = os.path.basename(os.path.splitext(__file__)[0])
    log_filename = os.path.join(LOG_DIR, basename + ".json")
    for n in data_size:
        X_train = X[:n]
        y_train = y[:n]
        n = X_train.shape[0]
        for name, method in methods:
            print("Fitting {} on {} samples...".format(name, n))
            t0 = time()
            # Save the raw inputs so plot_tsne_mnist.py can reuse them.
            np.save(
                os.path.join(LOG_DIR, "mnist_{}_{}.npy".format("original", n)), X_train
            )
            np.save(
                os.path.join(LOG_DIR, "mnist_{}_{}.npy".format("original_labels", n)),
                y_train,
            )
            X_embedded, n_iter = method(X_train)
            duration = time() - t0
            precision_5 = nn_accuracy(X_train, X_embedded)
            print(
                "Fitting {} on {} samples took {:.3f}s in {:d} iterations, "
                "nn accuracy: {:0.3f}".format(name, n, duration, n_iter, precision_5)
            )
            results.append(dict(method=name, duration=duration, n_samples=n))
            # Rewrite the results file after every run so partial progress
            # survives an interrupted benchmark.
            with open(log_filename, "w", encoding="utf-8") as f:
                json.dump(results, f)
            method_name = sanitize(name)
            np.save(
                op.join(LOG_DIR, "mnist_{}_{}.npy".format(method_name, n)), X_embedded
            )
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_20newsgroups.py | benchmarks/bench_20newsgroups.py | import argparse
from time import time
import numpy as np
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import (
AdaBoostClassifier,
ExtraTreesClassifier,
RandomForestClassifier,
)
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import MultinomialNB
from sklearn.utils.validation import check_array
# Candidate classifiers selectable via -e/--estimators on the command line.
ESTIMATORS = {
    "dummy": DummyClassifier(),
    "random_forest": RandomForestClassifier(max_features="sqrt", min_samples_split=10),
    "extra_trees": ExtraTreesClassifier(max_features="sqrt", min_samples_split=10),
    "logistic_regression": LogisticRegression(),
    "naive_bayes": MultinomialNB(),
    "adaboost": AdaBoostClassifier(n_estimators=10),
}
###############################################################################
# Data
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-e", "--estimators", nargs="+", required=True, choices=ESTIMATORS
    )
    args = vars(parser.parse_args())

    data_train = fetch_20newsgroups_vectorized(subset="train")
    data_test = fetch_20newsgroups_vectorized(subset="test")
    X_train = check_array(data_train.data, dtype=np.float32, accept_sparse="csc")
    X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
    y_train = data_train.target
    y_test = data_test.target

    print("20 newsgroups")
    print("=============")
    print(f"X_train.shape = {X_train.shape}")
    print(f"X_train.format = {X_train.format}")
    print(f"X_train.dtype = {X_train.dtype}")
    print(f"X_train density = {X_train.nnz / np.prod(X_train.shape)}")
    print(f"y_train {y_train.shape}")
    print(f"X_test {X_test.shape}")
    print(f"X_test.format = {X_test.format}")
    print(f"X_test.dtype = {X_test.dtype}")
    print(f"y_test {y_test.shape}")
    print()
    print("Classifier Training")
    print("===================")
    accuracy, train_time, test_time = {}, {}, {}
    for name in sorted(args["estimators"]):
        clf = ESTIMATORS[name]
        try:
            # Make runs reproducible when the estimator supports it.
            clf.set_params(random_state=0)
        except (TypeError, ValueError):
            pass

        print("Training %s ... " % name, end="")
        t0 = time()
        clf.fit(X_train, y_train)
        train_time[name] = time() - t0
        t0 = time()
        y_pred = clf.predict(X_test)
        test_time[name] = time() - t0
        accuracy[name] = accuracy_score(y_test, y_pred)
        print("done")

    print()
    print("Classification performance:")
    print("===========================")
    print()
    print("%s %s %s %s" % ("Classifier ", "train-time", "test-time", "Accuracy"))
    print("-" * 44)
    # Report estimators sorted by ascending accuracy.
    for name in sorted(accuracy, key=accuracy.get):
        print(
            "%s %s %s %s"
            % (
                name.ljust(16),
                ("%.4fs" % train_time[name]).center(10),
                ("%.4fs" % test_time[name]).center(10),
                ("%.4f" % accuracy[name]).center(10),
            )
        )
    print()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_isolation_forest_predict.py | benchmarks/bench_isolation_forest_predict.py | """
==========================================
IsolationForest prediction benchmark
==========================================
A test of IsolationForest on classical anomaly detection datasets.
The benchmark is run as follows:
1. The dataset is randomly split into a training set and a test set, both
assumed to contain outliers.
2. Isolation Forest is trained on the training set fixed at 1000 samples.
3. The test samples are scored using the trained model at:
- 1000, 10000, 50000 samples
- 10, 100, 1000 features
- 0.01, 0.1, 0.5 contamination
- 1, 2, 3, 4 n_jobs
We compare the prediction time at the very end.
Here are instructions for running this benchmark to compare runtime against main branch:
1. Build and run on a branch or main, e.g. for a branch named `pr`:
```bash
python bench_isolation_forest_predict.py bench ~/bench_results pr
```
2. Plotting to compare two branches `pr` and `main`:
```bash
python bench_isolation_forest_predict.py plot ~/bench_results pr main results_image.png
```
"""
import argparse
from collections import defaultdict
from pathlib import Path
from time import time
import numpy as np
import pandas as pd
from joblib import parallel_config
from sklearn.ensemble import IsolationForest
print(__doc__)
def get_data(
    n_samples_train, n_samples_test, n_features, contamination=0.1, random_state=0
):
    """Generate train/test clouds of two Gaussian blobs plus injected outliers.

    Function based on code from: https://scikit-learn.org/stable/
    auto_examples/ensemble/plot_isolation_forest.html#sphx-glr-auto-
    examples-ensemble-plot-isolation-forest-py
    """
    rng = np.random.RandomState(random_state)

    base_train = 0.3 * rng.randn(n_samples_train, n_features)
    X_train = np.concatenate([base_train + 2, base_train - 2], axis=0)

    base_test = 0.3 * rng.randn(n_samples_test, n_features)
    X_test = np.concatenate([base_test + 2, base_test - 2], axis=0)

    # Overwrite a random subset of test rows (indices drawn from the first
    # half) with uniform noise acting as outliers.
    n_outliers = int(np.floor(contamination * n_samples_test))
    X_outliers = rng.uniform(low=-4, high=4, size=(n_outliers, n_features))
    outlier_idx = rng.choice(np.arange(0, n_samples_test), n_outliers, replace=False)
    X_test[outlier_idx, :] = X_outliers
    return X_train, X_test
def plot(args):
    """Plot predict-time comparisons between two benchmark-result CSVs."""
    import matplotlib.pyplot as plt
    import seaborn as sns

    bench_results = Path(args.bench_results)
    pr_name = args.pr_name
    main_name = args.main_name
    image_path = args.image_path

    results_path = Path(bench_results)
    pr_path = results_path / f"{pr_name}.csv"
    main_path = results_path / f"{main_name}.csv"
    image_path = results_path / image_path

    df_pr = pd.read_csv(pr_path).assign(branch=pr_name)
    df_main = pd.read_csv(main_path).assign(branch=main_name)

    # Merge the two datasets on the common columns
    merged_data = pd.merge(
        df_pr,
        df_main,
        on=["n_samples_test", "n_jobs"],
        suffixes=("_pr", "_main"),
    )

    # Set up the plotting grid
    sns.set(style="whitegrid", context="notebook", font_scale=1.5)

    # Create a figure with subplots
    fig, axes = plt.subplots(1, 2, figsize=(18, 6), sharex=True, sharey=True)

    # Plot predict time as a function of n_samples_test with different n_jobs
    print(merged_data["n_jobs"].unique())
    ax = axes[0]
    sns.lineplot(
        data=merged_data,
        x="n_samples_test",
        y="predict_time_pr",
        hue="n_jobs",
        style="n_jobs",
        markers="o",
        ax=ax,
        legend="full",
    )
    ax.set_title(f"Predict Time vs. n_samples_test - {pr_name} branch")
    ax.set_ylabel("Predict Time (Seconds)")
    ax.set_xlabel("n_samples_test")

    ax = axes[1]
    sns.lineplot(
        data=merged_data,
        x="n_samples_test",
        y="predict_time_main",
        hue="n_jobs",
        style="n_jobs",
        markers="X",
        dashes=True,
        ax=ax,
        legend=None,
    )
    ax.set_title(f"Predict Time vs. n_samples_test - {main_name} branch")
    ax.set_ylabel("Predict Time")
    ax.set_xlabel("n_samples_test")

    # Adjust layout and display the plots
    plt.tight_layout()
    fig.savefig(image_path, bbox_inches="tight")
    print(f"Saved image to {image_path}")
def bench(args):
    """Run the predict-time benchmark grid and write results to <branch>.csv."""
    results_dir = Path(args.bench_results)
    branch = args.branch

    random_state = 1

    results = defaultdict(list)

    # Loop over all datasets for fitting and scoring the estimator:
    n_samples_train = 1000
    for n_samples_test in [
        1000,
        10000,
        50000,
    ]:
        for n_features in [10, 100, 1000]:
            for contamination in [0.01, 0.1, 0.5]:
                for n_jobs in [1, 2, 3, 4]:
                    X_train, X_test = get_data(
                        n_samples_train,
                        n_samples_test,
                        n_features,
                        contamination,
                        random_state,
                    )

                    print("--- Fitting the IsolationForest estimator...")
                    model = IsolationForest(n_jobs=-1, random_state=random_state)
                    tstart = time()
                    model.fit(X_train)
                    fit_time = time() - tstart

                    # clearcache
                    for _ in range(1000):
                        1 + 1

                    # Time only the scoring pass, with the requested thread
                    # count for the joblib threading backend.
                    with parallel_config("threading", n_jobs=n_jobs):
                        tstart = time()
                        model.decision_function(X_test)  # the lower, the more abnormal
                        predict_time = time() - tstart

                    results["predict_time"].append(predict_time)
                    results["fit_time"].append(fit_time)
                    results["n_samples_train"].append(n_samples_train)
                    results["n_samples_test"].append(n_samples_test)
                    results["n_features"].append(n_features)
                    results["contamination"].append(contamination)
                    results["n_jobs"].append(n_jobs)

    df = pd.DataFrame(results)
    df.to_csv(results_dir / f"{branch}.csv", index=False)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    # parse arguments for benchmarking
    subparsers = parser.add_subparsers()
    bench_parser = subparsers.add_parser("bench")
    bench_parser.add_argument("bench_results")
    bench_parser.add_argument("branch")
    bench_parser.set_defaults(func=bench)

    # parse arguments for plotting
    plot_parser = subparsers.add_parser("plot")
    plot_parser.add_argument("bench_results")
    plot_parser.add_argument("pr_name")
    plot_parser.add_argument("main_name")
    plot_parser.add_argument("image_path")
    plot_parser.set_defaults(func=plot)

    # enable the parser and run the relevant function
    args = parser.parse_args()
    args.func(args)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_covertype.py | benchmarks/bench_covertype.py | """
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, and Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
liblinear 15.9744s 0.0705s 0.2305
GaussianNB 3.0666s 0.3884s 0.4841
SGD 1.0558s 0.1152s 0.2300
CART 79.4296s 0.0523s 0.0469
RandomForest 1190.1620s 0.5881s 0.0243
ExtraTrees 640.3194s 0.6495s 0.0198
The same task has been used in a number of papers including:
* :doi:`"SVM Optimization: Inverse Dependence on Training Set Size"
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
<10.1145/1390156.1390273>`
* :doi:`"Pegasos: Primal estimated sub-gradient solver for svm"
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
<10.1145/1273496.1273598>`
* `"Training Linear SVMs in Linear Time"
<https://www.cs.cornell.edu/people/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] https://archive.ics.uci.edu/ml/datasets/Covertype
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import argparse
import os
from time import time
import numpy as np
from joblib import Memory
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.ensemble import (
ExtraTreesClassifier,
GradientBoostingClassifier,
RandomForestClassifier,
)
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.metrics import zero_one_loss
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(
os.path.join(get_data_home(), "covertype_benchmark_data"), mmap_mode="r"
)
@memory.cache
def load_data(dtype=np.float32, order="C", random_state=13):
    """Load the data, then cache and memmap the train/test split"""
    ######################################################################
    # Load dataset
    print("Loading dataset...")
    data = fetch_covtype(
        download_if_missing=True, shuffle=True, random_state=random_state
    )
    X = check_array(data["data"], dtype=dtype, order=order)
    # Binarize the target: class 1 (spruce/fir) vs. the rest, encoded as 0/1.
    y = (data["target"] != 1).astype(int)
    # Create train-test split (as [Joachims, 2006])
    print("Creating train-test split...")
    n_train = 522911
    X_train = X[:n_train]
    y_train = y[:n_train]
    X_test = X[n_train:]
    y_test = y[n_train:]
    # Standardize first 10 features (the numerical ones)
    mean = X_train.mean(axis=0)
    std = X_train.std(axis=0)
    # Remaining columns are binary indicators: leave them unscaled.
    mean[10:] = 0.0
    std[10:] = 1.0
    X_train = (X_train - mean) / std
    X_test = (X_test - mean) / std
    return X_train, X_test, y_train, y_test
ESTIMATORS = {
"GBRT": GradientBoostingClassifier(n_estimators=250),
"ExtraTrees": ExtraTreesClassifier(n_estimators=20),
"RandomForest": RandomForestClassifier(n_estimators=20),
"CART": DecisionTreeClassifier(min_samples_split=5),
"SGD": SGDClassifier(alpha=0.001),
"GaussianNB": GaussianNB(),
"liblinear": LinearSVC(loss="l2", penalty="l2", C=1000, dual=False, tol=1e-3),
"SAG": LogisticRegression(solver="sag", max_iter=2, C=1000),
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--classifiers",
nargs="+",
choices=ESTIMATORS,
type=str,
default=["liblinear", "GaussianNB", "SGD", "CART"],
help="list of classifiers to benchmark.",
)
parser.add_argument(
"--n-jobs",
nargs="?",
default=1,
type=int,
help=(
"Number of concurrently running workers for "
"models that support parallelism."
),
)
parser.add_argument(
"--order",
nargs="?",
default="C",
type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered data",
)
parser.add_argument(
"--random-seed",
nargs="?",
default=13,
type=int,
help="Common seed used by random number generator.",
)
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(
order=args["order"], random_state=args["random_seed"]
)
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print(
"%s %d (pos=%d, neg=%d, size=%dMB)"
% (
"number of train samples:".ljust(25),
X_train.shape[0],
np.sum(y_train == 1),
np.sum(y_train == 0),
int(X_train.nbytes / 1e6),
)
)
print(
"%s %d (pos=%d, neg=%d, size=%dMB)"
% (
"number of test samples:".ljust(25),
X_test.shape[0],
np.sum(y_test == 1),
np.sum(y_test == 0),
int(X_test.nbytes / 1e6),
)
)
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(
**{
p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")
}
)
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 44)
for name in sorted(args["classifiers"], key=error.get):
print(
"%s %s %s %s"
% (
name.ljust(12),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % error[name]).center(10),
)
)
print()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_plot_polynomial_kernel_approximation.py | benchmarks/bench_plot_polynomial_kernel_approximation.py | """
========================================================================
Benchmark for explicit feature map approximation of polynomial kernels
========================================================================
An example illustrating the approximation of the feature map
of a Homogeneous Polynomial kernel.
.. currentmodule:: sklearn.kernel_approximation
It shows how to use :class:`PolynomialCountSketch` and :class:`Nystroem` to
approximate the feature map of a polynomial kernel for
classification with an SVM on the digits dataset. Results using a linear
SVM in the original space, a linear SVM using the approximate mappings
and a kernelized SVM are compared.
The first plot shows the classification accuracy of Nystroem [2] and
PolynomialCountSketch [1] as the output dimension (n_components) grows.
It also shows the accuracy of a linear SVM and a polynomial kernel SVM
on the same data.
The second plot explores the scalability of PolynomialCountSketch
and Nystroem. For a sufficiently large output dimension,
PolynomialCountSketch should be faster as it is O(n(d+klog k))
while Nystroem is O(n(dk+k^2)). In addition, Nystroem requires
a time-consuming training phase, while training is almost immediate
for PolynomialCountSketch, whose training phase boils down to
initializing some random variables (because is data-independent).
[1] Pham, N., & Pagh, R. (2013, August). Fast and scalable polynomial
kernels via explicit feature maps. In Proceedings of the 19th ACM SIGKDD
international conference on Knowledge discovery and data mining (pp. 239-247)
(https://chbrown.github.io/kdd-2013-usb/kdd/p239.pdf)
[2] Charikar, M., Chen, K., & Farach-Colton, M. (2002, July). Finding frequent
items in data streams. In International Colloquium on Automata, Languages, and
Programming (pp. 693-703). Springer, Berlin, Heidelberg.
(https://people.cs.rutgers.edu/~farach/pubs/FrequentStream.pdf)
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# Load data manipulation functions
# Will use this for timing results
from time import time
# Some common libraries
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.kernel_approximation import Nystroem, PolynomialCountSketch
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
# Import SVM classifiers and feature map approximation algorithms
from sklearn.svm import SVC, LinearSVC
# Split data in train and test sets
X, y = load_digits()["data"], load_digits()["target"]
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7)
# Set the range of n_components for our experiments
out_dims = range(20, 400, 20)
# Evaluate Linear SVM
lsvm = LinearSVC().fit(X_train, y_train)
lsvm_score = 100 * lsvm.score(X_test, y_test)
# Evaluate kernelized SVM
ksvm = SVC(kernel="poly", degree=2, gamma=1.0).fit(X_train, y_train)
ksvm_score = 100 * ksvm.score(X_test, y_test)
# Evaluate PolynomialCountSketch + LinearSVM
ps_svm_scores = []
n_runs = 5
# To compensate for the stochasticity of the method, we make n_tets runs
for k in out_dims:
score_avg = 0
for _ in range(n_runs):
ps_svm = Pipeline(
[
("PS", PolynomialCountSketch(degree=2, n_components=k)),
("SVM", LinearSVC()),
]
)
score_avg += ps_svm.fit(X_train, y_train).score(X_test, y_test)
ps_svm_scores.append(100 * score_avg / n_runs)
# Evaluate Nystroem + LinearSVM
ny_svm_scores = []
n_runs = 5
for k in out_dims:
score_avg = 0
for _ in range(n_runs):
ny_svm = Pipeline(
[
(
"NY",
Nystroem(
kernel="poly", gamma=1.0, degree=2, coef0=0, n_components=k
),
),
("SVM", LinearSVC()),
]
)
score_avg += ny_svm.fit(X_train, y_train).score(X_test, y_test)
ny_svm_scores.append(100 * score_avg / n_runs)
# Show results
fig, ax = plt.subplots(figsize=(6, 4))
ax.set_title("Accuracy results")
ax.plot(out_dims, ps_svm_scores, label="PolynomialCountSketch + linear SVM", c="orange")
ax.plot(out_dims, ny_svm_scores, label="Nystroem + linear SVM", c="blue")
ax.plot(
[out_dims[0], out_dims[-1]],
[lsvm_score, lsvm_score],
label="Linear SVM",
c="black",
dashes=[2, 2],
)
ax.plot(
[out_dims[0], out_dims[-1]],
[ksvm_score, ksvm_score],
label="Poly-kernel SVM",
c="red",
dashes=[2, 2],
)
ax.legend()
ax.set_xlabel("N_components for PolynomialCountSketch and Nystroem")
ax.set_ylabel("Accuracy (%)")
ax.set_xlim([out_dims[0], out_dims[-1]])
fig.tight_layout()
# Now let's evaluate the scalability of PolynomialCountSketch vs Nystroem
# First we generate some fake data with a lot of samples
fakeData = np.random.randn(10000, 100)
fakeDataY = np.random.randint(0, high=10, size=(10000))
out_dims = range(500, 6000, 500)
# Evaluate scalability of PolynomialCountSketch as n_components grows
ps_svm_times = []
for k in out_dims:
ps = PolynomialCountSketch(degree=2, n_components=k)
start = time()
ps.fit_transform(fakeData, None)
ps_svm_times.append(time() - start)
# Evaluate scalability of Nystroem as n_components grows
# This can take a while due to the inefficient training phase
ny_svm_times = []
for k in out_dims:
ny = Nystroem(kernel="poly", gamma=1.0, degree=2, coef0=0, n_components=k)
start = time()
ny.fit_transform(fakeData, None)
ny_svm_times.append(time() - start)
# Show results
fig, ax = plt.subplots(figsize=(6, 4))
ax.set_title("Scalability results")
ax.plot(out_dims, ps_svm_times, label="PolynomialCountSketch", c="orange")
ax.plot(out_dims, ny_svm_times, label="Nystroem", c="blue")
ax.legend()
ax.set_xlabel("N_components for PolynomialCountSketch and Nystroem")
ax.set_ylabel("fit_transform time \n(s/10.000 samples)")
ax.set_xlim([out_dims[0], out_dims[-1]])
fig.tight_layout()
plt.show()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_plot_lasso_path.py | benchmarks/bench_plot_lasso_path.py | """Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but is a fat infinite tail.
"""
import gc
import sys
from collections import defaultdict
from time import time
import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import lars_path, lars_path_gram, lasso_path
def compute_bench(samples_range, features_range):
    """Time the four Lasso path solvers over a grid of problem shapes.

    For every (n_samples, n_features) pair a fresh low-rank regression
    problem is generated and `lars_path_gram`, `lars_path`, and `lasso_path`
    (with and without a precomputed Gram matrix) are each timed once.
    Returns a dict mapping solver label -> list of wall-clock durations,
    one entry per grid point, in iteration order.
    """
    it = 0
    results = defaultdict(lambda: [])
    max_it = len(samples_range) * len(features_range)
    for n_samples in samples_range:
        for n_features in features_range:
            it += 1
            print("====================")
            print("Iteration %03d of %03d" % (it, max_it))
            print("====================")
            dataset_kwargs = {
                "n_samples": n_samples,
                "n_features": n_features,
                "n_informative": n_features // 10,
                "effective_rank": min(n_samples, n_features) / 10,
                # 'effective_rank': None,
                "bias": 0.0,
            }
            print("n_samples: %d" % n_samples)
            print("n_features: %d" % n_features)
            X, y = make_regression(**dataset_kwargs)
            # gc.collect() before each timing to reduce allocator noise.
            gc.collect()
            print("benchmarking lars_path (with Gram):", end="")
            sys.stdout.flush()
            tstart = time()
            G = np.dot(X.T, X)  # precomputed Gram matrix
            Xy = np.dot(X.T, y)
            lars_path_gram(Xy=Xy, Gram=G, n_samples=y.size, method="lasso")
            delta = time() - tstart
            print("%0.3fs" % delta)
            results["lars_path (with Gram)"].append(delta)
            gc.collect()
            print("benchmarking lars_path (without Gram):", end="")
            sys.stdout.flush()
            tstart = time()
            lars_path(X, y, method="lasso")
            delta = time() - tstart
            print("%0.3fs" % delta)
            results["lars_path (without Gram)"].append(delta)
            gc.collect()
            print("benchmarking lasso_path (with Gram):", end="")
            sys.stdout.flush()
            tstart = time()
            lasso_path(X, y, precompute=True)
            delta = time() - tstart
            print("%0.3fs" % delta)
            results["lasso_path (with Gram)"].append(delta)
            gc.collect()
            print("benchmarking lasso_path (without Gram):", end="")
            sys.stdout.flush()
            tstart = time()
            lasso_path(X, y, precompute=False)
            delta = time() - tstart
            print("%0.3fs" % delta)
            results["lasso_path (without Gram)"].append(delta)
    return results
if __name__ == "__main__":
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d # register the 3d projection # noqa: F401
samples_range = np.linspace(10, 2000, 5).astype(int)
features_range = np.linspace(10, 2000, 5).astype(int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure("scikit-learn Lasso path benchmark results")
i = 1
for c, (label, timings) in zip("bcry", sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection="3d")
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0], features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
# ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel("n_samples")
ax.set_ylabel("n_features")
ax.set_zlabel("Time (s)")
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
# ax.legend()
i += 1
plt.show()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_online_ocsvm.py | benchmarks/bench_online_ocsvm.py | """
=====================================
SGDOneClassSVM benchmark
=====================================
This benchmark compares the :class:`SGDOneClassSVM` with :class:`OneClassSVM`.
The former is an online One-Class SVM implemented with a Stochastic Gradient
Descent (SGD). The latter is based on the LibSVM implementation. The
complexity of :class:`SGDOneClassSVM` is linear in the number of samples
whereas the one of :class:`OneClassSVM` is at best quadratic in the number of
samples. We here compare the performance in terms of AUC and training time on
classical anomaly detection datasets.
The :class:`OneClassSVM` is applied with a Gaussian kernel and we therefore
use a kernel approximation prior to the application of :class:`SGDOneClassSVM`.
"""
from time import time
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import interp1d
from sklearn.datasets import fetch_covtype, fetch_kddcup99
from sklearn.kernel_approximation import Nystroem
from sklearn.linear_model import SGDOneClassSVM
from sklearn.metrics import auc, roc_curve
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import LabelBinarizer, StandardScaler
from sklearn.svm import OneClassSVM
from sklearn.utils import shuffle
font = {"weight": "normal", "size": 15}
matplotlib.rc("font", **font)
print(__doc__)
def print_outlier_ratio(y):
    """
    Helper function to show the distinct value count of element in the target.
    Useful indicator for the datasets used in bench_isolation_forest.py.
    """
    values, counts = np.unique(y, return_counts=True)
    print("----- Target count values: ")
    for value, count in zip(values, counts):
        print("------ %s -> %d occurrences" % (str(value), count))
    # The minority class is treated as the outlier class.
    print("----- Outlier ratio: %.5f" % (np.min(counts) / len(y)))
# for roc curve computation
n_axis = 1000
x_axis = np.linspace(0, 1, n_axis)
datasets = ["http", "smtp", "SA", "SF", "forestcover"]
novelty_detection = False # if False, training set polluted by outliers
random_states = [42]
nu = 0.05
results_libsvm = np.empty((len(datasets), n_axis + 5))
results_online = np.empty((len(datasets), n_axis + 5))
for dat, dataset_name in enumerate(datasets):
print(dataset_name)
# Loading datasets
if dataset_name in ["http", "smtp", "SA", "SF"]:
dataset = fetch_kddcup99(
subset=dataset_name, shuffle=False, percent10=False, random_state=88
)
X = dataset.data
y = dataset.target
if dataset_name == "forestcover":
dataset = fetch_covtype(shuffle=False)
X = dataset.data
y = dataset.target
# normal data are those with attribute 2
# abnormal those with attribute 4
s = (y == 2) + (y == 4)
X = X[s, :]
y = y[s]
y = (y != 2).astype(int)
# Vectorizing data
if dataset_name == "SF":
# Casting type of X (object) as string is needed for string categorical
# features to apply LabelBinarizer
lb = LabelBinarizer()
x1 = lb.fit_transform(X[:, 1].astype(str))
X = np.c_[X[:, :1], x1, X[:, 2:]]
y = (y != b"normal.").astype(int)
if dataset_name == "SA":
lb = LabelBinarizer()
# Casting type of X (object) as string is needed for string categorical
# features to apply LabelBinarizer
x1 = lb.fit_transform(X[:, 1].astype(str))
x2 = lb.fit_transform(X[:, 2].astype(str))
x3 = lb.fit_transform(X[:, 3].astype(str))
X = np.c_[X[:, :1], x1, x2, x3, X[:, 4:]]
y = (y != b"normal.").astype(int)
if dataset_name in ["http", "smtp"]:
y = (y != b"normal.").astype(int)
print_outlier_ratio(y)
n_samples, n_features = np.shape(X)
if dataset_name == "SA": # LibSVM too long with n_samples // 2
n_samples_train = n_samples // 20
else:
n_samples_train = n_samples // 2
n_samples_test = n_samples - n_samples_train
print("n_train: ", n_samples_train)
print("n_features: ", n_features)
tpr_libsvm = np.zeros(n_axis)
tpr_online = np.zeros(n_axis)
fit_time_libsvm = 0
fit_time_online = 0
predict_time_libsvm = 0
predict_time_online = 0
X = X.astype(float)
gamma = 1 / n_features # OCSVM default parameter
for random_state in random_states:
print("random state: %s" % random_state)
X, y = shuffle(X, y, random_state=random_state)
X_train = X[:n_samples_train]
X_test = X[n_samples_train:]
y_train = y[:n_samples_train]
y_test = y[n_samples_train:]
if novelty_detection:
X_train = X_train[y_train == 0]
y_train = y_train[y_train == 0]
std = StandardScaler()
print("----------- LibSVM OCSVM ------------")
ocsvm = OneClassSVM(kernel="rbf", gamma=gamma, nu=nu)
pipe_libsvm = make_pipeline(std, ocsvm)
tstart = time()
pipe_libsvm.fit(X_train)
fit_time_libsvm += time() - tstart
tstart = time()
# scoring such that the lower, the more normal
scoring = -pipe_libsvm.decision_function(X_test)
predict_time_libsvm += time() - tstart
fpr_libsvm_, tpr_libsvm_, _ = roc_curve(y_test, scoring)
f_libsvm = interp1d(fpr_libsvm_, tpr_libsvm_)
tpr_libsvm += f_libsvm(x_axis)
print("----------- Online OCSVM ------------")
nystroem = Nystroem(gamma=gamma, random_state=random_state)
online_ocsvm = SGDOneClassSVM(nu=nu, random_state=random_state)
pipe_online = make_pipeline(std, nystroem, online_ocsvm)
tstart = time()
pipe_online.fit(X_train)
fit_time_online += time() - tstart
tstart = time()
# scoring such that the lower, the more normal
scoring = -pipe_online.decision_function(X_test)
predict_time_online += time() - tstart
fpr_online_, tpr_online_, _ = roc_curve(y_test, scoring)
f_online = interp1d(fpr_online_, tpr_online_)
tpr_online += f_online(x_axis)
tpr_libsvm /= len(random_states)
tpr_libsvm[0] = 0.0
fit_time_libsvm /= len(random_states)
predict_time_libsvm /= len(random_states)
auc_libsvm = auc(x_axis, tpr_libsvm)
results_libsvm[dat] = [
fit_time_libsvm,
predict_time_libsvm,
auc_libsvm,
n_samples_train,
n_features,
] + list(tpr_libsvm)
tpr_online /= len(random_states)
tpr_online[0] = 0.0
fit_time_online /= len(random_states)
predict_time_online /= len(random_states)
auc_online = auc(x_axis, tpr_online)
results_online[dat] = [
fit_time_online,
predict_time_online,
auc_online,
n_samples_train,
n_features,
] + list(tpr_libsvm)
# -------- Plotting bar charts -------------
fit_time_libsvm_all = results_libsvm[:, 0]
predict_time_libsvm_all = results_libsvm[:, 1]
auc_libsvm_all = results_libsvm[:, 2]
n_train_all = results_libsvm[:, 3]
n_features_all = results_libsvm[:, 4]
fit_time_online_all = results_online[:, 0]
predict_time_online_all = results_online[:, 1]
auc_online_all = results_online[:, 2]
width = 0.7
ind = 2 * np.arange(len(datasets))
x_tickslabels = [
(name + "\n" + r"$n={:,d}$" + "\n" + r"$d={:d}$").format(int(n), int(d))
for name, n, d in zip(datasets, n_train_all, n_features_all)
]
def autolabel_auc(rects, ax):
    """Attach a text label above each bar displaying its height."""
    for bar in rects:
        bar_height = bar.get_height()
        x_center = bar.get_x() + bar.get_width() / 2.0
        # AUC values are annotated with three decimals.
        ax.text(
            x_center,
            1.05 * bar_height,
            "%.3f" % bar_height,
            ha="center",
            va="bottom",
        )
def autolabel_time(rects, ax):
    """Attach a text label above each bar displaying its height."""
    for bar in rects:
        bar_height = bar.get_height()
        x_center = bar.get_x() + bar.get_width() / 2.0
        # Timings are annotated with a single decimal.
        ax.text(
            x_center,
            1.05 * bar_height,
            "%.1f" % bar_height,
            ha="center",
            va="bottom",
        )
fig, ax = plt.subplots(figsize=(15, 8))
ax.set_ylabel("AUC")
ax.set_ylim((0, 1.3))
rect_libsvm = ax.bar(ind, auc_libsvm_all, width=width, color="r")
rect_online = ax.bar(ind + width, auc_online_all, width=width, color="y")
ax.legend((rect_libsvm[0], rect_online[0]), ("LibSVM", "Online SVM"))
ax.set_xticks(ind + width / 2)
ax.set_xticklabels(x_tickslabels)
autolabel_auc(rect_libsvm, ax)
autolabel_auc(rect_online, ax)
plt.show()
fig, ax = plt.subplots(figsize=(15, 8))
ax.set_ylabel("Training time (sec) - Log scale")
ax.set_yscale("log")
rect_libsvm = ax.bar(ind, fit_time_libsvm_all, color="r", width=width)
rect_online = ax.bar(ind + width, fit_time_online_all, color="y", width=width)
ax.legend((rect_libsvm[0], rect_online[0]), ("LibSVM", "Online SVM"))
ax.set_xticks(ind + width / 2)
ax.set_xticklabels(x_tickslabels)
autolabel_time(rect_libsvm, ax)
autolabel_time(rect_online, ax)
plt.show()
fig, ax = plt.subplots(figsize=(15, 8))
ax.set_ylabel("Testing time (sec) - Log scale")
ax.set_yscale("log")
rect_libsvm = ax.bar(ind, predict_time_libsvm_all, color="r", width=width)
rect_online = ax.bar(ind + width, predict_time_online_all, color="y", width=width)
ax.legend((rect_libsvm[0], rect_online[0]), ("LibSVM", "Online SVM"))
ax.set_xticks(ind + width / 2)
ax.set_xticklabels(x_tickslabels)
autolabel_time(rect_libsvm, ax)
autolabel_time(rect_online, ax)
plt.show()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_rcv1_logreg_convergence.py | benchmarks/bench_rcv1_logreg_convergence.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import gc
import time
import matplotlib.pyplot as plt
import numpy as np
from joblib import Memory
from sklearn.datasets import fetch_rcv1
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.linear_model._sag import get_auto_step_size
try:
import lightning.classification as lightning_clf
except ImportError:
lightning_clf = None
m = Memory(cachedir=".", verbose=0)
# compute logistic loss
# compute logistic loss
def get_loss(w, intercept, myX, myy, C):
    """Return the L2-regularized logistic loss of (w, intercept) on (myX, myy).

    Prints the data term and penalty term before returning their sum.
    Labels in ``myy`` are expected in {-1, +1}.
    """
    n_samples = myX.shape[0]
    coef = w.ravel()
    margins = myX.dot(coef) + intercept
    data_loss = np.mean(np.log(1.0 + np.exp(-myy * margins)))
    penalty = coef.dot(coef) / 2.0 / C / n_samples
    print("%f + %f" % (data_loss, penalty))
    return data_loss + penalty
# We use joblib to cache individual fits. Note that we do not pass the dataset
# as argument as the hashing would be too slow, so we assume that the dataset
# never changes.
@m.cache()
def bench_one(name, clf_type, clf_params, n_iter):
    """Fit a fresh ``clf_type(**clf_params)`` for ``n_iter`` iterations on the
    module-level (X, y) and evaluate it.

    Returns ``(train_loss, train_score, test_score, duration)`` where
    ``duration`` is the wall-clock fit time in seconds.  Results are cached by
    joblib keyed on the arguments only — the dataset itself is assumed fixed.
    """
    clf = clf_type(**clf_params)
    try:
        # Modern estimators expose ``max_iter`` ...
        clf.set_params(max_iter=n_iter, random_state=42)
    except Exception:
        # ... older ones used ``n_iter`` instead.
        clf.set_params(n_iter=n_iter, random_state=42)
    st = time.time()
    clf.fit(X, y)
    end = time.time()
    try:
        # SGD-style models expose ``alpha``; convert to the equivalent C.
        C = 1.0 / clf.alpha / n_samples
    except Exception:
        C = clf.C
    try:
        intercept = clf.intercept_
    except Exception:
        # Some configurations fit no intercept.
        intercept = 0.0
    train_loss = get_loss(clf.coef_, intercept, X, y, C)
    train_score = clf.score(X, y)
    test_score = clf.score(X_test, y_test)
    duration = end - st
    return train_loss, train_score, test_score, duration
def bench(clfs):
    """Run every classifier over its iteration range and record results.

    ``clfs`` is a list of 7-tuples
    ``(name, estimator, iter_range, train_losses, train_scores,
    test_scores, durations)``; the four trailing lists are appended to
    **in place**, one entry per ``n_iter`` value.  Returns ``clfs``.
    """
    for (
        name,
        clf,
        iter_range,
        train_losses,
        train_scores,
        test_scores,
        durations,
    ) in clfs:
        print("training %s" % name)
        clf_type = type(clf)
        clf_params = clf.get_params()
        for n_iter in iter_range:
            # Collect garbage so earlier fits don't skew the timing.
            gc.collect()
            train_loss, train_score, test_score, duration = bench_one(
                name, clf_type, clf_params, n_iter
            )
            train_losses.append(train_loss)
            train_scores.append(train_score)
            test_scores.append(test_score)
            durations.append(duration)
            print("classifier: %s" % name)
            print("train_loss: %.8f" % train_loss)
            print("train_score: %.8f" % train_score)
            print("test_score: %.8f" % test_score)
            print("time for fit: %.8f seconds" % duration)
            print("")
        print("")
    return clfs
def plot_train_losses(clfs):
    """Plot each classifier's training loss against wall-clock fit time."""
    plt.figure()
    for record in clfs:
        # record layout: (name, clf, iters, losses, tr_scores, te_scores, durations)
        plt.plot(record[6], record[3], "-o", label=record[0])
    plt.legend(loc=0)
    plt.xlabel("seconds")
    plt.ylabel("train loss")
def plot_train_scores(clfs):
    """Plot each classifier's training accuracy against wall-clock fit time."""
    plt.figure()
    for record in clfs:
        # record layout: (name, clf, iters, losses, tr_scores, te_scores, durations)
        plt.plot(record[6], record[4], "-o", label=record[0])
    plt.legend(loc=0)
    plt.xlabel("seconds")
    plt.ylabel("train score")
    plt.ylim((0.92, 0.96))
def plot_test_scores(clfs):
    """Plot each classifier's test accuracy against wall-clock fit time."""
    plt.figure()
    for record in clfs:
        # record layout: (name, clf, iters, losses, tr_scores, te_scores, durations)
        plt.plot(record[6], record[5], "-o", label=record[0])
    plt.legend(loc=0)
    plt.xlabel("seconds")
    plt.ylabel("test score")
    plt.ylim((0.92, 0.96))
def plot_dloss(clfs):
    """Plot, on a log10 scale, each classifier's gap to the best (lowest)
    final training loss as a function of wall-clock fit time."""
    plt.figure()
    # Best final loss across all classifiers is the reference objective.
    best_final_loss = min(record[3][-1] for record in clfs)
    for record in clfs:
        name, train_losses, durations = record[0], record[3], record[6]
        gap = abs(np.array(train_losses) - best_final_loss)
        plt.plot(durations, np.log(gap) / np.log(10), "-o", label=name)
    plt.legend(loc=0)
    plt.xlabel("seconds")
    plt.ylabel("log(best - train_loss)")
def get_max_squared_sum(X):
    """Get the maximum row-wise sum of squares"""
    row_sq_norms = (X * X).sum(axis=1)
    return row_sq_norms.max()
rcv1 = fetch_rcv1()
X = rcv1.data
n_samples, n_features = X.shape
# consider the binary classification problem 'CCAT' vs the rest
ccat_idx = rcv1.target_names.tolist().index("CCAT")
y = rcv1.target.tocsc()[:, ccat_idx].toarray().ravel().astype(np.float64)
y[y == 0] = -1
# parameters
C = 1.0
fit_intercept = True
tol = 1.0e-14
# max_iter range
sgd_iter_range = list(range(1, 121, 10))
newton_iter_range = list(range(1, 25, 3))
lbfgs_iter_range = list(range(1, 242, 12))
liblinear_iter_range = list(range(1, 37, 3))
liblinear_dual_iter_range = list(range(1, 85, 6))
sag_iter_range = list(range(1, 37, 3))
clfs = [
(
"LR-liblinear",
LogisticRegression(
C=C,
tol=tol,
solver="liblinear",
fit_intercept=fit_intercept,
intercept_scaling=1,
),
liblinear_iter_range,
[],
[],
[],
[],
),
(
"LR-liblinear-dual",
LogisticRegression(
C=C,
tol=tol,
dual=True,
solver="liblinear",
fit_intercept=fit_intercept,
intercept_scaling=1,
),
liblinear_dual_iter_range,
[],
[],
[],
[],
),
(
"LR-SAG",
LogisticRegression(C=C, tol=tol, solver="sag", fit_intercept=fit_intercept),
sag_iter_range,
[],
[],
[],
[],
),
(
"LR-newton-cg",
LogisticRegression(
C=C, tol=tol, solver="newton-cg", fit_intercept=fit_intercept
),
newton_iter_range,
[],
[],
[],
[],
),
(
"LR-lbfgs",
LogisticRegression(C=C, tol=tol, solver="lbfgs", fit_intercept=fit_intercept),
lbfgs_iter_range,
[],
[],
[],
[],
),
(
"SGD",
SGDClassifier(
alpha=1.0 / C / n_samples,
penalty="l2",
loss="log_loss",
fit_intercept=fit_intercept,
verbose=0,
),
sgd_iter_range,
[],
[],
[],
[],
),
]
if lightning_clf is not None and not fit_intercept:
alpha = 1.0 / C / n_samples
# compute the same step_size than in LR-sag
max_squared_sum = get_max_squared_sum(X)
step_size = get_auto_step_size(max_squared_sum, alpha, "log", fit_intercept)
clfs.append(
(
"Lightning-SVRG",
lightning_clf.SVRGClassifier(
alpha=alpha, eta=step_size, tol=tol, loss="log"
),
sag_iter_range,
[],
[],
[],
[],
)
)
clfs.append(
(
"Lightning-SAG",
lightning_clf.SAGClassifier(
alpha=alpha, eta=step_size, tol=tol, loss="log"
),
sag_iter_range,
[],
[],
[],
[],
)
)
# We keep only 200 features, to have a dense dataset,
# and compare to lightning SAG, which seems incorrect in the sparse case.
X_csc = X.tocsc()
nnz_in_each_features = X_csc.indptr[1:] - X_csc.indptr[:-1]
X = X_csc[:, np.argsort(nnz_in_each_features)[-200:]]
X = X.toarray()
print("dataset: %.3f MB" % (X.nbytes / 1e6))
# Split training and testing. Switch train and test subset compared to
# LYRL2004 split, to have a larger training dataset.
n = 23149
X_test = X[:n, :]
y_test = y[:n]
X = X[n:, :]
y = y[n:]
clfs = bench(clfs)
plot_train_scores(clfs)
plot_test_scores(clfs)
plot_train_losses(clfs)
plot_dloss(clfs)
plt.show()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_text_vectorizers.py | benchmarks/bench_text_vectorizers.py | """
To run this benchmark, you will need,
* scikit-learn
* pandas
* memory_profiler
* psutil (optional, but recommended)
"""
import itertools
import timeit
import numpy as np
import pandas as pd
from memory_profiler import memory_usage
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import (
CountVectorizer,
HashingVectorizer,
TfidfVectorizer,
)
n_repeat = 3
def run_vectorizer(Vectorizer, X, **params):
    """Return a zero-argument closure that builds ``Vectorizer(**params)``
    and fits it on the corpus ``X`` (used by timeit / memory_usage)."""

    def fit_once():
        vectorizer = Vectorizer(**params)
        vectorizer.fit_transform(X)

    return fit_once
text = fetch_20newsgroups(subset="train").data[:1000]
print("=" * 80 + "\n#" + " Text vectorizers benchmark" + "\n" + "=" * 80 + "\n")
print("Using a subset of the 20 newsgroups dataset ({} documents).".format(len(text)))
print("This benchmarks runs in ~1 min ...")
res = []
for Vectorizer, (analyzer, ngram_range) in itertools.product(
[CountVectorizer, TfidfVectorizer, HashingVectorizer],
[("word", (1, 1)), ("word", (1, 2)), ("char", (4, 4)), ("char_wb", (4, 4))],
):
bench = {"vectorizer": Vectorizer.__name__}
params = {"analyzer": analyzer, "ngram_range": ngram_range}
bench.update(params)
dt = timeit.repeat(
run_vectorizer(Vectorizer, text, **params), number=1, repeat=n_repeat
)
bench["time"] = "{:.3f} (+-{:.3f})".format(np.mean(dt), np.std(dt))
mem_usage = memory_usage(run_vectorizer(Vectorizer, text, **params))
bench["memory"] = "{:.1f}".format(np.max(mem_usage))
res.append(bench)
df = pd.DataFrame(res).set_index(["analyzer", "ngram_range", "vectorizer"])
print("\n========== Run time performance (sec) ===========\n")
print(
"Computing the mean and the standard deviation "
"of the run time over {} runs...\n".format(n_repeat)
)
print(df["time"].unstack(level=-1))
print("\n=============== Memory usage (MB) ===============\n")
print(df["memory"].unstack(level=-1))
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_plot_neighbors.py | benchmarks/bench_plot_neighbors.py | """
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import ticker
from sklearn import datasets, neighbors
def get_data(N, D, dataset="dense"):
    """Return an (N, D) benchmark matrix.

    ``"dense"`` gives reproducible uniform random data (seeded with 0);
    ``"digits"`` slices the digits dataset, keeping the D pixel columns most
    active in the first image.  Any other name raises ValueError.
    """
    if dataset == "dense":
        np.random.seed(0)
        return np.random.random((N, D))
    if dataset == "digits":
        X, _ = datasets.load_digits(return_X_y=True)
        # Order columns by intensity of the first sample, descending.
        order = np.argsort(X[0])[::-1]
        return X[:, order][:N, :D]
    raise ValueError("invalid dataset: %s" % dataset)
def barplot_neighbors(
    Nrange=2 ** np.arange(1, 11),
    Drange=2 ** np.arange(7),
    krange=2 ** np.arange(10),
    N=1000,
    D=64,
    k=5,
    leaf_size=30,
    dataset="digits",
):
    """Benchmark and plot nearest-neighbors build/query times.

    Three experiments are run, varying one quantity at a time while the
    others are held at their fiducial values (N, D, k): the number of
    samples (Nrange), the dimensionality (Drange) and the number of
    neighbors (krange). Each is repeated for the 'kd_tree', 'brute' and
    'ball_tree' algorithms; construction and query times are drawn as
    stacked bars, one subplot per varied quantity.
    """
    algorithms = ("kd_tree", "brute", "ball_tree")
    fiducial_values = {"N": N, "D": D, "k": k}
    # ------------------------------------------------------------
    # varying N
    N_results_build = {alg: np.zeros(len(Nrange)) for alg in algorithms}
    N_results_query = {alg: np.zeros(len(Nrange)) for alg in algorithms}
    for i, NN in enumerate(Nrange):
        print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
        X = get_data(NN, D, dataset)
        for algorithm in algorithms:
            # n_neighbors cannot exceed the number of samples.
            nbrs = neighbors.NearestNeighbors(
                n_neighbors=min(NN, k), algorithm=algorithm, leaf_size=leaf_size
            )
            t0 = time()
            nbrs.fit(X)
            t1 = time()
            nbrs.kneighbors(X)
            t2 = time()
            N_results_build[algorithm][i] = t1 - t0
            N_results_query[algorithm][i] = t2 - t1
    # ------------------------------------------------------------
    # varying D
    D_results_build = {alg: np.zeros(len(Drange)) for alg in algorithms}
    D_results_query = {alg: np.zeros(len(Drange)) for alg in algorithms}
    for i, DD in enumerate(Drange):
        print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
        X = get_data(N, DD, dataset)
        for algorithm in algorithms:
            nbrs = neighbors.NearestNeighbors(
                n_neighbors=k, algorithm=algorithm, leaf_size=leaf_size
            )
            t0 = time()
            nbrs.fit(X)
            t1 = time()
            nbrs.kneighbors(X)
            t2 = time()
            D_results_build[algorithm][i] = t1 - t0
            D_results_query[algorithm][i] = t2 - t1
    # ------------------------------------------------------------
    # varying k
    k_results_build = {alg: np.zeros(len(krange)) for alg in algorithms}
    k_results_query = {alg: np.zeros(len(krange)) for alg in algorithms}
    # BUG FIX: this previously read ``get_data(N, DD, dataset)``, reusing the
    # leftover loop variable from the "varying D" section instead of the
    # fiducial dimensionality D (they only coincide for the default Drange).
    X = get_data(N, D, dataset)
    for i, kk in enumerate(krange):
        print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
        for algorithm in algorithms:
            nbrs = neighbors.NearestNeighbors(
                n_neighbors=kk, algorithm=algorithm, leaf_size=leaf_size
            )
            t0 = time()
            nbrs.fit(X)
            t1 = time()
            nbrs.kneighbors(X)
            t2 = time()
            k_results_build[algorithm][i] = t1 - t0
            k_results_query[algorithm][i] = t2 - t1
    plt.figure(figsize=(8, 11))
    for sbplt, vals, quantity, build_time, query_time in [
        (311, Nrange, "N", N_results_build, N_results_query),
        (312, Drange, "D", D_results_build, D_results_query),
        (313, krange, "k", k_results_build, k_results_query),
    ]:
        ax = plt.subplot(sbplt, yscale="log")
        plt.grid(True)
        tick_vals = []
        tick_labels = []
        # Baseline for the stacked bars: a power of ten below every build
        # time, so the bars stay visible on the log-scaled axis.
        bottom = 10 ** np.min(
            [min(np.floor(np.log10(build_time[alg]))) for alg in algorithms]
        )
        for i, alg in enumerate(algorithms):
            xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
            width = 0.8
            # Construction time (red) is stacked below query time (blue).
            c_bar = plt.bar(xvals, build_time[alg] - bottom, width, bottom, color="r")
            q_bar = plt.bar(xvals, query_time[alg], width, build_time[alg], color="b")
            tick_vals += list(xvals + 0.5 * width)
            tick_labels += ["%i" % val for val in vals]
            plt.text(
                (i + 0.02) / len(algorithms),
                0.98,
                alg,
                transform=ax.transAxes,
                ha="left",
                va="top",
                bbox=dict(facecolor="w", edgecolor="w", alpha=0.5),
            )
            # Redundant inside the loop but harmless.
            plt.ylabel("Time (s)")
        ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
        ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))
        for label in ax.get_xticklabels():
            label.set_rotation(-90)
            label.set_fontsize(10)
        title_string = "Varying %s" % quantity
        descr_string = ""
        # Describe the two quantities held fixed in this subplot.
        for s in "NDk":
            if s == quantity:
                pass
            else:
                descr_string += "%s = %i, " % (s, fiducial_values[s])
        descr_string = descr_string[:-2]
        plt.text(
            1.01,
            0.5,
            title_string,
            transform=ax.transAxes,
            rotation=-90,
            ha="left",
            va="center",
            fontsize=20,
        )
        plt.text(
            0.99,
            0.5,
            descr_string,
            transform=ax.transAxes,
            rotation=-90,
            ha="right",
            va="center",
        )
    plt.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)
    plt.figlegend((c_bar, q_bar), ("construction", "N-point query"), "upper right")
if __name__ == "__main__":
    # Run the full benchmark on both datasets; show all figures at the end.
    barplot_neighbors(dataset="digits")
    barplot_neighbors(dataset="dense")
    plt.show()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_glmnet.py | benchmarks/bench_glmnet.py | """
To run this, you'll need to have installed.
* glmnet-python
* scikit-learn (of course)
Does two benchmarks
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets import make_regression
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
    """Return the root-mean-square error between arrays *a* and *b*."""
    squared_error = (a - b) ** 2
    return np.sqrt(np.mean(squared_error))
def bench(factory, X, Y, X_test, Y_test, ref_coef):
    """Fit ``factory(alpha=alpha)`` on (X, Y), report test error, return fit time."""
    gc.collect()  # keep garbage-collection pauses out of the timing

    t0 = time()
    model = factory(alpha=alpha).fit(X, Y)
    elapsed = time() - t0

    print("duration: %0.3fs" % elapsed)
    print("rmse: %f" % rmse(Y_test, model.predict(X_test)))
    print("mean coef abs diff: %f" % abs(ref_coef - model.coef_.ravel()).mean())
    return elapsed
if __name__ == "__main__":
    # Delayed import of matplotlib.pyplot
    import matplotlib.pyplot as plt
    from glmnet.elastic_net import Lasso as GlmnetLasso
    from sklearn.linear_model import Lasso as ScikitLasso

    # ---- Benchmark 1: fixed n_features, growing number of samples ----
    scikit_results = []
    glmnet_results = []
    n = 20
    step = 500
    n_features = 1000
    # Integer division: make_regression validates that n_informative is an
    # int; the old `n_features / 10` produced a float and is rejected.
    n_informative = n_features // 10
    n_test_samples = 1000
    for i in range(1, n + 1):
        print("==================")
        print("Iteration %s of %s" % (i, n))
        print("==================")
        X, Y, coef_ = make_regression(
            n_samples=(i * step) + n_test_samples,
            n_features=n_features,
            noise=0.1,
            n_informative=n_informative,
            coef=True,
        )
        # Hold out the last n_test_samples rows for evaluation.
        X_test = X[-n_test_samples:]
        Y_test = Y[-n_test_samples:]
        X = X[: (i * step)]
        Y = Y[: (i * step)]
        print("benchmarking scikit-learn: ")
        scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
        print("benchmarking glmnet: ")
        glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))

    plt.clf()
    xx = range(0, n * step, step)
    plt.title("Lasso regression on sample dataset (%d features)" % n_features)
    plt.plot(xx, scikit_results, "b-", label="scikit-learn")
    plt.plot(xx, glmnet_results, "r-", label="glmnet")
    plt.legend()
    plt.xlabel("number of samples to classify")
    plt.ylabel("Time (s)")
    plt.show()

    # ---- Benchmark 2: fixed n_samples, growing number of features ----
    scikit_results = []
    glmnet_results = []
    n = 20
    step = 100
    n_samples = 500
    for i in range(1, n + 1):
        print("==================")
        print("Iteration %02d of %02d" % (i, n))
        print("==================")
        n_features = i * step
        n_informative = n_features // 10  # must be an int, see note above
        X, Y, coef_ = make_regression(
            n_samples=(i * step) + n_test_samples,
            n_features=n_features,
            noise=0.1,
            n_informative=n_informative,
            coef=True,
        )
        X_test = X[-n_test_samples:]
        Y_test = Y[-n_test_samples:]
        X = X[:n_samples]
        Y = Y[:n_samples]
        print("benchmarking scikit-learn: ")
        scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
        print("benchmarking glmnet: ")
        glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))

    xx = np.arange(100, 100 + n * step, step)
    plt.figure("scikit-learn vs. glmnet benchmark results")
    plt.title("Regression in high dimensional spaces (%d samples)" % n_samples)
    plt.plot(xx, scikit_results, "b-", label="scikit-learn")
    plt.plot(xx, glmnet_results, "r-", label="glmnet")
    plt.legend()
    plt.xlabel("number of features")
    plt.ylabel("Time (s)")
    plt.axis("tight")
    plt.show()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_plot_ward.py | benchmarks/bench_plot_ward.py | """
Benchmark scikit-learn's Ward implement compared to SciPy's
"""
import time
import matplotlib.pyplot as plt
import numpy as np
from scipy.cluster import hierarchy
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage="ward")

# Grids of problem sizes. np.logspace yields floats; they are cast to int
# where an actual dimension/count is needed.
n_samples = np.logspace(0.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples, n_features)

scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)

for i, n in enumerate(n_samples):
    for j, p in enumerate(n_features):
        # BUG FIX: modern NumPy rejects float dimensions in `size`; cast the
        # logspace values to int explicitly.
        X = np.random.normal(size=(int(n), int(p)))
        t0 = time.time()
        ward.fit(X)
        scikits_time[j, i] = time.time() - t0
        t0 = time.time()
        hierarchy.ward(X)
        scipy_time[j, i] = time.time() - t0

# Ratio > 1 means scikit-learn is slower than SciPy for that cell.
ratio = scikits_time / scipy_time

plt.figure("scikit-learn Ward's method benchmark results")
plt.imshow(np.log(ratio), aspect="auto", origin="lower")
plt.colorbar()
plt.contour(
    ratio,
    levels=[
        1,
    ],
    colors="k",
)
plt.yticks(range(len(n_features)), n_features.astype(int))
plt.ylabel("N features")
plt.xticks(range(len(n_samples)), n_samples.astype(int))
plt.xlabel("N samples")
plt.title("Scikit's time, in units of scipy time (log)")
plt.show()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_plot_parallel_pairwise.py | benchmarks/bench_plot_parallel_pairwise.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import time
import matplotlib.pyplot as plt
from sklearn.metrics.pairwise import pairwise_distances, pairwise_kernels
from sklearn.utils import check_random_state
def plot(func):
    """Time *func* on growing random inputs with one core vs all cores, and plot both curves."""
    rng = check_random_state(0)
    serial_times = []
    parallel_times = []
    sizes = range(1000, 6000, 1000)
    for n_samples in sizes:
        data = rng.rand(n_samples, 300)

        t0 = time.time()
        func(data, n_jobs=1)
        serial_times.append(time.time() - t0)

        t0 = time.time()
        func(data, n_jobs=-1)
        parallel_times.append(time.time() - t0)

    plt.figure("scikit-learn parallel %s benchmark results" % func.__name__)
    plt.plot(sizes, serial_times, label="one core")
    plt.plot(sizes, parallel_times, label="multi core")
    plt.xlabel("n_samples")
    plt.ylabel("Time (s)")
    plt.title("Parallel %s" % func.__name__)
    plt.legend()
def euclidean_distances(X, n_jobs):
    """Pairwise Euclidean distances of X, parallelized over *n_jobs* workers."""
    return pairwise_distances(X, n_jobs=n_jobs, metric="euclidean")
def rbf_kernels(X, n_jobs):
    """Pairwise RBF kernel (gamma=0.1) of X, parallelized over *n_jobs* workers."""
    return pairwise_kernels(X, gamma=0.1, metric="rbf", n_jobs=n_jobs)
# Benchmark both pairwise operations, then display all figures.
plot(euclidean_distances)
plot(rbf_kernels)
plt.show()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_hist_gradient_boosting_adult.py | benchmarks/bench_hist_gradient_boosting_adult.py | import argparse
from time import time
import numpy as np
import pandas as pd
from sklearn.compose import make_column_selector, make_column_transformer
from sklearn.datasets import fetch_openml
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.ensemble._hist_gradient_boosting.utils import get_equivalent_estimator
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OrdinalEncoder
# Command-line configuration for this benchmark script.
parser = argparse.ArgumentParser()
parser.add_argument("--n-leaf-nodes", type=int, default=31)
parser.add_argument("--n-trees", type=int, default=100)
# When set, also benchmark LightGBM with equivalent hyperparameters.
parser.add_argument("--lightgbm", action="store_true", default=False)
parser.add_argument("--learning-rate", type=float, default=0.1)
parser.add_argument("--max-bins", type=int, default=255)
# When set, skip the prediction/scoring phase (fit timing only).
parser.add_argument("--no-predict", action="store_true", default=False)
parser.add_argument("--verbose", action="store_true", default=False)
args = parser.parse_args()
# Shorthand aliases used throughout the script.
n_leaf_nodes = args.n_leaf_nodes
n_trees = args.n_trees
lr = args.learning_rate
max_bins = args.max_bins
verbose = args.verbose
def fit(est, data_train, target_train, libname, **fit_params):
    """Fit *est* on the training data, printing the elapsed wall-clock time.

    Parameters
    ----------
    est : estimator exposing a ``fit`` method.
    data_train, target_train : training features and labels.
    libname : str
        Library name, used only in the progress messages.
    **fit_params : extra keyword arguments forwarded to ``est.fit``.
    """
    print(f"Fitting a {libname} model...")
    tic = time()
    est.fit(data_train, target_train, **fit_params)
    toc = time()
    print(f"fitted in {toc - tic:.3f}s")
def predict(est, data_test, target_test):
    """Time prediction on the test set and print ROC AUC and accuracy.

    Does nothing when the ``--no-predict`` command-line flag was given.
    """
    if args.no_predict:
        return
    tic = time()
    predicted_test = est.predict(data_test)
    predicted_proba_test = est.predict_proba(data_test)
    toc = time()
    # ROC AUC is computed from the probability of the positive class (col 1).
    roc_auc = roc_auc_score(target_test, predicted_proba_test[:, 1])
    acc = accuracy_score(target_test, predicted_test)
    print(f"predicted in {toc - tic:.3f}s, ROC AUC: {roc_auc:.4f}, ACC: {acc:.4f}")
data = fetch_openml(data_id=179, as_frame=True)  # adult dataset
X, y = data.data, data.target
# Ordinal encode the categories to use the native support available in HGBDT
cat_columns = make_column_selector(dtype_include="category")(X)
preprocessing = make_column_transformer(
    (OrdinalEncoder(), cat_columns),
    remainder="passthrough",
    verbose_feature_names_out=False,
)
X = pd.DataFrame(
    preprocessing.fit_transform(X),
    columns=preprocessing.get_feature_names_out(),
)
n_classes = len(np.unique(y))
n_features = X.shape[1]
n_categorical_features = len(cat_columns)
n_numerical_features = n_features - n_categorical_features
print(f"Number of features: {n_features}")
print(f"Number of categorical features: {n_categorical_features}")
print(f"Number of numerical features: {n_numerical_features}")
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# The column transformer emits the encoded categorical columns first and the
# passthrough (numerical) columns after, so the categorical mask is a prefix
# of True values.
is_categorical = [True] * n_categorical_features + [False] * n_numerical_features
est = HistGradientBoostingClassifier(
    loss="log_loss",
    learning_rate=lr,
    max_iter=n_trees,
    max_bins=max_bins,
    max_leaf_nodes=n_leaf_nodes,
    categorical_features=is_categorical,
    early_stopping=False,
    random_state=0,
    verbose=verbose,
)
fit(est, X_train, y_train, "sklearn")
predict(est, X_test, y_test)
# Optional LightGBM comparison with equivalent hyperparameters.
if args.lightgbm:
    est = get_equivalent_estimator(est, lib="lightgbm", n_classes=n_classes)
    est.set_params(max_cat_to_onehot=1)  # dont use OHE
    categorical_features = [
        f_idx for (f_idx, is_cat) in enumerate(is_categorical) if is_cat
    ]
    fit(est, X_train, y_train, "lightgbm", categorical_feature=categorical_features)
    predict(est, X_test, y_test)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_random_projections.py | benchmarks/bench_random_projections.py | """
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
import collections
import gc
import optparse
import sys
from datetime import datetime
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.random_projection import (
GaussianRandomProjection,
SparseRandomProjection,
johnson_lindenstrauss_min_dim,
)
def type_auto_or_float(val):
    """Option-parser coercion: pass 'auto' through, otherwise convert to float."""
    return "auto" if val == "auto" else float(val)
def type_auto_or_int(val):
    """Option-parser coercion: pass 'auto' through, otherwise convert to int."""
    return "auto" if val == "auto" else int(val)
def compute_time(t_start, delta):
    """Convert a ``datetime.timedelta`` to seconds as a float.

    Parameters
    ----------
    t_start : datetime.datetime
        Unused; kept for backward compatibility with existing callers.
    delta : datetime.timedelta
        Elapsed time to convert.

    Returns
    -------
    float
        *delta* expressed in (possibly fractional) seconds.
    """
    # total_seconds() also accounts for the .days component, which the
    # previous `delta.seconds + delta.microseconds / 1e6` silently dropped.
    return delta.total_seconds()
def bench_scikit_transformer(X, transformer):
    """Fit and transform X with a clone of *transformer*, timing each step.

    Returns ``(time_to_fit, time_to_transform)`` in seconds.
    """
    gc.collect()  # keep garbage-collection pauses out of the measurements
    estimator = clone(transformer)

    fit_start = datetime.now()
    estimator.fit(X)
    fit_delta = datetime.now() - fit_start
    time_to_fit = compute_time(fit_start, fit_delta)

    transform_start = datetime.now()
    estimator.transform(X)
    transform_delta = datetime.now() - transform_start
    time_to_transform = compute_time(transform_start, transform_delta)

    return time_to_fit, time_to_transform
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros, random_state=None):
    """Random (n_samples, n_features) data with ~n_nonzeros Gaussian entries.

    Non-zero positions are drawn uniformly at random; duplicate positions are
    summed during the COO conversion. Returns the same data twice: as a dense
    ndarray and as a CSR sparse matrix.
    """
    rng = np.random.RandomState(random_state)
    # Draw order matters for reproducibility: values, then rows, then cols.
    values = rng.randn(n_nonzeros)
    rows = rng.randint(n_samples, size=n_nonzeros)
    cols = rng.randint(n_features, size=n_nonzeros)
    data_coo = sp.coo_matrix(
        (values, (rows, cols)), shape=(n_samples, n_features)
    )
    return data_coo.toarray(), data_coo.tocsr()
def print_row(clf_type, time_fit, time_transform):
    """Print one aligned result row: transformer name, fit time, transform time."""
    name_col = clf_type.ljust(30)
    fit_col = ("%.4fs" % time_fit).center(12)
    transform_col = ("%.4fs" % time_transform).center(12)
    print("%s | %s | %s" % (name_col, fit_col, transform_col))
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option(
"--n-times",
dest="n_times",
default=5,
type=int,
help="Benchmark results are average over n_times experiments",
)
op.add_option(
"--n-features",
dest="n_features",
default=10**4,
type=int,
help="Number of features in the benchmarks",
)
op.add_option(
"--n-components",
dest="n_components",
default="auto",
help="Size of the random subspace. ('auto' or int > 0)",
)
op.add_option(
"--ratio-nonzeros",
dest="ratio_nonzeros",
default=10**-3,
type=float,
help="Number of features in the benchmarks",
)
op.add_option(
"--n-samples",
dest="n_samples",
default=500,
type=int,
help="Number of samples in the benchmarks",
)
op.add_option(
"--random-seed",
dest="random_seed",
default=13,
type=int,
help="Seed used by the random number generators.",
)
op.add_option(
"--density",
dest="density",
default=1 / 3,
help=(
"Density used by the sparse random projection. ('auto' or float (0.0, 1.0]"
),
)
op.add_option(
"--eps",
dest="eps",
default=0.5,
type=float,
help="See the documentation of the underlying transformers.",
)
op.add_option(
"--transformers",
dest="selected_transformers",
default="GaussianRandomProjection,SparseRandomProjection",
type=str,
help=(
"Comma-separated list of transformer to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection"
),
)
op.add_option(
"--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.",
)
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(",")
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
print("Dataset statistics")
print("===========================")
print("n_samples \t= %s" % opts.n_samples)
print("n_features \t= %s" % opts.n_features)
if opts.n_components == "auto":
print(
"n_components \t= %s (auto)"
% johnson_lindenstrauss_min_dim(n_samples=opts.n_samples, eps=opts.eps)
)
else:
print("n_components \t= %s" % opts.n_components)
print("n_elements \t= %s" % (opts.n_features * opts.n_samples))
print("n_nonzeros \t= %s per feature" % n_nonzeros)
print("ratio_nonzeros \t= %s" % opts.ratio_nonzeros)
print("")
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
}
transformers["GaussianRandomProjection"] = GaussianRandomProjection(
**gaussian_matrix_params
)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = SparseRandomProjection(
**sparse_matrix_params
)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print("Benchmarks")
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(
opts.n_samples, opts.n_features, n_nonzeros, random_state=opts.random_seed
)
X = X_dense if opts.dense else X_sparse
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in range(opts.n_times):
print("\titer %s..." % iteration, end="")
time_to_fit, time_to_transform = bench_scikit_transformer(
X_dense, transformers[name]
)
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print(
"%s \t | %s "
% (
"Arguments".ljust(16),
"Value".center(12),
)
)
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16), str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print(
"%s | %s | %s"
% ("Transformer".ljust(30), "fit".center(12), "transform".center(12))
)
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name, np.mean(time_fit[name]), np.mean(time_transform[name]))
print("")
print("")
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_plot_incremental_pca.py | benchmarks/bench_plot_incremental_pca.py | """
========================
IncrementalPCA benchmark
========================
Benchmarks for IncrementalPCA
"""
import gc
from collections import defaultdict
from time import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import fetch_lfw_people
from sklearn.decomposition import PCA, IncrementalPCA
def plot_results(X, y, label):
    """Add one labelled curve with circle markers to the current figure."""
    plt.plot(X, y, marker="o", label=label)
def benchmark(estimator, data):
    """Fit *estimator* on *data*; return fit time and reconstruction error.

    The error is the mean absolute difference between *data* and its
    round-trip through ``transform`` / ``inverse_transform``.
    """
    gc.collect()  # avoid counting garbage-collection pauses in the timing
    print("Benching %s" % estimator)
    start = time()
    estimator.fit(data)
    elapsed = time() - start
    reconstructed = estimator.inverse_transform(estimator.transform(data))
    error = np.mean(np.abs(data - reconstructed))
    return {"time": elapsed, "error": error}
def plot_feature_times(all_times, batch_size, all_components, data):
    """Plot PCA vs IncrementalPCA runtime as a function of n_components."""
    plt.figure()
    plot_results(all_components, all_times["pca"], label="PCA")
    plot_results(
        all_components, all_times["ipca"], label="IncrementalPCA, bsize=%i" % batch_size
    )
    plt.legend(loc="upper left")
    plt.suptitle(
        "Algorithm runtime vs. n_components\n LFW, size %i x %i"
        % data.shape
    )
    plt.xlabel("Number of components (out of max %i)" % data.shape[1])
    plt.ylabel("Time (seconds)")
def plot_feature_errors(all_errors, batch_size, all_components, data):
    """Plot PCA vs IncrementalPCA reconstruction error vs n_components."""
    plt.figure()
    plot_results(all_components, all_errors["pca"], label="PCA")
    plot_results(
        all_components,
        all_errors["ipca"],
        label="IncrementalPCA, bsize=%i" % batch_size,
    )
    plt.legend(loc="lower left")
    plt.suptitle("Algorithm error vs. n_components\nLFW, size %i x %i" % data.shape)
    plt.xlabel("Number of components (out of max %i)" % data.shape[1])
    plt.ylabel("Mean absolute error")
def plot_batch_times(all_times, n_features, all_batch_sizes, data):
    """Plot PCA vs IncrementalPCA runtime as a function of batch size.

    NOTE(review): despite its name, ``n_features`` receives a component
    count at the call sites (the title labels it n_components).
    """
    plt.figure()
    plot_results(all_batch_sizes, all_times["pca"], label="PCA")
    plot_results(all_batch_sizes, all_times["ipca"], label="IncrementalPCA")
    plt.legend(loc="lower left")
    plt.suptitle(
        "Algorithm runtime vs. batch_size for n_components %i\n LFW,"
        " size %i x %i" % (n_features, data.shape[0], data.shape[1])
    )
    plt.xlabel("Batch size")
    plt.ylabel("Time (seconds)")
def plot_batch_errors(all_errors, n_features, all_batch_sizes, data):
    """Plot PCA vs IncrementalPCA reconstruction error vs batch size.

    NOTE(review): despite its name, ``n_features`` receives a component
    count at the call sites (the title labels it n_components).
    """
    plt.figure()
    plot_results(all_batch_sizes, all_errors["pca"], label="PCA")
    plot_results(all_batch_sizes, all_errors["ipca"], label="IncrementalPCA")
    plt.legend(loc="lower left")
    plt.suptitle(
        "Algorithm error vs. batch_size for n_components %i\n LFW,"
        " size %i x %i" % (n_features, data.shape[0], data.shape[1])
    )
    plt.xlabel("Batch size")
    plt.ylabel("Mean absolute error")
def fixed_batch_size_comparison(data):
    """Compare PCA and IncrementalPCA (fixed batch size) over n_components.

    Produces one runtime figure and one reconstruction-error figure.
    """
    # Five component counts, from ~10% of the features up to all of them.
    all_features = [
        i.astype(int) for i in np.linspace(data.shape[1] // 10, data.shape[1], num=5)
    ]
    batch_size = 1000
    # Compare runtimes and error for fixed batch size
    all_times = defaultdict(list)
    all_errors = defaultdict(list)
    for n_components in all_features:
        pca = PCA(n_components=n_components)
        ipca = IncrementalPCA(n_components=n_components, batch_size=batch_size)
        results_dict = {
            k: benchmark(est, data) for k, est in [("pca", pca), ("ipca", ipca)]
        }
        for k in sorted(results_dict.keys()):
            all_times[k].append(results_dict[k]["time"])
            all_errors[k].append(results_dict[k]["error"])
    plot_feature_times(all_times, batch_size, all_features, data)
    plot_feature_errors(all_errors, batch_size, all_features, data)
def variable_batch_size_comparison(data):
    """Compare IncrementalPCA across batch sizes against PCA baselines.

    For each of four component counts, PCA and randomized PCA are run once
    to form flat baselines, then IncrementalPCA is run once per batch size;
    one runtime and one error figure is produced per component count.
    """
    batch_sizes = [
        i.astype(int) for i in np.linspace(data.shape[0] // 10, data.shape[0], num=10)
    ]
    for n_components in [
        i.astype(int) for i in np.linspace(data.shape[1] // 10, data.shape[1], num=4)
    ]:
        all_times = defaultdict(list)
        all_errors = defaultdict(list)
        pca = PCA(n_components=n_components)
        rpca = PCA(
            n_components=n_components, svd_solver="randomized", random_state=1999
        )
        results_dict = {
            k: benchmark(est, data) for k, est in [("pca", pca), ("rpca", rpca)]
        }
        # Create flat baselines to compare the variation over batch size
        all_times["pca"].extend([results_dict["pca"]["time"]] * len(batch_sizes))
        all_errors["pca"].extend([results_dict["pca"]["error"]] * len(batch_sizes))
        all_times["rpca"].extend([results_dict["rpca"]["time"]] * len(batch_sizes))
        all_errors["rpca"].extend([results_dict["rpca"]["error"]] * len(batch_sizes))
        for batch_size in batch_sizes:
            ipca = IncrementalPCA(n_components=n_components, batch_size=batch_size)
            results_dict = {k: benchmark(est, data) for k, est in [("ipca", ipca)]}
            all_times["ipca"].append(results_dict["ipca"]["time"])
            all_errors["ipca"].append(results_dict["ipca"]["error"])
        plot_batch_times(all_times, n_components, batch_sizes, data)
        plot_batch_errors(all_errors, n_components, batch_sizes, data)
faces = fetch_lfw_people(resize=0.2, min_faces_per_person=5)
# limit dataset to 5000 people (don't care who they are!)
X = faces.data[:5000]
n_samples, h, w = faces.images.shape
n_features = X.shape[1]
# Standardize each feature in place (zero mean, unit variance).
# NOTE(review): a zero-variance pixel would divide by zero here — assumed
# not to occur for this dataset; confirm if reusing with other data.
X -= X.mean(axis=0)
X /= X.std(axis=0)
fixed_batch_size_comparison(X)
variable_batch_size_comparison(X)
plt.show()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_kernel_pca_solvers_time_vs_n_samples.py | benchmarks/bench_kernel_pca_solvers_time_vs_n_samples.py | """
==========================================================
Kernel PCA Solvers comparison benchmark: time vs n_samples
==========================================================
This benchmark shows that the approximate solvers provided in Kernel PCA can
help significantly improve its execution speed when an approximate solution
(small `n_components`) is acceptable. In many real-world datasets the number of
samples is very large, but a few hundreds of principal components are
sufficient enough to capture the underlying distribution.
Description:
------------
An increasing number of examples is used to train a KernelPCA, between
`min_n_samples` (default: 101) and `max_n_samples` (default: 4000) with
`n_samples_grid_size` positions (default: 4). Samples have 2 features, and are
generated using `make_circles`. For each training sample size, KernelPCA models
are trained for the various possible `eigen_solver` values. All of them are
trained to obtain `n_components` principal components (default: 100). The
execution times are displayed in a plot at the end of the experiment.
What you can observe:
---------------------
When the number of samples provided gets large, the dense solver takes a lot
of time to complete, while the randomized method returns similar results in
much shorter execution times.
Going further:
--------------
You can increase `max_n_samples` and `nb_n_samples_to_try` if you wish to
explore a wider range of values for `n_samples`.
You can also set `include_arpack=True` to add this other solver in the
experiments (much slower).
Finally you can have a look at the second example of this series, "Kernel PCA
Solvers comparison benchmark: time vs n_components", where this time the number
of examples is fixed, and the desired number of components varies.
"""
# Author: Sylvain MARIE, Schneider Electric
import time
import matplotlib.pyplot as plt
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.datasets import make_circles
from sklearn.decomposition import KernelPCA
print(__doc__)
# 1- Design the Experiment
# ------------------------
min_n_samples, max_n_samples = 101, 4000 # min and max n_samples to try
n_samples_grid_size = 4 # nb of positions in the grid to try
# generate the grid
n_samples_range = [
min_n_samples
+ np.floor((x / (n_samples_grid_size - 1)) * (max_n_samples - min_n_samples))
for x in range(0, n_samples_grid_size)
]
n_components = 100 # the number of principal components we want to use
n_iter = 3 # the number of times each experiment will be repeated
include_arpack = False # set this to True to include arpack solver (slower)
# 2- Generate random data
# -----------------------
n_features = 2
X, y = make_circles(n_samples=max_n_samples, factor=0.3, noise=0.05, random_state=0)
# 3- Benchmark
# ------------
# init
ref_time = np.empty((len(n_samples_range), n_iter)) * np.nan
a_time = np.empty((len(n_samples_range), n_iter)) * np.nan
r_time = np.empty((len(n_samples_range), n_iter)) * np.nan
# loop
for j, n_samples in enumerate(n_samples_range):
n_samples = int(n_samples)
print("Performing kPCA with n_samples = %i" % n_samples)
X_train = X[:n_samples, :]
X_test = X_train
# A- reference (dense)
print(" - dense")
for i in range(n_iter):
start_time = time.perf_counter()
ref_pred = (
KernelPCA(n_components, eigen_solver="dense").fit(X_train).transform(X_test)
)
ref_time[j, i] = time.perf_counter() - start_time
# B- arpack
if include_arpack:
print(" - arpack")
for i in range(n_iter):
start_time = time.perf_counter()
a_pred = (
KernelPCA(n_components, eigen_solver="arpack")
.fit(X_train)
.transform(X_test)
)
a_time[j, i] = time.perf_counter() - start_time
# check that the result is still correct despite the approx
assert_array_almost_equal(np.abs(a_pred), np.abs(ref_pred))
# C- randomized
print(" - randomized")
for i in range(n_iter):
start_time = time.perf_counter()
r_pred = (
KernelPCA(n_components, eigen_solver="randomized")
.fit(X_train)
.transform(X_test)
)
r_time[j, i] = time.perf_counter() - start_time
# check that the result is still correct despite the approximation
assert_array_almost_equal(np.abs(r_pred), np.abs(ref_pred))
# Compute statistics for the 3 methods
avg_ref_time = ref_time.mean(axis=1)
std_ref_time = ref_time.std(axis=1)
avg_a_time = a_time.mean(axis=1)
std_a_time = a_time.std(axis=1)
avg_r_time = r_time.mean(axis=1)
std_r_time = r_time.std(axis=1)
# 4- Plots
# --------
fig, ax = plt.subplots(figsize=(12, 8))
# Display 1 plot with error bars per method
ax.errorbar(
n_samples_range,
avg_ref_time,
yerr=std_ref_time,
marker="x",
linestyle="",
color="r",
label="full",
)
if include_arpack:
ax.errorbar(
n_samples_range,
avg_a_time,
yerr=std_a_time,
marker="x",
linestyle="",
color="g",
label="arpack",
)
ax.errorbar(
n_samples_range,
avg_r_time,
yerr=std_r_time,
marker="x",
linestyle="",
color="b",
label="randomized",
)
ax.legend(loc="upper left")
# customize axes
ax.set_xlim(min(n_samples_range) * 0.9, max(n_samples_range) * 1.1)
ax.set_ylabel("Execution time (s)")
ax.set_xlabel("n_samples")
ax.set_title(
"Execution time comparison of kPCA with %i components on samples "
"with %i features, according to the choice of `eigen_solver`"
"" % (n_components, n_features)
)
plt.show()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_plot_randomized_svd.py | benchmarks/bench_plot_randomized_svd.py | """
Benchmarks on the power iterations phase in randomized SVD.
We test on various synthetic and real datasets the effect of increasing
the number of power iterations in terms of quality of approximation
and running time. A number greater than 0 should help with noisy matrices,
which are characterized by a slow spectral decay.
We test several policy for normalizing the power iterations. Normalization
is crucial to avoid numerical issues.
The quality of the approximation is measured by the spectral norm discrepancy
between the original input matrix and the reconstructed one (by multiplying
the randomized_svd's outputs). The spectral norm is always equivalent to the
largest singular value of a matrix. (3) justifies this choice. However, one can
notice in these experiments that Frobenius and spectral norms behave
very similarly in a qualitative sense. Therefore, we suggest to run these
benchmarks with `enable_spectral_norm = False`, as Frobenius' is MUCH faster to
compute.
The benchmarks follow.
(a) plot: time vs norm, varying number of power iterations
data: many datasets
goal: compare normalization policies and study how the number of power
iterations affect time and norm
(b) plot: n_iter vs norm, varying rank of data and number of components for
randomized_SVD
data: low-rank matrices on which we control the rank
goal: study whether the rank of the matrix and the number of components
extracted by randomized SVD affect "the optimal" number of power iterations
(c) plot: time vs norm, varying datasets
data: many datasets
goal: compare default configurations
We compare the following algorithms:
- randomized_svd(..., power_iteration_normalizer='none')
- randomized_svd(..., power_iteration_normalizer='LU')
- randomized_svd(..., power_iteration_normalizer='QR')
- randomized_svd(..., power_iteration_normalizer='auto')
- fbpca.pca() from https://github.com/facebook/fbpca (if installed)
Conclusion
----------
- n_iter=2 appears to be a good default value
- power_iteration_normalizer='none' is OK if n_iter is small, otherwise LU
gives similar errors to QR but is cheaper. That's what 'auto' implements.
References
----------
(1) :arxiv:`"Finding structure with randomness:
Stochastic algorithms for constructing approximate matrix decompositions."
<0909.4061>`
Halko, et al., (2009)
(2) A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
(3) An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import gc
import os.path
import pickle
from collections import defaultdict
from time import time
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
from sklearn.datasets import (
fetch_20newsgroups_vectorized,
fetch_lfw_people,
fetch_olivetti_faces,
fetch_openml,
fetch_rcv1,
make_low_rank_matrix,
make_sparse_uncorrelated,
)
from sklearn.utils import gen_batches
from sklearn.utils._arpack import _init_arpack_v0
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.validation import check_random_state
try:
import fbpca
fbpca_available = True
except ImportError:
fbpca_available = False
# If this is enabled, tests are much slower and will crash with the large data
enable_spectral_norm = False
# TODO: compute approximate spectral norms with the power method as in
# Estimating the largest eigenvalues by the power and Lanczos methods with
# a random start, Jacek Kuczynski and Henryk Wozniakowski, SIAM Journal on
# Matrix Analysis and Applications, 13 (4): 1094-1122, 1992.
# This approximation is a very fast estimate of the spectral norm, but depends
# on starting random vectors.
# Determine when to switch to batch computation for matrix norms,
# in case the reconstructed (dense) matrix is too large
MAX_MEMORY = int(4e9)
# The following datasets can be downloaded manually from:
# CIFAR 10: https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
# SVHN: http://ufldl.stanford.edu/housenumbers/train_32x32.mat
CIFAR_FOLDER = "./cifar-10-batches-py/"
SVHN_FOLDER = "./SVHN/"
datasets = [
"low rank matrix",
"lfw_people",
"olivetti_faces",
"20newsgroups",
"mnist_784",
"CIFAR",
"a3a",
"SVHN",
"uncorrelated matrix",
]
big_sparse_datasets = ["big sparse matrix", "rcv1"]
def unpickle(file_name):
with open(file_name, "rb") as fo:
return pickle.load(fo, encoding="latin1")["data"]
def handle_missing_dataset(file_folder):
if not os.path.isdir(file_folder):
print("%s file folder not found. Test skipped." % file_folder)
return 0
def get_data(dataset_name):
print("Getting dataset: %s" % dataset_name)
if dataset_name == "lfw_people":
X = fetch_lfw_people().data
elif dataset_name == "20newsgroups":
X = fetch_20newsgroups_vectorized().data[:, :100000]
elif dataset_name == "olivetti_faces":
X = fetch_olivetti_faces().data
elif dataset_name == "rcv1":
X = fetch_rcv1().data
elif dataset_name == "CIFAR":
if handle_missing_dataset(CIFAR_FOLDER) == 0:
return
X1 = [unpickle("%sdata_batch_%d" % (CIFAR_FOLDER, i + 1)) for i in range(5)]
X = np.vstack(X1)
del X1
elif dataset_name == "SVHN":
if handle_missing_dataset(SVHN_FOLDER) == 0:
return
X1 = sp.io.loadmat("%strain_32x32.mat" % SVHN_FOLDER)["X"]
X2 = [X1[:, :, :, i].reshape(32 * 32 * 3) for i in range(X1.shape[3])]
X = np.vstack(X2)
del X1
del X2
elif dataset_name == "low rank matrix":
X = make_low_rank_matrix(
n_samples=500,
n_features=int(1e4),
effective_rank=100,
tail_strength=0.5,
random_state=random_state,
)
elif dataset_name == "uncorrelated matrix":
X, _ = make_sparse_uncorrelated(
n_samples=500, n_features=10000, random_state=random_state
)
elif dataset_name == "big sparse matrix":
sparsity = int(1e6)
size = int(1e6)
small_size = int(1e4)
data = np.random.normal(0, 1, int(sparsity / 10))
data = np.repeat(data, 10)
row = np.random.uniform(0, small_size, sparsity)
col = np.random.uniform(0, small_size, sparsity)
X = sp.sparse.csr_matrix((data, (row, col)), shape=(size, small_size))
del data
del row
del col
else:
X = fetch_openml(dataset_name).data
return X
def plot_time_vs_s(time, norm, point_labels, title):
plt.figure()
colors = ["g", "b", "y"]
for i, l in enumerate(sorted(norm.keys())):
if l != "fbpca":
plt.plot(time[l], norm[l], label=l, marker="o", c=colors.pop())
else:
plt.plot(time[l], norm[l], label=l, marker="^", c="red")
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(
label,
xy=(x, y),
xytext=(0, -20),
textcoords="offset points",
ha="right",
va="bottom",
)
plt.legend(loc="upper right")
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("running time [s]")
def scatter_time_vs_s(time, norm, point_labels, title):
plt.figure()
size = 100
for i, l in enumerate(sorted(norm.keys())):
if l != "fbpca":
plt.scatter(time[l], norm[l], label=l, marker="o", c="b", s=size)
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(
label,
xy=(x, y),
xytext=(0, -80),
textcoords="offset points",
ha="right",
arrowprops=dict(arrowstyle="->", connectionstyle="arc3"),
va="bottom",
size=11,
rotation=90,
)
else:
plt.scatter(time[l], norm[l], label=l, marker="^", c="red", s=size)
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(
label,
xy=(x, y),
xytext=(0, 30),
textcoords="offset points",
ha="right",
arrowprops=dict(arrowstyle="->", connectionstyle="arc3"),
va="bottom",
size=11,
rotation=90,
)
plt.legend(loc="best")
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("running time [s]")
def plot_power_iter_vs_s(power_iter, s, title):
plt.figure()
for l in sorted(s.keys()):
plt.plot(power_iter, s[l], label=l, marker="o")
plt.legend(loc="lower right", prop={"size": 10})
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("n_iter")
def svd_timing(
X, n_comps, n_iter, n_oversamples, power_iteration_normalizer="auto", method=None
):
"""
Measure time for decomposition
"""
print("... running SVD ...")
if method != "fbpca":
gc.collect()
t0 = time()
U, mu, V = randomized_svd(
X,
n_comps,
n_oversamples=n_oversamples,
n_iter=n_iter,
power_iteration_normalizer=power_iteration_normalizer,
random_state=random_state,
transpose=False,
)
call_time = time() - t0
else:
gc.collect()
t0 = time()
# There is a different convention for l here
U, mu, V = fbpca.pca(
X, n_comps, raw=True, n_iter=n_iter, l=n_oversamples + n_comps
)
call_time = time() - t0
return U, mu, V, call_time
def norm_diff(A, norm=2, msg=True, random_state=None):
"""
Compute the norm diff with the original matrix, when randomized
SVD is called with *params.
norm: 2 => spectral; 'fro' => Frobenius
"""
if msg:
print("... computing %s norm ..." % norm)
if norm == 2:
# s = sp.linalg.norm(A, ord=2) # slow
v0 = _init_arpack_v0(min(A.shape), random_state)
value = sp.sparse.linalg.svds(A, k=1, return_singular_vectors=False, v0=v0)
else:
if sp.sparse.issparse(A):
value = sp.sparse.linalg.norm(A, ord=norm)
else:
value = sp.linalg.norm(A, ord=norm)
return value
def scalable_frobenius_norm_discrepancy(X, U, s, V):
if not sp.sparse.issparse(X) or (
X.shape[0] * X.shape[1] * X.dtype.itemsize < MAX_MEMORY
):
# if the input is not sparse or sparse but not too big,
# U.dot(np.diag(s).dot(V)) will fit in RAM
A = X - U.dot(np.diag(s).dot(V))
return norm_diff(A, norm="fro")
print("... computing fro norm by batches...")
batch_size = 1000
Vhat = np.diag(s).dot(V)
cum_norm = 0.0
for batch in gen_batches(X.shape[0], batch_size):
M = X[batch, :] - U[batch, :].dot(Vhat)
cum_norm += norm_diff(M, norm="fro", msg=False)
return np.sqrt(cum_norm)
def bench_a(X, dataset_name, power_iter, n_oversamples, n_comps):
all_time = defaultdict(list)
if enable_spectral_norm:
all_spectral = defaultdict(list)
X_spectral_norm = norm_diff(X, norm=2, msg=False, random_state=0)
all_frobenius = defaultdict(list)
X_fro_norm = norm_diff(X, norm="fro", msg=False)
for pi in power_iter:
for pm in ["none", "LU", "QR"]:
print("n_iter = %d on sklearn - %s" % (pi, pm))
U, s, V, time = svd_timing(
X,
n_comps,
n_iter=pi,
power_iteration_normalizer=pm,
n_oversamples=n_oversamples,
)
label = "sklearn - %s" % pm
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(
norm_diff(X - A, norm=2, random_state=0) / X_spectral_norm
)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if fbpca_available:
print("n_iter = %d on fbca" % (pi))
U, s, V, time = svd_timing(
X,
n_comps,
n_iter=pi,
power_iteration_normalizer=pm,
n_oversamples=n_oversamples,
method="fbpca",
)
label = "fbpca"
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(
norm_diff(X - A, norm=2, random_state=0) / X_spectral_norm
)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if enable_spectral_norm:
title = "%s: spectral norm diff vs running time" % (dataset_name)
plot_time_vs_s(all_time, all_spectral, power_iter, title)
title = "%s: Frobenius norm diff vs running time" % (dataset_name)
plot_time_vs_s(all_time, all_frobenius, power_iter, title)
def bench_b(power_list):
n_samples, n_features = 1000, 10000
data_params = {
"n_samples": n_samples,
"n_features": n_features,
"tail_strength": 0.7,
"random_state": random_state,
}
dataset_name = "low rank matrix %d x %d" % (n_samples, n_features)
ranks = [10, 50, 100]
if enable_spectral_norm:
all_spectral = defaultdict(list)
all_frobenius = defaultdict(list)
for rank in ranks:
X = make_low_rank_matrix(effective_rank=rank, **data_params)
if enable_spectral_norm:
X_spectral_norm = norm_diff(X, norm=2, msg=False, random_state=0)
X_fro_norm = norm_diff(X, norm="fro", msg=False)
for n_comp in [int(rank / 2), rank, rank * 2]:
label = "rank=%d, n_comp=%d" % (rank, n_comp)
print(label)
for pi in power_list:
U, s, V, _ = svd_timing(
X,
n_comp,
n_iter=pi,
n_oversamples=2,
power_iteration_normalizer="LU",
)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(
norm_diff(X - A, norm=2, random_state=0) / X_spectral_norm
)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if enable_spectral_norm:
title = "%s: spectral norm diff vs n power iteration" % (dataset_name)
plot_power_iter_vs_s(power_iter, all_spectral, title)
title = "%s: Frobenius norm diff vs n power iteration" % (dataset_name)
plot_power_iter_vs_s(power_iter, all_frobenius, title)
def bench_c(datasets, n_comps):
all_time = defaultdict(list)
if enable_spectral_norm:
all_spectral = defaultdict(list)
all_frobenius = defaultdict(list)
for dataset_name in datasets:
X = get_data(dataset_name)
if X is None:
continue
if enable_spectral_norm:
X_spectral_norm = norm_diff(X, norm=2, msg=False, random_state=0)
X_fro_norm = norm_diff(X, norm="fro", msg=False)
n_comps = np.minimum(n_comps, np.min(X.shape))
label = "sklearn"
print("%s %d x %d - %s" % (dataset_name, X.shape[0], X.shape[1], label))
U, s, V, time = svd_timing(X, n_comps, n_iter=2, n_oversamples=10, method=label)
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(
norm_diff(X - A, norm=2, random_state=0) / X_spectral_norm
)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if fbpca_available:
label = "fbpca"
print("%s %d x %d - %s" % (dataset_name, X.shape[0], X.shape[1], label))
U, s, V, time = svd_timing(
X, n_comps, n_iter=2, n_oversamples=2, method=label
)
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(
norm_diff(X - A, norm=2, random_state=0) / X_spectral_norm
)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if len(all_time) == 0:
raise ValueError("No tests ran. Aborting.")
if enable_spectral_norm:
title = "normalized spectral norm diff vs running time"
scatter_time_vs_s(all_time, all_spectral, datasets, title)
title = "normalized Frobenius norm diff vs running time"
scatter_time_vs_s(all_time, all_frobenius, datasets, title)
if __name__ == "__main__":
random_state = check_random_state(1234)
power_iter = np.arange(0, 6)
n_comps = 50
for dataset_name in datasets:
X = get_data(dataset_name)
if X is None:
continue
print(
" >>>>>> Benching sklearn and fbpca on %s %d x %d"
% (dataset_name, X.shape[0], X.shape[1])
)
bench_a(
X,
dataset_name,
power_iter,
n_oversamples=2,
n_comps=np.minimum(n_comps, np.min(X.shape)),
)
print(" >>>>>> Benching on simulated low rank matrix with variable rank")
bench_b(power_iter)
print(" >>>>>> Benching sklearn and fbpca default configurations")
bench_c(datasets + big_sparse_datasets, n_comps)
plt.show()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_lasso.py | benchmarks/bench_lasso.py | """
Benchmarks of Lasso vs LassoLars
First, we fix a training set and increase the number of
samples. Then we plot the computation time as a function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as a function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets import make_regression
def compute_bench(alpha, n_samples, n_features, precompute):
lasso_results = []
lars_lasso_results = []
it = 0
for ns in n_samples:
for nf in n_features:
it += 1
print("==================")
print("Iteration %s of %s" % (it, max(len(n_samples), len(n_features))))
print("==================")
n_informative = nf // 10
X, Y, coef_ = make_regression(
n_samples=ns,
n_features=nf,
n_informative=n_informative,
noise=0.1,
coef=True,
)
X /= np.sqrt(np.sum(X**2, axis=0)) # Normalize data
gc.collect()
print("- benchmarking Lasso")
clf = Lasso(alpha=alpha, fit_intercept=False, precompute=precompute)
tstart = time()
clf.fit(X, Y)
lasso_results.append(time() - tstart)
gc.collect()
print("- benchmarking LassoLars")
clf = LassoLars(alpha=alpha, fit_intercept=False, precompute=precompute)
tstart = time()
clf.fit(X, Y)
lars_lasso_results.append(time() - tstart)
return lasso_results, lars_lasso_results
if __name__ == "__main__":
import matplotlib.pyplot as plt
from sklearn.linear_model import Lasso, LassoLars
alpha = 0.01 # regularization parameter
n_features = 10
list_n_samples = np.linspace(100, 1000000, 5).astype(int)
lasso_results, lars_lasso_results = compute_bench(
alpha, list_n_samples, [n_features], precompute=True
)
plt.figure("scikit-learn LASSO benchmark results")
plt.subplot(211)
plt.plot(list_n_samples, lasso_results, "b-", label="Lasso")
plt.plot(list_n_samples, lars_lasso_results, "r-", label="LassoLars")
plt.title("precomputed Gram matrix, %d features, alpha=%s" % (n_features, alpha))
plt.legend(loc="upper left")
plt.xlabel("number of samples")
plt.ylabel("Time (s)")
plt.axis("tight")
n_samples = 2000
list_n_features = np.linspace(500, 3000, 5).astype(int)
lasso_results, lars_lasso_results = compute_bench(
alpha, [n_samples], list_n_features, precompute=False
)
plt.subplot(212)
plt.plot(list_n_features, lasso_results, "b-", label="Lasso")
plt.plot(list_n_features, lars_lasso_results, "r-", label="LassoLars")
plt.title("%d samples, alpha=%s" % (n_samples, alpha))
plt.legend(loc="upper left")
plt.xlabel("number of features")
plt.ylabel("Time (s)")
plt.axis("tight")
plt.show()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_feature_expansions.py | benchmarks/bench_feature_expansions.py | from time import time
import matplotlib.pyplot as plt
import numpy as np
import scipy.sparse as sparse
from sklearn.preprocessing import PolynomialFeatures
degree = 2
trials = 3
num_rows = 1000
dimensionalities = np.array([1, 2, 8, 16, 32, 64])
densities = np.array([0.01, 0.1, 1.0])
csr_times = {d: np.zeros(len(dimensionalities)) for d in densities}
dense_times = {d: np.zeros(len(dimensionalities)) for d in densities}
transform = PolynomialFeatures(
degree=degree, include_bias=False, interaction_only=False
)
for trial in range(trials):
for density in densities:
for dim_index, dim in enumerate(dimensionalities):
print(trial, density, dim)
X_csr = sparse.random(num_rows, dim, density).tocsr()
X_dense = X_csr.toarray()
# CSR
t0 = time()
transform.fit_transform(X_csr)
csr_times[density][dim_index] += time() - t0
# Dense
t0 = time()
transform.fit_transform(X_dense)
dense_times[density][dim_index] += time() - t0
csr_linestyle = (0, (3, 1, 1, 1, 1, 1)) # densely dashdotdotted
dense_linestyle = (0, ()) # solid
fig, axes = plt.subplots(nrows=len(densities), ncols=1, figsize=(8, 10))
for density, ax in zip(densities, axes):
ax.plot(
dimensionalities,
csr_times[density] / trials,
label="csr",
linestyle=csr_linestyle,
)
ax.plot(
dimensionalities,
dense_times[density] / trials,
label="dense",
linestyle=dense_linestyle,
)
ax.set_title("density %0.2f, degree=%d, n_samples=%d" % (density, degree, num_rows))
ax.legend()
ax.set_xlabel("Dimensionality")
ax.set_ylabel("Time (seconds)")
plt.tight_layout()
plt.show()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_hist_gradient_boosting.py | benchmarks/bench_hist_gradient_boosting.py | import argparse
from time import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_classification, make_regression
from sklearn.ensemble import (
HistGradientBoostingClassifier,
HistGradientBoostingRegressor,
)
from sklearn.ensemble._hist_gradient_boosting.utils import get_equivalent_estimator
from sklearn.model_selection import train_test_split
parser = argparse.ArgumentParser()
parser.add_argument("--n-leaf-nodes", type=int, default=31)
parser.add_argument("--n-trees", type=int, default=10)
parser.add_argument(
"--lightgbm", action="store_true", default=False, help="also plot lightgbm"
)
parser.add_argument(
"--xgboost", action="store_true", default=False, help="also plot xgboost"
)
parser.add_argument(
"--catboost", action="store_true", default=False, help="also plot catboost"
)
parser.add_argument("--learning-rate", type=float, default=0.1)
parser.add_argument(
"--problem",
type=str,
default="classification",
choices=["classification", "regression"],
)
parser.add_argument("--loss", type=str, default="default")
parser.add_argument("--missing-fraction", type=float, default=0)
parser.add_argument("--n-classes", type=int, default=2)
parser.add_argument("--n-samples-max", type=int, default=int(1e6))
parser.add_argument("--n-features", type=int, default=20)
parser.add_argument("--max-bins", type=int, default=255)
parser.add_argument(
"--random-sample-weights",
action="store_true",
default=False,
help="generate and use random sample weights",
)
args = parser.parse_args()
n_leaf_nodes = args.n_leaf_nodes
n_trees = args.n_trees
lr = args.learning_rate
max_bins = args.max_bins
def get_estimator_and_data():
if args.problem == "classification":
X, y = make_classification(
args.n_samples_max * 2,
n_features=args.n_features,
n_classes=args.n_classes,
n_clusters_per_class=1,
n_informative=args.n_classes,
random_state=0,
)
return X, y, HistGradientBoostingClassifier
elif args.problem == "regression":
X, y = make_regression(
args.n_samples_max * 2, n_features=args.n_features, random_state=0
)
return X, y, HistGradientBoostingRegressor
X, y, Estimator = get_estimator_and_data()
if args.missing_fraction:
mask = np.random.binomial(1, args.missing_fraction, size=X.shape).astype(bool)
X[mask] = np.nan
if args.random_sample_weights:
sample_weight = np.random.rand(len(X)) * 10
else:
sample_weight = None
if sample_weight is not None:
(X_train_, X_test_, y_train_, y_test_, sample_weight_train_, _) = train_test_split(
X, y, sample_weight, test_size=0.5, random_state=0
)
else:
X_train_, X_test_, y_train_, y_test_ = train_test_split(
X, y, test_size=0.5, random_state=0
)
sample_weight_train_ = None
def one_run(n_samples):
X_train = X_train_[:n_samples]
X_test = X_test_[:n_samples]
y_train = y_train_[:n_samples]
y_test = y_test_[:n_samples]
if sample_weight is not None:
sample_weight_train = sample_weight_train_[:n_samples]
else:
sample_weight_train = None
assert X_train.shape[0] == n_samples
assert X_test.shape[0] == n_samples
print("Data size: %d samples train, %d samples test." % (n_samples, n_samples))
print("Fitting a sklearn model...")
tic = time()
est = Estimator(
learning_rate=lr,
max_iter=n_trees,
max_bins=max_bins,
max_leaf_nodes=n_leaf_nodes,
early_stopping=False,
random_state=0,
verbose=0,
)
loss = args.loss
if args.problem == "classification":
if loss == "default":
loss = "log_loss"
else:
# regression
if loss == "default":
loss = "squared_error"
est.set_params(loss=loss)
est.fit(X_train, y_train, sample_weight=sample_weight_train)
sklearn_fit_duration = time() - tic
tic = time()
sklearn_score = est.score(X_test, y_test)
sklearn_score_duration = time() - tic
print("score: {:.4f}".format(sklearn_score))
print("fit duration: {:.3f}s,".format(sklearn_fit_duration))
print("score duration: {:.3f}s,".format(sklearn_score_duration))
lightgbm_score = None
lightgbm_fit_duration = None
lightgbm_score_duration = None
if args.lightgbm:
print("Fitting a LightGBM model...")
lightgbm_est = get_equivalent_estimator(
est, lib="lightgbm", n_classes=args.n_classes
)
tic = time()
lightgbm_est.fit(X_train, y_train, sample_weight=sample_weight_train)
lightgbm_fit_duration = time() - tic
tic = time()
lightgbm_score = lightgbm_est.score(X_test, y_test)
lightgbm_score_duration = time() - tic
print("score: {:.4f}".format(lightgbm_score))
print("fit duration: {:.3f}s,".format(lightgbm_fit_duration))
print("score duration: {:.3f}s,".format(lightgbm_score_duration))
xgb_score = None
xgb_fit_duration = None
xgb_score_duration = None
if args.xgboost:
print("Fitting an XGBoost model...")
xgb_est = get_equivalent_estimator(est, lib="xgboost", n_classes=args.n_classes)
tic = time()
xgb_est.fit(X_train, y_train, sample_weight=sample_weight_train)
xgb_fit_duration = time() - tic
tic = time()
xgb_score = xgb_est.score(X_test, y_test)
xgb_score_duration = time() - tic
print("score: {:.4f}".format(xgb_score))
print("fit duration: {:.3f}s,".format(xgb_fit_duration))
print("score duration: {:.3f}s,".format(xgb_score_duration))
cat_score = None
cat_fit_duration = None
cat_score_duration = None
if args.catboost:
print("Fitting a CatBoost model...")
cat_est = get_equivalent_estimator(
est, lib="catboost", n_classes=args.n_classes
)
tic = time()
cat_est.fit(X_train, y_train, sample_weight=sample_weight_train)
cat_fit_duration = time() - tic
tic = time()
cat_score = cat_est.score(X_test, y_test)
cat_score_duration = time() - tic
print("score: {:.4f}".format(cat_score))
print("fit duration: {:.3f}s,".format(cat_fit_duration))
print("score duration: {:.3f}s,".format(cat_score_duration))
return (
sklearn_score,
sklearn_fit_duration,
sklearn_score_duration,
lightgbm_score,
lightgbm_fit_duration,
lightgbm_score_duration,
xgb_score,
xgb_fit_duration,
xgb_score_duration,
cat_score,
cat_fit_duration,
cat_score_duration,
)
n_samples_list = [1000, 10000, 100000, 500000, 1000000, 5000000, 10000000]
n_samples_list = [
n_samples for n_samples in n_samples_list if n_samples <= args.n_samples_max
]
sklearn_scores = []
sklearn_fit_durations = []
sklearn_score_durations = []
lightgbm_scores = []
lightgbm_fit_durations = []
lightgbm_score_durations = []
xgb_scores = []
xgb_fit_durations = []
xgb_score_durations = []
cat_scores = []
cat_fit_durations = []
cat_score_durations = []
for n_samples in n_samples_list:
(
sklearn_score,
sklearn_fit_duration,
sklearn_score_duration,
lightgbm_score,
lightgbm_fit_duration,
lightgbm_score_duration,
xgb_score,
xgb_fit_duration,
xgb_score_duration,
cat_score,
cat_fit_duration,
cat_score_duration,
) = one_run(n_samples)
for scores, score in (
(sklearn_scores, sklearn_score),
(sklearn_fit_durations, sklearn_fit_duration),
(sklearn_score_durations, sklearn_score_duration),
(lightgbm_scores, lightgbm_score),
(lightgbm_fit_durations, lightgbm_fit_duration),
(lightgbm_score_durations, lightgbm_score_duration),
(xgb_scores, xgb_score),
(xgb_fit_durations, xgb_fit_duration),
(xgb_score_durations, xgb_score_duration),
(cat_scores, cat_score),
(cat_fit_durations, cat_fit_duration),
(cat_score_durations, cat_score_duration),
):
scores.append(score)
fig, axs = plt.subplots(3, sharex=True)
axs[0].plot(n_samples_list, sklearn_scores, label="sklearn")
axs[1].plot(n_samples_list, sklearn_fit_durations, label="sklearn")
axs[2].plot(n_samples_list, sklearn_score_durations, label="sklearn")
if args.lightgbm:
axs[0].plot(n_samples_list, lightgbm_scores, label="lightgbm")
axs[1].plot(n_samples_list, lightgbm_fit_durations, label="lightgbm")
axs[2].plot(n_samples_list, lightgbm_score_durations, label="lightgbm")
if args.xgboost:
axs[0].plot(n_samples_list, xgb_scores, label="XGBoost")
axs[1].plot(n_samples_list, xgb_fit_durations, label="XGBoost")
axs[2].plot(n_samples_list, xgb_score_durations, label="XGBoost")
if args.catboost:
axs[0].plot(n_samples_list, cat_scores, label="CatBoost")
axs[1].plot(n_samples_list, cat_fit_durations, label="CatBoost")
axs[2].plot(n_samples_list, cat_score_durations, label="CatBoost")
for ax in axs:
ax.set_xscale("log")
ax.legend(loc="best")
ax.set_xlabel("n_samples")
axs[0].set_title("scores")
axs[1].set_title("fit duration (s)")
axs[2].set_title("score duration (s)")
title = args.problem
if args.problem == "classification":
title += " n_classes = {}".format(args.n_classes)
fig.suptitle(title)
plt.tight_layout()
plt.show()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_mnist.py | benchmarks/bench_mnist.py | """
=======================
MNIST dataset benchmark
=======================
Benchmark on the MNIST dataset. The dataset comprises 70,000 samples
and 784 features. Here, we consider the task of predicting
10 classes - digits from 0 to 9 from their raw images. By contrast to the
covertype dataset, the feature space is homogeneous.
Example of output :
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
------------------------------------------------------------
MLP_adam 53.46s 0.11s 0.0224
Nystroem-SVM 112.97s 0.92s 0.0228
MultilayerPerceptron 24.33s 0.14s 0.0287
ExtraTrees 42.99s 0.57s 0.0294
RandomForest 42.70s 0.49s 0.0318
SampledRBF-SVM 135.81s 0.56s 0.0486
LogisticRegression-SAG 16.67s 0.06s 0.0824
CART 20.69s 0.02s 0.1219
dummy 0.00s 0.01s 0.8973
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import argparse
import os
from time import time
import numpy as np
from joblib import Memory
from sklearn.datasets import fetch_openml, get_data_home
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier
from sklearn.kernel_approximation import Nystroem, RBFSampler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import zero_one_loss
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), "mnist_benchmark_data"), mmap_mode="r")
@memory.cache
def load_data(dtype=np.float32, order="F"):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
# Load dataset
print("Loading dataset...")
data = fetch_openml("mnist_784", as_frame=True)
X = check_array(data["data"], dtype=dtype, order=order)
y = data["target"]
# Normalize features
X = X / 255
# Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 60000
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
return X_train, X_test, y_train, y_test
ESTIMATORS = {
"dummy": DummyClassifier(),
"CART": DecisionTreeClassifier(),
"ExtraTrees": ExtraTreesClassifier(),
"RandomForest": RandomForestClassifier(),
"Nystroem-SVM": make_pipeline(
Nystroem(gamma=0.015, n_components=1000), LinearSVC(C=100)
),
"SampledRBF-SVM": make_pipeline(
RBFSampler(gamma=0.015, n_components=1000), LinearSVC(C=100)
),
"LogisticRegression-SAG": LogisticRegression(solver="sag", tol=1e-1, C=1e4),
"LogisticRegression-SAGA": LogisticRegression(solver="saga", tol=1e-1, C=1e4),
"MultilayerPerceptron": MLPClassifier(
hidden_layer_sizes=(100, 100),
max_iter=400,
alpha=1e-4,
solver="sgd",
learning_rate_init=0.2,
momentum=0.9,
verbose=1,
tol=1e-4,
random_state=1,
),
"MLP-adam": MLPClassifier(
hidden_layer_sizes=(100, 100),
max_iter=400,
alpha=1e-4,
solver="adam",
learning_rate_init=0.001,
verbose=1,
tol=1e-4,
random_state=1,
),
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--classifiers",
nargs="+",
choices=ESTIMATORS,
type=str,
default=["ExtraTrees", "Nystroem-SVM"],
help="list of classifiers to benchmark.",
)
parser.add_argument(
"--n-jobs",
nargs="?",
default=1,
type=int,
help=(
"Number of concurrently running workers for "
"models that support parallelism."
),
)
parser.add_argument(
"--order",
nargs="?",
default="C",
type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered data",
)
parser.add_argument(
"--random-seed",
nargs="?",
default=0,
type=int,
help="Common seed used by random number generator.",
)
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(order=args["order"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print(
"%s %d (size=%dMB)"
% (
"number of train samples:".ljust(25),
X_train.shape[0],
int(X_train.nbytes / 1e6),
)
)
print(
"%s %d (size=%dMB)"
% (
"number of test samples:".ljust(25),
X_test.shape[0],
int(X_test.nbytes / 1e6),
)
)
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(
**{
p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")
}
)
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print(
"{0: <24} {1: >10} {2: >11} {3: >12}".format(
"Classifier ", "train-time", "test-time", "error-rate"
)
)
print("-" * 60)
for name in sorted(args["classifiers"], key=error.get):
print(
"{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}".format(
name, train_time[name], test_time[name], error[name]
)
)
print()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_lof.py | benchmarks/bench_lof.py | """
============================
LocalOutlierFactor benchmark
============================
A test of LocalOutlierFactor on classical anomaly detection datasets.
Note that LocalOutlierFactor is not meant to predict on a test set and its
performance is assessed in an outlier detection context:
1. The model is trained on the whole dataset which is assumed to contain
outliers.
2. The ROC curve is computed on the same dataset using the knowledge of the
labels.
In this context there is no need to shuffle the dataset because the model
is trained and tested on the whole dataset. The randomness of this benchmark
is only caused by the random selection of anomalies in the SA dataset.
"""
from time import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import fetch_covtype, fetch_kddcup99, fetch_openml
from sklearn.metrics import auc, roc_curve
from sklearn.neighbors import LocalOutlierFactor
from sklearn.preprocessing import LabelBinarizer
# Run LOF on each dataset in turn and overlay the ROC curves in one figure.
print(__doc__)
random_state = 2  # to control the random selection of anomalies in SA
# datasets available: ['http', 'smtp', 'SA', 'SF', 'shuttle', 'forestcover']
datasets = ["http", "smtp", "SA", "SF", "shuttle", "forestcover"]
plt.figure()
for dataset_name in datasets:
    # loading and vectorization
    print("loading data")
    if dataset_name in ["http", "smtp", "SA", "SF"]:
        dataset = fetch_kddcup99(
            subset=dataset_name, percent10=True, random_state=random_state
        )
        X = dataset.data
        y = dataset.target
    if dataset_name == "shuttle":
        dataset = fetch_openml("shuttle", as_frame=False)
        X = dataset.data
        y = dataset.target.astype(np.int64)
        # we remove data with label 4
        # normal data are then those of class 1
        s = y != 4
        X = X[s, :]
        y = y[s]
        # binary ground truth: 1 marks an anomaly (any class other than 1)
        y = (y != 1).astype(int)
    if dataset_name == "forestcover":
        dataset = fetch_covtype()
        X = dataset.data
        y = dataset.target
        # normal data are those with attribute 2
        # abnormal those with attribute 4
        # "+" on boolean arrays acts as element-wise logical OR here
        s = (y == 2) + (y == 4)
        X = X[s, :]
        y = y[s]
        y = (y != 2).astype(int)
    print("vectorizing data")
    if dataset_name == "SF":
        # one-hot encode the categorical protocol column (index 1)
        lb = LabelBinarizer()
        x1 = lb.fit_transform(X[:, 1].astype(str))
        X = np.c_[X[:, :1], x1, X[:, 2:]]
        y = (y != b"normal.").astype(int)
    if dataset_name == "SA":
        # one-hot encode the three categorical columns (indices 1-3)
        lb = LabelBinarizer()
        x1 = lb.fit_transform(X[:, 1].astype(str))
        x2 = lb.fit_transform(X[:, 2].astype(str))
        x3 = lb.fit_transform(X[:, 3].astype(str))
        X = np.c_[X[:, :1], x1, x2, x3, X[:, 4:]]
        y = (y != b"normal.").astype(int)
    if dataset_name == "http" or dataset_name == "smtp":
        y = (y != b"normal.").astype(int)
    X = X.astype(float)
    print("LocalOutlierFactor processing...")
    model = LocalOutlierFactor(n_neighbors=20)
    tstart = time()
    model.fit(X)
    fit_time = time() - tstart
    scoring = -model.negative_outlier_factor_  # the lower, the more normal
    fpr, tpr, thresholds = roc_curve(y, scoring)
    AUC = auc(fpr, tpr)
    plt.plot(
        fpr,
        tpr,
        lw=1,
        label="ROC for %s (area = %0.3f, train-time: %0.2fs)"
        % (dataset_name, AUC, fit_time),
    )
# Shared axes/labels for all ROC curves collected above.
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("Receiver operating characteristic")
plt.legend(loc="lower right")
plt.show()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_tree.py | benchmarks/bench_tree.py | """
To run this, you'll need to have installed.
* scikit-learn
Does two benchmarks
First, we fix a training set, increase the number of
samples to classify and plot number of classified samples as a
function of time.
In the second benchmark, we increase the number of dimensions of the
training set, classify a sample and plot the time taken as a function
of the number of dimensions.
"""
import gc
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
# to store the results
scikit_classifier_results = []
scikit_regressor_results = []
mu_second = 0.0 + 10**6 # number of microseconds in a second
def bench_scikit_tree_classifier(X, Y):
    """Fit + predict a decision tree classifier on (X, Y) and append the
    elapsed wall-clock time in seconds to the module-level
    ``scikit_classifier_results`` list."""
    from sklearn.tree import DecisionTreeClassifier

    gc.collect()
    t0 = datetime.now()
    model = DecisionTreeClassifier()
    model.fit(X, Y).predict(X)
    elapsed = datetime.now() - t0
    # convert the timedelta into fractional seconds
    scikit_classifier_results.append(elapsed.seconds + elapsed.microseconds / mu_second)
def bench_scikit_tree_regressor(X, Y):
    """Fit + predict a decision tree regressor on (X, Y) and append the
    elapsed wall-clock time in seconds to the module-level
    ``scikit_regressor_results`` list."""
    from sklearn.tree import DecisionTreeRegressor

    gc.collect()
    t0 = datetime.now()
    model = DecisionTreeRegressor()
    model.fit(X, Y).predict(X)
    elapsed = datetime.now() - t0
    # convert the timedelta into fractional seconds
    scikit_regressor_results.append(elapsed.seconds + elapsed.microseconds / mu_second)
if __name__ == "__main__":
    print("============================================")
    print("Warning: this is going to take a looong time")
    print("============================================")
    # Benchmark 1: fixed dimensionality (10), growing number of samples.
    n = 10
    step = 10000
    n_samples = 10000
    dim = 10
    n_classes = 10
    for i in range(n):
        print("============================================")
        print("Entering iteration %s of %s" % (i, n))
        print("============================================")
        n_samples += step
        X = np.random.randn(n_samples, dim)
        Y = np.random.randint(0, n_classes, (n_samples,))
        bench_scikit_tree_classifier(X, Y)
        Y = np.random.randn(n_samples)
        bench_scikit_tree_regressor(X, Y)
    xx = range(0, n * step, step)
    plt.figure("scikit-learn tree benchmark results")
    plt.subplot(211)
    plt.title("Learning with varying number of samples")
    plt.plot(xx, scikit_classifier_results, "g-", label="classification")
    plt.plot(xx, scikit_regressor_results, "r-", label="regression")
    plt.legend(loc="upper left")
    plt.xlabel("number of samples")
    plt.ylabel("Time (s)")
    # Benchmark 2: fixed sample count (100), growing dimensionality.
    # The result lists are reset so the second plot starts fresh.
    scikit_classifier_results = []
    scikit_regressor_results = []
    n = 10
    step = 500
    start_dim = 500
    n_classes = 10
    dim = start_dim
    for i in range(0, n):
        print("============================================")
        print("Entering iteration %s of %s" % (i, n))
        print("============================================")
        dim += step
        X = np.random.randn(100, dim)
        Y = np.random.randint(0, n_classes, (100,))
        bench_scikit_tree_classifier(X, Y)
        Y = np.random.randn(100)
        bench_scikit_tree_regressor(X, Y)
    xx = np.arange(start_dim, start_dim + n * step, step)
    plt.subplot(212)
    plt.title("Learning in high dimensional spaces")
    plt.plot(xx, scikit_classifier_results, "g-", label="classification")
    plt.plot(xx, scikit_regressor_results, "r-", label="regression")
    plt.legend(loc="upper left")
    plt.xlabel("number of dimensions")
    plt.ylabel("Time (s)")
    plt.axis("tight")
    plt.show()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_sgd_regression.py | benchmarks/bench_sgd_regression.py | # Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import gc
from time import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import ElasticNet, Ridge, SGDRegressor
from sklearn.metrics import mean_squared_error
"""
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetic data.
"""
print(__doc__)
if __name__ == "__main__":
    # Grid of problem sizes: 5 training-set sizes x 3 feature counts.
    list_n_samples = np.linspace(100, 10000, 5).astype(int)
    list_n_features = [10, 100, 1000]
    n_test = 1000
    max_iter = 1000
    noise = 0.1
    alpha = 0.01
    # result arrays: [n_samples index, n_features index, (mse, fit time)]
    sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
    elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
    ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
    asgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
    for i, n_train in enumerate(list_n_samples):
        for j, n_features in enumerate(list_n_features):
            X, y, coef = make_regression(
                n_samples=n_train + n_test,
                n_features=n_features,
                noise=noise,
                coef=True,
            )
            X_train = X[:n_train]
            y_train = y[:n_train]
            X_test = X[n_train:]
            y_test = y[n_train:]
            print("=======================")
            print("Round %d %d" % (i, j))
            print("n_features:", n_features)
            print("n_samples:", n_train)
            # Shuffle data
            idx = np.arange(n_train)
            np.random.seed(13)
            np.random.shuffle(idx)
            X_train = X_train[idx]
            y_train = y_train[idx]
            # Standardize features and targets using training statistics only.
            std = X_train.std(axis=0)
            mean = X_train.mean(axis=0)
            X_train = (X_train - mean) / std
            X_test = (X_test - mean) / std
            std = y_train.std(axis=0)
            mean = y_train.mean(axis=0)
            y_train = (y_train - mean) / std
            y_test = (y_test - mean) / std
            gc.collect()
            print("- benchmarking ElasticNet")
            clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False)
            tstart = time()
            clf.fit(X_train, y_train)
            elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test), y_test)
            elnet_results[i, j, 1] = time() - tstart
            gc.collect()
            print("- benchmarking SGD")
            # alpha is rescaled by n_train so the per-sample penalty matches
            # the batch solvers above.
            clf = SGDRegressor(
                alpha=alpha / n_train,
                fit_intercept=False,
                max_iter=max_iter,
                learning_rate="invscaling",
                eta0=0.01,
                power_t=0.25,
                tol=1e-3,
            )
            tstart = time()
            clf.fit(X_train, y_train)
            sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test), y_test)
            sgd_results[i, j, 1] = time() - tstart
            gc.collect()
            print("max_iter", max_iter)
            print("- benchmarking A-SGD")
            # Averaged SGD: start averaging halfway through the updates.
            clf = SGDRegressor(
                alpha=alpha / n_train,
                fit_intercept=False,
                max_iter=max_iter,
                learning_rate="invscaling",
                eta0=0.002,
                power_t=0.05,
                tol=1e-3,
                average=(max_iter * n_train // 2),
            )
            tstart = time()
            clf.fit(X_train, y_train)
            asgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test), y_test)
            asgd_results[i, j, 1] = time() - tstart
            gc.collect()
            print("- benchmarking RidgeRegression")
            clf = Ridge(alpha=alpha, fit_intercept=False)
            tstart = time()
            clf.fit(X_train, y_train)
            ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test), y_test)
            ridge_results[i, j, 1] = time() - tstart
    # Plot results
    i = 0
    m = len(list_n_features)
    plt.figure("scikit-learn SGD regression benchmark results", figsize=(5 * 2, 4 * m))
    for j in range(m):
        plt.subplot(m, 2, i + 1)
        # RMSE = sqrt of the stored MSE values.
        plt.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]), label="ElasticNet")
        plt.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]), label="SGDRegressor")
        plt.plot(list_n_samples, np.sqrt(asgd_results[:, j, 0]), label="A-SGDRegressor")
        plt.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]), label="Ridge")
        plt.legend(prop={"size": 10})
        plt.xlabel("n_train")
        plt.ylabel("RMSE")
        plt.title("Test error - %d features" % list_n_features[j])
        i += 1
        plt.subplot(m, 2, i + 1)
        # NOTE(review): this panel plots sqrt of the fit times while the axis
        # is labelled "Time [sec]" - probably meant the raw times; confirm.
        plt.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]), label="ElasticNet")
        plt.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]), label="SGDRegressor")
        plt.plot(list_n_samples, np.sqrt(asgd_results[:, j, 1]), label="A-SGDRegressor")
        plt.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]), label="Ridge")
        plt.legend(prop={"size": 10})
        plt.xlabel("n_train")
        plt.ylabel("Time [sec]")
        plt.title("Training time - %d features" % list_n_features[j])
        i += 1
    plt.subplots_adjust(hspace=0.30)
    plt.show()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_sparsify.py | benchmarks/bench_sparsify.py | """
Benchmark SGD prediction time with dense/sparse coefficients.
Invoke with
-----------
$ kernprof.py -l sparsity_benchmark.py
$ python -m line_profiler sparsity_benchmark.py.lprof
Typical output
--------------
input data sparsity: 0.050000
true coef sparsity: 0.000100
test data sparsity: 0.027400
model sparsity: 0.000024
r^2 on test data (dense model) : 0.233651
r^2 on test data (sparse model) : 0.233651
Wrote profile results to sparsity_benchmark.py.lprof
Timer unit: 1e-06 s
File: sparsity_benchmark.py
Function: benchmark_dense_predict at line 51
Total time: 0.532979 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
51 @profile
52 def benchmark_dense_predict():
53 301 640 2.1 0.1 for _ in range(300):
54 300 532339 1774.5 99.9 clf.predict(X_test)
File: sparsity_benchmark.py
Function: benchmark_sparse_predict at line 56
Total time: 0.39274 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
56 @profile
57 def benchmark_sparse_predict():
58 1 10854 10854.0 2.8 X_test_sparse = csr_matrix(X_test)
59 301 477 1.6 0.1 for _ in range(300):
60 300 381409 1271.4 97.1 clf.predict(X_test_sparse)
"""
import numpy as np
from scipy.sparse import csr_matrix
from sklearn.linear_model import SGDRegressor
from sklearn.metrics import r2_score
# Fix the global NumPy RNG so repeated runs produce identical data.
np.random.seed(42)
def sparsity_ratio(X):
    """Fraction of nonzero entries in ``X`` relative to the full
    ``n_samples * n_features`` grid (module-level globals, not X's own
    size)."""
    nonzero_count = np.count_nonzero(X)
    return nonzero_count / float(n_samples * n_features)
# Build a sparse regression problem: mostly-zero inputs and a half-zero coef.
n_samples, n_features = 5000, 300
X = np.random.randn(n_samples, n_features)
inds = np.arange(n_samples)
np.random.shuffle(inds)
X[inds[int(n_features / 1.2) :]] = 0  # sparsify input (zero most rows)
print("input data sparsity: %f" % sparsity_ratio(X))
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[n_features // 2 :]] = 0  # sparsify coef
print("true coef sparsity: %f" % sparsity_ratio(coef))
y = np.dot(X, coef)
# add noise
# NOTE(review): np.random.normal((n_samples,)) passes the tuple as the *loc*
# argument, not *size*, so this draws a single value (around 5000) shared by
# all samples rather than per-sample noise; likely meant size=(n_samples,).
y += 0.01 * np.random.normal((n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[: n_samples // 2], y[: n_samples // 2]
X_test, y_test = X[n_samples // 2 :], y[n_samples // 2 :]
print("test data sparsity: %f" % sparsity_ratio(X_test))
###############################################################################
# Fit an L1-penalized SGD regressor whose coef_ should come out sparse.
clf = SGDRegressor(penalty="l1", alpha=0.2, max_iter=2000, tol=None)
clf.fit(X_train, y_train)
print("model sparsity: %f" % sparsity_ratio(clf.coef_))
def benchmark_dense_predict():
    """Run 300 predictions on the dense test matrix (profiling target)."""
    n_repeats = 300
    for _run in range(n_repeats):
        clf.predict(X_test)
def benchmark_sparse_predict():
    """Run 300 predictions on a CSR copy of the test matrix (profiling
    target)."""
    X_test_sparse = csr_matrix(X_test)
    n_repeats = 300
    for _run in range(n_repeats):
        clf.predict(X_test_sparse)
def score(y_test, y_pred, case):
    """Print the R^2 of ``y_pred`` against ``y_test``, labelled by ``case``."""
    print("r^2 on test data (%s) : %f" % (case, r2_score(y_test, y_pred)))
# Compare dense vs sparse coefficient storage: same R^2, different speed.
score(y_test, clf.predict(X_test), "dense model")
benchmark_dense_predict()
# Convert coef_ to a scipy sparse representation and repeat.
clf.sparsify()
score(y_test, clf.predict(X_test), "sparse model")
benchmark_sparse_predict()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_saga.py | benchmarks/bench_saga.py | """Author: Arthur Mensch, Nelle Varoquaux
Benchmarks of sklearn SAGA vs lightning SAGA vs Liblinear. Shows the gain
in using multinomial logistic regression in term of learning time.
"""
import json
import os
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import (
fetch_20newsgroups_vectorized,
fetch_rcv1,
load_digits,
load_iris,
)
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import log_loss
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import LabelBinarizer, LabelEncoder
from sklearn.utils.extmath import safe_sparse_dot, softmax
from sklearn.utils.parallel import Parallel, delayed
def fit_single(
    solver,
    X,
    y,
    penalty="l2",
    single_target=True,
    C=1,
    max_iter=10,
    skip_slow=False,
    dtype=np.float64,
):
    """Train one logistic-regression model and record its learning curve.

    Fits either sklearn's LogisticRegression or lightning's SAGAClassifier
    (``solver == "lightning"``) on a stratified train/test split of (X, y),
    refitting from scratch with increasing iteration budgets
    (1, 3, 5, ... up to ``max_iter``).

    Parameters
    ----------
    solver : str
        "saga", "liblinear", ... (sklearn) or "lightning".
    X, y : array-like
        Full dataset; split 75/25 internally.
    penalty : {"l2", "l1"}
    single_target : bool
        Only affects logging and the ovr/multinomial choice.
    C : float
        Inverse regularization strength.
    max_iter : int
        Upper bound on the iteration budgets tried.
    skip_slow : bool
        Skip the (lightning, l1) combination, which is known to be slow.
    dtype : numpy dtype
        Working precision for X and y.

    Returns
    -------
    (estimator, times, train_scores, test_scores, accuracies) with the four
    lists aligned per iteration budget (index 0 is the untrained baseline),
    or None when the combination is skipped.
    """
    if skip_slow and solver == "lightning" and penalty == "l1":
        print("skip_slowping l1 logistic regression with solver lightning.")
        return
    print(
        "Solving %s logistic regression with penalty %s, solver %s."
        % ("binary" if single_target else "multinomial", penalty, solver)
    )
    if solver == "lightning":
        from lightning.classification import SAGAClassifier
    if single_target or solver not in ["sag", "saga"]:
        multi_class = "ovr"
    else:
        multi_class = "multinomial"
    X = X.astype(dtype)
    y = y.astype(dtype)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, random_state=42, stratify=y
    )
    n_samples = X_train.shape[0]
    n_classes = np.unique(y_train).shape[0]
    # Index 0 of every trace is the untrained baseline.
    test_scores = [1]
    train_scores = [1]
    accuracies = [1 / n_classes]
    times = [0]
    if penalty == "l2":
        l1_ratio = 0
        alpha = 1.0 / (C * n_samples)
        beta = 0
        lightning_penalty = None
    else:
        l1_ratio = 1
        alpha = 0.0
        beta = 1.0 / (C * n_samples)
        lightning_penalty = "l1"
    for this_max_iter in range(1, max_iter + 1, 2):
        print(
            "[%s, %s, %s] Max iter: %s"
            % (
                "binary" if single_target else "multinomial",
                penalty,
                solver,
                this_max_iter,
            )
        )
        if solver == "lightning":
            lr = SAGAClassifier(
                loss="log",
                alpha=alpha,
                beta=beta,
                penalty=lightning_penalty,
                tol=-1,
                max_iter=this_max_iter,
            )
        else:
            lr = LogisticRegression(
                solver=solver,
                C=C,
                l1_ratio=l1_ratio,
                fit_intercept=False,
                tol=0,
                max_iter=this_max_iter,
                random_state=42,
            )
        if multi_class == "ovr":
            lr = OneVsRestClassifier(lr)
        # Makes cpu cache even for all fit calls
        X_train.max()
        # BUG FIX: time.clock() was removed in Python 3.8; use the
        # high-resolution monotonic counter instead.
        t0 = time.perf_counter()
        lr.fit(X_train, y_train)
        train_time = time.perf_counter() - t0
        scores = []
        for X, y in [(X_train, y_train), (X_test, y_test)]:
            try:
                y_pred = lr.predict_proba(X)
            except NotImplementedError:
                # Lightning predict_proba is not implemented for n_classes > 2
                y_pred = _predict_proba(lr, X)
            if isinstance(lr, OneVsRestClassifier):
                coef = np.concatenate([est.coef_ for est in lr.estimators_])
            else:
                coef = lr.coef_
            # Regularized mean log-loss, matching the training objective.
            score = log_loss(y, y_pred, normalize=False) / n_samples
            score += 0.5 * alpha * np.sum(coef**2) + beta * np.sum(np.abs(coef))
            scores.append(score)
        train_score, test_score = tuple(scores)
        y_pred = lr.predict(X_test)
        accuracy = np.sum(y_pred == y_test) / y_test.shape[0]
        test_scores.append(test_score)
        train_scores.append(train_score)
        accuracies.append(accuracy)
        times.append(train_time)
    return lr, times, train_scores, test_scores, accuracies
def _predict_proba(lr, X):
    """Softmax fallback for lightning estimators that lack predict_proba
    when n_classes >= 3: decision scores -> probabilities."""
    decision = safe_sparse_dot(X, lr.coef_.T)
    if hasattr(lr, "intercept_"):
        decision = decision + lr.intercept_
    return softmax(decision)
def exp(
    solvers,
    penalty,
    single_target,
    n_samples=30000,
    max_iter=20,
    dataset="rcv1",
    n_jobs=1,
    skip_slow=False,
):
    """Run ``fit_single`` for every (solver, dtype) pair on one dataset and
    dump all convergence traces to ``bench_saga.json``.

    Parameters
    ----------
    solvers : list of str
        Solver names forwarded to fit_single.
    penalty : {"l1", "l2"}
    single_target : bool
        Binarize multiclass labels so a binary problem is benchmarked.
    n_samples : int or None
        Truncate the dataset to this many rows (None keeps everything).
    max_iter, skip_slow : forwarded to fit_single.
    dataset : {"rcv1", "digits", "iris", "20newspaper"}
    n_jobs : int
        Parallelism of the (solver, dtype) grid.
    """
    dtypes_mapping = {
        "float64": np.float64,
        "float32": np.float32,
    }
    if dataset == "rcv1":
        rcv1 = fetch_rcv1()
        lbin = LabelBinarizer()
        lbin.fit(rcv1.target_names)
        X = rcv1.data
        y = rcv1.target
        y = lbin.inverse_transform(y)
        le = LabelEncoder()
        y = le.fit_transform(y)
        if single_target:
            y_n = y.copy()
            y_n[y > 16] = 1
            y_n[y <= 16] = 0
            y = y_n
    elif dataset == "digits":
        X, y = load_digits(return_X_y=True)
        if single_target:
            y_n = y.copy()
            y_n[y < 5] = 1
            y_n[y >= 5] = 0
            y = y_n
    elif dataset == "iris":
        iris = load_iris()
        X, y = iris.data, iris.target
    elif dataset == "20newspaper":
        ng = fetch_20newsgroups_vectorized()
        X = ng.data
        y = ng.target
        if single_target:
            y_n = y.copy()
            y_n[y > 4] = 1
            # BUG FIX: was ``y_n[y <= 16] = 0`` (copy-pasted from the rcv1
            # branch), which clobbered the 1s just set for labels in (4, 16]
            # and effectively binarized at 16 instead of the intended 4.
            y_n[y <= 4] = 0
            y = y_n
    X = X[:n_samples]
    y = y[:n_samples]
    out = Parallel(n_jobs=n_jobs, mmap_mode=None)(
        delayed(fit_single)(
            solver,
            X,
            y,
            penalty=penalty,
            single_target=single_target,
            dtype=dtype,
            C=1,
            max_iter=max_iter,
            skip_slow=skip_slow,
        )
        for solver in solvers
        for dtype in dtypes_mapping.values()
    )
    res = []
    idx = 0
    # BUG FIX: ``out`` is ordered solver-major (solver outer loop above), but
    # the original collection loop iterated dtype-major, so every result was
    # labelled with the wrong (solver, dtype) pair. Iterate in the same order
    # the jobs were generated.
    for solver in solvers:
        for dtype_name in dtypes_mapping.keys():
            if not (skip_slow and solver == "lightning" and penalty == "l1"):
                lr, times, train_scores, test_scores, accuracies = out[idx]
                this_res = dict(
                    solver=solver,
                    penalty=penalty,
                    dtype=dtype_name,
                    single_target=single_target,
                    times=times,
                    train_scores=train_scores,
                    test_scores=test_scores,
                    accuracies=accuracies,
                )
                res.append(this_res)
            idx += 1
    with open("bench_saga.json", "w+") as f:
        json.dump(res, f)
def plot(outname=None):
    """Render the traces dumped by ``exp`` (bench_saga.json) as PNG figures.

    One figure per ``single_target`` group with four panels: training
    objective vs time, test objective vs time, test accuracy vs time, and
    training objective vs iteration index.

    NOTE(review): the figure name uses the module-level ``penalty`` variable
    (set by the __main__ loop), not a parameter. A non-None ``outname`` is
    also reused for every group, so later groups overwrite earlier files.
    """
    import pandas as pd
    with open("bench_saga.json", "r") as f:
        f = json.load(f)
    res = pd.DataFrame(f)
    res.set_index(["single_target"], inplace=True)
    grouped = res.groupby(level=["single_target"])
    # Visual encoding: color = solver, linestyle/alpha = dtype.
    colors = {"saga": "C0", "liblinear": "C1", "lightning": "C2"}
    linestyles = {"float32": "--", "float64": "-"}
    alpha = {"float64": 0.5, "float32": 1}
    for idx, group in grouped:
        single_target = idx
        fig, axes = plt.subplots(figsize=(12, 4), ncols=4)
        # Panel 1: training objective vs wall-clock time.
        ax = axes[0]
        for scores, times, solver, dtype in zip(
            group["train_scores"], group["times"], group["solver"], group["dtype"]
        ):
            ax.plot(
                times,
                scores,
                label="%s - %s" % (solver, dtype),
                color=colors[solver],
                alpha=alpha[dtype],
                marker=".",
                linestyle=linestyles[dtype],
            )
            # vertical marker at the final (largest) time of each run
            ax.axvline(
                times[-1],
                color=colors[solver],
                alpha=alpha[dtype],
                linestyle=linestyles[dtype],
            )
        ax.set_xlabel("Time (s)")
        ax.set_ylabel("Training objective (relative to min)")
        ax.set_yscale("log")
        # Panel 2: test objective vs wall-clock time.
        ax = axes[1]
        for scores, times, solver, dtype in zip(
            group["test_scores"], group["times"], group["solver"], group["dtype"]
        ):
            ax.plot(
                times,
                scores,
                label=solver,
                color=colors[solver],
                linestyle=linestyles[dtype],
                marker=".",
                alpha=alpha[dtype],
            )
            ax.axvline(
                times[-1],
                color=colors[solver],
                alpha=alpha[dtype],
                linestyle=linestyles[dtype],
            )
        ax.set_xlabel("Time (s)")
        ax.set_ylabel("Test objective (relative to min)")
        ax.set_yscale("log")
        # Panel 3: test accuracy vs wall-clock time.
        ax = axes[2]
        for accuracy, times, solver, dtype in zip(
            group["accuracies"], group["times"], group["solver"], group["dtype"]
        ):
            ax.plot(
                times,
                accuracy,
                label="%s - %s" % (solver, dtype),
                alpha=alpha[dtype],
                marker=".",
                color=colors[solver],
                linestyle=linestyles[dtype],
            )
            ax.axvline(
                times[-1],
                color=colors[solver],
                alpha=alpha[dtype],
                linestyle=linestyles[dtype],
            )
        ax.set_xlabel("Time (s)")
        ax.set_ylabel("Test accuracy")
        ax.legend()
        name = "single_target" if single_target else "multi_target"
        name += "_%s" % penalty
        plt.suptitle(name)
        if outname is None:
            outname = name + ".png"
        fig.tight_layout()
        fig.subplots_adjust(top=0.9)
        # Panel 4: training objective vs iteration index.
        ax = axes[3]
        for scores, times, solver, dtype in zip(
            group["train_scores"], group["times"], group["solver"], group["dtype"]
        ):
            ax.plot(
                np.arange(len(scores)),
                scores,
                label="%s - %s" % (solver, dtype),
                marker=".",
                alpha=alpha[dtype],
                color=colors[solver],
                linestyle=linestyles[dtype],
            )
        ax.set_yscale("log")
        ax.set_xlabel("# iterations")
        ax.set_ylabel("Objective function")
        ax.legend()
        plt.savefig(outname)
if __name__ == "__main__":
    solvers = ["saga", "liblinear", "lightning"]
    penalties = ["l1", "l2"]
    # rcv1 subsample sizes to benchmark; None means the full dataset.
    n_samples = [100000, 300000, 500000, 800000, None]
    single_target = True
    for penalty in penalties:
        for n_sample in n_samples:
            exp(
                solvers,
                penalty,
                single_target,
                n_samples=n_sample,
                n_jobs=1,
                dataset="rcv1",
                max_iter=10,
            )
            if n_sample is not None:
                outname = "figures/saga_%s_%d.png" % (penalty, n_sample)
            else:
                outname = "figures/saga_%s_all.png" % (penalty,)
            try:
                # best-effort directory creation; ignore "already exists"
                os.makedirs("figures")
            except OSError:
                pass
            plot(outname)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_plot_hierarchical.py | benchmarks/bench_plot_hierarchical.py | from collections import defaultdict
from time import time
import numpy as np
from numpy import random as nr
from sklearn.cluster import AgglomerativeClustering
def compute_bench(samples_range, features_range):
    """Time AgglomerativeClustering fits over a grid of problem sizes.

    For every (n_samples, n_features) pair, fits a 10-cluster agglomerative
    clustering on random integer data once per linkage strategy and records
    the wall-clock fit time.

    Parameters
    ----------
    samples_range : iterable of int
        Numbers of samples to benchmark.
    features_range : iterable of int
        Numbers of features to benchmark.

    Returns
    -------
    defaultdict
        Maps linkage name -> list of fit times, ordered like the
        (n_samples, n_features) grid iteration.
    """
    it = 0
    # defaultdict(list) is the idiomatic spelling of defaultdict(lambda: [])
    results = defaultdict(list)
    max_it = len(samples_range) * len(features_range)
    for n_samples in samples_range:
        for n_features in features_range:
            it += 1
            print("==============================")
            print("Iteration %03d of %03d" % (it, max_it))
            print("n_samples %05d; n_features %02d" % (n_samples, n_features))
            print("==============================")
            print()
            data = nr.randint(-50, 51, (n_samples, n_features))
            for linkage in ("single", "average", "complete", "ward"):
                print(linkage.capitalize())
                tstart = time()
                AgglomerativeClustering(linkage=linkage, n_clusters=10).fit(data)
                delta = time() - tstart
                print("Speed: %0.3fs" % delta)
                print()
                results[linkage].append(delta)
    return results
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    samples_range = np.linspace(1000, 15000, 8).astype(int)
    features_range = np.array([2, 10, 20, 50])
    results = compute_bench(samples_range, features_range)
    # NOTE(review): max_time is computed but never used below.
    max_time = max([max(i) for i in [t for (label, t) in results.items()]])
    colors = plt.get_cmap("tab10")(np.linspace(0, 1, 10))[:4]
    lines = {linkage: None for linkage in results.keys()}
    fig, axs = plt.subplots(2, 2, sharex=True, sharey=True)
    fig.suptitle("Scikit-learn agglomerative clustering benchmark results", fontsize=16)
    # One subplot per n_features value; one colored line per linkage.
    for c, (label, timings) in zip(colors, sorted(results.items())):
        # flat timing list -> (n samples values, n feature values) grid
        timing_by_samples = np.asarray(timings).reshape(
            samples_range.shape[0], features_range.shape[0]
        )
        for n in range(timing_by_samples.shape[1]):
            ax = axs.flatten()[n]
            (lines[label],) = ax.plot(
                samples_range, timing_by_samples[:, n], color=c, label=label
            )
            ax.set_title("n_features = %d" % features_range[n])
            if n >= 2:
                ax.set_xlabel("n_samples")
            if n % 2 == 0:
                ax.set_ylabel("time (s)")
    fig.subplots_adjust(right=0.8)
    fig.legend(
        [lines[link] for link in sorted(results.keys())],
        sorted(results.keys()),
        loc="center right",
        fontsize=8,
    )
    plt.show()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_sample_without_replacement.py | benchmarks/bench_sample_without_replacement.py | """
Benchmarks for sampling without replacement of integer.
"""
import gc
import operator
import optparse
import random
import sys
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
from sklearn.utils.random import sample_without_replacement
def compute_time(t_start, delta):
    """Convert a ``datetime.timedelta`` to fractional seconds.

    Parameters
    ----------
    t_start : object
        Unused; kept so the existing call sites remain valid.
    delta : datetime.timedelta
        Measured duration.

    Returns
    -------
    float
        ``delta`` in seconds. ``total_seconds()`` also accounts for the
        ``days`` component, which the previous hand-rolled formula
        (``seconds + microseconds / 1e6``) silently dropped.
    """
    return delta.total_seconds()
def bench_sample(sampling, n_population, n_samples):
    """Time a single call of ``sampling(n_population, n_samples)``.

    Parameters
    ----------
    sampling : callable
        Sampling function with signature (n_population, n_samples).
    n_population : int
        Size of the population to draw from.
    n_samples : int
        Number of samples to draw without replacement.

    Returns
    -------
    float
        Elapsed wall-clock time in seconds.
    """
    gc.collect()
    # start time
    t_start = datetime.now()
    sampling(n_population, n_samples)
    delta = datetime.now() - t_start
    # stop time
    # renamed from ``time`` to avoid shadowing the ``time`` results dict
    # used by the __main__ benchmark loop below
    elapsed = compute_time(t_start, delta)
    return elapsed
if __name__ == "__main__":
    ###########################################################################
    # Option parser
    # NOTE(review): optparse has been deprecated since Python 3.2 in favor of
    # argparse; kept as-is here to preserve the existing CLI behavior.
    ###########################################################################
    op = optparse.OptionParser()
    op.add_option(
        "--n-times",
        dest="n_times",
        default=5,
        type=int,
        help="Benchmark results are average over n_times experiments",
    )
    op.add_option(
        "--n-population",
        dest="n_population",
        default=100000,
        type=int,
        help="Size of the population to sample from.",
    )
    op.add_option(
        "--n-step",
        dest="n_steps",
        default=5,
        type=int,
        help="Number of step interval between 0 and n_population.",
    )
    default_algorithms = (
        "custom-tracking-selection,custom-auto,"
        "custom-reservoir-sampling,custom-pool,"
        "python-core-sample,numpy-permutation"
    )
    op.add_option(
        "--algorithm",
        dest="selected_algorithm",
        default=default_algorithms,
        type=str,
        help=(
            "Comma-separated list of transformer to benchmark. "
            "Default: %default. \nAvailable: %default"
        ),
    )
    # op.add_option("--random-seed",
    #               dest="random_seed", default=13, type=int,
    #               help="Seed used by the random number generators.")
    (opts, args) = op.parse_args()
    if len(args) > 0:
        op.error("this script takes no arguments.")
        sys.exit(1)
    # Validate the requested algorithm names against the known set.
    selected_algorithm = opts.selected_algorithm.split(",")
    for key in selected_algorithm:
        if key not in default_algorithms.split(","):
            raise ValueError(
                'Unknown sampling algorithm "%s" not in (%s).'
                % (key, default_algorithms)
            )
    ###########################################################################
    # List sampling algorithm
    ###########################################################################
    # We assume that sampling algorithm has the following signature:
    # sample(n_population, n_sample)
    #
    sampling_algorithm = {}
    ###########################################################################
    # Set Python core input
    sampling_algorithm["python-core-sample"] = (
        lambda n_population, n_sample: random.sample(range(n_population), n_sample)
    )
    ###########################################################################
    # Set custom automatic method selection
    sampling_algorithm["custom-auto"] = (
        lambda n_population, n_samples, random_state=None: sample_without_replacement(
            n_population, n_samples, method="auto", random_state=random_state
        )
    )
    ###########################################################################
    # Set custom tracking based method
    sampling_algorithm["custom-tracking-selection"] = (
        lambda n_population, n_samples, random_state=None: sample_without_replacement(
            n_population,
            n_samples,
            method="tracking_selection",
            random_state=random_state,
        )
    )
    ###########################################################################
    # Set custom reservoir based method
    sampling_algorithm["custom-reservoir-sampling"] = (
        lambda n_population, n_samples, random_state=None: sample_without_replacement(
            n_population,
            n_samples,
            method="reservoir_sampling",
            random_state=random_state,
        )
    )
    ###########################################################################
    # Set custom pool based method
    sampling_algorithm["custom-pool"] = (
        lambda n_population, n_samples, random_state=None: sample_without_replacement(
            n_population, n_samples, method="pool", random_state=random_state
        )
    )
    ###########################################################################
    # Numpy permutation based
    sampling_algorithm["numpy-permutation"] = (
        lambda n_population, n_sample: np.random.permutation(n_population)[:n_sample]
    )
    ###########################################################################
    # Remove unspecified algorithm
    sampling_algorithm = {
        key: value
        for key, value in sampling_algorithm.items()
        if key in selected_algorithm
    }
    ###########################################################################
    # Perform benchmark
    ###########################################################################
    # time maps algorithm name -> (n_steps, n_times) array of wall-clock runs
    time = {}
    n_samples = np.linspace(start=0, stop=opts.n_population, num=opts.n_steps).astype(
        int
    )
    ratio = n_samples / opts.n_population
    print("Benchmarks")
    print("===========================")
    for name in sorted(sampling_algorithm):
        print("Perform benchmarks for %s..." % name, end="")
        time[name] = np.zeros(shape=(opts.n_steps, opts.n_times))
        for step in range(opts.n_steps):
            for it in range(opts.n_times):
                time[name][step, it] = bench_sample(
                    sampling_algorithm[name], opts.n_population, n_samples[step]
                )
        print("done")
    print("Averaging results...", end="")
    # Average the repetitions down to one mean time per step.
    for name in sampling_algorithm:
        time[name] = np.mean(time[name], axis=1)
    print("done\n")
    # Print results
    ###########################################################################
    print("Script arguments")
    print("===========================")
    arguments = vars(opts)
    print(
        "%s \t | %s "
        % (
            "Arguments".ljust(16),
            "Value".center(12),
        )
    )
    print(25 * "-" + ("|" + "-" * 14) * 1)
    for key, value in arguments.items():
        print("%s \t | %s " % (str(key).ljust(16), str(value).strip().center(12)))
    print("")
    print("Sampling algorithm performance:")
    print("===============================")
    print("Results are averaged over %s repetition(s)." % opts.n_times)
    print("")
    fig = plt.figure("scikit-learn sample w/o replacement benchmark results")
    fig.suptitle("n_population = %s, n_times = %s" % (opts.n_population, opts.n_times))
    ax = fig.add_subplot(111)
    for name in sampling_algorithm:
        ax.plot(ratio, time[name], label=name)
    ax.set_xlabel("ratio of n_sample / n_population")
    ax.set_ylabel("Time (s)")
    ax.legend()
    # Sort legend labels
    handles, labels = ax.get_legend_handles_labels()
    hl = sorted(zip(handles, labels), key=operator.itemgetter(1))
    handles2, labels2 = zip(*hl)
    ax.legend(handles2, labels2, loc=0)
    plt.show()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/benchmarks/bench_kernel_pca_solvers_time_vs_n_components.py | benchmarks/bench_kernel_pca_solvers_time_vs_n_components.py | """
=============================================================
Kernel PCA Solvers comparison benchmark: time vs n_components
=============================================================
This benchmark shows that the approximate solvers provided in Kernel PCA can
help significantly improve its execution speed when an approximate solution
(small `n_components`) is acceptable. In many real-world datasets a few
hundreds of principal components are indeed sufficient enough to capture the
underlying distribution.
Description:
------------
A fixed number of training (default: 2000) and test (default: 1000) samples
with 2 features is generated using the `make_circles` helper method.
KernelPCA models are trained on the training set with an increasing number of
principal components, between 1 and `max_n_compo` (default: 1999), with
`n_compo_grid_size` positions (default: 10). For each value of `n_components`
to try, KernelPCA models are trained for the various possible `eigen_solver`
values. The execution times are displayed in a plot at the end of the
experiment.
What you can observe:
---------------------
When the number of requested principal components is small, the dense solver
takes more time to complete, while the randomized method returns similar
results with shorter execution times.
Going further:
--------------
You can adjust `max_n_compo` and `n_compo_grid_size` if you wish to explore a
different range of values for `n_components`.
You can also set `arpack_all=True` to activate arpack solver for large number
of components (this takes more time).
"""
import time

import matplotlib.pyplot as plt
import numpy as np
from numpy.testing import assert_array_almost_equal

from sklearn.datasets import make_circles
from sklearn.decomposition import KernelPCA

print(__doc__)

# 1- Design the Experiment
# ------------------------
n_train, n_test = 2000, 1000  # the sample sizes to use
max_n_compo = 1999  # max n_components to try
n_compo_grid_size = 10  # nb of positions in the grid to try
# generate the grid: n_components values spaced logarithmically in [1, max_n_compo]
n_compo_range = [
    np.round(np.exp((x / (n_compo_grid_size - 1)) * np.log(max_n_compo)))
    for x in range(0, n_compo_grid_size)
]
n_iter = 3  # the number of times each experiment will be repeated
arpack_all = False  # set to True if you wish to run arpack for all n_compo

# 2- Generate random data
# -----------------------
n_features = 2
X, y = make_circles(
    n_samples=(n_train + n_test), factor=0.3, noise=0.05, random_state=0
)
X_train, X_test = X[:n_train, :], X[n_train:, :]

# 3- Benchmark
# ------------
# init: NaN-filled timing arrays so that skipped runs (e.g. arpack for large
# n_components) simply stay absent from the final plot
ref_time = np.empty((len(n_compo_range), n_iter)) * np.nan
a_time = np.empty((len(n_compo_range), n_iter)) * np.nan
r_time = np.empty((len(n_compo_range), n_iter)) * np.nan
# loop
for j, n_components in enumerate(n_compo_range):
    n_components = int(n_components)
    print("Performing kPCA with n_components = %i" % n_components)

    # A- reference (dense)
    print(" - dense solver")
    for i in range(n_iter):
        start_time = time.perf_counter()
        ref_pred = (
            KernelPCA(n_components, eigen_solver="dense").fit(X_train).transform(X_test)
        )
        ref_time[j, i] = time.perf_counter() - start_time

    # B- arpack (for small number of components only, too slow otherwise)
    if arpack_all or n_components < 100:
        print(" - arpack solver")
        for i in range(n_iter):
            start_time = time.perf_counter()
            a_pred = (
                KernelPCA(n_components, eigen_solver="arpack")
                .fit(X_train)
                .transform(X_test)
            )
            a_time[j, i] = time.perf_counter() - start_time
            # check that the result is still correct despite the approx
            assert_array_almost_equal(np.abs(a_pred), np.abs(ref_pred))

    # C- randomized
    print(" - randomized solver")
    for i in range(n_iter):
        start_time = time.perf_counter()
        r_pred = (
            KernelPCA(n_components, eigen_solver="randomized")
            .fit(X_train)
            .transform(X_test)
        )
        r_time[j, i] = time.perf_counter() - start_time
        # check that the result is still correct despite the approximation
        assert_array_almost_equal(np.abs(r_pred), np.abs(ref_pred))

# Compute statistics for the 3 methods
avg_ref_time = ref_time.mean(axis=1)
std_ref_time = ref_time.std(axis=1)
avg_a_time = a_time.mean(axis=1)
std_a_time = a_time.std(axis=1)
avg_r_time = r_time.mean(axis=1)
std_r_time = r_time.std(axis=1)

# 4- Plots
# --------
fig, ax = plt.subplots(figsize=(12, 8))

# Display 1 plot with error bars per method
ax.errorbar(
    n_compo_range,
    avg_ref_time,
    yerr=std_ref_time,
    marker="x",
    linestyle="",
    color="r",
    label="full",
)
ax.errorbar(
    n_compo_range,
    avg_a_time,
    yerr=std_a_time,
    marker="x",
    linestyle="",
    color="g",
    label="arpack",
)
ax.errorbar(
    n_compo_range,
    avg_r_time,
    yerr=std_r_time,
    marker="x",
    linestyle="",
    color="b",
    label="randomized",
)
ax.legend(loc="upper left")

# customize axes: log scale on x since n_components is sampled logarithmically
ax.set_xscale("log")
ax.set_xlim(1, max(n_compo_range) * 1.1)
ax.set_ylabel("Execution time (s)")
ax.set_xlabel("n_components")
ax.set_title(
    "kPCA Execution time comparison on %i samples with %i "
    "features, according to the choice of `eigen_solver`"
    "" % (n_train, n_features)
)

plt.show()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/build_tools/generate_authors_table.py | build_tools/generate_authors_table.py | """
This script generates an html table of contributors, with names and avatars.
The list is generated from scikit-learn's teams on GitHub, plus a small number
of hard-coded contributors.
The table should be updated for each new inclusion in the teams.
Generating the table requires admin rights.
"""
import getpass
import sys
import time
from os import path
from pathlib import Path
import requests
print("Input user:", file=sys.stderr)
user = input()
token = getpass.getpass("Input access token:\n")
auth = (user, token)
LOGO_URL = "https://avatars2.githubusercontent.com/u/365630?v=4"
REPO_FOLDER = Path(path.abspath(__file__)).parent.parent
def get(url):
    """GET *url* with the module-level auth, retrying on GitHub rate limits.

    Waits 10s, then 30s between attempts; raises on any other HTTP error.
    """
    reply = None
    for pause in (10, 30, 0):
        reply = requests.get(url, auth=auth)
        payload = reply.json()
        hit_rate_limit = "message" in payload and (
            "API rate limit exceeded" in payload["message"]
        )
        if not hit_rate_limit:
            break
        print("API rate limit exceeded, waiting..")
        time.sleep(pause)
    reply.raise_for_status()
    return reply
def get_contributors():
    """Get the list of contributor profiles. Require admin rights.

    Returns a 7-tuple of lists of GitHub profile dicts, each sorted by last
    name: (core_devs, emeritus, contributor_experience_team,
    emeritus_contributor_experience_team, comm_team, emeritus_comm_team,
    documentation_team).
    """
    # get core devs and contributor experience team
    core_devs = []
    documentation_team = []
    contributor_experience_team = []
    comm_team = []
    core_devs_slug = "core-devs"
    contributor_experience_team_slug = "contributor-experience-team"
    comm_team_slug = "communication-team"
    documentation_team_slug = "documentation-team"

    entry_point = "https://api.github.com/orgs/scikit-learn/"

    # Fill each team list from the corresponding GitHub team endpoint.
    for team_slug, lst in zip(
        (
            core_devs_slug,
            contributor_experience_team_slug,
            comm_team_slug,
            documentation_team_slug,
        ),
        (core_devs, contributor_experience_team, comm_team, documentation_team),
    ):
        print(f"Retrieving {team_slug}\n")
        for page in [1, 2]:  # 30 per page
            reply = get(f"{entry_point}teams/{team_slug}/members?page={page}")
            lst.extend(reply.json())

    # get members of scikit-learn on GitHub
    print("Retrieving members\n")
    members = []
    for page in [1, 2, 3]:  # 30 per page
        reply = get(f"{entry_point}members?page={page}")
        members.extend(reply.json())

    # keep only the logins
    core_devs = set(c["login"] for c in core_devs)
    documentation_team = set(c["login"] for c in documentation_team)
    contributor_experience_team = set(c["login"] for c in contributor_experience_team)
    comm_team = set(c["login"] for c in comm_team)
    members = set(c["login"] for c in members)

    # add missing contributors with GitHub accounts
    members |= {"dubourg", "mbrucher", "thouis", "jarrodmillman"}
    # add missing contributors without GitHub accounts
    members |= {"Angel Soler Gollonet"}
    # remove CI bots
    members -= {"sklearn-ci", "sklearn-wheels", "sklearn-lgtm"}
    contributor_experience_team -= (
        core_devs  # remove ogrisel from contributor_experience_team
    )

    # Emeritus contributors: every known member not in any active team.
    emeritus = (
        members
        - core_devs
        - contributor_experience_team
        - comm_team
        - documentation_team
    )

    # hard coded
    emeritus_contributor_experience_team = {
        "cmarmo",
    }
    emeritus_comm_team = {"reshamas"}

    # Up-to-now, we can subtract the team emeritus from the original emeritus
    emeritus -= emeritus_contributor_experience_team | emeritus_comm_team

    comm_team -= {"reshamas"}  # in the comm team but not on the web page

    # get profiles from GitHub
    core_devs = [get_profile(login) for login in core_devs]
    emeritus = [get_profile(login) for login in emeritus]
    contributor_experience_team = [
        get_profile(login) for login in contributor_experience_team
    ]
    emeritus_contributor_experience_team = [
        get_profile(login) for login in emeritus_contributor_experience_team
    ]
    comm_team = [get_profile(login) for login in comm_team]
    emeritus_comm_team = [get_profile(login) for login in emeritus_comm_team]
    documentation_team = [get_profile(login) for login in documentation_team]

    # sort by last name
    core_devs = sorted(core_devs, key=key)
    emeritus = sorted(emeritus, key=key)
    contributor_experience_team = sorted(contributor_experience_team, key=key)
    emeritus_contributor_experience_team = sorted(
        emeritus_contributor_experience_team, key=key
    )
    documentation_team = sorted(documentation_team, key=key)
    comm_team = sorted(comm_team, key=key)
    emeritus_comm_team = sorted(emeritus_comm_team, key=key)

    return (
        core_devs,
        emeritus,
        contributor_experience_team,
        emeritus_contributor_experience_team,
        comm_team,
        emeritus_comm_team,
        documentation_team,
    )
def get_profile(login):
    """Fetch the GitHub profile dict for *login*, patching missing names."""
    print("get profile for %s" % (login,))
    try:
        profile = get("https://api.github.com/users/%s" % login).json()
    except requests.exceptions.HTTPError:
        # No reachable profile: fall back to a placeholder entry.
        return dict(name=login, avatar_url=LOGO_URL, html_url="")

    if profile["name"] is None:
        profile["name"] = profile["login"]

    # Some accounts lack a usable full name; patch the known ones.
    missing_names = {
        "bthirion": "Bertrand Thirion",
        "dubourg": "Vincent Dubourg",
        "Duchesnay": "Edouard Duchesnay",
        "Lars": "Lars Buitinck",
        "MechCoder": "Manoj Kumar",
    }
    profile["name"] = missing_names.get(profile["name"], profile["name"])
    return profile
def key(profile):
    """Sorting key: lowercase last name first, then the remaining name parts."""
    parts = profile["name"].lower().split(" ")
    parts.insert(0, parts.pop())
    return " ".join(parts)
def generate_table(contributors):
    """Render *contributors* as an rST/HTML grid of linked avatars and names."""
    header = [
        ".. raw :: html\n",
        " <!-- Generated by generate_authors_table.py -->",
        ' <div class="sk-authors-container">',
        " <style>",
        " img.avatar {border-radius: 10px;}",
        " </style>",
    ]
    cells = []
    for contributor in contributors:
        cells.append(" <div>")
        cells.append(
            " <a href='%s'><img src='%s' class='avatar' /></a> <br />"
            % (contributor["html_url"], contributor["avatar_url"])
        )
        cells.append(" <p>%s</p>" % (contributor["name"],))
        cells.append(" </div>")
    footer = [" </div>"]
    return "\n".join(header + cells + footer) + "\n"
def generate_list(contributors):
    """Render *contributors* as a plain rST bullet list of names."""
    return "\n".join("- %s" % (c["name"],) for c in contributors) + "\n"
if __name__ == "__main__":
(
core_devs,
emeritus,
contributor_experience_team,
emeritus_contributor_experience_team,
comm_team,
emeritus_comm_team,
documentation_team,
) = get_contributors()
print("Generating rst files")
with open(
REPO_FOLDER / "doc" / "maintainers.rst", "w+", encoding="utf-8"
) as rst_file:
rst_file.write(generate_table(core_devs))
with open(
REPO_FOLDER / "doc" / "maintainers_emeritus.rst", "w+", encoding="utf-8"
) as rst_file:
rst_file.write(generate_list(emeritus))
with open(
REPO_FOLDER / "doc" / "contributor_experience_team.rst", "w+", encoding="utf-8"
) as rst_file:
rst_file.write(generate_table(contributor_experience_team))
with open(
REPO_FOLDER / "doc" / "contributor_experience_team_emeritus.rst",
"w+",
encoding="utf-8",
) as rst_file:
rst_file.write(generate_list(emeritus_contributor_experience_team))
with open(
REPO_FOLDER / "doc" / "communication_team.rst", "w+", encoding="utf-8"
) as rst_file:
rst_file.write(generate_table(comm_team))
with open(
REPO_FOLDER / "doc" / "communication_team_emeritus.rst", "w+", encoding="utf-8"
) as rst_file:
rst_file.write(generate_list(emeritus_comm_team))
with open(
REPO_FOLDER / "doc" / "documentation_team.rst", "w+", encoding="utf-8"
) as rst_file:
rst_file.write(generate_table(documentation_team))
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/build_tools/check-meson-openmp-dependencies.py | build_tools/check-meson-openmp-dependencies.py | """
Check that OpenMP dependencies are correctly defined in meson.build files.
This is based on trying to make sure the following two things match:
- the Cython files using OpenMP (based on a git grep regex)
- the Cython extension modules that are built with OpenMP compiler flags (based
on meson introspect json output)
"""
import json
import re
import subprocess
from pathlib import Path
def has_source_openmp_flags(target_source):
    """True if any compile/link argument of *target_source* mentions OpenMP."""
    for arg in target_source["parameters"]:
        if "openmp" in arg:
            return True
    return False
def has_openmp_flags(target):
    """Return whether target sources use OpenMP flags.

    Make sure that both compiler and linker source use OpenMP.
    Look at `get_meson_info` docstring to see what `target` looks like.
    """
    sources = target["target_sources"]
    if not any(has_source_openmp_flags(source) for source in sources):
        return False

    # An OpenMP-enabled target is expected to have exactly one compiler
    # source and one linker source, and both must carry the OpenMP flag.
    assert len(sources) == 2
    compiler_source, linker_source = sources
    assert "compiler" in compiler_source
    assert "linker" in linker_source

    compiler_openmp = any("openmp" in arg for arg in compiler_source["parameters"])
    linker_openmp = any("openmp" in arg for arg in linker_source["parameters"])
    assert compiler_openmp == linker_openmp
    return compiler_openmp
def get_canonical_name_meson(target, build_path):
    """Return a canonical name for *target* based on its shared library path.

    The result can be matched directly against `get_git_grep_info` output.
    """
    # Exactly one generated shared library is expected per target.
    assert len(target["filename"]) == 1
    lib_path = Path(target["filename"][0])
    # POSIX-style path relative to the build dir, to match git grep output
    # on Windows too.
    rel = lib_path.relative_to(build_path.absolute()).as_posix()
    # Strip the interpreter-specific suffix: ".cpython-..." on POSIX,
    # ".cp312-..." style on Windows.
    return re.sub(r"\.(cpython|cp\d+)-.+", "", str(rel))
def get_canonical_name_git_grep(filename):
    """Drop the ``.pyx`` / ``.pyx.tp`` extension so the name matches
    `get_meson_info` output."""
    stripped = re.sub(r"\.pyx(\.tp)?", "", filename)
    return stripped
def get_meson_info():
    """Return names of extensions that use OpenMP based on meson introspect output.

    The meson introspect json info is a list of targets where a target is a dict
    that looks like this (parts not used in this script are not shown for
    simplicity)::

        {
          'name': '_k_means_elkan.cpython-312-x86_64-linux-gnu',
          'filename': [
            '<meson_build_dir>/sklearn/cluster/_k_means_elkan.cpython-312-x86_64-linux-gnu.so'
          ],
          'target_sources': [
            {
              'compiler': ['ccache', 'cc'],
              'parameters': [
                '-Wall',
                '-std=c11',
                '-fopenmp',
                ...
              ],
              ...
            },
            {
              'linker': ['cc'],
              'parameters': [
                '-shared',
                '-fPIC',
                '-fopenmp',
                ...
              ]
            }
          ]
        }
    """
    build_path = Path("build/introspect")
    # Configure a throwaway build dir so introspection reflects current meson.build
    subprocess.check_call(["meson", "setup", build_path, "--reconfigure"])

    json_out = subprocess.check_output(
        ["meson", "introspect", build_path, "--targets"], text=True
    )
    target_list = json.loads(json_out)
    # Keep only targets whose compile AND link flags enable OpenMP.
    meson_targets = [target for target in target_list if has_openmp_flags(target)]

    return [get_canonical_name_meson(each, build_path) for each in meson_targets]
def get_git_grep_info():
    """Return names of extensions that use OpenMP based on git grep regex."""
    filenames = subprocess.check_output(
        ["git", "grep", "-lP", "cython.*parallel|_openmp_helpers"], text=True
    ).splitlines()
    # Only Cython sources are relevant; drop any other grep hits.
    return [
        get_canonical_name_git_grep(name) for name in filenames if ".pyx" in name
    ]
def main():
    """Compare OpenMP usage seen by meson vs git grep; raise on any mismatch."""
    from_meson = set(get_meson_info())
    from_git_grep = set(get_git_grep_info())

    only_in_git_grep = from_git_grep - from_meson
    only_in_meson = from_meson - from_git_grep

    problems = []
    if only_in_git_grep:
        listing = "\n".join([f" {each}" for each in sorted(only_in_git_grep)])
        problems.append(
            "Some Cython files use OpenMP,"
            " but their meson.build is missing the openmp_dep dependency:\n"
            f"{listing}\n\n"
        )

    if only_in_meson:
        listing = "\n".join([f" {each}" for each in sorted(only_in_meson)])
        problems.append(
            "Some Cython files do not use OpenMP,"
            " you should remove openmp_dep from their meson.build:\n"
            f"{listing}\n\n"
        )

    if from_meson != from_git_grep:
        msg = "".join(problems)
        raise ValueError(
            f"Some issues have been found in Meson OpenMP dependencies:\n\n{msg}"
        )


if __name__ == "__main__":
    main()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/build_tools/get_comment.py | build_tools/get_comment.py | # This script is used to generate a comment for a PR when linting issues are
# detected. It is used by the `Comment on failed linting` GitHub Action.
import os
import re
from github import Auth, Github, GithubException
def get_versions(versions_file):
    """Get the versions of the packages used in the linter job.

    Parameters
    ----------
    versions_file : str
        Path to a file of ``name=version`` lines.

    Returns
    -------
    versions : dict
        Mapping of package name to version string.
    """
    versions = {}
    with open(versions_file, "r") as fh:
        for line in fh:
            name, version = line.strip().split("=")
            versions[name] = version
    return versions
def get_step_message(log, start, end, title, message, details):
    """Build the comment section for one linting step.

    Parameters
    ----------
    log : str
        The log of the linting job.
    start, end : str
        Markers delimiting this step's output in the log.
    title : str
        Section title.
    message : str
        Text placed at the beginning of the section.
    details : bool
        Whether to include the log excerpt in a ``<details>`` block.

    Returns
    -------
    str
        The section text, or ``""`` when the step produced no output.
    """
    if end not in log:
        return ""
    result = (
        f"-----------------------------------------------\n### {title}\n\n{message}\n\n"
    )
    if not details:
        return result
    # Slice out the text between the markers, trimming the adjacent newlines.
    excerpt = log[log.find(start) + len(start) + 1 : log.find(end) - 1]
    return result + "<details>\n\n```\n" + excerpt + "\n```\n\n</details>\n\n"
def get_message(log_file, repo_str, pr_number, sha, run_id, details, versions):
    """Build the markdown comment summarizing linting issues found in the log.

    Parameters
    ----------
    log_file : str
        Path to the linter job log.
    repo_str : str
        ``owner/repo`` slug used to build links.
    pr_number : int
        Pull request number.
    sha : str
        Commit SHA the lint ran against.
    run_id : str
        GitHub Actions run id, used for the CI link.
    details : bool
        Whether to embed per-step log excerpts.
    versions : dict
        Tool name -> version, as returned by `get_versions`.

    Returns
    -------
    str or None
        The comment body, or ``None`` when no linting issues were detected.
    """
    with open(log_file, "r") as f:
        log = f.read()

    # Footer linking the comment to the exact commit and CI run.
    sub_text = (
        "\n\n<sub> _Generated for commit:"
        f" [{sha[:7]}](https://github.com/{repo_str}/pull/{pr_number}/commits/{sha}). "
        "Link to the linter CI: [here]"
        f"(https://github.com/{repo_str}/actions/runs/{run_id})_ </sub>"
    )

    # The sentinel is printed at the very end of a successful linter run; its
    # absence means the job itself crashed rather than found issues.
    if "### Linting completed ###" not in log:
        return (
            "## ❌ Linting issues\n\n"
            "There was an issue running the linter job. Please update with "
            "`upstream/main` ([link]("
            "https://scikit-learn.org/dev/developers/contributing.html"
            "#how-to-contribute)) and push the changes. If you already have done "
            "that, please send an empty commit with `git commit --allow-empty` "
            "and push the changes to trigger the CI.\n\n" + sub_text
        )

    message = ""

    # ruff check
    message += get_step_message(
        log,
        start="### Running the ruff linter ###",
        end="Problems detected by ruff check",
        title="`ruff check`",
        message=(
            "`ruff` detected issues. Please run "
            "`ruff check --fix --output-format=full` locally, fix the remaining "
            "issues, and push the changes. Here you can see the detected issues. Note "
            f"that the installed `ruff` version is `ruff={versions['ruff']}`."
        ),
        details=details,
    )

    # ruff format
    message += get_step_message(
        log,
        start="### Running the ruff formatter ###",
        end="Problems detected by ruff format",
        title="`ruff format`",
        message=(
            "`ruff` detected issues. Please run `ruff format` locally and push "
            "the changes. Here you can see the detected issues. Note that the "
            f"installed `ruff` version is `ruff={versions['ruff']}`."
        ),
        details=details,
    )

    # mypy
    message += get_step_message(
        log,
        start="### Running mypy ###",
        end="Problems detected by mypy",
        title="`mypy`",
        message=(
            "`mypy` detected issues. Please fix them locally and push the changes. "
            "Here you can see the detected issues. Note that the installed `mypy` "
            f"version is `mypy={versions['mypy']}`."
        ),
        details=details,
    )

    # cython-lint
    message += get_step_message(
        log,
        start="### Running cython-lint ###",
        end="Problems detected by cython-lint",
        title="`cython-lint`",
        message=(
            "`cython-lint` detected issues. Please fix them locally and push "
            "the changes. Here you can see the detected issues. Note that the "
            "installed `cython-lint` version is "
            f"`cython-lint={versions['cython-lint']}`."
        ),
        details=details,
    )

    # deprecation order
    message += get_step_message(
        log,
        start="### Checking for bad deprecation order ###",
        end="Problems detected by deprecation order check",
        title="Deprecation Order",
        message=(
            "Deprecation order check detected issues. Please fix them locally and "
            "push the changes. Here you can see the detected issues."
        ),
        details=details,
    )

    # doctest directives
    message += get_step_message(
        log,
        start="### Checking for default doctest directives ###",
        end="Problems detected by doctest directive check",
        title="Doctest Directives",
        message=(
            "doctest directive check detected issues. Please fix them locally and "
            "push the changes. Here you can see the detected issues."
        ),
        details=details,
    )

    # joblib imports
    message += get_step_message(
        log,
        start="### Checking for joblib imports ###",
        end="Problems detected by joblib import check",
        title="Joblib Imports",
        message=(
            "`joblib` import check detected issues. Please fix them locally and "
            "push the changes. Here you can see the detected issues."
        ),
        details=details,
    )

    if not message:
        # no issues detected, the linting succeeded
        return None

    if not details:
        # This happens if posting the log fails, which happens if the log is too
        # long. Typically, this happens if the PR branch hasn't been updated
        # since we've introduced import sorting.
        branch_not_updated = (
            "_Merging with `upstream/main` might fix / improve the issues if you "
            "haven't done that since 21.06.2023._\n\n"
        )
    else:
        branch_not_updated = ""

    message = (
        "## ❌ Linting issues\n\n"
        + branch_not_updated
        + "This PR is introducing linting issues. Here's a summary of the issues. "
        + "Note that you can avoid having linting issues by enabling `pre-commit` "
        + "hooks. Instructions to enable them can be found [here]("
        + "https://scikit-learn.org/dev/developers/development_setup.html#set-up-pre-commit)"
        + ".\n\n"
        + "You can see the details of the linting issues under the `lint` job [here]"
        + f"(https://github.com/{repo_str}/actions/runs/{run_id})\n\n"
        + message
        + sub_text
    )

    return message
def find_lint_bot_comments(issue):
    """Return the bot's linting-failure comment on *issue*, or ``None``."""
    failed_marker = "❌ Linting issues"
    for comment in issue.get_comments():
        # Only consider comments authored by the GitHub Actions bot.
        if comment.user.login == "github-actions[bot]" and failed_marker in comment.body:
            return comment
    return None
def create_or_update_comment(comment, message, issue):
    """Edit the existing bot comment when present, otherwise post a new one."""
    if comment is None:
        print("Creating new comment")
        issue.create_comment(message)
    else:
        print("Updating existing comment")
        comment.edit(message)
def update_linter_fails_label(linting_failed, issue):
    """Add or remove the label indicating that the linting has failed."""
    label = "CI:Linter failure"
    if linting_failed:
        issue.add_to_labels(label)
        return
    try:
        issue.remove_from_labels(label)
    except GithubException as exception:
        # Removing a label the issue never carried raises; that case is fine
        # and deliberately ignored.
        if exception.message != "Label does not exist":
            raise
if __name__ == "__main__":
repo_str = os.environ["GITHUB_REPOSITORY"]
token = os.environ["GITHUB_TOKEN"]
pr_number = os.environ["PR_NUMBER"]
sha = os.environ["BRANCH_SHA"]
log_file = os.environ["LOG_FILE"]
run_id = os.environ["RUN_ID"]
versions_file = os.environ["VERSIONS_FILE"]
versions = get_versions(versions_file)
for var, val in [
("GITHUB_REPOSITORY", repo_str),
("GITHUB_TOKEN", token),
("PR_NUMBER", pr_number),
("LOG_FILE", log_file),
("RUN_ID", run_id),
]:
if not val:
raise ValueError(f"The following environment variable is not set: {var}")
if not re.match(r"\d+$", pr_number):
raise ValueError(f"PR_NUMBER should be a number, got {pr_number!r} instead")
pr_number = int(pr_number)
gh = Github(auth=Auth.Token(token))
repo = gh.get_repo(repo_str)
issue = repo.get_issue(number=pr_number)
message = get_message(
log_file,
repo_str=repo_str,
pr_number=pr_number,
sha=sha,
run_id=run_id,
details=True,
versions=versions,
)
update_linter_fails_label(
linting_failed=message is not None,
issue=issue,
)
comment = find_lint_bot_comments(issue)
if message is None: # linting succeeded
if comment is not None:
print("Deleting existing comment.")
comment.delete()
else:
try:
create_or_update_comment(comment, message, issue)
print(message)
except GithubException:
# The above fails if the message is too long. In that case, we
# try again without the details.
message = get_message(
log_file,
repo=repo,
pr_number=pr_number,
sha=sha,
run_id=run_id,
details=False,
versions=versions,
)
create_or_update_comment(comment, message, issue)
print(message)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/build_tools/update_environments_and_lock_files.py | build_tools/update_environments_and_lock_files.py | """Script to update CI environment files and associated lock files.
To run it you need to be in the root folder of the scikit-learn repo:
python build_tools/update_environments_and_lock_files.py
Two scenarios where this script can be useful:
- make sure that the latest versions of all the dependencies are used in the CI.
There is a scheduled workflow that does this, see
.github/workflows/update-lock-files.yml. This is still useful to run this
script when the automated PR fails and for example some packages need to
be pinned. You can add the pins to this script, run it, and open a PR with
the changes.
- bump minimum dependencies in sklearn/_min_dependencies.py. Running this
script will update both the CI environment files and associated lock files.
You can then open a PR with the changes.
- pin some packages to an older version by adding them to the
default_package_constraints variable. This is useful when regressions are
introduced in our dependencies, this has happened for example with pytest 7
and coverage 6.3.
Environments are conda environment.yml or pip requirements.txt. Lock files are
conda-lock lock files or pip-compile requirements.txt.
pip requirements.txt are used when we install some dependencies (e.g. numpy and
scipy) with apt-get and the rest of the dependencies (e.g. pytest and joblib)
with pip.
To run this script you need:
- conda
- conda-lock. The version should match the one used in the CI in
sklearn/_min_dependencies.py
- pip-tools
To only update the environment and lock files for specific builds, you can use
the command line argument `--select-build` which will take a regex. For example,
to only update the documentation builds you can use:
`python build_tools/update_environments_and_lock_files.py --select-build doc`
"""
import json
import logging
import re
import subprocess
import sys
from importlib.metadata import version
from pathlib import Path
import click
from jinja2 import Environment
from packaging.version import Version
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
logger.addHandler(handler)
TRACE = logging.DEBUG - 5
common_dependencies_without_coverage = [
"python",
"numpy",
"blas",
"scipy",
"cython",
"joblib",
"threadpoolctl",
"matplotlib",
"pandas",
"pyamg",
"pytest",
"pytest-xdist",
"pillow",
"pip",
"ninja",
"meson-python",
]
common_dependencies = common_dependencies_without_coverage + [
"pytest-cov",
"coverage",
]
docstring_test_dependencies = ["sphinx", "numpydoc"]
default_package_constraints = {
# TODO: remove once https://github.com/numpy/numpydoc/issues/638 is fixed
# and released.
"numpydoc": "<1.9.0",
# TODO: remove once when we're using the new way to enable coverage in subprocess
# introduced in 7.0.0, see https://github.com/pytest-dev/pytest-cov?tab=readme-ov-file#upgrading-from-pytest-cov-63
"pytest-cov": "<=6.3.0",
}
def remove_from(alist, to_remove):
    """Return a copy of *alist* without the entries listed in *to_remove*."""
    excluded = set(to_remove)
    return [item for item in alist if item not in excluded]
build_metadata_list = [
{
"name": "pylatest_conda_forge_cuda_array-api_linux-64",
"type": "conda",
"tag": "cuda",
"folder": "build_tools/github",
"platform": "linux-64",
"channels": ["conda-forge", "pytorch", "nvidia"],
"conda_dependencies": common_dependencies
+ [
"ccache",
"pytorch-gpu",
"polars",
"pyarrow",
"cupy",
"array-api-strict",
],
"package_constraints": {
"blas": "[build=mkl]",
},
},
{
"name": "pylatest_conda_forge_mkl_linux-64",
"type": "conda",
"tag": "main-ci",
"folder": "build_tools/azure",
"platform": "linux-64",
"channels": ["conda-forge"],
"conda_dependencies": common_dependencies
+ [
"ccache",
"pytorch",
"pytorch-cpu",
"polars",
"pyarrow",
"array-api-strict",
"scipy-doctest",
"pytest-playwright",
],
"package_constraints": {
"blas": "[build=mkl]",
},
},
{
"name": "pylatest_conda_forge_osx-arm64",
"type": "conda",
"tag": "main-ci",
"folder": "build_tools/azure",
"platform": "osx-arm64",
"channels": ["conda-forge"],
"conda_dependencies": common_dependencies
+ [
"ccache",
"compilers",
"llvm-openmp",
"pytorch",
"pytorch-cpu",
"array-api-strict",
],
},
{
"name": "pylatest_conda_forge_mkl_no_openmp",
"type": "conda",
"tag": "main-ci",
"folder": "build_tools/azure",
"platform": "osx-64",
"channels": ["conda-forge"],
"conda_dependencies": common_dependencies + ["ccache"],
"package_constraints": {
"blas": "[build=mkl]",
},
},
{
"name": "pymin_conda_forge_openblas_min_dependencies",
"type": "conda",
"tag": "main-ci",
"folder": "build_tools/azure",
"platform": "linux-64",
"channels": ["conda-forge"],
"conda_dependencies": remove_from(common_dependencies, ["pandas"])
+ ["ccache", "polars", "pyarrow"],
# TODO: move pandas to conda_dependencies when pandas 1.5.1 is the minimum
# supported version
"pip_dependencies": ["pandas"],
"package_constraints": {
"python": "3.11",
"blas": "[build=openblas]",
"numpy": "min",
"scipy": "min",
"matplotlib": "min",
"cython": "min",
"joblib": "min",
"threadpoolctl": "min",
"meson-python": "min",
"pandas": "min",
"polars": "min",
"pyamg": "min",
"pyarrow": "min",
},
},
{
"name": "pymin_conda_forge_openblas_ubuntu_2204",
"type": "conda",
"tag": "main-ci",
"folder": "build_tools/azure",
"platform": "linux-64",
"channels": ["conda-forge"],
"conda_dependencies": (
remove_from(common_dependencies_without_coverage, ["matplotlib"])
+ docstring_test_dependencies
+ ["ccache"]
),
"package_constraints": {
"python": "3.11",
"blas": "[build=openblas]",
},
},
{
"name": "pylatest_pip_openblas_pandas",
"type": "conda",
"tag": "main-ci",
"folder": "build_tools/azure",
"platform": "linux-64",
"channels": ["conda-forge"],
"conda_dependencies": ["python", "ccache"],
"package_constraints": {
# TODO: remove this constraint once pyamg provide binary
# wheels for Python 3.14 (or later) on PyPI.
"python": "3.13",
},
"pip_dependencies": (
remove_from(common_dependencies, ["python", "blas", "pip"])
+ docstring_test_dependencies
# Test with some optional dependencies
+ ["lightgbm"]
# Test array API on CPU without PyTorch
+ ["array-api-strict"]
# doctests dependencies
+ ["scipy-doctest"]
),
},
{
"name": "pylatest_pip_scipy_dev",
"type": "conda",
"tag": "scipy-dev",
"folder": "build_tools/azure",
"platform": "linux-64",
"channels": ["conda-forge"],
"conda_dependencies": ["python", "ccache"],
"pip_dependencies": (
remove_from(
common_dependencies,
[
"python",
"blas",
"matplotlib",
"pyamg",
# all the dependencies below have a development version
# installed in the CI, so they can be removed from the
# environment.yml
"numpy",
"scipy",
"pandas",
"cython",
"joblib",
"pillow",
],
)
+ ["pooch"]
+ docstring_test_dependencies
# python-dateutil is a dependency of pandas and pandas is removed from
# the environment.yml. Adding python-dateutil so it is pinned
+ ["python-dateutil"]
),
},
{
"name": "pylatest_free_threaded",
"type": "conda",
"tag": "free-threaded",
"folder": "build_tools/azure",
"platform": "linux-64",
"channels": ["conda-forge"],
"conda_dependencies": [
"python-freethreading",
"meson-python",
"cython",
"numpy",
"scipy",
"joblib",
"threadpoolctl",
"pytest",
"pytest-run-parallel",
"ccache",
"pip",
],
},
{
"name": "pymin_conda_forge_openblas",
"type": "conda",
"tag": "main-ci",
"folder": "build_tools/azure",
"platform": "win-64",
"channels": ["conda-forge"],
"conda_dependencies": remove_from(common_dependencies, ["pandas", "pyamg"])
+ [
"wheel",
"pip",
],
"package_constraints": {
"python": "3.11",
"blas": "[build=openblas]",
},
},
{
"name": "doc_min_dependencies",
"type": "conda",
"tag": "main-ci",
"folder": "build_tools/circle",
"platform": "linux-64",
"channels": ["conda-forge"],
"conda_dependencies": remove_from(
common_dependencies_without_coverage, ["pandas"]
)
+ [
"scikit-image",
"seaborn",
"memory_profiler",
"compilers",
"sphinx",
"sphinx-gallery",
"sphinx-copybutton",
"numpydoc",
"sphinx-prompt",
"plotly",
"polars",
"pooch",
"sphinxext-opengraph",
"sphinx-remove-toctrees",
"sphinx-design",
"pydata-sphinx-theme",
"towncrier",
],
"pip_dependencies": [
"sphinxcontrib-sass",
# TODO: move pandas to conda_dependencies when pandas 1.5.1 is the minimum
# supported version
"pandas",
],
"package_constraints": {
"python": "3.11",
"numpy": "min",
"scipy": "min",
"matplotlib": "min",
"cython": "min",
"scikit-image": "min",
"sphinx": "min",
"pandas": "min",
"sphinx-gallery": "min",
"sphinx-copybutton": "min",
"numpydoc": "min",
"sphinx-prompt": "min",
"sphinxext-opengraph": "min",
"plotly": "min",
"polars": "min",
"pooch": "min",
"pyamg": "min",
"sphinx-design": "min",
"sphinxcontrib-sass": "min",
"sphinx-remove-toctrees": "min",
"pydata-sphinx-theme": "min",
"towncrier": "min",
},
},
{
"name": "doc",
"type": "conda",
"tag": "main-ci",
"folder": "build_tools/circle",
"platform": "linux-64",
"channels": ["conda-forge"],
"conda_dependencies": common_dependencies_without_coverage
+ [
"scikit-image",
"seaborn",
"memory_profiler",
"compilers",
"sphinx",
"sphinx-gallery",
"sphinx-copybutton",
"numpydoc",
"sphinx-prompt",
"plotly",
"polars",
"pooch",
"sphinxext-opengraph",
"sphinx-remove-toctrees",
"sphinx-design",
"pydata-sphinx-theme",
"towncrier",
"jupyterlite-sphinx",
"jupyterlite-pyodide-kernel",
],
"pip_dependencies": [
"sphinxcontrib-sass",
],
"package_constraints": {
"python": "3.11",
# Pinned while https://github.com/pola-rs/polars/issues/25039 is
# not fixed.
"polars": "1.34.0",
},
},
{
"name": "pymin_conda_forge_arm",
"type": "conda",
"tag": "main-ci",
"folder": "build_tools/github",
"platform": "linux-aarch64",
"channels": ["conda-forge"],
"conda_dependencies": remove_from(common_dependencies, ["pandas", "pyamg"])
+ ["pip", "ccache"],
"package_constraints": {
"python": "3.11",
# The following is needed to avoid getting libnvpl build for blas for some
# reason.
"blas": "[build=openblas]",
},
},
{
"name": "debian_32bit",
"type": "pip",
"tag": "main-ci",
"folder": "build_tools/azure",
"pip_dependencies": [
"cython",
"joblib",
"threadpoolctl",
"pytest",
"pytest-xdist",
"pytest-cov",
"ninja",
"meson-python",
],
# Python version from the python3 APT package in the debian-32 docker
# image.
"python_version": "3.12.5",
},
{
"name": "ubuntu_atlas",
"type": "pip",
"tag": "main-ci",
"folder": "build_tools/azure",
"pip_dependencies": [
"cython",
"joblib",
"threadpoolctl",
"pytest",
"pytest-xdist",
"ninja",
"meson-python",
],
"package_constraints": {
"joblib": "min",
"threadpoolctl": "min",
"cython": "min",
},
"python_version": "3.12.3",
},
]
def execute_command(command_list):
    """Run *command_list* in a subprocess and return its decoded stdout.

    Raises
    ------
    RuntimeError
        If the command exits with a non-zero status; the message includes
        the exit code, the command line and both captured output streams.
    """
    command_str = " ".join(command_list)
    logger.debug(command_str)
    process = subprocess.Popen(
        command_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    stdout_bytes, stderr_bytes = process.communicate()
    # Decode with replacement so undecodable bytes never mask the real error.
    stdout = stdout_bytes.decode(errors="replace")
    stderr = stderr_bytes.decode(errors="replace")
    if process.returncode != 0:
        raise RuntimeError(
            "Command exited with non-zero exit code.\n"
            "Exit code: {}\n"
            "Command:\n{}\n"
            "stdout:\n{}\n"
            "stderr:\n{}\n".format(process.returncode, command_str, stdout, stderr)
        )
    logger.log(TRACE, stdout)
    return stdout
def get_package_with_constraint(package_name, build_metadata, uses_pip=False):
    """Return *package_name* with its version constraint appended, if any.

    The constraint is looked up first in the build's ``package_constraints``
    mapping and then in the module-level ``default_package_constraints``.
    The special value ``"min"`` resolves to the minimum supported version
    declared in ``sklearn/_min_dependencies.py`` and is flagged with a
    trailing ``# min`` comment in the generated file.
    """
    per_build_constraints = build_metadata.get("package_constraints") or {}
    constraint = per_build_constraints.get(
        package_name
    ) or default_package_constraints.get(package_name)

    if constraint is None:
        return package_name

    comment = ""
    if constraint == "min":
        constraint = execute_command(
            [sys.executable, "sklearn/_min_dependencies.py", package_name]
        ).strip()
        comment = " # min"

    # A bare version like "1.2.3" gets an equality pin; anything else
    # (e.g. "[build=openblas]" or "<2") is appended verbatim.
    if re.match(r"\d[.\d]*", constraint):
        equality = "==" if uses_pip else "="
        constraint = equality + constraint

    return f"{package_name}{constraint}{comment}"
# Jinja2 environment used to render the environment.yml / requirements.txt
# templates; trim_blocks/lstrip_blocks keep the generated files free of
# stray whitespace from the template's {% ... %} tags.
environment = Environment(trim_blocks=True, lstrip_blocks=True)
# Expose the constraint resolver as a template filter: {{ dep | get_package_with_constraint(...) }}
environment.filters["get_package_with_constraint"] = get_package_with_constraint
def get_conda_environment_content(build_metadata):
    """Render the conda environment.yml content for one build.

    The template whitespace is significant in the generated YAML; the
    ``get_package_with_constraint`` filter appends version pins.
    """
    template = environment.from_string(
        """
# DO NOT EDIT: this file is generated from the specification found in the
# following script to centralize the configuration for CI builds:
# build_tools/update_environments_and_lock_files.py
channels:
  {% for channel in build_metadata['channels'] %}
  - {{ channel }}
  {% endfor %}
dependencies:
  {% for conda_dep in build_metadata['conda_dependencies'] %}
  - {{ conda_dep | get_package_with_constraint(build_metadata) }}
  {% endfor %}
  {% if build_metadata['pip_dependencies'] %}
  - pip
  - pip:
  {% for pip_dep in build_metadata.get('pip_dependencies', []) %}
    - {{ pip_dep | get_package_with_constraint(build_metadata, uses_pip=True) }}
  {% endfor %}
  {% endif %}""".strip()
    )
    return template.render(build_metadata=build_metadata)
def write_conda_environment(build_metadata):
    """Render and write ``<name>_environment.yml`` into the build's folder."""
    target = Path(build_metadata["folder"]) / (
        build_metadata["name"] + "_environment.yml"
    )
    logger.debug(target)
    target.write_text(get_conda_environment_content(build_metadata))
def write_all_conda_environments(build_metadata_list):
    """Write the environment.yml file of every conda build in the list."""
    for metadata in build_metadata_list:
        write_conda_environment(metadata)
def conda_lock(environment_path, lock_file_path, platform):
    """Invoke conda-lock to produce an explicit lock file for *platform*."""
    command = ["conda-lock", "lock", "--mamba", "--kind", "explicit"]
    command += ["--platform", platform]
    command += ["--file", str(environment_path)]
    command += ["--filename-template", str(lock_file_path)]
    execute_command(command)
def create_conda_lock_file(build_metadata):
    """Generate the conda lock file for one build from its environment.yml."""
    build_name = build_metadata["name"]
    platform = build_metadata["platform"]
    folder = Path(build_metadata["folder"])
    environment_path = folder / f"{build_name}_environment.yml"
    # Suffix the platform unless the build name already carries it.
    if build_name.endswith(platform):
        basename = build_name
    else:
        basename = f"{build_name}_{platform}"
    conda_lock(environment_path, folder / f"{basename}_conda.lock", platform)
def write_all_conda_lock_files(build_metadata_list):
    """Lock the dependencies of every conda build, logging progress."""
    for metadata in build_metadata_list:
        logger.info(f"# Locking dependencies for {metadata['name']}")
        create_conda_lock_file(metadata)
def get_pip_requirements_content(build_metadata):
    """Render the pip requirements.txt content for one build.

    One requirement per line; the ``get_package_with_constraint`` filter
    appends ``==`` pins where a constraint applies.
    """
    template = environment.from_string(
        """
# DO NOT EDIT: this file is generated from the specification found in the
# following script to centralize the configuration for CI builds:
# build_tools/update_environments_and_lock_files.py
{% for pip_dep in build_metadata['pip_dependencies'] %}
{{ pip_dep | get_package_with_constraint(build_metadata, uses_pip=True) }}
{% endfor %}""".strip()
    )
    return template.render(build_metadata=build_metadata)
def write_pip_requirements(build_metadata):
    """Render and write ``<name>_requirements.txt`` into the build's folder."""
    output_path = Path(build_metadata["folder"]) / (
        build_metadata["name"] + "_requirements.txt"
    )
    logger.debug(output_path)
    output_path.write_text(get_pip_requirements_content(build_metadata))
def write_all_pip_requirements(build_metadata_list):
    """Write the requirements.txt file of every pip build in the list."""
    for metadata in build_metadata_list:
        write_pip_requirements(metadata)
def pip_compile(pip_compile_path, requirements_path, lock_file_path):
    """Run pip-compile to turn a requirements file into a pinned lock file."""
    command = [str(pip_compile_path), "--upgrade"]
    command += [str(requirements_path), "-o", str(lock_file_path)]
    execute_command(command)
def write_pip_lock_file(build_metadata):
    """Generate ``<name>_lock.txt`` from ``<name>_requirements.txt``.

    To make sure that the Python used to create the pip lock file is the
    same as the one used during the CI build where the lock file is used,
    we first create a conda environment with the correct Python version
    and pip-tools, then run that environment's own pip-compile.
    """
    build_name = build_metadata["name"]
    python_version = build_metadata["python_version"]
    environment_name = f"pip-tools-python{python_version}"
    execute_command(
        [
            "conda",
            "create",
            "-c",
            "conda-forge",
            "-n",
            environment_name,  # reuse the name computed above (was duplicated)
            f"python={python_version}",
            "pip-tools",
            "-y",
        ]
    )
    # Resolve the environment's on-disk location to call its pip-compile.
    json_output = execute_command(["conda", "info", "--json"])
    conda_info = json.loads(json_output)
    environment_folder = next(
        each for each in conda_info["envs"] if each.endswith(environment_name)
    )
    environment_path = Path(environment_folder)
    pip_compile_path = environment_path / "bin" / "pip-compile"
    folder_path = Path(build_metadata["folder"])
    requirement_path = folder_path / f"{build_name}_requirements.txt"
    lock_file_path = folder_path / f"{build_name}_lock.txt"
    pip_compile(pip_compile_path, requirement_path, lock_file_path)
def write_all_pip_lock_files(build_metadata_list):
    """Lock the dependencies of every pip build, logging progress."""
    for metadata in build_metadata_list:
        logger.info(f"# Locking dependencies for {metadata['name']}")
        write_pip_lock_file(metadata)
def check_conda_lock_version():
    """Ensure the installed conda-lock matches sklearn/_min_dependencies.py."""
    expected = execute_command(
        [sys.executable, "sklearn/_min_dependencies.py", "conda-lock"]
    ).strip()
    installed = version("conda-lock")
    if installed == expected:
        return
    raise RuntimeError(
        f"Expected conda-lock version: {expected}, got: {installed}"
    )
def check_conda_version():
    """Reject conda versions with broken virtual packages for conda-lock.

    The glibc virtual package issue
    (https://github.com/conda/conda-lock/issues/292) was fixed in conda
    23.1.0 and the osx one
    (https://github.com/conda/conda-lock/issues/408) in conda 23.7.0.
    """
    conda_info = json.loads(execute_command(["conda", "info", "--json"]))
    conda_version = Version(conda_info["conda_version"])
    if Version("22.9.0") < conda_version < Version("23.7"):
        raise RuntimeError(
            f"conda version should be <= 22.9.0 or >= 23.7 got: {conda_version}"
        )
@click.command()
@click.option(
    "--select-build",
    default="",
    help=(
        "Regex to filter the builds we want to update environment and lock files. By"
        " default all the builds are selected."
    ),
)
@click.option(
    "--skip-build",
    default=None,
    help="Regex to skip some builds from the builds selected by --select-build",
)
@click.option(
    "--select-tag",
    default=None,
    help=(
        "Tag to filter the builds, e.g. 'main-ci' or 'scipy-dev'. "
        "This is an additional filtering on top of --select-build."
    ),
)
@click.option(
    "-v",
    "--verbose",
    is_flag=True,
    help="Print commands executed by the script",
)
@click.option(
    "-vv",
    "--very-verbose",
    is_flag=True,
    help="Print output of commands executed by the script",
)
def main(select_build, skip_build, select_tag, verbose, very_verbose):
    """Regenerate environment and lock files for the selected CI builds."""
    if verbose:
        logger.setLevel(logging.DEBUG)
    if very_verbose:
        logger.setLevel(TRACE)
        handler.setLevel(TRACE)
    check_conda_lock_version()
    check_conda_version()
    filtered_build_metadata_list = [
        each for each in build_metadata_list if re.search(select_build, each["name"])
    ]
    if select_tag is not None:
        # Filter the already-selected builds: --select-tag is documented as an
        # additional filter on top of --select-build. (Previously this
        # restarted from the full build list, silently discarding the
        # --select-build filtering.)
        filtered_build_metadata_list = [
            each
            for each in filtered_build_metadata_list
            if each["tag"] == select_tag
        ]
    if skip_build is not None:
        filtered_build_metadata_list = [
            each
            for each in filtered_build_metadata_list
            if not re.search(skip_build, each["name"])
        ]
    selected_build_info = "\n".join(
        f"  - {each['name']}, type: {each['type']}, tag: {each['tag']}"
        for each in filtered_build_metadata_list
    )
    selected_build_message = (
        f"# {len(filtered_build_metadata_list)} selected builds\n{selected_build_info}"
    )
    logger.info(selected_build_message)
    filtered_conda_build_metadata_list = [
        each for each in filtered_build_metadata_list if each["type"] == "conda"
    ]
    if filtered_conda_build_metadata_list:
        logger.info("# Writing conda environments")
        write_all_conda_environments(filtered_conda_build_metadata_list)
        logger.info("# Writing conda lock files")
        write_all_conda_lock_files(filtered_conda_build_metadata_list)
    filtered_pip_build_metadata_list = [
        each for each in filtered_build_metadata_list if each["type"] == "pip"
    ]
    if filtered_pip_build_metadata_list:
        logger.info("# Writing pip requirements")
        write_all_pip_requirements(filtered_pip_build_metadata_list)
        logger.info("# Writing pip lock files")
        write_all_pip_lock_files(filtered_pip_build_metadata_list)


if __name__ == "__main__":
    main()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/build_tools/wheels/check_license.py | build_tools/wheels/check_license.py | """Checks the bundled license is installed with the wheel."""
import platform
import site
from itertools import chain
from pathlib import Path
# Directories where pip installed the wheel under test.
site_packages = site.getsitepackages()
site_packages_path = (Path(p) for p in site_packages)
try:
    # Locate the installed wheel's scikit_learn-<version>.dist-info directory
    # across all site-packages directories.
    distinfo_path = next(
        chain(
            s
            for site_package in site_packages_path
            for s in site_package.glob("scikit_learn-*.dist-info")
        )
    )
except StopIteration as e:
    raise RuntimeError("Unable to find scikit-learn's dist-info") from e
# The wheel must ship the project license ...
license_text = (distinfo_path / "licenses" / "COPYING").read_text()
assert "Copyright (c)" in license_text
# ... including the section that lists the licenses of software bundled in
# the binary distribution (e.g. vendored runtime libraries).
assert (
    "This binary distribution of scikit-learn also bundles the following software"
    in license_text
), f"Unable to find bundled license for {platform.system()}"
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/build_tools/circle/list_versions.py | build_tools/circle/list_versions.py | #!/usr/bin/env python3
# Write the available versions page (--rst) and the version switcher JSON (--json).
# Version switcher see:
# https://pydata-sphinx-theme.readthedocs.io/en/stable/user_guide/version-dropdown.html
# https://pydata-sphinx-theme.readthedocs.io/en/stable/user_guide/announcements.html#announcement-banners
import argparse
import json
import re
import sys
from urllib.request import urlopen
from sklearn.utils.fixes import parse_version
def json_urlread(url):
    """Fetch *url* and decode its body as JSON, logging failures to stderr."""
    try:
        body = urlopen(url).read()
        return json.loads(body.decode("utf8"))
    except Exception:
        print("Error reading", url, file=sys.stderr)
        raise
def human_readable_data_quantity(quantity, multiple=1024):
    """Format a byte count, e.g. ``1536 -> "1.5 KiB"`` (``"KB"`` for multiple=1000).

    https://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
    """
    # KeyError for any multiple other than 1000 (SI) or 1024 (binary).
    unit = {1000: "B", 1024: "iB"}[multiple]
    suffixes = ["B"] + [prefix + unit for prefix in "KMGTPEZY"]
    last_index = len(suffixes) - 1
    for index, suffix in enumerate(suffixes):
        if quantity < multiple or index == last_index:
            if index == 0:
                # Plain byte counts are printed as integers.
                return "%d %s" % (quantity, suffix)
            return "%.1f %s" % (quantity, suffix)
        quantity /= multiple
def get_file_extension(version):
    """Return the doc archive extension ("zip" or "pdf") for *version*."""
    if "dev" in version:
        # The 'dev' branch should be explicitly handled
        return "zip"
    # Downloadable documentation switched from PDF to ZIP in release 0.24.
    return "zip" if parse_version(version) >= parse_version("0.24") else "pdf"
def get_file_size(version):
    """Return the human-readable doc archive size, or None if not listed."""
    target_name = f"scikit-learn-docs.{get_file_extension(version)}"
    listing = json_urlread(ROOT_URL + "%s/_downloads" % version)
    for path_details in listing:
        if path_details["name"] == target_name:
            # Sizes are reported with SI units (multiple of 1000).
            return human_readable_data_quantity(path_details["size"], 1000)
parser = argparse.ArgumentParser()
parser.add_argument("--rst", type=str, required=True)
parser.add_argument("--json", type=str, required=True)
args = parser.parse_args()
heading = "Available documentation for scikit-learn"
json_content = []
rst_content = [
    ":orphan:\n",
    heading,
    "=" * len(heading) + "\n",
    "Web-based documentation is available for versions listed below:\n",
]
# GitHub contents API of the scikit-learn.github.io repository, which hosts
# one directory per published documentation version.
ROOT_URL = "https://api.github.com/repos/scikit-learn/scikit-learn.github.io/contents/"
RAW_FMT = "https://raw.githubusercontent.com/scikit-learn/scikit-learn.github.io/master/%s/index.html"
# Extracts the version number from a doc page's <title> tag.
VERSION_RE = re.compile(r"scikit-learn ([\w\.\-]+) documentation</title>")
NAMED_DIRS = ["dev", "stable"]
# Gather data for each version directory, including symlinks
dirs = {}
symlinks = {}
root_listing = json_urlread(ROOT_URL)
for path_details in root_listing:
    name = path_details["name"]
    # Only numeric version directories and the named aliases are relevant.
    if not (name[:1].isdigit() or name in NAMED_DIRS):
        continue
    if path_details["type"] == "dir":
        html = urlopen(RAW_FMT % name).read().decode("utf8")
        version_num = VERSION_RE.search(html).group(1)
        file_size = get_file_size(name)
        dirs[name] = (version_num, file_size)
    if path_details["type"] == "symlink":
        symlinks[name] = json_urlread(path_details["_links"]["self"])["target"]
# Symlinks should have same data as target
for src, dst in symlinks.items():
    if dst in dirs:
        dirs[src] = dirs[dst]
# Output in order: dev, stable, decreasing other version
seen = set()
for i, name in enumerate(
    NAMED_DIRS
    + sorted((k for k in dirs if k[:1].isdigit()), key=parse_version, reverse=True)
):
    version_num, file_size = dirs[name]
    if version_num in seen:
        # symlink came first
        continue
    else:
        seen.add(version_num)
    full_name = f"{version_num}" if name[:1].isdigit() else f"{version_num} ({name})"
    path = f"https://scikit-learn.org/{name}/"
    # Update JSON for the version switcher; only keep the 8 latest versions to avoid
    # overloading the version switcher dropdown
    if i < 8:
        info = {"name": full_name, "version": version_num, "url": path}
        if name == "stable":
            info["preferred"] = True
        json_content.append(info)
    # Printout for the historical version page
    out = f"* `scikit-learn {full_name} documentation <{path}>`_"
    if file_size is not None:
        file_extension = get_file_extension(version_num)
        out += (
            f" (`{file_extension.upper()} {file_size} <{path}/"
            f"_downloads/scikit-learn-docs.{file_extension}>`_)"
        )
    rst_content.append(out)
with open(args.rst, "w", encoding="utf-8") as f:
    f.write("\n".join(rst_content) + "\n")
print(f"Written {args.rst}")
with open(args.json, "w", encoding="utf-8") as f:
    json.dump(json_content, f, indent=2)
print(f"Written {args.json}")
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/build_tools/azure/get_commit_message.py | build_tools/azure/get_commit_message.py | import argparse
import os
import subprocess
def get_commit_message():
    """Retrieve the commit message that triggered the Azure build.

    Returns the message with the Azure "##vso" logging marker neutralized.
    """
    if "COMMIT_MESSAGE" in os.environ or "BUILD_SOURCEVERSIONMESSAGE" not in os.environ:
        raise RuntimeError(
            "This legacy script should only be used on Azure. "
            "On GitHub actions, use the 'COMMIT_MESSAGE' environment variable"
        )
    source_message = os.environ["BUILD_SOURCEVERSIONMESSAGE"]
    if os.environ["BUILD_REASON"] != "PullRequest":
        commit_message = source_message
    else:
        # Pull requests build refs/pull/PULL_ID/merge whose message reads
        # "Merge COMMIT_ID into OTHER_ID"; the PR's own latest message lives
        # on COMMIT_ID, the second word of that merge message.
        commit_id = source_message.split()[1]
        completed = subprocess.run(
            ["git", "log", commit_id, "-1", "--pretty=%B"],
            capture_output=True,
            text=True,
        )
        commit_message = completed.stdout.strip()
    # Sanitize the commit message to avoid introducing a vulnerability: a PR
    # submitter could include the "##vso" special marker in their commit
    # message to attempt to obfuscate the injection of arbitrary commands in
    # the Azure pipeline. Azure already sanitizes BUILD_SOURCEVERSIONMESSAGE
    # on protected branches, but sanitize here too out of precaution.
    return commit_message.replace("##vso", "..vso")
def parsed_args():
    """Build and evaluate the command-line parser for this script."""
    parser = argparse.ArgumentParser(
        description=(
            "Show commit message that triggered the build in Azure DevOps pipeline"
        )
    )
    parser.add_argument(
        "--only-show-message",
        action="store_true",
        default=False,
        help=(
            "Only print commit message. Useful for direct use in scripts rather than"
            " setting output variable of the Azure job"
        ),
    )
    return parser.parse_args()
if __name__ == "__main__":
    args = parsed_args()
    commit_message = get_commit_message()
    if args.only_show_message:
        # Plain output for direct consumption by shell scripts.
        print(commit_message)
    else:
        # set the environment variable to be propagated to other steps
        # ("##vso[task.setvariable ...]" is Azure's logging command syntax).
        print(f"##vso[task.setvariable variable=message;isOutput=true]{commit_message}")
        print(f"commit message: {commit_message}")  # helps debugging
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/build_tools/azure/get_selected_tests.py | build_tools/azure/get_selected_tests.py | import os
from get_commit_message import get_commit_message
def get_selected_tests():
    """Parse the commit message to check if pytest should run only specific tests.

    If so, selected tests will be run with SKLEARN_TESTS_GLOBAL_RANDOM_SEED="all".

    The commit message must take the form:
        <title> [all random seeds]
        <test_name_1>
        <test_name_2>
        ...
    """
    if "SELECTED_TESTS" in os.environ:
        raise RuntimeError(
            "This legacy script should only be used on Azure. "
            "On GitHub actions, use the 'SELECTED_TESTS' environment variable"
        )
    marker = "[all random seeds]"
    commit_message = get_commit_message()
    if marker not in commit_message:
        return ""
    # The names after the marker become a single pytest -k expression.
    test_names = commit_message.split(marker)[1].strip()
    return test_names.replace("\n", " or ")
if __name__ == "__main__":
    # set the environment variable to be propagated to other steps
    # ("##vso[task.setvariable ...]" is Azure's logging command syntax).
    selected_tests = get_selected_tests()
    if selected_tests:
        print(f"##vso[task.setvariable variable=SELECTED_TESTS]'{selected_tests}'")
        print(f"selected tests: {selected_tests}")  # helps debugging
    else:
        print("no selected tests")
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/build_tools/github/check_wheels.py | build_tools/github/check_wheels.py | """Checks that dist/* contains the number of wheels built from the
.github/workflows/wheels.yml config."""
import sys
from pathlib import Path
import yaml
# Workflow file whose build matrix defines how many wheels are expected.
gh_wheel_path = Path.cwd() / ".github" / "workflows" / "wheels.yml"
with gh_wheel_path.open("r") as f:
    wheel_config = yaml.safe_load(f)
build_matrix = wheel_config["jobs"]["build_wheels"]["strategy"]["matrix"]["include"]
n_wheels = len(build_matrix)
# plus one more for the sdist
n_wheels += 1
dist_files = list(Path("dist").glob("**/*"))
n_dist_files = len(dist_files)
# Fail the job when the artifact count does not match the matrix size.
if n_dist_files != n_wheels:
    print(
        f"Expected {n_wheels} wheels in dist/* but "
        f"got {n_dist_files} artifacts instead."
    )
    sys.exit(1)
print(f"dist/* has the expected {n_wheels} wheels:")
print("\n".join(file.name for file in dist_files))
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/build_tools/github/autoclose_prs.py | build_tools/github/autoclose_prs.py | """Close PRs labeled with 'autoclose' more than 14 days ago.
Called from .github/workflows/autoclose-schedule.yml."""
import os
from datetime import datetime, timedelta, timezone
from pprint import pprint
from github import Auth, Github
def get_labeled_last_time(pr, label):
    """Return the time *label* was last applied to *pr*.

    The last matching "labeled" event in iteration order wins. When no such
    event exists, a timezone-aware ``datetime.max`` is returned so the age
    computed by the caller is negative and the PR is never considered stale.
    """
    # Use an aware sentinel: event timestamps are timezone-aware and the
    # caller subtracts this value from an aware "now" — a naive
    # datetime.max would raise TypeError in that subtraction.
    labeled_time = datetime.max.replace(tzinfo=timezone.utc)
    for event in pr.get_events():
        if event.event == "labeled" and event.label.name == label:
            labeled_time = event.created_at
    return labeled_time
# When True, only report what would be closed without commenting/closing.
dry_run = False
# Days a PR may carry the label before being auto-closed.
cutoff_days = 14
gh_repo = "scikit-learn/scikit-learn"
github_token = os.getenv("GITHUB_TOKEN")
auth = Auth.Token(github_token)
gh = Github(auth=auth)
repo = gh.get_repo(gh_repo)
now = datetime.now(timezone.utc)
label = "autoclose"
# get_issues returns both issues and pull requests; keep only actual PRs.
prs = [
    each for each in repo.get_issues(labels=[label]) if each.pull_request is not None
]
prs_info = [f"{pr.title}: {pr.html_url}" for pr in prs]
print(f"Found {len(prs)} opened PRs with label {label}")
pprint(prs_info)
# Keep only the PRs whose label was applied more than cutoff_days ago.
prs = [
    pr
    for pr in prs
    if (now - get_labeled_last_time(pr, label)) > timedelta(days=cutoff_days)
]
prs_info = [f"{pr.title} {pr.html_url}" for pr in prs]
print(f"Found {len(prs)} PRs to autoclose")
pprint(prs_info)
message = (
    "Thank you for your interest in contributing to scikit-learn, but we cannot "
    "accept your contribution as this pull request does not meet our development "
    "standards.\n\n"
    "Following our autoclose policy, we are closing this PR after allowing two "
    "weeks time for improvements.\n\n"
    "Thank you for your understanding. If you think your PR has been closed "
    "by mistake, please comment below."
)
for pr in prs:
    print(f"Closing PR #{pr.number} with comment")
    if not dry_run:
        pr.create_comment(message)
        pr.edit(state="closed")
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/build_tools/github/vendor.py | build_tools/github/vendor.py | """Embed vcomp140.dll and msvcp140.dll."""
import os
import os.path as op
import shutil
import sys
import textwrap
# Wheel-relative destination for the vendored DLLs and the loader shim.
TARGET_FOLDER = op.join("sklearn", ".libs")
DISTRIBUTOR_INIT = op.join("sklearn", "_distributor_init.py")
# System copies of the DLLs to vendor into the wheel.
VCOMP140_SRC_PATH = "C:\\Windows\\System32\\vcomp140.dll"
MSVCP140_SRC_PATH = "C:\\Windows\\System32\\msvcp140.dll"
def make_distributor_init_64_bits(
    distributor_init,
    vcomp140_dll_filename,
    msvcp140_dll_filename,
):
    """Create a _distributor_init.py file for 64-bit architectures.

    This file is imported first when importing the sklearn package
    so as to pre-load the vendored vcomp140.dll and msvcp140.dll.
    """
    # NOTE: the dedented template (with {0}/{1} substituted by the DLL file
    # names) is written verbatim into the wheel — its content is behavior.
    with open(distributor_init, "wt") as f:
        f.write(
            textwrap.dedent(
                """
                '''Helper to preload vcomp140.dll and msvcp140.dll to prevent
                "not found" errors.
                Once vcomp140.dll and msvcp140.dll are
                preloaded, the namespace is made available to any subsequent
                vcomp140.dll and msvcp140.dll. This is
                created as part of the scripts that build the wheel.
                '''
                import os
                import os.path as op
                from ctypes import WinDLL
                if os.name == "nt":
                    libs_path = op.join(op.dirname(__file__), ".libs")
                    vcomp140_dll_filename = op.join(libs_path, "{0}")
                    msvcp140_dll_filename = op.join(libs_path, "{1}")
                    WinDLL(op.abspath(vcomp140_dll_filename))
                    WinDLL(op.abspath(msvcp140_dll_filename))
                """.format(
                    vcomp140_dll_filename,
                    msvcp140_dll_filename,
                )
            )
        )
def main(wheel_dirname):
    """Embed vcomp140.dll and msvcp140.dll."""
    if not op.exists(VCOMP140_SRC_PATH):
        raise ValueError(f"Could not find {VCOMP140_SRC_PATH}.")
    if not op.exists(MSVCP140_SRC_PATH):
        raise ValueError(f"Could not find {MSVCP140_SRC_PATH}.")
    if not op.isdir(wheel_dirname):
        raise RuntimeError(f"Could not find {wheel_dirname} file.")

    target_folder = op.join(wheel_dirname, TARGET_FOLDER)
    distributor_init = op.join(wheel_dirname, DISTRIBUTOR_INIT)

    # Create the "sklearn/.libs" subfolder
    if not op.exists(target_folder):
        os.mkdir(target_folder)

    # Copy both runtime DLLs next to the package inside the wheel tree.
    for dll_src_path in (VCOMP140_SRC_PATH, MSVCP140_SRC_PATH):
        print(f"Copying {dll_src_path} to {target_folder}.")
        shutil.copy2(dll_src_path, target_folder)

    # Generate the _distributor_init file in the source tree
    print("Generating the '_distributor_init.py' file.")
    make_distributor_init_64_bits(
        distributor_init,
        op.basename(VCOMP140_SRC_PATH),
        op.basename(MSVCP140_SRC_PATH),
    )
if __name__ == "__main__":
    # Expect exactly one argument: the unpacked wheel directory.
    _, wheel_file = sys.argv
    main(wheel_file)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/asv_benchmarks/benchmarks/model_selection.py | asv_benchmarks/benchmarks/model_selection.py | from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV, cross_val_score
from .common import Benchmark, Estimator, Predictor
from .datasets import _synth_classification_dataset
from .utils import make_gen_classif_scorers
class CrossValidationBenchmark(Benchmark):
    """
    Benchmarks for Cross Validation.
    """

    # Generous timeout (seconds): cross-validating a forest on 50k samples
    # can be slow, especially on the "large" data size with 16 folds.
    timeout = 20000
    param_names = ["n_jobs"]
    params = (Benchmark.n_jobs_vals,)

    def setup(self, *params):
        (n_jobs,) = params
        data = _synth_classification_dataset(n_samples=50000, n_features=100)
        self.X, self.X_val, self.y, self.y_val = data
        self.clf = RandomForestClassifier(n_estimators=50, max_depth=10, random_state=0)
        # More CV folds on the "large" benchmark data size.
        cv = 16 if Benchmark.data_size == "large" else 4
        self.cv_params = {"n_jobs": n_jobs, "cv": cv}

    def time_crossval(self, *args):
        # asv: wall-clock time of a full cross_val_score run.
        cross_val_score(self.clf, self.X, self.y, **self.cv_params)

    def peakmem_crossval(self, *args):
        # asv: peak memory usage of the same run.
        cross_val_score(self.clf, self.X, self.y, **self.cv_params)

    def track_crossval(self, *args):
        # asv: tracked value — the mean CV score, to catch quality drifts.
        return float(cross_val_score(self.clf, self.X, self.y, **self.cv_params).mean())
class GridSearchBenchmark(Predictor, Estimator, Benchmark):
    """
    Benchmarks for GridSearch.
    """

    # Generous timeout (seconds): the grid is large and each fit is a forest.
    timeout = 20000
    param_names = ["n_jobs"]
    params = (Benchmark.n_jobs_vals,)

    def setup_cache(self):
        super().setup_cache()

    def make_data(self, params):
        data = _synth_classification_dataset(n_samples=10000, n_features=100)
        return data

    def make_estimator(self, params):
        (n_jobs,) = params
        clf = RandomForestClassifier(random_state=0)

        # Use a larger hyper-parameter grid on the "large" data size.
        if Benchmark.data_size == "large":
            n_estimators_list = [10, 25, 50, 100, 500]
            max_depth_list = [5, 10, None]
            max_features_list = [0.1, 0.4, 0.8, 1.0]
        else:
            n_estimators_list = [10, 25, 50]
            max_depth_list = [5, 10]
            max_features_list = [0.1, 0.4, 0.8]

        param_grid = {
            "n_estimators": n_estimators_list,
            "max_depth": max_depth_list,
            "max_features": max_features_list,
        }

        estimator = GridSearchCV(clf, param_grid, n_jobs=n_jobs, cv=4)

        return estimator

    def make_scorers(self):
        make_gen_classif_scorers(self)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/asv_benchmarks/benchmarks/cluster.py | asv_benchmarks/benchmarks/cluster.py | from sklearn.cluster import KMeans, MiniBatchKMeans
from .common import Benchmark, Estimator, Predictor, Transformer
from .datasets import _20newsgroups_highdim_dataset, _blobs_dataset
from .utils import neg_mean_inertia
class KMeansBenchmark(Predictor, Transformer, Estimator, Benchmark):
    """
    Benchmarks for KMeans.
    """

    param_names = ["representation", "algorithm", "init"]
    params = (["dense", "sparse"], ["lloyd", "elkan"], ["random", "k-means++"])

    def setup_cache(self):
        super().setup_cache()

    def make_data(self, params):
        representation, algorithm, init = params

        # Sparse case: tf-idf 20newsgroups features; dense case: blobs.
        if representation == "sparse":
            data = _20newsgroups_highdim_dataset(n_samples=8000)
        else:
            data = _blobs_dataset(n_clusters=20)

        return data

    def make_estimator(self, params):
        representation, algorithm, init = params

        # Fewer iterations on the (slower) sparse representation.
        max_iter = 30 if representation == "sparse" else 100

        estimator = KMeans(
            n_clusters=20,
            algorithm=algorithm,
            init=init,
            n_init=1,
            max_iter=max_iter,
            tol=0,
            random_state=0,
        )

        return estimator

    def make_scorers(self):
        # Score clustering quality as negative mean inertia on the train and
        # validation data (the scorers ignore their two positional arguments).
        self.train_scorer = lambda _, __: neg_mean_inertia(
            self.X, self.estimator.predict(self.X), self.estimator.cluster_centers_
        )
        self.test_scorer = lambda _, __: neg_mean_inertia(
            self.X_val,
            self.estimator.predict(self.X_val),
            self.estimator.cluster_centers_,
        )
class MiniBatchKMeansBenchmark(Predictor, Transformer, Estimator, Benchmark):
    """
    Benchmarks for MiniBatchKMeans.
    """

    param_names = ["representation", "init"]
    params = (["dense", "sparse"], ["random", "k-means++"])

    def setup_cache(self):
        super().setup_cache()

    def make_data(self, params):
        representation, init = params

        # Sparse case: tf-idf 20newsgroups features; dense case: blobs.
        if representation == "sparse":
            data = _20newsgroups_highdim_dataset()
        else:
            data = _blobs_dataset(n_clusters=20)

        return data

    def make_estimator(self, params):
        representation, init = params

        # More iterations on the sparse representation.
        max_iter = 5 if representation == "sparse" else 2

        estimator = MiniBatchKMeans(
            n_clusters=20,
            init=init,
            n_init=1,
            max_iter=max_iter,
            batch_size=1000,
            max_no_improvement=None,
            compute_labels=False,
            random_state=0,
        )

        return estimator

    def make_scorers(self):
        # Score clustering quality as negative mean inertia on the train and
        # validation data (the scorers ignore their two positional arguments).
        self.train_scorer = lambda _, __: neg_mean_inertia(
            self.X, self.estimator.predict(self.X), self.estimator.cluster_centers_
        )
        self.test_scorer = lambda _, __: neg_mean_inertia(
            self.X_val,
            self.estimator.predict(self.X_val),
            self.estimator.cluster_centers_,
        )
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/asv_benchmarks/benchmarks/metrics.py | asv_benchmarks/benchmarks/metrics.py | from sklearn.metrics.pairwise import pairwise_distances
from .common import Benchmark
from .datasets import _random_dataset
class PairwiseDistancesBenchmark(Benchmark):
    """
    Benchmarks for pairwise distances.
    """

    param_names = ["representation", "metric", "n_jobs"]
    params = (
        ["dense", "sparse"],
        ["cosine", "euclidean", "manhattan", "correlation"],
        Benchmark.n_jobs_vals,
    )

    def setup(self, *params):
        representation, metric, n_jobs = params

        # The correlation metric is not run on sparse input; raising
        # NotImplementedError in setup makes asv skip this combination.
        if representation == "sparse" and metric == "correlation":
            raise NotImplementedError

        # Use fewer samples for the metrics that are more expensive per pair.
        if Benchmark.data_size == "large":
            if metric in ("manhattan", "correlation"):
                n_samples = 8000
            else:
                n_samples = 24000
        else:
            if metric in ("manhattan", "correlation"):
                n_samples = 4000
            else:
                n_samples = 12000

        data = _random_dataset(n_samples=n_samples, representation=representation)
        self.X, self.X_val, self.y, self.y_val = data

        self.pdist_params = {"metric": metric, "n_jobs": n_jobs}

    def time_pairwise_distances(self, *args):
        # asv: wall-clock time of the full pairwise distance computation.
        pairwise_distances(self.X, **self.pdist_params)

    def peakmem_pairwise_distances(self, *args):
        # asv: peak memory of the same computation (the n x n result matrix
        # typically dominates).
        pairwise_distances(self.X, **self.pdist_params)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/asv_benchmarks/benchmarks/datasets.py | asv_benchmarks/benchmarks/datasets.py | from pathlib import Path
import numpy as np
import scipy.sparse as sp
from joblib import Memory
from sklearn.datasets import (
fetch_20newsgroups,
fetch_olivetti_faces,
fetch_openml,
load_digits,
make_blobs,
make_classification,
make_regression,
)
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MaxAbsScaler, StandardScaler
# memory location for caching datasets
# Datasets are cached on disk so repeated benchmark runs (and asv's separate
# worker processes) do not redo downloads or preprocessing.
M = Memory(location=str(Path(__file__).resolve().parent / "cache"))
@M.cache
def _blobs_dataset(n_samples=500000, n_features=3, n_clusters=100, dtype=np.float32):
X, _ = make_blobs(
n_samples=n_samples, n_features=n_features, centers=n_clusters, random_state=0
)
X = X.astype(dtype, copy=False)
X, X_val = train_test_split(X, test_size=0.1, random_state=0)
return X, X_val, None, None
@M.cache
def _20newsgroups_highdim_dataset(n_samples=None, ngrams=(1, 1), dtype=np.float32):
newsgroups = fetch_20newsgroups(random_state=0)
vectorizer = TfidfVectorizer(ngram_range=ngrams, dtype=dtype)
X = vectorizer.fit_transform(newsgroups.data[:n_samples])
y = newsgroups.target[:n_samples]
X, X_val, y, y_val = train_test_split(X, y, test_size=0.1, random_state=0)
return X, X_val, y, y_val
@M.cache
def _20newsgroups_lowdim_dataset(n_components=100, ngrams=(1, 1), dtype=np.float32):
newsgroups = fetch_20newsgroups()
vectorizer = TfidfVectorizer(ngram_range=ngrams)
X = vectorizer.fit_transform(newsgroups.data)
X = X.astype(dtype, copy=False)
svd = TruncatedSVD(n_components=n_components)
X = svd.fit_transform(X)
y = newsgroups.target
X, X_val, y, y_val = train_test_split(X, y, test_size=0.1, random_state=0)
return X, X_val, y, y_val
@M.cache
def _mnist_dataset(dtype=np.float32):
X, y = fetch_openml("mnist_784", version=1, return_X_y=True, as_frame=False)
X = X.astype(dtype, copy=False)
X = MaxAbsScaler().fit_transform(X)
X, X_val, y, y_val = train_test_split(X, y, test_size=0.1, random_state=0)
return X, X_val, y, y_val
@M.cache
def _digits_dataset(n_samples=None, dtype=np.float32):
X, y = load_digits(return_X_y=True)
X = X.astype(dtype, copy=False)
X = MaxAbsScaler().fit_transform(X)
X = X[:n_samples]
y = y[:n_samples]
X, X_val, y, y_val = train_test_split(X, y, test_size=0.1, random_state=0)
return X, X_val, y, y_val
@M.cache
def _synth_regression_dataset(n_samples=100000, n_features=100, dtype=np.float32):
X, y = make_regression(
n_samples=n_samples,
n_features=n_features,
n_informative=n_features // 10,
noise=50,
random_state=0,
)
X = X.astype(dtype, copy=False)
X = StandardScaler().fit_transform(X)
X, X_val, y, y_val = train_test_split(X, y, test_size=0.1, random_state=0)
return X, X_val, y, y_val
@M.cache
def _synth_regression_sparse_dataset(
n_samples=10000, n_features=10000, density=0.01, dtype=np.float32
):
X = sp.random(
m=n_samples, n=n_features, density=density, format="csr", random_state=0
)
X.data = np.random.RandomState(0).randn(X.getnnz())
X = X.astype(dtype, copy=False)
coefs = sp.random(m=n_features, n=1, density=0.5, random_state=0)
coefs.data = np.random.RandomState(0).randn(coefs.getnnz())
y = X.dot(coefs.toarray()).reshape(-1)
y += 0.2 * y.std() * np.random.randn(n_samples)
X, X_val, y, y_val = train_test_split(X, y, test_size=0.1, random_state=0)
return X, X_val, y, y_val
@M.cache
def _synth_classification_dataset(
n_samples=1000, n_features=10000, n_classes=2, dtype=np.float32
):
X, y = make_classification(
n_samples=n_samples,
n_features=n_features,
n_classes=n_classes,
random_state=0,
n_informative=n_features,
n_redundant=0,
)
X = X.astype(dtype, copy=False)
X = StandardScaler().fit_transform(X)
X, X_val, y, y_val = train_test_split(X, y, test_size=0.1, random_state=0)
return X, X_val, y, y_val
@M.cache
def _olivetti_faces_dataset():
dataset = fetch_olivetti_faces(shuffle=True, random_state=42)
faces = dataset.data
n_samples, n_features = faces.shape
faces_centered = faces - faces.mean(axis=0)
# local centering
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)
X = faces_centered
X, X_val = train_test_split(X, test_size=0.1, random_state=0)
return X, X_val, None, None
@M.cache
def _random_dataset(
n_samples=1000, n_features=1000, representation="dense", dtype=np.float32
):
if representation == "dense":
X = np.random.RandomState(0).random_sample((n_samples, n_features))
X = X.astype(dtype, copy=False)
else:
X = sp.random(
n_samples,
n_features,
density=0.05,
format="csr",
dtype=dtype,
random_state=0,
)
X, X_val = train_test_split(X, test_size=0.1, random_state=0)
return X, X_val, None, None
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/asv_benchmarks/benchmarks/ensemble.py | asv_benchmarks/benchmarks/ensemble.py | from sklearn.ensemble import (
GradientBoostingClassifier,
HistGradientBoostingClassifier,
RandomForestClassifier,
)
from .common import Benchmark, Estimator, Predictor
from .datasets import (
_20newsgroups_highdim_dataset,
_20newsgroups_lowdim_dataset,
_synth_classification_dataset,
)
from .utils import make_gen_classif_scorers
class RandomForestClassifierBenchmark(Predictor, Estimator, Benchmark):
"""
Benchmarks for RandomForestClassifier.
"""
param_names = ["representation", "n_jobs"]
params = (["dense", "sparse"], Benchmark.n_jobs_vals)
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
representation, n_jobs = params
if representation == "sparse":
data = _20newsgroups_highdim_dataset()
else:
data = _20newsgroups_lowdim_dataset()
return data
def make_estimator(self, params):
representation, n_jobs = params
n_estimators = 500 if Benchmark.data_size == "large" else 100
estimator = RandomForestClassifier(
n_estimators=n_estimators,
min_samples_split=10,
max_features="log2",
n_jobs=n_jobs,
random_state=0,
)
return estimator
def make_scorers(self):
make_gen_classif_scorers(self)
class GradientBoostingClassifierBenchmark(Predictor, Estimator, Benchmark):
"""
Benchmarks for GradientBoostingClassifier.
"""
param_names = ["representation"]
params = (["dense", "sparse"],)
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
(representation,) = params
if representation == "sparse":
data = _20newsgroups_highdim_dataset()
else:
data = _20newsgroups_lowdim_dataset()
return data
def make_estimator(self, params):
(representation,) = params
n_estimators = 100 if Benchmark.data_size == "large" else 10
estimator = GradientBoostingClassifier(
n_estimators=n_estimators,
max_features="log2",
subsample=0.5,
random_state=0,
)
return estimator
def make_scorers(self):
make_gen_classif_scorers(self)
class HistGradientBoostingClassifierBenchmark(Predictor, Estimator, Benchmark):
"""
Benchmarks for HistGradientBoostingClassifier.
"""
param_names = []
params = ()
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
data = _synth_classification_dataset(
n_samples=10000, n_features=100, n_classes=5
)
return data
def make_estimator(self, params):
estimator = HistGradientBoostingClassifier(
max_iter=100, max_leaf_nodes=15, early_stopping=False, random_state=0
)
return estimator
def make_scorers(self):
make_gen_classif_scorers(self)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/asv_benchmarks/benchmarks/common.py | asv_benchmarks/benchmarks/common.py | import itertools
import json
import os
import pickle
import timeit
from abc import ABC, abstractmethod
from multiprocessing import cpu_count
from pathlib import Path
import numpy as np
def get_from_config():
"""Get benchmarks configuration from the config.json file"""
current_path = Path(__file__).resolve().parent
config_path = current_path / "config.json"
with open(config_path, "r") as config_file:
config_file = "".join(line for line in config_file if line and "//" not in line)
config = json.loads(config_file)
profile = os.getenv("SKLBENCH_PROFILE", config["profile"])
n_jobs_vals_env = os.getenv("SKLBENCH_NJOBS")
if n_jobs_vals_env:
n_jobs_vals = json.loads(n_jobs_vals_env)
else:
n_jobs_vals = config["n_jobs_vals"]
if not n_jobs_vals:
n_jobs_vals = list(range(1, 1 + cpu_count()))
cache_path = current_path / "cache"
cache_path.mkdir(exist_ok=True)
(cache_path / "estimators").mkdir(exist_ok=True)
(cache_path / "tmp").mkdir(exist_ok=True)
save_estimators = os.getenv("SKLBENCH_SAVE_ESTIMATORS", config["save_estimators"])
save_dir = os.getenv("ASV_COMMIT", "new")[:8]
if save_estimators:
(cache_path / "estimators" / save_dir).mkdir(exist_ok=True)
base_commit = os.getenv("SKLBENCH_BASE_COMMIT", config["base_commit"])
bench_predict = os.getenv("SKLBENCH_PREDICT", config["bench_predict"])
bench_transform = os.getenv("SKLBENCH_TRANSFORM", config["bench_transform"])
return (
profile,
n_jobs_vals,
save_estimators,
save_dir,
base_commit,
bench_predict,
bench_transform,
)
def get_estimator_path(benchmark, directory, params, save=False):
"""Get path of pickled fitted estimator"""
path = Path(__file__).resolve().parent / "cache"
path = (path / "estimators" / directory) if save else (path / "tmp")
filename = (
benchmark.__class__.__name__
+ "_estimator_"
+ "_".join(list(map(str, params)))
+ ".pkl"
)
return path / filename
def clear_tmp():
"""Clean the tmp directory"""
path = Path(__file__).resolve().parent / "cache" / "tmp"
for child in path.iterdir():
child.unlink()
class Benchmark(ABC):
"""Abstract base class for all the benchmarks"""
timer = timeit.default_timer # wall time
processes = 1
timeout = 500
(
profile,
n_jobs_vals,
save_estimators,
save_dir,
base_commit,
bench_predict,
bench_transform,
) = get_from_config()
if profile == "fast":
warmup_time = 0
repeat = 1
number = 1
min_run_count = 1
data_size = "small"
elif profile == "regular":
warmup_time = 1
repeat = (3, 100, 30)
data_size = "small"
elif profile == "large_scale":
warmup_time = 1
repeat = 3
number = 1
data_size = "large"
@property
@abstractmethod
def params(self):
pass
class Estimator(ABC):
"""Abstract base class for all benchmarks of estimators"""
@abstractmethod
def make_data(self, params):
"""Return the dataset for a combination of parameters"""
# The datasets are cached using joblib.Memory so it's fast and can be
# called for each repeat
pass
@abstractmethod
def make_estimator(self, params):
"""Return an instance of the estimator for a combination of parameters"""
pass
def skip(self, params):
"""Return True if the benchmark should be skipped for these params"""
return False
def setup_cache(self):
"""Pickle a fitted estimator for all combinations of parameters"""
# This is run once per benchmark class.
clear_tmp()
param_grid = list(itertools.product(*self.params))
for params in param_grid:
if self.skip(params):
continue
estimator = self.make_estimator(params)
X, _, y, _ = self.make_data(params)
estimator.fit(X, y)
est_path = get_estimator_path(
self, Benchmark.save_dir, params, Benchmark.save_estimators
)
with est_path.open(mode="wb") as f:
pickle.dump(estimator, f)
def setup(self, *params):
"""Generate dataset and load the fitted estimator"""
# This is run once per combination of parameters and per repeat so we
# need to avoid doing expensive operations there.
if self.skip(params):
raise NotImplementedError
self.X, self.X_val, self.y, self.y_val = self.make_data(params)
est_path = get_estimator_path(
self, Benchmark.save_dir, params, Benchmark.save_estimators
)
with est_path.open(mode="rb") as f:
self.estimator = pickle.load(f)
self.make_scorers()
def time_fit(self, *args):
self.estimator.fit(self.X, self.y)
def peakmem_fit(self, *args):
self.estimator.fit(self.X, self.y)
def track_train_score(self, *args):
if hasattr(self.estimator, "predict"):
y_pred = self.estimator.predict(self.X)
else:
y_pred = None
return float(self.train_scorer(self.y, y_pred))
def track_test_score(self, *args):
if hasattr(self.estimator, "predict"):
y_val_pred = self.estimator.predict(self.X_val)
else:
y_val_pred = None
return float(self.test_scorer(self.y_val, y_val_pred))
class Predictor(ABC):
"""Abstract base class for benchmarks of estimators implementing predict"""
if Benchmark.bench_predict:
def time_predict(self, *args):
self.estimator.predict(self.X)
def peakmem_predict(self, *args):
self.estimator.predict(self.X)
if Benchmark.base_commit is not None:
def track_same_prediction(self, *args):
est_path = get_estimator_path(self, Benchmark.base_commit, args, True)
with est_path.open(mode="rb") as f:
estimator_base = pickle.load(f)
y_val_pred_base = estimator_base.predict(self.X_val)
y_val_pred = self.estimator.predict(self.X_val)
return np.allclose(y_val_pred_base, y_val_pred)
@property
@abstractmethod
def params(self):
pass
class Transformer(ABC):
"""Abstract base class for benchmarks of estimators implementing transform"""
if Benchmark.bench_transform:
def time_transform(self, *args):
self.estimator.transform(self.X)
def peakmem_transform(self, *args):
self.estimator.transform(self.X)
if Benchmark.base_commit is not None:
def track_same_transform(self, *args):
est_path = get_estimator_path(self, Benchmark.base_commit, args, True)
with est_path.open(mode="rb") as f:
estimator_base = pickle.load(f)
X_val_t_base = estimator_base.transform(self.X_val)
X_val_t = self.estimator.transform(self.X_val)
return np.allclose(X_val_t_base, X_val_t)
@property
@abstractmethod
def params(self):
pass
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/asv_benchmarks/benchmarks/decomposition.py | asv_benchmarks/benchmarks/decomposition.py | from sklearn.decomposition import PCA, DictionaryLearning, MiniBatchDictionaryLearning
from .common import Benchmark, Estimator, Transformer
from .datasets import _mnist_dataset, _olivetti_faces_dataset
from .utils import make_dict_learning_scorers, make_pca_scorers
class PCABenchmark(Transformer, Estimator, Benchmark):
"""
Benchmarks for PCA.
"""
param_names = ["svd_solver"]
params = (["full", "arpack", "randomized"],)
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
return _mnist_dataset()
def make_estimator(self, params):
(svd_solver,) = params
estimator = PCA(n_components=32, svd_solver=svd_solver, random_state=0)
return estimator
def make_scorers(self):
make_pca_scorers(self)
class DictionaryLearningBenchmark(Transformer, Estimator, Benchmark):
"""
Benchmarks for DictionaryLearning.
"""
param_names = ["fit_algorithm", "n_jobs"]
params = (["lars", "cd"], Benchmark.n_jobs_vals)
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
return _olivetti_faces_dataset()
def make_estimator(self, params):
fit_algorithm, n_jobs = params
estimator = DictionaryLearning(
n_components=15,
fit_algorithm=fit_algorithm,
alpha=0.1,
transform_alpha=1,
max_iter=20,
tol=1e-16,
random_state=0,
n_jobs=n_jobs,
)
return estimator
def make_scorers(self):
make_dict_learning_scorers(self)
class MiniBatchDictionaryLearningBenchmark(Transformer, Estimator, Benchmark):
"""
Benchmarks for MiniBatchDictionaryLearning
"""
param_names = ["fit_algorithm", "n_jobs"]
params = (["lars", "cd"], Benchmark.n_jobs_vals)
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
return _olivetti_faces_dataset()
def make_estimator(self, params):
fit_algorithm, n_jobs = params
estimator = MiniBatchDictionaryLearning(
n_components=15,
fit_algorithm=fit_algorithm,
alpha=0.1,
batch_size=3,
random_state=0,
n_jobs=n_jobs,
)
return estimator
def make_scorers(self):
make_dict_learning_scorers(self)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/asv_benchmarks/benchmarks/svm.py | asv_benchmarks/benchmarks/svm.py | from sklearn.svm import SVC
from .common import Benchmark, Estimator, Predictor
from .datasets import _synth_classification_dataset
from .utils import make_gen_classif_scorers
class SVCBenchmark(Predictor, Estimator, Benchmark):
"""Benchmarks for SVC."""
param_names = ["kernel"]
params = (["linear", "poly", "rbf", "sigmoid"],)
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
return _synth_classification_dataset()
def make_estimator(self, params):
(kernel,) = params
estimator = SVC(
max_iter=100, tol=1e-16, kernel=kernel, random_state=0, gamma="scale"
)
return estimator
def make_scorers(self):
make_gen_classif_scorers(self)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/asv_benchmarks/benchmarks/linear_model.py | asv_benchmarks/benchmarks/linear_model.py | from sklearn.linear_model import (
ElasticNet,
Lasso,
LinearRegression,
LogisticRegression,
Ridge,
SGDRegressor,
)
from .common import Benchmark, Estimator, Predictor
from .datasets import (
_20newsgroups_highdim_dataset,
_20newsgroups_lowdim_dataset,
_synth_regression_dataset,
_synth_regression_sparse_dataset,
)
from .utils import make_gen_classif_scorers, make_gen_reg_scorers
class LogisticRegressionBenchmark(Predictor, Estimator, Benchmark):
"""
Benchmarks for LogisticRegression.
"""
param_names = ["representation", "solver", "n_jobs"]
params = (["dense", "sparse"], ["lbfgs", "saga"], Benchmark.n_jobs_vals)
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
representation, solver, n_jobs = params
if Benchmark.data_size == "large":
if representation == "sparse":
data = _20newsgroups_highdim_dataset(n_samples=10000)
else:
data = _20newsgroups_lowdim_dataset(n_components=1e3)
else:
if representation == "sparse":
data = _20newsgroups_highdim_dataset(n_samples=2500)
else:
data = _20newsgroups_lowdim_dataset()
return data
def make_estimator(self, params):
representation, solver, n_jobs = params
l1_ratio = 0 if solver == "lbfgs" else 1
estimator = LogisticRegression(
solver=solver,
l1_ratio=l1_ratio,
tol=0.01,
n_jobs=n_jobs,
random_state=0,
)
return estimator
def make_scorers(self):
make_gen_classif_scorers(self)
class RidgeBenchmark(Predictor, Estimator, Benchmark):
"""
Benchmarks for Ridge.
"""
param_names = ["representation", "solver"]
params = (
["dense", "sparse"],
["auto", "svd", "cholesky", "lsqr", "sparse_cg", "sag", "saga"],
)
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
representation, solver = params
if representation == "dense":
data = _synth_regression_dataset(n_samples=500000, n_features=100)
else:
data = _synth_regression_sparse_dataset(
n_samples=100000, n_features=10000, density=0.005
)
return data
def make_estimator(self, params):
representation, solver = params
estimator = Ridge(solver=solver, fit_intercept=False, random_state=0)
return estimator
def make_scorers(self):
make_gen_reg_scorers(self)
def skip(self, params):
representation, solver = params
if representation == "sparse" and solver == "svd":
return True
return False
class LinearRegressionBenchmark(Predictor, Estimator, Benchmark):
"""
Benchmarks for Linear Regression.
"""
param_names = ["representation"]
params = (["dense", "sparse"],)
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
(representation,) = params
if representation == "dense":
data = _synth_regression_dataset(n_samples=1000000, n_features=100)
else:
data = _synth_regression_sparse_dataset(
n_samples=10000, n_features=100000, density=0.01
)
return data
def make_estimator(self, params):
estimator = LinearRegression()
return estimator
def make_scorers(self):
make_gen_reg_scorers(self)
class SGDRegressorBenchmark(Predictor, Estimator, Benchmark):
"""
Benchmark for SGD
"""
param_names = ["representation"]
params = (["dense", "sparse"],)
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
(representation,) = params
if representation == "dense":
data = _synth_regression_dataset(n_samples=100000, n_features=200)
else:
data = _synth_regression_sparse_dataset(
n_samples=100000, n_features=1000, density=0.01
)
return data
def make_estimator(self, params):
(representation,) = params
max_iter = 60 if representation == "dense" else 300
estimator = SGDRegressor(max_iter=max_iter, tol=None, random_state=0)
return estimator
def make_scorers(self):
make_gen_reg_scorers(self)
class ElasticNetBenchmark(Predictor, Estimator, Benchmark):
"""
Benchmarks for ElasticNet.
"""
param_names = ["representation", "precompute"]
params = (["dense", "sparse"], [True, False])
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
representation, precompute = params
if representation == "dense":
data = _synth_regression_dataset(n_samples=1000000, n_features=100)
else:
data = _synth_regression_sparse_dataset(
n_samples=50000, n_features=5000, density=0.01
)
return data
def make_estimator(self, params):
representation, precompute = params
estimator = ElasticNet(precompute=precompute, alpha=0.001, random_state=0)
return estimator
def make_scorers(self):
make_gen_reg_scorers(self)
def skip(self, params):
representation, precompute = params
if representation == "sparse" and precompute is False:
return True
return False
class LassoBenchmark(Predictor, Estimator, Benchmark):
"""
Benchmarks for Lasso.
"""
param_names = ["representation", "precompute"]
params = (["dense", "sparse"], [True, False])
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
representation, precompute = params
if representation == "dense":
data = _synth_regression_dataset(n_samples=1000000, n_features=100)
else:
data = _synth_regression_sparse_dataset(
n_samples=50000, n_features=5000, density=0.01
)
return data
def make_estimator(self, params):
representation, precompute = params
estimator = Lasso(precompute=precompute, alpha=0.001, random_state=0)
return estimator
def make_scorers(self):
make_gen_reg_scorers(self)
def skip(self, params):
representation, precompute = params
if representation == "sparse" and precompute is False:
return True
return False
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/asv_benchmarks/benchmarks/neighbors.py | asv_benchmarks/benchmarks/neighbors.py | from sklearn.neighbors import KNeighborsClassifier
from .common import Benchmark, Estimator, Predictor
from .datasets import _20newsgroups_lowdim_dataset
from .utils import make_gen_classif_scorers
class KNeighborsClassifierBenchmark(Predictor, Estimator, Benchmark):
"""
Benchmarks for KNeighborsClassifier.
"""
param_names = ["algorithm", "dimension", "n_jobs"]
params = (["brute", "kd_tree", "ball_tree"], ["low", "high"], Benchmark.n_jobs_vals)
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
algorithm, dimension, n_jobs = params
if Benchmark.data_size == "large":
n_components = 40 if dimension == "low" else 200
else:
n_components = 10 if dimension == "low" else 50
data = _20newsgroups_lowdim_dataset(n_components=n_components)
return data
def make_estimator(self, params):
algorithm, dimension, n_jobs = params
estimator = KNeighborsClassifier(algorithm=algorithm, n_jobs=n_jobs)
return estimator
def make_scorers(self):
make_gen_classif_scorers(self)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/asv_benchmarks/benchmarks/utils.py | asv_benchmarks/benchmarks/utils.py | import numpy as np
from sklearn.metrics import balanced_accuracy_score, r2_score
def neg_mean_inertia(X, labels, centers):
return -(np.asarray(X - centers[labels]) ** 2).sum(axis=1).mean()
def make_gen_classif_scorers(caller):
caller.train_scorer = balanced_accuracy_score
caller.test_scorer = balanced_accuracy_score
def make_gen_reg_scorers(caller):
caller.test_scorer = r2_score
caller.train_scorer = r2_score
def neg_mean_data_error(X, U, V):
return -np.sqrt(((X - U.dot(V)) ** 2).mean())
def make_dict_learning_scorers(caller):
caller.train_scorer = lambda _, __: (
neg_mean_data_error(
caller.X, caller.estimator.transform(caller.X), caller.estimator.components_
)
)
caller.test_scorer = lambda _, __: (
neg_mean_data_error(
caller.X_val,
caller.estimator.transform(caller.X_val),
caller.estimator.components_,
)
)
def explained_variance_ratio(Xt, X):
return np.var(Xt, axis=0).sum() / np.var(X, axis=0).sum()
def make_pca_scorers(caller):
caller.train_scorer = lambda _, __: caller.estimator.explained_variance_ratio_.sum()
caller.test_scorer = lambda _, __: (
explained_variance_ratio(caller.estimator.transform(caller.X_val), caller.X_val)
)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/asv_benchmarks/benchmarks/__init__.py | asv_benchmarks/benchmarks/__init__.py | """Benchmark suite for scikit-learn using ASV"""
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/asv_benchmarks/benchmarks/manifold.py | asv_benchmarks/benchmarks/manifold.py | from sklearn.manifold import TSNE
from .common import Benchmark, Estimator
from .datasets import _digits_dataset
class TSNEBenchmark(Estimator, Benchmark):
"""
Benchmarks for t-SNE.
"""
param_names = ["method"]
params = (["exact", "barnes_hut"],)
def setup_cache(self):
super().setup_cache()
def make_data(self, params):
(method,) = params
n_samples = 500 if method == "exact" else None
return _digits_dataset(n_samples=n_samples)
def make_estimator(self, params):
(method,) = params
estimator = TSNE(random_state=0, method=method)
return estimator
def make_scorers(self):
self.train_scorer = lambda _, __: self.estimator.kl_divergence_
self.test_scorer = lambda _, __: self.estimator.kl_divergence_
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/maint_tools/bump-dependencies-versions.py | maint_tools/bump-dependencies-versions.py | import io
import re
import subprocess
import sys
from datetime import datetime
from pathlib import Path
import pandas as pd
import requests
from packaging import version
req = requests.get("https://devguide.python.org/versions/")
df_list = pd.read_html(io.StringIO(req.content.decode("utf-8")))
df = pd.concat(df_list).astype({"Branch": str})
release_dates = {}
python_version_info = {
version: release_date
for version, release_date in zip(df["Branch"], df["First release"])
}
python_version_info = {
version: pd.to_datetime(release_date)
for version, release_date in python_version_info.items()
}
def get_min_version_with_wheel(package_name, python_version):
# For compiled dependencies we want the oldest minor version that has
# wheels for 'python_version'
url = f"https://pypi.org/pypi/{package_name}/json"
response = requests.get(url)
if response.status_code != 200:
return None
data = response.json()
releases = data["releases"]
compatible_versions = []
# We want only minor X.Y.0 and not bugfix X.Y.Z
minor_releases = [
(ver, release_info)
for ver, release_info in releases.items()
if re.match(r"^\d+\.\d+\.0$", ver)
]
for ver, release_info in minor_releases:
for file_info in release_info:
if (
file_info["packagetype"] == "bdist_wheel"
and f"cp{python_version.replace('.', '')}" in file_info["filename"]
and not file_info["yanked"]
):
compatible_versions.append(ver)
break
if not compatible_versions:
return None
return min(compatible_versions, key=version.parse)
def get_min_python_version(scikit_learn_release_date_str="today"):
# min Python version is the most recent Python release at least 3 years old
# at the time of the scikit-learn release
if scikit_learn_release_date_str == "today":
scikit_learn_release_date = pd.to_datetime(datetime.now().date())
else:
scikit_learn_release_date = datetime.strptime(
scikit_learn_release_date_str, "%Y-%m-%d"
)
version_and_releases = [
{"python_version": python_version, "python_release_date": python_release_date}
for python_version, python_release_date in python_version_info.items()
if (scikit_learn_release_date - python_release_date).days > 365 * 3
]
return max(version_and_releases, key=lambda each: each["python_release_date"])[
"python_version"
]
def get_min_version_pure_python_or_example_dependency(
package_name, scikit_learn_release_date_str="today"
):
# for pure Python dependencies we want the most recent minor release that
# is at least 2 years old
if scikit_learn_release_date_str == "today":
scikit_learn_release_date = pd.to_datetime(datetime.now().date())
else:
scikit_learn_release_date = datetime.strptime(
scikit_learn_release_date_str, "%Y-%m-%d"
)
url = f"https://pypi.org/pypi/{package_name}/json"
response = requests.get(url)
if response.status_code != 200:
return None
data = response.json()
releases = data["releases"]
compatible_versions = []
# We want only minor X.Y.0 and not bugfix X.Y.Z
releases = [
(ver, release_info)
for ver, release_info in releases.items()
if re.match(r"^\d+\.\d+\.0$", ver)
]
for ver, release_info in releases:
for file_info in release_info:
if (
file_info["packagetype"] == "bdist_wheel"
and not file_info["yanked"]
and (
scikit_learn_release_date - pd.to_datetime(file_info["upload_time"])
).days
> 365 * 2
):
compatible_versions.append(ver)
break
if not compatible_versions:
return None
return max(compatible_versions, key=version.parse)
def get_current_dependencies_version(dep):
return (
subprocess.check_output([sys.executable, "sklearn/_min_dependencies.py", dep])
.decode()
.strip()
)
def get_current_min_python_version():
content = Path("pyproject.toml").read_text()
min_python = re.findall(r'requires-python\s*=\s*">=(\d+\.\d+)"', content)[0]
return min_python
def show_versions_update(scikit_learn_release_date="today"):
future_versions = {"python": get_min_python_version(scikit_learn_release_date)}
compiled_dependencies = [
"numpy",
"scipy",
"pandas",
"matplotlib",
"pyamg",
"polars",
"pyarrow",
]
future_versions.update(
{
dep: get_min_version_with_wheel(dep, future_versions["python"])
for dep in compiled_dependencies
}
)
pure_python_or_example_dependencies = [
"joblib",
"threadpoolctl",
"scikit-image",
"seaborn",
"polars",
"Pillow",
"pooch",
"plotly",
]
future_versions.update(
{
dep: get_min_version_pure_python_or_example_dependency(
dep, scikit_learn_release_date
)
for dep in pure_python_or_example_dependencies
}
)
current_versions = {"python": get_current_min_python_version()}
current_versions.update(
{
dep: get_current_dependencies_version(dep)
for dep in compiled_dependencies + pure_python_or_example_dependencies
}
)
print(f"For future release at date {scikit_learn_release_date}")
for k in future_versions:
if future_versions[k] != current_versions[k]:
print(f"- {k}: {current_versions[k]} -> {future_versions[k]}")
if __name__ == "__main__":
scikit_learn_release_date = sys.argv[1] if len(sys.argv) > 1 else "today"
show_versions_update(scikit_learn_release_date)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/maint_tools/sort_whats_new.py | maint_tools/sort_whats_new.py | #!/usr/bin/env python
# Sorts what's new entries with per-module headings.
# Pass what's new entries on stdin.
import re
import sys
from collections import defaultdict
# Labels in the order entries should appear within each module section.
LABEL_ORDER = ["MajorFeature", "Feature", "Efficiency", "Enhancement", "Fix", "API"]


def entry_sort_key(s):
    """Sort key for a what's new entry: its ``|Label|`` rank, or -1 if unlabelled."""
    if not s.startswith("- |"):
        return -1
    label = s.split("|")[1]
    return LABEL_ORDER.index(label)
# discard headings and other non-entry lines
text = "".join(l for l in sys.stdin if l.startswith("- ") or l.startswith(" "))

# Group entries by the sklearn submodule they reference (or a catch-all key).
bucketed = defaultdict(list)

for entry in re.split("\n(?=- )", text.strip()):
    modules = re.findall(
        r":(?:func|meth|mod|class):`(?:[^<`]*<|~)?(?:sklearn.)?([a-z]\w+)", entry
    )
    modules = set(modules)
    if len(modules) > 1:
        key = "Multiple modules"
    elif modules:
        key = ":mod:`sklearn.%s`" % next(iter(modules))
    else:
        key = "Miscellaneous"
    bucketed[key].append(entry)
    # FIX: removed dead statement `entry = entry.strip() + "\n"` that ran
    # *after* the append -- the rebinding was immediately discarded by the
    # next loop iteration and had no effect on the output.

# Emit one heading per module, with its entries ordered by label priority.
everything = []
for key, bucket in sorted(bucketed.items()):
    everything.append(key + "\n" + "." * len(key))
    bucket.sort(key=entry_sort_key)
    everything.extend(bucket)
print("\n\n".join(everything))
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/maint_tools/update_tracking_issue.py | maint_tools/update_tracking_issue.py | """Creates or updates an issue if the CI fails. This is useful to keep track of
scheduled jobs that are failing repeatedly.
This script depends on:
- `defusedxml` for safer parsing for xml
- `PyGithub` for interacting with GitHub
The GitHub token only requires the `repo:public_repo` scope are described in
https://docs.github.com/en/developers/apps/building-oauth-apps/scopes-for-oauth-apps#available-scopes.
This scope allows the bot to create and edit its own issues. It is best to use a
github account that does **not** have commit access to the public repo.
"""
import argparse
import sys
import warnings
from datetime import datetime, timezone
from pathlib import Path
import defusedxml.ElementTree as ET
from github import Github
# Command-line interface: credentials, identifiers, and links for the CI run.
parser = argparse.ArgumentParser(
    description="Create or update issue from JUnit test results from pytest"
)
parser.add_argument(
    "bot_github_token", help="Github token for creating or updating an issue"
)
parser.add_argument("ci_name", help="Name of CI run instance")
parser.add_argument("issue_repo", help="Repo to track issues")
parser.add_argument("link_to_ci_run", help="URL to link to")
parser.add_argument(
    "--job-name",
    help=(
        "Name of the job. If provided the job ID will be added to the log URL so that"
        " it points to log of the job and not the whole workflow."
    ),
    default=None,
)
parser.add_argument("--junit-file", help="JUnit file to determine if tests passed")
parser.add_argument(
    "--tests-passed",
    help=(
        "If --tests-passed is true, then the original issue is closed if the issue "
        "exists, unless --auto-close is set to false. If tests-passed is false, then "
        "the issue is updated or created."
    ),
)
parser.add_argument(
    "--auto-close",
    help=(
        "If --auto-close is false, then issues will not auto close even if the tests"
        " pass."
    ),
    default="true",
)
args = parser.parse_args()

# --junit-file and --tests-passed are mutually exclusive ways of reporting the
# outcome: exactly one of them must be provided.
if args.junit_file is not None and args.tests_passed is not None:
    print("--junit-file and --test-passed can not be set together")
    sys.exit(1)
if args.junit_file is None and args.tests_passed is None:
    print("Either --junit-file or --test-passed must be passed in")
    sys.exit(1)

gh = Github(args.bot_github_token)
issue_repo = gh.get_repo(args.issue_repo)
# UTC timestamp rendered like "Jan 01, 2026" for the issue title and body.
dt_now = datetime.now(tz=timezone.utc)
date_str = dt_now.strftime("%b %d, %Y")
title_query = f"CI failed on {args.ci_name}"
title = f"⚠️ {title_query} (last failure: {date_str}) ⚠️"

# If a job name was given, try to point the URL at that job's log instead of
# the whole workflow run; fall back (with a warning) if the job is not found.
url = args.link_to_ci_run
if args.job_name is not None:
    run_id = int(args.link_to_ci_run.split("/")[-1])
    workflow_run = issue_repo.get_workflow_run(run_id)
    jobs = workflow_run.jobs()
    for job in jobs:
        if job.name == args.job_name:
            url = f"{url}/job/{job.id}"
            break
    else:
        warnings.warn(
            f"Job '{args.job_name}' not found, the URL in the issue will link to the"
            " whole workflow's log rather than the job's one."
        )
def get_issue():
    """Return the bot's open tracking issue for this CI name, or ``None``."""
    login = gh.get_user().login
    query = (
        f"repo:{args.issue_repo} {title_query} in:title state:open author:{login}"
        " is:issue"
    )
    first_page = gh.search_issues(query).get_page(0)
    # An empty first page means no matching open issue exists yet.
    return first_page[0] if first_page else None
def create_or_update_issue(body=""):
    """Create the tracking issue, or refresh an existing one, then exit."""
    link = f"[{args.ci_name}]({url})"
    issue = get_issue()

    # GitHub's REST API rejects bodies over 65536 characters; truncate well
    # below that limit and say so in the body.
    max_body_length = 60_000
    original_body_length = len(body)
    if original_body_length > max_body_length:
        body = (
            f"{body[:max_body_length]}\n...\n"
            f"Body was too long ({original_body_length} characters) and was shortened"
        )

    if issue is None:
        # No open tracking issue yet: open a fresh one.
        header = f"**CI failed on {link}** ({date_str})"
        issue = issue_repo.create_issue(title=title, body=f"{header}\n{body}")
        print(f"Created issue in {args.issue_repo}#{issue.number}")
    else:
        # Refresh the existing issue's title and body with the latest failure.
        header = f"**CI is still failing on {link}** ({date_str})"
        issue.edit(title=title, body=f"{header}\n{body}")
        print(f"Commented on issue: {args.issue_repo}#{issue.number}")
    sys.exit()
def close_issue_if_opened():
    """Report a successful run on any open tracking issue and exit.

    Updates (or creates) the success comment on the open issue and closes the
    issue when auto-close is enabled.
    """
    print("Test has no failures!")
    issue = get_issue()
    if issue is None:
        sys.exit()

    header_str = "## CI is no longer failing!"
    comment_str = f"{header_str} ✅\n\n[Successful run]({url}) on {date_str}"
    print(f"Commented on issue #{issue.number}")
    # Update an existing success comment in place (keeps the date fresh);
    # otherwise post a new one.
    existing = next(
        (c for c in issue.get_comments() if c.body.startswith(header_str)), None
    )
    if existing is not None:
        existing.edit(body=comment_str)
    else:
        issue.create_comment(body=comment_str)
    if args.auto_close.lower() == "true":
        print(f"Closing issue #{issue.number}")
        issue.edit(state="closed")
    sys.exit()
# Fast path: the caller reported the outcome directly via --tests-passed.
# Both helpers below call sys.exit(), so nothing past this block runs then.
if args.tests_passed is not None:
    if args.tests_passed.lower() == "true":
        close_issue_if_opened()
    else:
        create_or_update_issue()

# Otherwise derive the outcome from the JUnit XML report.
junit_path = Path(args.junit_file)
if not junit_path.exists():
    body = "Unable to find junit file. Please see link for details."
    create_or_update_issue(body)

# Find failures in junit file
tree = ET.parse(args.junit_file)
failure_cases = []

# Check if test collection failed
error = tree.find("./testsuite/testcase/error")
if error is not None:
    # Get information for test collection error
    failure_cases.append("Test Collection Failure")

# Collect the name of every test case that reports a <failure> element.
for item in tree.iter("testcase"):
    failure = item.find("failure")
    if failure is None:
        continue
    failure_cases.append(item.attrib["name"])

# No failures found: report success (and possibly close the issue).
if not failure_cases:
    close_issue_if_opened()

# Create content for issue: one bullet per failing test case.
body_list = [f"- {case}" for case in failure_cases]
body = "\n".join(body_list)
create_or_update_issue(body)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/maint_tools/check_xfailed_checks.py | maint_tools/check_xfailed_checks.py | # This script checks that the common tests marked with xfail are actually
# failing.
# Note that in some cases, a test might be marked with xfail because it is
# failing on certain machines, and might not be triggered by this script.
import contextlib
import io
from sklearn.utils._test_common.instance_generator import (
_get_expected_failed_checks,
_tested_estimators,
)
from sklearn.utils.estimator_checks import check_estimator
# Compare each estimator's actual check failures against its declared
# expected (xfail) failures, printing both unexpected failures and expected
# failures that no longer occur.
for estimator in _tested_estimators():
    # calling check_estimator w/o passing expected_failed_checks will find
    # all the failing tests in your environment.
    # suppress stdout/stderr while running checks
    with (
        contextlib.redirect_stdout(io.StringIO()),
        contextlib.redirect_stderr(io.StringIO()),
    ):
        check_results = check_estimator(estimator, on_skip=None, on_fail=None)
    failed_tests = [e for e in check_results if e["status"] == "failed"]
    failed_test_names = set(e["check_name"] for e in failed_tests)
    expected_failed_tests = set(_get_expected_failed_checks(estimator).keys())
    # Checks that failed but are not marked as expected failures.
    unexpected_failures = failed_test_names - expected_failed_tests
    if unexpected_failures:
        print(f"{estimator.__class__.__name__} failed with unexpected failures:")
        for failure in unexpected_failures:
            print(f"  {failure}")
    # Checks marked as expected failures that actually passed now.
    expected_but_not_raised = expected_failed_tests - failed_test_names
    if expected_but_not_raised:
        print(f"{estimator.__class__.__name__} did not fail expected failures:")
        for failure in expected_but_not_raised:
            print(f"  {failure}")
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/release_highlights/plot_release_highlights_1_5_0.py | examples/release_highlights/plot_release_highlights_1_5_0.py | # ruff: noqa: CPY001
"""
=======================================
Release Highlights for scikit-learn 1.5
=======================================
.. currentmodule:: sklearn
We are pleased to announce the release of scikit-learn 1.5! Many bug fixes
and improvements were added, as well as some key new features. Below we
detail the highlights of this release. **For an exhaustive list of
all the changes**, please refer to the :ref:`release notes <release_notes_1_5>`.
To install the latest version (with pip)::
pip install --upgrade scikit-learn
or with conda::
conda install -c conda-forge scikit-learn
"""
# %%
# FixedThresholdClassifier: Setting the decision threshold of a binary classifier
# -------------------------------------------------------------------------------
# All binary classifiers of scikit-learn use a fixed decision threshold of 0.5
# to convert probability estimates (i.e. output of `predict_proba`) into class
# predictions. However, 0.5 is almost never the desired threshold for a given
# problem. :class:`~model_selection.FixedThresholdClassifier` allows wrapping any
# binary classifier and setting a custom decision threshold.
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
X, y = make_classification(n_samples=10_000, weights=[0.9, 0.1], random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
classifier_05 = LogisticRegression(C=1e6, random_state=0).fit(X_train, y_train)
_ = ConfusionMatrixDisplay.from_estimator(classifier_05, X_test, y_test)
# %%
# Lowering the threshold, i.e. allowing more samples to be classified as the positive
# class, increases the number of true positives at the cost of more false positives
# (as is well known from the concavity of the ROC curve).
from sklearn.model_selection import FixedThresholdClassifier
classifier_01 = FixedThresholdClassifier(classifier_05, threshold=0.1)
classifier_01.fit(X_train, y_train)
_ = ConfusionMatrixDisplay.from_estimator(classifier_01, X_test, y_test)
# %%
# TunedThresholdClassifierCV: Tuning the decision threshold of a binary classifier
# --------------------------------------------------------------------------------
# The decision threshold of a binary classifier can be tuned to optimize a
# given metric, using :class:`~model_selection.TunedThresholdClassifierCV`.
#
# It is particularly useful to find the best decision threshold when the model
# is meant to be deployed in a specific application context where we can assign
# different gains or costs for true positives, true negatives, false positives,
# and false negatives.
#
# Let's illustrate this by considering an arbitrary case where:
#
# - each true positive gains 1 unit of profit, e.g. euro, year of life in good
# health, etc.;
# - true negatives gain or cost nothing;
# - each false negative costs 2;
# - each false positive costs 0.1.
#
# Our metric quantifies the average profit per sample, which is defined by the
# following Python function:
from sklearn.metrics import confusion_matrix
def custom_score(y_observed, y_pred):
    """Business metric: average profit per sample.

    Each true positive gains 1, each false negative costs 2, each false
    positive costs 0.1, and true negatives are neutral. Rates come from the
    confusion matrix normalized over all samples, so the result is an average
    gain per prediction.
    """
    tn, fp, fn, tp = confusion_matrix(y_observed, y_pred, normalize="all").ravel()
    profit = tp
    profit = profit - 2 * fn
    profit = profit - 0.1 * fp
    return profit
print("Untuned decision threshold: 0.5")
print(f"Custom score: {custom_score(y_test, classifier_05.predict(X_test)):.2f}")
# %%
# It is interesting to observe that the average gain per prediction is negative
# which means that this decision system is making a loss on average.
#
# Tuning the threshold to optimize this custom metric gives a smaller threshold
# that allows more samples to be classified as the positive class. As a result,
# the average gain per prediction improves.
from sklearn.metrics import make_scorer
from sklearn.model_selection import TunedThresholdClassifierCV
custom_scorer = make_scorer(
custom_score, response_method="predict", greater_is_better=True
)
tuned_classifier = TunedThresholdClassifierCV(
classifier_05, cv=5, scoring=custom_scorer
).fit(X, y)
print(f"Tuned decision threshold: {tuned_classifier.best_threshold_:.3f}")
print(f"Custom score: {custom_score(y_test, tuned_classifier.predict(X_test)):.2f}")
# %%
# We observe that tuning the decision threshold can turn a machine
# learning-based system that makes a loss on average into a beneficial one.
#
# In practice, defining a meaningful application-specific metric might involve
# making those costs for bad predictions and gains for good predictions depend on
# auxiliary metadata specific to each individual data point such as the amount
# of a transaction in a fraud detection system.
#
# To achieve this, :class:`~model_selection.TunedThresholdClassifierCV`
# leverages metadata routing support (:ref:`Metadata Routing User
# Guide<metadata_routing>`) allowing to optimize complex business metrics as
# detailed in :ref:`Post-tuning the decision threshold for cost-sensitive
# learning
# <sphx_glr_auto_examples_model_selection_plot_cost_sensitive_learning.py>`.
# %%
# Performance improvements in PCA
# -------------------------------
# :class:`~decomposition.PCA` has a new solver, `"covariance_eigh"`, which is
# up to an order of magnitude faster and more memory efficient than the other
# solvers for datasets with many data points and few features.
from sklearn.datasets import make_low_rank_matrix
from sklearn.decomposition import PCA
X = make_low_rank_matrix(
n_samples=10_000, n_features=100, tail_strength=0.1, random_state=0
)
pca = PCA(n_components=10, svd_solver="covariance_eigh").fit(X)
print(f"Explained variance: {pca.explained_variance_ratio_.sum():.2f}")
# %%
# The new solver also accepts sparse input data:
from scipy.sparse import random
X = random(10_000, 100, format="csr", random_state=0)
pca = PCA(n_components=10, svd_solver="covariance_eigh").fit(X)
print(f"Explained variance: {pca.explained_variance_ratio_.sum():.2f}")
# %%
# The `"full"` solver has also been improved to use less memory and allows
# faster transformation. The default `svd_solver="auto"` option takes
# advantage of the new solver and is now able to select an appropriate solver
# for sparse datasets.
#
# Similarly to most other PCA solvers, the new `"covariance_eigh"` solver can leverage
# GPU computation if the input data is passed as a PyTorch or CuPy array by
# enabling the experimental support for :ref:`Array API <array_api>`.
# %%
# ColumnTransformer is subscriptable
# ----------------------------------
# The transformers of a :class:`~compose.ColumnTransformer` can now be directly
# accessed using indexing by name.
import numpy as np
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder, StandardScaler
X = np.array([[0, 1, 2], [3, 4, 5]])
column_transformer = ColumnTransformer(
[("std_scaler", StandardScaler(), [0]), ("one_hot", OneHotEncoder(), [1, 2])]
)
column_transformer.fit(X)
print(column_transformer["std_scaler"])
print(column_transformer["one_hot"])
# %%
# Custom imputation strategies for the SimpleImputer
# --------------------------------------------------
# :class:`~impute.SimpleImputer` now supports custom strategies for imputation,
# using a callable that computes a scalar value from the non missing values of
# a column vector.
from sklearn.impute import SimpleImputer
X = np.array(
[
[-1.1, 1.1, 1.1],
[3.9, -1.2, np.nan],
[np.nan, 1.3, np.nan],
[-0.1, -1.4, -1.4],
[-4.9, 1.5, -1.5],
[np.nan, 1.6, 1.6],
]
)
def smallest_abs(arr):
    """Return the smallest absolute value of a 1D array."""
    return np.abs(arr).min()
imputer = SimpleImputer(strategy=smallest_abs)
imputer.fit_transform(X)
# %%
# Pairwise distances with non-numeric arrays
# ------------------------------------------
# :func:`~metrics.pairwise_distances` can now compute distances between
# non-numeric arrays using a callable metric.
from sklearn.metrics import pairwise_distances
X = ["cat", "dog"]
Y = ["cat", "fox"]
def levenshtein_distance(x, y):
"""Return the Levenshtein distance between two strings."""
if x == "" or y == "":
return max(len(x), len(y))
if x[0] == y[0]:
return levenshtein_distance(x[1:], y[1:])
return 1 + min(
levenshtein_distance(x[1:], y),
levenshtein_distance(x, y[1:]),
levenshtein_distance(x[1:], y[1:]),
)
pairwise_distances(X, Y, metric=levenshtein_distance)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/release_highlights/plot_release_highlights_0_22_0.py | examples/release_highlights/plot_release_highlights_0_22_0.py | """
========================================
Release Highlights for scikit-learn 0.22
========================================
.. currentmodule:: sklearn
We are pleased to announce the release of scikit-learn 0.22, which comes
with many bug fixes and new features! We detail below a few of the major
features of this release. For an exhaustive list of all the changes, please
refer to the :ref:`release notes <release_notes_0_22>`.
To install the latest version (with pip)::
pip install --upgrade scikit-learn
or with conda::
conda install -c conda-forge scikit-learn
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# New plotting API
# ----------------
#
# A new plotting API is available for creating visualizations. This new API
# allows for quickly adjusting the visuals of a plot without involving any
# recomputation. It is also possible to add different plots to the same
# figure. The following example illustrates `plot_roc_curve`,
# but other plots utilities are supported like
# `plot_partial_dependence`,
# `plot_precision_recall_curve`, and
# `plot_confusion_matrix`. Read more about this new API in the
# :ref:`User Guide <visualizations>`.
import matplotlib
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
# from sklearn.metrics import plot_roc_curve
from sklearn.metrics import RocCurveDisplay
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.utils.fixes import parse_version
X, y = make_classification(random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
svc = SVC(random_state=42)
svc.fit(X_train, y_train)
rfc = RandomForestClassifier(random_state=42)
rfc.fit(X_train, y_train)
# plot_roc_curve has been removed in version 1.2. From 1.2, use RocCurveDisplay instead.
# svc_disp = plot_roc_curve(svc, X_test, y_test)
# rfc_disp = plot_roc_curve(rfc, X_test, y_test, ax=svc_disp.ax_)
svc_disp = RocCurveDisplay.from_estimator(svc, X_test, y_test)
rfc_disp = RocCurveDisplay.from_estimator(rfc, X_test, y_test, ax=svc_disp.ax_)
rfc_disp.figure_.suptitle("ROC curve comparison")
plt.show()
# %%
# Stacking Classifier and Regressor
# ---------------------------------
# :class:`~ensemble.StackingClassifier` and
# :class:`~ensemble.StackingRegressor`
# allow you to have a stack of estimators with a final classifier or
# a regressor.
# Stacked generalization consists in stacking the output of individual
# estimators and use a classifier to compute the final prediction. Stacking
# allows to use the strength of each individual estimator by using their output
# as input of a final estimator.
# Base estimators are fitted on the full ``X`` while
# the final estimator is trained using cross-validated predictions of the
# base estimators using ``cross_val_predict``.
#
# Read more in the :ref:`User Guide <stacking>`.
from sklearn.datasets import load_iris
from sklearn.ensemble import StackingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC
X, y = load_iris(return_X_y=True)
estimators = [
("rf", RandomForestClassifier(n_estimators=10, random_state=42)),
("svr", make_pipeline(StandardScaler(), LinearSVC(dual="auto", random_state=42))),
]
clf = StackingClassifier(estimators=estimators, final_estimator=LogisticRegression())
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42)
clf.fit(X_train, y_train).score(X_test, y_test)
# %%
# Permutation-based feature importance
# ------------------------------------
#
# The :func:`inspection.permutation_importance` can be used to get an
# estimate of the importance of each feature, for any fitted estimator:
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.inspection import permutation_importance
X, y = make_classification(random_state=0, n_features=5, n_informative=3)
feature_names = np.array([f"x_{i}" for i in range(X.shape[1])])
rf = RandomForestClassifier(random_state=0).fit(X, y)
result = permutation_importance(rf, X, y, n_repeats=10, random_state=0, n_jobs=2)
fig, ax = plt.subplots()
sorted_idx = result.importances_mean.argsort()
# `labels` argument in boxplot is deprecated in matplotlib 3.9 and has been
# renamed to `tick_labels`. The following code handles this, but as a
# scikit-learn user you probably can write simpler code by using `labels=...`
# (matplotlib < 3.9) or `tick_labels=...` (matplotlib >= 3.9).
tick_labels_parameter_name = (
"tick_labels"
if parse_version(matplotlib.__version__) >= parse_version("3.9")
else "labels"
)
tick_labels_dict = {tick_labels_parameter_name: feature_names[sorted_idx]}
ax.boxplot(result.importances[sorted_idx].T, vert=False, **tick_labels_dict)
ax.set_title("Permutation Importance of each feature")
ax.set_ylabel("Features")
fig.tight_layout()
plt.show()
# %%
# Native support for missing values for gradient boosting
# -------------------------------------------------------
#
# The :class:`ensemble.HistGradientBoostingClassifier`
# and :class:`ensemble.HistGradientBoostingRegressor` now have native
# support for missing values (NaNs). This means that there is no need for
# imputing data when training or predicting.
from sklearn.ensemble import HistGradientBoostingClassifier
X = np.array([0, 1, 2, np.nan]).reshape(-1, 1)
y = [0, 0, 1, 1]
gbdt = HistGradientBoostingClassifier(min_samples_leaf=1).fit(X, y)
print(gbdt.predict(X))
# %%
# Precomputed sparse nearest neighbors graph
# ------------------------------------------
# Most estimators based on nearest neighbors graphs now accept precomputed
# sparse graphs as input, to reuse the same graph for multiple estimator fits.
# To use this feature in a pipeline, one can use the `memory` parameter, along
# with one of the two new transformers,
# :class:`neighbors.KNeighborsTransformer` and
# :class:`neighbors.RadiusNeighborsTransformer`. The precomputation
# can also be performed by custom estimators to use alternative
# implementations, such as approximate nearest neighbors methods.
# See more details in the :ref:`User Guide <neighbors_transformer>`.
from tempfile import TemporaryDirectory
from sklearn.manifold import Isomap
from sklearn.neighbors import KNeighborsTransformer
from sklearn.pipeline import make_pipeline
X, y = make_classification(random_state=0)
with TemporaryDirectory(prefix="sklearn_cache_") as tmpdir:
estimator = make_pipeline(
KNeighborsTransformer(n_neighbors=10, mode="distance"),
Isomap(n_neighbors=10, metric="precomputed"),
memory=tmpdir,
)
estimator.fit(X)
# We can decrease the number of neighbors and the graph will not be
# recomputed.
estimator.set_params(isomap__n_neighbors=5)
estimator.fit(X)
# %%
# KNN Based Imputation
# ------------------------------------
# We now support imputation for completing missing values using k-Nearest
# Neighbors.
#
# Each sample's missing values are imputed using the mean value from
# ``n_neighbors`` nearest neighbors found in the training set. Two samples are
# close if the features that neither is missing are close.
# By default, a euclidean distance metric
# that supports missing values,
# :func:`~sklearn.metrics.pairwise.nan_euclidean_distances`, is used to find the nearest
# neighbors.
#
# Read more in the :ref:`User Guide <knnimpute>`.
from sklearn.impute import KNNImputer
X = [[1, 2, np.nan], [3, 4, 3], [np.nan, 6, 5], [8, 8, 7]]
imputer = KNNImputer(n_neighbors=2)
print(imputer.fit_transform(X))
# %%
# Tree pruning
# ------------
#
# It is now possible to prune most tree-based estimators once the trees are
# built. The pruning is based on minimal cost-complexity. Read more in the
# :ref:`User Guide <minimal_cost_complexity_pruning>` for details.
X, y = make_classification(random_state=0)
rf = RandomForestClassifier(random_state=0, ccp_alpha=0).fit(X, y)
print(
"Average number of nodes without pruning {:.1f}".format(
np.mean([e.tree_.node_count for e in rf.estimators_])
)
)
rf = RandomForestClassifier(random_state=0, ccp_alpha=0.05).fit(X, y)
print(
"Average number of nodes with pruning {:.1f}".format(
np.mean([e.tree_.node_count for e in rf.estimators_])
)
)
# %%
# Retrieve dataframes from OpenML
# -------------------------------
# :func:`datasets.fetch_openml` can now return pandas dataframe and thus
# properly handle datasets with heterogeneous data:
from sklearn.datasets import fetch_openml
titanic = fetch_openml("titanic", version=1, as_frame=True, parser="pandas")
print(titanic.data.head()[["pclass", "embarked"]])
# %%
# Checking scikit-learn compatibility of an estimator
# ---------------------------------------------------
# Developers can check the compatibility of their scikit-learn compatible
# estimators using :func:`~utils.estimator_checks.check_estimator`. For
# instance, the ``check_estimator(LinearSVC())`` passes.
#
# We now provide a ``pytest`` specific decorator which allows ``pytest``
# to run all checks independently and report the checks that are failing.
#
# ..note::
# This entry was slightly updated in version 0.24, where passing classes
# isn't supported anymore: pass instances instead.
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.utils.estimator_checks import parametrize_with_checks
@parametrize_with_checks([LogisticRegression(), DecisionTreeRegressor()])
def test_sklearn_compatible_estimator(estimator, check):
    # The decorator expands each (estimator, check) pair into its own pytest
    # test case; the body just runs the supplied check on the estimator.
    check(estimator)
# %%
# ROC AUC now supports multiclass classification
# ----------------------------------------------
# The :func:`~sklearn.metrics.roc_auc_score` function can also be used in multi-class
# classification. Two averaging strategies are currently supported: the
# one-vs-one algorithm computes the average of the pairwise ROC AUC scores, and
# the one-vs-rest algorithm computes the average of the ROC AUC scores for each
# class against all other classes. In both cases, the multiclass ROC AUC scores
# are computed from the probability estimates that a sample belongs to a
# particular class according to the model. The OvO and OvR algorithms support
# weighting uniformly (``average='macro'``) and weighting by the prevalence
# (``average='weighted'``).
#
# Read more in the :ref:`User Guide <roc_metrics>`.
from sklearn.datasets import make_classification
from sklearn.metrics import roc_auc_score
from sklearn.svm import SVC
X, y = make_classification(n_classes=4, n_informative=16)
clf = SVC(decision_function_shape="ovo", probability=True).fit(X, y)
print(roc_auc_score(y, clf.predict_proba(X), multi_class="ovo"))
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/release_highlights/plot_release_highlights_1_3_0.py | examples/release_highlights/plot_release_highlights_1_3_0.py | # ruff: noqa: CPY001
"""
=======================================
Release Highlights for scikit-learn 1.3
=======================================
.. currentmodule:: sklearn
We are pleased to announce the release of scikit-learn 1.3! Many bug fixes
and improvements were added, as well as some new key features. We detail
below a few of the major features of this release. **For an exhaustive list of
all the changes**, please refer to the :ref:`release notes <release_notes_1_3>`.
To install the latest version (with pip)::
pip install --upgrade scikit-learn
or with conda::
conda install -c conda-forge scikit-learn
"""
# %%
# Metadata Routing
# ----------------
# We are in the process of introducing a new way to route metadata such as
# ``sample_weight`` throughout the codebase, which would affect how
# meta-estimators such as :class:`pipeline.Pipeline` and
# :class:`model_selection.GridSearchCV` route metadata. While the
# infrastructure for this feature is already included in this release, the work
# is ongoing and not all meta-estimators support this new feature. You can read
# more about this feature in the :ref:`Metadata Routing User Guide
# <metadata_routing>`. Note that this feature is still under development and
# not implemented for most meta-estimators.
#
# Third party developers can already start incorporating this into their
# meta-estimators. For more details, see
# :ref:`metadata routing developer guide
# <sphx_glr_auto_examples_miscellaneous_plot_metadata_routing.py>`.
# %%
# HDBSCAN: hierarchical density-based clustering
# ----------------------------------------------
# Originally hosted in the scikit-learn-contrib repository, :class:`cluster.HDBSCAN`
# has been adpoted into scikit-learn. It's missing a few features from the original
# implementation which will be added in future releases.
# By performing a modified version of :class:`cluster.DBSCAN` over multiple epsilon
# values simultaneously, :class:`cluster.HDBSCAN` finds clusters of varying densities
# making it more robust to parameter selection than :class:`cluster.DBSCAN`.
# More details in the :ref:`User Guide <hdbscan>`.
import numpy as np
from sklearn.cluster import HDBSCAN
from sklearn.datasets import load_digits
from sklearn.metrics import v_measure_score
X, true_labels = load_digits(return_X_y=True)
print(f"number of digits: {len(np.unique(true_labels))}")
hdbscan = HDBSCAN(min_cluster_size=15, copy=True).fit(X)
non_noisy_labels = hdbscan.labels_[hdbscan.labels_ != -1]
print(f"number of clusters found: {len(np.unique(non_noisy_labels))}")
print(v_measure_score(true_labels[hdbscan.labels_ != -1], non_noisy_labels))
# %%
# TargetEncoder: a new category encoding strategy
# -----------------------------------------------
# Well suited for categorical features with high cardinality,
# :class:`preprocessing.TargetEncoder` encodes the categories based on a shrunk
# estimate of the average target values for observations belonging to that category.
# More details in the :ref:`User Guide <target_encoder>`.
import numpy as np
from sklearn.preprocessing import TargetEncoder
X = np.array([["cat"] * 30 + ["dog"] * 20 + ["snake"] * 38], dtype=object).T
y = [90.3] * 30 + [20.4] * 20 + [21.2] * 38
enc = TargetEncoder(random_state=0)
X_trans = enc.fit_transform(X, y)
enc.encodings_
# %%
# Missing values support in decision trees
# ----------------------------------------
# The classes :class:`tree.DecisionTreeClassifier` and
# :class:`tree.DecisionTreeRegressor` now support missing values. For each potential
# threshold on the non-missing data, the splitter will evaluate the split with all the
# missing values going to the left node or the right node.
# See more details in the :ref:`User Guide <tree_missing_value_support>` or see
# :ref:`sphx_glr_auto_examples_ensemble_plot_hgbt_regression.py` for a usecase
# example of this feature in :class:`~ensemble.HistGradientBoostingRegressor`.
import numpy as np
from sklearn.tree import DecisionTreeClassifier
X = np.array([0, 1, 6, np.nan]).reshape(-1, 1)
y = [0, 0, 1, 1]
tree = DecisionTreeClassifier(random_state=0).fit(X, y)
tree.predict(X)
# %%
# New display :class:`~model_selection.ValidationCurveDisplay`
# ------------------------------------------------------------
# :class:`model_selection.ValidationCurveDisplay` is now available to plot results
# from :func:`model_selection.validation_curve`.
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import ValidationCurveDisplay
X, y = make_classification(1000, 10, random_state=0)
_ = ValidationCurveDisplay.from_estimator(
LogisticRegression(),
X,
y,
param_name="C",
param_range=np.geomspace(1e-5, 1e3, num=9),
score_type="both",
score_name="Accuracy",
)
# %%
# Gamma loss for gradient boosting
# --------------------------------
# The class :class:`ensemble.HistGradientBoostingRegressor` supports the
# Gamma deviance loss function via `loss="gamma"`. This loss function is useful for
# modeling strictly positive targets with a right-skewed distribution.
import numpy as np
from sklearn.datasets import make_low_rank_matrix
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.model_selection import cross_val_score
n_samples, n_features = 500, 10
rng = np.random.RandomState(0)
X = make_low_rank_matrix(n_samples, n_features, random_state=rng)
coef = rng.uniform(low=-10, high=20, size=n_features)
y = rng.gamma(shape=2, scale=np.exp(X @ coef) / 2)
gbdt = HistGradientBoostingRegressor(loss="gamma")
cross_val_score(gbdt, X, y).mean()
# %%
# Grouping infrequent categories in :class:`~preprocessing.OrdinalEncoder`
# ------------------------------------------------------------------------
# Similarly to :class:`preprocessing.OneHotEncoder`, the class
# :class:`preprocessing.OrdinalEncoder` now supports aggregating infrequent categories
# into a single output for each feature. The parameters to enable the gathering of
# infrequent categories are `min_frequency` and `max_categories`.
# See the :ref:`User Guide <encoder_infrequent_categories>` for more details.
import numpy as np
from sklearn.preprocessing import OrdinalEncoder
X = np.array(
[["dog"] * 5 + ["cat"] * 20 + ["rabbit"] * 10 + ["snake"] * 3], dtype=object
).T
enc = OrdinalEncoder(min_frequency=6).fit(X)
enc.infrequent_categories_
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/release_highlights/plot_release_highlights_1_7_0.py | examples/release_highlights/plot_release_highlights_1_7_0.py | # ruff: noqa: CPY001
"""
=======================================
Release Highlights for scikit-learn 1.7
=======================================
.. currentmodule:: sklearn
We are pleased to announce the release of scikit-learn 1.7! Many bug fixes
and improvements were added, as well as some key new features. Below we
detail the highlights of this release. **For an exhaustive list of
all the changes**, please refer to the :ref:`release notes <release_notes_1_7>`.
To install the latest version (with pip)::
pip install --upgrade scikit-learn
or with conda::
conda install -c conda-forge scikit-learn
"""
# %%
# Improved estimator's HTML representation
# ----------------------------------------
# The HTML representation of estimators now includes a section containing the list of
# parameters and their values. Non-default parameters are highlighted in orange. A copy
# button is also available to copy the "fully-qualified" parameter name without the
# need to call the `get_params` method. It is particularly useful when defining a
# parameter grid for a grid-search or a randomized-search with a complex pipeline.
#
# See the example below and click on the different estimator's blocks to see the
# improved HTML representation.
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
model = make_pipeline(StandardScaler(with_std=False), LogisticRegression(C=2.0))
model
# %%
# Custom validation set for histogram-based Gradient Boosting estimators
# ----------------------------------------------------------------------
# The :class:`ensemble.HistGradientBoostingClassifier` and
# :class:`ensemble.HistGradientBoostingRegressor` now support directly passing a custom
# validation set for early stopping to the `fit` method, using the `X_val`, `y_val`, and
# `sample_weight_val` parameters.
# In a :class:`pipeline.Pipeline`, the validation set `X_val` can be transformed along
# with `X` using the `transform_input` parameter.
import sklearn
from sklearn.datasets import make_classification
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
sklearn.set_config(enable_metadata_routing=True)
X, y = make_classification(random_state=0)
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=0)
clf = HistGradientBoostingClassifier()
clf.set_fit_request(X_val=True, y_val=True)
model = Pipeline([("sc", StandardScaler()), ("clf", clf)], transform_input=["X_val"])
model.fit(X, y, X_val=X_val, y_val=y_val)
# %%
# Plotting ROC curves from cross-validation results
# -------------------------------------------------
# The class :class:`metrics.RocCurveDisplay` has a new class method `from_cv_results`
# that allows to easily plot multiple ROC curves from the results of
# :func:`model_selection.cross_validate`.
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import RocCurveDisplay
from sklearn.model_selection import cross_validate
X, y = make_classification(n_samples=150, random_state=0)
clf = LogisticRegression(random_state=0)
cv_results = cross_validate(clf, X, y, cv=5, return_estimator=True, return_indices=True)
_ = RocCurveDisplay.from_cv_results(cv_results, X, y)
# %%
# Array API support
# -----------------
# Several functions have been updated to support array API compatible inputs since
# version 1.6, especially metrics from the :mod:`sklearn.metrics` module.
#
# In addition, it is no longer required to install the `array-api-compat` package to use
# the experimental array API support in scikit-learn.
#
# Please refer to the :ref:`array API support<array_api>` page for instructions to use
# scikit-learn with array API compatible libraries such as PyTorch or CuPy.
# %%
# Improved API consistency of Multi-layer Perceptron
# --------------------------------------------------
# The :class:`neural_network.MLPRegressor` has a new parameter `loss` and now supports
# the "poisson" loss in addition to the default "squared_error" loss.
# Moreover, the :class:`neural_network.MLPClassifier` and
# :class:`neural_network.MLPRegressor` estimators now support sample weights.
# These improvements have been made to improve the consistency of these estimators
# with regard to the other estimators in scikit-learn.
# %%
# Migration toward sparse arrays
# ------------------------------
# In order to prepare `SciPy migration from sparse matrices to sparse arrays <https://docs.scipy.org/doc/scipy/reference/sparse.migration_to_sparray.html>`_,
# all scikit-learn estimators that accept sparse matrices as input now also accept
# sparse arrays.
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/release_highlights/plot_release_highlights_1_0_0.py | examples/release_highlights/plot_release_highlights_1_0_0.py | # ruff: noqa: CPY001
"""
=======================================
Release Highlights for scikit-learn 1.0
=======================================
.. currentmodule:: sklearn
We are very pleased to announce the release of scikit-learn 1.0! The library
has been stable for quite some time, releasing version 1.0 is recognizing that
and signalling it to our users. This release does not include any breaking
changes apart from the usual two-release deprecation cycle. For the future, we
do our best to keep this pattern.
This release includes some new key features as well as many improvements and
bug fixes. We detail below a few of the major features of this release. **For
an exhaustive list of all the changes**, please refer to the :ref:`release
notes <release_notes_1_0>`.
To install the latest version (with pip)::
pip install --upgrade scikit-learn
or with conda::
conda install -c conda-forge scikit-learn
"""
##############################################################################
# Keyword and positional arguments
# ---------------------------------------------------------
# The scikit-learn API exposes many functions and methods which have many input
# parameters. For example, before this release, one could instantiate a
# :class:`~ensemble.HistGradientBoostingRegressor` as::
#
# HistGradientBoostingRegressor("squared_error", 0.1, 100, 31, None,
# 20, 0.0, 255, None, None, False, "auto", "loss", 0.1, 10, 1e-7,
# 0, None)
#
# Understanding the above code requires the reader to go to the API
# documentation and to check each and every parameter for its position and
# its meaning. To improve the readability of code written based on scikit-learn,
# now users have to provide most parameters with their names, as keyword
# arguments, instead of positional arguments. For example, the above code would
# be::
#
# HistGradientBoostingRegressor(
# loss="squared_error",
# learning_rate=0.1,
# max_iter=100,
# max_leaf_nodes=31,
# max_depth=None,
# min_samples_leaf=20,
# l2_regularization=0.0,
# max_bins=255,
# categorical_features=None,
# monotonic_cst=None,
# warm_start=False,
# early_stopping="auto",
# scoring="loss",
# validation_fraction=0.1,
# n_iter_no_change=10,
# tol=1e-7,
# verbose=0,
# random_state=None,
# )
#
# which is much more readable. Positional arguments have been deprecated since
# version 0.23 and will now raise a ``TypeError``. A limited number of
# positional arguments are still allowed in some cases, for example in
# :class:`~decomposition.PCA`, where ``PCA(10)`` is still allowed, but ``PCA(10,
# False)`` is not allowed.
##############################################################################
# Spline Transformers
# ---------------------------------------------------------
# One way to add nonlinear terms to a dataset's feature set is to generate
# spline basis functions for continuous/numerical features with the new
# :class:`~preprocessing.SplineTransformer`. Splines are piecewise polynomials,
# parametrized by their polynomial degree and the positions of the knots. The
# :class:`~preprocessing.SplineTransformer` implements a B-spline basis.
#
# .. figure:: ../linear_model/images/sphx_glr_plot_polynomial_interpolation_001.png
# :target: ../linear_model/plot_polynomial_interpolation.html
# :align: center
#
# The following code shows splines in action, for more information, please
# refer to the :ref:`User Guide <spline_transformer>`.
import numpy as np
from sklearn.preprocessing import SplineTransformer
X = np.arange(5).reshape(5, 1)
spline = SplineTransformer(degree=2, n_knots=3)
spline.fit_transform(X)
##############################################################################
# Quantile Regressor
# --------------------------------------------------------------------------
# Quantile regression estimates the median or other quantiles of :math:`y`
# conditional on :math:`X`, while ordinary least squares (OLS) estimates the
# conditional mean.
#
# As a linear model, the new :class:`~linear_model.QuantileRegressor` gives
# linear predictions :math:`\hat{y}(w, X) = Xw` for the :math:`q`-th quantile,
# :math:`q \in (0, 1)`. The weights or coefficients :math:`w` are then found by
# the following minimization problem:
#
# .. math::
# \min_{w} {\frac{1}{n_{\text{samples}}}
# \sum_i PB_q(y_i - X_i w) + \alpha ||w||_1}.
#
# This consists of the pinball loss (also known as linear loss),
# see also :class:`~sklearn.metrics.mean_pinball_loss`,
#
# .. math::
# PB_q(t) = q \max(t, 0) + (1 - q) \max(-t, 0) =
# \begin{cases}
# q t, & t > 0, \\
# 0, & t = 0, \\
# (1-q) t, & t < 0
# \end{cases}
#
# and the L1 penalty controlled by parameter ``alpha``, similar to
# :class:`linear_model.Lasso`.
#
# Please check the following example to see how it works, and the :ref:`User
# Guide <quantile_regression>` for more details.
#
# .. figure:: ../linear_model/images/sphx_glr_plot_quantile_regression_002.png
# :target: ../linear_model/plot_quantile_regression.html
# :align: center
# :scale: 50%
##############################################################################
# Feature Names Support
# --------------------------------------------------------------------------
# When an estimator is passed a `pandas' dataframe
# <https://pandas.pydata.org/docs/user_guide/dsintro.html#dataframe>`_ during
# :term:`fit`, the estimator will set a `feature_names_in_` attribute
# containing the feature names. This is a part of
# `SLEP007 <https://scikit-learn-enhancement-proposals.readthedocs.io/en/latest/slep007/proposal.html>`__.
# Note that feature names support is only enabled
# when the column names in the dataframe are all strings. `feature_names_in_`
# is used to check that the column names of the dataframe passed in
# non-:term:`fit`, such as :term:`predict`, are consistent with features in
# :term:`fit`:
import pandas as pd
from sklearn.preprocessing import StandardScaler
X = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=["a", "b", "c"])
scalar = StandardScaler().fit(X)
scalar.feature_names_in_
# %%
# The support of :term:`get_feature_names_out` is available for transformers
# that already had `get_feature_names` and transformers with a one-to-one
# correspondence between input and output such as
# :class:`~preprocessing.StandardScaler`. :term:`get_feature_names_out` support
# will be added to all other transformers in future releases. Additionally,
# :meth:`compose.ColumnTransformer.get_feature_names_out` is available to
# combine feature names of its transformers:
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
X = pd.DataFrame({"pet": ["dog", "cat", "fish"], "age": [3, 7, 1]})
preprocessor = ColumnTransformer(
[
("numerical", StandardScaler(), ["age"]),
("categorical", OneHotEncoder(), ["pet"]),
],
verbose_feature_names_out=False,
).fit(X)
preprocessor.get_feature_names_out()
# %%
# When this ``preprocessor`` is used with a pipeline, the feature names used
# by the classifier are obtained by slicing and calling
# :term:`get_feature_names_out`:
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
y = [1, 0, 1]
pipe = make_pipeline(preprocessor, LogisticRegression())
pipe.fit(X, y)
pipe[:-1].get_feature_names_out()
##############################################################################
# A more flexible plotting API
# --------------------------------------------------------------------------
# :class:`metrics.ConfusionMatrixDisplay`,
# :class:`metrics.PrecisionRecallDisplay`, :class:`metrics.DetCurveDisplay`,
# and :class:`inspection.PartialDependenceDisplay` now expose two class
# methods: `from_estimator` and `from_predictions` which allow users to create
# a plot given the predictions or an estimator. This means the corresponding
# `plot_*` functions are deprecated. Please check :ref:`example one
# <sphx_glr_auto_examples_model_selection_plot_confusion_matrix.py>` and
# :ref:`example two
# <sphx_glr_auto_examples_classification_plot_digits_classification.py>` for
# how to use the new plotting functionalities.
##############################################################################
# Online One-Class SVM
# --------------------------------------------------------------------------
# The new class :class:`~linear_model.SGDOneClassSVM` implements an online
# linear version of the One-Class SVM using a stochastic gradient descent.
# Combined with kernel approximation techniques,
# :class:`~linear_model.SGDOneClassSVM` can be used to approximate the solution
# of a kernelized One-Class SVM, implemented in :class:`~svm.OneClassSVM`, with
# a fit time complexity linear in the number of samples. Note that the
# complexity of a kernelized One-Class SVM is at best quadratic in the number
# of samples. :class:`~linear_model.SGDOneClassSVM` is thus well suited for
# datasets with a large number of training samples (> 10,000) for which the SGD
# variant can be several orders of magnitude faster. Please check this
# :ref:`example
# <sphx_glr_auto_examples_miscellaneous_plot_anomaly_comparison.py>` to see how
# it's used, and the :ref:`User Guide <sgd_online_one_class_svm>` for more
# details.
#
# .. figure:: ../miscellaneous/images/sphx_glr_plot_anomaly_comparison_001.png
# :target: ../miscellaneous/plot_anomaly_comparison.html
# :align: center
##############################################################################
# Histogram-based Gradient Boosting Models are now stable
# --------------------------------------------------------------------------
# :class:`~sklearn.ensemble.HistGradientBoostingRegressor` and
# :class:`~ensemble.HistGradientBoostingClassifier` are no longer experimental
# and can simply be imported and used as::
#
# from sklearn.ensemble import HistGradientBoostingClassifier
##############################################################################
# New documentation improvements
# ------------------------------
# This release includes many documentation improvements. Out of over 2100
# merged pull requests, about 800 of them are improvements to our
# documentation.
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/release_highlights/plot_release_highlights_1_2_0.py | examples/release_highlights/plot_release_highlights_1_2_0.py | # ruff: noqa: CPY001, E501
"""
=======================================
Release Highlights for scikit-learn 1.2
=======================================
.. currentmodule:: sklearn
We are pleased to announce the release of scikit-learn 1.2! Many bug fixes
and improvements were added, as well as some new key features. We detail
below a few of the major features of this release. **For an exhaustive list of
all the changes**, please refer to the :ref:`release notes <release_notes_1_2>`.
To install the latest version (with pip)::
pip install --upgrade scikit-learn
or with conda::
conda install -c conda-forge scikit-learn
"""
# %%
# Pandas output with `set_output` API
# -----------------------------------
# scikit-learn's transformers now support pandas output with the `set_output` API.
# To learn more about the `set_output` API see the example:
# :ref:`sphx_glr_auto_examples_miscellaneous_plot_set_output.py` and
# this `video, pandas DataFrame output for scikit-learn transformers
# (some examples) <https://youtu.be/5bCg8VfX2x8>`__.
import numpy as np
from sklearn.compose import ColumnTransformer
from sklearn.datasets import load_iris
from sklearn.preprocessing import KBinsDiscretizer, StandardScaler
X, y = load_iris(as_frame=True, return_X_y=True)
sepal_cols = ["sepal length (cm)", "sepal width (cm)"]
petal_cols = ["petal length (cm)", "petal width (cm)"]
preprocessor = ColumnTransformer(
[
("scaler", StandardScaler(), sepal_cols),
(
"kbin",
KBinsDiscretizer(encode="ordinal", quantile_method="averaged_inverted_cdf"),
petal_cols,
),
],
verbose_feature_names_out=False,
).set_output(transform="pandas")
X_out = preprocessor.fit_transform(X)
X_out.sample(n=5, random_state=0)
# %%
# Interaction constraints in Histogram-based Gradient Boosting Trees
# ------------------------------------------------------------------
# :class:`~ensemble.HistGradientBoostingRegressor` and
# :class:`~ensemble.HistGradientBoostingClassifier` now support interaction constraints
# with the `interaction_cst` parameter. For details, see the
# :ref:`User Guide <interaction_cst_hgbt>`. In the following example, features are not
# allowed to interact.
from sklearn.datasets import load_diabetes
from sklearn.ensemble import HistGradientBoostingRegressor
X, y = load_diabetes(return_X_y=True, as_frame=True)
hist_no_interact = HistGradientBoostingRegressor(
interaction_cst=[[i] for i in range(X.shape[1])], random_state=0
)
hist_no_interact.fit(X, y)
# %%
# New and enhanced displays
# -------------------------
# :class:`~metrics.PredictionErrorDisplay` provides a way to analyze regression
# models in a qualitative manner.
import matplotlib.pyplot as plt
from sklearn.metrics import PredictionErrorDisplay
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(12, 5))
_ = PredictionErrorDisplay.from_estimator(
hist_no_interact, X, y, kind="actual_vs_predicted", ax=axs[0]
)
_ = PredictionErrorDisplay.from_estimator(
hist_no_interact, X, y, kind="residual_vs_predicted", ax=axs[1]
)
# %%
# :class:`~model_selection.LearningCurveDisplay` is now available to plot
# results from :func:`~model_selection.learning_curve`.
from sklearn.model_selection import LearningCurveDisplay
_ = LearningCurveDisplay.from_estimator(
hist_no_interact, X, y, cv=5, n_jobs=2, train_sizes=np.linspace(0.1, 1, 5)
)
# %%
# :class:`~inspection.PartialDependenceDisplay` exposes a new parameter
# `categorical_features` to display partial dependence for categorical features
# using bar plots and heatmaps.
from sklearn.datasets import fetch_openml
X, y = fetch_openml(
"titanic", version=1, as_frame=True, return_X_y=True, parser="pandas"
)
X = X.select_dtypes(["number", "category"]).drop(columns=["body"])
# %%
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OrdinalEncoder
categorical_features = ["pclass", "sex", "embarked"]
model = make_pipeline(
ColumnTransformer(
transformers=[("cat", OrdinalEncoder(), categorical_features)],
remainder="passthrough",
),
HistGradientBoostingRegressor(random_state=0),
).fit(X, y)
# %%
from sklearn.inspection import PartialDependenceDisplay
fig, ax = plt.subplots(figsize=(14, 4), constrained_layout=True)
_ = PartialDependenceDisplay.from_estimator(
model,
X,
features=["age", "sex", ("pclass", "sex")],
categorical_features=categorical_features,
ax=ax,
)
# %%
# Faster parser in :func:`~datasets.fetch_openml`
# -----------------------------------------------
# :func:`~datasets.fetch_openml` now supports a new `"pandas"` parser that is
# more memory and CPU efficient. In v1.4, the default will change to
# `parser="auto"` which will automatically use the `"pandas"` parser for dense
# data and `"liac-arff"` for sparse data.
X, y = fetch_openml(
"titanic", version=1, as_frame=True, return_X_y=True, parser="pandas"
)
X.head()
# %%
# Experimental Array API support in :class:`~discriminant_analysis.LinearDiscriminantAnalysis`
# --------------------------------------------------------------------------------------------
# Experimental support for the `Array API <https://data-apis.org/array-api/latest/>`_
# specification was added to :class:`~discriminant_analysis.LinearDiscriminantAnalysis`.
# The estimator can now run on any Array API compliant libraries such as
# `CuPy <https://docs.cupy.dev/en/stable/overview.html>`__, a GPU-accelerated array
# library. For details, see the :ref:`User Guide <array_api>`.
# %%
# Improved efficiency of many estimators
# --------------------------------------
# In version 1.1 the efficiency of many estimators relying on the computation of
# pairwise distances (essentially estimators related to clustering, manifold
# learning and neighbors search algorithms) was greatly improved for float64
# dense input. Efficiency improvement especially were a reduced memory footprint
# and a much better scalability on multi-core machines.
# In version 1.2, the efficiency of these estimators was further improved for all
# combinations of dense and sparse inputs on float32 and float64 datasets, except
# the sparse-dense and dense-sparse combinations for the Euclidean and Squared
# Euclidean Distance metrics.
# A detailed list of the impacted estimators can be found in the
# :ref:`changelog <release_notes_1_2>`.
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/release_highlights/plot_release_highlights_1_6_0.py | examples/release_highlights/plot_release_highlights_1_6_0.py | # ruff: noqa: CPY001, E501
"""
=======================================
Release Highlights for scikit-learn 1.6
=======================================
.. currentmodule:: sklearn
We are pleased to announce the release of scikit-learn 1.6! Many bug fixes
and improvements were added, as well as some key new features. Below we
detail the highlights of this release. **For an exhaustive list of
all the changes**, please refer to the :ref:`release notes <release_notes_1_6>`.
To install the latest version (with pip)::
pip install --upgrade scikit-learn
or with conda::
conda install -c conda-forge scikit-learn
"""
# %%
# FrozenEstimator: Freezing an estimator
# --------------------------------------
#
# This meta-estimator allows you to take an estimator and freeze its fit method, meaning
# that calling `fit` does not perform any operations; also, `fit_predict` and
# `fit_transform` call `predict` and `transform` respectively without calling `fit`. The
# original estimator's other methods and properties are left unchanged. An interesting
# use case for this is to use a pre-fitted model as a transformer step in a pipeline
# or to pass a pre-fitted model to some of the meta-estimators. Here's a short example:
import time
from sklearn.datasets import make_classification
from sklearn.frozen import FrozenEstimator
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import FixedThresholdClassifier
X, y = make_classification(n_samples=1000, random_state=0)
start = time.time()
classifier = SGDClassifier().fit(X, y)
print(f"Fitting the classifier took {(time.time() - start) * 1_000:.2f} milliseconds")
start = time.time()
threshold_classifier = FixedThresholdClassifier(
estimator=FrozenEstimator(classifier), threshold=0.9
).fit(X, y)
print(
f"Fitting the threshold classifier took {(time.time() - start) * 1_000:.2f} "
"milliseconds"
)
# %%
# Fitting the threshold classifier skipped fitting the inner `SGDClassifier`. For more
# details refer to the example :ref:`sphx_glr_auto_examples_frozen_plot_frozen_examples.py`.
# %%
# Transforming data other than X in a Pipeline
# --------------------------------------------
#
# The :class:`~pipeline.Pipeline` now supports transforming passed data other than `X`
# if necessary. This can be done by setting the new `transform_input` parameter. This
# is particularly useful when passing a validation set through the pipeline.
#
# As an example, imagine `EstimatorWithValidationSet` is an estimator which accepts
# a validation set. We can now have a pipeline which will transform the validation set
# and pass it to the estimator::
#
# with sklearn.config_context(enable_metadata_routing=True):
# est_gs = GridSearchCV(
# Pipeline(
# (
# StandardScaler(),
# EstimatorWithValidationSet(...).set_fit_request(X_val=True, y_val=True),
# ),
# # telling pipeline to transform these inputs up to the step which is
# # requesting them.
# transform_input=["X_val"],
# ),
# param_grid={"estimatorwithvalidationset__param_to_optimize": list(range(5))},
# cv=5,
# ).fit(X, y, X_val=X_val, y_val=y_val)
#
# In the above code, the key parts are the call to `set_fit_request` to specify that
# `X_val` and `y_val` are required by the `EstimatorWithValidationSet.fit` method, and
# the `transform_input` parameter to tell the pipeline to transform `X_val` before
# passing it to `EstimatorWithValidationSet.fit`.
#
# Note that at this time scikit-learn estimators have not yet been extended to accept
# user specified validation sets. This feature is released early to collect feedback
# from third-party libraries who might benefit from it.
# %%
# Multiclass support for `LogisticRegression(solver="newton-cholesky")`
# ---------------------------------------------------------------------
#
# The `"newton-cholesky"` solver (originally introduced in scikit-learn version
# 1.2) was previously limited to binary
# :class:`~linear_model.LogisticRegression` and some other generalized linear
# regression estimators (namely :class:`~linear_model.PoissonRegressor`,
# :class:`~linear_model.GammaRegressor` and
# :class:`~linear_model.TweedieRegressor`).
#
# This new release includes support for multiclass (multinomial)
# :class:`~linear_model.LogisticRegression`.
#
# This solver is particularly useful when the number of features is small to
# medium. It has been empirically shown to converge more reliably and faster
# than other solvers on some medium sized datasets with one-hot encoded
# categorical features as can be seen in the `benchmark results of the
# pull-request
# <https://github.com/scikit-learn/scikit-learn/pull/28840#issuecomment-2065368727>`_.
# %%
# Missing value support for Extra Trees
# -------------------------------------
#
# The classes :class:`ensemble.ExtraTreesClassifier` and
# :class:`ensemble.ExtraTreesRegressor` now support missing values. More details in the
# :ref:`User Guide <tree_missing_value_support>`.
import numpy as np

from sklearn.ensemble import ExtraTreesClassifier

# Toy 1-D feature containing one missing entry (NaN); Extra Trees can now fit
# such data directly, without prior imputation.
X = np.array([0, 1, 6, np.nan]).reshape(-1, 1)
y = [0, 0, 1, 1]
forest = ExtraTreesClassifier(random_state=0).fit(X, y)
# Prediction also works for the sample whose feature value is missing.
forest.predict(X)
# %%
# Download any dataset from the web
# ---------------------------------
#
# The function :func:`datasets.fetch_file` allows downloading a file from any given URL.
# This convenience function provides built-in local disk caching, sha256 digest
# integrity check and an automated retry mechanism on network error.
#
# The goal is to provide the same convenience and reliability as dataset fetchers while
# giving the flexibility to work with data from arbitrary online sources and file
# formats.
#
# The downloaded file can then be loaded with generic or domain specific functions such
# as `pandas.read_csv`, `pandas.read_parquet`, etc.
# %%
# Array API support
# -----------------
#
# Many more estimators and functions have been updated to support array API compatible
# inputs since version 1.5, in particular the meta-estimators for hyperparameter tuning
# from the :mod:`sklearn.model_selection` module and the metrics from the
# :mod:`sklearn.metrics` module.
#
# Please refer to the :ref:`array API support<array_api>` page for instructions to use
# scikit-learn with array API compatible libraries such as PyTorch or CuPy.
# %%
# Almost complete Metadata Routing support
# ----------------------------------------
#
# Support for routing metadata has been added to all remaining estimators and
# functions except AdaBoost. See :ref:`Metadata Routing User Guide <metadata_routing>`
# for more details.
# %%
# Free-threaded CPython 3.13 support
# ----------------------------------
#
# scikit-learn has preliminary support for free-threaded CPython, in particular
# free-threaded wheels are available for all of our supported platforms.
#
# Free-threaded (also known as nogil) CPython 3.13 is an experimental version of
# CPython 3.13 which aims at enabling efficient multi-threaded use cases by
# removing the Global Interpreter Lock (GIL).
#
# For more details about free-threaded CPython see `py-free-threading doc <https://py-free-threading.github.io>`_,
# in particular `how to install a free-threaded CPython <https://py-free-threading.github.io/installing-cpython/>`_
# and `Ecosystem compatibility tracking <https://py-free-threading.github.io/tracking/>`_.
#
# Feel free to try free-threaded CPython on your use case and report any issues!
# %%
# Improvements to the developer API for third party libraries
# -----------------------------------------------------------
#
# We have been working on improving the developer API for third party libraries.
# This is still a work in progress, but a fair amount of work has been done in this
# release. This release includes:
#
# - :func:`sklearn.utils.validation.validate_data` is introduced and replaces the
# previously private `BaseEstimator._validate_data` method. This function extends
# :func:`~sklearn.utils.validation.check_array` and adds support for remembering
# input feature counts and names.
# - Estimator tags are now revamped and a part of the public API via
# :class:`sklearn.utils.Tags`. Estimators should now override the
# :meth:`BaseEstimator.__sklearn_tags__` method instead of implementing a `_more_tags`
# method. If you'd like to support multiple scikit-learn versions, you can implement
# both methods in your class.
# - As a consequence of developing a public tag API, we've removed the `_xfail_checks`
# tag and tests which are expected to fail are directly passed to
# :func:`~sklearn.utils.estimator_checks.check_estimator` and
# :func:`~sklearn.utils.estimator_checks.parametrize_with_checks`. See their
# corresponding API docs for more details.
# - Many tests in the common test suite are updated and raise more helpful error
# messages. We've also added some new tests, which should help you more easily fix
# potential issues with your estimators.
#
# An updated version of our :ref:`develop` is also available, which we recommend you
# check out.
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/release_highlights/plot_release_highlights_1_8_0.py | examples/release_highlights/plot_release_highlights_1_8_0.py | # ruff: noqa: CPY001
"""
=======================================
Release Highlights for scikit-learn 1.8
=======================================
.. currentmodule:: sklearn
We are pleased to announce the release of scikit-learn 1.8! Many bug fixes
and improvements were added, as well as some key new features. Below we
detail the highlights of this release. **For an exhaustive list of
all the changes**, please refer to the :ref:`release notes <release_notes_1_8>`.
To install the latest version (with pip)::
pip install --upgrade scikit-learn
or with conda::
conda install -c conda-forge scikit-learn
"""
# %%
# Array API support (enables GPU computations)
# --------------------------------------------
# The progressive adoption of the Python array API standard in
# scikit-learn means that PyTorch and CuPy input arrays
# are used directly. This means that in scikit-learn estimators
# and functions non-CPU devices, such as GPUs, can be used
# to perform the computation. As a result performance is improved
# and integration with these libraries is easier.
#
# In scikit-learn 1.8, several estimators and functions have been updated to
# support array API compatible inputs, for example PyTorch tensors and CuPy
# arrays.
#
# Array API support was added to the following estimators:
# :class:`preprocessing.StandardScaler`,
# :class:`preprocessing.PolynomialFeatures`, :class:`linear_model.RidgeCV`,
# :class:`linear_model.RidgeClassifierCV`, :class:`mixture.GaussianMixture` and
# :class:`calibration.CalibratedClassifierCV`.
#
# Array API support was also added to several metrics in :mod:`sklearn.metrics`
# module, see :ref:`array_api_supported` for more details.
#
# Please refer to the :ref:`array API support<array_api>` page for instructions
# to use scikit-learn with array API compatible libraries such as PyTorch or CuPy.
# Note: Array API support is experimental and must be explicitly enabled both
# in SciPy and scikit-learn.
#
# Here is an excerpt of using a feature engineering preprocessor on the CPU,
# followed by :class:`calibration.CalibratedClassifierCV`
# and :class:`linear_model.RidgeCV` together on a GPU with the help of PyTorch:
#
# .. code-block:: python
#
# ridge_pipeline_gpu = make_pipeline(
# # Ensure that all features (including categorical features) are preprocessed
# # on the CPU and mapped to a numerical representation.
# feature_preprocessor,
# # Move the results to the GPU and perform computations there
# FunctionTransformer(
# lambda x: torch.tensor(x.to_numpy().astype(np.float32), device="cuda"))
# ,
# CalibratedClassifierCV(
# RidgeClassifierCV(alphas=alphas), method="temperature"
# ),
# )
# with sklearn.config_context(array_api_dispatch=True):
# cv_results = cross_validate(ridge_pipeline_gpu, features, target)
#
#
# See the `full notebook on Google Colab
# <https://colab.research.google.com/drive/1ztH8gUPv31hSjEeR_8pw20qShTwViGRx?usp=sharing>`_
# for more details. On this particular example, using the Colab GPU vs using a
# single CPU core leads to a 10x speedup which is quite typical for such workloads.
# %%
# Free-threaded CPython 3.14 support
# ----------------------------------
#
# scikit-learn has support for free-threaded CPython, in particular
# free-threaded wheels are available for all of our supported platforms on Python
# 3.14.
#
# We would be very interested by user feedback. Here are a few things you can
# try:
#
# - install free-threaded CPython 3.14, run your favourite
# scikit-learn script and check that nothing breaks unexpectedly.
# Note that CPython 3.14 (rather than 3.13) is strongly advised because a
# number of free-threaded bugs have been fixed since CPython 3.13.
# - if you use some estimators with a `n_jobs` parameter, try changing the
# default backend to threading with `joblib.parallel_config` as in the
# snippet below. This could potentially speed-up your code because the
# default joblib backend is process-based and incurs more overhead than
# threads.
#
# .. code-block:: python
#
# grid_search = GridSearchCV(clf, param_grid=param_grid, n_jobs=4)
# with joblib.parallel_config(backend="threading"):
# grid_search.fit(X, y)
#
# - don't hesitate to report any issue or unexpected performance behaviour by
# opening a `GitHub issue <https://github.com/scikit-learn/scikit-learn/issues/new/choose>`_!
#
# Free-threaded (also known as nogil) CPython is a version of CPython that aims
# to enable efficient multi-threaded use cases by removing the Global
# Interpreter Lock (GIL).
#
# For more details about free-threaded CPython see `py-free-threading doc
# <https://py-free-threading.github.io>`_, in particular `how to install a
# free-threaded CPython <https://py-free-threading.github.io/installing-cpython/>`_
# and `Ecosystem compatibility tracking <https://py-free-threading.github.io/tracking/>`_.
#
# In scikit-learn, one hope with free-threaded Python is to more efficiently
# leverage multi-core CPUs by using thread workers instead of subprocess
# workers for parallel computation when passing `n_jobs>1` in functions or
# estimators. Efficiency gains are expected by removing the need for
# inter-process communication. Be aware that switching the default joblib
# backend and testing that everything works well with free-threaded Python is an
# ongoing long-term effort.
# %%
# Temperature scaling in `CalibratedClassifierCV`
# -----------------------------------------------
# Probability calibration of classifiers with temperature scaling is available in
# :class:`calibration.CalibratedClassifierCV` by setting `method="temperature"`.
# This method is particularly well suited for multiclass problems because it provides
# (better) calibrated probabilities with a single free parameter. This is in
# contrast to all the other available calibration methods
# which use a "One-vs-Rest" scheme that adds more parameters for each class.
from sklearn.calibration import CalibratedClassifierCV
from sklearn.datasets import make_classification
from sklearn.naive_bayes import GaussianNB

# Fit an (uncalibrated) classifier, then two calibrated wrappers on the same
# data: sigmoid (one-vs-rest) vs. the new temperature scaling method.
X, y = make_classification(n_classes=3, n_informative=8, random_state=42)
clf = GaussianNB().fit(X, y)
sig = CalibratedClassifierCV(clf, method="sigmoid", ensemble=False).fit(X, y)
ts = CalibratedClassifierCV(clf, method="temperature", ensemble=False).fit(X, y)
# %%
# The following example shows that temperature scaling can produce better calibrated
# probabilities than sigmoid calibration in a multi-class classification problem
# with 3 classes.
import matplotlib.pyplot as plt

from sklearn.calibration import CalibrationDisplay

fig, axes = plt.subplots(
    figsize=(8, 4.5),
    ncols=3,
    sharey=True,
)
# One reliability diagram per class, overlaying the three models.
for i, c in enumerate(ts.classes_):
    CalibrationDisplay.from_predictions(
        y == c, clf.predict_proba(X)[:, i], name="Uncalibrated", ax=axes[i], marker="s"
    )
    CalibrationDisplay.from_predictions(
        y == c,
        ts.predict_proba(X)[:, i],
        name="Temperature scaling",
        ax=axes[i],
        marker="o",
    )
    CalibrationDisplay.from_predictions(
        y == c, sig.predict_proba(X)[:, i], name="Sigmoid", ax=axes[i], marker="v"
    )
    axes[i].set_title(f"Class {c}")
    # Per-axes labels/legends are removed in favour of figure-level ones below.
    axes[i].set_xlabel(None)
    axes[i].set_ylabel(None)
    axes[i].get_legend().remove()
fig.suptitle("Reliability Diagrams per Class")
fig.supxlabel("Mean Predicted Probability")
fig.supylabel("Fraction of Class")
fig.legend(*axes[0].get_legend_handles_labels(), loc=(0.72, 0.5))
plt.subplots_adjust(right=0.7)
_ = fig.show()
# %%
# Efficiency improvements in linear models
# ----------------------------------------
# The fit time has been massively reduced for squared error based estimators
# with L1 penalty: `ElasticNet`, `Lasso`, `MultiTaskElasticNet`,
# `MultiTaskLasso` and their CV variants. The fit time improvement is mainly
# achieved by **gap safe screening rules**. They enable the coordinate descent
# solver to set feature coefficients to zero early on and not look at them
# again. The stronger the L1 penalty the earlier features can be excluded from
# further updates.
from time import time

from sklearn.datasets import make_regression
from sklearn.linear_model import ElasticNetCV

# Wide problem (10k features) to showcase the gap-safe-screening speedup.
X, y = make_regression(n_features=10_000, random_state=0)
model = ElasticNetCV()
tic = time()
model.fit(X, y)
toc = time()
# `:.3` formats to three significant digits.
print(f"Fitting ElasticNetCV took {toc - tic:.3} seconds.")
# %%
# HTML representation of estimators
# ---------------------------------
# Hyperparameters in the dropdown table of the HTML representation now include
# links to the online documentation. Docstring descriptions are also shown as
# tooltips on hover.
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

# A simple pipeline; displaying it below renders the interactive HTML diagram.
clf = make_pipeline(StandardScaler(), LogisticRegression(random_state=0, C=10))
# %%
# Expand the estimator diagram below by clicking on "LogisticRegression" and then on
# "Parameters".
clf
# %%
# DecisionTreeRegressor with `criterion="absolute_error"`
# -------------------------------------------------------
# :class:`tree.DecisionTreeRegressor` with `criterion="absolute_error"`
# now runs much faster. It has now `O(n * log(n))` complexity compared to
# `O(n**2)` previously, which makes it possible to scale to millions of data points.
#
# As an illustration, on a dataset with 100_000 samples and 1 feature, doing a
# single split takes of the order of 100 ms, compared to ~20 seconds before.
# NOTE(review): this rebinds the name `time` (module) over the earlier
# `from time import time` in this file; consistent with the `time.time()`
# calls below, so behavior is correct.
import time

from sklearn.datasets import make_regression
from sklearn.tree import DecisionTreeRegressor

X, y = make_regression(n_samples=100_000, n_features=1)
# max_depth=1: a single split is enough to illustrate the per-split speedup.
tree = DecisionTreeRegressor(criterion="absolute_error", max_depth=1)
tic = time.time()
tree.fit(X, y)
elapsed = time.time() - tic
print(f"Fit took {elapsed:.2f} seconds")
# %%
# ClassicalMDS
# ------------
# Classical MDS, also known as "Principal Coordinates Analysis" (PCoA)
# or "Torgerson's scaling" is now available within the `sklearn.manifold`
# module. Classical MDS is close to PCA and instead of approximating
# distances, it approximates pairwise scalar products, which has an exact
# analytic solution in terms of eigendecomposition.
#
# Let's illustrate this new addition by using it on an S-curve dataset to
# get a low-dimensional representation of the data.
import matplotlib.pyplot as plt
from matplotlib import ticker

from sklearn import datasets, manifold

# Embed a 3-D S-curve into 2-D with the new ClassicalMDS estimator.
n_samples = 1500
S_points, S_color = datasets.make_s_curve(n_samples, random_state=0)
md_classical = manifold.ClassicalMDS(n_components=2)
S_scaling = md_classical.fit_transform(S_points)

# Left panel: original 3-D data; right panel: 2-D classical-MDS embedding.
fig = plt.figure(figsize=(8, 4))
ax1 = fig.add_subplot(1, 2, 1, projection="3d")
x, y, z = S_points.T
ax1.scatter(x, y, z, c=S_color, s=50, alpha=0.8)
ax1.set_title("Original S-curve samples", size=16)
ax1.view_init(azim=-60, elev=9)
for axis in (ax1.xaxis, ax1.yaxis, ax1.zaxis):
    axis.set_major_locator(ticker.MultipleLocator(1))

ax2 = fig.add_subplot(1, 2, 2)
x2, y2 = S_scaling.T
ax2.scatter(x2, y2, c=S_color, s=50, alpha=0.8)
ax2.set_title("Classical MDS", size=16)
# Hide tick labels: the embedding's absolute coordinates are not meaningful.
for axis in (ax2.xaxis, ax2.yaxis):
    axis.set_major_formatter(ticker.NullFormatter())
plt.show()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/release_highlights/plot_release_highlights_1_4_0.py | examples/release_highlights/plot_release_highlights_1_4_0.py | # ruff: noqa: CPY001
"""
=======================================
Release Highlights for scikit-learn 1.4
=======================================
.. currentmodule:: sklearn
We are pleased to announce the release of scikit-learn 1.4! Many bug fixes
and improvements were added, as well as some new key features. We detail
below a few of the major features of this release. **For an exhaustive list of
all the changes**, please refer to the :ref:`release notes <release_notes_1_4>`.
To install the latest version (with pip)::
pip install --upgrade scikit-learn
or with conda::
conda install -c conda-forge scikit-learn
"""
# %%
# HistGradientBoosting Natively Supports Categorical DTypes in DataFrames
# -----------------------------------------------------------------------
# :class:`ensemble.HistGradientBoostingClassifier` and
# :class:`ensemble.HistGradientBoostingRegressor` now directly supports dataframes with
# categorical features. Here we have a dataset with a mixture of
# categorical and numerical features:
from sklearn.datasets import fetch_openml

# Fetch the "adult" census dataset as a pandas DataFrame (mixed categorical
# and numerical dtypes).
X_adult, y_adult = fetch_openml("adult", version=2, return_X_y=True)

# Remove redundant and non-feature columns
X_adult = X_adult.drop(["education-num", "fnlwgt"], axis="columns")
X_adult.dtypes
# %%
# By setting `categorical_features="from_dtype"`, the gradient boosting classifier
# treats the columns with categorical dtypes as categorical features in the
# algorithm:
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X_adult, y_adult, random_state=0)
hist = HistGradientBoostingClassifier(categorical_features="from_dtype")
hist.fit(X_train, y_train)
y_decision = hist.decision_function(X_test)
print(f"ROC AUC score is {roc_auc_score(y_test, y_decision)}")
# %%
# Polars output in `set_output`
# -----------------------------
# scikit-learn's transformers now support polars output with the `set_output` API.
import polars as pl

from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder, StandardScaler

df = pl.DataFrame(
    {"height": [120, 140, 150, 110, 100], "pet": ["dog", "cat", "dog", "cat", "cat"]}
)
preprocessor = ColumnTransformer(
    [
        ("numerical", StandardScaler(), ["height"]),
        ("categorical", OneHotEncoder(sparse_output=False), ["pet"]),
    ],
    verbose_feature_names_out=False,
)
# Request polars (rather than the default numpy) output from `transform`.
preprocessor.set_output(transform="polars")
df_out = preprocessor.fit_transform(df)
df_out
# %%
print(f"Output type: {type(df_out)}")
# %%
# Missing value support for Random Forest
# ---------------------------------------
# The classes :class:`ensemble.RandomForestClassifier` and
# :class:`ensemble.RandomForestRegressor` now support missing values. When training
# every individual tree, the splitter evaluates each potential threshold with the
# missing values going to the left and right nodes. More details in the
# :ref:`User Guide <tree_missing_value_support>`.
import numpy as np

from sklearn.ensemble import RandomForestClassifier

# 1-D feature with one missing value; Random Forest now fits NaNs directly.
X = np.array([0, 1, 6, np.nan]).reshape(-1, 1)
y = [0, 0, 1, 1]
forest = RandomForestClassifier(random_state=0).fit(X, y)
# Prediction also succeeds for the sample containing the missing value.
forest.predict(X)
# %%
# Add support for monotonic constraints in tree-based models
# ----------------------------------------------------------
# While we added support for monotonic constraints in histogram-based gradient boosting
# in scikit-learn 0.23, we now support this feature for all other tree-based models such as
# trees, random forests, extra-trees, and exact gradient boosting. Here, we show this
# feature for random forest on a regression problem.
import matplotlib.pyplot as plt

from sklearn.ensemble import RandomForestRegressor
from sklearn.inspection import PartialDependenceDisplay

# Noisy target that is increasing in feature 0 apart from a sinusoidal wiggle.
n_samples = 500
rng = np.random.RandomState(0)
X = rng.randn(n_samples, 2)
noise = rng.normal(loc=0.0, scale=0.01, size=n_samples)
y = 5 * X[:, 0] + np.sin(10 * np.pi * X[:, 0]) - noise

# monotonic_cst=[1, 0]: force a monotonically increasing response in feature 0,
# leave feature 1 unconstrained.
rf_no_cst = RandomForestRegressor().fit(X, y)
rf_cst = RandomForestRegressor(monotonic_cst=[1, 0]).fit(X, y)

disp = PartialDependenceDisplay.from_estimator(
    rf_no_cst,
    X,
    features=[0],
    feature_names=["feature 0"],
    line_kw={"linewidth": 4, "label": "unconstrained", "color": "tab:blue"},
)
# Draw the constrained model's partial dependence on the same axes.
PartialDependenceDisplay.from_estimator(
    rf_cst,
    X,
    features=[0],
    line_kw={"linewidth": 4, "label": "constrained", "color": "tab:orange"},
    ax=disp.axes_,
)
disp.axes_[0, 0].plot(
    X[:, 0], y, "o", alpha=0.5, zorder=-1, label="samples", color="tab:green"
)
disp.axes_[0, 0].set_ylim(-3, 3)
disp.axes_[0, 0].set_xlim(-1, 1)
disp.axes_[0, 0].legend()
plt.show()
# %%
# Enriched estimator displays
# ---------------------------
# Estimators displays have been enriched: if we look at `forest`, defined above:
# Displaying the fitted `forest` (from the cell above) renders the enriched
# HTML estimator diagram.
forest
# %%
# One can access the documentation of the estimator by clicking on the icon "?" on
# the top right corner of the diagram.
#
# In addition, the display changes color, from orange to blue, when the estimator is
# fitted. You can also get this information by hovering on the icon "i".
from sklearn.base import clone

clone(forest)  # the clone is not fitted
# %%
# Metadata Routing Support
# ------------------------
# Many meta-estimators and cross-validation routines now support metadata
# routing, which are listed in the :ref:`user guide
# <metadata_routing_models>`. For instance, this is how you can do a nested
# cross-validation with sample weights and :class:`~model_selection.GroupKFold`:
import sklearn
from sklearn.datasets import make_regression
from sklearn.linear_model import Lasso
from sklearn.metrics import get_scorer
from sklearn.model_selection import GridSearchCV, GroupKFold, cross_validate

# For now metadata routing is disabled by default, and needs to be explicitly
# enabled.
sklearn.set_config(enable_metadata_routing=True)

n_samples = 100
X, y = make_regression(n_samples=n_samples, n_features=5, noise=0.5)
rng = np.random.RandomState(7)
groups = rng.randint(0, 10, size=n_samples)
sample_weights = rng.rand(n_samples)

# Declare that the estimator's `fit` wants `sample_weight` routed to it.
estimator = Lasso().set_fit_request(sample_weight=True)
hyperparameter_grid = {"alpha": [0.1, 0.5, 1.0, 2.0]}
# Likewise route `sample_weight` into the inner-CV scorer.
scoring_inner_cv = get_scorer("neg_mean_squared_error").set_score_request(
    sample_weight=True
)
inner_cv = GroupKFold(n_splits=5)

grid_search = GridSearchCV(
    estimator=estimator,
    param_grid=hyperparameter_grid,
    cv=inner_cv,
    scoring=scoring_inner_cv,
)

outer_cv = GroupKFold(n_splits=5)
scorers = {
    "mse": get_scorer("neg_mean_squared_error").set_score_request(sample_weight=True)
}
# `params` carries the metadata; routing delivers `sample_weight` to fit/score
# and `groups` to the GroupKFold splitters.
results = cross_validate(
    grid_search,
    X,
    y,
    cv=outer_cv,
    scoring=scorers,
    return_estimator=True,
    params={"sample_weight": sample_weights, "groups": groups},
)
print("cv error on test sets:", results["test_mse"])

# Setting the flag to the default `False` to avoid interference with other
# scripts.
sklearn.set_config(enable_metadata_routing=False)
# %%
# Improved memory and runtime efficiency for PCA on sparse data
# -------------------------------------------------------------
# PCA is now able to handle sparse matrices natively for the `arpack`
# solver by leveraging `scipy.sparse.linalg.LinearOperator` to avoid
# materializing large sparse matrices when performing the
# eigenvalue decomposition of the data set covariance matrix.
#
from time import time

import scipy.sparse as sp

from sklearn.decomposition import PCA

# Random sparse matrix (default density 0.01) and its dense equivalent.
X_sparse = sp.random(m=1000, n=1000, random_state=0)
X_dense = X_sparse.toarray()

t0 = time()
PCA(n_components=10, svd_solver="arpack").fit(X_sparse)
time_sparse = time() - t0

t0 = time()
PCA(n_components=10, svd_solver="arpack").fit(X_dense)
time_dense = time() - t0

# Ratio > 1 means fitting on the sparse input was faster.
print(f"Speedup: {time_dense / time_sparse:.1f}x")
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/release_highlights/plot_release_highlights_1_1_0.py | examples/release_highlights/plot_release_highlights_1_1_0.py | # ruff: noqa: CPY001
"""
=======================================
Release Highlights for scikit-learn 1.1
=======================================
.. currentmodule:: sklearn
We are pleased to announce the release of scikit-learn 1.1! Many bug fixes
and improvements were added, as well as some new key features. We detail
below a few of the major features of this release. **For an exhaustive list of
all the changes**, please refer to the :ref:`release notes <release_notes_1_1>`.
To install the latest version (with pip)::
pip install --upgrade scikit-learn
or with conda::
conda install -c conda-forge scikit-learn
"""
# %%
# .. _quantile_support_hgbdt:
#
# Quantile loss in :class:`~ensemble.HistGradientBoostingRegressor`
# -----------------------------------------------------------------
# :class:`~ensemble.HistGradientBoostingRegressor` can model quantiles with
# `loss="quantile"` and the new parameter `quantile`.
import matplotlib.pyplot as plt
import numpy as np

from sklearn.ensemble import HistGradientBoostingRegressor

# Simple regression function for X * cos(X), with noise growing along X.
rng = np.random.RandomState(42)
X_1d = np.linspace(0, 10, num=2000)
X = X_1d.reshape(-1, 1)
y = X_1d * np.cos(X_1d) + rng.normal(scale=X_1d / 3)

# Fit one model per target quantile (5%, 50%, 95%).
quantiles = [0.95, 0.5, 0.05]
parameters = dict(loss="quantile", max_bins=32, max_iter=50)
hist_quantiles = {
    f"quantile={quantile:.2f}": HistGradientBoostingRegressor(
        **parameters, quantile=quantile
    ).fit(X, y)
    for quantile in quantiles
}

fig, ax = plt.subplots()
ax.plot(X_1d, y, "o", alpha=0.5, markersize=1)
for quantile, hist in hist_quantiles.items():
    ax.plot(X_1d, hist.predict(X), label=quantile)
_ = ax.legend(loc="lower left")
# %%
# For a use-case example, see
# :ref:`sphx_glr_auto_examples_ensemble_plot_hgbt_regression.py`
# %%
# `get_feature_names_out` Available in all Transformers
# -----------------------------------------------------
# :term:`get_feature_names_out` is now available in all transformers, thereby
# concluding the implementation of
# `SLEP007 <https://scikit-learn-enhancement-proposals.readthedocs.io/en/latest/slep007/proposal.html>`__.
# This enables :class:`~pipeline.Pipeline` to construct the output feature names for
# more complex pipelines:
from sklearn.compose import ColumnTransformer
from sklearn.datasets import fetch_openml
from sklearn.feature_selection import SelectKBest
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler

X, y = fetch_openml(
    "titanic", version=1, as_frame=True, return_X_y=True, parser="pandas"
)

# Numeric columns: impute the median, then standardize.
numeric_features = ["age", "fare"]
numeric_transformer = make_pipeline(SimpleImputer(strategy="median"), StandardScaler())
# Categorical columns: one-hot encode, ignoring unseen categories.
categorical_features = ["embarked", "pclass"]

preprocessor = ColumnTransformer(
    [
        ("num", numeric_transformer, numeric_features),
        (
            "cat",
            OneHotEncoder(handle_unknown="ignore", sparse_output=False),
            categorical_features,
        ),
    ],
    verbose_feature_names_out=False,
)
log_reg = make_pipeline(preprocessor, SelectKBest(k=7), LogisticRegression())
log_reg.fit(X, y)
# %%
# Here we slice the pipeline to include all the steps but the last one. The output
# feature names of this pipeline slice are the features put into logistic
# regression. These names correspond directly to the coefficients in the logistic
# regression:
import pandas as pd

log_reg_input_features = log_reg[:-1].get_feature_names_out()
pd.Series(log_reg[-1].coef_.ravel(), index=log_reg_input_features).plot.bar()
plt.tight_layout()
# %%
# Grouping infrequent categories in :class:`~preprocessing.OneHotEncoder`
# -----------------------------------------------------------------------
# :class:`~preprocessing.OneHotEncoder` supports aggregating infrequent
# categories into a single output for each feature. The parameters to enable
# the gathering of infrequent categories are `min_frequency` and
# `max_categories`. See the :ref:`User Guide <encoder_infrequent_categories>`
# for more details.
import numpy as np

from sklearn.preprocessing import OneHotEncoder

# Column of categories with deliberately skewed counts:
# dog=5, cat=20, rabbit=10, snake=3.
X = np.array(
    [["dog"] * 5 + ["cat"] * 20 + ["rabbit"] * 10 + ["snake"] * 3], dtype=object
).T
# Categories seen fewer than 6 times are treated as "infrequent".
enc = OneHotEncoder(min_frequency=6, sparse_output=False).fit(X)
enc.infrequent_categories_
# %%
# Since dog and snake are infrequent categories, they are grouped together when
# transformed:
encoded = enc.transform(np.array([["dog"], ["snake"], ["cat"], ["rabbit"]]))
pd.DataFrame(encoded, columns=enc.get_feature_names_out())
# %%
# Performance improvements
# ------------------------
# Reductions on pairwise distances for dense float64 datasets have been refactored
# to better take advantage of non-blocking thread parallelism. For example,
# :meth:`neighbors.NearestNeighbors.kneighbors` and
# :meth:`neighbors.NearestNeighbors.radius_neighbors` can respectively be up to ×20 and
# ×5 faster than previously. In summary, the following functions and estimators
# now benefit from improved performance:
#
# - :func:`metrics.pairwise_distances_argmin`
# - :func:`metrics.pairwise_distances_argmin_min`
# - :class:`cluster.AffinityPropagation`
# - :class:`cluster.Birch`
# - :class:`cluster.MeanShift`
# - :class:`cluster.OPTICS`
# - :class:`cluster.SpectralClustering`
# - :func:`feature_selection.mutual_info_regression`
# - :class:`neighbors.KNeighborsClassifier`
# - :class:`neighbors.KNeighborsRegressor`
# - :class:`neighbors.RadiusNeighborsClassifier`
# - :class:`neighbors.RadiusNeighborsRegressor`
# - :class:`neighbors.LocalOutlierFactor`
# - :class:`neighbors.NearestNeighbors`
# - :class:`manifold.Isomap`
# - :class:`manifold.LocallyLinearEmbedding`
# - :class:`manifold.TSNE`
# - :func:`manifold.trustworthiness`
# - :class:`semi_supervised.LabelPropagation`
# - :class:`semi_supervised.LabelSpreading`
#
# To know more about the technical details of this work, you can read
# `this suite of blog posts <https://blog.scikit-learn.org/technical/performances/>`_.
#
# Moreover, the computation of loss functions has been refactored using
# Cython resulting in performance improvements for the following estimators:
#
# - :class:`linear_model.LogisticRegression`
# - :class:`linear_model.GammaRegressor`
# - :class:`linear_model.PoissonRegressor`
# - :class:`linear_model.TweedieRegressor`
# %%
# :class:`~decomposition.MiniBatchNMF`: an online version of NMF
# --------------------------------------------------------------
# The new class :class:`~decomposition.MiniBatchNMF` implements a faster but
# less accurate version of non-negative matrix factorization
# (:class:`~decomposition.NMF`). :class:`~decomposition.MiniBatchNMF` divides the
# data into mini-batches and optimizes the NMF model in an online manner by
# cycling over the mini-batches, making it better suited for large datasets. In
# particular, it implements `partial_fit`, which can be used for online
# learning when the data is not readily available from the start, or when the
# data does not fit into memory.
import numpy as np

from sklearn.decomposition import MiniBatchNMF

# Build a low-rank non-negative matrix X = W @ H with known factors.
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 10, 10, 5
true_W = rng.uniform(size=(n_samples, n_components))
true_H = rng.uniform(size=(n_components, n_features))
X = true_W @ true_H

# Online fitting: repeated `partial_fit` calls over the same mini-batch.
nmf = MiniBatchNMF(n_components=n_components, random_state=0)
for _ in range(10):
    nmf.partial_fit(X)

W = nmf.transform(X)
H = nmf.components_
X_reconstructed = W @ H

print(
    "relative reconstruction error: ",
    f"{np.sum((X - X_reconstructed) ** 2) / np.sum(X**2):.5f}",
)
# %%
# :class:`~cluster.BisectingKMeans`: divide and cluster
# -----------------------------------------------------
# The new class :class:`~cluster.BisectingKMeans` is a variant of
# :class:`~cluster.KMeans`, using divisive hierarchical clustering. Instead of
# creating all centroids at once, centroids are picked progressively based on a
# previous clustering: a cluster is split into two new clusters repeatedly
# until the target number of clusters is reached, giving a hierarchical
# structure to the clustering.
import matplotlib.pyplot as plt

from sklearn.cluster import BisectingKMeans, KMeans
from sklearn.datasets import make_blobs

# Two true blobs, over-clustered with k=5 by both algorithms for comparison.
X, _ = make_blobs(n_samples=1000, centers=2, random_state=0)

km = KMeans(n_clusters=5, random_state=0, n_init="auto").fit(X)
bisect_km = BisectingKMeans(n_clusters=5, random_state=0).fit(X)

# Side-by-side scatter plots with cluster centers drawn in red.
fig, ax = plt.subplots(1, 2, figsize=(10, 5))
ax[0].scatter(X[:, 0], X[:, 1], s=10, c=km.labels_)
ax[0].scatter(km.cluster_centers_[:, 0], km.cluster_centers_[:, 1], s=20, c="r")
ax[0].set_title("KMeans")
ax[1].scatter(X[:, 0], X[:, 1], s=10, c=bisect_km.labels_)
ax[1].scatter(
    bisect_km.cluster_centers_[:, 0], bisect_km.cluster_centers_[:, 1], s=20, c="r"
)
_ = ax[1].set_title("BisectingKMeans")
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/release_highlights/plot_release_highlights_0_23_0.py | examples/release_highlights/plot_release_highlights_0_23_0.py | # ruff: noqa: CPY001
"""
========================================
Release Highlights for scikit-learn 0.23
========================================
.. currentmodule:: sklearn
We are pleased to announce the release of scikit-learn 0.23! Many bug fixes
and improvements were added, as well as some new key features. We detail
below a few of the major features of this release. **For an exhaustive list of
all the changes**, please refer to the :ref:`release notes <release_notes_0_23>`.
To install the latest version (with pip)::
pip install --upgrade scikit-learn
or with conda::
conda install -c conda-forge scikit-learn
"""
##############################################################################
# Generalized Linear Models, and Poisson loss for gradient boosting
# -----------------------------------------------------------------
# Long-awaited Generalized Linear Models with non-normal loss functions are now
# available. In particular, three new regressors were implemented:
# :class:`~sklearn.linear_model.PoissonRegressor`,
# :class:`~sklearn.linear_model.GammaRegressor`, and
# :class:`~sklearn.linear_model.TweedieRegressor`. The Poisson regressor can be
# used to model positive integer counts, or relative frequencies. Read more in
# the :ref:`User Guide <Generalized_linear_regression>`. Additionally,
# :class:`~sklearn.ensemble.HistGradientBoostingRegressor` supports a new
# 'poisson' loss as well.
import numpy as np
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.linear_model import PoissonRegressor
from sklearn.model_selection import train_test_split
n_samples, n_features = 1000, 20
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
# positive integer target correlated with X[:, 5] with many zeros:
y = rng.poisson(lam=np.exp(X[:, 5]) / 2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=rng)
# Fit both a Poisson GLM and a gradient-boosted model using the new
# 'poisson' loss on the same count data.
glm = PoissonRegressor()
gbdt = HistGradientBoostingRegressor(loss="poisson", learning_rate=0.01)
glm.fit(X_train, y_train)
gbdt.fit(X_train, y_train)
# Compare held-out scores of the two models.
print(glm.score(X_test, y_test))
print(gbdt.score(X_test, y_test))
##############################################################################
# Rich visual representation of estimators
# -----------------------------------------
# Estimators can now be visualized in notebooks by enabling the
# `display='diagram'` option. This is particularly useful to summarise the
# structure of pipelines and other composite estimators, with interactivity to
# provide detail. Click on the example image below to expand Pipeline
# elements. See :ref:`visualizing_composite_estimators` for how you can use
# this feature.
from sklearn import set_config
from sklearn.compose import make_column_transformer
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler
# Enable the HTML diagram representation of estimators in notebooks.
set_config(display="diagram")
# Separate preprocessing branches for numerical and categorical columns.
num_proc = make_pipeline(SimpleImputer(strategy="median"), StandardScaler())
cat_proc = make_pipeline(
    SimpleImputer(strategy="constant", fill_value="missing"),
    OneHotEncoder(handle_unknown="ignore"),
)
preprocessor = make_column_transformer(
    (num_proc, ("feat1", "feat3")), (cat_proc, ("feat0", "feat2"))
)
clf = make_pipeline(preprocessor, LogisticRegression())
# Evaluating the bare estimator as the last expression renders the
# interactive diagram in a notebook cell.
clf
##############################################################################
# Scalability and stability improvements to KMeans
# ------------------------------------------------
# The :class:`~sklearn.cluster.KMeans` estimator was entirely re-worked, and it
# is now significantly faster and more stable. In addition, the Elkan algorithm
# is now compatible with sparse matrices. The estimator uses OpenMP based
# parallelism instead of relying on joblib, so the `n_jobs` parameter has no
# effect anymore. For more details on how to control the number of threads,
# please refer to our :ref:`parallelism` notes.
import numpy as np
import scipy
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from sklearn.metrics import completeness_score
from sklearn.model_selection import train_test_split
rng = np.random.RandomState(0)
X, y = make_blobs(random_state=rng)
# Convert to CSR to demonstrate that KMeans (Elkan) now accepts sparse input.
# NOTE(review): relies on `scipy.sparse` being reachable through the bare
# `import scipy` — works here because scikit-learn already loads it, but
# `import scipy.sparse` would be more explicit; confirm before reuse.
X = scipy.sparse.csr_matrix(X)
X_train, X_test, _, y_test = train_test_split(X, y, random_state=rng)
kmeans = KMeans(n_init="auto").fit(X_train)
# Completeness compares predicted clusters against the true blob labels.
print(completeness_score(kmeans.predict(X_test), y_test))
##############################################################################
# Improvements to the histogram-based Gradient Boosting estimators
# ----------------------------------------------------------------
# Various improvements were made to
# :class:`~sklearn.ensemble.HistGradientBoostingClassifier` and
# :class:`~sklearn.ensemble.HistGradientBoostingRegressor`. On top of the
# Poisson loss mentioned above, these estimators now support :ref:`sample
# weights <sw_hgbdt>`. Also, an automatic early-stopping criterion was added:
# early-stopping is enabled by default when the number of samples exceeds 10k.
# Finally, users can now define :ref:`monotonic constraints
# <monotonic_cst_gbdt>` to constrain the predictions based on the variations of
# specific features. In the following example, we construct a target that is
# generally positively correlated with the first feature, with some noise.
# Applying monotonic constraints allows the prediction to capture the global
# effect of the first feature, instead of fitting the noise. For a use-case
# example, see :ref:`sphx_glr_auto_examples_ensemble_plot_hgbt_regression.py`.
import numpy as np
from matplotlib import pyplot as plt
from sklearn.ensemble import HistGradientBoostingRegressor
# from sklearn.inspection import plot_partial_dependence
from sklearn.inspection import PartialDependenceDisplay
from sklearn.model_selection import train_test_split
n_samples = 500
rng = np.random.RandomState(0)
X = rng.randn(n_samples, 2)
noise = rng.normal(loc=0.0, scale=0.01, size=n_samples)
# Target: globally increasing in X[:, 0] plus a high-frequency sine "noise" term.
y = 5 * X[:, 0] + np.sin(10 * np.pi * X[:, 0]) - noise
gbdt_no_cst = HistGradientBoostingRegressor().fit(X, y)
# monotonic_cst=[1, 0]: predictions must be non-decreasing in feature 0,
# while feature 1 is left unconstrained.
gbdt_cst = HistGradientBoostingRegressor(monotonic_cst=[1, 0]).fit(X, y)
# plot_partial_dependence has been removed in version 1.2. From 1.2, use
# PartialDependenceDisplay instead.
# disp = plot_partial_dependence(
disp = PartialDependenceDisplay.from_estimator(
    gbdt_no_cst,
    X,
    features=[0],
    feature_names=["feature 0"],
    line_kw={"linewidth": 4, "label": "unconstrained", "color": "tab:blue"},
)
# plot_partial_dependence(
# Overlay the constrained model's partial dependence on the same axes.
PartialDependenceDisplay.from_estimator(
    gbdt_cst,
    X,
    features=[0],
    line_kw={"linewidth": 4, "label": "constrained", "color": "tab:orange"},
    ax=disp.axes_,
)
disp.axes_[0, 0].plot(
    X[:, 0], y, "o", alpha=0.5, zorder=-1, label="samples", color="tab:green"
)
disp.axes_[0, 0].set_ylim(-3, 3)
disp.axes_[0, 0].set_xlim(-1, 1)
plt.legend()
plt.show()
##############################################################################
# Sample-weight support for Lasso and ElasticNet
# ----------------------------------------------
# The two linear regressors :class:`~sklearn.linear_model.Lasso` and
# :class:`~sklearn.linear_model.ElasticNet` now support sample weights.
import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import Lasso
from sklearn.model_selection import train_test_split
n_samples, n_features = 1000, 20
rng = np.random.RandomState(0)
X, y = make_regression(n_samples, n_features, random_state=rng)
# One random weight per sample; train_test_split splits it consistently
# with X and y so train/test weights stay aligned.
sample_weight = rng.rand(n_samples)
X_train, X_test, y_train, y_test, sw_train, sw_test = train_test_split(
    X, y, sample_weight, random_state=rng
)
reg = Lasso()
reg.fit(X_train, y_train, sample_weight=sw_train)
# Score is also weighted, using the held-out sample weights.
print(reg.score(X_test, y_test, sw_test))
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/release_highlights/plot_release_highlights_0_24_0.py | examples/release_highlights/plot_release_highlights_0_24_0.py | # ruff: noqa: CPY001, E501
"""
========================================
Release Highlights for scikit-learn 0.24
========================================
.. currentmodule:: sklearn
We are pleased to announce the release of scikit-learn 0.24! Many bug fixes
and improvements were added, as well as some new key features. We detail
below a few of the major features of this release. **For an exhaustive list of
all the changes**, please refer to the :ref:`release notes <release_notes_0_24>`.
To install the latest version (with pip)::
pip install --upgrade scikit-learn
or with conda::
conda install -c conda-forge scikit-learn
"""
##############################################################################
# Successive Halving estimators for tuning hyper-parameters
# ---------------------------------------------------------
# Successive Halving, a state of the art method, is now available to
# explore the space of the parameters and identify their best combination.
# :class:`~sklearn.model_selection.HalvingGridSearchCV` and
# :class:`~sklearn.model_selection.HalvingRandomSearchCV` can be
# used as drop-in replacement for
# :class:`~sklearn.model_selection.GridSearchCV` and
# :class:`~sklearn.model_selection.RandomizedSearchCV`.
# Successive Halving is an iterative selection process illustrated in the
# figure below. The first iteration is run with a small amount of resources,
# where the resource typically corresponds to the number of training samples,
# but can also be an arbitrary integer parameter such as `n_estimators` in a
# random forest. Only a subset of the parameter candidates are selected for the
# next iteration, which will be run with an increasing amount of allocated
# resources. Only a subset of candidates will last until the end of the
# iteration process, and the best parameter candidate is the one that has the
# highest score on the last iteration.
#
# Read more in the :ref:`User Guide <successive_halving_user_guide>` (note:
# the Successive Halving estimators are still :term:`experimental
# <experimental>`).
#
# .. figure:: ../model_selection/images/sphx_glr_plot_successive_halving_iterations_001.png
# :target: ../model_selection/plot_successive_halving_iterations.html
# :align: center
import numpy as np
from scipy.stats import randint
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
# The experimental import is required to enable the halving search estimators.
from sklearn.experimental import enable_halving_search_cv  # noqa: F401
from sklearn.model_selection import HalvingRandomSearchCV
rng = np.random.RandomState(0)
X, y = make_classification(n_samples=700, random_state=rng)
clf = RandomForestClassifier(n_estimators=10, random_state=rng)
# Mixed search space: discrete lists and scipy integer distributions.
param_dist = {
    "max_depth": [3, None],
    "max_features": randint(1, 11),
    "min_samples_split": randint(2, 11),
    "bootstrap": [True, False],
    "criterion": ["gini", "entropy"],
}
# factor=2: each halving iteration keeps the best half of the candidates
# and doubles the resources allocated to them.
rsh = HalvingRandomSearchCV(
    estimator=clf, param_distributions=param_dist, factor=2, random_state=rng
)
rsh.fit(X, y)
rsh.best_params_
##############################################################################
# Native support for categorical features in HistGradientBoosting estimators
# --------------------------------------------------------------------------
# :class:`~sklearn.ensemble.HistGradientBoostingClassifier` and
# :class:`~sklearn.ensemble.HistGradientBoostingRegressor` now have native
# support for categorical features: they can consider splits on non-ordered,
# categorical data. Read more in the :ref:`User Guide
# <categorical_support_gbdt>`.
#
# .. figure:: ../ensemble/images/sphx_glr_plot_gradient_boosting_categorical_001.png
# :target: ../ensemble/plot_gradient_boosting_categorical.html
# :align: center
#
# The plot shows that the new native support for categorical features leads to
# fitting times that are comparable to models where the categories are treated
# as ordered quantities, i.e. simply ordinal-encoded. Native support is also
# more expressive than both one-hot encoding and ordinal encoding. However, to
# use the new `categorical_features` parameter, it is still required to
# preprocess the data within a pipeline as demonstrated in this :ref:`example
# <sphx_glr_auto_examples_ensemble_plot_gradient_boosting_categorical.py>`.
##############################################################################
# Improved performances of HistGradientBoosting estimators
# --------------------------------------------------------
# The memory footprint of :class:`ensemble.HistGradientBoostingRegressor` and
# :class:`ensemble.HistGradientBoostingClassifier` has been significantly
# improved during calls to `fit`. In addition, histogram initialization is now
# done in parallel which results in slight speed improvements.
# See more in the `Benchmark page
# <https://scikit-learn.org/scikit-learn-benchmarks/>`_.
##############################################################################
# New self-training meta-estimator
# --------------------------------
# A new self-training implementation, based on `Yarowsky's algorithm
# <https://doi.org/10.3115/981658.981684>`_ can now be used with any
# classifier that implements :term:`predict_proba`. The sub-classifier
# will behave as a
# semi-supervised classifier, allowing it to learn from unlabeled data.
# Read more in the :ref:`User guide <self_training>`.
import numpy as np
from sklearn import datasets
from sklearn.semi_supervised import SelfTrainingClassifier
from sklearn.svm import SVC
rng = np.random.RandomState(42)
iris = datasets.load_iris()
# Mark roughly 30% of the labels as missing; -1 denotes "unlabeled" for
# the semi-supervised API.
random_unlabeled_points = rng.rand(iris.target.shape[0]) < 0.3
iris.target[random_unlabeled_points] = -1
# The base classifier must expose predict_proba, hence probability=True.
svc = SVC(probability=True, gamma="auto")
self_training_model = SelfTrainingClassifier(svc)
self_training_model.fit(iris.data, iris.target)
##############################################################################
# New SequentialFeatureSelector transformer
# -----------------------------------------
# A new iterative transformer to select features is available:
# :class:`~sklearn.feature_selection.SequentialFeatureSelector`.
# Sequential Feature Selection can add features one at a time (forward
# selection) or remove features from the list of the available features
# (backward selection), based on a cross-validated score maximization.
# See the :ref:`User Guide <sequential_feature_selection>`.
from sklearn.datasets import load_iris
from sklearn.feature_selection import SequentialFeatureSelector
from sklearn.neighbors import KNeighborsClassifier
X, y = load_iris(return_X_y=True, as_frame=True)
feature_names = X.columns
knn = KNeighborsClassifier(n_neighbors=3)
# Default direction is forward: features are added one at a time until
# n_features_to_select are kept, by cross-validated score.
sfs = SequentialFeatureSelector(knn, n_features_to_select=2)
sfs.fit(X, y)
# get_support() is a boolean mask over the original feature columns.
print(
    "Features selected by forward sequential selection: "
    f"{feature_names[sfs.get_support()].tolist()}"
)
##############################################################################
# New PolynomialCountSketch kernel approximation function
# -------------------------------------------------------
# The new :class:`~sklearn.kernel_approximation.PolynomialCountSketch`
# approximates a polynomial expansion of a feature space when used with linear
# models, but uses much less memory than
# :class:`~sklearn.preprocessing.PolynomialFeatures`.
from sklearn.datasets import fetch_covtype
from sklearn.kernel_approximation import PolynomialCountSketch
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import MinMaxScaler
X, y = fetch_covtype(return_X_y=True)
# Approximate a degree-2 polynomial kernel with only 300 sketched components
# before the linear classifier.
pipe = make_pipeline(
    MinMaxScaler(),
    PolynomialCountSketch(degree=2, n_components=300),
    LogisticRegression(max_iter=1000),
)
X_train, X_test, y_train, y_test = train_test_split(
    X, y, train_size=5000, test_size=10000, random_state=42
)
pipe.fit(X_train, y_train).score(X_test, y_test)
##############################################################################
# For comparison, here is the score of a linear baseline for the same data:
linear_baseline = make_pipeline(MinMaxScaler(), LogisticRegression(max_iter=1000))
linear_baseline.fit(X_train, y_train).score(X_test, y_test)
##############################################################################
# Individual Conditional Expectation plots
# ----------------------------------------
# A new kind of partial dependence plot is available: the Individual
# Conditional Expectation (ICE) plot. ICE plots visualize the dependence of the
# prediction on a feature for each sample separately, with one line per sample.
# See the :ref:`User Guide <individual_conditional>`
from sklearn.datasets import fetch_california_housing
from sklearn.ensemble import RandomForestRegressor
# from sklearn.inspection import plot_partial_dependence
from sklearn.inspection import PartialDependenceDisplay
X, y = fetch_california_housing(return_X_y=True, as_frame=True)
features = ["MedInc", "AveOccup", "HouseAge", "AveRooms"]
est = RandomForestRegressor(n_estimators=10)
est.fit(X, y)
# plot_partial_dependence has been removed in version 1.2. From 1.2, use
# PartialDependenceDisplay instead.
# display = plot_partial_dependence(
# kind="individual" draws one ICE curve per (subsampled) training sample
# instead of the averaged partial-dependence line.
display = PartialDependenceDisplay.from_estimator(
    est,
    X,
    features,
    kind="individual",
    subsample=50,
    n_jobs=3,
    grid_resolution=20,
    random_state=0,
)
# Fix: the title previously said "with BayesianRidge", but the fitted
# estimator is a RandomForestRegressor (copy-paste from another example).
display.figure_.suptitle(
    "Partial dependence of house value on non-location features\n"
    "for the California housing dataset, with RandomForestRegressor"
)
display.figure_.subplots_adjust(hspace=0.3)
##############################################################################
# New Poisson splitting criterion for DecisionTreeRegressor
# ---------------------------------------------------------
# The integration of Poisson regression estimation continues from version 0.23.
# :class:`~sklearn.tree.DecisionTreeRegressor` now supports a new `'poisson'`
# splitting criterion. Setting `criterion="poisson"` might be a good choice
# if your target is a count or a frequency.
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
n_samples, n_features = 1000, 20
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
# positive integer target correlated with X[:, 5] with many zeros:
y = rng.poisson(lam=np.exp(X[:, 5]) / 2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=rng)
# criterion="poisson": split quality measured by Poisson deviance reduction,
# appropriate for count/frequency targets.
regressor = DecisionTreeRegressor(criterion="poisson", random_state=0)
regressor.fit(X_train, y_train)
##############################################################################
# New documentation improvements
# ------------------------------
#
# New examples and documentation pages have been added, in a continuous effort
# to improve the understanding of machine learning practices:
#
# - a new section about :ref:`common pitfalls and recommended
# practices <common_pitfalls>`,
# - an example illustrating how to :ref:`statistically compare the performance of
# models <sphx_glr_auto_examples_model_selection_plot_grid_search_stats.py>`
# evaluated using :class:`~sklearn.model_selection.GridSearchCV`,
# - an example on how to :ref:`interpret coefficients of linear models
# <sphx_glr_auto_examples_inspection_plot_linear_model_coefficient_interpretation.py>`,
# - an :ref:`example
# <sphx_glr_auto_examples_cross_decomposition_plot_pcr_vs_pls.py>`
# comparing Principal Component Regression and Partial Least Squares.
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/gaussian_process/plot_gpr_noisy_targets.py | examples/gaussian_process/plot_gpr_noisy_targets.py | """
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression example computed in two different ways:
1. A noise-free case
2. A noisy case with known noise-level per datapoint
In both cases, the kernel's parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process model
as well as its probabilistic nature in the form of a pointwise 95% confidence
interval.
Note that `alpha` is a parameter to control the strength of the Tikhonov
regularization on the assumed training points' covariance matrix.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# Dataset generation
# ------------------
#
# We will start by generating a synthetic dataset. The true generative process
# is defined as :math:`f(x) = x \sin(x)`.
import numpy as np
# Dense 1D grid (column vector) and the noise-free target f(x) = x sin(x).
X = np.linspace(start=0, stop=10, num=1_000).reshape(-1, 1)
y = np.squeeze(X * np.sin(X))
# %%
import matplotlib.pyplot as plt
plt.plot(X, y, label=r"$f(x) = x \sin(x)$", linestyle="dotted")
plt.legend()
plt.xlabel("$x$")
plt.ylabel("$f(x)$")
_ = plt.title("True generative process")
# %%
# We will use this dataset in the next experiment to illustrate how Gaussian
# Process regression is working.
#
# Example with noise-free target
# ------------------------------
#
# In this first example, we will use the true generative process without
# adding any noise. For training the Gaussian Process regression, we will only
# select a few samples.
# Randomly pick 6 of the 1000 grid points as the (noise-free) training set.
rng = np.random.RandomState(1)
training_indices = rng.choice(np.arange(y.size), size=6, replace=False)
X_train, y_train = X[training_indices], y[training_indices]
# %%
# Now, we fit a Gaussian process on these few training data samples. We will
# use a radial basis function (RBF) kernel and a constant parameter to fit the
# amplitude.
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF
# Constant amplitude times an RBF kernel; hyperparameters are optimized
# during fit, with 9 optimizer restarts to avoid poor local maxima.
kernel = 1 * RBF(length_scale=1.0, length_scale_bounds=(1e-2, 1e2))
gaussian_process = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9)
gaussian_process.fit(X_train, y_train)
gaussian_process.kernel_
# %%
# After fitting our model, we see that the hyperparameters of the kernel have
# been optimized. Now, we will use our kernel to compute the mean prediction
# of the full dataset and plot the 95% confidence interval.
mean_prediction, std_prediction = gaussian_process.predict(X, return_std=True)
plt.plot(X, y, label=r"$f(x) = x \sin(x)$", linestyle="dotted")
plt.scatter(X_train, y_train, label="Observations")
plt.plot(X, mean_prediction, label="Mean prediction")
# 1.96 standard deviations on each side gives a pointwise 95% interval.
plt.fill_between(
    X.ravel(),
    mean_prediction - 1.96 * std_prediction,
    mean_prediction + 1.96 * std_prediction,
    alpha=0.5,
    label=r"95% confidence interval",
)
plt.legend()
plt.xlabel("$x$")
plt.ylabel("$f(x)$")
_ = plt.title("Gaussian process regression on noise-free dataset")
# %%
# We see that for a prediction made on a data point close to the one from the
# training set, the 95% confidence has a small amplitude. Whenever a sample
# falls far from training data, our model's prediction is less accurate and the
# model prediction is less precise (higher uncertainty).
#
# Example with noisy targets
# --------------------------
#
# We can repeat a similar experiment adding an additional noise to the target
# this time. It will allow seeing the effect of the noise on the fitted model.
#
# We add some random Gaussian noise to the target with an arbitrary
# standard deviation.
# Corrupt the training targets with Gaussian noise of known std deviation.
noise_std = 0.75
y_train_noisy = y_train + rng.normal(loc=0.0, scale=noise_std, size=y_train.shape)
# %%
# We create a similar Gaussian process model. In addition to the kernel, this
# time, we specify the parameter `alpha` which can be interpreted as the
# variance of a Gaussian noise.
gaussian_process = GaussianProcessRegressor(
    kernel=kernel, alpha=noise_std**2, n_restarts_optimizer=9
)
gaussian_process.fit(X_train, y_train_noisy)
mean_prediction, std_prediction = gaussian_process.predict(X, return_std=True)
# %%
# Let's plot the mean prediction and the uncertainty region as before.
plt.plot(X, y, label=r"$f(x) = x \sin(x)$", linestyle="dotted")
# Error bars show the known per-point noise level on the observations.
plt.errorbar(
    X_train,
    y_train_noisy,
    noise_std,
    linestyle="None",
    color="tab:blue",
    marker=".",
    markersize=10,
    label="Observations",
)
plt.plot(X, mean_prediction, label="Mean prediction")
plt.fill_between(
    X.ravel(),
    mean_prediction - 1.96 * std_prediction,
    mean_prediction + 1.96 * std_prediction,
    color="tab:orange",
    alpha=0.5,
    label=r"95% confidence interval",
)
plt.legend()
plt.xlabel("$x$")
plt.ylabel("$f(x)$")
_ = plt.title("Gaussian process regression on a noisy dataset")
# %%
# The noise affects the predictions close to the training samples: the
# predictive uncertainty near to the training samples is larger because we
# explicitly model a given level target noise independent of the input
# variable.
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/gaussian_process/plot_gpc_iris.py | examples/gaussian_process/plot_gpc_iris.py | """
=====================================================
Gaussian process classification (GPC) on iris dataset
=====================================================
This example illustrates the predicted probability of GPC for an isotropic
and anisotropic RBF kernel on a two-dimensional version for the iris-dataset.
The anisotropic RBF kernel obtains slightly higher log-marginal-likelihood by
assigning different length-scales to the two feature dimensions.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features.
y = np.array(iris.target, dtype=int)
h = 0.02  # step size in the mesh
# Isotropic kernel: one shared length-scale for both features.
kernel = 1.0 * RBF([1.0])
gpc_rbf_isotropic = GaussianProcessClassifier(kernel=kernel).fit(X, y)
# Anisotropic kernel: an independent length-scale per feature dimension.
kernel = 1.0 * RBF([1.0, 1.0])
gpc_rbf_anisotropic = GaussianProcessClassifier(kernel=kernel).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
titles = ["Isotropic RBF", "Anisotropic RBF"]
plt.figure(figsize=(10, 5))
for i, clf in enumerate((gpc_rbf_isotropic, gpc_rbf_anisotropic)):
    # Plot the predicted probabilities. For that, we will assign a color to
    # each point in the mesh [x_min, x_max]x[y_min, y_max].
    plt.subplot(1, 2, i + 1)
    Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])
    # Put the result into a color plot: the three class probabilities are
    # mapped onto the image's RGB channels.
    Z = Z.reshape((xx.shape[0], xx.shape[1], 3))
    plt.imshow(Z, extent=(x_min, x_max, y_min, y_max), origin="lower")
    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=np.array(["r", "g", "b"])[y], edgecolors=(0, 0, 0))
    plt.xlabel("Sepal length")
    plt.ylabel("Sepal width")
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.xticks(())
    plt.yticks(())
    # Title includes the log-marginal-likelihood at the fitted hyperparameters.
    plt.title(
        "%s, LML: %.3f" % (titles[i], clf.log_marginal_likelihood(clf.kernel_.theta))
    )
plt.tight_layout()
plt.show()
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
scikit-learn/scikit-learn | https://github.com/scikit-learn/scikit-learn/blob/6dce55ebff962076625db46ab70b6b1c939f423b/examples/gaussian_process/plot_gpr_prior_posterior.py | examples/gaussian_process/plot_gpr_prior_posterior.py | """
==========================================================================
Illustration of prior and posterior Gaussian process for different kernels
==========================================================================
This example illustrates the prior and posterior of a
:class:`~sklearn.gaussian_process.GaussianProcessRegressor` with different
kernels. Mean, standard deviation, and 5 samples are shown for both prior
and posterior distributions.
Here, we only give some illustration. To know more about kernels' formulation,
refer to the :ref:`User Guide <gp_kernels>`.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# Helper function
# ---------------
#
# Before presenting each individual kernel available for Gaussian processes,
# we will define a helper function allowing us to plot samples drawn from
# the Gaussian process.
#
# This function will take a
# :class:`~sklearn.gaussian_process.GaussianProcessRegressor` model and will
# draw samples from the Gaussian process. If the model was not fit, the samples
# are drawn from the prior distribution while after model fitting, the samples are
# drawn from the posterior distribution.
import matplotlib.pyplot as plt
import numpy as np
def plot_gpr_samples(gpr_model, n_samples, ax):
    """Draw functions sampled from a Gaussian process model and plot them.
    When the model has not been fitted yet, the curves come from the prior
    distribution; after fitting, they come from the posterior. Note that one
    sample here corresponds to an entire function evaluated on a fixed grid.
    Parameters
    ----------
    gpr_model : `GaussianProcessRegressor`
        A :class:`~sklearn.gaussian_process.GaussianProcessRegressor` model.
    n_samples : int
        The number of samples to draw from the Gaussian process distribution.
    ax : matplotlib axis
        The matplotlib axis where to plot the samples.
    """
    # Fixed 1D evaluation grid; the 2D view is what sklearn estimators expect.
    grid = np.linspace(0, 5, 100)
    grid_2d = grid.reshape(-1, 1)
    mean, std = gpr_model.predict(grid_2d, return_std=True)
    sampled_curves = gpr_model.sample_y(grid_2d, n_samples)
    # One dashed curve per sampled function.
    for curve_idx, curve in enumerate(sampled_curves.T):
        ax.plot(
            grid,
            curve,
            linestyle="--",
            alpha=0.7,
            label=f"Sampled function #{curve_idx + 1}",
        )
    # Overlay the mean prediction and a one-standard-deviation band.
    ax.plot(grid, mean, color="black", label="Mean")
    ax.fill_between(
        grid,
        mean - std,
        mean + std,
        alpha=0.1,
        color="black",
        label=r"$\pm$ 1 std. dev.",
    )
    ax.set_xlabel("x")
    ax.set_ylabel("y")
    ax.set_ylim([-3, 3])
# %%
# Dataset and Gaussian process generation
# ---------------------------------------
# We will create a training dataset that we will use in the different sections.
# 10 training points drawn uniformly on [0, 5]; the target is a squared-sine
# wave centered at x = 2.5. n_samples is the number of GP draws per plot.
rng = np.random.RandomState(4)
X_train = rng.uniform(0, 5, 10).reshape(-1, 1)
y_train = np.sin((X_train[:, 0] - 2.5) ** 2)
n_samples = 5
# %%
# Kernel cookbook
# ---------------
#
# In this section, we illustrate some samples drawn from the prior and posterior
# distributions of the Gaussian process with different kernels.
#
# Radial Basis Function kernel
# ............................
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF
kernel = 1.0 * RBF(length_scale=1.0, length_scale_bounds=(1e-1, 10.0))
gpr = GaussianProcessRegressor(kernel=kernel, random_state=0)
# Two stacked panels: prior samples on top, posterior samples below.
fig, axs = plt.subplots(nrows=2, sharex=True, sharey=True, figsize=(10, 8))
# plot prior
plot_gpr_samples(gpr, n_samples=n_samples, ax=axs[0])
axs[0].set_title("Samples from prior distribution")
# plot posterior
gpr.fit(X_train, y_train)
plot_gpr_samples(gpr, n_samples=n_samples, ax=axs[1])
axs[1].scatter(X_train[:, 0], y_train, color="red", zorder=10, label="Observations")
axs[1].legend(bbox_to_anchor=(1.05, 1.5), loc="upper left")
axs[1].set_title("Samples from posterior distribution")
fig.suptitle("Radial Basis Function kernel", fontsize=18)
plt.tight_layout()
# %%
# Fix: the original f-string contained a stray closing parenthesis inside the
# literal ("...{kernel})"), which printed a spurious ")" after the kernel.
print(f"Kernel parameters before fit:\n{kernel}")
print(
    f"Kernel parameters after fit: \n{gpr.kernel_} \n"
    f"Log-likelihood: {gpr.log_marginal_likelihood(gpr.kernel_.theta):.3f}"
)
# %%
# Rational Quadratic kernel
# .........................
from sklearn.gaussian_process.kernels import RationalQuadratic

kernel = 1.0 * RationalQuadratic(length_scale=1.0, alpha=0.1, alpha_bounds=(1e-5, 1e15))
gpr = GaussianProcessRegressor(kernel=kernel, random_state=0)

fig, axs = plt.subplots(nrows=2, sharex=True, sharey=True, figsize=(10, 8))
# plot prior
plot_gpr_samples(gpr, n_samples=n_samples, ax=axs[0])
axs[0].set_title("Samples from prior distribution")
# plot posterior
gpr.fit(X_train, y_train)
plot_gpr_samples(gpr, n_samples=n_samples, ax=axs[1])
axs[1].scatter(X_train[:, 0], y_train, color="red", zorder=10, label="Observations")
axs[1].legend(bbox_to_anchor=(1.05, 1.5), loc="upper left")
axs[1].set_title("Samples from posterior distribution")
fig.suptitle("Rational Quadratic kernel", fontsize=18)
plt.tight_layout()

# %%
# Show the kernel hyperparameters before and after fitting.
# (Fixed: a stray ")" inside the f-string was printed after the kernel repr.)
print(f"Kernel parameters before fit:\n{kernel}")
print(
    f"Kernel parameters after fit: \n{gpr.kernel_} \n"
    f"Log-likelihood: {gpr.log_marginal_likelihood(gpr.kernel_.theta):.3f}"
)
# %%
# Exp-Sine-Squared kernel
# .......................
from sklearn.gaussian_process.kernels import ExpSineSquared

kernel = 1.0 * ExpSineSquared(
    length_scale=1.0,
    periodicity=3.0,
    length_scale_bounds=(0.1, 10.0),
    periodicity_bounds=(1.0, 10.0),
)
gpr = GaussianProcessRegressor(kernel=kernel, random_state=0)

fig, axs = plt.subplots(nrows=2, sharex=True, sharey=True, figsize=(10, 8))
# plot prior
plot_gpr_samples(gpr, n_samples=n_samples, ax=axs[0])
axs[0].set_title("Samples from prior distribution")
# plot posterior
gpr.fit(X_train, y_train)
plot_gpr_samples(gpr, n_samples=n_samples, ax=axs[1])
axs[1].scatter(X_train[:, 0], y_train, color="red", zorder=10, label="Observations")
axs[1].legend(bbox_to_anchor=(1.05, 1.5), loc="upper left")
axs[1].set_title("Samples from posterior distribution")
fig.suptitle("Exp-Sine-Squared kernel", fontsize=18)
plt.tight_layout()

# %%
# Show the kernel hyperparameters before and after fitting.
# (Fixed: a stray ")" inside the f-string was printed after the kernel repr.)
print(f"Kernel parameters before fit:\n{kernel}")
print(
    f"Kernel parameters after fit: \n{gpr.kernel_} \n"
    f"Log-likelihood: {gpr.log_marginal_likelihood(gpr.kernel_.theta):.3f}"
)
# %%
# Dot-product kernel
# ..................
from sklearn.gaussian_process.kernels import ConstantKernel, DotProduct

kernel = ConstantKernel(0.1, (0.01, 10.0)) * (
    DotProduct(sigma_0=1.0, sigma_0_bounds=(0.1, 10.0)) ** 2
)
# normalize_y=True because the dot-product kernel is not stationary; centering
# the targets stabilizes the fit for this kernel.
gpr = GaussianProcessRegressor(kernel=kernel, random_state=0, normalize_y=True)

fig, axs = plt.subplots(nrows=2, sharex=True, sharey=True, figsize=(10, 8))
# plot prior
plot_gpr_samples(gpr, n_samples=n_samples, ax=axs[0])
axs[0].set_title("Samples from prior distribution")
# plot posterior
gpr.fit(X_train, y_train)
plot_gpr_samples(gpr, n_samples=n_samples, ax=axs[1])
axs[1].scatter(X_train[:, 0], y_train, color="red", zorder=10, label="Observations")
axs[1].legend(bbox_to_anchor=(1.05, 1.5), loc="upper left")
axs[1].set_title("Samples from posterior distribution")
fig.suptitle("Dot-product kernel", fontsize=18)
plt.tight_layout()

# %%
# Show the kernel hyperparameters before and after fitting.
# (Fixed: a stray ")" inside the f-string was printed after the kernel repr.)
print(f"Kernel parameters before fit:\n{kernel}")
print(
    f"Kernel parameters after fit: \n{gpr.kernel_} \n"
    f"Log-likelihood: {gpr.log_marginal_likelihood(gpr.kernel_.theta):.3f}"
)
# %%
# Matérn kernel
# ..............
from sklearn.gaussian_process.kernels import Matern

kernel = 1.0 * Matern(length_scale=1.0, length_scale_bounds=(1e-1, 10.0), nu=1.5)
gpr = GaussianProcessRegressor(kernel=kernel, random_state=0)

fig, axs = plt.subplots(nrows=2, sharex=True, sharey=True, figsize=(10, 8))
# plot prior
plot_gpr_samples(gpr, n_samples=n_samples, ax=axs[0])
axs[0].set_title("Samples from prior distribution")
# plot posterior
gpr.fit(X_train, y_train)
plot_gpr_samples(gpr, n_samples=n_samples, ax=axs[1])
axs[1].scatter(X_train[:, 0], y_train, color="red", zorder=10, label="Observations")
axs[1].legend(bbox_to_anchor=(1.05, 1.5), loc="upper left")
axs[1].set_title("Samples from posterior distribution")
fig.suptitle("Matérn kernel", fontsize=18)
plt.tight_layout()

# %%
# Show the kernel hyperparameters before and after fitting.
# (Fixed: a stray ")" inside the f-string was printed after the kernel repr.)
print(f"Kernel parameters before fit:\n{kernel}")
print(
    f"Kernel parameters after fit: \n{gpr.kernel_} \n"
    f"Log-likelihood: {gpr.log_marginal_likelihood(gpr.kernel_.theta):.3f}"
)
| python | BSD-3-Clause | 6dce55ebff962076625db46ab70b6b1c939f423b | 2026-01-04T14:38:25.175347Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.