diff --git a/.gitattributes b/.gitattributes index 6bc8e55ddd545c5f49cfceb537d4b76fecbf48dd..60b9c5cb78c9f14c0850810292aaded95e888455 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1644,3 +1644,4 @@ evalkit_internvl/lib/python3.10/site-packages/sympy/solvers/__pycache__/solvers. evalkit_internvl/lib/python3.10/site-packages/sympy/solvers/__pycache__/solveset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text evalkit_internvl/lib/python3.10/site-packages/sympy/solvers/ode/__pycache__/single.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text evalkit_internvl/lib/python3.10/site-packages/sympy/polys/benchmarks/__pycache__/bench_solvers.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +evalkit_internvl/lib/python3.10/site-packages/tiktoken/_tiktoken.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/evalkit_internvl/lib/python3.10/site-packages/bitsandbytes-0.41.0.dist-info/LICENSE b/evalkit_internvl/lib/python3.10/site-packages/bitsandbytes-0.41.0.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..b96dcb0480a0b0be0727976e5202a1e7b23edc3f --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/bitsandbytes-0.41.0.dist-info/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Facebook, Inc. and its affiliates. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/evalkit_internvl/lib/python3.10/site-packages/bitsandbytes-0.41.0.dist-info/REQUESTED b/evalkit_internvl/lib/python3.10/site-packages/bitsandbytes-0.41.0.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_internvl/lib/python3.10/site-packages/httpcore/_backends/trio.py b/evalkit_internvl/lib/python3.10/site-packages/httpcore/_backends/trio.py new file mode 100644 index 0000000000000000000000000000000000000000..b1626d28e2ded284a65d48a32309ee201d953c5e --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/httpcore/_backends/trio.py @@ -0,0 +1,161 @@ +import ssl +import typing + +import trio + +from .._exceptions import ( + ConnectError, + ConnectTimeout, + ExceptionMapping, + ReadError, + ReadTimeout, + WriteError, + WriteTimeout, + map_exceptions, +) +from .base import SOCKET_OPTION, AsyncNetworkBackend, AsyncNetworkStream + + +class TrioStream(AsyncNetworkStream): + def __init__(self, stream: trio.abc.Stream) -> None: + self._stream = stream + + async def read( + self, max_bytes: int, timeout: typing.Optional[float] = None + ) -> bytes: + timeout_or_inf = float("inf") if timeout is None else timeout + exc_map: ExceptionMapping = { + trio.TooSlowError: ReadTimeout, + trio.BrokenResourceError: ReadError, + trio.ClosedResourceError: ReadError, + } + with map_exceptions(exc_map): + with trio.fail_after(timeout_or_inf): + data: bytes = await 
self._stream.receive_some(max_bytes=max_bytes) + return data + + async def write( + self, buffer: bytes, timeout: typing.Optional[float] = None + ) -> None: + if not buffer: + return + + timeout_or_inf = float("inf") if timeout is None else timeout + exc_map: ExceptionMapping = { + trio.TooSlowError: WriteTimeout, + trio.BrokenResourceError: WriteError, + trio.ClosedResourceError: WriteError, + } + with map_exceptions(exc_map): + with trio.fail_after(timeout_or_inf): + await self._stream.send_all(data=buffer) + + async def aclose(self) -> None: + await self._stream.aclose() + + async def start_tls( + self, + ssl_context: ssl.SSLContext, + server_hostname: typing.Optional[str] = None, + timeout: typing.Optional[float] = None, + ) -> AsyncNetworkStream: + timeout_or_inf = float("inf") if timeout is None else timeout + exc_map: ExceptionMapping = { + trio.TooSlowError: ConnectTimeout, + trio.BrokenResourceError: ConnectError, + } + ssl_stream = trio.SSLStream( + self._stream, + ssl_context=ssl_context, + server_hostname=server_hostname, + https_compatible=True, + server_side=False, + ) + with map_exceptions(exc_map): + try: + with trio.fail_after(timeout_or_inf): + await ssl_stream.do_handshake() + except Exception as exc: # pragma: nocover + await self.aclose() + raise exc + return TrioStream(ssl_stream) + + def get_extra_info(self, info: str) -> typing.Any: + if info == "ssl_object" and isinstance(self._stream, trio.SSLStream): + # Type checkers cannot see `_ssl_object` attribute because trio._ssl.SSLStream uses __getattr__/__setattr__. 
+ # Tracked at https://github.com/python-trio/trio/issues/542 + return self._stream._ssl_object # type: ignore[attr-defined] + if info == "client_addr": + return self._get_socket_stream().socket.getsockname() + if info == "server_addr": + return self._get_socket_stream().socket.getpeername() + if info == "socket": + stream = self._stream + while isinstance(stream, trio.SSLStream): + stream = stream.transport_stream + assert isinstance(stream, trio.SocketStream) + return stream.socket + if info == "is_readable": + socket = self.get_extra_info("socket") + return socket.is_readable() + return None + + def _get_socket_stream(self) -> trio.SocketStream: + stream = self._stream + while isinstance(stream, trio.SSLStream): + stream = stream.transport_stream + assert isinstance(stream, trio.SocketStream) + return stream + + +class TrioBackend(AsyncNetworkBackend): + async def connect_tcp( + self, + host: str, + port: int, + timeout: typing.Optional[float] = None, + local_address: typing.Optional[str] = None, + socket_options: typing.Optional[typing.Iterable[SOCKET_OPTION]] = None, + ) -> AsyncNetworkStream: + # By default for TCP sockets, trio enables TCP_NODELAY. 
+ # https://trio.readthedocs.io/en/stable/reference-io.html#trio.SocketStream + if socket_options is None: + socket_options = [] # pragma: no cover + timeout_or_inf = float("inf") if timeout is None else timeout + exc_map: ExceptionMapping = { + trio.TooSlowError: ConnectTimeout, + trio.BrokenResourceError: ConnectError, + OSError: ConnectError, + } + with map_exceptions(exc_map): + with trio.fail_after(timeout_or_inf): + stream: trio.abc.Stream = await trio.open_tcp_stream( + host=host, port=port, local_address=local_address + ) + for option in socket_options: + stream.setsockopt(*option) # type: ignore[attr-defined] # pragma: no cover + return TrioStream(stream) + + async def connect_unix_socket( + self, + path: str, + timeout: typing.Optional[float] = None, + socket_options: typing.Optional[typing.Iterable[SOCKET_OPTION]] = None, + ) -> AsyncNetworkStream: # pragma: nocover + if socket_options is None: + socket_options = [] + timeout_or_inf = float("inf") if timeout is None else timeout + exc_map: ExceptionMapping = { + trio.TooSlowError: ConnectTimeout, + trio.BrokenResourceError: ConnectError, + OSError: ConnectError, + } + with map_exceptions(exc_map): + with trio.fail_after(timeout_or_inf): + stream: trio.abc.Stream = await trio.open_unix_socket(path) + for option in socket_options: + stream.setsockopt(*option) # type: ignore[attr-defined] # pragma: no cover + return TrioStream(stream) + + async def sleep(self, seconds: float) -> None: + await trio.sleep(seconds) # pragma: nocover diff --git a/evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/__pycache__/__init__.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca39adfe1971d2daf06413af4becb0ec70572b32 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/__pycache__/connection.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/__pycache__/connection.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eee6667b2e28c63a7f710096b4207ed01f1dcdef Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/__pycache__/connection.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/__pycache__/http11.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/__pycache__/http11.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb72bdf61f51d791da66da44ac03721adeb26230 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/__pycache__/http11.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/__pycache__/http2.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/__pycache__/http2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91cf4c8c74049c69736727b09cca4fe1da1212be Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/__pycache__/http2.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/__pycache__/http_proxy.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/__pycache__/http_proxy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c377769bd71e5fd767ba2ec58d7eb49ccb0df253 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/__pycache__/http_proxy.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/__pycache__/socks_proxy.cpython-310.pyc 
b/evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/__pycache__/socks_proxy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..43839fe16d93a1002b06b4d9fb4eadccc828d22e Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/__pycache__/socks_proxy.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/connection.py b/evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/connection.py new file mode 100644 index 0000000000000000000000000000000000000000..39b8b97e8780966200934fda8c305db23a69442a --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/connection.py @@ -0,0 +1,215 @@ +import itertools +import logging +import ssl +from types import TracebackType +from typing import Iterable, Iterator, Optional, Type + +from .._backends.sync import SyncBackend +from .._backends.base import SOCKET_OPTION, NetworkBackend, NetworkStream +from .._exceptions import ConnectError, ConnectionNotAvailable, ConnectTimeout +from .._models import Origin, Request, Response +from .._ssl import default_ssl_context +from .._synchronization import Lock +from .._trace import Trace +from .http11 import HTTP11Connection +from .interfaces import ConnectionInterface + +RETRIES_BACKOFF_FACTOR = 0.5 # 0s, 0.5s, 1s, 2s, 4s, etc. 
+ + +logger = logging.getLogger("httpcore.connection") + + +def exponential_backoff(factor: float) -> Iterator[float]: + yield 0 + for n in itertools.count(2): + yield factor * (2 ** (n - 2)) + + +class HTTPConnection(ConnectionInterface): + def __init__( + self, + origin: Origin, + ssl_context: Optional[ssl.SSLContext] = None, + keepalive_expiry: Optional[float] = None, + http1: bool = True, + http2: bool = False, + retries: int = 0, + local_address: Optional[str] = None, + uds: Optional[str] = None, + network_backend: Optional[NetworkBackend] = None, + socket_options: Optional[Iterable[SOCKET_OPTION]] = None, + ) -> None: + self._origin = origin + self._ssl_context = ssl_context + self._keepalive_expiry = keepalive_expiry + self._http1 = http1 + self._http2 = http2 + self._retries = retries + self._local_address = local_address + self._uds = uds + + self._network_backend: NetworkBackend = ( + SyncBackend() if network_backend is None else network_backend + ) + self._connection: Optional[ConnectionInterface] = None + self._connect_failed: bool = False + self._request_lock = Lock() + self._socket_options = socket_options + + def handle_request(self, request: Request) -> Response: + if not self.can_handle_request(request.url.origin): + raise RuntimeError( + f"Attempted to send request to {request.url.origin} on connection to {self._origin}" + ) + + with self._request_lock: + if self._connection is None: + try: + stream = self._connect(request) + + ssl_object = stream.get_extra_info("ssl_object") + http2_negotiated = ( + ssl_object is not None + and ssl_object.selected_alpn_protocol() == "h2" + ) + if http2_negotiated or (self._http2 and not self._http1): + from .http2 import HTTP2Connection + + self._connection = HTTP2Connection( + origin=self._origin, + stream=stream, + keepalive_expiry=self._keepalive_expiry, + ) + else: + self._connection = HTTP11Connection( + origin=self._origin, + stream=stream, + keepalive_expiry=self._keepalive_expiry, + ) + except Exception 
as exc: + self._connect_failed = True + raise exc + elif not self._connection.is_available(): + raise ConnectionNotAvailable() + + return self._connection.handle_request(request) + + def _connect(self, request: Request) -> NetworkStream: + timeouts = request.extensions.get("timeout", {}) + sni_hostname = request.extensions.get("sni_hostname", None) + timeout = timeouts.get("connect", None) + + retries_left = self._retries + delays = exponential_backoff(factor=RETRIES_BACKOFF_FACTOR) + + while True: + try: + if self._uds is None: + kwargs = { + "host": self._origin.host.decode("ascii"), + "port": self._origin.port, + "local_address": self._local_address, + "timeout": timeout, + "socket_options": self._socket_options, + } + with Trace("connect_tcp", logger, request, kwargs) as trace: + stream = self._network_backend.connect_tcp(**kwargs) + trace.return_value = stream + else: + kwargs = { + "path": self._uds, + "timeout": timeout, + "socket_options": self._socket_options, + } + with Trace( + "connect_unix_socket", logger, request, kwargs + ) as trace: + stream = self._network_backend.connect_unix_socket( + **kwargs + ) + trace.return_value = stream + + if self._origin.scheme == b"https": + ssl_context = ( + default_ssl_context() + if self._ssl_context is None + else self._ssl_context + ) + alpn_protocols = ["http/1.1", "h2"] if self._http2 else ["http/1.1"] + ssl_context.set_alpn_protocols(alpn_protocols) + + kwargs = { + "ssl_context": ssl_context, + "server_hostname": sni_hostname + or self._origin.host.decode("ascii"), + "timeout": timeout, + } + with Trace("start_tls", logger, request, kwargs) as trace: + stream = stream.start_tls(**kwargs) + trace.return_value = stream + return stream + except (ConnectError, ConnectTimeout): + if retries_left <= 0: + raise + retries_left -= 1 + delay = next(delays) + with Trace("retry", logger, request, kwargs) as trace: + self._network_backend.sleep(delay) + + def can_handle_request(self, origin: Origin) -> bool: + return origin 
== self._origin + + def close(self) -> None: + if self._connection is not None: + with Trace("close", logger, None, {}): + self._connection.close() + + def is_available(self) -> bool: + if self._connection is None: + # If HTTP/2 support is enabled, and the resulting connection could + # end up as HTTP/2 then we should indicate the connection as being + # available to service multiple requests. + return ( + self._http2 + and (self._origin.scheme == b"https" or not self._http1) + and not self._connect_failed + ) + return self._connection.is_available() + + def has_expired(self) -> bool: + if self._connection is None: + return self._connect_failed + return self._connection.has_expired() + + def is_idle(self) -> bool: + if self._connection is None: + return self._connect_failed + return self._connection.is_idle() + + def is_closed(self) -> bool: + if self._connection is None: + return self._connect_failed + return self._connection.is_closed() + + def info(self) -> str: + if self._connection is None: + return "CONNECTION FAILED" if self._connect_failed else "CONNECTING" + return self._connection.info() + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{self.info()}]>" + + # These context managers are not used in the standard flow, but are + # useful for testing or working with connection instances directly. 
+ + def __enter__(self) -> "HTTPConnection": + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]] = None, + exc_value: Optional[BaseException] = None, + traceback: Optional[TracebackType] = None, + ) -> None: + self.close() diff --git a/evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/http11.py b/evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/http11.py new file mode 100644 index 0000000000000000000000000000000000000000..edcce72abb8bc5b6aea634cf9d0b6d49ab8a2ffa --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/http11.py @@ -0,0 +1,331 @@ +import enum +import logging +import time +from types import TracebackType +from typing import ( + Iterable, + Iterator, + List, + Optional, + Tuple, + Type, + Union, + cast, +) + +import h11 + +from .._backends.base import NetworkStream +from .._exceptions import ( + ConnectionNotAvailable, + LocalProtocolError, + RemoteProtocolError, + map_exceptions, +) +from .._models import Origin, Request, Response +from .._synchronization import Lock, ShieldCancellation +from .._trace import Trace +from .interfaces import ConnectionInterface + +logger = logging.getLogger("httpcore.http11") + + +# A subset of `h11.Event` types supported by `_send_event` +H11SendEvent = Union[ + h11.Request, + h11.Data, + h11.EndOfMessage, +] + + +class HTTPConnectionState(enum.IntEnum): + NEW = 0 + ACTIVE = 1 + IDLE = 2 + CLOSED = 3 + + +class HTTP11Connection(ConnectionInterface): + READ_NUM_BYTES = 64 * 1024 + MAX_INCOMPLETE_EVENT_SIZE = 100 * 1024 + + def __init__( + self, + origin: Origin, + stream: NetworkStream, + keepalive_expiry: Optional[float] = None, + ) -> None: + self._origin = origin + self._network_stream = stream + self._keepalive_expiry: Optional[float] = keepalive_expiry + self._expire_at: Optional[float] = None + self._state = HTTPConnectionState.NEW + self._state_lock = Lock() + self._request_count = 0 + self._h11_state = h11.Connection( + 
our_role=h11.CLIENT, + max_incomplete_event_size=self.MAX_INCOMPLETE_EVENT_SIZE, + ) + + def handle_request(self, request: Request) -> Response: + if not self.can_handle_request(request.url.origin): + raise RuntimeError( + f"Attempted to send request to {request.url.origin} on connection " + f"to {self._origin}" + ) + + with self._state_lock: + if self._state in (HTTPConnectionState.NEW, HTTPConnectionState.IDLE): + self._request_count += 1 + self._state = HTTPConnectionState.ACTIVE + self._expire_at = None + else: + raise ConnectionNotAvailable() + + try: + kwargs = {"request": request} + with Trace("send_request_headers", logger, request, kwargs) as trace: + self._send_request_headers(**kwargs) + with Trace("send_request_body", logger, request, kwargs) as trace: + self._send_request_body(**kwargs) + with Trace( + "receive_response_headers", logger, request, kwargs + ) as trace: + ( + http_version, + status, + reason_phrase, + headers, + ) = self._receive_response_headers(**kwargs) + trace.return_value = ( + http_version, + status, + reason_phrase, + headers, + ) + + return Response( + status=status, + headers=headers, + content=HTTP11ConnectionByteStream(self, request), + extensions={ + "http_version": http_version, + "reason_phrase": reason_phrase, + "network_stream": self._network_stream, + }, + ) + except BaseException as exc: + with ShieldCancellation(): + with Trace("response_closed", logger, request) as trace: + self._response_closed() + raise exc + + # Sending the request... 
+ + def _send_request_headers(self, request: Request) -> None: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("write", None) + + with map_exceptions({h11.LocalProtocolError: LocalProtocolError}): + event = h11.Request( + method=request.method, + target=request.url.target, + headers=request.headers, + ) + self._send_event(event, timeout=timeout) + + def _send_request_body(self, request: Request) -> None: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("write", None) + + assert isinstance(request.stream, Iterable) + for chunk in request.stream: + event = h11.Data(data=chunk) + self._send_event(event, timeout=timeout) + + self._send_event(h11.EndOfMessage(), timeout=timeout) + + def _send_event( + self, event: h11.Event, timeout: Optional[float] = None + ) -> None: + bytes_to_send = self._h11_state.send(event) + if bytes_to_send is not None: + self._network_stream.write(bytes_to_send, timeout=timeout) + + # Receiving the response... + + def _receive_response_headers( + self, request: Request + ) -> Tuple[bytes, int, bytes, List[Tuple[bytes, bytes]]]: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("read", None) + + while True: + event = self._receive_event(timeout=timeout) + if isinstance(event, h11.Response): + break + if ( + isinstance(event, h11.InformationalResponse) + and event.status_code == 101 + ): + break + + http_version = b"HTTP/" + event.http_version + + # h11 version 0.11+ supports a `raw_items` interface to get the + # raw header casing, rather than the enforced lowercase headers. 
+ headers = event.headers.raw_items() + + return http_version, event.status_code, event.reason, headers + + def _receive_response_body(self, request: Request) -> Iterator[bytes]: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("read", None) + + while True: + event = self._receive_event(timeout=timeout) + if isinstance(event, h11.Data): + yield bytes(event.data) + elif isinstance(event, (h11.EndOfMessage, h11.PAUSED)): + break + + def _receive_event( + self, timeout: Optional[float] = None + ) -> Union[h11.Event, Type[h11.PAUSED]]: + while True: + with map_exceptions({h11.RemoteProtocolError: RemoteProtocolError}): + event = self._h11_state.next_event() + + if event is h11.NEED_DATA: + data = self._network_stream.read( + self.READ_NUM_BYTES, timeout=timeout + ) + + # If we feed this case through h11 we'll raise an exception like: + # + # httpcore.RemoteProtocolError: can't handle event type + # ConnectionClosed when role=SERVER and state=SEND_RESPONSE + # + # Which is accurate, but not very informative from an end-user + # perspective. Instead we handle this case distinctly and treat + # it as a ConnectError. + if data == b"" and self._h11_state.their_state == h11.SEND_RESPONSE: + msg = "Server disconnected without sending a response." + raise RemoteProtocolError(msg) + + self._h11_state.receive_data(data) + else: + # mypy fails to narrow the type in the above if statement above + return cast(Union[h11.Event, Type[h11.PAUSED]], event) + + def _response_closed(self) -> None: + with self._state_lock: + if ( + self._h11_state.our_state is h11.DONE + and self._h11_state.their_state is h11.DONE + ): + self._state = HTTPConnectionState.IDLE + self._h11_state.start_next_cycle() + if self._keepalive_expiry is not None: + now = time.monotonic() + self._expire_at = now + self._keepalive_expiry + else: + self.close() + + # Once the connection is no longer required... 
+ + def close(self) -> None: + # Note that this method unilaterally closes the connection, and does + # not have any kind of locking in place around it. + self._state = HTTPConnectionState.CLOSED + self._network_stream.close() + + # The ConnectionInterface methods provide information about the state of + # the connection, allowing for a connection pooling implementation to + # determine when to reuse and when to close the connection... + + def can_handle_request(self, origin: Origin) -> bool: + return origin == self._origin + + def is_available(self) -> bool: + # Note that HTTP/1.1 connections in the "NEW" state are not treated as + # being "available". The control flow which created the connection will + # be able to send an outgoing request, but the connection will not be + # acquired from the connection pool for any other request. + return self._state == HTTPConnectionState.IDLE + + def has_expired(self) -> bool: + now = time.monotonic() + keepalive_expired = self._expire_at is not None and now > self._expire_at + + # If the HTTP connection is idle but the socket is readable, then the + # only valid state is that the socket is about to return b"", indicating + # a server-initiated disconnect. 
+ server_disconnected = ( + self._state == HTTPConnectionState.IDLE + and self._network_stream.get_extra_info("is_readable") + ) + + return keepalive_expired or server_disconnected + + def is_idle(self) -> bool: + return self._state == HTTPConnectionState.IDLE + + def is_closed(self) -> bool: + return self._state == HTTPConnectionState.CLOSED + + def info(self) -> str: + origin = str(self._origin) + return ( + f"{origin!r}, HTTP/1.1, {self._state.name}, " + f"Request Count: {self._request_count}" + ) + + def __repr__(self) -> str: + class_name = self.__class__.__name__ + origin = str(self._origin) + return ( + f"<{class_name} [{origin!r}, {self._state.name}, " + f"Request Count: {self._request_count}]>" + ) + + # These context managers are not used in the standard flow, but are + # useful for testing or working with connection instances directly. + + def __enter__(self) -> "HTTP11Connection": + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]] = None, + exc_value: Optional[BaseException] = None, + traceback: Optional[TracebackType] = None, + ) -> None: + self.close() + + +class HTTP11ConnectionByteStream: + def __init__(self, connection: HTTP11Connection, request: Request) -> None: + self._connection = connection + self._request = request + self._closed = False + + def __iter__(self) -> Iterator[bytes]: + kwargs = {"request": self._request} + try: + with Trace("receive_response_body", logger, self._request, kwargs): + for chunk in self._connection._receive_response_body(**kwargs): + yield chunk + except BaseException as exc: + # If we get an exception while streaming the response, + # we want to close the response (and possibly the connection) + # before raising that exception. 
+ with ShieldCancellation(): + self.close() + raise exc + + def close(self) -> None: + if not self._closed: + self._closed = True + with Trace("response_closed", logger, self._request): + self._connection._response_closed() diff --git a/evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/http2.py b/evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/http2.py new file mode 100644 index 0000000000000000000000000000000000000000..d141d459a59d134beac3b2dffb17d17f29abcea4 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/http2.py @@ -0,0 +1,589 @@ +import enum +import logging +import time +import types +import typing + +import h2.config +import h2.connection +import h2.events +import h2.exceptions +import h2.settings + +from .._backends.base import NetworkStream +from .._exceptions import ( + ConnectionNotAvailable, + LocalProtocolError, + RemoteProtocolError, +) +from .._models import Origin, Request, Response +from .._synchronization import Lock, Semaphore, ShieldCancellation +from .._trace import Trace +from .interfaces import ConnectionInterface + +logger = logging.getLogger("httpcore.http2") + + +def has_body_headers(request: Request) -> bool: + return any( + k.lower() == b"content-length" or k.lower() == b"transfer-encoding" + for k, v in request.headers + ) + + +class HTTPConnectionState(enum.IntEnum): + ACTIVE = 1 + IDLE = 2 + CLOSED = 3 + + +class HTTP2Connection(ConnectionInterface): + READ_NUM_BYTES = 64 * 1024 + CONFIG = h2.config.H2Configuration(validate_inbound_headers=False) + + def __init__( + self, + origin: Origin, + stream: NetworkStream, + keepalive_expiry: typing.Optional[float] = None, + ): + self._origin = origin + self._network_stream = stream + self._keepalive_expiry: typing.Optional[float] = keepalive_expiry + self._h2_state = h2.connection.H2Connection(config=self.CONFIG) + self._state = HTTPConnectionState.IDLE + self._expire_at: typing.Optional[float] = None + self._request_count = 0 + 
self._init_lock = Lock() + self._state_lock = Lock() + self._read_lock = Lock() + self._write_lock = Lock() + self._sent_connection_init = False + self._used_all_stream_ids = False + self._connection_error = False + + # Mapping from stream ID to response stream events. + self._events: typing.Dict[ + int, + typing.Union[ + h2.events.ResponseReceived, + h2.events.DataReceived, + h2.events.StreamEnded, + h2.events.StreamReset, + ], + ] = {} + + # Connection terminated events are stored as state since + # we need to handle them for all streams. + self._connection_terminated: typing.Optional[ + h2.events.ConnectionTerminated + ] = None + + self._read_exception: typing.Optional[Exception] = None + self._write_exception: typing.Optional[Exception] = None + + def handle_request(self, request: Request) -> Response: + if not self.can_handle_request(request.url.origin): + # This cannot occur in normal operation, since the connection pool + # will only send requests on connections that handle them. + # It's in place simply for resilience as a guard against incorrect + # usage, for anyone working directly with httpcore connections. 
+ raise RuntimeError( + f"Attempted to send request to {request.url.origin} on connection " + f"to {self._origin}" + ) + + with self._state_lock: + if self._state in (HTTPConnectionState.ACTIVE, HTTPConnectionState.IDLE): + self._request_count += 1 + self._expire_at = None + self._state = HTTPConnectionState.ACTIVE + else: + raise ConnectionNotAvailable() + + with self._init_lock: + if not self._sent_connection_init: + try: + kwargs = {"request": request} + with Trace("send_connection_init", logger, request, kwargs): + self._send_connection_init(**kwargs) + except BaseException as exc: + with ShieldCancellation(): + self.close() + raise exc + + self._sent_connection_init = True + + # Initially start with just 1 until the remote server provides + # its max_concurrent_streams value + self._max_streams = 1 + + local_settings_max_streams = ( + self._h2_state.local_settings.max_concurrent_streams + ) + self._max_streams_semaphore = Semaphore(local_settings_max_streams) + + for _ in range(local_settings_max_streams - self._max_streams): + self._max_streams_semaphore.acquire() + + self._max_streams_semaphore.acquire() + + try: + stream_id = self._h2_state.get_next_available_stream_id() + self._events[stream_id] = [] + except h2.exceptions.NoAvailableStreamIDError: # pragma: nocover + self._used_all_stream_ids = True + self._request_count -= 1 + raise ConnectionNotAvailable() + + try: + kwargs = {"request": request, "stream_id": stream_id} + with Trace("send_request_headers", logger, request, kwargs): + self._send_request_headers(request=request, stream_id=stream_id) + with Trace("send_request_body", logger, request, kwargs): + self._send_request_body(request=request, stream_id=stream_id) + with Trace( + "receive_response_headers", logger, request, kwargs + ) as trace: + status, headers = self._receive_response( + request=request, stream_id=stream_id + ) + trace.return_value = (status, headers) + + return Response( + status=status, + headers=headers, + 
content=HTTP2ConnectionByteStream(self, request, stream_id=stream_id), + extensions={ + "http_version": b"HTTP/2", + "network_stream": self._network_stream, + "stream_id": stream_id, + }, + ) + except BaseException as exc: # noqa: PIE786 + with ShieldCancellation(): + kwargs = {"stream_id": stream_id} + with Trace("response_closed", logger, request, kwargs): + self._response_closed(stream_id=stream_id) + + if isinstance(exc, h2.exceptions.ProtocolError): + # One case where h2 can raise a protocol error is when a + # closed frame has been seen by the state machine. + # + # This happens when one stream is reading, and encounters + # a GOAWAY event. Other flows of control may then raise + # a protocol error at any point they interact with the 'h2_state'. + # + # In this case we'll have stored the event, and should raise + # it as a RemoteProtocolError. + if self._connection_terminated: # pragma: nocover + raise RemoteProtocolError(self._connection_terminated) + # If h2 raises a protocol error in some other state then we + # must somehow have made a protocol violation. + raise LocalProtocolError(exc) # pragma: nocover + + raise exc + + def _send_connection_init(self, request: Request) -> None: + """ + The HTTP/2 connection requires some initial setup before we can start + using individual request/response streams on it. + """ + # Need to set these manually here instead of manipulating via + # __setitem__() otherwise the H2Connection will emit SettingsUpdate + # frames in addition to sending the undesired defaults. + self._h2_state.local_settings = h2.settings.Settings( + client=True, + initial_values={ + # Disable PUSH_PROMISE frames from the server since we don't do anything + # with them for now. Maybe when we support caching? 
+ h2.settings.SettingCodes.ENABLE_PUSH: 0, + # These two are taken from h2 for safe defaults + h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS: 100, + h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE: 65536, + }, + ) + + # Some websites (*cough* Yahoo *cough*) balk at this setting being + # present in the initial handshake since it's not defined in the original + # RFC despite the RFC mandating ignoring settings you don't know about. + del self._h2_state.local_settings[ + h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL + ] + + self._h2_state.initiate_connection() + self._h2_state.increment_flow_control_window(2**24) + self._write_outgoing_data(request) + + # Sending the request... + + def _send_request_headers(self, request: Request, stream_id: int) -> None: + """ + Send the request headers to a given stream ID. + """ + end_stream = not has_body_headers(request) + + # In HTTP/2 the ':authority' pseudo-header is used instead of 'Host'. + # In order to gracefully handle HTTP/1.1 and HTTP/2 we always require + # HTTP/1.1 style headers, and map them appropriately if we end up on + # an HTTP/2 connection. + authority = [v for k, v in request.headers if k.lower() == b"host"][0] + + headers = [ + (b":method", request.method), + (b":authority", authority), + (b":scheme", request.url.scheme), + (b":path", request.url.target), + ] + [ + (k.lower(), v) + for k, v in request.headers + if k.lower() + not in ( + b"host", + b"transfer-encoding", + ) + ] + + self._h2_state.send_headers(stream_id, headers, end_stream=end_stream) + self._h2_state.increment_flow_control_window(2**24, stream_id=stream_id) + self._write_outgoing_data(request) + + def _send_request_body(self, request: Request, stream_id: int) -> None: + """ + Iterate over the request body sending it to a given stream ID. 
+ """ + if not has_body_headers(request): + return + + assert isinstance(request.stream, typing.Iterable) + for data in request.stream: + self._send_stream_data(request, stream_id, data) + self._send_end_stream(request, stream_id) + + def _send_stream_data( + self, request: Request, stream_id: int, data: bytes + ) -> None: + """ + Send a single chunk of data in one or more data frames. + """ + while data: + max_flow = self._wait_for_outgoing_flow(request, stream_id) + chunk_size = min(len(data), max_flow) + chunk, data = data[:chunk_size], data[chunk_size:] + self._h2_state.send_data(stream_id, chunk) + self._write_outgoing_data(request) + + def _send_end_stream(self, request: Request, stream_id: int) -> None: + """ + Send an empty data frame on on a given stream ID with the END_STREAM flag set. + """ + self._h2_state.end_stream(stream_id) + self._write_outgoing_data(request) + + # Receiving the response... + + def _receive_response( + self, request: Request, stream_id: int + ) -> typing.Tuple[int, typing.List[typing.Tuple[bytes, bytes]]]: + """ + Return the response status code and headers for a given stream ID. + """ + while True: + event = self._receive_stream_event(request, stream_id) + if isinstance(event, h2.events.ResponseReceived): + break + + status_code = 200 + headers = [] + for k, v in event.headers: + if k == b":status": + status_code = int(v.decode("ascii", errors="ignore")) + elif not k.startswith(b":"): + headers.append((k, v)) + + return (status_code, headers) + + def _receive_response_body( + self, request: Request, stream_id: int + ) -> typing.Iterator[bytes]: + """ + Iterator that returns the bytes of the response body for a given stream ID. 
+ """ + while True: + event = self._receive_stream_event(request, stream_id) + if isinstance(event, h2.events.DataReceived): + amount = event.flow_controlled_length + self._h2_state.acknowledge_received_data(amount, stream_id) + self._write_outgoing_data(request) + yield event.data + elif isinstance(event, h2.events.StreamEnded): + break + + def _receive_stream_event( + self, request: Request, stream_id: int + ) -> typing.Union[ + h2.events.ResponseReceived, h2.events.DataReceived, h2.events.StreamEnded + ]: + """ + Return the next available event for a given stream ID. + + Will read more data from the network if required. + """ + while not self._events.get(stream_id): + self._receive_events(request, stream_id) + event = self._events[stream_id].pop(0) + if isinstance(event, h2.events.StreamReset): + raise RemoteProtocolError(event) + return event + + def _receive_events( + self, request: Request, stream_id: typing.Optional[int] = None + ) -> None: + """ + Read some data from the network until we see one or more events + for a given stream ID. + """ + with self._read_lock: + if self._connection_terminated is not None: + last_stream_id = self._connection_terminated.last_stream_id + if stream_id and last_stream_id and stream_id > last_stream_id: + self._request_count -= 1 + raise ConnectionNotAvailable() + raise RemoteProtocolError(self._connection_terminated) + + # This conditional is a bit icky. We don't want to block reading if we've + # actually got an event to return for a given stream. We need to do that + # check *within* the atomic read lock. Though it also need to be optional, + # because when we call it from `_wait_for_outgoing_flow` we *do* want to + # block until we've available flow control, event when we have events + # pending for the stream ID we're attempting to send on. 
+ if stream_id is None or not self._events.get(stream_id): + events = self._read_incoming_data(request) + for event in events: + if isinstance(event, h2.events.RemoteSettingsChanged): + with Trace( + "receive_remote_settings", logger, request + ) as trace: + self._receive_remote_settings_change(event) + trace.return_value = event + + elif isinstance( + event, + ( + h2.events.ResponseReceived, + h2.events.DataReceived, + h2.events.StreamEnded, + h2.events.StreamReset, + ), + ): + if event.stream_id in self._events: + self._events[event.stream_id].append(event) + + elif isinstance(event, h2.events.ConnectionTerminated): + self._connection_terminated = event + + self._write_outgoing_data(request) + + def _receive_remote_settings_change(self, event: h2.events.Event) -> None: + max_concurrent_streams = event.changed_settings.get( + h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS + ) + if max_concurrent_streams: + new_max_streams = min( + max_concurrent_streams.new_value, + self._h2_state.local_settings.max_concurrent_streams, + ) + if new_max_streams and new_max_streams != self._max_streams: + while new_max_streams > self._max_streams: + self._max_streams_semaphore.release() + self._max_streams += 1 + while new_max_streams < self._max_streams: + self._max_streams_semaphore.acquire() + self._max_streams -= 1 + + def _response_closed(self, stream_id: int) -> None: + self._max_streams_semaphore.release() + del self._events[stream_id] + with self._state_lock: + if self._connection_terminated and not self._events: + self.close() + + elif self._state == HTTPConnectionState.ACTIVE and not self._events: + self._state = HTTPConnectionState.IDLE + if self._keepalive_expiry is not None: + now = time.monotonic() + self._expire_at = now + self._keepalive_expiry + if self._used_all_stream_ids: # pragma: nocover + self.close() + + def close(self) -> None: + # Note that this method unilaterally closes the connection, and does + # not have any kind of locking in place around it. 
+ self._h2_state.close_connection() + self._state = HTTPConnectionState.CLOSED + self._network_stream.close() + + # Wrappers around network read/write operations... + + def _read_incoming_data( + self, request: Request + ) -> typing.List[h2.events.Event]: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("read", None) + + if self._read_exception is not None: + raise self._read_exception # pragma: nocover + + try: + data = self._network_stream.read(self.READ_NUM_BYTES, timeout) + if data == b"": + raise RemoteProtocolError("Server disconnected") + except Exception as exc: + # If we get a network error we should: + # + # 1. Save the exception and just raise it immediately on any future reads. + # (For example, this means that a single read timeout or disconnect will + # immediately close all pending streams. Without requiring multiple + # sequential timeouts.) + # 2. Mark the connection as errored, so that we don't accept any other + # incoming requests. + self._read_exception = exc + self._connection_error = True + raise exc + + events: typing.List[h2.events.Event] = self._h2_state.receive_data(data) + + return events + + def _write_outgoing_data(self, request: Request) -> None: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("write", None) + + with self._write_lock: + data_to_send = self._h2_state.data_to_send() + + if self._write_exception is not None: + raise self._write_exception # pragma: nocover + + try: + self._network_stream.write(data_to_send, timeout) + except Exception as exc: # pragma: nocover + # If we get a network error we should: + # + # 1. Save the exception and just raise it immediately on any future write. + # (For example, this means that a single write timeout or disconnect will + # immediately close all pending streams. Without requiring multiple + # sequential timeouts.) + # 2. Mark the connection as errored, so that we don't accept any other + # incoming requests. 
+ self._write_exception = exc + self._connection_error = True + raise exc + + # Flow control... + + def _wait_for_outgoing_flow(self, request: Request, stream_id: int) -> int: + """ + Returns the maximum allowable outgoing flow for a given stream. + + If the allowable flow is zero, then waits on the network until + WindowUpdated frames have increased the flow rate. + https://tools.ietf.org/html/rfc7540#section-6.9 + """ + local_flow: int = self._h2_state.local_flow_control_window(stream_id) + max_frame_size: int = self._h2_state.max_outbound_frame_size + flow = min(local_flow, max_frame_size) + while flow == 0: + self._receive_events(request) + local_flow = self._h2_state.local_flow_control_window(stream_id) + max_frame_size = self._h2_state.max_outbound_frame_size + flow = min(local_flow, max_frame_size) + return flow + + # Interface for connection pooling... + + def can_handle_request(self, origin: Origin) -> bool: + return origin == self._origin + + def is_available(self) -> bool: + return ( + self._state != HTTPConnectionState.CLOSED + and not self._connection_error + and not self._used_all_stream_ids + and not ( + self._h2_state.state_machine.state + == h2.connection.ConnectionState.CLOSED + ) + ) + + def has_expired(self) -> bool: + now = time.monotonic() + return self._expire_at is not None and now > self._expire_at + + def is_idle(self) -> bool: + return self._state == HTTPConnectionState.IDLE + + def is_closed(self) -> bool: + return self._state == HTTPConnectionState.CLOSED + + def info(self) -> str: + origin = str(self._origin) + return ( + f"{origin!r}, HTTP/2, {self._state.name}, " + f"Request Count: {self._request_count}" + ) + + def __repr__(self) -> str: + class_name = self.__class__.__name__ + origin = str(self._origin) + return ( + f"<{class_name} [{origin!r}, {self._state.name}, " + f"Request Count: {self._request_count}]>" + ) + + # These context managers are not used in the standard flow, but are + # useful for testing or working with 
connection instances directly. + + def __enter__(self) -> "HTTP2Connection": + return self + + def __exit__( + self, + exc_type: typing.Optional[typing.Type[BaseException]] = None, + exc_value: typing.Optional[BaseException] = None, + traceback: typing.Optional[types.TracebackType] = None, + ) -> None: + self.close() + + +class HTTP2ConnectionByteStream: + def __init__( + self, connection: HTTP2Connection, request: Request, stream_id: int + ) -> None: + self._connection = connection + self._request = request + self._stream_id = stream_id + self._closed = False + + def __iter__(self) -> typing.Iterator[bytes]: + kwargs = {"request": self._request, "stream_id": self._stream_id} + try: + with Trace("receive_response_body", logger, self._request, kwargs): + for chunk in self._connection._receive_response_body( + request=self._request, stream_id=self._stream_id + ): + yield chunk + except BaseException as exc: + # If we get an exception while streaming the response, + # we want to close the response (and possibly the connection) + # before raising that exception. 
+ with ShieldCancellation(): + self.close() + raise exc + + def close(self) -> None: + if not self._closed: + self._closed = True + kwargs = {"stream_id": self._stream_id} + with Trace("response_closed", logger, self._request, kwargs): + self._connection._response_closed(stream_id=self._stream_id) diff --git a/evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/http_proxy.py b/evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/http_proxy.py new file mode 100644 index 0000000000000000000000000000000000000000..bb368dd42d559a6de6961c95b0cdef855b868c97 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/http_proxy.py @@ -0,0 +1,350 @@ +import logging +import ssl +from base64 import b64encode +from typing import Iterable, List, Mapping, Optional, Sequence, Tuple, Union + +from .._backends.base import SOCKET_OPTION, NetworkBackend +from .._exceptions import ProxyError +from .._models import ( + URL, + Origin, + Request, + Response, + enforce_bytes, + enforce_headers, + enforce_url, +) +from .._ssl import default_ssl_context +from .._synchronization import Lock +from .._trace import Trace +from .connection import HTTPConnection +from .connection_pool import ConnectionPool +from .http11 import HTTP11Connection +from .interfaces import ConnectionInterface + +HeadersAsSequence = Sequence[Tuple[Union[bytes, str], Union[bytes, str]]] +HeadersAsMapping = Mapping[Union[bytes, str], Union[bytes, str]] + + +logger = logging.getLogger("httpcore.proxy") + + +def merge_headers( + default_headers: Optional[Sequence[Tuple[bytes, bytes]]] = None, + override_headers: Optional[Sequence[Tuple[bytes, bytes]]] = None, +) -> List[Tuple[bytes, bytes]]: + """ + Append default_headers and override_headers, de-duplicating if a key exists + in both cases. 
+ """ + default_headers = [] if default_headers is None else list(default_headers) + override_headers = [] if override_headers is None else list(override_headers) + has_override = set(key.lower() for key, value in override_headers) + default_headers = [ + (key, value) + for key, value in default_headers + if key.lower() not in has_override + ] + return default_headers + override_headers + + +def build_auth_header(username: bytes, password: bytes) -> bytes: + userpass = username + b":" + password + return b"Basic " + b64encode(userpass) + + +class HTTPProxy(ConnectionPool): + """ + A connection pool that sends requests via an HTTP proxy. + """ + + def __init__( + self, + proxy_url: Union[URL, bytes, str], + proxy_auth: Optional[Tuple[Union[bytes, str], Union[bytes, str]]] = None, + proxy_headers: Union[HeadersAsMapping, HeadersAsSequence, None] = None, + ssl_context: Optional[ssl.SSLContext] = None, + max_connections: Optional[int] = 10, + max_keepalive_connections: Optional[int] = None, + keepalive_expiry: Optional[float] = None, + http1: bool = True, + http2: bool = False, + retries: int = 0, + local_address: Optional[str] = None, + uds: Optional[str] = None, + network_backend: Optional[NetworkBackend] = None, + socket_options: Optional[Iterable[SOCKET_OPTION]] = None, + ) -> None: + """ + A connection pool for making HTTP requests. + + Parameters: + proxy_url: The URL to use when connecting to the proxy server. + For example `"http://127.0.0.1:8080/"`. + proxy_auth: Any proxy authentication as a two-tuple of + (username, password). May be either bytes or ascii-only str. + proxy_headers: Any HTTP headers to use for the proxy requests. + For example `{"Proxy-Authorization": "Basic :"}`. + ssl_context: An SSL context to use for verifying connections. + If not specified, the default `httpcore.default_ssl_context()` + will be used. + max_connections: The maximum number of concurrent HTTP connections that + the pool should allow. 
Any attempt to send a request on a pool that + would exceed this amount will block until a connection is available. + max_keepalive_connections: The maximum number of idle HTTP connections + that will be maintained in the pool. + keepalive_expiry: The duration in seconds that an idle HTTP connection + may be maintained for before being expired from the pool. + http1: A boolean indicating if HTTP/1.1 requests should be supported + by the connection pool. Defaults to True. + http2: A boolean indicating if HTTP/2 requests should be supported by + the connection pool. Defaults to False. + retries: The maximum number of retries when trying to establish + a connection. + local_address: Local address to connect from. Can also be used to + connect using a particular address family. Using + `local_address="0.0.0.0"` will connect using an `AF_INET` address + (IPv4), while using `local_address="::"` will connect using an + `AF_INET6` address (IPv6). + uds: Path to a Unix Domain Socket to use instead of TCP sockets. + network_backend: A backend instance to use for handling network I/O. 
+ """ + super().__init__( + ssl_context=ssl_context, + max_connections=max_connections, + max_keepalive_connections=max_keepalive_connections, + keepalive_expiry=keepalive_expiry, + http1=http1, + http2=http2, + network_backend=network_backend, + retries=retries, + local_address=local_address, + uds=uds, + socket_options=socket_options, + ) + self._ssl_context = ssl_context + self._proxy_url = enforce_url(proxy_url, name="proxy_url") + self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers") + if proxy_auth is not None: + username = enforce_bytes(proxy_auth[0], name="proxy_auth") + password = enforce_bytes(proxy_auth[1], name="proxy_auth") + authorization = build_auth_header(username, password) + self._proxy_headers = [ + (b"Proxy-Authorization", authorization) + ] + self._proxy_headers + + def create_connection(self, origin: Origin) -> ConnectionInterface: + if origin.scheme == b"http": + return ForwardHTTPConnection( + proxy_origin=self._proxy_url.origin, + proxy_headers=self._proxy_headers, + remote_origin=origin, + keepalive_expiry=self._keepalive_expiry, + network_backend=self._network_backend, + ) + return TunnelHTTPConnection( + proxy_origin=self._proxy_url.origin, + proxy_headers=self._proxy_headers, + remote_origin=origin, + ssl_context=self._ssl_context, + keepalive_expiry=self._keepalive_expiry, + http1=self._http1, + http2=self._http2, + network_backend=self._network_backend, + ) + + +class ForwardHTTPConnection(ConnectionInterface): + def __init__( + self, + proxy_origin: Origin, + remote_origin: Origin, + proxy_headers: Union[HeadersAsMapping, HeadersAsSequence, None] = None, + keepalive_expiry: Optional[float] = None, + network_backend: Optional[NetworkBackend] = None, + socket_options: Optional[Iterable[SOCKET_OPTION]] = None, + ) -> None: + self._connection = HTTPConnection( + origin=proxy_origin, + keepalive_expiry=keepalive_expiry, + network_backend=network_backend, + socket_options=socket_options, + ) + self._proxy_origin = 
proxy_origin + self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers") + self._remote_origin = remote_origin + + def handle_request(self, request: Request) -> Response: + headers = merge_headers(self._proxy_headers, request.headers) + url = URL( + scheme=self._proxy_origin.scheme, + host=self._proxy_origin.host, + port=self._proxy_origin.port, + target=bytes(request.url), + ) + proxy_request = Request( + method=request.method, + url=url, + headers=headers, + content=request.stream, + extensions=request.extensions, + ) + return self._connection.handle_request(proxy_request) + + def can_handle_request(self, origin: Origin) -> bool: + return origin == self._remote_origin + + def close(self) -> None: + self._connection.close() + + def info(self) -> str: + return self._connection.info() + + def is_available(self) -> bool: + return self._connection.is_available() + + def has_expired(self) -> bool: + return self._connection.has_expired() + + def is_idle(self) -> bool: + return self._connection.is_idle() + + def is_closed(self) -> bool: + return self._connection.is_closed() + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{self.info()}]>" + + +class TunnelHTTPConnection(ConnectionInterface): + def __init__( + self, + proxy_origin: Origin, + remote_origin: Origin, + ssl_context: Optional[ssl.SSLContext] = None, + proxy_headers: Optional[Sequence[Tuple[bytes, bytes]]] = None, + keepalive_expiry: Optional[float] = None, + http1: bool = True, + http2: bool = False, + network_backend: Optional[NetworkBackend] = None, + socket_options: Optional[Iterable[SOCKET_OPTION]] = None, + ) -> None: + self._connection: ConnectionInterface = HTTPConnection( + origin=proxy_origin, + keepalive_expiry=keepalive_expiry, + network_backend=network_backend, + socket_options=socket_options, + ) + self._proxy_origin = proxy_origin + self._remote_origin = remote_origin + self._ssl_context = ssl_context + self._proxy_headers = enforce_headers(proxy_headers, 
name="proxy_headers") + self._keepalive_expiry = keepalive_expiry + self._http1 = http1 + self._http2 = http2 + self._connect_lock = Lock() + self._connected = False + + def handle_request(self, request: Request) -> Response: + timeouts = request.extensions.get("timeout", {}) + timeout = timeouts.get("connect", None) + + with self._connect_lock: + if not self._connected: + target = b"%b:%d" % (self._remote_origin.host, self._remote_origin.port) + + connect_url = URL( + scheme=self._proxy_origin.scheme, + host=self._proxy_origin.host, + port=self._proxy_origin.port, + target=target, + ) + connect_headers = merge_headers( + [(b"Host", target), (b"Accept", b"*/*")], self._proxy_headers + ) + connect_request = Request( + method=b"CONNECT", + url=connect_url, + headers=connect_headers, + extensions=request.extensions, + ) + connect_response = self._connection.handle_request( + connect_request + ) + + if connect_response.status < 200 or connect_response.status > 299: + reason_bytes = connect_response.extensions.get("reason_phrase", b"") + reason_str = reason_bytes.decode("ascii", errors="ignore") + msg = "%d %s" % (connect_response.status, reason_str) + self._connection.close() + raise ProxyError(msg) + + stream = connect_response.extensions["network_stream"] + + # Upgrade the stream to SSL + ssl_context = ( + default_ssl_context() + if self._ssl_context is None + else self._ssl_context + ) + alpn_protocols = ["http/1.1", "h2"] if self._http2 else ["http/1.1"] + ssl_context.set_alpn_protocols(alpn_protocols) + + kwargs = { + "ssl_context": ssl_context, + "server_hostname": self._remote_origin.host.decode("ascii"), + "timeout": timeout, + } + with Trace("start_tls", logger, request, kwargs) as trace: + stream = stream.start_tls(**kwargs) + trace.return_value = stream + + # Determine if we should be using HTTP/1.1 or HTTP/2 + ssl_object = stream.get_extra_info("ssl_object") + http2_negotiated = ( + ssl_object is not None + and ssl_object.selected_alpn_protocol() == "h2" + 
) + + # Create the HTTP/1.1 or HTTP/2 connection + if http2_negotiated or (self._http2 and not self._http1): + from .http2 import HTTP2Connection + + self._connection = HTTP2Connection( + origin=self._remote_origin, + stream=stream, + keepalive_expiry=self._keepalive_expiry, + ) + else: + self._connection = HTTP11Connection( + origin=self._remote_origin, + stream=stream, + keepalive_expiry=self._keepalive_expiry, + ) + + self._connected = True + return self._connection.handle_request(request) + + def can_handle_request(self, origin: Origin) -> bool: + return origin == self._remote_origin + + def close(self) -> None: + self._connection.close() + + def info(self) -> str: + return self._connection.info() + + def is_available(self) -> bool: + return self._connection.is_available() + + def has_expired(self) -> bool: + return self._connection.has_expired() + + def is_idle(self) -> bool: + return self._connection.is_idle() + + def is_closed(self) -> bool: + return self._connection.is_closed() + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{self.info()}]>" diff --git a/evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/__init__.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f14a4add0e791e96e10f8f89661ec03effec0eb Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/_async.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/_async.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..79b2bde42c7e5bf01b1c21c489dedbba65ba84de Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/_async.cpython-310.pyc differ diff --git 
a/evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/expect.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/expect.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1bb9058426533d24839908c78baa19e7e4b76ab3 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/expect.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/fdpexpect.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/fdpexpect.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f408884011309063ecbafababd4d29631939ba2 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/fdpexpect.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/popen_spawn.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/popen_spawn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..85b92a803d3c90d3e8763eef7ab96a9c9a09ed3c Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/popen_spawn.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/run.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/run.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..17fc9e5b7b694293dc900eedc9bedc9eb09fd6e3 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/run.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/socket_pexpect.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/socket_pexpect.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..441b63b1192165e373537f58e6a251ef0fd8fb37 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/socket_pexpect.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/utils.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f32df9a6cf26be97eccd8c55ab1d97a7bee4a205 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/utils.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/tiktoken/_tiktoken.cpython-310-x86_64-linux-gnu.so b/evalkit_internvl/lib/python3.10/site-packages/tiktoken/_tiktoken.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..307a9c6c071db68ab6ec4ab5f049656d42191a70 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/tiktoken/_tiktoken.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab730d9e9941f3bde5507915e9ac6986e06e0d611b22cc548b5ef0f0fdbaa4d3 +size 3430112 diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5939d83c8483812187c39d373e425630a9e44fe5 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__init__.py @@ -0,0 +1,8 @@ +"""This module implements histogram-based gradient boosting estimators. + +The implementation is a port from pygbm which is itself strongly inspired +from LightGBM. 
+""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/common.pxd b/evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/common.pxd new file mode 100644 index 0000000000000000000000000000000000000000..9ff9fc89800d7bcd04a0a9d202d828a2079a6f28 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/common.pxd @@ -0,0 +1,43 @@ +from ...utils._typedefs cimport float32_t, float64_t, intp_t, uint8_t, uint32_t + + +ctypedef float64_t X_DTYPE_C +ctypedef uint8_t X_BINNED_DTYPE_C +ctypedef float64_t Y_DTYPE_C +ctypedef float32_t G_H_DTYPE_C +ctypedef uint32_t BITSET_INNER_DTYPE_C +ctypedef BITSET_INNER_DTYPE_C[8] BITSET_DTYPE_C + + +cdef packed struct hist_struct: + # Same as histogram dtype but we need a struct to declare views. It needs + # to be packed since by default numpy dtypes aren't aligned + Y_DTYPE_C sum_gradients + Y_DTYPE_C sum_hessians + unsigned int count + + +cdef packed struct node_struct: + # Equivalent struct to PREDICTOR_RECORD_DTYPE to use in memory views. It + # needs to be packed since by default numpy dtypes aren't aligned + Y_DTYPE_C value + unsigned int count + intp_t feature_idx + X_DTYPE_C num_threshold + uint8_t missing_go_to_left + unsigned int left + unsigned int right + Y_DTYPE_C gain + unsigned int depth + uint8_t is_leaf + X_BINNED_DTYPE_C bin_threshold + uint8_t is_categorical + # The index of the corresponding bitsets in the Predictor's bitset arrays. 
+ # Only used if is_categorical is True + unsigned int bitset_idx + + +cpdef enum MonotonicConstraint: + NO_CST = 0 + POS = 1 + NEG = -1 diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/_weight_boosting.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/_weight_boosting.py new file mode 100644 index 0000000000000000000000000000000000000000..8503c4fdb8ae7d9549c7fa4fb412831f562d2667 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/_weight_boosting.py @@ -0,0 +1,1173 @@ +"""Weight Boosting. + +This module contains weight boosting estimators for both classification and +regression. + +The module structure is the following: + +- The `BaseWeightBoosting` base class implements a common ``fit`` method + for all the estimators in the module. Regression and classification + only differ from each other in the loss function that is optimized. + +- :class:`~sklearn.ensemble.AdaBoostClassifier` implements adaptive boosting + (AdaBoost-SAMME) for classification problems. + +- :class:`~sklearn.ensemble.AdaBoostRegressor` implements adaptive boosting + (AdaBoost.R2) for regression problems. 
+""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import warnings +from abc import ABCMeta, abstractmethod +from numbers import Integral, Real + +import numpy as np + +from ..base import ( + ClassifierMixin, + RegressorMixin, + _fit_context, + is_classifier, + is_regressor, +) +from ..metrics import accuracy_score, r2_score +from ..tree import DecisionTreeClassifier, DecisionTreeRegressor +from ..utils import _safe_indexing, check_random_state +from ..utils._param_validation import HasMethods, Hidden, Interval, StrOptions +from ..utils.extmath import softmax, stable_cumsum +from ..utils.metadata_routing import ( + _raise_for_unsupported_routing, + _RoutingNotSupportedMixin, +) +from ..utils.validation import ( + _check_sample_weight, + _num_samples, + check_is_fitted, + has_fit_parameter, + validate_data, +) +from ._base import BaseEnsemble + +__all__ = [ + "AdaBoostClassifier", + "AdaBoostRegressor", +] + + +class BaseWeightBoosting(BaseEnsemble, metaclass=ABCMeta): + """Base class for AdaBoost estimators. + + Warning: This class should not be used directly. Use derived classes + instead. 
+ """ + + _parameter_constraints: dict = { + "estimator": [HasMethods(["fit", "predict"]), None], + "n_estimators": [Interval(Integral, 1, None, closed="left")], + "learning_rate": [Interval(Real, 0, None, closed="neither")], + "random_state": ["random_state"], + } + + @abstractmethod + def __init__( + self, + estimator=None, + *, + n_estimators=50, + estimator_params=tuple(), + learning_rate=1.0, + random_state=None, + ): + super().__init__( + estimator=estimator, + n_estimators=n_estimators, + estimator_params=estimator_params, + ) + + self.learning_rate = learning_rate + self.random_state = random_state + + def _check_X(self, X): + # Only called to validate X in non-fit methods, therefore reset=False + return validate_data( + self, + X, + accept_sparse=["csr", "csc"], + ensure_2d=True, + allow_nd=True, + dtype=None, + reset=False, + ) + + @_fit_context( + # AdaBoost*.estimator is not validated yet + prefer_skip_nested_validation=False + ) + def fit(self, X, y, sample_weight=None): + """Build a boosted classifier/regressor from the training set (X, y). + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Sparse matrix can be CSC, CSR, COO, + DOK, or LIL. COO, DOK, and LIL are converted to CSR. + + y : array-like of shape (n_samples,) + The target values. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. If None, the sample weights are initialized to + 1 / n_samples. + + Returns + ------- + self : object + Fitted estimator. 
+ """ + _raise_for_unsupported_routing(self, "fit", sample_weight=sample_weight) + X, y = validate_data( + self, + X, + y, + accept_sparse=["csr", "csc"], + ensure_2d=True, + allow_nd=True, + dtype=None, + y_numeric=is_regressor(self), + ) + + sample_weight = _check_sample_weight( + sample_weight, X, np.float64, copy=True, ensure_non_negative=True + ) + sample_weight /= sample_weight.sum() + + # Check parameters + self._validate_estimator() + + # Clear any previous fit results + self.estimators_ = [] + self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float64) + self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float64) + + # Initialization of the random number instance that will be used to + # generate a seed at each iteration + random_state = check_random_state(self.random_state) + epsilon = np.finfo(sample_weight.dtype).eps + + zero_weight_mask = sample_weight == 0.0 + for iboost in range(self.n_estimators): + # avoid extremely small sample weight, for details see issue #20320 + sample_weight = np.clip(sample_weight, a_min=epsilon, a_max=None) + # do not clip sample weights that were exactly zero originally + sample_weight[zero_weight_mask] = 0.0 + + # Boosting step + sample_weight, estimator_weight, estimator_error = self._boost( + iboost, X, y, sample_weight, random_state + ) + + # Early termination + if sample_weight is None: + break + self.estimator_weights_[iboost] = estimator_weight + self.estimator_errors_[iboost] = estimator_error + + # Stop if error is zero + if estimator_error == 0: + break + + sample_weight_sum = np.sum(sample_weight) + + if not np.isfinite(sample_weight_sum): + warnings.warn( + ( + "Sample weights have reached infinite values," + f" at iteration {iboost}, causing overflow. " + "Iterations stopped. Try lowering the learning rate." 
+ ), + stacklevel=2, + ) + break + + # Stop if the sum of sample weights has become non-positive + if sample_weight_sum <= 0: + break + + if iboost < self.n_estimators - 1: + # Normalize + sample_weight /= sample_weight_sum + + return self + + @abstractmethod + def _boost(self, iboost, X, y, sample_weight, random_state): + """Implement a single boost. + + Warning: This method needs to be overridden by subclasses. + + Parameters + ---------- + iboost : int + The index of the current boost iteration. + + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Sparse matrix can be CSC, CSR, COO, + DOK, or LIL. COO, DOK, and LIL are converted to CSR. + + y : array-like of shape (n_samples,) + The target values (class labels). + + sample_weight : array-like of shape (n_samples,) + The current sample weights. + + random_state : RandomState + The current random number generator + + Returns + ------- + sample_weight : array-like of shape (n_samples,) or None + The reweighted sample weights. + If None then boosting has terminated early. + + estimator_weight : float + The weight for the current boost. + If None then boosting has terminated early. + + error : float + The classification error for the current boost. + If None then boosting has terminated early. + """ + pass + + def staged_score(self, X, y, sample_weight=None): + """Return staged scores for X, y. + + This generator method yields the ensemble score after each iteration of + boosting and therefore allows monitoring, such as to determine the + score on a test set after each boost. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Sparse matrix can be CSC, CSR, COO, + DOK, or LIL. COO, DOK, and LIL are converted to CSR. + + y : array-like of shape (n_samples,) + Labels for X. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. 
+ + Yields + ------ + z : float + """ + X = self._check_X(X) + + for y_pred in self.staged_predict(X): + if is_classifier(self): + yield accuracy_score(y, y_pred, sample_weight=sample_weight) + else: + yield r2_score(y, y_pred, sample_weight=sample_weight) + + @property + def feature_importances_(self): + """The impurity-based feature importances. + + The higher, the more important the feature. + The importance of a feature is computed as the (normalized) + total reduction of the criterion brought by that feature. It is also + known as the Gini importance. + + Warning: impurity-based feature importances can be misleading for + high cardinality features (many unique values). See + :func:`sklearn.inspection.permutation_importance` as an alternative. + + Returns + ------- + feature_importances_ : ndarray of shape (n_features,) + The feature importances. + """ + if self.estimators_ is None or len(self.estimators_) == 0: + raise ValueError( + "Estimator not fitted, call `fit` before `feature_importances_`." + ) + + try: + norm = self.estimator_weights_.sum() + return ( + sum( + weight * clf.feature_importances_ + for weight, clf in zip(self.estimator_weights_, self.estimators_) + ) + / norm + ) + + except AttributeError as e: + raise AttributeError( + "Unable to compute feature importances " + "since estimator does not have a " + "feature_importances_ attribute" + ) from e + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.input_tags.sparse = True + return tags + + +def _samme_proba(estimator, n_classes, X): + """Calculate algorithm 4, step 2, equation c) of Zhu et al [1]. + + References + ---------- + .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009. + + """ + proba = estimator.predict_proba(X) + + # Displace zero probabilities so the log is defined. + # Also fix negative elements which may occur with + # negative sample weights. 
+ np.clip(proba, np.finfo(proba.dtype).eps, None, out=proba) + log_proba = np.log(proba) + + return (n_classes - 1) * ( + log_proba - (1.0 / n_classes) * log_proba.sum(axis=1)[:, np.newaxis] + ) + + +class AdaBoostClassifier( + _RoutingNotSupportedMixin, ClassifierMixin, BaseWeightBoosting +): + """An AdaBoost classifier. + + An AdaBoost [1]_ classifier is a meta-estimator that begins by fitting a + classifier on the original dataset and then fits additional copies of the + classifier on the same dataset but where the weights of incorrectly + classified instances are adjusted such that subsequent classifiers focus + more on difficult cases. + + This class implements the algorithm based on [2]_. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.14 + + Parameters + ---------- + estimator : object, default=None + The base estimator from which the boosted ensemble is built. + Support for sample weighting is required, as well as proper + ``classes_`` and ``n_classes_`` attributes. If ``None``, then + the base estimator is :class:`~sklearn.tree.DecisionTreeClassifier` + initialized with `max_depth=1`. + + .. versionadded:: 1.2 + `base_estimator` was renamed to `estimator`. + + n_estimators : int, default=50 + The maximum number of estimators at which boosting is terminated. + In case of perfect fit, the learning procedure is stopped early. + Values must be in the range `[1, inf)`. + + learning_rate : float, default=1.0 + Weight applied to each classifier at each boosting iteration. A higher + learning rate increases the contribution of each classifier. There is + a trade-off between the `learning_rate` and `n_estimators` parameters. + Values must be in the range `(0.0, inf)`. + + algorithm : {'SAMME'}, default='SAMME' + Use the SAMME discrete boosting algorithm. + + .. deprecated:: 1.6 + `algorithm` is deprecated and will be removed in version 1.8. This + estimator only implements the 'SAMME' algorithm. 
+ + random_state : int, RandomState instance or None, default=None + Controls the random seed given at each `estimator` at each + boosting iteration. + Thus, it is only used when `estimator` exposes a `random_state`. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + estimator_ : estimator + The base estimator from which the ensemble is grown. + + .. versionadded:: 1.2 + `base_estimator_` was renamed to `estimator_`. + + estimators_ : list of classifiers + The collection of fitted sub-estimators. + + classes_ : ndarray of shape (n_classes,) + The classes labels. + + n_classes_ : int + The number of classes. + + estimator_weights_ : ndarray of floats + Weights for each estimator in the boosted ensemble. + + estimator_errors_ : ndarray of floats + Classification error for each estimator in the boosted + ensemble. + + feature_importances_ : ndarray of shape (n_features,) + The impurity-based feature importances if supported by the + ``estimator`` (when based on decision trees). + + Warning: impurity-based feature importances can be misleading for + high cardinality features (many unique values). See + :func:`sklearn.inspection.permutation_importance` as an alternative. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + AdaBoostRegressor : An AdaBoost regressor that begins by fitting a + regressor on the original dataset and then fits additional copies of + the regressor on the same dataset but where the weights of instances + are adjusted according to the error of the current prediction. + + GradientBoostingClassifier : GB builds an additive model in a forward + stage-wise fashion. 
Regression trees are fit on the negative gradient + of the binomial or multinomial deviance loss function. Binary + classification is a special case where only a single regression tree is + induced. + + sklearn.tree.DecisionTreeClassifier : A non-parametric supervised learning + method used for classification. + Creates a model that predicts the value of a target variable by + learning simple decision rules inferred from the data features. + + References + ---------- + .. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of + on-Line Learning and an Application to Boosting", 1995. + + .. [2] :doi:`J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class adaboost." + Statistics and its Interface 2.3 (2009): 349-360. + <10.4310/SII.2009.v2.n3.a8>` + + Examples + -------- + >>> from sklearn.ensemble import AdaBoostClassifier + >>> from sklearn.datasets import make_classification + >>> X, y = make_classification(n_samples=1000, n_features=4, + ... n_informative=2, n_redundant=0, + ... random_state=0, shuffle=False) + >>> clf = AdaBoostClassifier(n_estimators=100, random_state=0) + >>> clf.fit(X, y) + AdaBoostClassifier(n_estimators=100, random_state=0) + >>> clf.predict([[0, 0, 0, 0]]) + array([1]) + >>> clf.score(X, y) + 0.96... + + For a detailed example of using AdaBoost to fit a sequence of DecisionTrees + as weaklearners, please refer to + :ref:`sphx_glr_auto_examples_ensemble_plot_adaboost_multiclass.py`. + + For a detailed example of using AdaBoost to fit a non-linearly seperable + classification dataset composed of two Gaussian quantiles clusters, please + refer to :ref:`sphx_glr_auto_examples_ensemble_plot_adaboost_twoclass.py`. 
+ """ + + # TODO(1.8): remove "algorithm" entry + _parameter_constraints: dict = { + **BaseWeightBoosting._parameter_constraints, + "algorithm": [StrOptions({"SAMME"}), Hidden(StrOptions({"deprecated"}))], + } + + def __init__( + self, + estimator=None, + *, + n_estimators=50, + learning_rate=1.0, + algorithm="deprecated", + random_state=None, + ): + super().__init__( + estimator=estimator, + n_estimators=n_estimators, + learning_rate=learning_rate, + random_state=random_state, + ) + + self.algorithm = algorithm + + def _validate_estimator(self): + """Check the estimator and set the estimator_ attribute.""" + super()._validate_estimator(default=DecisionTreeClassifier(max_depth=1)) + + if self.algorithm != "deprecated": + warnings.warn( + "The parameter 'algorithm' is deprecated in 1.6 and has no effect. " + "It will be removed in version 1.8.", + FutureWarning, + ) + + if not has_fit_parameter(self.estimator_, "sample_weight"): + raise ValueError( + f"{self.estimator.__class__.__name__} doesn't support sample_weight." + ) + + def _boost(self, iboost, X, y, sample_weight, random_state): + """Implement a single boost. + + Perform a single boost according to the discrete SAMME algorithm and return the + updated sample weights. + + Parameters + ---------- + iboost : int + The index of the current boost iteration. + + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. + + y : array-like of shape (n_samples,) + The target values (class labels). + + sample_weight : array-like of shape (n_samples,) + The current sample weights. + + random_state : RandomState instance + The RandomState instance used if the base estimator accepts a + `random_state` attribute. + + Returns + ------- + sample_weight : array-like of shape (n_samples,) or None + The reweighted sample weights. + If None then boosting has terminated early. + + estimator_weight : float + The weight for the current boost. + If None then boosting has terminated early. 
+ + estimator_error : float + The classification error for the current boost. + If None then boosting has terminated early. + """ + estimator = self._make_estimator(random_state=random_state) + + estimator.fit(X, y, sample_weight=sample_weight) + + y_predict = estimator.predict(X) + + if iboost == 0: + self.classes_ = getattr(estimator, "classes_", None) + self.n_classes_ = len(self.classes_) + + # Instances incorrectly classified + incorrect = y_predict != y + + # Error fraction + estimator_error = np.mean(np.average(incorrect, weights=sample_weight, axis=0)) + + # Stop if classification is perfect + if estimator_error <= 0: + return sample_weight, 1.0, 0.0 + + n_classes = self.n_classes_ + + # Stop if the error is at least as bad as random guessing + if estimator_error >= 1.0 - (1.0 / n_classes): + self.estimators_.pop(-1) + if len(self.estimators_) == 0: + raise ValueError( + "BaseClassifier in AdaBoostClassifier " + "ensemble is worse than random, ensemble " + "can not be fit." + ) + return None, None, None + + # Boost weight using multi-class AdaBoost SAMME alg + estimator_weight = self.learning_rate * ( + np.log((1.0 - estimator_error) / estimator_error) + np.log(n_classes - 1.0) + ) + + # Only boost the weights if it will fit again + if not iboost == self.n_estimators - 1: + # Only boost positive weights + sample_weight = np.exp( + np.log(sample_weight) + + estimator_weight * incorrect * (sample_weight > 0) + ) + + return sample_weight, estimator_weight, estimator_error + + def predict(self, X): + """Predict classes for X. + + The predicted class of an input sample is computed as the weighted mean + prediction of the classifiers in the ensemble. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Sparse matrix can be CSC, CSR, COO, + DOK, or LIL. COO, DOK, and LIL are converted to CSR. + + Returns + ------- + y : ndarray of shape (n_samples,) + The predicted classes. 
+ """ + pred = self.decision_function(X) + + if self.n_classes_ == 2: + return self.classes_.take(pred > 0, axis=0) + + return self.classes_.take(np.argmax(pred, axis=1), axis=0) + + def staged_predict(self, X): + """Return staged predictions for X. + + The predicted class of an input sample is computed as the weighted mean + prediction of the classifiers in the ensemble. + + This generator method yields the ensemble prediction after each + iteration of boosting and therefore allows monitoring, such as to + determine the prediction on a test set after each boost. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + The input samples. Sparse matrix can be CSC, CSR, COO, + DOK, or LIL. COO, DOK, and LIL are converted to CSR. + + Yields + ------ + y : generator of ndarray of shape (n_samples,) + The predicted classes. + """ + X = self._check_X(X) + + n_classes = self.n_classes_ + classes = self.classes_ + + if n_classes == 2: + for pred in self.staged_decision_function(X): + yield np.array(classes.take(pred > 0, axis=0)) + + else: + for pred in self.staged_decision_function(X): + yield np.array(classes.take(np.argmax(pred, axis=1), axis=0)) + + def decision_function(self, X): + """Compute the decision function of ``X``. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Sparse matrix can be CSC, CSR, COO, + DOK, or LIL. COO, DOK, and LIL are converted to CSR. + + Returns + ------- + score : ndarray of shape of (n_samples, k) + The decision function of the input samples. The order of + outputs is the same as that of the :term:`classes_` attribute. + Binary classification is a special cases with ``k == 1``, + otherwise ``k==n_classes``. For binary classification, + values closer to -1 or 1 mean more like the first or second + class in ``classes_``, respectively. 
+ """ + check_is_fitted(self) + X = self._check_X(X) + + n_classes = self.n_classes_ + classes = self.classes_[:, np.newaxis] + + if n_classes == 1: + return np.zeros_like(X, shape=(X.shape[0], 1)) + + pred = sum( + np.where( + (estimator.predict(X) == classes).T, + w, + -1 / (n_classes - 1) * w, + ) + for estimator, w in zip(self.estimators_, self.estimator_weights_) + ) + + pred /= self.estimator_weights_.sum() + if n_classes == 2: + pred[:, 0] *= -1 + return pred.sum(axis=1) + return pred + + def staged_decision_function(self, X): + """Compute decision function of ``X`` for each boosting iteration. + + This method allows monitoring (i.e. determine error on testing set) + after each boosting iteration. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Sparse matrix can be CSC, CSR, COO, + DOK, or LIL. COO, DOK, and LIL are converted to CSR. + + Yields + ------ + score : generator of ndarray of shape (n_samples, k) + The decision function of the input samples. The order of + outputs is the same of that of the :term:`classes_` attribute. + Binary classification is a special cases with ``k == 1``, + otherwise ``k==n_classes``. For binary classification, + values closer to -1 or 1 mean more like the first or second + class in ``classes_``, respectively. 
+ """ + check_is_fitted(self) + X = self._check_X(X) + + n_classes = self.n_classes_ + classes = self.classes_[:, np.newaxis] + pred = None + norm = 0.0 + + for weight, estimator in zip(self.estimator_weights_, self.estimators_): + norm += weight + + current_pred = np.where( + (estimator.predict(X) == classes).T, + weight, + -1 / (n_classes - 1) * weight, + ) + + if pred is None: + pred = current_pred + else: + pred += current_pred + + if n_classes == 2: + tmp_pred = np.copy(pred) + tmp_pred[:, 0] *= -1 + yield (tmp_pred / norm).sum(axis=1) + else: + yield pred / norm + + @staticmethod + def _compute_proba_from_decision(decision, n_classes): + """Compute probabilities from the decision function. + + This is based eq. (15) of [1] where: + p(y=c|X) = exp((1 / K-1) f_c(X)) / sum_k(exp((1 / K-1) f_k(X))) + = softmax((1 / K-1) * f(X)) + + References + ---------- + .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", + 2009. + """ + if n_classes == 2: + decision = np.vstack([-decision, decision]).T / 2 + else: + decision /= n_classes - 1 + return softmax(decision, copy=False) + + def predict_proba(self, X): + """Predict class probabilities for X. + + The predicted class probabilities of an input sample is computed as + the weighted mean predicted class probabilities of the classifiers + in the ensemble. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Sparse matrix can be CSC, CSR, COO, + DOK, or LIL. COO, DOK, and LIL are converted to CSR. + + Returns + ------- + p : ndarray of shape (n_samples, n_classes) + The class probabilities of the input samples. The order of + outputs is the same of that of the :term:`classes_` attribute. 
+ """ + check_is_fitted(self) + n_classes = self.n_classes_ + + if n_classes == 1: + return np.ones((_num_samples(X), 1)) + + decision = self.decision_function(X) + return self._compute_proba_from_decision(decision, n_classes) + + def staged_predict_proba(self, X): + """Predict class probabilities for X. + + The predicted class probabilities of an input sample is computed as + the weighted mean predicted class probabilities of the classifiers + in the ensemble. + + This generator method yields the ensemble predicted class probabilities + after each iteration of boosting and therefore allows monitoring, such + as to determine the predicted class probabilities on a test set after + each boost. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Sparse matrix can be CSC, CSR, COO, + DOK, or LIL. COO, DOK, and LIL are converted to CSR. + + Yields + ------ + p : generator of ndarray of shape (n_samples,) + The class probabilities of the input samples. The order of + outputs is the same of that of the :term:`classes_` attribute. + """ + + n_classes = self.n_classes_ + + for decision in self.staged_decision_function(X): + yield self._compute_proba_from_decision(decision, n_classes) + + def predict_log_proba(self, X): + """Predict class log-probabilities for X. + + The predicted class log-probabilities of an input sample is computed as + the weighted mean predicted class log-probabilities of the classifiers + in the ensemble. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Sparse matrix can be CSC, CSR, COO, + DOK, or LIL. COO, DOK, and LIL are converted to CSR. + + Returns + ------- + p : ndarray of shape (n_samples, n_classes) + The class probabilities of the input samples. The order of + outputs is the same of that of the :term:`classes_` attribute. 
+ """ + return np.log(self.predict_proba(X)) + + +class AdaBoostRegressor(_RoutingNotSupportedMixin, RegressorMixin, BaseWeightBoosting): + """An AdaBoost regressor. + + An AdaBoost [1] regressor is a meta-estimator that begins by fitting a + regressor on the original dataset and then fits additional copies of the + regressor on the same dataset but where the weights of instances are + adjusted according to the error of the current prediction. As such, + subsequent regressors focus more on difficult cases. + + This class implements the algorithm known as AdaBoost.R2 [2]. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 0.14 + + Parameters + ---------- + estimator : object, default=None + The base estimator from which the boosted ensemble is built. + If ``None``, then the base estimator is + :class:`~sklearn.tree.DecisionTreeRegressor` initialized with + `max_depth=3`. + + .. versionadded:: 1.2 + `base_estimator` was renamed to `estimator`. + + n_estimators : int, default=50 + The maximum number of estimators at which boosting is terminated. + In case of perfect fit, the learning procedure is stopped early. + Values must be in the range `[1, inf)`. + + learning_rate : float, default=1.0 + Weight applied to each regressor at each boosting iteration. A higher + learning rate increases the contribution of each regressor. There is + a trade-off between the `learning_rate` and `n_estimators` parameters. + Values must be in the range `(0.0, inf)`. + + loss : {'linear', 'square', 'exponential'}, default='linear' + The loss function to use when updating the weights after each + boosting iteration. + + random_state : int, RandomState instance or None, default=None + Controls the random seed given at each `estimator` at each + boosting iteration. + Thus, it is only used when `estimator` exposes a `random_state`. + In addition, it controls the bootstrap of the weights used to train the + `estimator` at each boosting iteration. 
+ Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + Attributes + ---------- + estimator_ : estimator + The base estimator from which the ensemble is grown. + + .. versionadded:: 1.2 + `base_estimator_` was renamed to `estimator_`. + + estimators_ : list of regressors + The collection of fitted sub-estimators. + + estimator_weights_ : ndarray of floats + Weights for each estimator in the boosted ensemble. + + estimator_errors_ : ndarray of floats + Regression error for each estimator in the boosted ensemble. + + feature_importances_ : ndarray of shape (n_features,) + The impurity-based feature importances if supported by the + ``estimator`` (when based on decision trees). + + Warning: impurity-based feature importances can be misleading for + high cardinality features (many unique values). See + :func:`sklearn.inspection.permutation_importance` as an alternative. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + AdaBoostClassifier : An AdaBoost classifier. + GradientBoostingRegressor : Gradient Boosting Classification Tree. + sklearn.tree.DecisionTreeRegressor : A decision tree regressor. + + References + ---------- + .. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of + on-Line Learning and an Application to Boosting", 1995. + + .. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997. + + Examples + -------- + >>> from sklearn.ensemble import AdaBoostRegressor + >>> from sklearn.datasets import make_regression + >>> X, y = make_regression(n_features=4, n_informative=2, + ... 
random_state=0, shuffle=False) + >>> regr = AdaBoostRegressor(random_state=0, n_estimators=100) + >>> regr.fit(X, y) + AdaBoostRegressor(n_estimators=100, random_state=0) + >>> regr.predict([[0, 0, 0, 0]]) + array([4.7972...]) + >>> regr.score(X, y) + 0.9771... + + For a detailed example of utilizing :class:`~sklearn.ensemble.AdaBoostRegressor` + to fit a sequence of decision trees as weak learners, please refer to + :ref:`sphx_glr_auto_examples_ensemble_plot_adaboost_regression.py`. + """ + + _parameter_constraints: dict = { + **BaseWeightBoosting._parameter_constraints, + "loss": [StrOptions({"linear", "square", "exponential"})], + } + + def __init__( + self, + estimator=None, + *, + n_estimators=50, + learning_rate=1.0, + loss="linear", + random_state=None, + ): + super().__init__( + estimator=estimator, + n_estimators=n_estimators, + learning_rate=learning_rate, + random_state=random_state, + ) + + self.loss = loss + self.random_state = random_state + + def _validate_estimator(self): + """Check the estimator and set the estimator_ attribute.""" + super()._validate_estimator(default=DecisionTreeRegressor(max_depth=3)) + + def _boost(self, iboost, X, y, sample_weight, random_state): + """Implement a single boost for regression + + Perform a single boost according to the AdaBoost.R2 algorithm and + return the updated sample weights. + + Parameters + ---------- + iboost : int + The index of the current boost iteration. + + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. + + y : array-like of shape (n_samples,) + The target values (class labels in classification, real numbers in + regression). + + sample_weight : array-like of shape (n_samples,) + The current sample weights. + + random_state : RandomState + The RandomState instance used if the base estimator accepts a + `random_state` attribute. + Controls also the bootstrap of the weights used to train the weak + learner. 
+ + Returns + ------- + sample_weight : array-like of shape (n_samples,) or None + The reweighted sample weights. + If None then boosting has terminated early. + + estimator_weight : float + The weight for the current boost. + If None then boosting has terminated early. + + estimator_error : float + The regression error for the current boost. + If None then boosting has terminated early. + """ + estimator = self._make_estimator(random_state=random_state) + + # Weighted sampling of the training set with replacement + bootstrap_idx = random_state.choice( + np.arange(_num_samples(X)), + size=_num_samples(X), + replace=True, + p=sample_weight, + ) + + # Fit on the bootstrapped sample and obtain a prediction + # for all samples in the training set + X_ = _safe_indexing(X, bootstrap_idx) + y_ = _safe_indexing(y, bootstrap_idx) + estimator.fit(X_, y_) + y_predict = estimator.predict(X) + + error_vect = np.abs(y_predict - y) + sample_mask = sample_weight > 0 + masked_sample_weight = sample_weight[sample_mask] + masked_error_vector = error_vect[sample_mask] + + error_max = masked_error_vector.max() + if error_max != 0: + masked_error_vector /= error_max + + if self.loss == "square": + masked_error_vector **= 2 + elif self.loss == "exponential": + masked_error_vector = 1.0 - np.exp(-masked_error_vector) + + # Calculate the average loss + estimator_error = (masked_sample_weight * masked_error_vector).sum() + + if estimator_error <= 0: + # Stop if fit is perfect + return sample_weight, 1.0, 0.0 + + elif estimator_error >= 0.5: + # Discard current estimator only if it isn't the only one + if len(self.estimators_) > 1: + self.estimators_.pop(-1) + return None, None, None + + beta = estimator_error / (1.0 - estimator_error) + + # Boost weight using AdaBoost.R2 alg + estimator_weight = self.learning_rate * np.log(1.0 / beta) + + if not iboost == self.n_estimators - 1: + sample_weight[sample_mask] *= np.power( + beta, (1.0 - masked_error_vector) * self.learning_rate + ) + + return 
sample_weight, estimator_weight, estimator_error + + def _get_median_predict(self, X, limit): + # Evaluate predictions of all estimators + predictions = np.array([est.predict(X) for est in self.estimators_[:limit]]).T + + # Sort the predictions + sorted_idx = np.argsort(predictions, axis=1) + + # Find index of median prediction for each sample + weight_cdf = stable_cumsum(self.estimator_weights_[sorted_idx], axis=1) + median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis] + median_idx = median_or_above.argmax(axis=1) + + median_estimators = sorted_idx[np.arange(_num_samples(X)), median_idx] + + # Return median predictions + return predictions[np.arange(_num_samples(X)), median_estimators] + + def predict(self, X): + """Predict regression value for X. + + The predicted regression value of an input sample is computed + as the weighted median prediction of the regressors in the ensemble. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. Sparse matrix can be CSC, CSR, COO, + DOK, or LIL. COO, DOK, and LIL are converted to CSR. + + Returns + ------- + y : ndarray of shape (n_samples,) + The predicted regression values. + """ + check_is_fitted(self) + X = self._check_X(X) + + return self._get_median_predict(X, len(self.estimators_)) + + def staged_predict(self, X): + """Return staged predictions for X. + + The predicted regression value of an input sample is computed + as the weighted median prediction of the regressors in the ensemble. + + This generator method yields the ensemble prediction after each + iteration of boosting and therefore allows monitoring, such as to + determine the prediction on a test set after each boost. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + The training input samples. + + Yields + ------ + y : generator of ndarray of shape (n_samples,) + The predicted regression values. 
+ """ + check_is_fitted(self) + X = self._check_X(X) + + for i, _ in enumerate(self.estimators_, 1): + yield self._get_median_predict(X, limit=i) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_bagging.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_bagging.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e05f4cf3cfd1b3393100dc1c3cdb11a38c705918 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_bagging.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_common.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..834fde4eb0f9197005896f09441b2115721d8ee7 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_common.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_forest.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_forest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7992604c9d4a5b411c0389cff22e102639900abe Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_forest.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_iforest.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_iforest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cddc8d7624e56aded22156565247513ef64b651a Binary files /dev/null and 
b/evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_iforest.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_weight_boosting.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_weight_boosting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f636b1be71f06b4f4aa3d65ad0990c80e09a61f Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_weight_boosting.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/test_base.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/test_base.py new file mode 100644 index 0000000000000000000000000000000000000000..95a852b8a7cc50e3b4440461e7ed5f5facde3e69 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/test_base.py @@ -0,0 +1,109 @@ +""" +Testing for the base module (sklearn.ensemble.base). +""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +from collections import OrderedDict + +import numpy as np + +from sklearn.datasets import load_iris +from sklearn.discriminant_analysis import LinearDiscriminantAnalysis +from sklearn.ensemble import BaggingClassifier +from sklearn.ensemble._base import _set_random_states +from sklearn.feature_selection import SelectFromModel +from sklearn.linear_model import Perceptron +from sklearn.pipeline import Pipeline + + +def test_base(): + # Check BaseEnsemble methods. 
+ ensemble = BaggingClassifier( + estimator=Perceptron(random_state=None), n_estimators=3 + ) + + iris = load_iris() + ensemble.fit(iris.data, iris.target) + ensemble.estimators_ = [] # empty the list and create estimators manually + + ensemble._make_estimator() + random_state = np.random.RandomState(3) + ensemble._make_estimator(random_state=random_state) + ensemble._make_estimator(random_state=random_state) + ensemble._make_estimator(append=False) + + assert 3 == len(ensemble) + assert 3 == len(ensemble.estimators_) + + assert isinstance(ensemble[0], Perceptron) + assert ensemble[0].random_state is None + assert isinstance(ensemble[1].random_state, int) + assert isinstance(ensemble[2].random_state, int) + assert ensemble[1].random_state != ensemble[2].random_state + + np_int_ensemble = BaggingClassifier( + estimator=Perceptron(), n_estimators=np.int32(3) + ) + np_int_ensemble.fit(iris.data, iris.target) + + +def test_set_random_states(): + # Linear Discriminant Analysis doesn't have random state: smoke test + _set_random_states(LinearDiscriminantAnalysis(), random_state=17) + + clf1 = Perceptron(random_state=None) + assert clf1.random_state is None + # check random_state is None still sets + _set_random_states(clf1, None) + assert isinstance(clf1.random_state, int) + + # check random_state fixes results in consistent initialisation + _set_random_states(clf1, 3) + assert isinstance(clf1.random_state, int) + clf2 = Perceptron(random_state=None) + _set_random_states(clf2, 3) + assert clf1.random_state == clf2.random_state + + # nested random_state + + def make_steps(): + return [ + ("sel", SelectFromModel(Perceptron(random_state=None))), + ("clf", Perceptron(random_state=None)), + ] + + est1 = Pipeline(make_steps()) + _set_random_states(est1, 3) + assert isinstance(est1.steps[0][1].estimator.random_state, int) + assert isinstance(est1.steps[1][1].random_state, int) + assert ( + est1.get_params()["sel__estimator__random_state"] + != 
est1.get_params()["clf__random_state"] + ) + + # ensure multiple random_state parameters are invariant to get_params() + # iteration order + + class AlphaParamPipeline(Pipeline): + def get_params(self, *args, **kwargs): + params = Pipeline.get_params(self, *args, **kwargs).items() + return OrderedDict(sorted(params)) + + class RevParamPipeline(Pipeline): + def get_params(self, *args, **kwargs): + params = Pipeline.get_params(self, *args, **kwargs).items() + return OrderedDict(sorted(params, reverse=True)) + + for cls in [AlphaParamPipeline, RevParamPipeline]: + est2 = cls(make_steps()) + _set_random_states(est2, 3) + assert ( + est1.get_params()["sel__estimator__random_state"] + == est2.get_params()["sel__estimator__random_state"] + ) + assert ( + est1.get_params()["clf__random_state"] + == est2.get_params()["clf__random_state"] + ) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/test_stacking.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/test_stacking.py new file mode 100644 index 0000000000000000000000000000000000000000..e944ecc4abb528c9bffb1cf23674831fcd0fb7ca --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/test_stacking.py @@ -0,0 +1,1019 @@ +"""Test the stacking classifier and regressor.""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import re +from unittest.mock import Mock + +import numpy as np +import pytest +from numpy.testing import assert_array_equal +from scipy import sparse + +from sklearn import config_context +from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin, clone +from sklearn.datasets import ( + load_breast_cancer, + load_diabetes, + load_iris, + make_classification, + make_multilabel_classification, + make_regression, +) +from sklearn.dummy import DummyClassifier, DummyRegressor +from sklearn.ensemble import ( + RandomForestClassifier, + RandomForestRegressor, + StackingClassifier, + 
StackingRegressor, +) +from sklearn.exceptions import ConvergenceWarning, NotFittedError +from sklearn.linear_model import ( + LinearRegression, + LogisticRegression, + Ridge, + RidgeClassifier, +) +from sklearn.model_selection import KFold, StratifiedKFold, train_test_split +from sklearn.neighbors import KNeighborsClassifier +from sklearn.neural_network import MLPClassifier +from sklearn.preprocessing import scale +from sklearn.svm import SVC, LinearSVC, LinearSVR +from sklearn.tests.metadata_routing_common import ( + ConsumingClassifier, + ConsumingRegressor, + _Registry, + check_recorded_metadata, +) +from sklearn.utils._mocking import CheckingClassifier +from sklearn.utils._testing import ( + assert_allclose, + assert_allclose_dense_sparse, + ignore_warnings, +) +from sklearn.utils.fixes import COO_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS + +diabetes = load_diabetes() +X_diabetes, y_diabetes = diabetes.data, diabetes.target +iris = load_iris() +X_iris, y_iris = iris.data, iris.target +X_multilabel, y_multilabel = make_multilabel_classification( + n_classes=3, random_state=42 +) +X_binary, y_binary = make_classification(n_classes=2, random_state=42) + + +@pytest.mark.parametrize( + "cv", [3, StratifiedKFold(n_splits=3, shuffle=True, random_state=42)] +) +@pytest.mark.parametrize( + "final_estimator", [None, RandomForestClassifier(random_state=42)] +) +@pytest.mark.parametrize("passthrough", [False, True]) +def test_stacking_classifier_iris(cv, final_estimator, passthrough): + # prescale the data to avoid convergence warning without using a pipeline + # for later assert + X_train, X_test, y_train, y_test = train_test_split( + scale(X_iris), y_iris, stratify=y_iris, random_state=42 + ) + estimators = [("lr", LogisticRegression()), ("svc", LinearSVC())] + clf = StackingClassifier( + estimators=estimators, + final_estimator=final_estimator, + cv=cv, + passthrough=passthrough, + ) + clf.fit(X_train, y_train) + clf.predict(X_test) + clf.predict_proba(X_test) + 
assert clf.score(X_test, y_test) > 0.8 + + X_trans = clf.transform(X_test) + expected_column_count = 10 if passthrough else 6 + assert X_trans.shape[1] == expected_column_count + if passthrough: + assert_allclose(X_test, X_trans[:, -4:]) + + clf.set_params(lr="drop") + clf.fit(X_train, y_train) + clf.predict(X_test) + clf.predict_proba(X_test) + if final_estimator is None: + # LogisticRegression has decision_function method + clf.decision_function(X_test) + + X_trans = clf.transform(X_test) + expected_column_count_drop = 7 if passthrough else 3 + assert X_trans.shape[1] == expected_column_count_drop + if passthrough: + assert_allclose(X_test, X_trans[:, -4:]) + + +def test_stacking_classifier_drop_column_binary_classification(): + # check that a column is dropped in binary classification + X, y = load_breast_cancer(return_X_y=True) + X_train, X_test, y_train, _ = train_test_split( + scale(X), y, stratify=y, random_state=42 + ) + + # both classifiers implement 'predict_proba' and will both drop one column + estimators = [ + ("lr", LogisticRegression()), + ("rf", RandomForestClassifier(random_state=42)), + ] + clf = StackingClassifier(estimators=estimators, cv=3) + + clf.fit(X_train, y_train) + X_trans = clf.transform(X_test) + assert X_trans.shape[1] == 2 + + # LinearSVC does not implement 'predict_proba' and will not drop one column + estimators = [("lr", LogisticRegression()), ("svc", LinearSVC())] + clf.set_params(estimators=estimators) + + clf.fit(X_train, y_train) + X_trans = clf.transform(X_test) + assert X_trans.shape[1] == 2 + + +def test_stacking_classifier_drop_estimator(): + # prescale the data to avoid convergence warning without using a pipeline + # for later assert + X_train, X_test, y_train, _ = train_test_split( + scale(X_iris), y_iris, stratify=y_iris, random_state=42 + ) + estimators = [("lr", "drop"), ("svc", LinearSVC(random_state=0))] + rf = RandomForestClassifier(n_estimators=10, random_state=42) + clf = StackingClassifier( + 
estimators=[("svc", LinearSVC(random_state=0))], + final_estimator=rf, + cv=5, + ) + clf_drop = StackingClassifier(estimators=estimators, final_estimator=rf, cv=5) + + clf.fit(X_train, y_train) + clf_drop.fit(X_train, y_train) + assert_allclose(clf.predict(X_test), clf_drop.predict(X_test)) + assert_allclose(clf.predict_proba(X_test), clf_drop.predict_proba(X_test)) + assert_allclose(clf.transform(X_test), clf_drop.transform(X_test)) + + +def test_stacking_regressor_drop_estimator(): + # prescale the data to avoid convergence warning without using a pipeline + # for later assert + X_train, X_test, y_train, _ = train_test_split( + scale(X_diabetes), y_diabetes, random_state=42 + ) + estimators = [("lr", "drop"), ("svr", LinearSVR(random_state=0))] + rf = RandomForestRegressor(n_estimators=10, random_state=42) + reg = StackingRegressor( + estimators=[("svr", LinearSVR(random_state=0))], + final_estimator=rf, + cv=5, + ) + reg_drop = StackingRegressor(estimators=estimators, final_estimator=rf, cv=5) + + reg.fit(X_train, y_train) + reg_drop.fit(X_train, y_train) + assert_allclose(reg.predict(X_test), reg_drop.predict(X_test)) + assert_allclose(reg.transform(X_test), reg_drop.transform(X_test)) + + +@pytest.mark.parametrize("cv", [3, KFold(n_splits=3, shuffle=True, random_state=42)]) +@pytest.mark.parametrize( + "final_estimator, predict_params", + [ + (None, {}), + (RandomForestRegressor(random_state=42), {}), + (DummyRegressor(), {"return_std": True}), + ], +) +@pytest.mark.parametrize("passthrough", [False, True]) +def test_stacking_regressor_diabetes(cv, final_estimator, predict_params, passthrough): + # prescale the data to avoid convergence warning without using a pipeline + # for later assert + X_train, X_test, y_train, _ = train_test_split( + scale(X_diabetes), y_diabetes, random_state=42 + ) + estimators = [("lr", LinearRegression()), ("svr", LinearSVR())] + reg = StackingRegressor( + estimators=estimators, + final_estimator=final_estimator, + cv=cv, + 
passthrough=passthrough, + ) + reg.fit(X_train, y_train) + result = reg.predict(X_test, **predict_params) + expected_result_length = 2 if predict_params else 1 + if predict_params: + assert len(result) == expected_result_length + + X_trans = reg.transform(X_test) + expected_column_count = 12 if passthrough else 2 + assert X_trans.shape[1] == expected_column_count + if passthrough: + assert_allclose(X_test, X_trans[:, -10:]) + + reg.set_params(lr="drop") + reg.fit(X_train, y_train) + reg.predict(X_test) + + X_trans = reg.transform(X_test) + expected_column_count_drop = 11 if passthrough else 1 + assert X_trans.shape[1] == expected_column_count_drop + if passthrough: + assert_allclose(X_test, X_trans[:, -10:]) + + +@pytest.mark.parametrize( + "sparse_container", COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS +) +def test_stacking_regressor_sparse_passthrough(sparse_container): + # Check passthrough behavior on a sparse X matrix + X_train, X_test, y_train, _ = train_test_split( + sparse_container(scale(X_diabetes)), y_diabetes, random_state=42 + ) + estimators = [("lr", LinearRegression()), ("svr", LinearSVR())] + rf = RandomForestRegressor(n_estimators=10, random_state=42) + clf = StackingRegressor( + estimators=estimators, final_estimator=rf, cv=5, passthrough=True + ) + clf.fit(X_train, y_train) + X_trans = clf.transform(X_test) + assert_allclose_dense_sparse(X_test, X_trans[:, -10:]) + assert sparse.issparse(X_trans) + assert X_test.format == X_trans.format + + +@pytest.mark.parametrize( + "sparse_container", COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS +) +def test_stacking_classifier_sparse_passthrough(sparse_container): + # Check passthrough behavior on a sparse X matrix + X_train, X_test, y_train, _ = train_test_split( + sparse_container(scale(X_iris)), y_iris, random_state=42 + ) + estimators = [("lr", LogisticRegression()), ("svc", LinearSVC())] + rf = RandomForestClassifier(n_estimators=10, random_state=42) + clf = StackingClassifier( + 
estimators=estimators, final_estimator=rf, cv=5, passthrough=True + ) + clf.fit(X_train, y_train) + X_trans = clf.transform(X_test) + assert_allclose_dense_sparse(X_test, X_trans[:, -4:]) + assert sparse.issparse(X_trans) + assert X_test.format == X_trans.format + + +def test_stacking_classifier_drop_binary_prob(): + # check that classifier will drop one of the probability column for + # binary classification problem + + # Select only the 2 first classes + X_, y_ = scale(X_iris[:100]), y_iris[:100] + + estimators = [("lr", LogisticRegression()), ("rf", RandomForestClassifier())] + clf = StackingClassifier(estimators=estimators) + clf.fit(X_, y_) + X_meta = clf.transform(X_) + assert X_meta.shape[1] == 2 + + +class NoWeightRegressor(RegressorMixin, BaseEstimator): + def fit(self, X, y): + self.reg = DummyRegressor() + return self.reg.fit(X, y) + + def predict(self, X): + return np.ones(X.shape[0]) + + +class NoWeightClassifier(ClassifierMixin, BaseEstimator): + def fit(self, X, y): + self.clf = DummyClassifier(strategy="stratified") + return self.clf.fit(X, y) + + +@pytest.mark.parametrize( + "y, params, type_err, msg_err", + [ + (y_iris, {"estimators": []}, ValueError, "Invalid 'estimators' attribute,"), + ( + y_iris, + { + "estimators": [ + ("lr", LogisticRegression()), + ("svm", SVC(max_iter=50_000)), + ], + "stack_method": "predict_proba", + }, + ValueError, + "does not implement the method predict_proba", + ), + ( + y_iris, + { + "estimators": [ + ("lr", LogisticRegression()), + ("cor", NoWeightClassifier()), + ] + }, + TypeError, + "does not support sample weight", + ), + ( + y_iris, + { + "estimators": [ + ("lr", LogisticRegression()), + ("cor", LinearSVC(max_iter=50_000)), + ], + "final_estimator": NoWeightClassifier(), + }, + TypeError, + "does not support sample weight", + ), + ], +) +def test_stacking_classifier_error(y, params, type_err, msg_err): + with pytest.raises(type_err, match=msg_err): + clf = StackingClassifier(**params, cv=3) + 
clf.fit(scale(X_iris), y, sample_weight=np.ones(X_iris.shape[0])) + + +@pytest.mark.parametrize( + "y, params, type_err, msg_err", + [ + (y_diabetes, {"estimators": []}, ValueError, "Invalid 'estimators' attribute,"), + ( + y_diabetes, + {"estimators": [("lr", LinearRegression()), ("cor", NoWeightRegressor())]}, + TypeError, + "does not support sample weight", + ), + ( + y_diabetes, + { + "estimators": [ + ("lr", LinearRegression()), + ("cor", LinearSVR()), + ], + "final_estimator": NoWeightRegressor(), + }, + TypeError, + "does not support sample weight", + ), + ], +) +def test_stacking_regressor_error(y, params, type_err, msg_err): + with pytest.raises(type_err, match=msg_err): + reg = StackingRegressor(**params, cv=3) + reg.fit(scale(X_diabetes), y, sample_weight=np.ones(X_diabetes.shape[0])) + + +@pytest.mark.parametrize( + "estimator, X, y", + [ + ( + StackingClassifier( + estimators=[ + ("lr", LogisticRegression(random_state=0)), + ("svm", LinearSVC(random_state=0)), + ] + ), + X_iris[:100], + y_iris[:100], + ), # keep only classes 0 and 1 + ( + StackingRegressor( + estimators=[ + ("lr", LinearRegression()), + ("svm", LinearSVR(random_state=0)), + ] + ), + X_diabetes, + y_diabetes, + ), + ], + ids=["StackingClassifier", "StackingRegressor"], +) +def test_stacking_randomness(estimator, X, y): + # checking that fixing the random state of the CV will lead to the same + # results + estimator_full = clone(estimator) + estimator_full.set_params( + cv=KFold(shuffle=True, random_state=np.random.RandomState(0)) + ) + + estimator_drop = clone(estimator) + estimator_drop.set_params(lr="drop") + estimator_drop.set_params( + cv=KFold(shuffle=True, random_state=np.random.RandomState(0)) + ) + + assert_allclose( + estimator_full.fit(X, y).transform(X)[:, 1:], + estimator_drop.fit(X, y).transform(X), + ) + + +def test_stacking_classifier_stratify_default(): + # check that we stratify the classes for the default CV + clf = StackingClassifier( + estimators=[ + ("lr", 
LogisticRegression(max_iter=10_000)), + ("svm", LinearSVC(max_iter=10_000)), + ] + ) + # since iris is not shuffled, a simple k-fold would not contain the + # 3 classes during training + clf.fit(X_iris, y_iris) + + +@pytest.mark.parametrize( + "stacker, X, y", + [ + ( + StackingClassifier( + estimators=[ + ("lr", LogisticRegression()), + ("svm", LinearSVC(random_state=42)), + ], + final_estimator=LogisticRegression(), + cv=KFold(shuffle=True, random_state=42), + ), + *load_breast_cancer(return_X_y=True), + ), + ( + StackingRegressor( + estimators=[ + ("lr", LinearRegression()), + ("svm", LinearSVR(random_state=42)), + ], + final_estimator=LinearRegression(), + cv=KFold(shuffle=True, random_state=42), + ), + X_diabetes, + y_diabetes, + ), + ], + ids=["StackingClassifier", "StackingRegressor"], +) +def test_stacking_with_sample_weight(stacker, X, y): + # check that sample weights has an influence on the fitting + # note: ConvergenceWarning are catch since we are not worrying about the + # convergence here + n_half_samples = len(y) // 2 + total_sample_weight = np.array( + [0.1] * n_half_samples + [0.9] * (len(y) - n_half_samples) + ) + X_train, X_test, y_train, _, sample_weight_train, _ = train_test_split( + X, y, total_sample_weight, random_state=42 + ) + + with ignore_warnings(category=ConvergenceWarning): + stacker.fit(X_train, y_train) + y_pred_no_weight = stacker.predict(X_test) + + with ignore_warnings(category=ConvergenceWarning): + stacker.fit(X_train, y_train, sample_weight=np.ones(y_train.shape)) + y_pred_unit_weight = stacker.predict(X_test) + + assert_allclose(y_pred_no_weight, y_pred_unit_weight) + + with ignore_warnings(category=ConvergenceWarning): + stacker.fit(X_train, y_train, sample_weight=sample_weight_train) + y_pred_biased = stacker.predict(X_test) + + assert np.abs(y_pred_no_weight - y_pred_biased).sum() > 0 + + +def test_stacking_classifier_sample_weight_fit_param(): + # check sample_weight is passed to all invocations of fit + stacker = 
StackingClassifier( + estimators=[("lr", CheckingClassifier(expected_sample_weight=True))], + final_estimator=CheckingClassifier(expected_sample_weight=True), + ) + stacker.fit(X_iris, y_iris, sample_weight=np.ones(X_iris.shape[0])) + + +@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning") +@pytest.mark.parametrize( + "stacker, X, y", + [ + ( + StackingClassifier( + estimators=[ + ("lr", LogisticRegression()), + ("svm", LinearSVC(random_state=42)), + ], + final_estimator=LogisticRegression(), + ), + *load_breast_cancer(return_X_y=True), + ), + ( + StackingRegressor( + estimators=[ + ("lr", LinearRegression()), + ("svm", LinearSVR(random_state=42)), + ], + final_estimator=LinearRegression(), + ), + X_diabetes, + y_diabetes, + ), + ], + ids=["StackingClassifier", "StackingRegressor"], +) +def test_stacking_cv_influence(stacker, X, y): + # check that the stacking affects the fit of the final estimator but not + # the fit of the base estimators + # note: ConvergenceWarning are catch since we are not worrying about the + # convergence here + stacker_cv_3 = clone(stacker) + stacker_cv_5 = clone(stacker) + + stacker_cv_3.set_params(cv=3) + stacker_cv_5.set_params(cv=5) + + stacker_cv_3.fit(X, y) + stacker_cv_5.fit(X, y) + + # the base estimators should be identical + for est_cv_3, est_cv_5 in zip(stacker_cv_3.estimators_, stacker_cv_5.estimators_): + assert_allclose(est_cv_3.coef_, est_cv_5.coef_) + + # the final estimator should be different + with pytest.raises(AssertionError, match="Not equal"): + assert_allclose( + stacker_cv_3.final_estimator_.coef_, stacker_cv_5.final_estimator_.coef_ + ) + + +@pytest.mark.parametrize( + "Stacker, Estimator, stack_method, final_estimator, X, y", + [ + ( + StackingClassifier, + DummyClassifier, + "predict_proba", + LogisticRegression(random_state=42), + X_iris, + y_iris, + ), + ( + StackingRegressor, + DummyRegressor, + "predict", + LinearRegression(), + X_diabetes, + y_diabetes, + ), + ], +) +def 
test_stacking_prefit(Stacker, Estimator, stack_method, final_estimator, X, y): + """Check the behaviour of stacking when `cv='prefit'`""" + X_train1, X_train2, y_train1, y_train2 = train_test_split( + X, y, random_state=42, test_size=0.5 + ) + estimators = [ + ("d0", Estimator().fit(X_train1, y_train1)), + ("d1", Estimator().fit(X_train1, y_train1)), + ] + + # mock out fit and stack_method to be asserted later + for _, estimator in estimators: + estimator.fit = Mock(name="fit") + stack_func = getattr(estimator, stack_method) + predict_method_mocked = Mock(side_effect=stack_func) + # Mocking a method will not provide a `__name__` while Python methods + # do and we are using it in `_get_response_method`. + predict_method_mocked.__name__ = stack_method + setattr(estimator, stack_method, predict_method_mocked) + + stacker = Stacker( + estimators=estimators, cv="prefit", final_estimator=final_estimator + ) + stacker.fit(X_train2, y_train2) + + assert stacker.estimators_ == [estimator for _, estimator in estimators] + # fit was not called again + assert all(estimator.fit.call_count == 0 for estimator in stacker.estimators_) + + # stack method is called with the proper inputs + for estimator in stacker.estimators_: + stack_func_mock = getattr(estimator, stack_method) + stack_func_mock.assert_called_with(X_train2) + + +@pytest.mark.parametrize( + "stacker, X, y", + [ + ( + StackingClassifier( + estimators=[("lr", LogisticRegression()), ("svm", SVC())], + cv="prefit", + ), + X_iris, + y_iris, + ), + ( + StackingRegressor( + estimators=[ + ("lr", LinearRegression()), + ("svm", LinearSVR()), + ], + cv="prefit", + ), + X_diabetes, + y_diabetes, + ), + ], +) +def test_stacking_prefit_error(stacker, X, y): + # check that NotFittedError is raised + # if base estimators are not fitted when cv="prefit" + with pytest.raises(NotFittedError): + stacker.fit(X, y) + + +@pytest.mark.parametrize( + "make_dataset, Stacking, Estimator", + [ + (make_classification, StackingClassifier, 
LogisticRegression), + (make_regression, StackingRegressor, LinearRegression), + ], +) +def test_stacking_without_n_features_in(make_dataset, Stacking, Estimator): + # Stacking supports estimators without `n_features_in_`. Regression test + # for #17353 + + class MyEstimator(Estimator): + """Estimator without n_features_in_""" + + def fit(self, X, y): + super().fit(X, y) + del self.n_features_in_ + + X, y = make_dataset(random_state=0, n_samples=100) + stacker = Stacking(estimators=[("lr", MyEstimator())]) + + msg = f"{Stacking.__name__} object has no attribute n_features_in_" + with pytest.raises(AttributeError, match=msg): + stacker.n_features_in_ + + # Does not raise + stacker.fit(X, y) + + msg = "'MyEstimator' object has no attribute 'n_features_in_'" + with pytest.raises(AttributeError, match=msg): + stacker.n_features_in_ + + +@pytest.mark.parametrize( + "estimator", + [ + # output a 2D array of the probability of the positive class for each output + MLPClassifier(random_state=42), + # output a list of 2D array containing the probability of each class + # for each output + RandomForestClassifier(random_state=42), + ], + ids=["MLPClassifier", "RandomForestClassifier"], +) +def test_stacking_classifier_multilabel_predict_proba(estimator): + """Check the behaviour for the multilabel classification case and the + `predict_proba` stacking method. + + Estimators are not consistent with the output arrays and we need to ensure that + we handle all cases. 
+ """ + X_train, X_test, y_train, y_test = train_test_split( + X_multilabel, y_multilabel, stratify=y_multilabel, random_state=42 + ) + n_outputs = 3 + + estimators = [("est", estimator)] + stacker = StackingClassifier( + estimators=estimators, + final_estimator=KNeighborsClassifier(), + stack_method="predict_proba", + ).fit(X_train, y_train) + + X_trans = stacker.transform(X_test) + assert X_trans.shape == (X_test.shape[0], n_outputs) + # we should not have any collinear classes and thus nothing should sum to 1 + assert not any(np.isclose(X_trans.sum(axis=1), 1.0)) + + y_pred = stacker.predict(X_test) + assert y_pred.shape == y_test.shape + + +def test_stacking_classifier_multilabel_decision_function(): + """Check the behaviour for the multilabel classification case and the + `decision_function` stacking method. Only `RidgeClassifier` supports this + case. + """ + X_train, X_test, y_train, y_test = train_test_split( + X_multilabel, y_multilabel, stratify=y_multilabel, random_state=42 + ) + n_outputs = 3 + + estimators = [("est", RidgeClassifier())] + stacker = StackingClassifier( + estimators=estimators, + final_estimator=KNeighborsClassifier(), + stack_method="decision_function", + ).fit(X_train, y_train) + + X_trans = stacker.transform(X_test) + assert X_trans.shape == (X_test.shape[0], n_outputs) + + y_pred = stacker.predict(X_test) + assert y_pred.shape == y_test.shape + + +@pytest.mark.parametrize("stack_method", ["auto", "predict"]) +@pytest.mark.parametrize("passthrough", [False, True]) +def test_stacking_classifier_multilabel_auto_predict(stack_method, passthrough): + """Check the behaviour for the multilabel classification case for stack methods + supported for all estimators or automatically picked up. 
+ """ + X_train, X_test, y_train, y_test = train_test_split( + X_multilabel, y_multilabel, stratify=y_multilabel, random_state=42 + ) + y_train_before_fit = y_train.copy() + n_outputs = 3 + + estimators = [ + ("mlp", MLPClassifier(random_state=42)), + ("rf", RandomForestClassifier(random_state=42)), + ("ridge", RidgeClassifier()), + ] + final_estimator = KNeighborsClassifier() + + clf = StackingClassifier( + estimators=estimators, + final_estimator=final_estimator, + passthrough=passthrough, + stack_method=stack_method, + ).fit(X_train, y_train) + + # make sure we don't change `y_train` inplace + assert_array_equal(y_train_before_fit, y_train) + + y_pred = clf.predict(X_test) + assert y_pred.shape == y_test.shape + + if stack_method == "auto": + expected_stack_methods = ["predict_proba", "predict_proba", "decision_function"] + else: + expected_stack_methods = ["predict"] * len(estimators) + assert clf.stack_method_ == expected_stack_methods + + n_features_X_trans = n_outputs * len(estimators) + if passthrough: + n_features_X_trans += X_train.shape[1] + X_trans = clf.transform(X_test) + assert X_trans.shape == (X_test.shape[0], n_features_X_trans) + + assert_array_equal(clf.classes_, [np.array([0, 1])] * n_outputs) + + +@pytest.mark.parametrize( + "stacker, feature_names, X, y, expected_names", + [ + ( + StackingClassifier( + estimators=[ + ("lr", LogisticRegression(random_state=0)), + ("svm", LinearSVC(random_state=0)), + ] + ), + iris.feature_names, + X_iris, + y_iris, + [ + "stackingclassifier_lr0", + "stackingclassifier_lr1", + "stackingclassifier_lr2", + "stackingclassifier_svm0", + "stackingclassifier_svm1", + "stackingclassifier_svm2", + ], + ), + ( + StackingClassifier( + estimators=[ + ("lr", LogisticRegression(random_state=0)), + ("other", "drop"), + ("svm", LinearSVC(random_state=0)), + ] + ), + iris.feature_names, + X_iris[:100], + y_iris[:100], # keep only classes 0 and 1 + [ + "stackingclassifier_lr", + "stackingclassifier_svm", + ], + ), + ( + 
StackingRegressor( + estimators=[ + ("lr", LinearRegression()), + ("svm", LinearSVR(random_state=0)), + ] + ), + diabetes.feature_names, + X_diabetes, + y_diabetes, + [ + "stackingregressor_lr", + "stackingregressor_svm", + ], + ), + ], + ids=[ + "StackingClassifier_multiclass", + "StackingClassifier_binary", + "StackingRegressor", + ], +) +@pytest.mark.parametrize("passthrough", [True, False]) +def test_get_feature_names_out( + stacker, feature_names, X, y, expected_names, passthrough +): + """Check get_feature_names_out works for stacking.""" + + stacker.set_params(passthrough=passthrough) + stacker.fit(scale(X), y) + + if passthrough: + expected_names = np.concatenate((expected_names, feature_names)) + + names_out = stacker.get_feature_names_out(feature_names) + assert_array_equal(names_out, expected_names) + + +def test_stacking_classifier_base_regressor(): + """Check that a regressor can be used as the first layer in `StackingClassifier`.""" + X_train, X_test, y_train, y_test = train_test_split( + scale(X_iris), y_iris, stratify=y_iris, random_state=42 + ) + clf = StackingClassifier(estimators=[("ridge", Ridge())]) + clf.fit(X_train, y_train) + clf.predict(X_test) + clf.predict_proba(X_test) + assert clf.score(X_test, y_test) > 0.8 + + +def test_stacking_final_estimator_attribute_error(): + """Check that we raise the proper AttributeError when the final estimator + does not implement the `decision_function` method, which is decorated with + `available_if`. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/28108 + """ + X, y = make_classification(random_state=42) + + estimators = [ + ("lr", LogisticRegression()), + ("rf", RandomForestClassifier(n_estimators=2, random_state=42)), + ] + # RandomForestClassifier does not implement 'decision_function' and should raise + # an AttributeError + final_estimator = RandomForestClassifier(n_estimators=2, random_state=42) + clf = StackingClassifier( + estimators=estimators, final_estimator=final_estimator, cv=3 + ) + + outer_msg = "This 'StackingClassifier' has no attribute 'decision_function'" + inner_msg = "'RandomForestClassifier' object has no attribute 'decision_function'" + with pytest.raises(AttributeError, match=outer_msg) as exec_info: + clf.fit(X, y).decision_function(X) + assert isinstance(exec_info.value.__cause__, AttributeError) + assert inner_msg in str(exec_info.value.__cause__) + + +# Metadata Routing Tests +# ====================== + + +@pytest.mark.parametrize( + "Estimator, Child", + [ + (StackingClassifier, ConsumingClassifier), + (StackingRegressor, ConsumingRegressor), + ], +) +def test_routing_passed_metadata_not_supported(Estimator, Child): + """Test that the right error message is raised when metadata is passed while + not supported when `enable_metadata_routing=False`.""" + + with pytest.raises( + ValueError, match="is only supported if enable_metadata_routing=True" + ): + Estimator(["clf", Child()]).fit( + X_iris, y_iris, sample_weight=[1, 1, 1, 1, 1], metadata="a" + ) + + +@pytest.mark.parametrize( + "Estimator, Child", + [ + (StackingClassifier, ConsumingClassifier), + (StackingRegressor, ConsumingRegressor), + ], +) +@config_context(enable_metadata_routing=True) +def test_get_metadata_routing_without_fit(Estimator, Child): + # Test that metadata_routing() doesn't raise when called before fit. 
+ est = Estimator([("sub_est", Child())]) + est.get_metadata_routing() + + +@pytest.mark.parametrize( + "Estimator, Child", + [ + (StackingClassifier, ConsumingClassifier), + (StackingRegressor, ConsumingRegressor), + ], +) +@pytest.mark.parametrize( + "prop, prop_value", [("sample_weight", np.ones(X_iris.shape[0])), ("metadata", "a")] +) +@config_context(enable_metadata_routing=True) +def test_metadata_routing_for_stacking_estimators(Estimator, Child, prop, prop_value): + """Test that metadata is routed correctly for Stacking*.""" + + est = Estimator( + [ + ( + "sub_est1", + Child(registry=_Registry()).set_fit_request(**{prop: True}), + ), + ( + "sub_est2", + Child(registry=_Registry()).set_fit_request(**{prop: True}), + ), + ], + final_estimator=Child(registry=_Registry()).set_predict_request(**{prop: True}), + ) + + est.fit(X_iris, y_iris, **{prop: prop_value}) + est.fit_transform(X_iris, y_iris, **{prop: prop_value}) + + est.predict(X_iris, **{prop: prop_value}) + + for estimator in est.estimators: + # access sub-estimator in (name, est) with estimator[1]: + registry = estimator[1].registry + assert len(registry) + for sub_est in registry: + check_recorded_metadata( + obj=sub_est, + method="fit", + parent="fit", + split_params=(prop), + **{prop: prop_value}, + ) + # access final_estimator: + registry = est.final_estimator_.registry + assert len(registry) + check_recorded_metadata( + obj=registry[-1], + method="predict", + parent="predict", + split_params=(prop), + **{prop: prop_value}, + ) + + +@pytest.mark.parametrize( + "Estimator, Child", + [ + (StackingClassifier, ConsumingClassifier), + (StackingRegressor, ConsumingRegressor), + ], +) +@config_context(enable_metadata_routing=True) +def test_metadata_routing_error_for_stacking_estimators(Estimator, Child): + """Test that the right error is raised when metadata is not requested.""" + sample_weight, metadata = np.ones(X_iris.shape[0]), "a" + + est = Estimator([("sub_est", Child())]) + + error_message = ( + 
"[sample_weight, metadata] are passed but are not explicitly set as requested" + f" or not requested for {Child.__name__}.fit" + ) + + with pytest.raises(ValueError, match=re.escape(error_message)): + est.fit(X_iris, y_iris, sample_weight=sample_weight, metadata=metadata) + + +# End of Metadata Routing Tests +# ============================= diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/test_weight_boosting.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/test_weight_boosting.py new file mode 100644 index 0000000000000000000000000000000000000000..55825c438d76b29b74d8108970f72e3ebaa5e745 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/test_weight_boosting.py @@ -0,0 +1,639 @@ +"""Testing for the boost module (sklearn.ensemble.boost).""" + +import re + +import numpy as np +import pytest + +from sklearn import datasets +from sklearn.base import BaseEstimator, clone +from sklearn.dummy import DummyClassifier, DummyRegressor +from sklearn.ensemble import AdaBoostClassifier, AdaBoostRegressor +from sklearn.ensemble._weight_boosting import _samme_proba +from sklearn.linear_model import LinearRegression +from sklearn.model_selection import GridSearchCV, train_test_split +from sklearn.svm import SVC, SVR +from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor +from sklearn.utils import shuffle +from sklearn.utils._mocking import NoSampleWeightWrapper +from sklearn.utils._testing import ( + assert_allclose, + assert_array_almost_equal, + assert_array_equal, +) +from sklearn.utils.fixes import ( + COO_CONTAINERS, + CSC_CONTAINERS, + CSR_CONTAINERS, + DOK_CONTAINERS, + LIL_CONTAINERS, +) + +# Common random state +rng = np.random.RandomState(0) + +# Toy sample +X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] +y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels +y_regr = [-1, -1, -1, 1, 1, 1] +T = [[-1, -1], [2, 2], [3, 2]] +y_t_class = ["foo", 
1, 1] +y_t_regr = [-1, 1, 1] + +# Load the iris dataset and randomly permute it +iris = datasets.load_iris() +perm = rng.permutation(iris.target.size) +iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng) + +# Load the diabetes dataset and randomly permute it +diabetes = datasets.load_diabetes() +diabetes.data, diabetes.target = shuffle( + diabetes.data, diabetes.target, random_state=rng +) + + +def test_samme_proba(): + # Test the `_samme_proba` helper function. + + # Define some example (bad) `predict_proba` output. + probs = np.array( + [[1, 1e-6, 0], [0.19, 0.6, 0.2], [-999, 0.51, 0.5], [1e-6, 1, 1e-9]] + ) + probs /= np.abs(probs.sum(axis=1))[:, np.newaxis] + + # _samme_proba calls estimator.predict_proba. + # Make a mock object so I can control what gets returned. + class MockEstimator: + def predict_proba(self, X): + assert_array_equal(X.shape, probs.shape) + return probs + + mock = MockEstimator() + + samme_proba = _samme_proba(mock, 3, np.ones_like(probs)) + + assert_array_equal(samme_proba.shape, probs.shape) + assert np.isfinite(samme_proba).all() + + # Make sure that the correct elements come out as smallest -- + # `_samme_proba` should preserve the ordering in each example. + assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2]) + assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1]) + + +def test_oneclass_adaboost_proba(): + # Test predict_proba robustness for one class label input. + # In response to issue #7501 + # https://github.com/scikit-learn/scikit-learn/issues/7501 + y_t = np.ones(len(X)) + clf = AdaBoostClassifier().fit(X, y_t) + assert_array_almost_equal(clf.predict_proba(X), np.ones((len(X), 1))) + + +def test_classification_toy(): + # Check classification on a toy dataset. 
+ clf = AdaBoostClassifier(random_state=0) + clf.fit(X, y_class) + assert_array_equal(clf.predict(T), y_t_class) + assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_) + assert clf.predict_proba(T).shape == (len(T), 2) + assert clf.decision_function(T).shape == (len(T),) + + +def test_regression_toy(): + # Check classification on a toy dataset. + clf = AdaBoostRegressor(random_state=0) + clf.fit(X, y_regr) + assert_array_equal(clf.predict(T), y_t_regr) + + +def test_iris(): + # Check consistency on dataset iris. + classes = np.unique(iris.target) + + clf = AdaBoostClassifier() + clf.fit(iris.data, iris.target) + + assert_array_equal(classes, clf.classes_) + proba = clf.predict_proba(iris.data) + + assert proba.shape[1] == len(classes) + assert clf.decision_function(iris.data).shape[1] == len(classes) + + score = clf.score(iris.data, iris.target) + assert score > 0.9, f"Failed with {score = }" + + # Check we used multiple estimators + assert len(clf.estimators_) > 1 + # Check for distinct random states (see issue #7408) + assert len(set(est.random_state for est in clf.estimators_)) == len(clf.estimators_) + + +@pytest.mark.parametrize("loss", ["linear", "square", "exponential"]) +def test_diabetes(loss): + # Check consistency on dataset diabetes. + reg = AdaBoostRegressor(loss=loss, random_state=0) + reg.fit(diabetes.data, diabetes.target) + score = reg.score(diabetes.data, diabetes.target) + assert score > 0.55 + + # Check we used multiple estimators + assert len(reg.estimators_) > 1 + # Check for distinct random states (see issue #7408) + assert len(set(est.random_state for est in reg.estimators_)) == len(reg.estimators_) + + +def test_staged_predict(): + # Check staged predictions. 
+ rng = np.random.RandomState(0) + iris_weights = rng.randint(10, size=iris.target.shape) + diabetes_weights = rng.randint(10, size=diabetes.target.shape) + + clf = AdaBoostClassifier(n_estimators=10) + clf.fit(iris.data, iris.target, sample_weight=iris_weights) + + predictions = clf.predict(iris.data) + staged_predictions = [p for p in clf.staged_predict(iris.data)] + proba = clf.predict_proba(iris.data) + staged_probas = [p for p in clf.staged_predict_proba(iris.data)] + score = clf.score(iris.data, iris.target, sample_weight=iris_weights) + staged_scores = [ + s for s in clf.staged_score(iris.data, iris.target, sample_weight=iris_weights) + ] + + assert len(staged_predictions) == 10 + assert_array_almost_equal(predictions, staged_predictions[-1]) + assert len(staged_probas) == 10 + assert_array_almost_equal(proba, staged_probas[-1]) + assert len(staged_scores) == 10 + assert_array_almost_equal(score, staged_scores[-1]) + + # AdaBoost regression + clf = AdaBoostRegressor(n_estimators=10, random_state=0) + clf.fit(diabetes.data, diabetes.target, sample_weight=diabetes_weights) + + predictions = clf.predict(diabetes.data) + staged_predictions = [p for p in clf.staged_predict(diabetes.data)] + score = clf.score(diabetes.data, diabetes.target, sample_weight=diabetes_weights) + staged_scores = [ + s + for s in clf.staged_score( + diabetes.data, diabetes.target, sample_weight=diabetes_weights + ) + ] + + assert len(staged_predictions) == 10 + assert_array_almost_equal(predictions, staged_predictions[-1]) + assert len(staged_scores) == 10 + assert_array_almost_equal(score, staged_scores[-1]) + + +def test_gridsearch(): + # Check that base trees can be grid-searched. 
+ # AdaBoost classification + boost = AdaBoostClassifier(estimator=DecisionTreeClassifier()) + parameters = { + "n_estimators": (1, 2), + "estimator__max_depth": (1, 2), + } + clf = GridSearchCV(boost, parameters) + clf.fit(iris.data, iris.target) + + # AdaBoost regression + boost = AdaBoostRegressor(estimator=DecisionTreeRegressor(), random_state=0) + parameters = {"n_estimators": (1, 2), "estimator__max_depth": (1, 2)} + clf = GridSearchCV(boost, parameters) + clf.fit(diabetes.data, diabetes.target) + + +def test_pickle(): + # Check pickability. + import pickle + + # Adaboost classifier + obj = AdaBoostClassifier() + obj.fit(iris.data, iris.target) + score = obj.score(iris.data, iris.target) + s = pickle.dumps(obj) + + obj2 = pickle.loads(s) + assert type(obj2) == obj.__class__ + score2 = obj2.score(iris.data, iris.target) + assert score == score2 + + # Adaboost regressor + obj = AdaBoostRegressor(random_state=0) + obj.fit(diabetes.data, diabetes.target) + score = obj.score(diabetes.data, diabetes.target) + s = pickle.dumps(obj) + + obj2 = pickle.loads(s) + assert type(obj2) == obj.__class__ + score2 = obj2.score(diabetes.data, diabetes.target) + assert score == score2 + + +def test_importances(): + # Check variable importances. + X, y = datasets.make_classification( + n_samples=2000, + n_features=10, + n_informative=3, + n_redundant=0, + n_repeated=0, + shuffle=False, + random_state=1, + ) + + clf = AdaBoostClassifier() + + clf.fit(X, y) + importances = clf.feature_importances_ + + assert importances.shape[0] == 10 + assert (importances[:3, np.newaxis] >= importances[3:]).all() + + +def test_adaboost_classifier_sample_weight_error(): + # Test that it gives proper exception on incorrect sample weight. + clf = AdaBoostClassifier() + msg = re.escape("sample_weight.shape == (1,), expected (6,)") + with pytest.raises(ValueError, match=msg): + clf.fit(X, y_class, sample_weight=np.asarray([-1])) + + +def test_estimator(): + # Test different estimators. 
+ from sklearn.ensemble import RandomForestClassifier + + # XXX doesn't work with y_class because RF doesn't support classes_ + # Shouldn't AdaBoost run a LabelBinarizer? + clf = AdaBoostClassifier(RandomForestClassifier()) + clf.fit(X, y_regr) + + clf = AdaBoostClassifier(SVC()) + clf.fit(X, y_class) + + from sklearn.ensemble import RandomForestRegressor + + clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0) + clf.fit(X, y_regr) + + clf = AdaBoostRegressor(SVR(), random_state=0) + clf.fit(X, y_regr) + + # Check that an empty discrete ensemble fails in fit, not predict. + X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]] + y_fail = ["foo", "bar", 1, 2] + clf = AdaBoostClassifier(SVC()) + with pytest.raises(ValueError, match="worse than random"): + clf.fit(X_fail, y_fail) + + +def test_sample_weights_infinite(): + msg = "Sample weights have reached infinite values" + clf = AdaBoostClassifier(n_estimators=30, learning_rate=23.0) + with pytest.warns(UserWarning, match=msg): + clf.fit(iris.data, iris.target) + + +@pytest.mark.parametrize( + "sparse_container, expected_internal_type", + zip( + [ + *CSC_CONTAINERS, + *CSR_CONTAINERS, + *LIL_CONTAINERS, + *COO_CONTAINERS, + *DOK_CONTAINERS, + ], + CSC_CONTAINERS + 4 * CSR_CONTAINERS, + ), +) +def test_sparse_classification(sparse_container, expected_internal_type): + # Check classification with sparse input. 
+ + class CustomSVC(SVC): + """SVC variant that records the nature of the training set.""" + + def fit(self, X, y, sample_weight=None): + """Modification on fit caries data type for later verification.""" + super().fit(X, y, sample_weight=sample_weight) + self.data_type_ = type(X) + return self + + X, y = datasets.make_multilabel_classification( + n_classes=1, n_samples=15, n_features=5, random_state=42 + ) + # Flatten y to a 1d array + y = np.ravel(y) + + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) + + X_train_sparse = sparse_container(X_train) + X_test_sparse = sparse_container(X_test) + + # Trained on sparse format + sparse_classifier = AdaBoostClassifier( + estimator=CustomSVC(probability=True), + random_state=1, + ).fit(X_train_sparse, y_train) + + # Trained on dense format + dense_classifier = AdaBoostClassifier( + estimator=CustomSVC(probability=True), + random_state=1, + ).fit(X_train, y_train) + + # predict + sparse_clf_results = sparse_classifier.predict(X_test_sparse) + dense_clf_results = dense_classifier.predict(X_test) + assert_array_equal(sparse_clf_results, dense_clf_results) + + # decision_function + sparse_clf_results = sparse_classifier.decision_function(X_test_sparse) + dense_clf_results = dense_classifier.decision_function(X_test) + assert_array_almost_equal(sparse_clf_results, dense_clf_results) + + # predict_log_proba + sparse_clf_results = sparse_classifier.predict_log_proba(X_test_sparse) + dense_clf_results = dense_classifier.predict_log_proba(X_test) + assert_array_almost_equal(sparse_clf_results, dense_clf_results) + + # predict_proba + sparse_clf_results = sparse_classifier.predict_proba(X_test_sparse) + dense_clf_results = dense_classifier.predict_proba(X_test) + assert_array_almost_equal(sparse_clf_results, dense_clf_results) + + # score + sparse_clf_results = sparse_classifier.score(X_test_sparse, y_test) + dense_clf_results = dense_classifier.score(X_test, y_test) + 
assert_array_almost_equal(sparse_clf_results, dense_clf_results) + + # staged_decision_function + sparse_clf_results = sparse_classifier.staged_decision_function(X_test_sparse) + dense_clf_results = dense_classifier.staged_decision_function(X_test) + for sparse_clf_res, dense_clf_res in zip(sparse_clf_results, dense_clf_results): + assert_array_almost_equal(sparse_clf_res, dense_clf_res) + + # staged_predict + sparse_clf_results = sparse_classifier.staged_predict(X_test_sparse) + dense_clf_results = dense_classifier.staged_predict(X_test) + for sparse_clf_res, dense_clf_res in zip(sparse_clf_results, dense_clf_results): + assert_array_equal(sparse_clf_res, dense_clf_res) + + # staged_predict_proba + sparse_clf_results = sparse_classifier.staged_predict_proba(X_test_sparse) + dense_clf_results = dense_classifier.staged_predict_proba(X_test) + for sparse_clf_res, dense_clf_res in zip(sparse_clf_results, dense_clf_results): + assert_array_almost_equal(sparse_clf_res, dense_clf_res) + + # staged_score + sparse_clf_results = sparse_classifier.staged_score(X_test_sparse, y_test) + dense_clf_results = dense_classifier.staged_score(X_test, y_test) + for sparse_clf_res, dense_clf_res in zip(sparse_clf_results, dense_clf_results): + assert_array_equal(sparse_clf_res, dense_clf_res) + + # Verify sparsity of data is maintained during training + types = [i.data_type_ for i in sparse_classifier.estimators_] + + assert all([t == expected_internal_type for t in types]) + + +@pytest.mark.parametrize( + "sparse_container, expected_internal_type", + zip( + [ + *CSC_CONTAINERS, + *CSR_CONTAINERS, + *LIL_CONTAINERS, + *COO_CONTAINERS, + *DOK_CONTAINERS, + ], + CSC_CONTAINERS + 4 * CSR_CONTAINERS, + ), +) +def test_sparse_regression(sparse_container, expected_internal_type): + # Check regression with sparse input. 
+ + class CustomSVR(SVR): + """SVR variant that records the nature of the training set.""" + + def fit(self, X, y, sample_weight=None): + """Modification on fit caries data type for later verification.""" + super().fit(X, y, sample_weight=sample_weight) + self.data_type_ = type(X) + return self + + X, y = datasets.make_regression( + n_samples=15, n_features=50, n_targets=1, random_state=42 + ) + + X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) + + X_train_sparse = sparse_container(X_train) + X_test_sparse = sparse_container(X_test) + + # Trained on sparse format + sparse_regressor = AdaBoostRegressor(estimator=CustomSVR(), random_state=1).fit( + X_train_sparse, y_train + ) + + # Trained on dense format + dense_regressor = AdaBoostRegressor(estimator=CustomSVR(), random_state=1).fit( + X_train, y_train + ) + + # predict + sparse_regr_results = sparse_regressor.predict(X_test_sparse) + dense_regr_results = dense_regressor.predict(X_test) + assert_array_almost_equal(sparse_regr_results, dense_regr_results) + + # staged_predict + sparse_regr_results = sparse_regressor.staged_predict(X_test_sparse) + dense_regr_results = dense_regressor.staged_predict(X_test) + for sparse_regr_res, dense_regr_res in zip(sparse_regr_results, dense_regr_results): + assert_array_almost_equal(sparse_regr_res, dense_regr_res) + + types = [i.data_type_ for i in sparse_regressor.estimators_] + + assert all([t == expected_internal_type for t in types]) + + +def test_sample_weight_adaboost_regressor(): + """ + AdaBoostRegressor should work without sample_weights in the base estimator + The random weighted sampling is done internally in the _boost method in + AdaBoostRegressor. 
+ """ + + class DummyEstimator(BaseEstimator): + def fit(self, X, y): + pass + + def predict(self, X): + return np.zeros(X.shape[0]) + + boost = AdaBoostRegressor(DummyEstimator(), n_estimators=3) + boost.fit(X, y_regr) + assert len(boost.estimator_weights_) == len(boost.estimator_errors_) + + +def test_multidimensional_X(): + """ + Check that the AdaBoost estimators can work with n-dimensional + data matrix + """ + rng = np.random.RandomState(0) + + X = rng.randn(51, 3, 3) + yc = rng.choice([0, 1], 51) + yr = rng.randn(51) + + boost = AdaBoostClassifier(DummyClassifier(strategy="most_frequent")) + boost.fit(X, yc) + boost.predict(X) + boost.predict_proba(X) + + boost = AdaBoostRegressor(DummyRegressor()) + boost.fit(X, yr) + boost.predict(X) + + +def test_adaboostclassifier_without_sample_weight(): + X, y = iris.data, iris.target + estimator = NoSampleWeightWrapper(DummyClassifier()) + clf = AdaBoostClassifier(estimator=estimator) + err_msg = "{} doesn't support sample_weight".format(estimator.__class__.__name__) + with pytest.raises(ValueError, match=err_msg): + clf.fit(X, y) + + +def test_adaboostregressor_sample_weight(): + # check that giving weight will have an influence on the error computed + # for a weak learner + rng = np.random.RandomState(42) + X = np.linspace(0, 100, num=1000) + y = (0.8 * X + 0.2) + (rng.rand(X.shape[0]) * 0.0001) + X = X.reshape(-1, 1) + + # add an arbitrary outlier + X[-1] *= 10 + y[-1] = 10000 + + # random_state=0 ensure that the underlying bootstrap will use the outlier + regr_no_outlier = AdaBoostRegressor( + estimator=LinearRegression(), n_estimators=1, random_state=0 + ) + regr_with_weight = clone(regr_no_outlier) + regr_with_outlier = clone(regr_no_outlier) + + # fit 3 models: + # - a model containing the outlier + # - a model without the outlier + # - a model containing the outlier but with a null sample-weight + regr_with_outlier.fit(X, y) + regr_no_outlier.fit(X[:-1], y[:-1]) + sample_weight = np.ones_like(y) + 
sample_weight[-1] = 0 + regr_with_weight.fit(X, y, sample_weight=sample_weight) + + score_with_outlier = regr_with_outlier.score(X[:-1], y[:-1]) + score_no_outlier = regr_no_outlier.score(X[:-1], y[:-1]) + score_with_weight = regr_with_weight.score(X[:-1], y[:-1]) + + assert score_with_outlier < score_no_outlier + assert score_with_outlier < score_with_weight + assert score_no_outlier == pytest.approx(score_with_weight) + + +def test_adaboost_consistent_predict(): + # check that predict_proba and predict give consistent results + # regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/14084 + X_train, X_test, y_train, y_test = train_test_split( + *datasets.load_digits(return_X_y=True), random_state=42 + ) + model = AdaBoostClassifier(random_state=42) + model.fit(X_train, y_train) + + assert_array_equal( + np.argmax(model.predict_proba(X_test), axis=1), model.predict(X_test) + ) + + +@pytest.mark.parametrize( + "model, X, y", + [ + (AdaBoostClassifier(), iris.data, iris.target), + (AdaBoostRegressor(), diabetes.data, diabetes.target), + ], +) +def test_adaboost_negative_weight_error(model, X, y): + sample_weight = np.ones_like(y) + sample_weight[-1] = -10 + + err_msg = "Negative values in data passed to `sample_weight`" + with pytest.raises(ValueError, match=err_msg): + model.fit(X, y, sample_weight=sample_weight) + + +def test_adaboost_numerically_stable_feature_importance_with_small_weights(): + """Check that we don't create NaN feature importance with numerically + instable inputs. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/20320 + """ + rng = np.random.RandomState(42) + X = rng.normal(size=(1000, 10)) + y = rng.choice([0, 1], size=1000) + sample_weight = np.ones_like(y) * 1e-263 + tree = DecisionTreeClassifier(max_depth=10, random_state=12) + ada_model = AdaBoostClassifier(estimator=tree, n_estimators=20, random_state=12) + ada_model.fit(X, y, sample_weight=sample_weight) + assert np.isnan(ada_model.feature_importances_).sum() == 0 + + +def test_adaboost_decision_function(global_random_seed): + """Check that the decision function respects the symmetric constraint for weak + learners. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/issues/26520 + """ + n_classes = 3 + X, y = datasets.make_classification( + n_classes=n_classes, n_clusters_per_class=1, random_state=global_random_seed + ) + clf = AdaBoostClassifier(n_estimators=1, random_state=global_random_seed).fit(X, y) + + y_score = clf.decision_function(X) + assert_allclose(y_score.sum(axis=1), 0, atol=1e-8) + + # With a single learner, we expect to have a decision function in + # {1, - 1 / (n_classes - 1)}. + assert set(np.unique(y_score)) == {1, -1 / (n_classes - 1)} + + # We can assert the same for staged_decision_function since we have a single learner + for y_score in clf.staged_decision_function(X): + assert_allclose(y_score.sum(axis=1), 0, atol=1e-8) + + # With a single learner, we expect to have a decision function in + # {1, - 1 / (n_classes - 1)}. 
+ assert set(np.unique(y_score)) == {1, -1 / (n_classes - 1)} + + clf.set_params(n_estimators=5).fit(X, y) + + y_score = clf.decision_function(X) + assert_allclose(y_score.sum(axis=1), 0, atol=1e-8) + + for y_score in clf.staged_decision_function(X): + assert_allclose(y_score.sum(axis=1), 0, atol=1e-8) + + +# TODO(1.8): remove +def test_deprecated_algorithm(): + adaboost_clf = AdaBoostClassifier(n_estimators=1, algorithm="SAMME") + with pytest.warns(FutureWarning, match="The parameter 'algorithm' is deprecated"): + adaboost_clf.fit(X, y_class) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8bb2b5dc575e9916cd6d713f0054c257b8bbf32a --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/__init__.py @@ -0,0 +1,16 @@ +"""Tools for model inspection.""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +from ._partial_dependence import partial_dependence +from ._permutation_importance import permutation_importance +from ._plot.decision_boundary import DecisionBoundaryDisplay +from ._plot.partial_dependence import PartialDependenceDisplay + +__all__ = [ + "partial_dependence", + "permutation_importance", + "PartialDependenceDisplay", + "DecisionBoundaryDisplay", +] diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1cc80e6206458b9b0a51b220073203e34efc07f9 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/__pycache__/_partial_dependence.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/__pycache__/_partial_dependence.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed3eb92ae6171528e2f668f09bd6343635ca0f3f Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/__pycache__/_partial_dependence.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/__pycache__/_pd_utils.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/__pycache__/_pd_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f45287fc3d8b15c88bbf2a12e6e21d265d13f0be Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/__pycache__/_pd_utils.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/__pycache__/_permutation_importance.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/__pycache__/_permutation_importance.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d82558e0c5887ad2b45dc4cd259dc7f42396c5fd Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/__pycache__/_permutation_importance.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_partial_dependence.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_partial_dependence.py new file mode 100644 index 0000000000000000000000000000000000000000..b5b893c036c62986683edda2baa4e2b949fa25ba --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_partial_dependence.py @@ -0,0 +1,695 @@ +"""Partial dependence plots for regression and classification models.""" + +# Authors: The scikit-learn developers +# 
SPDX-License-Identifier: BSD-3-Clause + +from collections.abc import Iterable + +import numpy as np +from scipy import sparse +from scipy.stats.mstats import mquantiles + +from ..base import is_classifier, is_regressor +from ..ensemble import RandomForestRegressor +from ..ensemble._gb import BaseGradientBoosting +from ..ensemble._hist_gradient_boosting.gradient_boosting import ( + BaseHistGradientBoosting, +) +from ..tree import DecisionTreeRegressor +from ..utils import Bunch, _safe_indexing, check_array +from ..utils._indexing import _determine_key_type, _get_column_indices, _safe_assign +from ..utils._optional_dependencies import check_matplotlib_support # noqa +from ..utils._param_validation import ( + HasMethods, + Integral, + Interval, + StrOptions, + validate_params, +) +from ..utils._response import _get_response_values +from ..utils.extmath import cartesian +from ..utils.validation import _check_sample_weight, check_is_fitted +from ._pd_utils import _check_feature_names, _get_feature_index + +__all__ = [ + "partial_dependence", +] + + +def _grid_from_X(X, percentiles, is_categorical, grid_resolution): + """Generate a grid of points based on the percentiles of X. + + The grid is a cartesian product between the columns of ``values``. The + ith column of ``values`` consists in ``grid_resolution`` equally-spaced + points between the percentiles of the jth column of X. + + If ``grid_resolution`` is bigger than the number of unique values in the + j-th column of X or if the feature is a categorical feature (by inspecting + `is_categorical`) , then those unique values will be used instead. + + Parameters + ---------- + X : array-like of shape (n_samples, n_target_features) + The data. + + percentiles : tuple of float + The percentiles which are used to construct the extreme values of + the grid. Must be in [0, 1]. + + is_categorical : list of bool + For each feature, tells whether it is categorical or not. 
If a feature + is categorical, then the values used will be the unique ones + (i.e. categories) instead of the percentiles. + + grid_resolution : int + The number of equally spaced points to be placed on the grid for each + feature. + + Returns + ------- + grid : ndarray of shape (n_points, n_target_features) + A value for each feature at each point in the grid. ``n_points`` is + always ``<= grid_resolution ** X.shape[1]``. + + values : list of 1d ndarrays + The values with which the grid has been created. The size of each + array ``values[j]`` is either ``grid_resolution``, or the number of + unique values in ``X[:, j]``, whichever is smaller. + """ + if not isinstance(percentiles, Iterable) or len(percentiles) != 2: + raise ValueError("'percentiles' must be a sequence of 2 elements.") + if not all(0 <= x <= 1 for x in percentiles): + raise ValueError("'percentiles' values must be in [0, 1].") + if percentiles[0] >= percentiles[1]: + raise ValueError("percentiles[0] must be strictly less than percentiles[1].") + + if grid_resolution <= 1: + raise ValueError("'grid_resolution' must be strictly greater than 1.") + + values = [] + # TODO: we should handle missing values (i.e. `np.nan`) specifically and store them + # in a different Bunch attribute. + for feature, is_cat in enumerate(is_categorical): + try: + uniques = np.unique(_safe_indexing(X, feature, axis=1)) + except TypeError as exc: + # `np.unique` will fail in the presence of `np.nan` and `str` categories + # due to sorting. Temporary, we reraise an error explaining the problem. + raise ValueError( + f"The column #{feature} contains mixed data types. Finding unique " + "categories fail due to sorting. It usually means that the column " + "contains `np.nan` values together with `str` categories. Such use " + "case is not yet supported in scikit-learn." 
+ ) from exc + if is_cat or uniques.shape[0] < grid_resolution: + # Use the unique values either because: + # - feature has low resolution use unique values + # - feature is categorical + axis = uniques + else: + # create axis based on percentiles and grid resolution + emp_percentiles = mquantiles( + _safe_indexing(X, feature, axis=1), prob=percentiles, axis=0 + ) + if np.allclose(emp_percentiles[0], emp_percentiles[1]): + raise ValueError( + "percentiles are too close to each other, " + "unable to build the grid. Please choose percentiles " + "that are further apart." + ) + axis = np.linspace( + emp_percentiles[0], + emp_percentiles[1], + num=grid_resolution, + endpoint=True, + ) + values.append(axis) + + return cartesian(values), values + + +def _partial_dependence_recursion(est, grid, features): + """Calculate partial dependence via the recursion method. + + The recursion method is in particular enabled for tree-based estimators. + + For each `grid` value, a weighted tree traversal is performed: if a split node + involves an input feature of interest, the corresponding left or right branch + is followed; otherwise both branches are followed, each branch being weighted + by the fraction of training samples that entered that branch. Finally, the + partial dependence is given by a weighted average of all the visited leaves + values. + + This method is more efficient in terms of speed than the `'brute'` method + (:func:`~sklearn.inspection._partial_dependence._partial_dependence_brute`). + However, here, the partial dependence computation is done explicitly with the + `X` used during training of `est`. + + Parameters + ---------- + est : BaseEstimator + A fitted estimator object implementing :term:`predict` or + :term:`decision_function`. Multioutput-multiclass classifiers are not + supported. 
Note that `'recursion'` is only supported for some tree-based + estimators (namely + :class:`~sklearn.ensemble.GradientBoostingClassifier`, + :class:`~sklearn.ensemble.GradientBoostingRegressor`, + :class:`~sklearn.ensemble.HistGradientBoostingClassifier`, + :class:`~sklearn.ensemble.HistGradientBoostingRegressor`, + :class:`~sklearn.tree.DecisionTreeRegressor`, + :class:`~sklearn.ensemble.RandomForestRegressor`, + ). + + grid : array-like of shape (n_points, n_target_features) + The grid of feature values for which the partial dependence is calculated. + Note that `n_points` is the number of points in the grid and `n_target_features` + is the number of features you are doing partial dependence at. + + features : array-like of {int, str} + The feature (e.g. `[0]`) or pair of interacting features + (e.g. `[(0, 1)]`) for which the partial dependency should be computed. + + Returns + ------- + averaged_predictions : array-like of shape (n_targets, n_points) + The averaged predictions for the given `grid` of features values. + Note that `n_targets` is the number of targets (e.g. 1 for binary + classification, `n_tasks` for multi-output regression, and `n_classes` for + multiclass classification) and `n_points` is the number of points in the `grid`. + """ + averaged_predictions = est._compute_partial_dependence_recursion(grid, features) + if averaged_predictions.ndim == 1: + # reshape to (1, n_points) for consistency with + # _partial_dependence_brute + averaged_predictions = averaged_predictions.reshape(1, -1) + + return averaged_predictions + + +def _partial_dependence_brute( + est, grid, features, X, response_method, sample_weight=None +): + """Calculate partial dependence via the brute force method. + + The brute method explicitly averages the predictions of an estimator over a + grid of feature values. + + For each `grid` value, all the samples from `X` have their variables of + interest replaced by that specific `grid` value. 
The predictions are then made + and averaged across the samples. + + This method is slower than the `'recursion'` + (:func:`~sklearn.inspection._partial_dependence._partial_dependence_recursion`) + version for estimators with this second option. However, with the `'brute'` + force method, the average will be done with the given `X` and not the `X` + used during training, as it is done in the `'recursion'` version. Therefore + the average can always accept `sample_weight` (even when the estimator was + fitted without). + + Parameters + ---------- + est : BaseEstimator + A fitted estimator object implementing :term:`predict`, + :term:`predict_proba`, or :term:`decision_function`. + Multioutput-multiclass classifiers are not supported. + + grid : array-like of shape (n_points, n_target_features) + The grid of feature values for which the partial dependence is calculated. + Note that `n_points` is the number of points in the grid and `n_target_features` + is the number of features you are doing partial dependence at. + + features : array-like of {int, str} + The feature (e.g. `[0]`) or pair of interacting features + (e.g. `[(0, 1)]`) for which the partial dependency should be computed. + + X : array-like of shape (n_samples, n_features) + `X` is used to generate values for the complement features. That is, for + each value in `grid`, the method will average the prediction of each + sample from `X` having that grid value for `features`. + + response_method : {'auto', 'predict_proba', 'decision_function'}, \ + default='auto' + Specifies whether to use :term:`predict_proba` or + :term:`decision_function` as the target response. For regressors + this parameter is ignored and the response is always the output of + :term:`predict`. By default, :term:`predict_proba` is tried first + and we revert to :term:`decision_function` if it doesn't exist. 
+ + sample_weight : array-like of shape (n_samples,), default=None + Sample weights are used to calculate weighted means when averaging the + model output. If `None`, then samples are equally weighted. Note that + `sample_weight` does not change the individual predictions. + + Returns + ------- + averaged_predictions : array-like of shape (n_targets, n_points) + The averaged predictions for the given `grid` of features values. + Note that `n_targets` is the number of targets (e.g. 1 for binary + classification, `n_tasks` for multi-output regression, and `n_classes` for + multiclass classification) and `n_points` is the number of points in the `grid`. + + predictions : array-like + The predictions for the given `grid` of features values over the samples + from `X`. For non-multioutput regression and binary classification the + shape is `(n_instances, n_points)` and for multi-output regression and + multiclass classification the shape is `(n_targets, n_instances, n_points)`, + where `n_targets` is the number of targets (`n_tasks` for multi-output + regression, and `n_classes` for multiclass classification), `n_instances` + is the number of instances in `X`, and `n_points` is the number of points + in the `grid`. 
+ """ + predictions = [] + averaged_predictions = [] + + if response_method == "auto": + response_method = ( + "predict" if is_regressor(est) else ["predict_proba", "decision_function"] + ) + + X_eval = X.copy() + for new_values in grid: + for i, variable in enumerate(features): + _safe_assign(X_eval, new_values[i], column_indexer=variable) + + # Note: predictions is of shape + # (n_points,) for non-multioutput regressors + # (n_points, n_tasks) for multioutput regressors + # (n_points, 1) for the regressors in cross_decomposition (I think) + # (n_points, 2) for binary classification + # (n_points, n_classes) for multiclass classification + pred, _ = _get_response_values(est, X_eval, response_method=response_method) + + predictions.append(pred) + # average over samples + averaged_predictions.append(np.average(pred, axis=0, weights=sample_weight)) + + n_samples = X.shape[0] + + # reshape to (n_targets, n_instances, n_points) where n_targets is: + # - 1 for non-multioutput regression and binary classification (shape is + # already correct in those cases) + # - n_tasks for multi-output regression + # - n_classes for multiclass classification. + predictions = np.array(predictions).T + if is_regressor(est) and predictions.ndim == 2: + # non-multioutput regression, shape is (n_instances, n_points,) + predictions = predictions.reshape(n_samples, -1) + elif is_classifier(est) and predictions.shape[0] == 2: + # Binary classification, shape is (2, n_instances, n_points). + # we output the effect of **positive** class + predictions = predictions[1] + predictions = predictions.reshape(n_samples, -1) + + # reshape averaged_predictions to (n_targets, n_points) where n_targets is: + # - 1 for non-multioutput regression and binary classification (shape is + # already correct in those cases) + # - n_tasks for multi-output regression + # - n_classes for multiclass classification. 
+ averaged_predictions = np.array(averaged_predictions).T + if is_regressor(est) and averaged_predictions.ndim == 1: + # non-multioutput regression, shape is (n_points,) + averaged_predictions = averaged_predictions.reshape(1, -1) + elif is_classifier(est) and averaged_predictions.shape[0] == 2: + # Binary classification, shape is (2, n_points). + # we output the effect of **positive** class + averaged_predictions = averaged_predictions[1] + averaged_predictions = averaged_predictions.reshape(1, -1) + + return averaged_predictions, predictions + + +@validate_params( + { + "estimator": [ + HasMethods(["fit", "predict"]), + HasMethods(["fit", "predict_proba"]), + HasMethods(["fit", "decision_function"]), + ], + "X": ["array-like", "sparse matrix"], + "features": ["array-like", Integral, str], + "sample_weight": ["array-like", None], + "categorical_features": ["array-like", None], + "feature_names": ["array-like", None], + "response_method": [StrOptions({"auto", "predict_proba", "decision_function"})], + "percentiles": [tuple], + "grid_resolution": [Interval(Integral, 1, None, closed="left")], + "method": [StrOptions({"auto", "recursion", "brute"})], + "kind": [StrOptions({"average", "individual", "both"})], + }, + prefer_skip_nested_validation=True, +) +def partial_dependence( + estimator, + X, + features, + *, + sample_weight=None, + categorical_features=None, + feature_names=None, + response_method="auto", + percentiles=(0.05, 0.95), + grid_resolution=100, + method="auto", + kind="average", +): + """Partial dependence of ``features``. + + Partial dependence of a feature (or a set of features) corresponds to + the average response of an estimator for each possible value of the + feature. + + Read more in the :ref:`User Guide `. + + .. 
warning:: + + For :class:`~sklearn.ensemble.GradientBoostingClassifier` and + :class:`~sklearn.ensemble.GradientBoostingRegressor`, the + `'recursion'` method (used by default) will not account for the `init` + predictor of the boosting process. In practice, this will produce + the same values as `'brute'` up to a constant offset in the target + response, provided that `init` is a constant estimator (which is the + default). However, if `init` is not a constant estimator, the + partial dependence values are incorrect for `'recursion'` because the + offset will be sample-dependent. It is preferable to use the `'brute'` + method. Note that this only applies to + :class:`~sklearn.ensemble.GradientBoostingClassifier` and + :class:`~sklearn.ensemble.GradientBoostingRegressor`, not to + :class:`~sklearn.ensemble.HistGradientBoostingClassifier` and + :class:`~sklearn.ensemble.HistGradientBoostingRegressor`. + + Parameters + ---------- + estimator : BaseEstimator + A fitted estimator object implementing :term:`predict`, + :term:`predict_proba`, or :term:`decision_function`. + Multioutput-multiclass classifiers are not supported. + + X : {array-like, sparse matrix or dataframe} of shape (n_samples, n_features) + ``X`` is used to generate a grid of values for the target + ``features`` (where the partial dependence will be evaluated), and + also to generate values for the complement features when the + `method` is 'brute'. + + features : array-like of {int, str, bool} or int or str + The feature (e.g. `[0]`) or pair of interacting features + (e.g. `[(0, 1)]`) for which the partial dependency should be computed. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights are used to calculate weighted means when averaging the + model output. If `None`, then samples are equally weighted. If + `sample_weight` is not `None`, then `method` will be set to `'brute'`. + Note that `sample_weight` is ignored for `kind='individual'`. + + .. 
versionadded:: 1.3 + + categorical_features : array-like of shape (n_features,) or shape \ + (n_categorical_features,), dtype={bool, int, str}, default=None + Indicates the categorical features. + + - `None`: no feature will be considered categorical; + - boolean array-like: boolean mask of shape `(n_features,)` + indicating which features are categorical. Thus, this array has + the same shape has `X.shape[1]`; + - integer or string array-like: integer indices or strings + indicating categorical features. + + .. versionadded:: 1.2 + + feature_names : array-like of shape (n_features,), dtype=str, default=None + Name of each feature; `feature_names[i]` holds the name of the feature + with index `i`. + By default, the name of the feature corresponds to their numerical + index for NumPy array and their column name for pandas dataframe. + + .. versionadded:: 1.2 + + response_method : {'auto', 'predict_proba', 'decision_function'}, \ + default='auto' + Specifies whether to use :term:`predict_proba` or + :term:`decision_function` as the target response. For regressors + this parameter is ignored and the response is always the output of + :term:`predict`. By default, :term:`predict_proba` is tried first + and we revert to :term:`decision_function` if it doesn't exist. If + ``method`` is 'recursion', the response is always the output of + :term:`decision_function`. + + percentiles : tuple of float, default=(0.05, 0.95) + The lower and upper percentile used to create the extreme values + for the grid. Must be in [0, 1]. + + grid_resolution : int, default=100 + The number of equally spaced points on the grid, for each target + feature. 
+ + method : {'auto', 'recursion', 'brute'}, default='auto' + The method used to calculate the averaged predictions: + + - `'recursion'` is only supported for some tree-based estimators + (namely + :class:`~sklearn.ensemble.GradientBoostingClassifier`, + :class:`~sklearn.ensemble.GradientBoostingRegressor`, + :class:`~sklearn.ensemble.HistGradientBoostingClassifier`, + :class:`~sklearn.ensemble.HistGradientBoostingRegressor`, + :class:`~sklearn.tree.DecisionTreeRegressor`, + :class:`~sklearn.ensemble.RandomForestRegressor`, + ) when `kind='average'`. + This is more efficient in terms of speed. + With this method, the target response of a + classifier is always the decision function, not the predicted + probabilities. Since the `'recursion'` method implicitly computes + the average of the Individual Conditional Expectation (ICE) by + design, it is not compatible with ICE and thus `kind` must be + `'average'`. + + - `'brute'` is supported for any estimator, but is more + computationally intensive. + + - `'auto'`: the `'recursion'` is used for estimators that support it, + and `'brute'` is used otherwise. If `sample_weight` is not `None`, + then `'brute'` is used regardless of the estimator. + + Please see :ref:`this note ` for + differences between the `'brute'` and `'recursion'` method. + + kind : {'average', 'individual', 'both'}, default='average' + Whether to return the partial dependence averaged across all the + samples in the dataset or one value per sample or both. + See Returns below. + + Note that the fast `method='recursion'` option is only available for + `kind='average'` and `sample_weights=None`. Computing individual + dependencies and doing weighted averages requires using the slower + `method='brute'`. + + .. versionadded:: 0.24 + + Returns + ------- + predictions : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + + individual : ndarray of shape (n_outputs, n_instances, \ + len(values[0]), len(values[1]), ...) 
+ The predictions for all the points in the grid for all + samples in X. This is also known as Individual + Conditional Expectation (ICE). + Only available when `kind='individual'` or `kind='both'`. + + average : ndarray of shape (n_outputs, len(values[0]), \ + len(values[1]), ...) + The predictions for all the points in the grid, averaged + over all samples in X (or over the training data if + `method` is 'recursion'). + Only available when `kind='average'` or `kind='both'`. + + grid_values : seq of 1d ndarrays + The values with which the grid has been created. The generated + grid is a cartesian product of the arrays in `grid_values` where + `len(grid_values) == len(features)`. The size of each array + `grid_values[j]` is either `grid_resolution`, or the number of + unique values in `X[:, j]`, whichever is smaller. + + .. versionadded:: 1.3 + + `n_outputs` corresponds to the number of classes in a multi-class + setting, or to the number of tasks for multi-output regression. + For classical regression and binary classification `n_outputs==1`. + `n_values_feature_j` corresponds to the size `grid_values[j]`. + + See Also + -------- + PartialDependenceDisplay.from_estimator : Plot Partial Dependence. + PartialDependenceDisplay : Partial Dependence visualization. + + Examples + -------- + >>> X = [[0, 0, 2], [1, 0, 0]] + >>> y = [0, 1] + >>> from sklearn.ensemble import GradientBoostingClassifier + >>> gb = GradientBoostingClassifier(random_state=0).fit(X, y) + >>> partial_dependence(gb, features=[0], X=X, percentiles=(0, 1), + ... 
grid_resolution=2) # doctest: +SKIP + (array([[-4.52..., 4.52...]]), [array([ 0., 1.])]) + """ + check_is_fitted(estimator) + + if not (is_classifier(estimator) or is_regressor(estimator)): + raise ValueError("'estimator' must be a fitted regressor or classifier.") + + if is_classifier(estimator) and isinstance(estimator.classes_[0], np.ndarray): + raise ValueError("Multiclass-multioutput estimators are not supported") + + # Use check_array only on lists and other non-array-likes / sparse. Do not + # convert DataFrame into a NumPy array. + if not (hasattr(X, "__array__") or sparse.issparse(X)): + X = check_array(X, ensure_all_finite="allow-nan", dtype=object) + + if is_regressor(estimator) and response_method != "auto": + raise ValueError( + "The response_method parameter is ignored for regressors and " + "must be 'auto'." + ) + + if kind != "average": + if method == "recursion": + raise ValueError( + "The 'recursion' method only applies when 'kind' is set to 'average'" + ) + method = "brute" + + if method == "recursion" and sample_weight is not None: + raise ValueError( + "The 'recursion' method can only be applied when sample_weight is None." 
+ ) + + if method == "auto": + if sample_weight is not None: + method = "brute" + elif isinstance(estimator, BaseGradientBoosting) and estimator.init is None: + method = "recursion" + elif isinstance( + estimator, + (BaseHistGradientBoosting, DecisionTreeRegressor, RandomForestRegressor), + ): + method = "recursion" + else: + method = "brute" + + if method == "recursion": + if not isinstance( + estimator, + ( + BaseGradientBoosting, + BaseHistGradientBoosting, + DecisionTreeRegressor, + RandomForestRegressor, + ), + ): + supported_classes_recursion = ( + "GradientBoostingClassifier", + "GradientBoostingRegressor", + "HistGradientBoostingClassifier", + "HistGradientBoostingRegressor", + "HistGradientBoostingRegressor", + "DecisionTreeRegressor", + "RandomForestRegressor", + ) + raise ValueError( + "Only the following estimators support the 'recursion' " + "method: {}. Try using method='brute'.".format( + ", ".join(supported_classes_recursion) + ) + ) + if response_method == "auto": + response_method = "decision_function" + + if response_method != "decision_function": + raise ValueError( + "With the 'recursion' method, the response_method must be " + "'decision_function'. Got {}.".format(response_method) + ) + + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X) + + if _determine_key_type(features, accept_slice=False) == "int": + # _get_column_indices() supports negative indexing. Here, we limit + # the indexing to be positive. 
The upper bound will be checked + # by _get_column_indices() + if np.any(np.less(features, 0)): + raise ValueError("all features must be in [0, {}]".format(X.shape[1] - 1)) + + features_indices = np.asarray( + _get_column_indices(X, features), dtype=np.intp, order="C" + ).ravel() + + feature_names = _check_feature_names(X, feature_names) + + n_features = X.shape[1] + if categorical_features is None: + is_categorical = [False] * len(features_indices) + else: + categorical_features = np.asarray(categorical_features) + if categorical_features.dtype.kind == "b": + # categorical features provided as a list of boolean + if categorical_features.size != n_features: + raise ValueError( + "When `categorical_features` is a boolean array-like, " + "the array should be of shape (n_features,). Got " + f"{categorical_features.size} elements while `X` contains " + f"{n_features} features." + ) + is_categorical = [categorical_features[idx] for idx in features_indices] + elif categorical_features.dtype.kind in ("i", "O", "U"): + # categorical features provided as a list of indices or feature names + categorical_features_idx = [ + _get_feature_index(cat, feature_names=feature_names) + for cat in categorical_features + ] + is_categorical = [ + idx in categorical_features_idx for idx in features_indices + ] + else: + raise ValueError( + "Expected `categorical_features` to be an array-like of boolean," + f" integer, or string. Got {categorical_features.dtype} instead." + ) + + grid, values = _grid_from_X( + _safe_indexing(X, features_indices, axis=1), + percentiles, + is_categorical, + grid_resolution, + ) + + if method == "brute": + averaged_predictions, predictions = _partial_dependence_brute( + estimator, grid, features_indices, X, response_method, sample_weight + ) + + # reshape predictions to + # (n_outputs, n_instances, n_values_feature_0, n_values_feature_1, ...) 
+ predictions = predictions.reshape( + -1, X.shape[0], *[val.shape[0] for val in values] + ) + else: + averaged_predictions = _partial_dependence_recursion( + estimator, grid, features_indices + ) + + # reshape averaged_predictions to + # (n_outputs, n_values_feature_0, n_values_feature_1, ...) + averaged_predictions = averaged_predictions.reshape( + -1, *[val.shape[0] for val in values] + ) + pdp_results = Bunch(grid_values=values) + + if kind == "average": + pdp_results["average"] = averaged_predictions + elif kind == "individual": + pdp_results["individual"] = predictions + else: # kind='both' + pdp_results["average"] = averaged_predictions + pdp_results["individual"] = predictions + + return pdp_results diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_pd_utils.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_pd_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a48ba4d9a4490df59b8503f0b8768c7a986537a9 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_pd_utils.py @@ -0,0 +1,68 @@ +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + + +def _check_feature_names(X, feature_names=None): + """Check feature names. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Input data. + + feature_names : None or array-like of shape (n_names,), dtype=str + Feature names to check or `None`. + + Returns + ------- + feature_names : list of str + Feature names validated. If `feature_names` is `None`, then a list of + feature names is provided, i.e. the column names of a pandas dataframe + or a generic list of feature names (e.g. `["x0", "x1", ...]`) for a + NumPy array. 
+ """ + if feature_names is None: + if hasattr(X, "columns") and hasattr(X.columns, "tolist"): + # get the column names for a pandas dataframe + feature_names = X.columns.tolist() + else: + # define a list of numbered indices for a numpy array + feature_names = [f"x{i}" for i in range(X.shape[1])] + elif hasattr(feature_names, "tolist"): + # convert numpy array or pandas index to a list + feature_names = feature_names.tolist() + if len(set(feature_names)) != len(feature_names): + raise ValueError("feature_names should not contain duplicates.") + + return feature_names + + +def _get_feature_index(fx, feature_names=None): + """Get feature index. + + Parameters + ---------- + fx : int or str + Feature index or name. + + feature_names : list of str, default=None + All feature names from which to search the indices. + + Returns + ------- + idx : int + Feature index. + """ + if isinstance(fx, str): + if feature_names is None: + raise ValueError( + f"Cannot plot partial dependence for feature {fx!r} since " + "the list of feature names was not provided, neither as " + "column names of a pandas data-frame nor via the feature_names " + "parameter." 
+ ) + try: + return feature_names.index(fx) + except ValueError as e: + raise ValueError(f"Feature {fx!r} not in feature_names") from e + return fx diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_permutation_importance.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_permutation_importance.py new file mode 100644 index 0000000000000000000000000000000000000000..74000aa9e8556a473649035ebacf3deeecf1b0d8 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_permutation_importance.py @@ -0,0 +1,312 @@ +"""Permutation importance for estimators.""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import numbers + +import numpy as np + +from ..ensemble._bagging import _generate_indices +from ..metrics import check_scoring, get_scorer_names +from ..model_selection._validation import _aggregate_score_dicts +from ..utils import Bunch, _safe_indexing, check_array, check_random_state +from ..utils._param_validation import ( + HasMethods, + Integral, + Interval, + RealNotInt, + StrOptions, + validate_params, +) +from ..utils.parallel import Parallel, delayed + + +def _weights_scorer(scorer, estimator, X, y, sample_weight): + if sample_weight is not None: + return scorer(estimator, X, y, sample_weight=sample_weight) + return scorer(estimator, X, y) + + +def _calculate_permutation_scores( + estimator, + X, + y, + sample_weight, + col_idx, + random_state, + n_repeats, + scorer, + max_samples, +): + """Calculate score when `col_idx` is permuted.""" + random_state = check_random_state(random_state) + + # Work on a copy of X to ensure thread-safety in case of threading based + # parallelism. Furthermore, making a copy is also useful when the joblib + # backend is 'loky' (default) or the old 'multiprocessing': in those cases, + # if X is large it will be automatically be backed by a readonly memory map + # (memmap). 
X.copy() on the other hand is always guaranteed to return a + # writable data-structure whose columns can be shuffled inplace. + if max_samples < X.shape[0]: + row_indices = _generate_indices( + random_state=random_state, + bootstrap=False, + n_population=X.shape[0], + n_samples=max_samples, + ) + X_permuted = _safe_indexing(X, row_indices, axis=0) + y = _safe_indexing(y, row_indices, axis=0) + if sample_weight is not None: + sample_weight = _safe_indexing(sample_weight, row_indices, axis=0) + else: + X_permuted = X.copy() + + scores = [] + shuffling_idx = np.arange(X_permuted.shape[0]) + for _ in range(n_repeats): + random_state.shuffle(shuffling_idx) + if hasattr(X_permuted, "iloc"): + col = X_permuted.iloc[shuffling_idx, col_idx] + col.index = X_permuted.index + X_permuted[X_permuted.columns[col_idx]] = col + else: + X_permuted[:, col_idx] = X_permuted[shuffling_idx, col_idx] + scores.append(_weights_scorer(scorer, estimator, X_permuted, y, sample_weight)) + + if isinstance(scores[0], dict): + scores = _aggregate_score_dicts(scores) + else: + scores = np.array(scores) + + return scores + + +def _create_importances_bunch(baseline_score, permuted_score): + """Compute the importances as the decrease in score. + + Parameters + ---------- + baseline_score : ndarray of shape (n_features,) + The baseline score without permutation. + permuted_score : ndarray of shape (n_features, n_repeats) + The permuted scores for the `n` repetitions. + + Returns + ------- + importances : :class:`~sklearn.utils.Bunch` + Dictionary-like object, with the following attributes. + importances_mean : ndarray, shape (n_features, ) + Mean of feature importance over `n_repeats`. + importances_std : ndarray, shape (n_features, ) + Standard deviation over `n_repeats`. + importances : ndarray, shape (n_features, n_repeats) + Raw permutation importance scores. 
+ """ + importances = baseline_score - permuted_score + return Bunch( + importances_mean=np.mean(importances, axis=1), + importances_std=np.std(importances, axis=1), + importances=importances, + ) + + +@validate_params( + { + "estimator": [HasMethods(["fit"])], + "X": ["array-like"], + "y": ["array-like", None], + "scoring": [ + StrOptions(set(get_scorer_names())), + callable, + list, + tuple, + dict, + None, + ], + "n_repeats": [Interval(Integral, 1, None, closed="left")], + "n_jobs": [Integral, None], + "random_state": ["random_state"], + "sample_weight": ["array-like", None], + "max_samples": [ + Interval(Integral, 1, None, closed="left"), + Interval(RealNotInt, 0, 1, closed="right"), + ], + }, + prefer_skip_nested_validation=True, +) +def permutation_importance( + estimator, + X, + y, + *, + scoring=None, + n_repeats=5, + n_jobs=None, + random_state=None, + sample_weight=None, + max_samples=1.0, +): + """Permutation importance for feature evaluation [BRE]_. + + The :term:`estimator` is required to be a fitted estimator. `X` can be the + data set used to train the estimator or a hold-out set. The permutation + importance of a feature is calculated as follows. First, a baseline metric, + defined by :term:`scoring`, is evaluated on a (potentially different) + dataset defined by the `X`. Next, a feature column from the validation set + is permuted and the metric is evaluated again. The permutation importance + is defined to be the difference between the baseline metric and metric from + permutating the feature column. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : object + An estimator that has already been :term:`fitted` and is compatible + with :term:`scorer`. + + X : ndarray or DataFrame, shape (n_samples, n_features) + Data on which permutation importance will be computed. + + y : array-like or None, shape (n_samples, ) or (n_samples, n_classes) + Targets for supervised or `None` for unsupervised. 
@validate_params(
    {
        "estimator": [HasMethods(["fit"])],
        "X": ["array-like"],
        "y": ["array-like", None],
        "scoring": [
            StrOptions(set(get_scorer_names())),
            callable,
            list,
            tuple,
            dict,
            None,
        ],
        "n_repeats": [Interval(Integral, 1, None, closed="left")],
        "n_jobs": [Integral, None],
        "random_state": ["random_state"],
        "sample_weight": ["array-like", None],
        "max_samples": [
            Interval(Integral, 1, None, closed="left"),
            Interval(RealNotInt, 0, 1, closed="right"),
        ],
    },
    prefer_skip_nested_validation=True,
)
def permutation_importance(
    estimator,
    X,
    y,
    *,
    scoring=None,
    n_repeats=5,
    n_jobs=None,
    random_state=None,
    sample_weight=None,
    max_samples=1.0,
):
    """Permutation importance for feature evaluation [BRE]_.

    Score the fitted `estimator` on `X`/`y`, then repeatedly shuffle one
    feature column at a time and re-score; the importance of a feature is
    the resulting decrease in score.

    Parameters
    ----------
    estimator : object
        A fitted estimator compatible with :term:`scorer`.
    X : ndarray or DataFrame of shape (n_samples, n_features)
        Data on which permutation importance will be computed.
    y : array-like of shape (n_samples,) or (n_samples, n_classes) or None
        Targets for supervised learning, or None for unsupervised.
    scoring : str, callable, list, tuple, or dict, default=None
        Scorer(s) to use; None uses the estimator's default scorer.
        Passing several metrics at once reuses predictions and is cheaper
        than calling this function once per metric.
    n_repeats : int, default=5
        Number of times each feature is permuted.
    n_jobs : int or None, default=None
        Number of parallel jobs; the work is split over feature columns.
    random_state : int, RandomState instance or None, default=None
        Controls the permutations; pass an int for reproducible results.
    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights used in scoring.
    max_samples : int or float, default=1.0
        Number (int) or fraction (float in (0, 1]) of rows drawn without
        replacement in each repeat; trades accuracy for tractability on
        large datasets.

    Returns
    -------
    result : :class:`~sklearn.utils.Bunch` or dict of such instances
        With ``importances_mean``, ``importances_std`` and the raw
        ``importances`` of shape (n_features, n_repeats). When multiple
        metrics were requested, a dict keyed by scorer name.

    References
    ----------
    .. [BRE] :doi:`L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32,
        2001. <10.1023/A:1010933404324>`
    """
    if not hasattr(X, "iloc"):
        # Only plain arrays are validated; dataframes are kept as-is so
        # column dtypes survive for downstream column shuffling.
        X = check_array(X, ensure_all_finite="allow-nan", dtype=None)

    # Draw a single seed from the RNG so every parallel call derives its own
    # independent RandomState, irrespective of whether the active joblib
    # backend shares variables (sequential/thread-based/process-based).
    rng = check_random_state(random_state)
    worker_seed = rng.randint(np.iinfo(np.int32).max + 1)

    if isinstance(max_samples, numbers.Integral):
        if max_samples > X.shape[0]:
            raise ValueError("max_samples must be <= n_samples")
    else:
        # A fraction in (0, 1] becomes an absolute row count.
        max_samples = int(max_samples * X.shape[0])

    scorer = check_scoring(estimator, scoring=scoring)
    baseline_score = _weights_scorer(scorer, estimator, X, y, sample_weight)

    permuted_scores = Parallel(n_jobs=n_jobs)(
        delayed(_calculate_permutation_scores)(
            estimator,
            X,
            y,
            sample_weight,
            feature_idx,
            worker_seed,
            n_repeats,
            scorer,
            max_samples,
        )
        for feature_idx in range(X.shape[1])
    )

    if not isinstance(baseline_score, dict):
        return _create_importances_bunch(baseline_score, np.array(permuted_scores))

    # Multi-metric scoring: build one Bunch per metric name.
    return {
        name: _create_importances_bunch(
            baseline_score[name],
            np.array([score[name] for score in permuted_scores]),
        )
        for name in baseline_score
    }
def _check_boundary_response_method(estimator, response_method, class_of_interest):
    """Resolve which response method(s) to use with the fitted estimator.

    Parameters
    ----------
    estimator : object
        Fitted estimator to check.
    response_method : {'auto', 'predict_proba', 'decision_function', 'predict'}
        Requested response. With 'auto', methods are tried in the order
        :term:`decision_function`, :term:`predict_proba`, :term:`predict`
        (multiclass falls back to :term:`predict`).
    class_of_interest : int, float, bool, str or None
        Class considered when plotting the decision. Cannot be None for a
        multiclass estimator with 'predict_proba' or 'decision_function'.

    Returns
    -------
    prediction_method : list of str or str
        The name or list of names of the response methods to use.
    """
    is_classifier_like = hasattr(estimator, "classes_")

    if is_classifier_like and _is_arraylike_not_scalar(estimator.classes_[0]):
        msg = "Multi-label and multi-output multi-class classifiers are not supported"
        raise ValueError(msg)

    if is_classifier_like and len(estimator.classes_) > 2:
        # Multiclass: a single response column must be identifiable.
        if response_method not in {"auto", "predict"} and class_of_interest is None:
            msg = (
                "Multiclass classifiers are only supported when `response_method` is "
                "'predict' or 'auto'. Else you must provide `class_of_interest` to "
                "plot the decision boundary of a specific class."
            )
            raise ValueError(msg)
        return "predict" if response_method == "auto" else response_method

    if response_method != "auto":
        return response_method

    if is_regressor(estimator):
        return "predict"
    # Binary classifier with 'auto': try methods in order of preference.
    return ["decision_function", "predict_proba", "predict"]
+ + It is recommended to use + :func:`~sklearn.inspection.DecisionBoundaryDisplay.from_estimator` + to create a :class:`DecisionBoundaryDisplay`. All parameters are stored as + attributes. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 1.1 + + Parameters + ---------- + xx0 : ndarray of shape (grid_resolution, grid_resolution) + First output of :func:`meshgrid `. + + xx1 : ndarray of shape (grid_resolution, grid_resolution) + Second output of :func:`meshgrid `. + + response : ndarray of shape (grid_resolution, grid_resolution) + Values of the response function. + + xlabel : str, default=None + Default label to place on x axis. + + ylabel : str, default=None + Default label to place on y axis. + + Attributes + ---------- + surface_ : matplotlib `QuadContourSet` or `QuadMesh` + If `plot_method` is 'contour' or 'contourf', `surface_` is a + :class:`QuadContourSet `. If + `plot_method` is 'pcolormesh', `surface_` is a + :class:`QuadMesh `. + + ax_ : matplotlib Axes + Axes with decision boundary. + + figure_ : matplotlib Figure + Figure containing the decision boundary. + + See Also + -------- + DecisionBoundaryDisplay.from_estimator : Plot decision boundary given an estimator. + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> import numpy as np + >>> from sklearn.datasets import load_iris + >>> from sklearn.inspection import DecisionBoundaryDisplay + >>> from sklearn.tree import DecisionTreeClassifier + >>> iris = load_iris() + >>> feature_1, feature_2 = np.meshgrid( + ... np.linspace(iris.data[:, 0].min(), iris.data[:, 0].max()), + ... np.linspace(iris.data[:, 1].min(), iris.data[:, 1].max()) + ... ) + >>> grid = np.vstack([feature_1.ravel(), feature_2.ravel()]).T + >>> tree = DecisionTreeClassifier().fit(iris.data[:, :2], iris.target) + >>> y_pred = np.reshape(tree.predict(grid), feature_1.shape) + >>> display = DecisionBoundaryDisplay( + ... xx0=feature_1, xx1=feature_2, response=y_pred + ... 
    def __init__(self, *, xx0, xx1, response, xlabel=None, ylabel=None):
        """Store the precomputed grid and response values.

        Parameters
        ----------
        xx0, xx1 : ndarray of shape (grid_resolution, grid_resolution)
            First and second outputs of :func:`numpy.meshgrid`.
        response : ndarray of shape (grid_resolution, grid_resolution)
            Values of the response function on the grid.
        xlabel, ylabel : str, default=None
            Default labels for the x and y axes.
        """
        # Stored untouched so `plot` can be re-invoked on different axes
        # without re-evaluating the estimator.
        self.xx0 = xx0
        self.xx1 = xx1
        self.response = response
        # Default axis labels; may be overridden per-call in `plot`.
        self.xlabel = xlabel
        self.ylabel = ylabel
+ """ + check_matplotlib_support("DecisionBoundaryDisplay.plot") + import matplotlib.pyplot as plt # noqa + + if plot_method not in ("contourf", "contour", "pcolormesh"): + raise ValueError( + "plot_method must be 'contourf', 'contour', or 'pcolormesh'" + ) + + if ax is None: + _, ax = plt.subplots() + + plot_func = getattr(ax, plot_method) + self.surface_ = plot_func(self.xx0, self.xx1, self.response, **kwargs) + + if xlabel is not None or not ax.get_xlabel(): + xlabel = self.xlabel if xlabel is None else xlabel + ax.set_xlabel(xlabel) + if ylabel is not None or not ax.get_ylabel(): + ylabel = self.ylabel if ylabel is None else ylabel + ax.set_ylabel(ylabel) + + self.ax_ = ax + self.figure_ = ax.figure + return self + + @classmethod + def from_estimator( + cls, + estimator, + X, + *, + grid_resolution=100, + eps=1.0, + plot_method="contourf", + response_method="auto", + class_of_interest=None, + xlabel=None, + ylabel=None, + ax=None, + **kwargs, + ): + """Plot decision boundary given an estimator. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + estimator : object + Trained estimator used to plot the decision boundary. + + X : {array-like, sparse matrix, dataframe} of shape (n_samples, 2) + Input data that should be only 2-dimensional. + + grid_resolution : int, default=100 + Number of grid points to use for plotting decision boundary. + Higher values will make the plot look nicer but be slower to + render. + + eps : float, default=1.0 + Extends the minimum and maximum values of X for evaluating the + response function. + + plot_method : {'contourf', 'contour', 'pcolormesh'}, default='contourf' + Plotting method to call when plotting the response. Please refer + to the following matplotlib documentation for details: + :func:`contourf `, + :func:`contour `, + :func:`pcolormesh `. 
    @classmethod
    def from_estimator(
        cls,
        estimator,
        X,
        *,
        grid_resolution=100,
        eps=1.0,
        plot_method="contourf",
        response_method="auto",
        class_of_interest=None,
        xlabel=None,
        ylabel=None,
        ax=None,
        **kwargs,
    ):
        """Plot the decision boundary of a fitted estimator on 2-feature data.

        Parameters
        ----------
        estimator : object
            Trained estimator whose response is evaluated on a grid.
        X : {array-like, sparse matrix, dataframe} of shape (n_samples, 2)
            Input data; must have exactly two features.
        grid_resolution : int, default=100
            Number of grid points along each axis (must be > 1).
        eps : float, default=1.0
            Padding added below the min and above the max of each feature
            when building the evaluation grid.
        plot_method : {'contourf', 'contour', 'pcolormesh'}, default='contourf'
            Matplotlib method used to draw the surface.
        response_method : {'auto', 'predict_proba', 'decision_function', \
                'predict'}, default='auto'
            Response used for the surface. With 'auto', methods are tried in
            order :term:`decision_function`, :term:`predict_proba`,
            :term:`predict`; multiclass problems fall back to 'predict'.
        class_of_interest : int, float, bool or str, default=None
            Class whose response is plotted. If None, `estimator.classes_[1]`
            is the positive class for binary classifiers. Required for
            multiclass classifiers with 'predict_proba'/'decision_function'.
        xlabel, ylabel : str, default=None
            Axis labels; taken from dataframe column names when available,
            otherwise empty strings.
        ax : Matplotlib axes, default=None
            Axes to draw on; a new figure and axes are created when None.
        **kwargs : dict
            Forwarded to the `plot_method`.

        Returns
        -------
        display : :class:`~sklearn.inspection.DecisionBoundaryDisplay`
            Object that stores the result.
        """
        check_matplotlib_support(f"{cls.__name__}.from_estimator")
        check_is_fitted(estimator)

        # --- parameter validation -----------------------------------------
        if not grid_resolution > 1:
            raise ValueError(
                "grid_resolution must be greater than 1. Got"
                f" {grid_resolution} instead."
            )

        if not eps >= 0:
            raise ValueError(
                f"eps must be greater than or equal to 0. Got {eps} instead."
            )

        possible_plot_methods = ("contourf", "contour", "pcolormesh")
        if plot_method not in possible_plot_methods:
            available_methods = ", ".join(possible_plot_methods)
            raise ValueError(
                f"plot_method must be one of {available_methods}. "
                f"Got {plot_method} instead."
            )

        num_features = _num_features(X)
        if num_features != 2:
            raise ValueError(
                f"n_features must be equal to 2. Got {num_features} instead."
            )

        # --- build the evaluation grid ------------------------------------
        x0, x1 = _safe_indexing(X, 0, axis=1), _safe_indexing(X, 1, axis=1)

        x0_min, x0_max = x0.min() - eps, x0.max() + eps
        x1_min, x1_max = x1.min() - eps, x1.max() + eps

        xx0, xx1 = np.meshgrid(
            np.linspace(x0_min, x0_max, grid_resolution),
            np.linspace(x1_min, x1_max, grid_resolution),
        )

        X_grid = np.c_[xx0.ravel(), xx1.ravel()]
        if _is_pandas_df(X) or _is_polars_df(X):
            # Wrap the grid in the same container type with the original
            # column names, so column-name-aware estimators accept it.
            adapter = _get_adapter_from_container(X)
            X_grid = adapter.create_container(
                X_grid,
                X_grid,
                columns=X.columns,
            )

        # --- evaluate the response on the grid ----------------------------
        prediction_method = _check_boundary_response_method(
            estimator, response_method, class_of_interest
        )
        try:
            response, _, response_method_used = _get_response_values(
                estimator,
                X_grid,
                response_method=prediction_method,
                pos_label=class_of_interest,
                return_response_method_used=True,
            )
        except ValueError as exc:
            if "is not a valid label" in str(exc):
                # re-raise a more informative error message since `pos_label` is unknown
                # to our user when interacting with
                # `DecisionBoundaryDisplay.from_estimator`
                raise ValueError(
                    f"class_of_interest={class_of_interest} is not a valid label: It "
                    f"should be one of {estimator.classes_}"
                ) from exc
            raise

        # convert classes predictions into integers
        if response_method_used == "predict" and hasattr(estimator, "classes_"):
            encoder = LabelEncoder()
            encoder.classes_ = estimator.classes_
            response = encoder.transform(response)

        if response.ndim != 1:
            if is_regressor(estimator):
                raise ValueError("Multi-output regressors are not supported")

            # For the multiclass case, `_get_response_values` returns the response
            # as-is. Thus, we have a column per class and we need to select the column
            # corresponding to the positive class.
            col_idx = np.flatnonzero(estimator.classes_ == class_of_interest)[0]
            response = response[:, col_idx]

        if xlabel is None:
            xlabel = X.columns[0] if hasattr(X, "columns") else ""

        if ylabel is None:
            ylabel = X.columns[1] if hasattr(X, "columns") else ""

        display = cls(
            xx0=xx0,
            xx1=xx1,
            response=response.reshape(xx0.shape),
            xlabel=xlabel,
            ylabel=ylabel,
        )
        return display.plot(ax=ax, plot_method=plot_method, **kwargs)
+ + Read more in + :ref:`sphx_glr_auto_examples_miscellaneous_plot_partial_dependence_visualization_api.py` + and the :ref:`User Guide `. + + .. versionadded:: 0.22 + + Parameters + ---------- + pd_results : list of Bunch + Results of :func:`~sklearn.inspection.partial_dependence` for + ``features``. + + features : list of (int,) or list of (int, int) + Indices of features for a given plot. A tuple of one integer will plot + a partial dependence curve of one feature. A tuple of two integers will + plot a two-way partial dependence curve as a contour plot. + + feature_names : list of str + Feature names corresponding to the indices in ``features``. + + target_idx : int + + - In a multiclass setting, specifies the class for which the PDPs + should be computed. Note that for binary classification, the + positive class (index 1) is always used. + - In a multioutput setting, specifies the task for which the PDPs + should be computed. + + Ignored in binary classification or classical regression settings. + + deciles : dict + Deciles for feature indices in ``features``. + + kind : {'average', 'individual', 'both'} or list of such str, \ + default='average' + Whether to plot the partial dependence averaged across all the samples + in the dataset or one line per sample or both. + + - ``kind='average'`` results in the traditional PD plot; + - ``kind='individual'`` results in the ICE plot; + - ``kind='both'`` results in plotting both the ICE and PD on the same + plot. + + A list of such strings can be provided to specify `kind` on a per-plot + basis. The length of the list should be the same as the number of + interaction requested in `features`. + + .. note:: + ICE ('individual' or 'both') is not a valid option for 2-ways + interactions plot. As a result, an error will be raised. + 2-ways interaction plots should always be configured to + use the 'average' kind instead. + + .. 
note:: + The fast ``method='recursion'`` option is only available for + `kind='average'` and `sample_weights=None`. Computing individual + dependencies and doing weighted averages requires using the slower + `method='brute'`. + + .. versionadded:: 0.24 + Add `kind` parameter with `'average'`, `'individual'`, and `'both'` + options. + + .. versionadded:: 1.1 + Add the possibility to pass a list of string specifying `kind` + for each plot. + + subsample : float, int or None, default=1000 + Sampling for ICE curves when `kind` is 'individual' or 'both'. + If float, should be between 0.0 and 1.0 and represent the proportion + of the dataset to be used to plot ICE curves. If int, represents the + maximum absolute number of samples to use. + + Note that the full dataset is still used to calculate partial + dependence when `kind='both'`. + + .. versionadded:: 0.24 + + random_state : int, RandomState instance or None, default=None + Controls the randomness of the selected samples when subsamples is not + `None`. See :term:`Glossary ` for details. + + .. versionadded:: 0.24 + + is_categorical : list of (bool,) or list of (bool, bool), default=None + Whether each target feature in `features` is categorical or not. + The list should be same size as `features`. If `None`, all features + are assumed to be continuous. + + .. versionadded:: 1.2 + + Attributes + ---------- + bounding_ax_ : matplotlib Axes or None + If `ax` is an axes or None, the `bounding_ax_` is the axes where the + grid of partial dependence plots are drawn. If `ax` is a list of axes + or a numpy array of axes, `bounding_ax_` is None. + + axes_ : ndarray of matplotlib Axes + If `ax` is an axes or None, `axes_[i, j]` is the axes on the i-th row + and j-th column. If `ax` is a list of axes, `axes_[i]` is the i-th item + in `ax`. Elements that are None correspond to a nonexisting axes in + that position. 
+ + lines_ : ndarray of matplotlib Artists + If `ax` is an axes or None, `lines_[i, j]` is the partial dependence + curve on the i-th row and j-th column. If `ax` is a list of axes, + `lines_[i]` is the partial dependence curve corresponding to the i-th + item in `ax`. Elements that are None correspond to a nonexisting axes + or an axes that does not include a line plot. + + deciles_vlines_ : ndarray of matplotlib LineCollection + If `ax` is an axes or None, `vlines_[i, j]` is the line collection + representing the x axis deciles of the i-th row and j-th column. If + `ax` is a list of axes, `vlines_[i]` corresponds to the i-th item in + `ax`. Elements that are None correspond to a nonexisting axes or an + axes that does not include a PDP plot. + + .. versionadded:: 0.23 + + deciles_hlines_ : ndarray of matplotlib LineCollection + If `ax` is an axes or None, `vlines_[i, j]` is the line collection + representing the y axis deciles of the i-th row and j-th column. If + `ax` is a list of axes, `vlines_[i]` corresponds to the i-th item in + `ax`. Elements that are None correspond to a nonexisting axes or an + axes that does not include a 2-way plot. + + .. versionadded:: 0.23 + + contours_ : ndarray of matplotlib Artists + If `ax` is an axes or None, `contours_[i, j]` is the partial dependence + plot on the i-th row and j-th column. If `ax` is a list of axes, + `contours_[i]` is the partial dependence plot corresponding to the i-th + item in `ax`. Elements that are None correspond to a nonexisting axes + or an axes that does not include a contour plot. + + bars_ : ndarray of matplotlib Artists + If `ax` is an axes or None, `bars_[i, j]` is the partial dependence bar + plot on the i-th row and j-th column (for a categorical feature). + If `ax` is a list of axes, `bars_[i]` is the partial dependence bar + plot corresponding to the i-th item in `ax`. Elements that are None + correspond to a nonexisting axes or an axes that does not include a + bar plot. + + .. 
versionadded:: 1.2 + + heatmaps_ : ndarray of matplotlib Artists + If `ax` is an axes or None, `heatmaps_[i, j]` is the partial dependence + heatmap on the i-th row and j-th column (for a pair of categorical + features) . If `ax` is a list of axes, `heatmaps_[i]` is the partial + dependence heatmap corresponding to the i-th item in `ax`. Elements + that are None correspond to a nonexisting axes or an axes that does not + include a heatmap. + + .. versionadded:: 1.2 + + figure_ : matplotlib Figure + Figure containing partial dependence plots. + + See Also + -------- + partial_dependence : Compute Partial Dependence values. + PartialDependenceDisplay.from_estimator : Plot Partial Dependence. + + Examples + -------- + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from sklearn.datasets import make_friedman1 + >>> from sklearn.ensemble import GradientBoostingRegressor + >>> from sklearn.inspection import PartialDependenceDisplay + >>> from sklearn.inspection import partial_dependence + >>> X, y = make_friedman1() + >>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y) + >>> features, feature_names = [(0,)], [f"Features #{i}" for i in range(X.shape[1])] + >>> deciles = {0: np.linspace(0, 1, num=5)} + >>> pd_results = partial_dependence( + ... clf, X, features=0, kind="average", grid_resolution=5) + >>> display = PartialDependenceDisplay( + ... [pd_results], features=features, feature_names=feature_names, + ... target_idx=0, deciles=deciles + ... 
) + >>> display.plot(pdp_lim={1: (-1.38, 0.66)}) + <...> + >>> plt.show() + """ + + def __init__( + self, + pd_results, + *, + features, + feature_names, + target_idx, + deciles, + kind="average", + subsample=1000, + random_state=None, + is_categorical=None, + ): + self.pd_results = pd_results + self.features = features + self.feature_names = feature_names + self.target_idx = target_idx + self.deciles = deciles + self.kind = kind + self.subsample = subsample + self.random_state = random_state + self.is_categorical = is_categorical + + @classmethod + def from_estimator( + cls, + estimator, + X, + features, + *, + sample_weight=None, + categorical_features=None, + feature_names=None, + target=None, + response_method="auto", + n_cols=3, + grid_resolution=100, + percentiles=(0.05, 0.95), + method="auto", + n_jobs=None, + verbose=0, + line_kw=None, + ice_lines_kw=None, + pd_line_kw=None, + contour_kw=None, + ax=None, + kind="average", + centered=False, + subsample=1000, + random_state=None, + ): + """Partial dependence (PD) and individual conditional expectation (ICE) plots. + + Partial dependence plots, individual conditional expectation plots or an + overlay of both of them can be plotted by setting the ``kind`` + parameter. The ``len(features)`` plots are arranged in a grid with + ``n_cols`` columns. Two-way partial dependence plots are plotted as + contour plots. The deciles of the feature values will be shown with tick + marks on the x-axes for one-way plots, and on both axes for two-way + plots. + + Read more in the :ref:`User Guide `. + + .. note:: + + :func:`PartialDependenceDisplay.from_estimator` does not support using the + same axes with multiple calls. 
To plot the partial dependence for + multiple estimators, please pass the axes created by the first call to the + second call:: + + >>> from sklearn.inspection import PartialDependenceDisplay + >>> from sklearn.datasets import make_friedman1 + >>> from sklearn.linear_model import LinearRegression + >>> from sklearn.ensemble import RandomForestRegressor + >>> X, y = make_friedman1() + >>> est1 = LinearRegression().fit(X, y) + >>> est2 = RandomForestRegressor().fit(X, y) + >>> disp1 = PartialDependenceDisplay.from_estimator(est1, X, + ... [1, 2]) + >>> disp2 = PartialDependenceDisplay.from_estimator(est2, X, [1, 2], + ... ax=disp1.axes_) + + .. warning:: + + For :class:`~sklearn.ensemble.GradientBoostingClassifier` and + :class:`~sklearn.ensemble.GradientBoostingRegressor`, the + `'recursion'` method (used by default) will not account for the `init` + predictor of the boosting process. In practice, this will produce + the same values as `'brute'` up to a constant offset in the target + response, provided that `init` is a constant estimator (which is the + default). However, if `init` is not a constant estimator, the + partial dependence values are incorrect for `'recursion'` because the + offset will be sample-dependent. It is preferable to use the `'brute'` + method. Note that this only applies to + :class:`~sklearn.ensemble.GradientBoostingClassifier` and + :class:`~sklearn.ensemble.GradientBoostingRegressor`, not to + :class:`~sklearn.ensemble.HistGradientBoostingClassifier` and + :class:`~sklearn.ensemble.HistGradientBoostingRegressor`. + + .. versionadded:: 1.0 + + Parameters + ---------- + estimator : BaseEstimator + A fitted estimator object implementing :term:`predict`, + :term:`predict_proba`, or :term:`decision_function`. + Multioutput-multiclass classifiers are not supported. 
+ + X : {array-like, dataframe} of shape (n_samples, n_features) + ``X`` is used to generate a grid of values for the target + ``features`` (where the partial dependence will be evaluated), and + also to generate values for the complement features when the + `method` is `'brute'`. + + features : list of {int, str, pair of int, pair of str} + The target features for which to create the PDPs. + If `features[i]` is an integer or a string, a one-way PDP is created; + if `features[i]` is a tuple, a two-way PDP is created (only supported + with `kind='average'`). Each tuple must be of size 2. + If any entry is a string, then it must be in ``feature_names``. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights are used to calculate weighted means when averaging the + model output. If `None`, then samples are equally weighted. If + `sample_weight` is not `None`, then `method` will be set to `'brute'`. + Note that `sample_weight` is ignored for `kind='individual'`. + + .. versionadded:: 1.3 + + categorical_features : array-like of shape (n_features,) or shape \ + (n_categorical_features,), dtype={bool, int, str}, default=None + Indicates the categorical features. + + - `None`: no feature will be considered categorical; + - boolean array-like: boolean mask of shape `(n_features,)` + indicating which features are categorical. Thus, this array has + the same shape has `X.shape[1]`; + - integer or string array-like: integer indices or strings + indicating categorical features. + + .. versionadded:: 1.2 + + feature_names : array-like of shape (n_features,), dtype=str, default=None + Name of each feature; `feature_names[i]` holds the name of the feature + with index `i`. + By default, the name of the feature corresponds to their numerical + index for NumPy array and their column name for pandas dataframe. + + target : int, default=None + - In a multiclass setting, specifies the class for which the PDPs + should be computed. 
Note that for binary classification, the + positive class (index 1) is always used. + - In a multioutput setting, specifies the task for which the PDPs + should be computed. + + Ignored in binary classification or classical regression settings. + + response_method : {'auto', 'predict_proba', 'decision_function'}, \ + default='auto' + Specifies whether to use :term:`predict_proba` or + :term:`decision_function` as the target response. For regressors + this parameter is ignored and the response is always the output of + :term:`predict`. By default, :term:`predict_proba` is tried first + and we revert to :term:`decision_function` if it doesn't exist. If + ``method`` is `'recursion'`, the response is always the output of + :term:`decision_function`. + + n_cols : int, default=3 + The maximum number of columns in the grid plot. Only active when `ax` + is a single axis or `None`. + + grid_resolution : int, default=100 + The number of equally spaced points on the axes of the plots, for each + target feature. + + percentiles : tuple of float, default=(0.05, 0.95) + The lower and upper percentile used to create the extreme values + for the PDP axes. Must be in [0, 1]. + + method : str, default='auto' + The method used to calculate the averaged predictions: + + - `'recursion'` is only supported for some tree-based estimators + (namely + :class:`~sklearn.ensemble.GradientBoostingClassifier`, + :class:`~sklearn.ensemble.GradientBoostingRegressor`, + :class:`~sklearn.ensemble.HistGradientBoostingClassifier`, + :class:`~sklearn.ensemble.HistGradientBoostingRegressor`, + :class:`~sklearn.tree.DecisionTreeRegressor`, + :class:`~sklearn.ensemble.RandomForestRegressor` + but is more efficient in terms of speed. + With this method, the target response of a + classifier is always the decision function, not the predicted + probabilities. 
Since the `'recursion'` method implicitly computes + the average of the ICEs by design, it is not compatible with ICE and + thus `kind` must be `'average'`. + + - `'brute'` is supported for any estimator, but is more + computationally intensive. + + - `'auto'`: the `'recursion'` is used for estimators that support it, + and `'brute'` is used otherwise. If `sample_weight` is not `None`, + then `'brute'` is used regardless of the estimator. + + Please see :ref:`this note ` for + differences between the `'brute'` and `'recursion'` method. + + n_jobs : int, default=None + The number of CPUs to use to compute the partial dependences. + Computation is parallelized over features specified by the `features` + parameter. + + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + verbose : int, default=0 + Verbose output during PD computations. + + line_kw : dict, default=None + Dict with keywords passed to the ``matplotlib.pyplot.plot`` call. + For one-way partial dependence plots. It can be used to define common + properties for both `ice_lines_kw` and `pdp_line_kw`. + + ice_lines_kw : dict, default=None + Dictionary with keywords passed to the `matplotlib.pyplot.plot` call. + For ICE lines in the one-way partial dependence plots. + The key value pairs defined in `ice_lines_kw` takes priority over + `line_kw`. + + pd_line_kw : dict, default=None + Dictionary with keywords passed to the `matplotlib.pyplot.plot` call. + For partial dependence in one-way partial dependence plots. + The key value pairs defined in `pd_line_kw` takes priority over + `line_kw`. + + contour_kw : dict, default=None + Dict with keywords passed to the ``matplotlib.pyplot.contourf`` call. + For two-way partial dependence plots. 
+ + ax : Matplotlib axes or array-like of Matplotlib axes, default=None + - If a single axis is passed in, it is treated as a bounding axes + and a grid of partial dependence plots will be drawn within + these bounds. The `n_cols` parameter controls the number of + columns in the grid. + - If an array-like of axes are passed in, the partial dependence + plots will be drawn directly into these axes. + - If `None`, a figure and a bounding axes is created and treated + as the single axes case. + + kind : {'average', 'individual', 'both'}, default='average' + Whether to plot the partial dependence averaged across all the samples + in the dataset or one line per sample or both. + + - ``kind='average'`` results in the traditional PD plot; + - ``kind='individual'`` results in the ICE plot. + + Note that the fast `method='recursion'` option is only available for + `kind='average'` and `sample_weights=None`. Computing individual + dependencies and doing weighted averages requires using the slower + `method='brute'`. + + centered : bool, default=False + If `True`, the ICE and PD lines will start at the origin of the + y-axis. By default, no centering is done. + + .. versionadded:: 1.1 + + subsample : float, int or None, default=1000 + Sampling for ICE curves when `kind` is 'individual' or 'both'. + If `float`, should be between 0.0 and 1.0 and represent the proportion + of the dataset to be used to plot ICE curves. If `int`, represents the + absolute number samples to use. + + Note that the full dataset is still used to calculate averaged partial + dependence when `kind='both'`. + + random_state : int, RandomState instance or None, default=None + Controls the randomness of the selected samples when subsamples is not + `None` and `kind` is either `'both'` or `'individual'`. + See :term:`Glossary ` for details. 
+ + Returns + ------- + display : :class:`~sklearn.inspection.PartialDependenceDisplay` + + See Also + -------- + partial_dependence : Compute Partial Dependence values. + + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> from sklearn.datasets import make_friedman1 + >>> from sklearn.ensemble import GradientBoostingRegressor + >>> from sklearn.inspection import PartialDependenceDisplay + >>> X, y = make_friedman1() + >>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y) + >>> PartialDependenceDisplay.from_estimator(clf, X, [0, (0, 1)]) + <...> + >>> plt.show() + """ + check_matplotlib_support(f"{cls.__name__}.from_estimator") # noqa + import matplotlib.pyplot as plt # noqa + + # set target_idx for multi-class estimators + if hasattr(estimator, "classes_") and np.size(estimator.classes_) > 2: + if target is None: + raise ValueError("target must be specified for multi-class") + target_idx = np.searchsorted(estimator.classes_, target) + if ( + not (0 <= target_idx < len(estimator.classes_)) + or estimator.classes_[target_idx] != target + ): + raise ValueError("target not in est.classes_, got {}".format(target)) + else: + # regression and binary classification + target_idx = 0 + + # Use check_array only on lists and other non-array-likes / sparse. Do not + # convert DataFrame into a NumPy array. + if not (hasattr(X, "__array__") or sparse.issparse(X)): + X = check_array(X, ensure_all_finite="allow-nan", dtype=object) + n_features = X.shape[1] + + feature_names = _check_feature_names(X, feature_names) + # expand kind to always be a list of str + kind_ = [kind] * len(features) if isinstance(kind, str) else kind + if len(kind_) != len(features): + raise ValueError( + "When `kind` is provided as a list of strings, it should contain " + f"as many elements as `features`. `kind` contains {len(kind_)} " + f"element(s) and `features` contains {len(features)} element(s)." 
+ ) + + # convert features into a seq of int tuples + tmp_features, ice_for_two_way_pd = [], [] + for kind_plot, fxs in zip(kind_, features): + if isinstance(fxs, (numbers.Integral, str)): + fxs = (fxs,) + try: + fxs = tuple( + _get_feature_index(fx, feature_names=feature_names) for fx in fxs + ) + except TypeError as e: + raise ValueError( + "Each entry in features must be either an int, " + "a string, or an iterable of size at most 2." + ) from e + if not 1 <= np.size(fxs) <= 2: + raise ValueError( + "Each entry in features must be either an int, " + "a string, or an iterable of size at most 2." + ) + # store the information if 2-way PD was requested with ICE to later + # raise a ValueError with an exhaustive list of problematic + # settings. + ice_for_two_way_pd.append(kind_plot != "average" and np.size(fxs) > 1) + + tmp_features.append(fxs) + + if any(ice_for_two_way_pd): + # raise an error and be specific regarding the parameter values + # when 1- and 2-way PD were requested + kind_ = [ + "average" if forcing_average else kind_plot + for forcing_average, kind_plot in zip(ice_for_two_way_pd, kind_) + ] + raise ValueError( + "ICE plot cannot be rendered for 2-way feature interactions. " + "2-way feature interactions mandates PD plots using the " + "'average' kind: " + f"features={features!r} should be configured to use " + f"kind={kind_!r} explicitly." + ) + features = tmp_features + + if categorical_features is None: + is_categorical = [ + (False,) if len(fxs) == 1 else (False, False) for fxs in features + ] + else: + # we need to create a boolean indicator of which features are + # categorical from the categorical_features list. + categorical_features = np.asarray(categorical_features) + if categorical_features.dtype.kind == "b": + # categorical features provided as a list of boolean + if categorical_features.size != n_features: + raise ValueError( + "When `categorical_features` is a boolean array-like, " + "the array should be of shape (n_features,). 
Got " + f"{categorical_features.size} elements while `X` contains " + f"{n_features} features." + ) + is_categorical = [ + tuple(categorical_features[fx] for fx in fxs) for fxs in features + ] + elif categorical_features.dtype.kind in ("i", "O", "U"): + # categorical features provided as a list of indices or feature names + categorical_features_idx = [ + _get_feature_index(cat, feature_names=feature_names) + for cat in categorical_features + ] + is_categorical = [ + tuple([idx in categorical_features_idx for idx in fxs]) + for fxs in features + ] + else: + raise ValueError( + "Expected `categorical_features` to be an array-like of boolean," + f" integer, or string. Got {categorical_features.dtype} instead." + ) + + for cats in is_categorical: + if np.size(cats) == 2 and (cats[0] != cats[1]): + raise ValueError( + "Two-way partial dependence plots are not supported for pairs" + " of continuous and categorical features." + ) + + # collect the indices of the categorical features targeted by the partial + # dependence computation + categorical_features_targeted = set( + [ + fx + for fxs, cats in zip(features, is_categorical) + for fx in fxs + if any(cats) + ] + ) + if categorical_features_targeted: + min_n_cats = min( + [ + len(_unique(_safe_indexing(X, idx, axis=1))) + for idx in categorical_features_targeted + ] + ) + if grid_resolution < min_n_cats: + raise ValueError( + "The resolution of the computed grid is less than the " + "minimum number of categories in the targeted categorical " + "features. Expect the `grid_resolution` to be greater than " + f"{min_n_cats}. Got {grid_resolution} instead." + ) + + for is_cat, kind_plot in zip(is_categorical, kind_): + if any(is_cat) and kind_plot != "average": + raise ValueError( + "It is not possible to display individual effects for" + " categorical features." 
+ ) + + # Early exit if the axes does not have the correct number of axes + if ax is not None and not isinstance(ax, plt.Axes): + axes = np.asarray(ax, dtype=object) + if axes.size != len(features): + raise ValueError( + "Expected ax to have {} axes, got {}".format( + len(features), axes.size + ) + ) + + for i in chain.from_iterable(features): + if i >= len(feature_names): + raise ValueError( + "All entries of features must be less than " + "len(feature_names) = {0}, got {1}.".format(len(feature_names), i) + ) + + if isinstance(subsample, numbers.Integral): + if subsample <= 0: + raise ValueError( + f"When an integer, subsample={subsample} should be positive." + ) + elif isinstance(subsample, numbers.Real): + if subsample <= 0 or subsample >= 1: + raise ValueError( + f"When a floating-point, subsample={subsample} should be in " + "the (0, 1) range." + ) + + # compute predictions and/or averaged predictions + pd_results = Parallel(n_jobs=n_jobs, verbose=verbose)( + delayed(partial_dependence)( + estimator, + X, + fxs, + sample_weight=sample_weight, + feature_names=feature_names, + categorical_features=categorical_features, + response_method=response_method, + method=method, + grid_resolution=grid_resolution, + percentiles=percentiles, + kind=kind_plot, + ) + for kind_plot, fxs in zip(kind_, features) + ) + + # For multioutput regression, we can only check the validity of target + # now that we have the predictions. + # Also note: as multiclass-multioutput classifiers are not supported, + # multiclass and multioutput scenario are mutually exclusive. So there is + # no risk of overwriting target_idx here. 
+ pd_result = pd_results[0] # checking the first result is enough + n_tasks = ( + pd_result.average.shape[0] + if kind_[0] == "average" + else pd_result.individual.shape[0] + ) + if is_regressor(estimator) and n_tasks > 1: + if target is None: + raise ValueError("target must be specified for multi-output regressors") + if not 0 <= target <= n_tasks: + raise ValueError( + "target must be in [0, n_tasks], got {}.".format(target) + ) + target_idx = target + + deciles = {} + for fxs, cats in zip(features, is_categorical): + for fx, cat in zip(fxs, cats): + if not cat and fx not in deciles: + X_col = _safe_indexing(X, fx, axis=1) + deciles[fx] = mquantiles(X_col, prob=np.arange(0.1, 1.0, 0.1)) + + display = cls( + pd_results=pd_results, + features=features, + feature_names=feature_names, + target_idx=target_idx, + deciles=deciles, + kind=kind, + subsample=subsample, + random_state=random_state, + is_categorical=is_categorical, + ) + return display.plot( + ax=ax, + n_cols=n_cols, + line_kw=line_kw, + ice_lines_kw=ice_lines_kw, + pd_line_kw=pd_line_kw, + contour_kw=contour_kw, + centered=centered, + ) + + def _get_sample_count(self, n_samples): + """Compute the number of samples as an integer.""" + if isinstance(self.subsample, numbers.Integral): + if self.subsample < n_samples: + return self.subsample + return n_samples + elif isinstance(self.subsample, numbers.Real): + return ceil(n_samples * self.subsample) + return n_samples + + def _plot_ice_lines( + self, + preds, + feature_values, + n_ice_to_plot, + ax, + pd_plot_idx, + n_total_lines_by_plot, + individual_line_kw, + ): + """Plot the ICE lines. + + Parameters + ---------- + preds : ndarray of shape \ + (n_instances, n_grid_points) + The predictions computed for all points of `feature_values` for a + given feature for all samples in `X`. + feature_values : ndarray of shape (n_grid_points,) + The feature values for which the predictions have been computed. + n_ice_to_plot : int + The number of ICE lines to plot. 
+ ax : Matplotlib axes + The axis on which to plot the ICE lines. + pd_plot_idx : int + The sequential index of the plot. It will be unraveled to find the + matching 2D position in the grid layout. + n_total_lines_by_plot : int + The total number of lines expected to be plot on the axis. + individual_line_kw : dict + Dict with keywords passed when plotting the ICE lines. + """ + rng = check_random_state(self.random_state) + # subsample ice + ice_lines_idx = rng.choice( + preds.shape[0], + n_ice_to_plot, + replace=False, + ) + ice_lines_subsampled = preds[ice_lines_idx, :] + # plot the subsampled ice + for ice_idx, ice in enumerate(ice_lines_subsampled): + line_idx = np.unravel_index( + pd_plot_idx * n_total_lines_by_plot + ice_idx, self.lines_.shape + ) + self.lines_[line_idx] = ax.plot( + feature_values, ice.ravel(), **individual_line_kw + )[0] + + def _plot_average_dependence( + self, + avg_preds, + feature_values, + ax, + pd_line_idx, + line_kw, + categorical, + bar_kw, + ): + """Plot the average partial dependence. + + Parameters + ---------- + avg_preds : ndarray of shape (n_grid_points,) + The average predictions for all points of `feature_values` for a + given feature for all samples in `X`. + feature_values : ndarray of shape (n_grid_points,) + The feature values for which the predictions have been computed. + ax : Matplotlib axes + The axis on which to plot the average PD. + pd_line_idx : int + The sequential index of the plot. It will be unraveled to find the + matching 2D position in the grid layout. + line_kw : dict + Dict with keywords passed when plotting the PD plot. + categorical : bool + Whether feature is categorical. + bar_kw: dict + Dict with keywords passed when plotting the PD bars (categorical). 
+ """ + if categorical: + bar_idx = np.unravel_index(pd_line_idx, self.bars_.shape) + self.bars_[bar_idx] = ax.bar(feature_values, avg_preds, **bar_kw)[0] + ax.tick_params(axis="x", rotation=90) + else: + line_idx = np.unravel_index(pd_line_idx, self.lines_.shape) + self.lines_[line_idx] = ax.plot( + feature_values, + avg_preds, + **line_kw, + )[0] + + def _plot_one_way_partial_dependence( + self, + kind, + preds, + avg_preds, + feature_values, + feature_idx, + n_ice_lines, + ax, + n_cols, + pd_plot_idx, + n_lines, + ice_lines_kw, + pd_line_kw, + categorical, + bar_kw, + pdp_lim, + ): + """Plot 1-way partial dependence: ICE and PDP. + + Parameters + ---------- + kind : str + The kind of partial plot to draw. + preds : ndarray of shape \ + (n_instances, n_grid_points) or None + The predictions computed for all points of `feature_values` for a + given feature for all samples in `X`. + avg_preds : ndarray of shape (n_grid_points,) + The average predictions for all points of `feature_values` for a + given feature for all samples in `X`. + feature_values : ndarray of shape (n_grid_points,) + The feature values for which the predictions have been computed. + feature_idx : int + The index corresponding to the target feature. + n_ice_lines : int + The number of ICE lines to plot. + ax : Matplotlib axes + The axis on which to plot the ICE and PDP lines. + n_cols : int or None + The number of column in the axis. + pd_plot_idx : int + The sequential index of the plot. It will be unraveled to find the + matching 2D position in the grid layout. + n_lines : int + The total number of lines expected to be plot on the axis. + ice_lines_kw : dict + Dict with keywords passed when plotting the ICE lines. + pd_line_kw : dict + Dict with keywords passed when plotting the PD plot. + categorical : bool + Whether feature is categorical. + bar_kw: dict + Dict with keywords passed when plotting the PD bars (categorical). 
+ pdp_lim : dict + Global min and max average predictions, such that all plots will + have the same scale and y limits. `pdp_lim[1]` is the global min + and max for single partial dependence curves. + """ + from matplotlib import transforms # noqa + + if kind in ("individual", "both"): + self._plot_ice_lines( + preds[self.target_idx], + feature_values, + n_ice_lines, + ax, + pd_plot_idx, + n_lines, + ice_lines_kw, + ) + + if kind in ("average", "both"): + # the average is stored as the last line + if kind == "average": + pd_line_idx = pd_plot_idx + else: + pd_line_idx = pd_plot_idx * n_lines + n_ice_lines + self._plot_average_dependence( + avg_preds[self.target_idx].ravel(), + feature_values, + ax, + pd_line_idx, + pd_line_kw, + categorical, + bar_kw, + ) + + trans = transforms.blended_transform_factory(ax.transData, ax.transAxes) + # create the decile line for the vertical axis + vlines_idx = np.unravel_index(pd_plot_idx, self.deciles_vlines_.shape) + if self.deciles.get(feature_idx[0], None) is not None: + self.deciles_vlines_[vlines_idx] = ax.vlines( + self.deciles[feature_idx[0]], + 0, + 0.05, + transform=trans, + color="k", + ) + # reset ylim which was overwritten by vlines + min_val = min(val[0] for val in pdp_lim.values()) + max_val = max(val[1] for val in pdp_lim.values()) + ax.set_ylim([min_val, max_val]) + + # Set xlabel if it is not already set + if not ax.get_xlabel(): + ax.set_xlabel(self.feature_names[feature_idx[0]]) + + if n_cols is None or pd_plot_idx % n_cols == 0: + if not ax.get_ylabel(): + ax.set_ylabel("Partial dependence") + else: + ax.set_yticklabels([]) + + if pd_line_kw.get("label", None) and kind != "individual" and not categorical: + ax.legend() + + def _plot_two_way_partial_dependence( + self, + avg_preds, + feature_values, + feature_idx, + ax, + pd_plot_idx, + Z_level, + contour_kw, + categorical, + heatmap_kw, + ): + """Plot 2-way partial dependence. 
+ + Parameters + ---------- + avg_preds : ndarray of shape \ + (n_instances, n_grid_points, n_grid_points) + The average predictions for all points of `feature_values[0]` and + `feature_values[1]` for some given features for all samples in `X`. + feature_values : seq of 1d array + A sequence of array of the feature values for which the predictions + have been computed. + feature_idx : tuple of int + The indices of the target features + ax : Matplotlib axes + The axis on which to plot the ICE and PDP lines. + pd_plot_idx : int + The sequential index of the plot. It will be unraveled to find the + matching 2D position in the grid layout. + Z_level : ndarray of shape (8, 8) + The Z-level used to encode the average predictions. + contour_kw : dict + Dict with keywords passed when plotting the contours. + categorical : bool + Whether features are categorical. + heatmap_kw: dict + Dict with keywords passed when plotting the PD heatmap + (categorical). + """ + if categorical: + import matplotlib.pyplot as plt + + default_im_kw = dict(interpolation="nearest", cmap="viridis") + im_kw = {**default_im_kw, **heatmap_kw} + + data = avg_preds[self.target_idx] + im = ax.imshow(data, **im_kw) + text = None + cmap_min, cmap_max = im.cmap(0), im.cmap(1.0) + + text = np.empty_like(data, dtype=object) + # print text with appropriate color depending on background + thresh = (data.max() + data.min()) / 2.0 + + for flat_index in range(data.size): + row, col = np.unravel_index(flat_index, data.shape) + color = cmap_max if data[row, col] < thresh else cmap_min + + values_format = ".2f" + text_data = format(data[row, col], values_format) + + text_kwargs = dict(ha="center", va="center", color=color) + text[row, col] = ax.text(col, row, text_data, **text_kwargs) + + fig = ax.figure + fig.colorbar(im, ax=ax) + ax.set( + xticks=np.arange(len(feature_values[1])), + yticks=np.arange(len(feature_values[0])), + xticklabels=feature_values[1], + yticklabels=feature_values[0], + 
xlabel=self.feature_names[feature_idx[1]], + ylabel=self.feature_names[feature_idx[0]], + ) + + plt.setp(ax.get_xticklabels(), rotation="vertical") + + heatmap_idx = np.unravel_index(pd_plot_idx, self.heatmaps_.shape) + self.heatmaps_[heatmap_idx] = im + else: + from matplotlib import transforms # noqa + + XX, YY = np.meshgrid(feature_values[0], feature_values[1]) + Z = avg_preds[self.target_idx].T + CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5, colors="k") + contour_idx = np.unravel_index(pd_plot_idx, self.contours_.shape) + self.contours_[contour_idx] = ax.contourf( + XX, + YY, + Z, + levels=Z_level, + vmax=Z_level[-1], + vmin=Z_level[0], + **contour_kw, + ) + ax.clabel(CS, fmt="%2.2f", colors="k", fontsize=10, inline=True) + + trans = transforms.blended_transform_factory(ax.transData, ax.transAxes) + # create the decile line for the vertical axis + xlim, ylim = ax.get_xlim(), ax.get_ylim() + vlines_idx = np.unravel_index(pd_plot_idx, self.deciles_vlines_.shape) + self.deciles_vlines_[vlines_idx] = ax.vlines( + self.deciles[feature_idx[0]], + 0, + 0.05, + transform=trans, + color="k", + ) + # create the decile line for the horizontal axis + hlines_idx = np.unravel_index(pd_plot_idx, self.deciles_hlines_.shape) + self.deciles_hlines_[hlines_idx] = ax.hlines( + self.deciles[feature_idx[1]], + 0, + 0.05, + transform=trans, + color="k", + ) + # reset xlim and ylim since they are overwritten by hlines and + # vlines + ax.set_xlim(xlim) + ax.set_ylim(ylim) + + # set xlabel if it is not already set + if not ax.get_xlabel(): + ax.set_xlabel(self.feature_names[feature_idx[0]]) + ax.set_ylabel(self.feature_names[feature_idx[1]]) + + def plot( + self, + *, + ax=None, + n_cols=3, + line_kw=None, + ice_lines_kw=None, + pd_line_kw=None, + contour_kw=None, + bar_kw=None, + heatmap_kw=None, + pdp_lim=None, + centered=False, + ): + """Plot partial dependence plots. 
+ + Parameters + ---------- + ax : Matplotlib axes or array-like of Matplotlib axes, default=None + - If a single axis is passed in, it is treated as a bounding axes + and a grid of partial dependence plots will be drawn within + these bounds. The `n_cols` parameter controls the number of + columns in the grid. + - If an array-like of axes are passed in, the partial dependence + plots will be drawn directly into these axes. + - If `None`, a figure and a bounding axes is created and treated + as the single axes case. + + n_cols : int, default=3 + The maximum number of columns in the grid plot. Only active when + `ax` is a single axes or `None`. + + line_kw : dict, default=None + Dict with keywords passed to the `matplotlib.pyplot.plot` call. + For one-way partial dependence plots. + + ice_lines_kw : dict, default=None + Dictionary with keywords passed to the `matplotlib.pyplot.plot` call. + For ICE lines in the one-way partial dependence plots. + The key value pairs defined in `ice_lines_kw` takes priority over + `line_kw`. + + .. versionadded:: 1.0 + + pd_line_kw : dict, default=None + Dictionary with keywords passed to the `matplotlib.pyplot.plot` call. + For partial dependence in one-way partial dependence plots. + The key value pairs defined in `pd_line_kw` takes priority over + `line_kw`. + + .. versionadded:: 1.0 + + contour_kw : dict, default=None + Dict with keywords passed to the `matplotlib.pyplot.contourf` + call for two-way partial dependence plots. + + bar_kw : dict, default=None + Dict with keywords passed to the `matplotlib.pyplot.bar` + call for one-way categorical partial dependence plots. + + .. versionadded:: 1.2 + + heatmap_kw : dict, default=None + Dict with keywords passed to the `matplotlib.pyplot.imshow` + call for two-way categorical partial dependence plots. + + .. versionadded:: 1.2 + + pdp_lim : dict, default=None + Global min and max average predictions, such that all plots will have the + same scale and y limits. 
`pdp_lim[1]` is the global min and max for single + partial dependence curves. `pdp_lim[2]` is the global min and max for + two-way partial dependence curves. If `None` (default), the limit will be + inferred from the global minimum and maximum of all predictions. + + .. versionadded:: 1.1 + + centered : bool, default=False + If `True`, the ICE and PD lines will start at the origin of the + y-axis. By default, no centering is done. + + .. versionadded:: 1.1 + + Returns + ------- + display : :class:`~sklearn.inspection.PartialDependenceDisplay` + Returns a :class:`~sklearn.inspection.PartialDependenceDisplay` + object that contains the partial dependence plots. + """ + + check_matplotlib_support("plot_partial_dependence") + import matplotlib.pyplot as plt # noqa + from matplotlib.gridspec import GridSpecFromSubplotSpec # noqa + + if isinstance(self.kind, str): + kind = [self.kind] * len(self.features) + else: + kind = self.kind + + if self.is_categorical is None: + is_categorical = [ + (False,) if len(fx) == 1 else (False, False) for fx in self.features + ] + else: + is_categorical = self.is_categorical + + if len(kind) != len(self.features): + raise ValueError( + "When `kind` is provided as a list of strings, it should " + "contain as many elements as `features`. `kind` contains " + f"{len(kind)} element(s) and `features` contains " + f"{len(self.features)} element(s)." + ) + + valid_kinds = {"average", "individual", "both"} + if any([k not in valid_kinds for k in kind]): + raise ValueError( + f"Values provided to `kind` must be one of: {valid_kinds!r} or a list" + f" of such values. 
Currently, kind={self.kind!r}" + ) + + # Center results before plotting + if not centered: + pd_results_ = self.pd_results + else: + pd_results_ = [] + for kind_plot, pd_result in zip(kind, self.pd_results): + current_results = {"grid_values": pd_result["grid_values"]} + + if kind_plot in ("individual", "both"): + preds = pd_result.individual + preds = preds - preds[self.target_idx, :, 0, None] + current_results["individual"] = preds + + if kind_plot in ("average", "both"): + avg_preds = pd_result.average + avg_preds = avg_preds - avg_preds[self.target_idx, 0, None] + current_results["average"] = avg_preds + + pd_results_.append(Bunch(**current_results)) + + if pdp_lim is None: + # get global min and max average predictions of PD grouped by plot type + pdp_lim = {} + for kind_plot, pdp in zip(kind, pd_results_): + values = pdp["grid_values"] + preds = pdp.average if kind_plot == "average" else pdp.individual + min_pd = preds[self.target_idx].min() + max_pd = preds[self.target_idx].max() + + # expand the limits to account so that the plotted lines do not touch + # the edges of the plot + span = max_pd - min_pd + min_pd -= 0.05 * span + max_pd += 0.05 * span + + n_fx = len(values) + old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd)) + min_pd = min(min_pd, old_min_pd) + max_pd = max(max_pd, old_max_pd) + pdp_lim[n_fx] = (min_pd, max_pd) + + if line_kw is None: + line_kw = {} + if ice_lines_kw is None: + ice_lines_kw = {} + if pd_line_kw is None: + pd_line_kw = {} + if bar_kw is None: + bar_kw = {} + if heatmap_kw is None: + heatmap_kw = {} + + if ax is None: + _, ax = plt.subplots() + + if contour_kw is None: + contour_kw = {} + default_contour_kws = {"alpha": 0.75} + contour_kw = _validate_style_kwargs(default_contour_kws, contour_kw) + + n_features = len(self.features) + is_average_plot = [kind_plot == "average" for kind_plot in kind] + if all(is_average_plot): + # only average plots are requested + n_ice_lines = 0 + n_lines = 1 + else: + # we need to 
determine the number of ICE samples computed + ice_plot_idx = is_average_plot.index(False) + n_ice_lines = self._get_sample_count( + len(pd_results_[ice_plot_idx].individual[0]) + ) + if any([kind_plot == "both" for kind_plot in kind]): + n_lines = n_ice_lines + 1 # account for the average line + else: + n_lines = n_ice_lines + + if isinstance(ax, plt.Axes): + # If ax was set off, it has most likely been set to off + # by a previous call to plot. + if not ax.axison: + raise ValueError( + "The ax was already used in another plot " + "function, please set ax=display.axes_ " + "instead" + ) + + ax.set_axis_off() + self.bounding_ax_ = ax + self.figure_ = ax.figure + + n_cols = min(n_cols, n_features) + n_rows = int(np.ceil(n_features / float(n_cols))) + + self.axes_ = np.empty((n_rows, n_cols), dtype=object) + if all(is_average_plot): + self.lines_ = np.empty((n_rows, n_cols), dtype=object) + else: + self.lines_ = np.empty((n_rows, n_cols, n_lines), dtype=object) + self.contours_ = np.empty((n_rows, n_cols), dtype=object) + self.bars_ = np.empty((n_rows, n_cols), dtype=object) + self.heatmaps_ = np.empty((n_rows, n_cols), dtype=object) + + axes_ravel = self.axes_.ravel() + + gs = GridSpecFromSubplotSpec( + n_rows, n_cols, subplot_spec=ax.get_subplotspec() + ) + for i, spec in zip(range(n_features), gs): + axes_ravel[i] = self.figure_.add_subplot(spec) + + else: # array-like + ax = np.asarray(ax, dtype=object) + if ax.size != n_features: + raise ValueError( + "Expected ax to have {} axes, got {}".format(n_features, ax.size) + ) + + if ax.ndim == 2: + n_cols = ax.shape[1] + else: + n_cols = None + + self.bounding_ax_ = None + self.figure_ = ax.ravel()[0].figure + self.axes_ = ax + if all(is_average_plot): + self.lines_ = np.empty_like(ax, dtype=object) + else: + self.lines_ = np.empty(ax.shape + (n_lines,), dtype=object) + self.contours_ = np.empty_like(ax, dtype=object) + self.bars_ = np.empty_like(ax, dtype=object) + self.heatmaps_ = np.empty_like(ax, dtype=object) + + 
# create contour levels for two-way plots + if 2 in pdp_lim: + Z_level = np.linspace(*pdp_lim[2], num=8) + + self.deciles_vlines_ = np.empty_like(self.axes_, dtype=object) + self.deciles_hlines_ = np.empty_like(self.axes_, dtype=object) + + for pd_plot_idx, (axi, feature_idx, cat, pd_result, kind_plot) in enumerate( + zip( + self.axes_.ravel(), + self.features, + is_categorical, + pd_results_, + kind, + ) + ): + avg_preds = None + preds = None + feature_values = pd_result["grid_values"] + if kind_plot == "individual": + preds = pd_result.individual + elif kind_plot == "average": + avg_preds = pd_result.average + else: # kind_plot == 'both' + avg_preds = pd_result.average + preds = pd_result.individual + + if len(feature_values) == 1: + # define the line-style for the current plot + default_line_kws = { + "color": "C0", + "label": "average" if kind_plot == "both" else None, + } + if kind_plot == "individual": + default_ice_lines_kws = {"alpha": 0.3, "linewidth": 0.5} + default_pd_lines_kws = {} + elif kind_plot == "both": + # by default, we need to distinguish the average line from + # the individual lines via color and line style + default_ice_lines_kws = { + "alpha": 0.3, + "linewidth": 0.5, + "color": "tab:blue", + } + default_pd_lines_kws = { + "color": "tab:orange", + "linestyle": "--", + } + else: + default_ice_lines_kws = {} + default_pd_lines_kws = {} + + default_ice_lines_kws = {**default_line_kws, **default_ice_lines_kws} + default_pd_lines_kws = {**default_line_kws, **default_pd_lines_kws} + + line_kw = _validate_style_kwargs(default_line_kws, line_kw) + + ice_lines_kw = _validate_style_kwargs( + _validate_style_kwargs(default_ice_lines_kws, line_kw), ice_lines_kw + ) + del ice_lines_kw["label"] + + pd_line_kw = _validate_style_kwargs( + _validate_style_kwargs(default_pd_lines_kws, line_kw), pd_line_kw + ) + + default_bar_kws = {"color": "C0"} + bar_kw = _validate_style_kwargs(default_bar_kws, bar_kw) + + default_heatmap_kw = {} + heatmap_kw = 
_validate_style_kwargs(default_heatmap_kw, heatmap_kw) + + self._plot_one_way_partial_dependence( + kind_plot, + preds, + avg_preds, + feature_values[0], + feature_idx, + n_ice_lines, + axi, + n_cols, + pd_plot_idx, + n_lines, + ice_lines_kw, + pd_line_kw, + cat[0], + bar_kw, + pdp_lim, + ) + else: + self._plot_two_way_partial_dependence( + avg_preds, + feature_values, + feature_idx, + axi, + pd_plot_idx, + Z_level, + contour_kw, + cat[0] and cat[1], + heatmap_kw, + ) + + return self diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5d89b30a3787f41d9a7fbca879b50a8bb3895716 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/__pycache__/test_boundary_decision_display.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/__pycache__/test_boundary_decision_display.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89faeb39cfc27a023452c10749ceeef46316b754 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/__pycache__/test_boundary_decision_display.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/__pycache__/test_plot_partial_dependence.cpython-310.pyc 
b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/__pycache__/test_plot_partial_dependence.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f787eaf37bba5ee456cecdc5be303115645ce2a0 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/__pycache__/test_plot_partial_dependence.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/test_boundary_decision_display.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/test_boundary_decision_display.py new file mode 100644 index 0000000000000000000000000000000000000000..d0aabbbb15db936031a9627a8a1fb16f2f1fdcba --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/test_boundary_decision_display.py @@ -0,0 +1,606 @@ +import warnings + +import numpy as np +import pytest + +from sklearn.base import BaseEstimator, ClassifierMixin +from sklearn.datasets import ( + load_diabetes, + load_iris, + make_classification, + make_multilabel_classification, +) +from sklearn.ensemble import IsolationForest +from sklearn.inspection import DecisionBoundaryDisplay +from sklearn.inspection._plot.decision_boundary import _check_boundary_response_method +from sklearn.linear_model import LogisticRegression +from sklearn.preprocessing import scale +from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor +from sklearn.utils._testing import ( + _convert_container, + assert_allclose, + assert_array_equal, +) + +X, y = make_classification( + n_informative=1, + n_redundant=1, + n_clusters_per_class=1, + n_features=2, + random_state=42, +) + + +def load_iris_2d_scaled(): + X, y = load_iris(return_X_y=True) + X = scale(X)[:, :2] + return X, y + + +@pytest.fixture(scope="module") +def fitted_clf(): + return LogisticRegression().fit(X, y) + + +def test_input_data_dimension(pyplot): + """Check that we raise an 
error when `X` does not have exactly 2 features.""" + X, y = make_classification(n_samples=10, n_features=4, random_state=0) + + clf = LogisticRegression().fit(X, y) + msg = "n_features must be equal to 2. Got 4 instead." + with pytest.raises(ValueError, match=msg): + DecisionBoundaryDisplay.from_estimator(estimator=clf, X=X) + + +def test_check_boundary_response_method_error(): + """Check that we raise an error for the cases not supported by + `_check_boundary_response_method`. + """ + + class MultiLabelClassifier: + classes_ = [np.array([0, 1]), np.array([0, 1])] + + err_msg = "Multi-label and multi-output multi-class classifiers are not supported" + with pytest.raises(ValueError, match=err_msg): + _check_boundary_response_method(MultiLabelClassifier(), "predict", None) + + class MulticlassClassifier: + classes_ = [0, 1, 2] + + err_msg = "Multiclass classifiers are only supported when `response_method` is" + for response_method in ("predict_proba", "decision_function"): + with pytest.raises(ValueError, match=err_msg): + _check_boundary_response_method( + MulticlassClassifier(), response_method, None + ) + + +@pytest.mark.parametrize( + "estimator, response_method, class_of_interest, expected_prediction_method", + [ + (DecisionTreeRegressor(), "predict", None, "predict"), + (DecisionTreeRegressor(), "auto", None, "predict"), + (LogisticRegression().fit(*load_iris_2d_scaled()), "predict", None, "predict"), + (LogisticRegression().fit(*load_iris_2d_scaled()), "auto", None, "predict"), + ( + LogisticRegression().fit(*load_iris_2d_scaled()), + "predict_proba", + 0, + "predict_proba", + ), + ( + LogisticRegression().fit(*load_iris_2d_scaled()), + "decision_function", + 0, + "decision_function", + ), + ( + LogisticRegression().fit(X, y), + "auto", + None, + ["decision_function", "predict_proba", "predict"], + ), + (LogisticRegression().fit(X, y), "predict", None, "predict"), + ( + LogisticRegression().fit(X, y), + ["predict_proba", "decision_function"], + None, + 
["predict_proba", "decision_function"], + ), + ], +) +def test_check_boundary_response_method( + estimator, response_method, class_of_interest, expected_prediction_method +): + """Check the behaviour of `_check_boundary_response_method` for the supported + cases. + """ + prediction_method = _check_boundary_response_method( + estimator, response_method, class_of_interest + ) + assert prediction_method == expected_prediction_method + + +@pytest.mark.parametrize("response_method", ["predict_proba", "decision_function"]) +def test_multiclass_error(pyplot, response_method): + """Check multiclass errors.""" + X, y = make_classification(n_classes=3, n_informative=3, random_state=0) + X = X[:, [0, 1]] + lr = LogisticRegression().fit(X, y) + + msg = ( + "Multiclass classifiers are only supported when `response_method` is 'predict'" + " or 'auto'" + ) + with pytest.raises(ValueError, match=msg): + DecisionBoundaryDisplay.from_estimator(lr, X, response_method=response_method) + + +@pytest.mark.parametrize("response_method", ["auto", "predict"]) +def test_multiclass(pyplot, response_method): + """Check multiclass gives expected results.""" + grid_resolution = 10 + eps = 1.0 + X, y = make_classification(n_classes=3, n_informative=3, random_state=0) + X = X[:, [0, 1]] + lr = LogisticRegression(random_state=0).fit(X, y) + + disp = DecisionBoundaryDisplay.from_estimator( + lr, X, response_method=response_method, grid_resolution=grid_resolution, eps=1.0 + ) + + x0_min, x0_max = X[:, 0].min() - eps, X[:, 0].max() + eps + x1_min, x1_max = X[:, 1].min() - eps, X[:, 1].max() + eps + xx0, xx1 = np.meshgrid( + np.linspace(x0_min, x0_max, grid_resolution), + np.linspace(x1_min, x1_max, grid_resolution), + ) + response = lr.predict(np.c_[xx0.ravel(), xx1.ravel()]) + assert_allclose(disp.response, response.reshape(xx0.shape)) + assert_allclose(disp.xx0, xx0) + assert_allclose(disp.xx1, xx1) + + +@pytest.mark.parametrize( + "kwargs, error_msg", + [ + ( + {"plot_method": "hello_world"}, + 
r"plot_method must be one of contourf, contour, pcolormesh. Got hello_world" + r" instead.", + ), + ( + {"grid_resolution": 1}, + r"grid_resolution must be greater than 1. Got 1 instead", + ), + ( + {"grid_resolution": -1}, + r"grid_resolution must be greater than 1. Got -1 instead", + ), + ({"eps": -1.1}, r"eps must be greater than or equal to 0. Got -1.1 instead"), + ], +) +def test_input_validation_errors(pyplot, kwargs, error_msg, fitted_clf): + """Check input validation from_estimator.""" + with pytest.raises(ValueError, match=error_msg): + DecisionBoundaryDisplay.from_estimator(fitted_clf, X, **kwargs) + + +def test_display_plot_input_error(pyplot, fitted_clf): + """Check input validation for `plot`.""" + disp = DecisionBoundaryDisplay.from_estimator(fitted_clf, X, grid_resolution=5) + + with pytest.raises(ValueError, match="plot_method must be 'contourf'"): + disp.plot(plot_method="hello_world") + + +@pytest.mark.parametrize( + "response_method", ["auto", "predict", "predict_proba", "decision_function"] +) +@pytest.mark.parametrize("plot_method", ["contourf", "contour"]) +def test_decision_boundary_display_classifier( + pyplot, fitted_clf, response_method, plot_method +): + """Check that decision boundary is correct.""" + fig, ax = pyplot.subplots() + eps = 2.0 + disp = DecisionBoundaryDisplay.from_estimator( + fitted_clf, + X, + grid_resolution=5, + response_method=response_method, + plot_method=plot_method, + eps=eps, + ax=ax, + ) + assert isinstance(disp.surface_, pyplot.matplotlib.contour.QuadContourSet) + assert disp.ax_ == ax + assert disp.figure_ == fig + + x0, x1 = X[:, 0], X[:, 1] + + x0_min, x0_max = x0.min() - eps, x0.max() + eps + x1_min, x1_max = x1.min() - eps, x1.max() + eps + + assert disp.xx0.min() == pytest.approx(x0_min) + assert disp.xx0.max() == pytest.approx(x0_max) + assert disp.xx1.min() == pytest.approx(x1_min) + assert disp.xx1.max() == pytest.approx(x1_max) + + fig2, ax2 = pyplot.subplots() + # change plotting method for second 
plot + disp.plot(plot_method="pcolormesh", ax=ax2, shading="auto") + assert isinstance(disp.surface_, pyplot.matplotlib.collections.QuadMesh) + assert disp.ax_ == ax2 + assert disp.figure_ == fig2 + + +@pytest.mark.parametrize("response_method", ["auto", "predict", "decision_function"]) +@pytest.mark.parametrize("plot_method", ["contourf", "contour"]) +def test_decision_boundary_display_outlier_detector( + pyplot, response_method, plot_method +): + """Check that decision boundary is correct for outlier detector.""" + fig, ax = pyplot.subplots() + eps = 2.0 + outlier_detector = IsolationForest(random_state=0).fit(X, y) + disp = DecisionBoundaryDisplay.from_estimator( + outlier_detector, + X, + grid_resolution=5, + response_method=response_method, + plot_method=plot_method, + eps=eps, + ax=ax, + ) + assert isinstance(disp.surface_, pyplot.matplotlib.contour.QuadContourSet) + assert disp.ax_ == ax + assert disp.figure_ == fig + + x0, x1 = X[:, 0], X[:, 1] + + x0_min, x0_max = x0.min() - eps, x0.max() + eps + x1_min, x1_max = x1.min() - eps, x1.max() + eps + + assert disp.xx0.min() == pytest.approx(x0_min) + assert disp.xx0.max() == pytest.approx(x0_max) + assert disp.xx1.min() == pytest.approx(x1_min) + assert disp.xx1.max() == pytest.approx(x1_max) + + +@pytest.mark.parametrize("response_method", ["auto", "predict"]) +@pytest.mark.parametrize("plot_method", ["contourf", "contour"]) +def test_decision_boundary_display_regressor(pyplot, response_method, plot_method): + """Check that we can display the decision boundary for a regressor.""" + X, y = load_diabetes(return_X_y=True) + X = X[:, :2] + tree = DecisionTreeRegressor().fit(X, y) + fig, ax = pyplot.subplots() + eps = 2.0 + disp = DecisionBoundaryDisplay.from_estimator( + tree, + X, + response_method=response_method, + ax=ax, + eps=eps, + plot_method=plot_method, + ) + assert isinstance(disp.surface_, pyplot.matplotlib.contour.QuadContourSet) + assert disp.ax_ == ax + assert disp.figure_ == fig + + x0, x1 = X[:, 
0], X[:, 1] + + x0_min, x0_max = x0.min() - eps, x0.max() + eps + x1_min, x1_max = x1.min() - eps, x1.max() + eps + + assert disp.xx0.min() == pytest.approx(x0_min) + assert disp.xx0.max() == pytest.approx(x0_max) + assert disp.xx1.min() == pytest.approx(x1_min) + assert disp.xx1.max() == pytest.approx(x1_max) + + fig2, ax2 = pyplot.subplots() + # change plotting method for second plot + disp.plot(plot_method="pcolormesh", ax=ax2, shading="auto") + assert isinstance(disp.surface_, pyplot.matplotlib.collections.QuadMesh) + assert disp.ax_ == ax2 + assert disp.figure_ == fig2 + + +@pytest.mark.parametrize( + "response_method, msg", + [ + ( + "predict_proba", + "MyClassifier has none of the following attributes: predict_proba", + ), + ( + "decision_function", + "MyClassifier has none of the following attributes: decision_function", + ), + ( + "auto", + ( + "MyClassifier has none of the following attributes: decision_function, " + "predict_proba, predict" + ), + ), + ( + "bad_method", + "MyClassifier has none of the following attributes: bad_method", + ), + ], +) +def test_error_bad_response(pyplot, response_method, msg): + """Check errors for bad response.""" + + class MyClassifier(ClassifierMixin, BaseEstimator): + def fit(self, X, y): + self.fitted_ = True + self.classes_ = [0, 1] + return self + + clf = MyClassifier().fit(X, y) + + with pytest.raises(AttributeError, match=msg): + DecisionBoundaryDisplay.from_estimator(clf, X, response_method=response_method) + + +@pytest.mark.parametrize("response_method", ["auto", "predict", "predict_proba"]) +def test_multilabel_classifier_error(pyplot, response_method): + """Check that multilabel classifier raises correct error.""" + X, y = make_multilabel_classification(random_state=0) + X = X[:, :2] + tree = DecisionTreeClassifier().fit(X, y) + + msg = "Multi-label and multi-output multi-class classifiers are not supported" + with pytest.raises(ValueError, match=msg): + DecisionBoundaryDisplay.from_estimator( + tree, + X, + 
response_method=response_method, + ) + + +@pytest.mark.parametrize("response_method", ["auto", "predict", "predict_proba"]) +def test_multi_output_multi_class_classifier_error(pyplot, response_method): + """Check that multi-output multi-class classifier raises correct error.""" + X = np.asarray([[0, 1], [1, 2]]) + y = np.asarray([["tree", "cat"], ["cat", "tree"]]) + tree = DecisionTreeClassifier().fit(X, y) + + msg = "Multi-label and multi-output multi-class classifiers are not supported" + with pytest.raises(ValueError, match=msg): + DecisionBoundaryDisplay.from_estimator( + tree, + X, + response_method=response_method, + ) + + +def test_multioutput_regressor_error(pyplot): + """Check that multioutput regressor raises correct error.""" + X = np.asarray([[0, 1], [1, 2]]) + y = np.asarray([[0, 1], [4, 1]]) + tree = DecisionTreeRegressor().fit(X, y) + with pytest.raises(ValueError, match="Multi-output regressors are not supported"): + DecisionBoundaryDisplay.from_estimator(tree, X, response_method="predict") + + +@pytest.mark.parametrize( + "response_method", + ["predict_proba", "decision_function", ["predict_proba", "predict"]], +) +def test_regressor_unsupported_response(pyplot, response_method): + """Check that we can display the decision boundary for a regressor.""" + X, y = load_diabetes(return_X_y=True) + X = X[:, :2] + tree = DecisionTreeRegressor().fit(X, y) + err_msg = "should either be a classifier to be used with response_method" + with pytest.raises(ValueError, match=err_msg): + DecisionBoundaryDisplay.from_estimator(tree, X, response_method=response_method) + + +@pytest.mark.filterwarnings( + # We expect to raise the following warning because the classifier is fit on a + # NumPy array + "ignore:X has feature names, but LogisticRegression was fitted without" +) +def test_dataframe_labels_used(pyplot, fitted_clf): + """Check that column names are used for pandas.""" + pd = pytest.importorskip("pandas") + df = pd.DataFrame(X, columns=["col_x", "col_y"]) + + 
# pandas column names are used by default + _, ax = pyplot.subplots() + disp = DecisionBoundaryDisplay.from_estimator(fitted_clf, df, ax=ax) + assert ax.get_xlabel() == "col_x" + assert ax.get_ylabel() == "col_y" + + # second call to plot will have the names + fig, ax = pyplot.subplots() + disp.plot(ax=ax) + assert ax.get_xlabel() == "col_x" + assert ax.get_ylabel() == "col_y" + + # axes with a label will not get overridden + fig, ax = pyplot.subplots() + ax.set(xlabel="hello", ylabel="world") + disp.plot(ax=ax) + assert ax.get_xlabel() == "hello" + assert ax.get_ylabel() == "world" + + # labels get overridden only if provided to the `plot` method + disp.plot(ax=ax, xlabel="overwritten_x", ylabel="overwritten_y") + assert ax.get_xlabel() == "overwritten_x" + assert ax.get_ylabel() == "overwritten_y" + + # labels do not get inferred if provided to `from_estimator` + _, ax = pyplot.subplots() + disp = DecisionBoundaryDisplay.from_estimator( + fitted_clf, df, ax=ax, xlabel="overwritten_x", ylabel="overwritten_y" + ) + assert ax.get_xlabel() == "overwritten_x" + assert ax.get_ylabel() == "overwritten_y" + + +def test_string_target(pyplot): + """Check that decision boundary works with classifiers trained on string labels.""" + iris = load_iris() + X = iris.data[:, [0, 1]] + + # Use strings as target + y = iris.target_names[iris.target] + log_reg = LogisticRegression().fit(X, y) + + # Does not raise + DecisionBoundaryDisplay.from_estimator( + log_reg, + X, + grid_resolution=5, + response_method="predict", + ) + + +@pytest.mark.parametrize("constructor_name", ["pandas", "polars"]) +def test_dataframe_support(pyplot, constructor_name): + """Check that passing a dataframe at fit and to the Display does not + raise warnings. 
+ + Non-regression test for: + * https://github.com/scikit-learn/scikit-learn/issues/23311 + * https://github.com/scikit-learn/scikit-learn/issues/28717 + """ + df = _convert_container( + X, constructor_name=constructor_name, columns_name=["col_x", "col_y"] + ) + estimator = LogisticRegression().fit(df, y) + + with warnings.catch_warnings(): + # no warnings linked to feature names validation should be raised + warnings.simplefilter("error", UserWarning) + DecisionBoundaryDisplay.from_estimator(estimator, df, response_method="predict") + + +@pytest.mark.parametrize("response_method", ["predict_proba", "decision_function"]) +def test_class_of_interest_binary(pyplot, response_method): + """Check the behaviour of passing `class_of_interest` for plotting the output of + `predict_proba` and `decision_function` in the binary case. + """ + iris = load_iris() + X = iris.data[:100, :2] + y = iris.target[:100] + assert_array_equal(np.unique(y), [0, 1]) + + estimator = LogisticRegression().fit(X, y) + # We will check that `class_of_interest=None` is equivalent to + # `class_of_interest=estimator.classes_[1]` + disp_default = DecisionBoundaryDisplay.from_estimator( + estimator, + X, + response_method=response_method, + class_of_interest=None, + ) + disp_class_1 = DecisionBoundaryDisplay.from_estimator( + estimator, + X, + response_method=response_method, + class_of_interest=estimator.classes_[1], + ) + + assert_allclose(disp_default.response, disp_class_1.response) + + # we can check that `_get_response_values` modifies the response when targeting + # the other class, i.e. 1 - p(y=1|x) for `predict_proba` and -decision_function + # for `decision_function`. 
+ disp_class_0 = DecisionBoundaryDisplay.from_estimator( + estimator, + X, + response_method=response_method, + class_of_interest=estimator.classes_[0], + ) + + if response_method == "predict_proba": + assert_allclose(disp_default.response, 1 - disp_class_0.response) + else: + assert response_method == "decision_function" + assert_allclose(disp_default.response, -disp_class_0.response) + + +@pytest.mark.parametrize("response_method", ["predict_proba", "decision_function"]) +def test_class_of_interest_multiclass(pyplot, response_method): + """Check the behaviour of passing `class_of_interest` for plotting the output of + `predict_proba` and `decision_function` in the multiclass case. + """ + iris = load_iris() + X = iris.data[:, :2] + y = iris.target # the target are numerical labels + class_of_interest_idx = 2 + + estimator = LogisticRegression().fit(X, y) + disp = DecisionBoundaryDisplay.from_estimator( + estimator, + X, + response_method=response_method, + class_of_interest=class_of_interest_idx, + ) + + # we will check that we plot the expected values as response + grid = np.concatenate([disp.xx0.reshape(-1, 1), disp.xx1.reshape(-1, 1)], axis=1) + response = getattr(estimator, response_method)(grid)[:, class_of_interest_idx] + assert_allclose(response.reshape(*disp.response.shape), disp.response) + + # make the same test but this time using target as strings + y = iris.target_names[iris.target] + estimator = LogisticRegression().fit(X, y) + + disp = DecisionBoundaryDisplay.from_estimator( + estimator, + X, + response_method=response_method, + class_of_interest=iris.target_names[class_of_interest_idx], + ) + + grid = np.concatenate([disp.xx0.reshape(-1, 1), disp.xx1.reshape(-1, 1)], axis=1) + response = getattr(estimator, response_method)(grid)[:, class_of_interest_idx] + assert_allclose(response.reshape(*disp.response.shape), disp.response) + + # check that we raise an error for unknown labels + # this test should already be handled in `_get_response_values` but 
we can have this + # test here as well + err_msg = "class_of_interest=2 is not a valid label: It should be one of" + with pytest.raises(ValueError, match=err_msg): + DecisionBoundaryDisplay.from_estimator( + estimator, + X, + response_method=response_method, + class_of_interest=class_of_interest_idx, + ) + + # TODO: remove this test when we handle multiclass with class_of_interest=None + # by showing the max of the decision function or the max of the predicted + # probabilities. + err_msg = "Multiclass classifiers are only supported" + with pytest.raises(ValueError, match=err_msg): + DecisionBoundaryDisplay.from_estimator( + estimator, + X, + response_method=response_method, + class_of_interest=None, + ) + + +def test_subclass_named_constructors_return_type_is_subclass(pyplot): + """Check that named constructors return the correct type when subclassed. + + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/pull/27675 + """ + clf = LogisticRegression().fit(X, y) + + class SubclassOfDisplay(DecisionBoundaryDisplay): + pass + + curve = SubclassOfDisplay.from_estimator(estimator=clf, X=X) + + assert isinstance(curve, SubclassOfDisplay) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/test_plot_partial_dependence.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/test_plot_partial_dependence.py new file mode 100644 index 0000000000000000000000000000000000000000..7953f367ca38b0450edee87e8944265a78e62dc0 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/test_plot_partial_dependence.py @@ -0,0 +1,1132 @@ +import numpy as np +import pytest +from numpy.testing import assert_allclose +from scipy.stats.mstats import mquantiles + +from sklearn.compose import make_column_transformer +from sklearn.datasets import ( + load_diabetes, + load_iris, + make_classification, + make_regression, +) +from sklearn.ensemble import GradientBoostingClassifier, 
GradientBoostingRegressor +from sklearn.inspection import PartialDependenceDisplay +from sklearn.linear_model import LinearRegression +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import OneHotEncoder +from sklearn.utils._testing import _convert_container + + +@pytest.fixture(scope="module") +def diabetes(): + # diabetes dataset, subsampled for speed + data = load_diabetes() + data.data = data.data[:50] + data.target = data.target[:50] + return data + + +@pytest.fixture(scope="module") +def clf_diabetes(diabetes): + clf = GradientBoostingRegressor(n_estimators=10, random_state=1) + clf.fit(diabetes.data, diabetes.target) + return clf + + +@pytest.mark.parametrize("grid_resolution", [10, 20]) +def test_plot_partial_dependence(grid_resolution, pyplot, clf_diabetes, diabetes): + # Test partial dependence plot function. + # Use columns 0 & 2 as 1 is not quantitative (sex) + feature_names = diabetes.feature_names + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + [0, 2, (0, 2)], + grid_resolution=grid_resolution, + feature_names=feature_names, + contour_kw={"cmap": "jet"}, + ) + fig = pyplot.gcf() + axs = fig.get_axes() + assert disp.figure_ is fig + assert len(axs) == 4 + + assert disp.bounding_ax_ is not None + assert disp.axes_.shape == (1, 3) + assert disp.lines_.shape == (1, 3) + assert disp.contours_.shape == (1, 3) + assert disp.deciles_vlines_.shape == (1, 3) + assert disp.deciles_hlines_.shape == (1, 3) + + assert disp.lines_[0, 2] is None + assert disp.contours_[0, 0] is None + assert disp.contours_[0, 1] is None + + # deciles lines: always show on xaxis, only show on yaxis if 2-way PDP + for i in range(3): + assert disp.deciles_vlines_[0, i] is not None + assert disp.deciles_hlines_[0, 0] is None + assert disp.deciles_hlines_[0, 1] is None + assert disp.deciles_hlines_[0, 2] is not None + + assert disp.features == [(0,), (2,), (0, 2)] + assert np.all(disp.feature_names == feature_names) + assert 
len(disp.deciles) == 2 + for i in [0, 2]: + assert_allclose( + disp.deciles[i], + mquantiles(diabetes.data[:, i], prob=np.arange(0.1, 1.0, 0.1)), + ) + + single_feature_positions = [(0, (0, 0)), (2, (0, 1))] + expected_ylabels = ["Partial dependence", ""] + + for i, (feat_col, pos) in enumerate(single_feature_positions): + ax = disp.axes_[pos] + assert ax.get_ylabel() == expected_ylabels[i] + assert ax.get_xlabel() == diabetes.feature_names[feat_col] + + line = disp.lines_[pos] + + avg_preds = disp.pd_results[i] + assert avg_preds.average.shape == (1, grid_resolution) + target_idx = disp.target_idx + + line_data = line.get_data() + assert_allclose(line_data[0], avg_preds["grid_values"][0]) + assert_allclose(line_data[1], avg_preds.average[target_idx].ravel()) + + # two feature position + ax = disp.axes_[0, 2] + coutour = disp.contours_[0, 2] + assert coutour.get_cmap().name == "jet" + assert ax.get_xlabel() == diabetes.feature_names[0] + assert ax.get_ylabel() == diabetes.feature_names[2] + + +@pytest.mark.parametrize( + "kind, centered, subsample, shape", + [ + ("average", False, None, (1, 3)), + ("individual", False, None, (1, 3, 50)), + ("both", False, None, (1, 3, 51)), + ("individual", False, 20, (1, 3, 20)), + ("both", False, 20, (1, 3, 21)), + ("individual", False, 0.5, (1, 3, 25)), + ("both", False, 0.5, (1, 3, 26)), + ("average", True, None, (1, 3)), + ("individual", True, None, (1, 3, 50)), + ("both", True, None, (1, 3, 51)), + ("individual", True, 20, (1, 3, 20)), + ("both", True, 20, (1, 3, 21)), + ], +) +def test_plot_partial_dependence_kind( + pyplot, + kind, + centered, + subsample, + shape, + clf_diabetes, + diabetes, +): + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + [0, 1, 2], + kind=kind, + centered=centered, + subsample=subsample, + ) + + assert disp.axes_.shape == (1, 3) + assert disp.lines_.shape == shape + assert disp.contours_.shape == (1, 3) + + assert disp.contours_[0, 0] is None + assert 
disp.contours_[0, 1] is None + assert disp.contours_[0, 2] is None + + if centered: + assert all([ln._y[0] == 0.0 for ln in disp.lines_.ravel() if ln is not None]) + else: + assert all([ln._y[0] != 0.0 for ln in disp.lines_.ravel() if ln is not None]) + + +@pytest.mark.parametrize( + "input_type, feature_names_type", + [ + ("dataframe", None), + ("dataframe", "list"), + ("list", "list"), + ("array", "list"), + ("dataframe", "array"), + ("list", "array"), + ("array", "array"), + ("dataframe", "series"), + ("list", "series"), + ("array", "series"), + ("dataframe", "index"), + ("list", "index"), + ("array", "index"), + ], +) +def test_plot_partial_dependence_str_features( + pyplot, + clf_diabetes, + diabetes, + input_type, + feature_names_type, +): + if input_type == "dataframe": + pd = pytest.importorskip("pandas") + X = pd.DataFrame(diabetes.data, columns=diabetes.feature_names) + elif input_type == "list": + X = diabetes.data.tolist() + else: + X = diabetes.data + + if feature_names_type is None: + feature_names = None + else: + feature_names = _convert_container(diabetes.feature_names, feature_names_type) + + grid_resolution = 25 + # check with str features and array feature names and single column + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + X, + [("age", "bmi"), "bmi"], + grid_resolution=grid_resolution, + feature_names=feature_names, + n_cols=1, + line_kw={"alpha": 0.8}, + ) + fig = pyplot.gcf() + axs = fig.get_axes() + assert len(axs) == 3 + + assert disp.figure_ is fig + assert disp.axes_.shape == (2, 1) + assert disp.lines_.shape == (2, 1) + assert disp.contours_.shape == (2, 1) + assert disp.deciles_vlines_.shape == (2, 1) + assert disp.deciles_hlines_.shape == (2, 1) + + assert disp.lines_[0, 0] is None + assert disp.deciles_vlines_[0, 0] is not None + assert disp.deciles_hlines_[0, 0] is not None + assert disp.contours_[1, 0] is None + assert disp.deciles_hlines_[1, 0] is None + assert disp.deciles_vlines_[1, 0] is not None + + # 
line + ax = disp.axes_[1, 0] + assert ax.get_xlabel() == "bmi" + assert ax.get_ylabel() == "Partial dependence" + + line = disp.lines_[1, 0] + avg_preds = disp.pd_results[1] + target_idx = disp.target_idx + assert line.get_alpha() == 0.8 + + line_data = line.get_data() + assert_allclose(line_data[0], avg_preds["grid_values"][0]) + assert_allclose(line_data[1], avg_preds.average[target_idx].ravel()) + + # contour + ax = disp.axes_[0, 0] + assert ax.get_xlabel() == "age" + assert ax.get_ylabel() == "bmi" + + +def test_plot_partial_dependence_custom_axes(pyplot, clf_diabetes, diabetes): + grid_resolution = 25 + fig, (ax1, ax2) = pyplot.subplots(1, 2) + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + ["age", ("age", "bmi")], + grid_resolution=grid_resolution, + feature_names=diabetes.feature_names, + ax=[ax1, ax2], + ) + assert fig is disp.figure_ + assert disp.bounding_ax_ is None + assert disp.axes_.shape == (2,) + assert disp.axes_[0] is ax1 + assert disp.axes_[1] is ax2 + + ax = disp.axes_[0] + assert ax.get_xlabel() == "age" + assert ax.get_ylabel() == "Partial dependence" + + line = disp.lines_[0] + avg_preds = disp.pd_results[0] + target_idx = disp.target_idx + + line_data = line.get_data() + assert_allclose(line_data[0], avg_preds["grid_values"][0]) + assert_allclose(line_data[1], avg_preds.average[target_idx].ravel()) + + # contour + ax = disp.axes_[1] + assert ax.get_xlabel() == "age" + assert ax.get_ylabel() == "bmi" + + +@pytest.mark.parametrize( + "kind, lines", [("average", 1), ("individual", 50), ("both", 51)] +) +def test_plot_partial_dependence_passing_numpy_axes( + pyplot, clf_diabetes, diabetes, kind, lines +): + grid_resolution = 25 + feature_names = diabetes.feature_names + disp1 = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + ["age", "bmi"], + kind=kind, + grid_resolution=grid_resolution, + feature_names=feature_names, + ) + assert disp1.axes_.shape == (1, 2) + assert disp1.axes_[0, 
0].get_ylabel() == "Partial dependence" + assert disp1.axes_[0, 1].get_ylabel() == "" + assert len(disp1.axes_[0, 0].get_lines()) == lines + assert len(disp1.axes_[0, 1].get_lines()) == lines + + lr = LinearRegression() + lr.fit(diabetes.data, diabetes.target) + + disp2 = PartialDependenceDisplay.from_estimator( + lr, + diabetes.data, + ["age", "bmi"], + kind=kind, + grid_resolution=grid_resolution, + feature_names=feature_names, + ax=disp1.axes_, + ) + + assert np.all(disp1.axes_ == disp2.axes_) + assert len(disp2.axes_[0, 0].get_lines()) == 2 * lines + assert len(disp2.axes_[0, 1].get_lines()) == 2 * lines + + +@pytest.mark.parametrize("nrows, ncols", [(2, 2), (3, 1)]) +def test_plot_partial_dependence_incorrent_num_axes( + pyplot, clf_diabetes, diabetes, nrows, ncols +): + grid_resolution = 5 + fig, axes = pyplot.subplots(nrows, ncols) + axes_formats = [list(axes.ravel()), tuple(axes.ravel()), axes] + + msg = "Expected ax to have 2 axes, got {}".format(nrows * ncols) + + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + ["age", "bmi"], + grid_resolution=grid_resolution, + feature_names=diabetes.feature_names, + ) + + for ax_format in axes_formats: + with pytest.raises(ValueError, match=msg): + PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + ["age", "bmi"], + grid_resolution=grid_resolution, + feature_names=diabetes.feature_names, + ax=ax_format, + ) + + # with axes object + with pytest.raises(ValueError, match=msg): + disp.plot(ax=ax_format) + + +def test_plot_partial_dependence_with_same_axes(pyplot, clf_diabetes, diabetes): + # The first call to plot_partial_dependence will create two new axes to + # place in the space of the passed in axes, which results in a total of + # three axes in the figure. + # Currently the API does not allow for the second call to + # plot_partial_dependence to use the same axes again, because it will + # create two new axes in the space resulting in five axes. 
To get the + # expected behavior one needs to pass the generated axes into the second + # call: + # disp1 = plot_partial_dependence(...) + # disp2 = plot_partial_dependence(..., ax=disp1.axes_) + + grid_resolution = 25 + fig, ax = pyplot.subplots() + PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + ["age", "bmi"], + grid_resolution=grid_resolution, + feature_names=diabetes.feature_names, + ax=ax, + ) + + msg = ( + "The ax was already used in another plot function, please set " + "ax=display.axes_ instead" + ) + + with pytest.raises(ValueError, match=msg): + PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + ["age", "bmi"], + grid_resolution=grid_resolution, + feature_names=diabetes.feature_names, + ax=ax, + ) + + +def test_plot_partial_dependence_feature_name_reuse(pyplot, clf_diabetes, diabetes): + # second call to plot does not change the feature names from the first + # call + + feature_names = diabetes.feature_names + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + [0, 1], + grid_resolution=10, + feature_names=feature_names, + ) + + PartialDependenceDisplay.from_estimator( + clf_diabetes, diabetes.data, [0, 1], grid_resolution=10, ax=disp.axes_ + ) + + for i, ax in enumerate(disp.axes_.ravel()): + assert ax.get_xlabel() == feature_names[i] + + +def test_plot_partial_dependence_multiclass(pyplot): + grid_resolution = 25 + clf_int = GradientBoostingClassifier(n_estimators=10, random_state=1) + iris = load_iris() + + # Test partial dependence plot function on multi-class input. 
+ clf_int.fit(iris.data, iris.target) + disp_target_0 = PartialDependenceDisplay.from_estimator( + clf_int, iris.data, [0, 3], target=0, grid_resolution=grid_resolution + ) + assert disp_target_0.figure_ is pyplot.gcf() + assert disp_target_0.axes_.shape == (1, 2) + assert disp_target_0.lines_.shape == (1, 2) + assert disp_target_0.contours_.shape == (1, 2) + assert disp_target_0.deciles_vlines_.shape == (1, 2) + assert disp_target_0.deciles_hlines_.shape == (1, 2) + assert all(c is None for c in disp_target_0.contours_.flat) + assert disp_target_0.target_idx == 0 + + # now with symbol labels + target = iris.target_names[iris.target] + clf_symbol = GradientBoostingClassifier(n_estimators=10, random_state=1) + clf_symbol.fit(iris.data, target) + disp_symbol = PartialDependenceDisplay.from_estimator( + clf_symbol, iris.data, [0, 3], target="setosa", grid_resolution=grid_resolution + ) + assert disp_symbol.figure_ is pyplot.gcf() + assert disp_symbol.axes_.shape == (1, 2) + assert disp_symbol.lines_.shape == (1, 2) + assert disp_symbol.contours_.shape == (1, 2) + assert disp_symbol.deciles_vlines_.shape == (1, 2) + assert disp_symbol.deciles_hlines_.shape == (1, 2) + assert all(c is None for c in disp_symbol.contours_.flat) + assert disp_symbol.target_idx == 0 + + for int_result, symbol_result in zip( + disp_target_0.pd_results, disp_symbol.pd_results + ): + assert_allclose(int_result.average, symbol_result.average) + assert_allclose(int_result["grid_values"], symbol_result["grid_values"]) + + # check that the pd plots are different for another target + disp_target_1 = PartialDependenceDisplay.from_estimator( + clf_int, iris.data, [0, 3], target=1, grid_resolution=grid_resolution + ) + target_0_data_y = disp_target_0.lines_[0, 0].get_data()[1] + target_1_data_y = disp_target_1.lines_[0, 0].get_data()[1] + assert any(target_0_data_y != target_1_data_y) + + +multioutput_regression_data = make_regression(n_samples=50, n_targets=2, random_state=0) + + 
+@pytest.mark.parametrize("target", [0, 1]) +def test_plot_partial_dependence_multioutput(pyplot, target): + # Test partial dependence plot function on multi-output input. + X, y = multioutput_regression_data + clf = LinearRegression().fit(X, y) + + grid_resolution = 25 + disp = PartialDependenceDisplay.from_estimator( + clf, X, [0, 1], target=target, grid_resolution=grid_resolution + ) + fig = pyplot.gcf() + axs = fig.get_axes() + assert len(axs) == 3 + assert disp.target_idx == target + assert disp.bounding_ax_ is not None + + positions = [(0, 0), (0, 1)] + expected_label = ["Partial dependence", ""] + + for i, pos in enumerate(positions): + ax = disp.axes_[pos] + assert ax.get_ylabel() == expected_label[i] + assert ax.get_xlabel() == f"x{i}" + + +def test_plot_partial_dependence_dataframe(pyplot, clf_diabetes, diabetes): + pd = pytest.importorskip("pandas") + df = pd.DataFrame(diabetes.data, columns=diabetes.feature_names) + + grid_resolution = 25 + + PartialDependenceDisplay.from_estimator( + clf_diabetes, + df, + ["bp", "s1"], + grid_resolution=grid_resolution, + feature_names=df.columns.tolist(), + ) + + +dummy_classification_data = make_classification(random_state=0) + + +@pytest.mark.parametrize( + "data, params, err_msg", + [ + ( + multioutput_regression_data, + {"target": None, "features": [0]}, + "target must be specified for multi-output", + ), + ( + multioutput_regression_data, + {"target": -1, "features": [0]}, + r"target must be in \[0, n_tasks\]", + ), + ( + multioutput_regression_data, + {"target": 100, "features": [0]}, + r"target must be in \[0, n_tasks\]", + ), + ( + dummy_classification_data, + {"features": ["foobar"], "feature_names": None}, + "Feature 'foobar' not in feature_names", + ), + ( + dummy_classification_data, + {"features": ["foobar"], "feature_names": ["abcd", "def"]}, + "Feature 'foobar' not in feature_names", + ), + ( + dummy_classification_data, + {"features": [(1, 2, 3)]}, + "Each entry in features must be either an int, ", + 
), + ( + dummy_classification_data, + {"features": [1, {}]}, + "Each entry in features must be either an int, ", + ), + ( + dummy_classification_data, + {"features": [tuple()]}, + "Each entry in features must be either an int, ", + ), + ( + dummy_classification_data, + {"features": [123], "feature_names": ["blahblah"]}, + "All entries of features must be less than ", + ), + ( + dummy_classification_data, + {"features": [0, 1, 2], "feature_names": ["a", "b", "a"]}, + "feature_names should not contain duplicates", + ), + ( + dummy_classification_data, + {"features": [1, 2], "kind": ["both"]}, + "When `kind` is provided as a list of strings, it should contain", + ), + ( + dummy_classification_data, + {"features": [1], "subsample": -1}, + "When an integer, subsample=-1 should be positive.", + ), + ( + dummy_classification_data, + {"features": [1], "subsample": 1.2}, + r"When a floating-point, subsample=1.2 should be in the \(0, 1\) range", + ), + ( + dummy_classification_data, + {"features": [1, 2], "categorical_features": [1.0, 2.0]}, + "Expected `categorical_features` to be an array-like of boolean,", + ), + ( + dummy_classification_data, + {"features": [(1, 2)], "categorical_features": [2]}, + "Two-way partial dependence plots are not supported for pairs", + ), + ( + dummy_classification_data, + {"features": [1], "categorical_features": [1], "kind": "individual"}, + "It is not possible to display individual effects", + ), + ], +) +def test_plot_partial_dependence_error(pyplot, data, params, err_msg): + X, y = data + estimator = LinearRegression().fit(X, y) + + with pytest.raises(ValueError, match=err_msg): + PartialDependenceDisplay.from_estimator(estimator, X, **params) + + +@pytest.mark.parametrize( + "params, err_msg", + [ + ({"target": 4, "features": [0]}, "target not in est.classes_, got 4"), + ({"target": None, "features": [0]}, "target must be specified for multi-class"), + ( + {"target": 1, "features": [4.5]}, + "Each entry in features must be either an 
int,", + ), + ], +) +def test_plot_partial_dependence_multiclass_error(pyplot, params, err_msg): + iris = load_iris() + clf = GradientBoostingClassifier(n_estimators=10, random_state=1) + clf.fit(iris.data, iris.target) + + with pytest.raises(ValueError, match=err_msg): + PartialDependenceDisplay.from_estimator(clf, iris.data, **params) + + +def test_plot_partial_dependence_does_not_override_ylabel( + pyplot, clf_diabetes, diabetes +): + # Non-regression test to be sure to not override the ylabel if it has been + # See https://github.com/scikit-learn/scikit-learn/issues/15772 + _, axes = pyplot.subplots(1, 2) + axes[0].set_ylabel("Hello world") + PartialDependenceDisplay.from_estimator( + clf_diabetes, diabetes.data, [0, 1], ax=axes + ) + + assert axes[0].get_ylabel() == "Hello world" + assert axes[1].get_ylabel() == "Partial dependence" + + +@pytest.mark.parametrize( + "categorical_features, array_type", + [ + (["col_A", "col_C"], "dataframe"), + ([0, 2], "array"), + ([True, False, True], "array"), + ], +) +def test_plot_partial_dependence_with_categorical( + pyplot, categorical_features, array_type +): + X = [[1, 1, "A"], [2, 0, "C"], [3, 2, "B"]] + column_name = ["col_A", "col_B", "col_C"] + X = _convert_container(X, array_type, columns_name=column_name) + y = np.array([1.2, 0.5, 0.45]).T + + preprocessor = make_column_transformer((OneHotEncoder(), categorical_features)) + model = make_pipeline(preprocessor, LinearRegression()) + model.fit(X, y) + + # single feature + disp = PartialDependenceDisplay.from_estimator( + model, + X, + features=["col_C"], + feature_names=column_name, + categorical_features=categorical_features, + ) + + assert disp.figure_ is pyplot.gcf() + assert disp.bars_.shape == (1, 1) + assert disp.bars_[0][0] is not None + assert disp.lines_.shape == (1, 1) + assert disp.lines_[0][0] is None + assert disp.contours_.shape == (1, 1) + assert disp.contours_[0][0] is None + assert disp.deciles_vlines_.shape == (1, 1) + assert 
disp.deciles_vlines_[0][0] is None + assert disp.deciles_hlines_.shape == (1, 1) + assert disp.deciles_hlines_[0][0] is None + assert disp.axes_[0, 0].get_legend() is None + + # interaction between two features + disp = PartialDependenceDisplay.from_estimator( + model, + X, + features=[("col_A", "col_C")], + feature_names=column_name, + categorical_features=categorical_features, + ) + + assert disp.figure_ is pyplot.gcf() + assert disp.bars_.shape == (1, 1) + assert disp.bars_[0][0] is None + assert disp.lines_.shape == (1, 1) + assert disp.lines_[0][0] is None + assert disp.contours_.shape == (1, 1) + assert disp.contours_[0][0] is None + assert disp.deciles_vlines_.shape == (1, 1) + assert disp.deciles_vlines_[0][0] is None + assert disp.deciles_hlines_.shape == (1, 1) + assert disp.deciles_hlines_[0][0] is None + assert disp.axes_[0, 0].get_legend() is None + + +def test_plot_partial_dependence_legend(pyplot): + pd = pytest.importorskip("pandas") + X = pd.DataFrame( + { + "col_A": ["A", "B", "C"], + "col_B": [1, 0, 2], + "col_C": ["C", "B", "A"], + } + ) + y = np.array([1.2, 0.5, 0.45]).T + + categorical_features = ["col_A", "col_C"] + preprocessor = make_column_transformer((OneHotEncoder(), categorical_features)) + model = make_pipeline(preprocessor, LinearRegression()) + model.fit(X, y) + + disp = PartialDependenceDisplay.from_estimator( + model, + X, + features=["col_B", "col_C"], + categorical_features=categorical_features, + kind=["both", "average"], + ) + + legend_text = disp.axes_[0, 0].get_legend().get_texts() + assert len(legend_text) == 1 + assert legend_text[0].get_text() == "average" + assert disp.axes_[0, 1].get_legend() is None + + +@pytest.mark.parametrize( + "kind, expected_shape", + [("average", (1, 2)), ("individual", (1, 2, 20)), ("both", (1, 2, 21))], +) +def test_plot_partial_dependence_subsampling( + pyplot, clf_diabetes, diabetes, kind, expected_shape +): + # check that the subsampling is properly working + # non-regression test for: + # 
https://github.com/scikit-learn/scikit-learn/pull/18359 + matplotlib = pytest.importorskip("matplotlib") + grid_resolution = 25 + feature_names = diabetes.feature_names + + disp1 = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + ["age", "bmi"], + kind=kind, + grid_resolution=grid_resolution, + feature_names=feature_names, + subsample=20, + random_state=0, + ) + + assert disp1.lines_.shape == expected_shape + assert all( + [isinstance(line, matplotlib.lines.Line2D) for line in disp1.lines_.ravel()] + ) + + +@pytest.mark.parametrize( + "kind, line_kw, label", + [ + ("individual", {}, None), + ("individual", {"label": "xxx"}, None), + ("average", {}, None), + ("average", {"label": "xxx"}, "xxx"), + ("both", {}, "average"), + ("both", {"label": "xxx"}, "xxx"), + ], +) +def test_partial_dependence_overwrite_labels( + pyplot, + clf_diabetes, + diabetes, + kind, + line_kw, + label, +): + """Test that make sure that we can overwrite the label of the PDP plot""" + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + [0, 2], + grid_resolution=25, + feature_names=diabetes.feature_names, + kind=kind, + line_kw=line_kw, + ) + + for ax in disp.axes_.ravel(): + if label is None: + assert ax.get_legend() is None + else: + legend_text = ax.get_legend().get_texts() + assert len(legend_text) == 1 + assert legend_text[0].get_text() == label + + +@pytest.mark.parametrize( + "categorical_features, array_type", + [ + (["col_A", "col_C"], "dataframe"), + ([0, 2], "array"), + ([True, False, True], "array"), + ], +) +def test_grid_resolution_with_categorical(pyplot, categorical_features, array_type): + """Check that we raise a ValueError when the grid_resolution is too small + respect to the number of categories in the categorical features targeted. 
+ """ + X = [["A", 1, "A"], ["B", 0, "C"], ["C", 2, "B"]] + column_name = ["col_A", "col_B", "col_C"] + X = _convert_container(X, array_type, columns_name=column_name) + y = np.array([1.2, 0.5, 0.45]).T + + preprocessor = make_column_transformer((OneHotEncoder(), categorical_features)) + model = make_pipeline(preprocessor, LinearRegression()) + model.fit(X, y) + + err_msg = ( + "resolution of the computed grid is less than the minimum number of categories" + ) + with pytest.raises(ValueError, match=err_msg): + PartialDependenceDisplay.from_estimator( + model, + X, + features=["col_C"], + feature_names=column_name, + categorical_features=categorical_features, + grid_resolution=2, + ) + + +@pytest.mark.parametrize("kind", ["individual", "average", "both"]) +@pytest.mark.parametrize("centered", [True, False]) +def test_partial_dependence_plot_limits_one_way( + pyplot, clf_diabetes, diabetes, kind, centered +): + """Check that the PD limit on the plots are properly set on one-way plots.""" + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + features=(0, 1), + kind=kind, + grid_resolution=25, + feature_names=diabetes.feature_names, + ) + + range_pd = np.array([-1, 1], dtype=np.float64) + for pd in disp.pd_results: + if "average" in pd: + pd["average"][...] = range_pd[1] + pd["average"][0, 0] = range_pd[0] + if "individual" in pd: + pd["individual"][...] 
= range_pd[1] + pd["individual"][0, 0, 0] = range_pd[0] + + disp.plot(centered=centered) + # check that we anchor to zero x-axis when centering + y_lim = range_pd - range_pd[0] if centered else range_pd + padding = 0.05 * (y_lim[1] - y_lim[0]) + y_lim[0] -= padding + y_lim[1] += padding + for ax in disp.axes_.ravel(): + assert_allclose(ax.get_ylim(), y_lim) + + +@pytest.mark.parametrize("centered", [True, False]) +def test_partial_dependence_plot_limits_two_way( + pyplot, clf_diabetes, diabetes, centered +): + """Check that the PD limit on the plots are properly set on two-way plots.""" + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + features=[(0, 1)], + kind="average", + grid_resolution=25, + feature_names=diabetes.feature_names, + ) + + range_pd = np.array([-1, 1], dtype=np.float64) + for pd in disp.pd_results: + pd["average"][...] = range_pd[1] + pd["average"][0, 0] = range_pd[0] + + disp.plot(centered=centered) + contours = disp.contours_[0, 0] + levels = range_pd - range_pd[0] if centered else range_pd + + padding = 0.05 * (levels[1] - levels[0]) + levels[0] -= padding + levels[1] += padding + expect_levels = np.linspace(*levels, num=8) + assert_allclose(contours.levels, expect_levels) + + +def test_partial_dependence_kind_list( + pyplot, + clf_diabetes, + diabetes, +): + """Check that we can provide a list of strings to kind parameter.""" + matplotlib = pytest.importorskip("matplotlib") + + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + features=[0, 2, (1, 2)], + grid_resolution=20, + kind=["both", "both", "average"], + ) + + for idx in [0, 1]: + assert all( + [ + isinstance(line, matplotlib.lines.Line2D) + for line in disp.lines_[0, idx].ravel() + ] + ) + assert disp.contours_[0, idx] is None + + assert disp.contours_[0, 2] is not None + assert all([line is None for line in disp.lines_[0, 2].ravel()]) + + +@pytest.mark.parametrize( + "features, kind", + [ + ([0, 2, (1, 2)], 
"individual"), + ([0, 2, (1, 2)], "both"), + ([(0, 1), (0, 2), (1, 2)], "individual"), + ([(0, 1), (0, 2), (1, 2)], "both"), + ([0, 2, (1, 2)], ["individual", "individual", "individual"]), + ([0, 2, (1, 2)], ["both", "both", "both"]), + ], +) +def test_partial_dependence_kind_error( + pyplot, + clf_diabetes, + diabetes, + features, + kind, +): + """Check that we raise an informative error when 2-way PD is requested + together with 1-way PD/ICE""" + warn_msg = ( + "ICE plot cannot be rendered for 2-way feature interactions. 2-way " + "feature interactions mandates PD plots using the 'average' kind" + ) + with pytest.raises(ValueError, match=warn_msg): + PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + features=features, + grid_resolution=20, + kind=kind, + ) + + +@pytest.mark.parametrize( + "line_kw, pd_line_kw, ice_lines_kw, expected_colors", + [ + ({"color": "r"}, {"color": "g"}, {"color": "b"}, ("g", "b")), + (None, {"color": "g"}, {"color": "b"}, ("g", "b")), + ({"color": "r"}, None, {"color": "b"}, ("r", "b")), + ({"color": "r"}, {"color": "g"}, None, ("g", "r")), + ({"color": "r"}, None, None, ("r", "r")), + ({"color": "r"}, {"linestyle": "--"}, {"linestyle": "-."}, ("r", "r")), + ({"c": "r"}, None, None, ("r", "r")), + ({"c": "r", "ls": "-."}, {"color": "g"}, {"color": "b"}, ("g", "b")), + ({"c": "r"}, {"c": "g"}, {"c": "b"}, ("g", "b")), + ({"c": "r"}, {"ls": "--"}, {"ls": "-."}, ("r", "r")), + ], +) +def test_plot_partial_dependence_lines_kw( + pyplot, + clf_diabetes, + diabetes, + line_kw, + pd_line_kw, + ice_lines_kw, + expected_colors, +): + """Check that passing `pd_line_kw` and `ice_lines_kw` will act on the + specific lines in the plot. 
+ """ + + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + [0, 2], + grid_resolution=20, + feature_names=diabetes.feature_names, + n_cols=2, + kind="both", + line_kw=line_kw, + pd_line_kw=pd_line_kw, + ice_lines_kw=ice_lines_kw, + ) + + line = disp.lines_[0, 0, -1] + assert line.get_color() == expected_colors[0], ( + f"{line.get_color()}!={expected_colors[0]}\n" f"{line_kw} and {pd_line_kw}" + ) + if pd_line_kw is not None: + if "linestyle" in pd_line_kw: + assert line.get_linestyle() == pd_line_kw["linestyle"] + elif "ls" in pd_line_kw: + assert line.get_linestyle() == pd_line_kw["ls"] + else: + assert line.get_linestyle() == "--" + + line = disp.lines_[0, 0, 0] + assert ( + line.get_color() == expected_colors[1] + ), f"{line.get_color()}!={expected_colors[1]}" + if ice_lines_kw is not None: + if "linestyle" in ice_lines_kw: + assert line.get_linestyle() == ice_lines_kw["linestyle"] + elif "ls" in ice_lines_kw: + assert line.get_linestyle() == ice_lines_kw["ls"] + else: + assert line.get_linestyle() == "-" + + +def test_partial_dependence_display_wrong_len_kind( + pyplot, + clf_diabetes, + diabetes, +): + """Check that we raise an error when `kind` is a list with a wrong length. + + This case can only be triggered using the `PartialDependenceDisplay.from_estimator` + method. + """ + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + features=[0, 2], + grid_resolution=20, + kind="average", # len(kind) != len(features) + ) + + # alter `kind` to be a list with a length different from length of `features` + disp.kind = ["average"] + err_msg = ( + r"When `kind` is provided as a list of strings, it should contain as many" + r" elements as `features`. `kind` contains 1 element\(s\) and `features`" + r" contains 2 element\(s\)." 
+ ) + with pytest.raises(ValueError, match=err_msg): + disp.plot() + + +@pytest.mark.parametrize( + "kind", + ["individual", "both", "average", ["average", "both"], ["individual", "both"]], +) +def test_partial_dependence_display_kind_centered_interaction( + pyplot, + kind, + clf_diabetes, + diabetes, +): + """Check that we properly center ICE and PD when passing kind as a string and as a + list.""" + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + [0, 1], + kind=kind, + centered=True, + subsample=5, + ) + + assert all([ln._y[0] == 0.0 for ln in disp.lines_.ravel() if ln is not None]) + + +def test_partial_dependence_display_with_constant_sample_weight( + pyplot, + clf_diabetes, + diabetes, +): + """Check that the utilization of a constant sample weight maintains the + standard behavior. + """ + disp = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + [0, 1], + kind="average", + method="brute", + ) + + sample_weight = np.ones_like(diabetes.target) + disp_sw = PartialDependenceDisplay.from_estimator( + clf_diabetes, + diabetes.data, + [0, 1], + sample_weight=sample_weight, + kind="average", + method="brute", + ) + + assert np.array_equal( + disp.pd_results[0]["average"], disp_sw.pd_results[0]["average"] + ) + + +def test_subclass_named_constructors_return_type_is_subclass( + pyplot, diabetes, clf_diabetes +): + """Check that named constructors return the correct type when subclassed. 
+ + Non-regression test for: + https://github.com/scikit-learn/scikit-learn/pull/27675 + """ + + class SubclassOfDisplay(PartialDependenceDisplay): + pass + + curve = SubclassOfDisplay.from_estimator( + clf_diabetes, + diabetes.data, + [0, 2, (0, 2)], + ) + + assert isinstance(curve, SubclassOfDisplay) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/tests/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/tests/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5d7c54e85334e4bb15e58b8c1ed54b501ea1b63c Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/tests/__pycache__/test_partial_dependence.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/tests/__pycache__/test_partial_dependence.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2be4e3e844d54782c92732bec5e28a0e9d202ed2 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/tests/__pycache__/test_partial_dependence.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/tests/__pycache__/test_pd_utils.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/tests/__pycache__/test_pd_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78371a3a395f7c9bd76c68afa82f53ca0accd7d8 Binary files /dev/null and 
b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/tests/__pycache__/test_pd_utils.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/tests/test_partial_dependence.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/tests/test_partial_dependence.py new file mode 100644 index 0000000000000000000000000000000000000000..16c23d4d5dd4e086de4e0c4831cac8185d7056ef --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/tests/test_partial_dependence.py @@ -0,0 +1,930 @@ +""" +Testing for the partial dependence module. +""" + +import numpy as np +import pytest + +import sklearn +from sklearn.base import BaseEstimator, ClassifierMixin, clone, is_regressor +from sklearn.cluster import KMeans +from sklearn.compose import make_column_transformer +from sklearn.datasets import load_iris, make_classification, make_regression +from sklearn.dummy import DummyClassifier +from sklearn.ensemble import ( + GradientBoostingClassifier, + GradientBoostingRegressor, + HistGradientBoostingClassifier, + HistGradientBoostingRegressor, + RandomForestRegressor, +) +from sklearn.exceptions import NotFittedError +from sklearn.inspection import partial_dependence +from sklearn.inspection._partial_dependence import ( + _grid_from_X, + _partial_dependence_brute, + _partial_dependence_recursion, +) +from sklearn.linear_model import LinearRegression, LogisticRegression, MultiTaskLasso +from sklearn.metrics import r2_score +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import ( + PolynomialFeatures, + RobustScaler, + StandardScaler, + scale, +) +from sklearn.tree import DecisionTreeRegressor +from sklearn.tree.tests.test_tree import assert_is_subtree +from sklearn.utils._testing import assert_allclose, assert_array_equal +from sklearn.utils.fixes import _IS_32BIT +from sklearn.utils.validation import check_random_state + +# toy sample +X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], 
[1, 2], [2, 1]] +y = [-1, -1, -1, 1, 1, 1] + + +# (X, y), n_targets <-- as expected in the output of partial_dep() +binary_classification_data = (make_classification(n_samples=50, random_state=0), 1) +multiclass_classification_data = ( + make_classification( + n_samples=50, n_classes=3, n_clusters_per_class=1, random_state=0 + ), + 3, +) +regression_data = (make_regression(n_samples=50, random_state=0), 1) +multioutput_regression_data = ( + make_regression(n_samples=50, n_targets=2, random_state=0), + 2, +) + +# iris +iris = load_iris() + + +@pytest.mark.parametrize( + "Estimator, method, data", + [ + (GradientBoostingClassifier, "auto", binary_classification_data), + (GradientBoostingClassifier, "auto", multiclass_classification_data), + (GradientBoostingClassifier, "brute", binary_classification_data), + (GradientBoostingClassifier, "brute", multiclass_classification_data), + (GradientBoostingRegressor, "auto", regression_data), + (GradientBoostingRegressor, "brute", regression_data), + (DecisionTreeRegressor, "brute", regression_data), + (LinearRegression, "brute", regression_data), + (LinearRegression, "brute", multioutput_regression_data), + (LogisticRegression, "brute", binary_classification_data), + (LogisticRegression, "brute", multiclass_classification_data), + (MultiTaskLasso, "brute", multioutput_regression_data), + ], +) +@pytest.mark.parametrize("grid_resolution", (5, 10)) +@pytest.mark.parametrize("features", ([1], [1, 2])) +@pytest.mark.parametrize("kind", ("average", "individual", "both")) +def test_output_shape(Estimator, method, data, grid_resolution, features, kind): + # Check that partial_dependence has consistent output shape for different + # kinds of estimators: + # - classifiers with binary and multiclass settings + # - regressors + # - multi-task regressors + + est = Estimator() + if hasattr(est, "n_estimators"): + est.set_params(n_estimators=2) # speed-up computations + + # n_target corresponds to the number of classes (1 for binary 
classif) or + # the number of tasks / outputs in multi task settings. It's equal to 1 for + # classical regression_data. + (X, y), n_targets = data + n_instances = X.shape[0] + + est.fit(X, y) + result = partial_dependence( + est, + X=X, + features=features, + method=method, + kind=kind, + grid_resolution=grid_resolution, + ) + pdp, axes = result, result["grid_values"] + + expected_pdp_shape = (n_targets, *[grid_resolution for _ in range(len(features))]) + expected_ice_shape = ( + n_targets, + n_instances, + *[grid_resolution for _ in range(len(features))], + ) + if kind == "average": + assert pdp.average.shape == expected_pdp_shape + elif kind == "individual": + assert pdp.individual.shape == expected_ice_shape + else: # 'both' + assert pdp.average.shape == expected_pdp_shape + assert pdp.individual.shape == expected_ice_shape + + expected_axes_shape = (len(features), grid_resolution) + assert axes is not None + assert np.asarray(axes).shape == expected_axes_shape + + +def test_grid_from_X(): + # tests for _grid_from_X: sanity check for output, and for shapes. + + # Make sure that the grid is a cartesian product of the input (it will use + # the unique values instead of the percentiles) + percentiles = (0.05, 0.95) + grid_resolution = 100 + is_categorical = [False, False] + X = np.asarray([[1, 2], [3, 4]]) + grid, axes = _grid_from_X(X, percentiles, is_categorical, grid_resolution) + assert_array_equal(grid, [[1, 2], [1, 4], [3, 2], [3, 4]]) + assert_array_equal(axes, X.T) + + # test shapes of returned objects depending on the number of unique values + # for a feature. 
+ rng = np.random.RandomState(0) + grid_resolution = 15 + + # n_unique_values > grid_resolution + X = rng.normal(size=(20, 2)) + grid, axes = _grid_from_X( + X, percentiles, is_categorical, grid_resolution=grid_resolution + ) + assert grid.shape == (grid_resolution * grid_resolution, X.shape[1]) + assert np.asarray(axes).shape == (2, grid_resolution) + + # n_unique_values < grid_resolution, will use actual values + n_unique_values = 12 + X[n_unique_values - 1 :, 0] = 12345 + rng.shuffle(X) # just to make sure the order is irrelevant + grid, axes = _grid_from_X( + X, percentiles, is_categorical, grid_resolution=grid_resolution + ) + assert grid.shape == (n_unique_values * grid_resolution, X.shape[1]) + # axes is a list of arrays of different shapes + assert axes[0].shape == (n_unique_values,) + assert axes[1].shape == (grid_resolution,) + + +@pytest.mark.parametrize( + "grid_resolution", + [ + 2, # since n_categories > 2, we should not use quantiles resampling + 100, + ], +) +def test_grid_from_X_with_categorical(grid_resolution): + """Check that `_grid_from_X` always sample from categories and does not + depend from the percentiles. + """ + pd = pytest.importorskip("pandas") + percentiles = (0.05, 0.95) + is_categorical = [True] + X = pd.DataFrame({"cat_feature": ["A", "B", "C", "A", "B", "D", "E"]}) + grid, axes = _grid_from_X( + X, percentiles, is_categorical, grid_resolution=grid_resolution + ) + assert grid.shape == (5, X.shape[1]) + assert axes[0].shape == (5,) + + +@pytest.mark.parametrize("grid_resolution", [3, 100]) +def test_grid_from_X_heterogeneous_type(grid_resolution): + """Check that `_grid_from_X` always sample from categories and does not + depend from the percentiles. 
+ """ + pd = pytest.importorskip("pandas") + percentiles = (0.05, 0.95) + is_categorical = [True, False] + X = pd.DataFrame( + { + "cat": ["A", "B", "C", "A", "B", "D", "E", "A", "B", "D"], + "num": [1, 1, 1, 2, 5, 6, 6, 6, 6, 8], + } + ) + nunique = X.nunique() + + grid, axes = _grid_from_X( + X, percentiles, is_categorical, grid_resolution=grid_resolution + ) + if grid_resolution == 3: + assert grid.shape == (15, 2) + assert axes[0].shape[0] == nunique["num"] + assert axes[1].shape[0] == grid_resolution + else: + assert grid.shape == (25, 2) + assert axes[0].shape[0] == nunique["cat"] + assert axes[1].shape[0] == nunique["cat"] + + +@pytest.mark.parametrize( + "grid_resolution, percentiles, err_msg", + [ + (2, (0, 0.0001), "percentiles are too close"), + (100, (1, 2, 3, 4), "'percentiles' must be a sequence of 2 elements"), + (100, 12345, "'percentiles' must be a sequence of 2 elements"), + (100, (-1, 0.95), r"'percentiles' values must be in \[0, 1\]"), + (100, (0.05, 2), r"'percentiles' values must be in \[0, 1\]"), + (100, (0.9, 0.1), r"percentiles\[0\] must be strictly less than"), + (1, (0.05, 0.95), "'grid_resolution' must be strictly greater than 1"), + ], +) +def test_grid_from_X_error(grid_resolution, percentiles, err_msg): + X = np.asarray([[1, 2], [3, 4]]) + is_categorical = [False] + with pytest.raises(ValueError, match=err_msg): + _grid_from_X(X, percentiles, is_categorical, grid_resolution) + + +@pytest.mark.parametrize("target_feature", range(5)) +@pytest.mark.parametrize( + "est, method", + [ + (LinearRegression(), "brute"), + (GradientBoostingRegressor(random_state=0), "brute"), + (GradientBoostingRegressor(random_state=0), "recursion"), + (HistGradientBoostingRegressor(random_state=0), "brute"), + (HistGradientBoostingRegressor(random_state=0), "recursion"), + ], +) +def test_partial_dependence_helpers(est, method, target_feature): + # Check that what is returned by _partial_dependence_brute or + # _partial_dependence_recursion is equivalent to 
manually setting a target + # feature to a given value, and computing the average prediction over all + # samples. + # This also checks that the brute and recursion methods give the same + # output. + # Note that even on the trainset, the brute and the recursion methods + # aren't always strictly equivalent, in particular when the slow method + # generates unrealistic samples that have low mass in the joint + # distribution of the input features, and when some of the features are + # dependent. Hence the high tolerance on the checks. + + X, y = make_regression(random_state=0, n_features=5, n_informative=5) + # The 'init' estimator for GBDT (here the average prediction) isn't taken + # into account with the recursion method, for technical reasons. We set + # the mean to 0 to that this 'bug' doesn't have any effect. + y = y - y.mean() + + # Clone is necessary to make the test thread-safe. + est = clone(est).fit(X, y) + + # target feature will be set to .5 and then to 123 + features = np.array([target_feature], dtype=np.intp) + grid = np.array([[0.5], [123]]) + + if method == "brute": + pdp, predictions = _partial_dependence_brute( + est, grid, features, X, response_method="auto" + ) + else: + pdp = _partial_dependence_recursion(est, grid, features) + + mean_predictions = [] + for val in (0.5, 123): + X_ = X.copy() + X_[:, target_feature] = val + mean_predictions.append(est.predict(X_).mean()) + + pdp = pdp[0] # (shape is (1, 2) so make it (2,)) + + # allow for greater margin for error with recursion method + rtol = 1e-1 if method == "recursion" else 1e-3 + assert np.allclose(pdp, mean_predictions, rtol=rtol) + + +@pytest.mark.parametrize("seed", range(1)) +def test_recursion_decision_tree_vs_forest_and_gbdt(seed): + # Make sure that the recursion method gives the same results on a + # DecisionTreeRegressor and a GradientBoostingRegressor or a + # RandomForestRegressor with 1 tree and equivalent parameters. 
+ + rng = np.random.RandomState(seed) + + # Purely random dataset to avoid correlated features + n_samples = 1000 + n_features = 5 + X = rng.randn(n_samples, n_features) + y = rng.randn(n_samples) * 10 + + # The 'init' estimator for GBDT (here the average prediction) isn't taken + # into account with the recursion method, for technical reasons. We set + # the mean to 0 to that this 'bug' doesn't have any effect. + y = y - y.mean() + + # set max_depth not too high to avoid splits with same gain but different + # features + max_depth = 5 + + tree_seed = 0 + forest = RandomForestRegressor( + n_estimators=1, + max_features=None, + bootstrap=False, + max_depth=max_depth, + random_state=tree_seed, + ) + # The forest will use ensemble.base._set_random_states to set the + # random_state of the tree sub-estimator. We simulate this here to have + # equivalent estimators. + equiv_random_state = check_random_state(tree_seed).randint(np.iinfo(np.int32).max) + gbdt = GradientBoostingRegressor( + n_estimators=1, + learning_rate=1, + criterion="squared_error", + max_depth=max_depth, + random_state=equiv_random_state, + ) + tree = DecisionTreeRegressor(max_depth=max_depth, random_state=equiv_random_state) + + forest.fit(X, y) + gbdt.fit(X, y) + tree.fit(X, y) + + # sanity check: if the trees aren't the same, the PD values won't be equal + try: + assert_is_subtree(tree.tree_, gbdt[0, 0].tree_) + assert_is_subtree(tree.tree_, forest[0].tree_) + except AssertionError: + # For some reason the trees aren't exactly equal on 32bits, so the PDs + # cannot be equal either. 
See + # https://github.com/scikit-learn/scikit-learn/issues/8853 + assert _IS_32BIT, "this should only fail on 32 bit platforms" + return + + grid = rng.randn(50).reshape(-1, 1) + for f in range(n_features): + features = np.array([f], dtype=np.intp) + + pdp_forest = _partial_dependence_recursion(forest, grid, features) + pdp_gbdt = _partial_dependence_recursion(gbdt, grid, features) + pdp_tree = _partial_dependence_recursion(tree, grid, features) + + np.testing.assert_allclose(pdp_gbdt, pdp_tree) + np.testing.assert_allclose(pdp_forest, pdp_tree) + + +@pytest.mark.parametrize( + "est", + ( + GradientBoostingClassifier(random_state=0), + HistGradientBoostingClassifier(random_state=0), + ), +) +@pytest.mark.parametrize("target_feature", (0, 1, 2, 3, 4, 5)) +def test_recursion_decision_function(est, target_feature): + # Make sure the recursion method (implicitly uses decision_function) has + # the same result as using brute method with + # response_method=decision_function + + X, y = make_classification(n_classes=2, n_clusters_per_class=1, random_state=1) + assert np.mean(y) == 0.5 # make sure the init estimator predicts 0 anyway + + est = clone(est).fit(X, y) + + preds_1 = partial_dependence( + est, + X, + [target_feature], + response_method="decision_function", + method="recursion", + kind="average", + ) + preds_2 = partial_dependence( + est, + X, + [target_feature], + response_method="decision_function", + method="brute", + kind="average", + ) + + assert_allclose(preds_1["average"], preds_2["average"], atol=1e-7) + + +@pytest.mark.parametrize( + "est", + ( + LinearRegression(), + GradientBoostingRegressor(random_state=0), + HistGradientBoostingRegressor( + random_state=0, min_samples_leaf=1, max_leaf_nodes=None, max_iter=1 + ), + DecisionTreeRegressor(random_state=0), + ), +) +@pytest.mark.parametrize("power", (1, 2)) +def test_partial_dependence_easy_target(est, power): + # If the target y only depends on one feature in an obvious way (linear or + # quadratic) 
then the partial dependence for that feature should reflect + # it. + # We here fit a linear regression_data model (with polynomial features if + # needed) and compute r_squared to check that the partial dependence + # correctly reflects the target. + + rng = np.random.RandomState(0) + n_samples = 200 + target_variable = 2 + X = rng.normal(size=(n_samples, 5)) + y = X[:, target_variable] ** power + + est = clone(est).fit(X, y) + + pdp = partial_dependence( + est, features=[target_variable], X=X, grid_resolution=1000, kind="average" + ) + + new_X = pdp["grid_values"][0].reshape(-1, 1) + new_y = pdp["average"][0] + # add polynomial features if needed + new_X = PolynomialFeatures(degree=power).fit_transform(new_X) + + lr = LinearRegression().fit(new_X, new_y) + r2 = r2_score(new_y, lr.predict(new_X)) + + assert r2 > 0.99 + + +@pytest.mark.parametrize( + "Estimator", + ( + sklearn.tree.DecisionTreeClassifier, + sklearn.tree.ExtraTreeClassifier, + sklearn.ensemble.ExtraTreesClassifier, + sklearn.neighbors.KNeighborsClassifier, + sklearn.neighbors.RadiusNeighborsClassifier, + sklearn.ensemble.RandomForestClassifier, + ), +) +def test_multiclass_multioutput(Estimator): + # Make sure error is raised for multiclass-multioutput classifiers + + # make multiclass-multioutput dataset + X, y = make_classification(n_classes=3, n_clusters_per_class=1, random_state=0) + y = np.array([y, y]).T + + est = Estimator() + est.fit(X, y) + + with pytest.raises( + ValueError, match="Multiclass-multioutput estimators are not supported" + ): + partial_dependence(est, X, [0]) + + +class NoPredictProbaNoDecisionFunction(ClassifierMixin, BaseEstimator): + def fit(self, X, y): + # simulate that we have some classes + self.classes_ = [0, 1] + return self + + +@pytest.mark.parametrize( + "estimator, params, err_msg", + [ + ( + KMeans(random_state=0, n_init="auto"), + {"features": [0]}, + "'estimator' must be a fitted regressor or classifier", + ), + ( + LinearRegression(), + {"features": [0], 
"response_method": "predict_proba"}, + "The response_method parameter is ignored for regressors", + ), + ( + GradientBoostingClassifier(random_state=0), + { + "features": [0], + "response_method": "predict_proba", + "method": "recursion", + }, + "'recursion' method, the response_method must be 'decision_function'", + ), + ( + GradientBoostingClassifier(random_state=0), + {"features": [0], "response_method": "predict_proba", "method": "auto"}, + "'recursion' method, the response_method must be 'decision_function'", + ), + ( + LinearRegression(), + {"features": [0], "method": "recursion", "kind": "individual"}, + "The 'recursion' method only applies when 'kind' is set to 'average'", + ), + ( + LinearRegression(), + {"features": [0], "method": "recursion", "kind": "both"}, + "The 'recursion' method only applies when 'kind' is set to 'average'", + ), + ( + LinearRegression(), + {"features": [0], "method": "recursion"}, + "Only the following estimators support the 'recursion' method:", + ), + ], +) +def test_partial_dependence_error(estimator, params, err_msg): + X, y = make_classification(random_state=0) + estimator = clone(estimator).fit(X, y) + + with pytest.raises(ValueError, match=err_msg): + partial_dependence(estimator, X, **params) + + +@pytest.mark.parametrize( + "estimator", [LinearRegression(), GradientBoostingClassifier(random_state=0)] +) +@pytest.mark.parametrize("features", [-1, 10000]) +def test_partial_dependence_unknown_feature_indices(estimator, features): + X, y = make_classification(random_state=0) + estimator = clone(estimator).fit(X, y) + + err_msg = "all features must be in" + with pytest.raises(ValueError, match=err_msg): + partial_dependence(estimator, X, [features]) + + +@pytest.mark.parametrize( + "estimator", [LinearRegression(), GradientBoostingClassifier(random_state=0)] +) +def test_partial_dependence_unknown_feature_string(estimator): + pd = pytest.importorskip("pandas") + X, y = make_classification(random_state=0) + df = pd.DataFrame(X) 
+ estimator = clone(estimator).fit(df, y) + + features = ["random"] + err_msg = "A given column is not a column of the dataframe" + with pytest.raises(ValueError, match=err_msg): + partial_dependence(estimator, df, features) + + +@pytest.mark.parametrize( + "estimator", [LinearRegression(), GradientBoostingClassifier(random_state=0)] +) +def test_partial_dependence_X_list(estimator): + # check that array-like objects are accepted + X, y = make_classification(random_state=0) + estimator = clone(estimator).fit(X, y) + partial_dependence(estimator, list(X), [0], kind="average") + + +def test_warning_recursion_non_constant_init(): + # make sure that passing a non-constant init parameter to a GBDT and using + # recursion method yields a warning. + + gbc = GradientBoostingClassifier(init=DummyClassifier(), random_state=0) + gbc.fit(X, y) + + with pytest.warns( + UserWarning, match="Using recursion method with a non-constant init predictor" + ): + partial_dependence(gbc, X, [0], method="recursion", kind="average") + + with pytest.warns( + UserWarning, match="Using recursion method with a non-constant init predictor" + ): + partial_dependence(gbc, X, [0], method="recursion", kind="average") + + +def test_partial_dependence_sample_weight_of_fitted_estimator(): + # Test near perfect correlation between partial dependence and diagonal + # when sample weights emphasize y = x predictions + # non-regression test for #13193 + # TODO: extend to HistGradientBoosting once sample_weight is supported + N = 1000 + rng = np.random.RandomState(123456) + mask = rng.randint(2, size=N, dtype=bool) + + x = rng.rand(N) + # set y = x on mask and y = -x outside + y = x.copy() + y[~mask] = -y[~mask] + X = np.c_[mask, x] + # sample weights to emphasize data points where y = x + sample_weight = np.ones(N) + sample_weight[mask] = 1000.0 + + clf = GradientBoostingRegressor(n_estimators=10, random_state=1) + clf.fit(X, y, sample_weight=sample_weight) + + pdp = partial_dependence(clf, X, features=[1], 
kind="average") + + assert np.corrcoef(pdp["average"], pdp["grid_values"])[0, 1] > 0.99 + + +def test_hist_gbdt_sw_not_supported(): + # TODO: remove/fix when PDP supports HGBT with sample weights + clf = HistGradientBoostingRegressor(random_state=1) + clf.fit(X, y, sample_weight=np.ones(len(X))) + + with pytest.raises( + NotImplementedError, match="does not support partial dependence" + ): + partial_dependence(clf, X, features=[1]) + + +def test_partial_dependence_pipeline(): + # check that the partial dependence support pipeline + iris = load_iris() + + scaler = StandardScaler() + clf = DummyClassifier(random_state=42) + pipe = make_pipeline(scaler, clf) + + clf.fit(scaler.fit_transform(iris.data), iris.target) + pipe.fit(iris.data, iris.target) + + features = 0 + pdp_pipe = partial_dependence( + pipe, iris.data, features=[features], grid_resolution=10, kind="average" + ) + pdp_clf = partial_dependence( + clf, + scaler.transform(iris.data), + features=[features], + grid_resolution=10, + kind="average", + ) + assert_allclose(pdp_pipe["average"], pdp_clf["average"]) + assert_allclose( + pdp_pipe["grid_values"][0], + pdp_clf["grid_values"][0] * scaler.scale_[features] + scaler.mean_[features], + ) + + +@pytest.mark.parametrize( + "estimator", + [ + LogisticRegression(max_iter=1000, random_state=0), + GradientBoostingClassifier(random_state=0, n_estimators=5), + ], + ids=["estimator-brute", "estimator-recursion"], +) +@pytest.mark.parametrize( + "preprocessor", + [ + None, + make_column_transformer( + (StandardScaler(), [iris.feature_names[i] for i in (0, 2)]), + (RobustScaler(), [iris.feature_names[i] for i in (1, 3)]), + ), + make_column_transformer( + (StandardScaler(), [iris.feature_names[i] for i in (0, 2)]), + remainder="passthrough", + ), + ], + ids=["None", "column-transformer", "column-transformer-passthrough"], +) +@pytest.mark.parametrize( + "features", + [[0, 2], [iris.feature_names[i] for i in (0, 2)]], + ids=["features-integer", "features-string"], +) 
+def test_partial_dependence_dataframe(estimator, preprocessor, features): + # check that the partial dependence support dataframe and pipeline + # including a column transformer + pd = pytest.importorskip("pandas") + df = pd.DataFrame(scale(iris.data), columns=iris.feature_names) + + pipe = make_pipeline(preprocessor, clone(estimator)) + pipe.fit(df, iris.target) + pdp_pipe = partial_dependence( + pipe, df, features=features, grid_resolution=10, kind="average" + ) + + # the column transformer will reorder the column when transforming + # we mixed the index to be sure that we are computing the partial + # dependence of the right columns + if preprocessor is not None: + X_proc = clone(preprocessor).fit_transform(df) + features_clf = [0, 1] + else: + X_proc = df + features_clf = [0, 2] + + clf = clone(estimator).fit(X_proc, iris.target) + pdp_clf = partial_dependence( + clf, + X_proc, + features=features_clf, + method="brute", + grid_resolution=10, + kind="average", + ) + + assert_allclose(pdp_pipe["average"], pdp_clf["average"]) + if preprocessor is not None: + scaler = preprocessor.named_transformers_["standardscaler"] + assert_allclose( + pdp_pipe["grid_values"][1], + pdp_clf["grid_values"][1] * scaler.scale_[1] + scaler.mean_[1], + ) + else: + assert_allclose(pdp_pipe["grid_values"][1], pdp_clf["grid_values"][1]) + + +@pytest.mark.parametrize( + "features, expected_pd_shape", + [ + (0, (3, 10)), + (iris.feature_names[0], (3, 10)), + ([0, 2], (3, 10, 10)), + ([iris.feature_names[i] for i in (0, 2)], (3, 10, 10)), + ([True, False, True, False], (3, 10, 10)), + ], + ids=["scalar-int", "scalar-str", "list-int", "list-str", "mask"], +) +def test_partial_dependence_feature_type(features, expected_pd_shape): + # check all possible features type supported in PDP + pd = pytest.importorskip("pandas") + df = pd.DataFrame(iris.data, columns=iris.feature_names) + + preprocessor = make_column_transformer( + (StandardScaler(), [iris.feature_names[i] for i in (0, 2)]), + 
(RobustScaler(), [iris.feature_names[i] for i in (1, 3)]), + ) + pipe = make_pipeline( + preprocessor, LogisticRegression(max_iter=1000, random_state=0) + ) + pipe.fit(df, iris.target) + pdp_pipe = partial_dependence( + pipe, df, features=features, grid_resolution=10, kind="average" + ) + assert pdp_pipe["average"].shape == expected_pd_shape + assert len(pdp_pipe["grid_values"]) == len(pdp_pipe["average"].shape) - 1 + + +@pytest.mark.parametrize( + "estimator", + [ + LinearRegression(), + LogisticRegression(), + GradientBoostingRegressor(), + GradientBoostingClassifier(), + ], +) +def test_partial_dependence_unfitted(estimator): + X = iris.data + preprocessor = make_column_transformer( + (StandardScaler(), [0, 2]), (RobustScaler(), [1, 3]) + ) + pipe = make_pipeline(preprocessor, estimator) + with pytest.raises(NotFittedError, match="is not fitted yet"): + partial_dependence(pipe, X, features=[0, 2], grid_resolution=10) + with pytest.raises(NotFittedError, match="is not fitted yet"): + partial_dependence(estimator, X, features=[0, 2], grid_resolution=10) + + +@pytest.mark.parametrize( + "Estimator, data", + [ + (LinearRegression, multioutput_regression_data), + (LogisticRegression, binary_classification_data), + ], +) +def test_kind_average_and_average_of_individual(Estimator, data): + est = Estimator() + (X, y), n_targets = data + est.fit(X, y) + + pdp_avg = partial_dependence(est, X=X, features=[1, 2], kind="average") + pdp_ind = partial_dependence(est, X=X, features=[1, 2], kind="individual") + avg_ind = np.mean(pdp_ind["individual"], axis=1) + assert_allclose(avg_ind, pdp_avg["average"]) + + +@pytest.mark.parametrize( + "Estimator, data", + [ + (LinearRegression, multioutput_regression_data), + (LogisticRegression, binary_classification_data), + ], +) +def test_partial_dependence_kind_individual_ignores_sample_weight(Estimator, data): + """Check that `sample_weight` does not have any effect on reported ICE.""" + est = Estimator() + (X, y), n_targets = data + 
sample_weight = np.arange(X.shape[0]) + est.fit(X, y) + + pdp_nsw = partial_dependence(est, X=X, features=[1, 2], kind="individual") + pdp_sw = partial_dependence( + est, X=X, features=[1, 2], kind="individual", sample_weight=sample_weight + ) + assert_allclose(pdp_nsw["individual"], pdp_sw["individual"]) + assert_allclose(pdp_nsw["grid_values"], pdp_sw["grid_values"]) + + +@pytest.mark.parametrize( + "estimator", + [ + LinearRegression(), + LogisticRegression(), + RandomForestRegressor(), + GradientBoostingClassifier(), + ], +) +@pytest.mark.parametrize("non_null_weight_idx", [0, 1, -1]) +def test_partial_dependence_non_null_weight_idx(estimator, non_null_weight_idx): + """Check that if we pass a `sample_weight` of zeros with only one index with + sample weight equals one, then the average `partial_dependence` with this + `sample_weight` is equal to the individual `partial_dependence` of the + corresponding index. + """ + X, y = iris.data, iris.target + preprocessor = make_column_transformer( + (StandardScaler(), [0, 2]), (RobustScaler(), [1, 3]) + ) + pipe = make_pipeline(preprocessor, clone(estimator)).fit(X, y) + + sample_weight = np.zeros_like(y) + sample_weight[non_null_weight_idx] = 1 + pdp_sw = partial_dependence( + pipe, + X, + [2, 3], + kind="average", + sample_weight=sample_weight, + grid_resolution=10, + ) + pdp_ind = partial_dependence(pipe, X, [2, 3], kind="individual", grid_resolution=10) + output_dim = 1 if is_regressor(pipe) else len(np.unique(y)) + for i in range(output_dim): + assert_allclose( + pdp_ind["individual"][i][non_null_weight_idx], + pdp_sw["average"][i], + ) + + +@pytest.mark.parametrize( + "Estimator, data", + [ + (LinearRegression, multioutput_regression_data), + (LogisticRegression, binary_classification_data), + ], +) +def test_partial_dependence_equivalence_equal_sample_weight(Estimator, data): + """Check that `sample_weight=None` is equivalent to having equal weights.""" + + est = Estimator() + (X, y), n_targets = data + 
est.fit(X, y) + + sample_weight, params = None, {"X": X, "features": [1, 2], "kind": "average"} + pdp_sw_none = partial_dependence(est, **params, sample_weight=sample_weight) + sample_weight = np.ones(len(y)) + pdp_sw_unit = partial_dependence(est, **params, sample_weight=sample_weight) + assert_allclose(pdp_sw_none["average"], pdp_sw_unit["average"]) + sample_weight = 2 * np.ones(len(y)) + pdp_sw_doubling = partial_dependence(est, **params, sample_weight=sample_weight) + assert_allclose(pdp_sw_none["average"], pdp_sw_doubling["average"]) + + +def test_partial_dependence_sample_weight_size_error(): + """Check that we raise an error when the size of `sample_weight` is not + consistent with `X` and `y`. + """ + est = LogisticRegression() + (X, y), n_targets = binary_classification_data + sample_weight = np.ones_like(y) + est.fit(X, y) + + with pytest.raises(ValueError, match="sample_weight.shape =="): + partial_dependence( + est, X, features=[0], sample_weight=sample_weight[1:], grid_resolution=10 + ) + + +def test_partial_dependence_sample_weight_with_recursion(): + """Check that we raise an error when `sample_weight` is provided with + `"recursion"` method. 
+ """ + est = RandomForestRegressor() + (X, y), n_targets = regression_data + sample_weight = np.ones_like(y) + est.fit(X, y, sample_weight=sample_weight) + + with pytest.raises(ValueError, match="'recursion' method can only be applied when"): + partial_dependence( + est, X, features=[0], method="recursion", sample_weight=sample_weight + ) + + +def test_mixed_type_categorical(): + """Check that we raise a proper error when a column has mixed types and + the sorting of `np.unique` will fail.""" + X = np.array(["A", "B", "C", np.nan], dtype=object).reshape(-1, 1) + y = np.array([0, 1, 0, 1]) + + from sklearn.preprocessing import OrdinalEncoder + + clf = make_pipeline( + OrdinalEncoder(encoded_missing_value=-1), + LogisticRegression(), + ).fit(X, y) + with pytest.raises(ValueError, match="The column #0 contains mixed data types"): + partial_dependence(clf, X, features=[0]) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/tests/test_pd_utils.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/tests/test_pd_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..5dea3834a77a70891a4efab25a560d09a49a13e1 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/tests/test_pd_utils.py @@ -0,0 +1,47 @@ +import numpy as np +import pytest + +from sklearn.inspection._pd_utils import _check_feature_names, _get_feature_index +from sklearn.utils._testing import _convert_container + + +@pytest.mark.parametrize( + "feature_names, array_type, expected_feature_names", + [ + (None, "array", ["x0", "x1", "x2"]), + (None, "dataframe", ["a", "b", "c"]), + (np.array(["a", "b", "c"]), "array", ["a", "b", "c"]), + ], +) +def test_check_feature_names(feature_names, array_type, expected_feature_names): + X = np.random.randn(10, 3) + column_names = ["a", "b", "c"] + X = _convert_container(X, constructor_name=array_type, columns_name=column_names) + feature_names_validated = _check_feature_names(X, 
feature_names) + assert feature_names_validated == expected_feature_names + + +def test_check_feature_names_error(): + X = np.random.randn(10, 3) + feature_names = ["a", "b", "c", "a"] + msg = "feature_names should not contain duplicates." + with pytest.raises(ValueError, match=msg): + _check_feature_names(X, feature_names) + + +@pytest.mark.parametrize("fx, idx", [(0, 0), (1, 1), ("a", 0), ("b", 1), ("c", 2)]) +def test_get_feature_index(fx, idx): + feature_names = ["a", "b", "c"] + assert _get_feature_index(fx, feature_names) == idx + + +@pytest.mark.parametrize( + "fx, feature_names, err_msg", + [ + ("a", None, "Cannot plot partial dependence for feature 'a'"), + ("d", ["a", "b", "c"], "Feature 'd' not in feature_names"), + ], +) +def test_get_feature_names_error(fx, feature_names, err_msg): + with pytest.raises(ValueError, match=err_msg): + _get_feature_index(fx, feature_names) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/tests/test_permutation_importance.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/tests/test_permutation_importance.py new file mode 100644 index 0000000000000000000000000000000000000000..478a10515aa01abcf6a793b43e9f803c4b77656c --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/tests/test_permutation_importance.py @@ -0,0 +1,540 @@ +import numpy as np +import pytest +from joblib import parallel_backend +from numpy.testing import assert_allclose + +from sklearn.compose import ColumnTransformer +from sklearn.datasets import ( + load_diabetes, + load_iris, + make_classification, + make_regression, +) +from sklearn.dummy import DummyClassifier +from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor +from sklearn.impute import SimpleImputer +from sklearn.inspection import permutation_importance +from sklearn.linear_model import LinearRegression, LogisticRegression +from sklearn.metrics import ( + get_scorer, + mean_squared_error, + r2_score, +) 
+from sklearn.model_selection import train_test_split +from sklearn.pipeline import make_pipeline +from sklearn.preprocessing import KBinsDiscretizer, OneHotEncoder, StandardScaler, scale +from sklearn.utils._testing import _convert_container + + +@pytest.mark.parametrize("n_jobs", [1, 2]) +@pytest.mark.parametrize("max_samples", [0.5, 1.0]) +@pytest.mark.parametrize("sample_weight", [None, "ones"]) +def test_permutation_importance_correlated_feature_regression( + n_jobs, max_samples, sample_weight +): + # Make sure that feature highly correlated to the target have a higher + # importance + rng = np.random.RandomState(42) + n_repeats = 5 + + X, y = load_diabetes(return_X_y=True) + y_with_little_noise = (y + rng.normal(scale=0.001, size=y.shape[0])).reshape(-1, 1) + + X = np.hstack([X, y_with_little_noise]) + + weights = np.ones_like(y) if sample_weight == "ones" else sample_weight + clf = RandomForestRegressor(n_estimators=10, random_state=42) + clf.fit(X, y) + + result = permutation_importance( + clf, + X, + y, + sample_weight=weights, + n_repeats=n_repeats, + random_state=rng, + n_jobs=n_jobs, + max_samples=max_samples, + ) + + assert result.importances.shape == (X.shape[1], n_repeats) + + # the correlated feature with y was added as the last column and should + # have the highest importance + assert np.all(result.importances_mean[-1] > result.importances_mean[:-1]) + + +@pytest.mark.parametrize("n_jobs", [1, 2]) +@pytest.mark.parametrize("max_samples", [0.5, 1.0]) +def test_permutation_importance_correlated_feature_regression_pandas( + n_jobs, max_samples +): + pd = pytest.importorskip("pandas") + + # Make sure that feature highly correlated to the target have a higher + # importance + rng = np.random.RandomState(42) + n_repeats = 5 + + dataset = load_iris() + X, y = dataset.data, dataset.target + y_with_little_noise = (y + rng.normal(scale=0.001, size=y.shape[0])).reshape(-1, 1) + + # Adds feature correlated with y as the last column + X = pd.DataFrame(X, 
columns=dataset.feature_names) + X["correlated_feature"] = y_with_little_noise + + clf = RandomForestClassifier(n_estimators=10, random_state=42) + clf.fit(X, y) + + result = permutation_importance( + clf, + X, + y, + n_repeats=n_repeats, + random_state=rng, + n_jobs=n_jobs, + max_samples=max_samples, + ) + + assert result.importances.shape == (X.shape[1], n_repeats) + + # the correlated feature with y was added as the last column and should + # have the highest importance + assert np.all(result.importances_mean[-1] > result.importances_mean[:-1]) + + +@pytest.mark.parametrize("n_jobs", [1, 2]) +@pytest.mark.parametrize("max_samples", [0.5, 1.0]) +def test_robustness_to_high_cardinality_noisy_feature(n_jobs, max_samples, seed=42): + # Permutation variable importance should not be affected by the high + # cardinality bias of traditional feature importances, especially when + # computed on a held-out test set: + rng = np.random.RandomState(seed) + n_repeats = 5 + n_samples = 1000 + n_classes = 5 + n_informative_features = 2 + n_noise_features = 1 + n_features = n_informative_features + n_noise_features + + # Generate a multiclass classification dataset and a set of informative + # binary features that can be used to predict some classes of y exactly + # while leaving some classes unexplained to make the problem harder. + classes = np.arange(n_classes) + y = rng.choice(classes, size=n_samples) + X = np.hstack([(y == c).reshape(-1, 1) for c in classes[:n_informative_features]]) + X = X.astype(np.float32) + + # Not all target classes are explained by the binary class indicator + # features: + assert n_informative_features < n_classes + + # Add 10 other noisy features with high cardinality (numerical) values + # that can be used to overfit the training data. + X = np.concatenate([X, rng.randn(n_samples, n_noise_features)], axis=1) + assert X.shape == (n_samples, n_features) + + # Split the dataset to be able to evaluate on a held-out test set. 
The + # Test size should be large enough for importance measurements to be + # stable: + X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=0.5, random_state=rng + ) + clf = RandomForestClassifier(n_estimators=5, random_state=rng) + clf.fit(X_train, y_train) + + # Variable importances computed by impurity decrease on the tree node + # splits often use the noisy features in splits. This can give misleading + # impression that high cardinality noisy variables are the most important: + tree_importances = clf.feature_importances_ + informative_tree_importances = tree_importances[:n_informative_features] + noisy_tree_importances = tree_importances[n_informative_features:] + assert informative_tree_importances.max() < noisy_tree_importances.min() + + # Let's check that permutation-based feature importances do not have this + # problem. + r = permutation_importance( + clf, + X_test, + y_test, + n_repeats=n_repeats, + random_state=rng, + n_jobs=n_jobs, + max_samples=max_samples, + ) + + assert r.importances.shape == (X.shape[1], n_repeats) + + # Split the importances between informative and noisy features + informative_importances = r.importances_mean[:n_informative_features] + noisy_importances = r.importances_mean[n_informative_features:] + + # Because we do not have a binary variable explaining each target classes, + # the RF model will have to use the random variable to make some + # (overfitting) splits (as max_depth is not set). Therefore the noisy + # variables will be non-zero but with small values oscillating around + # zero: + assert max(np.abs(noisy_importances)) > 1e-7 + assert noisy_importances.max() < 0.05 + + # The binary features correlated with y should have a higher importance + # than the high cardinality noisy features. + # The maximum test accuracy is 2 / 5 == 0.4, each informative feature + # contributing approximately a bit more than 0.2 of accuracy. 
+ assert informative_importances.min() > 0.15 + + +def test_permutation_importance_mixed_types(): + rng = np.random.RandomState(42) + n_repeats = 4 + + # Last column is correlated with y + X = np.array([[1.0, 2.0, 3.0, np.nan], [2, 1, 2, 1]]).T + y = np.array([0, 1, 0, 1]) + + clf = make_pipeline(SimpleImputer(), LogisticRegression(solver="lbfgs")) + clf.fit(X, y) + result = permutation_importance(clf, X, y, n_repeats=n_repeats, random_state=rng) + + assert result.importances.shape == (X.shape[1], n_repeats) + + # the correlated feature with y is the last column and should + # have the highest importance + assert np.all(result.importances_mean[-1] > result.importances_mean[:-1]) + + # use another random state + rng = np.random.RandomState(0) + result2 = permutation_importance(clf, X, y, n_repeats=n_repeats, random_state=rng) + assert result2.importances.shape == (X.shape[1], n_repeats) + + assert not np.allclose(result.importances, result2.importances) + + # the correlated feature with y is the last column and should + # have the highest importance + assert np.all(result2.importances_mean[-1] > result2.importances_mean[:-1]) + + +def test_permutation_importance_mixed_types_pandas(): + pd = pytest.importorskip("pandas") + rng = np.random.RandomState(42) + n_repeats = 5 + + # Last column is correlated with y + X = pd.DataFrame({"col1": [1.0, 2.0, 3.0, np.nan], "col2": ["a", "b", "a", "b"]}) + y = np.array([0, 1, 0, 1]) + + num_preprocess = make_pipeline(SimpleImputer(), StandardScaler()) + preprocess = ColumnTransformer( + [("num", num_preprocess, ["col1"]), ("cat", OneHotEncoder(), ["col2"])] + ) + clf = make_pipeline(preprocess, LogisticRegression(solver="lbfgs")) + clf.fit(X, y) + + result = permutation_importance(clf, X, y, n_repeats=n_repeats, random_state=rng) + + assert result.importances.shape == (X.shape[1], n_repeats) + # the correlated feature with y is the last column and should + # have the highest importance + assert np.all(result.importances_mean[-1] > 
result.importances_mean[:-1]) + + +def test_permutation_importance_linear_regresssion(): + X, y = make_regression(n_samples=500, n_features=10, random_state=0) + + X = scale(X) + y = scale(y) + + lr = LinearRegression().fit(X, y) + + # this relationship can be computed in closed form + expected_importances = 2 * lr.coef_**2 + results = permutation_importance( + lr, X, y, n_repeats=50, scoring="neg_mean_squared_error" + ) + assert_allclose( + expected_importances, results.importances_mean, rtol=1e-1, atol=1e-6 + ) + + +@pytest.mark.parametrize("max_samples", [500, 1.0]) +def test_permutation_importance_equivalence_sequential_parallel(max_samples): + # regression test to make sure that sequential and parallel calls will + # output the same results. + # Also tests that max_samples equal to number of samples is equivalent to 1.0 + X, y = make_regression(n_samples=500, n_features=10, random_state=0) + lr = LinearRegression().fit(X, y) + + importance_sequential = permutation_importance( + lr, X, y, n_repeats=5, random_state=0, n_jobs=1, max_samples=max_samples + ) + + # First check that the problem is structured enough and that the model is + # complex enough to not yield trivial, constant importances: + imp_min = importance_sequential["importances"].min() + imp_max = importance_sequential["importances"].max() + assert imp_max - imp_min > 0.3 + + # The actually check that parallelism does not impact the results + # either with shared memory (threading) or without isolated memory + # via process-based parallelism using the default backend + # ('loky' or 'multiprocessing') depending on the joblib version: + + # process-based parallelism (by default): + importance_processes = permutation_importance( + lr, X, y, n_repeats=5, random_state=0, n_jobs=2 + ) + assert_allclose( + importance_processes["importances"], importance_sequential["importances"] + ) + + # thread-based parallelism: + with parallel_backend("threading"): + importance_threading = permutation_importance( + lr, 
X, y, n_repeats=5, random_state=0, n_jobs=2 + ) + assert_allclose( + importance_threading["importances"], importance_sequential["importances"] + ) + + +@pytest.mark.parametrize("n_jobs", [None, 1, 2]) +@pytest.mark.parametrize("max_samples", [0.5, 1.0]) +def test_permutation_importance_equivalence_array_dataframe(n_jobs, max_samples): + # This test checks that the column shuffling logic has the same behavior + # both a dataframe and a simple numpy array. + pd = pytest.importorskip("pandas") + + # regression test to make sure that sequential and parallel calls will + # output the same results. + X, y = make_regression(n_samples=100, n_features=5, random_state=0) + X_df = pd.DataFrame(X) + + # Add a categorical feature that is statistically linked to y: + binner = KBinsDiscretizer(n_bins=3, encode="ordinal") + cat_column = binner.fit_transform(y.reshape(-1, 1)) + + # Concatenate the extra column to the numpy array: integers will be + # cast to float values + X = np.hstack([X, cat_column]) + assert X.dtype.kind == "f" + + # Insert extra column as a non-numpy-native dtype (while keeping backward + # compat for old pandas versions): + if hasattr(pd, "Categorical"): + cat_column = pd.Categorical(cat_column.ravel()) + else: + cat_column = cat_column.ravel() + new_col_idx = len(X_df.columns) + X_df[new_col_idx] = cat_column + assert X_df[new_col_idx].dtype == cat_column.dtype + + # Stich an arbitrary index to the dataframe: + X_df.index = np.arange(len(X_df)).astype(str) + + rf = RandomForestRegressor(n_estimators=5, max_depth=3, random_state=0) + rf.fit(X, y) + + n_repeats = 3 + importance_array = permutation_importance( + rf, + X, + y, + n_repeats=n_repeats, + random_state=0, + n_jobs=n_jobs, + max_samples=max_samples, + ) + + # First check that the problem is structured enough and that the model is + # complex enough to not yield trivial, constant importances: + imp_min = importance_array["importances"].min() + imp_max = importance_array["importances"].max() + assert 
imp_max - imp_min > 0.3 + + # Now check that importances computed on dataframe matche the values + # of those computed on the array with the same data. + importance_dataframe = permutation_importance( + rf, + X_df, + y, + n_repeats=n_repeats, + random_state=0, + n_jobs=n_jobs, + max_samples=max_samples, + ) + assert_allclose( + importance_array["importances"], importance_dataframe["importances"] + ) + + +@pytest.mark.parametrize("input_type", ["array", "dataframe"]) +def test_permutation_importance_large_memmaped_data(input_type): + # Smoke, non-regression test for: + # https://github.com/scikit-learn/scikit-learn/issues/15810 + n_samples, n_features = int(5e4), 4 + X, y = make_classification( + n_samples=n_samples, n_features=n_features, random_state=0 + ) + assert X.nbytes > 1e6 # trigger joblib memmaping + + X = _convert_container(X, input_type) + clf = DummyClassifier(strategy="prior").fit(X, y) + + # Actual smoke test: should not raise any error: + n_repeats = 5 + r = permutation_importance(clf, X, y, n_repeats=n_repeats, n_jobs=2) + + # Auxiliary check: DummyClassifier is feature independent: + # permutating feature should not change the predictions + expected_importances = np.zeros((n_features, n_repeats)) + assert_allclose(expected_importances, r.importances) + + +def test_permutation_importance_sample_weight(): + # Creating data with 2 features and 1000 samples, where the target + # variable is a linear combination of the two features, such that + # in half of the samples the impact of feature 1 is twice the impact of + # feature 2, and vice versa on the other half of the samples. 
+ rng = np.random.RandomState(1) + n_samples = 1000 + n_features = 2 + n_half_samples = n_samples // 2 + x = rng.normal(0.0, 0.001, (n_samples, n_features)) + y = np.zeros(n_samples) + y[:n_half_samples] = 2 * x[:n_half_samples, 0] + x[:n_half_samples, 1] + y[n_half_samples:] = x[n_half_samples:, 0] + 2 * x[n_half_samples:, 1] + + # Fitting linear regression with perfect prediction + lr = LinearRegression(fit_intercept=False) + lr.fit(x, y) + + # When all samples are weighted with the same weights, the ratio of + # the two features importance should equal to 1 on expectation (when using + # mean absolutes error as the loss function). + pi = permutation_importance( + lr, x, y, random_state=1, scoring="neg_mean_absolute_error", n_repeats=200 + ) + x1_x2_imp_ratio_w_none = pi.importances_mean[0] / pi.importances_mean[1] + assert x1_x2_imp_ratio_w_none == pytest.approx(1, 0.01) + + # When passing a vector of ones as the sample_weight, results should be + # the same as in the case that sample_weight=None. + w = np.ones(n_samples) + pi = permutation_importance( + lr, + x, + y, + random_state=1, + scoring="neg_mean_absolute_error", + n_repeats=200, + sample_weight=w, + ) + x1_x2_imp_ratio_w_ones = pi.importances_mean[0] / pi.importances_mean[1] + assert x1_x2_imp_ratio_w_ones == pytest.approx(x1_x2_imp_ratio_w_none, 0.01) + + # When the ratio between the weights of the first half of the samples and + # the second half of the samples approaches to infinity, the ratio of + # the two features importance should equal to 2 on expectation (when using + # mean absolutes error as the loss function). 
+ w = np.hstack([np.repeat(10.0**10, n_half_samples), np.repeat(1.0, n_half_samples)]) + lr.fit(x, y, w) + pi = permutation_importance( + lr, + x, + y, + random_state=1, + scoring="neg_mean_absolute_error", + n_repeats=200, + sample_weight=w, + ) + x1_x2_imp_ratio_w = pi.importances_mean[0] / pi.importances_mean[1] + assert x1_x2_imp_ratio_w / x1_x2_imp_ratio_w_none == pytest.approx(2, 0.01) + + +def test_permutation_importance_no_weights_scoring_function(): + # Creating a scorer function that does not takes sample_weight + def my_scorer(estimator, X, y): + return 1 + + # Creating some data and estimator for the permutation test + x = np.array([[1, 2], [3, 4]]) + y = np.array([1, 2]) + w = np.array([1, 1]) + lr = LinearRegression() + lr.fit(x, y) + + # test that permutation_importance does not return error when + # sample_weight is None + try: + permutation_importance(lr, x, y, random_state=1, scoring=my_scorer, n_repeats=1) + except TypeError: + pytest.fail( + "permutation_test raised an error when using a scorer " + "function that does not accept sample_weight even though " + "sample_weight was None" + ) + + # test that permutation_importance raise exception when sample_weight is + # not None + with pytest.raises(TypeError): + permutation_importance( + lr, x, y, random_state=1, scoring=my_scorer, n_repeats=1, sample_weight=w + ) + + +@pytest.mark.parametrize( + "list_single_scorer, multi_scorer", + [ + (["r2", "neg_mean_squared_error"], ["r2", "neg_mean_squared_error"]), + ( + ["r2", "neg_mean_squared_error"], + { + "r2": get_scorer("r2"), + "neg_mean_squared_error": get_scorer("neg_mean_squared_error"), + }, + ), + ( + ["r2", "neg_mean_squared_error"], + lambda estimator, X, y: { + "r2": r2_score(y, estimator.predict(X)), + "neg_mean_squared_error": -mean_squared_error(y, estimator.predict(X)), + }, + ), + ], +) +def test_permutation_importance_multi_metric(list_single_scorer, multi_scorer): + # Test permutation importance when scoring contains multiple scorers 
+ + # Creating some data and estimator for the permutation test + x, y = make_regression(n_samples=500, n_features=10, random_state=0) + lr = LinearRegression().fit(x, y) + + multi_importance = permutation_importance( + lr, x, y, random_state=1, scoring=multi_scorer, n_repeats=2 + ) + assert set(multi_importance.keys()) == set(list_single_scorer) + + for scorer in list_single_scorer: + multi_result = multi_importance[scorer] + single_result = permutation_importance( + lr, x, y, random_state=1, scoring=scorer, n_repeats=2 + ) + + assert_allclose(multi_result.importances, single_result.importances) + + +def test_permutation_importance_max_samples_error(): + """Check that a proper error message is raised when `max_samples` is not + set to a valid input value. + """ + X = np.array([(1.0, 2.0, 3.0, 4.0)]).T + y = np.array([0, 1, 0, 1]) + + clf = LogisticRegression() + clf.fit(X, y) + + err_msg = r"max_samples must be <= n_samples" + + with pytest.raises(ValueError, match=err_msg): + permutation_importance(clf, X, y, max_samples=5) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ff3ab57f01612e659f102abded5ffae092b2fef Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_isomap.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_isomap.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac2d8baf8fad0a9c088a0f02330113eb722746e4 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_isomap.cpython-310.pyc differ diff --git 
a/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_locally_linear.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_locally_linear.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..347e4ccd091b2dfad6f056f47ece75534a7cd212 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_locally_linear.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_mds.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_mds.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c9484c033e38150dddcc63e9e39825f6432c405 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_mds.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_spectral_embedding.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_spectral_embedding.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a4e9175cbafc0bd3fa512440a61b7c90840bb1f1 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_spectral_embedding.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_t_sne.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_t_sne.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6acaedc19c0344c5d6b2793cf226bc48155aafad Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/__pycache__/_t_sne.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/meson.build b/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/meson.build new 
file mode 100644 index 0000000000000000000000000000000000000000..ee83e8afc501949e2a7b1ee27b1f4a3c8c8f0c8a --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/meson.build @@ -0,0 +1,16 @@ +py.extension_module( + '_utils', + ['_utils.pyx', utils_cython_tree], + cython_args: cython_args, + subdir: 'sklearn/manifold', + install: true +) + +py.extension_module( + '_barnes_hut_tsne', + '_barnes_hut_tsne.pyx', + dependencies: [np_dep, openmp_dep], + cython_args: cython_args, + subdir: 'sklearn/manifold', + install: true +) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c449efd463b676b4454957a7555788659c994b7d Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_isomap.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_isomap.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab6807b4add247d6d2ff36e8e6f14b7ffd7dff48 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_isomap.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_mds.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_mds.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8158eda7fe11793eef8383925532c4ee1b798785 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_mds.cpython-310.pyc differ diff --git 
a/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_spectral_embedding.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_spectral_embedding.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..221f77c2e892d27ca2bc0e833d54ada24f588a4b Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_spectral_embedding.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_t_sne.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_t_sne.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..77e4ce5dd60513f732ad8798a4b6bfa11a50b2a3 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/tests/__pycache__/test_t_sne.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/tests/test_isomap.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/tests/test_isomap.py new file mode 100644 index 0000000000000000000000000000000000000000..e38b92442e58d9881726bdee85073ad38a7c95e1 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/tests/test_isomap.py @@ -0,0 +1,348 @@ +import math +from itertools import product + +import numpy as np +import pytest +from scipy.sparse import rand as sparse_rand + +from sklearn import clone, datasets, manifold, neighbors, pipeline, preprocessing +from sklearn.datasets import make_blobs +from sklearn.metrics.pairwise import pairwise_distances +from sklearn.utils._testing import ( + assert_allclose, + assert_allclose_dense_sparse, + assert_array_equal, +) +from sklearn.utils.fixes import CSR_CONTAINERS + +eigen_solvers = ["auto", "dense", "arpack"] +path_methods = ["auto", "FW", "D"] + + +def create_sample_data(dtype, n_pts=25, 
add_noise=False): + # grid of equidistant points in 2D, n_components = n_dim + n_per_side = int(math.sqrt(n_pts)) + X = np.array(list(product(range(n_per_side), repeat=2))).astype(dtype, copy=False) + if add_noise: + # add noise in a third dimension + rng = np.random.RandomState(0) + noise = 0.1 * rng.randn(n_pts, 1).astype(dtype, copy=False) + X = np.concatenate((X, noise), 1) + return X + + +@pytest.mark.parametrize("n_neighbors, radius", [(24, None), (None, np.inf)]) +@pytest.mark.parametrize("eigen_solver", eigen_solvers) +@pytest.mark.parametrize("path_method", path_methods) +def test_isomap_simple_grid( + global_dtype, n_neighbors, radius, eigen_solver, path_method +): + # Isomap should preserve distances when all neighbors are used + n_pts = 25 + X = create_sample_data(global_dtype, n_pts=n_pts, add_noise=False) + + # distances from each point to all others + if n_neighbors is not None: + G = neighbors.kneighbors_graph(X, n_neighbors, mode="distance") + else: + G = neighbors.radius_neighbors_graph(X, radius, mode="distance") + + clf = manifold.Isomap( + n_neighbors=n_neighbors, + radius=radius, + n_components=2, + eigen_solver=eigen_solver, + path_method=path_method, + ) + clf.fit(X) + + if n_neighbors is not None: + G_iso = neighbors.kneighbors_graph(clf.embedding_, n_neighbors, mode="distance") + else: + G_iso = neighbors.radius_neighbors_graph( + clf.embedding_, radius, mode="distance" + ) + atol = 1e-5 if global_dtype == np.float32 else 0 + assert_allclose_dense_sparse(G, G_iso, atol=atol) + + +@pytest.mark.parametrize("n_neighbors, radius", [(24, None), (None, np.inf)]) +@pytest.mark.parametrize("eigen_solver", eigen_solvers) +@pytest.mark.parametrize("path_method", path_methods) +def test_isomap_reconstruction_error( + global_dtype, n_neighbors, radius, eigen_solver, path_method +): + if global_dtype is np.float32: + pytest.skip( + "Skipping test due to numerical instabilities on float32 data" + "from KernelCenterer used in the reconstruction_error 
method" + ) + + # Same setup as in test_isomap_simple_grid, with an added dimension + n_pts = 25 + X = create_sample_data(global_dtype, n_pts=n_pts, add_noise=True) + + # compute input kernel + if n_neighbors is not None: + G = neighbors.kneighbors_graph(X, n_neighbors, mode="distance").toarray() + else: + G = neighbors.radius_neighbors_graph(X, radius, mode="distance").toarray() + centerer = preprocessing.KernelCenterer() + K = centerer.fit_transform(-0.5 * G**2) + + clf = manifold.Isomap( + n_neighbors=n_neighbors, + radius=radius, + n_components=2, + eigen_solver=eigen_solver, + path_method=path_method, + ) + clf.fit(X) + + # compute output kernel + if n_neighbors is not None: + G_iso = neighbors.kneighbors_graph(clf.embedding_, n_neighbors, mode="distance") + else: + G_iso = neighbors.radius_neighbors_graph( + clf.embedding_, radius, mode="distance" + ) + G_iso = G_iso.toarray() + K_iso = centerer.fit_transform(-0.5 * G_iso**2) + + # make sure error agrees + reconstruction_error = np.linalg.norm(K - K_iso) / n_pts + atol = 1e-5 if global_dtype == np.float32 else 0 + assert_allclose(reconstruction_error, clf.reconstruction_error(), atol=atol) + + +@pytest.mark.parametrize("n_neighbors, radius", [(2, None), (None, 0.5)]) +def test_transform(global_dtype, n_neighbors, radius): + n_samples = 200 + n_components = 10 + noise_scale = 0.01 + + # Create S-curve dataset + X, y = datasets.make_s_curve(n_samples, random_state=0) + + X = X.astype(global_dtype, copy=False) + + # Compute isomap embedding + iso = manifold.Isomap( + n_components=n_components, n_neighbors=n_neighbors, radius=radius + ) + X_iso = iso.fit_transform(X) + + # Re-embed a noisy version of the points + rng = np.random.RandomState(0) + noise = noise_scale * rng.randn(*X.shape) + X_iso2 = iso.transform(X + noise) + + # Make sure the rms error on re-embedding is comparable to noise_scale + assert np.sqrt(np.mean((X_iso - X_iso2) ** 2)) < 2 * noise_scale + + +@pytest.mark.parametrize("n_neighbors, radius", 
[(2, None), (None, 10.0)]) +def test_pipeline(n_neighbors, radius, global_dtype): + # check that Isomap works fine as a transformer in a Pipeline + # only checks that no error is raised. + # TODO check that it actually does something useful + X, y = datasets.make_blobs(random_state=0) + X = X.astype(global_dtype, copy=False) + clf = pipeline.Pipeline( + [ + ("isomap", manifold.Isomap(n_neighbors=n_neighbors, radius=radius)), + ("clf", neighbors.KNeighborsClassifier()), + ] + ) + clf.fit(X, y) + assert 0.9 < clf.score(X, y) + + +def test_pipeline_with_nearest_neighbors_transformer(global_dtype): + # Test chaining NearestNeighborsTransformer and Isomap with + # neighbors_algorithm='precomputed' + algorithm = "auto" + n_neighbors = 10 + + X, _ = datasets.make_blobs(random_state=0) + X2, _ = datasets.make_blobs(random_state=1) + + X = X.astype(global_dtype, copy=False) + X2 = X2.astype(global_dtype, copy=False) + + # compare the chained version and the compact version + est_chain = pipeline.make_pipeline( + neighbors.KNeighborsTransformer( + n_neighbors=n_neighbors, algorithm=algorithm, mode="distance" + ), + manifold.Isomap(n_neighbors=n_neighbors, metric="precomputed"), + ) + est_compact = manifold.Isomap( + n_neighbors=n_neighbors, neighbors_algorithm=algorithm + ) + + Xt_chain = est_chain.fit_transform(X) + Xt_compact = est_compact.fit_transform(X) + assert_allclose(Xt_chain, Xt_compact) + + Xt_chain = est_chain.transform(X2) + Xt_compact = est_compact.transform(X2) + assert_allclose(Xt_chain, Xt_compact) + + +@pytest.mark.parametrize( + "metric, p, is_euclidean", + [ + ("euclidean", 2, True), + ("manhattan", 1, False), + ("minkowski", 1, False), + ("minkowski", 2, True), + (lambda x1, x2: np.sqrt(np.sum(x1**2 + x2**2)), 2, False), + ], +) +def test_different_metric(global_dtype, metric, p, is_euclidean): + # Isomap must work on various metric parameters work correctly + # and must default to euclidean. 
+ X, _ = datasets.make_blobs(random_state=0) + X = X.astype(global_dtype, copy=False) + + reference = manifold.Isomap().fit_transform(X) + embedding = manifold.Isomap(metric=metric, p=p).fit_transform(X) + + if is_euclidean: + assert_allclose(embedding, reference) + else: + with pytest.raises(AssertionError, match="Not equal to tolerance"): + assert_allclose(embedding, reference) + + +def test_isomap_clone_bug(): + # regression test for bug reported in #6062 + model = manifold.Isomap() + for n_neighbors in [10, 15, 20]: + model.set_params(n_neighbors=n_neighbors) + model.fit(np.random.rand(50, 2)) + assert model.nbrs_.n_neighbors == n_neighbors + + +@pytest.mark.parametrize("eigen_solver", eigen_solvers) +@pytest.mark.parametrize("path_method", path_methods) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_sparse_input( + global_dtype, eigen_solver, path_method, global_random_seed, csr_container +): + # TODO: compare results on dense and sparse data as proposed in: + # https://github.com/scikit-learn/scikit-learn/pull/23585#discussion_r968388186 + X = csr_container( + sparse_rand( + 100, + 3, + density=0.1, + format="csr", + dtype=global_dtype, + random_state=global_random_seed, + ) + ) + + iso_dense = manifold.Isomap( + n_components=2, + eigen_solver=eigen_solver, + path_method=path_method, + n_neighbors=8, + ) + iso_sparse = clone(iso_dense) + + X_trans_dense = iso_dense.fit_transform(X.toarray()) + X_trans_sparse = iso_sparse.fit_transform(X) + + assert_allclose(X_trans_sparse, X_trans_dense, rtol=1e-4, atol=1e-4) + + +def test_isomap_fit_precomputed_radius_graph(global_dtype): + # Isomap.fit_transform must yield similar result when using + # a precomputed distance matrix. 
+ + X, y = datasets.make_s_curve(200, random_state=0) + X = X.astype(global_dtype, copy=False) + radius = 10 + + g = neighbors.radius_neighbors_graph(X, radius=radius, mode="distance") + isomap = manifold.Isomap(n_neighbors=None, radius=radius, metric="precomputed") + isomap.fit(g) + precomputed_result = isomap.embedding_ + + isomap = manifold.Isomap(n_neighbors=None, radius=radius, metric="minkowski") + result = isomap.fit_transform(X) + atol = 1e-5 if global_dtype == np.float32 else 0 + assert_allclose(precomputed_result, result, atol=atol) + + +def test_isomap_fitted_attributes_dtype(global_dtype): + """Check that the fitted attributes are stored accordingly to the + data type of X.""" + iso = manifold.Isomap(n_neighbors=2) + + X = np.array([[1, 2], [3, 4], [5, 6]], dtype=global_dtype) + + iso.fit(X) + + assert iso.dist_matrix_.dtype == global_dtype + assert iso.embedding_.dtype == global_dtype + + +def test_isomap_dtype_equivalence(): + """Check the equivalence of the results with 32 and 64 bits input.""" + iso_32 = manifold.Isomap(n_neighbors=2) + X_32 = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float32) + iso_32.fit(X_32) + + iso_64 = manifold.Isomap(n_neighbors=2) + X_64 = np.array([[1, 2], [3, 4], [5, 6]], dtype=np.float64) + iso_64.fit(X_64) + + assert_allclose(iso_32.dist_matrix_, iso_64.dist_matrix_) + + +def test_isomap_raise_error_when_neighbor_and_radius_both_set(): + # Isomap.fit_transform must raise a ValueError if + # radius and n_neighbors are provided. 
+ + X, _ = datasets.load_digits(return_X_y=True) + isomap = manifold.Isomap(n_neighbors=3, radius=5.5) + msg = "Both n_neighbors and radius are provided" + with pytest.raises(ValueError, match=msg): + isomap.fit_transform(X) + + +def test_multiple_connected_components(): + # Test that a warning is raised when the graph has multiple components + X = np.array([0, 1, 2, 5, 6, 7])[:, None] + with pytest.warns(UserWarning, match="number of connected components"): + manifold.Isomap(n_neighbors=2).fit(X) + + +def test_multiple_connected_components_metric_precomputed(global_dtype): + # Test that an error is raised when the graph has multiple components + # and when X is a precomputed neighbors graph. + X = np.array([0, 1, 2, 5, 6, 7])[:, None].astype(global_dtype, copy=False) + + # works with a precomputed distance matrix (dense) + X_distances = pairwise_distances(X) + with pytest.warns(UserWarning, match="number of connected components"): + manifold.Isomap(n_neighbors=1, metric="precomputed").fit(X_distances) + + # does not work with a precomputed neighbors graph (sparse) + X_graph = neighbors.kneighbors_graph(X, n_neighbors=2, mode="distance") + with pytest.raises(RuntimeError, match="number of connected components"): + manifold.Isomap(n_neighbors=1, metric="precomputed").fit(X_graph) + + +def test_get_feature_names_out(): + """Check get_feature_names_out for Isomap.""" + X, y = make_blobs(random_state=0, n_features=4) + n_components = 2 + + iso = manifold.Isomap(n_components=n_components) + iso.fit_transform(X) + names = iso.get_feature_names_out() + assert_array_equal([f"isomap{i}" for i in range(n_components)], names) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/tests/test_locally_linear.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/tests/test_locally_linear.py new file mode 100644 index 0000000000000000000000000000000000000000..835aa20fd1d32ace684eea9afd451bcdcf695f79 --- /dev/null +++ 
b/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/tests/test_locally_linear.py @@ -0,0 +1,171 @@ +from itertools import product + +import numpy as np +import pytest +from scipy import linalg + +from sklearn import manifold, neighbors +from sklearn.datasets import make_blobs +from sklearn.manifold._locally_linear import barycenter_kneighbors_graph +from sklearn.utils._testing import ( + assert_allclose, + assert_array_equal, + ignore_warnings, +) + +eigen_solvers = ["dense", "arpack"] + + +# ---------------------------------------------------------------------- +# Test utility routines +def test_barycenter_kneighbors_graph(global_dtype): + X = np.array([[0, 1], [1.01, 1.0], [2, 0]], dtype=global_dtype) + + graph = barycenter_kneighbors_graph(X, 1) + expected_graph = np.array( + [[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], dtype=global_dtype + ) + + assert graph.dtype == global_dtype + + assert_allclose(graph.toarray(), expected_graph) + + graph = barycenter_kneighbors_graph(X, 2) + # check that columns sum to one + assert_allclose(np.sum(graph.toarray(), axis=1), np.ones(3)) + pred = np.dot(graph.toarray(), X) + assert linalg.norm(pred - X) / X.shape[0] < 1 + + +# ---------------------------------------------------------------------- +# Test LLE by computing the reconstruction error on some manifolds. + + +def test_lle_simple_grid(global_dtype): + # note: ARPACK is numerically unstable, so this test will fail for + # some random seeds. We choose 42 because the tests pass. + # for arm64 platforms 2 makes the test fail. + # TODO: rewrite this test to make less sensitive to the random seed, + # irrespective of the platform. 
+ rng = np.random.RandomState(42) + + # grid of equidistant points in 2D, n_components = n_dim + X = np.array(list(product(range(5), repeat=2))) + X = X + 1e-10 * rng.uniform(size=X.shape) + X = X.astype(global_dtype, copy=False) + + n_components = 2 + clf = manifold.LocallyLinearEmbedding( + n_neighbors=5, n_components=n_components, random_state=rng + ) + tol = 0.1 + + N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray() + reconstruction_error = linalg.norm(np.dot(N, X) - X, "fro") + assert reconstruction_error < tol + + for solver in eigen_solvers: + clf.set_params(eigen_solver=solver) + clf.fit(X) + assert clf.embedding_.shape[1] == n_components + reconstruction_error = ( + linalg.norm(np.dot(N, clf.embedding_) - clf.embedding_, "fro") ** 2 + ) + + assert reconstruction_error < tol + assert_allclose(clf.reconstruction_error_, reconstruction_error, atol=1e-1) + + # re-embed a noisy version of X using the transform method + noise = rng.randn(*X.shape).astype(global_dtype, copy=False) / 100 + X_reembedded = clf.transform(X + noise) + assert linalg.norm(X_reembedded - clf.embedding_) < tol + + +@pytest.mark.parametrize("method", ["standard", "hessian", "modified", "ltsa"]) +@pytest.mark.parametrize("solver", eigen_solvers) +def test_lle_manifold(global_dtype, method, solver): + rng = np.random.RandomState(0) + # similar test on a slightly more complex manifold + X = np.array(list(product(np.arange(18), repeat=2))) + X = np.c_[X, X[:, 0] ** 2 / 18] + X = X + 1e-10 * rng.uniform(size=X.shape) + X = X.astype(global_dtype, copy=False) + n_components = 2 + + clf = manifold.LocallyLinearEmbedding( + n_neighbors=6, n_components=n_components, method=method, random_state=0 + ) + tol = 1.5 if method == "standard" else 3 + + N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray() + reconstruction_error = linalg.norm(np.dot(N, X) - X) + assert reconstruction_error < tol + + clf.set_params(eigen_solver=solver) + clf.fit(X) + assert clf.embedding_.shape[1] == 
n_components + reconstruction_error = ( + linalg.norm(np.dot(N, clf.embedding_) - clf.embedding_, "fro") ** 2 + ) + details = "solver: %s, method: %s" % (solver, method) + assert reconstruction_error < tol, details + assert ( + np.abs(clf.reconstruction_error_ - reconstruction_error) + < tol * reconstruction_error + ), details + + +def test_pipeline(): + # check that LocallyLinearEmbedding works fine as a Pipeline + # only checks that no error is raised. + # TODO check that it actually does something useful + from sklearn import datasets, pipeline + + X, y = datasets.make_blobs(random_state=0) + clf = pipeline.Pipeline( + [ + ("filter", manifold.LocallyLinearEmbedding(random_state=0)), + ("clf", neighbors.KNeighborsClassifier()), + ] + ) + clf.fit(X, y) + assert 0.9 < clf.score(X, y) + + +# Test the error raised when the weight matrix is singular +def test_singular_matrix(): + M = np.ones((200, 3)) + f = ignore_warnings + with pytest.raises(ValueError, match="Error in determining null-space with ARPACK"): + f( + manifold.locally_linear_embedding( + M, + n_neighbors=2, + n_components=1, + method="standard", + eigen_solver="arpack", + ) + ) + + +# regression test for #6033 +def test_integer_input(): + rand = np.random.RandomState(0) + X = rand.randint(0, 100, size=(20, 3)) + + for method in ["standard", "hessian", "modified", "ltsa"]: + clf = manifold.LocallyLinearEmbedding(method=method, n_neighbors=10) + clf.fit(X) # this previously raised a TypeError + + +def test_get_feature_names_out(): + """Check get_feature_names_out for LocallyLinearEmbedding.""" + X, y = make_blobs(random_state=0, n_features=4) + n_components = 2 + + iso = manifold.LocallyLinearEmbedding(n_components=n_components) + iso.fit(X) + names = iso.get_feature_names_out() + assert_array_equal( + [f"locallylinearembedding{i}" for i in range(n_components)], names + ) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/tests/test_mds.py 
b/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/tests/test_mds.py new file mode 100644 index 0000000000000000000000000000000000000000..2d286ef0942bfe65802dad803da5c2eee8c0e89e --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/tests/test_mds.py @@ -0,0 +1,87 @@ +from unittest.mock import Mock + +import numpy as np +import pytest +from numpy.testing import assert_allclose, assert_array_almost_equal + +from sklearn.manifold import _mds as mds +from sklearn.metrics import euclidean_distances + + +def test_smacof(): + # test metric smacof using the data of "Modern Multidimensional Scaling", + # Borg & Groenen, p 154 + sim = np.array([[0, 5, 3, 4], [5, 0, 2, 2], [3, 2, 0, 1], [4, 2, 1, 0]]) + Z = np.array([[-0.266, -0.539], [0.451, 0.252], [0.016, -0.238], [-0.200, 0.524]]) + X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1) + X_true = np.array( + [[-1.415, -2.471], [1.633, 1.107], [0.249, -0.067], [-0.468, 1.431]] + ) + assert_array_almost_equal(X, X_true, decimal=3) + + +def test_smacof_error(): + # Not symmetric similarity matrix: + sim = np.array([[0, 5, 9, 4], [5, 0, 2, 2], [3, 2, 0, 1], [4, 2, 1, 0]]) + + with pytest.raises(ValueError): + mds.smacof(sim) + + # Not squared similarity matrix: + sim = np.array([[0, 5, 9, 4], [5, 0, 2, 2], [4, 2, 1, 0]]) + + with pytest.raises(ValueError): + mds.smacof(sim) + + # init not None and not correct format: + sim = np.array([[0, 5, 3, 4], [5, 0, 2, 2], [3, 2, 0, 1], [4, 2, 1, 0]]) + + Z = np.array([[-0.266, -0.539], [0.016, -0.238], [-0.200, 0.524]]) + with pytest.raises(ValueError): + mds.smacof(sim, init=Z, n_init=1) + + +def test_MDS(): + sim = np.array([[0, 5, 3, 4], [5, 0, 2, 2], [3, 2, 0, 1], [4, 2, 1, 0]]) + mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed") + mds_clf.fit(sim) + + +@pytest.mark.parametrize("k", [0.5, 1.5, 2]) +def test_normed_stress(k): + """Test that non-metric MDS normalized stress is scale-invariant.""" + sim = 
np.array([[0, 5, 3, 4], [5, 0, 2, 2], [3, 2, 0, 1], [4, 2, 1, 0]]) + + X1, stress1 = mds.smacof(sim, metric=False, max_iter=5, random_state=0) + X2, stress2 = mds.smacof(k * sim, metric=False, max_iter=5, random_state=0) + + assert_allclose(stress1, stress2, rtol=1e-5) + assert_allclose(X1, X2, rtol=1e-5) + + +def test_normalize_metric_warning(): + """ + Test that a UserWarning is emitted when using normalized stress with + metric-MDS. + """ + msg = "Normalized stress is not supported" + sim = np.array([[0, 5, 3, 4], [5, 0, 2, 2], [3, 2, 0, 1], [4, 2, 1, 0]]) + with pytest.raises(ValueError, match=msg): + mds.smacof(sim, metric=True, normalized_stress=True) + + +@pytest.mark.parametrize("metric", [True, False]) +def test_normalized_stress_auto(metric, monkeypatch): + rng = np.random.RandomState(0) + X = rng.randn(4, 3) + dist = euclidean_distances(X) + + mock = Mock(side_effect=mds._smacof_single) + monkeypatch.setattr("sklearn.manifold._mds._smacof_single", mock) + + est = mds.MDS(metric=metric, normalized_stress="auto", random_state=rng) + est.fit_transform(X) + assert mock.call_args[1]["normalized_stress"] != metric + + mds.smacof(dist, metric=metric, normalized_stress="auto", random_state=rng) + assert mock.call_args[1]["normalized_stress"] != metric diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/tests/test_t_sne.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/tests/test_t_sne.py new file mode 100644 index 0000000000000000000000000000000000000000..138c06d05dfde4626b4584ff815388eca198d97f --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/manifold/tests/test_t_sne.py @@ -0,0 +1,1208 @@ +import sys +from io import StringIO + +import numpy as np +import pytest +import scipy.sparse as sp +from numpy.testing import assert_allclose +from scipy.optimize import check_grad +from scipy.spatial.distance import pdist, squareform + +from sklearn import config_context +from sklearn.datasets import make_blobs + +# 
mypy error: Module 'sklearn.manifold' has no attribute '_barnes_hut_tsne' +from sklearn.manifold import ( # type: ignore + TSNE, + _barnes_hut_tsne, +) +from sklearn.manifold._t_sne import ( + _gradient_descent, + _joint_probabilities, + _joint_probabilities_nn, + _kl_divergence, + _kl_divergence_bh, + trustworthiness, +) +from sklearn.manifold._utils import _binary_search_perplexity +from sklearn.metrics.pairwise import ( + cosine_distances, + manhattan_distances, + pairwise_distances, +) +from sklearn.neighbors import NearestNeighbors, kneighbors_graph +from sklearn.utils import check_random_state +from sklearn.utils._testing import ( + assert_almost_equal, + assert_array_almost_equal, + assert_array_equal, + skip_if_32bit, +) +from sklearn.utils.fixes import CSR_CONTAINERS, LIL_CONTAINERS + +x = np.linspace(0, 1, 10) +xx, yy = np.meshgrid(x, x) +X_2d_grid = np.hstack( + [ + xx.ravel().reshape(-1, 1), + yy.ravel().reshape(-1, 1), + ] +) + + +def test_gradient_descent_stops(): + # Test stopping conditions of gradient descent. 
+ class ObjectiveSmallGradient: + def __init__(self): + self.it = -1 + + def __call__(self, _, compute_error=True): + self.it += 1 + return (10 - self.it) / 10.0, np.array([1e-5]) + + def flat_function(_, compute_error=True): + return 0.0, np.ones(1) + + # Gradient norm + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + _, error, it = _gradient_descent( + ObjectiveSmallGradient(), + np.zeros(1), + 0, + max_iter=100, + n_iter_without_progress=100, + momentum=0.0, + learning_rate=0.0, + min_gain=0.0, + min_grad_norm=1e-5, + verbose=2, + ) + finally: + out = sys.stdout.getvalue() + sys.stdout.close() + sys.stdout = old_stdout + assert error == 1.0 + assert it == 0 + assert "gradient norm" in out + + # Maximum number of iterations without improvement + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + _, error, it = _gradient_descent( + flat_function, + np.zeros(1), + 0, + max_iter=100, + n_iter_without_progress=10, + momentum=0.0, + learning_rate=0.0, + min_gain=0.0, + min_grad_norm=0.0, + verbose=2, + ) + finally: + out = sys.stdout.getvalue() + sys.stdout.close() + sys.stdout = old_stdout + assert error == 0.0 + assert it == 11 + assert "did not make any progress" in out + + # Maximum number of iterations + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + _, error, it = _gradient_descent( + ObjectiveSmallGradient(), + np.zeros(1), + 0, + max_iter=11, + n_iter_without_progress=100, + momentum=0.0, + learning_rate=0.0, + min_gain=0.0, + min_grad_norm=0.0, + verbose=2, + ) + finally: + out = sys.stdout.getvalue() + sys.stdout.close() + sys.stdout = old_stdout + assert error == 0.0 + assert it == 10 + assert "Iteration 10" in out + + +def test_binary_search(): + # Test if the binary search finds Gaussians with desired perplexity. 
+ random_state = check_random_state(0) + data = random_state.randn(50, 5) + distances = pairwise_distances(data).astype(np.float32) + desired_perplexity = 25.0 + P = _binary_search_perplexity(distances, desired_perplexity, verbose=0) + P = np.maximum(P, np.finfo(np.double).eps) + mean_perplexity = np.mean( + [np.exp(-np.sum(P[i] * np.log(P[i]))) for i in range(P.shape[0])] + ) + assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3) + + +def test_binary_search_underflow(): + # Test if the binary search finds Gaussians with desired perplexity. + # A more challenging case than the one above, producing numeric + # underflow in float precision (see issue #19471 and PR #19472). + random_state = check_random_state(42) + data = random_state.randn(1, 90).astype(np.float32) + 100 + desired_perplexity = 30.0 + P = _binary_search_perplexity(data, desired_perplexity, verbose=0) + perplexity = 2 ** -np.nansum(P[0, 1:] * np.log2(P[0, 1:])) + assert_almost_equal(perplexity, desired_perplexity, decimal=3) + + +def test_binary_search_neighbors(): + # Binary perplexity search approximation. + # Should be approximately equal to the slow method when we use + # all points as neighbors. 
+ n_samples = 200 + desired_perplexity = 25.0 + random_state = check_random_state(0) + data = random_state.randn(n_samples, 2).astype(np.float32, copy=False) + distances = pairwise_distances(data) + P1 = _binary_search_perplexity(distances, desired_perplexity, verbose=0) + + # Test that when we use all the neighbors the results are identical + n_neighbors = n_samples - 1 + nn = NearestNeighbors().fit(data) + distance_graph = nn.kneighbors_graph(n_neighbors=n_neighbors, mode="distance") + distances_nn = distance_graph.data.astype(np.float32, copy=False) + distances_nn = distances_nn.reshape(n_samples, n_neighbors) + P2 = _binary_search_perplexity(distances_nn, desired_perplexity, verbose=0) + + indptr = distance_graph.indptr + P1_nn = np.array( + [ + P1[k, distance_graph.indices[indptr[k] : indptr[k + 1]]] + for k in range(n_samples) + ] + ) + assert_array_almost_equal(P1_nn, P2, decimal=4) + + # Test that the highest P_ij are the same when fewer neighbors are used + for k in np.linspace(150, n_samples - 1, 5): + k = int(k) + topn = k * 10 # check the top 10 * k entries out of k * k entries + distance_graph = nn.kneighbors_graph(n_neighbors=k, mode="distance") + distances_nn = distance_graph.data.astype(np.float32, copy=False) + distances_nn = distances_nn.reshape(n_samples, k) + P2k = _binary_search_perplexity(distances_nn, desired_perplexity, verbose=0) + assert_array_almost_equal(P1_nn, P2, decimal=2) + idx = np.argsort(P1.ravel())[::-1] + P1top = P1.ravel()[idx][:topn] + idx = np.argsort(P2k.ravel())[::-1] + P2top = P2k.ravel()[idx][:topn] + assert_array_almost_equal(P1top, P2top, decimal=2) + + +def test_binary_perplexity_stability(): + # Binary perplexity search should be stable. + # The binary_search_perplexity had a bug wherein the P array + # was uninitialized, leading to sporadically failing tests. 
+ n_neighbors = 10 + n_samples = 100 + random_state = check_random_state(0) + data = random_state.randn(n_samples, 5) + nn = NearestNeighbors().fit(data) + distance_graph = nn.kneighbors_graph(n_neighbors=n_neighbors, mode="distance") + distances = distance_graph.data.astype(np.float32, copy=False) + distances = distances.reshape(n_samples, n_neighbors) + last_P = None + desired_perplexity = 3 + for _ in range(100): + P = _binary_search_perplexity(distances.copy(), desired_perplexity, verbose=0) + P1 = _joint_probabilities_nn(distance_graph, desired_perplexity, verbose=0) + # Convert the sparse matrix to a dense one for testing + P1 = P1.toarray() + if last_P is None: + last_P = P + last_P1 = P1 + else: + assert_array_almost_equal(P, last_P, decimal=4) + assert_array_almost_equal(P1, last_P1, decimal=4) + + +def test_gradient(): + # Test gradient of Kullback-Leibler divergence. + random_state = check_random_state(0) + + n_samples = 50 + n_features = 2 + n_components = 2 + alpha = 1.0 + + distances = random_state.randn(n_samples, n_features).astype(np.float32) + distances = np.abs(distances.dot(distances.T)) + np.fill_diagonal(distances, 0.0) + X_embedded = random_state.randn(n_samples, n_components).astype(np.float32) + + P = _joint_probabilities(distances, desired_perplexity=25.0, verbose=0) + + def fun(params): + return _kl_divergence(params, P, alpha, n_samples, n_components)[0] + + def grad(params): + return _kl_divergence(params, P, alpha, n_samples, n_components)[1] + + assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0, decimal=5) + + +def test_trustworthiness(): + # Test trustworthiness score. 
+ random_state = check_random_state(0) + + # Affine transformation + X = random_state.randn(100, 2) + assert trustworthiness(X, 5.0 + X / 10.0) == 1.0 + + # Randomly shuffled + X = np.arange(100).reshape(-1, 1) + X_embedded = X.copy() + random_state.shuffle(X_embedded) + assert trustworthiness(X, X_embedded) < 0.6 + + # Completely different + X = np.arange(5).reshape(-1, 1) + X_embedded = np.array([[0], [2], [4], [1], [3]]) + assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2) + + +def test_trustworthiness_n_neighbors_error(): + """Raise an error when n_neighbors >= n_samples / 2. + + Non-regression test for #18567. + """ + regex = "n_neighbors .+ should be less than .+" + rng = np.random.RandomState(42) + X = rng.rand(7, 4) + X_embedded = rng.rand(7, 2) + with pytest.raises(ValueError, match=regex): + trustworthiness(X, X_embedded, n_neighbors=5) + + trust = trustworthiness(X, X_embedded, n_neighbors=3) + assert 0 <= trust <= 1 + + +@pytest.mark.parametrize("method", ["exact", "barnes_hut"]) +@pytest.mark.parametrize("init", ("random", "pca")) +def test_preserve_trustworthiness_approximately(method, init): + # Nearest neighbors should be preserved approximately. 
+ random_state = check_random_state(0) + n_components = 2 + X = random_state.randn(50, n_components).astype(np.float32) + tsne = TSNE( + n_components=n_components, + init=init, + random_state=0, + method=method, + max_iter=700, + learning_rate="auto", + ) + X_embedded = tsne.fit_transform(X) + t = trustworthiness(X, X_embedded, n_neighbors=1) + assert t > 0.85 + + +def test_optimization_minimizes_kl_divergence(): + """t-SNE should give a lower KL divergence with more iterations.""" + random_state = check_random_state(0) + X, _ = make_blobs(n_features=3, random_state=random_state) + kl_divergences = [] + for max_iter in [250, 300, 350]: + tsne = TSNE( + n_components=2, + init="random", + perplexity=10, + learning_rate=100.0, + max_iter=max_iter, + random_state=0, + ) + tsne.fit_transform(X) + kl_divergences.append(tsne.kl_divergence_) + assert kl_divergences[1] <= kl_divergences[0] + assert kl_divergences[2] <= kl_divergences[1] + + +@pytest.mark.parametrize("method", ["exact", "barnes_hut"]) +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_fit_transform_csr_matrix(method, csr_container): + # TODO: compare results on dense and sparse data as proposed in: + # https://github.com/scikit-learn/scikit-learn/pull/23585#discussion_r968388186 + # X can be a sparse matrix. + rng = check_random_state(0) + X = rng.randn(50, 2) + X[(rng.randint(0, 50, 25), rng.randint(0, 2, 25))] = 0.0 + X_csr = csr_container(X) + tsne = TSNE( + n_components=2, + init="random", + perplexity=10, + learning_rate=100.0, + random_state=0, + method=method, + max_iter=750, + ) + X_embedded = tsne.fit_transform(X_csr) + assert_allclose(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0, rtol=1.1e-1) + + +def test_preserve_trustworthiness_approximately_with_precomputed_distances(): + # Nearest neighbors should be preserved approximately. 
+ random_state = check_random_state(0) + for i in range(3): + X = random_state.randn(80, 2) + D = squareform(pdist(X), "sqeuclidean") + tsne = TSNE( + n_components=2, + perplexity=2, + learning_rate=100.0, + early_exaggeration=2.0, + metric="precomputed", + random_state=i, + verbose=0, + max_iter=500, + init="random", + ) + X_embedded = tsne.fit_transform(D) + t = trustworthiness(D, X_embedded, n_neighbors=1, metric="precomputed") + assert t > 0.95 + + +def test_trustworthiness_not_euclidean_metric(): + # Test trustworthiness with a metric different from 'euclidean' and + # 'precomputed' + random_state = check_random_state(0) + X = random_state.randn(100, 2) + assert trustworthiness(X, X, metric="cosine") == trustworthiness( + pairwise_distances(X, metric="cosine"), X, metric="precomputed" + ) + + +@pytest.mark.parametrize( + "method, retype", + [ + ("exact", np.asarray), + ("barnes_hut", np.asarray), + *[("barnes_hut", csr_container) for csr_container in CSR_CONTAINERS], + ], +) +@pytest.mark.parametrize( + "D, message_regex", + [ + ([[0.0], [1.0]], ".* square distance matrix"), + ([[0.0, -1.0], [1.0, 0.0]], ".* positive.*"), + ], +) +def test_bad_precomputed_distances(method, D, retype, message_regex): + tsne = TSNE( + metric="precomputed", + method=method, + init="random", + random_state=42, + perplexity=1, + ) + with pytest.raises(ValueError, match=message_regex): + tsne.fit_transform(retype(D)) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_exact_no_precomputed_sparse(csr_container): + tsne = TSNE( + metric="precomputed", + method="exact", + init="random", + random_state=42, + perplexity=1, + ) + with pytest.raises(TypeError, match="sparse"): + tsne.fit_transform(csr_container([[0, 5], [5, 0]])) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_high_perplexity_precomputed_sparse_distances(csr_container): + # Perplexity should be less than 50 + dist = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]]) 
+ bad_dist = csr_container(dist) + tsne = TSNE(metric="precomputed", init="random", random_state=42, perplexity=1) + msg = "3 neighbors per samples are required, but some samples have only 1" + with pytest.raises(ValueError, match=msg): + tsne.fit_transform(bad_dist) + + +@pytest.mark.filterwarnings( + "ignore:Precomputed sparse input was not sorted by " + "row values:sklearn.exceptions.EfficiencyWarning" +) +@pytest.mark.parametrize("sparse_container", CSR_CONTAINERS + LIL_CONTAINERS) +def test_sparse_precomputed_distance(sparse_container): + """Make sure that TSNE works identically for sparse and dense matrix""" + random_state = check_random_state(0) + X = random_state.randn(100, 2) + + D_sparse = kneighbors_graph(X, n_neighbors=100, mode="distance", include_self=True) + D = pairwise_distances(X) + assert sp.issparse(D_sparse) + assert_almost_equal(D_sparse.toarray(), D) + + tsne = TSNE( + metric="precomputed", random_state=0, init="random", learning_rate="auto" + ) + Xt_dense = tsne.fit_transform(D) + + Xt_sparse = tsne.fit_transform(sparse_container(D_sparse)) + assert_almost_equal(Xt_dense, Xt_sparse) + + +def test_non_positive_computed_distances(): + # Computed distance matrices must be positive. 
+ def metric(x, y): + return -1 + + # Negative computed distances should be caught even if result is squared + tsne = TSNE(metric=metric, method="exact", perplexity=1) + X = np.array([[0.0, 0.0], [1.0, 1.0]]) + with pytest.raises(ValueError, match="All distances .*metric given.*"): + tsne.fit_transform(X) + + +def test_init_ndarray(): + # Initialize TSNE with ndarray and test fit + tsne = TSNE(init=np.zeros((100, 2)), learning_rate="auto") + X_embedded = tsne.fit_transform(np.ones((100, 5))) + assert_array_equal(np.zeros((100, 2)), X_embedded) + + +def test_init_ndarray_precomputed(): + # Initialize TSNE with ndarray and metric 'precomputed' + # Make sure no FutureWarning is thrown from _fit + tsne = TSNE( + init=np.zeros((100, 2)), + metric="precomputed", + learning_rate=50.0, + ) + tsne.fit(np.zeros((100, 100))) + + +def test_pca_initialization_not_compatible_with_precomputed_kernel(): + # Precomputed distance matrices cannot use PCA initialization. + tsne = TSNE(metric="precomputed", init="pca", perplexity=1) + with pytest.raises( + ValueError, + match='The parameter init="pca" cannot be used with metric="precomputed".', + ): + tsne.fit_transform(np.array([[0.0], [1.0]])) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_pca_initialization_not_compatible_with_sparse_input(csr_container): + # Sparse input matrices cannot use PCA initialization. 
+ tsne = TSNE(init="pca", learning_rate=100.0, perplexity=1) + with pytest.raises(TypeError, match="PCA initialization.*"): + tsne.fit_transform(csr_container([[0, 5], [5, 0]])) + + +def test_n_components_range(): + # barnes_hut method should only be used with n_components <= 3 + tsne = TSNE(n_components=4, method="barnes_hut", perplexity=1) + with pytest.raises(ValueError, match="'n_components' should be .*"): + tsne.fit_transform(np.array([[0.0], [1.0]])) + + +def test_early_exaggeration_used(): + # check that the ``early_exaggeration`` parameter has an effect + random_state = check_random_state(0) + n_components = 2 + methods = ["exact", "barnes_hut"] + X = random_state.randn(25, n_components).astype(np.float32) + for method in methods: + tsne = TSNE( + n_components=n_components, + perplexity=1, + learning_rate=100.0, + init="pca", + random_state=0, + method=method, + early_exaggeration=1.0, + max_iter=250, + ) + X_embedded1 = tsne.fit_transform(X) + tsne = TSNE( + n_components=n_components, + perplexity=1, + learning_rate=100.0, + init="pca", + random_state=0, + method=method, + early_exaggeration=10.0, + max_iter=250, + ) + X_embedded2 = tsne.fit_transform(X) + + assert not np.allclose(X_embedded1, X_embedded2) + + +def test_max_iter_used(): + # check that the ``max_iter`` parameter has an effect + random_state = check_random_state(0) + n_components = 2 + methods = ["exact", "barnes_hut"] + X = random_state.randn(25, n_components).astype(np.float32) + for method in methods: + for max_iter in [251, 500]: + tsne = TSNE( + n_components=n_components, + perplexity=1, + learning_rate=0.5, + init="random", + random_state=0, + method=method, + early_exaggeration=1.0, + max_iter=max_iter, + ) + tsne.fit_transform(X) + + assert tsne.n_iter_ == max_iter - 1 + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_answer_gradient_two_points(csr_container): + # Test the tree with only a single set of children. 
+ # + # These tests & answers have been checked against the reference + # implementation by LvdM. + pos_input = np.array([[1.0, 0.0], [0.0, 1.0]]) + pos_output = np.array( + [[-4.961291e-05, -1.072243e-04], [9.259460e-05, 2.702024e-04]] + ) + neighbors = np.array([[1], [0]]) + grad_output = np.array( + [[-2.37012478e-05, -6.29044398e-05], [2.37012478e-05, 6.29044398e-05]] + ) + _run_answer_test(pos_input, pos_output, neighbors, grad_output, csr_container) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_answer_gradient_four_points(csr_container): + # Four points tests the tree with multiple levels of children. + # + # These tests & answers have been checked against the reference + # implementation by LvdM. + pos_input = np.array([[1.0, 0.0], [0.0, 1.0], [5.0, 2.0], [7.3, 2.2]]) + pos_output = np.array( + [ + [6.080564e-05, -7.120823e-05], + [-1.718945e-04, -4.000536e-05], + [-2.271720e-04, 8.663310e-05], + [-1.032577e-04, -3.582033e-05], + ] + ) + neighbors = np.array([[1, 2, 3], [0, 2, 3], [1, 0, 3], [1, 2, 0]]) + grad_output = np.array( + [ + [5.81128448e-05, -7.78033454e-06], + [-5.81526851e-05, 7.80976444e-06], + [4.24275173e-08, -3.69569698e-08], + [-2.58720939e-09, 7.52706374e-09], + ] + ) + _run_answer_test(pos_input, pos_output, neighbors, grad_output, csr_container) + + +@pytest.mark.parametrize("csr_container", CSR_CONTAINERS) +def test_skip_num_points_gradient(csr_container): + # Test the kwargs option skip_num_points. + # + # Skip num points should make it such that the Barnes_hut gradient + # is not calculated for indices below skip_num_point. 
+ # Aside from skip_num_points=2 and the first two gradient rows + # being set to zero, these data points are the same as in + # test_answer_gradient_four_points() + pos_input = np.array([[1.0, 0.0], [0.0, 1.0], [5.0, 2.0], [7.3, 2.2]]) + pos_output = np.array( + [ + [6.080564e-05, -7.120823e-05], + [-1.718945e-04, -4.000536e-05], + [-2.271720e-04, 8.663310e-05], + [-1.032577e-04, -3.582033e-05], + ] + ) + neighbors = np.array([[1, 2, 3], [0, 2, 3], [1, 0, 3], [1, 2, 0]]) + grad_output = np.array( + [ + [0.0, 0.0], + [0.0, 0.0], + [4.24275173e-08, -3.69569698e-08], + [-2.58720939e-09, 7.52706374e-09], + ] + ) + _run_answer_test( + pos_input, pos_output, neighbors, grad_output, csr_container, False, 0.1, 2 + ) + + +def _run_answer_test( + pos_input, + pos_output, + neighbors, + grad_output, + csr_container, + verbose=False, + perplexity=0.1, + skip_num_points=0, +): + distances = pairwise_distances(pos_input).astype(np.float32) + args = distances, perplexity, verbose + pos_output = pos_output.astype(np.float32) + neighbors = neighbors.astype(np.int64, copy=False) + pij_input = _joint_probabilities(*args) + pij_input = squareform(pij_input).astype(np.float32) + grad_bh = np.zeros(pos_output.shape, dtype=np.float32) + + P = csr_container(pij_input) + + neighbors = P.indices.astype(np.int64) + indptr = P.indptr.astype(np.int64) + + _barnes_hut_tsne.gradient( + P.data, pos_output, neighbors, indptr, grad_bh, 0.5, 2, 1, skip_num_points=0 + ) + assert_array_almost_equal(grad_bh, grad_output, decimal=4) + + +def test_verbose(): + # Verbose options write to stdout. + random_state = check_random_state(0) + tsne = TSNE(verbose=2, perplexity=4) + X = random_state.randn(5, 2) + + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + tsne.fit_transform(X) + finally: + out = sys.stdout.getvalue() + sys.stdout.close() + sys.stdout = old_stdout + + assert "[t-SNE]" in out + assert "nearest neighbors..." 
in out + assert "Computed conditional probabilities" in out + assert "Mean sigma" in out + assert "early exaggeration" in out + + +def test_chebyshev_metric(): + # t-SNE should allow metrics that cannot be squared (issue #3526). + random_state = check_random_state(0) + tsne = TSNE(metric="chebyshev", perplexity=4) + X = random_state.randn(5, 2) + tsne.fit_transform(X) + + +def test_reduction_to_one_component(): + # t-SNE should allow reduction to one component (issue #4154). + random_state = check_random_state(0) + tsne = TSNE(n_components=1, perplexity=4) + X = random_state.randn(5, 2) + X_embedded = tsne.fit(X).embedding_ + assert np.all(np.isfinite(X_embedded)) + + +@pytest.mark.parametrize("method", ["barnes_hut", "exact"]) +@pytest.mark.parametrize("dt", [np.float32, np.float64]) +def test_64bit(method, dt): + # Ensure 64bit arrays are handled correctly. + random_state = check_random_state(0) + + X = random_state.randn(10, 2).astype(dt, copy=False) + tsne = TSNE( + n_components=2, + perplexity=2, + learning_rate=100.0, + random_state=0, + method=method, + verbose=0, + max_iter=300, + init="random", + ) + X_embedded = tsne.fit_transform(X) + effective_type = X_embedded.dtype + + # tsne cython code is only single precision, so the output will + # always be single precision, irrespectively of the input dtype + assert effective_type == np.float32 + + +@pytest.mark.parametrize("method", ["barnes_hut", "exact"]) +def test_kl_divergence_not_nan(method): + # Ensure kl_divergence_ is computed at last iteration + # even though max_iter % n_iter_check != 0, i.e. 1003 % 50 != 0 + random_state = check_random_state(0) + + X = random_state.randn(50, 2) + tsne = TSNE( + n_components=2, + perplexity=2, + learning_rate=100.0, + random_state=0, + method=method, + verbose=0, + max_iter=503, + init="random", + ) + tsne.fit_transform(X) + + assert not np.isnan(tsne.kl_divergence_) + + +def test_barnes_hut_angle(): + # When Barnes-Hut's angle=0 this corresponds to the exact method. 
+ angle = 0.0 + perplexity = 10 + n_samples = 100 + for n_components in [2, 3]: + n_features = 5 + degrees_of_freedom = float(n_components - 1.0) + + random_state = check_random_state(0) + data = random_state.randn(n_samples, n_features) + distances = pairwise_distances(data) + params = random_state.randn(n_samples, n_components) + P = _joint_probabilities(distances, perplexity, verbose=0) + kl_exact, grad_exact = _kl_divergence( + params, P, degrees_of_freedom, n_samples, n_components + ) + + n_neighbors = n_samples - 1 + distances_csr = ( + NearestNeighbors() + .fit(data) + .kneighbors_graph(n_neighbors=n_neighbors, mode="distance") + ) + P_bh = _joint_probabilities_nn(distances_csr, perplexity, verbose=0) + kl_bh, grad_bh = _kl_divergence_bh( + params, + P_bh, + degrees_of_freedom, + n_samples, + n_components, + angle=angle, + skip_num_points=0, + verbose=0, + ) + + P = squareform(P) + P_bh = P_bh.toarray() + assert_array_almost_equal(P_bh, P, decimal=5) + assert_almost_equal(kl_exact, kl_bh, decimal=3) + + +@skip_if_32bit +def test_n_iter_without_progress(): + # Use a dummy negative n_iter_without_progress and check output on stdout + random_state = check_random_state(0) + X = random_state.randn(100, 10) + for method in ["barnes_hut", "exact"]: + tsne = TSNE( + n_iter_without_progress=-1, + verbose=2, + learning_rate=1e8, + random_state=0, + method=method, + max_iter=351, + init="random", + ) + tsne._N_ITER_CHECK = 1 + tsne._EXPLORATION_MAX_ITER = 0 + + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + tsne.fit_transform(X) + finally: + out = sys.stdout.getvalue() + sys.stdout.close() + sys.stdout = old_stdout + + # The output needs to contain the value of n_iter_without_progress + assert "did not make any progress during the last -1 episodes. Finished." 
in out + + +def test_min_grad_norm(): + # Make sure that the parameter min_grad_norm is used correctly + random_state = check_random_state(0) + X = random_state.randn(100, 2) + min_grad_norm = 0.002 + tsne = TSNE(min_grad_norm=min_grad_norm, verbose=2, random_state=0, method="exact") + + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + tsne.fit_transform(X) + finally: + out = sys.stdout.getvalue() + sys.stdout.close() + sys.stdout = old_stdout + + lines_out = out.split("\n") + + # extract the gradient norm from the verbose output + gradient_norm_values = [] + for line in lines_out: + # When the computation is Finished just an old gradient norm value + # is repeated that we do not need to store + if "Finished" in line: + break + + start_grad_norm = line.find("gradient norm") + if start_grad_norm >= 0: + line = line[start_grad_norm:] + line = line.replace("gradient norm = ", "").split(" ")[0] + gradient_norm_values.append(float(line)) + + # Compute how often the gradient norm is smaller than min_grad_norm + gradient_norm_values = np.array(gradient_norm_values) + n_smaller_gradient_norms = len( + gradient_norm_values[gradient_norm_values <= min_grad_norm] + ) + + # The gradient norm can be smaller than min_grad_norm at most once, + # because in the moment it becomes smaller the optimization stops + assert n_smaller_gradient_norms <= 1 + + +def test_accessible_kl_divergence(): + # Ensures that the accessible kl_divergence matches the computed value + random_state = check_random_state(0) + X = random_state.randn(50, 2) + tsne = TSNE( + n_iter_without_progress=2, + verbose=2, + random_state=0, + method="exact", + max_iter=500, + ) + + old_stdout = sys.stdout + sys.stdout = StringIO() + try: + tsne.fit_transform(X) + finally: + out = sys.stdout.getvalue() + sys.stdout.close() + sys.stdout = old_stdout + + # The output needs to contain the accessible kl_divergence as the error at + # the last iteration + for line in out.split("\n")[::-1]: + if "Iteration" in 
line: + _, _, error = line.partition("error = ") + if error: + error, _, _ = error.partition(",") + break + assert_almost_equal(tsne.kl_divergence_, float(error), decimal=5) + + +@pytest.mark.parametrize("method", ["barnes_hut", "exact"]) +def test_uniform_grid(method): + """Make sure that TSNE can approximately recover a uniform 2D grid + + Due to ties in distances between point in X_2d_grid, this test is platform + dependent for ``method='barnes_hut'`` due to numerical imprecision. + + Also, t-SNE is not assured to converge to the right solution because bad + initialization can lead to convergence to bad local minimum (the + optimization problem is non-convex). To avoid breaking the test too often, + we re-run t-SNE from the final point when the convergence is not good + enough. + """ + seeds = range(3) + max_iter = 500 + for seed in seeds: + tsne = TSNE( + n_components=2, + init="random", + random_state=seed, + perplexity=50, + max_iter=max_iter, + method=method, + learning_rate="auto", + ) + Y = tsne.fit_transform(X_2d_grid) + + try_name = "{}_{}".format(method, seed) + try: + assert_uniform_grid(Y, try_name) + except AssertionError: + # If the test fails a first time, re-run with init=Y to see if + # this was caused by a bad initialization. Note that this will + # also run an early_exaggeration step. + try_name += ":rerun" + tsne.init = Y + Y = tsne.fit_transform(X_2d_grid) + assert_uniform_grid(Y, try_name) + + +def assert_uniform_grid(Y, try_name=None): + # Ensure that the resulting embedding leads to approximately + # uniformly spaced points: the distance to the closest neighbors + # should be non-zero and approximately constant. 
+ nn = NearestNeighbors(n_neighbors=1).fit(Y) + dist_to_nn = nn.kneighbors(return_distance=True)[0].ravel() + assert dist_to_nn.min() > 0.1 + + smallest_to_mean = dist_to_nn.min() / np.mean(dist_to_nn) + largest_to_mean = dist_to_nn.max() / np.mean(dist_to_nn) + + assert smallest_to_mean > 0.5, try_name + assert largest_to_mean < 2, try_name + + +def test_bh_match_exact(): + # check that the ``barnes_hut`` method match the exact one when + # ``angle = 0`` and ``perplexity > n_samples / 3`` + random_state = check_random_state(0) + n_features = 10 + X = random_state.randn(30, n_features).astype(np.float32) + X_embeddeds = {} + max_iter = {} + for method in ["exact", "barnes_hut"]: + tsne = TSNE( + n_components=2, + method=method, + learning_rate=1.0, + init="random", + random_state=0, + max_iter=251, + perplexity=29.5, + angle=0, + ) + # Kill the early_exaggeration + tsne._EXPLORATION_MAX_ITER = 0 + X_embeddeds[method] = tsne.fit_transform(X) + max_iter[method] = tsne.n_iter_ + + assert max_iter["exact"] == max_iter["barnes_hut"] + assert_allclose(X_embeddeds["exact"], X_embeddeds["barnes_hut"], rtol=1e-4) + + +def test_gradient_bh_multithread_match_sequential(): + # check that the bh gradient with different num_threads gives the same + # results + + n_features = 10 + n_samples = 30 + n_components = 2 + degrees_of_freedom = 1 + + angle = 3 + perplexity = 5 + + random_state = check_random_state(0) + data = random_state.randn(n_samples, n_features).astype(np.float32) + params = random_state.randn(n_samples, n_components) + + n_neighbors = n_samples - 1 + distances_csr = ( + NearestNeighbors() + .fit(data) + .kneighbors_graph(n_neighbors=n_neighbors, mode="distance") + ) + P_bh = _joint_probabilities_nn(distances_csr, perplexity, verbose=0) + kl_sequential, grad_sequential = _kl_divergence_bh( + params, + P_bh, + degrees_of_freedom, + n_samples, + n_components, + angle=angle, + skip_num_points=0, + verbose=0, + num_threads=1, + ) + for num_threads in [2, 4]: + 
kl_multithread, grad_multithread = _kl_divergence_bh( + params, + P_bh, + degrees_of_freedom, + n_samples, + n_components, + angle=angle, + skip_num_points=0, + verbose=0, + num_threads=num_threads, + ) + + assert_allclose(kl_multithread, kl_sequential, rtol=1e-6) + assert_allclose(grad_multithread, grad_multithread) + + +@pytest.mark.parametrize( + "metric, dist_func", + [("manhattan", manhattan_distances), ("cosine", cosine_distances)], +) +@pytest.mark.parametrize("method", ["barnes_hut", "exact"]) +def test_tsne_with_different_distance_metrics(metric, dist_func, method): + """Make sure that TSNE works for different distance metrics""" + + if method == "barnes_hut" and metric == "manhattan": + # The distances computed by `manhattan_distances` differ slightly from those + # computed internally by NearestNeighbors via the PairwiseDistancesReduction + # Cython code-based. This in turns causes T-SNE to converge to a different + # solution but this should not impact the qualitative results as both + # methods. + # NOTE: it's probably not valid from a mathematical point of view to use the + # Manhattan distance for T-SNE... + # TODO: re-enable this test if/when `manhattan_distances` is refactored to + # reuse the same underlying Cython code NearestNeighbors. + # For reference, see: + # https://github.com/scikit-learn/scikit-learn/pull/23865/files#r925721573 + pytest.xfail( + "Distance computations are different for method == 'barnes_hut' and metric" + " == 'manhattan', but this is expected." 
+ ) + + random_state = check_random_state(0) + n_components_original = 3 + n_components_embedding = 2 + X = random_state.randn(50, n_components_original).astype(np.float32) + X_transformed_tsne = TSNE( + metric=metric, + method=method, + n_components=n_components_embedding, + random_state=0, + max_iter=300, + init="random", + learning_rate="auto", + ).fit_transform(X) + X_transformed_tsne_precomputed = TSNE( + metric="precomputed", + method=method, + n_components=n_components_embedding, + random_state=0, + max_iter=300, + init="random", + learning_rate="auto", + ).fit_transform(dist_func(X)) + assert_array_equal(X_transformed_tsne, X_transformed_tsne_precomputed) + + +@pytest.mark.parametrize("method", ["exact", "barnes_hut"]) +def test_tsne_n_jobs(method): + """Make sure that the n_jobs parameter doesn't impact the output""" + random_state = check_random_state(0) + n_features = 10 + X = random_state.randn(30, n_features) + X_tr_ref = TSNE( + n_components=2, + method=method, + perplexity=25.0, + angle=0, + n_jobs=1, + random_state=0, + init="random", + learning_rate="auto", + ).fit_transform(X) + X_tr = TSNE( + n_components=2, + method=method, + perplexity=25.0, + angle=0, + n_jobs=2, + random_state=0, + init="random", + learning_rate="auto", + ).fit_transform(X) + + assert_allclose(X_tr_ref, X_tr) + + +def test_tsne_with_mahalanobis_distance(): + """Make sure that method_parameters works with mahalanobis distance.""" + random_state = check_random_state(0) + n_samples, n_features = 300, 10 + X = random_state.randn(n_samples, n_features) + default_params = { + "perplexity": 40, + "max_iter": 250, + "learning_rate": "auto", + "init": "random", + "n_components": 3, + "random_state": 0, + } + + tsne = TSNE(metric="mahalanobis", **default_params) + msg = "Must provide either V or VI for Mahalanobis distance" + with pytest.raises(ValueError, match=msg): + tsne.fit_transform(X) + + precomputed_X = squareform(pdist(X, metric="mahalanobis"), checks=True) + X_trans_expected 
= TSNE(metric="precomputed", **default_params).fit_transform( + precomputed_X + ) + + X_trans = TSNE( + metric="mahalanobis", metric_params={"V": np.cov(X.T)}, **default_params + ).fit_transform(X) + assert_allclose(X_trans, X_trans_expected) + + +@pytest.mark.parametrize("perplexity", (20, 30)) +def test_tsne_perplexity_validation(perplexity): + """Make sure that perplexity > n_samples results in a ValueError""" + + random_state = check_random_state(0) + X = random_state.randn(20, 2) + est = TSNE( + learning_rate="auto", + init="pca", + perplexity=perplexity, + random_state=random_state, + ) + msg = "perplexity must be less than n_samples" + with pytest.raises(ValueError, match=msg): + est.fit_transform(X) + + +def test_tsne_works_with_pandas_output(): + """Make sure that TSNE works when the output is set to "pandas". + + Non-regression test for gh-25365. + """ + pytest.importorskip("pandas") + with config_context(transform_output="pandas"): + arr = np.arange(35 * 4).reshape(35, 4) + TSNE(n_components=2).fit_transform(arr) + + +# TODO(1.7): remove +def test_tnse_n_iter_deprecated(): + """Check `n_iter` parameter deprecated.""" + random_state = check_random_state(0) + X = random_state.randn(40, 100) + tsne = TSNE(n_iter=250) + msg = "'n_iter' was renamed to 'max_iter'" + with pytest.warns(FutureWarning, match=msg): + tsne.fit_transform(X) + + +# TODO(1.7): remove +def test_tnse_n_iter_max_iter_both_set(): + """Check error raised when `n_iter` and `max_iter` both set.""" + random_state = check_random_state(0) + X = random_state.randn(40, 100) + tsne = TSNE(n_iter=250, max_iter=500) + msg = "Both 'n_iter' and 'max_iter' attributes were set" + with pytest.raises(ValueError, match=msg): + tsne.fit_transform(X)