Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- evalkit_internvl/lib/python3.10/site-packages/bitsandbytes-0.41.0.dist-info/LICENSE +21 -0
- evalkit_internvl/lib/python3.10/site-packages/bitsandbytes-0.41.0.dist-info/REQUESTED +0 -0
- evalkit_internvl/lib/python3.10/site-packages/httpcore/_backends/trio.py +161 -0
- evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/__pycache__/connection.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/__pycache__/http11.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/__pycache__/http2.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/__pycache__/http_proxy.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/__pycache__/socks_proxy.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/connection.py +215 -0
- evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/http11.py +331 -0
- evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/http2.py +589 -0
- evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/http_proxy.py +350 -0
- evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/_async.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/expect.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/fdpexpect.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/popen_spawn.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/run.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/socket_pexpect.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/utils.cpython-310.pyc +0 -0
- evalkit_internvl/lib/python3.10/site-packages/tiktoken/_tiktoken.cpython-310-x86_64-linux-gnu.so +3 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__init__.py +8 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/common.pxd +43 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/_weight_boosting.py +1173 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_bagging.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_common.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_forest.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_iforest.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_weight_boosting.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/test_base.py +109 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/test_stacking.py +1019 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/test_weight_boosting.py +639 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/__init__.py +16 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/__pycache__/_partial_dependence.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/__pycache__/_pd_utils.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/__pycache__/_permutation_importance.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_partial_dependence.py +695 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_pd_utils.py +68 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_permutation_importance.py +312 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_plot/__init__.py +2 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_plot/__pycache__/__init__.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_plot/__pycache__/decision_boundary.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_plot/__pycache__/partial_dependence.cpython-310.pyc +0 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_plot/decision_boundary.py +416 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_plot/partial_dependence.py +1476 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/__init__.py +0 -0
- evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/__pycache__/__init__.cpython-310.pyc +0 -0
.gitattributes
CHANGED
|
@@ -1644,3 +1644,4 @@ evalkit_internvl/lib/python3.10/site-packages/sympy/solvers/__pycache__/solvers.
|
|
| 1644 |
evalkit_internvl/lib/python3.10/site-packages/sympy/solvers/__pycache__/solveset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1645 |
evalkit_internvl/lib/python3.10/site-packages/sympy/solvers/ode/__pycache__/single.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1646 |
evalkit_internvl/lib/python3.10/site-packages/sympy/polys/benchmarks/__pycache__/bench_solvers.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 1644 |
evalkit_internvl/lib/python3.10/site-packages/sympy/solvers/__pycache__/solveset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1645 |
evalkit_internvl/lib/python3.10/site-packages/sympy/solvers/ode/__pycache__/single.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1646 |
evalkit_internvl/lib/python3.10/site-packages/sympy/polys/benchmarks/__pycache__/bench_solvers.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 1647 |
+
evalkit_internvl/lib/python3.10/site-packages/tiktoken/_tiktoken.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
evalkit_internvl/lib/python3.10/site-packages/bitsandbytes-0.41.0.dist-info/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) Facebook, Inc. and its affiliates.
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
evalkit_internvl/lib/python3.10/site-packages/bitsandbytes-0.41.0.dist-info/REQUESTED
ADDED
|
File without changes
|
evalkit_internvl/lib/python3.10/site-packages/httpcore/_backends/trio.py
ADDED
|
@@ -0,0 +1,161 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import ssl
|
| 2 |
+
import typing
|
| 3 |
+
|
| 4 |
+
import trio
|
| 5 |
+
|
| 6 |
+
from .._exceptions import (
|
| 7 |
+
ConnectError,
|
| 8 |
+
ConnectTimeout,
|
| 9 |
+
ExceptionMapping,
|
| 10 |
+
ReadError,
|
| 11 |
+
ReadTimeout,
|
| 12 |
+
WriteError,
|
| 13 |
+
WriteTimeout,
|
| 14 |
+
map_exceptions,
|
| 15 |
+
)
|
| 16 |
+
from .base import SOCKET_OPTION, AsyncNetworkBackend, AsyncNetworkStream
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class TrioStream(AsyncNetworkStream):
|
| 20 |
+
def __init__(self, stream: trio.abc.Stream) -> None:
|
| 21 |
+
self._stream = stream
|
| 22 |
+
|
| 23 |
+
async def read(
|
| 24 |
+
self, max_bytes: int, timeout: typing.Optional[float] = None
|
| 25 |
+
) -> bytes:
|
| 26 |
+
timeout_or_inf = float("inf") if timeout is None else timeout
|
| 27 |
+
exc_map: ExceptionMapping = {
|
| 28 |
+
trio.TooSlowError: ReadTimeout,
|
| 29 |
+
trio.BrokenResourceError: ReadError,
|
| 30 |
+
trio.ClosedResourceError: ReadError,
|
| 31 |
+
}
|
| 32 |
+
with map_exceptions(exc_map):
|
| 33 |
+
with trio.fail_after(timeout_or_inf):
|
| 34 |
+
data: bytes = await self._stream.receive_some(max_bytes=max_bytes)
|
| 35 |
+
return data
|
| 36 |
+
|
| 37 |
+
async def write(
|
| 38 |
+
self, buffer: bytes, timeout: typing.Optional[float] = None
|
| 39 |
+
) -> None:
|
| 40 |
+
if not buffer:
|
| 41 |
+
return
|
| 42 |
+
|
| 43 |
+
timeout_or_inf = float("inf") if timeout is None else timeout
|
| 44 |
+
exc_map: ExceptionMapping = {
|
| 45 |
+
trio.TooSlowError: WriteTimeout,
|
| 46 |
+
trio.BrokenResourceError: WriteError,
|
| 47 |
+
trio.ClosedResourceError: WriteError,
|
| 48 |
+
}
|
| 49 |
+
with map_exceptions(exc_map):
|
| 50 |
+
with trio.fail_after(timeout_or_inf):
|
| 51 |
+
await self._stream.send_all(data=buffer)
|
| 52 |
+
|
| 53 |
+
async def aclose(self) -> None:
|
| 54 |
+
await self._stream.aclose()
|
| 55 |
+
|
| 56 |
+
async def start_tls(
|
| 57 |
+
self,
|
| 58 |
+
ssl_context: ssl.SSLContext,
|
| 59 |
+
server_hostname: typing.Optional[str] = None,
|
| 60 |
+
timeout: typing.Optional[float] = None,
|
| 61 |
+
) -> AsyncNetworkStream:
|
| 62 |
+
timeout_or_inf = float("inf") if timeout is None else timeout
|
| 63 |
+
exc_map: ExceptionMapping = {
|
| 64 |
+
trio.TooSlowError: ConnectTimeout,
|
| 65 |
+
trio.BrokenResourceError: ConnectError,
|
| 66 |
+
}
|
| 67 |
+
ssl_stream = trio.SSLStream(
|
| 68 |
+
self._stream,
|
| 69 |
+
ssl_context=ssl_context,
|
| 70 |
+
server_hostname=server_hostname,
|
| 71 |
+
https_compatible=True,
|
| 72 |
+
server_side=False,
|
| 73 |
+
)
|
| 74 |
+
with map_exceptions(exc_map):
|
| 75 |
+
try:
|
| 76 |
+
with trio.fail_after(timeout_or_inf):
|
| 77 |
+
await ssl_stream.do_handshake()
|
| 78 |
+
except Exception as exc: # pragma: nocover
|
| 79 |
+
await self.aclose()
|
| 80 |
+
raise exc
|
| 81 |
+
return TrioStream(ssl_stream)
|
| 82 |
+
|
| 83 |
+
def get_extra_info(self, info: str) -> typing.Any:
|
| 84 |
+
if info == "ssl_object" and isinstance(self._stream, trio.SSLStream):
|
| 85 |
+
# Type checkers cannot see `_ssl_object` attribute because trio._ssl.SSLStream uses __getattr__/__setattr__.
|
| 86 |
+
# Tracked at https://github.com/python-trio/trio/issues/542
|
| 87 |
+
return self._stream._ssl_object # type: ignore[attr-defined]
|
| 88 |
+
if info == "client_addr":
|
| 89 |
+
return self._get_socket_stream().socket.getsockname()
|
| 90 |
+
if info == "server_addr":
|
| 91 |
+
return self._get_socket_stream().socket.getpeername()
|
| 92 |
+
if info == "socket":
|
| 93 |
+
stream = self._stream
|
| 94 |
+
while isinstance(stream, trio.SSLStream):
|
| 95 |
+
stream = stream.transport_stream
|
| 96 |
+
assert isinstance(stream, trio.SocketStream)
|
| 97 |
+
return stream.socket
|
| 98 |
+
if info == "is_readable":
|
| 99 |
+
socket = self.get_extra_info("socket")
|
| 100 |
+
return socket.is_readable()
|
| 101 |
+
return None
|
| 102 |
+
|
| 103 |
+
def _get_socket_stream(self) -> trio.SocketStream:
|
| 104 |
+
stream = self._stream
|
| 105 |
+
while isinstance(stream, trio.SSLStream):
|
| 106 |
+
stream = stream.transport_stream
|
| 107 |
+
assert isinstance(stream, trio.SocketStream)
|
| 108 |
+
return stream
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
class TrioBackend(AsyncNetworkBackend):
|
| 112 |
+
async def connect_tcp(
|
| 113 |
+
self,
|
| 114 |
+
host: str,
|
| 115 |
+
port: int,
|
| 116 |
+
timeout: typing.Optional[float] = None,
|
| 117 |
+
local_address: typing.Optional[str] = None,
|
| 118 |
+
socket_options: typing.Optional[typing.Iterable[SOCKET_OPTION]] = None,
|
| 119 |
+
) -> AsyncNetworkStream:
|
| 120 |
+
# By default for TCP sockets, trio enables TCP_NODELAY.
|
| 121 |
+
# https://trio.readthedocs.io/en/stable/reference-io.html#trio.SocketStream
|
| 122 |
+
if socket_options is None:
|
| 123 |
+
socket_options = [] # pragma: no cover
|
| 124 |
+
timeout_or_inf = float("inf") if timeout is None else timeout
|
| 125 |
+
exc_map: ExceptionMapping = {
|
| 126 |
+
trio.TooSlowError: ConnectTimeout,
|
| 127 |
+
trio.BrokenResourceError: ConnectError,
|
| 128 |
+
OSError: ConnectError,
|
| 129 |
+
}
|
| 130 |
+
with map_exceptions(exc_map):
|
| 131 |
+
with trio.fail_after(timeout_or_inf):
|
| 132 |
+
stream: trio.abc.Stream = await trio.open_tcp_stream(
|
| 133 |
+
host=host, port=port, local_address=local_address
|
| 134 |
+
)
|
| 135 |
+
for option in socket_options:
|
| 136 |
+
stream.setsockopt(*option) # type: ignore[attr-defined] # pragma: no cover
|
| 137 |
+
return TrioStream(stream)
|
| 138 |
+
|
| 139 |
+
async def connect_unix_socket(
|
| 140 |
+
self,
|
| 141 |
+
path: str,
|
| 142 |
+
timeout: typing.Optional[float] = None,
|
| 143 |
+
socket_options: typing.Optional[typing.Iterable[SOCKET_OPTION]] = None,
|
| 144 |
+
) -> AsyncNetworkStream: # pragma: nocover
|
| 145 |
+
if socket_options is None:
|
| 146 |
+
socket_options = []
|
| 147 |
+
timeout_or_inf = float("inf") if timeout is None else timeout
|
| 148 |
+
exc_map: ExceptionMapping = {
|
| 149 |
+
trio.TooSlowError: ConnectTimeout,
|
| 150 |
+
trio.BrokenResourceError: ConnectError,
|
| 151 |
+
OSError: ConnectError,
|
| 152 |
+
}
|
| 153 |
+
with map_exceptions(exc_map):
|
| 154 |
+
with trio.fail_after(timeout_or_inf):
|
| 155 |
+
stream: trio.abc.Stream = await trio.open_unix_socket(path)
|
| 156 |
+
for option in socket_options:
|
| 157 |
+
stream.setsockopt(*option) # type: ignore[attr-defined] # pragma: no cover
|
| 158 |
+
return TrioStream(stream)
|
| 159 |
+
|
| 160 |
+
async def sleep(self, seconds: float) -> None:
|
| 161 |
+
await trio.sleep(seconds) # pragma: nocover
|
evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (1.39 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/__pycache__/connection.cpython-310.pyc
ADDED
|
Binary file (6.22 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/__pycache__/http11.cpython-310.pyc
ADDED
|
Binary file (9.25 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/__pycache__/http2.cpython-310.pyc
ADDED
|
Binary file (15.6 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/__pycache__/http_proxy.cpython-310.pyc
ADDED
|
Binary file (11.9 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/__pycache__/socks_proxy.cpython-310.pyc
ADDED
|
Binary file (9.85 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/connection.py
ADDED
|
@@ -0,0 +1,215 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import itertools
|
| 2 |
+
import logging
|
| 3 |
+
import ssl
|
| 4 |
+
from types import TracebackType
|
| 5 |
+
from typing import Iterable, Iterator, Optional, Type
|
| 6 |
+
|
| 7 |
+
from .._backends.sync import SyncBackend
|
| 8 |
+
from .._backends.base import SOCKET_OPTION, NetworkBackend, NetworkStream
|
| 9 |
+
from .._exceptions import ConnectError, ConnectionNotAvailable, ConnectTimeout
|
| 10 |
+
from .._models import Origin, Request, Response
|
| 11 |
+
from .._ssl import default_ssl_context
|
| 12 |
+
from .._synchronization import Lock
|
| 13 |
+
from .._trace import Trace
|
| 14 |
+
from .http11 import HTTP11Connection
|
| 15 |
+
from .interfaces import ConnectionInterface
|
| 16 |
+
|
| 17 |
+
RETRIES_BACKOFF_FACTOR = 0.5 # 0s, 0.5s, 1s, 2s, 4s, etc.
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
logger = logging.getLogger("httpcore.connection")
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def exponential_backoff(factor: float) -> Iterator[float]:
|
| 24 |
+
yield 0
|
| 25 |
+
for n in itertools.count(2):
|
| 26 |
+
yield factor * (2 ** (n - 2))
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class HTTPConnection(ConnectionInterface):
|
| 30 |
+
def __init__(
|
| 31 |
+
self,
|
| 32 |
+
origin: Origin,
|
| 33 |
+
ssl_context: Optional[ssl.SSLContext] = None,
|
| 34 |
+
keepalive_expiry: Optional[float] = None,
|
| 35 |
+
http1: bool = True,
|
| 36 |
+
http2: bool = False,
|
| 37 |
+
retries: int = 0,
|
| 38 |
+
local_address: Optional[str] = None,
|
| 39 |
+
uds: Optional[str] = None,
|
| 40 |
+
network_backend: Optional[NetworkBackend] = None,
|
| 41 |
+
socket_options: Optional[Iterable[SOCKET_OPTION]] = None,
|
| 42 |
+
) -> None:
|
| 43 |
+
self._origin = origin
|
| 44 |
+
self._ssl_context = ssl_context
|
| 45 |
+
self._keepalive_expiry = keepalive_expiry
|
| 46 |
+
self._http1 = http1
|
| 47 |
+
self._http2 = http2
|
| 48 |
+
self._retries = retries
|
| 49 |
+
self._local_address = local_address
|
| 50 |
+
self._uds = uds
|
| 51 |
+
|
| 52 |
+
self._network_backend: NetworkBackend = (
|
| 53 |
+
SyncBackend() if network_backend is None else network_backend
|
| 54 |
+
)
|
| 55 |
+
self._connection: Optional[ConnectionInterface] = None
|
| 56 |
+
self._connect_failed: bool = False
|
| 57 |
+
self._request_lock = Lock()
|
| 58 |
+
self._socket_options = socket_options
|
| 59 |
+
|
| 60 |
+
def handle_request(self, request: Request) -> Response:
|
| 61 |
+
if not self.can_handle_request(request.url.origin):
|
| 62 |
+
raise RuntimeError(
|
| 63 |
+
f"Attempted to send request to {request.url.origin} on connection to {self._origin}"
|
| 64 |
+
)
|
| 65 |
+
|
| 66 |
+
with self._request_lock:
|
| 67 |
+
if self._connection is None:
|
| 68 |
+
try:
|
| 69 |
+
stream = self._connect(request)
|
| 70 |
+
|
| 71 |
+
ssl_object = stream.get_extra_info("ssl_object")
|
| 72 |
+
http2_negotiated = (
|
| 73 |
+
ssl_object is not None
|
| 74 |
+
and ssl_object.selected_alpn_protocol() == "h2"
|
| 75 |
+
)
|
| 76 |
+
if http2_negotiated or (self._http2 and not self._http1):
|
| 77 |
+
from .http2 import HTTP2Connection
|
| 78 |
+
|
| 79 |
+
self._connection = HTTP2Connection(
|
| 80 |
+
origin=self._origin,
|
| 81 |
+
stream=stream,
|
| 82 |
+
keepalive_expiry=self._keepalive_expiry,
|
| 83 |
+
)
|
| 84 |
+
else:
|
| 85 |
+
self._connection = HTTP11Connection(
|
| 86 |
+
origin=self._origin,
|
| 87 |
+
stream=stream,
|
| 88 |
+
keepalive_expiry=self._keepalive_expiry,
|
| 89 |
+
)
|
| 90 |
+
except Exception as exc:
|
| 91 |
+
self._connect_failed = True
|
| 92 |
+
raise exc
|
| 93 |
+
elif not self._connection.is_available():
|
| 94 |
+
raise ConnectionNotAvailable()
|
| 95 |
+
|
| 96 |
+
return self._connection.handle_request(request)
|
| 97 |
+
|
| 98 |
+
def _connect(self, request: Request) -> NetworkStream:
|
| 99 |
+
timeouts = request.extensions.get("timeout", {})
|
| 100 |
+
sni_hostname = request.extensions.get("sni_hostname", None)
|
| 101 |
+
timeout = timeouts.get("connect", None)
|
| 102 |
+
|
| 103 |
+
retries_left = self._retries
|
| 104 |
+
delays = exponential_backoff(factor=RETRIES_BACKOFF_FACTOR)
|
| 105 |
+
|
| 106 |
+
while True:
|
| 107 |
+
try:
|
| 108 |
+
if self._uds is None:
|
| 109 |
+
kwargs = {
|
| 110 |
+
"host": self._origin.host.decode("ascii"),
|
| 111 |
+
"port": self._origin.port,
|
| 112 |
+
"local_address": self._local_address,
|
| 113 |
+
"timeout": timeout,
|
| 114 |
+
"socket_options": self._socket_options,
|
| 115 |
+
}
|
| 116 |
+
with Trace("connect_tcp", logger, request, kwargs) as trace:
|
| 117 |
+
stream = self._network_backend.connect_tcp(**kwargs)
|
| 118 |
+
trace.return_value = stream
|
| 119 |
+
else:
|
| 120 |
+
kwargs = {
|
| 121 |
+
"path": self._uds,
|
| 122 |
+
"timeout": timeout,
|
| 123 |
+
"socket_options": self._socket_options,
|
| 124 |
+
}
|
| 125 |
+
with Trace(
|
| 126 |
+
"connect_unix_socket", logger, request, kwargs
|
| 127 |
+
) as trace:
|
| 128 |
+
stream = self._network_backend.connect_unix_socket(
|
| 129 |
+
**kwargs
|
| 130 |
+
)
|
| 131 |
+
trace.return_value = stream
|
| 132 |
+
|
| 133 |
+
if self._origin.scheme == b"https":
|
| 134 |
+
ssl_context = (
|
| 135 |
+
default_ssl_context()
|
| 136 |
+
if self._ssl_context is None
|
| 137 |
+
else self._ssl_context
|
| 138 |
+
)
|
| 139 |
+
alpn_protocols = ["http/1.1", "h2"] if self._http2 else ["http/1.1"]
|
| 140 |
+
ssl_context.set_alpn_protocols(alpn_protocols)
|
| 141 |
+
|
| 142 |
+
kwargs = {
|
| 143 |
+
"ssl_context": ssl_context,
|
| 144 |
+
"server_hostname": sni_hostname
|
| 145 |
+
or self._origin.host.decode("ascii"),
|
| 146 |
+
"timeout": timeout,
|
| 147 |
+
}
|
| 148 |
+
with Trace("start_tls", logger, request, kwargs) as trace:
|
| 149 |
+
stream = stream.start_tls(**kwargs)
|
| 150 |
+
trace.return_value = stream
|
| 151 |
+
return stream
|
| 152 |
+
except (ConnectError, ConnectTimeout):
|
| 153 |
+
if retries_left <= 0:
|
| 154 |
+
raise
|
| 155 |
+
retries_left -= 1
|
| 156 |
+
delay = next(delays)
|
| 157 |
+
with Trace("retry", logger, request, kwargs) as trace:
|
| 158 |
+
self._network_backend.sleep(delay)
|
| 159 |
+
|
| 160 |
+
def can_handle_request(self, origin: Origin) -> bool:
|
| 161 |
+
return origin == self._origin
|
| 162 |
+
|
| 163 |
+
def close(self) -> None:
|
| 164 |
+
if self._connection is not None:
|
| 165 |
+
with Trace("close", logger, None, {}):
|
| 166 |
+
self._connection.close()
|
| 167 |
+
|
| 168 |
+
def is_available(self) -> bool:
|
| 169 |
+
if self._connection is None:
|
| 170 |
+
# If HTTP/2 support is enabled, and the resulting connection could
|
| 171 |
+
# end up as HTTP/2 then we should indicate the connection as being
|
| 172 |
+
# available to service multiple requests.
|
| 173 |
+
return (
|
| 174 |
+
self._http2
|
| 175 |
+
and (self._origin.scheme == b"https" or not self._http1)
|
| 176 |
+
and not self._connect_failed
|
| 177 |
+
)
|
| 178 |
+
return self._connection.is_available()
|
| 179 |
+
|
| 180 |
+
def has_expired(self) -> bool:
|
| 181 |
+
if self._connection is None:
|
| 182 |
+
return self._connect_failed
|
| 183 |
+
return self._connection.has_expired()
|
| 184 |
+
|
| 185 |
+
def is_idle(self) -> bool:
|
| 186 |
+
if self._connection is None:
|
| 187 |
+
return self._connect_failed
|
| 188 |
+
return self._connection.is_idle()
|
| 189 |
+
|
| 190 |
+
def is_closed(self) -> bool:
|
| 191 |
+
if self._connection is None:
|
| 192 |
+
return self._connect_failed
|
| 193 |
+
return self._connection.is_closed()
|
| 194 |
+
|
| 195 |
+
def info(self) -> str:
|
| 196 |
+
if self._connection is None:
|
| 197 |
+
return "CONNECTION FAILED" if self._connect_failed else "CONNECTING"
|
| 198 |
+
return self._connection.info()
|
| 199 |
+
|
| 200 |
+
def __repr__(self) -> str:
|
| 201 |
+
return f"<{self.__class__.__name__} [{self.info()}]>"
|
| 202 |
+
|
| 203 |
+
# These context managers are not used in the standard flow, but are
|
| 204 |
+
# useful for testing or working with connection instances directly.
|
| 205 |
+
|
| 206 |
+
def __enter__(self) -> "HTTPConnection":
|
| 207 |
+
return self
|
| 208 |
+
|
| 209 |
+
def __exit__(
|
| 210 |
+
self,
|
| 211 |
+
exc_type: Optional[Type[BaseException]] = None,
|
| 212 |
+
exc_value: Optional[BaseException] = None,
|
| 213 |
+
traceback: Optional[TracebackType] = None,
|
| 214 |
+
) -> None:
|
| 215 |
+
self.close()
|
evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/http11.py
ADDED
|
@@ -0,0 +1,331 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import enum
|
| 2 |
+
import logging
|
| 3 |
+
import time
|
| 4 |
+
from types import TracebackType
|
| 5 |
+
from typing import (
|
| 6 |
+
Iterable,
|
| 7 |
+
Iterator,
|
| 8 |
+
List,
|
| 9 |
+
Optional,
|
| 10 |
+
Tuple,
|
| 11 |
+
Type,
|
| 12 |
+
Union,
|
| 13 |
+
cast,
|
| 14 |
+
)
|
| 15 |
+
|
| 16 |
+
import h11
|
| 17 |
+
|
| 18 |
+
from .._backends.base import NetworkStream
|
| 19 |
+
from .._exceptions import (
|
| 20 |
+
ConnectionNotAvailable,
|
| 21 |
+
LocalProtocolError,
|
| 22 |
+
RemoteProtocolError,
|
| 23 |
+
map_exceptions,
|
| 24 |
+
)
|
| 25 |
+
from .._models import Origin, Request, Response
|
| 26 |
+
from .._synchronization import Lock, ShieldCancellation
|
| 27 |
+
from .._trace import Trace
|
| 28 |
+
from .interfaces import ConnectionInterface
|
| 29 |
+
|
| 30 |
+
logger = logging.getLogger("httpcore.http11")
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
# A subset of `h11.Event` types supported by `_send_event`
|
| 34 |
+
H11SendEvent = Union[
|
| 35 |
+
h11.Request,
|
| 36 |
+
h11.Data,
|
| 37 |
+
h11.EndOfMessage,
|
| 38 |
+
]
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
class HTTPConnectionState(enum.IntEnum):
|
| 42 |
+
NEW = 0
|
| 43 |
+
ACTIVE = 1
|
| 44 |
+
IDLE = 2
|
| 45 |
+
CLOSED = 3
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
class HTTP11Connection(ConnectionInterface):
|
| 49 |
+
READ_NUM_BYTES = 64 * 1024
|
| 50 |
+
MAX_INCOMPLETE_EVENT_SIZE = 100 * 1024
|
| 51 |
+
|
| 52 |
+
def __init__(
|
| 53 |
+
self,
|
| 54 |
+
origin: Origin,
|
| 55 |
+
stream: NetworkStream,
|
| 56 |
+
keepalive_expiry: Optional[float] = None,
|
| 57 |
+
) -> None:
|
| 58 |
+
self._origin = origin
|
| 59 |
+
self._network_stream = stream
|
| 60 |
+
self._keepalive_expiry: Optional[float] = keepalive_expiry
|
| 61 |
+
self._expire_at: Optional[float] = None
|
| 62 |
+
self._state = HTTPConnectionState.NEW
|
| 63 |
+
self._state_lock = Lock()
|
| 64 |
+
self._request_count = 0
|
| 65 |
+
self._h11_state = h11.Connection(
|
| 66 |
+
our_role=h11.CLIENT,
|
| 67 |
+
max_incomplete_event_size=self.MAX_INCOMPLETE_EVENT_SIZE,
|
| 68 |
+
)
|
| 69 |
+
|
| 70 |
+
def handle_request(self, request: Request) -> Response:
|
| 71 |
+
if not self.can_handle_request(request.url.origin):
|
| 72 |
+
raise RuntimeError(
|
| 73 |
+
f"Attempted to send request to {request.url.origin} on connection "
|
| 74 |
+
f"to {self._origin}"
|
| 75 |
+
)
|
| 76 |
+
|
| 77 |
+
with self._state_lock:
|
| 78 |
+
if self._state in (HTTPConnectionState.NEW, HTTPConnectionState.IDLE):
|
| 79 |
+
self._request_count += 1
|
| 80 |
+
self._state = HTTPConnectionState.ACTIVE
|
| 81 |
+
self._expire_at = None
|
| 82 |
+
else:
|
| 83 |
+
raise ConnectionNotAvailable()
|
| 84 |
+
|
| 85 |
+
try:
|
| 86 |
+
kwargs = {"request": request}
|
| 87 |
+
with Trace("send_request_headers", logger, request, kwargs) as trace:
|
| 88 |
+
self._send_request_headers(**kwargs)
|
| 89 |
+
with Trace("send_request_body", logger, request, kwargs) as trace:
|
| 90 |
+
self._send_request_body(**kwargs)
|
| 91 |
+
with Trace(
|
| 92 |
+
"receive_response_headers", logger, request, kwargs
|
| 93 |
+
) as trace:
|
| 94 |
+
(
|
| 95 |
+
http_version,
|
| 96 |
+
status,
|
| 97 |
+
reason_phrase,
|
| 98 |
+
headers,
|
| 99 |
+
) = self._receive_response_headers(**kwargs)
|
| 100 |
+
trace.return_value = (
|
| 101 |
+
http_version,
|
| 102 |
+
status,
|
| 103 |
+
reason_phrase,
|
| 104 |
+
headers,
|
| 105 |
+
)
|
| 106 |
+
|
| 107 |
+
return Response(
|
| 108 |
+
status=status,
|
| 109 |
+
headers=headers,
|
| 110 |
+
content=HTTP11ConnectionByteStream(self, request),
|
| 111 |
+
extensions={
|
| 112 |
+
"http_version": http_version,
|
| 113 |
+
"reason_phrase": reason_phrase,
|
| 114 |
+
"network_stream": self._network_stream,
|
| 115 |
+
},
|
| 116 |
+
)
|
| 117 |
+
except BaseException as exc:
|
| 118 |
+
with ShieldCancellation():
|
| 119 |
+
with Trace("response_closed", logger, request) as trace:
|
| 120 |
+
self._response_closed()
|
| 121 |
+
raise exc
|
| 122 |
+
|
| 123 |
+
# Sending the request...
|
| 124 |
+
|
| 125 |
+
def _send_request_headers(self, request: Request) -> None:
|
| 126 |
+
timeouts = request.extensions.get("timeout", {})
|
| 127 |
+
timeout = timeouts.get("write", None)
|
| 128 |
+
|
| 129 |
+
with map_exceptions({h11.LocalProtocolError: LocalProtocolError}):
|
| 130 |
+
event = h11.Request(
|
| 131 |
+
method=request.method,
|
| 132 |
+
target=request.url.target,
|
| 133 |
+
headers=request.headers,
|
| 134 |
+
)
|
| 135 |
+
self._send_event(event, timeout=timeout)
|
| 136 |
+
|
| 137 |
+
def _send_request_body(self, request: Request) -> None:
|
| 138 |
+
timeouts = request.extensions.get("timeout", {})
|
| 139 |
+
timeout = timeouts.get("write", None)
|
| 140 |
+
|
| 141 |
+
assert isinstance(request.stream, Iterable)
|
| 142 |
+
for chunk in request.stream:
|
| 143 |
+
event = h11.Data(data=chunk)
|
| 144 |
+
self._send_event(event, timeout=timeout)
|
| 145 |
+
|
| 146 |
+
self._send_event(h11.EndOfMessage(), timeout=timeout)
|
| 147 |
+
|
| 148 |
+
def _send_event(
|
| 149 |
+
self, event: h11.Event, timeout: Optional[float] = None
|
| 150 |
+
) -> None:
|
| 151 |
+
bytes_to_send = self._h11_state.send(event)
|
| 152 |
+
if bytes_to_send is not None:
|
| 153 |
+
self._network_stream.write(bytes_to_send, timeout=timeout)
|
| 154 |
+
|
| 155 |
+
# Receiving the response...
|
| 156 |
+
|
| 157 |
+
def _receive_response_headers(
|
| 158 |
+
self, request: Request
|
| 159 |
+
) -> Tuple[bytes, int, bytes, List[Tuple[bytes, bytes]]]:
|
| 160 |
+
timeouts = request.extensions.get("timeout", {})
|
| 161 |
+
timeout = timeouts.get("read", None)
|
| 162 |
+
|
| 163 |
+
while True:
|
| 164 |
+
event = self._receive_event(timeout=timeout)
|
| 165 |
+
if isinstance(event, h11.Response):
|
| 166 |
+
break
|
| 167 |
+
if (
|
| 168 |
+
isinstance(event, h11.InformationalResponse)
|
| 169 |
+
and event.status_code == 101
|
| 170 |
+
):
|
| 171 |
+
break
|
| 172 |
+
|
| 173 |
+
http_version = b"HTTP/" + event.http_version
|
| 174 |
+
|
| 175 |
+
# h11 version 0.11+ supports a `raw_items` interface to get the
|
| 176 |
+
# raw header casing, rather than the enforced lowercase headers.
|
| 177 |
+
headers = event.headers.raw_items()
|
| 178 |
+
|
| 179 |
+
return http_version, event.status_code, event.reason, headers
|
| 180 |
+
|
| 181 |
+
def _receive_response_body(self, request: Request) -> Iterator[bytes]:
|
| 182 |
+
timeouts = request.extensions.get("timeout", {})
|
| 183 |
+
timeout = timeouts.get("read", None)
|
| 184 |
+
|
| 185 |
+
while True:
|
| 186 |
+
event = self._receive_event(timeout=timeout)
|
| 187 |
+
if isinstance(event, h11.Data):
|
| 188 |
+
yield bytes(event.data)
|
| 189 |
+
elif isinstance(event, (h11.EndOfMessage, h11.PAUSED)):
|
| 190 |
+
break
|
| 191 |
+
|
| 192 |
+
def _receive_event(
|
| 193 |
+
self, timeout: Optional[float] = None
|
| 194 |
+
) -> Union[h11.Event, Type[h11.PAUSED]]:
|
| 195 |
+
while True:
|
| 196 |
+
with map_exceptions({h11.RemoteProtocolError: RemoteProtocolError}):
|
| 197 |
+
event = self._h11_state.next_event()
|
| 198 |
+
|
| 199 |
+
if event is h11.NEED_DATA:
|
| 200 |
+
data = self._network_stream.read(
|
| 201 |
+
self.READ_NUM_BYTES, timeout=timeout
|
| 202 |
+
)
|
| 203 |
+
|
| 204 |
+
# If we feed this case through h11 we'll raise an exception like:
|
| 205 |
+
#
|
| 206 |
+
# httpcore.RemoteProtocolError: can't handle event type
|
| 207 |
+
# ConnectionClosed when role=SERVER and state=SEND_RESPONSE
|
| 208 |
+
#
|
| 209 |
+
# Which is accurate, but not very informative from an end-user
|
| 210 |
+
# perspective. Instead we handle this case distinctly and treat
|
| 211 |
+
# it as a ConnectError.
|
| 212 |
+
if data == b"" and self._h11_state.their_state == h11.SEND_RESPONSE:
|
| 213 |
+
msg = "Server disconnected without sending a response."
|
| 214 |
+
raise RemoteProtocolError(msg)
|
| 215 |
+
|
| 216 |
+
self._h11_state.receive_data(data)
|
| 217 |
+
else:
|
| 218 |
+
# mypy fails to narrow the type in the above if statement above
|
| 219 |
+
return cast(Union[h11.Event, Type[h11.PAUSED]], event)
|
| 220 |
+
|
| 221 |
+
def _response_closed(self) -> None:
|
| 222 |
+
with self._state_lock:
|
| 223 |
+
if (
|
| 224 |
+
self._h11_state.our_state is h11.DONE
|
| 225 |
+
and self._h11_state.their_state is h11.DONE
|
| 226 |
+
):
|
| 227 |
+
self._state = HTTPConnectionState.IDLE
|
| 228 |
+
self._h11_state.start_next_cycle()
|
| 229 |
+
if self._keepalive_expiry is not None:
|
| 230 |
+
now = time.monotonic()
|
| 231 |
+
self._expire_at = now + self._keepalive_expiry
|
| 232 |
+
else:
|
| 233 |
+
self.close()
|
| 234 |
+
|
| 235 |
+
# Once the connection is no longer required...
|
| 236 |
+
|
| 237 |
+
def close(self) -> None:
|
| 238 |
+
# Note that this method unilaterally closes the connection, and does
|
| 239 |
+
# not have any kind of locking in place around it.
|
| 240 |
+
self._state = HTTPConnectionState.CLOSED
|
| 241 |
+
self._network_stream.close()
|
| 242 |
+
|
| 243 |
+
# The ConnectionInterface methods provide information about the state of
|
| 244 |
+
# the connection, allowing for a connection pooling implementation to
|
| 245 |
+
# determine when to reuse and when to close the connection...
|
| 246 |
+
|
| 247 |
+
def can_handle_request(self, origin: Origin) -> bool:
|
| 248 |
+
return origin == self._origin
|
| 249 |
+
|
| 250 |
+
def is_available(self) -> bool:
|
| 251 |
+
# Note that HTTP/1.1 connections in the "NEW" state are not treated as
|
| 252 |
+
# being "available". The control flow which created the connection will
|
| 253 |
+
# be able to send an outgoing request, but the connection will not be
|
| 254 |
+
# acquired from the connection pool for any other request.
|
| 255 |
+
return self._state == HTTPConnectionState.IDLE
|
| 256 |
+
|
| 257 |
+
def has_expired(self) -> bool:
|
| 258 |
+
now = time.monotonic()
|
| 259 |
+
keepalive_expired = self._expire_at is not None and now > self._expire_at
|
| 260 |
+
|
| 261 |
+
# If the HTTP connection is idle but the socket is readable, then the
|
| 262 |
+
# only valid state is that the socket is about to return b"", indicating
|
| 263 |
+
# a server-initiated disconnect.
|
| 264 |
+
server_disconnected = (
|
| 265 |
+
self._state == HTTPConnectionState.IDLE
|
| 266 |
+
and self._network_stream.get_extra_info("is_readable")
|
| 267 |
+
)
|
| 268 |
+
|
| 269 |
+
return keepalive_expired or server_disconnected
|
| 270 |
+
|
| 271 |
+
def is_idle(self) -> bool:
|
| 272 |
+
return self._state == HTTPConnectionState.IDLE
|
| 273 |
+
|
| 274 |
+
def is_closed(self) -> bool:
|
| 275 |
+
return self._state == HTTPConnectionState.CLOSED
|
| 276 |
+
|
| 277 |
+
def info(self) -> str:
|
| 278 |
+
origin = str(self._origin)
|
| 279 |
+
return (
|
| 280 |
+
f"{origin!r}, HTTP/1.1, {self._state.name}, "
|
| 281 |
+
f"Request Count: {self._request_count}"
|
| 282 |
+
)
|
| 283 |
+
|
| 284 |
+
def __repr__(self) -> str:
|
| 285 |
+
class_name = self.__class__.__name__
|
| 286 |
+
origin = str(self._origin)
|
| 287 |
+
return (
|
| 288 |
+
f"<{class_name} [{origin!r}, {self._state.name}, "
|
| 289 |
+
f"Request Count: {self._request_count}]>"
|
| 290 |
+
)
|
| 291 |
+
|
| 292 |
+
# These context managers are not used in the standard flow, but are
|
| 293 |
+
# useful for testing or working with connection instances directly.
|
| 294 |
+
|
| 295 |
+
def __enter__(self) -> "HTTP11Connection":
|
| 296 |
+
return self
|
| 297 |
+
|
| 298 |
+
def __exit__(
|
| 299 |
+
self,
|
| 300 |
+
exc_type: Optional[Type[BaseException]] = None,
|
| 301 |
+
exc_value: Optional[BaseException] = None,
|
| 302 |
+
traceback: Optional[TracebackType] = None,
|
| 303 |
+
) -> None:
|
| 304 |
+
self.close()
|
| 305 |
+
|
| 306 |
+
|
| 307 |
+
class HTTP11ConnectionByteStream:
|
| 308 |
+
def __init__(self, connection: HTTP11Connection, request: Request) -> None:
|
| 309 |
+
self._connection = connection
|
| 310 |
+
self._request = request
|
| 311 |
+
self._closed = False
|
| 312 |
+
|
| 313 |
+
def __iter__(self) -> Iterator[bytes]:
|
| 314 |
+
kwargs = {"request": self._request}
|
| 315 |
+
try:
|
| 316 |
+
with Trace("receive_response_body", logger, self._request, kwargs):
|
| 317 |
+
for chunk in self._connection._receive_response_body(**kwargs):
|
| 318 |
+
yield chunk
|
| 319 |
+
except BaseException as exc:
|
| 320 |
+
# If we get an exception while streaming the response,
|
| 321 |
+
# we want to close the response (and possibly the connection)
|
| 322 |
+
# before raising that exception.
|
| 323 |
+
with ShieldCancellation():
|
| 324 |
+
self.close()
|
| 325 |
+
raise exc
|
| 326 |
+
|
| 327 |
+
def close(self) -> None:
|
| 328 |
+
if not self._closed:
|
| 329 |
+
self._closed = True
|
| 330 |
+
with Trace("response_closed", logger, self._request):
|
| 331 |
+
self._connection._response_closed()
|
evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/http2.py
ADDED
|
@@ -0,0 +1,589 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import enum
|
| 2 |
+
import logging
|
| 3 |
+
import time
|
| 4 |
+
import types
|
| 5 |
+
import typing
|
| 6 |
+
|
| 7 |
+
import h2.config
|
| 8 |
+
import h2.connection
|
| 9 |
+
import h2.events
|
| 10 |
+
import h2.exceptions
|
| 11 |
+
import h2.settings
|
| 12 |
+
|
| 13 |
+
from .._backends.base import NetworkStream
|
| 14 |
+
from .._exceptions import (
|
| 15 |
+
ConnectionNotAvailable,
|
| 16 |
+
LocalProtocolError,
|
| 17 |
+
RemoteProtocolError,
|
| 18 |
+
)
|
| 19 |
+
from .._models import Origin, Request, Response
|
| 20 |
+
from .._synchronization import Lock, Semaphore, ShieldCancellation
|
| 21 |
+
from .._trace import Trace
|
| 22 |
+
from .interfaces import ConnectionInterface
|
| 23 |
+
|
| 24 |
+
logger = logging.getLogger("httpcore.http2")
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def has_body_headers(request: Request) -> bool:
|
| 28 |
+
return any(
|
| 29 |
+
k.lower() == b"content-length" or k.lower() == b"transfer-encoding"
|
| 30 |
+
for k, v in request.headers
|
| 31 |
+
)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class HTTPConnectionState(enum.IntEnum):
|
| 35 |
+
ACTIVE = 1
|
| 36 |
+
IDLE = 2
|
| 37 |
+
CLOSED = 3
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
class HTTP2Connection(ConnectionInterface):
|
| 41 |
+
READ_NUM_BYTES = 64 * 1024
|
| 42 |
+
CONFIG = h2.config.H2Configuration(validate_inbound_headers=False)
|
| 43 |
+
|
| 44 |
+
def __init__(
|
| 45 |
+
self,
|
| 46 |
+
origin: Origin,
|
| 47 |
+
stream: NetworkStream,
|
| 48 |
+
keepalive_expiry: typing.Optional[float] = None,
|
| 49 |
+
):
|
| 50 |
+
self._origin = origin
|
| 51 |
+
self._network_stream = stream
|
| 52 |
+
self._keepalive_expiry: typing.Optional[float] = keepalive_expiry
|
| 53 |
+
self._h2_state = h2.connection.H2Connection(config=self.CONFIG)
|
| 54 |
+
self._state = HTTPConnectionState.IDLE
|
| 55 |
+
self._expire_at: typing.Optional[float] = None
|
| 56 |
+
self._request_count = 0
|
| 57 |
+
self._init_lock = Lock()
|
| 58 |
+
self._state_lock = Lock()
|
| 59 |
+
self._read_lock = Lock()
|
| 60 |
+
self._write_lock = Lock()
|
| 61 |
+
self._sent_connection_init = False
|
| 62 |
+
self._used_all_stream_ids = False
|
| 63 |
+
self._connection_error = False
|
| 64 |
+
|
| 65 |
+
# Mapping from stream ID to response stream events.
|
| 66 |
+
self._events: typing.Dict[
|
| 67 |
+
int,
|
| 68 |
+
typing.Union[
|
| 69 |
+
h2.events.ResponseReceived,
|
| 70 |
+
h2.events.DataReceived,
|
| 71 |
+
h2.events.StreamEnded,
|
| 72 |
+
h2.events.StreamReset,
|
| 73 |
+
],
|
| 74 |
+
] = {}
|
| 75 |
+
|
| 76 |
+
# Connection terminated events are stored as state since
|
| 77 |
+
# we need to handle them for all streams.
|
| 78 |
+
self._connection_terminated: typing.Optional[
|
| 79 |
+
h2.events.ConnectionTerminated
|
| 80 |
+
] = None
|
| 81 |
+
|
| 82 |
+
self._read_exception: typing.Optional[Exception] = None
|
| 83 |
+
self._write_exception: typing.Optional[Exception] = None
|
| 84 |
+
|
| 85 |
+
def handle_request(self, request: Request) -> Response:
|
| 86 |
+
if not self.can_handle_request(request.url.origin):
|
| 87 |
+
# This cannot occur in normal operation, since the connection pool
|
| 88 |
+
# will only send requests on connections that handle them.
|
| 89 |
+
# It's in place simply for resilience as a guard against incorrect
|
| 90 |
+
# usage, for anyone working directly with httpcore connections.
|
| 91 |
+
raise RuntimeError(
|
| 92 |
+
f"Attempted to send request to {request.url.origin} on connection "
|
| 93 |
+
f"to {self._origin}"
|
| 94 |
+
)
|
| 95 |
+
|
| 96 |
+
with self._state_lock:
|
| 97 |
+
if self._state in (HTTPConnectionState.ACTIVE, HTTPConnectionState.IDLE):
|
| 98 |
+
self._request_count += 1
|
| 99 |
+
self._expire_at = None
|
| 100 |
+
self._state = HTTPConnectionState.ACTIVE
|
| 101 |
+
else:
|
| 102 |
+
raise ConnectionNotAvailable()
|
| 103 |
+
|
| 104 |
+
with self._init_lock:
|
| 105 |
+
if not self._sent_connection_init:
|
| 106 |
+
try:
|
| 107 |
+
kwargs = {"request": request}
|
| 108 |
+
with Trace("send_connection_init", logger, request, kwargs):
|
| 109 |
+
self._send_connection_init(**kwargs)
|
| 110 |
+
except BaseException as exc:
|
| 111 |
+
with ShieldCancellation():
|
| 112 |
+
self.close()
|
| 113 |
+
raise exc
|
| 114 |
+
|
| 115 |
+
self._sent_connection_init = True
|
| 116 |
+
|
| 117 |
+
# Initially start with just 1 until the remote server provides
|
| 118 |
+
# its max_concurrent_streams value
|
| 119 |
+
self._max_streams = 1
|
| 120 |
+
|
| 121 |
+
local_settings_max_streams = (
|
| 122 |
+
self._h2_state.local_settings.max_concurrent_streams
|
| 123 |
+
)
|
| 124 |
+
self._max_streams_semaphore = Semaphore(local_settings_max_streams)
|
| 125 |
+
|
| 126 |
+
for _ in range(local_settings_max_streams - self._max_streams):
|
| 127 |
+
self._max_streams_semaphore.acquire()
|
| 128 |
+
|
| 129 |
+
self._max_streams_semaphore.acquire()
|
| 130 |
+
|
| 131 |
+
try:
|
| 132 |
+
stream_id = self._h2_state.get_next_available_stream_id()
|
| 133 |
+
self._events[stream_id] = []
|
| 134 |
+
except h2.exceptions.NoAvailableStreamIDError: # pragma: nocover
|
| 135 |
+
self._used_all_stream_ids = True
|
| 136 |
+
self._request_count -= 1
|
| 137 |
+
raise ConnectionNotAvailable()
|
| 138 |
+
|
| 139 |
+
try:
|
| 140 |
+
kwargs = {"request": request, "stream_id": stream_id}
|
| 141 |
+
with Trace("send_request_headers", logger, request, kwargs):
|
| 142 |
+
self._send_request_headers(request=request, stream_id=stream_id)
|
| 143 |
+
with Trace("send_request_body", logger, request, kwargs):
|
| 144 |
+
self._send_request_body(request=request, stream_id=stream_id)
|
| 145 |
+
with Trace(
|
| 146 |
+
"receive_response_headers", logger, request, kwargs
|
| 147 |
+
) as trace:
|
| 148 |
+
status, headers = self._receive_response(
|
| 149 |
+
request=request, stream_id=stream_id
|
| 150 |
+
)
|
| 151 |
+
trace.return_value = (status, headers)
|
| 152 |
+
|
| 153 |
+
return Response(
|
| 154 |
+
status=status,
|
| 155 |
+
headers=headers,
|
| 156 |
+
content=HTTP2ConnectionByteStream(self, request, stream_id=stream_id),
|
| 157 |
+
extensions={
|
| 158 |
+
"http_version": b"HTTP/2",
|
| 159 |
+
"network_stream": self._network_stream,
|
| 160 |
+
"stream_id": stream_id,
|
| 161 |
+
},
|
| 162 |
+
)
|
| 163 |
+
except BaseException as exc: # noqa: PIE786
|
| 164 |
+
with ShieldCancellation():
|
| 165 |
+
kwargs = {"stream_id": stream_id}
|
| 166 |
+
with Trace("response_closed", logger, request, kwargs):
|
| 167 |
+
self._response_closed(stream_id=stream_id)
|
| 168 |
+
|
| 169 |
+
if isinstance(exc, h2.exceptions.ProtocolError):
|
| 170 |
+
# One case where h2 can raise a protocol error is when a
|
| 171 |
+
# closed frame has been seen by the state machine.
|
| 172 |
+
#
|
| 173 |
+
# This happens when one stream is reading, and encounters
|
| 174 |
+
# a GOAWAY event. Other flows of control may then raise
|
| 175 |
+
# a protocol error at any point they interact with the 'h2_state'.
|
| 176 |
+
#
|
| 177 |
+
# In this case we'll have stored the event, and should raise
|
| 178 |
+
# it as a RemoteProtocolError.
|
| 179 |
+
if self._connection_terminated: # pragma: nocover
|
| 180 |
+
raise RemoteProtocolError(self._connection_terminated)
|
| 181 |
+
# If h2 raises a protocol error in some other state then we
|
| 182 |
+
# must somehow have made a protocol violation.
|
| 183 |
+
raise LocalProtocolError(exc) # pragma: nocover
|
| 184 |
+
|
| 185 |
+
raise exc
|
| 186 |
+
|
| 187 |
+
def _send_connection_init(self, request: Request) -> None:
|
| 188 |
+
"""
|
| 189 |
+
The HTTP/2 connection requires some initial setup before we can start
|
| 190 |
+
using individual request/response streams on it.
|
| 191 |
+
"""
|
| 192 |
+
# Need to set these manually here instead of manipulating via
|
| 193 |
+
# __setitem__() otherwise the H2Connection will emit SettingsUpdate
|
| 194 |
+
# frames in addition to sending the undesired defaults.
|
| 195 |
+
self._h2_state.local_settings = h2.settings.Settings(
|
| 196 |
+
client=True,
|
| 197 |
+
initial_values={
|
| 198 |
+
# Disable PUSH_PROMISE frames from the server since we don't do anything
|
| 199 |
+
# with them for now. Maybe when we support caching?
|
| 200 |
+
h2.settings.SettingCodes.ENABLE_PUSH: 0,
|
| 201 |
+
# These two are taken from h2 for safe defaults
|
| 202 |
+
h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS: 100,
|
| 203 |
+
h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE: 65536,
|
| 204 |
+
},
|
| 205 |
+
)
|
| 206 |
+
|
| 207 |
+
# Some websites (*cough* Yahoo *cough*) balk at this setting being
|
| 208 |
+
# present in the initial handshake since it's not defined in the original
|
| 209 |
+
# RFC despite the RFC mandating ignoring settings you don't know about.
|
| 210 |
+
del self._h2_state.local_settings[
|
| 211 |
+
h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL
|
| 212 |
+
]
|
| 213 |
+
|
| 214 |
+
self._h2_state.initiate_connection()
|
| 215 |
+
self._h2_state.increment_flow_control_window(2**24)
|
| 216 |
+
self._write_outgoing_data(request)
|
| 217 |
+
|
| 218 |
+
# Sending the request...
|
| 219 |
+
|
| 220 |
+
def _send_request_headers(self, request: Request, stream_id: int) -> None:
|
| 221 |
+
"""
|
| 222 |
+
Send the request headers to a given stream ID.
|
| 223 |
+
"""
|
| 224 |
+
end_stream = not has_body_headers(request)
|
| 225 |
+
|
| 226 |
+
# In HTTP/2 the ':authority' pseudo-header is used instead of 'Host'.
|
| 227 |
+
# In order to gracefully handle HTTP/1.1 and HTTP/2 we always require
|
| 228 |
+
# HTTP/1.1 style headers, and map them appropriately if we end up on
|
| 229 |
+
# an HTTP/2 connection.
|
| 230 |
+
authority = [v for k, v in request.headers if k.lower() == b"host"][0]
|
| 231 |
+
|
| 232 |
+
headers = [
|
| 233 |
+
(b":method", request.method),
|
| 234 |
+
(b":authority", authority),
|
| 235 |
+
(b":scheme", request.url.scheme),
|
| 236 |
+
(b":path", request.url.target),
|
| 237 |
+
] + [
|
| 238 |
+
(k.lower(), v)
|
| 239 |
+
for k, v in request.headers
|
| 240 |
+
if k.lower()
|
| 241 |
+
not in (
|
| 242 |
+
b"host",
|
| 243 |
+
b"transfer-encoding",
|
| 244 |
+
)
|
| 245 |
+
]
|
| 246 |
+
|
| 247 |
+
self._h2_state.send_headers(stream_id, headers, end_stream=end_stream)
|
| 248 |
+
self._h2_state.increment_flow_control_window(2**24, stream_id=stream_id)
|
| 249 |
+
self._write_outgoing_data(request)
|
| 250 |
+
|
| 251 |
+
def _send_request_body(self, request: Request, stream_id: int) -> None:
|
| 252 |
+
"""
|
| 253 |
+
Iterate over the request body sending it to a given stream ID.
|
| 254 |
+
"""
|
| 255 |
+
if not has_body_headers(request):
|
| 256 |
+
return
|
| 257 |
+
|
| 258 |
+
assert isinstance(request.stream, typing.Iterable)
|
| 259 |
+
for data in request.stream:
|
| 260 |
+
self._send_stream_data(request, stream_id, data)
|
| 261 |
+
self._send_end_stream(request, stream_id)
|
| 262 |
+
|
| 263 |
+
def _send_stream_data(
|
| 264 |
+
self, request: Request, stream_id: int, data: bytes
|
| 265 |
+
) -> None:
|
| 266 |
+
"""
|
| 267 |
+
Send a single chunk of data in one or more data frames.
|
| 268 |
+
"""
|
| 269 |
+
while data:
|
| 270 |
+
max_flow = self._wait_for_outgoing_flow(request, stream_id)
|
| 271 |
+
chunk_size = min(len(data), max_flow)
|
| 272 |
+
chunk, data = data[:chunk_size], data[chunk_size:]
|
| 273 |
+
self._h2_state.send_data(stream_id, chunk)
|
| 274 |
+
self._write_outgoing_data(request)
|
| 275 |
+
|
| 276 |
+
def _send_end_stream(self, request: Request, stream_id: int) -> None:
|
| 277 |
+
"""
|
| 278 |
+
Send an empty data frame on on a given stream ID with the END_STREAM flag set.
|
| 279 |
+
"""
|
| 280 |
+
self._h2_state.end_stream(stream_id)
|
| 281 |
+
self._write_outgoing_data(request)
|
| 282 |
+
|
| 283 |
+
# Receiving the response...
|
| 284 |
+
|
| 285 |
+
def _receive_response(
|
| 286 |
+
self, request: Request, stream_id: int
|
| 287 |
+
) -> typing.Tuple[int, typing.List[typing.Tuple[bytes, bytes]]]:
|
| 288 |
+
"""
|
| 289 |
+
Return the response status code and headers for a given stream ID.
|
| 290 |
+
"""
|
| 291 |
+
while True:
|
| 292 |
+
event = self._receive_stream_event(request, stream_id)
|
| 293 |
+
if isinstance(event, h2.events.ResponseReceived):
|
| 294 |
+
break
|
| 295 |
+
|
| 296 |
+
status_code = 200
|
| 297 |
+
headers = []
|
| 298 |
+
for k, v in event.headers:
|
| 299 |
+
if k == b":status":
|
| 300 |
+
status_code = int(v.decode("ascii", errors="ignore"))
|
| 301 |
+
elif not k.startswith(b":"):
|
| 302 |
+
headers.append((k, v))
|
| 303 |
+
|
| 304 |
+
return (status_code, headers)
|
| 305 |
+
|
| 306 |
+
def _receive_response_body(
|
| 307 |
+
self, request: Request, stream_id: int
|
| 308 |
+
) -> typing.Iterator[bytes]:
|
| 309 |
+
"""
|
| 310 |
+
Iterator that returns the bytes of the response body for a given stream ID.
|
| 311 |
+
"""
|
| 312 |
+
while True:
|
| 313 |
+
event = self._receive_stream_event(request, stream_id)
|
| 314 |
+
if isinstance(event, h2.events.DataReceived):
|
| 315 |
+
amount = event.flow_controlled_length
|
| 316 |
+
self._h2_state.acknowledge_received_data(amount, stream_id)
|
| 317 |
+
self._write_outgoing_data(request)
|
| 318 |
+
yield event.data
|
| 319 |
+
elif isinstance(event, h2.events.StreamEnded):
|
| 320 |
+
break
|
| 321 |
+
|
| 322 |
+
def _receive_stream_event(
|
| 323 |
+
self, request: Request, stream_id: int
|
| 324 |
+
) -> typing.Union[
|
| 325 |
+
h2.events.ResponseReceived, h2.events.DataReceived, h2.events.StreamEnded
|
| 326 |
+
]:
|
| 327 |
+
"""
|
| 328 |
+
Return the next available event for a given stream ID.
|
| 329 |
+
|
| 330 |
+
Will read more data from the network if required.
|
| 331 |
+
"""
|
| 332 |
+
while not self._events.get(stream_id):
|
| 333 |
+
self._receive_events(request, stream_id)
|
| 334 |
+
event = self._events[stream_id].pop(0)
|
| 335 |
+
if isinstance(event, h2.events.StreamReset):
|
| 336 |
+
raise RemoteProtocolError(event)
|
| 337 |
+
return event
|
| 338 |
+
|
| 339 |
+
def _receive_events(
|
| 340 |
+
self, request: Request, stream_id: typing.Optional[int] = None
|
| 341 |
+
) -> None:
|
| 342 |
+
"""
|
| 343 |
+
Read some data from the network until we see one or more events
|
| 344 |
+
for a given stream ID.
|
| 345 |
+
"""
|
| 346 |
+
with self._read_lock:
|
| 347 |
+
if self._connection_terminated is not None:
|
| 348 |
+
last_stream_id = self._connection_terminated.last_stream_id
|
| 349 |
+
if stream_id and last_stream_id and stream_id > last_stream_id:
|
| 350 |
+
self._request_count -= 1
|
| 351 |
+
raise ConnectionNotAvailable()
|
| 352 |
+
raise RemoteProtocolError(self._connection_terminated)
|
| 353 |
+
|
| 354 |
+
# This conditional is a bit icky. We don't want to block reading if we've
|
| 355 |
+
# actually got an event to return for a given stream. We need to do that
|
| 356 |
+
# check *within* the atomic read lock. Though it also need to be optional,
|
| 357 |
+
# because when we call it from `_wait_for_outgoing_flow` we *do* want to
|
| 358 |
+
# block until we've available flow control, event when we have events
|
| 359 |
+
# pending for the stream ID we're attempting to send on.
|
| 360 |
+
if stream_id is None or not self._events.get(stream_id):
|
| 361 |
+
events = self._read_incoming_data(request)
|
| 362 |
+
for event in events:
|
| 363 |
+
if isinstance(event, h2.events.RemoteSettingsChanged):
|
| 364 |
+
with Trace(
|
| 365 |
+
"receive_remote_settings", logger, request
|
| 366 |
+
) as trace:
|
| 367 |
+
self._receive_remote_settings_change(event)
|
| 368 |
+
trace.return_value = event
|
| 369 |
+
|
| 370 |
+
elif isinstance(
|
| 371 |
+
event,
|
| 372 |
+
(
|
| 373 |
+
h2.events.ResponseReceived,
|
| 374 |
+
h2.events.DataReceived,
|
| 375 |
+
h2.events.StreamEnded,
|
| 376 |
+
h2.events.StreamReset,
|
| 377 |
+
),
|
| 378 |
+
):
|
| 379 |
+
if event.stream_id in self._events:
|
| 380 |
+
self._events[event.stream_id].append(event)
|
| 381 |
+
|
| 382 |
+
elif isinstance(event, h2.events.ConnectionTerminated):
|
| 383 |
+
self._connection_terminated = event
|
| 384 |
+
|
| 385 |
+
self._write_outgoing_data(request)
|
| 386 |
+
|
| 387 |
+
def _receive_remote_settings_change(self, event: h2.events.Event) -> None:
|
| 388 |
+
max_concurrent_streams = event.changed_settings.get(
|
| 389 |
+
h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS
|
| 390 |
+
)
|
| 391 |
+
if max_concurrent_streams:
|
| 392 |
+
new_max_streams = min(
|
| 393 |
+
max_concurrent_streams.new_value,
|
| 394 |
+
self._h2_state.local_settings.max_concurrent_streams,
|
| 395 |
+
)
|
| 396 |
+
if new_max_streams and new_max_streams != self._max_streams:
|
| 397 |
+
while new_max_streams > self._max_streams:
|
| 398 |
+
self._max_streams_semaphore.release()
|
| 399 |
+
self._max_streams += 1
|
| 400 |
+
while new_max_streams < self._max_streams:
|
| 401 |
+
self._max_streams_semaphore.acquire()
|
| 402 |
+
self._max_streams -= 1
|
| 403 |
+
|
| 404 |
+
def _response_closed(self, stream_id: int) -> None:
|
| 405 |
+
self._max_streams_semaphore.release()
|
| 406 |
+
del self._events[stream_id]
|
| 407 |
+
with self._state_lock:
|
| 408 |
+
if self._connection_terminated and not self._events:
|
| 409 |
+
self.close()
|
| 410 |
+
|
| 411 |
+
elif self._state == HTTPConnectionState.ACTIVE and not self._events:
|
| 412 |
+
self._state = HTTPConnectionState.IDLE
|
| 413 |
+
if self._keepalive_expiry is not None:
|
| 414 |
+
now = time.monotonic()
|
| 415 |
+
self._expire_at = now + self._keepalive_expiry
|
| 416 |
+
if self._used_all_stream_ids: # pragma: nocover
|
| 417 |
+
self.close()
|
| 418 |
+
|
| 419 |
+
def close(self) -> None:
|
| 420 |
+
# Note that this method unilaterally closes the connection, and does
|
| 421 |
+
# not have any kind of locking in place around it.
|
| 422 |
+
self._h2_state.close_connection()
|
| 423 |
+
self._state = HTTPConnectionState.CLOSED
|
| 424 |
+
self._network_stream.close()
|
| 425 |
+
|
| 426 |
+
# Wrappers around network read/write operations...
|
| 427 |
+
|
| 428 |
+
def _read_incoming_data(
|
| 429 |
+
self, request: Request
|
| 430 |
+
) -> typing.List[h2.events.Event]:
|
| 431 |
+
timeouts = request.extensions.get("timeout", {})
|
| 432 |
+
timeout = timeouts.get("read", None)
|
| 433 |
+
|
| 434 |
+
if self._read_exception is not None:
|
| 435 |
+
raise self._read_exception # pragma: nocover
|
| 436 |
+
|
| 437 |
+
try:
|
| 438 |
+
data = self._network_stream.read(self.READ_NUM_BYTES, timeout)
|
| 439 |
+
if data == b"":
|
| 440 |
+
raise RemoteProtocolError("Server disconnected")
|
| 441 |
+
except Exception as exc:
|
| 442 |
+
# If we get a network error we should:
|
| 443 |
+
#
|
| 444 |
+
# 1. Save the exception and just raise it immediately on any future reads.
|
| 445 |
+
# (For example, this means that a single read timeout or disconnect will
|
| 446 |
+
# immediately close all pending streams. Without requiring multiple
|
| 447 |
+
# sequential timeouts.)
|
| 448 |
+
# 2. Mark the connection as errored, so that we don't accept any other
|
| 449 |
+
# incoming requests.
|
| 450 |
+
self._read_exception = exc
|
| 451 |
+
self._connection_error = True
|
| 452 |
+
raise exc
|
| 453 |
+
|
| 454 |
+
events: typing.List[h2.events.Event] = self._h2_state.receive_data(data)
|
| 455 |
+
|
| 456 |
+
return events
|
| 457 |
+
|
| 458 |
+
    def _write_outgoing_data(self, request: Request) -> None:
        """Flush any pending h2 outgoing bytes to the network stream.

        Uses the per-request 'write' timeout. Serialized under the write
        lock so concurrent streams cannot interleave frames. Failures are
        latched so later writes fail fast and the connection is marked
        errored.
        """
        timeouts = request.extensions.get("timeout", {})
        timeout = timeouts.get("write", None)

        with self._write_lock:
            # Drain pending bytes inside the lock even if we are about to
            # raise, keeping the h2 state machine's buffer consistent.
            data_to_send = self._h2_state.data_to_send()

            if self._write_exception is not None:
                raise self._write_exception  # pragma: nocover

            try:
                self._network_stream.write(data_to_send, timeout)
            except Exception as exc:  # pragma: nocover
                # If we get a network error we should:
                #
                # 1. Save the exception and just raise it immediately on any future write.
                #    (For example, this means that a single write timeout or disconnect will
                #    immediately close all pending streams. Without requiring multiple
                #    sequential timeouts.)
                # 2. Mark the connection as errored, so that we don't accept any other
                #    incoming requests.
                self._write_exception = exc
                self._connection_error = True
                raise exc
|
| 482 |
+
|
| 483 |
+
# Flow control...
|
| 484 |
+
|
| 485 |
+
    def _wait_for_outgoing_flow(self, request: Request, stream_id: int) -> int:
        """
        Returns the maximum allowable outgoing flow for a given stream.

        If the allowable flow is zero, then waits on the network until
        WindowUpdated frames have increased the flow rate.
        https://tools.ietf.org/html/rfc7540#section-6.9
        """
        # Sending is capped by both the stream's flow-control window and
        # the peer's maximum frame size.
        local_flow: int = self._h2_state.local_flow_control_window(stream_id)
        max_frame_size: int = self._h2_state.max_outbound_frame_size
        flow = min(local_flow, max_frame_size)
        while flow == 0:
            # Block on incoming events; a WINDOW_UPDATE (or settings change)
            # processed by _receive_events can raise the available flow.
            self._receive_events(request)
            local_flow = self._h2_state.local_flow_control_window(stream_id)
            max_frame_size = self._h2_state.max_outbound_frame_size
            flow = min(local_flow, max_frame_size)
        return flow
|
| 502 |
+
|
| 503 |
+
# Interface for connection pooling...
|
| 504 |
+
|
| 505 |
+
    def can_handle_request(self, origin: Origin) -> bool:
        """True if this connection may serve requests for `origin`."""
        return origin == self._origin
|
| 507 |
+
|
| 508 |
+
    def is_available(self) -> bool:
        """True if this connection can accept another outgoing request.

        Requires that the connection is not closed or errored, has not run
        out of HTTP/2 stream IDs, and the underlying h2 state machine has
        not itself transitioned to CLOSED.
        """
        return (
            self._state != HTTPConnectionState.CLOSED
            and not self._connection_error
            and not self._used_all_stream_ids
            and not (
                self._h2_state.state_machine.state
                == h2.connection.ConnectionState.CLOSED
            )
        )
|
| 518 |
+
|
| 519 |
+
def has_expired(self) -> bool:
|
| 520 |
+
now = time.monotonic()
|
| 521 |
+
return self._expire_at is not None and now > self._expire_at
|
| 522 |
+
|
| 523 |
+
    def is_idle(self) -> bool:
        """True if the connection is open but has no in-flight requests."""
        return self._state == HTTPConnectionState.IDLE
|
| 525 |
+
|
| 526 |
+
    def is_closed(self) -> bool:
        """True once the connection has been closed."""
        return self._state == HTTPConnectionState.CLOSED
|
| 528 |
+
|
| 529 |
+
def info(self) -> str:
|
| 530 |
+
origin = str(self._origin)
|
| 531 |
+
return (
|
| 532 |
+
f"{origin!r}, HTTP/2, {self._state.name}, "
|
| 533 |
+
f"Request Count: {self._request_count}"
|
| 534 |
+
)
|
| 535 |
+
|
| 536 |
+
def __repr__(self) -> str:
|
| 537 |
+
class_name = self.__class__.__name__
|
| 538 |
+
origin = str(self._origin)
|
| 539 |
+
return (
|
| 540 |
+
f"<{class_name} [{origin!r}, {self._state.name}, "
|
| 541 |
+
f"Request Count: {self._request_count}]>"
|
| 542 |
+
)
|
| 543 |
+
|
| 544 |
+
# These context managers are not used in the standard flow, but are
|
| 545 |
+
# useful for testing or working with connection instances directly.
|
| 546 |
+
|
| 547 |
+
    def __enter__(self) -> "HTTP2Connection":
        """Enter a `with` block; returns the connection itself."""
        return self
|
| 549 |
+
|
| 550 |
+
    def __exit__(
        self,
        exc_type: typing.Optional[typing.Type[BaseException]] = None,
        exc_value: typing.Optional[BaseException] = None,
        traceback: typing.Optional[types.TracebackType] = None,
    ) -> None:
        """Exit a `with` block, always closing the connection.

        Exceptions are never suppressed (implicitly returns None).
        """
        self.close()
|
| 557 |
+
|
| 558 |
+
|
| 559 |
+
class HTTP2ConnectionByteStream:
    """Iterable response-body stream bound to one HTTP/2 stream ID.

    Yields body chunks from the owning connection and guarantees that the
    stream is closed exactly once, even if iteration is aborted early.
    """

    def __init__(
        self, connection: HTTP2Connection, request: Request, stream_id: int
    ) -> None:
        self._connection = connection
        self._request = request
        self._stream_id = stream_id
        # Guards against double-closing the stream.
        self._closed = False

    def __iter__(self) -> typing.Iterator[bytes]:
        """Yield body chunks, closing the stream if iteration fails."""
        kwargs = {"request": self._request, "stream_id": self._stream_id}
        try:
            with Trace("receive_response_body", logger, self._request, kwargs):
                for chunk in self._connection._receive_response_body(
                    request=self._request, stream_id=self._stream_id
                ):
                    yield chunk
        except BaseException as exc:
            # If we get an exception while streaming the response,
            # we want to close the response (and possibly the connection)
            # before raising that exception.
            with ShieldCancellation():
                self.close()
            raise exc

    def close(self) -> None:
        """Close the stream, notifying the connection exactly once."""
        if not self._closed:
            self._closed = True
            kwargs = {"stream_id": self._stream_id}
            with Trace("response_closed", logger, self._request, kwargs):
                self._connection._response_closed(stream_id=self._stream_id)
|
evalkit_internvl/lib/python3.10/site-packages/httpcore/_sync/http_proxy.py
ADDED
|
@@ -0,0 +1,350 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import ssl
|
| 3 |
+
from base64 import b64encode
|
| 4 |
+
from typing import Iterable, List, Mapping, Optional, Sequence, Tuple, Union
|
| 5 |
+
|
| 6 |
+
from .._backends.base import SOCKET_OPTION, NetworkBackend
|
| 7 |
+
from .._exceptions import ProxyError
|
| 8 |
+
from .._models import (
|
| 9 |
+
URL,
|
| 10 |
+
Origin,
|
| 11 |
+
Request,
|
| 12 |
+
Response,
|
| 13 |
+
enforce_bytes,
|
| 14 |
+
enforce_headers,
|
| 15 |
+
enforce_url,
|
| 16 |
+
)
|
| 17 |
+
from .._ssl import default_ssl_context
|
| 18 |
+
from .._synchronization import Lock
|
| 19 |
+
from .._trace import Trace
|
| 20 |
+
from .connection import HTTPConnection
|
| 21 |
+
from .connection_pool import ConnectionPool
|
| 22 |
+
from .http11 import HTTP11Connection
|
| 23 |
+
from .interfaces import ConnectionInterface
|
| 24 |
+
|
| 25 |
+
HeadersAsSequence = Sequence[Tuple[Union[bytes, str], Union[bytes, str]]]
|
| 26 |
+
HeadersAsMapping = Mapping[Union[bytes, str], Union[bytes, str]]
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
logger = logging.getLogger("httpcore.proxy")
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def merge_headers(
    default_headers: Optional[Sequence[Tuple[bytes, bytes]]] = None,
    override_headers: Optional[Sequence[Tuple[bytes, bytes]]] = None,
) -> List[Tuple[bytes, bytes]]:
    """
    Combine two header sequences into a single list, with any header in
    `override_headers` replacing a same-named (case-insensitive) header
    from `default_headers`.
    """
    defaults = [] if default_headers is None else list(default_headers)
    overrides = [] if override_headers is None else list(override_headers)
    # Header names are case-insensitive, so compare lowercased keys.
    overridden_keys = {name.lower() for name, _ in overrides}
    kept_defaults = [
        item for item in defaults if item[0].lower() not in overridden_keys
    ]
    return kept_defaults + overrides
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def build_auth_header(username: bytes, password: bytes) -> bytes:
    """Return an HTTP Basic authorization header value for the credentials."""
    credentials = b"%b:%b" % (username, password)
    return b"Basic " + b64encode(credentials)
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
class HTTPProxy(ConnectionPool):
    """
    A connection pool that sends requests via an HTTP proxy.
    """

    def __init__(
        self,
        proxy_url: Union[URL, bytes, str],
        proxy_auth: Optional[Tuple[Union[bytes, str], Union[bytes, str]]] = None,
        proxy_headers: Union[HeadersAsMapping, HeadersAsSequence, None] = None,
        ssl_context: Optional[ssl.SSLContext] = None,
        max_connections: Optional[int] = 10,
        max_keepalive_connections: Optional[int] = None,
        keepalive_expiry: Optional[float] = None,
        http1: bool = True,
        http2: bool = False,
        retries: int = 0,
        local_address: Optional[str] = None,
        uds: Optional[str] = None,
        network_backend: Optional[NetworkBackend] = None,
        socket_options: Optional[Iterable[SOCKET_OPTION]] = None,
    ) -> None:
        """
        A connection pool for making HTTP requests.

        Parameters:
            proxy_url: The URL to use when connecting to the proxy server.
                For example `"http://127.0.0.1:8080/"`.
            proxy_auth: Any proxy authentication as a two-tuple of
                (username, password). May be either bytes or ascii-only str.
            proxy_headers: Any HTTP headers to use for the proxy requests.
                For example `{"Proxy-Authorization": "Basic <username>:<password>"}`.
            ssl_context: An SSL context to use for verifying connections.
                If not specified, the default `httpcore.default_ssl_context()`
                will be used.
            max_connections: The maximum number of concurrent HTTP connections that
                the pool should allow. Any attempt to send a request on a pool that
                would exceed this amount will block until a connection is available.
            max_keepalive_connections: The maximum number of idle HTTP connections
                that will be maintained in the pool.
            keepalive_expiry: The duration in seconds that an idle HTTP connection
                may be maintained for before being expired from the pool.
            http1: A boolean indicating if HTTP/1.1 requests should be supported
                by the connection pool. Defaults to True.
            http2: A boolean indicating if HTTP/2 requests should be supported by
                the connection pool. Defaults to False.
            retries: The maximum number of retries when trying to establish
                a connection.
            local_address: Local address to connect from. Can also be used to
                connect using a particular address family. Using
                `local_address="0.0.0.0"` will connect using an `AF_INET` address
                (IPv4), while using `local_address="::"` will connect using an
                `AF_INET6` address (IPv6).
            uds: Path to a Unix Domain Socket to use instead of TCP sockets.
            network_backend: A backend instance to use for handling network I/O.
        """
        super().__init__(
            ssl_context=ssl_context,
            max_connections=max_connections,
            max_keepalive_connections=max_keepalive_connections,
            keepalive_expiry=keepalive_expiry,
            http1=http1,
            http2=http2,
            network_backend=network_backend,
            retries=retries,
            local_address=local_address,
            uds=uds,
            socket_options=socket_options,
        )
        self._ssl_context = ssl_context
        self._proxy_url = enforce_url(proxy_url, name="proxy_url")
        self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers")
        if proxy_auth is not None:
            username = enforce_bytes(proxy_auth[0], name="proxy_auth")
            password = enforce_bytes(proxy_auth[1], name="proxy_auth")
            authorization = build_auth_header(username, password)
            # Credentials from `proxy_auth` are prepended, so an explicit
            # Proxy-Authorization in `proxy_headers` would still win when
            # merged with merge_headers (later entries override).
            self._proxy_headers = [
                (b"Proxy-Authorization", authorization)
            ] + self._proxy_headers

    def create_connection(self, origin: Origin) -> ConnectionInterface:
        """Create a proxied connection for `origin`.

        Plain `http` origins are forwarded through the proxy as absolute-URI
        requests; any other scheme is tunnelled via HTTP CONNECT.
        """
        if origin.scheme == b"http":
            return ForwardHTTPConnection(
                proxy_origin=self._proxy_url.origin,
                proxy_headers=self._proxy_headers,
                remote_origin=origin,
                keepalive_expiry=self._keepalive_expiry,
                network_backend=self._network_backend,
            )
        return TunnelHTTPConnection(
            proxy_origin=self._proxy_url.origin,
            proxy_headers=self._proxy_headers,
            remote_origin=origin,
            ssl_context=self._ssl_context,
            keepalive_expiry=self._keepalive_expiry,
            http1=self._http1,
            http2=self._http2,
            network_backend=self._network_backend,
        )
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
class ForwardHTTPConnection(ConnectionInterface):
    """Proxy connection for plain-HTTP origins.

    Rewrites each request to target the proxy itself, using the absolute
    request URL as the target, and delegates I/O to an inner HTTPConnection
    opened against the proxy origin.
    """

    def __init__(
        self,
        proxy_origin: Origin,
        remote_origin: Origin,
        proxy_headers: Union[HeadersAsMapping, HeadersAsSequence, None] = None,
        keepalive_expiry: Optional[float] = None,
        network_backend: Optional[NetworkBackend] = None,
        socket_options: Optional[Iterable[SOCKET_OPTION]] = None,
    ) -> None:
        # The actual socket is opened to the proxy, not the remote origin.
        self._connection = HTTPConnection(
            origin=proxy_origin,
            keepalive_expiry=keepalive_expiry,
            network_backend=network_backend,
            socket_options=socket_options,
        )
        self._proxy_origin = proxy_origin
        self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers")
        self._remote_origin = remote_origin

    def handle_request(self, request: Request) -> Response:
        """Forward `request` through the proxy as an absolute-URI request."""
        # Request headers take precedence over configured proxy headers.
        headers = merge_headers(self._proxy_headers, request.headers)
        # Absolute-form target (full original URL), as required for
        # requests sent to a forward proxy.
        url = URL(
            scheme=self._proxy_origin.scheme,
            host=self._proxy_origin.host,
            port=self._proxy_origin.port,
            target=bytes(request.url),
        )
        proxy_request = Request(
            method=request.method,
            url=url,
            headers=headers,
            content=request.stream,
            extensions=request.extensions,
        )
        return self._connection.handle_request(proxy_request)

    def can_handle_request(self, origin: Origin) -> bool:
        """True if this connection serves requests for `origin`."""
        return origin == self._remote_origin

    def close(self) -> None:
        """Close the underlying connection to the proxy."""
        self._connection.close()

    def info(self) -> str:
        # Delegated to the inner proxy connection.
        return self._connection.info()

    def is_available(self) -> bool:
        return self._connection.is_available()

    def has_expired(self) -> bool:
        return self._connection.has_expired()

    def is_idle(self) -> bool:
        return self._connection.is_idle()

    def is_closed(self) -> bool:
        return self._connection.is_closed()

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} [{self.info()}]>"
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
class TunnelHTTPConnection(ConnectionInterface):
    """Proxy connection for tunnelled (typically HTTPS) origins.

    On first use it issues an HTTP CONNECT request to the proxy, upgrades
    the resulting raw stream to TLS against the remote origin, then wraps
    it in an HTTP/1.1 or HTTP/2 connection depending on ALPN negotiation.
    """

    def __init__(
        self,
        proxy_origin: Origin,
        remote_origin: Origin,
        ssl_context: Optional[ssl.SSLContext] = None,
        proxy_headers: Optional[Sequence[Tuple[bytes, bytes]]] = None,
        keepalive_expiry: Optional[float] = None,
        http1: bool = True,
        http2: bool = False,
        network_backend: Optional[NetworkBackend] = None,
        socket_options: Optional[Iterable[SOCKET_OPTION]] = None,
    ) -> None:
        # Initially a plain connection to the proxy; replaced by an
        # HTTP11Connection/HTTP2Connection to the remote origin once the
        # CONNECT tunnel is established.
        self._connection: ConnectionInterface = HTTPConnection(
            origin=proxy_origin,
            keepalive_expiry=keepalive_expiry,
            network_backend=network_backend,
            socket_options=socket_options,
        )
        self._proxy_origin = proxy_origin
        self._remote_origin = remote_origin
        self._ssl_context = ssl_context
        self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers")
        self._keepalive_expiry = keepalive_expiry
        self._http1 = http1
        self._http2 = http2
        # Serializes tunnel establishment across concurrent first requests.
        self._connect_lock = Lock()
        self._connected = False

    def handle_request(self, request: Request) -> Response:
        """Send `request` through the tunnel, establishing it on first use."""
        timeouts = request.extensions.get("timeout", {})
        timeout = timeouts.get("connect", None)

        with self._connect_lock:
            if not self._connected:
                target = b"%b:%d" % (self._remote_origin.host, self._remote_origin.port)

                connect_url = URL(
                    scheme=self._proxy_origin.scheme,
                    host=self._proxy_origin.host,
                    port=self._proxy_origin.port,
                    target=target,
                )
                # Configured proxy headers override the defaults here.
                connect_headers = merge_headers(
                    [(b"Host", target), (b"Accept", b"*/*")], self._proxy_headers
                )
                connect_request = Request(
                    method=b"CONNECT",
                    url=connect_url,
                    headers=connect_headers,
                    extensions=request.extensions,
                )
                connect_response = self._connection.handle_request(
                    connect_request
                )

                # Any non-2xx response means the proxy refused the tunnel.
                if connect_response.status < 200 or connect_response.status > 299:
                    reason_bytes = connect_response.extensions.get("reason_phrase", b"")
                    reason_str = reason_bytes.decode("ascii", errors="ignore")
                    msg = "%d %s" % (connect_response.status, reason_str)
                    self._connection.close()
                    raise ProxyError(msg)

                stream = connect_response.extensions["network_stream"]

                # Upgrade the stream to SSL
                ssl_context = (
                    default_ssl_context()
                    if self._ssl_context is None
                    else self._ssl_context
                )
                alpn_protocols = ["http/1.1", "h2"] if self._http2 else ["http/1.1"]
                ssl_context.set_alpn_protocols(alpn_protocols)

                # The TLS handshake is against the remote host, not the proxy.
                kwargs = {
                    "ssl_context": ssl_context,
                    "server_hostname": self._remote_origin.host.decode("ascii"),
                    "timeout": timeout,
                }
                with Trace("start_tls", logger, request, kwargs) as trace:
                    stream = stream.start_tls(**kwargs)
                    trace.return_value = stream

                # Determine if we should be using HTTP/1.1 or HTTP/2
                ssl_object = stream.get_extra_info("ssl_object")
                http2_negotiated = (
                    ssl_object is not None
                    and ssl_object.selected_alpn_protocol() == "h2"
                )

                # Create the HTTP/1.1 or HTTP/2 connection
                if http2_negotiated or (self._http2 and not self._http1):
                    # Imported lazily to avoid a hard dependency on h2 when
                    # HTTP/2 is never negotiated.
                    from .http2 import HTTP2Connection

                    self._connection = HTTP2Connection(
                        origin=self._remote_origin,
                        stream=stream,
                        keepalive_expiry=self._keepalive_expiry,
                    )
                else:
                    self._connection = HTTP11Connection(
                        origin=self._remote_origin,
                        stream=stream,
                        keepalive_expiry=self._keepalive_expiry,
                    )

                self._connected = True
        return self._connection.handle_request(request)

    def can_handle_request(self, origin: Origin) -> bool:
        """True if this connection serves requests for `origin`."""
        return origin == self._remote_origin

    def close(self) -> None:
        """Close whichever underlying connection is currently active."""
        self._connection.close()

    def info(self) -> str:
        # Delegated to the active inner connection.
        return self._connection.info()

    def is_available(self) -> bool:
        return self._connection.is_available()

    def has_expired(self) -> bool:
        return self._connection.has_expired()

    def is_idle(self) -> bool:
        return self._connection.is_idle()

    def is_closed(self) -> bool:
        return self._connection.is_closed()

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} [{self.info()}]>"
|
evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (4.16 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/_async.cpython-310.pyc
ADDED
|
Binary file (899 Bytes). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/expect.cpython-310.pyc
ADDED
|
Binary file (9.09 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/fdpexpect.cpython-310.pyc
ADDED
|
Binary file (5.97 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/popen_spawn.cpython-310.pyc
ADDED
|
Binary file (5.17 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/run.cpython-310.pyc
ADDED
|
Binary file (5.8 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/socket_pexpect.cpython-310.pyc
ADDED
|
Binary file (5.22 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/pexpect/__pycache__/utils.cpython-310.pyc
ADDED
|
Binary file (3.79 kB). View file
|
|
|
evalkit_internvl/lib/python3.10/site-packages/tiktoken/_tiktoken.cpython-310-x86_64-linux-gnu.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ab730d9e9941f3bde5507915e9ac6986e06e0d611b22cc548b5ef0f0fdbaa4d3
|
| 3 |
+
size 3430112
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/__init__.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""This module implements histogram-based gradient boosting estimators.
|
| 2 |
+
|
| 3 |
+
The implementation is a port from pygbm which is itself strongly inspired
|
| 4 |
+
from LightGBM.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
# Authors: The scikit-learn developers
|
| 8 |
+
# SPDX-License-Identifier: BSD-3-Clause
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/_hist_gradient_boosting/common.pxd
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ...utils._typedefs cimport float32_t, float64_t, intp_t, uint8_t, uint32_t


# Shared dtype aliases for the histogram-based gradient-boosting backend.
ctypedef float64_t X_DTYPE_C
ctypedef uint8_t X_BINNED_DTYPE_C
ctypedef float64_t Y_DTYPE_C
ctypedef float32_t G_H_DTYPE_C
ctypedef uint32_t BITSET_INNER_DTYPE_C
# A bitset is a fixed array of 8 x 32-bit words (256 bits).
ctypedef BITSET_INNER_DTYPE_C[8] BITSET_DTYPE_C


cdef packed struct hist_struct:
    # Same as histogram dtype but we need a struct to declare views. It needs
    # to be packed since by default numpy dtypes aren't aligned
    Y_DTYPE_C sum_gradients
    Y_DTYPE_C sum_hessians
    unsigned int count


cdef packed struct node_struct:
    # Equivalent struct to PREDICTOR_RECORD_DTYPE to use in memory views. It
    # needs to be packed since by default numpy dtypes aren't aligned
    Y_DTYPE_C value
    unsigned int count
    intp_t feature_idx
    X_DTYPE_C num_threshold
    uint8_t missing_go_to_left
    unsigned int left
    unsigned int right
    Y_DTYPE_C gain
    unsigned int depth
    uint8_t is_leaf
    X_BINNED_DTYPE_C bin_threshold
    uint8_t is_categorical
    # The index of the corresponding bitsets in the Predictor's bitset arrays.
    # Only used if is_categorical is True
    unsigned int bitset_idx


cpdef enum MonotonicConstraint:
    NO_CST = 0
    POS = 1
    NEG = -1
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/_weight_boosting.py
ADDED
|
@@ -0,0 +1,1173 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Weight Boosting.
|
| 2 |
+
|
| 3 |
+
This module contains weight boosting estimators for both classification and
|
| 4 |
+
regression.
|
| 5 |
+
|
| 6 |
+
The module structure is the following:
|
| 7 |
+
|
| 8 |
+
- The `BaseWeightBoosting` base class implements a common ``fit`` method
|
| 9 |
+
for all the estimators in the module. Regression and classification
|
| 10 |
+
only differ from each other in the loss function that is optimized.
|
| 11 |
+
|
| 12 |
+
- :class:`~sklearn.ensemble.AdaBoostClassifier` implements adaptive boosting
|
| 13 |
+
(AdaBoost-SAMME) for classification problems.
|
| 14 |
+
|
| 15 |
+
- :class:`~sklearn.ensemble.AdaBoostRegressor` implements adaptive boosting
|
| 16 |
+
(AdaBoost.R2) for regression problems.
|
| 17 |
+
"""
|
| 18 |
+
|
| 19 |
+
# Authors: The scikit-learn developers
|
| 20 |
+
# SPDX-License-Identifier: BSD-3-Clause
|
| 21 |
+
|
| 22 |
+
import warnings
|
| 23 |
+
from abc import ABCMeta, abstractmethod
|
| 24 |
+
from numbers import Integral, Real
|
| 25 |
+
|
| 26 |
+
import numpy as np
|
| 27 |
+
|
| 28 |
+
from ..base import (
|
| 29 |
+
ClassifierMixin,
|
| 30 |
+
RegressorMixin,
|
| 31 |
+
_fit_context,
|
| 32 |
+
is_classifier,
|
| 33 |
+
is_regressor,
|
| 34 |
+
)
|
| 35 |
+
from ..metrics import accuracy_score, r2_score
|
| 36 |
+
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
|
| 37 |
+
from ..utils import _safe_indexing, check_random_state
|
| 38 |
+
from ..utils._param_validation import HasMethods, Hidden, Interval, StrOptions
|
| 39 |
+
from ..utils.extmath import softmax, stable_cumsum
|
| 40 |
+
from ..utils.metadata_routing import (
|
| 41 |
+
_raise_for_unsupported_routing,
|
| 42 |
+
_RoutingNotSupportedMixin,
|
| 43 |
+
)
|
| 44 |
+
from ..utils.validation import (
|
| 45 |
+
_check_sample_weight,
|
| 46 |
+
_num_samples,
|
| 47 |
+
check_is_fitted,
|
| 48 |
+
has_fit_parameter,
|
| 49 |
+
validate_data,
|
| 50 |
+
)
|
| 51 |
+
from ._base import BaseEnsemble
|
| 52 |
+
|
| 53 |
+
__all__ = [
|
| 54 |
+
"AdaBoostClassifier",
|
| 55 |
+
"AdaBoostRegressor",
|
| 56 |
+
]
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
class BaseWeightBoosting(BaseEnsemble, metaclass=ABCMeta):
|
| 60 |
+
"""Base class for AdaBoost estimators.
|
| 61 |
+
|
| 62 |
+
Warning: This class should not be used directly. Use derived classes
|
| 63 |
+
instead.
|
| 64 |
+
"""
|
| 65 |
+
|
| 66 |
+
_parameter_constraints: dict = {
|
| 67 |
+
"estimator": [HasMethods(["fit", "predict"]), None],
|
| 68 |
+
"n_estimators": [Interval(Integral, 1, None, closed="left")],
|
| 69 |
+
"learning_rate": [Interval(Real, 0, None, closed="neither")],
|
| 70 |
+
"random_state": ["random_state"],
|
| 71 |
+
}
|
| 72 |
+
|
| 73 |
+
@abstractmethod
|
| 74 |
+
def __init__(
|
| 75 |
+
self,
|
| 76 |
+
estimator=None,
|
| 77 |
+
*,
|
| 78 |
+
n_estimators=50,
|
| 79 |
+
estimator_params=tuple(),
|
| 80 |
+
learning_rate=1.0,
|
| 81 |
+
random_state=None,
|
| 82 |
+
):
|
| 83 |
+
super().__init__(
|
| 84 |
+
estimator=estimator,
|
| 85 |
+
n_estimators=n_estimators,
|
| 86 |
+
estimator_params=estimator_params,
|
| 87 |
+
)
|
| 88 |
+
|
| 89 |
+
self.learning_rate = learning_rate
|
| 90 |
+
self.random_state = random_state
|
| 91 |
+
|
| 92 |
+
def _check_X(self, X):
|
| 93 |
+
# Only called to validate X in non-fit methods, therefore reset=False
|
| 94 |
+
return validate_data(
|
| 95 |
+
self,
|
| 96 |
+
X,
|
| 97 |
+
accept_sparse=["csr", "csc"],
|
| 98 |
+
ensure_2d=True,
|
| 99 |
+
allow_nd=True,
|
| 100 |
+
dtype=None,
|
| 101 |
+
reset=False,
|
| 102 |
+
)
|
| 103 |
+
|
| 104 |
+
    @_fit_context(
        # AdaBoost*.estimator is not validated yet
        prefer_skip_nested_validation=False
    )
    def fit(self, X, y, sample_weight=None):
        """Build a boosted classifier/regressor from the training set (X, y).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. COO, DOK, and LIL are converted to CSR.

        y : array-like of shape (n_samples,)
            The target values.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, the sample weights are initialized to
            1 / n_samples.

        Returns
        -------
        self : object
            Fitted estimator.
        """
        _raise_for_unsupported_routing(self, "fit", sample_weight=sample_weight)
        X, y = validate_data(
            self,
            X,
            y,
            accept_sparse=["csr", "csc"],
            ensure_2d=True,
            allow_nd=True,
            dtype=None,
            y_numeric=is_regressor(self),
        )

        # Weights are copied so the caller's array is never mutated, and
        # normalized to sum to one before boosting starts.
        sample_weight = _check_sample_weight(
            sample_weight, X, np.float64, copy=True, ensure_non_negative=True
        )
        sample_weight /= sample_weight.sum()

        # Check parameters
        self._validate_estimator()

        # Clear any previous fit results
        self.estimators_ = []
        self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float64)
        self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float64)

        # Initialization of the random number instance that will be used to
        # generate a seed at each iteration
        random_state = check_random_state(self.random_state)
        epsilon = np.finfo(sample_weight.dtype).eps

        # Samples whose weight was exactly zero on input stay at zero for the
        # whole run; only strictly positive weights are floored at epsilon.
        zero_weight_mask = sample_weight == 0.0
        for iboost in range(self.n_estimators):
            # avoid extremely small sample weight, for details see issue #20320
            sample_weight = np.clip(sample_weight, a_min=epsilon, a_max=None)
            # do not clip sample weights that were exactly zero originally
            sample_weight[zero_weight_mask] = 0.0

            # Boosting step
            sample_weight, estimator_weight, estimator_error = self._boost(
                iboost, X, y, sample_weight, random_state
            )

            # Early termination
            if sample_weight is None:
                break
            self.estimator_weights_[iboost] = estimator_weight
            self.estimator_errors_[iboost] = estimator_error

            # Stop if error is zero
            if estimator_error == 0:
                break

            sample_weight_sum = np.sum(sample_weight)

            if not np.isfinite(sample_weight_sum):
                warnings.warn(
                    (
                        "Sample weights have reached infinite values,"
                        f" at iteration {iboost}, causing overflow. "
                        "Iterations stopped. Try lowering the learning rate."
                    ),
                    stacklevel=2,
                )
                break

            # Stop if the sum of sample weights has become non-positive
            if sample_weight_sum <= 0:
                break

            if iboost < self.n_estimators - 1:
                # Normalize
                # (skipped on the last round since the weights are not reused)
                sample_weight /= sample_weight_sum

        return self
|
| 203 |
+
|
| 204 |
+
    @abstractmethod
    def _boost(self, iboost, X, y, sample_weight, random_state):
        """Implement a single boost.

        Warning: This method needs to be overridden by subclasses.

        Parameters
        ----------
        iboost : int
            The index of the current boost iteration.

        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. COO, DOK, and LIL are converted to CSR.

        y : array-like of shape (n_samples,)
            The target values (class labels).

        sample_weight : array-like of shape (n_samples,)
            The current sample weights.

        random_state : RandomState
            The current random number generator

        Returns
        -------
        sample_weight : array-like of shape (n_samples,) or None
            The reweighted sample weights.
            If None then boosting has terminated early.

        estimator_weight : float
            The weight for the current boost.
            If None then boosting has terminated early.

        error : float
            The classification error for the current boost.
            If None then boosting has terminated early.
        """
        pass
|
| 243 |
+
|
| 244 |
+
def staged_score(self, X, y, sample_weight=None):
|
| 245 |
+
"""Return staged scores for X, y.
|
| 246 |
+
|
| 247 |
+
This generator method yields the ensemble score after each iteration of
|
| 248 |
+
boosting and therefore allows monitoring, such as to determine the
|
| 249 |
+
score on a test set after each boost.
|
| 250 |
+
|
| 251 |
+
Parameters
|
| 252 |
+
----------
|
| 253 |
+
X : {array-like, sparse matrix} of shape (n_samples, n_features)
|
| 254 |
+
The training input samples. Sparse matrix can be CSC, CSR, COO,
|
| 255 |
+
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
|
| 256 |
+
|
| 257 |
+
y : array-like of shape (n_samples,)
|
| 258 |
+
Labels for X.
|
| 259 |
+
|
| 260 |
+
sample_weight : array-like of shape (n_samples,), default=None
|
| 261 |
+
Sample weights.
|
| 262 |
+
|
| 263 |
+
Yields
|
| 264 |
+
------
|
| 265 |
+
z : float
|
| 266 |
+
"""
|
| 267 |
+
X = self._check_X(X)
|
| 268 |
+
|
| 269 |
+
for y_pred in self.staged_predict(X):
|
| 270 |
+
if is_classifier(self):
|
| 271 |
+
yield accuracy_score(y, y_pred, sample_weight=sample_weight)
|
| 272 |
+
else:
|
| 273 |
+
yield r2_score(y, y_pred, sample_weight=sample_weight)
|
| 274 |
+
|
| 275 |
+
@property
|
| 276 |
+
def feature_importances_(self):
|
| 277 |
+
"""The impurity-based feature importances.
|
| 278 |
+
|
| 279 |
+
The higher, the more important the feature.
|
| 280 |
+
The importance of a feature is computed as the (normalized)
|
| 281 |
+
total reduction of the criterion brought by that feature. It is also
|
| 282 |
+
known as the Gini importance.
|
| 283 |
+
|
| 284 |
+
Warning: impurity-based feature importances can be misleading for
|
| 285 |
+
high cardinality features (many unique values). See
|
| 286 |
+
:func:`sklearn.inspection.permutation_importance` as an alternative.
|
| 287 |
+
|
| 288 |
+
Returns
|
| 289 |
+
-------
|
| 290 |
+
feature_importances_ : ndarray of shape (n_features,)
|
| 291 |
+
The feature importances.
|
| 292 |
+
"""
|
| 293 |
+
if self.estimators_ is None or len(self.estimators_) == 0:
|
| 294 |
+
raise ValueError(
|
| 295 |
+
"Estimator not fitted, call `fit` before `feature_importances_`."
|
| 296 |
+
)
|
| 297 |
+
|
| 298 |
+
try:
|
| 299 |
+
norm = self.estimator_weights_.sum()
|
| 300 |
+
return (
|
| 301 |
+
sum(
|
| 302 |
+
weight * clf.feature_importances_
|
| 303 |
+
for weight, clf in zip(self.estimator_weights_, self.estimators_)
|
| 304 |
+
)
|
| 305 |
+
/ norm
|
| 306 |
+
)
|
| 307 |
+
|
| 308 |
+
except AttributeError as e:
|
| 309 |
+
raise AttributeError(
|
| 310 |
+
"Unable to compute feature importances "
|
| 311 |
+
"since estimator does not have a "
|
| 312 |
+
"feature_importances_ attribute"
|
| 313 |
+
) from e
|
| 314 |
+
|
| 315 |
+
def __sklearn_tags__(self):
|
| 316 |
+
tags = super().__sklearn_tags__()
|
| 317 |
+
tags.input_tags.sparse = True
|
| 318 |
+
return tags
|
| 319 |
+
|
| 320 |
+
|
| 321 |
+
def _samme_proba(estimator, n_classes, X):
|
| 322 |
+
"""Calculate algorithm 4, step 2, equation c) of Zhu et al [1].
|
| 323 |
+
|
| 324 |
+
References
|
| 325 |
+
----------
|
| 326 |
+
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
|
| 327 |
+
|
| 328 |
+
"""
|
| 329 |
+
proba = estimator.predict_proba(X)
|
| 330 |
+
|
| 331 |
+
# Displace zero probabilities so the log is defined.
|
| 332 |
+
# Also fix negative elements which may occur with
|
| 333 |
+
# negative sample weights.
|
| 334 |
+
np.clip(proba, np.finfo(proba.dtype).eps, None, out=proba)
|
| 335 |
+
log_proba = np.log(proba)
|
| 336 |
+
|
| 337 |
+
return (n_classes - 1) * (
|
| 338 |
+
log_proba - (1.0 / n_classes) * log_proba.sum(axis=1)[:, np.newaxis]
|
| 339 |
+
)
|
| 340 |
+
|
| 341 |
+
|
| 342 |
+
class AdaBoostClassifier(
|
| 343 |
+
_RoutingNotSupportedMixin, ClassifierMixin, BaseWeightBoosting
|
| 344 |
+
):
|
| 345 |
+
"""An AdaBoost classifier.
|
| 346 |
+
|
| 347 |
+
An AdaBoost [1]_ classifier is a meta-estimator that begins by fitting a
|
| 348 |
+
classifier on the original dataset and then fits additional copies of the
|
| 349 |
+
classifier on the same dataset but where the weights of incorrectly
|
| 350 |
+
classified instances are adjusted such that subsequent classifiers focus
|
| 351 |
+
more on difficult cases.
|
| 352 |
+
|
| 353 |
+
This class implements the algorithm based on [2]_.
|
| 354 |
+
|
| 355 |
+
Read more in the :ref:`User Guide <adaboost>`.
|
| 356 |
+
|
| 357 |
+
.. versionadded:: 0.14
|
| 358 |
+
|
| 359 |
+
Parameters
|
| 360 |
+
----------
|
| 361 |
+
estimator : object, default=None
|
| 362 |
+
The base estimator from which the boosted ensemble is built.
|
| 363 |
+
Support for sample weighting is required, as well as proper
|
| 364 |
+
``classes_`` and ``n_classes_`` attributes. If ``None``, then
|
| 365 |
+
the base estimator is :class:`~sklearn.tree.DecisionTreeClassifier`
|
| 366 |
+
initialized with `max_depth=1`.
|
| 367 |
+
|
| 368 |
+
.. versionadded:: 1.2
|
| 369 |
+
`base_estimator` was renamed to `estimator`.
|
| 370 |
+
|
| 371 |
+
n_estimators : int, default=50
|
| 372 |
+
The maximum number of estimators at which boosting is terminated.
|
| 373 |
+
In case of perfect fit, the learning procedure is stopped early.
|
| 374 |
+
Values must be in the range `[1, inf)`.
|
| 375 |
+
|
| 376 |
+
learning_rate : float, default=1.0
|
| 377 |
+
Weight applied to each classifier at each boosting iteration. A higher
|
| 378 |
+
learning rate increases the contribution of each classifier. There is
|
| 379 |
+
a trade-off between the `learning_rate` and `n_estimators` parameters.
|
| 380 |
+
Values must be in the range `(0.0, inf)`.
|
| 381 |
+
|
| 382 |
+
algorithm : {'SAMME'}, default='SAMME'
|
| 383 |
+
Use the SAMME discrete boosting algorithm.
|
| 384 |
+
|
| 385 |
+
.. deprecated:: 1.6
|
| 386 |
+
`algorithm` is deprecated and will be removed in version 1.8. This
|
| 387 |
+
estimator only implements the 'SAMME' algorithm.
|
| 388 |
+
|
| 389 |
+
random_state : int, RandomState instance or None, default=None
|
| 390 |
+
Controls the random seed given at each `estimator` at each
|
| 391 |
+
boosting iteration.
|
| 392 |
+
Thus, it is only used when `estimator` exposes a `random_state`.
|
| 393 |
+
Pass an int for reproducible output across multiple function calls.
|
| 394 |
+
See :term:`Glossary <random_state>`.
|
| 395 |
+
|
| 396 |
+
Attributes
|
| 397 |
+
----------
|
| 398 |
+
estimator_ : estimator
|
| 399 |
+
The base estimator from which the ensemble is grown.
|
| 400 |
+
|
| 401 |
+
.. versionadded:: 1.2
|
| 402 |
+
`base_estimator_` was renamed to `estimator_`.
|
| 403 |
+
|
| 404 |
+
estimators_ : list of classifiers
|
| 405 |
+
The collection of fitted sub-estimators.
|
| 406 |
+
|
| 407 |
+
classes_ : ndarray of shape (n_classes,)
|
| 408 |
+
The classes labels.
|
| 409 |
+
|
| 410 |
+
n_classes_ : int
|
| 411 |
+
The number of classes.
|
| 412 |
+
|
| 413 |
+
estimator_weights_ : ndarray of floats
|
| 414 |
+
Weights for each estimator in the boosted ensemble.
|
| 415 |
+
|
| 416 |
+
estimator_errors_ : ndarray of floats
|
| 417 |
+
Classification error for each estimator in the boosted
|
| 418 |
+
ensemble.
|
| 419 |
+
|
| 420 |
+
feature_importances_ : ndarray of shape (n_features,)
|
| 421 |
+
The impurity-based feature importances if supported by the
|
| 422 |
+
``estimator`` (when based on decision trees).
|
| 423 |
+
|
| 424 |
+
Warning: impurity-based feature importances can be misleading for
|
| 425 |
+
high cardinality features (many unique values). See
|
| 426 |
+
:func:`sklearn.inspection.permutation_importance` as an alternative.
|
| 427 |
+
|
| 428 |
+
n_features_in_ : int
|
| 429 |
+
Number of features seen during :term:`fit`.
|
| 430 |
+
|
| 431 |
+
.. versionadded:: 0.24
|
| 432 |
+
|
| 433 |
+
feature_names_in_ : ndarray of shape (`n_features_in_`,)
|
| 434 |
+
Names of features seen during :term:`fit`. Defined only when `X`
|
| 435 |
+
has feature names that are all strings.
|
| 436 |
+
|
| 437 |
+
.. versionadded:: 1.0
|
| 438 |
+
|
| 439 |
+
See Also
|
| 440 |
+
--------
|
| 441 |
+
AdaBoostRegressor : An AdaBoost regressor that begins by fitting a
|
| 442 |
+
regressor on the original dataset and then fits additional copies of
|
| 443 |
+
the regressor on the same dataset but where the weights of instances
|
| 444 |
+
are adjusted according to the error of the current prediction.
|
| 445 |
+
|
| 446 |
+
GradientBoostingClassifier : GB builds an additive model in a forward
|
| 447 |
+
stage-wise fashion. Regression trees are fit on the negative gradient
|
| 448 |
+
of the binomial or multinomial deviance loss function. Binary
|
| 449 |
+
classification is a special case where only a single regression tree is
|
| 450 |
+
induced.
|
| 451 |
+
|
| 452 |
+
sklearn.tree.DecisionTreeClassifier : A non-parametric supervised learning
|
| 453 |
+
method used for classification.
|
| 454 |
+
Creates a model that predicts the value of a target variable by
|
| 455 |
+
learning simple decision rules inferred from the data features.
|
| 456 |
+
|
| 457 |
+
References
|
| 458 |
+
----------
|
| 459 |
+
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
|
| 460 |
+
on-Line Learning and an Application to Boosting", 1995.
|
| 461 |
+
|
| 462 |
+
.. [2] :doi:`J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class adaboost."
|
| 463 |
+
Statistics and its Interface 2.3 (2009): 349-360.
|
| 464 |
+
<10.4310/SII.2009.v2.n3.a8>`
|
| 465 |
+
|
| 466 |
+
Examples
|
| 467 |
+
--------
|
| 468 |
+
>>> from sklearn.ensemble import AdaBoostClassifier
|
| 469 |
+
>>> from sklearn.datasets import make_classification
|
| 470 |
+
>>> X, y = make_classification(n_samples=1000, n_features=4,
|
| 471 |
+
... n_informative=2, n_redundant=0,
|
| 472 |
+
... random_state=0, shuffle=False)
|
| 473 |
+
>>> clf = AdaBoostClassifier(n_estimators=100, random_state=0)
|
| 474 |
+
>>> clf.fit(X, y)
|
| 475 |
+
AdaBoostClassifier(n_estimators=100, random_state=0)
|
| 476 |
+
>>> clf.predict([[0, 0, 0, 0]])
|
| 477 |
+
array([1])
|
| 478 |
+
>>> clf.score(X, y)
|
| 479 |
+
0.96...
|
| 480 |
+
|
| 481 |
+
For a detailed example of using AdaBoost to fit a sequence of DecisionTrees
|
| 482 |
+
as weaklearners, please refer to
|
| 483 |
+
:ref:`sphx_glr_auto_examples_ensemble_plot_adaboost_multiclass.py`.
|
| 484 |
+
|
| 485 |
+
For a detailed example of using AdaBoost to fit a non-linearly seperable
|
| 486 |
+
classification dataset composed of two Gaussian quantiles clusters, please
|
| 487 |
+
refer to :ref:`sphx_glr_auto_examples_ensemble_plot_adaboost_twoclass.py`.
|
| 488 |
+
"""
|
| 489 |
+
|
| 490 |
+
# TODO(1.8): remove "algorithm" entry
|
| 491 |
+
_parameter_constraints: dict = {
|
| 492 |
+
**BaseWeightBoosting._parameter_constraints,
|
| 493 |
+
"algorithm": [StrOptions({"SAMME"}), Hidden(StrOptions({"deprecated"}))],
|
| 494 |
+
}
|
| 495 |
+
|
| 496 |
+
def __init__(
|
| 497 |
+
self,
|
| 498 |
+
estimator=None,
|
| 499 |
+
*,
|
| 500 |
+
n_estimators=50,
|
| 501 |
+
learning_rate=1.0,
|
| 502 |
+
algorithm="deprecated",
|
| 503 |
+
random_state=None,
|
| 504 |
+
):
|
| 505 |
+
super().__init__(
|
| 506 |
+
estimator=estimator,
|
| 507 |
+
n_estimators=n_estimators,
|
| 508 |
+
learning_rate=learning_rate,
|
| 509 |
+
random_state=random_state,
|
| 510 |
+
)
|
| 511 |
+
|
| 512 |
+
self.algorithm = algorithm
|
| 513 |
+
|
| 514 |
+
def _validate_estimator(self):
|
| 515 |
+
"""Check the estimator and set the estimator_ attribute."""
|
| 516 |
+
super()._validate_estimator(default=DecisionTreeClassifier(max_depth=1))
|
| 517 |
+
|
| 518 |
+
if self.algorithm != "deprecated":
|
| 519 |
+
warnings.warn(
|
| 520 |
+
"The parameter 'algorithm' is deprecated in 1.6 and has no effect. "
|
| 521 |
+
"It will be removed in version 1.8.",
|
| 522 |
+
FutureWarning,
|
| 523 |
+
)
|
| 524 |
+
|
| 525 |
+
if not has_fit_parameter(self.estimator_, "sample_weight"):
|
| 526 |
+
raise ValueError(
|
| 527 |
+
f"{self.estimator.__class__.__name__} doesn't support sample_weight."
|
| 528 |
+
)
|
| 529 |
+
|
| 530 |
+
    def _boost(self, iboost, X, y, sample_weight, random_state):
        """Implement a single boost.

        Perform a single boost according to the discrete SAMME algorithm and return the
        updated sample weights.

        Parameters
        ----------
        iboost : int
            The index of the current boost iteration.

        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples.

        y : array-like of shape (n_samples,)
            The target values (class labels).

        sample_weight : array-like of shape (n_samples,)
            The current sample weights.

        random_state : RandomState instance
            The RandomState instance used if the base estimator accepts a
            `random_state` attribute.

        Returns
        -------
        sample_weight : array-like of shape (n_samples,) or None
            The reweighted sample weights.
            If None then boosting has terminated early.

        estimator_weight : float
            The weight for the current boost.
            If None then boosting has terminated early.

        estimator_error : float
            The classification error for the current boost.
            If None then boosting has terminated early.
        """
        # _make_estimator appends a fresh clone to self.estimators_.
        estimator = self._make_estimator(random_state=random_state)

        estimator.fit(X, y, sample_weight=sample_weight)

        y_predict = estimator.predict(X)

        if iboost == 0:
            # The first fitted estimator defines the label vocabulary used by
            # the whole ensemble.
            self.classes_ = getattr(estimator, "classes_", None)
            self.n_classes_ = len(self.classes_)

        # Instances incorrectly classified
        incorrect = y_predict != y

        # Error fraction
        estimator_error = np.mean(np.average(incorrect, weights=sample_weight, axis=0))

        # Stop if classification is perfect
        if estimator_error <= 0:
            return sample_weight, 1.0, 0.0

        n_classes = self.n_classes_

        # Stop if the error is at least as bad as random guessing
        if estimator_error >= 1.0 - (1.0 / n_classes):
            # Discard the estimator just appended by _make_estimator.
            self.estimators_.pop(-1)
            if len(self.estimators_) == 0:
                raise ValueError(
                    "BaseClassifier in AdaBoostClassifier "
                    "ensemble is worse than random, ensemble "
                    "can not be fit."
                )
            return None, None, None

        # Boost weight using multi-class AdaBoost SAMME alg
        estimator_weight = self.learning_rate * (
            np.log((1.0 - estimator_error) / estimator_error) + np.log(n_classes - 1.0)
        )

        # Only boost the weights if it will fit again
        if not iboost == self.n_estimators - 1:
            # Only boost positive weights
            # (working in log-space keeps zero weights at zero: exp(log(0)) = 0)
            sample_weight = np.exp(
                np.log(sample_weight)
                + estimator_weight * incorrect * (sample_weight > 0)
            )

        return sample_weight, estimator_weight, estimator_error
|
| 615 |
+
|
| 616 |
+
def predict(self, X):
|
| 617 |
+
"""Predict classes for X.
|
| 618 |
+
|
| 619 |
+
The predicted class of an input sample is computed as the weighted mean
|
| 620 |
+
prediction of the classifiers in the ensemble.
|
| 621 |
+
|
| 622 |
+
Parameters
|
| 623 |
+
----------
|
| 624 |
+
X : {array-like, sparse matrix} of shape (n_samples, n_features)
|
| 625 |
+
The training input samples. Sparse matrix can be CSC, CSR, COO,
|
| 626 |
+
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
|
| 627 |
+
|
| 628 |
+
Returns
|
| 629 |
+
-------
|
| 630 |
+
y : ndarray of shape (n_samples,)
|
| 631 |
+
The predicted classes.
|
| 632 |
+
"""
|
| 633 |
+
pred = self.decision_function(X)
|
| 634 |
+
|
| 635 |
+
if self.n_classes_ == 2:
|
| 636 |
+
return self.classes_.take(pred > 0, axis=0)
|
| 637 |
+
|
| 638 |
+
return self.classes_.take(np.argmax(pred, axis=1), axis=0)
|
| 639 |
+
|
| 640 |
+
def staged_predict(self, X):
|
| 641 |
+
"""Return staged predictions for X.
|
| 642 |
+
|
| 643 |
+
The predicted class of an input sample is computed as the weighted mean
|
| 644 |
+
prediction of the classifiers in the ensemble.
|
| 645 |
+
|
| 646 |
+
This generator method yields the ensemble prediction after each
|
| 647 |
+
iteration of boosting and therefore allows monitoring, such as to
|
| 648 |
+
determine the prediction on a test set after each boost.
|
| 649 |
+
|
| 650 |
+
Parameters
|
| 651 |
+
----------
|
| 652 |
+
X : array-like of shape (n_samples, n_features)
|
| 653 |
+
The input samples. Sparse matrix can be CSC, CSR, COO,
|
| 654 |
+
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
|
| 655 |
+
|
| 656 |
+
Yields
|
| 657 |
+
------
|
| 658 |
+
y : generator of ndarray of shape (n_samples,)
|
| 659 |
+
The predicted classes.
|
| 660 |
+
"""
|
| 661 |
+
X = self._check_X(X)
|
| 662 |
+
|
| 663 |
+
n_classes = self.n_classes_
|
| 664 |
+
classes = self.classes_
|
| 665 |
+
|
| 666 |
+
if n_classes == 2:
|
| 667 |
+
for pred in self.staged_decision_function(X):
|
| 668 |
+
yield np.array(classes.take(pred > 0, axis=0))
|
| 669 |
+
|
| 670 |
+
else:
|
| 671 |
+
for pred in self.staged_decision_function(X):
|
| 672 |
+
yield np.array(classes.take(np.argmax(pred, axis=1), axis=0))
|
| 673 |
+
|
| 674 |
+
    def decision_function(self, X):
        """Compute the decision function of ``X``.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. COO, DOK, and LIL are converted to CSR.

        Returns
        -------
        score : ndarray of shape of (n_samples, k)
            The decision function of the input samples. The order of
            outputs is the same as that of the :term:`classes_` attribute.
            Binary classification is a special cases with ``k == 1``,
            otherwise ``k==n_classes``. For binary classification,
            values closer to -1 or 1 mean more like the first or second
            class in ``classes_``, respectively.
        """
        check_is_fitted(self)
        X = self._check_X(X)

        n_classes = self.n_classes_
        classes = self.classes_[:, np.newaxis]

        if n_classes == 1:
            # Degenerate single-class problem: the decision is identically 0.
            return np.zeros_like(X, shape=(X.shape[0], 1))

        # SAMME vote: each estimator adds +w to its predicted class and
        # -w / (K - 1) to every other class, so each row sums to zero.
        pred = sum(
            np.where(
                (estimator.predict(X) == classes).T,
                w,
                -1 / (n_classes - 1) * w,
            )
            for estimator, w in zip(self.estimators_, self.estimator_weights_)
        )

        pred /= self.estimator_weights_.sum()
        if n_classes == 2:
            # Collapse the symmetric two-column score into one signed score.
            pred[:, 0] *= -1
            return pred.sum(axis=1)
        return pred
|
| 716 |
+
|
| 717 |
+
    def staged_decision_function(self, X):
        """Compute decision function of ``X`` for each boosting iteration.

        This method allows monitoring (i.e. determine error on testing set)
        after each boosting iteration.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. COO, DOK, and LIL are converted to CSR.

        Yields
        ------
        score : generator of ndarray of shape (n_samples, k)
            The decision function of the input samples. The order of
            outputs is the same of that of the :term:`classes_` attribute.
            Binary classification is a special cases with ``k == 1``,
            otherwise ``k==n_classes``. For binary classification,
            values closer to -1 or 1 mean more like the first or second
            class in ``classes_``, respectively.
        """
        check_is_fitted(self)
        X = self._check_X(X)

        n_classes = self.n_classes_
        classes = self.classes_[:, np.newaxis]
        # Running (unnormalized) vote and running weight normalizer.
        pred = None
        norm = 0.0

        for weight, estimator in zip(self.estimator_weights_, self.estimators_):
            norm += weight

            # Same SAMME vote as decision_function, one estimator at a time.
            current_pred = np.where(
                (estimator.predict(X) == classes).T,
                weight,
                -1 / (n_classes - 1) * weight,
            )

            if pred is None:
                pred = current_pred
            else:
                pred += current_pred

            if n_classes == 2:
                # Copy before flipping so the accumulator stays untouched.
                tmp_pred = np.copy(pred)
                tmp_pred[:, 0] *= -1
                yield (tmp_pred / norm).sum(axis=1)
            else:
                yield pred / norm
|
| 767 |
+
|
| 768 |
+
    @staticmethod
    def _compute_proba_from_decision(decision, n_classes):
        """Compute probabilities from the decision function.

        This is based eq. (15) of [1] where:
        p(y=c|X) = exp((1 / K-1) f_c(X)) / sum_k(exp((1 / K-1) f_k(X)))
                 = softmax((1 / K-1) * f(X))

        References
        ----------
        .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost",
               2009.
        """
        if n_classes == 2:
            # Expand the single binary score into a symmetric two-column
            # representation before the softmax.
            decision = np.vstack([-decision, decision]).T / 2
        else:
            # NOTE: scales `decision` in place (the caller passes a fresh
            # array), matching the in-place softmax below.
            decision /= n_classes - 1
        return softmax(decision, copy=False)
|
| 786 |
+
|
| 787 |
+
def predict_proba(self, X):
|
| 788 |
+
"""Predict class probabilities for X.
|
| 789 |
+
|
| 790 |
+
The predicted class probabilities of an input sample is computed as
|
| 791 |
+
the weighted mean predicted class probabilities of the classifiers
|
| 792 |
+
in the ensemble.
|
| 793 |
+
|
| 794 |
+
Parameters
|
| 795 |
+
----------
|
| 796 |
+
X : {array-like, sparse matrix} of shape (n_samples, n_features)
|
| 797 |
+
The training input samples. Sparse matrix can be CSC, CSR, COO,
|
| 798 |
+
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
|
| 799 |
+
|
| 800 |
+
Returns
|
| 801 |
+
-------
|
| 802 |
+
p : ndarray of shape (n_samples, n_classes)
|
| 803 |
+
The class probabilities of the input samples. The order of
|
| 804 |
+
outputs is the same of that of the :term:`classes_` attribute.
|
| 805 |
+
"""
|
| 806 |
+
check_is_fitted(self)
|
| 807 |
+
n_classes = self.n_classes_
|
| 808 |
+
|
| 809 |
+
if n_classes == 1:
|
| 810 |
+
return np.ones((_num_samples(X), 1))
|
| 811 |
+
|
| 812 |
+
decision = self.decision_function(X)
|
| 813 |
+
return self._compute_proba_from_decision(decision, n_classes)
|
| 814 |
+
|
| 815 |
+
def staged_predict_proba(self, X):
|
| 816 |
+
"""Predict class probabilities for X.
|
| 817 |
+
|
| 818 |
+
The predicted class probabilities of an input sample is computed as
|
| 819 |
+
the weighted mean predicted class probabilities of the classifiers
|
| 820 |
+
in the ensemble.
|
| 821 |
+
|
| 822 |
+
This generator method yields the ensemble predicted class probabilities
|
| 823 |
+
after each iteration of boosting and therefore allows monitoring, such
|
| 824 |
+
as to determine the predicted class probabilities on a test set after
|
| 825 |
+
each boost.
|
| 826 |
+
|
| 827 |
+
Parameters
|
| 828 |
+
----------
|
| 829 |
+
X : {array-like, sparse matrix} of shape (n_samples, n_features)
|
| 830 |
+
The training input samples. Sparse matrix can be CSC, CSR, COO,
|
| 831 |
+
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
|
| 832 |
+
|
| 833 |
+
Yields
|
| 834 |
+
------
|
| 835 |
+
p : generator of ndarray of shape (n_samples,)
|
| 836 |
+
The class probabilities of the input samples. The order of
|
| 837 |
+
outputs is the same of that of the :term:`classes_` attribute.
|
| 838 |
+
"""
|
| 839 |
+
|
| 840 |
+
n_classes = self.n_classes_
|
| 841 |
+
|
| 842 |
+
for decision in self.staged_decision_function(X):
|
| 843 |
+
yield self._compute_proba_from_decision(decision, n_classes)
|
| 844 |
+
|
| 845 |
+
def predict_log_proba(self, X):
|
| 846 |
+
"""Predict class log-probabilities for X.
|
| 847 |
+
|
| 848 |
+
The predicted class log-probabilities of an input sample is computed as
|
| 849 |
+
the weighted mean predicted class log-probabilities of the classifiers
|
| 850 |
+
in the ensemble.
|
| 851 |
+
|
| 852 |
+
Parameters
|
| 853 |
+
----------
|
| 854 |
+
X : {array-like, sparse matrix} of shape (n_samples, n_features)
|
| 855 |
+
The training input samples. Sparse matrix can be CSC, CSR, COO,
|
| 856 |
+
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
|
| 857 |
+
|
| 858 |
+
Returns
|
| 859 |
+
-------
|
| 860 |
+
p : ndarray of shape (n_samples, n_classes)
|
| 861 |
+
The class probabilities of the input samples. The order of
|
| 862 |
+
outputs is the same of that of the :term:`classes_` attribute.
|
| 863 |
+
"""
|
| 864 |
+
return np.log(self.predict_proba(X))
|
| 865 |
+
|
| 866 |
+
|
| 867 |
+
class AdaBoostRegressor(_RoutingNotSupportedMixin, RegressorMixin, BaseWeightBoosting):
|
| 868 |
+
"""An AdaBoost regressor.
|
| 869 |
+
|
| 870 |
+
An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
|
| 871 |
+
regressor on the original dataset and then fits additional copies of the
|
| 872 |
+
regressor on the same dataset but where the weights of instances are
|
| 873 |
+
adjusted according to the error of the current prediction. As such,
|
| 874 |
+
subsequent regressors focus more on difficult cases.
|
| 875 |
+
|
| 876 |
+
This class implements the algorithm known as AdaBoost.R2 [2].
|
| 877 |
+
|
| 878 |
+
Read more in the :ref:`User Guide <adaboost>`.
|
| 879 |
+
|
| 880 |
+
.. versionadded:: 0.14
|
| 881 |
+
|
| 882 |
+
Parameters
|
| 883 |
+
----------
|
| 884 |
+
estimator : object, default=None
|
| 885 |
+
The base estimator from which the boosted ensemble is built.
|
| 886 |
+
If ``None``, then the base estimator is
|
| 887 |
+
:class:`~sklearn.tree.DecisionTreeRegressor` initialized with
|
| 888 |
+
`max_depth=3`.
|
| 889 |
+
|
| 890 |
+
.. versionadded:: 1.2
|
| 891 |
+
`base_estimator` was renamed to `estimator`.
|
| 892 |
+
|
| 893 |
+
n_estimators : int, default=50
|
| 894 |
+
The maximum number of estimators at which boosting is terminated.
|
| 895 |
+
In case of perfect fit, the learning procedure is stopped early.
|
| 896 |
+
Values must be in the range `[1, inf)`.
|
| 897 |
+
|
| 898 |
+
learning_rate : float, default=1.0
|
| 899 |
+
Weight applied to each regressor at each boosting iteration. A higher
|
| 900 |
+
learning rate increases the contribution of each regressor. There is
|
| 901 |
+
a trade-off between the `learning_rate` and `n_estimators` parameters.
|
| 902 |
+
Values must be in the range `(0.0, inf)`.
|
| 903 |
+
|
| 904 |
+
loss : {'linear', 'square', 'exponential'}, default='linear'
|
| 905 |
+
The loss function to use when updating the weights after each
|
| 906 |
+
boosting iteration.
|
| 907 |
+
|
| 908 |
+
random_state : int, RandomState instance or None, default=None
|
| 909 |
+
Controls the random seed given at each `estimator` at each
|
| 910 |
+
boosting iteration.
|
| 911 |
+
Thus, it is only used when `estimator` exposes a `random_state`.
|
| 912 |
+
In addition, it controls the bootstrap of the weights used to train the
|
| 913 |
+
`estimator` at each boosting iteration.
|
| 914 |
+
Pass an int for reproducible output across multiple function calls.
|
| 915 |
+
See :term:`Glossary <random_state>`.
|
| 916 |
+
|
| 917 |
+
Attributes
|
| 918 |
+
----------
|
| 919 |
+
estimator_ : estimator
|
| 920 |
+
The base estimator from which the ensemble is grown.
|
| 921 |
+
|
| 922 |
+
.. versionadded:: 1.2
|
| 923 |
+
`base_estimator_` was renamed to `estimator_`.
|
| 924 |
+
|
| 925 |
+
estimators_ : list of regressors
|
| 926 |
+
The collection of fitted sub-estimators.
|
| 927 |
+
|
| 928 |
+
estimator_weights_ : ndarray of floats
|
| 929 |
+
Weights for each estimator in the boosted ensemble.
|
| 930 |
+
|
| 931 |
+
estimator_errors_ : ndarray of floats
|
| 932 |
+
Regression error for each estimator in the boosted ensemble.
|
| 933 |
+
|
| 934 |
+
feature_importances_ : ndarray of shape (n_features,)
|
| 935 |
+
The impurity-based feature importances if supported by the
|
| 936 |
+
``estimator`` (when based on decision trees).
|
| 937 |
+
|
| 938 |
+
Warning: impurity-based feature importances can be misleading for
|
| 939 |
+
high cardinality features (many unique values). See
|
| 940 |
+
:func:`sklearn.inspection.permutation_importance` as an alternative.
|
| 941 |
+
|
| 942 |
+
n_features_in_ : int
|
| 943 |
+
Number of features seen during :term:`fit`.
|
| 944 |
+
|
| 945 |
+
.. versionadded:: 0.24
|
| 946 |
+
|
| 947 |
+
feature_names_in_ : ndarray of shape (`n_features_in_`,)
|
| 948 |
+
Names of features seen during :term:`fit`. Defined only when `X`
|
| 949 |
+
has feature names that are all strings.
|
| 950 |
+
|
| 951 |
+
.. versionadded:: 1.0
|
| 952 |
+
|
| 953 |
+
See Also
|
| 954 |
+
--------
|
| 955 |
+
AdaBoostClassifier : An AdaBoost classifier.
|
| 956 |
+
GradientBoostingRegressor : Gradient Boosting Classification Tree.
|
| 957 |
+
sklearn.tree.DecisionTreeRegressor : A decision tree regressor.
|
| 958 |
+
|
| 959 |
+
References
|
| 960 |
+
----------
|
| 961 |
+
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
|
| 962 |
+
on-Line Learning and an Application to Boosting", 1995.
|
| 963 |
+
|
| 964 |
+
.. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
|
| 965 |
+
|
| 966 |
+
Examples
|
| 967 |
+
--------
|
| 968 |
+
>>> from sklearn.ensemble import AdaBoostRegressor
|
| 969 |
+
>>> from sklearn.datasets import make_regression
|
| 970 |
+
>>> X, y = make_regression(n_features=4, n_informative=2,
|
| 971 |
+
... random_state=0, shuffle=False)
|
| 972 |
+
>>> regr = AdaBoostRegressor(random_state=0, n_estimators=100)
|
| 973 |
+
>>> regr.fit(X, y)
|
| 974 |
+
AdaBoostRegressor(n_estimators=100, random_state=0)
|
| 975 |
+
>>> regr.predict([[0, 0, 0, 0]])
|
| 976 |
+
array([4.7972...])
|
| 977 |
+
>>> regr.score(X, y)
|
| 978 |
+
0.9771...
|
| 979 |
+
|
| 980 |
+
For a detailed example of utilizing :class:`~sklearn.ensemble.AdaBoostRegressor`
|
| 981 |
+
to fit a sequence of decision trees as weak learners, please refer to
|
| 982 |
+
:ref:`sphx_glr_auto_examples_ensemble_plot_adaboost_regression.py`.
|
| 983 |
+
"""
|
| 984 |
+
|
| 985 |
+
_parameter_constraints: dict = {
|
| 986 |
+
**BaseWeightBoosting._parameter_constraints,
|
| 987 |
+
"loss": [StrOptions({"linear", "square", "exponential"})],
|
| 988 |
+
}
|
| 989 |
+
|
| 990 |
+
def __init__(
|
| 991 |
+
self,
|
| 992 |
+
estimator=None,
|
| 993 |
+
*,
|
| 994 |
+
n_estimators=50,
|
| 995 |
+
learning_rate=1.0,
|
| 996 |
+
loss="linear",
|
| 997 |
+
random_state=None,
|
| 998 |
+
):
|
| 999 |
+
super().__init__(
|
| 1000 |
+
estimator=estimator,
|
| 1001 |
+
n_estimators=n_estimators,
|
| 1002 |
+
learning_rate=learning_rate,
|
| 1003 |
+
random_state=random_state,
|
| 1004 |
+
)
|
| 1005 |
+
|
| 1006 |
+
self.loss = loss
|
| 1007 |
+
self.random_state = random_state
|
| 1008 |
+
|
| 1009 |
+
def _validate_estimator(self):
|
| 1010 |
+
"""Check the estimator and set the estimator_ attribute."""
|
| 1011 |
+
super()._validate_estimator(default=DecisionTreeRegressor(max_depth=3))
|
| 1012 |
+
|
| 1013 |
+
def _boost(self, iboost, X, y, sample_weight, random_state):
|
| 1014 |
+
"""Implement a single boost for regression
|
| 1015 |
+
|
| 1016 |
+
Perform a single boost according to the AdaBoost.R2 algorithm and
|
| 1017 |
+
return the updated sample weights.
|
| 1018 |
+
|
| 1019 |
+
Parameters
|
| 1020 |
+
----------
|
| 1021 |
+
iboost : int
|
| 1022 |
+
The index of the current boost iteration.
|
| 1023 |
+
|
| 1024 |
+
X : {array-like, sparse matrix} of shape (n_samples, n_features)
|
| 1025 |
+
The training input samples.
|
| 1026 |
+
|
| 1027 |
+
y : array-like of shape (n_samples,)
|
| 1028 |
+
The target values (class labels in classification, real numbers in
|
| 1029 |
+
regression).
|
| 1030 |
+
|
| 1031 |
+
sample_weight : array-like of shape (n_samples,)
|
| 1032 |
+
The current sample weights.
|
| 1033 |
+
|
| 1034 |
+
random_state : RandomState
|
| 1035 |
+
The RandomState instance used if the base estimator accepts a
|
| 1036 |
+
`random_state` attribute.
|
| 1037 |
+
Controls also the bootstrap of the weights used to train the weak
|
| 1038 |
+
learner.
|
| 1039 |
+
|
| 1040 |
+
Returns
|
| 1041 |
+
-------
|
| 1042 |
+
sample_weight : array-like of shape (n_samples,) or None
|
| 1043 |
+
The reweighted sample weights.
|
| 1044 |
+
If None then boosting has terminated early.
|
| 1045 |
+
|
| 1046 |
+
estimator_weight : float
|
| 1047 |
+
The weight for the current boost.
|
| 1048 |
+
If None then boosting has terminated early.
|
| 1049 |
+
|
| 1050 |
+
estimator_error : float
|
| 1051 |
+
The regression error for the current boost.
|
| 1052 |
+
If None then boosting has terminated early.
|
| 1053 |
+
"""
|
| 1054 |
+
estimator = self._make_estimator(random_state=random_state)
|
| 1055 |
+
|
| 1056 |
+
# Weighted sampling of the training set with replacement
|
| 1057 |
+
bootstrap_idx = random_state.choice(
|
| 1058 |
+
np.arange(_num_samples(X)),
|
| 1059 |
+
size=_num_samples(X),
|
| 1060 |
+
replace=True,
|
| 1061 |
+
p=sample_weight,
|
| 1062 |
+
)
|
| 1063 |
+
|
| 1064 |
+
# Fit on the bootstrapped sample and obtain a prediction
|
| 1065 |
+
# for all samples in the training set
|
| 1066 |
+
X_ = _safe_indexing(X, bootstrap_idx)
|
| 1067 |
+
y_ = _safe_indexing(y, bootstrap_idx)
|
| 1068 |
+
estimator.fit(X_, y_)
|
| 1069 |
+
y_predict = estimator.predict(X)
|
| 1070 |
+
|
| 1071 |
+
error_vect = np.abs(y_predict - y)
|
| 1072 |
+
sample_mask = sample_weight > 0
|
| 1073 |
+
masked_sample_weight = sample_weight[sample_mask]
|
| 1074 |
+
masked_error_vector = error_vect[sample_mask]
|
| 1075 |
+
|
| 1076 |
+
error_max = masked_error_vector.max()
|
| 1077 |
+
if error_max != 0:
|
| 1078 |
+
masked_error_vector /= error_max
|
| 1079 |
+
|
| 1080 |
+
if self.loss == "square":
|
| 1081 |
+
masked_error_vector **= 2
|
| 1082 |
+
elif self.loss == "exponential":
|
| 1083 |
+
masked_error_vector = 1.0 - np.exp(-masked_error_vector)
|
| 1084 |
+
|
| 1085 |
+
# Calculate the average loss
|
| 1086 |
+
estimator_error = (masked_sample_weight * masked_error_vector).sum()
|
| 1087 |
+
|
| 1088 |
+
if estimator_error <= 0:
|
| 1089 |
+
# Stop if fit is perfect
|
| 1090 |
+
return sample_weight, 1.0, 0.0
|
| 1091 |
+
|
| 1092 |
+
elif estimator_error >= 0.5:
|
| 1093 |
+
# Discard current estimator only if it isn't the only one
|
| 1094 |
+
if len(self.estimators_) > 1:
|
| 1095 |
+
self.estimators_.pop(-1)
|
| 1096 |
+
return None, None, None
|
| 1097 |
+
|
| 1098 |
+
beta = estimator_error / (1.0 - estimator_error)
|
| 1099 |
+
|
| 1100 |
+
# Boost weight using AdaBoost.R2 alg
|
| 1101 |
+
estimator_weight = self.learning_rate * np.log(1.0 / beta)
|
| 1102 |
+
|
| 1103 |
+
if not iboost == self.n_estimators - 1:
|
| 1104 |
+
sample_weight[sample_mask] *= np.power(
|
| 1105 |
+
beta, (1.0 - masked_error_vector) * self.learning_rate
|
| 1106 |
+
)
|
| 1107 |
+
|
| 1108 |
+
return sample_weight, estimator_weight, estimator_error
|
| 1109 |
+
|
| 1110 |
+
def _get_median_predict(self, X, limit):
|
| 1111 |
+
# Evaluate predictions of all estimators
|
| 1112 |
+
predictions = np.array([est.predict(X) for est in self.estimators_[:limit]]).T
|
| 1113 |
+
|
| 1114 |
+
# Sort the predictions
|
| 1115 |
+
sorted_idx = np.argsort(predictions, axis=1)
|
| 1116 |
+
|
| 1117 |
+
# Find index of median prediction for each sample
|
| 1118 |
+
weight_cdf = stable_cumsum(self.estimator_weights_[sorted_idx], axis=1)
|
| 1119 |
+
median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
|
| 1120 |
+
median_idx = median_or_above.argmax(axis=1)
|
| 1121 |
+
|
| 1122 |
+
median_estimators = sorted_idx[np.arange(_num_samples(X)), median_idx]
|
| 1123 |
+
|
| 1124 |
+
# Return median predictions
|
| 1125 |
+
return predictions[np.arange(_num_samples(X)), median_estimators]
|
| 1126 |
+
|
| 1127 |
+
def predict(self, X):
|
| 1128 |
+
"""Predict regression value for X.
|
| 1129 |
+
|
| 1130 |
+
The predicted regression value of an input sample is computed
|
| 1131 |
+
as the weighted median prediction of the regressors in the ensemble.
|
| 1132 |
+
|
| 1133 |
+
Parameters
|
| 1134 |
+
----------
|
| 1135 |
+
X : {array-like, sparse matrix} of shape (n_samples, n_features)
|
| 1136 |
+
The training input samples. Sparse matrix can be CSC, CSR, COO,
|
| 1137 |
+
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
|
| 1138 |
+
|
| 1139 |
+
Returns
|
| 1140 |
+
-------
|
| 1141 |
+
y : ndarray of shape (n_samples,)
|
| 1142 |
+
The predicted regression values.
|
| 1143 |
+
"""
|
| 1144 |
+
check_is_fitted(self)
|
| 1145 |
+
X = self._check_X(X)
|
| 1146 |
+
|
| 1147 |
+
return self._get_median_predict(X, len(self.estimators_))
|
| 1148 |
+
|
| 1149 |
+
def staged_predict(self, X):
|
| 1150 |
+
"""Return staged predictions for X.
|
| 1151 |
+
|
| 1152 |
+
The predicted regression value of an input sample is computed
|
| 1153 |
+
as the weighted median prediction of the regressors in the ensemble.
|
| 1154 |
+
|
| 1155 |
+
This generator method yields the ensemble prediction after each
|
| 1156 |
+
iteration of boosting and therefore allows monitoring, such as to
|
| 1157 |
+
determine the prediction on a test set after each boost.
|
| 1158 |
+
|
| 1159 |
+
Parameters
|
| 1160 |
+
----------
|
| 1161 |
+
X : {array-like, sparse matrix} of shape (n_samples, n_features)
|
| 1162 |
+
The training input samples.
|
| 1163 |
+
|
| 1164 |
+
Yields
|
| 1165 |
+
------
|
| 1166 |
+
y : generator of ndarray of shape (n_samples,)
|
| 1167 |
+
The predicted regression values.
|
| 1168 |
+
"""
|
| 1169 |
+
check_is_fitted(self)
|
| 1170 |
+
X = self._check_X(X)
|
| 1171 |
+
|
| 1172 |
+
for i, _ in enumerate(self.estimators_, 1):
|
| 1173 |
+
yield self._get_median_predict(X, limit=i)
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_bagging.cpython-310.pyc
ADDED
|
Binary file (21.8 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_common.cpython-310.pyc
ADDED
|
Binary file (5.1 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_forest.cpython-310.pyc
ADDED
|
Binary file (44.9 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_iforest.cpython-310.pyc
ADDED
|
Binary file (10.8 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/__pycache__/test_weight_boosting.cpython-310.pyc
ADDED
|
Binary file (17.5 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/test_base.py
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Testing for the base module (sklearn.ensemble.base).
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
# Authors: The scikit-learn developers
|
| 6 |
+
# SPDX-License-Identifier: BSD-3-Clause
|
| 7 |
+
|
| 8 |
+
from collections import OrderedDict
|
| 9 |
+
|
| 10 |
+
import numpy as np
|
| 11 |
+
|
| 12 |
+
from sklearn.datasets import load_iris
|
| 13 |
+
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
|
| 14 |
+
from sklearn.ensemble import BaggingClassifier
|
| 15 |
+
from sklearn.ensemble._base import _set_random_states
|
| 16 |
+
from sklearn.feature_selection import SelectFromModel
|
| 17 |
+
from sklearn.linear_model import Perceptron
|
| 18 |
+
from sklearn.pipeline import Pipeline
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def test_base():
|
| 22 |
+
# Check BaseEnsemble methods.
|
| 23 |
+
ensemble = BaggingClassifier(
|
| 24 |
+
estimator=Perceptron(random_state=None), n_estimators=3
|
| 25 |
+
)
|
| 26 |
+
|
| 27 |
+
iris = load_iris()
|
| 28 |
+
ensemble.fit(iris.data, iris.target)
|
| 29 |
+
ensemble.estimators_ = [] # empty the list and create estimators manually
|
| 30 |
+
|
| 31 |
+
ensemble._make_estimator()
|
| 32 |
+
random_state = np.random.RandomState(3)
|
| 33 |
+
ensemble._make_estimator(random_state=random_state)
|
| 34 |
+
ensemble._make_estimator(random_state=random_state)
|
| 35 |
+
ensemble._make_estimator(append=False)
|
| 36 |
+
|
| 37 |
+
assert 3 == len(ensemble)
|
| 38 |
+
assert 3 == len(ensemble.estimators_)
|
| 39 |
+
|
| 40 |
+
assert isinstance(ensemble[0], Perceptron)
|
| 41 |
+
assert ensemble[0].random_state is None
|
| 42 |
+
assert isinstance(ensemble[1].random_state, int)
|
| 43 |
+
assert isinstance(ensemble[2].random_state, int)
|
| 44 |
+
assert ensemble[1].random_state != ensemble[2].random_state
|
| 45 |
+
|
| 46 |
+
np_int_ensemble = BaggingClassifier(
|
| 47 |
+
estimator=Perceptron(), n_estimators=np.int32(3)
|
| 48 |
+
)
|
| 49 |
+
np_int_ensemble.fit(iris.data, iris.target)
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def test_set_random_states():
|
| 53 |
+
# Linear Discriminant Analysis doesn't have random state: smoke test
|
| 54 |
+
_set_random_states(LinearDiscriminantAnalysis(), random_state=17)
|
| 55 |
+
|
| 56 |
+
clf1 = Perceptron(random_state=None)
|
| 57 |
+
assert clf1.random_state is None
|
| 58 |
+
# check random_state is None still sets
|
| 59 |
+
_set_random_states(clf1, None)
|
| 60 |
+
assert isinstance(clf1.random_state, int)
|
| 61 |
+
|
| 62 |
+
# check random_state fixes results in consistent initialisation
|
| 63 |
+
_set_random_states(clf1, 3)
|
| 64 |
+
assert isinstance(clf1.random_state, int)
|
| 65 |
+
clf2 = Perceptron(random_state=None)
|
| 66 |
+
_set_random_states(clf2, 3)
|
| 67 |
+
assert clf1.random_state == clf2.random_state
|
| 68 |
+
|
| 69 |
+
# nested random_state
|
| 70 |
+
|
| 71 |
+
def make_steps():
|
| 72 |
+
return [
|
| 73 |
+
("sel", SelectFromModel(Perceptron(random_state=None))),
|
| 74 |
+
("clf", Perceptron(random_state=None)),
|
| 75 |
+
]
|
| 76 |
+
|
| 77 |
+
est1 = Pipeline(make_steps())
|
| 78 |
+
_set_random_states(est1, 3)
|
| 79 |
+
assert isinstance(est1.steps[0][1].estimator.random_state, int)
|
| 80 |
+
assert isinstance(est1.steps[1][1].random_state, int)
|
| 81 |
+
assert (
|
| 82 |
+
est1.get_params()["sel__estimator__random_state"]
|
| 83 |
+
!= est1.get_params()["clf__random_state"]
|
| 84 |
+
)
|
| 85 |
+
|
| 86 |
+
# ensure multiple random_state parameters are invariant to get_params()
|
| 87 |
+
# iteration order
|
| 88 |
+
|
| 89 |
+
class AlphaParamPipeline(Pipeline):
|
| 90 |
+
def get_params(self, *args, **kwargs):
|
| 91 |
+
params = Pipeline.get_params(self, *args, **kwargs).items()
|
| 92 |
+
return OrderedDict(sorted(params))
|
| 93 |
+
|
| 94 |
+
class RevParamPipeline(Pipeline):
|
| 95 |
+
def get_params(self, *args, **kwargs):
|
| 96 |
+
params = Pipeline.get_params(self, *args, **kwargs).items()
|
| 97 |
+
return OrderedDict(sorted(params, reverse=True))
|
| 98 |
+
|
| 99 |
+
for cls in [AlphaParamPipeline, RevParamPipeline]:
|
| 100 |
+
est2 = cls(make_steps())
|
| 101 |
+
_set_random_states(est2, 3)
|
| 102 |
+
assert (
|
| 103 |
+
est1.get_params()["sel__estimator__random_state"]
|
| 104 |
+
== est2.get_params()["sel__estimator__random_state"]
|
| 105 |
+
)
|
| 106 |
+
assert (
|
| 107 |
+
est1.get_params()["clf__random_state"]
|
| 108 |
+
== est2.get_params()["clf__random_state"]
|
| 109 |
+
)
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/test_stacking.py
ADDED
|
@@ -0,0 +1,1019 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Test the stacking classifier and regressor."""
|
| 2 |
+
|
| 3 |
+
# Authors: The scikit-learn developers
|
| 4 |
+
# SPDX-License-Identifier: BSD-3-Clause
|
| 5 |
+
|
| 6 |
+
import re
|
| 7 |
+
from unittest.mock import Mock
|
| 8 |
+
|
| 9 |
+
import numpy as np
|
| 10 |
+
import pytest
|
| 11 |
+
from numpy.testing import assert_array_equal
|
| 12 |
+
from scipy import sparse
|
| 13 |
+
|
| 14 |
+
from sklearn import config_context
|
| 15 |
+
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin, clone
|
| 16 |
+
from sklearn.datasets import (
|
| 17 |
+
load_breast_cancer,
|
| 18 |
+
load_diabetes,
|
| 19 |
+
load_iris,
|
| 20 |
+
make_classification,
|
| 21 |
+
make_multilabel_classification,
|
| 22 |
+
make_regression,
|
| 23 |
+
)
|
| 24 |
+
from sklearn.dummy import DummyClassifier, DummyRegressor
|
| 25 |
+
from sklearn.ensemble import (
|
| 26 |
+
RandomForestClassifier,
|
| 27 |
+
RandomForestRegressor,
|
| 28 |
+
StackingClassifier,
|
| 29 |
+
StackingRegressor,
|
| 30 |
+
)
|
| 31 |
+
from sklearn.exceptions import ConvergenceWarning, NotFittedError
|
| 32 |
+
from sklearn.linear_model import (
|
| 33 |
+
LinearRegression,
|
| 34 |
+
LogisticRegression,
|
| 35 |
+
Ridge,
|
| 36 |
+
RidgeClassifier,
|
| 37 |
+
)
|
| 38 |
+
from sklearn.model_selection import KFold, StratifiedKFold, train_test_split
|
| 39 |
+
from sklearn.neighbors import KNeighborsClassifier
|
| 40 |
+
from sklearn.neural_network import MLPClassifier
|
| 41 |
+
from sklearn.preprocessing import scale
|
| 42 |
+
from sklearn.svm import SVC, LinearSVC, LinearSVR
|
| 43 |
+
from sklearn.tests.metadata_routing_common import (
|
| 44 |
+
ConsumingClassifier,
|
| 45 |
+
ConsumingRegressor,
|
| 46 |
+
_Registry,
|
| 47 |
+
check_recorded_metadata,
|
| 48 |
+
)
|
| 49 |
+
from sklearn.utils._mocking import CheckingClassifier
|
| 50 |
+
from sklearn.utils._testing import (
|
| 51 |
+
assert_allclose,
|
| 52 |
+
assert_allclose_dense_sparse,
|
| 53 |
+
ignore_warnings,
|
| 54 |
+
)
|
| 55 |
+
from sklearn.utils.fixes import COO_CONTAINERS, CSC_CONTAINERS, CSR_CONTAINERS
|
| 56 |
+
|
| 57 |
+
diabetes = load_diabetes()
|
| 58 |
+
X_diabetes, y_diabetes = diabetes.data, diabetes.target
|
| 59 |
+
iris = load_iris()
|
| 60 |
+
X_iris, y_iris = iris.data, iris.target
|
| 61 |
+
X_multilabel, y_multilabel = make_multilabel_classification(
|
| 62 |
+
n_classes=3, random_state=42
|
| 63 |
+
)
|
| 64 |
+
X_binary, y_binary = make_classification(n_classes=2, random_state=42)
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
@pytest.mark.parametrize(
|
| 68 |
+
"cv", [3, StratifiedKFold(n_splits=3, shuffle=True, random_state=42)]
|
| 69 |
+
)
|
| 70 |
+
@pytest.mark.parametrize(
|
| 71 |
+
"final_estimator", [None, RandomForestClassifier(random_state=42)]
|
| 72 |
+
)
|
| 73 |
+
@pytest.mark.parametrize("passthrough", [False, True])
|
| 74 |
+
def test_stacking_classifier_iris(cv, final_estimator, passthrough):
|
| 75 |
+
# prescale the data to avoid convergence warning without using a pipeline
|
| 76 |
+
# for later assert
|
| 77 |
+
X_train, X_test, y_train, y_test = train_test_split(
|
| 78 |
+
scale(X_iris), y_iris, stratify=y_iris, random_state=42
|
| 79 |
+
)
|
| 80 |
+
estimators = [("lr", LogisticRegression()), ("svc", LinearSVC())]
|
| 81 |
+
clf = StackingClassifier(
|
| 82 |
+
estimators=estimators,
|
| 83 |
+
final_estimator=final_estimator,
|
| 84 |
+
cv=cv,
|
| 85 |
+
passthrough=passthrough,
|
| 86 |
+
)
|
| 87 |
+
clf.fit(X_train, y_train)
|
| 88 |
+
clf.predict(X_test)
|
| 89 |
+
clf.predict_proba(X_test)
|
| 90 |
+
assert clf.score(X_test, y_test) > 0.8
|
| 91 |
+
|
| 92 |
+
X_trans = clf.transform(X_test)
|
| 93 |
+
expected_column_count = 10 if passthrough else 6
|
| 94 |
+
assert X_trans.shape[1] == expected_column_count
|
| 95 |
+
if passthrough:
|
| 96 |
+
assert_allclose(X_test, X_trans[:, -4:])
|
| 97 |
+
|
| 98 |
+
clf.set_params(lr="drop")
|
| 99 |
+
clf.fit(X_train, y_train)
|
| 100 |
+
clf.predict(X_test)
|
| 101 |
+
clf.predict_proba(X_test)
|
| 102 |
+
if final_estimator is None:
|
| 103 |
+
# LogisticRegression has decision_function method
|
| 104 |
+
clf.decision_function(X_test)
|
| 105 |
+
|
| 106 |
+
X_trans = clf.transform(X_test)
|
| 107 |
+
expected_column_count_drop = 7 if passthrough else 3
|
| 108 |
+
assert X_trans.shape[1] == expected_column_count_drop
|
| 109 |
+
if passthrough:
|
| 110 |
+
assert_allclose(X_test, X_trans[:, -4:])
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def test_stacking_classifier_drop_column_binary_classification():
|
| 114 |
+
# check that a column is dropped in binary classification
|
| 115 |
+
X, y = load_breast_cancer(return_X_y=True)
|
| 116 |
+
X_train, X_test, y_train, _ = train_test_split(
|
| 117 |
+
scale(X), y, stratify=y, random_state=42
|
| 118 |
+
)
|
| 119 |
+
|
| 120 |
+
# both classifiers implement 'predict_proba' and will both drop one column
|
| 121 |
+
estimators = [
|
| 122 |
+
("lr", LogisticRegression()),
|
| 123 |
+
("rf", RandomForestClassifier(random_state=42)),
|
| 124 |
+
]
|
| 125 |
+
clf = StackingClassifier(estimators=estimators, cv=3)
|
| 126 |
+
|
| 127 |
+
clf.fit(X_train, y_train)
|
| 128 |
+
X_trans = clf.transform(X_test)
|
| 129 |
+
assert X_trans.shape[1] == 2
|
| 130 |
+
|
| 131 |
+
# LinearSVC does not implement 'predict_proba' and will not drop one column
|
| 132 |
+
estimators = [("lr", LogisticRegression()), ("svc", LinearSVC())]
|
| 133 |
+
clf.set_params(estimators=estimators)
|
| 134 |
+
|
| 135 |
+
clf.fit(X_train, y_train)
|
| 136 |
+
X_trans = clf.transform(X_test)
|
| 137 |
+
assert X_trans.shape[1] == 2
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
def test_stacking_classifier_drop_estimator():
|
| 141 |
+
# prescale the data to avoid convergence warning without using a pipeline
|
| 142 |
+
# for later assert
|
| 143 |
+
X_train, X_test, y_train, _ = train_test_split(
|
| 144 |
+
scale(X_iris), y_iris, stratify=y_iris, random_state=42
|
| 145 |
+
)
|
| 146 |
+
estimators = [("lr", "drop"), ("svc", LinearSVC(random_state=0))]
|
| 147 |
+
rf = RandomForestClassifier(n_estimators=10, random_state=42)
|
| 148 |
+
clf = StackingClassifier(
|
| 149 |
+
estimators=[("svc", LinearSVC(random_state=0))],
|
| 150 |
+
final_estimator=rf,
|
| 151 |
+
cv=5,
|
| 152 |
+
)
|
| 153 |
+
clf_drop = StackingClassifier(estimators=estimators, final_estimator=rf, cv=5)
|
| 154 |
+
|
| 155 |
+
clf.fit(X_train, y_train)
|
| 156 |
+
clf_drop.fit(X_train, y_train)
|
| 157 |
+
assert_allclose(clf.predict(X_test), clf_drop.predict(X_test))
|
| 158 |
+
assert_allclose(clf.predict_proba(X_test), clf_drop.predict_proba(X_test))
|
| 159 |
+
assert_allclose(clf.transform(X_test), clf_drop.transform(X_test))
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
def test_stacking_regressor_drop_estimator():
|
| 163 |
+
# prescale the data to avoid convergence warning without using a pipeline
|
| 164 |
+
# for later assert
|
| 165 |
+
X_train, X_test, y_train, _ = train_test_split(
|
| 166 |
+
scale(X_diabetes), y_diabetes, random_state=42
|
| 167 |
+
)
|
| 168 |
+
estimators = [("lr", "drop"), ("svr", LinearSVR(random_state=0))]
|
| 169 |
+
rf = RandomForestRegressor(n_estimators=10, random_state=42)
|
| 170 |
+
reg = StackingRegressor(
|
| 171 |
+
estimators=[("svr", LinearSVR(random_state=0))],
|
| 172 |
+
final_estimator=rf,
|
| 173 |
+
cv=5,
|
| 174 |
+
)
|
| 175 |
+
reg_drop = StackingRegressor(estimators=estimators, final_estimator=rf, cv=5)
|
| 176 |
+
|
| 177 |
+
reg.fit(X_train, y_train)
|
| 178 |
+
reg_drop.fit(X_train, y_train)
|
| 179 |
+
assert_allclose(reg.predict(X_test), reg_drop.predict(X_test))
|
| 180 |
+
assert_allclose(reg.transform(X_test), reg_drop.transform(X_test))
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
@pytest.mark.parametrize("cv", [3, KFold(n_splits=3, shuffle=True, random_state=42)])
|
| 184 |
+
@pytest.mark.parametrize(
|
| 185 |
+
"final_estimator, predict_params",
|
| 186 |
+
[
|
| 187 |
+
(None, {}),
|
| 188 |
+
(RandomForestRegressor(random_state=42), {}),
|
| 189 |
+
(DummyRegressor(), {"return_std": True}),
|
| 190 |
+
],
|
| 191 |
+
)
|
| 192 |
+
@pytest.mark.parametrize("passthrough", [False, True])
|
| 193 |
+
def test_stacking_regressor_diabetes(cv, final_estimator, predict_params, passthrough):
|
| 194 |
+
# prescale the data to avoid convergence warning without using a pipeline
|
| 195 |
+
# for later assert
|
| 196 |
+
X_train, X_test, y_train, _ = train_test_split(
|
| 197 |
+
scale(X_diabetes), y_diabetes, random_state=42
|
| 198 |
+
)
|
| 199 |
+
estimators = [("lr", LinearRegression()), ("svr", LinearSVR())]
|
| 200 |
+
reg = StackingRegressor(
|
| 201 |
+
estimators=estimators,
|
| 202 |
+
final_estimator=final_estimator,
|
| 203 |
+
cv=cv,
|
| 204 |
+
passthrough=passthrough,
|
| 205 |
+
)
|
| 206 |
+
reg.fit(X_train, y_train)
|
| 207 |
+
result = reg.predict(X_test, **predict_params)
|
| 208 |
+
expected_result_length = 2 if predict_params else 1
|
| 209 |
+
if predict_params:
|
| 210 |
+
assert len(result) == expected_result_length
|
| 211 |
+
|
| 212 |
+
X_trans = reg.transform(X_test)
|
| 213 |
+
expected_column_count = 12 if passthrough else 2
|
| 214 |
+
assert X_trans.shape[1] == expected_column_count
|
| 215 |
+
if passthrough:
|
| 216 |
+
assert_allclose(X_test, X_trans[:, -10:])
|
| 217 |
+
|
| 218 |
+
reg.set_params(lr="drop")
|
| 219 |
+
reg.fit(X_train, y_train)
|
| 220 |
+
reg.predict(X_test)
|
| 221 |
+
|
| 222 |
+
X_trans = reg.transform(X_test)
|
| 223 |
+
expected_column_count_drop = 11 if passthrough else 1
|
| 224 |
+
assert X_trans.shape[1] == expected_column_count_drop
|
| 225 |
+
if passthrough:
|
| 226 |
+
assert_allclose(X_test, X_trans[:, -10:])
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
@pytest.mark.parametrize(
|
| 230 |
+
"sparse_container", COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS
|
| 231 |
+
)
|
| 232 |
+
def test_stacking_regressor_sparse_passthrough(sparse_container):
|
| 233 |
+
# Check passthrough behavior on a sparse X matrix
|
| 234 |
+
X_train, X_test, y_train, _ = train_test_split(
|
| 235 |
+
sparse_container(scale(X_diabetes)), y_diabetes, random_state=42
|
| 236 |
+
)
|
| 237 |
+
estimators = [("lr", LinearRegression()), ("svr", LinearSVR())]
|
| 238 |
+
rf = RandomForestRegressor(n_estimators=10, random_state=42)
|
| 239 |
+
clf = StackingRegressor(
|
| 240 |
+
estimators=estimators, final_estimator=rf, cv=5, passthrough=True
|
| 241 |
+
)
|
| 242 |
+
clf.fit(X_train, y_train)
|
| 243 |
+
X_trans = clf.transform(X_test)
|
| 244 |
+
assert_allclose_dense_sparse(X_test, X_trans[:, -10:])
|
| 245 |
+
assert sparse.issparse(X_trans)
|
| 246 |
+
assert X_test.format == X_trans.format
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
@pytest.mark.parametrize(
|
| 250 |
+
"sparse_container", COO_CONTAINERS + CSC_CONTAINERS + CSR_CONTAINERS
|
| 251 |
+
)
|
| 252 |
+
def test_stacking_classifier_sparse_passthrough(sparse_container):
|
| 253 |
+
# Check passthrough behavior on a sparse X matrix
|
| 254 |
+
X_train, X_test, y_train, _ = train_test_split(
|
| 255 |
+
sparse_container(scale(X_iris)), y_iris, random_state=42
|
| 256 |
+
)
|
| 257 |
+
estimators = [("lr", LogisticRegression()), ("svc", LinearSVC())]
|
| 258 |
+
rf = RandomForestClassifier(n_estimators=10, random_state=42)
|
| 259 |
+
clf = StackingClassifier(
|
| 260 |
+
estimators=estimators, final_estimator=rf, cv=5, passthrough=True
|
| 261 |
+
)
|
| 262 |
+
clf.fit(X_train, y_train)
|
| 263 |
+
X_trans = clf.transform(X_test)
|
| 264 |
+
assert_allclose_dense_sparse(X_test, X_trans[:, -4:])
|
| 265 |
+
assert sparse.issparse(X_trans)
|
| 266 |
+
assert X_test.format == X_trans.format
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
def test_stacking_classifier_drop_binary_prob():
|
| 270 |
+
# check that classifier will drop one of the probability column for
|
| 271 |
+
# binary classification problem
|
| 272 |
+
|
| 273 |
+
# Select only the 2 first classes
|
| 274 |
+
X_, y_ = scale(X_iris[:100]), y_iris[:100]
|
| 275 |
+
|
| 276 |
+
estimators = [("lr", LogisticRegression()), ("rf", RandomForestClassifier())]
|
| 277 |
+
clf = StackingClassifier(estimators=estimators)
|
| 278 |
+
clf.fit(X_, y_)
|
| 279 |
+
X_meta = clf.transform(X_)
|
| 280 |
+
assert X_meta.shape[1] == 2
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
class NoWeightRegressor(RegressorMixin, BaseEstimator):
|
| 284 |
+
def fit(self, X, y):
|
| 285 |
+
self.reg = DummyRegressor()
|
| 286 |
+
return self.reg.fit(X, y)
|
| 287 |
+
|
| 288 |
+
def predict(self, X):
|
| 289 |
+
return np.ones(X.shape[0])
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
class NoWeightClassifier(ClassifierMixin, BaseEstimator):
|
| 293 |
+
def fit(self, X, y):
|
| 294 |
+
self.clf = DummyClassifier(strategy="stratified")
|
| 295 |
+
return self.clf.fit(X, y)
|
| 296 |
+
|
| 297 |
+
|
| 298 |
+
@pytest.mark.parametrize(
|
| 299 |
+
"y, params, type_err, msg_err",
|
| 300 |
+
[
|
| 301 |
+
(y_iris, {"estimators": []}, ValueError, "Invalid 'estimators' attribute,"),
|
| 302 |
+
(
|
| 303 |
+
y_iris,
|
| 304 |
+
{
|
| 305 |
+
"estimators": [
|
| 306 |
+
("lr", LogisticRegression()),
|
| 307 |
+
("svm", SVC(max_iter=50_000)),
|
| 308 |
+
],
|
| 309 |
+
"stack_method": "predict_proba",
|
| 310 |
+
},
|
| 311 |
+
ValueError,
|
| 312 |
+
"does not implement the method predict_proba",
|
| 313 |
+
),
|
| 314 |
+
(
|
| 315 |
+
y_iris,
|
| 316 |
+
{
|
| 317 |
+
"estimators": [
|
| 318 |
+
("lr", LogisticRegression()),
|
| 319 |
+
("cor", NoWeightClassifier()),
|
| 320 |
+
]
|
| 321 |
+
},
|
| 322 |
+
TypeError,
|
| 323 |
+
"does not support sample weight",
|
| 324 |
+
),
|
| 325 |
+
(
|
| 326 |
+
y_iris,
|
| 327 |
+
{
|
| 328 |
+
"estimators": [
|
| 329 |
+
("lr", LogisticRegression()),
|
| 330 |
+
("cor", LinearSVC(max_iter=50_000)),
|
| 331 |
+
],
|
| 332 |
+
"final_estimator": NoWeightClassifier(),
|
| 333 |
+
},
|
| 334 |
+
TypeError,
|
| 335 |
+
"does not support sample weight",
|
| 336 |
+
),
|
| 337 |
+
],
|
| 338 |
+
)
|
| 339 |
+
def test_stacking_classifier_error(y, params, type_err, msg_err):
|
| 340 |
+
with pytest.raises(type_err, match=msg_err):
|
| 341 |
+
clf = StackingClassifier(**params, cv=3)
|
| 342 |
+
clf.fit(scale(X_iris), y, sample_weight=np.ones(X_iris.shape[0]))
|
| 343 |
+
|
| 344 |
+
|
| 345 |
+
@pytest.mark.parametrize(
|
| 346 |
+
"y, params, type_err, msg_err",
|
| 347 |
+
[
|
| 348 |
+
(y_diabetes, {"estimators": []}, ValueError, "Invalid 'estimators' attribute,"),
|
| 349 |
+
(
|
| 350 |
+
y_diabetes,
|
| 351 |
+
{"estimators": [("lr", LinearRegression()), ("cor", NoWeightRegressor())]},
|
| 352 |
+
TypeError,
|
| 353 |
+
"does not support sample weight",
|
| 354 |
+
),
|
| 355 |
+
(
|
| 356 |
+
y_diabetes,
|
| 357 |
+
{
|
| 358 |
+
"estimators": [
|
| 359 |
+
("lr", LinearRegression()),
|
| 360 |
+
("cor", LinearSVR()),
|
| 361 |
+
],
|
| 362 |
+
"final_estimator": NoWeightRegressor(),
|
| 363 |
+
},
|
| 364 |
+
TypeError,
|
| 365 |
+
"does not support sample weight",
|
| 366 |
+
),
|
| 367 |
+
],
|
| 368 |
+
)
|
| 369 |
+
def test_stacking_regressor_error(y, params, type_err, msg_err):
|
| 370 |
+
with pytest.raises(type_err, match=msg_err):
|
| 371 |
+
reg = StackingRegressor(**params, cv=3)
|
| 372 |
+
reg.fit(scale(X_diabetes), y, sample_weight=np.ones(X_diabetes.shape[0]))
|
| 373 |
+
|
| 374 |
+
|
| 375 |
+
@pytest.mark.parametrize(
|
| 376 |
+
"estimator, X, y",
|
| 377 |
+
[
|
| 378 |
+
(
|
| 379 |
+
StackingClassifier(
|
| 380 |
+
estimators=[
|
| 381 |
+
("lr", LogisticRegression(random_state=0)),
|
| 382 |
+
("svm", LinearSVC(random_state=0)),
|
| 383 |
+
]
|
| 384 |
+
),
|
| 385 |
+
X_iris[:100],
|
| 386 |
+
y_iris[:100],
|
| 387 |
+
), # keep only classes 0 and 1
|
| 388 |
+
(
|
| 389 |
+
StackingRegressor(
|
| 390 |
+
estimators=[
|
| 391 |
+
("lr", LinearRegression()),
|
| 392 |
+
("svm", LinearSVR(random_state=0)),
|
| 393 |
+
]
|
| 394 |
+
),
|
| 395 |
+
X_diabetes,
|
| 396 |
+
y_diabetes,
|
| 397 |
+
),
|
| 398 |
+
],
|
| 399 |
+
ids=["StackingClassifier", "StackingRegressor"],
|
| 400 |
+
)
|
| 401 |
+
def test_stacking_randomness(estimator, X, y):
|
| 402 |
+
# checking that fixing the random state of the CV will lead to the same
|
| 403 |
+
# results
|
| 404 |
+
estimator_full = clone(estimator)
|
| 405 |
+
estimator_full.set_params(
|
| 406 |
+
cv=KFold(shuffle=True, random_state=np.random.RandomState(0))
|
| 407 |
+
)
|
| 408 |
+
|
| 409 |
+
estimator_drop = clone(estimator)
|
| 410 |
+
estimator_drop.set_params(lr="drop")
|
| 411 |
+
estimator_drop.set_params(
|
| 412 |
+
cv=KFold(shuffle=True, random_state=np.random.RandomState(0))
|
| 413 |
+
)
|
| 414 |
+
|
| 415 |
+
assert_allclose(
|
| 416 |
+
estimator_full.fit(X, y).transform(X)[:, 1:],
|
| 417 |
+
estimator_drop.fit(X, y).transform(X),
|
| 418 |
+
)
|
| 419 |
+
|
| 420 |
+
|
| 421 |
+
def test_stacking_classifier_stratify_default():
|
| 422 |
+
# check that we stratify the classes for the default CV
|
| 423 |
+
clf = StackingClassifier(
|
| 424 |
+
estimators=[
|
| 425 |
+
("lr", LogisticRegression(max_iter=10_000)),
|
| 426 |
+
("svm", LinearSVC(max_iter=10_000)),
|
| 427 |
+
]
|
| 428 |
+
)
|
| 429 |
+
# since iris is not shuffled, a simple k-fold would not contain the
|
| 430 |
+
# 3 classes during training
|
| 431 |
+
clf.fit(X_iris, y_iris)
|
| 432 |
+
|
| 433 |
+
|
| 434 |
+
@pytest.mark.parametrize(
|
| 435 |
+
"stacker, X, y",
|
| 436 |
+
[
|
| 437 |
+
(
|
| 438 |
+
StackingClassifier(
|
| 439 |
+
estimators=[
|
| 440 |
+
("lr", LogisticRegression()),
|
| 441 |
+
("svm", LinearSVC(random_state=42)),
|
| 442 |
+
],
|
| 443 |
+
final_estimator=LogisticRegression(),
|
| 444 |
+
cv=KFold(shuffle=True, random_state=42),
|
| 445 |
+
),
|
| 446 |
+
*load_breast_cancer(return_X_y=True),
|
| 447 |
+
),
|
| 448 |
+
(
|
| 449 |
+
StackingRegressor(
|
| 450 |
+
estimators=[
|
| 451 |
+
("lr", LinearRegression()),
|
| 452 |
+
("svm", LinearSVR(random_state=42)),
|
| 453 |
+
],
|
| 454 |
+
final_estimator=LinearRegression(),
|
| 455 |
+
cv=KFold(shuffle=True, random_state=42),
|
| 456 |
+
),
|
| 457 |
+
X_diabetes,
|
| 458 |
+
y_diabetes,
|
| 459 |
+
),
|
| 460 |
+
],
|
| 461 |
+
ids=["StackingClassifier", "StackingRegressor"],
|
| 462 |
+
)
|
| 463 |
+
def test_stacking_with_sample_weight(stacker, X, y):
|
| 464 |
+
# check that sample weights has an influence on the fitting
|
| 465 |
+
# note: ConvergenceWarning are catch since we are not worrying about the
|
| 466 |
+
# convergence here
|
| 467 |
+
n_half_samples = len(y) // 2
|
| 468 |
+
total_sample_weight = np.array(
|
| 469 |
+
[0.1] * n_half_samples + [0.9] * (len(y) - n_half_samples)
|
| 470 |
+
)
|
| 471 |
+
X_train, X_test, y_train, _, sample_weight_train, _ = train_test_split(
|
| 472 |
+
X, y, total_sample_weight, random_state=42
|
| 473 |
+
)
|
| 474 |
+
|
| 475 |
+
with ignore_warnings(category=ConvergenceWarning):
|
| 476 |
+
stacker.fit(X_train, y_train)
|
| 477 |
+
y_pred_no_weight = stacker.predict(X_test)
|
| 478 |
+
|
| 479 |
+
with ignore_warnings(category=ConvergenceWarning):
|
| 480 |
+
stacker.fit(X_train, y_train, sample_weight=np.ones(y_train.shape))
|
| 481 |
+
y_pred_unit_weight = stacker.predict(X_test)
|
| 482 |
+
|
| 483 |
+
assert_allclose(y_pred_no_weight, y_pred_unit_weight)
|
| 484 |
+
|
| 485 |
+
with ignore_warnings(category=ConvergenceWarning):
|
| 486 |
+
stacker.fit(X_train, y_train, sample_weight=sample_weight_train)
|
| 487 |
+
y_pred_biased = stacker.predict(X_test)
|
| 488 |
+
|
| 489 |
+
assert np.abs(y_pred_no_weight - y_pred_biased).sum() > 0
|
| 490 |
+
|
| 491 |
+
|
| 492 |
+
def test_stacking_classifier_sample_weight_fit_param():
|
| 493 |
+
# check sample_weight is passed to all invocations of fit
|
| 494 |
+
stacker = StackingClassifier(
|
| 495 |
+
estimators=[("lr", CheckingClassifier(expected_sample_weight=True))],
|
| 496 |
+
final_estimator=CheckingClassifier(expected_sample_weight=True),
|
| 497 |
+
)
|
| 498 |
+
stacker.fit(X_iris, y_iris, sample_weight=np.ones(X_iris.shape[0]))
|
| 499 |
+
|
| 500 |
+
|
| 501 |
+
@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning")
|
| 502 |
+
@pytest.mark.parametrize(
|
| 503 |
+
"stacker, X, y",
|
| 504 |
+
[
|
| 505 |
+
(
|
| 506 |
+
StackingClassifier(
|
| 507 |
+
estimators=[
|
| 508 |
+
("lr", LogisticRegression()),
|
| 509 |
+
("svm", LinearSVC(random_state=42)),
|
| 510 |
+
],
|
| 511 |
+
final_estimator=LogisticRegression(),
|
| 512 |
+
),
|
| 513 |
+
*load_breast_cancer(return_X_y=True),
|
| 514 |
+
),
|
| 515 |
+
(
|
| 516 |
+
StackingRegressor(
|
| 517 |
+
estimators=[
|
| 518 |
+
("lr", LinearRegression()),
|
| 519 |
+
("svm", LinearSVR(random_state=42)),
|
| 520 |
+
],
|
| 521 |
+
final_estimator=LinearRegression(),
|
| 522 |
+
),
|
| 523 |
+
X_diabetes,
|
| 524 |
+
y_diabetes,
|
| 525 |
+
),
|
| 526 |
+
],
|
| 527 |
+
ids=["StackingClassifier", "StackingRegressor"],
|
| 528 |
+
)
|
| 529 |
+
def test_stacking_cv_influence(stacker, X, y):
|
| 530 |
+
# check that the stacking affects the fit of the final estimator but not
|
| 531 |
+
# the fit of the base estimators
|
| 532 |
+
# note: ConvergenceWarning are catch since we are not worrying about the
|
| 533 |
+
# convergence here
|
| 534 |
+
stacker_cv_3 = clone(stacker)
|
| 535 |
+
stacker_cv_5 = clone(stacker)
|
| 536 |
+
|
| 537 |
+
stacker_cv_3.set_params(cv=3)
|
| 538 |
+
stacker_cv_5.set_params(cv=5)
|
| 539 |
+
|
| 540 |
+
stacker_cv_3.fit(X, y)
|
| 541 |
+
stacker_cv_5.fit(X, y)
|
| 542 |
+
|
| 543 |
+
# the base estimators should be identical
|
| 544 |
+
for est_cv_3, est_cv_5 in zip(stacker_cv_3.estimators_, stacker_cv_5.estimators_):
|
| 545 |
+
assert_allclose(est_cv_3.coef_, est_cv_5.coef_)
|
| 546 |
+
|
| 547 |
+
# the final estimator should be different
|
| 548 |
+
with pytest.raises(AssertionError, match="Not equal"):
|
| 549 |
+
assert_allclose(
|
| 550 |
+
stacker_cv_3.final_estimator_.coef_, stacker_cv_5.final_estimator_.coef_
|
| 551 |
+
)
|
| 552 |
+
|
| 553 |
+
|
| 554 |
+
@pytest.mark.parametrize(
|
| 555 |
+
"Stacker, Estimator, stack_method, final_estimator, X, y",
|
| 556 |
+
[
|
| 557 |
+
(
|
| 558 |
+
StackingClassifier,
|
| 559 |
+
DummyClassifier,
|
| 560 |
+
"predict_proba",
|
| 561 |
+
LogisticRegression(random_state=42),
|
| 562 |
+
X_iris,
|
| 563 |
+
y_iris,
|
| 564 |
+
),
|
| 565 |
+
(
|
| 566 |
+
StackingRegressor,
|
| 567 |
+
DummyRegressor,
|
| 568 |
+
"predict",
|
| 569 |
+
LinearRegression(),
|
| 570 |
+
X_diabetes,
|
| 571 |
+
y_diabetes,
|
| 572 |
+
),
|
| 573 |
+
],
|
| 574 |
+
)
|
| 575 |
+
def test_stacking_prefit(Stacker, Estimator, stack_method, final_estimator, X, y):
|
| 576 |
+
"""Check the behaviour of stacking when `cv='prefit'`"""
|
| 577 |
+
X_train1, X_train2, y_train1, y_train2 = train_test_split(
|
| 578 |
+
X, y, random_state=42, test_size=0.5
|
| 579 |
+
)
|
| 580 |
+
estimators = [
|
| 581 |
+
("d0", Estimator().fit(X_train1, y_train1)),
|
| 582 |
+
("d1", Estimator().fit(X_train1, y_train1)),
|
| 583 |
+
]
|
| 584 |
+
|
| 585 |
+
# mock out fit and stack_method to be asserted later
|
| 586 |
+
for _, estimator in estimators:
|
| 587 |
+
estimator.fit = Mock(name="fit")
|
| 588 |
+
stack_func = getattr(estimator, stack_method)
|
| 589 |
+
predict_method_mocked = Mock(side_effect=stack_func)
|
| 590 |
+
# Mocking a method will not provide a `__name__` while Python methods
|
| 591 |
+
# do and we are using it in `_get_response_method`.
|
| 592 |
+
predict_method_mocked.__name__ = stack_method
|
| 593 |
+
setattr(estimator, stack_method, predict_method_mocked)
|
| 594 |
+
|
| 595 |
+
stacker = Stacker(
|
| 596 |
+
estimators=estimators, cv="prefit", final_estimator=final_estimator
|
| 597 |
+
)
|
| 598 |
+
stacker.fit(X_train2, y_train2)
|
| 599 |
+
|
| 600 |
+
assert stacker.estimators_ == [estimator for _, estimator in estimators]
|
| 601 |
+
# fit was not called again
|
| 602 |
+
assert all(estimator.fit.call_count == 0 for estimator in stacker.estimators_)
|
| 603 |
+
|
| 604 |
+
# stack method is called with the proper inputs
|
| 605 |
+
for estimator in stacker.estimators_:
|
| 606 |
+
stack_func_mock = getattr(estimator, stack_method)
|
| 607 |
+
stack_func_mock.assert_called_with(X_train2)
|
| 608 |
+
|
| 609 |
+
|
| 610 |
+
@pytest.mark.parametrize(
|
| 611 |
+
"stacker, X, y",
|
| 612 |
+
[
|
| 613 |
+
(
|
| 614 |
+
StackingClassifier(
|
| 615 |
+
estimators=[("lr", LogisticRegression()), ("svm", SVC())],
|
| 616 |
+
cv="prefit",
|
| 617 |
+
),
|
| 618 |
+
X_iris,
|
| 619 |
+
y_iris,
|
| 620 |
+
),
|
| 621 |
+
(
|
| 622 |
+
StackingRegressor(
|
| 623 |
+
estimators=[
|
| 624 |
+
("lr", LinearRegression()),
|
| 625 |
+
("svm", LinearSVR()),
|
| 626 |
+
],
|
| 627 |
+
cv="prefit",
|
| 628 |
+
),
|
| 629 |
+
X_diabetes,
|
| 630 |
+
y_diabetes,
|
| 631 |
+
),
|
| 632 |
+
],
|
| 633 |
+
)
|
| 634 |
+
def test_stacking_prefit_error(stacker, X, y):
|
| 635 |
+
# check that NotFittedError is raised
|
| 636 |
+
# if base estimators are not fitted when cv="prefit"
|
| 637 |
+
with pytest.raises(NotFittedError):
|
| 638 |
+
stacker.fit(X, y)
|
| 639 |
+
|
| 640 |
+
|
| 641 |
+
@pytest.mark.parametrize(
|
| 642 |
+
"make_dataset, Stacking, Estimator",
|
| 643 |
+
[
|
| 644 |
+
(make_classification, StackingClassifier, LogisticRegression),
|
| 645 |
+
(make_regression, StackingRegressor, LinearRegression),
|
| 646 |
+
],
|
| 647 |
+
)
|
| 648 |
+
def test_stacking_without_n_features_in(make_dataset, Stacking, Estimator):
|
| 649 |
+
# Stacking supports estimators without `n_features_in_`. Regression test
|
| 650 |
+
# for #17353
|
| 651 |
+
|
| 652 |
+
class MyEstimator(Estimator):
|
| 653 |
+
"""Estimator without n_features_in_"""
|
| 654 |
+
|
| 655 |
+
def fit(self, X, y):
|
| 656 |
+
super().fit(X, y)
|
| 657 |
+
del self.n_features_in_
|
| 658 |
+
|
| 659 |
+
X, y = make_dataset(random_state=0, n_samples=100)
|
| 660 |
+
stacker = Stacking(estimators=[("lr", MyEstimator())])
|
| 661 |
+
|
| 662 |
+
msg = f"{Stacking.__name__} object has no attribute n_features_in_"
|
| 663 |
+
with pytest.raises(AttributeError, match=msg):
|
| 664 |
+
stacker.n_features_in_
|
| 665 |
+
|
| 666 |
+
# Does not raise
|
| 667 |
+
stacker.fit(X, y)
|
| 668 |
+
|
| 669 |
+
msg = "'MyEstimator' object has no attribute 'n_features_in_'"
|
| 670 |
+
with pytest.raises(AttributeError, match=msg):
|
| 671 |
+
stacker.n_features_in_
|
| 672 |
+
|
| 673 |
+
|
| 674 |
+
@pytest.mark.parametrize(
|
| 675 |
+
"estimator",
|
| 676 |
+
[
|
| 677 |
+
# output a 2D array of the probability of the positive class for each output
|
| 678 |
+
MLPClassifier(random_state=42),
|
| 679 |
+
# output a list of 2D array containing the probability of each class
|
| 680 |
+
# for each output
|
| 681 |
+
RandomForestClassifier(random_state=42),
|
| 682 |
+
],
|
| 683 |
+
ids=["MLPClassifier", "RandomForestClassifier"],
|
| 684 |
+
)
|
| 685 |
+
def test_stacking_classifier_multilabel_predict_proba(estimator):
|
| 686 |
+
"""Check the behaviour for the multilabel classification case and the
|
| 687 |
+
`predict_proba` stacking method.
|
| 688 |
+
|
| 689 |
+
Estimators are not consistent with the output arrays and we need to ensure that
|
| 690 |
+
we handle all cases.
|
| 691 |
+
"""
|
| 692 |
+
X_train, X_test, y_train, y_test = train_test_split(
|
| 693 |
+
X_multilabel, y_multilabel, stratify=y_multilabel, random_state=42
|
| 694 |
+
)
|
| 695 |
+
n_outputs = 3
|
| 696 |
+
|
| 697 |
+
estimators = [("est", estimator)]
|
| 698 |
+
stacker = StackingClassifier(
|
| 699 |
+
estimators=estimators,
|
| 700 |
+
final_estimator=KNeighborsClassifier(),
|
| 701 |
+
stack_method="predict_proba",
|
| 702 |
+
).fit(X_train, y_train)
|
| 703 |
+
|
| 704 |
+
X_trans = stacker.transform(X_test)
|
| 705 |
+
assert X_trans.shape == (X_test.shape[0], n_outputs)
|
| 706 |
+
# we should not have any collinear classes and thus nothing should sum to 1
|
| 707 |
+
assert not any(np.isclose(X_trans.sum(axis=1), 1.0))
|
| 708 |
+
|
| 709 |
+
y_pred = stacker.predict(X_test)
|
| 710 |
+
assert y_pred.shape == y_test.shape
|
| 711 |
+
|
| 712 |
+
|
| 713 |
+
def test_stacking_classifier_multilabel_decision_function():
|
| 714 |
+
"""Check the behaviour for the multilabel classification case and the
|
| 715 |
+
`decision_function` stacking method. Only `RidgeClassifier` supports this
|
| 716 |
+
case.
|
| 717 |
+
"""
|
| 718 |
+
X_train, X_test, y_train, y_test = train_test_split(
|
| 719 |
+
X_multilabel, y_multilabel, stratify=y_multilabel, random_state=42
|
| 720 |
+
)
|
| 721 |
+
n_outputs = 3
|
| 722 |
+
|
| 723 |
+
estimators = [("est", RidgeClassifier())]
|
| 724 |
+
stacker = StackingClassifier(
|
| 725 |
+
estimators=estimators,
|
| 726 |
+
final_estimator=KNeighborsClassifier(),
|
| 727 |
+
stack_method="decision_function",
|
| 728 |
+
).fit(X_train, y_train)
|
| 729 |
+
|
| 730 |
+
X_trans = stacker.transform(X_test)
|
| 731 |
+
assert X_trans.shape == (X_test.shape[0], n_outputs)
|
| 732 |
+
|
| 733 |
+
y_pred = stacker.predict(X_test)
|
| 734 |
+
assert y_pred.shape == y_test.shape
|
| 735 |
+
|
| 736 |
+
|
| 737 |
+
@pytest.mark.parametrize("stack_method", ["auto", "predict"])
|
| 738 |
+
@pytest.mark.parametrize("passthrough", [False, True])
|
| 739 |
+
def test_stacking_classifier_multilabel_auto_predict(stack_method, passthrough):
|
| 740 |
+
"""Check the behaviour for the multilabel classification case for stack methods
|
| 741 |
+
supported for all estimators or automatically picked up.
|
| 742 |
+
"""
|
| 743 |
+
X_train, X_test, y_train, y_test = train_test_split(
|
| 744 |
+
X_multilabel, y_multilabel, stratify=y_multilabel, random_state=42
|
| 745 |
+
)
|
| 746 |
+
y_train_before_fit = y_train.copy()
|
| 747 |
+
n_outputs = 3
|
| 748 |
+
|
| 749 |
+
estimators = [
|
| 750 |
+
("mlp", MLPClassifier(random_state=42)),
|
| 751 |
+
("rf", RandomForestClassifier(random_state=42)),
|
| 752 |
+
("ridge", RidgeClassifier()),
|
| 753 |
+
]
|
| 754 |
+
final_estimator = KNeighborsClassifier()
|
| 755 |
+
|
| 756 |
+
clf = StackingClassifier(
|
| 757 |
+
estimators=estimators,
|
| 758 |
+
final_estimator=final_estimator,
|
| 759 |
+
passthrough=passthrough,
|
| 760 |
+
stack_method=stack_method,
|
| 761 |
+
).fit(X_train, y_train)
|
| 762 |
+
|
| 763 |
+
# make sure we don't change `y_train` inplace
|
| 764 |
+
assert_array_equal(y_train_before_fit, y_train)
|
| 765 |
+
|
| 766 |
+
y_pred = clf.predict(X_test)
|
| 767 |
+
assert y_pred.shape == y_test.shape
|
| 768 |
+
|
| 769 |
+
if stack_method == "auto":
|
| 770 |
+
expected_stack_methods = ["predict_proba", "predict_proba", "decision_function"]
|
| 771 |
+
else:
|
| 772 |
+
expected_stack_methods = ["predict"] * len(estimators)
|
| 773 |
+
assert clf.stack_method_ == expected_stack_methods
|
| 774 |
+
|
| 775 |
+
n_features_X_trans = n_outputs * len(estimators)
|
| 776 |
+
if passthrough:
|
| 777 |
+
n_features_X_trans += X_train.shape[1]
|
| 778 |
+
X_trans = clf.transform(X_test)
|
| 779 |
+
assert X_trans.shape == (X_test.shape[0], n_features_X_trans)
|
| 780 |
+
|
| 781 |
+
assert_array_equal(clf.classes_, [np.array([0, 1])] * n_outputs)
|
| 782 |
+
|
| 783 |
+
|
| 784 |
+
@pytest.mark.parametrize(
|
| 785 |
+
"stacker, feature_names, X, y, expected_names",
|
| 786 |
+
[
|
| 787 |
+
(
|
| 788 |
+
StackingClassifier(
|
| 789 |
+
estimators=[
|
| 790 |
+
("lr", LogisticRegression(random_state=0)),
|
| 791 |
+
("svm", LinearSVC(random_state=0)),
|
| 792 |
+
]
|
| 793 |
+
),
|
| 794 |
+
iris.feature_names,
|
| 795 |
+
X_iris,
|
| 796 |
+
y_iris,
|
| 797 |
+
[
|
| 798 |
+
"stackingclassifier_lr0",
|
| 799 |
+
"stackingclassifier_lr1",
|
| 800 |
+
"stackingclassifier_lr2",
|
| 801 |
+
"stackingclassifier_svm0",
|
| 802 |
+
"stackingclassifier_svm1",
|
| 803 |
+
"stackingclassifier_svm2",
|
| 804 |
+
],
|
| 805 |
+
),
|
| 806 |
+
(
|
| 807 |
+
StackingClassifier(
|
| 808 |
+
estimators=[
|
| 809 |
+
("lr", LogisticRegression(random_state=0)),
|
| 810 |
+
("other", "drop"),
|
| 811 |
+
("svm", LinearSVC(random_state=0)),
|
| 812 |
+
]
|
| 813 |
+
),
|
| 814 |
+
iris.feature_names,
|
| 815 |
+
X_iris[:100],
|
| 816 |
+
y_iris[:100], # keep only classes 0 and 1
|
| 817 |
+
[
|
| 818 |
+
"stackingclassifier_lr",
|
| 819 |
+
"stackingclassifier_svm",
|
| 820 |
+
],
|
| 821 |
+
),
|
| 822 |
+
(
|
| 823 |
+
StackingRegressor(
|
| 824 |
+
estimators=[
|
| 825 |
+
("lr", LinearRegression()),
|
| 826 |
+
("svm", LinearSVR(random_state=0)),
|
| 827 |
+
]
|
| 828 |
+
),
|
| 829 |
+
diabetes.feature_names,
|
| 830 |
+
X_diabetes,
|
| 831 |
+
y_diabetes,
|
| 832 |
+
[
|
| 833 |
+
"stackingregressor_lr",
|
| 834 |
+
"stackingregressor_svm",
|
| 835 |
+
],
|
| 836 |
+
),
|
| 837 |
+
],
|
| 838 |
+
ids=[
|
| 839 |
+
"StackingClassifier_multiclass",
|
| 840 |
+
"StackingClassifier_binary",
|
| 841 |
+
"StackingRegressor",
|
| 842 |
+
],
|
| 843 |
+
)
|
| 844 |
+
@pytest.mark.parametrize("passthrough", [True, False])
|
| 845 |
+
def test_get_feature_names_out(
|
| 846 |
+
stacker, feature_names, X, y, expected_names, passthrough
|
| 847 |
+
):
|
| 848 |
+
"""Check get_feature_names_out works for stacking."""
|
| 849 |
+
|
| 850 |
+
stacker.set_params(passthrough=passthrough)
|
| 851 |
+
stacker.fit(scale(X), y)
|
| 852 |
+
|
| 853 |
+
if passthrough:
|
| 854 |
+
expected_names = np.concatenate((expected_names, feature_names))
|
| 855 |
+
|
| 856 |
+
names_out = stacker.get_feature_names_out(feature_names)
|
| 857 |
+
assert_array_equal(names_out, expected_names)
|
| 858 |
+
|
| 859 |
+
|
| 860 |
+
def test_stacking_classifier_base_regressor():
|
| 861 |
+
"""Check that a regressor can be used as the first layer in `StackingClassifier`."""
|
| 862 |
+
X_train, X_test, y_train, y_test = train_test_split(
|
| 863 |
+
scale(X_iris), y_iris, stratify=y_iris, random_state=42
|
| 864 |
+
)
|
| 865 |
+
clf = StackingClassifier(estimators=[("ridge", Ridge())])
|
| 866 |
+
clf.fit(X_train, y_train)
|
| 867 |
+
clf.predict(X_test)
|
| 868 |
+
clf.predict_proba(X_test)
|
| 869 |
+
assert clf.score(X_test, y_test) > 0.8
|
| 870 |
+
|
| 871 |
+
|
| 872 |
+
def test_stacking_final_estimator_attribute_error():
|
| 873 |
+
"""Check that we raise the proper AttributeError when the final estimator
|
| 874 |
+
does not implement the `decision_function` method, which is decorated with
|
| 875 |
+
`available_if`.
|
| 876 |
+
|
| 877 |
+
Non-regression test for:
|
| 878 |
+
https://github.com/scikit-learn/scikit-learn/issues/28108
|
| 879 |
+
"""
|
| 880 |
+
X, y = make_classification(random_state=42)
|
| 881 |
+
|
| 882 |
+
estimators = [
|
| 883 |
+
("lr", LogisticRegression()),
|
| 884 |
+
("rf", RandomForestClassifier(n_estimators=2, random_state=42)),
|
| 885 |
+
]
|
| 886 |
+
# RandomForestClassifier does not implement 'decision_function' and should raise
|
| 887 |
+
# an AttributeError
|
| 888 |
+
final_estimator = RandomForestClassifier(n_estimators=2, random_state=42)
|
| 889 |
+
clf = StackingClassifier(
|
| 890 |
+
estimators=estimators, final_estimator=final_estimator, cv=3
|
| 891 |
+
)
|
| 892 |
+
|
| 893 |
+
outer_msg = "This 'StackingClassifier' has no attribute 'decision_function'"
|
| 894 |
+
inner_msg = "'RandomForestClassifier' object has no attribute 'decision_function'"
|
| 895 |
+
with pytest.raises(AttributeError, match=outer_msg) as exec_info:
|
| 896 |
+
clf.fit(X, y).decision_function(X)
|
| 897 |
+
assert isinstance(exec_info.value.__cause__, AttributeError)
|
| 898 |
+
assert inner_msg in str(exec_info.value.__cause__)
|
| 899 |
+
|
| 900 |
+
|
| 901 |
+
# Metadata Routing Tests
|
| 902 |
+
# ======================
|
| 903 |
+
|
| 904 |
+
|
| 905 |
+
@pytest.mark.parametrize(
|
| 906 |
+
"Estimator, Child",
|
| 907 |
+
[
|
| 908 |
+
(StackingClassifier, ConsumingClassifier),
|
| 909 |
+
(StackingRegressor, ConsumingRegressor),
|
| 910 |
+
],
|
| 911 |
+
)
|
| 912 |
+
def test_routing_passed_metadata_not_supported(Estimator, Child):
|
| 913 |
+
"""Test that the right error message is raised when metadata is passed while
|
| 914 |
+
not supported when `enable_metadata_routing=False`."""
|
| 915 |
+
|
| 916 |
+
with pytest.raises(
|
| 917 |
+
ValueError, match="is only supported if enable_metadata_routing=True"
|
| 918 |
+
):
|
| 919 |
+
Estimator(["clf", Child()]).fit(
|
| 920 |
+
X_iris, y_iris, sample_weight=[1, 1, 1, 1, 1], metadata="a"
|
| 921 |
+
)
|
| 922 |
+
|
| 923 |
+
|
| 924 |
+
@pytest.mark.parametrize(
|
| 925 |
+
"Estimator, Child",
|
| 926 |
+
[
|
| 927 |
+
(StackingClassifier, ConsumingClassifier),
|
| 928 |
+
(StackingRegressor, ConsumingRegressor),
|
| 929 |
+
],
|
| 930 |
+
)
|
| 931 |
+
@config_context(enable_metadata_routing=True)
|
| 932 |
+
def test_get_metadata_routing_without_fit(Estimator, Child):
|
| 933 |
+
# Test that metadata_routing() doesn't raise when called before fit.
|
| 934 |
+
est = Estimator([("sub_est", Child())])
|
| 935 |
+
est.get_metadata_routing()
|
| 936 |
+
|
| 937 |
+
|
| 938 |
+
@pytest.mark.parametrize(
    "Estimator, Child",
    [
        (StackingClassifier, ConsumingClassifier),
        (StackingRegressor, ConsumingRegressor),
    ],
)
@pytest.mark.parametrize(
    "prop, prop_value", [("sample_weight", np.ones(X_iris.shape[0])), ("metadata", "a")]
)
@config_context(enable_metadata_routing=True)
def test_metadata_routing_for_stacking_estimators(Estimator, Child, prop, prop_value):
    """Test that metadata is routed correctly for Stacking*.

    Two consuming sub-estimators request the metadata in ``fit`` and the
    final estimator requests it in ``predict``; after running fit,
    fit_transform and predict, every registry must record the metadata.
    """

    est = Estimator(
        [
            (
                "sub_est1",
                Child(registry=_Registry()).set_fit_request(**{prop: True}),
            ),
            (
                "sub_est2",
                Child(registry=_Registry()).set_fit_request(**{prop: True}),
            ),
        ],
        final_estimator=Child(registry=_Registry()).set_predict_request(**{prop: True}),
    )

    est.fit(X_iris, y_iris, **{prop: prop_value})
    est.fit_transform(X_iris, y_iris, **{prop: prop_value})

    est.predict(X_iris, **{prop: prop_value})

    for estimator in est.estimators:
        # access sub-estimator in (name, est) with estimator[1]:
        registry = estimator[1].registry
        assert len(registry)
        for sub_est in registry:
            check_recorded_metadata(
                obj=sub_est,
                method="fit",
                parent="fit",
                # BUG FIX: `(prop)` is not a tuple — it is just the string
                # `prop`, so membership tests against `split_params` would do
                # substring matching (e.g. "weight" in "sample_weight" is
                # True). A one-element tuple is intended.
                split_params=(prop,),
                **{prop: prop_value},
            )
    # access final_estimator:
    registry = est.final_estimator_.registry
    assert len(registry)
    check_recorded_metadata(
        obj=registry[-1],
        method="predict",
        parent="predict",
        split_params=(prop,),
        **{prop: prop_value},
    )
|
| 993 |
+
|
| 994 |
+
|
| 995 |
+
@pytest.mark.parametrize(
    "Estimator, Child",
    [
        (StackingClassifier, ConsumingClassifier),
        (StackingRegressor, ConsumingRegressor),
    ],
)
@config_context(enable_metadata_routing=True)
def test_metadata_routing_error_for_stacking_estimators(Estimator, Child):
    """Metadata passed to ``fit`` without being requested must raise."""
    weights = np.ones(X_iris.shape[0])
    stacker = Estimator([("sub_est", Child())])

    error_message = (
        "[sample_weight, metadata] are passed but are not explicitly set as requested"
        f" or not requested for {Child.__name__}.fit"
    )
    with pytest.raises(ValueError, match=re.escape(error_message)):
        stacker.fit(X_iris, y_iris, sample_weight=weights, metadata="a")
|
| 1016 |
+
|
| 1017 |
+
|
| 1018 |
+
# End of Metadata Routing Tests
|
| 1019 |
+
# =============================
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/ensemble/tests/test_weight_boosting.py
ADDED
|
@@ -0,0 +1,639 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Testing for the boost module (sklearn.ensemble.boost)."""
|
| 2 |
+
|
| 3 |
+
import re
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
import pytest
|
| 7 |
+
|
| 8 |
+
from sklearn import datasets
|
| 9 |
+
from sklearn.base import BaseEstimator, clone
|
| 10 |
+
from sklearn.dummy import DummyClassifier, DummyRegressor
|
| 11 |
+
from sklearn.ensemble import AdaBoostClassifier, AdaBoostRegressor
|
| 12 |
+
from sklearn.ensemble._weight_boosting import _samme_proba
|
| 13 |
+
from sklearn.linear_model import LinearRegression
|
| 14 |
+
from sklearn.model_selection import GridSearchCV, train_test_split
|
| 15 |
+
from sklearn.svm import SVC, SVR
|
| 16 |
+
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
|
| 17 |
+
from sklearn.utils import shuffle
|
| 18 |
+
from sklearn.utils._mocking import NoSampleWeightWrapper
|
| 19 |
+
from sklearn.utils._testing import (
|
| 20 |
+
assert_allclose,
|
| 21 |
+
assert_array_almost_equal,
|
| 22 |
+
assert_array_equal,
|
| 23 |
+
)
|
| 24 |
+
from sklearn.utils.fixes import (
|
| 25 |
+
COO_CONTAINERS,
|
| 26 |
+
CSC_CONTAINERS,
|
| 27 |
+
CSR_CONTAINERS,
|
| 28 |
+
DOK_CONTAINERS,
|
| 29 |
+
LIL_CONTAINERS,
|
| 30 |
+
)
|
| 31 |
+
|
| 32 |
+
# Common random state shared by the module-level fixtures below.
rng = np.random.RandomState(0)

# Toy sample: six 2-D points forming two well-separated groups.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1]  # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
# Held-out points and the predictions expected for them on the toy problem.
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]

# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
# NOTE(review): `perm` is never read below, but computing it advances the
# state of `rng`; deleting this line would change the shuffles that follow,
# so it is left in place.
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)

# Load the diabetes dataset and randomly permute it
diabetes = datasets.load_diabetes()
diabetes.data, diabetes.target = shuffle(
    diabetes.data, diabetes.target, random_state=rng
)
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def test_samme_proba():
    """Exercise the `_samme_proba` helper on a pathological proba matrix."""

    # Deliberately ill-conditioned `predict_proba` output: zeros, negatives
    # and values far outside [0, 1], then row-normalised by absolute sum.
    proba_matrix = np.array(
        [[1, 1e-6, 0], [0.19, 0.6, 0.2], [-999, 0.51, 0.5], [1e-6, 1, 1e-9]]
    )
    proba_matrix /= np.abs(proba_matrix.sum(axis=1))[:, np.newaxis]

    class FakeEstimator:
        """Stub whose predict_proba returns the canned matrix above."""

        def predict_proba(self, X):
            assert_array_equal(X.shape, proba_matrix.shape)
            return proba_matrix

    fake = FakeEstimator()
    samme_proba = _samme_proba(fake, 3, np.ones_like(proba_matrix))

    assert_array_equal(samme_proba.shape, proba_matrix.shape)
    assert np.isfinite(samme_proba).all()

    # The transformation must preserve the per-row ordering of the classes:
    # smallest and largest entries stay in the same positions.
    assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
    assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
def test_oneclass_adaboost_proba():
    """predict_proba stays well-defined when training data has one class.

    Non-regression test for
    https://github.com/scikit-learn/scikit-learn/issues/7501
    """
    n_samples = len(X)
    single_class_target = np.ones(n_samples)
    model = AdaBoostClassifier()
    model.fit(X, single_class_target)
    expected = np.ones((n_samples, 1))
    assert_array_almost_equal(model.predict_proba(X), expected)
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def test_classification_toy():
    """AdaBoostClassifier sanity checks on the module-level toy problem."""
    model = AdaBoostClassifier(random_state=0).fit(X, y_class)

    assert_array_equal(model.predict(T), y_t_class)
    assert_array_equal(np.unique(np.asarray(y_t_class)), model.classes_)

    n_test = len(T)
    # Two classes -> (n_test, 2) probabilities and a 1-D decision function.
    assert model.predict_proba(T).shape == (n_test, 2)
    assert model.decision_function(T).shape == (n_test,)
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def test_regression_toy():
    # Check regression on a toy dataset. (The original comment said
    # "classification" — this block exercises AdaBoostRegressor.)
    clf = AdaBoostRegressor(random_state=0)
    clf.fit(X, y_regr)
    # The regressor must reproduce the expected targets exactly on the
    # held-out toy points.
    assert_array_equal(clf.predict(T), y_t_regr)
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
def test_iris():
    """Consistency checks for AdaBoostClassifier on the iris dataset."""
    expected_classes = np.unique(iris.target)
    n_classes = len(expected_classes)

    model = AdaBoostClassifier()
    model.fit(iris.data, iris.target)

    assert_array_equal(expected_classes, model.classes_)
    # One probability / decision column per class.
    assert model.predict_proba(iris.data).shape[1] == n_classes
    assert model.decision_function(iris.data).shape[1] == n_classes

    score = model.score(iris.data, iris.target)
    assert score > 0.9, f"Failed with {score = }"

    # The ensemble must actually boost: more than one weak learner ...
    n_estimators = len(model.estimators_)
    assert n_estimators > 1
    # ... each with a distinct random state (see issue #7408).
    assert len({est.random_state for est in model.estimators_}) == n_estimators
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
@pytest.mark.parametrize("loss", ["linear", "square", "exponential"])
def test_diabetes(loss):
    """Consistency checks for AdaBoostRegressor on the diabetes dataset."""
    model = AdaBoostRegressor(loss=loss, random_state=0)
    model.fit(diabetes.data, diabetes.target)
    assert model.score(diabetes.data, diabetes.target) > 0.55

    # More than one boosting round must have happened ...
    n_estimators = len(model.estimators_)
    assert n_estimators > 1
    # ... with distinct per-estimator random states (see issue #7408).
    assert len({est.random_state for est in model.estimators_}) == n_estimators
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
def test_staged_predict():
    # Check staged predictions: the staged_* generators must yield one value
    # per boosting round and their last element must match the corresponding
    # non-staged method.
    rng = np.random.RandomState(0)
    iris_weights = rng.randint(10, size=iris.target.shape)
    diabetes_weights = rng.randint(10, size=diabetes.target.shape)

    # AdaBoost classification
    clf = AdaBoostClassifier(n_estimators=10)
    clf.fit(iris.data, iris.target, sample_weight=iris_weights)

    predictions = clf.predict(iris.data)
    staged_predictions = [p for p in clf.staged_predict(iris.data)]
    proba = clf.predict_proba(iris.data)
    staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
    score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
    staged_scores = [
        s for s in clf.staged_score(iris.data, iris.target, sample_weight=iris_weights)
    ]

    # One entry per estimator; the final stage equals the full ensemble.
    assert len(staged_predictions) == 10
    assert_array_almost_equal(predictions, staged_predictions[-1])
    assert len(staged_probas) == 10
    assert_array_almost_equal(proba, staged_probas[-1])
    assert len(staged_scores) == 10
    assert_array_almost_equal(score, staged_scores[-1])

    # AdaBoost regression
    clf = AdaBoostRegressor(n_estimators=10, random_state=0)
    clf.fit(diabetes.data, diabetes.target, sample_weight=diabetes_weights)

    predictions = clf.predict(diabetes.data)
    staged_predictions = [p for p in clf.staged_predict(diabetes.data)]
    score = clf.score(diabetes.data, diabetes.target, sample_weight=diabetes_weights)
    staged_scores = [
        s
        for s in clf.staged_score(
            diabetes.data, diabetes.target, sample_weight=diabetes_weights
        )
    ]

    assert len(staged_predictions) == 10
    assert_array_almost_equal(predictions, staged_predictions[-1])
    assert len(staged_scores) == 10
    assert_array_almost_equal(score, staged_scores[-1])
|
| 189 |
+
|
| 190 |
+
|
| 191 |
+
def test_gridsearch():
    """Base trees of AdaBoost estimators can be tuned through GridSearchCV."""
    param_grid = {"n_estimators": (1, 2), "estimator__max_depth": (1, 2)}

    # Classification
    search = GridSearchCV(
        AdaBoostClassifier(estimator=DecisionTreeClassifier()), param_grid
    )
    search.fit(iris.data, iris.target)

    # Regression
    search = GridSearchCV(
        AdaBoostRegressor(estimator=DecisionTreeRegressor(), random_state=0),
        param_grid,
    )
    search.fit(diabetes.data, diabetes.target)
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
def test_pickle():
    """Check that fitted AdaBoost estimators survive a pickle round-trip.

    Both the classifier (on iris) and the regressor (on diabetes) are
    fitted, serialized and deserialized; the restored estimator must be of
    the exact same class and reproduce the same score.
    """
    import pickle

    # The original spelled the classifier and regressor cases out twice;
    # a loop keeps the two cases identical by construction.
    for estimator, data in (
        (AdaBoostClassifier(), iris),
        (AdaBoostRegressor(random_state=0), diabetes),
    ):
        estimator.fit(data.data, data.target)
        score = estimator.score(data.data, data.target)

        restored = pickle.loads(pickle.dumps(estimator))
        # Identity comparison of classes is the idiomatic exact-type check.
        assert type(restored) is type(estimator)
        assert restored.score(data.data, data.target) == score
|
| 234 |
+
|
| 235 |
+
|
| 236 |
+
def test_importances():
    """Informative features must dominate the learned feature importances."""
    X_imp, y_imp = datasets.make_classification(
        n_samples=2000,
        n_features=10,
        n_informative=3,
        n_redundant=0,
        n_repeated=0,
        shuffle=False,
        random_state=1,
    )

    model = AdaBoostClassifier()
    model.fit(X_imp, y_imp)
    importances = model.feature_importances_

    assert importances.shape[0] == 10
    # With shuffle=False the 3 informative features come first; each must be
    # at least as important as every non-informative one.
    assert (importances[:3, np.newaxis] >= importances[3:]).all()
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
def test_adaboost_classifier_sample_weight_error():
    """A sample_weight of the wrong length raises an informative ValueError."""
    clf = AdaBoostClassifier()
    expected = re.escape("sample_weight.shape == (1,), expected (6,)")
    with pytest.raises(ValueError, match=expected):
        clf.fit(X, y_class, sample_weight=np.asarray([-1]))
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
def test_estimator():
    # Smoke-test AdaBoost with several kinds of base estimators, then check
    # that an ensemble that cannot beat random fails at fit time.
    from sklearn.ensemble import RandomForestClassifier

    # XXX doesn't work with y_class because RF doesn't support classes_
    # Shouldn't AdaBoost run a LabelBinarizer?
    clf = AdaBoostClassifier(RandomForestClassifier())
    clf.fit(X, y_regr)

    clf = AdaBoostClassifier(SVC())
    clf.fit(X, y_class)

    from sklearn.ensemble import RandomForestRegressor

    clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
    clf.fit(X, y_regr)

    clf = AdaBoostRegressor(SVR(), random_state=0)
    clf.fit(X, y_regr)

    # Check that an empty discrete ensemble fails in fit, not predict.
    # All-identical samples with conflicting labels make every learner
    # "worse than random".
    X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
    y_fail = ["foo", "bar", 1, 2]
    clf = AdaBoostClassifier(SVC())
    with pytest.raises(ValueError, match="worse than random"):
        clf.fit(X_fail, y_fail)
|
| 291 |
+
|
| 292 |
+
|
| 293 |
+
def test_sample_weights_infinite():
    """A huge learning rate drives sample weights to infinity and warns."""
    model = AdaBoostClassifier(n_estimators=30, learning_rate=23.0)
    with pytest.warns(UserWarning, match="Sample weights have reached infinite values"):
        model.fit(iris.data, iris.target)
|
| 298 |
+
|
| 299 |
+
|
| 300 |
+
@pytest.mark.parametrize(
    "sparse_container, expected_internal_type",
    zip(
        [
            *CSC_CONTAINERS,
            *CSR_CONTAINERS,
            *LIL_CONTAINERS,
            *COO_CONTAINERS,
            *DOK_CONTAINERS,
        ],
        # CSC stays CSC internally; every other format is converted to CSR.
        CSC_CONTAINERS + 4 * CSR_CONTAINERS,
    ),
)
def test_sparse_classification(sparse_container, expected_internal_type):
    # Check classification with sparse input: sparse- and dense-trained
    # ensembles must agree on every prediction API, and the sparse format
    # handed to the base estimators must be the expected internal one.

    class CustomSVC(SVC):
        """SVC variant that records the nature of the training set."""

        def fit(self, X, y, sample_weight=None):
            """Modification on fit carries data type for later verification."""
            super().fit(X, y, sample_weight=sample_weight)
            self.data_type_ = type(X)
            return self

    X, y = datasets.make_multilabel_classification(
        n_classes=1, n_samples=15, n_features=5, random_state=42
    )
    # Flatten y to a 1d array
    y = np.ravel(y)

    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    X_train_sparse = sparse_container(X_train)
    X_test_sparse = sparse_container(X_test)

    # Trained on sparse format
    sparse_classifier = AdaBoostClassifier(
        estimator=CustomSVC(probability=True),
        random_state=1,
    ).fit(X_train_sparse, y_train)

    # Trained on dense format
    dense_classifier = AdaBoostClassifier(
        estimator=CustomSVC(probability=True),
        random_state=1,
    ).fit(X_train, y_train)

    # predict
    sparse_clf_results = sparse_classifier.predict(X_test_sparse)
    dense_clf_results = dense_classifier.predict(X_test)
    assert_array_equal(sparse_clf_results, dense_clf_results)

    # decision_function
    sparse_clf_results = sparse_classifier.decision_function(X_test_sparse)
    dense_clf_results = dense_classifier.decision_function(X_test)
    assert_array_almost_equal(sparse_clf_results, dense_clf_results)

    # predict_log_proba
    sparse_clf_results = sparse_classifier.predict_log_proba(X_test_sparse)
    dense_clf_results = dense_classifier.predict_log_proba(X_test)
    assert_array_almost_equal(sparse_clf_results, dense_clf_results)

    # predict_proba
    sparse_clf_results = sparse_classifier.predict_proba(X_test_sparse)
    dense_clf_results = dense_classifier.predict_proba(X_test)
    assert_array_almost_equal(sparse_clf_results, dense_clf_results)

    # score
    sparse_clf_results = sparse_classifier.score(X_test_sparse, y_test)
    dense_clf_results = dense_classifier.score(X_test, y_test)
    assert_array_almost_equal(sparse_clf_results, dense_clf_results)

    # staged_decision_function
    sparse_clf_results = sparse_classifier.staged_decision_function(X_test_sparse)
    dense_clf_results = dense_classifier.staged_decision_function(X_test)
    for sparse_clf_res, dense_clf_res in zip(sparse_clf_results, dense_clf_results):
        assert_array_almost_equal(sparse_clf_res, dense_clf_res)

    # staged_predict
    sparse_clf_results = sparse_classifier.staged_predict(X_test_sparse)
    dense_clf_results = dense_classifier.staged_predict(X_test)
    for sparse_clf_res, dense_clf_res in zip(sparse_clf_results, dense_clf_results):
        assert_array_equal(sparse_clf_res, dense_clf_res)

    # staged_predict_proba
    sparse_clf_results = sparse_classifier.staged_predict_proba(X_test_sparse)
    dense_clf_results = dense_classifier.staged_predict_proba(X_test)
    for sparse_clf_res, dense_clf_res in zip(sparse_clf_results, dense_clf_results):
        assert_array_almost_equal(sparse_clf_res, dense_clf_res)

    # staged_score
    sparse_clf_results = sparse_classifier.staged_score(X_test_sparse, y_test)
    dense_clf_results = dense_classifier.staged_score(X_test, y_test)
    for sparse_clf_res, dense_clf_res in zip(sparse_clf_results, dense_clf_results):
        assert_array_equal(sparse_clf_res, dense_clf_res)

    # Verify sparsity of data is maintained during training
    types = [i.data_type_ for i in sparse_classifier.estimators_]

    assert all([t == expected_internal_type for t in types])
|
| 401 |
+
|
| 402 |
+
|
| 403 |
+
@pytest.mark.parametrize(
    "sparse_container, expected_internal_type",
    zip(
        [
            *CSC_CONTAINERS,
            *CSR_CONTAINERS,
            *LIL_CONTAINERS,
            *COO_CONTAINERS,
            *DOK_CONTAINERS,
        ],
        # CSC stays CSC internally; every other format is converted to CSR.
        CSC_CONTAINERS + 4 * CSR_CONTAINERS,
    ),
)
def test_sparse_regression(sparse_container, expected_internal_type):
    # Check regression with sparse input: sparse- and dense-trained
    # ensembles must agree, and the base estimators must receive the
    # expected internal sparse format.

    class CustomSVR(SVR):
        """SVR variant that records the nature of the training set."""

        def fit(self, X, y, sample_weight=None):
            """Modification on fit carries data type for later verification."""
            super().fit(X, y, sample_weight=sample_weight)
            self.data_type_ = type(X)
            return self

    X, y = datasets.make_regression(
        n_samples=15, n_features=50, n_targets=1, random_state=42
    )

    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    X_train_sparse = sparse_container(X_train)
    X_test_sparse = sparse_container(X_test)

    # Trained on sparse format
    sparse_regressor = AdaBoostRegressor(estimator=CustomSVR(), random_state=1).fit(
        X_train_sparse, y_train
    )

    # Trained on dense format
    dense_regressor = AdaBoostRegressor(estimator=CustomSVR(), random_state=1).fit(
        X_train, y_train
    )

    # predict
    sparse_regr_results = sparse_regressor.predict(X_test_sparse)
    dense_regr_results = dense_regressor.predict(X_test)
    assert_array_almost_equal(sparse_regr_results, dense_regr_results)

    # staged_predict
    sparse_regr_results = sparse_regressor.staged_predict(X_test_sparse)
    dense_regr_results = dense_regressor.staged_predict(X_test)
    for sparse_regr_res, dense_regr_res in zip(sparse_regr_results, dense_regr_results):
        assert_array_almost_equal(sparse_regr_res, dense_regr_res)

    # Verify sparsity of data is maintained during training
    types = [i.data_type_ for i in sparse_regressor.estimators_]

    assert all([t == expected_internal_type for t in types])
|
| 461 |
+
|
| 462 |
+
|
| 463 |
+
def test_sample_weight_adaboost_regressor():
    """AdaBoostRegressor works with base estimators lacking sample_weight.

    The random weighted sampling is done internally in the _boost method in
    AdaBoostRegressor, so the base estimator's fit needs no sample_weight
    parameter.
    """

    class WeightlessEstimator(BaseEstimator):
        # Minimal regressor API: fit is a no-op, predict returns zeros.
        def fit(self, X, y):
            pass

        def predict(self, X):
            return np.zeros(X.shape[0])

    booster = AdaBoostRegressor(WeightlessEstimator(), n_estimators=3)
    booster.fit(X, y_regr)
    assert len(booster.estimator_weights_) == len(booster.estimator_errors_)
|
| 480 |
+
|
| 481 |
+
|
| 482 |
+
def test_multidimensional_X():
    """AdaBoost estimators accept n-dimensional data matrices."""
    local_rng = np.random.RandomState(0)

    X_3d = local_rng.randn(51, 3, 3)
    y_clf = local_rng.choice([0, 1], 51)
    y_reg = local_rng.randn(51)

    clf = AdaBoostClassifier(DummyClassifier(strategy="most_frequent"))
    clf.fit(X_3d, y_clf)
    clf.predict(X_3d)
    clf.predict_proba(X_3d)

    reg = AdaBoostRegressor(DummyRegressor())
    reg.fit(X_3d, y_reg)
    reg.predict(X_3d)
|
| 501 |
+
|
| 502 |
+
|
| 503 |
+
def test_adaboostclassifier_without_sample_weight():
    """Boosting fails fast when the base estimator rejects sample_weight."""
    features, target = iris.data, iris.target
    wrapped = NoSampleWeightWrapper(DummyClassifier())
    booster = AdaBoostClassifier(estimator=wrapped)
    err_msg = "{} doesn't support sample_weight".format(wrapped.__class__.__name__)
    with pytest.raises(ValueError, match=err_msg):
        booster.fit(features, target)
|
| 510 |
+
|
| 511 |
+
|
| 512 |
+
def test_adaboostregressor_sample_weight():
    # check that giving weight will have an influence on the error computed
    # for a weak learner: zero-weighting an outlier must behave like
    # removing it from the training set.
    rng = np.random.RandomState(42)
    X = np.linspace(0, 100, num=1000)
    y = (0.8 * X + 0.2) + (rng.rand(X.shape[0]) * 0.0001)
    X = X.reshape(-1, 1)

    # add an arbitrary outlier
    X[-1] *= 10
    y[-1] = 10000

    # random_state=0 ensure that the underlying bootstrap will use the outlier
    regr_no_outlier = AdaBoostRegressor(
        estimator=LinearRegression(), n_estimators=1, random_state=0
    )
    regr_with_weight = clone(regr_no_outlier)
    regr_with_outlier = clone(regr_no_outlier)

    # fit 3 models:
    # - a model containing the outlier
    # - a model without the outlier
    # - a model containing the outlier but with a null sample-weight
    regr_with_outlier.fit(X, y)
    regr_no_outlier.fit(X[:-1], y[:-1])
    sample_weight = np.ones_like(y)
    sample_weight[-1] = 0
    regr_with_weight.fit(X, y, sample_weight=sample_weight)

    # All three are scored on the inlier points only.
    score_with_outlier = regr_with_outlier.score(X[:-1], y[:-1])
    score_no_outlier = regr_no_outlier.score(X[:-1], y[:-1])
    score_with_weight = regr_with_weight.score(X[:-1], y[:-1])

    # The outlier must hurt the fit, and zero weight must neutralise it.
    assert score_with_outlier < score_no_outlier
    assert score_with_outlier < score_with_weight
    assert score_no_outlier == pytest.approx(score_with_weight)
|
| 548 |
+
|
| 549 |
+
|
| 550 |
+
def test_adaboost_consistent_predict():
    """predict must agree with the argmax of predict_proba.

    Non-regression test for
    https://github.com/scikit-learn/scikit-learn/issues/14084
    """
    X_train, X_test, y_train, y_test = train_test_split(
        *datasets.load_digits(return_X_y=True), random_state=42
    )
    model = AdaBoostClassifier(random_state=42)
    model.fit(X_train, y_train)

    proba_argmax = np.argmax(model.predict_proba(X_test), axis=1)
    assert_array_equal(proba_argmax, model.predict(X_test))
|
| 563 |
+
|
| 564 |
+
|
| 565 |
+
@pytest.mark.parametrize(
    "model, X, y",
    [
        (AdaBoostClassifier(), iris.data, iris.target),
        (AdaBoostRegressor(), diabetes.data, diabetes.target),
    ],
)
def test_adaboost_negative_weight_error(model, X, y):
    """Any negative sample weight must be rejected with a ValueError."""
    weights = np.ones_like(y)
    weights[-1] = -10

    with pytest.raises(
        ValueError, match="Negative values in data passed to `sample_weight`"
    ):
        model.fit(X, y, sample_weight=weights)
|
| 579 |
+
|
| 580 |
+
|
| 581 |
+
def test_adaboost_numerically_stable_feature_importance_with_small_weights():
    """Feature importances stay NaN-free for extremely small sample weights.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/20320
    """
    local_rng = np.random.RandomState(42)
    X_small = local_rng.normal(size=(1000, 10))
    y_small = local_rng.choice([0, 1], size=1000)
    tiny_weights = np.ones_like(y_small) * 1e-263

    base_tree = DecisionTreeClassifier(max_depth=10, random_state=12)
    model = AdaBoostClassifier(estimator=base_tree, n_estimators=20, random_state=12)
    model.fit(X_small, y_small, sample_weight=tiny_weights)

    assert np.isnan(model.feature_importances_).sum() == 0
|
| 596 |
+
|
| 597 |
+
|
| 598 |
+
def test_adaboost_decision_function(global_random_seed):
|
| 599 |
+
"""Check that the decision function respects the symmetric constraint for weak
|
| 600 |
+
learners.
|
| 601 |
+
|
| 602 |
+
Non-regression test for:
|
| 603 |
+
https://github.com/scikit-learn/scikit-learn/issues/26520
|
| 604 |
+
"""
|
| 605 |
+
n_classes = 3
|
| 606 |
+
X, y = datasets.make_classification(
|
| 607 |
+
n_classes=n_classes, n_clusters_per_class=1, random_state=global_random_seed
|
| 608 |
+
)
|
| 609 |
+
clf = AdaBoostClassifier(n_estimators=1, random_state=global_random_seed).fit(X, y)
|
| 610 |
+
|
| 611 |
+
y_score = clf.decision_function(X)
|
| 612 |
+
assert_allclose(y_score.sum(axis=1), 0, atol=1e-8)
|
| 613 |
+
|
| 614 |
+
# With a single learner, we expect to have a decision function in
|
| 615 |
+
# {1, - 1 / (n_classes - 1)}.
|
| 616 |
+
assert set(np.unique(y_score)) == {1, -1 / (n_classes - 1)}
|
| 617 |
+
|
| 618 |
+
# We can assert the same for staged_decision_function since we have a single learner
|
| 619 |
+
for y_score in clf.staged_decision_function(X):
|
| 620 |
+
assert_allclose(y_score.sum(axis=1), 0, atol=1e-8)
|
| 621 |
+
|
| 622 |
+
# With a single learner, we expect to have a decision function in
|
| 623 |
+
# {1, - 1 / (n_classes - 1)}.
|
| 624 |
+
assert set(np.unique(y_score)) == {1, -1 / (n_classes - 1)}
|
| 625 |
+
|
| 626 |
+
clf.set_params(n_estimators=5).fit(X, y)
|
| 627 |
+
|
| 628 |
+
y_score = clf.decision_function(X)
|
| 629 |
+
assert_allclose(y_score.sum(axis=1), 0, atol=1e-8)
|
| 630 |
+
|
| 631 |
+
for y_score in clf.staged_decision_function(X):
|
| 632 |
+
assert_allclose(y_score.sum(axis=1), 0, atol=1e-8)
|
| 633 |
+
|
| 634 |
+
|
| 635 |
+
# TODO(1.8): remove
|
| 636 |
+
def test_deprecated_algorithm():
|
| 637 |
+
adaboost_clf = AdaBoostClassifier(n_estimators=1, algorithm="SAMME")
|
| 638 |
+
with pytest.warns(FutureWarning, match="The parameter 'algorithm' is deprecated"):
|
| 639 |
+
adaboost_clf.fit(X, y_class)
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/__init__.py
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tools for model inspection."""
|
| 2 |
+
|
| 3 |
+
# Authors: The scikit-learn developers
|
| 4 |
+
# SPDX-License-Identifier: BSD-3-Clause
|
| 5 |
+
|
| 6 |
+
from ._partial_dependence import partial_dependence
|
| 7 |
+
from ._permutation_importance import permutation_importance
|
| 8 |
+
from ._plot.decision_boundary import DecisionBoundaryDisplay
|
| 9 |
+
from ._plot.partial_dependence import PartialDependenceDisplay
|
| 10 |
+
|
| 11 |
+
__all__ = [
|
| 12 |
+
"partial_dependence",
|
| 13 |
+
"permutation_importance",
|
| 14 |
+
"PartialDependenceDisplay",
|
| 15 |
+
"DecisionBoundaryDisplay",
|
| 16 |
+
]
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (538 Bytes). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/__pycache__/_partial_dependence.cpython-310.pyc
ADDED
|
Binary file (23.9 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/__pycache__/_pd_utils.cpython-310.pyc
ADDED
|
Binary file (2.02 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/__pycache__/_permutation_importance.cpython-310.pyc
ADDED
|
Binary file (9.61 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_partial_dependence.py
ADDED
|
@@ -0,0 +1,695 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Partial dependence plots for regression and classification models."""
|
| 2 |
+
|
| 3 |
+
# Authors: The scikit-learn developers
|
| 4 |
+
# SPDX-License-Identifier: BSD-3-Clause
|
| 5 |
+
|
| 6 |
+
from collections.abc import Iterable
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
from scipy import sparse
|
| 10 |
+
from scipy.stats.mstats import mquantiles
|
| 11 |
+
|
| 12 |
+
from ..base import is_classifier, is_regressor
|
| 13 |
+
from ..ensemble import RandomForestRegressor
|
| 14 |
+
from ..ensemble._gb import BaseGradientBoosting
|
| 15 |
+
from ..ensemble._hist_gradient_boosting.gradient_boosting import (
|
| 16 |
+
BaseHistGradientBoosting,
|
| 17 |
+
)
|
| 18 |
+
from ..tree import DecisionTreeRegressor
|
| 19 |
+
from ..utils import Bunch, _safe_indexing, check_array
|
| 20 |
+
from ..utils._indexing import _determine_key_type, _get_column_indices, _safe_assign
|
| 21 |
+
from ..utils._optional_dependencies import check_matplotlib_support # noqa
|
| 22 |
+
from ..utils._param_validation import (
|
| 23 |
+
HasMethods,
|
| 24 |
+
Integral,
|
| 25 |
+
Interval,
|
| 26 |
+
StrOptions,
|
| 27 |
+
validate_params,
|
| 28 |
+
)
|
| 29 |
+
from ..utils._response import _get_response_values
|
| 30 |
+
from ..utils.extmath import cartesian
|
| 31 |
+
from ..utils.validation import _check_sample_weight, check_is_fitted
|
| 32 |
+
from ._pd_utils import _check_feature_names, _get_feature_index
|
| 33 |
+
|
| 34 |
+
__all__ = [
|
| 35 |
+
"partial_dependence",
|
| 36 |
+
]
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def _grid_from_X(X, percentiles, is_categorical, grid_resolution):
|
| 40 |
+
"""Generate a grid of points based on the percentiles of X.
|
| 41 |
+
|
| 42 |
+
The grid is a cartesian product between the columns of ``values``. The
|
| 43 |
+
ith column of ``values`` consists in ``grid_resolution`` equally-spaced
|
| 44 |
+
points between the percentiles of the jth column of X.
|
| 45 |
+
|
| 46 |
+
If ``grid_resolution`` is bigger than the number of unique values in the
|
| 47 |
+
j-th column of X or if the feature is a categorical feature (by inspecting
|
| 48 |
+
`is_categorical`) , then those unique values will be used instead.
|
| 49 |
+
|
| 50 |
+
Parameters
|
| 51 |
+
----------
|
| 52 |
+
X : array-like of shape (n_samples, n_target_features)
|
| 53 |
+
The data.
|
| 54 |
+
|
| 55 |
+
percentiles : tuple of float
|
| 56 |
+
The percentiles which are used to construct the extreme values of
|
| 57 |
+
the grid. Must be in [0, 1].
|
| 58 |
+
|
| 59 |
+
is_categorical : list of bool
|
| 60 |
+
For each feature, tells whether it is categorical or not. If a feature
|
| 61 |
+
is categorical, then the values used will be the unique ones
|
| 62 |
+
(i.e. categories) instead of the percentiles.
|
| 63 |
+
|
| 64 |
+
grid_resolution : int
|
| 65 |
+
The number of equally spaced points to be placed on the grid for each
|
| 66 |
+
feature.
|
| 67 |
+
|
| 68 |
+
Returns
|
| 69 |
+
-------
|
| 70 |
+
grid : ndarray of shape (n_points, n_target_features)
|
| 71 |
+
A value for each feature at each point in the grid. ``n_points`` is
|
| 72 |
+
always ``<= grid_resolution ** X.shape[1]``.
|
| 73 |
+
|
| 74 |
+
values : list of 1d ndarrays
|
| 75 |
+
The values with which the grid has been created. The size of each
|
| 76 |
+
array ``values[j]`` is either ``grid_resolution``, or the number of
|
| 77 |
+
unique values in ``X[:, j]``, whichever is smaller.
|
| 78 |
+
"""
|
| 79 |
+
if not isinstance(percentiles, Iterable) or len(percentiles) != 2:
|
| 80 |
+
raise ValueError("'percentiles' must be a sequence of 2 elements.")
|
| 81 |
+
if not all(0 <= x <= 1 for x in percentiles):
|
| 82 |
+
raise ValueError("'percentiles' values must be in [0, 1].")
|
| 83 |
+
if percentiles[0] >= percentiles[1]:
|
| 84 |
+
raise ValueError("percentiles[0] must be strictly less than percentiles[1].")
|
| 85 |
+
|
| 86 |
+
if grid_resolution <= 1:
|
| 87 |
+
raise ValueError("'grid_resolution' must be strictly greater than 1.")
|
| 88 |
+
|
| 89 |
+
values = []
|
| 90 |
+
# TODO: we should handle missing values (i.e. `np.nan`) specifically and store them
|
| 91 |
+
# in a different Bunch attribute.
|
| 92 |
+
for feature, is_cat in enumerate(is_categorical):
|
| 93 |
+
try:
|
| 94 |
+
uniques = np.unique(_safe_indexing(X, feature, axis=1))
|
| 95 |
+
except TypeError as exc:
|
| 96 |
+
# `np.unique` will fail in the presence of `np.nan` and `str` categories
|
| 97 |
+
# due to sorting. Temporary, we reraise an error explaining the problem.
|
| 98 |
+
raise ValueError(
|
| 99 |
+
f"The column #{feature} contains mixed data types. Finding unique "
|
| 100 |
+
"categories fail due to sorting. It usually means that the column "
|
| 101 |
+
"contains `np.nan` values together with `str` categories. Such use "
|
| 102 |
+
"case is not yet supported in scikit-learn."
|
| 103 |
+
) from exc
|
| 104 |
+
if is_cat or uniques.shape[0] < grid_resolution:
|
| 105 |
+
# Use the unique values either because:
|
| 106 |
+
# - feature has low resolution use unique values
|
| 107 |
+
# - feature is categorical
|
| 108 |
+
axis = uniques
|
| 109 |
+
else:
|
| 110 |
+
# create axis based on percentiles and grid resolution
|
| 111 |
+
emp_percentiles = mquantiles(
|
| 112 |
+
_safe_indexing(X, feature, axis=1), prob=percentiles, axis=0
|
| 113 |
+
)
|
| 114 |
+
if np.allclose(emp_percentiles[0], emp_percentiles[1]):
|
| 115 |
+
raise ValueError(
|
| 116 |
+
"percentiles are too close to each other, "
|
| 117 |
+
"unable to build the grid. Please choose percentiles "
|
| 118 |
+
"that are further apart."
|
| 119 |
+
)
|
| 120 |
+
axis = np.linspace(
|
| 121 |
+
emp_percentiles[0],
|
| 122 |
+
emp_percentiles[1],
|
| 123 |
+
num=grid_resolution,
|
| 124 |
+
endpoint=True,
|
| 125 |
+
)
|
| 126 |
+
values.append(axis)
|
| 127 |
+
|
| 128 |
+
return cartesian(values), values
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
def _partial_dependence_recursion(est, grid, features):
|
| 132 |
+
"""Calculate partial dependence via the recursion method.
|
| 133 |
+
|
| 134 |
+
The recursion method is in particular enabled for tree-based estimators.
|
| 135 |
+
|
| 136 |
+
For each `grid` value, a weighted tree traversal is performed: if a split node
|
| 137 |
+
involves an input feature of interest, the corresponding left or right branch
|
| 138 |
+
is followed; otherwise both branches are followed, each branch being weighted
|
| 139 |
+
by the fraction of training samples that entered that branch. Finally, the
|
| 140 |
+
partial dependence is given by a weighted average of all the visited leaves
|
| 141 |
+
values.
|
| 142 |
+
|
| 143 |
+
This method is more efficient in terms of speed than the `'brute'` method
|
| 144 |
+
(:func:`~sklearn.inspection._partial_dependence._partial_dependence_brute`).
|
| 145 |
+
However, here, the partial dependence computation is done explicitly with the
|
| 146 |
+
`X` used during training of `est`.
|
| 147 |
+
|
| 148 |
+
Parameters
|
| 149 |
+
----------
|
| 150 |
+
est : BaseEstimator
|
| 151 |
+
A fitted estimator object implementing :term:`predict` or
|
| 152 |
+
:term:`decision_function`. Multioutput-multiclass classifiers are not
|
| 153 |
+
supported. Note that `'recursion'` is only supported for some tree-based
|
| 154 |
+
estimators (namely
|
| 155 |
+
:class:`~sklearn.ensemble.GradientBoostingClassifier`,
|
| 156 |
+
:class:`~sklearn.ensemble.GradientBoostingRegressor`,
|
| 157 |
+
:class:`~sklearn.ensemble.HistGradientBoostingClassifier`,
|
| 158 |
+
:class:`~sklearn.ensemble.HistGradientBoostingRegressor`,
|
| 159 |
+
:class:`~sklearn.tree.DecisionTreeRegressor`,
|
| 160 |
+
:class:`~sklearn.ensemble.RandomForestRegressor`,
|
| 161 |
+
).
|
| 162 |
+
|
| 163 |
+
grid : array-like of shape (n_points, n_target_features)
|
| 164 |
+
The grid of feature values for which the partial dependence is calculated.
|
| 165 |
+
Note that `n_points` is the number of points in the grid and `n_target_features`
|
| 166 |
+
is the number of features you are doing partial dependence at.
|
| 167 |
+
|
| 168 |
+
features : array-like of {int, str}
|
| 169 |
+
The feature (e.g. `[0]`) or pair of interacting features
|
| 170 |
+
(e.g. `[(0, 1)]`) for which the partial dependency should be computed.
|
| 171 |
+
|
| 172 |
+
Returns
|
| 173 |
+
-------
|
| 174 |
+
averaged_predictions : array-like of shape (n_targets, n_points)
|
| 175 |
+
The averaged predictions for the given `grid` of features values.
|
| 176 |
+
Note that `n_targets` is the number of targets (e.g. 1 for binary
|
| 177 |
+
classification, `n_tasks` for multi-output regression, and `n_classes` for
|
| 178 |
+
multiclass classification) and `n_points` is the number of points in the `grid`.
|
| 179 |
+
"""
|
| 180 |
+
averaged_predictions = est._compute_partial_dependence_recursion(grid, features)
|
| 181 |
+
if averaged_predictions.ndim == 1:
|
| 182 |
+
# reshape to (1, n_points) for consistency with
|
| 183 |
+
# _partial_dependence_brute
|
| 184 |
+
averaged_predictions = averaged_predictions.reshape(1, -1)
|
| 185 |
+
|
| 186 |
+
return averaged_predictions
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
def _partial_dependence_brute(
|
| 190 |
+
est, grid, features, X, response_method, sample_weight=None
|
| 191 |
+
):
|
| 192 |
+
"""Calculate partial dependence via the brute force method.
|
| 193 |
+
|
| 194 |
+
The brute method explicitly averages the predictions of an estimator over a
|
| 195 |
+
grid of feature values.
|
| 196 |
+
|
| 197 |
+
For each `grid` value, all the samples from `X` have their variables of
|
| 198 |
+
interest replaced by that specific `grid` value. The predictions are then made
|
| 199 |
+
and averaged across the samples.
|
| 200 |
+
|
| 201 |
+
This method is slower than the `'recursion'`
|
| 202 |
+
(:func:`~sklearn.inspection._partial_dependence._partial_dependence_recursion`)
|
| 203 |
+
version for estimators with this second option. However, with the `'brute'`
|
| 204 |
+
force method, the average will be done with the given `X` and not the `X`
|
| 205 |
+
used during training, as it is done in the `'recursion'` version. Therefore
|
| 206 |
+
the average can always accept `sample_weight` (even when the estimator was
|
| 207 |
+
fitted without).
|
| 208 |
+
|
| 209 |
+
Parameters
|
| 210 |
+
----------
|
| 211 |
+
est : BaseEstimator
|
| 212 |
+
A fitted estimator object implementing :term:`predict`,
|
| 213 |
+
:term:`predict_proba`, or :term:`decision_function`.
|
| 214 |
+
Multioutput-multiclass classifiers are not supported.
|
| 215 |
+
|
| 216 |
+
grid : array-like of shape (n_points, n_target_features)
|
| 217 |
+
The grid of feature values for which the partial dependence is calculated.
|
| 218 |
+
Note that `n_points` is the number of points in the grid and `n_target_features`
|
| 219 |
+
is the number of features you are doing partial dependence at.
|
| 220 |
+
|
| 221 |
+
features : array-like of {int, str}
|
| 222 |
+
The feature (e.g. `[0]`) or pair of interacting features
|
| 223 |
+
(e.g. `[(0, 1)]`) for which the partial dependency should be computed.
|
| 224 |
+
|
| 225 |
+
X : array-like of shape (n_samples, n_features)
|
| 226 |
+
`X` is used to generate values for the complement features. That is, for
|
| 227 |
+
each value in `grid`, the method will average the prediction of each
|
| 228 |
+
sample from `X` having that grid value for `features`.
|
| 229 |
+
|
| 230 |
+
response_method : {'auto', 'predict_proba', 'decision_function'}, \
|
| 231 |
+
default='auto'
|
| 232 |
+
Specifies whether to use :term:`predict_proba` or
|
| 233 |
+
:term:`decision_function` as the target response. For regressors
|
| 234 |
+
this parameter is ignored and the response is always the output of
|
| 235 |
+
:term:`predict`. By default, :term:`predict_proba` is tried first
|
| 236 |
+
and we revert to :term:`decision_function` if it doesn't exist.
|
| 237 |
+
|
| 238 |
+
sample_weight : array-like of shape (n_samples,), default=None
|
| 239 |
+
Sample weights are used to calculate weighted means when averaging the
|
| 240 |
+
model output. If `None`, then samples are equally weighted. Note that
|
| 241 |
+
`sample_weight` does not change the individual predictions.
|
| 242 |
+
|
| 243 |
+
Returns
|
| 244 |
+
-------
|
| 245 |
+
averaged_predictions : array-like of shape (n_targets, n_points)
|
| 246 |
+
The averaged predictions for the given `grid` of features values.
|
| 247 |
+
Note that `n_targets` is the number of targets (e.g. 1 for binary
|
| 248 |
+
classification, `n_tasks` for multi-output regression, and `n_classes` for
|
| 249 |
+
multiclass classification) and `n_points` is the number of points in the `grid`.
|
| 250 |
+
|
| 251 |
+
predictions : array-like
|
| 252 |
+
The predictions for the given `grid` of features values over the samples
|
| 253 |
+
from `X`. For non-multioutput regression and binary classification the
|
| 254 |
+
shape is `(n_instances, n_points)` and for multi-output regression and
|
| 255 |
+
multiclass classification the shape is `(n_targets, n_instances, n_points)`,
|
| 256 |
+
where `n_targets` is the number of targets (`n_tasks` for multi-output
|
| 257 |
+
regression, and `n_classes` for multiclass classification), `n_instances`
|
| 258 |
+
is the number of instances in `X`, and `n_points` is the number of points
|
| 259 |
+
in the `grid`.
|
| 260 |
+
"""
|
| 261 |
+
predictions = []
|
| 262 |
+
averaged_predictions = []
|
| 263 |
+
|
| 264 |
+
if response_method == "auto":
|
| 265 |
+
response_method = (
|
| 266 |
+
"predict" if is_regressor(est) else ["predict_proba", "decision_function"]
|
| 267 |
+
)
|
| 268 |
+
|
| 269 |
+
X_eval = X.copy()
|
| 270 |
+
for new_values in grid:
|
| 271 |
+
for i, variable in enumerate(features):
|
| 272 |
+
_safe_assign(X_eval, new_values[i], column_indexer=variable)
|
| 273 |
+
|
| 274 |
+
# Note: predictions is of shape
|
| 275 |
+
# (n_points,) for non-multioutput regressors
|
| 276 |
+
# (n_points, n_tasks) for multioutput regressors
|
| 277 |
+
# (n_points, 1) for the regressors in cross_decomposition (I think)
|
| 278 |
+
# (n_points, 2) for binary classification
|
| 279 |
+
# (n_points, n_classes) for multiclass classification
|
| 280 |
+
pred, _ = _get_response_values(est, X_eval, response_method=response_method)
|
| 281 |
+
|
| 282 |
+
predictions.append(pred)
|
| 283 |
+
# average over samples
|
| 284 |
+
averaged_predictions.append(np.average(pred, axis=0, weights=sample_weight))
|
| 285 |
+
|
| 286 |
+
n_samples = X.shape[0]
|
| 287 |
+
|
| 288 |
+
# reshape to (n_targets, n_instances, n_points) where n_targets is:
|
| 289 |
+
# - 1 for non-multioutput regression and binary classification (shape is
|
| 290 |
+
# already correct in those cases)
|
| 291 |
+
# - n_tasks for multi-output regression
|
| 292 |
+
# - n_classes for multiclass classification.
|
| 293 |
+
predictions = np.array(predictions).T
|
| 294 |
+
if is_regressor(est) and predictions.ndim == 2:
|
| 295 |
+
# non-multioutput regression, shape is (n_instances, n_points,)
|
| 296 |
+
predictions = predictions.reshape(n_samples, -1)
|
| 297 |
+
elif is_classifier(est) and predictions.shape[0] == 2:
|
| 298 |
+
# Binary classification, shape is (2, n_instances, n_points).
|
| 299 |
+
# we output the effect of **positive** class
|
| 300 |
+
predictions = predictions[1]
|
| 301 |
+
predictions = predictions.reshape(n_samples, -1)
|
| 302 |
+
|
| 303 |
+
# reshape averaged_predictions to (n_targets, n_points) where n_targets is:
|
| 304 |
+
# - 1 for non-multioutput regression and binary classification (shape is
|
| 305 |
+
# already correct in those cases)
|
| 306 |
+
# - n_tasks for multi-output regression
|
| 307 |
+
# - n_classes for multiclass classification.
|
| 308 |
+
averaged_predictions = np.array(averaged_predictions).T
|
| 309 |
+
if is_regressor(est) and averaged_predictions.ndim == 1:
|
| 310 |
+
# non-multioutput regression, shape is (n_points,)
|
| 311 |
+
averaged_predictions = averaged_predictions.reshape(1, -1)
|
| 312 |
+
elif is_classifier(est) and averaged_predictions.shape[0] == 2:
|
| 313 |
+
# Binary classification, shape is (2, n_points).
|
| 314 |
+
# we output the effect of **positive** class
|
| 315 |
+
averaged_predictions = averaged_predictions[1]
|
| 316 |
+
averaged_predictions = averaged_predictions.reshape(1, -1)
|
| 317 |
+
|
| 318 |
+
return averaged_predictions, predictions
|
| 319 |
+
|
| 320 |
+
|
| 321 |
+
@validate_params(
    {
        "estimator": [
            HasMethods(["fit", "predict"]),
            HasMethods(["fit", "predict_proba"]),
            HasMethods(["fit", "decision_function"]),
        ],
        "X": ["array-like", "sparse matrix"],
        "features": ["array-like", Integral, str],
        "sample_weight": ["array-like", None],
        "categorical_features": ["array-like", None],
        "feature_names": ["array-like", None],
        "response_method": [StrOptions({"auto", "predict_proba", "decision_function"})],
        "percentiles": [tuple],
        "grid_resolution": [Interval(Integral, 1, None, closed="left")],
        "method": [StrOptions({"auto", "recursion", "brute"})],
        "kind": [StrOptions({"average", "individual", "both"})],
    },
    prefer_skip_nested_validation=True,
)
def partial_dependence(
    estimator,
    X,
    features,
    *,
    sample_weight=None,
    categorical_features=None,
    feature_names=None,
    response_method="auto",
    percentiles=(0.05, 0.95),
    grid_resolution=100,
    method="auto",
    kind="average",
):
    """Partial dependence of ``features``.

    Partial dependence of a feature (or a set of features) corresponds to
    the average response of an estimator for each possible value of the
    feature.

    Read more in the :ref:`User Guide <partial_dependence>`.

    .. warning::

        For :class:`~sklearn.ensemble.GradientBoostingClassifier` and
        :class:`~sklearn.ensemble.GradientBoostingRegressor`, the
        `'recursion'` method (used by default) will not account for the `init`
        predictor of the boosting process. In practice, this will produce
        the same values as `'brute'` up to a constant offset in the target
        response, provided that `init` is a constant estimator (which is the
        default). However, if `init` is not a constant estimator, the
        partial dependence values are incorrect for `'recursion'` because the
        offset will be sample-dependent. It is preferable to use the `'brute'`
        method. Note that this only applies to
        :class:`~sklearn.ensemble.GradientBoostingClassifier` and
        :class:`~sklearn.ensemble.GradientBoostingRegressor`, not to
        :class:`~sklearn.ensemble.HistGradientBoostingClassifier` and
        :class:`~sklearn.ensemble.HistGradientBoostingRegressor`.

    Parameters
    ----------
    estimator : BaseEstimator
        A fitted estimator object implementing :term:`predict`,
        :term:`predict_proba`, or :term:`decision_function`.
        Multioutput-multiclass classifiers are not supported.

    X : {array-like, sparse matrix or dataframe} of shape (n_samples, n_features)
        ``X`` is used to generate a grid of values for the target
        ``features`` (where the partial dependence will be evaluated), and
        also to generate values for the complement features when the
        `method` is 'brute'.

    features : array-like of {int, str, bool} or int or str
        The feature (e.g. `[0]`) or pair of interacting features
        (e.g. `[(0, 1)]`) for which the partial dependency should be computed.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights are used to calculate weighted means when averaging the
        model output. If `None`, then samples are equally weighted. If
        `sample_weight` is not `None`, then `method` will be set to `'brute'`.
        Note that `sample_weight` is ignored for `kind='individual'`.

        .. versionadded:: 1.3

    categorical_features : array-like of shape (n_features,) or shape \
            (n_categorical_features,), dtype={bool, int, str}, default=None
        Indicates the categorical features.

        - `None`: no feature will be considered categorical;
        - boolean array-like: boolean mask of shape `(n_features,)`
          indicating which features are categorical. Thus, this array has
          the same shape has `X.shape[1]`;
        - integer or string array-like: integer indices or strings
          indicating categorical features.

        .. versionadded:: 1.2

    feature_names : array-like of shape (n_features,), dtype=str, default=None
        Name of each feature; `feature_names[i]` holds the name of the feature
        with index `i`.
        By default, the name of the feature corresponds to their numerical
        index for NumPy array and their column name for pandas dataframe.

        .. versionadded:: 1.2

    response_method : {'auto', 'predict_proba', 'decision_function'}, \
            default='auto'
        Specifies whether to use :term:`predict_proba` or
        :term:`decision_function` as the target response. For regressors
        this parameter is ignored and the response is always the output of
        :term:`predict`. By default, :term:`predict_proba` is tried first
        and we revert to :term:`decision_function` if it doesn't exist. If
        ``method`` is 'recursion', the response is always the output of
        :term:`decision_function`.

    percentiles : tuple of float, default=(0.05, 0.95)
        The lower and upper percentile used to create the extreme values
        for the grid. Must be in [0, 1].

    grid_resolution : int, default=100
        The number of equally spaced points on the grid, for each target
        feature.

    method : {'auto', 'recursion', 'brute'}, default='auto'
        The method used to calculate the averaged predictions:

        - `'recursion'` is only supported for some tree-based estimators
          (namely
          :class:`~sklearn.ensemble.GradientBoostingClassifier`,
          :class:`~sklearn.ensemble.GradientBoostingRegressor`,
          :class:`~sklearn.ensemble.HistGradientBoostingClassifier`,
          :class:`~sklearn.ensemble.HistGradientBoostingRegressor`,
          :class:`~sklearn.tree.DecisionTreeRegressor`,
          :class:`~sklearn.ensemble.RandomForestRegressor`,
          ) when `kind='average'`.
          This is more efficient in terms of speed.
          With this method, the target response of a
          classifier is always the decision function, not the predicted
          probabilities. Since the `'recursion'` method implicitly computes
          the average of the Individual Conditional Expectation (ICE) by
          design, it is not compatible with ICE and thus `kind` must be
          `'average'`.

        - `'brute'` is supported for any estimator, but is more
          computationally intensive.

        - `'auto'`: the `'recursion'` is used for estimators that support it,
          and `'brute'` is used otherwise. If `sample_weight` is not `None`,
          then `'brute'` is used regardless of the estimator.

        Please see :ref:`this note <pdp_method_differences>` for
        differences between the `'brute'` and `'recursion'` method.

    kind : {'average', 'individual', 'both'}, default='average'
        Whether to return the partial dependence averaged across all the
        samples in the dataset or one value per sample or both.
        See Returns below.

        Note that the fast `method='recursion'` option is only available for
        `kind='average'` and `sample_weights=None`. Computing individual
        dependencies and doing weighted averages requires using the slower
        `method='brute'`.

        .. versionadded:: 0.24

    Returns
    -------
    predictions : :class:`~sklearn.utils.Bunch`
        Dictionary-like object, with the following attributes.

        individual : ndarray of shape (n_outputs, n_instances, \
                len(values[0]), len(values[1]), ...)
            The predictions for all the points in the grid for all
            samples in X. This is also known as Individual
            Conditional Expectation (ICE).
            Only available when `kind='individual'` or `kind='both'`.

        average : ndarray of shape (n_outputs, len(values[0]), \
                len(values[1]), ...)
            The predictions for all the points in the grid, averaged
            over all samples in X (or over the training data if
            `method` is 'recursion').
            Only available when `kind='average'` or `kind='both'`.

        grid_values : seq of 1d ndarrays
            The values with which the grid has been created. The generated
            grid is a cartesian product of the arrays in `grid_values` where
            `len(grid_values) == len(features)`. The size of each array
            `grid_values[j]` is either `grid_resolution`, or the number of
            unique values in `X[:, j]`, whichever is smaller.

            .. versionadded:: 1.3

        `n_outputs` corresponds to the number of classes in a multi-class
        setting, or to the number of tasks for multi-output regression.
        For classical regression and binary classification `n_outputs==1`.
        `n_values_feature_j` corresponds to the size `grid_values[j]`.

    See Also
    --------
    PartialDependenceDisplay.from_estimator : Plot Partial Dependence.
    PartialDependenceDisplay : Partial Dependence visualization.

    Examples
    --------
    >>> X = [[0, 0, 2], [1, 0, 0]]
    >>> y = [0, 1]
    >>> from sklearn.ensemble import GradientBoostingClassifier
    >>> gb = GradientBoostingClassifier(random_state=0).fit(X, y)
    >>> partial_dependence(gb, features=[0], X=X, percentiles=(0, 1),
    ...                    grid_resolution=2) # doctest: +SKIP
    (array([[-4.52..., 4.52...]]), [array([ 0., 1.])])
    """
    check_is_fitted(estimator)

    if not (is_classifier(estimator) or is_regressor(estimator)):
        raise ValueError("'estimator' must be a fitted regressor or classifier.")

    if is_classifier(estimator) and isinstance(estimator.classes_[0], np.ndarray):
        raise ValueError("Multiclass-multioutput estimators are not supported")

    # Use check_array only on lists and other non-array-likes / sparse. Do not
    # convert DataFrame into a NumPy array.
    if not (hasattr(X, "__array__") or sparse.issparse(X)):
        X = check_array(X, ensure_all_finite="allow-nan", dtype=object)

    if is_regressor(estimator) and response_method != "auto":
        raise ValueError(
            "The response_method parameter is ignored for regressors and "
            "must be 'auto'."
        )

    if kind != "average":
        if method == "recursion":
            raise ValueError(
                "The 'recursion' method only applies when 'kind' is set to 'average'"
            )
        # ICE output requires per-sample predictions, which only 'brute' computes.
        method = "brute"

    if method == "recursion" and sample_weight is not None:
        raise ValueError(
            "The 'recursion' method can only be applied when sample_weight is None."
        )

    if method == "auto":
        if sample_weight is not None:
            # Weighted averaging is only implemented in the 'brute' path.
            method = "brute"
        elif isinstance(estimator, BaseGradientBoosting) and estimator.init is None:
            method = "recursion"
        elif isinstance(
            estimator,
            (BaseHistGradientBoosting, DecisionTreeRegressor, RandomForestRegressor),
        ):
            method = "recursion"
        else:
            method = "brute"

    if method == "recursion":
        if not isinstance(
            estimator,
            (
                BaseGradientBoosting,
                BaseHistGradientBoosting,
                DecisionTreeRegressor,
                RandomForestRegressor,
            ),
        ):
            # FIX: "HistGradientBoostingRegressor" was listed twice in this
            # tuple, duplicating the class name in the error message below.
            supported_classes_recursion = (
                "GradientBoostingClassifier",
                "GradientBoostingRegressor",
                "HistGradientBoostingClassifier",
                "HistGradientBoostingRegressor",
                "DecisionTreeRegressor",
                "RandomForestRegressor",
            )
            raise ValueError(
                "Only the following estimators support the 'recursion' "
                "method: {}. Try using method='brute'.".format(
                    ", ".join(supported_classes_recursion)
                )
            )
        if response_method == "auto":
            response_method = "decision_function"

        if response_method != "decision_function":
            raise ValueError(
                "With the 'recursion' method, the response_method must be "
                "'decision_function'. Got {}.".format(response_method)
            )

    if sample_weight is not None:
        sample_weight = _check_sample_weight(sample_weight, X)

    if _determine_key_type(features, accept_slice=False) == "int":
        # _get_column_indices() supports negative indexing. Here, we limit
        # the indexing to be positive. The upper bound will be checked
        # by _get_column_indices()
        if np.any(np.less(features, 0)):
            raise ValueError("all features must be in [0, {}]".format(X.shape[1] - 1))

    features_indices = np.asarray(
        _get_column_indices(X, features), dtype=np.intp, order="C"
    ).ravel()

    feature_names = _check_feature_names(X, feature_names)

    n_features = X.shape[1]
    if categorical_features is None:
        is_categorical = [False] * len(features_indices)
    else:
        categorical_features = np.asarray(categorical_features)
        if categorical_features.dtype.kind == "b":
            # categorical features provided as a list of boolean
            if categorical_features.size != n_features:
                raise ValueError(
                    "When `categorical_features` is a boolean array-like, "
                    "the array should be of shape (n_features,). Got "
                    f"{categorical_features.size} elements while `X` contains "
                    f"{n_features} features."
                )
            is_categorical = [categorical_features[idx] for idx in features_indices]
        elif categorical_features.dtype.kind in ("i", "O", "U"):
            # categorical features provided as a list of indices or feature names
            categorical_features_idx = [
                _get_feature_index(cat, feature_names=feature_names)
                for cat in categorical_features
            ]
            is_categorical = [
                idx in categorical_features_idx for idx in features_indices
            ]
        else:
            raise ValueError(
                "Expected `categorical_features` to be an array-like of boolean,"
                f" integer, or string. Got {categorical_features.dtype} instead."
            )

    grid, values = _grid_from_X(
        _safe_indexing(X, features_indices, axis=1),
        percentiles,
        is_categorical,
        grid_resolution,
    )

    if method == "brute":
        averaged_predictions, predictions = _partial_dependence_brute(
            estimator, grid, features_indices, X, response_method, sample_weight
        )

        # reshape predictions to
        # (n_outputs, n_instances, n_values_feature_0, n_values_feature_1, ...)
        predictions = predictions.reshape(
            -1, X.shape[0], *[val.shape[0] for val in values]
        )
    else:
        averaged_predictions = _partial_dependence_recursion(
            estimator, grid, features_indices
        )

    # reshape averaged_predictions to
    # (n_outputs, n_values_feature_0, n_values_feature_1, ...)
    averaged_predictions = averaged_predictions.reshape(
        -1, *[val.shape[0] for val in values]
    )
    pdp_results = Bunch(grid_values=values)

    if kind == "average":
        pdp_results["average"] = averaged_predictions
    elif kind == "individual":
        pdp_results["individual"] = predictions
    else:  # kind='both'
        pdp_results["average"] = averaged_predictions
        pdp_results["individual"] = predictions

    return pdp_results
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_pd_utils.py
ADDED
|
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Authors: The scikit-learn developers
|
| 2 |
+
# SPDX-License-Identifier: BSD-3-Clause
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def _check_feature_names(X, feature_names=None):
|
| 6 |
+
"""Check feature names.
|
| 7 |
+
|
| 8 |
+
Parameters
|
| 9 |
+
----------
|
| 10 |
+
X : array-like of shape (n_samples, n_features)
|
| 11 |
+
Input data.
|
| 12 |
+
|
| 13 |
+
feature_names : None or array-like of shape (n_names,), dtype=str
|
| 14 |
+
Feature names to check or `None`.
|
| 15 |
+
|
| 16 |
+
Returns
|
| 17 |
+
-------
|
| 18 |
+
feature_names : list of str
|
| 19 |
+
Feature names validated. If `feature_names` is `None`, then a list of
|
| 20 |
+
feature names is provided, i.e. the column names of a pandas dataframe
|
| 21 |
+
or a generic list of feature names (e.g. `["x0", "x1", ...]`) for a
|
| 22 |
+
NumPy array.
|
| 23 |
+
"""
|
| 24 |
+
if feature_names is None:
|
| 25 |
+
if hasattr(X, "columns") and hasattr(X.columns, "tolist"):
|
| 26 |
+
# get the column names for a pandas dataframe
|
| 27 |
+
feature_names = X.columns.tolist()
|
| 28 |
+
else:
|
| 29 |
+
# define a list of numbered indices for a numpy array
|
| 30 |
+
feature_names = [f"x{i}" for i in range(X.shape[1])]
|
| 31 |
+
elif hasattr(feature_names, "tolist"):
|
| 32 |
+
# convert numpy array or pandas index to a list
|
| 33 |
+
feature_names = feature_names.tolist()
|
| 34 |
+
if len(set(feature_names)) != len(feature_names):
|
| 35 |
+
raise ValueError("feature_names should not contain duplicates.")
|
| 36 |
+
|
| 37 |
+
return feature_names
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def _get_feature_index(fx, feature_names=None):
|
| 41 |
+
"""Get feature index.
|
| 42 |
+
|
| 43 |
+
Parameters
|
| 44 |
+
----------
|
| 45 |
+
fx : int or str
|
| 46 |
+
Feature index or name.
|
| 47 |
+
|
| 48 |
+
feature_names : list of str, default=None
|
| 49 |
+
All feature names from which to search the indices.
|
| 50 |
+
|
| 51 |
+
Returns
|
| 52 |
+
-------
|
| 53 |
+
idx : int
|
| 54 |
+
Feature index.
|
| 55 |
+
"""
|
| 56 |
+
if isinstance(fx, str):
|
| 57 |
+
if feature_names is None:
|
| 58 |
+
raise ValueError(
|
| 59 |
+
f"Cannot plot partial dependence for feature {fx!r} since "
|
| 60 |
+
"the list of feature names was not provided, neither as "
|
| 61 |
+
"column names of a pandas data-frame nor via the feature_names "
|
| 62 |
+
"parameter."
|
| 63 |
+
)
|
| 64 |
+
try:
|
| 65 |
+
return feature_names.index(fx)
|
| 66 |
+
except ValueError as e:
|
| 67 |
+
raise ValueError(f"Feature {fx!r} not in feature_names") from e
|
| 68 |
+
return fx
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_permutation_importance.py
ADDED
|
@@ -0,0 +1,312 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Permutation importance for estimators."""
|
| 2 |
+
|
| 3 |
+
# Authors: The scikit-learn developers
|
| 4 |
+
# SPDX-License-Identifier: BSD-3-Clause
|
| 5 |
+
|
| 6 |
+
import numbers
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
|
| 10 |
+
from ..ensemble._bagging import _generate_indices
|
| 11 |
+
from ..metrics import check_scoring, get_scorer_names
|
| 12 |
+
from ..model_selection._validation import _aggregate_score_dicts
|
| 13 |
+
from ..utils import Bunch, _safe_indexing, check_array, check_random_state
|
| 14 |
+
from ..utils._param_validation import (
|
| 15 |
+
HasMethods,
|
| 16 |
+
Integral,
|
| 17 |
+
Interval,
|
| 18 |
+
RealNotInt,
|
| 19 |
+
StrOptions,
|
| 20 |
+
validate_params,
|
| 21 |
+
)
|
| 22 |
+
from ..utils.parallel import Parallel, delayed
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def _weights_scorer(scorer, estimator, X, y, sample_weight):
|
| 26 |
+
if sample_weight is not None:
|
| 27 |
+
return scorer(estimator, X, y, sample_weight=sample_weight)
|
| 28 |
+
return scorer(estimator, X, y)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def _calculate_permutation_scores(
    estimator,
    X,
    y,
    sample_weight,
    col_idx,
    random_state,
    n_repeats,
    scorer,
    max_samples,
):
    """Calculate score when `col_idx` is permuted.

    Repeatedly shuffles column `col_idx` of (a working copy of) `X` and
    re-evaluates `scorer` on the permuted data, returning one score per
    repeat (or a dict of score arrays for multi-metric scorers).
    """
    random_state = check_random_state(random_state)

    # Work on a copy of X to ensure thread-safety in case of threading based
    # parallelism. Furthermore, making a copy is also useful when the joblib
    # backend is 'loky' (default) or the old 'multiprocessing': in those cases,
    # if X is large it will be automatically be backed by a readonly memory map
    # (memmap). X.copy() on the other hand is always guaranteed to return a
    # writable data-structure whose columns can be shuffled inplace.
    if max_samples < X.shape[0]:
        # Subsample rows (without replacement) to bound the cost of scoring;
        # y and sample_weight must be subset with the same row indices.
        row_indices = _generate_indices(
            random_state=random_state,
            bootstrap=False,
            n_population=X.shape[0],
            n_samples=max_samples,
        )
        X_permuted = _safe_indexing(X, row_indices, axis=0)
        y = _safe_indexing(y, row_indices, axis=0)
        if sample_weight is not None:
            sample_weight = _safe_indexing(sample_weight, row_indices, axis=0)
    else:
        X_permuted = X.copy()

    scores = []
    # Reused permutation buffer; reshuffled in place on every repeat.
    shuffling_idx = np.arange(X_permuted.shape[0])
    for _ in range(n_repeats):
        random_state.shuffle(shuffling_idx)
        if hasattr(X_permuted, "iloc"):
            # pandas DataFrame: realign the shuffled column's index before
            # assignment, otherwise pandas would undo the shuffle by label.
            col = X_permuted.iloc[shuffling_idx, col_idx]
            col.index = X_permuted.index
            X_permuted[X_permuted.columns[col_idx]] = col
        else:
            # ndarray: shuffle the column in place via fancy indexing.
            X_permuted[:, col_idx] = X_permuted[shuffling_idx, col_idx]
        scores.append(_weights_scorer(scorer, estimator, X_permuted, y, sample_weight))

    if isinstance(scores[0], dict):
        # Multi-metric scorer: merge the per-repeat dicts into one dict of
        # arrays keyed by metric name.
        scores = _aggregate_score_dicts(scores)
    else:
        scores = np.array(scores)

    return scores
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def _create_importances_bunch(baseline_score, permuted_score):
    """Package permutation importances as the decrease in score.

    Parameters
    ----------
    baseline_score : ndarray of shape (n_features,)
        The baseline score without permutation.
    permuted_score : ndarray of shape (n_features, n_repeats)
        The permuted scores for the `n` repetitions.

    Returns
    -------
    importances : :class:`~sklearn.utils.Bunch`
        Dictionary-like object with `importances_mean` (mean over repeats),
        `importances_std` (standard deviation over repeats) and
        `importances` (raw per-repeat scores), each per feature.
    """
    # Importance of a feature is how much the score drops when it is permuted.
    drops = baseline_score - permuted_score
    return Bunch(
        importances_mean=drops.mean(axis=1),
        importances_std=drops.std(axis=1),
        importances=drops,
    )
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
@validate_params(
    {
        "estimator": [HasMethods(["fit"])],
        "X": ["array-like"],
        "y": ["array-like", None],
        "scoring": [
            StrOptions(set(get_scorer_names())),
            callable,
            list,
            tuple,
            dict,
            None,
        ],
        "n_repeats": [Interval(Integral, 1, None, closed="left")],
        "n_jobs": [Integral, None],
        "random_state": ["random_state"],
        "sample_weight": ["array-like", None],
        "max_samples": [
            Interval(Integral, 1, None, closed="left"),
            Interval(RealNotInt, 0, 1, closed="right"),
        ],
    },
    prefer_skip_nested_validation=True,
)
def permutation_importance(
    estimator,
    X,
    y,
    *,
    scoring=None,
    n_repeats=5,
    n_jobs=None,
    random_state=None,
    sample_weight=None,
    max_samples=1.0,
):
    """Permutation importance for feature evaluation [BRE]_.

    The :term:`estimator` is required to be a fitted estimator. `X` can be the
    data set used to train the estimator or a hold-out set. The permutation
    importance of a feature is calculated as follows. First, a baseline metric,
    defined by :term:`scoring`, is evaluated on a (potentially different)
    dataset defined by the `X`. Next, a feature column from the validation set
    is permuted and the metric is evaluated again. The permutation importance
    is defined to be the difference between the baseline metric and metric from
    permutating the feature column.

    Read more in the :ref:`User Guide <permutation_importance>`.

    Parameters
    ----------
    estimator : object
        An estimator that has already been :term:`fitted` and is compatible
        with :term:`scorer`.

    X : ndarray or DataFrame, shape (n_samples, n_features)
        Data on which permutation importance will be computed.

    y : array-like or None, shape (n_samples, ) or (n_samples, n_classes)
        Targets for supervised or `None` for unsupervised.

    scoring : str, callable, list, tuple, or dict, default=None
        Scorer to use.
        If `scoring` represents a single score, one can use:

        - a single string (see :ref:`scoring_parameter`);
        - a callable (see :ref:`scoring_callable`) that returns a single value.

        If `scoring` represents multiple scores, one can use:

        - a list or tuple of unique strings;
        - a callable returning a dictionary where the keys are the metric
          names and the values are the metric scores;
        - a dictionary with metric names as keys and callables a values.

        Passing multiple scores to `scoring` is more efficient than calling
        `permutation_importance` for each of the scores as it reuses
        predictions to avoid redundant computation.

        If None, the estimator's default scorer is used.

    n_repeats : int, default=5
        Number of times to permute a feature.

    n_jobs : int or None, default=None
        Number of jobs to run in parallel. The computation is done by computing
        permutation score for each columns and parallelized over the columns.
        `None` means 1 unless in a :obj:`joblib.parallel_backend` context.
        `-1` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    random_state : int, RandomState instance, default=None
        Pseudo-random number generator to control the permutations of each
        feature.
        Pass an int to get reproducible results across function calls.
        See :term:`Glossary <random_state>`.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights used in scoring.

        .. versionadded:: 0.24

    max_samples : int or float, default=1.0
        The number of samples to draw from X to compute feature importance
        in each repeat (without replacement).

        - If int, then draw `max_samples` samples.
        - If float, then draw `max_samples * X.shape[0]` samples.
        - If `max_samples` is equal to `1.0` or `X.shape[0]`, all samples
          will be used.

        While using this option may provide less accurate importance estimates,
        it keeps the method tractable when evaluating feature importance on
        large datasets. In combination with `n_repeats`, this allows to control
        the computational speed vs statistical accuracy trade-off of this method.

        .. versionadded:: 1.0

    Returns
    -------
    result : :class:`~sklearn.utils.Bunch` or dict of such instances
        Dictionary-like object, with the following attributes.

        importances_mean : ndarray of shape (n_features, )
            Mean of feature importance over `n_repeats`.
        importances_std : ndarray of shape (n_features, )
            Standard deviation over `n_repeats`.
        importances : ndarray of shape (n_features, n_repeats)
            Raw permutation importance scores.

        If there are multiple scoring metrics in the scoring parameter
        `result` is a dict with scorer names as keys (e.g. 'roc_auc') and
        `Bunch` objects like above as values.

    References
    ----------
    .. [BRE] :doi:`L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32,
        2001. <10.1023/A:1010933404324>`

    Examples
    --------
    >>> from sklearn.linear_model import LogisticRegression
    >>> from sklearn.inspection import permutation_importance
    >>> X = [[1, 9, 9],[1, 9, 9],[1, 9, 9],
    ...      [0, 9, 9],[0, 9, 9],[0, 9, 9]]
    >>> y = [1, 1, 1, 0, 0, 0]
    >>> clf = LogisticRegression().fit(X, y)
    >>> result = permutation_importance(clf, X, y, n_repeats=10,
    ...                                 random_state=0)
    >>> result.importances_mean
    array([0.4666..., 0.       , 0.       ])
    >>> result.importances_std
    array([0.2211..., 0.       , 0.       ])
    """
    # DataFrames are kept as-is so column dtypes and labels survive the
    # per-column permutation; everything else is validated into an ndarray.
    if not hasattr(X, "iloc"):
        X = check_array(X, ensure_all_finite="allow-nan", dtype=None)

    # Precompute random seed from the random state to be used
    # to get a fresh independent RandomState instance for each
    # parallel call to _calculate_permutation_scores, irrespective of
    # the fact that variables are shared or not depending on the active
    # joblib backend (sequential, thread-based or process-based).
    random_state = check_random_state(random_state)
    random_seed = random_state.randint(np.iinfo(np.int32).max + 1)

    # A float max_samples is a fraction of the rows; an int must not exceed
    # the number of rows.
    if not isinstance(max_samples, numbers.Integral):
        max_samples = int(max_samples * X.shape[0])
    elif max_samples > X.shape[0]:
        raise ValueError("max_samples must be <= n_samples")

    scorer = check_scoring(estimator, scoring=scoring)
    # Score on the unpermuted data once; importances are drops from this.
    baseline_score = _weights_scorer(scorer, estimator, X, y, sample_weight)

    # One parallel task per feature column; each task reshuffles only its
    # own column, n_repeats times.
    scores = Parallel(n_jobs=n_jobs)(
        delayed(_calculate_permutation_scores)(
            estimator,
            X,
            y,
            sample_weight,
            col_idx,
            random_seed,
            n_repeats,
            scorer,
            max_samples,
        )
        for col_idx in range(X.shape[1])
    )

    if isinstance(baseline_score, dict):
        # Multi-metric scoring: build one Bunch per metric name.
        return {
            name: _create_importances_bunch(
                baseline_score[name],
                # unpack the permuted scores
                np.array([scores[col_idx][name] for col_idx in range(X.shape[1])]),
            )
            for name in baseline_score
        }
    else:
        return _create_importances_bunch(baseline_score, np.array(scores))
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_plot/__init__.py
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Authors: The scikit-learn developers
|
| 2 |
+
# SPDX-License-Identifier: BSD-3-Clause
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_plot/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (183 Bytes). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_plot/__pycache__/decision_boundary.cpython-310.pyc
ADDED
|
Binary file (13 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_plot/__pycache__/partial_dependence.cpython-310.pyc
ADDED
|
Binary file (46 kB). View file
|
|
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_plot/decision_boundary.py
ADDED
|
@@ -0,0 +1,416 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Authors: The scikit-learn developers
|
| 2 |
+
# SPDX-License-Identifier: BSD-3-Clause
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
|
| 6 |
+
from ...base import is_regressor
|
| 7 |
+
from ...preprocessing import LabelEncoder
|
| 8 |
+
from ...utils import _safe_indexing
|
| 9 |
+
from ...utils._optional_dependencies import check_matplotlib_support
|
| 10 |
+
from ...utils._response import _get_response_values
|
| 11 |
+
from ...utils._set_output import _get_adapter_from_container
|
| 12 |
+
from ...utils.validation import (
|
| 13 |
+
_is_arraylike_not_scalar,
|
| 14 |
+
_is_pandas_df,
|
| 15 |
+
_is_polars_df,
|
| 16 |
+
_num_features,
|
| 17 |
+
check_is_fitted,
|
| 18 |
+
)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def _check_boundary_response_method(estimator, response_method, class_of_interest):
|
| 22 |
+
"""Validate the response methods to be used with the fitted estimator.
|
| 23 |
+
|
| 24 |
+
Parameters
|
| 25 |
+
----------
|
| 26 |
+
estimator : object
|
| 27 |
+
Fitted estimator to check.
|
| 28 |
+
|
| 29 |
+
response_method : {'auto', 'predict_proba', 'decision_function', 'predict'}
|
| 30 |
+
Specifies whether to use :term:`predict_proba`,
|
| 31 |
+
:term:`decision_function`, :term:`predict` as the target response.
|
| 32 |
+
If set to 'auto', the response method is tried in the following order:
|
| 33 |
+
:term:`decision_function`, :term:`predict_proba`, :term:`predict`.
|
| 34 |
+
|
| 35 |
+
class_of_interest : int, float, bool, str or None
|
| 36 |
+
The class considered when plotting the decision. Cannot be None if
|
| 37 |
+
multiclass and `response_method` is 'predict_proba' or 'decision_function'.
|
| 38 |
+
|
| 39 |
+
.. versionadded:: 1.4
|
| 40 |
+
|
| 41 |
+
Returns
|
| 42 |
+
-------
|
| 43 |
+
prediction_method : list of str or str
|
| 44 |
+
The name or list of names of the response methods to use.
|
| 45 |
+
"""
|
| 46 |
+
has_classes = hasattr(estimator, "classes_")
|
| 47 |
+
if has_classes and _is_arraylike_not_scalar(estimator.classes_[0]):
|
| 48 |
+
msg = "Multi-label and multi-output multi-class classifiers are not supported"
|
| 49 |
+
raise ValueError(msg)
|
| 50 |
+
|
| 51 |
+
if has_classes and len(estimator.classes_) > 2:
|
| 52 |
+
if response_method not in {"auto", "predict"} and class_of_interest is None:
|
| 53 |
+
msg = (
|
| 54 |
+
"Multiclass classifiers are only supported when `response_method` is "
|
| 55 |
+
"'predict' or 'auto'. Else you must provide `class_of_interest` to "
|
| 56 |
+
"plot the decision boundary of a specific class."
|
| 57 |
+
)
|
| 58 |
+
raise ValueError(msg)
|
| 59 |
+
prediction_method = "predict" if response_method == "auto" else response_method
|
| 60 |
+
elif response_method == "auto":
|
| 61 |
+
if is_regressor(estimator):
|
| 62 |
+
prediction_method = "predict"
|
| 63 |
+
else:
|
| 64 |
+
prediction_method = ["decision_function", "predict_proba", "predict"]
|
| 65 |
+
else:
|
| 66 |
+
prediction_method = response_method
|
| 67 |
+
|
| 68 |
+
return prediction_method
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
class DecisionBoundaryDisplay:
    """Decisions boundary visualization.

    It is recommended to use
    :func:`~sklearn.inspection.DecisionBoundaryDisplay.from_estimator`
    to create a :class:`DecisionBoundaryDisplay`. All parameters are stored as
    attributes.

    Read more in the :ref:`User Guide <visualizations>`.

    .. versionadded:: 1.1

    Parameters
    ----------
    xx0 : ndarray of shape (grid_resolution, grid_resolution)
        First output of :func:`meshgrid <numpy.meshgrid>`.

    xx1 : ndarray of shape (grid_resolution, grid_resolution)
        Second output of :func:`meshgrid <numpy.meshgrid>`.

    response : ndarray of shape (grid_resolution, grid_resolution)
        Values of the response function.

    xlabel : str, default=None
        Default label to place on x axis.

    ylabel : str, default=None
        Default label to place on y axis.

    Attributes
    ----------
    surface_ : matplotlib `QuadContourSet` or `QuadMesh`
        If `plot_method` is 'contour' or 'contourf', `surface_` is a
        :class:`QuadContourSet <matplotlib.contour.QuadContourSet>`. If
        `plot_method` is 'pcolormesh', `surface_` is a
        :class:`QuadMesh <matplotlib.collections.QuadMesh>`.

    ax_ : matplotlib Axes
        Axes with decision boundary.

    figure_ : matplotlib Figure
        Figure containing the decision boundary.

    See Also
    --------
    DecisionBoundaryDisplay.from_estimator : Plot decision boundary given an estimator.

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> import numpy as np
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.inspection import DecisionBoundaryDisplay
    >>> from sklearn.tree import DecisionTreeClassifier
    >>> iris = load_iris()
    >>> feature_1, feature_2 = np.meshgrid(
    ...     np.linspace(iris.data[:, 0].min(), iris.data[:, 0].max()),
    ...     np.linspace(iris.data[:, 1].min(), iris.data[:, 1].max())
    ... )
    >>> grid = np.vstack([feature_1.ravel(), feature_2.ravel()]).T
    >>> tree = DecisionTreeClassifier().fit(iris.data[:, :2], iris.target)
    >>> y_pred = np.reshape(tree.predict(grid), feature_1.shape)
    >>> display = DecisionBoundaryDisplay(
    ...     xx0=feature_1, xx1=feature_2, response=y_pred
    ... )
    >>> display.plot()
    <...>
    >>> display.ax_.scatter(
    ...     iris.data[:, 0], iris.data[:, 1], c=iris.target, edgecolor="black"
    ... )
    <...>
    >>> plt.show()
    """

    def __init__(self, *, xx0, xx1, response, xlabel=None, ylabel=None):
        # Grid coordinates (meshgrid outputs) and the response evaluated on
        # that grid; stored verbatim so `plot` can be re-invoked at will.
        self.xx0 = xx0
        self.xx1 = xx1
        self.response = response
        self.xlabel = xlabel
        self.ylabel = ylabel

    def plot(self, plot_method="contourf", ax=None, xlabel=None, ylabel=None, **kwargs):
        """Plot visualization.

        Parameters
        ----------
        plot_method : {'contourf', 'contour', 'pcolormesh'}, default='contourf'
            Plotting method to call when plotting the response. Please refer
            to the following matplotlib documentation for details:
            :func:`contourf <matplotlib.pyplot.contourf>`,
            :func:`contour <matplotlib.pyplot.contour>`,
            :func:`pcolormesh <matplotlib.pyplot.pcolormesh>`.

        ax : Matplotlib axes, default=None
            Axes object to plot on. If `None`, a new figure and axes is
            created.

        xlabel : str, default=None
            Overwrite the x-axis label.

        ylabel : str, default=None
            Overwrite the y-axis label.

        **kwargs : dict
            Additional keyword arguments to be passed to the `plot_method`.

        Returns
        -------
        display: :class:`~sklearn.inspection.DecisionBoundaryDisplay`
            Object that stores computed values.
        """
        check_matplotlib_support("DecisionBoundaryDisplay.plot")
        import matplotlib.pyplot as plt  # noqa

        if plot_method not in ("contourf", "contour", "pcolormesh"):
            raise ValueError(
                "plot_method must be 'contourf', 'contour', or 'pcolormesh'"
            )

        if ax is None:
            _, ax = plt.subplots()

        # Dispatch to the matplotlib Axes method named by `plot_method`.
        plot_func = getattr(ax, plot_method)
        self.surface_ = plot_func(self.xx0, self.xx1, self.response, **kwargs)

        # Only set axis labels when explicitly requested or when the axes has
        # none yet, so user-set labels on a reused axes are preserved.
        if xlabel is not None or not ax.get_xlabel():
            xlabel = self.xlabel if xlabel is None else xlabel
            ax.set_xlabel(xlabel)
        if ylabel is not None or not ax.get_ylabel():
            ylabel = self.ylabel if ylabel is None else ylabel
            ax.set_ylabel(ylabel)

        self.ax_ = ax
        self.figure_ = ax.figure
        return self

    @classmethod
    def from_estimator(
        cls,
        estimator,
        X,
        *,
        grid_resolution=100,
        eps=1.0,
        plot_method="contourf",
        response_method="auto",
        class_of_interest=None,
        xlabel=None,
        ylabel=None,
        ax=None,
        **kwargs,
    ):
        """Plot decision boundary given an estimator.

        Read more in the :ref:`User Guide <visualizations>`.

        Parameters
        ----------
        estimator : object
            Trained estimator used to plot the decision boundary.

        X : {array-like, sparse matrix, dataframe} of shape (n_samples, 2)
            Input data that should be only 2-dimensional.

        grid_resolution : int, default=100
            Number of grid points to use for plotting decision boundary.
            Higher values will make the plot look nicer but be slower to
            render.

        eps : float, default=1.0
            Extends the minimum and maximum values of X for evaluating the
            response function.

        plot_method : {'contourf', 'contour', 'pcolormesh'}, default='contourf'
            Plotting method to call when plotting the response. Please refer
            to the following matplotlib documentation for details:
            :func:`contourf <matplotlib.pyplot.contourf>`,
            :func:`contour <matplotlib.pyplot.contour>`,
            :func:`pcolormesh <matplotlib.pyplot.pcolormesh>`.

        response_method : {'auto', 'predict_proba', 'decision_function', \
                'predict'}, default='auto'
            Specifies whether to use :term:`predict_proba`,
            :term:`decision_function`, :term:`predict` as the target response.
            If set to 'auto', the response method is tried in the following order:
            :term:`decision_function`, :term:`predict_proba`, :term:`predict`.
            For multiclass problems, :term:`predict` is selected when
            `response_method="auto"`.

        class_of_interest : int, float, bool or str, default=None
            The class considered when plotting the decision. If None,
            `estimator.classes_[1]` is considered as the positive class
            for binary classifiers. Must have an explicit value for
            multiclass classifiers when `response_method` is 'predict_proba'
            or 'decision_function'.

            .. versionadded:: 1.4

        xlabel : str, default=None
            The label used for the x-axis. If `None`, an attempt is made to
            extract a label from `X` if it is a dataframe, otherwise an empty
            string is used.

        ylabel : str, default=None
            The label used for the y-axis. If `None`, an attempt is made to
            extract a label from `X` if it is a dataframe, otherwise an empty
            string is used.

        ax : Matplotlib axes, default=None
            Axes object to plot on. If `None`, a new figure and axes is
            created.

        **kwargs : dict
            Additional keyword arguments to be passed to the
            `plot_method`.

        Returns
        -------
        display : :class:`~sklearn.inspection.DecisionBoundaryDisplay`
            Object that stores the result.

        See Also
        --------
        DecisionBoundaryDisplay : Decision boundary visualization.
        sklearn.metrics.ConfusionMatrixDisplay.from_estimator : Plot the
            confusion matrix given an estimator, the data, and the label.
        sklearn.metrics.ConfusionMatrixDisplay.from_predictions : Plot the
            confusion matrix given the true and predicted labels.

        Examples
        --------
        >>> import matplotlib.pyplot as plt
        >>> from sklearn.datasets import load_iris
        >>> from sklearn.linear_model import LogisticRegression
        >>> from sklearn.inspection import DecisionBoundaryDisplay
        >>> iris = load_iris()
        >>> X = iris.data[:, :2]
        >>> classifier = LogisticRegression().fit(X, iris.target)
        >>> disp = DecisionBoundaryDisplay.from_estimator(
        ...     classifier, X, response_method="predict",
        ...     xlabel=iris.feature_names[0], ylabel=iris.feature_names[1],
        ...     alpha=0.5,
        ... )
        >>> disp.ax_.scatter(X[:, 0], X[:, 1], c=iris.target, edgecolor="k")
        <...>
        >>> plt.show()
        """
        check_matplotlib_support(f"{cls.__name__}.from_estimator")
        check_is_fitted(estimator)

        if not grid_resolution > 1:
            raise ValueError(
                "grid_resolution must be greater than 1. Got"
                f" {grid_resolution} instead."
            )

        if not eps >= 0:
            raise ValueError(
                f"eps must be greater than or equal to 0. Got {eps} instead."
            )

        possible_plot_methods = ("contourf", "contour", "pcolormesh")
        if plot_method not in possible_plot_methods:
            available_methods = ", ".join(possible_plot_methods)
            raise ValueError(
                f"plot_method must be one of {available_methods}. "
                f"Got {plot_method} instead."
            )

        num_features = _num_features(X)
        if num_features != 2:
            raise ValueError(
                f"n_features must be equal to 2. Got {num_features} instead."
            )

        x0, x1 = _safe_indexing(X, 0, axis=1), _safe_indexing(X, 1, axis=1)

        # Evaluation window: the data range of each feature padded by `eps`.
        x0_min, x0_max = x0.min() - eps, x0.max() + eps
        x1_min, x1_max = x1.min() - eps, x1.max() + eps

        xx0, xx1 = np.meshgrid(
            np.linspace(x0_min, x0_max, grid_resolution),
            np.linspace(x1_min, x1_max, grid_resolution),
        )

        # Flatten the grid into a (grid_resolution**2, 2) sample matrix; wrap
        # it back into the caller's dataframe type so column-name-aware
        # estimators keep working.
        X_grid = np.c_[xx0.ravel(), xx1.ravel()]
        if _is_pandas_df(X) or _is_polars_df(X):
            adapter = _get_adapter_from_container(X)
            X_grid = adapter.create_container(
                X_grid,
                X_grid,
                columns=X.columns,
            )

        prediction_method = _check_boundary_response_method(
            estimator, response_method, class_of_interest
        )
        try:
            response, _, response_method_used = _get_response_values(
                estimator,
                X_grid,
                response_method=prediction_method,
                pos_label=class_of_interest,
                return_response_method_used=True,
            )
        except ValueError as exc:
            if "is not a valid label" in str(exc):
                # re-raise a more informative error message since `pos_label` is unknown
                # to our user when interacting with
                # `DecisionBoundaryDisplay.from_estimator`
                raise ValueError(
                    f"class_of_interest={class_of_interest} is not a valid label: It "
                    f"should be one of {estimator.classes_}"
                ) from exc
            raise

        # convert classes predictions into integers
        if response_method_used == "predict" and hasattr(estimator, "classes_"):
            encoder = LabelEncoder()
            encoder.classes_ = estimator.classes_
            response = encoder.transform(response)

        if response.ndim != 1:
            if is_regressor(estimator):
                raise ValueError("Multi-output regressors are not supported")

            # For the multiclass case, `_get_response_values` returns the response
            # as-is. Thus, we have a column per class and we need to select the column
            # corresponding to the positive class.
            col_idx = np.flatnonzero(estimator.classes_ == class_of_interest)[0]
            response = response[:, col_idx]

        if xlabel is None:
            xlabel = X.columns[0] if hasattr(X, "columns") else ""

        if ylabel is None:
            ylabel = X.columns[1] if hasattr(X, "columns") else ""

        display = cls(
            xx0=xx0,
            xx1=xx1,
            response=response.reshape(xx0.shape),
            xlabel=xlabel,
            ylabel=ylabel,
        )
        return display.plot(ax=ax, plot_method=plot_method, **kwargs)
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_plot/partial_dependence.py
ADDED
|
@@ -0,0 +1,1476 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Authors: The scikit-learn developers
|
| 2 |
+
# SPDX-License-Identifier: BSD-3-Clause
|
| 3 |
+
|
| 4 |
+
import numbers
|
| 5 |
+
from itertools import chain
|
| 6 |
+
from math import ceil
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
from scipy import sparse
|
| 10 |
+
from scipy.stats.mstats import mquantiles
|
| 11 |
+
|
| 12 |
+
from ...base import is_regressor
|
| 13 |
+
from ...utils import (
|
| 14 |
+
Bunch,
|
| 15 |
+
_safe_indexing,
|
| 16 |
+
check_array,
|
| 17 |
+
check_random_state,
|
| 18 |
+
)
|
| 19 |
+
from ...utils._encode import _unique
|
| 20 |
+
from ...utils._optional_dependencies import check_matplotlib_support # noqa
|
| 21 |
+
from ...utils._plotting import _validate_style_kwargs
|
| 22 |
+
from ...utils.parallel import Parallel, delayed
|
| 23 |
+
from .. import partial_dependence
|
| 24 |
+
from .._pd_utils import _check_feature_names, _get_feature_index
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class PartialDependenceDisplay:
|
| 28 |
+
"""Partial Dependence Plot (PDP).
|
| 29 |
+
|
| 30 |
+
This can also display individual partial dependencies which are often
|
| 31 |
+
referred to as: Individual Condition Expectation (ICE).
|
| 32 |
+
|
| 33 |
+
It is recommended to use
|
| 34 |
+
:func:`~sklearn.inspection.PartialDependenceDisplay.from_estimator` to create a
|
| 35 |
+
:class:`~sklearn.inspection.PartialDependenceDisplay`. All parameters are
|
| 36 |
+
stored as attributes.
|
| 37 |
+
|
| 38 |
+
Read more in
|
| 39 |
+
:ref:`sphx_glr_auto_examples_miscellaneous_plot_partial_dependence_visualization_api.py`
|
| 40 |
+
and the :ref:`User Guide <partial_dependence>`.
|
| 41 |
+
|
| 42 |
+
.. versionadded:: 0.22
|
| 43 |
+
|
| 44 |
+
Parameters
|
| 45 |
+
----------
|
| 46 |
+
pd_results : list of Bunch
|
| 47 |
+
Results of :func:`~sklearn.inspection.partial_dependence` for
|
| 48 |
+
``features``.
|
| 49 |
+
|
| 50 |
+
features : list of (int,) or list of (int, int)
|
| 51 |
+
Indices of features for a given plot. A tuple of one integer will plot
|
| 52 |
+
a partial dependence curve of one feature. A tuple of two integers will
|
| 53 |
+
plot a two-way partial dependence curve as a contour plot.
|
| 54 |
+
|
| 55 |
+
feature_names : list of str
|
| 56 |
+
Feature names corresponding to the indices in ``features``.
|
| 57 |
+
|
| 58 |
+
target_idx : int
|
| 59 |
+
|
| 60 |
+
- In a multiclass setting, specifies the class for which the PDPs
|
| 61 |
+
should be computed. Note that for binary classification, the
|
| 62 |
+
positive class (index 1) is always used.
|
| 63 |
+
- In a multioutput setting, specifies the task for which the PDPs
|
| 64 |
+
should be computed.
|
| 65 |
+
|
| 66 |
+
Ignored in binary classification or classical regression settings.
|
| 67 |
+
|
| 68 |
+
deciles : dict
|
| 69 |
+
Deciles for feature indices in ``features``.
|
| 70 |
+
|
| 71 |
+
kind : {'average', 'individual', 'both'} or list of such str, \
|
| 72 |
+
default='average'
|
| 73 |
+
Whether to plot the partial dependence averaged across all the samples
|
| 74 |
+
in the dataset or one line per sample or both.
|
| 75 |
+
|
| 76 |
+
- ``kind='average'`` results in the traditional PD plot;
|
| 77 |
+
- ``kind='individual'`` results in the ICE plot;
|
| 78 |
+
- ``kind='both'`` results in plotting both the ICE and PD on the same
|
| 79 |
+
plot.
|
| 80 |
+
|
| 81 |
+
A list of such strings can be provided to specify `kind` on a per-plot
|
| 82 |
+
basis. The length of the list should be the same as the number of
|
| 83 |
+
interaction requested in `features`.
|
| 84 |
+
|
| 85 |
+
.. note::
|
| 86 |
+
ICE ('individual' or 'both') is not a valid option for 2-ways
|
| 87 |
+
interactions plot. As a result, an error will be raised.
|
| 88 |
+
2-ways interaction plots should always be configured to
|
| 89 |
+
use the 'average' kind instead.
|
| 90 |
+
|
| 91 |
+
.. note::
|
| 92 |
+
The fast ``method='recursion'`` option is only available for
|
| 93 |
+
`kind='average'` and `sample_weights=None`. Computing individual
|
| 94 |
+
dependencies and doing weighted averages requires using the slower
|
| 95 |
+
`method='brute'`.
|
| 96 |
+
|
| 97 |
+
.. versionadded:: 0.24
|
| 98 |
+
Add `kind` parameter with `'average'`, `'individual'`, and `'both'`
|
| 99 |
+
options.
|
| 100 |
+
|
| 101 |
+
.. versionadded:: 1.1
|
| 102 |
+
Add the possibility to pass a list of string specifying `kind`
|
| 103 |
+
for each plot.
|
| 104 |
+
|
| 105 |
+
subsample : float, int or None, default=1000
|
| 106 |
+
Sampling for ICE curves when `kind` is 'individual' or 'both'.
|
| 107 |
+
If float, should be between 0.0 and 1.0 and represent the proportion
|
| 108 |
+
of the dataset to be used to plot ICE curves. If int, represents the
|
| 109 |
+
maximum absolute number of samples to use.
|
| 110 |
+
|
| 111 |
+
Note that the full dataset is still used to calculate partial
|
| 112 |
+
dependence when `kind='both'`.
|
| 113 |
+
|
| 114 |
+
.. versionadded:: 0.24
|
| 115 |
+
|
| 116 |
+
random_state : int, RandomState instance or None, default=None
|
| 117 |
+
Controls the randomness of the selected samples when subsamples is not
|
| 118 |
+
`None`. See :term:`Glossary <random_state>` for details.
|
| 119 |
+
|
| 120 |
+
.. versionadded:: 0.24
|
| 121 |
+
|
| 122 |
+
is_categorical : list of (bool,) or list of (bool, bool), default=None
|
| 123 |
+
Whether each target feature in `features` is categorical or not.
|
| 124 |
+
The list should be same size as `features`. If `None`, all features
|
| 125 |
+
are assumed to be continuous.
|
| 126 |
+
|
| 127 |
+
.. versionadded:: 1.2
|
| 128 |
+
|
| 129 |
+
Attributes
|
| 130 |
+
----------
|
| 131 |
+
bounding_ax_ : matplotlib Axes or None
|
| 132 |
+
If `ax` is an axes or None, the `bounding_ax_` is the axes where the
|
| 133 |
+
grid of partial dependence plots are drawn. If `ax` is a list of axes
|
| 134 |
+
or a numpy array of axes, `bounding_ax_` is None.
|
| 135 |
+
|
| 136 |
+
axes_ : ndarray of matplotlib Axes
|
| 137 |
+
If `ax` is an axes or None, `axes_[i, j]` is the axes on the i-th row
|
| 138 |
+
and j-th column. If `ax` is a list of axes, `axes_[i]` is the i-th item
|
| 139 |
+
in `ax`. Elements that are None correspond to a nonexisting axes in
|
| 140 |
+
that position.
|
| 141 |
+
|
| 142 |
+
lines_ : ndarray of matplotlib Artists
|
| 143 |
+
If `ax` is an axes or None, `lines_[i, j]` is the partial dependence
|
| 144 |
+
curve on the i-th row and j-th column. If `ax` is a list of axes,
|
| 145 |
+
`lines_[i]` is the partial dependence curve corresponding to the i-th
|
| 146 |
+
item in `ax`. Elements that are None correspond to a nonexisting axes
|
| 147 |
+
or an axes that does not include a line plot.
|
| 148 |
+
|
| 149 |
+
deciles_vlines_ : ndarray of matplotlib LineCollection
|
| 150 |
+
If `ax` is an axes or None, `vlines_[i, j]` is the line collection
|
| 151 |
+
representing the x axis deciles of the i-th row and j-th column. If
|
| 152 |
+
`ax` is a list of axes, `vlines_[i]` corresponds to the i-th item in
|
| 153 |
+
`ax`. Elements that are None correspond to a nonexisting axes or an
|
| 154 |
+
axes that does not include a PDP plot.
|
| 155 |
+
|
| 156 |
+
.. versionadded:: 0.23
|
| 157 |
+
|
| 158 |
+
deciles_hlines_ : ndarray of matplotlib LineCollection
|
| 159 |
+
If `ax` is an axes or None, `vlines_[i, j]` is the line collection
|
| 160 |
+
representing the y axis deciles of the i-th row and j-th column. If
|
| 161 |
+
`ax` is a list of axes, `vlines_[i]` corresponds to the i-th item in
|
| 162 |
+
`ax`. Elements that are None correspond to a nonexisting axes or an
|
| 163 |
+
axes that does not include a 2-way plot.
|
| 164 |
+
|
| 165 |
+
.. versionadded:: 0.23
|
| 166 |
+
|
| 167 |
+
contours_ : ndarray of matplotlib Artists
|
| 168 |
+
If `ax` is an axes or None, `contours_[i, j]` is the partial dependence
|
| 169 |
+
plot on the i-th row and j-th column. If `ax` is a list of axes,
|
| 170 |
+
`contours_[i]` is the partial dependence plot corresponding to the i-th
|
| 171 |
+
item in `ax`. Elements that are None correspond to a nonexisting axes
|
| 172 |
+
or an axes that does not include a contour plot.
|
| 173 |
+
|
| 174 |
+
bars_ : ndarray of matplotlib Artists
|
| 175 |
+
If `ax` is an axes or None, `bars_[i, j]` is the partial dependence bar
|
| 176 |
+
plot on the i-th row and j-th column (for a categorical feature).
|
| 177 |
+
If `ax` is a list of axes, `bars_[i]` is the partial dependence bar
|
| 178 |
+
plot corresponding to the i-th item in `ax`. Elements that are None
|
| 179 |
+
correspond to a nonexisting axes or an axes that does not include a
|
| 180 |
+
bar plot.
|
| 181 |
+
|
| 182 |
+
.. versionadded:: 1.2
|
| 183 |
+
|
| 184 |
+
heatmaps_ : ndarray of matplotlib Artists
|
| 185 |
+
If `ax` is an axes or None, `heatmaps_[i, j]` is the partial dependence
|
| 186 |
+
heatmap on the i-th row and j-th column (for a pair of categorical
|
| 187 |
+
features) . If `ax` is a list of axes, `heatmaps_[i]` is the partial
|
| 188 |
+
dependence heatmap corresponding to the i-th item in `ax`. Elements
|
| 189 |
+
that are None correspond to a nonexisting axes or an axes that does not
|
| 190 |
+
include a heatmap.
|
| 191 |
+
|
| 192 |
+
.. versionadded:: 1.2
|
| 193 |
+
|
| 194 |
+
figure_ : matplotlib Figure
|
| 195 |
+
Figure containing partial dependence plots.
|
| 196 |
+
|
| 197 |
+
See Also
|
| 198 |
+
--------
|
| 199 |
+
partial_dependence : Compute Partial Dependence values.
|
| 200 |
+
PartialDependenceDisplay.from_estimator : Plot Partial Dependence.
|
| 201 |
+
|
| 202 |
+
Examples
|
| 203 |
+
--------
|
| 204 |
+
>>> import numpy as np
|
| 205 |
+
>>> import matplotlib.pyplot as plt
|
| 206 |
+
>>> from sklearn.datasets import make_friedman1
|
| 207 |
+
>>> from sklearn.ensemble import GradientBoostingRegressor
|
| 208 |
+
>>> from sklearn.inspection import PartialDependenceDisplay
|
| 209 |
+
>>> from sklearn.inspection import partial_dependence
|
| 210 |
+
>>> X, y = make_friedman1()
|
| 211 |
+
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
|
| 212 |
+
>>> features, feature_names = [(0,)], [f"Features #{i}" for i in range(X.shape[1])]
|
| 213 |
+
>>> deciles = {0: np.linspace(0, 1, num=5)}
|
| 214 |
+
>>> pd_results = partial_dependence(
|
| 215 |
+
... clf, X, features=0, kind="average", grid_resolution=5)
|
| 216 |
+
>>> display = PartialDependenceDisplay(
|
| 217 |
+
... [pd_results], features=features, feature_names=feature_names,
|
| 218 |
+
... target_idx=0, deciles=deciles
|
| 219 |
+
... )
|
| 220 |
+
>>> display.plot(pdp_lim={1: (-1.38, 0.66)})
|
| 221 |
+
<...>
|
| 222 |
+
>>> plt.show()
|
| 223 |
+
"""
|
| 224 |
+
|
| 225 |
+
def __init__(
|
| 226 |
+
self,
|
| 227 |
+
pd_results,
|
| 228 |
+
*,
|
| 229 |
+
features,
|
| 230 |
+
feature_names,
|
| 231 |
+
target_idx,
|
| 232 |
+
deciles,
|
| 233 |
+
kind="average",
|
| 234 |
+
subsample=1000,
|
| 235 |
+
random_state=None,
|
| 236 |
+
is_categorical=None,
|
| 237 |
+
):
|
| 238 |
+
self.pd_results = pd_results
|
| 239 |
+
self.features = features
|
| 240 |
+
self.feature_names = feature_names
|
| 241 |
+
self.target_idx = target_idx
|
| 242 |
+
self.deciles = deciles
|
| 243 |
+
self.kind = kind
|
| 244 |
+
self.subsample = subsample
|
| 245 |
+
self.random_state = random_state
|
| 246 |
+
self.is_categorical = is_categorical
|
| 247 |
+
|
| 248 |
+
@classmethod
|
| 249 |
+
def from_estimator(
|
| 250 |
+
cls,
|
| 251 |
+
estimator,
|
| 252 |
+
X,
|
| 253 |
+
features,
|
| 254 |
+
*,
|
| 255 |
+
sample_weight=None,
|
| 256 |
+
categorical_features=None,
|
| 257 |
+
feature_names=None,
|
| 258 |
+
target=None,
|
| 259 |
+
response_method="auto",
|
| 260 |
+
n_cols=3,
|
| 261 |
+
grid_resolution=100,
|
| 262 |
+
percentiles=(0.05, 0.95),
|
| 263 |
+
method="auto",
|
| 264 |
+
n_jobs=None,
|
| 265 |
+
verbose=0,
|
| 266 |
+
line_kw=None,
|
| 267 |
+
ice_lines_kw=None,
|
| 268 |
+
pd_line_kw=None,
|
| 269 |
+
contour_kw=None,
|
| 270 |
+
ax=None,
|
| 271 |
+
kind="average",
|
| 272 |
+
centered=False,
|
| 273 |
+
subsample=1000,
|
| 274 |
+
random_state=None,
|
| 275 |
+
):
|
| 276 |
+
"""Partial dependence (PD) and individual conditional expectation (ICE) plots.
|
| 277 |
+
|
| 278 |
+
Partial dependence plots, individual conditional expectation plots or an
|
| 279 |
+
overlay of both of them can be plotted by setting the ``kind``
|
| 280 |
+
parameter. The ``len(features)`` plots are arranged in a grid with
|
| 281 |
+
``n_cols`` columns. Two-way partial dependence plots are plotted as
|
| 282 |
+
contour plots. The deciles of the feature values will be shown with tick
|
| 283 |
+
marks on the x-axes for one-way plots, and on both axes for two-way
|
| 284 |
+
plots.
|
| 285 |
+
|
| 286 |
+
Read more in the :ref:`User Guide <partial_dependence>`.
|
| 287 |
+
|
| 288 |
+
.. note::
|
| 289 |
+
|
| 290 |
+
:func:`PartialDependenceDisplay.from_estimator` does not support using the
|
| 291 |
+
same axes with multiple calls. To plot the partial dependence for
|
| 292 |
+
multiple estimators, please pass the axes created by the first call to the
|
| 293 |
+
second call::
|
| 294 |
+
|
| 295 |
+
>>> from sklearn.inspection import PartialDependenceDisplay
|
| 296 |
+
>>> from sklearn.datasets import make_friedman1
|
| 297 |
+
>>> from sklearn.linear_model import LinearRegression
|
| 298 |
+
>>> from sklearn.ensemble import RandomForestRegressor
|
| 299 |
+
>>> X, y = make_friedman1()
|
| 300 |
+
>>> est1 = LinearRegression().fit(X, y)
|
| 301 |
+
>>> est2 = RandomForestRegressor().fit(X, y)
|
| 302 |
+
>>> disp1 = PartialDependenceDisplay.from_estimator(est1, X,
|
| 303 |
+
... [1, 2])
|
| 304 |
+
>>> disp2 = PartialDependenceDisplay.from_estimator(est2, X, [1, 2],
|
| 305 |
+
... ax=disp1.axes_)
|
| 306 |
+
|
| 307 |
+
.. warning::
|
| 308 |
+
|
| 309 |
+
For :class:`~sklearn.ensemble.GradientBoostingClassifier` and
|
| 310 |
+
:class:`~sklearn.ensemble.GradientBoostingRegressor`, the
|
| 311 |
+
`'recursion'` method (used by default) will not account for the `init`
|
| 312 |
+
predictor of the boosting process. In practice, this will produce
|
| 313 |
+
the same values as `'brute'` up to a constant offset in the target
|
| 314 |
+
response, provided that `init` is a constant estimator (which is the
|
| 315 |
+
default). However, if `init` is not a constant estimator, the
|
| 316 |
+
partial dependence values are incorrect for `'recursion'` because the
|
| 317 |
+
offset will be sample-dependent. It is preferable to use the `'brute'`
|
| 318 |
+
method. Note that this only applies to
|
| 319 |
+
:class:`~sklearn.ensemble.GradientBoostingClassifier` and
|
| 320 |
+
:class:`~sklearn.ensemble.GradientBoostingRegressor`, not to
|
| 321 |
+
:class:`~sklearn.ensemble.HistGradientBoostingClassifier` and
|
| 322 |
+
:class:`~sklearn.ensemble.HistGradientBoostingRegressor`.
|
| 323 |
+
|
| 324 |
+
.. versionadded:: 1.0
|
| 325 |
+
|
| 326 |
+
Parameters
|
| 327 |
+
----------
|
| 328 |
+
estimator : BaseEstimator
|
| 329 |
+
A fitted estimator object implementing :term:`predict`,
|
| 330 |
+
:term:`predict_proba`, or :term:`decision_function`.
|
| 331 |
+
Multioutput-multiclass classifiers are not supported.
|
| 332 |
+
|
| 333 |
+
X : {array-like, dataframe} of shape (n_samples, n_features)
|
| 334 |
+
``X`` is used to generate a grid of values for the target
|
| 335 |
+
``features`` (where the partial dependence will be evaluated), and
|
| 336 |
+
also to generate values for the complement features when the
|
| 337 |
+
`method` is `'brute'`.
|
| 338 |
+
|
| 339 |
+
features : list of {int, str, pair of int, pair of str}
|
| 340 |
+
The target features for which to create the PDPs.
|
| 341 |
+
If `features[i]` is an integer or a string, a one-way PDP is created;
|
| 342 |
+
if `features[i]` is a tuple, a two-way PDP is created (only supported
|
| 343 |
+
with `kind='average'`). Each tuple must be of size 2.
|
| 344 |
+
If any entry is a string, then it must be in ``feature_names``.
|
| 345 |
+
|
| 346 |
+
sample_weight : array-like of shape (n_samples,), default=None
|
| 347 |
+
Sample weights are used to calculate weighted means when averaging the
|
| 348 |
+
model output. If `None`, then samples are equally weighted. If
|
| 349 |
+
`sample_weight` is not `None`, then `method` will be set to `'brute'`.
|
| 350 |
+
Note that `sample_weight` is ignored for `kind='individual'`.
|
| 351 |
+
|
| 352 |
+
.. versionadded:: 1.3
|
| 353 |
+
|
| 354 |
+
categorical_features : array-like of shape (n_features,) or shape \
|
| 355 |
+
(n_categorical_features,), dtype={bool, int, str}, default=None
|
| 356 |
+
Indicates the categorical features.
|
| 357 |
+
|
| 358 |
+
- `None`: no feature will be considered categorical;
|
| 359 |
+
- boolean array-like: boolean mask of shape `(n_features,)`
|
| 360 |
+
indicating which features are categorical. Thus, this array has
|
| 361 |
+
the same shape has `X.shape[1]`;
|
| 362 |
+
- integer or string array-like: integer indices or strings
|
| 363 |
+
indicating categorical features.
|
| 364 |
+
|
| 365 |
+
.. versionadded:: 1.2
|
| 366 |
+
|
| 367 |
+
feature_names : array-like of shape (n_features,), dtype=str, default=None
|
| 368 |
+
Name of each feature; `feature_names[i]` holds the name of the feature
|
| 369 |
+
with index `i`.
|
| 370 |
+
By default, the name of the feature corresponds to their numerical
|
| 371 |
+
index for NumPy array and their column name for pandas dataframe.
|
| 372 |
+
|
| 373 |
+
target : int, default=None
|
| 374 |
+
- In a multiclass setting, specifies the class for which the PDPs
|
| 375 |
+
should be computed. Note that for binary classification, the
|
| 376 |
+
positive class (index 1) is always used.
|
| 377 |
+
- In a multioutput setting, specifies the task for which the PDPs
|
| 378 |
+
should be computed.
|
| 379 |
+
|
| 380 |
+
Ignored in binary classification or classical regression settings.
|
| 381 |
+
|
| 382 |
+
response_method : {'auto', 'predict_proba', 'decision_function'}, \
|
| 383 |
+
default='auto'
|
| 384 |
+
Specifies whether to use :term:`predict_proba` or
|
| 385 |
+
:term:`decision_function` as the target response. For regressors
|
| 386 |
+
this parameter is ignored and the response is always the output of
|
| 387 |
+
:term:`predict`. By default, :term:`predict_proba` is tried first
|
| 388 |
+
and we revert to :term:`decision_function` if it doesn't exist. If
|
| 389 |
+
``method`` is `'recursion'`, the response is always the output of
|
| 390 |
+
:term:`decision_function`.
|
| 391 |
+
|
| 392 |
+
n_cols : int, default=3
|
| 393 |
+
The maximum number of columns in the grid plot. Only active when `ax`
|
| 394 |
+
is a single axis or `None`.
|
| 395 |
+
|
| 396 |
+
grid_resolution : int, default=100
|
| 397 |
+
The number of equally spaced points on the axes of the plots, for each
|
| 398 |
+
target feature.
|
| 399 |
+
|
| 400 |
+
percentiles : tuple of float, default=(0.05, 0.95)
|
| 401 |
+
The lower and upper percentile used to create the extreme values
|
| 402 |
+
for the PDP axes. Must be in [0, 1].
|
| 403 |
+
|
| 404 |
+
method : str, default='auto'
|
| 405 |
+
The method used to calculate the averaged predictions:
|
| 406 |
+
|
| 407 |
+
- `'recursion'` is only supported for some tree-based estimators
|
| 408 |
+
(namely
|
| 409 |
+
:class:`~sklearn.ensemble.GradientBoostingClassifier`,
|
| 410 |
+
:class:`~sklearn.ensemble.GradientBoostingRegressor`,
|
| 411 |
+
:class:`~sklearn.ensemble.HistGradientBoostingClassifier`,
|
| 412 |
+
:class:`~sklearn.ensemble.HistGradientBoostingRegressor`,
|
| 413 |
+
:class:`~sklearn.tree.DecisionTreeRegressor`,
|
| 414 |
+
:class:`~sklearn.ensemble.RandomForestRegressor`
|
| 415 |
+
but is more efficient in terms of speed.
|
| 416 |
+
With this method, the target response of a
|
| 417 |
+
classifier is always the decision function, not the predicted
|
| 418 |
+
probabilities. Since the `'recursion'` method implicitly computes
|
| 419 |
+
the average of the ICEs by design, it is not compatible with ICE and
|
| 420 |
+
thus `kind` must be `'average'`.
|
| 421 |
+
|
| 422 |
+
- `'brute'` is supported for any estimator, but is more
|
| 423 |
+
computationally intensive.
|
| 424 |
+
|
| 425 |
+
- `'auto'`: the `'recursion'` is used for estimators that support it,
|
| 426 |
+
and `'brute'` is used otherwise. If `sample_weight` is not `None`,
|
| 427 |
+
then `'brute'` is used regardless of the estimator.
|
| 428 |
+
|
| 429 |
+
Please see :ref:`this note <pdp_method_differences>` for
|
| 430 |
+
differences between the `'brute'` and `'recursion'` method.
|
| 431 |
+
|
| 432 |
+
n_jobs : int, default=None
|
| 433 |
+
The number of CPUs to use to compute the partial dependences.
|
| 434 |
+
Computation is parallelized over features specified by the `features`
|
| 435 |
+
parameter.
|
| 436 |
+
|
| 437 |
+
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
|
| 438 |
+
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
|
| 439 |
+
for more details.
|
| 440 |
+
|
| 441 |
+
verbose : int, default=0
|
| 442 |
+
Verbose output during PD computations.
|
| 443 |
+
|
| 444 |
+
line_kw : dict, default=None
|
| 445 |
+
Dict with keywords passed to the ``matplotlib.pyplot.plot`` call.
|
| 446 |
+
For one-way partial dependence plots. It can be used to define common
|
| 447 |
+
properties for both `ice_lines_kw` and `pdp_line_kw`.
|
| 448 |
+
|
| 449 |
+
ice_lines_kw : dict, default=None
|
| 450 |
+
Dictionary with keywords passed to the `matplotlib.pyplot.plot` call.
|
| 451 |
+
For ICE lines in the one-way partial dependence plots.
|
| 452 |
+
The key value pairs defined in `ice_lines_kw` takes priority over
|
| 453 |
+
`line_kw`.
|
| 454 |
+
|
| 455 |
+
pd_line_kw : dict, default=None
|
| 456 |
+
Dictionary with keywords passed to the `matplotlib.pyplot.plot` call.
|
| 457 |
+
For partial dependence in one-way partial dependence plots.
|
| 458 |
+
The key value pairs defined in `pd_line_kw` takes priority over
|
| 459 |
+
`line_kw`.
|
| 460 |
+
|
| 461 |
+
contour_kw : dict, default=None
|
| 462 |
+
Dict with keywords passed to the ``matplotlib.pyplot.contourf`` call.
|
| 463 |
+
For two-way partial dependence plots.
|
| 464 |
+
|
| 465 |
+
ax : Matplotlib axes or array-like of Matplotlib axes, default=None
|
| 466 |
+
- If a single axis is passed in, it is treated as a bounding axes
|
| 467 |
+
and a grid of partial dependence plots will be drawn within
|
| 468 |
+
these bounds. The `n_cols` parameter controls the number of
|
| 469 |
+
columns in the grid.
|
| 470 |
+
- If an array-like of axes are passed in, the partial dependence
|
| 471 |
+
plots will be drawn directly into these axes.
|
| 472 |
+
- If `None`, a figure and a bounding axes is created and treated
|
| 473 |
+
as the single axes case.
|
| 474 |
+
|
| 475 |
+
kind : {'average', 'individual', 'both'}, default='average'
|
| 476 |
+
Whether to plot the partial dependence averaged across all the samples
|
| 477 |
+
in the dataset or one line per sample or both.
|
| 478 |
+
|
| 479 |
+
- ``kind='average'`` results in the traditional PD plot;
|
| 480 |
+
- ``kind='individual'`` results in the ICE plot.
|
| 481 |
+
|
| 482 |
+
Note that the fast `method='recursion'` option is only available for
|
| 483 |
+
`kind='average'` and `sample_weights=None`. Computing individual
|
| 484 |
+
dependencies and doing weighted averages requires using the slower
|
| 485 |
+
`method='brute'`.
|
| 486 |
+
|
| 487 |
+
centered : bool, default=False
|
| 488 |
+
If `True`, the ICE and PD lines will start at the origin of the
|
| 489 |
+
y-axis. By default, no centering is done.
|
| 490 |
+
|
| 491 |
+
.. versionadded:: 1.1
|
| 492 |
+
|
| 493 |
+
subsample : float, int or None, default=1000
|
| 494 |
+
Sampling for ICE curves when `kind` is 'individual' or 'both'.
|
| 495 |
+
If `float`, should be between 0.0 and 1.0 and represent the proportion
|
| 496 |
+
of the dataset to be used to plot ICE curves. If `int`, represents the
|
| 497 |
+
absolute number samples to use.
|
| 498 |
+
|
| 499 |
+
Note that the full dataset is still used to calculate averaged partial
|
| 500 |
+
dependence when `kind='both'`.
|
| 501 |
+
|
| 502 |
+
random_state : int, RandomState instance or None, default=None
|
| 503 |
+
Controls the randomness of the selected samples when subsamples is not
|
| 504 |
+
`None` and `kind` is either `'both'` or `'individual'`.
|
| 505 |
+
See :term:`Glossary <random_state>` for details.
|
| 506 |
+
|
| 507 |
+
Returns
|
| 508 |
+
-------
|
| 509 |
+
display : :class:`~sklearn.inspection.PartialDependenceDisplay`
|
| 510 |
+
|
| 511 |
+
See Also
|
| 512 |
+
--------
|
| 513 |
+
partial_dependence : Compute Partial Dependence values.
|
| 514 |
+
|
| 515 |
+
Examples
|
| 516 |
+
--------
|
| 517 |
+
>>> import matplotlib.pyplot as plt
|
| 518 |
+
>>> from sklearn.datasets import make_friedman1
|
| 519 |
+
>>> from sklearn.ensemble import GradientBoostingRegressor
|
| 520 |
+
>>> from sklearn.inspection import PartialDependenceDisplay
|
| 521 |
+
>>> X, y = make_friedman1()
|
| 522 |
+
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
|
| 523 |
+
>>> PartialDependenceDisplay.from_estimator(clf, X, [0, (0, 1)])
|
| 524 |
+
<...>
|
| 525 |
+
>>> plt.show()
|
| 526 |
+
"""
|
| 527 |
+
check_matplotlib_support(f"{cls.__name__}.from_estimator") # noqa
|
| 528 |
+
import matplotlib.pyplot as plt # noqa
|
| 529 |
+
|
| 530 |
+
# set target_idx for multi-class estimators
|
| 531 |
+
if hasattr(estimator, "classes_") and np.size(estimator.classes_) > 2:
|
| 532 |
+
if target is None:
|
| 533 |
+
raise ValueError("target must be specified for multi-class")
|
| 534 |
+
target_idx = np.searchsorted(estimator.classes_, target)
|
| 535 |
+
if (
|
| 536 |
+
not (0 <= target_idx < len(estimator.classes_))
|
| 537 |
+
or estimator.classes_[target_idx] != target
|
| 538 |
+
):
|
| 539 |
+
raise ValueError("target not in est.classes_, got {}".format(target))
|
| 540 |
+
else:
|
| 541 |
+
# regression and binary classification
|
| 542 |
+
target_idx = 0
|
| 543 |
+
|
| 544 |
+
# Use check_array only on lists and other non-array-likes / sparse. Do not
|
| 545 |
+
# convert DataFrame into a NumPy array.
|
| 546 |
+
if not (hasattr(X, "__array__") or sparse.issparse(X)):
|
| 547 |
+
X = check_array(X, ensure_all_finite="allow-nan", dtype=object)
|
| 548 |
+
n_features = X.shape[1]
|
| 549 |
+
|
| 550 |
+
feature_names = _check_feature_names(X, feature_names)
|
| 551 |
+
# expand kind to always be a list of str
|
| 552 |
+
kind_ = [kind] * len(features) if isinstance(kind, str) else kind
|
| 553 |
+
if len(kind_) != len(features):
|
| 554 |
+
raise ValueError(
|
| 555 |
+
"When `kind` is provided as a list of strings, it should contain "
|
| 556 |
+
f"as many elements as `features`. `kind` contains {len(kind_)} "
|
| 557 |
+
f"element(s) and `features` contains {len(features)} element(s)."
|
| 558 |
+
)
|
| 559 |
+
|
| 560 |
+
# convert features into a seq of int tuples
|
| 561 |
+
tmp_features, ice_for_two_way_pd = [], []
|
| 562 |
+
for kind_plot, fxs in zip(kind_, features):
|
| 563 |
+
if isinstance(fxs, (numbers.Integral, str)):
|
| 564 |
+
fxs = (fxs,)
|
| 565 |
+
try:
|
| 566 |
+
fxs = tuple(
|
| 567 |
+
_get_feature_index(fx, feature_names=feature_names) for fx in fxs
|
| 568 |
+
)
|
| 569 |
+
except TypeError as e:
|
| 570 |
+
raise ValueError(
|
| 571 |
+
"Each entry in features must be either an int, "
|
| 572 |
+
"a string, or an iterable of size at most 2."
|
| 573 |
+
) from e
|
| 574 |
+
if not 1 <= np.size(fxs) <= 2:
|
| 575 |
+
raise ValueError(
|
| 576 |
+
"Each entry in features must be either an int, "
|
| 577 |
+
"a string, or an iterable of size at most 2."
|
| 578 |
+
)
|
| 579 |
+
# store the information if 2-way PD was requested with ICE to later
|
| 580 |
+
# raise a ValueError with an exhaustive list of problematic
|
| 581 |
+
# settings.
|
| 582 |
+
ice_for_two_way_pd.append(kind_plot != "average" and np.size(fxs) > 1)
|
| 583 |
+
|
| 584 |
+
tmp_features.append(fxs)
|
| 585 |
+
|
| 586 |
+
if any(ice_for_two_way_pd):
|
| 587 |
+
# raise an error and be specific regarding the parameter values
|
| 588 |
+
# when 1- and 2-way PD were requested
|
| 589 |
+
kind_ = [
|
| 590 |
+
"average" if forcing_average else kind_plot
|
| 591 |
+
for forcing_average, kind_plot in zip(ice_for_two_way_pd, kind_)
|
| 592 |
+
]
|
| 593 |
+
raise ValueError(
|
| 594 |
+
"ICE plot cannot be rendered for 2-way feature interactions. "
|
| 595 |
+
"2-way feature interactions mandates PD plots using the "
|
| 596 |
+
"'average' kind: "
|
| 597 |
+
f"features={features!r} should be configured to use "
|
| 598 |
+
f"kind={kind_!r} explicitly."
|
| 599 |
+
)
|
| 600 |
+
features = tmp_features
|
| 601 |
+
|
| 602 |
+
if categorical_features is None:
|
| 603 |
+
is_categorical = [
|
| 604 |
+
(False,) if len(fxs) == 1 else (False, False) for fxs in features
|
| 605 |
+
]
|
| 606 |
+
else:
|
| 607 |
+
# we need to create a boolean indicator of which features are
|
| 608 |
+
# categorical from the categorical_features list.
|
| 609 |
+
categorical_features = np.asarray(categorical_features)
|
| 610 |
+
if categorical_features.dtype.kind == "b":
|
| 611 |
+
# categorical features provided as a list of boolean
|
| 612 |
+
if categorical_features.size != n_features:
|
| 613 |
+
raise ValueError(
|
| 614 |
+
"When `categorical_features` is a boolean array-like, "
|
| 615 |
+
"the array should be of shape (n_features,). Got "
|
| 616 |
+
f"{categorical_features.size} elements while `X` contains "
|
| 617 |
+
f"{n_features} features."
|
| 618 |
+
)
|
| 619 |
+
is_categorical = [
|
| 620 |
+
tuple(categorical_features[fx] for fx in fxs) for fxs in features
|
| 621 |
+
]
|
| 622 |
+
elif categorical_features.dtype.kind in ("i", "O", "U"):
|
| 623 |
+
# categorical features provided as a list of indices or feature names
|
| 624 |
+
categorical_features_idx = [
|
| 625 |
+
_get_feature_index(cat, feature_names=feature_names)
|
| 626 |
+
for cat in categorical_features
|
| 627 |
+
]
|
| 628 |
+
is_categorical = [
|
| 629 |
+
tuple([idx in categorical_features_idx for idx in fxs])
|
| 630 |
+
for fxs in features
|
| 631 |
+
]
|
| 632 |
+
else:
|
| 633 |
+
raise ValueError(
|
| 634 |
+
"Expected `categorical_features` to be an array-like of boolean,"
|
| 635 |
+
f" integer, or string. Got {categorical_features.dtype} instead."
|
| 636 |
+
)
|
| 637 |
+
|
| 638 |
+
for cats in is_categorical:
|
| 639 |
+
if np.size(cats) == 2 and (cats[0] != cats[1]):
|
| 640 |
+
raise ValueError(
|
| 641 |
+
"Two-way partial dependence plots are not supported for pairs"
|
| 642 |
+
" of continuous and categorical features."
|
| 643 |
+
)
|
| 644 |
+
|
| 645 |
+
# collect the indices of the categorical features targeted by the partial
|
| 646 |
+
# dependence computation
|
| 647 |
+
categorical_features_targeted = set(
|
| 648 |
+
[
|
| 649 |
+
fx
|
| 650 |
+
for fxs, cats in zip(features, is_categorical)
|
| 651 |
+
for fx in fxs
|
| 652 |
+
if any(cats)
|
| 653 |
+
]
|
| 654 |
+
)
|
| 655 |
+
if categorical_features_targeted:
|
| 656 |
+
min_n_cats = min(
|
| 657 |
+
[
|
| 658 |
+
len(_unique(_safe_indexing(X, idx, axis=1)))
|
| 659 |
+
for idx in categorical_features_targeted
|
| 660 |
+
]
|
| 661 |
+
)
|
| 662 |
+
if grid_resolution < min_n_cats:
|
| 663 |
+
raise ValueError(
|
| 664 |
+
"The resolution of the computed grid is less than the "
|
| 665 |
+
"minimum number of categories in the targeted categorical "
|
| 666 |
+
"features. Expect the `grid_resolution` to be greater than "
|
| 667 |
+
f"{min_n_cats}. Got {grid_resolution} instead."
|
| 668 |
+
)
|
| 669 |
+
|
| 670 |
+
for is_cat, kind_plot in zip(is_categorical, kind_):
|
| 671 |
+
if any(is_cat) and kind_plot != "average":
|
| 672 |
+
raise ValueError(
|
| 673 |
+
"It is not possible to display individual effects for"
|
| 674 |
+
" categorical features."
|
| 675 |
+
)
|
| 676 |
+
|
| 677 |
+
# Early exit if the axes does not have the correct number of axes
|
| 678 |
+
if ax is not None and not isinstance(ax, plt.Axes):
|
| 679 |
+
axes = np.asarray(ax, dtype=object)
|
| 680 |
+
if axes.size != len(features):
|
| 681 |
+
raise ValueError(
|
| 682 |
+
"Expected ax to have {} axes, got {}".format(
|
| 683 |
+
len(features), axes.size
|
| 684 |
+
)
|
| 685 |
+
)
|
| 686 |
+
|
| 687 |
+
for i in chain.from_iterable(features):
|
| 688 |
+
if i >= len(feature_names):
|
| 689 |
+
raise ValueError(
|
| 690 |
+
"All entries of features must be less than "
|
| 691 |
+
"len(feature_names) = {0}, got {1}.".format(len(feature_names), i)
|
| 692 |
+
)
|
| 693 |
+
|
| 694 |
+
if isinstance(subsample, numbers.Integral):
|
| 695 |
+
if subsample <= 0:
|
| 696 |
+
raise ValueError(
|
| 697 |
+
f"When an integer, subsample={subsample} should be positive."
|
| 698 |
+
)
|
| 699 |
+
elif isinstance(subsample, numbers.Real):
|
| 700 |
+
if subsample <= 0 or subsample >= 1:
|
| 701 |
+
raise ValueError(
|
| 702 |
+
f"When a floating-point, subsample={subsample} should be in "
|
| 703 |
+
"the (0, 1) range."
|
| 704 |
+
)
|
| 705 |
+
|
| 706 |
+
# compute predictions and/or averaged predictions
|
| 707 |
+
pd_results = Parallel(n_jobs=n_jobs, verbose=verbose)(
|
| 708 |
+
delayed(partial_dependence)(
|
| 709 |
+
estimator,
|
| 710 |
+
X,
|
| 711 |
+
fxs,
|
| 712 |
+
sample_weight=sample_weight,
|
| 713 |
+
feature_names=feature_names,
|
| 714 |
+
categorical_features=categorical_features,
|
| 715 |
+
response_method=response_method,
|
| 716 |
+
method=method,
|
| 717 |
+
grid_resolution=grid_resolution,
|
| 718 |
+
percentiles=percentiles,
|
| 719 |
+
kind=kind_plot,
|
| 720 |
+
)
|
| 721 |
+
for kind_plot, fxs in zip(kind_, features)
|
| 722 |
+
)
|
| 723 |
+
|
| 724 |
+
# For multioutput regression, we can only check the validity of target
|
| 725 |
+
# now that we have the predictions.
|
| 726 |
+
# Also note: as multiclass-multioutput classifiers are not supported,
|
| 727 |
+
# multiclass and multioutput scenario are mutually exclusive. So there is
|
| 728 |
+
# no risk of overwriting target_idx here.
|
| 729 |
+
pd_result = pd_results[0] # checking the first result is enough
|
| 730 |
+
n_tasks = (
|
| 731 |
+
pd_result.average.shape[0]
|
| 732 |
+
if kind_[0] == "average"
|
| 733 |
+
else pd_result.individual.shape[0]
|
| 734 |
+
)
|
| 735 |
+
if is_regressor(estimator) and n_tasks > 1:
|
| 736 |
+
if target is None:
|
| 737 |
+
raise ValueError("target must be specified for multi-output regressors")
|
| 738 |
+
if not 0 <= target <= n_tasks:
|
| 739 |
+
raise ValueError(
|
| 740 |
+
"target must be in [0, n_tasks], got {}.".format(target)
|
| 741 |
+
)
|
| 742 |
+
target_idx = target
|
| 743 |
+
|
| 744 |
+
deciles = {}
|
| 745 |
+
for fxs, cats in zip(features, is_categorical):
|
| 746 |
+
for fx, cat in zip(fxs, cats):
|
| 747 |
+
if not cat and fx not in deciles:
|
| 748 |
+
X_col = _safe_indexing(X, fx, axis=1)
|
| 749 |
+
deciles[fx] = mquantiles(X_col, prob=np.arange(0.1, 1.0, 0.1))
|
| 750 |
+
|
| 751 |
+
display = cls(
|
| 752 |
+
pd_results=pd_results,
|
| 753 |
+
features=features,
|
| 754 |
+
feature_names=feature_names,
|
| 755 |
+
target_idx=target_idx,
|
| 756 |
+
deciles=deciles,
|
| 757 |
+
kind=kind,
|
| 758 |
+
subsample=subsample,
|
| 759 |
+
random_state=random_state,
|
| 760 |
+
is_categorical=is_categorical,
|
| 761 |
+
)
|
| 762 |
+
return display.plot(
|
| 763 |
+
ax=ax,
|
| 764 |
+
n_cols=n_cols,
|
| 765 |
+
line_kw=line_kw,
|
| 766 |
+
ice_lines_kw=ice_lines_kw,
|
| 767 |
+
pd_line_kw=pd_line_kw,
|
| 768 |
+
contour_kw=contour_kw,
|
| 769 |
+
centered=centered,
|
| 770 |
+
)
|
| 771 |
+
|
| 772 |
+
def _get_sample_count(self, n_samples):
|
| 773 |
+
"""Compute the number of samples as an integer."""
|
| 774 |
+
if isinstance(self.subsample, numbers.Integral):
|
| 775 |
+
if self.subsample < n_samples:
|
| 776 |
+
return self.subsample
|
| 777 |
+
return n_samples
|
| 778 |
+
elif isinstance(self.subsample, numbers.Real):
|
| 779 |
+
return ceil(n_samples * self.subsample)
|
| 780 |
+
return n_samples
|
| 781 |
+
|
| 782 |
+
def _plot_ice_lines(
    self,
    preds,
    feature_values,
    n_ice_to_plot,
    ax,
    pd_plot_idx,
    n_total_lines_by_plot,
    individual_line_kw,
):
    """Plot the ICE lines.

    Parameters
    ----------
    preds : ndarray of shape \
            (n_instances, n_grid_points)
        The predictions computed for all points of `feature_values` for a
        given feature for all samples in `X`.
    feature_values : ndarray of shape (n_grid_points,)
        The feature values for which the predictions have been computed.
    n_ice_to_plot : int
        The number of ICE lines to plot.
    ax : Matplotlib axes
        The axis on which to plot the ICE lines.
    pd_plot_idx : int
        The sequential index of the plot. It will be unraveled to find the
        matching 2D position in the grid layout.
    n_total_lines_by_plot : int
        The total number of lines expected to be plot on the axis.
    individual_line_kw : dict
        Dict with keywords passed when plotting the ICE lines.
    """
    rng = check_random_state(self.random_state)
    # subsample ice: draw `n_ice_to_plot` row indices without replacement so
    # each sample contributes at most one ICE curve
    ice_lines_idx = rng.choice(
        preds.shape[0],
        n_ice_to_plot,
        replace=False,
    )
    ice_lines_subsampled = preds[ice_lines_idx, :]
    # plot the subsampled ice
    for ice_idx, ice in enumerate(ice_lines_subsampled):
        # map the flat (subplot, line) counter onto the shape of the
        # `self.lines_` storage so each artist lands in its own slot
        line_idx = np.unravel_index(
            pd_plot_idx * n_total_lines_by_plot + ice_idx, self.lines_.shape
        )
        # keep only the Line2D artist (`ax.plot` returns a list)
        self.lines_[line_idx] = ax.plot(
            feature_values, ice.ravel(), **individual_line_kw
        )[0]
|
| 830 |
+
|
| 831 |
+
def _plot_average_dependence(
    self,
    avg_preds,
    feature_values,
    ax,
    pd_line_idx,
    line_kw,
    categorical,
    bar_kw,
):
    """Plot the average partial dependence for one feature.

    Draws a bar plot for a categorical feature and a line plot otherwise,
    storing the created artist in ``self.bars_`` / ``self.lines_`` at the
    2D position obtained by unraveling ``pd_line_idx``.

    Parameters
    ----------
    avg_preds : ndarray of shape (n_grid_points,)
        The average predictions for all points of `feature_values` for a
        given feature for all samples in `X`.
    feature_values : ndarray of shape (n_grid_points,)
        The feature values for which the predictions have been computed.
    ax : Matplotlib axes
        The axis on which to plot the average PD.
    pd_line_idx : int
        The sequential index of the plot. It will be unraveled to find the
        matching 2D position in the grid layout.
    line_kw : dict
        Dict with keywords passed when plotting the PD plot.
    categorical : bool
        Whether feature is categorical.
    bar_kw : dict
        Dict with keywords passed when plotting the PD bars (categorical).
    """
    if not categorical:
        # Numerical feature: draw the PD curve and record the line artist.
        target_pos = np.unravel_index(pd_line_idx, self.lines_.shape)
        (pd_line,) = ax.plot(
            feature_values,
            avg_preds,
            **line_kw,
        )
        self.lines_[target_pos] = pd_line
        return
    # Categorical feature: one bar per category; store the first bar artist
    # and rotate the tick labels so category names stay readable.
    target_pos = np.unravel_index(pd_line_idx, self.bars_.shape)
    self.bars_[target_pos] = ax.bar(feature_values, avg_preds, **bar_kw)[0]
    ax.tick_params(axis="x", rotation=90)
|
| 873 |
+
|
| 874 |
+
def _plot_one_way_partial_dependence(
    self,
    kind,
    preds,
    avg_preds,
    feature_values,
    feature_idx,
    n_ice_lines,
    ax,
    n_cols,
    pd_plot_idx,
    n_lines,
    ice_lines_kw,
    pd_line_kw,
    categorical,
    bar_kw,
    pdp_lim,
):
    """Plot 1-way partial dependence: ICE and PDP.

    Parameters
    ----------
    kind : str
        The kind of partial plot to draw.
    preds : ndarray of shape \
            (n_instances, n_grid_points) or None
        The predictions computed for all points of `feature_values` for a
        given feature for all samples in `X`.
    avg_preds : ndarray of shape (n_grid_points,)
        The average predictions for all points of `feature_values` for a
        given feature for all samples in `X`.
    feature_values : ndarray of shape (n_grid_points,)
        The feature values for which the predictions have been computed.
    feature_idx : int
        The index corresponding to the target feature.
    n_ice_lines : int
        The number of ICE lines to plot.
    ax : Matplotlib axes
        The axis on which to plot the ICE and PDP lines.
    n_cols : int or None
        The number of column in the axis.
    pd_plot_idx : int
        The sequential index of the plot. It will be unraveled to find the
        matching 2D position in the grid layout.
    n_lines : int
        The total number of lines expected to be plot on the axis.
    ice_lines_kw : dict
        Dict with keywords passed when plotting the ICE lines.
    pd_line_kw : dict
        Dict with keywords passed when plotting the PD plot.
    categorical : bool
        Whether feature is categorical.
    bar_kw: dict
        Dict with keywords passed when plotting the PD bars (categorical).
    pdp_lim : dict
        Global min and max average predictions, such that all plots will
        have the same scale and y limits. `pdp_lim[1]` is the global min
        and max for single partial dependence curves.
    """
    from matplotlib import transforms  # noqa

    if kind in ("individual", "both"):
        self._plot_ice_lines(
            preds[self.target_idx],
            feature_values,
            n_ice_lines,
            ax,
            pd_plot_idx,
            n_lines,
            ice_lines_kw,
        )

    if kind in ("average", "both"):
        # the average is stored as the last line
        if kind == "average":
            pd_line_idx = pd_plot_idx
        else:
            # "both": slots [0, n_ice_lines) hold the ICE lines for this
            # subplot, so the average goes into the slot right after them
            pd_line_idx = pd_plot_idx * n_lines + n_ice_lines
        self._plot_average_dependence(
            avg_preds[self.target_idx].ravel(),
            feature_values,
            ax,
            pd_line_idx,
            pd_line_kw,
            categorical,
            bar_kw,
        )

    # blended transform: x in data coordinates, y in axes coordinates, so
    # the decile ticks hug the bottom 5% of the plot regardless of y scale
    trans = transforms.blended_transform_factory(ax.transData, ax.transAxes)
    # create the decile line for the vertical axis
    vlines_idx = np.unravel_index(pd_plot_idx, self.deciles_vlines_.shape)
    # deciles are only computed for non-categorical features, hence the guard
    if self.deciles.get(feature_idx[0], None) is not None:
        self.deciles_vlines_[vlines_idx] = ax.vlines(
            self.deciles[feature_idx[0]],
            0,
            0.05,
            transform=trans,
            color="k",
        )
    # reset ylim which was overwritten by vlines
    min_val = min(val[0] for val in pdp_lim.values())
    max_val = max(val[1] for val in pdp_lim.values())
    ax.set_ylim([min_val, max_val])

    # Set xlabel if it is not already set
    if not ax.get_xlabel():
        ax.set_xlabel(self.feature_names[feature_idx[0]])

    # only the first column of a grid layout gets a y label; other columns
    # drop their tick labels since they share the same y limits
    if n_cols is None or pd_plot_idx % n_cols == 0:
        if not ax.get_ylabel():
            ax.set_ylabel("Partial dependence")
    else:
        ax.set_yticklabels([])

    # a legend only makes sense when an average line with a label was drawn
    if pd_line_kw.get("label", None) and kind != "individual" and not categorical:
        ax.legend()
|
| 990 |
+
|
| 991 |
+
def _plot_two_way_partial_dependence(
    self,
    avg_preds,
    feature_values,
    feature_idx,
    ax,
    pd_plot_idx,
    Z_level,
    contour_kw,
    categorical,
    heatmap_kw,
):
    """Plot 2-way partial dependence.

    Parameters
    ----------
    avg_preds : ndarray of shape \
            (n_instances, n_grid_points, n_grid_points)
        The average predictions for all points of `feature_values[0]` and
        `feature_values[1]` for some given features for all samples in `X`.
    feature_values : seq of 1d array
        A sequence of array of the feature values for which the predictions
        have been computed.
    feature_idx : tuple of int
        The indices of the target features
    ax : Matplotlib axes
        The axis on which to plot the ICE and PDP lines.
    pd_plot_idx : int
        The sequential index of the plot. It will be unraveled to find the
        matching 2D position in the grid layout.
    Z_level : ndarray of shape (8, 8)
        The Z-level used to encode the average predictions.
    contour_kw : dict
        Dict with keywords passed when plotting the contours.
    categorical : bool
        Whether features are categorical.
    heatmap_kw: dict
        Dict with keywords passed when plotting the PD heatmap
        (categorical).
    """
    if categorical:
        import matplotlib.pyplot as plt

        # user-provided kwargs take priority over the defaults
        default_im_kw = dict(interpolation="nearest", cmap="viridis")
        im_kw = {**default_im_kw, **heatmap_kw}

        data = avg_preds[self.target_idx]
        im = ax.imshow(data, **im_kw)
        text = None
        # extreme colormap colors, used to pick a readable text color
        cmap_min, cmap_max = im.cmap(0), im.cmap(1.0)

        text = np.empty_like(data, dtype=object)
        # print text with appropriate color depending on background
        thresh = (data.max() + data.min()) / 2.0

        for flat_index in range(data.size):
            row, col = np.unravel_index(flat_index, data.shape)
            # dark cells get the light end of the colormap and vice versa
            color = cmap_max if data[row, col] < thresh else cmap_min

            values_format = ".2f"
            text_data = format(data[row, col], values_format)

            text_kwargs = dict(ha="center", va="center", color=color)
            text[row, col] = ax.text(col, row, text_data, **text_kwargs)

        fig = ax.figure
        fig.colorbar(im, ax=ax)
        # one tick per category on each axis, labeled with the raw values
        ax.set(
            xticks=np.arange(len(feature_values[1])),
            yticks=np.arange(len(feature_values[0])),
            xticklabels=feature_values[1],
            yticklabels=feature_values[0],
            xlabel=self.feature_names[feature_idx[1]],
            ylabel=self.feature_names[feature_idx[0]],
        )

        plt.setp(ax.get_xticklabels(), rotation="vertical")

        heatmap_idx = np.unravel_index(pd_plot_idx, self.heatmaps_.shape)
        self.heatmaps_[heatmap_idx] = im
    else:
        from matplotlib import transforms  # noqa

        XX, YY = np.meshgrid(feature_values[0], feature_values[1])
        # transpose so the first feature varies along the x axis
        Z = avg_preds[self.target_idx].T
        CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5, colors="k")
        contour_idx = np.unravel_index(pd_plot_idx, self.contours_.shape)
        # shared vmin/vmax keep the color scale consistent across subplots
        self.contours_[contour_idx] = ax.contourf(
            XX,
            YY,
            Z,
            levels=Z_level,
            vmax=Z_level[-1],
            vmin=Z_level[0],
            **contour_kw,
        )
        ax.clabel(CS, fmt="%2.2f", colors="k", fontsize=10, inline=True)

        # blended transform: x in data coords, y in axes coords, so decile
        # ticks stay pinned to the plot edge regardless of the data range
        trans = transforms.blended_transform_factory(ax.transData, ax.transAxes)
        # create the decile line for the vertical axis
        xlim, ylim = ax.get_xlim(), ax.get_ylim()
        vlines_idx = np.unravel_index(pd_plot_idx, self.deciles_vlines_.shape)
        self.deciles_vlines_[vlines_idx] = ax.vlines(
            self.deciles[feature_idx[0]],
            0,
            0.05,
            transform=trans,
            color="k",
        )
        # create the decile line for the horizontal axis
        hlines_idx = np.unravel_index(pd_plot_idx, self.deciles_hlines_.shape)
        self.deciles_hlines_[hlines_idx] = ax.hlines(
            self.deciles[feature_idx[1]],
            0,
            0.05,
            transform=trans,
            color="k",
        )
        # reset xlim and ylim since they are overwritten by hlines and
        # vlines
        ax.set_xlim(xlim)
        ax.set_ylim(ylim)

    # set xlabel if it is not already set
    if not ax.get_xlabel():
        ax.set_xlabel(self.feature_names[feature_idx[0]])
    ax.set_ylabel(self.feature_names[feature_idx[1]])
|
| 1118 |
+
|
| 1119 |
+
def plot(
    self,
    *,
    ax=None,
    n_cols=3,
    line_kw=None,
    ice_lines_kw=None,
    pd_line_kw=None,
    contour_kw=None,
    bar_kw=None,
    heatmap_kw=None,
    pdp_lim=None,
    centered=False,
):
    """Plot partial dependence plots.

    Parameters
    ----------
    ax : Matplotlib axes or array-like of Matplotlib axes, default=None
        - If a single axis is passed in, it is treated as a bounding axes
          and a grid of partial dependence plots will be drawn within
          these bounds. The `n_cols` parameter controls the number of
          columns in the grid.
        - If an array-like of axes are passed in, the partial dependence
          plots will be drawn directly into these axes.
        - If `None`, a figure and a bounding axes is created and treated
          as the single axes case.

    n_cols : int, default=3
        The maximum number of columns in the grid plot. Only active when
        `ax` is a single axes or `None`.

    line_kw : dict, default=None
        Dict with keywords passed to the `matplotlib.pyplot.plot` call.
        For one-way partial dependence plots.

    ice_lines_kw : dict, default=None
        Dictionary with keywords passed to the `matplotlib.pyplot.plot` call.
        For ICE lines in the one-way partial dependence plots.
        The key value pairs defined in `ice_lines_kw` takes priority over
        `line_kw`.

        .. versionadded:: 1.0

    pd_line_kw : dict, default=None
        Dictionary with keywords passed to the `matplotlib.pyplot.plot` call.
        For partial dependence in one-way partial dependence plots.
        The key value pairs defined in `pd_line_kw` takes priority over
        `line_kw`.

        .. versionadded:: 1.0

    contour_kw : dict, default=None
        Dict with keywords passed to the `matplotlib.pyplot.contourf`
        call for two-way partial dependence plots.

    bar_kw : dict, default=None
        Dict with keywords passed to the `matplotlib.pyplot.bar`
        call for one-way categorical partial dependence plots.

        .. versionadded:: 1.2

    heatmap_kw : dict, default=None
        Dict with keywords passed to the `matplotlib.pyplot.imshow`
        call for two-way categorical partial dependence plots.

        .. versionadded:: 1.2

    pdp_lim : dict, default=None
        Global min and max average predictions, such that all plots will have the
        same scale and y limits. `pdp_lim[1]` is the global min and max for single
        partial dependence curves. `pdp_lim[2]` is the global min and max for
        two-way partial dependence curves. If `None` (default), the limit will be
        inferred from the global minimum and maximum of all predictions.

        .. versionadded:: 1.1

    centered : bool, default=False
        If `True`, the ICE and PD lines will start at the origin of the
        y-axis. By default, no centering is done.

        .. versionadded:: 1.1

    Returns
    -------
    display : :class:`~sklearn.inspection.PartialDependenceDisplay`
        Returns a :class:`~sklearn.inspection.PartialDependenceDisplay`
        object that contains the partial dependence plots.
    """

    check_matplotlib_support("plot_partial_dependence")
    import matplotlib.pyplot as plt  # noqa
    from matplotlib.gridspec import GridSpecFromSubplotSpec  # noqa

    # broadcast a single kind string to one entry per plotted feature
    if isinstance(self.kind, str):
        kind = [self.kind] * len(self.features)
    else:
        kind = self.kind

    # default: treat every feature (1-way or 2-way) as non-categorical
    if self.is_categorical is None:
        is_categorical = [
            (False,) if len(fx) == 1 else (False, False) for fx in self.features
        ]
    else:
        is_categorical = self.is_categorical

    if len(kind) != len(self.features):
        raise ValueError(
            "When `kind` is provided as a list of strings, it should "
            "contain as many elements as `features`. `kind` contains "
            f"{len(kind)} element(s) and `features` contains "
            f"{len(self.features)} element(s)."
        )

    valid_kinds = {"average", "individual", "both"}
    if any([k not in valid_kinds for k in kind]):
        raise ValueError(
            f"Values provided to `kind` must be one of: {valid_kinds!r} or a list"
            f" of such values. Currently, kind={self.kind!r}"
        )

    # Center results before plotting
    if not centered:
        pd_results_ = self.pd_results
    else:
        # shift every curve so it starts at 0 on the y axis (first grid
        # point subtracted); work on copies, never mutate self.pd_results
        pd_results_ = []
        for kind_plot, pd_result in zip(kind, self.pd_results):
            current_results = {"grid_values": pd_result["grid_values"]}

            if kind_plot in ("individual", "both"):
                preds = pd_result.individual
                preds = preds - preds[self.target_idx, :, 0, None]
                current_results["individual"] = preds

            if kind_plot in ("average", "both"):
                avg_preds = pd_result.average
                avg_preds = avg_preds - avg_preds[self.target_idx, 0, None]
                current_results["average"] = avg_preds

            pd_results_.append(Bunch(**current_results))

    if pdp_lim is None:
        # get global min and max average predictions of PD grouped by plot type
        pdp_lim = {}
        for kind_plot, pdp in zip(kind, pd_results_):
            values = pdp["grid_values"]
            preds = pdp.average if kind_plot == "average" else pdp.individual
            min_pd = preds[self.target_idx].min()
            max_pd = preds[self.target_idx].max()

            # expand the limits to account so that the plotted lines do not touch
            # the edges of the plot
            span = max_pd - min_pd
            min_pd -= 0.05 * span
            max_pd += 0.05 * span

            # pdp_lim is keyed by the number of features (1-way vs 2-way)
            n_fx = len(values)
            old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
            min_pd = min(min_pd, old_min_pd)
            max_pd = max(max_pd, old_max_pd)
            pdp_lim[n_fx] = (min_pd, max_pd)

    if line_kw is None:
        line_kw = {}
    if ice_lines_kw is None:
        ice_lines_kw = {}
    if pd_line_kw is None:
        pd_line_kw = {}
    if bar_kw is None:
        bar_kw = {}
    if heatmap_kw is None:
        heatmap_kw = {}

    if ax is None:
        _, ax = plt.subplots()

    if contour_kw is None:
        contour_kw = {}
    default_contour_kws = {"alpha": 0.75}
    contour_kw = _validate_style_kwargs(default_contour_kws, contour_kw)

    n_features = len(self.features)
    is_average_plot = [kind_plot == "average" for kind_plot in kind]
    if all(is_average_plot):
        # only average plots are requested
        n_ice_lines = 0
        n_lines = 1
    else:
        # we need to determine the number of ICE samples computed
        ice_plot_idx = is_average_plot.index(False)
        n_ice_lines = self._get_sample_count(
            len(pd_results_[ice_plot_idx].individual[0])
        )
        if any([kind_plot == "both" for kind_plot in kind]):
            n_lines = n_ice_lines + 1  # account for the average line
        else:
            n_lines = n_ice_lines

    if isinstance(ax, plt.Axes):
        # If ax was set off, it has most likely been set to off
        # by a previous call to plot.
        if not ax.axison:
            raise ValueError(
                "The ax was already used in another plot "
                "function, please set ax=display.axes_ "
                "instead"
            )

        # the bounding axes only hosts the grid; hide its own frame/ticks
        ax.set_axis_off()
        self.bounding_ax_ = ax
        self.figure_ = ax.figure

        n_cols = min(n_cols, n_features)
        n_rows = int(np.ceil(n_features / float(n_cols)))

        self.axes_ = np.empty((n_rows, n_cols), dtype=object)
        if all(is_average_plot):
            self.lines_ = np.empty((n_rows, n_cols), dtype=object)
        else:
            # one slot per ICE line (plus the average when kind is "both")
            self.lines_ = np.empty((n_rows, n_cols, n_lines), dtype=object)
        self.contours_ = np.empty((n_rows, n_cols), dtype=object)
        self.bars_ = np.empty((n_rows, n_cols), dtype=object)
        self.heatmaps_ = np.empty((n_rows, n_cols), dtype=object)

        axes_ravel = self.axes_.ravel()

        gs = GridSpecFromSubplotSpec(
            n_rows, n_cols, subplot_spec=ax.get_subplotspec()
        )
        for i, spec in zip(range(n_features), gs):
            axes_ravel[i] = self.figure_.add_subplot(spec)

    else:  # array-like
        ax = np.asarray(ax, dtype=object)
        if ax.size != n_features:
            raise ValueError(
                "Expected ax to have {} axes, got {}".format(n_features, ax.size)
            )

        # n_cols drives the "first column gets a y label" logic; it is only
        # known when the user passed a 2D grid of axes
        if ax.ndim == 2:
            n_cols = ax.shape[1]
        else:
            n_cols = None

        self.bounding_ax_ = None
        self.figure_ = ax.ravel()[0].figure
        self.axes_ = ax
        if all(is_average_plot):
            self.lines_ = np.empty_like(ax, dtype=object)
        else:
            self.lines_ = np.empty(ax.shape + (n_lines,), dtype=object)
        self.contours_ = np.empty_like(ax, dtype=object)
        self.bars_ = np.empty_like(ax, dtype=object)
        self.heatmaps_ = np.empty_like(ax, dtype=object)

    # create contour levels for two-way plots
    if 2 in pdp_lim:
        Z_level = np.linspace(*pdp_lim[2], num=8)

    self.deciles_vlines_ = np.empty_like(self.axes_, dtype=object)
    self.deciles_hlines_ = np.empty_like(self.axes_, dtype=object)

    for pd_plot_idx, (axi, feature_idx, cat, pd_result, kind_plot) in enumerate(
        zip(
            self.axes_.ravel(),
            self.features,
            is_categorical,
            pd_results_,
            kind,
        )
    ):
        avg_preds = None
        preds = None
        feature_values = pd_result["grid_values"]
        if kind_plot == "individual":
            preds = pd_result.individual
        elif kind_plot == "average":
            avg_preds = pd_result.average
        else:  # kind_plot == 'both'
            avg_preds = pd_result.average
            preds = pd_result.individual

        if len(feature_values) == 1:
            # define the line-style for the current plot
            default_line_kws = {
                "color": "C0",
                "label": "average" if kind_plot == "both" else None,
            }
            if kind_plot == "individual":
                default_ice_lines_kws = {"alpha": 0.3, "linewidth": 0.5}
                default_pd_lines_kws = {}
            elif kind_plot == "both":
                # by default, we need to distinguish the average line from
                # the individual lines via color and line style
                default_ice_lines_kws = {
                    "alpha": 0.3,
                    "linewidth": 0.5,
                    "color": "tab:blue",
                }
                default_pd_lines_kws = {
                    "color": "tab:orange",
                    "linestyle": "--",
                }
            else:
                default_ice_lines_kws = {}
                default_pd_lines_kws = {}

            # kind-specific defaults win over the generic line defaults
            default_ice_lines_kws = {**default_line_kws, **default_ice_lines_kws}
            default_pd_lines_kws = {**default_line_kws, **default_pd_lines_kws}

            line_kw = _validate_style_kwargs(default_line_kws, line_kw)

            # precedence: ice_lines_kw > line_kw > defaults
            ice_lines_kw = _validate_style_kwargs(
                _validate_style_kwargs(default_ice_lines_kws, line_kw), ice_lines_kw
            )
            # individual ICE lines never carry a legend label
            del ice_lines_kw["label"]

            pd_line_kw = _validate_style_kwargs(
                _validate_style_kwargs(default_pd_lines_kws, line_kw), pd_line_kw
            )

            default_bar_kws = {"color": "C0"}
            bar_kw = _validate_style_kwargs(default_bar_kws, bar_kw)

            default_heatmap_kw = {}
            heatmap_kw = _validate_style_kwargs(default_heatmap_kw, heatmap_kw)

            self._plot_one_way_partial_dependence(
                kind_plot,
                preds,
                avg_preds,
                feature_values[0],
                feature_idx,
                n_ice_lines,
                axi,
                n_cols,
                pd_plot_idx,
                n_lines,
                ice_lines_kw,
                pd_line_kw,
                cat[0],
                bar_kw,
                pdp_lim,
            )
        else:
            self._plot_two_way_partial_dependence(
                avg_preds,
                feature_values,
                feature_idx,
                axi,
                pd_plot_idx,
                Z_level,
                contour_kw,
                # a 2-way plot is rendered as a heatmap only when both
                # features are categorical
                cat[0] and cat[1],
                heatmap_kw,
            )

    return self
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/__init__.py
ADDED
|
File without changes
|
evalkit_tf437/lib/python3.10/site-packages/sklearn/inspection/_plot/tests/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (189 Bytes). View file
|
|
|