ZTWHHH commited on
Commit
ddfc575
·
verified ·
1 Parent(s): b5f68d8

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. vllm/lib/python3.10/site-packages/anyio/abc/__pycache__/__init__.cpython-310.pyc +0 -0
  3. vllm/lib/python3.10/site-packages/anyio/abc/__pycache__/_eventloop.cpython-310.pyc +0 -0
  4. vllm/lib/python3.10/site-packages/anyio/abc/__pycache__/_sockets.cpython-310.pyc +0 -0
  5. vllm/lib/python3.10/site-packages/anyio/abc/__pycache__/_streams.cpython-310.pyc +0 -0
  6. vllm/lib/python3.10/site-packages/anyio/abc/__pycache__/_subprocesses.cpython-310.pyc +0 -0
  7. vllm/lib/python3.10/site-packages/anyio/abc/__pycache__/_testing.cpython-310.pyc +0 -0
  8. vllm/lib/python3.10/site-packages/anyio/abc/_eventloop.py +376 -0
  9. vllm/lib/python3.10/site-packages/anyio/abc/_resources.py +33 -0
  10. vllm/lib/python3.10/site-packages/anyio/abc/_subprocesses.py +79 -0
  11. vllm/lib/python3.10/site-packages/anyio/abc/_tasks.py +101 -0
  12. vllm/lib/python3.10/site-packages/torchvision/image.so +3 -0
  13. vllm/lib/python3.10/site-packages/torchvision/models/__init__.py +23 -0
  14. vllm/lib/python3.10/site-packages/torchvision/models/_api.py +277 -0
  15. vllm/lib/python3.10/site-packages/torchvision/models/_meta.py +1554 -0
  16. vllm/lib/python3.10/site-packages/torchvision/models/_utils.py +256 -0
  17. vllm/lib/python3.10/site-packages/torchvision/models/alexnet.py +119 -0
  18. vllm/lib/python3.10/site-packages/torchvision/models/convnext.py +414 -0
  19. vllm/lib/python3.10/site-packages/torchvision/models/densenet.py +448 -0
  20. vllm/lib/python3.10/site-packages/torchvision/models/efficientnet.py +1131 -0
  21. vllm/lib/python3.10/site-packages/torchvision/models/feature_extraction.py +572 -0
  22. vllm/lib/python3.10/site-packages/torchvision/models/googlenet.py +345 -0
  23. vllm/lib/python3.10/site-packages/torchvision/models/inception.py +478 -0
  24. vllm/lib/python3.10/site-packages/torchvision/models/maxvit.py +833 -0
  25. vllm/lib/python3.10/site-packages/torchvision/models/mnasnet.py +434 -0
  26. vllm/lib/python3.10/site-packages/torchvision/models/mobilenet.py +6 -0
  27. vllm/lib/python3.10/site-packages/torchvision/models/mobilenetv2.py +260 -0
  28. vllm/lib/python3.10/site-packages/torchvision/models/mobilenetv3.py +423 -0
  29. vllm/lib/python3.10/site-packages/torchvision/models/optical_flow/__init__.py +1 -0
  30. vllm/lib/python3.10/site-packages/torchvision/models/optical_flow/__pycache__/__init__.cpython-310.pyc +0 -0
  31. vllm/lib/python3.10/site-packages/torchvision/models/optical_flow/__pycache__/_utils.cpython-310.pyc +0 -0
  32. vllm/lib/python3.10/site-packages/torchvision/models/optical_flow/__pycache__/raft.cpython-310.pyc +0 -0
  33. vllm/lib/python3.10/site-packages/torchvision/models/optical_flow/_utils.py +48 -0
  34. vllm/lib/python3.10/site-packages/torchvision/models/optical_flow/raft.py +947 -0
  35. vllm/lib/python3.10/site-packages/torchvision/models/quantization/__init__.py +5 -0
  36. vllm/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/__init__.cpython-310.pyc +0 -0
  37. vllm/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/googlenet.cpython-310.pyc +0 -0
  38. vllm/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/inception.cpython-310.pyc +0 -0
  39. vllm/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/mobilenet.cpython-310.pyc +0 -0
  40. vllm/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/mobilenetv2.cpython-310.pyc +0 -0
  41. vllm/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/mobilenetv3.cpython-310.pyc +0 -0
  42. vllm/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/resnet.cpython-310.pyc +0 -0
  43. vllm/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/shufflenetv2.cpython-310.pyc +0 -0
  44. vllm/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/utils.cpython-310.pyc +0 -0
  45. vllm/lib/python3.10/site-packages/torchvision/models/quantization/googlenet.py +210 -0
  46. vllm/lib/python3.10/site-packages/torchvision/models/quantization/inception.py +273 -0
  47. vllm/lib/python3.10/site-packages/torchvision/models/quantization/mobilenet.py +6 -0
  48. vllm/lib/python3.10/site-packages/torchvision/models/quantization/mobilenetv2.py +154 -0
  49. vllm/lib/python3.10/site-packages/torchvision/models/quantization/mobilenetv3.py +237 -0
  50. vllm/lib/python3.10/site-packages/torchvision/models/quantization/resnet.py +484 -0
.gitattributes CHANGED
@@ -1490,3 +1490,4 @@ vllm/lib/python3.10/site-packages/decord/libdecord.so filter=lfs diff=lfs merge=
1490
  parrot/lib/python3.10/site-packages/scipy/stats/_biasedurn.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1491
  parrot/lib/python3.10/site-packages/scipy/stats/_rcont/rcont.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1492
  vllm/lib/python3.10/site-packages/ray/data/__pycache__/dataset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 
 
1490
  parrot/lib/python3.10/site-packages/scipy/stats/_biasedurn.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1491
  parrot/lib/python3.10/site-packages/scipy/stats/_rcont/rcont.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1492
  vllm/lib/python3.10/site-packages/ray/data/__pycache__/dataset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1493
+ vllm/lib/python3.10/site-packages/torchvision/image.so filter=lfs diff=lfs merge=lfs -text
vllm/lib/python3.10/site-packages/anyio/abc/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.01 kB). View file
 
vllm/lib/python3.10/site-packages/anyio/abc/__pycache__/_eventloop.cpython-310.pyc ADDED
Binary file (12.4 kB). View file
 
vllm/lib/python3.10/site-packages/anyio/abc/__pycache__/_sockets.cpython-310.pyc ADDED
Binary file (7.75 kB). View file
 
vllm/lib/python3.10/site-packages/anyio/abc/__pycache__/_streams.cpython-310.pyc ADDED
Binary file (7.47 kB). View file
 
vllm/lib/python3.10/site-packages/anyio/abc/__pycache__/_subprocesses.cpython-310.pyc ADDED
Binary file (2.97 kB). View file
 
vllm/lib/python3.10/site-packages/anyio/abc/__pycache__/_testing.cpython-310.pyc ADDED
Binary file (2.59 kB). View file
 
vllm/lib/python3.10/site-packages/anyio/abc/_eventloop.py ADDED
@@ -0,0 +1,376 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import math
4
+ import sys
5
+ from abc import ABCMeta, abstractmethod
6
+ from collections.abc import AsyncIterator, Awaitable, Callable, Sequence
7
+ from contextlib import AbstractContextManager
8
+ from os import PathLike
9
+ from signal import Signals
10
+ from socket import AddressFamily, SocketKind, socket
11
+ from typing import (
12
+ IO,
13
+ TYPE_CHECKING,
14
+ Any,
15
+ TypeVar,
16
+ Union,
17
+ overload,
18
+ )
19
+
20
+ if sys.version_info >= (3, 11):
21
+ from typing import TypeVarTuple, Unpack
22
+ else:
23
+ from typing_extensions import TypeVarTuple, Unpack
24
+
25
+ if sys.version_info >= (3, 10):
26
+ from typing import TypeAlias
27
+ else:
28
+ from typing_extensions import TypeAlias
29
+
30
+ if TYPE_CHECKING:
31
+ from _typeshed import HasFileno
32
+
33
+ from .._core._synchronization import CapacityLimiter, Event, Lock, Semaphore
34
+ from .._core._tasks import CancelScope
35
+ from .._core._testing import TaskInfo
36
+ from ..from_thread import BlockingPortal
37
+ from ._sockets import (
38
+ ConnectedUDPSocket,
39
+ ConnectedUNIXDatagramSocket,
40
+ IPSockAddrType,
41
+ SocketListener,
42
+ SocketStream,
43
+ UDPSocket,
44
+ UNIXDatagramSocket,
45
+ UNIXSocketStream,
46
+ )
47
+ from ._subprocesses import Process
48
+ from ._tasks import TaskGroup
49
+ from ._testing import TestRunner
50
+
51
+ T_Retval = TypeVar("T_Retval")
52
+ PosArgsT = TypeVarTuple("PosArgsT")
53
+ StrOrBytesPath: TypeAlias = Union[str, bytes, "PathLike[str]", "PathLike[bytes]"]
54
+
55
+
56
+ class AsyncBackend(metaclass=ABCMeta):
57
+ @classmethod
58
+ @abstractmethod
59
+ def run(
60
+ cls,
61
+ func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
62
+ args: tuple[Unpack[PosArgsT]],
63
+ kwargs: dict[str, Any],
64
+ options: dict[str, Any],
65
+ ) -> T_Retval:
66
+ """
67
+ Run the given coroutine function in an asynchronous event loop.
68
+
69
+ The current thread must not be already running an event loop.
70
+
71
+ :param func: a coroutine function
72
+ :param args: positional arguments to ``func``
73
+ :param kwargs: positional arguments to ``func``
74
+ :param options: keyword arguments to call the backend ``run()`` implementation
75
+ with
76
+ :return: the return value of the coroutine function
77
+ """
78
+
79
+ @classmethod
80
+ @abstractmethod
81
+ def current_token(cls) -> object:
82
+ """
83
+
84
+ :return:
85
+ """
86
+
87
+ @classmethod
88
+ @abstractmethod
89
+ def current_time(cls) -> float:
90
+ """
91
+ Return the current value of the event loop's internal clock.
92
+
93
+ :return: the clock value (seconds)
94
+ """
95
+
96
+ @classmethod
97
+ @abstractmethod
98
+ def cancelled_exception_class(cls) -> type[BaseException]:
99
+ """Return the exception class that is raised in a task if it's cancelled."""
100
+
101
+ @classmethod
102
+ @abstractmethod
103
+ async def checkpoint(cls) -> None:
104
+ """
105
+ Check if the task has been cancelled, and allow rescheduling of other tasks.
106
+
107
+ This is effectively the same as running :meth:`checkpoint_if_cancelled` and then
108
+ :meth:`cancel_shielded_checkpoint`.
109
+ """
110
+
111
+ @classmethod
112
+ async def checkpoint_if_cancelled(cls) -> None:
113
+ """
114
+ Check if the current task group has been cancelled.
115
+
116
+ This will check if the task has been cancelled, but will not allow other tasks
117
+ to be scheduled if not.
118
+
119
+ """
120
+ if cls.current_effective_deadline() == -math.inf:
121
+ await cls.checkpoint()
122
+
123
+ @classmethod
124
+ async def cancel_shielded_checkpoint(cls) -> None:
125
+ """
126
+ Allow the rescheduling of other tasks.
127
+
128
+ This will give other tasks the opportunity to run, but without checking if the
129
+ current task group has been cancelled, unlike with :meth:`checkpoint`.
130
+
131
+ """
132
+ with cls.create_cancel_scope(shield=True):
133
+ await cls.sleep(0)
134
+
135
+ @classmethod
136
+ @abstractmethod
137
+ async def sleep(cls, delay: float) -> None:
138
+ """
139
+ Pause the current task for the specified duration.
140
+
141
+ :param delay: the duration, in seconds
142
+ """
143
+
144
+ @classmethod
145
+ @abstractmethod
146
+ def create_cancel_scope(
147
+ cls, *, deadline: float = math.inf, shield: bool = False
148
+ ) -> CancelScope:
149
+ pass
150
+
151
+ @classmethod
152
+ @abstractmethod
153
+ def current_effective_deadline(cls) -> float:
154
+ """
155
+ Return the nearest deadline among all the cancel scopes effective for the
156
+ current task.
157
+
158
+ :return:
159
+ - a clock value from the event loop's internal clock
160
+ - ``inf`` if there is no deadline in effect
161
+ - ``-inf`` if the current scope has been cancelled
162
+ :rtype: float
163
+ """
164
+
165
+ @classmethod
166
+ @abstractmethod
167
+ def create_task_group(cls) -> TaskGroup:
168
+ pass
169
+
170
+ @classmethod
171
+ @abstractmethod
172
+ def create_event(cls) -> Event:
173
+ pass
174
+
175
+ @classmethod
176
+ @abstractmethod
177
+ def create_lock(cls, *, fast_acquire: bool) -> Lock:
178
+ pass
179
+
180
+ @classmethod
181
+ @abstractmethod
182
+ def create_semaphore(
183
+ cls,
184
+ initial_value: int,
185
+ *,
186
+ max_value: int | None = None,
187
+ fast_acquire: bool = False,
188
+ ) -> Semaphore:
189
+ pass
190
+
191
+ @classmethod
192
+ @abstractmethod
193
+ def create_capacity_limiter(cls, total_tokens: float) -> CapacityLimiter:
194
+ pass
195
+
196
+ @classmethod
197
+ @abstractmethod
198
+ async def run_sync_in_worker_thread(
199
+ cls,
200
+ func: Callable[[Unpack[PosArgsT]], T_Retval],
201
+ args: tuple[Unpack[PosArgsT]],
202
+ abandon_on_cancel: bool = False,
203
+ limiter: CapacityLimiter | None = None,
204
+ ) -> T_Retval:
205
+ pass
206
+
207
+ @classmethod
208
+ @abstractmethod
209
+ def check_cancelled(cls) -> None:
210
+ pass
211
+
212
+ @classmethod
213
+ @abstractmethod
214
+ def run_async_from_thread(
215
+ cls,
216
+ func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
217
+ args: tuple[Unpack[PosArgsT]],
218
+ token: object,
219
+ ) -> T_Retval:
220
+ pass
221
+
222
+ @classmethod
223
+ @abstractmethod
224
+ def run_sync_from_thread(
225
+ cls,
226
+ func: Callable[[Unpack[PosArgsT]], T_Retval],
227
+ args: tuple[Unpack[PosArgsT]],
228
+ token: object,
229
+ ) -> T_Retval:
230
+ pass
231
+
232
+ @classmethod
233
+ @abstractmethod
234
+ def create_blocking_portal(cls) -> BlockingPortal:
235
+ pass
236
+
237
+ @classmethod
238
+ @abstractmethod
239
+ async def open_process(
240
+ cls,
241
+ command: StrOrBytesPath | Sequence[StrOrBytesPath],
242
+ *,
243
+ stdin: int | IO[Any] | None,
244
+ stdout: int | IO[Any] | None,
245
+ stderr: int | IO[Any] | None,
246
+ **kwargs: Any,
247
+ ) -> Process:
248
+ pass
249
+
250
+ @classmethod
251
+ @abstractmethod
252
+ def setup_process_pool_exit_at_shutdown(cls, workers: set[Process]) -> None:
253
+ pass
254
+
255
+ @classmethod
256
+ @abstractmethod
257
+ async def connect_tcp(
258
+ cls, host: str, port: int, local_address: IPSockAddrType | None = None
259
+ ) -> SocketStream:
260
+ pass
261
+
262
+ @classmethod
263
+ @abstractmethod
264
+ async def connect_unix(cls, path: str | bytes) -> UNIXSocketStream:
265
+ pass
266
+
267
+ @classmethod
268
+ @abstractmethod
269
+ def create_tcp_listener(cls, sock: socket) -> SocketListener:
270
+ pass
271
+
272
+ @classmethod
273
+ @abstractmethod
274
+ def create_unix_listener(cls, sock: socket) -> SocketListener:
275
+ pass
276
+
277
+ @classmethod
278
+ @abstractmethod
279
+ async def create_udp_socket(
280
+ cls,
281
+ family: AddressFamily,
282
+ local_address: IPSockAddrType | None,
283
+ remote_address: IPSockAddrType | None,
284
+ reuse_port: bool,
285
+ ) -> UDPSocket | ConnectedUDPSocket:
286
+ pass
287
+
288
+ @classmethod
289
+ @overload
290
+ async def create_unix_datagram_socket(
291
+ cls, raw_socket: socket, remote_path: None
292
+ ) -> UNIXDatagramSocket: ...
293
+
294
+ @classmethod
295
+ @overload
296
+ async def create_unix_datagram_socket(
297
+ cls, raw_socket: socket, remote_path: str | bytes
298
+ ) -> ConnectedUNIXDatagramSocket: ...
299
+
300
+ @classmethod
301
+ @abstractmethod
302
+ async def create_unix_datagram_socket(
303
+ cls, raw_socket: socket, remote_path: str | bytes | None
304
+ ) -> UNIXDatagramSocket | ConnectedUNIXDatagramSocket:
305
+ pass
306
+
307
+ @classmethod
308
+ @abstractmethod
309
+ async def getaddrinfo(
310
+ cls,
311
+ host: bytes | str | None,
312
+ port: str | int | None,
313
+ *,
314
+ family: int | AddressFamily = 0,
315
+ type: int | SocketKind = 0,
316
+ proto: int = 0,
317
+ flags: int = 0,
318
+ ) -> list[
319
+ tuple[
320
+ AddressFamily,
321
+ SocketKind,
322
+ int,
323
+ str,
324
+ tuple[str, int] | tuple[str, int, int, int],
325
+ ]
326
+ ]:
327
+ pass
328
+
329
+ @classmethod
330
+ @abstractmethod
331
+ async def getnameinfo(
332
+ cls, sockaddr: IPSockAddrType, flags: int = 0
333
+ ) -> tuple[str, str]:
334
+ pass
335
+
336
+ @classmethod
337
+ @abstractmethod
338
+ async def wait_readable(cls, obj: HasFileno | int) -> None:
339
+ pass
340
+
341
+ @classmethod
342
+ @abstractmethod
343
+ async def wait_writable(cls, obj: HasFileno | int) -> None:
344
+ pass
345
+
346
+ @classmethod
347
+ @abstractmethod
348
+ def current_default_thread_limiter(cls) -> CapacityLimiter:
349
+ pass
350
+
351
+ @classmethod
352
+ @abstractmethod
353
+ def open_signal_receiver(
354
+ cls, *signals: Signals
355
+ ) -> AbstractContextManager[AsyncIterator[Signals]]:
356
+ pass
357
+
358
+ @classmethod
359
+ @abstractmethod
360
+ def get_current_task(cls) -> TaskInfo:
361
+ pass
362
+
363
+ @classmethod
364
+ @abstractmethod
365
+ def get_running_tasks(cls) -> Sequence[TaskInfo]:
366
+ pass
367
+
368
+ @classmethod
369
+ @abstractmethod
370
+ async def wait_all_tasks_blocked(cls) -> None:
371
+ pass
372
+
373
+ @classmethod
374
+ @abstractmethod
375
+ def create_test_runner(cls, options: dict[str, Any]) -> TestRunner:
376
+ pass
vllm/lib/python3.10/site-packages/anyio/abc/_resources.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from abc import ABCMeta, abstractmethod
4
+ from types import TracebackType
5
+ from typing import TypeVar
6
+
7
+ T = TypeVar("T")
8
+
9
+
10
+ class AsyncResource(metaclass=ABCMeta):
11
+ """
12
+ Abstract base class for all closeable asynchronous resources.
13
+
14
+ Works as an asynchronous context manager which returns the instance itself on enter,
15
+ and calls :meth:`aclose` on exit.
16
+ """
17
+
18
+ __slots__ = ()
19
+
20
+ async def __aenter__(self: T) -> T:
21
+ return self
22
+
23
+ async def __aexit__(
24
+ self,
25
+ exc_type: type[BaseException] | None,
26
+ exc_val: BaseException | None,
27
+ exc_tb: TracebackType | None,
28
+ ) -> None:
29
+ await self.aclose()
30
+
31
+ @abstractmethod
32
+ async def aclose(self) -> None:
33
+ """Close the resource."""
vllm/lib/python3.10/site-packages/anyio/abc/_subprocesses.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from abc import abstractmethod
4
+ from signal import Signals
5
+
6
+ from ._resources import AsyncResource
7
+ from ._streams import ByteReceiveStream, ByteSendStream
8
+
9
+
10
+ class Process(AsyncResource):
11
+ """An asynchronous version of :class:`subprocess.Popen`."""
12
+
13
+ @abstractmethod
14
+ async def wait(self) -> int:
15
+ """
16
+ Wait until the process exits.
17
+
18
+ :return: the exit code of the process
19
+ """
20
+
21
+ @abstractmethod
22
+ def terminate(self) -> None:
23
+ """
24
+ Terminates the process, gracefully if possible.
25
+
26
+ On Windows, this calls ``TerminateProcess()``.
27
+ On POSIX systems, this sends ``SIGTERM`` to the process.
28
+
29
+ .. seealso:: :meth:`subprocess.Popen.terminate`
30
+ """
31
+
32
+ @abstractmethod
33
+ def kill(self) -> None:
34
+ """
35
+ Kills the process.
36
+
37
+ On Windows, this calls ``TerminateProcess()``.
38
+ On POSIX systems, this sends ``SIGKILL`` to the process.
39
+
40
+ .. seealso:: :meth:`subprocess.Popen.kill`
41
+ """
42
+
43
+ @abstractmethod
44
+ def send_signal(self, signal: Signals) -> None:
45
+ """
46
+ Send a signal to the subprocess.
47
+
48
+ .. seealso:: :meth:`subprocess.Popen.send_signal`
49
+
50
+ :param signal: the signal number (e.g. :data:`signal.SIGHUP`)
51
+ """
52
+
53
+ @property
54
+ @abstractmethod
55
+ def pid(self) -> int:
56
+ """The process ID of the process."""
57
+
58
+ @property
59
+ @abstractmethod
60
+ def returncode(self) -> int | None:
61
+ """
62
+ The return code of the process. If the process has not yet terminated, this will
63
+ be ``None``.
64
+ """
65
+
66
+ @property
67
+ @abstractmethod
68
+ def stdin(self) -> ByteSendStream | None:
69
+ """The stream for the standard input of the process."""
70
+
71
+ @property
72
+ @abstractmethod
73
+ def stdout(self) -> ByteReceiveStream | None:
74
+ """The stream for the standard output of the process."""
75
+
76
+ @property
77
+ @abstractmethod
78
+ def stderr(self) -> ByteReceiveStream | None:
79
+ """The stream for the standard error output of the process."""
vllm/lib/python3.10/site-packages/anyio/abc/_tasks.py ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import sys
4
+ from abc import ABCMeta, abstractmethod
5
+ from collections.abc import Awaitable, Callable
6
+ from types import TracebackType
7
+ from typing import TYPE_CHECKING, Any, Protocol, TypeVar, overload
8
+
9
+ if sys.version_info >= (3, 11):
10
+ from typing import TypeVarTuple, Unpack
11
+ else:
12
+ from typing_extensions import TypeVarTuple, Unpack
13
+
14
+ if TYPE_CHECKING:
15
+ from .._core._tasks import CancelScope
16
+
17
+ T_Retval = TypeVar("T_Retval")
18
+ T_contra = TypeVar("T_contra", contravariant=True)
19
+ PosArgsT = TypeVarTuple("PosArgsT")
20
+
21
+
22
+ class TaskStatus(Protocol[T_contra]):
23
+ @overload
24
+ def started(self: TaskStatus[None]) -> None: ...
25
+
26
+ @overload
27
+ def started(self, value: T_contra) -> None: ...
28
+
29
+ def started(self, value: T_contra | None = None) -> None:
30
+ """
31
+ Signal that the task has started.
32
+
33
+ :param value: object passed back to the starter of the task
34
+ """
35
+
36
+
37
+ class TaskGroup(metaclass=ABCMeta):
38
+ """
39
+ Groups several asynchronous tasks together.
40
+
41
+ :ivar cancel_scope: the cancel scope inherited by all child tasks
42
+ :vartype cancel_scope: CancelScope
43
+
44
+ .. note:: On asyncio, support for eager task factories is considered to be
45
+ **experimental**. In particular, they don't follow the usual semantics of new
46
+ tasks being scheduled on the next iteration of the event loop, and may thus
47
+ cause unexpected behavior in code that wasn't written with such semantics in
48
+ mind.
49
+ """
50
+
51
+ cancel_scope: CancelScope
52
+
53
+ @abstractmethod
54
+ def start_soon(
55
+ self,
56
+ func: Callable[[Unpack[PosArgsT]], Awaitable[Any]],
57
+ *args: Unpack[PosArgsT],
58
+ name: object = None,
59
+ ) -> None:
60
+ """
61
+ Start a new task in this task group.
62
+
63
+ :param func: a coroutine function
64
+ :param args: positional arguments to call the function with
65
+ :param name: name of the task, for the purposes of introspection and debugging
66
+
67
+ .. versionadded:: 3.0
68
+ """
69
+
70
+ @abstractmethod
71
+ async def start(
72
+ self,
73
+ func: Callable[..., Awaitable[Any]],
74
+ *args: object,
75
+ name: object = None,
76
+ ) -> Any:
77
+ """
78
+ Start a new task and wait until it signals for readiness.
79
+
80
+ :param func: a coroutine function
81
+ :param args: positional arguments to call the function with
82
+ :param name: name of the task, for the purposes of introspection and debugging
83
+ :return: the value passed to ``task_status.started()``
84
+ :raises RuntimeError: if the task finishes without calling
85
+ ``task_status.started()``
86
+
87
+ .. versionadded:: 3.0
88
+ """
89
+
90
+ @abstractmethod
91
+ async def __aenter__(self) -> TaskGroup:
92
+ """Enter the task group context and allow starting new tasks."""
93
+
94
+ @abstractmethod
95
+ async def __aexit__(
96
+ self,
97
+ exc_type: type[BaseException] | None,
98
+ exc_val: BaseException | None,
99
+ exc_tb: TracebackType | None,
100
+ ) -> bool | None:
101
+ """Exit the task group context waiting for all tasks to finish."""
vllm/lib/python3.10/site-packages/torchvision/image.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d1e52c1fdc4518df5cffca0399626d6c2369c041202faf8fe54c1edb8b0d8d97
3
+ size 667265
vllm/lib/python3.10/site-packages/torchvision/models/__init__.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .alexnet import *
2
+ from .convnext import *
3
+ from .densenet import *
4
+ from .efficientnet import *
5
+ from .googlenet import *
6
+ from .inception import *
7
+ from .mnasnet import *
8
+ from .mobilenet import *
9
+ from .regnet import *
10
+ from .resnet import *
11
+ from .shufflenetv2 import *
12
+ from .squeezenet import *
13
+ from .vgg import *
14
+ from .vision_transformer import *
15
+ from .swin_transformer import *
16
+ from .maxvit import *
17
+ from . import detection, optical_flow, quantization, segmentation, video
18
+
19
+ # The Weights and WeightsEnum are developer-facing utils that we make public for
20
+ # downstream libs like torchgeo https://github.com/pytorch/vision/issues/7094
21
+ # TODO: we could / should document them publicly, but it's not clear where, as
22
+ # they're not intended for end users.
23
+ from ._api import get_model, get_model_builder, get_model_weights, get_weight, list_models, Weights, WeightsEnum
vllm/lib/python3.10/site-packages/torchvision/models/_api.py ADDED
@@ -0,0 +1,277 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import fnmatch
2
+ import importlib
3
+ import inspect
4
+ import sys
5
+ from dataclasses import dataclass
6
+ from enum import Enum
7
+ from functools import partial
8
+ from inspect import signature
9
+ from types import ModuleType
10
+ from typing import Any, Callable, Dict, Iterable, List, Mapping, Optional, Set, Type, TypeVar, Union
11
+
12
+ from torch import nn
13
+
14
+ from .._internally_replaced_utils import load_state_dict_from_url
15
+
16
+
17
+ __all__ = ["WeightsEnum", "Weights", "get_model", "get_model_builder", "get_model_weights", "get_weight", "list_models"]
18
+
19
+
20
+ @dataclass
21
+ class Weights:
22
+ """
23
+ This class is used to group important attributes associated with the pre-trained weights.
24
+
25
+ Args:
26
+ url (str): The location where we find the weights.
27
+ transforms (Callable): A callable that constructs the preprocessing method (or validation preset transforms)
28
+ needed to use the model. The reason we attach a constructor method rather than an already constructed
29
+ object is because the specific object might have memory and thus we want to delay initialization until
30
+ needed.
31
+ meta (Dict[str, Any]): Stores meta-data related to the weights of the model and its configuration. These can be
32
+ informative attributes (for example the number of parameters/flops, recipe link/methods used in training
33
+ etc), configuration parameters (for example the `num_classes`) needed to construct the model or important
34
+ meta-data (for example the `classes` of a classification model) needed to use the model.
35
+ """
36
+
37
+ url: str
38
+ transforms: Callable
39
+ meta: Dict[str, Any]
40
+
41
+ def __eq__(self, other: Any) -> bool:
42
+ # We need this custom implementation for correct deep-copy and deserialization behavior.
43
+ # TL;DR: After the definition of an enum, creating a new instance, i.e. by deep-copying or deserializing it,
44
+ # involves an equality check against the defined members. Unfortunately, the `transforms` attribute is often
45
+ # defined with `functools.partial` and `fn = partial(...); assert deepcopy(fn) != fn`. Without custom handling
46
+ # for it, the check against the defined members would fail and effectively prevent the weights from being
47
+ # deep-copied or deserialized.
48
+ # See https://github.com/pytorch/vision/pull/7107 for details.
49
+ if not isinstance(other, Weights):
50
+ return NotImplemented
51
+
52
+ if self.url != other.url:
53
+ return False
54
+
55
+ if self.meta != other.meta:
56
+ return False
57
+
58
+ if isinstance(self.transforms, partial) and isinstance(other.transforms, partial):
59
+ return (
60
+ self.transforms.func == other.transforms.func
61
+ and self.transforms.args == other.transforms.args
62
+ and self.transforms.keywords == other.transforms.keywords
63
+ )
64
+ else:
65
+ return self.transforms == other.transforms
66
+
67
+
68
+ class WeightsEnum(Enum):
69
+ """
70
+ This class is the parent class of all model weights. Each model building method receives an optional `weights`
71
+ parameter with its associated pre-trained weights. It inherits from `Enum` and its values should be of type
72
+ `Weights`.
73
+
74
+ Args:
75
+ value (Weights): The data class entry with the weight information.
76
+ """
77
+
78
+ @classmethod
79
+ def verify(cls, obj: Any) -> Any:
80
+ if obj is not None:
81
+ if type(obj) is str:
82
+ obj = cls[obj.replace(cls.__name__ + ".", "")]
83
+ elif not isinstance(obj, cls):
84
+ raise TypeError(
85
+ f"Invalid Weight class provided; expected {cls.__name__} but received {obj.__class__.__name__}."
86
+ )
87
+ return obj
88
+
89
+ def get_state_dict(self, *args: Any, **kwargs: Any) -> Mapping[str, Any]:
90
+ return load_state_dict_from_url(self.url, *args, **kwargs)
91
+
92
+ def __repr__(self) -> str:
93
+ return f"{self.__class__.__name__}.{self._name_}"
94
+
95
+ @property
96
+ def url(self):
97
+ return self.value.url
98
+
99
+ @property
100
+ def transforms(self):
101
+ return self.value.transforms
102
+
103
+ @property
104
+ def meta(self):
105
+ return self.value.meta
106
+
107
+
108
+ def get_weight(name: str) -> WeightsEnum:
109
+ """
110
+ Gets the weights enum value by its full name. Example: "ResNet50_Weights.IMAGENET1K_V1"
111
+
112
+ Args:
113
+ name (str): The name of the weight enum entry.
114
+
115
+ Returns:
116
+ WeightsEnum: The requested weight enum.
117
+ """
118
+ try:
119
+ enum_name, value_name = name.split(".")
120
+ except ValueError:
121
+ raise ValueError(f"Invalid weight name provided: '{name}'.")
122
+
123
+ base_module_name = ".".join(sys.modules[__name__].__name__.split(".")[:-1])
124
+ base_module = importlib.import_module(base_module_name)
125
+ model_modules = [base_module] + [
126
+ x[1]
127
+ for x in inspect.getmembers(base_module, inspect.ismodule)
128
+ if x[1].__file__.endswith("__init__.py") # type: ignore[union-attr]
129
+ ]
130
+
131
+ weights_enum = None
132
+ for m in model_modules:
133
+ potential_class = m.__dict__.get(enum_name, None)
134
+ if potential_class is not None and issubclass(potential_class, WeightsEnum):
135
+ weights_enum = potential_class
136
+ break
137
+
138
+ if weights_enum is None:
139
+ raise ValueError(f"The weight enum '{enum_name}' for the specific method couldn't be retrieved.")
140
+
141
+ return weights_enum[value_name]
142
+
143
+
144
+ def get_model_weights(name: Union[Callable, str]) -> Type[WeightsEnum]:
145
+ """
146
+ Returns the weights enum class associated to the given model.
147
+
148
+ Args:
149
+ name (callable or str): The model builder function or the name under which it is registered.
150
+
151
+ Returns:
152
+ weights_enum (WeightsEnum): The weights enum class associated with the model.
153
+ """
154
+ model = get_model_builder(name) if isinstance(name, str) else name
155
+ return _get_enum_from_fn(model)
156
+
157
+
158
def _get_enum_from_fn(fn: Callable) -> Type[WeightsEnum]:
    """
    Internal method that gets the weight enum of a specific model builder method.

    Args:
        fn (Callable): The builder method used to create the model.

    Returns:
        WeightsEnum: The requested weight enum.

    Raises:
        ValueError: If ``fn`` has no ``weights`` parameter, or if its type
            annotation does not contain a ``WeightsEnum`` subclass.
    """
    sig = signature(fn)
    if "weights" not in sig.parameters:
        raise ValueError("The method is missing the 'weights' argument.")

    # Reuse the already-computed signature instead of re-inspecting ``fn``.
    ann = sig.parameters["weights"].annotation
    weights_enum = None
    if isinstance(ann, type) and issubclass(ann, WeightsEnum):
        weights_enum = ann
    else:
        # handle cases like Union[Optional, T]
        # TODO: Replace ann.__args__ with typing.get_args(ann) after python >= 3.8
        for t in ann.__args__:  # type: ignore[union-attr]
            if isinstance(t, type) and issubclass(t, WeightsEnum):
                weights_enum = t
                break

    if weights_enum is None:
        raise ValueError(
            "The WeightsEnum class for the specific method couldn't be retrieved. Make sure the typing info is correct."
        )

    return weights_enum
189
+
190
+
191
# Type variable for model builder return types; bound so registered builders
# must return an ``nn.Module`` subclass.
M = TypeVar("M", bound=nn.Module)

# Global registry mapping a model name to its builder function.
# Populated by the ``@register_model`` decorator below and consumed by
# ``list_models`` / ``get_model_builder``.
BUILTIN_MODELS = {}
194
+
195
+
196
def register_model(name: Optional[str] = None) -> Callable[[Callable[..., M]], Callable[..., M]]:
    """Decorator that records a model builder in ``BUILTIN_MODELS``.

    Args:
        name (str, optional): Registry key; defaults to the builder's ``__name__``.

    Returns:
        The decorator, which returns the builder unchanged after registering it.
    """

    def wrapper(fn: Callable[..., M]) -> Callable[..., M]:
        registry_key = fn.__name__ if name is None else name
        if registry_key in BUILTIN_MODELS:
            raise ValueError(f"An entry is already registered under the name '{registry_key}'.")
        BUILTIN_MODELS[registry_key] = fn
        return fn

    return wrapper
205
+
206
+
207
def list_models(
    module: Optional[ModuleType] = None,
    include: Union[Iterable[str], str, None] = None,
    exclude: Union[Iterable[str], str, None] = None,
) -> List[str]:
    """
    Returns a list with the names of registered models.

    Args:
        module (ModuleType, optional): The module from which we want to extract the available models.
        include (str or Iterable[str], optional): Filter(s) for including the models from the set of all models.
            Filters are passed to `fnmatch <https://docs.python.org/3/library/fnmatch.html>`__ to match Unix shell-style
            wildcards. In case of many filters, the results is the union of individual filters.
        exclude (str or Iterable[str], optional): Filter(s) applied after include_filters to remove models.
            Filter are passed to `fnmatch <https://docs.python.org/3/library/fnmatch.html>`__ to match Unix shell-style
            wildcards. In case of many filters, the results is removal of all the models that match any individual filter.

    Returns:
        models (list): A list with the names of available models.
    """
    # Start from every registered name, optionally restricted to one module
    # (comparison is against the builder's parent package name).
    registered = set()
    for model_name, builder in BUILTIN_MODELS.items():
        if module is None or builder.__module__.rsplit(".", 1)[0] == module.__name__:
            registered.add(model_name)

    if include:
        patterns = [include] if isinstance(include, str) else include
        selected: Set[str] = set()
        for pattern in patterns:
            # Union of all include patterns.
            selected.update(fnmatch.filter(registered, pattern))
    else:
        selected = registered

    if exclude:
        patterns = [exclude] if isinstance(exclude, str) else exclude
        for pattern in patterns:
            # Exclusion patterns are matched against the full registered set,
            # mirroring the documented "applied after include" semantics.
            selected.difference_update(fnmatch.filter(registered, pattern))
    return sorted(selected)
245
+
246
+
247
def get_model_builder(name: str) -> Callable[..., nn.Module]:
    """
    Gets the model name and returns the model builder method.

    Args:
        name (str): The name under which the model is registered (case-insensitive).

    Returns:
        fn (Callable): The model builder method.

    Raises:
        ValueError: If no model is registered under ``name``.
    """
    lookup_key = name.lower()
    try:
        return BUILTIN_MODELS[lookup_key]
    except KeyError:
        raise ValueError(f"Unknown model {lookup_key}")
263
+
264
+
265
def get_model(name: str, **config: Any) -> nn.Module:
    """
    Gets the model name and configuration and returns an instantiated model.

    Args:
        name (str): The name under which the model is registered.
        **config (Any): parameters passed to the model builder method.

    Returns:
        model (nn.Module): The initialized model.
    """
    # Resolve the registered builder, then instantiate it with the given config.
    builder = get_model_builder(name)
    return builder(**config)
vllm/lib/python3.10/site-packages/torchvision/models/_meta.py ADDED
@@ -0,0 +1,1554 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This file is part of the private API. Please do not refer to any variables defined here directly as they will be
3
+ removed on future versions without warning.
4
+ """
5
+
6
+ # This will eventually be replaced with a call at torchvision.datasets.info("imagenet").categories
7
+ _IMAGENET_CATEGORIES = [
8
+ "tench",
9
+ "goldfish",
10
+ "great white shark",
11
+ "tiger shark",
12
+ "hammerhead",
13
+ "electric ray",
14
+ "stingray",
15
+ "cock",
16
+ "hen",
17
+ "ostrich",
18
+ "brambling",
19
+ "goldfinch",
20
+ "house finch",
21
+ "junco",
22
+ "indigo bunting",
23
+ "robin",
24
+ "bulbul",
25
+ "jay",
26
+ "magpie",
27
+ "chickadee",
28
+ "water ouzel",
29
+ "kite",
30
+ "bald eagle",
31
+ "vulture",
32
+ "great grey owl",
33
+ "European fire salamander",
34
+ "common newt",
35
+ "eft",
36
+ "spotted salamander",
37
+ "axolotl",
38
+ "bullfrog",
39
+ "tree frog",
40
+ "tailed frog",
41
+ "loggerhead",
42
+ "leatherback turtle",
43
+ "mud turtle",
44
+ "terrapin",
45
+ "box turtle",
46
+ "banded gecko",
47
+ "common iguana",
48
+ "American chameleon",
49
+ "whiptail",
50
+ "agama",
51
+ "frilled lizard",
52
+ "alligator lizard",
53
+ "Gila monster",
54
+ "green lizard",
55
+ "African chameleon",
56
+ "Komodo dragon",
57
+ "African crocodile",
58
+ "American alligator",
59
+ "triceratops",
60
+ "thunder snake",
61
+ "ringneck snake",
62
+ "hognose snake",
63
+ "green snake",
64
+ "king snake",
65
+ "garter snake",
66
+ "water snake",
67
+ "vine snake",
68
+ "night snake",
69
+ "boa constrictor",
70
+ "rock python",
71
+ "Indian cobra",
72
+ "green mamba",
73
+ "sea snake",
74
+ "horned viper",
75
+ "diamondback",
76
+ "sidewinder",
77
+ "trilobite",
78
+ "harvestman",
79
+ "scorpion",
80
+ "black and gold garden spider",
81
+ "barn spider",
82
+ "garden spider",
83
+ "black widow",
84
+ "tarantula",
85
+ "wolf spider",
86
+ "tick",
87
+ "centipede",
88
+ "black grouse",
89
+ "ptarmigan",
90
+ "ruffed grouse",
91
+ "prairie chicken",
92
+ "peacock",
93
+ "quail",
94
+ "partridge",
95
+ "African grey",
96
+ "macaw",
97
+ "sulphur-crested cockatoo",
98
+ "lorikeet",
99
+ "coucal",
100
+ "bee eater",
101
+ "hornbill",
102
+ "hummingbird",
103
+ "jacamar",
104
+ "toucan",
105
+ "drake",
106
+ "red-breasted merganser",
107
+ "goose",
108
+ "black swan",
109
+ "tusker",
110
+ "echidna",
111
+ "platypus",
112
+ "wallaby",
113
+ "koala",
114
+ "wombat",
115
+ "jellyfish",
116
+ "sea anemone",
117
+ "brain coral",
118
+ "flatworm",
119
+ "nematode",
120
+ "conch",
121
+ "snail",
122
+ "slug",
123
+ "sea slug",
124
+ "chiton",
125
+ "chambered nautilus",
126
+ "Dungeness crab",
127
+ "rock crab",
128
+ "fiddler crab",
129
+ "king crab",
130
+ "American lobster",
131
+ "spiny lobster",
132
+ "crayfish",
133
+ "hermit crab",
134
+ "isopod",
135
+ "white stork",
136
+ "black stork",
137
+ "spoonbill",
138
+ "flamingo",
139
+ "little blue heron",
140
+ "American egret",
141
+ "bittern",
142
+ "crane bird",
143
+ "limpkin",
144
+ "European gallinule",
145
+ "American coot",
146
+ "bustard",
147
+ "ruddy turnstone",
148
+ "red-backed sandpiper",
149
+ "redshank",
150
+ "dowitcher",
151
+ "oystercatcher",
152
+ "pelican",
153
+ "king penguin",
154
+ "albatross",
155
+ "grey whale",
156
+ "killer whale",
157
+ "dugong",
158
+ "sea lion",
159
+ "Chihuahua",
160
+ "Japanese spaniel",
161
+ "Maltese dog",
162
+ "Pekinese",
163
+ "Shih-Tzu",
164
+ "Blenheim spaniel",
165
+ "papillon",
166
+ "toy terrier",
167
+ "Rhodesian ridgeback",
168
+ "Afghan hound",
169
+ "basset",
170
+ "beagle",
171
+ "bloodhound",
172
+ "bluetick",
173
+ "black-and-tan coonhound",
174
+ "Walker hound",
175
+ "English foxhound",
176
+ "redbone",
177
+ "borzoi",
178
+ "Irish wolfhound",
179
+ "Italian greyhound",
180
+ "whippet",
181
+ "Ibizan hound",
182
+ "Norwegian elkhound",
183
+ "otterhound",
184
+ "Saluki",
185
+ "Scottish deerhound",
186
+ "Weimaraner",
187
+ "Staffordshire bullterrier",
188
+ "American Staffordshire terrier",
189
+ "Bedlington terrier",
190
+ "Border terrier",
191
+ "Kerry blue terrier",
192
+ "Irish terrier",
193
+ "Norfolk terrier",
194
+ "Norwich terrier",
195
+ "Yorkshire terrier",
196
+ "wire-haired fox terrier",
197
+ "Lakeland terrier",
198
+ "Sealyham terrier",
199
+ "Airedale",
200
+ "cairn",
201
+ "Australian terrier",
202
+ "Dandie Dinmont",
203
+ "Boston bull",
204
+ "miniature schnauzer",
205
+ "giant schnauzer",
206
+ "standard schnauzer",
207
+ "Scotch terrier",
208
+ "Tibetan terrier",
209
+ "silky terrier",
210
+ "soft-coated wheaten terrier",
211
+ "West Highland white terrier",
212
+ "Lhasa",
213
+ "flat-coated retriever",
214
+ "curly-coated retriever",
215
+ "golden retriever",
216
+ "Labrador retriever",
217
+ "Chesapeake Bay retriever",
218
+ "German short-haired pointer",
219
+ "vizsla",
220
+ "English setter",
221
+ "Irish setter",
222
+ "Gordon setter",
223
+ "Brittany spaniel",
224
+ "clumber",
225
+ "English springer",
226
+ "Welsh springer spaniel",
227
+ "cocker spaniel",
228
+ "Sussex spaniel",
229
+ "Irish water spaniel",
230
+ "kuvasz",
231
+ "schipperke",
232
+ "groenendael",
233
+ "malinois",
234
+ "briard",
235
+ "kelpie",
236
+ "komondor",
237
+ "Old English sheepdog",
238
+ "Shetland sheepdog",
239
+ "collie",
240
+ "Border collie",
241
+ "Bouvier des Flandres",
242
+ "Rottweiler",
243
+ "German shepherd",
244
+ "Doberman",
245
+ "miniature pinscher",
246
+ "Greater Swiss Mountain dog",
247
+ "Bernese mountain dog",
248
+ "Appenzeller",
249
+ "EntleBucher",
250
+ "boxer",
251
+ "bull mastiff",
252
+ "Tibetan mastiff",
253
+ "French bulldog",
254
+ "Great Dane",
255
+ "Saint Bernard",
256
+ "Eskimo dog",
257
+ "malamute",
258
+ "Siberian husky",
259
+ "dalmatian",
260
+ "affenpinscher",
261
+ "basenji",
262
+ "pug",
263
+ "Leonberg",
264
+ "Newfoundland",
265
+ "Great Pyrenees",
266
+ "Samoyed",
267
+ "Pomeranian",
268
+ "chow",
269
+ "keeshond",
270
+ "Brabancon griffon",
271
+ "Pembroke",
272
+ "Cardigan",
273
+ "toy poodle",
274
+ "miniature poodle",
275
+ "standard poodle",
276
+ "Mexican hairless",
277
+ "timber wolf",
278
+ "white wolf",
279
+ "red wolf",
280
+ "coyote",
281
+ "dingo",
282
+ "dhole",
283
+ "African hunting dog",
284
+ "hyena",
285
+ "red fox",
286
+ "kit fox",
287
+ "Arctic fox",
288
+ "grey fox",
289
+ "tabby",
290
+ "tiger cat",
291
+ "Persian cat",
292
+ "Siamese cat",
293
+ "Egyptian cat",
294
+ "cougar",
295
+ "lynx",
296
+ "leopard",
297
+ "snow leopard",
298
+ "jaguar",
299
+ "lion",
300
+ "tiger",
301
+ "cheetah",
302
+ "brown bear",
303
+ "American black bear",
304
+ "ice bear",
305
+ "sloth bear",
306
+ "mongoose",
307
+ "meerkat",
308
+ "tiger beetle",
309
+ "ladybug",
310
+ "ground beetle",
311
+ "long-horned beetle",
312
+ "leaf beetle",
313
+ "dung beetle",
314
+ "rhinoceros beetle",
315
+ "weevil",
316
+ "fly",
317
+ "bee",
318
+ "ant",
319
+ "grasshopper",
320
+ "cricket",
321
+ "walking stick",
322
+ "cockroach",
323
+ "mantis",
324
+ "cicada",
325
+ "leafhopper",
326
+ "lacewing",
327
+ "dragonfly",
328
+ "damselfly",
329
+ "admiral",
330
+ "ringlet",
331
+ "monarch",
332
+ "cabbage butterfly",
333
+ "sulphur butterfly",
334
+ "lycaenid",
335
+ "starfish",
336
+ "sea urchin",
337
+ "sea cucumber",
338
+ "wood rabbit",
339
+ "hare",
340
+ "Angora",
341
+ "hamster",
342
+ "porcupine",
343
+ "fox squirrel",
344
+ "marmot",
345
+ "beaver",
346
+ "guinea pig",
347
+ "sorrel",
348
+ "zebra",
349
+ "hog",
350
+ "wild boar",
351
+ "warthog",
352
+ "hippopotamus",
353
+ "ox",
354
+ "water buffalo",
355
+ "bison",
356
+ "ram",
357
+ "bighorn",
358
+ "ibex",
359
+ "hartebeest",
360
+ "impala",
361
+ "gazelle",
362
+ "Arabian camel",
363
+ "llama",
364
+ "weasel",
365
+ "mink",
366
+ "polecat",
367
+ "black-footed ferret",
368
+ "otter",
369
+ "skunk",
370
+ "badger",
371
+ "armadillo",
372
+ "three-toed sloth",
373
+ "orangutan",
374
+ "gorilla",
375
+ "chimpanzee",
376
+ "gibbon",
377
+ "siamang",
378
+ "guenon",
379
+ "patas",
380
+ "baboon",
381
+ "macaque",
382
+ "langur",
383
+ "colobus",
384
+ "proboscis monkey",
385
+ "marmoset",
386
+ "capuchin",
387
+ "howler monkey",
388
+ "titi",
389
+ "spider monkey",
390
+ "squirrel monkey",
391
+ "Madagascar cat",
392
+ "indri",
393
+ "Indian elephant",
394
+ "African elephant",
395
+ "lesser panda",
396
+ "giant panda",
397
+ "barracouta",
398
+ "eel",
399
+ "coho",
400
+ "rock beauty",
401
+ "anemone fish",
402
+ "sturgeon",
403
+ "gar",
404
+ "lionfish",
405
+ "puffer",
406
+ "abacus",
407
+ "abaya",
408
+ "academic gown",
409
+ "accordion",
410
+ "acoustic guitar",
411
+ "aircraft carrier",
412
+ "airliner",
413
+ "airship",
414
+ "altar",
415
+ "ambulance",
416
+ "amphibian",
417
+ "analog clock",
418
+ "apiary",
419
+ "apron",
420
+ "ashcan",
421
+ "assault rifle",
422
+ "backpack",
423
+ "bakery",
424
+ "balance beam",
425
+ "balloon",
426
+ "ballpoint",
427
+ "Band Aid",
428
+ "banjo",
429
+ "bannister",
430
+ "barbell",
431
+ "barber chair",
432
+ "barbershop",
433
+ "barn",
434
+ "barometer",
435
+ "barrel",
436
+ "barrow",
437
+ "baseball",
438
+ "basketball",
439
+ "bassinet",
440
+ "bassoon",
441
+ "bathing cap",
442
+ "bath towel",
443
+ "bathtub",
444
+ "beach wagon",
445
+ "beacon",
446
+ "beaker",
447
+ "bearskin",
448
+ "beer bottle",
449
+ "beer glass",
450
+ "bell cote",
451
+ "bib",
452
+ "bicycle-built-for-two",
453
+ "bikini",
454
+ "binder",
455
+ "binoculars",
456
+ "birdhouse",
457
+ "boathouse",
458
+ "bobsled",
459
+ "bolo tie",
460
+ "bonnet",
461
+ "bookcase",
462
+ "bookshop",
463
+ "bottlecap",
464
+ "bow",
465
+ "bow tie",
466
+ "brass",
467
+ "brassiere",
468
+ "breakwater",
469
+ "breastplate",
470
+ "broom",
471
+ "bucket",
472
+ "buckle",
473
+ "bulletproof vest",
474
+ "bullet train",
475
+ "butcher shop",
476
+ "cab",
477
+ "caldron",
478
+ "candle",
479
+ "cannon",
480
+ "canoe",
481
+ "can opener",
482
+ "cardigan",
483
+ "car mirror",
484
+ "carousel",
485
+ "carpenter's kit",
486
+ "carton",
487
+ "car wheel",
488
+ "cash machine",
489
+ "cassette",
490
+ "cassette player",
491
+ "castle",
492
+ "catamaran",
493
+ "CD player",
494
+ "cello",
495
+ "cellular telephone",
496
+ "chain",
497
+ "chainlink fence",
498
+ "chain mail",
499
+ "chain saw",
500
+ "chest",
501
+ "chiffonier",
502
+ "chime",
503
+ "china cabinet",
504
+ "Christmas stocking",
505
+ "church",
506
+ "cinema",
507
+ "cleaver",
508
+ "cliff dwelling",
509
+ "cloak",
510
+ "clog",
511
+ "cocktail shaker",
512
+ "coffee mug",
513
+ "coffeepot",
514
+ "coil",
515
+ "combination lock",
516
+ "computer keyboard",
517
+ "confectionery",
518
+ "container ship",
519
+ "convertible",
520
+ "corkscrew",
521
+ "cornet",
522
+ "cowboy boot",
523
+ "cowboy hat",
524
+ "cradle",
525
+ "crane",
526
+ "crash helmet",
527
+ "crate",
528
+ "crib",
529
+ "Crock Pot",
530
+ "croquet ball",
531
+ "crutch",
532
+ "cuirass",
533
+ "dam",
534
+ "desk",
535
+ "desktop computer",
536
+ "dial telephone",
537
+ "diaper",
538
+ "digital clock",
539
+ "digital watch",
540
+ "dining table",
541
+ "dishrag",
542
+ "dishwasher",
543
+ "disk brake",
544
+ "dock",
545
+ "dogsled",
546
+ "dome",
547
+ "doormat",
548
+ "drilling platform",
549
+ "drum",
550
+ "drumstick",
551
+ "dumbbell",
552
+ "Dutch oven",
553
+ "electric fan",
554
+ "electric guitar",
555
+ "electric locomotive",
556
+ "entertainment center",
557
+ "envelope",
558
+ "espresso maker",
559
+ "face powder",
560
+ "feather boa",
561
+ "file",
562
+ "fireboat",
563
+ "fire engine",
564
+ "fire screen",
565
+ "flagpole",
566
+ "flute",
567
+ "folding chair",
568
+ "football helmet",
569
+ "forklift",
570
+ "fountain",
571
+ "fountain pen",
572
+ "four-poster",
573
+ "freight car",
574
+ "French horn",
575
+ "frying pan",
576
+ "fur coat",
577
+ "garbage truck",
578
+ "gasmask",
579
+ "gas pump",
580
+ "goblet",
581
+ "go-kart",
582
+ "golf ball",
583
+ "golfcart",
584
+ "gondola",
585
+ "gong",
586
+ "gown",
587
+ "grand piano",
588
+ "greenhouse",
589
+ "grille",
590
+ "grocery store",
591
+ "guillotine",
592
+ "hair slide",
593
+ "hair spray",
594
+ "half track",
595
+ "hammer",
596
+ "hamper",
597
+ "hand blower",
598
+ "hand-held computer",
599
+ "handkerchief",
600
+ "hard disc",
601
+ "harmonica",
602
+ "harp",
603
+ "harvester",
604
+ "hatchet",
605
+ "holster",
606
+ "home theater",
607
+ "honeycomb",
608
+ "hook",
609
+ "hoopskirt",
610
+ "horizontal bar",
611
+ "horse cart",
612
+ "hourglass",
613
+ "iPod",
614
+ "iron",
615
+ "jack-o'-lantern",
616
+ "jean",
617
+ "jeep",
618
+ "jersey",
619
+ "jigsaw puzzle",
620
+ "jinrikisha",
621
+ "joystick",
622
+ "kimono",
623
+ "knee pad",
624
+ "knot",
625
+ "lab coat",
626
+ "ladle",
627
+ "lampshade",
628
+ "laptop",
629
+ "lawn mower",
630
+ "lens cap",
631
+ "letter opener",
632
+ "library",
633
+ "lifeboat",
634
+ "lighter",
635
+ "limousine",
636
+ "liner",
637
+ "lipstick",
638
+ "Loafer",
639
+ "lotion",
640
+ "loudspeaker",
641
+ "loupe",
642
+ "lumbermill",
643
+ "magnetic compass",
644
+ "mailbag",
645
+ "mailbox",
646
+ "maillot",
647
+ "maillot tank suit",
648
+ "manhole cover",
649
+ "maraca",
650
+ "marimba",
651
+ "mask",
652
+ "matchstick",
653
+ "maypole",
654
+ "maze",
655
+ "measuring cup",
656
+ "medicine chest",
657
+ "megalith",
658
+ "microphone",
659
+ "microwave",
660
+ "military uniform",
661
+ "milk can",
662
+ "minibus",
663
+ "miniskirt",
664
+ "minivan",
665
+ "missile",
666
+ "mitten",
667
+ "mixing bowl",
668
+ "mobile home",
669
+ "Model T",
670
+ "modem",
671
+ "monastery",
672
+ "monitor",
673
+ "moped",
674
+ "mortar",
675
+ "mortarboard",
676
+ "mosque",
677
+ "mosquito net",
678
+ "motor scooter",
679
+ "mountain bike",
680
+ "mountain tent",
681
+ "mouse",
682
+ "mousetrap",
683
+ "moving van",
684
+ "muzzle",
685
+ "nail",
686
+ "neck brace",
687
+ "necklace",
688
+ "nipple",
689
+ "notebook",
690
+ "obelisk",
691
+ "oboe",
692
+ "ocarina",
693
+ "odometer",
694
+ "oil filter",
695
+ "organ",
696
+ "oscilloscope",
697
+ "overskirt",
698
+ "oxcart",
699
+ "oxygen mask",
700
+ "packet",
701
+ "paddle",
702
+ "paddlewheel",
703
+ "padlock",
704
+ "paintbrush",
705
+ "pajama",
706
+ "palace",
707
+ "panpipe",
708
+ "paper towel",
709
+ "parachute",
710
+ "parallel bars",
711
+ "park bench",
712
+ "parking meter",
713
+ "passenger car",
714
+ "patio",
715
+ "pay-phone",
716
+ "pedestal",
717
+ "pencil box",
718
+ "pencil sharpener",
719
+ "perfume",
720
+ "Petri dish",
721
+ "photocopier",
722
+ "pick",
723
+ "pickelhaube",
724
+ "picket fence",
725
+ "pickup",
726
+ "pier",
727
+ "piggy bank",
728
+ "pill bottle",
729
+ "pillow",
730
+ "ping-pong ball",
731
+ "pinwheel",
732
+ "pirate",
733
+ "pitcher",
734
+ "plane",
735
+ "planetarium",
736
+ "plastic bag",
737
+ "plate rack",
738
+ "plow",
739
+ "plunger",
740
+ "Polaroid camera",
741
+ "pole",
742
+ "police van",
743
+ "poncho",
744
+ "pool table",
745
+ "pop bottle",
746
+ "pot",
747
+ "potter's wheel",
748
+ "power drill",
749
+ "prayer rug",
750
+ "printer",
751
+ "prison",
752
+ "projectile",
753
+ "projector",
754
+ "puck",
755
+ "punching bag",
756
+ "purse",
757
+ "quill",
758
+ "quilt",
759
+ "racer",
760
+ "racket",
761
+ "radiator",
762
+ "radio",
763
+ "radio telescope",
764
+ "rain barrel",
765
+ "recreational vehicle",
766
+ "reel",
767
+ "reflex camera",
768
+ "refrigerator",
769
+ "remote control",
770
+ "restaurant",
771
+ "revolver",
772
+ "rifle",
773
+ "rocking chair",
774
+ "rotisserie",
775
+ "rubber eraser",
776
+ "rugby ball",
777
+ "rule",
778
+ "running shoe",
779
+ "safe",
780
+ "safety pin",
781
+ "saltshaker",
782
+ "sandal",
783
+ "sarong",
784
+ "sax",
785
+ "scabbard",
786
+ "scale",
787
+ "school bus",
788
+ "schooner",
789
+ "scoreboard",
790
+ "screen",
791
+ "screw",
792
+ "screwdriver",
793
+ "seat belt",
794
+ "sewing machine",
795
+ "shield",
796
+ "shoe shop",
797
+ "shoji",
798
+ "shopping basket",
799
+ "shopping cart",
800
+ "shovel",
801
+ "shower cap",
802
+ "shower curtain",
803
+ "ski",
804
+ "ski mask",
805
+ "sleeping bag",
806
+ "slide rule",
807
+ "sliding door",
808
+ "slot",
809
+ "snorkel",
810
+ "snowmobile",
811
+ "snowplow",
812
+ "soap dispenser",
813
+ "soccer ball",
814
+ "sock",
815
+ "solar dish",
816
+ "sombrero",
817
+ "soup bowl",
818
+ "space bar",
819
+ "space heater",
820
+ "space shuttle",
821
+ "spatula",
822
+ "speedboat",
823
+ "spider web",
824
+ "spindle",
825
+ "sports car",
826
+ "spotlight",
827
+ "stage",
828
+ "steam locomotive",
829
+ "steel arch bridge",
830
+ "steel drum",
831
+ "stethoscope",
832
+ "stole",
833
+ "stone wall",
834
+ "stopwatch",
835
+ "stove",
836
+ "strainer",
837
+ "streetcar",
838
+ "stretcher",
839
+ "studio couch",
840
+ "stupa",
841
+ "submarine",
842
+ "suit",
843
+ "sundial",
844
+ "sunglass",
845
+ "sunglasses",
846
+ "sunscreen",
847
+ "suspension bridge",
848
+ "swab",
849
+ "sweatshirt",
850
+ "swimming trunks",
851
+ "swing",
852
+ "switch",
853
+ "syringe",
854
+ "table lamp",
855
+ "tank",
856
+ "tape player",
857
+ "teapot",
858
+ "teddy",
859
+ "television",
860
+ "tennis ball",
861
+ "thatch",
862
+ "theater curtain",
863
+ "thimble",
864
+ "thresher",
865
+ "throne",
866
+ "tile roof",
867
+ "toaster",
868
+ "tobacco shop",
869
+ "toilet seat",
870
+ "torch",
871
+ "totem pole",
872
+ "tow truck",
873
+ "toyshop",
874
+ "tractor",
875
+ "trailer truck",
876
+ "tray",
877
+ "trench coat",
878
+ "tricycle",
879
+ "trimaran",
880
+ "tripod",
881
+ "triumphal arch",
882
+ "trolleybus",
883
+ "trombone",
884
+ "tub",
885
+ "turnstile",
886
+ "typewriter keyboard",
887
+ "umbrella",
888
+ "unicycle",
889
+ "upright",
890
+ "vacuum",
891
+ "vase",
892
+ "vault",
893
+ "velvet",
894
+ "vending machine",
895
+ "vestment",
896
+ "viaduct",
897
+ "violin",
898
+ "volleyball",
899
+ "waffle iron",
900
+ "wall clock",
901
+ "wallet",
902
+ "wardrobe",
903
+ "warplane",
904
+ "washbasin",
905
+ "washer",
906
+ "water bottle",
907
+ "water jug",
908
+ "water tower",
909
+ "whiskey jug",
910
+ "whistle",
911
+ "wig",
912
+ "window screen",
913
+ "window shade",
914
+ "Windsor tie",
915
+ "wine bottle",
916
+ "wing",
917
+ "wok",
918
+ "wooden spoon",
919
+ "wool",
920
+ "worm fence",
921
+ "wreck",
922
+ "yawl",
923
+ "yurt",
924
+ "web site",
925
+ "comic book",
926
+ "crossword puzzle",
927
+ "street sign",
928
+ "traffic light",
929
+ "book jacket",
930
+ "menu",
931
+ "plate",
932
+ "guacamole",
933
+ "consomme",
934
+ "hot pot",
935
+ "trifle",
936
+ "ice cream",
937
+ "ice lolly",
938
+ "French loaf",
939
+ "bagel",
940
+ "pretzel",
941
+ "cheeseburger",
942
+ "hotdog",
943
+ "mashed potato",
944
+ "head cabbage",
945
+ "broccoli",
946
+ "cauliflower",
947
+ "zucchini",
948
+ "spaghetti squash",
949
+ "acorn squash",
950
+ "butternut squash",
951
+ "cucumber",
952
+ "artichoke",
953
+ "bell pepper",
954
+ "cardoon",
955
+ "mushroom",
956
+ "Granny Smith",
957
+ "strawberry",
958
+ "orange",
959
+ "lemon",
960
+ "fig",
961
+ "pineapple",
962
+ "banana",
963
+ "jackfruit",
964
+ "custard apple",
965
+ "pomegranate",
966
+ "hay",
967
+ "carbonara",
968
+ "chocolate sauce",
969
+ "dough",
970
+ "meat loaf",
971
+ "pizza",
972
+ "potpie",
973
+ "burrito",
974
+ "red wine",
975
+ "espresso",
976
+ "cup",
977
+ "eggnog",
978
+ "alp",
979
+ "bubble",
980
+ "cliff",
981
+ "coral reef",
982
+ "geyser",
983
+ "lakeside",
984
+ "promontory",
985
+ "sandbar",
986
+ "seashore",
987
+ "valley",
988
+ "volcano",
989
+ "ballplayer",
990
+ "groom",
991
+ "scuba diver",
992
+ "rapeseed",
993
+ "daisy",
994
+ "yellow lady's slipper",
995
+ "corn",
996
+ "acorn",
997
+ "hip",
998
+ "buckeye",
999
+ "coral fungus",
1000
+ "agaric",
1001
+ "gyromitra",
1002
+ "stinkhorn",
1003
+ "earthstar",
1004
+ "hen-of-the-woods",
1005
+ "bolete",
1006
+ "ear",
1007
+ "toilet tissue",
1008
+ ]
1009
+
1010
+ # To be replaced with torchvision.datasets.info("coco").categories
1011
# COCO object-detection category names. Index 0 is the background class.
# "N/A" entries are placeholder slots (presumably raw COCO category ids that
# carry no class, kept so a category's list index lines up with its id —
# TODO confirm against the COCO label map).
_COCO_CATEGORIES = [
    "__background__",
    "person",
    "bicycle",
    "car",
    "motorcycle",
    "airplane",
    "bus",
    "train",
    "truck",
    "boat",
    "traffic light",
    "fire hydrant",
    "N/A",
    "stop sign",
    "parking meter",
    "bench",
    "bird",
    "cat",
    "dog",
    "horse",
    "sheep",
    "cow",
    "elephant",
    "bear",
    "zebra",
    "giraffe",
    "N/A",
    "backpack",
    "umbrella",
    "N/A",
    "N/A",
    "handbag",
    "tie",
    "suitcase",
    "frisbee",
    "skis",
    "snowboard",
    "sports ball",
    "kite",
    "baseball bat",
    "baseball glove",
    "skateboard",
    "surfboard",
    "tennis racket",
    "bottle",
    "N/A",
    "wine glass",
    "cup",
    "fork",
    "knife",
    "spoon",
    "bowl",
    "banana",
    "apple",
    "sandwich",
    "orange",
    "broccoli",
    "carrot",
    "hot dog",
    "pizza",
    "donut",
    "cake",
    "chair",
    "couch",
    "potted plant",
    "bed",
    "N/A",
    "dining table",
    "N/A",
    "N/A",
    "toilet",
    "N/A",
    "tv",
    "laptop",
    "mouse",
    "remote",
    "keyboard",
    "cell phone",
    "microwave",
    "oven",
    "toaster",
    "sink",
    "refrigerator",
    "N/A",
    "book",
    "clock",
    "vase",
    "scissors",
    "teddy bear",
    "hair drier",
    "toothbrush",
]
1104
+
1105
+ # To be replaced with torchvision.datasets.info("coco_kp")
1106
# Two-class list: index 0 is "no person" (background), index 1 is "person".
_COCO_PERSON_CATEGORIES = ["no person", "person"]

# The 17 COCO person keypoint names: nose first, then left/right pairs
# ordered from the head down to the ankles.
_COCO_PERSON_KEYPOINT_NAMES = [
    "nose",
    "left_eye",
    "right_eye",
    "left_ear",
    "right_ear",
    "left_shoulder",
    "right_shoulder",
    "left_elbow",
    "right_elbow",
    "left_wrist",
    "right_wrist",
    "left_hip",
    "right_hip",
    "left_knee",
    "right_knee",
    "left_ankle",
    "right_ankle",
]
1126
+
1127
+ # To be replaced with torchvision.datasets.info("voc").categories
1128
# The 20 PASCAL VOC object classes, with "__background__" at index 0
# (21 entries total).
_VOC_CATEGORIES = [
    "__background__",
    "aeroplane",
    "bicycle",
    "bird",
    "boat",
    "bottle",
    "bus",
    "car",
    "cat",
    "chair",
    "cow",
    "diningtable",
    "dog",
    "horse",
    "motorbike",
    "person",
    "pottedplant",
    "sheep",
    "sofa",
    "train",
    "tvmonitor",
]
1151
+
1152
+ # To be replaced with torchvision.datasets.info("kinetics400").categories
1153
+ _KINETICS400_CATEGORIES = [
1154
+ "abseiling",
1155
+ "air drumming",
1156
+ "answering questions",
1157
+ "applauding",
1158
+ "applying cream",
1159
+ "archery",
1160
+ "arm wrestling",
1161
+ "arranging flowers",
1162
+ "assembling computer",
1163
+ "auctioning",
1164
+ "baby waking up",
1165
+ "baking cookies",
1166
+ "balloon blowing",
1167
+ "bandaging",
1168
+ "barbequing",
1169
+ "bartending",
1170
+ "beatboxing",
1171
+ "bee keeping",
1172
+ "belly dancing",
1173
+ "bench pressing",
1174
+ "bending back",
1175
+ "bending metal",
1176
+ "biking through snow",
1177
+ "blasting sand",
1178
+ "blowing glass",
1179
+ "blowing leaves",
1180
+ "blowing nose",
1181
+ "blowing out candles",
1182
+ "bobsledding",
1183
+ "bookbinding",
1184
+ "bouncing on trampoline",
1185
+ "bowling",
1186
+ "braiding hair",
1187
+ "breading or breadcrumbing",
1188
+ "breakdancing",
1189
+ "brush painting",
1190
+ "brushing hair",
1191
+ "brushing teeth",
1192
+ "building cabinet",
1193
+ "building shed",
1194
+ "bungee jumping",
1195
+ "busking",
1196
+ "canoeing or kayaking",
1197
+ "capoeira",
1198
+ "carrying baby",
1199
+ "cartwheeling",
1200
+ "carving pumpkin",
1201
+ "catching fish",
1202
+ "catching or throwing baseball",
1203
+ "catching or throwing frisbee",
1204
+ "catching or throwing softball",
1205
+ "celebrating",
1206
+ "changing oil",
1207
+ "changing wheel",
1208
+ "checking tires",
1209
+ "cheerleading",
1210
+ "chopping wood",
1211
+ "clapping",
1212
+ "clay pottery making",
1213
+ "clean and jerk",
1214
+ "cleaning floor",
1215
+ "cleaning gutters",
1216
+ "cleaning pool",
1217
+ "cleaning shoes",
1218
+ "cleaning toilet",
1219
+ "cleaning windows",
1220
+ "climbing a rope",
1221
+ "climbing ladder",
1222
+ "climbing tree",
1223
+ "contact juggling",
1224
+ "cooking chicken",
1225
+ "cooking egg",
1226
+ "cooking on campfire",
1227
+ "cooking sausages",
1228
+ "counting money",
1229
+ "country line dancing",
1230
+ "cracking neck",
1231
+ "crawling baby",
1232
+ "crossing river",
1233
+ "crying",
1234
+ "curling hair",
1235
+ "cutting nails",
1236
+ "cutting pineapple",
1237
+ "cutting watermelon",
1238
+ "dancing ballet",
1239
+ "dancing charleston",
1240
+ "dancing gangnam style",
1241
+ "dancing macarena",
1242
+ "deadlifting",
1243
+ "decorating the christmas tree",
1244
+ "digging",
1245
+ "dining",
1246
+ "disc golfing",
1247
+ "diving cliff",
1248
+ "dodgeball",
1249
+ "doing aerobics",
1250
+ "doing laundry",
1251
+ "doing nails",
1252
+ "drawing",
1253
+ "dribbling basketball",
1254
+ "drinking",
1255
+ "drinking beer",
1256
+ "drinking shots",
1257
+ "driving car",
1258
+ "driving tractor",
1259
+ "drop kicking",
1260
+ "drumming fingers",
1261
+ "dunking basketball",
1262
+ "dying hair",
1263
+ "eating burger",
1264
+ "eating cake",
1265
+ "eating carrots",
1266
+ "eating chips",
1267
+ "eating doughnuts",
1268
+ "eating hotdog",
1269
+ "eating ice cream",
1270
+ "eating spaghetti",
1271
+ "eating watermelon",
1272
+ "egg hunting",
1273
+ "exercising arm",
1274
+ "exercising with an exercise ball",
1275
+ "extinguishing fire",
1276
+ "faceplanting",
1277
+ "feeding birds",
1278
+ "feeding fish",
1279
+ "feeding goats",
1280
+ "filling eyebrows",
1281
+ "finger snapping",
1282
+ "fixing hair",
1283
+ "flipping pancake",
1284
+ "flying kite",
1285
+ "folding clothes",
1286
+ "folding napkins",
1287
+ "folding paper",
1288
+ "front raises",
1289
+ "frying vegetables",
1290
+ "garbage collecting",
1291
+ "gargling",
1292
+ "getting a haircut",
1293
+ "getting a tattoo",
1294
+ "giving or receiving award",
1295
+ "golf chipping",
1296
+ "golf driving",
1297
+ "golf putting",
1298
+ "grinding meat",
1299
+ "grooming dog",
1300
+ "grooming horse",
1301
+ "gymnastics tumbling",
1302
+ "hammer throw",
1303
+ "headbanging",
1304
+ "headbutting",
1305
+ "high jump",
1306
+ "high kick",
1307
+ "hitting baseball",
1308
+ "hockey stop",
1309
+ "holding snake",
1310
+ "hopscotch",
1311
+ "hoverboarding",
1312
+ "hugging",
1313
+ "hula hooping",
1314
+ "hurdling",
1315
+ "hurling (sport)",
1316
+ "ice climbing",
1317
+ "ice fishing",
1318
+ "ice skating",
1319
+ "ironing",
1320
+ "javelin throw",
1321
+ "jetskiing",
1322
+ "jogging",
1323
+ "juggling balls",
1324
+ "juggling fire",
1325
+ "juggling soccer ball",
1326
+ "jumping into pool",
1327
+ "jumpstyle dancing",
1328
+ "kicking field goal",
1329
+ "kicking soccer ball",
1330
+ "kissing",
1331
+ "kitesurfing",
1332
+ "knitting",
1333
+ "krumping",
1334
+ "laughing",
1335
+ "laying bricks",
1336
+ "long jump",
1337
+ "lunge",
1338
+ "making a cake",
1339
+ "making a sandwich",
1340
+ "making bed",
1341
+ "making jewelry",
1342
+ "making pizza",
1343
+ "making snowman",
1344
+ "making sushi",
1345
+ "making tea",
1346
+ "marching",
1347
+ "massaging back",
1348
+ "massaging feet",
1349
+ "massaging legs",
1350
+ "massaging person's head",
1351
+ "milking cow",
1352
+ "mopping floor",
1353
+ "motorcycling",
1354
+ "moving furniture",
1355
+ "mowing lawn",
1356
+ "news anchoring",
1357
+ "opening bottle",
1358
+ "opening present",
1359
+ "paragliding",
1360
+ "parasailing",
1361
+ "parkour",
1362
+ "passing American football (in game)",
1363
+ "passing American football (not in game)",
1364
+ "peeling apples",
1365
+ "peeling potatoes",
1366
+ "petting animal (not cat)",
1367
+ "petting cat",
1368
+ "picking fruit",
1369
+ "planting trees",
1370
+ "plastering",
1371
+ "playing accordion",
1372
+ "playing badminton",
1373
+ "playing bagpipes",
1374
+ "playing basketball",
1375
+ "playing bass guitar",
1376
+ "playing cards",
1377
+ "playing cello",
1378
+ "playing chess",
1379
+ "playing clarinet",
1380
+ "playing controller",
1381
+ "playing cricket",
1382
+ "playing cymbals",
1383
+ "playing didgeridoo",
1384
+ "playing drums",
1385
+ "playing flute",
1386
+ "playing guitar",
1387
+ "playing harmonica",
1388
+ "playing harp",
1389
+ "playing ice hockey",
1390
+ "playing keyboard",
1391
+ "playing kickball",
1392
+ "playing monopoly",
1393
+ "playing organ",
1394
+ "playing paintball",
1395
+ "playing piano",
1396
+ "playing poker",
1397
+ "playing recorder",
1398
+ "playing saxophone",
1399
+ "playing squash or racquetball",
1400
+ "playing tennis",
1401
+ "playing trombone",
1402
+ "playing trumpet",
1403
+ "playing ukulele",
1404
+ "playing violin",
1405
+ "playing volleyball",
1406
+ "playing xylophone",
1407
+ "pole vault",
1408
+ "presenting weather forecast",
1409
+ "pull ups",
1410
+ "pumping fist",
1411
+ "pumping gas",
1412
+ "punching bag",
1413
+ "punching person (boxing)",
1414
+ "push up",
1415
+ "pushing car",
1416
+ "pushing cart",
1417
+ "pushing wheelchair",
1418
+ "reading book",
1419
+ "reading newspaper",
1420
+ "recording music",
1421
+ "riding a bike",
1422
+ "riding camel",
1423
+ "riding elephant",
1424
+ "riding mechanical bull",
1425
+ "riding mountain bike",
1426
+ "riding mule",
1427
+ "riding or walking with horse",
1428
+ "riding scooter",
1429
+ "riding unicycle",
1430
+ "ripping paper",
1431
+ "robot dancing",
1432
+ "rock climbing",
1433
+ "rock scissors paper",
1434
+ "roller skating",
1435
+ "running on treadmill",
1436
+ "sailing",
1437
+ "salsa dancing",
1438
+ "sanding floor",
1439
+ "scrambling eggs",
1440
+ "scuba diving",
1441
+ "setting table",
1442
+ "shaking hands",
1443
+ "shaking head",
1444
+ "sharpening knives",
1445
+ "sharpening pencil",
1446
+ "shaving head",
1447
+ "shaving legs",
1448
+ "shearing sheep",
1449
+ "shining shoes",
1450
+ "shooting basketball",
1451
+ "shooting goal (soccer)",
1452
+ "shot put",
1453
+ "shoveling snow",
1454
+ "shredding paper",
1455
+ "shuffling cards",
1456
+ "side kick",
1457
+ "sign language interpreting",
1458
+ "singing",
1459
+ "situp",
1460
+ "skateboarding",
1461
+ "ski jumping",
1462
+ "skiing (not slalom or crosscountry)",
1463
+ "skiing crosscountry",
1464
+ "skiing slalom",
1465
+ "skipping rope",
1466
+ "skydiving",
1467
+ "slacklining",
1468
+ "slapping",
1469
+ "sled dog racing",
1470
+ "smoking",
1471
+ "smoking hookah",
1472
+ "snatch weight lifting",
1473
+ "sneezing",
1474
+ "sniffing",
1475
+ "snorkeling",
1476
+ "snowboarding",
1477
+ "snowkiting",
1478
+ "snowmobiling",
1479
+ "somersaulting",
1480
+ "spinning poi",
1481
+ "spray painting",
1482
+ "spraying",
1483
+ "springboard diving",
1484
+ "squat",
1485
+ "sticking tongue out",
1486
+ "stomping grapes",
1487
+ "stretching arm",
1488
+ "stretching leg",
1489
+ "strumming guitar",
1490
+ "surfing crowd",
1491
+ "surfing water",
1492
+ "sweeping floor",
1493
+ "swimming backstroke",
1494
+ "swimming breast stroke",
1495
+ "swimming butterfly stroke",
1496
+ "swing dancing",
1497
+ "swinging legs",
1498
+ "swinging on something",
1499
+ "sword fighting",
1500
+ "tai chi",
1501
+ "taking a shower",
1502
+ "tango dancing",
1503
+ "tap dancing",
1504
+ "tapping guitar",
1505
+ "tapping pen",
1506
+ "tasting beer",
1507
+ "tasting food",
1508
+ "testifying",
1509
+ "texting",
1510
+ "throwing axe",
1511
+ "throwing ball",
1512
+ "throwing discus",
1513
+ "tickling",
1514
+ "tobogganing",
1515
+ "tossing coin",
1516
+ "tossing salad",
1517
+ "training dog",
1518
+ "trapezing",
1519
+ "trimming or shaving beard",
1520
+ "trimming trees",
1521
+ "triple jump",
1522
+ "tying bow tie",
1523
+ "tying knot (not on a tie)",
1524
+ "tying tie",
1525
+ "unboxing",
1526
+ "unloading truck",
1527
+ "using computer",
1528
+ "using remote controller (not gaming)",
1529
+ "using segway",
1530
+ "vault",
1531
+ "waiting in line",
1532
+ "walking the dog",
1533
+ "washing dishes",
1534
+ "washing feet",
1535
+ "washing hair",
1536
+ "washing hands",
1537
+ "water skiing",
1538
+ "water sliding",
1539
+ "watering plants",
1540
+ "waxing back",
1541
+ "waxing chest",
1542
+ "waxing eyebrows",
1543
+ "waxing legs",
1544
+ "weaving basket",
1545
+ "welding",
1546
+ "whistling",
1547
+ "windsurfing",
1548
+ "wrapping present",
1549
+ "wrestling",
1550
+ "writing",
1551
+ "yawning",
1552
+ "yoga",
1553
+ "zumba",
1554
+ ]
vllm/lib/python3.10/site-packages/torchvision/models/_utils.py ADDED
@@ -0,0 +1,256 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import functools
2
+ import inspect
3
+ import warnings
4
+ from collections import OrderedDict
5
+ from typing import Any, Callable, Dict, Optional, Tuple, TypeVar, Union
6
+
7
+ from torch import nn
8
+
9
+ from .._utils import sequence_to_str
10
+ from ._api import WeightsEnum
11
+
12
+
13
class IntermediateLayerGetter(nn.ModuleDict):
    """
    Module wrapper that returns intermediate layers from a model.

    It has a strong assumption that the modules have been registered
    into the model in the same order as they are used: do **not** reuse
    the same nn.Module twice in the forward if you want this to work.

    Only submodules that are direct children of the model can be queried.
    So if `model` is passed, `model.feature1` can be returned, but not
    `model.feature1.layer2`.

    Args:
        model (nn.Module): model on which we will extract the features
        return_layers (Dict[name, new_name]): a dict mapping the names of
            the child modules whose activations should be captured to the
            names under which those activations are returned.

    Examples::

        >>> m = torchvision.models.resnet18(weights=ResNet18_Weights.DEFAULT)
        >>> # extract layer1 and layer3, giving as names `feat1` and feat2`
        >>> new_m = torchvision.models._utils.IntermediateLayerGetter(m,
        >>>     {'layer1': 'feat1', 'layer3': 'feat2'})
        >>> out = new_m(torch.rand(1, 3, 224, 224))
        >>> print([(k, v.shape) for k, v in out.items()])
        >>> [('feat1', torch.Size([1, 64, 56, 56])),
        >>>  ('feat2', torch.Size([1, 256, 14, 14]))]
    """

    _version = 2
    __annotations__ = {
        "return_layers": Dict[str, str],
    }

    def __init__(self, model: nn.Module, return_layers: Dict[str, str]) -> None:
        child_names = {name for name, _ in model.named_children()}
        if not set(return_layers).issubset(child_names):
            raise ValueError("return_layers are not present in model")
        # Keep children only up to (and including) the last requested layer;
        # everything after it is never needed for the requested outputs.
        pending = {str(key) for key in return_layers}
        kept = OrderedDict()
        for name, module in model.named_children():
            kept[name] = module
            pending.discard(name)
            if not pending:
                break

        super().__init__(kept)
        self.return_layers = return_layers

    def forward(self, x):
        captured = OrderedDict()
        for name, module in self.items():
            x = module(x)
            if name in self.return_layers:
                captured[self.return_layers[name]] = x
        return captured
74
+
75
+
76
+ def _make_divisible(v: float, divisor: int, min_value: Optional[int] = None) -> int:
77
+ """
78
+ This function is taken from the original tf repo.
79
+ It ensures that all layers have a channel number that is divisible by 8
80
+ It can be seen here:
81
+ https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
82
+ """
83
+ if min_value is None:
84
+ min_value = divisor
85
+ new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
86
+ # Make sure that round down does not go down by more than 10%.
87
+ if new_v < 0.9 * v:
88
+ new_v += divisor
89
+ return new_v
90
+
91
+
92
D = TypeVar("D")


def kwonly_to_pos_or_kw(fn: Callable[..., D]) -> Callable[..., D]:
    """Decorates a function that uses keyword only parameters to also allow them being passed as positionals.

    For example, consider the use case of changing the signature of ``old_fn`` into the one from ``new_fn``:

    .. code::

        def old_fn(foo, bar, baz=None):
            ...

        def new_fn(foo, *, bar, baz=None):
            ...

    Calling ``old_fn("foo", "bar", "baz")`` was valid, but the same call is no longer valid with ``new_fn``. To keep BC
    and at the same time warn the user of the deprecation, this decorator can be used:

    .. code::

        @kwonly_to_pos_or_kw
        def new_fn(foo, *, bar, baz=None):
            ...

        new_fn("foo", "bar", "baz")

    Raises:
        TypeError: if ``fn`` has no keyword-only parameter (the decorator would be a no-op).
    """
    params = inspect.signature(fn).parameters

    try:
        keyword_only_start_idx = next(
            idx for idx, param in enumerate(params.values()) if param.kind == param.KEYWORD_ONLY
        )
    except StopIteration:
        raise TypeError(f"Found no keyword-only parameter on function '{fn.__name__}'") from None

    # Reuse the signature computed above instead of calling `inspect.signature(fn)` a second time.
    keyword_only_params = tuple(params)[keyword_only_start_idx:]

    @functools.wraps(fn)
    def wrapper(*args: Any, **kwargs: Any) -> D:
        # Anything positional beyond the last positional-or-keyword parameter was meant
        # for a keyword-only parameter: map it by position and warn.
        args, keyword_only_args = args[:keyword_only_start_idx], args[keyword_only_start_idx:]
        if keyword_only_args:
            keyword_only_kwargs = dict(zip(keyword_only_params, keyword_only_args))
            warnings.warn(
                f"Using {sequence_to_str(tuple(keyword_only_kwargs.keys()), separate_last='and ')} as positional "
                f"parameter(s) is deprecated since 0.13 and may be removed in the future. Please use keyword parameter(s) "
                f"instead."
            )
            kwargs.update(keyword_only_kwargs)

        return fn(*args, **kwargs)

    return wrapper
145
+
146
+
147
# W: a WeightsEnum subclass; M: the model type a builder returns; V: a generic value.
W = TypeVar("W", bound=WeightsEnum)
M = TypeVar("M", bound=nn.Module)
V = TypeVar("V")


def handle_legacy_interface(**weights: Tuple[str, Union[Optional[W], Callable[[Dict[str, Any]], Optional[W]]]]):
    """Decorates a model builder with the new interface to make it compatible with the old.

    In particular this handles two things:

    1. Allows positional parameters again, but emits a deprecation warning in case they are used. See
       :func:`torchvision.prototype.utils._internal.kwonly_to_pos_or_kw` for details.
    2. Handles the default value change from ``pretrained=False`` to ``weights=None`` and ``pretrained=True`` to
       ``weights=Weights`` and emits a deprecation warning with instructions for the new interface.

    Args:
        **weights (Tuple[str, Union[Optional[W], Callable[[Dict[str, Any]], Optional[W]]]]): Deprecated parameter
            name and default value for the legacy ``pretrained=True``. The default value can be a callable in which
            case it will be called with a dictionary of the keyword arguments. The only key that is guaranteed to be in
            the dictionary is the deprecated parameter name passed as first element in the tuple. All other parameters
            should be accessed with :meth:`~dict.get`.
    """

    def outer_wrapper(builder: Callable[..., M]) -> Callable[..., M]:
        @kwonly_to_pos_or_kw
        @functools.wraps(builder)
        def inner_wrapper(*args: Any, **kwargs: Any) -> M:
            for weights_param, (pretrained_param, default) in weights.items():  # type: ignore[union-attr]
                # If neither the weights nor the pretrained parameter was passed, or the weights argument already
                # uses the new style arguments, there is nothing to do. Note that we cannot use `None` as sentinel
                # for the weight argument, since it is a valid value.
                sentinel = object()
                weights_arg = kwargs.get(weights_param, sentinel)
                if (
                    (weights_param not in kwargs and pretrained_param not in kwargs)
                    or isinstance(weights_arg, WeightsEnum)
                    or (isinstance(weights_arg, str) and weights_arg != "legacy")
                    or weights_arg is None
                ):
                    continue

                # If the pretrained parameter was passed as positional argument, it is now mapped to
                # `kwargs[weights_param]`. This happens because the @kwonly_to_pos_or_kw decorator uses the current
                # signature to infer the names of positionally passed arguments and thus has no knowledge that there
                # used to be a pretrained parameter.
                pretrained_positional = weights_arg is not sentinel
                if pretrained_positional:
                    # We put the pretrained argument under its legacy name in the keyword argument dictionary to have
                    # unified access to the value if the default value is a callable.
                    kwargs[pretrained_param] = pretrained_arg = kwargs.pop(weights_param)
                else:
                    pretrained_arg = kwargs[pretrained_param]

                # Truthy legacy `pretrained` (including the special "legacy" string above) maps to the
                # replacement default weights, which may be computed by a callable from the kwargs.
                if pretrained_arg:
                    default_weights_arg = default(kwargs) if callable(default) else default
                    if not isinstance(default_weights_arg, WeightsEnum):
                        raise ValueError(f"No weights available for model {builder.__name__}")
                else:
                    default_weights_arg = None

                if not pretrained_positional:
                    warnings.warn(
                        f"The parameter '{pretrained_param}' is deprecated since 0.13 and may be removed in the future, "
                        f"please use '{weights_param}' instead."
                    )

                msg = (
                    f"Arguments other than a weight enum or `None` for '{weights_param}' are deprecated since 0.13 and "
                    f"may be removed in the future. "
                    f"The current behavior is equivalent to passing `{weights_param}={default_weights_arg}`."
                )
                if pretrained_arg:
                    msg = (
                        f"{msg} You can also use `{weights_param}={type(default_weights_arg).__name__}.DEFAULT` "
                        f"to get the most up-to-date weights."
                    )
                warnings.warn(msg)

                # Rewrite the call to the new interface before invoking the real builder.
                del kwargs[pretrained_param]
                kwargs[weights_param] = default_weights_arg

            return builder(*args, **kwargs)

        return inner_wrapper

    return outer_wrapper
233
+
234
+
235
+ def _ovewrite_named_param(kwargs: Dict[str, Any], param: str, new_value: V) -> None:
236
+ if param in kwargs:
237
+ if kwargs[param] != new_value:
238
+ raise ValueError(f"The parameter '{param}' expected value {new_value} but got {kwargs[param]} instead.")
239
+ else:
240
+ kwargs[param] = new_value
241
+
242
+
243
+ def _ovewrite_value_param(param: str, actual: Optional[V], expected: V) -> V:
244
+ if actual is not None:
245
+ if actual != expected:
246
+ raise ValueError(f"The parameter '{param}' expected value {expected} but got {actual} instead.")
247
+ return expected
248
+
249
+
250
+ class _ModelURLs(dict):
251
+ def __getitem__(self, item):
252
+ warnings.warn(
253
+ "Accessing the model URLs via the internal dictionary of the module is deprecated since 0.13 and may "
254
+ "be removed in the future. Please access them via the appropriate Weights Enum instead."
255
+ )
256
+ return super().__getitem__(item)
vllm/lib/python3.10/site-packages/torchvision/models/alexnet.py ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import partial
2
+ from typing import Any, Optional
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+
7
+ from ..transforms._presets import ImageClassification
8
+ from ..utils import _log_api_usage_once
9
+ from ._api import register_model, Weights, WeightsEnum
10
+ from ._meta import _IMAGENET_CATEGORIES
11
+ from ._utils import _ovewrite_named_param, handle_legacy_interface
12
+
13
+
14
+ __all__ = ["AlexNet", "AlexNet_Weights", "alexnet"]
15
+
16
+
17
class AlexNet(nn.Module):
    """AlexNet ("one weird trick" variant) for image classification.

    Args:
        num_classes: width of the final classification layer.
        dropout: dropout probability used in the classifier head.
    """

    def __init__(self, num_classes: int = 1000, dropout: float = 0.5) -> None:
        super().__init__()
        _log_api_usage_once(self)
        # Convolutional trunk. Layer order must stay exactly as in the reference
        # implementation so that `state_dict` keys (Sequential indices) match
        # the released checkpoints.
        trunk = [
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        ]
        self.features = nn.Sequential(*trunk)
        # Pool to a fixed 6x6 map so the classifier works for any input size
        # the trunk accepts.
        self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
        head = [
            nn.Dropout(p=dropout),
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(p=dropout),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        ]
        self.classifier = nn.Sequential(*head)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        feats = self.features(x)
        pooled = self.avgpool(feats)
        flattened = torch.flatten(pooled, 1)
        return self.classifier(flattened)
53
+
54
+
55
class AlexNet_Weights(WeightsEnum):
    # Single released ImageNet-1K checkpoint; accuracy and provenance are
    # recorded in `meta` (see the "recipe" link for the training setup).
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/alexnet-owt-7be5be79.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            "num_params": 61100840,
            "min_size": (63, 63),
            "categories": _IMAGENET_CATEGORIES,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#alexnet-and-vgg",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 56.522,
                    "acc@5": 79.066,
                }
            },
            "_ops": 0.714,
            "_file_size": 233.087,
            "_docs": """
                These weights reproduce closely the results of the paper using a simplified training recipe.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V1
78
+
79
+
80
@register_model()
@handle_legacy_interface(weights=("pretrained", AlexNet_Weights.IMAGENET1K_V1))
def alexnet(*, weights: Optional[AlexNet_Weights] = None, progress: bool = True, **kwargs: Any) -> AlexNet:
    """AlexNet model architecture from `One weird trick for parallelizing convolutional neural networks <https://arxiv.org/abs/1404.5997>`__.

    .. note::
        AlexNet was originally introduced in the `ImageNet Classification with
        Deep Convolutional Neural Networks
        <https://papers.nips.cc/paper/2012/hash/c399862d3b9d6b76c8436e924a68c45b-Abstract.html>`__
        paper. Our implementation is based instead on the "One weird trick"
        paper above.

    Args:
        weights (:class:`~torchvision.models.AlexNet_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.AlexNet_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.squeezenet.AlexNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/alexnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.AlexNet_Weights
        :members:
    """
    weights = AlexNet_Weights.verify(weights)

    if weights is not None:
        # A pretrained checkpoint fixes the classifier width to the number of
        # categories it was trained on; reject conflicting user overrides.
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))

    model = AlexNet(**kwargs)

    if weights is not None:
        state_dict = weights.get_state_dict(progress=progress, check_hash=True)
        model.load_state_dict(state_dict)

    return model
vllm/lib/python3.10/site-packages/torchvision/models/convnext.py ADDED
@@ -0,0 +1,414 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import partial
2
+ from typing import Any, Callable, List, Optional, Sequence
3
+
4
+ import torch
5
+ from torch import nn, Tensor
6
+ from torch.nn import functional as F
7
+
8
+ from ..ops.misc import Conv2dNormActivation, Permute
9
+ from ..ops.stochastic_depth import StochasticDepth
10
+ from ..transforms._presets import ImageClassification
11
+ from ..utils import _log_api_usage_once
12
+ from ._api import register_model, Weights, WeightsEnum
13
+ from ._meta import _IMAGENET_CATEGORIES
14
+ from ._utils import _ovewrite_named_param, handle_legacy_interface
15
+
16
+
17
+ __all__ = [
18
+ "ConvNeXt",
19
+ "ConvNeXt_Tiny_Weights",
20
+ "ConvNeXt_Small_Weights",
21
+ "ConvNeXt_Base_Weights",
22
+ "ConvNeXt_Large_Weights",
23
+ "convnext_tiny",
24
+ "convnext_small",
25
+ "convnext_base",
26
+ "convnext_large",
27
+ ]
28
+
29
+
30
class LayerNorm2d(nn.LayerNorm):
    """LayerNorm that accepts an NCHW tensor and normalizes over the channel
    dimension by temporarily moving channels last."""

    def forward(self, x: Tensor) -> Tensor:
        nhwc = x.permute(0, 2, 3, 1)
        normalized = F.layer_norm(nhwc, self.normalized_shape, self.weight, self.bias, self.eps)
        return normalized.permute(0, 3, 1, 2)
+
37
+
38
class CNBlock(nn.Module):
    """ConvNeXt residual block: depthwise 7x7 conv, then a channels-last
    LayerNorm + pointwise MLP (via Linear), scaled by a learnable per-channel
    factor, with stochastic depth on the residual branch.
    """

    def __init__(
        self,
        dim,
        layer_scale: float,
        stochastic_depth_prob: float,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
    ) -> None:
        super().__init__()
        # Default normalization: LayerNorm with eps=1e-6.
        norm = partial(nn.LayerNorm, eps=1e-6) if norm_layer is None else norm_layer

        self.block = nn.Sequential(
            nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim, bias=True),
            Permute([0, 2, 3, 1]),
            norm(dim),
            nn.Linear(in_features=dim, out_features=4 * dim, bias=True),
            nn.GELU(),
            nn.Linear(in_features=4 * dim, out_features=dim, bias=True),
            Permute([0, 3, 1, 2]),
        )
        self.layer_scale = nn.Parameter(torch.ones(dim, 1, 1) * layer_scale)
        self.stochastic_depth = StochasticDepth(stochastic_depth_prob, "row")

    def forward(self, input: Tensor) -> Tensor:
        branch = self.stochastic_depth(self.layer_scale * self.block(input))
        return input + branch
+
68
+
69
class CNBlockConfig:
    """Per-stage configuration, as listed in Section 3 of the ConvNeXt paper.

    ``out_channels`` of ``None`` marks the final stage (no downsampling after it).
    """

    def __init__(
        self,
        input_channels: int,
        out_channels: Optional[int],
        num_layers: int,
    ) -> None:
        self.input_channels = input_channels
        self.out_channels = out_channels
        self.num_layers = num_layers

    def __repr__(self) -> str:
        return (
            f"{self.__class__.__name__}("
            f"input_channels={self.input_channels}"
            f", out_channels={self.out_channels}"
            f", num_layers={self.num_layers}"
            f")"
        )
+ return s.format(**self.__dict__)
88
+
89
+
90
class ConvNeXt(nn.Module):
    """ConvNeXt backbone built from a patchify stem, per-stage CNBlocks with
    optional downsampling between stages, and a LayerNorm + Linear classifier.

    Args:
        block_setting: one :class:`CNBlockConfig` per stage; must be non-empty.
        stochastic_depth_prob: maximum stochastic-depth drop probability; it is
            scaled linearly over the blocks (0 for the first, the full value for
            the last).
        layer_scale: initial value of each block's learnable per-channel scale.
        num_classes: width of the final classification layer.
        block: block class to instantiate per layer; defaults to :class:`CNBlock`.
        norm_layer: normalization factory; defaults to ``LayerNorm2d(eps=1e-6)``.
    """

    def __init__(
        self,
        block_setting: List[CNBlockConfig],
        stochastic_depth_prob: float = 0.0,
        layer_scale: float = 1e-6,
        num_classes: int = 1000,
        block: Optional[Callable[..., nn.Module]] = None,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        **kwargs: Any,
    ) -> None:
        super().__init__()
        _log_api_usage_once(self)

        # Validate the stage configuration up front.
        if not block_setting:
            raise ValueError("The block_setting should not be empty")
        elif not (isinstance(block_setting, Sequence) and all([isinstance(s, CNBlockConfig) for s in block_setting])):
            raise TypeError("The block_setting should be List[CNBlockConfig]")

        if block is None:
            block = CNBlock

        if norm_layer is None:
            norm_layer = partial(LayerNorm2d, eps=1e-6)

        layers: List[nn.Module] = []

        # Stem: 4x4 non-overlapping "patchify" convolution.
        firstconv_output_channels = block_setting[0].input_channels
        layers.append(
            Conv2dNormActivation(
                3,
                firstconv_output_channels,
                kernel_size=4,
                stride=4,
                padding=0,
                norm_layer=norm_layer,
                activation_layer=None,
                bias=True,
            )
        )

        total_stage_blocks = sum(cnf.num_layers for cnf in block_setting)
        stage_block_id = 0
        for cnf in block_setting:
            # Bottlenecks
            stage: List[nn.Module] = []
            for _ in range(cnf.num_layers):
                # adjust stochastic depth probability based on the depth of the stage block
                # NOTE(review): with a single block overall (total_stage_blocks == 1)
                # this divides 0.0 by 0.0 and raises ZeroDivisionError — confirm
                # whether such a configuration is ever expected.
                sd_prob = stochastic_depth_prob * stage_block_id / (total_stage_blocks - 1.0)
                stage.append(block(cnf.input_channels, layer_scale, sd_prob))
                stage_block_id += 1
            layers.append(nn.Sequential(*stage))
            if cnf.out_channels is not None:
                # Downsampling: norm + strided 2x2 conv between stages.
                layers.append(
                    nn.Sequential(
                        norm_layer(cnf.input_channels),
                        nn.Conv2d(cnf.input_channels, cnf.out_channels, kernel_size=2, stride=2),
                    )
                )

        self.features = nn.Sequential(*layers)
        self.avgpool = nn.AdaptiveAvgPool2d(1)

        # The classifier width comes from the last stage's output (or input,
        # when the final stage has no downsampling configured).
        lastblock = block_setting[-1]
        lastconv_output_channels = (
            lastblock.out_channels if lastblock.out_channels is not None else lastblock.input_channels
        )
        self.classifier = nn.Sequential(
            norm_layer(lastconv_output_channels), nn.Flatten(1), nn.Linear(lastconv_output_channels, num_classes)
        )

        # Truncated-normal init for all conv/linear weights, zero biases.
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.Linear)):
                nn.init.trunc_normal_(m.weight, std=0.02)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)

    def _forward_impl(self, x: Tensor) -> Tensor:
        x = self.features(x)
        x = self.avgpool(x)
        x = self.classifier(x)
        return x

    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)
+
178
+
179
def _convnext(
    block_setting: List[CNBlockConfig],
    stochastic_depth_prob: float,
    weights: Optional[WeightsEnum],
    progress: bool,
    **kwargs: Any,
) -> ConvNeXt:
    """Shared builder for the convnext_* model constructors.

    Args:
        block_setting: per-stage configuration passed through to :class:`ConvNeXt`.
        stochastic_depth_prob: maximum stochastic-depth probability for this variant.
        weights: optional pretrained weights; when given, forces ``num_classes``
            to match the checkpoint and loads its state dict.
        progress: whether to show a download progress bar.
        **kwargs: forwarded to the :class:`ConvNeXt` constructor.
    """
    if weights is not None:
        # The checkpoint dictates the classifier width; conflicting overrides raise.
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))

    model = ConvNeXt(block_setting, stochastic_depth_prob=stochastic_depth_prob, **kwargs)

    if weights is not None:
        state_dict = weights.get_state_dict(progress=progress, check_hash=True)
        model.load_state_dict(state_dict)

    return model
+
196
+
197
# Metadata shared by every ConvNeXt weight enum below; each entry adds its own
# "num_params", "_metrics", "_ops" and "_file_size" on top of these fields.
_COMMON_META = {
    "min_size": (32, 32),
    "categories": _IMAGENET_CATEGORIES,
    "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#convnext",
    "_docs": """
        These weights improve upon the results of the original paper by using a modified version of TorchVision's
        `new training recipe
        <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
    """,
}
+
208
+
209
class ConvNeXt_Tiny_Weights(WeightsEnum):
    # ImageNet-1K checkpoint; shared metadata fields come from `_COMMON_META`.
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/convnext_tiny-983f1562.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=236),
        meta={
            **_COMMON_META,
            "num_params": 28589128,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 82.520,
                    "acc@5": 96.146,
                }
            },
            "_ops": 4.456,
            "_file_size": 109.119,
        },
    )
    DEFAULT = IMAGENET1K_V1
227
+
228
+
229
class ConvNeXt_Small_Weights(WeightsEnum):
    # ImageNet-1K checkpoint; shared metadata fields come from `_COMMON_META`.
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/convnext_small-0c510722.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=230),
        meta={
            **_COMMON_META,
            "num_params": 50223688,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 83.616,
                    "acc@5": 96.650,
                }
            },
            "_ops": 8.684,
            "_file_size": 191.703,
        },
    )
    DEFAULT = IMAGENET1K_V1
247
+
248
+
249
class ConvNeXt_Base_Weights(WeightsEnum):
    # ImageNet-1K checkpoint; shared metadata fields come from `_COMMON_META`.
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/convnext_base-6075fbad.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 88591464,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 84.062,
                    "acc@5": 96.870,
                }
            },
            "_ops": 15.355,
            "_file_size": 338.064,
        },
    )
    DEFAULT = IMAGENET1K_V1
267
+
268
+
269
class ConvNeXt_Large_Weights(WeightsEnum):
    # ImageNet-1K weights for the Large variant; accuracy/size figures in ``meta``.
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/convnext_large-ea097f82.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 197767336,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 84.414,
                    "acc@5": 96.976,
                }
            },
            "_ops": 34.361,
            "_file_size": 754.537,
        },
    )
    DEFAULT = IMAGENET1K_V1
287
+
288
+
289
@register_model()
@handle_legacy_interface(weights=("pretrained", ConvNeXt_Tiny_Weights.IMAGENET1K_V1))
def convnext_tiny(*, weights: Optional[ConvNeXt_Tiny_Weights] = None, progress: bool = True, **kwargs: Any) -> ConvNeXt:
    """ConvNeXt Tiny model architecture from the
    `A ConvNet for the 2020s <https://arxiv.org/abs/2201.03545>`_ paper.

    Args:
        weights (:class:`~torchvision.models.convnext.ConvNeXt_Tiny_Weights`, optional): The pretrained
            weights to use. See :class:`~torchvision.models.convnext.ConvNeXt_Tiny_Weights`
            below for more details and possible values. By default, no pre-trained weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.convnext.ConvNext``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/convnext.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.ConvNeXt_Tiny_Weights
        :members:
    """
    weights = ConvNeXt_Tiny_Weights.verify(weights)

    # Tiny variant: stage widths 96 -> 768, stage depths (3, 3, 9, 3).
    stage_configs = (
        (96, 192, 3),
        (192, 384, 3),
        (384, 768, 9),
        (768, None, 3),
    )
    block_setting = [CNBlockConfig(*cfg) for cfg in stage_configs]
    sd_prob = kwargs.pop("stochastic_depth_prob", 0.1)
    return _convnext(block_setting, sd_prob, weights, progress, **kwargs)
318
+
319
+
320
@register_model()
@handle_legacy_interface(weights=("pretrained", ConvNeXt_Small_Weights.IMAGENET1K_V1))
def convnext_small(
    *, weights: Optional[ConvNeXt_Small_Weights] = None, progress: bool = True, **kwargs: Any
) -> ConvNeXt:
    """ConvNeXt Small model architecture from the
    `A ConvNet for the 2020s <https://arxiv.org/abs/2201.03545>`_ paper.

    Args:
        weights (:class:`~torchvision.models.convnext.ConvNeXt_Small_Weights`, optional): The pretrained
            weights to use. See :class:`~torchvision.models.convnext.ConvNeXt_Small_Weights`
            below for more details and possible values. By default, no pre-trained weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.convnext.ConvNext``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/convnext.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.ConvNeXt_Small_Weights
        :members:
    """
    weights = ConvNeXt_Small_Weights.verify(weights)

    # Small variant: same widths as Tiny, but a deeper third stage (27 blocks).
    stage_configs = (
        (96, 192, 3),
        (192, 384, 3),
        (384, 768, 27),
        (768, None, 3),
    )
    block_setting = [CNBlockConfig(*cfg) for cfg in stage_configs]
    sd_prob = kwargs.pop("stochastic_depth_prob", 0.4)
    return _convnext(block_setting, sd_prob, weights, progress, **kwargs)
351
+
352
+
353
@register_model()
@handle_legacy_interface(weights=("pretrained", ConvNeXt_Base_Weights.IMAGENET1K_V1))
def convnext_base(*, weights: Optional[ConvNeXt_Base_Weights] = None, progress: bool = True, **kwargs: Any) -> ConvNeXt:
    """ConvNeXt Base model architecture from the
    `A ConvNet for the 2020s <https://arxiv.org/abs/2201.03545>`_ paper.

    Args:
        weights (:class:`~torchvision.models.convnext.ConvNeXt_Base_Weights`, optional): The pretrained
            weights to use. See :class:`~torchvision.models.convnext.ConvNeXt_Base_Weights`
            below for more details and possible values. By default, no pre-trained weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.convnext.ConvNext``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/convnext.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.ConvNeXt_Base_Weights
        :members:
    """
    weights = ConvNeXt_Base_Weights.verify(weights)

    # Base variant: stage widths 128 -> 1024, stage depths (3, 3, 27, 3).
    stage_configs = (
        (128, 256, 3),
        (256, 512, 3),
        (512, 1024, 27),
        (1024, None, 3),
    )
    block_setting = [CNBlockConfig(*cfg) for cfg in stage_configs]
    sd_prob = kwargs.pop("stochastic_depth_prob", 0.5)
    return _convnext(block_setting, sd_prob, weights, progress, **kwargs)
382
+
383
+
384
@register_model()
@handle_legacy_interface(weights=("pretrained", ConvNeXt_Large_Weights.IMAGENET1K_V1))
def convnext_large(
    *, weights: Optional[ConvNeXt_Large_Weights] = None, progress: bool = True, **kwargs: Any
) -> ConvNeXt:
    """ConvNeXt Large model architecture from the
    `A ConvNet for the 2020s <https://arxiv.org/abs/2201.03545>`_ paper.

    Args:
        weights (:class:`~torchvision.models.convnext.ConvNeXt_Large_Weights`, optional): The pretrained
            weights to use. See :class:`~torchvision.models.convnext.ConvNeXt_Large_Weights`
            below for more details and possible values. By default, no pre-trained weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.convnext.ConvNext``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/convnext.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.ConvNeXt_Large_Weights
        :members:
    """
    weights = ConvNeXt_Large_Weights.verify(weights)

    # Large variant: stage widths 192 -> 1536, stage depths (3, 3, 27, 3).
    stage_configs = (
        (192, 384, 3),
        (384, 768, 3),
        (768, 1536, 27),
        (1536, None, 3),
    )
    block_setting = [CNBlockConfig(*cfg) for cfg in stage_configs]
    sd_prob = kwargs.pop("stochastic_depth_prob", 0.5)
    return _convnext(block_setting, sd_prob, weights, progress, **kwargs)
vllm/lib/python3.10/site-packages/torchvision/models/densenet.py ADDED
@@ -0,0 +1,448 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import re
2
+ from collections import OrderedDict
3
+ from functools import partial
4
+ from typing import Any, List, Optional, Tuple
5
+
6
+ import torch
7
+ import torch.nn as nn
8
+ import torch.nn.functional as F
9
+ import torch.utils.checkpoint as cp
10
+ from torch import Tensor
11
+
12
+ from ..transforms._presets import ImageClassification
13
+ from ..utils import _log_api_usage_once
14
+ from ._api import register_model, Weights, WeightsEnum
15
+ from ._meta import _IMAGENET_CATEGORIES
16
+ from ._utils import _ovewrite_named_param, handle_legacy_interface
17
+
18
# Public API of this module: the DenseNet class, per-variant weight enums,
# and the model-builder functions.
__all__ = [
    "DenseNet",
    "DenseNet121_Weights",
    "DenseNet161_Weights",
    "DenseNet169_Weights",
    "DenseNet201_Weights",
    "densenet121",
    "densenet161",
    "densenet169",
    "densenet201",
]
29
+
30
+
31
class _DenseLayer(nn.Module):
    """One DenseNet layer: BN-ReLU-Conv(1x1) bottleneck followed by
    BN-ReLU-Conv(3x3), producing ``growth_rate`` new feature maps.

    ``forward`` accepts either a single Tensor or a list of all previous
    feature maps; the TorchScript ``_overload_method`` stubs below exist
    because TorchScript cannot overload on ``*args``.
    """

    def __init__(
        self, num_input_features: int, growth_rate: int, bn_size: int, drop_rate: float, memory_efficient: bool = False
    ) -> None:
        super().__init__()
        # 1x1 bottleneck: reduces the concatenated input to bn_size * growth_rate channels.
        self.norm1 = nn.BatchNorm2d(num_input_features)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(num_input_features, bn_size * growth_rate, kernel_size=1, stride=1, bias=False)

        # 3x3 conv: emits exactly growth_rate feature maps.
        self.norm2 = nn.BatchNorm2d(bn_size * growth_rate)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(bn_size * growth_rate, growth_rate, kernel_size=3, stride=1, padding=1, bias=False)

        self.drop_rate = float(drop_rate)
        self.memory_efficient = memory_efficient

    def bn_function(self, inputs: List[Tensor]) -> Tensor:
        # Concatenate all previous feature maps along channels, then apply the bottleneck.
        concated_features = torch.cat(inputs, 1)
        bottleneck_output = self.conv1(self.relu1(self.norm1(concated_features)))  # noqa: T484
        return bottleneck_output

    # todo: rewrite when torchscript supports any
    def any_requires_grad(self, input: List[Tensor]) -> bool:
        # Checkpointing is only worthwhile when at least one input needs gradients.
        for tensor in input:
            if tensor.requires_grad:
                return True
        return False

    @torch.jit.unused  # noqa: T484
    def call_checkpoint_bottleneck(self, input: List[Tensor]) -> Tensor:
        # Recompute the bottleneck in backward instead of storing its
        # activations (memory-efficient DenseNet, https://arxiv.org/pdf/1707.06990.pdf).
        def closure(*inputs):
            return self.bn_function(inputs)

        return cp.checkpoint(closure, *input, use_reentrant=False)

    @torch.jit._overload_method  # noqa: F811
    def forward(self, input: List[Tensor]) -> Tensor:  # noqa: F811
        pass

    @torch.jit._overload_method  # noqa: F811
    def forward(self, input: Tensor) -> Tensor:  # noqa: F811
        pass

    # torchscript does not yet support *args, so we overload method
    # allowing it to take either a List[Tensor] or single Tensor
    def forward(self, input: Tensor) -> Tensor:  # noqa: F811
        if isinstance(input, Tensor):
            prev_features = [input]
        else:
            prev_features = input

        if self.memory_efficient and self.any_requires_grad(prev_features):
            if torch.jit.is_scripting():
                raise Exception("Memory Efficient not supported in JIT")

            bottleneck_output = self.call_checkpoint_bottleneck(prev_features)
        else:
            bottleneck_output = self.bn_function(prev_features)

        new_features = self.conv2(self.relu2(self.norm2(bottleneck_output)))
        if self.drop_rate > 0:
            new_features = F.dropout(new_features, p=self.drop_rate, training=self.training)
        return new_features
94
+
95
+
96
class _DenseBlock(nn.ModuleDict):
    """A stack of densely connected layers.

    Each child :class:`_DenseLayer` consumes the concatenation of the block
    input and every preceding layer's output, so layer ``i`` receives
    ``num_input_features + i * growth_rate`` channels.
    """

    _version = 2

    def __init__(
        self,
        num_layers: int,
        num_input_features: int,
        bn_size: int,
        growth_rate: int,
        drop_rate: float,
        memory_efficient: bool = False,
    ) -> None:
        super().__init__()
        for idx in range(num_layers):
            in_channels = num_input_features + idx * growth_rate
            self.add_module(
                "denselayer%d" % (idx + 1),
                _DenseLayer(
                    in_channels,
                    growth_rate=growth_rate,
                    bn_size=bn_size,
                    drop_rate=drop_rate,
                    memory_efficient=memory_efficient,
                ),
            )

    def forward(self, init_features: Tensor) -> Tensor:
        # Accumulate every layer's output; each layer consumes the full list.
        features = [init_features]
        for _name, layer in self.items():
            features.append(layer(features))
        return torch.cat(features, 1)
125
+
126
+
127
+ class _Transition(nn.Sequential):
128
+ def __init__(self, num_input_features: int, num_output_features: int) -> None:
129
+ super().__init__()
130
+ self.norm = nn.BatchNorm2d(num_input_features)
131
+ self.relu = nn.ReLU(inplace=True)
132
+ self.conv = nn.Conv2d(num_input_features, num_output_features, kernel_size=1, stride=1, bias=False)
133
+ self.pool = nn.AvgPool2d(kernel_size=2, stride=2)
134
+
135
+
136
class DenseNet(nn.Module):
    r"""Densenet-BC model class, based on
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_.

    Args:
        growth_rate (int) - how many filters to add each layer (`k` in paper)
        block_config (list of 4 ints) - how many layers in each pooling block
        num_init_features (int) - the number of filters to learn in the first convolution layer
        bn_size (int) - multiplicative factor for number of bottle neck layers
            (i.e. bn_size * k features in the bottleneck layer)
        drop_rate (float) - dropout rate after each dense layer
        num_classes (int) - number of classification classes
        memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
            but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_.
    """

    def __init__(
        self,
        growth_rate: int = 32,
        block_config: Tuple[int, int, int, int] = (6, 12, 24, 16),
        num_init_features: int = 64,
        bn_size: int = 4,
        drop_rate: float = 0,
        num_classes: int = 1000,
        memory_efficient: bool = False,
    ) -> None:

        super().__init__()
        _log_api_usage_once(self)

        # First convolution: 7x7/2 stem conv + BN + ReLU + 3x3/2 max-pool.
        self.features = nn.Sequential(
            OrderedDict(
                [
                    ("conv0", nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
                    ("norm0", nn.BatchNorm2d(num_init_features)),
                    ("relu0", nn.ReLU(inplace=True)),
                    ("pool0", nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
                ]
            )
        )

        # Each denseblock, followed by a transition that halves the channel
        # count (except after the last block).
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = _DenseBlock(
                num_layers=num_layers,
                num_input_features=num_features,
                bn_size=bn_size,
                growth_rate=growth_rate,
                drop_rate=drop_rate,
                memory_efficient=memory_efficient,
            )
            self.features.add_module("denseblock%d" % (i + 1), block)
            # Every dense layer appends growth_rate channels.
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2)
                self.features.add_module("transition%d" % (i + 1), trans)
                num_features = num_features // 2

        # Final batch norm
        self.features.add_module("norm5", nn.BatchNorm2d(num_features))

        # Linear layer
        self.classifier = nn.Linear(num_features, num_classes)

        # Official init from torch repo. Note: for Linear layers only the bias
        # is zeroed; the weight keeps PyTorch's default initialization.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)

    def forward(self, x: Tensor) -> Tensor:
        # Backbone features -> ReLU -> global average pool -> classifier.
        features = self.features(x)
        out = F.relu(features, inplace=True)
        out = F.adaptive_avg_pool2d(out, (1, 1))
        out = torch.flatten(out, 1)
        out = self.classifier(out)
        return out
219
+
220
+
221
+ def _load_state_dict(model: nn.Module, weights: WeightsEnum, progress: bool) -> None:
222
+ # '.'s are no longer allowed in module names, but previous _DenseLayer
223
+ # has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'.
224
+ # They are also in the checkpoints in model_urls. This pattern is used
225
+ # to find such keys.
226
+ pattern = re.compile(
227
+ r"^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$"
228
+ )
229
+
230
+ state_dict = weights.get_state_dict(progress=progress, check_hash=True)
231
+ for key in list(state_dict.keys()):
232
+ res = pattern.match(key)
233
+ if res:
234
+ new_key = res.group(1) + res.group(2)
235
+ state_dict[new_key] = state_dict[key]
236
+ del state_dict[key]
237
+ model.load_state_dict(state_dict)
238
+
239
+
240
def _densenet(
    growth_rate: int,
    block_config: Tuple[int, int, int, int],
    num_init_features: int,
    weights: Optional[WeightsEnum],
    progress: bool,
    **kwargs: Any,
) -> DenseNet:
    """Construct a DenseNet and optionally load pretrained ``weights``.

    When weights are supplied, ``num_classes`` is forced to match the number
    of categories in the weight metadata before the model is built.
    """
    pretrained = weights is not None
    if pretrained:
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))

    model = DenseNet(growth_rate, block_config, num_init_features, **kwargs)
    if pretrained:
        _load_state_dict(model=model, weights=weights, progress=progress)
    return model
257
+
258
+
259
# Metadata shared by every DenseNet weight entry below; individual entries
# extend it with per-variant figures (num_params, metrics, ops, file size).
_COMMON_META = {
    "min_size": (29, 29),
    "categories": _IMAGENET_CATEGORIES,
    "recipe": "https://github.com/pytorch/vision/pull/116",
    "_docs": """These weights are ported from LuaTorch.""",
}
265
+
266
+
267
class DenseNet121_Weights(WeightsEnum):
    # ImageNet-1K weights (ported from LuaTorch); figures in ``meta``.
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/densenet121-a639ec97.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 7978856,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 74.434,
                    "acc@5": 91.972,
                }
            },
            "_ops": 2.834,
            "_file_size": 30.845,
        },
    )
    DEFAULT = IMAGENET1K_V1
285
+
286
+
287
class DenseNet161_Weights(WeightsEnum):
    # ImageNet-1K weights (ported from LuaTorch); figures in ``meta``.
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/densenet161-8d451a50.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 28681000,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 77.138,
                    "acc@5": 93.560,
                }
            },
            "_ops": 7.728,
            "_file_size": 110.369,
        },
    )
    DEFAULT = IMAGENET1K_V1
305
+
306
+
307
class DenseNet169_Weights(WeightsEnum):
    # ImageNet-1K weights (ported from LuaTorch); figures in ``meta``.
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/densenet169-b2777c0a.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 14149480,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 75.600,
                    "acc@5": 92.806,
                }
            },
            "_ops": 3.36,
            "_file_size": 54.708,
        },
    )
    DEFAULT = IMAGENET1K_V1
325
+
326
+
327
class DenseNet201_Weights(WeightsEnum):
    # ImageNet-1K weights (ported from LuaTorch); figures in ``meta``.
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/densenet201-c1103571.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 20013928,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 76.896,
                    "acc@5": 93.370,
                }
            },
            "_ops": 4.291,
            "_file_size": 77.373,
        },
    )
    DEFAULT = IMAGENET1K_V1
345
+
346
+
347
@register_model()
@handle_legacy_interface(weights=("pretrained", DenseNet121_Weights.IMAGENET1K_V1))
def densenet121(*, weights: Optional[DenseNet121_Weights] = None, progress: bool = True, **kwargs: Any) -> DenseNet:
    r"""Densenet-121 model from
    `Densely Connected Convolutional Networks <https://arxiv.org/abs/1608.06993>`_.

    Args:
        weights (:class:`~torchvision.models.DenseNet121_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.DenseNet121_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.densenet.DenseNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/densenet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.DenseNet121_Weights
        :members:
    """
    weights = DenseNet121_Weights.verify(weights)

    # DenseNet-121: growth rate 32, block depths (6, 12, 24, 16), 64 stem features.
    growth_rate, block_config, num_init_features = 32, (6, 12, 24, 16), 64
    return _densenet(growth_rate, block_config, num_init_features, weights, progress, **kwargs)
371
+
372
+
373
@register_model()
@handle_legacy_interface(weights=("pretrained", DenseNet161_Weights.IMAGENET1K_V1))
def densenet161(*, weights: Optional[DenseNet161_Weights] = None, progress: bool = True, **kwargs: Any) -> DenseNet:
    r"""Densenet-161 model from
    `Densely Connected Convolutional Networks <https://arxiv.org/abs/1608.06993>`_.

    Args:
        weights (:class:`~torchvision.models.DenseNet161_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.DenseNet161_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.densenet.DenseNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/densenet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.DenseNet161_Weights
        :members:
    """
    weights = DenseNet161_Weights.verify(weights)

    # DenseNet-161: growth rate 48, block depths (6, 12, 36, 24), 96 stem features.
    growth_rate, block_config, num_init_features = 48, (6, 12, 36, 24), 96
    return _densenet(growth_rate, block_config, num_init_features, weights, progress, **kwargs)
397
+
398
+
399
@register_model()
@handle_legacy_interface(weights=("pretrained", DenseNet169_Weights.IMAGENET1K_V1))
def densenet169(*, weights: Optional[DenseNet169_Weights] = None, progress: bool = True, **kwargs: Any) -> DenseNet:
    r"""Densenet-169 model from
    `Densely Connected Convolutional Networks <https://arxiv.org/abs/1608.06993>`_.

    Args:
        weights (:class:`~torchvision.models.DenseNet169_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.DenseNet169_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.densenet.DenseNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/densenet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.DenseNet169_Weights
        :members:
    """
    weights = DenseNet169_Weights.verify(weights)

    # DenseNet-169: growth rate 32, block depths (6, 12, 32, 32), 64 stem features.
    growth_rate, block_config, num_init_features = 32, (6, 12, 32, 32), 64
    return _densenet(growth_rate, block_config, num_init_features, weights, progress, **kwargs)
423
+
424
+
425
@register_model()
@handle_legacy_interface(weights=("pretrained", DenseNet201_Weights.IMAGENET1K_V1))
def densenet201(*, weights: Optional[DenseNet201_Weights] = None, progress: bool = True, **kwargs: Any) -> DenseNet:
    r"""Densenet-201 model from
    `Densely Connected Convolutional Networks <https://arxiv.org/abs/1608.06993>`_.

    Args:
        weights (:class:`~torchvision.models.DenseNet201_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.DenseNet201_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.densenet.DenseNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/densenet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.DenseNet201_Weights
        :members:
    """
    weights = DenseNet201_Weights.verify(weights)

    # DenseNet-201: growth rate 32, block depths (6, 12, 48, 32), 64 stem features.
    growth_rate, block_config, num_init_features = 32, (6, 12, 48, 32), 64
    return _densenet(growth_rate, block_config, num_init_features, weights, progress, **kwargs)
vllm/lib/python3.10/site-packages/torchvision/models/efficientnet.py ADDED
@@ -0,0 +1,1131 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import copy
2
+ import math
3
+ from dataclasses import dataclass
4
+ from functools import partial
5
+ from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
6
+
7
+ import torch
8
+ from torch import nn, Tensor
9
+ from torchvision.ops import StochasticDepth
10
+
11
+ from ..ops.misc import Conv2dNormActivation, SqueezeExcitation
12
+ from ..transforms._presets import ImageClassification, InterpolationMode
13
+ from ..utils import _log_api_usage_once
14
+ from ._api import register_model, Weights, WeightsEnum
15
+ from ._meta import _IMAGENET_CATEGORIES
16
+ from ._utils import _make_divisible, _ovewrite_named_param, handle_legacy_interface
17
+
18
+
19
# Public API of this module: the EfficientNet class, per-variant weight
# enums, and the model-builder functions (V1 b0-b7 and V2 s/m/l).
__all__ = [
    "EfficientNet",
    "EfficientNet_B0_Weights",
    "EfficientNet_B1_Weights",
    "EfficientNet_B2_Weights",
    "EfficientNet_B3_Weights",
    "EfficientNet_B4_Weights",
    "EfficientNet_B5_Weights",
    "EfficientNet_B6_Weights",
    "EfficientNet_B7_Weights",
    "EfficientNet_V2_S_Weights",
    "EfficientNet_V2_M_Weights",
    "EfficientNet_V2_L_Weights",
    "efficientnet_b0",
    "efficientnet_b1",
    "efficientnet_b2",
    "efficientnet_b3",
    "efficientnet_b4",
    "efficientnet_b5",
    "efficientnet_b6",
    "efficientnet_b7",
    "efficientnet_v2_s",
    "efficientnet_v2_m",
    "efficientnet_v2_l",
]
44
+
45
+
46
@dataclass
class _MBConvConfig:
    """Shared per-stage configuration for MBConv / FusedMBConv blocks."""

    expand_ratio: float  # channel expansion factor inside the block
    kernel: int  # conv kernel size (depthwise for MBConv, fused conv for FusedMBConv)
    stride: int  # stride applied by the block
    input_channels: int
    out_channels: int
    num_layers: int  # how many times the block is repeated in the stage
    block: Callable[..., nn.Module]  # block constructor (MBConv or FusedMBConv)

    @staticmethod
    def adjust_channels(channels: int, width_mult: float, min_value: Optional[int] = None) -> int:
        # Scale by the width multiplier and round to a multiple of 8.
        return _make_divisible(channels * width_mult, 8, min_value)
59
+
60
+
61
class MBConvConfig(_MBConvConfig):
    # Stores information listed at Table 1 of the EfficientNet paper & Table 4 of the EfficientNetV2 paper
    def __init__(
        self,
        expand_ratio: float,
        kernel: int,
        stride: int,
        input_channels: int,
        out_channels: int,
        num_layers: int,
        width_mult: float = 1.0,
        depth_mult: float = 1.0,
        block: Optional[Callable[..., nn.Module]] = None,
    ) -> None:
        # Compound scaling: channel widths scale with width_mult (rounded to a
        # multiple of 8), stage depth scales with depth_mult.
        input_channels = self.adjust_channels(input_channels, width_mult)
        out_channels = self.adjust_channels(out_channels, width_mult)
        num_layers = self.adjust_depth(num_layers, depth_mult)
        if block is None:
            block = MBConv
        super().__init__(expand_ratio, kernel, stride, input_channels, out_channels, num_layers, block)

    @staticmethod
    def adjust_depth(num_layers: int, depth_mult: float):
        # Rounds up, so every stage keeps at least one layer.
        return int(math.ceil(num_layers * depth_mult))
85
+
86
+
87
class FusedMBConvConfig(_MBConvConfig):
    # Stores information listed at Table 4 of the EfficientNetV2 paper
    def __init__(
        self,
        expand_ratio: float,
        kernel: int,
        stride: int,
        input_channels: int,
        out_channels: int,
        num_layers: int,
        block: Optional[Callable[..., nn.Module]] = None,
    ) -> None:
        # Unlike MBConvConfig, no width/depth multipliers: V2 stages are
        # specified directly.
        if block is None:
            block = FusedMBConv
        super().__init__(expand_ratio, kernel, stride, input_channels, out_channels, num_layers, block)
102
+
103
+
104
class MBConv(nn.Module):
    """Mobile inverted bottleneck block with squeeze-and-excitation:
    1x1 expand -> depthwise conv -> SE -> 1x1 project, with an optional
    stochastic-depth residual connection.
    """

    def __init__(
        self,
        cnf: MBConvConfig,
        stochastic_depth_prob: float,
        norm_layer: Callable[..., nn.Module],
        se_layer: Callable[..., nn.Module] = SqueezeExcitation,
    ) -> None:
        super().__init__()

        if not (1 <= cnf.stride <= 2):
            raise ValueError("illegal stride value")

        # Residual skip only when spatial size and channel count are preserved.
        self.use_res_connect = cnf.stride == 1 and cnf.input_channels == cnf.out_channels

        layers: List[nn.Module] = []
        activation_layer = nn.SiLU

        # expand: 1x1 conv, skipped when expand_ratio leaves channels unchanged
        expanded_channels = cnf.adjust_channels(cnf.input_channels, cnf.expand_ratio)
        if expanded_channels != cnf.input_channels:
            layers.append(
                Conv2dNormActivation(
                    cnf.input_channels,
                    expanded_channels,
                    kernel_size=1,
                    norm_layer=norm_layer,
                    activation_layer=activation_layer,
                )
            )

        # depthwise (groups == channels); carries the block's stride
        layers.append(
            Conv2dNormActivation(
                expanded_channels,
                expanded_channels,
                kernel_size=cnf.kernel,
                stride=cnf.stride,
                groups=expanded_channels,
                norm_layer=norm_layer,
                activation_layer=activation_layer,
            )
        )

        # squeeze and excitation; squeeze width derives from the *input* channels
        squeeze_channels = max(1, cnf.input_channels // 4)
        layers.append(se_layer(expanded_channels, squeeze_channels, activation=partial(nn.SiLU, inplace=True)))

        # project: linear 1x1 conv (no activation)
        layers.append(
            Conv2dNormActivation(
                expanded_channels, cnf.out_channels, kernel_size=1, norm_layer=norm_layer, activation_layer=None
            )
        )

        self.block = nn.Sequential(*layers)
        self.stochastic_depth = StochasticDepth(stochastic_depth_prob, "row")
        self.out_channels = cnf.out_channels

    def forward(self, input: Tensor) -> Tensor:
        result = self.block(input)
        if self.use_res_connect:
            # Randomly drop the transformed branch (stochastic depth) before
            # adding the identity skip.
            result = self.stochastic_depth(result)
            result += input
        return result
169
+
170
+
171
class FusedMBConv(nn.Module):
    """Fused MBConv block used by EfficientNetV2.

    Replaces the expand + depthwise pair of :class:`MBConv` with a single
    regular (fused) 3x3-style convolution and omits squeeze-and-excitation;
    keeps the optional stochastic-depth residual connection.
    """

    def __init__(
        self,
        cnf: FusedMBConvConfig,
        stochastic_depth_prob: float,
        norm_layer: Callable[..., nn.Module],
    ) -> None:
        super().__init__()

        if not (1 <= cnf.stride <= 2):
            raise ValueError("illegal stride value")

        # A residual connection is only valid when input and output shapes match.
        self.use_res_connect = cnf.stride == 1 and cnf.input_channels == cnf.out_channels

        layers: List[nn.Module] = []
        activation_layer = nn.SiLU

        expanded_channels = cnf.adjust_channels(cnf.input_channels, cnf.expand_ratio)
        if expanded_channels != cnf.input_channels:
            # fused expand: one full conv does both the widening and the
            # spatial mixing (no separate depthwise step).
            layers.append(
                Conv2dNormActivation(
                    cnf.input_channels,
                    expanded_channels,
                    kernel_size=cnf.kernel,
                    stride=cnf.stride,
                    norm_layer=norm_layer,
                    activation_layer=activation_layer,
                )
            )

            # project: linear (no activation) pointwise conv back to out_channels.
            layers.append(
                Conv2dNormActivation(
                    expanded_channels, cnf.out_channels, kernel_size=1, norm_layer=norm_layer, activation_layer=None
                )
            )
        else:
            # expand_ratio == 1: a single conv with activation, no projection.
            layers.append(
                Conv2dNormActivation(
                    cnf.input_channels,
                    cnf.out_channels,
                    kernel_size=cnf.kernel,
                    stride=cnf.stride,
                    norm_layer=norm_layer,
                    activation_layer=activation_layer,
                )
            )

        self.block = nn.Sequential(*layers)
        self.stochastic_depth = StochasticDepth(stochastic_depth_prob, "row")
        self.out_channels = cnf.out_channels

    def forward(self, input: Tensor) -> Tensor:
        result = self.block(input)
        if self.use_res_connect:
            # Stochastic depth randomly drops the whole branch per sample
            # during training, then the identity skip is added back.
            result = self.stochastic_depth(result)
            result += input
        return result
230
+
231
+
232
class EfficientNet(nn.Module):
    def __init__(
        self,
        inverted_residual_setting: Sequence[Union[MBConvConfig, FusedMBConvConfig]],
        dropout: float,
        stochastic_depth_prob: float = 0.2,
        num_classes: int = 1000,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        last_channel: Optional[int] = None,
    ) -> None:
        """
        EfficientNet V1 and V2 main class

        Args:
            inverted_residual_setting (Sequence[Union[MBConvConfig, FusedMBConvConfig]]): Network structure
            dropout (float): The dropout probability
            stochastic_depth_prob (float): The stochastic depth probability
            num_classes (int): Number of classes
            norm_layer (Optional[Callable[..., nn.Module]]): Module specifying the normalization layer to use
            last_channel (Optional[int]): The number of channels on the penultimate layer
        """
        super().__init__()
        _log_api_usage_once(self)

        # Validate the configuration before building anything.
        if not inverted_residual_setting:
            raise ValueError("The inverted_residual_setting should not be empty")
        elif not (
            isinstance(inverted_residual_setting, Sequence)
            and all([isinstance(s, _MBConvConfig) for s in inverted_residual_setting])
        ):
            raise TypeError("The inverted_residual_setting should be List[MBConvConfig]")

        if norm_layer is None:
            norm_layer = nn.BatchNorm2d

        layers: List[nn.Module] = []

        # building first layer: stem conv with stride 2 (halves the resolution).
        firstconv_output_channels = inverted_residual_setting[0].input_channels
        layers.append(
            Conv2dNormActivation(
                3, firstconv_output_channels, kernel_size=3, stride=2, norm_layer=norm_layer, activation_layer=nn.SiLU
            )
        )

        # building inverted residual blocks
        total_stage_blocks = sum(cnf.num_layers for cnf in inverted_residual_setting)
        stage_block_id = 0
        for cnf in inverted_residual_setting:
            stage: List[nn.Module] = []
            for _ in range(cnf.num_layers):
                # copy to avoid modifications. shallow copy is enough
                block_cnf = copy.copy(cnf)

                # overwrite info if not the first conv in the stage: only the
                # first block of a stage changes channels / downsamples.
                if stage:
                    block_cnf.input_channels = block_cnf.out_channels
                    block_cnf.stride = 1

                # adjust stochastic depth probability based on the depth of the
                # stage block: drop rate grows linearly with network depth.
                sd_prob = stochastic_depth_prob * float(stage_block_id) / total_stage_blocks

                stage.append(block_cnf.block(block_cnf, sd_prob, norm_layer))
                stage_block_id += 1

            layers.append(nn.Sequential(*stage))

        # building last several layers: 1x1 head conv; V2 uses a fixed 1280-wide
        # head via last_channel, V1 uses 4x the final block's output width.
        lastconv_input_channels = inverted_residual_setting[-1].out_channels
        lastconv_output_channels = last_channel if last_channel is not None else 4 * lastconv_input_channels
        layers.append(
            Conv2dNormActivation(
                lastconv_input_channels,
                lastconv_output_channels,
                kernel_size=1,
                norm_layer=norm_layer,
                activation_layer=nn.SiLU,
            )
        )

        self.features = nn.Sequential(*layers)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Sequential(
            nn.Dropout(p=dropout, inplace=True),
            nn.Linear(lastconv_output_channels, num_classes),
        )

        # Weight initialization following the reference implementation.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out")
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                init_range = 1.0 / math.sqrt(m.out_features)
                nn.init.uniform_(m.weight, -init_range, init_range)
                nn.init.zeros_(m.bias)

    def _forward_impl(self, x: Tensor) -> Tensor:
        # Backbone -> global average pool -> flatten -> classifier head.
        x = self.features(x)

        x = self.avgpool(x)
        x = torch.flatten(x, 1)

        x = self.classifier(x)

        return x

    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)
344
+
345
+
346
def _efficientnet(
    inverted_residual_setting: Sequence[Union[MBConvConfig, FusedMBConvConfig]],
    dropout: float,
    last_channel: Optional[int],
    weights: Optional[WeightsEnum],
    progress: bool,
    **kwargs: Any,
) -> EfficientNet:
    """Instantiate an :class:`EfficientNet` and optionally load pretrained weights.

    When ``weights`` is provided, ``num_classes`` is forced to match the number
    of categories in the weights' metadata before the model is constructed, and
    the state dict is loaded (with hash verification) afterwards.
    """
    pretrained = weights is not None
    if pretrained:
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))

    model = EfficientNet(inverted_residual_setting, dropout, last_channel=last_channel, **kwargs)

    if pretrained:
        state_dict = weights.get_state_dict(progress=progress, check_hash=True)
        model.load_state_dict(state_dict)

    return model
363
+
364
+
365
def _efficientnet_conf(
    arch: str,
    **kwargs: Any,
) -> Tuple[Sequence[Union[MBConvConfig, FusedMBConvConfig]], Optional[int]]:
    """Return the per-stage block settings and the penultimate channel count
    (``last_channel``) for the given architecture name.

    The V1 family (``efficientnet_b*``) scales the B0 baseline via the
    ``width_mult``/``depth_mult`` kwargs; the V2 variants use fixed hand-tuned
    stage tables and a 1280-wide head.
    """
    inverted_residual_setting: Sequence[Union[MBConvConfig, FusedMBConvConfig]]
    if arch.startswith("efficientnet_b"):
        # Config columns: expand_ratio, kernel, stride, input_channels, out_channels, num_layers
        bneck_conf = partial(MBConvConfig, width_mult=kwargs.pop("width_mult"), depth_mult=kwargs.pop("depth_mult"))
        inverted_residual_setting = [
            bneck_conf(1, 3, 1, 32, 16, 1),
            bneck_conf(6, 3, 2, 16, 24, 2),
            bneck_conf(6, 5, 2, 24, 40, 2),
            bneck_conf(6, 3, 2, 40, 80, 3),
            bneck_conf(6, 5, 1, 80, 112, 3),
            bneck_conf(6, 5, 2, 112, 192, 4),
            bneck_conf(6, 3, 1, 192, 320, 1),
        ]
        # V1 derives the head width from the last stage (4x), so no override.
        last_channel = None
    elif arch.startswith("efficientnet_v2_s"):
        inverted_residual_setting = [
            FusedMBConvConfig(1, 3, 1, 24, 24, 2),
            FusedMBConvConfig(4, 3, 2, 24, 48, 4),
            FusedMBConvConfig(4, 3, 2, 48, 64, 4),
            MBConvConfig(4, 3, 2, 64, 128, 6),
            MBConvConfig(6, 3, 1, 128, 160, 9),
            MBConvConfig(6, 3, 2, 160, 256, 15),
        ]
        last_channel = 1280
    elif arch.startswith("efficientnet_v2_m"):
        inverted_residual_setting = [
            FusedMBConvConfig(1, 3, 1, 24, 24, 3),
            FusedMBConvConfig(4, 3, 2, 24, 48, 5),
            FusedMBConvConfig(4, 3, 2, 48, 80, 5),
            MBConvConfig(4, 3, 2, 80, 160, 7),
            MBConvConfig(6, 3, 1, 160, 176, 14),
            MBConvConfig(6, 3, 2, 176, 304, 18),
            MBConvConfig(6, 3, 1, 304, 512, 5),
        ]
        last_channel = 1280
    elif arch.startswith("efficientnet_v2_l"):
        inverted_residual_setting = [
            FusedMBConvConfig(1, 3, 1, 32, 32, 4),
            FusedMBConvConfig(4, 3, 2, 32, 64, 7),
            FusedMBConvConfig(4, 3, 2, 64, 96, 7),
            MBConvConfig(4, 3, 2, 96, 192, 10),
            MBConvConfig(6, 3, 1, 192, 224, 19),
            MBConvConfig(6, 3, 2, 224, 384, 25),
            MBConvConfig(6, 3, 1, 384, 640, 7),
        ]
        last_channel = 1280
    else:
        raise ValueError(f"Unsupported model type {arch}")

    return inverted_residual_setting, last_channel
418
+
419
+
420
# Metadata shared by every EfficientNet weight entry below.
_COMMON_META: Dict[str, Any] = {
    "categories": _IMAGENET_CATEGORIES,
}


# Extra metadata common to the V1 (B0-B7) weights.
_COMMON_META_V1 = {
    **_COMMON_META,
    "min_size": (1, 1),
    "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#efficientnet-v1",
}


# Extra metadata common to the V2 (S/M/L) weights.
_COMMON_META_V2 = {
    **_COMMON_META,
    "min_size": (33, 33),
    "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#efficientnet-v2",
}
437
+
438
+
439
class EfficientNet_B0_Weights(WeightsEnum):
    # Pretrained ImageNet-1K weights for EfficientNet-B0.
    IMAGENET1K_V1 = Weights(
        # Weights ported from https://github.com/rwightman/pytorch-image-models/
        url="https://download.pytorch.org/models/efficientnet_b0_rwightman-7f5810bc.pth",
        transforms=partial(
            ImageClassification, crop_size=224, resize_size=256, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            **_COMMON_META_V1,
            "num_params": 5288548,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 77.692,
                    "acc@5": 93.532,
                }
            },
            "_ops": 0.386,
            "_file_size": 20.451,
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1
461
+
462
+
463
class EfficientNet_B1_Weights(WeightsEnum):
    # Pretrained ImageNet-1K weights for EfficientNet-B1 (ported V1 and
    # TorchVision-retrained V2 variants).
    IMAGENET1K_V1 = Weights(
        # Weights ported from https://github.com/rwightman/pytorch-image-models/
        url="https://download.pytorch.org/models/efficientnet_b1_rwightman-bac287d4.pth",
        transforms=partial(
            ImageClassification, crop_size=240, resize_size=256, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            **_COMMON_META_V1,
            "num_params": 7794184,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 78.642,
                    "acc@5": 94.186,
                }
            },
            "_ops": 0.687,
            "_file_size": 30.134,
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/efficientnet_b1-c27df63c.pth",
        transforms=partial(
            ImageClassification, crop_size=240, resize_size=255, interpolation=InterpolationMode.BILINEAR
        ),
        meta={
            **_COMMON_META_V1,
            "num_params": 7794184,
            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-lr-wd-crop-tuning",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 79.838,
                    "acc@5": 94.934,
                }
            },
            "_ops": 0.687,
            "_file_size": 30.136,
            "_docs": """
                These weights improve upon the results of the original paper by using a modified version of TorchVision's
                `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V2
509
+
510
+
511
class EfficientNet_B2_Weights(WeightsEnum):
    # Pretrained ImageNet-1K weights for EfficientNet-B2.
    IMAGENET1K_V1 = Weights(
        # Weights ported from https://github.com/rwightman/pytorch-image-models/
        url="https://download.pytorch.org/models/efficientnet_b2_rwightman-c35c1473.pth",
        transforms=partial(
            ImageClassification, crop_size=288, resize_size=288, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            **_COMMON_META_V1,
            "num_params": 9109994,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 80.608,
                    "acc@5": 95.310,
                }
            },
            "_ops": 1.088,
            "_file_size": 35.174,
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1
533
+
534
+
535
class EfficientNet_B3_Weights(WeightsEnum):
    # Pretrained ImageNet-1K weights for EfficientNet-B3.
    IMAGENET1K_V1 = Weights(
        # Weights ported from https://github.com/rwightman/pytorch-image-models/
        url="https://download.pytorch.org/models/efficientnet_b3_rwightman-b3899882.pth",
        transforms=partial(
            ImageClassification, crop_size=300, resize_size=320, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            **_COMMON_META_V1,
            "num_params": 12233232,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 82.008,
                    "acc@5": 96.054,
                }
            },
            "_ops": 1.827,
            "_file_size": 47.184,
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1
557
+
558
+
559
class EfficientNet_B4_Weights(WeightsEnum):
    # Pretrained ImageNet-1K weights for EfficientNet-B4.
    IMAGENET1K_V1 = Weights(
        # Weights ported from https://github.com/rwightman/pytorch-image-models/
        url="https://download.pytorch.org/models/efficientnet_b4_rwightman-23ab8bcd.pth",
        transforms=partial(
            ImageClassification, crop_size=380, resize_size=384, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            **_COMMON_META_V1,
            "num_params": 19341616,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 83.384,
                    "acc@5": 96.594,
                }
            },
            "_ops": 4.394,
            "_file_size": 74.489,
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1
581
+
582
+
583
class EfficientNet_B5_Weights(WeightsEnum):
    # Pretrained ImageNet-1K weights for EfficientNet-B5.
    IMAGENET1K_V1 = Weights(
        # Weights ported from https://github.com/lukemelas/EfficientNet-PyTorch/
        url="https://download.pytorch.org/models/efficientnet_b5_lukemelas-1a07897c.pth",
        transforms=partial(
            ImageClassification, crop_size=456, resize_size=456, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            **_COMMON_META_V1,
            "num_params": 30389784,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 83.444,
                    "acc@5": 96.628,
                }
            },
            "_ops": 10.266,
            "_file_size": 116.864,
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1
605
+
606
+
607
class EfficientNet_B6_Weights(WeightsEnum):
    # Pretrained ImageNet-1K weights for EfficientNet-B6.
    IMAGENET1K_V1 = Weights(
        # Weights ported from https://github.com/lukemelas/EfficientNet-PyTorch/
        url="https://download.pytorch.org/models/efficientnet_b6_lukemelas-24a108a5.pth",
        transforms=partial(
            ImageClassification, crop_size=528, resize_size=528, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            **_COMMON_META_V1,
            "num_params": 43040704,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 84.008,
                    "acc@5": 96.916,
                }
            },
            "_ops": 19.068,
            "_file_size": 165.362,
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1
629
+
630
+
631
class EfficientNet_B7_Weights(WeightsEnum):
    # Pretrained ImageNet-1K weights for EfficientNet-B7.
    IMAGENET1K_V1 = Weights(
        # Weights ported from https://github.com/lukemelas/EfficientNet-PyTorch/
        url="https://download.pytorch.org/models/efficientnet_b7_lukemelas-c5b4e57e.pth",
        transforms=partial(
            ImageClassification, crop_size=600, resize_size=600, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            **_COMMON_META_V1,
            "num_params": 66347960,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 84.122,
                    "acc@5": 96.908,
                }
            },
            "_ops": 37.746,
            "_file_size": 254.675,
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1
653
+
654
+
655
class EfficientNet_V2_S_Weights(WeightsEnum):
    # Pretrained ImageNet-1K weights for EfficientNetV2-S (TorchVision recipe).
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/efficientnet_v2_s-dd5fe13b.pth",
        transforms=partial(
            ImageClassification,
            crop_size=384,
            resize_size=384,
            interpolation=InterpolationMode.BILINEAR,
        ),
        meta={
            **_COMMON_META_V2,
            "num_params": 21458488,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 84.228,
                    "acc@5": 96.878,
                }
            },
            "_ops": 8.366,
            "_file_size": 82.704,
            "_docs": """
                These weights improve upon the results of the original paper by using a modified version of TorchVision's
                `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V1
683
+
684
+
685
class EfficientNet_V2_M_Weights(WeightsEnum):
    # Pretrained ImageNet-1K weights for EfficientNetV2-M (TorchVision recipe).
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/efficientnet_v2_m-dc08266a.pth",
        transforms=partial(
            ImageClassification,
            crop_size=480,
            resize_size=480,
            interpolation=InterpolationMode.BILINEAR,
        ),
        meta={
            **_COMMON_META_V2,
            "num_params": 54139356,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 85.112,
                    "acc@5": 97.156,
                }
            },
            "_ops": 24.582,
            "_file_size": 208.01,
            "_docs": """
                These weights improve upon the results of the original paper by using a modified version of TorchVision's
                `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V1
713
+
714
+
715
class EfficientNet_V2_L_Weights(WeightsEnum):
    # Weights ported from https://github.com/google/automl/tree/master/efficientnetv2
    # Note the non-standard 0.5 mean/std normalization used by the original repo.
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/efficientnet_v2_l-59c71312.pth",
        transforms=partial(
            ImageClassification,
            crop_size=480,
            resize_size=480,
            interpolation=InterpolationMode.BICUBIC,
            mean=(0.5, 0.5, 0.5),
            std=(0.5, 0.5, 0.5),
        ),
        meta={
            **_COMMON_META_V2,
            "num_params": 118515272,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 85.808,
                    "acc@5": 97.788,
                }
            },
            "_ops": 56.08,
            "_file_size": 454.573,
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1
742
+
743
+
744
@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B0_Weights.IMAGENET1K_V1))
def efficientnet_b0(
    *, weights: Optional[EfficientNet_B0_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B0 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B0_Weights`, optional): Pretrained weights to use.
            See :class:`~torchvision.models.EfficientNet_B0_Weights` for the available values.
            By default, no pre-trained weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters forwarded to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_B0_Weights
        :members:
    """
    weights = EfficientNet_B0_Weights.verify(weights)

    # B0 is the unscaled baseline; classifier dropout defaults to 0.2.
    block_settings, last_channel = _efficientnet_conf("efficientnet_b0", width_mult=1.0, depth_mult=1.0)
    dropout = kwargs.pop("dropout", 0.2)
    return _efficientnet(block_settings, dropout, last_channel, weights, progress, **kwargs)
773
+
774
+
775
@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B1_Weights.IMAGENET1K_V1))
def efficientnet_b1(
    *, weights: Optional[EfficientNet_B1_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B1 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B1_Weights`, optional): Pretrained weights to use.
            See :class:`~torchvision.models.EfficientNet_B1_Weights` for the available values.
            By default, no pre-trained weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters forwarded to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_B1_Weights
        :members:
    """
    weights = EfficientNet_B1_Weights.verify(weights)

    # B1 keeps the B0 width but grows depth by 1.1x; dropout defaults to 0.2.
    block_settings, last_channel = _efficientnet_conf("efficientnet_b1", width_mult=1.0, depth_mult=1.1)
    dropout = kwargs.pop("dropout", 0.2)
    return _efficientnet(block_settings, dropout, last_channel, weights, progress, **kwargs)
804
+
805
+
806
@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B2_Weights.IMAGENET1K_V1))
def efficientnet_b2(
    *, weights: Optional[EfficientNet_B2_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B2 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B2_Weights`, optional): Pretrained weights to use.
            See :class:`~torchvision.models.EfficientNet_B2_Weights` for the available values.
            By default, no pre-trained weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters forwarded to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_B2_Weights
        :members:
    """
    weights = EfficientNet_B2_Weights.verify(weights)

    # B2 scales width 1.1x / depth 1.2x; dropout defaults to 0.3.
    block_settings, last_channel = _efficientnet_conf("efficientnet_b2", width_mult=1.1, depth_mult=1.2)
    dropout = kwargs.pop("dropout", 0.3)
    return _efficientnet(block_settings, dropout, last_channel, weights, progress, **kwargs)
835
+
836
+
837
@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B3_Weights.IMAGENET1K_V1))
def efficientnet_b3(
    *, weights: Optional[EfficientNet_B3_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B3 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B3_Weights`, optional): Pretrained weights to use.
            See :class:`~torchvision.models.EfficientNet_B3_Weights` for the available values.
            By default, no pre-trained weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters forwarded to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_B3_Weights
        :members:
    """
    weights = EfficientNet_B3_Weights.verify(weights)

    # B3 scales width 1.2x / depth 1.4x; dropout defaults to 0.3.
    block_settings, last_channel = _efficientnet_conf("efficientnet_b3", width_mult=1.2, depth_mult=1.4)
    dropout = kwargs.pop("dropout", 0.3)
    return _efficientnet(block_settings, dropout, last_channel, weights, progress, **kwargs)
871
+
872
+
873
@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B4_Weights.IMAGENET1K_V1))
def efficientnet_b4(
    *, weights: Optional[EfficientNet_B4_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B4 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B4_Weights`, optional): Pretrained weights to use.
            See :class:`~torchvision.models.EfficientNet_B4_Weights` for the available values.
            By default, no pre-trained weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters forwarded to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_B4_Weights
        :members:
    """
    weights = EfficientNet_B4_Weights.verify(weights)

    # B4 scales width 1.4x / depth 1.8x; dropout defaults to 0.4.
    block_settings, last_channel = _efficientnet_conf("efficientnet_b4", width_mult=1.4, depth_mult=1.8)
    dropout = kwargs.pop("dropout", 0.4)
    return _efficientnet(block_settings, dropout, last_channel, weights, progress, **kwargs)
907
+
908
+
909
@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B5_Weights.IMAGENET1K_V1))
def efficientnet_b5(
    *, weights: Optional[EfficientNet_B5_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B5 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B5_Weights`, optional): Pretrained weights to use.
            See :class:`~torchvision.models.EfficientNet_B5_Weights` for the available values.
            By default, no pre-trained weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters forwarded to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_B5_Weights
        :members:
    """
    weights = EfficientNet_B5_Weights.verify(weights)

    # B5 scales width 1.6x / depth 2.2x; the ported TF weights need the
    # TensorFlow-style BatchNorm eps/momentum.
    block_settings, last_channel = _efficientnet_conf("efficientnet_b5", width_mult=1.6, depth_mult=2.2)
    dropout = kwargs.pop("dropout", 0.4)
    return _efficientnet(
        block_settings,
        dropout,
        last_channel,
        weights,
        progress,
        norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.01),
        **kwargs,
    )
944
+
945
+
946
@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B6_Weights.IMAGENET1K_V1))
def efficientnet_b6(
    *, weights: Optional[EfficientNet_B6_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B6 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B6_Weights`, optional): Pretrained weights to use.
            See :class:`~torchvision.models.EfficientNet_B6_Weights` for the available values.
            By default, no pre-trained weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters forwarded to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_B6_Weights
        :members:
    """
    weights = EfficientNet_B6_Weights.verify(weights)

    # B6 scales width 1.8x / depth 2.6x; the ported TF weights need the
    # TensorFlow-style BatchNorm eps/momentum.
    block_settings, last_channel = _efficientnet_conf("efficientnet_b6", width_mult=1.8, depth_mult=2.6)
    dropout = kwargs.pop("dropout", 0.5)
    return _efficientnet(
        block_settings,
        dropout,
        last_channel,
        weights,
        progress,
        norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.01),
        **kwargs,
    )
981
+
982
+
983
@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_B7_Weights.IMAGENET1K_V1))
def efficientnet_b7(
    *, weights: Optional[EfficientNet_B7_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B7 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B7_Weights`, optional): Pretrained weights to use.
            See :class:`~torchvision.models.EfficientNet_B7_Weights` for the available values.
            By default, no pre-trained weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters forwarded to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_B7_Weights
        :members:
    """
    weights = EfficientNet_B7_Weights.verify(weights)

    # B7 scales width 2.0x / depth 3.1x; the ported TF weights need the
    # TensorFlow-style BatchNorm eps/momentum.
    block_settings, last_channel = _efficientnet_conf("efficientnet_b7", width_mult=2.0, depth_mult=3.1)
    dropout = kwargs.pop("dropout", 0.5)
    return _efficientnet(
        block_settings,
        dropout,
        last_channel,
        weights,
        progress,
        norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.01),
        **kwargs,
    )
1018
+
1019
+
1020
@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_V2_S_Weights.IMAGENET1K_V1))
def efficientnet_v2_s(
    *, weights: Optional[EfficientNet_V2_S_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """
    Constructs an EfficientNetV2-S architecture from
    `EfficientNetV2: Smaller Models and Faster Training <https://arxiv.org/abs/2104.00298>`_.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_V2_S_Weights`, optional): Pretrained weights to use.
            See :class:`~torchvision.models.EfficientNet_V2_S_Weights` for the available values.
            By default, no pre-trained weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters forwarded to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_V2_S_Weights
        :members:
    """
    weights = EfficientNet_V2_S_Weights.verify(weights)

    # V2-S uses a fixed stage table (no width/depth multipliers) and a larger
    # BatchNorm eps, matching the reference implementation.
    block_settings, last_channel = _efficientnet_conf("efficientnet_v2_s")
    dropout = kwargs.pop("dropout", 0.2)
    return _efficientnet(
        block_settings,
        dropout,
        last_channel,
        weights,
        progress,
        norm_layer=partial(nn.BatchNorm2d, eps=1e-03),
        **kwargs,
    )
1056
+
1057
+
1058
@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_V2_M_Weights.IMAGENET1K_V1))
def efficientnet_v2_m(
    *, weights: Optional[EfficientNet_V2_M_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """
    Constructs an EfficientNetV2-M architecture from
    `EfficientNetV2: Smaller Models and Faster Training <https://arxiv.org/abs/2104.00298>`_.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_V2_M_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_V2_M_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_V2_M_Weights
        :members:
    """
    weights = EfficientNet_V2_M_Weights.verify(weights)

    # V2 variants carry their full per-stage configuration under their own
    # name; no width/depth multipliers are involved.
    block_setting, head_channels = _efficientnet_conf("efficientnet_v2_m")
    dropout_rate = kwargs.pop("dropout", 0.3)
    return _efficientnet(
        block_setting,
        dropout_rate,
        head_channels,
        weights,
        progress,
        norm_layer=partial(nn.BatchNorm2d, eps=1e-03),
        **kwargs,
    )
1094
+
1095
+
1096
@register_model()
@handle_legacy_interface(weights=("pretrained", EfficientNet_V2_L_Weights.IMAGENET1K_V1))
def efficientnet_v2_l(
    *, weights: Optional[EfficientNet_V2_L_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """
    Constructs an EfficientNetV2-L architecture from
    `EfficientNetV2: Smaller Models and Faster Training <https://arxiv.org/abs/2104.00298>`_.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_V2_L_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_V2_L_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.EfficientNet_V2_L_Weights
        :members:
    """
    weights = EfficientNet_V2_L_Weights.verify(weights)

    # V2 variants carry their full per-stage configuration under their own
    # name; no width/depth multipliers are involved.
    block_setting, head_channels = _efficientnet_conf("efficientnet_v2_l")
    dropout_rate = kwargs.pop("dropout", 0.4)
    return _efficientnet(
        block_setting,
        dropout_rate,
        head_channels,
        weights,
        progress,
        norm_layer=partial(nn.BatchNorm2d, eps=1e-03),
        **kwargs,
    )
vllm/lib/python3.10/site-packages/torchvision/models/feature_extraction.py ADDED
@@ -0,0 +1,572 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import inspect
2
+ import math
3
+ import re
4
+ import warnings
5
+ from collections import OrderedDict
6
+ from copy import deepcopy
7
+ from itertools import chain
8
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
9
+
10
+ import torch
11
+ import torchvision
12
+ from torch import fx, nn
13
+ from torch.fx.graph_module import _copy_attr
14
+
15
+
16
+ __all__ = ["create_feature_extractor", "get_graph_node_names"]
17
+
18
+
19
class LeafModuleAwareTracer(fx.Tracer):
    """
    An fx.Tracer that allows the user to specify a set of leaf modules, i.e.
    modules that are not to be traced through. The resulting graph ends up
    having single nodes referencing calls to the leaf modules' forward methods.
    """

    def __init__(self, *args, **kwargs):
        # Extract our custom keyword before forwarding the rest to fx.Tracer,
        # which does not know about it.
        self.leaf_modules = kwargs.pop("leaf_modules", {})
        super().__init__(*args, **kwargs)

    def is_leaf_module(self, m: nn.Module, module_qualname: str) -> bool:
        # User-specified leaf types take precedence; otherwise defer to the
        # stock fx.Tracer policy.
        return isinstance(m, tuple(self.leaf_modules)) or super().is_leaf_module(m, module_qualname)
37
+
38
+
39
class NodePathTracer(LeafModuleAwareTracer):
    """
    NodePathTracer is an FX tracer that, for each operation, also records the
    name of the Node from which the operation originated. A node name here is
    a `.` separated path walking the hierarchy from top level module down to
    leaf operation or leaf module. The name of the top level module is not
    included as part of the node name. For example, if we trace a module whose
    forward method applies a ReLU module, the name for that node will simply
    be 'relu'.

    Some notes on the specifics:
        - Nodes are recorded to `self.node_to_qualname` which is a dictionary
          mapping a given Node object to its node name.
        - Nodes are recorded in the order which they are executed during
          tracing.
        - When a duplicate node name is encountered, a suffix of the form
          _{int} is added. The counter starts from 1.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Track the qualified name of the Node being traced
        self.current_module_qualname = ""
        # A map from FX Node to the qualified name.
        # NOTE: This is loosely like the "qualified name" mentioned in the
        # torch.fx docs https://pytorch.org/docs/stable/fx.html but adapted
        # for the purposes of the torchvision feature extractor
        self.node_to_qualname = OrderedDict()

    def call_module(self, m: torch.nn.Module, forward: Callable, args, kwargs):
        """
        Override of `fx.Tracer.call_module`
        This override:
        1) Stores away the qualified name of the caller for restoration later
        2) Adds the qualified name of the caller to
           `current_module_qualname` for retrieval by `create_proxy`
        3) Once a leaf module is reached, calls `create_proxy`
        4) Restores the caller's qualified name into current_module_qualname
        """
        old_qualname = self.current_module_qualname
        try:
            module_qualname = self.path_of_module(m)
            self.current_module_qualname = module_qualname
            if not self.is_leaf_module(m, module_qualname):
                # Non-leaf: keep tracing through the module's forward; its
                # internal ops get recorded individually via create_proxy.
                out = forward(*args, **kwargs)
                return out
            # Leaf: record the whole module call as a single graph node.
            return self.create_proxy("call_module", module_qualname, args, kwargs)
        finally:
            # Restore the caller's qualname even if tracing raised.
            self.current_module_qualname = old_qualname

    def create_proxy(
        self, kind: str, target: fx.node.Target, args, kwargs, name=None, type_expr=None, *_
    ) -> fx.proxy.Proxy:
        """
        Override of `Tracer.create_proxy`. This override intercepts the recording
        of every operation and stores away the current traced module's qualified
        name in `node_to_qualname`
        """
        # NOTE(review): the trailing *_ appears to absorb extra positional
        # args from newer torch.fx signatures — confirm against the installed
        # torch version's Tracer.create_proxy.
        proxy = super().create_proxy(kind, target, args, kwargs, name, type_expr)
        self.node_to_qualname[proxy.node] = self._get_node_qualname(self.current_module_qualname, proxy.node)
        return proxy

    def _get_node_qualname(self, module_qualname: str, node: fx.node.Node) -> str:
        # Start from the qualified name of the module currently being traced.
        node_qualname = module_qualname

        if node.op != "call_module":
            # In this case module_qualname from torch.fx doesn't go all the
            # way to the leaf function/op, so we need to append it
            if len(node_qualname) > 0:
                # Only append '.' if we are deeper than the top level module
                node_qualname += "."
            node_qualname += str(node)

        # Now we need to add an _{index} postfix on any repeated node names
        # For modules we do this from scratch
        # But for anything else, torch.fx already has a globally scoped
        # _{index} postfix. But we want it locally (relative to direct parent)
        # scoped. So first we need to undo the torch.fx postfix
        if re.match(r".+_[0-9]+$", node_qualname) is not None:
            node_qualname = node_qualname.rsplit("_", 1)[0]

        # ... and now we add on our own postfix
        # Iterate in reverse so the first hit is the most recently recorded
        # duplicate, whose postfix carries the highest counter so far.
        for existing_qualname in reversed(self.node_to_qualname.values()):
            # Check to see if existing_qualname is of the form
            # {node_qualname} or {node_qualname}_{int}
            if re.match(rf"{node_qualname}(_[0-9]+)?$", existing_qualname) is not None:
                postfix = existing_qualname.replace(node_qualname, "")
                if len(postfix):
                    # existing_qualname is of the form {node_qualname}_{int}
                    next_index = int(postfix[1:]) + 1
                else:
                    # existing_qualname is of the form {node_qualname}
                    next_index = 1
                node_qualname += f"_{next_index}"
                break

        return node_qualname
136
+
137
+
138
+ def _is_subseq(x, y):
139
+ """Check if y is a subsequence of x
140
+ https://stackoverflow.com/a/24017747/4391249
141
+ """
142
+ iter_x = iter(x)
143
+ return all(any(x_item == y_item for x_item in iter_x) for y_item in y)
144
+
145
+
146
def _warn_graph_differences(train_tracer: NodePathTracer, eval_tracer: NodePathTracer):
    """
    Utility function for warning the user if there are differences between
    the train graph nodes and the eval graph nodes.
    """
    train_nodes = list(train_tracer.node_to_qualname.values())
    eval_nodes = list(eval_tracer.node_to_qualname.values())

    # Identical node sequences: nothing to report.
    if train_nodes == eval_nodes:
        return

    suggestion_msg = (
        "When choosing nodes for feature extraction, you may need to specify "
        "output nodes for train and eval mode separately."
    )

    # Distinguish the benign case (one mode merely skips some nodes of the
    # other, e.g. train-only heads) from a genuine structural divergence.
    if _is_subseq(train_nodes, eval_nodes):
        msg = (
            "NOTE: The nodes obtained by tracing the model in eval mode "
            "are a subsequence of those obtained in train mode. "
        )
    elif _is_subseq(eval_nodes, train_nodes):
        msg = (
            "NOTE: The nodes obtained by tracing the model in train mode "
            "are a subsequence of those obtained in eval mode. "
        )
    else:
        msg = "The nodes obtained by tracing the model in train mode are different to those obtained in eval mode. "
    warnings.warn(msg + suggestion_msg)
175
+
176
+
177
def _get_leaf_modules_for_ops() -> List[type]:
    """Return every nn.Module subclass exposed by ``torchvision.ops``.

    These are treated as leaf modules by default so FX tracing does not
    descend into them.
    """
    return [
        obj
        for _, obj in inspect.getmembers(torchvision.ops)
        if inspect.isclass(obj) and issubclass(obj, torch.nn.Module)
    ]
184
+
185
+
186
def _set_default_tracer_kwargs(original_tr_kwargs: Optional[Dict[str, Any]]) -> Dict[str, Any]:
    """Merge the torchvision default tracer options into user-provided kwargs.

    Defaults (``math``/``torchvision.ops`` autowrapped, all torchvision op
    modules as leaves) are unioned with any user-supplied values rather than
    replacing them. Note: the user's dict is updated in place and returned.
    """
    tr_kwargs = original_tr_kwargs if original_tr_kwargs is not None else {}

    default_autowrap = (math, torchvision.ops)
    if "autowrap_modules" in tr_kwargs:
        tr_kwargs["autowrap_modules"] = tuple(set(tr_kwargs["autowrap_modules"] + default_autowrap))
    else:
        tr_kwargs["autowrap_modules"] = default_autowrap

    default_leaves = _get_leaf_modules_for_ops()
    if "leaf_modules" in tr_kwargs:
        tr_kwargs["leaf_modules"] = list(set(tr_kwargs["leaf_modules"] + default_leaves))
    else:
        tr_kwargs["leaf_modules"] = default_leaves

    return tr_kwargs
201
+
202
+
203
def get_graph_node_names(
    model: nn.Module,
    tracer_kwargs: Optional[Dict[str, Any]] = None,
    suppress_diff_warning: bool = False,
    concrete_args: Optional[Dict[str, Any]] = None,
) -> Tuple[List[str], List[str]]:
    """
    Dev utility to return node names in order of execution. See note on node
    names under :func:`create_feature_extractor`. Useful for seeing which node
    names are available for feature extraction. There are two reasons that
    node names can't easily be read directly from the code for a model:

        1. Not all submodules are traced through. Modules from ``torch.nn`` all
           fall within this category.
        2. Nodes representing the repeated application of the same operation
           or leaf module get a ``_{counter}`` postfix.

    The model is traced twice: once in train mode, and once in eval mode. Both
    sets of node names are returned.

    For more details on the node naming conventions used here, please see the
    :ref:`relevant subheading <about-node-names>` in the
    `documentation <https://pytorch.org/vision/stable/feature_extraction.html>`_.

    Args:
        model (nn.Module): model for which we'd like to print node names
        tracer_kwargs (dict, optional): a dictionary of keyword arguments for
            ``NodePathTracer`` (they are eventually passed onto
            `torch.fx.Tracer <https://pytorch.org/docs/stable/fx.html#torch.fx.Tracer>`_).
            By default, it will be set to wrap and make leaf nodes all torchvision ops:
            {"autowrap_modules": (math, torchvision.ops,),"leaf_modules": _get_leaf_modules_for_ops(),}
            WARNING: In case the user provides tracer_kwargs, above default arguments will be appended to the user
            provided dictionary.
        suppress_diff_warning (bool, optional): whether to suppress a warning
            when there are discrepancies between the train and eval version of
            the graph. Defaults to False.
        concrete_args (Optional[Dict[str, any]]): Concrete arguments that should
            not be treated as Proxies. According to the `Pytorch docs
            <https://pytorch.org/docs/stable/fx.html#torch.fx.Tracer.trace>`_,
            this parameter's API may not be guaranteed.

    Returns:
        tuple(list, list): a list of node names from tracing the model in
        train mode, and another from tracing the model in eval mode.

    Examples::

        >>> model = torchvision.models.resnet18()
        >>> train_nodes, eval_nodes = get_graph_node_names(model)
    """
    tracer_kwargs = _set_default_tracer_kwargs(tracer_kwargs)
    # Remember the caller's training mode so it can be restored afterwards —
    # the two traces below flip the model between train() and eval().
    is_training = model.training
    # Trace twice: graphs can differ between modes (e.g. dropout, aux heads).
    train_tracer = NodePathTracer(**tracer_kwargs)
    train_tracer.trace(model.train(), concrete_args=concrete_args)
    eval_tracer = NodePathTracer(**tracer_kwargs)
    eval_tracer.trace(model.eval(), concrete_args=concrete_args)
    # node_to_qualname preserves execution order, so these lists are ordered.
    train_nodes = list(train_tracer.node_to_qualname.values())
    eval_nodes = list(eval_tracer.node_to_qualname.values())
    if not suppress_diff_warning:
        _warn_graph_differences(train_tracer, eval_tracer)
    # Restore training state
    model.train(is_training)
    return train_nodes, eval_nodes
266
+
267
+
268
class DualGraphModule(fx.GraphModule):
    """
    A derivative of `fx.GraphModule`. Differs in the following ways:
    - Requires a train and eval version of the underlying graph
    - Copies submodules according to the nodes of both train and eval graphs.
    - Calling train(mode) switches between train graph and eval graph.
    """

    def __init__(
        self, root: torch.nn.Module, train_graph: fx.Graph, eval_graph: fx.Graph, class_name: str = "GraphModule"
    ):
        """
        Args:
            root (nn.Module): module from which the copied module hierarchy is
                built
            train_graph (fx.Graph): the graph that should be used in train mode
            eval_graph (fx.Graph): the graph that should be used in eval mode
        """
        # Deliberately skip fx.GraphModule.__init__ (which would bind a single
        # graph) and initialize nn.Module directly; the two graphs are managed
        # manually below.
        super(fx.GraphModule, self).__init__()

        self.__class__.__name__ = class_name

        self.train_graph = train_graph
        self.eval_graph = eval_graph

        # Copy all get_attr and call_module ops (indicated by BOTH train and
        # eval graphs)
        for node in chain(iter(train_graph.nodes), iter(eval_graph.nodes)):
            if node.op in ["get_attr", "call_module"]:
                if not isinstance(node.target, str):
                    raise TypeError(f"node.target should be of type str instead of {type(node.target)}")
                # _copy_attr mirrors the referenced attribute from `root`
                # into this module under the same dotted path.
                _copy_attr(root, self, node.target)

        # train mode by default
        self.train()
        # Assigning to self.graph triggers fx.GraphModule's recompile of
        # forward() for the chosen graph.
        self.graph = train_graph

        # (borrowed from fx.GraphModule):
        # Store the Tracer class responsible for creating a Graph separately as part of the
        # GraphModule state, except when the Tracer is defined in a local namespace.
        # Locally defined Tracers are not pickleable. This is needed because torch.package will
        # serialize a GraphModule without retaining the Graph, and needs to use the correct Tracer
        # to re-create the Graph during deserialization.
        if self.eval_graph._tracer_cls != self.train_graph._tracer_cls:
            raise TypeError(
                f"Train mode and eval mode should use the same tracer class. Instead got {self.eval_graph._tracer_cls} for eval vs {self.train_graph._tracer_cls} for train"
            )
        self._tracer_cls = None
        if self.graph._tracer_cls and "<locals>" not in self.graph._tracer_cls.__qualname__:
            self._tracer_cls = self.graph._tracer_cls

    def train(self, mode=True):
        """
        Swap out the graph depending on the selected training mode.
        NOTE this should be safe when calling model.eval() because that just
        calls this with mode == False.
        """
        # NOTE: Only set self.graph if the current graph is not the desired
        # one. This saves us from recompiling the graph where not necessary.
        if mode and not self.training:
            self.graph = self.train_graph
        elif not mode and self.training:
            self.graph = self.eval_graph
        return super().train(mode=mode)
332
+
333
+
334
def create_feature_extractor(
    model: nn.Module,
    return_nodes: Optional[Union[List[str], Dict[str, str]]] = None,
    train_return_nodes: Optional[Union[List[str], Dict[str, str]]] = None,
    eval_return_nodes: Optional[Union[List[str], Dict[str, str]]] = None,
    tracer_kwargs: Optional[Dict[str, Any]] = None,
    suppress_diff_warning: bool = False,
    concrete_args: Optional[Dict[str, Any]] = None,
) -> fx.GraphModule:
    """
    Creates a new graph module that returns intermediate nodes from a given
    model as dictionary with user specified keys as strings, and the requested
    outputs as values. This is achieved by re-writing the computation graph of
    the model via FX to return the desired nodes as outputs. All unused nodes
    are removed, together with their corresponding parameters.

    Desired output nodes must be specified as a ``.`` separated
    path walking the module hierarchy from top level module down to leaf
    operation or leaf module. For more details on the node naming conventions
    used here, please see the :ref:`relevant subheading <about-node-names>`
    in the `documentation <https://pytorch.org/vision/stable/feature_extraction.html>`_.

    Not all models will be FX traceable, although with some massaging they can
    be made to cooperate. Here's a (not exhaustive) list of tips:

        - If you don't need to trace through a particular, problematic
          sub-module, turn it into a "leaf module" by passing a list of
          ``leaf_modules`` as one of the ``tracer_kwargs`` (see example below).
          It will not be traced through, but rather, the resulting graph will
          hold a reference to that module's forward method.
        - Likewise, you may turn functions into leaf functions by passing a
          list of ``autowrap_functions`` as one of the ``tracer_kwargs`` (see
          example below).
        - Some inbuilt Python functions can be problematic. For instance,
          ``int`` will raise an error during tracing. You may wrap them in your
          own function and then pass that in ``autowrap_functions`` as one of
          the ``tracer_kwargs``.

    For further information on FX see the
    `torch.fx documentation <https://pytorch.org/docs/stable/fx.html>`_.

    Args:
        model (nn.Module): model on which we will extract the features
        return_nodes (list or dict, optional): either a ``List`` or a ``Dict``
            containing the names (or partial names - see note above)
            of the nodes for which the activations will be returned. If it is
            a ``Dict``, the keys are the node names, and the values
            are the user-specified keys for the graph module's returned
            dictionary. If it is a ``List``, it is treated as a ``Dict`` mapping
            node specification strings directly to output names. In the case
            that ``train_return_nodes`` and ``eval_return_nodes`` are specified,
            this should not be specified.
        train_return_nodes (list or dict, optional): similar to
            ``return_nodes``. This can be used if the return nodes
            for train mode are different than those from eval mode.
            If this is specified, ``eval_return_nodes`` must also be specified,
            and ``return_nodes`` should not be specified.
        eval_return_nodes (list or dict, optional): similar to
            ``return_nodes``. This can be used if the return nodes
            for train mode are different than those from eval mode.
            If this is specified, ``train_return_nodes`` must also be specified,
            and `return_nodes` should not be specified.
        tracer_kwargs (dict, optional): a dictionary of keyword arguments for
            ``NodePathTracer`` (which passes them onto it's parent class
            `torch.fx.Tracer <https://pytorch.org/docs/stable/fx.html#torch.fx.Tracer>`_).
            By default, it will be set to wrap and make leaf nodes all torchvision ops:
            {"autowrap_modules": (math, torchvision.ops,),"leaf_modules": _get_leaf_modules_for_ops(),}
            WARNING: In case the user provides tracer_kwargs, above default arguments will be appended to the user
            provided dictionary.
        suppress_diff_warning (bool, optional): whether to suppress a warning
            when there are discrepancies between the train and eval version of
            the graph. Defaults to False.
        concrete_args (Optional[Dict[str, any]]): Concrete arguments that should
            not be treated as Proxies. According to the `Pytorch docs
            <https://pytorch.org/docs/stable/fx.html#torch.fx.Tracer.trace>`_,
            this parameter's API may not be guaranteed.

    Examples::

        >>> # Feature extraction with resnet
        >>> model = torchvision.models.resnet18()
        >>> # extract layer1 and layer3, giving as names `feat1` and feat2`
        >>> model = create_feature_extractor(
        >>>     model, {'layer1': 'feat1', 'layer3': 'feat2'})
        >>> out = model(torch.rand(1, 3, 224, 224))
        >>> print([(k, v.shape) for k, v in out.items()])
        >>> [('feat1', torch.Size([1, 64, 56, 56])),
        >>>  ('feat2', torch.Size([1, 256, 14, 14]))]

        >>> # Specifying leaf modules and leaf functions
        >>> def leaf_function(x):
        >>>     # This would raise a TypeError if traced through
        >>>     return int(x)
        >>>
        >>> class LeafModule(torch.nn.Module):
        >>>     def forward(self, x):
        >>>         # This would raise a TypeError if traced through
        >>>         int(x.shape[0])
        >>>         return torch.nn.functional.relu(x + 4)
        >>>
        >>> class MyModule(torch.nn.Module):
        >>>     def __init__(self):
        >>>         super().__init__()
        >>>         self.conv = torch.nn.Conv2d(3, 1, 3)
        >>>         self.leaf_module = LeafModule()
        >>>
        >>>     def forward(self, x):
        >>>         leaf_function(x.shape[0])
        >>>         x = self.conv(x)
        >>>         return self.leaf_module(x)
        >>>
        >>> model = create_feature_extractor(
        >>>     MyModule(), return_nodes=['leaf_module'],
        >>>     tracer_kwargs={'leaf_modules': [LeafModule],
        >>>                    'autowrap_functions': [leaf_function]})

    """
    tracer_kwargs = _set_default_tracer_kwargs(tracer_kwargs)
    # Remember the caller's training mode; it is restored at the end after
    # the train/eval tracing passes below have flipped it.
    is_training = model.training

    # Validate the three mutually-constrained node arguments: either
    # `return_nodes` alone, or `train_return_nodes` + `eval_return_nodes`.
    if all(arg is None for arg in [return_nodes, train_return_nodes, eval_return_nodes]):
        raise ValueError(
            "Either `return_nodes` or `train_return_nodes` and `eval_return_nodes` together, should be specified"
        )

    if (train_return_nodes is None) ^ (eval_return_nodes is None):
        raise ValueError(
            "If any of `train_return_nodes` and `eval_return_nodes` are specified, then both should be specified"
        )

    if not ((return_nodes is None) ^ (train_return_nodes is None)):
        raise ValueError("If `train_return_nodes` and `eval_return_nodes` are specified, then both should be specified")

    # Put *_return_nodes into Dict[str, str] format
    def to_strdict(n) -> Dict[str, str]:
        if isinstance(n, list):
            return {str(i): str(i) for i in n}
        return {str(k): str(v) for k, v in n.items()}

    if train_return_nodes is None:
        return_nodes = to_strdict(return_nodes)
        # deepcopy because the per-mode dicts are mutated (pop) below.
        train_return_nodes = deepcopy(return_nodes)
        eval_return_nodes = deepcopy(return_nodes)
    else:
        train_return_nodes = to_strdict(train_return_nodes)
        eval_return_nodes = to_strdict(eval_return_nodes)

    # Repeat the tracing and graph rewriting for train and eval mode
    tracers = {}
    graphs = {}
    mode_return_nodes: Dict[str, Dict[str, str]] = {"train": train_return_nodes, "eval": eval_return_nodes}
    for mode in ["train", "eval"]:
        if mode == "train":
            model.train()
        elif mode == "eval":
            model.eval()

        # Instantiate our NodePathTracer and use that to trace the model
        tracer = NodePathTracer(**tracer_kwargs)
        graph = tracer.trace(model, concrete_args=concrete_args)

        name = model.__class__.__name__ if isinstance(model, nn.Module) else model.__name__
        graph_module = fx.GraphModule(tracer.root, graph, name)

        available_nodes = list(tracer.node_to_qualname.values())
        # FIXME We don't know if we should expect this to happen
        if len(set(available_nodes)) != len(available_nodes):
            raise ValueError(
                "There are duplicate nodes! Please raise an issue https://github.com/pytorch/vision/issues"
            )
        # Check that all outputs in return_nodes are present in the model
        for query in mode_return_nodes[mode].keys():
            # To check if a query is available we need to check that at least
            # one of the available names starts with it up to a .
            # NOTE(review): `query` is interpolated unescaped into the regex,
            # so regex metacharacters in a query ('.' matches any char) can
            # over-match — confirm whether queries should be re.escape()d.
            if not any([re.match(rf"^{query}(\.|$)", n) is not None for n in available_nodes]):
                raise ValueError(
                    f"node: '{query}' is not present in model. Hint: use "
                    "`get_graph_node_names` to make sure the "
                    "`return_nodes` you specified are present. It may even "
                    "be that you need to specify `train_return_nodes` and "
                    "`eval_return_nodes` separately."
                )

        # Remove existing output nodes (train mode)
        orig_output_nodes = []
        for n in reversed(graph_module.graph.nodes):
            if n.op == "output":
                orig_output_nodes.append(n)
        if not orig_output_nodes:
            raise ValueError("No output nodes found in graph_module.graph.nodes")

        for n in orig_output_nodes:
            graph_module.graph.erase_node(n)

        # Find nodes corresponding to return_nodes and make them into output_nodes
        nodes = [n for n in graph_module.graph.nodes]
        output_nodes = OrderedDict()
        # Walk the graph in reverse so each query binds to the LAST node whose
        # qualname matches it (the final output of a matched submodule).
        for n in reversed(nodes):
            module_qualname = tracer.node_to_qualname.get(n)
            if module_qualname is None:
                # NOTE - Know cases where this happens:
                # - Node representing creation of a tensor constant - probably
                #   not interesting as a return node
                # - When packing outputs into a named tuple like in InceptionV3
                continue
            for query in mode_return_nodes[mode]:
                depth = query.count(".")
                # Compare only the first `depth + 1` path components so a
                # query may name a whole submodule rather than a single op.
                if ".".join(module_qualname.split(".")[: depth + 1]) == query:
                    output_nodes[mode_return_nodes[mode][query]] = n
                    mode_return_nodes[mode].pop(query)
                    break
        # Reverse back so outputs appear in execution order.
        output_nodes = OrderedDict(reversed(list(output_nodes.items())))

        # And add them in the end of the graph
        with graph_module.graph.inserting_after(nodes[-1]):
            graph_module.graph.output(output_nodes)

        # Remove unused modules / parameters
        graph_module.graph.eliminate_dead_code()
        graph_module.recompile()

        # Keep track of the tracer and graph, so we can choose the main one
        tracers[mode] = tracer
        graphs[mode] = graph

    # Warn user if there are any discrepancies between the graphs of the
    # train and eval modes
    if not suppress_diff_warning:
        _warn_graph_differences(tracers["train"], tracers["eval"])

    # Build the final graph module
    graph_module = DualGraphModule(model, graphs["train"], graphs["eval"], class_name=name)

    # Restore original training mode
    model.train(is_training)
    graph_module.train(is_training)

    return graph_module
vllm/lib/python3.10/site-packages/torchvision/models/googlenet.py ADDED
@@ -0,0 +1,345 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+ from collections import namedtuple
3
+ from functools import partial
4
+ from typing import Any, Callable, List, Optional, Tuple
5
+
6
+ import torch
7
+ import torch.nn as nn
8
+ import torch.nn.functional as F
9
+ from torch import Tensor
10
+
11
+ from ..transforms._presets import ImageClassification
12
+ from ..utils import _log_api_usage_once
13
+ from ._api import register_model, Weights, WeightsEnum
14
+ from ._meta import _IMAGENET_CATEGORIES
15
+ from ._utils import _ovewrite_named_param, handle_legacy_interface
16
+
17
+
18
__all__ = ["GoogLeNet", "GoogLeNetOutputs", "_GoogLeNetOutputs", "GoogLeNet_Weights", "googlenet"]


# Container returned by GoogLeNet.forward. The aux fields hold the two
# auxiliary-classifier logits during training (when aux_logits is enabled)
# and are None otherwise.
GoogLeNetOutputs = namedtuple("GoogLeNetOutputs", ["logits", "aux_logits2", "aux_logits1"])
GoogLeNetOutputs.__annotations__ = {"logits": Tensor, "aux_logits2": Optional[Tensor], "aux_logits1": Optional[Tensor]}

# Script annotations failed with _GoogleNetOutputs = namedtuple ...
# _GoogLeNetOutputs set here for backwards compat
_GoogLeNetOutputs = GoogLeNetOutputs
27
+
28
+
29
class GoogLeNet(nn.Module):
    """GoogLeNet (Inception v1) classification network.

    A convolutional stem followed by nine ``Inception`` blocks. When
    ``aux_logits`` is True, two auxiliary classifier heads (``aux1``,
    ``aux2``) are attached mid-network; they are evaluated only in
    training mode.
    """

    __constants__ = ["aux_logits", "transform_input"]

    def __init__(
        self,
        num_classes: int = 1000,
        aux_logits: bool = True,
        transform_input: bool = False,
        init_weights: Optional[bool] = None,
        blocks: Optional[List[Callable[..., nn.Module]]] = None,
        dropout: float = 0.2,
        dropout_aux: float = 0.7,
    ) -> None:
        """
        Args:
            num_classes: size of the final classification layer.
            aux_logits: if True, build the two auxiliary classifier heads.
            transform_input: if True, re-normalize inputs from the standard
                ImageNet statistics to the range the original weights expect.
            init_weights: if True, apply truncated-normal init to conv/linear
                layers; None currently warns and behaves like True.
            blocks: optional [conv, inception, inception-aux] module factories
                allowing callers to swap the building-block implementations.
            dropout: dropout probability before the final ``fc`` layer.
            dropout_aux: dropout probability inside the auxiliary heads.
        """
        super().__init__()
        _log_api_usage_once(self)
        if blocks is None:
            blocks = [BasicConv2d, Inception, InceptionAux]
        if init_weights is None:
            warnings.warn(
                "The default weight initialization of GoogleNet will be changed in future releases of "
                "torchvision. If you wish to keep the old behavior (which leads to long initialization times"
                " due to scipy/scipy#11299), please set init_weights=True.",
                FutureWarning,
            )
            init_weights = True
        if len(blocks) != 3:
            raise ValueError(f"blocks length should be 3 instead of {len(blocks)}")
        conv_block = blocks[0]
        inception_block = blocks[1]
        inception_aux_block = blocks[2]

        self.aux_logits = aux_logits
        self.transform_input = transform_input

        # Stem: 7x7 conv followed by two reductions.
        self.conv1 = conv_block(3, 64, kernel_size=7, stride=2, padding=3)
        self.maxpool1 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
        self.conv2 = conv_block(64, 64, kernel_size=1)
        self.conv3 = conv_block(64, 192, kernel_size=3, padding=1)
        self.maxpool2 = nn.MaxPool2d(3, stride=2, ceil_mode=True)

        # Inception stacks; argument order is
        # (in, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj).
        self.inception3a = inception_block(192, 64, 96, 128, 16, 32, 32)
        self.inception3b = inception_block(256, 128, 128, 192, 32, 96, 64)
        self.maxpool3 = nn.MaxPool2d(3, stride=2, ceil_mode=True)

        self.inception4a = inception_block(480, 192, 96, 208, 16, 48, 64)
        self.inception4b = inception_block(512, 160, 112, 224, 24, 64, 64)
        self.inception4c = inception_block(512, 128, 128, 256, 24, 64, 64)
        self.inception4d = inception_block(512, 112, 144, 288, 32, 64, 64)
        self.inception4e = inception_block(528, 256, 160, 320, 32, 128, 128)
        self.maxpool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)

        self.inception5a = inception_block(832, 256, 160, 320, 32, 128, 128)
        self.inception5b = inception_block(832, 384, 192, 384, 48, 128, 128)

        # Auxiliary heads hang off inception4a (512 ch) and inception4d (528 ch).
        if aux_logits:
            self.aux1 = inception_aux_block(512, num_classes, dropout=dropout_aux)
            self.aux2 = inception_aux_block(528, num_classes, dropout=dropout_aux)
        else:
            self.aux1 = None  # type: ignore[assignment]
            self.aux2 = None  # type: ignore[assignment]

        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.dropout = nn.Dropout(p=dropout)
        self.fc = nn.Linear(1024, num_classes)

        if init_weights:
            for m in self.modules():
                if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
                    # Truncated normal (clipped at 2 std) per the reference impl.
                    torch.nn.init.trunc_normal_(m.weight, mean=0.0, std=0.01, a=-2, b=2)
                elif isinstance(m, nn.BatchNorm2d):
                    nn.init.constant_(m.weight, 1)
                    nn.init.constant_(m.bias, 0)

    def _transform_input(self, x: Tensor) -> Tensor:
        """Optionally re-normalize per-channel from ImageNet stats to [-1, 1]-style stats."""
        if self.transform_input:
            x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
            x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
            x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
            x = torch.cat((x_ch0, x_ch1, x_ch2), 1)
        return x

    def _forward(self, x: Tensor) -> Tuple[Tensor, Optional[Tensor], Optional[Tensor]]:
        """Run the backbone; returns (logits, aux2, aux1) — note the aux order."""
        # N x 3 x 224 x 224
        x = self.conv1(x)
        # N x 64 x 112 x 112
        x = self.maxpool1(x)
        # N x 64 x 56 x 56
        x = self.conv2(x)
        # N x 64 x 56 x 56
        x = self.conv3(x)
        # N x 192 x 56 x 56
        x = self.maxpool2(x)

        # N x 192 x 28 x 28
        x = self.inception3a(x)
        # N x 256 x 28 x 28
        x = self.inception3b(x)
        # N x 480 x 28 x 28
        x = self.maxpool3(x)
        # N x 480 x 14 x 14
        x = self.inception4a(x)
        # N x 512 x 14 x 14
        # Auxiliary heads only produce output in training mode.
        aux1: Optional[Tensor] = None
        if self.aux1 is not None:
            if self.training:
                aux1 = self.aux1(x)

        x = self.inception4b(x)
        # N x 512 x 14 x 14
        x = self.inception4c(x)
        # N x 512 x 14 x 14
        x = self.inception4d(x)
        # N x 528 x 14 x 14
        aux2: Optional[Tensor] = None
        if self.aux2 is not None:
            if self.training:
                aux2 = self.aux2(x)

        x = self.inception4e(x)
        # N x 832 x 14 x 14
        x = self.maxpool4(x)
        # N x 832 x 7 x 7
        x = self.inception5a(x)
        # N x 832 x 7 x 7
        x = self.inception5b(x)
        # N x 1024 x 7 x 7

        x = self.avgpool(x)
        # N x 1024 x 1 x 1
        x = torch.flatten(x, 1)
        # N x 1024
        x = self.dropout(x)
        x = self.fc(x)
        # N x 1000 (num_classes)
        return x, aux2, aux1

    @torch.jit.unused
    def eager_outputs(self, x: Tensor, aux2: Tensor, aux1: Optional[Tensor]) -> GoogLeNetOutputs:
        """In eager mode return the namedtuple only while training with aux heads."""
        if self.training and self.aux_logits:
            return _GoogLeNetOutputs(x, aux2, aux1)
        else:
            return x  # type: ignore[return-value]

    def forward(self, x: Tensor) -> GoogLeNetOutputs:
        x = self._transform_input(x)
        # NOTE(review): _forward returns (x, aux2, aux1) but is unpacked here as
        # (x, aux1, aux2), so the local names are swapped relative to _forward's;
        # the constructor call below swaps them back. This mirrors the upstream
        # code exactly — verify against torchvision before relying on field order.
        x, aux1, aux2 = self._forward(x)
        aux_defined = self.training and self.aux_logits
        if torch.jit.is_scripting():
            # TorchScript cannot return a union type, so it always returns the tuple.
            if not aux_defined:
                warnings.warn("Scripted GoogleNet always returns GoogleNetOutputs Tuple")
            return GoogLeNetOutputs(x, aux2, aux1)
        else:
            return self.eager_outputs(x, aux2, aux1)
182
+
183
+
184
class Inception(nn.Module):
    """GoogLeNet Inception block: four parallel branches concatenated on channels."""

    def __init__(
        self,
        in_channels: int,
        ch1x1: int,
        ch3x3red: int,
        ch3x3: int,
        ch5x5red: int,
        ch5x5: int,
        pool_proj: int,
        conv_block: Optional[Callable[..., nn.Module]] = None,
    ) -> None:
        super().__init__()
        block = BasicConv2d if conv_block is None else conv_block

        # Branch 1: plain 1x1 convolution.
        self.branch1 = block(in_channels, ch1x1, kernel_size=1)

        # Branch 2: 1x1 reduction followed by a 3x3 convolution.
        self.branch2 = nn.Sequential(
            block(in_channels, ch3x3red, kernel_size=1),
            block(ch3x3red, ch3x3, kernel_size=3, padding=1),
        )

        # Branch 3: 1x1 reduction followed by what the paper calls a 5x5 conv.
        # kernel_size=3 (instead of 5) is a known, deliberately preserved bug;
        # see https://github.com/pytorch/vision/issues/906 for details.
        self.branch3 = nn.Sequential(
            block(in_channels, ch5x5red, kernel_size=1),
            block(ch5x5red, ch5x5, kernel_size=3, padding=1),
        )

        # Branch 4: 3x3 max-pool followed by a 1x1 projection.
        self.branch4 = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1, ceil_mode=True),
            block(in_channels, pool_proj, kernel_size=1),
        )

    def _forward(self, x: Tensor) -> List[Tensor]:
        # Apply every branch to the same input and collect the results.
        return [self.branch1(x), self.branch2(x), self.branch3(x), self.branch4(x)]

    def forward(self, x: Tensor) -> Tensor:
        # Concatenate the branch outputs along the channel dimension.
        return torch.cat(self._forward(x), 1)
229
+
230
+
231
class InceptionAux(nn.Module):
    """Auxiliary classifier head used by GoogLeNet during training."""

    def __init__(
        self,
        in_channels: int,
        num_classes: int,
        conv_block: Optional[Callable[..., nn.Module]] = None,
        dropout: float = 0.7,
    ) -> None:
        super().__init__()
        block = BasicConv2d if conv_block is None else conv_block
        # 1x1 projection down to 128 channels.
        self.conv = block(in_channels, 128, kernel_size=1)

        # 128 channels * 4 * 4 spatial positions = 2048 flattened features.
        self.fc1 = nn.Linear(2048, 1024)
        self.fc2 = nn.Linear(1024, num_classes)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, x: Tensor) -> Tensor:
        # aux1 input: N x 512 x 14 x 14, aux2 input: N x 528 x 14 x 14
        out = F.adaptive_avg_pool2d(x, (4, 4))
        # N x in_channels x 4 x 4
        out = self.conv(out)
        # N x 128 x 4 x 4
        out = torch.flatten(out, 1)
        # N x 2048
        out = F.relu(self.fc1(out), inplace=True)
        # N x 1024
        out = self.dropout(out)
        # N x num_classes
        return self.fc2(out)
264
+
265
+
266
class BasicConv2d(nn.Module):
    """Elementary conv unit: Conv2d (no bias) -> BatchNorm2d -> ReLU."""

    def __init__(self, in_channels: int, out_channels: int, **kwargs: Any) -> None:
        super().__init__()
        # The conv bias is omitted because BatchNorm supplies its own shift.
        self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
        self.bn = nn.BatchNorm2d(out_channels, eps=0.001)

    def forward(self, x: Tensor) -> Tensor:
        return F.relu(self.bn(self.conv(x)), inplace=True)
276
+
277
+
278
class GoogLeNet_Weights(WeightsEnum):
    """Pretrained weight registry for :class:`GoogLeNet`.

    A single entry (ported from the original paper) with its preprocessing
    transform and metadata; ``DEFAULT`` aliases the best available weights.
    """

    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/googlenet-1378be20.pth",
        # Standard ImageNet eval preprocessing with a 224x224 center crop.
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            "num_params": 6624904,
            "min_size": (15, 15),
            "categories": _IMAGENET_CATEGORIES,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#googlenet",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 69.778,
                    "acc@5": 89.530,
                }
            },
            "_ops": 1.498,
            "_file_size": 49.731,
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1
299
+
300
+
301
@register_model()
@handle_legacy_interface(weights=("pretrained", GoogLeNet_Weights.IMAGENET1K_V1))
def googlenet(*, weights: Optional[GoogLeNet_Weights] = None, progress: bool = True, **kwargs: Any) -> GoogLeNet:
    """GoogLeNet (Inception v1) model architecture from
    `Going Deeper with Convolutions <http://arxiv.org/abs/1409.4842>`_.

    Args:
        weights (:class:`~torchvision.models.GoogLeNet_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.GoogLeNet_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.GoogLeNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/googlenet.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.GoogLeNet_Weights
        :members:
    """
    weights = GoogLeNet_Weights.verify(weights)

    # Remember whether the caller asked for aux heads (default False): the
    # checkpoint requires them to load, but they are stripped again afterwards
    # unless explicitly requested.
    original_aux_logits = kwargs.get("aux_logits", False)
    if weights is not None:
        if "transform_input" not in kwargs:
            _ovewrite_named_param(kwargs, "transform_input", True)
        # Force model hyper-parameters to match the checkpoint being loaded.
        _ovewrite_named_param(kwargs, "aux_logits", True)
        _ovewrite_named_param(kwargs, "init_weights", False)
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))

    model = GoogLeNet(**kwargs)

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
        if not original_aux_logits:
            # Drop the aux heads the caller did not ask for.
            model.aux_logits = False
            model.aux1 = None  # type: ignore[assignment]
            model.aux2 = None  # type: ignore[assignment]
        else:
            # The published checkpoint carries no trained aux-head parameters.
            warnings.warn(
                "auxiliary heads in the pretrained googlenet model are NOT pretrained, so make sure to train them"
            )

    return model
vllm/lib/python3.10/site-packages/torchvision/models/inception.py ADDED
@@ -0,0 +1,478 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+ from collections import namedtuple
3
+ from functools import partial
4
+ from typing import Any, Callable, List, Optional, Tuple
5
+
6
+ import torch
7
+ import torch.nn.functional as F
8
+ from torch import nn, Tensor
9
+
10
+ from ..transforms._presets import ImageClassification
11
+ from ..utils import _log_api_usage_once
12
+ from ._api import register_model, Weights, WeightsEnum
13
+ from ._meta import _IMAGENET_CATEGORIES
14
+ from ._utils import _ovewrite_named_param, handle_legacy_interface
15
+
16
+
17
__all__ = ["Inception3", "InceptionOutputs", "_InceptionOutputs", "Inception_V3_Weights", "inception_v3"]


# Container returned by Inception3.forward; aux_logits holds the auxiliary
# classifier output during training (when enabled) and None otherwise.
InceptionOutputs = namedtuple("InceptionOutputs", ["logits", "aux_logits"])
InceptionOutputs.__annotations__ = {"logits": Tensor, "aux_logits": Optional[Tensor]}

# Script annotations failed with _GoogleNetOutputs = namedtuple ...
# _InceptionOutputs set here for backwards compat
_InceptionOutputs = InceptionOutputs
26
+
27
+
28
class Inception3(nn.Module):
    """Inception v3 classification network.

    Expects inputs of size N x 3 x 299 x 299. When ``aux_logits`` is True an
    auxiliary classifier (``AuxLogits``) branches off the 17x17 feature map
    and is evaluated only in training mode.
    """

    def __init__(
        self,
        num_classes: int = 1000,
        aux_logits: bool = True,
        transform_input: bool = False,
        inception_blocks: Optional[List[Callable[..., nn.Module]]] = None,
        init_weights: Optional[bool] = None,
        dropout: float = 0.5,
    ) -> None:
        """
        Args:
            num_classes: size of the final classification layer.
            aux_logits: if True, build the auxiliary classifier branch.
            transform_input: if True, re-normalize inputs from standard
                ImageNet statistics to the range the original weights expect.
            inception_blocks: optional list of 7 module factories
                [conv, A, B, C, D, E, aux] overriding the default blocks.
            init_weights: if True, apply truncated-normal init; None currently
                warns and behaves like True.
            dropout: dropout probability before the final ``fc`` layer.
        """
        super().__init__()
        _log_api_usage_once(self)
        if inception_blocks is None:
            inception_blocks = [BasicConv2d, InceptionA, InceptionB, InceptionC, InceptionD, InceptionE, InceptionAux]
        if init_weights is None:
            warnings.warn(
                "The default weight initialization of inception_v3 will be changed in future releases of "
                "torchvision. If you wish to keep the old behavior (which leads to long initialization times"
                " due to scipy/scipy#11299), please set init_weights=True.",
                FutureWarning,
            )
            init_weights = True
        if len(inception_blocks) != 7:
            raise ValueError(f"length of inception_blocks should be 7 instead of {len(inception_blocks)}")
        conv_block = inception_blocks[0]
        inception_a = inception_blocks[1]
        inception_b = inception_blocks[2]
        inception_c = inception_blocks[3]
        inception_d = inception_blocks[4]
        inception_e = inception_blocks[5]
        inception_aux = inception_blocks[6]

        self.aux_logits = aux_logits
        self.transform_input = transform_input
        # Stem: progressive reductions 299 -> 35 spatial resolution.
        self.Conv2d_1a_3x3 = conv_block(3, 32, kernel_size=3, stride=2)
        self.Conv2d_2a_3x3 = conv_block(32, 32, kernel_size=3)
        self.Conv2d_2b_3x3 = conv_block(32, 64, kernel_size=3, padding=1)
        self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2)
        self.Conv2d_3b_1x1 = conv_block(64, 80, kernel_size=1)
        self.Conv2d_4a_3x3 = conv_block(80, 192, kernel_size=3)
        self.maxpool2 = nn.MaxPool2d(kernel_size=3, stride=2)
        # 35x35 blocks.
        self.Mixed_5b = inception_a(192, pool_features=32)
        self.Mixed_5c = inception_a(256, pool_features=64)
        self.Mixed_5d = inception_a(288, pool_features=64)
        # Reduction to 17x17, then the factorized-7x7 blocks.
        self.Mixed_6a = inception_b(288)
        self.Mixed_6b = inception_c(768, channels_7x7=128)
        self.Mixed_6c = inception_c(768, channels_7x7=160)
        self.Mixed_6d = inception_c(768, channels_7x7=160)
        self.Mixed_6e = inception_c(768, channels_7x7=192)
        self.AuxLogits: Optional[nn.Module] = None
        if aux_logits:
            self.AuxLogits = inception_aux(768, num_classes)
        # Reduction to 8x8, then the widest blocks.
        self.Mixed_7a = inception_d(768)
        self.Mixed_7b = inception_e(1280)
        self.Mixed_7c = inception_e(2048)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.dropout = nn.Dropout(p=dropout)
        self.fc = nn.Linear(2048, num_classes)
        if init_weights:
            for m in self.modules():
                if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
                    # Modules (e.g. InceptionAux layers) may carry a custom
                    # `stddev` attribute used here as the init scale.
                    stddev = float(m.stddev) if hasattr(m, "stddev") else 0.1  # type: ignore
                    torch.nn.init.trunc_normal_(m.weight, mean=0.0, std=stddev, a=-2, b=2)
                elif isinstance(m, nn.BatchNorm2d):
                    nn.init.constant_(m.weight, 1)
                    nn.init.constant_(m.bias, 0)

    def _transform_input(self, x: Tensor) -> Tensor:
        """Optionally re-normalize per-channel from ImageNet stats to [-1, 1]-style stats."""
        if self.transform_input:
            x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
            x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
            x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
            x = torch.cat((x_ch0, x_ch1, x_ch2), 1)
        return x

    def _forward(self, x: Tensor) -> Tuple[Tensor, Optional[Tensor]]:
        """Run the backbone; returns (logits, aux-logits-or-None)."""
        # N x 3 x 299 x 299
        x = self.Conv2d_1a_3x3(x)
        # N x 32 x 149 x 149
        x = self.Conv2d_2a_3x3(x)
        # N x 32 x 147 x 147
        x = self.Conv2d_2b_3x3(x)
        # N x 64 x 147 x 147
        x = self.maxpool1(x)
        # N x 64 x 73 x 73
        x = self.Conv2d_3b_1x1(x)
        # N x 80 x 73 x 73
        x = self.Conv2d_4a_3x3(x)
        # N x 192 x 71 x 71
        x = self.maxpool2(x)
        # N x 192 x 35 x 35
        x = self.Mixed_5b(x)
        # N x 256 x 35 x 35
        x = self.Mixed_5c(x)
        # N x 288 x 35 x 35
        x = self.Mixed_5d(x)
        # N x 288 x 35 x 35
        x = self.Mixed_6a(x)
        # N x 768 x 17 x 17
        x = self.Mixed_6b(x)
        # N x 768 x 17 x 17
        x = self.Mixed_6c(x)
        # N x 768 x 17 x 17
        x = self.Mixed_6d(x)
        # N x 768 x 17 x 17
        x = self.Mixed_6e(x)
        # N x 768 x 17 x 17
        # Auxiliary head only produces output in training mode.
        aux: Optional[Tensor] = None
        if self.AuxLogits is not None:
            if self.training:
                aux = self.AuxLogits(x)
        # N x 768 x 17 x 17
        x = self.Mixed_7a(x)
        # N x 1280 x 8 x 8
        x = self.Mixed_7b(x)
        # N x 2048 x 8 x 8
        x = self.Mixed_7c(x)
        # N x 2048 x 8 x 8
        # Adaptive average pooling
        x = self.avgpool(x)
        # N x 2048 x 1 x 1
        x = self.dropout(x)
        # N x 2048 x 1 x 1
        x = torch.flatten(x, 1)
        # N x 2048
        x = self.fc(x)
        # N x 1000 (num_classes)
        return x, aux

    @torch.jit.unused
    def eager_outputs(self, x: Tensor, aux: Optional[Tensor]) -> InceptionOutputs:
        """In eager mode return the namedtuple only while training with aux logits."""
        if self.training and self.aux_logits:
            return InceptionOutputs(x, aux)
        else:
            return x  # type: ignore[return-value]

    def forward(self, x: Tensor) -> InceptionOutputs:
        x = self._transform_input(x)
        x, aux = self._forward(x)
        aux_defined = self.training and self.aux_logits
        if torch.jit.is_scripting():
            # TorchScript cannot return a union type, so it always returns the tuple.
            if not aux_defined:
                warnings.warn("Scripted Inception3 always returns Inception3 Tuple")
            return InceptionOutputs(x, aux)
        else:
            return self.eager_outputs(x, aux)
174
+
175
+
176
class InceptionA(nn.Module):
    """Inception-v3 "A" block: four branches, spatial size preserved."""

    def __init__(
        self, in_channels: int, pool_features: int, conv_block: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super().__init__()
        block = BasicConv2d if conv_block is None else conv_block

        self.branch1x1 = block(in_channels, 64, kernel_size=1)

        # 1x1 reduction then a 5x5 convolution.
        self.branch5x5_1 = block(in_channels, 48, kernel_size=1)
        self.branch5x5_2 = block(48, 64, kernel_size=5, padding=2)

        # Two stacked 3x3 convs (after a 1x1 reduction).
        self.branch3x3dbl_1 = block(in_channels, 64, kernel_size=1)
        self.branch3x3dbl_2 = block(64, 96, kernel_size=3, padding=1)
        self.branch3x3dbl_3 = block(96, 96, kernel_size=3, padding=1)

        # Average-pool followed by a 1x1 projection.
        self.branch_pool = block(in_channels, pool_features, kernel_size=1)

    def _forward(self, x: Tensor) -> List[Tensor]:
        simple = self.branch1x1(x)
        five = self.branch5x5_2(self.branch5x5_1(x))
        double = self.branch3x3dbl_3(self.branch3x3dbl_2(self.branch3x3dbl_1(x)))
        pooled = self.branch_pool(F.avg_pool2d(x, kernel_size=3, stride=1, padding=1))
        return [simple, five, double, pooled]

    def forward(self, x: Tensor) -> Tensor:
        # Concatenate all branches along the channel dimension.
        return torch.cat(self._forward(x), 1)
213
+
214
+
215
class InceptionB(nn.Module):
    """Inception-v3 grid-reduction block: every branch halves the spatial size."""

    def __init__(self, in_channels: int, conv_block: Optional[Callable[..., nn.Module]] = None) -> None:
        super().__init__()
        block = BasicConv2d if conv_block is None else conv_block

        # Single strided 3x3 convolution.
        self.branch3x3 = block(in_channels, 384, kernel_size=3, stride=2)

        # 1x1 reduction, a 3x3 conv, then a strided 3x3 conv.
        self.branch3x3dbl_1 = block(in_channels, 64, kernel_size=1)
        self.branch3x3dbl_2 = block(64, 96, kernel_size=3, padding=1)
        self.branch3x3dbl_3 = block(96, 96, kernel_size=3, stride=2)

    def _forward(self, x: Tensor) -> List[Tensor]:
        single = self.branch3x3(x)
        double = self.branch3x3dbl_3(self.branch3x3dbl_2(self.branch3x3dbl_1(x)))
        # Strided max-pool passes the input channels through unchanged.
        pooled = F.max_pool2d(x, kernel_size=3, stride=2)
        return [single, double, pooled]

    def forward(self, x: Tensor) -> Tensor:
        return torch.cat(self._forward(x), 1)
241
+
242
+
243
class InceptionC(nn.Module):
    """Inception-v3 "C" block: 7x7 convolutions factorized into 1x7 and 7x1."""

    def __init__(
        self, in_channels: int, channels_7x7: int, conv_block: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super().__init__()
        block = BasicConv2d if conv_block is None else conv_block

        self.branch1x1 = block(in_channels, 192, kernel_size=1)

        c7 = channels_7x7
        # One factorized 7x7: 1x7 followed by 7x1.
        self.branch7x7_1 = block(in_channels, c7, kernel_size=1)
        self.branch7x7_2 = block(c7, c7, kernel_size=(1, 7), padding=(0, 3))
        self.branch7x7_3 = block(c7, 192, kernel_size=(7, 1), padding=(3, 0))

        # Two factorized 7x7 convolutions stacked.
        self.branch7x7dbl_1 = block(in_channels, c7, kernel_size=1)
        self.branch7x7dbl_2 = block(c7, c7, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7dbl_3 = block(c7, c7, kernel_size=(1, 7), padding=(0, 3))
        self.branch7x7dbl_4 = block(c7, c7, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7dbl_5 = block(c7, 192, kernel_size=(1, 7), padding=(0, 3))

        self.branch_pool = block(in_channels, 192, kernel_size=1)

    def _forward(self, x: Tensor) -> List[Tensor]:
        simple = self.branch1x1(x)

        single = self.branch7x7_3(self.branch7x7_2(self.branch7x7_1(x)))

        double = self.branch7x7dbl_1(x)
        double = self.branch7x7dbl_2(double)
        double = self.branch7x7dbl_3(double)
        double = self.branch7x7dbl_4(double)
        double = self.branch7x7dbl_5(double)

        pooled = self.branch_pool(F.avg_pool2d(x, kernel_size=3, stride=1, padding=1))
        return [simple, single, double, pooled]

    def forward(self, x: Tensor) -> Tensor:
        return torch.cat(self._forward(x), 1)
287
+
288
+
289
class InceptionD(nn.Module):
    """Inception-v3 grid-reduction block: strided branches halve the spatial size."""

    def __init__(self, in_channels: int, conv_block: Optional[Callable[..., nn.Module]] = None) -> None:
        super().__init__()
        block = BasicConv2d if conv_block is None else conv_block

        # 1x1 reduction then a strided 3x3 conv.
        self.branch3x3_1 = block(in_channels, 192, kernel_size=1)
        self.branch3x3_2 = block(192, 320, kernel_size=3, stride=2)

        # Factorized 7x7 (1x7 + 7x1) then a strided 3x3 conv.
        self.branch7x7x3_1 = block(in_channels, 192, kernel_size=1)
        self.branch7x7x3_2 = block(192, 192, kernel_size=(1, 7), padding=(0, 3))
        self.branch7x7x3_3 = block(192, 192, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7x3_4 = block(192, 192, kernel_size=3, stride=2)

    def _forward(self, x: Tensor) -> List[Tensor]:
        short = self.branch3x3_2(self.branch3x3_1(x))

        long = self.branch7x7x3_1(x)
        long = self.branch7x7x3_2(long)
        long = self.branch7x7x3_3(long)
        long = self.branch7x7x3_4(long)

        # Strided max-pool passes the input channels through unchanged.
        pooled = F.max_pool2d(x, kernel_size=3, stride=2)
        return [short, long, pooled]

    def forward(self, x: Tensor) -> Tensor:
        return torch.cat(self._forward(x), 1)
318
+
319
+
320
class InceptionE(nn.Module):
    """Inception-v3 "E" block: widest block with split asymmetric 3x3 outputs."""

    def __init__(self, in_channels: int, conv_block: Optional[Callable[..., nn.Module]] = None) -> None:
        super().__init__()
        block = BasicConv2d if conv_block is None else conv_block

        self.branch1x1 = block(in_channels, 320, kernel_size=1)

        # 1x1 reduction, then two parallel asymmetric convs whose outputs
        # are concatenated.
        self.branch3x3_1 = block(in_channels, 384, kernel_size=1)
        self.branch3x3_2a = block(384, 384, kernel_size=(1, 3), padding=(0, 1))
        self.branch3x3_2b = block(384, 384, kernel_size=(3, 1), padding=(1, 0))

        # 1x1 reduction, a 3x3 conv, then the same parallel split.
        self.branch3x3dbl_1 = block(in_channels, 448, kernel_size=1)
        self.branch3x3dbl_2 = block(448, 384, kernel_size=3, padding=1)
        self.branch3x3dbl_3a = block(384, 384, kernel_size=(1, 3), padding=(0, 1))
        self.branch3x3dbl_3b = block(384, 384, kernel_size=(3, 1), padding=(1, 0))

        self.branch_pool = block(in_channels, 192, kernel_size=1)

    def _forward(self, x: Tensor) -> List[Tensor]:
        simple = self.branch1x1(x)

        stem = self.branch3x3_1(x)
        split = torch.cat([self.branch3x3_2a(stem), self.branch3x3_2b(stem)], 1)

        stem_dbl = self.branch3x3dbl_2(self.branch3x3dbl_1(x))
        split_dbl = torch.cat([self.branch3x3dbl_3a(stem_dbl), self.branch3x3dbl_3b(stem_dbl)], 1)

        pooled = self.branch_pool(F.avg_pool2d(x, kernel_size=3, stride=1, padding=1))
        return [simple, split, split_dbl, pooled]

    def forward(self, x: Tensor) -> Tensor:
        return torch.cat(self._forward(x), 1)
365
+
366
+
367
class InceptionAux(nn.Module):
    """Auxiliary classifier attached to Inception3's 17x17 feature map."""

    def __init__(
        self, in_channels: int, num_classes: int, conv_block: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super().__init__()
        block = BasicConv2d if conv_block is None else conv_block
        self.conv0 = block(in_channels, 128, kernel_size=1)
        self.conv1 = block(128, 768, kernel_size=5)
        # `stddev` attributes are read by Inception3's weight-initialization loop.
        self.conv1.stddev = 0.01  # type: ignore[assignment]
        self.fc = nn.Linear(768, num_classes)
        self.fc.stddev = 0.001  # type: ignore[assignment]

    def forward(self, x: Tensor) -> Tensor:
        # N x in_channels x 17 x 17 -> N x in_channels x 5 x 5
        out = F.avg_pool2d(x, kernel_size=5, stride=3)
        # N x 128 x 5 x 5
        out = self.conv0(out)
        # N x 768 x 1 x 1
        out = self.conv1(out)
        # Adaptive average pooling keeps the 1x1 spatial size explicit.
        out = F.adaptive_avg_pool2d(out, (1, 1))
        # N x 768
        out = torch.flatten(out, 1)
        # N x num_classes
        return self.fc(out)
396
+
397
+
398
class BasicConv2d(nn.Module):
    """Elementary conv unit: Conv2d (no bias) -> BatchNorm2d -> ReLU."""

    def __init__(self, in_channels: int, out_channels: int, **kwargs: Any) -> None:
        super().__init__()
        # The conv bias is omitted because BatchNorm supplies its own shift.
        self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
        self.bn = nn.BatchNorm2d(out_channels, eps=0.001)

    def forward(self, x: Tensor) -> Tensor:
        return F.relu(self.bn(self.conv(x)), inplace=True)
408
+
409
+
410
class Inception_V3_Weights(WeightsEnum):
    """Pretrained weight registry for :class:`Inception3`.

    A single entry (ported from the original paper) with its preprocessing
    transform and metadata; ``DEFAULT`` aliases the best available weights.
    """

    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/inception_v3_google-0cc3c7bd.pth",
        # Inception v3 uses a 299x299 crop (resized from 342) rather than 224.
        transforms=partial(ImageClassification, crop_size=299, resize_size=342),
        meta={
            "num_params": 27161264,
            "min_size": (75, 75),
            "categories": _IMAGENET_CATEGORIES,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#inception-v3",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 77.294,
                    "acc@5": 93.450,
                }
            },
            "_ops": 5.713,
            "_file_size": 103.903,
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1
431
+
432
+
433
@register_model()
@handle_legacy_interface(weights=("pretrained", Inception_V3_Weights.IMAGENET1K_V1))
def inception_v3(*, weights: Optional[Inception_V3_Weights] = None, progress: bool = True, **kwargs: Any) -> Inception3:
    """
    Inception v3 model architecture from
    `Rethinking the Inception Architecture for Computer Vision <http://arxiv.org/abs/1512.00567>`_.

    .. note::
        **Important**: In contrast to the other models the inception_v3 expects tensors with a size of
        N x 3 x 299 x 299, so ensure your images are sized accordingly.

    Args:
        weights (:class:`~torchvision.models.Inception_V3_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.Inception_V3_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.Inception3``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/inception.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.Inception_V3_Weights
        :members:
    """
    weights = Inception_V3_Weights.verify(weights)

    # Remember the caller's aux_logits choice (defaults to True here, unlike
    # googlenet): the checkpoint requires the aux branch to load, and it is
    # removed afterwards only if the caller opted out.
    original_aux_logits = kwargs.get("aux_logits", True)
    if weights is not None:
        if "transform_input" not in kwargs:
            _ovewrite_named_param(kwargs, "transform_input", True)
        # Force model hyper-parameters to match the checkpoint being loaded.
        _ovewrite_named_param(kwargs, "aux_logits", True)
        _ovewrite_named_param(kwargs, "init_weights", False)
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))

    model = Inception3(**kwargs)

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
        if not original_aux_logits:
            # Drop the aux branch the caller did not ask for.
            model.aux_logits = False
            model.AuxLogits = None

    return model
vllm/lib/python3.10/site-packages/torchvision/models/maxvit.py ADDED
@@ -0,0 +1,833 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ from collections import OrderedDict
3
+ from functools import partial
4
+ from typing import Any, Callable, List, Optional, Sequence, Tuple
5
+
6
+ import numpy as np
7
+ import torch
8
+ import torch.nn.functional as F
9
+ from torch import nn, Tensor
10
+ from torchvision.models._api import register_model, Weights, WeightsEnum
11
+ from torchvision.models._meta import _IMAGENET_CATEGORIES
12
+ from torchvision.models._utils import _ovewrite_named_param, handle_legacy_interface
13
+ from torchvision.ops.misc import Conv2dNormActivation, SqueezeExcitation
14
+ from torchvision.ops.stochastic_depth import StochasticDepth
15
+ from torchvision.transforms._presets import ImageClassification, InterpolationMode
16
+ from torchvision.utils import _log_api_usage_once
17
+
18
+ __all__ = [
19
+ "MaxVit",
20
+ "MaxVit_T_Weights",
21
+ "maxvit_t",
22
+ ]
23
+
24
+
25
+ def _get_conv_output_shape(input_size: Tuple[int, int], kernel_size: int, stride: int, padding: int) -> Tuple[int, int]:
26
+ return (
27
+ (input_size[0] - kernel_size + 2 * padding) // stride + 1,
28
+ (input_size[1] - kernel_size + 2 * padding) // stride + 1,
29
+ )
30
+
31
+
32
def _make_block_input_shapes(input_size: Tuple[int, int], n_blocks: int) -> List[Tuple[int, int]]:
    """Util function to check that the input size is correct for a MaxVit configuration.

    The stem halves the resolution once; each block halves it again. Returns the
    input resolution of every block (i.e. the shape *after* each block's stride-2 layer).
    """
    shapes: List[Tuple[int, int]] = []
    current = input_size
    # stage 0 is the stem; stages 1..n_blocks are the blocks themselves
    for stage in range(n_blocks + 1):
        current = _get_conv_output_shape(current, 3, 2, 1)
        if stage > 0:
            shapes.append(current)
    return shapes
40
+
41
+
42
+ def _get_relative_position_index(height: int, width: int) -> torch.Tensor:
43
+ coords = torch.stack(torch.meshgrid([torch.arange(height), torch.arange(width)]))
44
+ coords_flat = torch.flatten(coords, 1)
45
+ relative_coords = coords_flat[:, :, None] - coords_flat[:, None, :]
46
+ relative_coords = relative_coords.permute(1, 2, 0).contiguous()
47
+ relative_coords[:, :, 0] += height - 1
48
+ relative_coords[:, :, 1] += width - 1
49
+ relative_coords[:, :, 0] *= 2 * width - 1
50
+ return relative_coords.sum(-1)
51
+
52
+
53
class MBConv(nn.Module):
    """MBConv: Mobile Inverted Residual Bottleneck.

    Structure: residual projection + (pre-norm -> 1x1 expand -> 3x3 depthwise ->
    squeeze-excitation -> 1x1 project), with optional stochastic depth on the
    main branch.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        expansion_ratio (float): Expansion ratio in the bottleneck.
        squeeze_ratio (float): Squeeze ratio in the SE Layer.
        stride (int): Stride of the depthwise convolution.
        activation_layer (Callable[..., nn.Module]): Activation function.
        norm_layer (Callable[..., nn.Module]): Normalization function.
        p_stochastic_dropout (float): Probability of stochastic depth.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        expansion_ratio: float,
        squeeze_ratio: float,
        stride: int,
        activation_layer: Callable[..., nn.Module],
        norm_layer: Callable[..., nn.Module],
        p_stochastic_dropout: float = 0.0,
    ) -> None:
        super().__init__()

        proj: Sequence[nn.Module]
        self.proj: nn.Module

        # A projection on the residual path is needed whenever the main branch
        # changes the spatial resolution (stride != 1) or the channel count.
        should_proj = stride != 1 or in_channels != out_channels
        if should_proj:
            proj = [nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=True)]
            if stride == 2:
                # downsample with average pooling before the 1x1 channel projection
                proj = [nn.AvgPool2d(kernel_size=3, stride=stride, padding=1)] + proj  # type: ignore
            self.proj = nn.Sequential(*proj)
        else:
            self.proj = nn.Identity()  # type: ignore

        # NOTE(review): both the expansion and squeeze widths are derived from
        # out_channels (not in_channels) here.
        mid_channels = int(out_channels * expansion_ratio)
        sqz_channels = int(out_channels * squeeze_ratio)

        if p_stochastic_dropout:
            self.stochastic_depth = StochasticDepth(p_stochastic_dropout, mode="row")  # type: ignore
        else:
            self.stochastic_depth = nn.Identity()  # type: ignore

        # Keys of this OrderedDict define the state_dict layout — do not rename.
        _layers = OrderedDict()
        _layers["pre_norm"] = norm_layer(in_channels)
        # 1x1 pointwise expansion to mid_channels
        _layers["conv_a"] = Conv2dNormActivation(
            in_channels,
            mid_channels,
            kernel_size=1,
            stride=1,
            padding=0,
            activation_layer=activation_layer,
            norm_layer=norm_layer,
            inplace=None,
        )
        # 3x3 depthwise conv (groups == channels); carries the block's stride
        _layers["conv_b"] = Conv2dNormActivation(
            mid_channels,
            mid_channels,
            kernel_size=3,
            stride=stride,
            padding=1,
            activation_layer=activation_layer,
            norm_layer=norm_layer,
            groups=mid_channels,
            inplace=None,
        )
        _layers["squeeze_excitation"] = SqueezeExcitation(mid_channels, sqz_channels, activation=nn.SiLU)
        # 1x1 pointwise projection back down to out_channels (no norm/activation)
        _layers["conv_c"] = nn.Conv2d(in_channels=mid_channels, out_channels=out_channels, kernel_size=1, bias=True)

        self.layers = nn.Sequential(_layers)

    def forward(self, x: Tensor) -> Tensor:
        """
        Args:
            x (Tensor): Input tensor with expected layout of [B, C, H, W].
        Returns:
            Tensor: Output tensor with expected layout of [B, C, H / stride, W / stride].
        """
        res = self.proj(x)
        # stochastic depth randomly zeroes the whole main branch per sample
        x = self.stochastic_depth(self.layers(x))
        return res + x
138
+
139
+
140
class RelativePositionalMultiHeadAttention(nn.Module):
    """Multi-head self-attention with a learned relative positional bias.

    Args:
        feat_dim (int): Number of input features.
        head_dim (int): Number of features per head.
        max_seq_len (int): Maximum sequence length (a perfect square, one P*P window).
    """

    def __init__(
        self,
        feat_dim: int,
        head_dim: int,
        max_seq_len: int,
    ) -> None:
        super().__init__()

        if feat_dim % head_dim != 0:
            raise ValueError(f"feat_dim: {feat_dim} must be divisible by head_dim: {head_dim}")

        self.n_heads = feat_dim // head_dim
        self.head_dim = head_dim
        self.size = int(math.sqrt(max_seq_len))
        self.max_seq_len = max_seq_len

        # single projection producing queries, keys and values in one pass
        self.to_qkv = nn.Linear(feat_dim, self.n_heads * self.head_dim * 3)
        self.scale_factor = feat_dim**-0.5

        self.merge = nn.Linear(self.head_dim * self.n_heads, feat_dim)
        # one learned bias per (relative offset, head) pair
        self.relative_position_bias_table = nn.parameter.Parameter(
            torch.empty(((2 * self.size - 1) * (2 * self.size - 1), self.n_heads), dtype=torch.float32),
        )

        self.register_buffer("relative_position_index", _get_relative_position_index(self.size, self.size))
        # initialize the bias table with a truncated normal
        torch.nn.init.trunc_normal_(self.relative_position_bias_table, std=0.02)

    def get_relative_positional_bias(self) -> torch.Tensor:
        """Gather the bias table into a [1, H, P, P] tensor ready to add to attention scores."""
        flat_index = self.relative_position_index.view(-1)  # type: ignore
        bias = self.relative_position_bias_table[flat_index]  # type: ignore
        bias = bias.view(self.max_seq_len, self.max_seq_len, -1).permute(2, 0, 1).contiguous()
        return bias.unsqueeze(0)

    def forward(self, x: Tensor) -> Tensor:
        """
        Args:
            x (Tensor): Input tensor with expected layout of [B, G, P, D].
        Returns:
            Tensor: Output tensor with expected layout of [B, G, P, D].
        """
        batch, n_groups, seq_len, feat_dim = x.shape

        # Project once, then split into per-head q/k/v:
        # [B, G, P, 3*H*DH] -> [3, B, G, H, P, DH] -> three [B, G, H, P, DH]
        qkv = self.to_qkv(x).reshape(batch, n_groups, seq_len, 3, self.n_heads, self.head_dim)
        q, k, v = qkv.permute(3, 0, 1, 4, 2, 5).unbind(0)

        # scaled dot-product scores plus the relative positional bias
        scores = torch.matmul(q, (k * self.scale_factor).transpose(-2, -1))
        attn = F.softmax(scores + self.get_relative_positional_bias(), dim=-1)

        out = torch.matmul(attn, v)
        # fold the heads back into the feature dimension
        out = out.permute(0, 1, 3, 2, 4).reshape(batch, n_groups, seq_len, feat_dim)
        return self.merge(out)
211
+
212
+
213
class SwapAxes(nn.Module):
    """Module wrapper around ``torch.swapaxes`` exchanging axes ``a`` and ``b``."""

    def __init__(self, a: int, b: int) -> None:
        super().__init__()
        self.a = a
        self.b = b

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.swapaxes(x, self.a, self.b)
224
+
225
+
226
class WindowPartition(nn.Module):
    """
    Partition the input tensor into non-overlapping windows of size ``p`` x ``p``.
    """

    def __init__(self) -> None:
        super().__init__()

    def forward(self, x: Tensor, p: int) -> Tensor:
        """
        Args:
            x (Tensor): Input tensor with expected layout of [B, C, H, W].
            p (int): Number of partitions.
        Returns:
            Tensor: Output tensor with expected layout of [B, (H/P)*(W/P), P*P, C].
        """
        batch, channels, height, width = x.shape
        tiles_h, tiles_w = height // p, width // p
        # split each spatial axis into (tiles, p) and move channels last
        tiled = x.reshape(batch, channels, tiles_h, p, tiles_w, p)
        tiled = tiled.permute(0, 2, 4, 3, 5, 1)
        # flatten the tile grid into one axis and each p*p window into another
        return tiled.reshape(batch, tiles_h * tiles_w, p * p, channels)
250
+
251
+
252
class WindowDepartition(nn.Module):
    """
    Inverse of ``WindowPartition``: fold non-overlapping windows back into a
    feature volume of layout [B, C, H, W].
    """

    def __init__(self) -> None:
        super().__init__()

    def forward(self, x: Tensor, p: int, h_partitions: int, w_partitions: int) -> Tensor:
        """
        Args:
            x (Tensor): Input tensor with expected layout of [B, (H/P * W/P), P*P, C].
            p (int): Number of partitions.
            h_partitions (int): Number of vertical partitions.
            w_partitions (int): Number of horizontal partitions.
        Returns:
            Tensor: Output tensor with expected layout of [B, C, H, W].
        """
        batch, _, _, channels = x.shape
        # un-flatten the tile-grid axis and the p*p window axis
        tiled = x.reshape(batch, h_partitions, w_partitions, p, p, channels)
        # bring channels forward and interleave tile / intra-tile axes
        tiled = tiled.permute(0, 5, 1, 3, 2, 4)
        # merge (tiles, p) pairs back into full H and W
        return tiled.reshape(batch, channels, h_partitions * p, w_partitions * p)
280
+
281
+
282
class PartitionAttentionLayer(nn.Module):
    """
    Layer for partitioning the input tensor into non-overlapping windows and applying attention to each window.

    Args:
        in_channels (int): Number of input channels.
        head_dim (int): Dimension of each attention head.
        partition_size (int): Size of the partitions.
        partition_type (str): Type of partitioning to use. Can be either "grid" or "window".
        grid_size (Tuple[int, int]): Size of the grid to partition the input tensor into.
        mlp_ratio (int): Ratio of the feature size expansion in the MLP layer.
        activation_layer (Callable[..., nn.Module]): Activation function to use.
        norm_layer (Callable[..., nn.Module]): Normalization function to use.
        attention_dropout (float): Dropout probability for the attention layer.
        mlp_dropout (float): Dropout probability for the MLP layer.
        p_stochastic_dropout (float): Probability of dropping out a partition.
    """

    def __init__(
        self,
        in_channels: int,
        head_dim: int,
        # partitioning parameters
        partition_size: int,
        partition_type: str,
        # grid size needs to be known at initialization time
        # because we need to know how many relative offsets there are in the grid
        grid_size: Tuple[int, int],
        mlp_ratio: int,
        activation_layer: Callable[..., nn.Module],
        norm_layer: Callable[..., nn.Module],
        attention_dropout: float,
        mlp_dropout: float,
        p_stochastic_dropout: float,
    ) -> None:
        super().__init__()

        self.n_heads = in_channels // head_dim
        self.head_dim = head_dim
        self.n_partitions = grid_size[0] // partition_size
        self.partition_type = partition_type
        self.grid_size = grid_size

        if partition_type not in ["grid", "window"]:
            raise ValueError("partition_type must be either 'grid' or 'window'")

        # "window": tiles of partition_size x partition_size; "grid": the roles
        # of tile size and tile count are swapped, so attention runs across
        # tiles at the same within-tile offset (see the axis swap below).
        if partition_type == "window":
            self.p, self.g = partition_size, self.n_partitions
        else:
            self.p, self.g = self.n_partitions, partition_size

        self.partition_op = WindowPartition()
        self.departition_op = WindowDepartition()
        # the swap is what turns a window partition into a grid partition
        self.partition_swap = SwapAxes(-2, -3) if partition_type == "grid" else nn.Identity()
        self.departition_swap = SwapAxes(-2, -3) if partition_type == "grid" else nn.Identity()

        self.attn_layer = nn.Sequential(
            norm_layer(in_channels),
            # it's always going to be partition_size ** 2 because
            # of the axis swap in the case of grid partitioning
            RelativePositionalMultiHeadAttention(in_channels, head_dim, partition_size**2),
            nn.Dropout(attention_dropout),
        )

        # pre-normalization similar to transformer layers
        self.mlp_layer = nn.Sequential(
            nn.LayerNorm(in_channels),
            nn.Linear(in_channels, in_channels * mlp_ratio),
            activation_layer(),
            nn.Linear(in_channels * mlp_ratio, in_channels),
            nn.Dropout(mlp_dropout),
        )

        # layer scale factors
        self.stochastic_dropout = StochasticDepth(p_stochastic_dropout, mode="row")

    def forward(self, x: Tensor) -> Tensor:
        """
        Args:
            x (Tensor): Input tensor with expected layout of [B, C, H, W].
        Returns:
            Tensor: Output tensor with expected layout of [B, C, H, W].
        """

        # Undefined behavior if H or W are not divisible by p
        # https://github.com/google-research/maxvit/blob/da76cf0d8a6ec668cc31b399c4126186da7da944/maxvit/models/maxvit.py#L766
        gh, gw = self.grid_size[0] // self.p, self.grid_size[1] // self.p
        torch._assert(
            self.grid_size[0] % self.p == 0 and self.grid_size[1] % self.p == 0,
            "Grid size must be divisible by partition size. Got grid size of {} and partition size of {}".format(
                self.grid_size, self.p
            ),
        )

        # partition -> (swap for grid mode) -> attention + MLP with residuals
        # and stochastic depth -> (swap back) -> departition
        x = self.partition_op(x, self.p)
        x = self.partition_swap(x)
        x = x + self.stochastic_dropout(self.attn_layer(x))
        x = x + self.stochastic_dropout(self.mlp_layer(x))
        x = self.departition_swap(x)
        x = self.departition_op(x, self.p, gh, gw)

        return x
384
+
385
+
386
class MaxVitLayer(nn.Module):
    """
    MaxVit layer consisting of a MBConv layer followed by a PartitionAttentionLayer with `window` and a PartitionAttentionLayer with `grid`.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        expansion_ratio (float): Expansion ratio in the bottleneck.
        squeeze_ratio (float): Squeeze ratio in the SE Layer.
        stride (int): Stride of the depthwise convolution.
        activation_layer (Callable[..., nn.Module]): Activation function.
        norm_layer (Callable[..., nn.Module]): Normalization function.
        head_dim (int): Dimension of the attention heads.
        mlp_ratio (int): Ratio of the MLP layer.
        mlp_dropout (float): Dropout probability for the MLP layer.
        attention_dropout (float): Dropout probability for the attention layer.
        p_stochastic_dropout (float): Probability of stochastic depth.
        partition_size (int): Size of the partitions.
        grid_size (Tuple[int, int]): Size of the input feature grid.
    """

    def __init__(
        self,
        # conv parameters
        in_channels: int,
        out_channels: int,
        squeeze_ratio: float,
        expansion_ratio: float,
        stride: int,
        # conv + transformer parameters
        norm_layer: Callable[..., nn.Module],
        activation_layer: Callable[..., nn.Module],
        # transformer parameters
        head_dim: int,
        mlp_ratio: int,
        mlp_dropout: float,
        attention_dropout: float,
        p_stochastic_dropout: float,
        # partitioning parameters
        partition_size: int,
        grid_size: Tuple[int, int],
    ) -> None:
        super().__init__()

        # Keys of this OrderedDict define the state_dict layout — do not rename.
        layers: OrderedDict = OrderedDict()

        # convolutional layer
        layers["MBconv"] = MBConv(
            in_channels=in_channels,
            out_channels=out_channels,
            expansion_ratio=expansion_ratio,
            squeeze_ratio=squeeze_ratio,
            stride=stride,
            activation_layer=activation_layer,
            norm_layer=norm_layer,
            p_stochastic_dropout=p_stochastic_dropout,
        )
        # attention layers, block -> grid
        layers["window_attention"] = PartitionAttentionLayer(
            in_channels=out_channels,
            head_dim=head_dim,
            partition_size=partition_size,
            partition_type="window",
            grid_size=grid_size,
            mlp_ratio=mlp_ratio,
            activation_layer=activation_layer,
            norm_layer=nn.LayerNorm,
            attention_dropout=attention_dropout,
            mlp_dropout=mlp_dropout,
            p_stochastic_dropout=p_stochastic_dropout,
        )
        layers["grid_attention"] = PartitionAttentionLayer(
            in_channels=out_channels,
            head_dim=head_dim,
            partition_size=partition_size,
            partition_type="grid",
            grid_size=grid_size,
            mlp_ratio=mlp_ratio,
            activation_layer=activation_layer,
            norm_layer=nn.LayerNorm,
            attention_dropout=attention_dropout,
            mlp_dropout=mlp_dropout,
            p_stochastic_dropout=p_stochastic_dropout,
        )
        self.layers = nn.Sequential(layers)

    def forward(self, x: Tensor) -> Tensor:
        """
        Args:
            x (Tensor): Input tensor of shape (B, C, H, W).
        Returns:
            Tensor: Output tensor of shape (B, C, H, W).
        """
        x = self.layers(x)
        return x
481
+
482
+
483
class MaxVitBlock(nn.Module):
    """
    A MaxVit block consisting of `n_layers` MaxVit layers.

    Only the first layer downsamples (stride 2); the remaining layers keep the
    resolution fixed.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        expansion_ratio (float): Expansion ratio in the bottleneck.
        squeeze_ratio (float): Squeeze ratio in the SE Layer.
        activation_layer (Callable[..., nn.Module]): Activation function.
        norm_layer (Callable[..., nn.Module]): Normalization function.
        head_dim (int): Dimension of the attention heads.
        mlp_ratio (int): Ratio of the MLP layer.
        mlp_dropout (float): Dropout probability for the MLP layer.
        attention_dropout (float): Dropout probability for the attention layer.
        partition_size (int): Size of the partitions.
        input_grid_size (Tuple[int, int]): Size of the input feature grid.
        n_layers (int): Number of layers in the block.
        p_stochastic (List[float]): List of probabilities for stochastic depth for each layer.
    """

    def __init__(
        self,
        # conv parameters
        in_channels: int,
        out_channels: int,
        squeeze_ratio: float,
        expansion_ratio: float,
        # conv + transformer parameters
        norm_layer: Callable[..., nn.Module],
        activation_layer: Callable[..., nn.Module],
        # transformer parameters
        head_dim: int,
        mlp_ratio: int,
        mlp_dropout: float,
        attention_dropout: float,
        # partitioning parameters
        partition_size: int,
        input_grid_size: Tuple[int, int],
        # number of layers
        n_layers: int,
        p_stochastic: List[float],
    ) -> None:
        super().__init__()
        if not len(p_stochastic) == n_layers:
            raise ValueError(f"p_stochastic must have length n_layers={n_layers}, got p_stochastic={p_stochastic}.")

        self.layers = nn.ModuleList()
        # account for the first stride of the first layer;
        # self.grid_size is read by MaxVit to chain blocks together
        self.grid_size = _get_conv_output_shape(input_grid_size, kernel_size=3, stride=2, padding=1)

        for idx, p in enumerate(p_stochastic):
            # only the first layer in the block downsamples
            stride = 2 if idx == 0 else 1
            self.layers += [
                MaxVitLayer(
                    in_channels=in_channels if idx == 0 else out_channels,
                    out_channels=out_channels,
                    squeeze_ratio=squeeze_ratio,
                    expansion_ratio=expansion_ratio,
                    stride=stride,
                    norm_layer=norm_layer,
                    activation_layer=activation_layer,
                    head_dim=head_dim,
                    mlp_ratio=mlp_ratio,
                    mlp_dropout=mlp_dropout,
                    attention_dropout=attention_dropout,
                    partition_size=partition_size,
                    grid_size=self.grid_size,
                    p_stochastic_dropout=p,
                ),
            ]

    def forward(self, x: Tensor) -> Tensor:
        """
        Args:
            x (Tensor): Input tensor of shape (B, C, H, W).
        Returns:
            Tensor: Output tensor of shape (B, C, H, W).
        """
        for layer in self.layers:
            x = layer(x)
        return x
566
+
567
+
568
class MaxVit(nn.Module):
    """
    Implements MaxVit Transformer from the `MaxViT: Multi-Axis Vision Transformer <https://arxiv.org/abs/2204.01697>`_ paper.
    Args:
        input_size (Tuple[int, int]): Size of the input image.
        stem_channels (int): Number of channels in the stem.
        partition_size (int): Size of the partitions.
        block_channels (List[int]): Number of channels in each block.
        block_layers (List[int]): Number of layers in each block.
        stochastic_depth_prob (float): Probability of stochastic depth. Expands to a list of probabilities for each layer that scales linearly to the specified value.
        squeeze_ratio (float): Squeeze ratio in the SE Layer. Default: 0.25.
        expansion_ratio (float): Expansion ratio in the MBConv bottleneck. Default: 4.
        norm_layer (Callable[..., nn.Module]): Normalization function. Default: None (setting to None will produce a `BatchNorm2d(eps=1e-3, momentum=0.01)`).
        activation_layer (Callable[..., nn.Module]): Activation function Default: nn.GELU.
        head_dim (int): Dimension of the attention heads.
        mlp_ratio (int): Expansion ratio of the MLP layer. Default: 4.
        mlp_dropout (float): Dropout probability for the MLP layer. Default: 0.0.
        attention_dropout (float): Dropout probability for the attention layer. Default: 0.0.
        num_classes (int): Number of classes. Default: 1000.

    Raises:
        ValueError: if ``input_size`` is not divisible by ``partition_size`` at
            every block resolution (checked at construction time).
    """

    def __init__(
        self,
        # input size parameters
        input_size: Tuple[int, int],
        # stem and task parameters
        stem_channels: int,
        # partitioning parameters
        partition_size: int,
        # block parameters
        block_channels: List[int],
        block_layers: List[int],
        # attention head dimensions
        head_dim: int,
        stochastic_depth_prob: float,
        # conv + transformer parameters
        # norm_layer is applied only to the conv layers
        # activation_layer is applied both to conv and transformer layers
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        activation_layer: Callable[..., nn.Module] = nn.GELU,
        # conv parameters
        squeeze_ratio: float = 0.25,
        expansion_ratio: float = 4,
        # transformer parameters
        mlp_ratio: int = 4,
        mlp_dropout: float = 0.0,
        attention_dropout: float = 0.0,
        # task parameters
        num_classes: int = 1000,
    ) -> None:
        super().__init__()
        _log_api_usage_once(self)

        input_channels = 3

        # https://github.com/google-research/maxvit/blob/da76cf0d8a6ec668cc31b399c4126186da7da944/maxvit/models/maxvit.py#L1029-L1030
        # for the exact parameters used in batchnorm
        if norm_layer is None:
            norm_layer = partial(nn.BatchNorm2d, eps=1e-3, momentum=0.01)

        # Make sure input size will be divisible by the partition size in all blocks
        # Undefined behavior if H or W are not divisible by p
        # https://github.com/google-research/maxvit/blob/da76cf0d8a6ec668cc31b399c4126186da7da944/maxvit/models/maxvit.py#L766
        block_input_sizes = _make_block_input_shapes(input_size, len(block_channels))
        for idx, block_input_size in enumerate(block_input_sizes):
            if block_input_size[0] % partition_size != 0 or block_input_size[1] % partition_size != 0:
                raise ValueError(
                    f"Input size {block_input_size} of block {idx} is not divisible by partition size {partition_size}. "
                    f"Consider changing the partition size or the input size.\n"
                    f"Current configuration yields the following block input sizes: {block_input_sizes}."
                )

        # stem: two 3x3 convs, the first with stride 2 (halves the resolution)
        self.stem = nn.Sequential(
            Conv2dNormActivation(
                input_channels,
                stem_channels,
                3,
                stride=2,
                norm_layer=norm_layer,
                activation_layer=activation_layer,
                bias=False,
                inplace=None,
            ),
            Conv2dNormActivation(
                stem_channels, stem_channels, 3, stride=1, norm_layer=None, activation_layer=None, bias=True
            ),
        )

        # account for stem stride
        input_size = _get_conv_output_shape(input_size, kernel_size=3, stride=2, padding=1)
        self.partition_size = partition_size

        # blocks
        self.blocks = nn.ModuleList()
        # each block consumes the previous block's output channels
        in_channels = [stem_channels] + block_channels[:-1]
        out_channels = block_channels

        # precompute the stochastich depth probabilities from 0 to stochastic_depth_prob
        # since we have N blocks with L layers, we will have N * L probabilities uniformly distributed
        # over the range [0, stochastic_depth_prob]
        p_stochastic = np.linspace(0, stochastic_depth_prob, sum(block_layers)).tolist()

        p_idx = 0
        for in_channel, out_channel, num_layers in zip(in_channels, out_channels, block_layers):
            self.blocks.append(
                MaxVitBlock(
                    in_channels=in_channel,
                    out_channels=out_channel,
                    squeeze_ratio=squeeze_ratio,
                    expansion_ratio=expansion_ratio,
                    norm_layer=norm_layer,
                    activation_layer=activation_layer,
                    head_dim=head_dim,
                    mlp_ratio=mlp_ratio,
                    mlp_dropout=mlp_dropout,
                    attention_dropout=attention_dropout,
                    partition_size=partition_size,
                    input_grid_size=input_size,
                    n_layers=num_layers,
                    p_stochastic=p_stochastic[p_idx : p_idx + num_layers],
                ),
            )
            # chain the resolution: each block halves it once
            input_size = self.blocks[-1].grid_size
            p_idx += num_layers

        # see https://github.com/google-research/maxvit/blob/da76cf0d8a6ec668cc31b399c4126186da7da944/maxvit/models/maxvit.py#L1137-L1158
        # for why there is Linear -> Tanh -> Linear
        self.classifier = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Flatten(),
            nn.LayerNorm(block_channels[-1]),
            nn.Linear(block_channels[-1], block_channels[-1]),
            nn.Tanh(),
            nn.Linear(block_channels[-1], num_classes, bias=False),
        )

        self._init_weights()

    def forward(self, x: Tensor) -> Tensor:
        # stem -> all MaxVit blocks -> pooled classifier head
        x = self.stem(x)
        for block in self.blocks:
            x = block(x)
        x = self.classifier(x)
        return x

    def _init_weights(self):
        # normal(std=0.02) for conv/linear weights, zeros for biases,
        # identity-style (1, 0) for batch-norm affine parameters
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.normal_(m.weight, std=0.02)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, std=0.02)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
727
+
728
+
729
def _maxvit(
    # stem parameters
    stem_channels: int,
    # block parameters
    block_channels: List[int],
    block_layers: List[int],
    stochastic_depth_prob: float,
    # partitioning parameters
    partition_size: int,
    # transformer parameters
    head_dim: int,
    # Weights API
    weights: Optional[WeightsEnum] = None,
    progress: bool = False,
    # kwargs,
    **kwargs: Any,
) -> MaxVit:
    """Instantiate a :class:`MaxVit` and optionally load checkpoint weights.

    When ``weights`` is given, ``num_classes`` and ``input_size`` are forced to
    match the checkpoint metadata (a conflicting user value raises).
    """
    if weights is not None:
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
        # pretrained MaxVit checkpoints are only defined for square inputs
        assert weights.meta["min_size"][0] == weights.meta["min_size"][1]
        _ovewrite_named_param(kwargs, "input_size", weights.meta["min_size"])

    # pop after the potential overwrite above so checkpoint metadata wins
    config = {
        "stem_channels": stem_channels,
        "block_channels": block_channels,
        "block_layers": block_layers,
        "stochastic_depth_prob": stochastic_depth_prob,
        "head_dim": head_dim,
        "partition_size": partition_size,
        "input_size": kwargs.pop("input_size", (224, 224)),
    }
    model = MaxVit(**config, **kwargs)

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))

    return model
769
+
770
+
771
class MaxVit_T_Weights(WeightsEnum):
    # ImageNet-1K weights for the "tiny" MaxVit variant.
    IMAGENET1K_V1 = Weights(
        # URL empty until official release
        url="https://download.pytorch.org/models/maxvit_t-bc5ab103.pth",
        # evaluation preprocessing: 224x224 crop, bicubic resize
        transforms=partial(
            ImageClassification, crop_size=224, resize_size=224, interpolation=InterpolationMode.BICUBIC
        ),
        meta={
            "categories": _IMAGENET_CATEGORIES,
            "num_params": 30919624,
            "min_size": (224, 224),
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#maxvit",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 83.700,
                    "acc@5": 96.722,
                }
            },
            "_ops": 5.558,
            "_file_size": 118.769,
            "_docs": """These weights reproduce closely the results of the paper using a similar training recipe.
            They were trained with a BatchNorm2D momentum of 0.99 instead of the more correct 0.01.""",
        },
    )
    DEFAULT = IMAGENET1K_V1
796
+
797
+
798
@register_model()
@handle_legacy_interface(weights=("pretrained", MaxVit_T_Weights.IMAGENET1K_V1))
def maxvit_t(*, weights: Optional[MaxVit_T_Weights] = None, progress: bool = True, **kwargs: Any) -> MaxVit:
    """
    Constructs a maxvit_t architecture from
    `MaxViT: Multi-Axis Vision Transformer <https://arxiv.org/abs/2204.01697>`_.

    Args:
        weights (:class:`~torchvision.models.MaxVit_T_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.MaxVit_T_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.maxvit.MaxVit``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/maxvit.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.MaxVit_T_Weights
        :members:
    """
    weights = MaxVit_T_Weights.verify(weights)

    # architecture hyper-parameters of the "tiny" variant
    arch_params = {
        "stem_channels": 64,
        "block_channels": [64, 128, 256, 512],
        "block_layers": [2, 2, 5, 2],
        "head_dim": 32,
        "stochastic_depth_prob": 0.2,
        "partition_size": 7,
    }
    return _maxvit(weights=weights, progress=progress, **arch_params, **kwargs)
vllm/lib/python3.10/site-packages/torchvision/models/mnasnet.py ADDED
@@ -0,0 +1,434 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+ from functools import partial
3
+ from typing import Any, Dict, List, Optional
4
+
5
+ import torch
6
+ import torch.nn as nn
7
+ from torch import Tensor
8
+
9
+ from ..transforms._presets import ImageClassification
10
+ from ..utils import _log_api_usage_once
11
+ from ._api import register_model, Weights, WeightsEnum
12
+ from ._meta import _IMAGENET_CATEGORIES
13
+ from ._utils import _ovewrite_named_param, handle_legacy_interface
14
+
15
+
16
+ __all__ = [
17
+ "MNASNet",
18
+ "MNASNet0_5_Weights",
19
+ "MNASNet0_75_Weights",
20
+ "MNASNet1_0_Weights",
21
+ "MNASNet1_3_Weights",
22
+ "mnasnet0_5",
23
+ "mnasnet0_75",
24
+ "mnasnet1_0",
25
+ "mnasnet1_3",
26
+ ]
27
+
28
+
29
+ # Paper suggests 0.9997 momentum, for TensorFlow. Equivalent PyTorch momentum is
30
+ # 1.0 - tensorflow.
31
+ _BN_MOMENTUM = 1 - 0.9997
32
+
33
+
34
class _InvertedResidual(nn.Module):
    """MobileNetV2-style inverted residual: 1x1 expand -> depthwise -> 1x1 project."""

    def __init__(
        self, in_ch: int, out_ch: int, kernel_size: int, stride: int, expansion_factor: int, bn_momentum: float = 0.1
    ) -> None:
        super().__init__()
        if stride not in [1, 2]:
            raise ValueError(f"stride should be 1 or 2 instead of {stride}")
        if kernel_size not in [3, 5]:
            raise ValueError(f"kernel_size should be 3 or 5 instead of {kernel_size}")
        expanded = in_ch * expansion_factor
        # A skip connection is only valid when neither the channel count nor
        # the spatial resolution changes across the block.
        self.apply_residual = stride == 1 and in_ch == out_ch
        self.layers = nn.Sequential(
            # Pointwise expansion
            nn.Conv2d(in_ch, expanded, 1, bias=False),
            nn.BatchNorm2d(expanded, momentum=bn_momentum),
            nn.ReLU(inplace=True),
            # Depthwise
            nn.Conv2d(
                expanded, expanded, kernel_size, padding=kernel_size // 2, stride=stride, groups=expanded, bias=False
            ),
            nn.BatchNorm2d(expanded, momentum=bn_momentum),
            nn.ReLU(inplace=True),
            # Linear pointwise projection. Note that there's no activation.
            nn.Conv2d(expanded, out_ch, 1, bias=False),
            nn.BatchNorm2d(out_ch, momentum=bn_momentum),
        )

    def forward(self, input: Tensor) -> Tensor:
        out = self.layers(input)
        return out + input if self.apply_residual else out
64
+
65
+
66
def _stack(
    in_ch: int, out_ch: int, kernel_size: int, stride: int, exp_factor: int, repeats: int, bn_momentum: float
) -> nn.Sequential:
    """Creates a stack of inverted residuals."""
    if repeats < 1:
        raise ValueError(f"repeats should be >= 1, instead got {repeats}")
    # Only the first block changes channel count / spatial size, so it never
    # gets a residual connection; the remaining repeats keep stride 1.
    blocks = [_InvertedResidual(in_ch, out_ch, kernel_size, stride, exp_factor, bn_momentum=bn_momentum)]
    blocks.extend(
        _InvertedResidual(out_ch, out_ch, kernel_size, 1, exp_factor, bn_momentum=bn_momentum)
        for _ in range(repeats - 1)
    )
    return nn.Sequential(*blocks)
78
+
79
+
80
def _round_to_multiple_of(val: float, divisor: int, round_up_bias: float = 0.9) -> int:
    """Asymmetric rounding to make `val` divisible by `divisor`. With default
    bias, will round up, unless the number is no more than 10% greater than the
    smaller divisible value, i.e. (83, 8) -> 80, but (84, 8) -> 88."""
    if not 0.0 < round_up_bias < 1.0:
        raise ValueError(f"round_up_bias should be greater than 0.0 and smaller than 1.0 instead of {round_up_bias}")
    # Round to nearest multiple, but never below one full divisor.
    rounded = max(divisor, int(val + divisor / 2) // divisor * divisor)
    if rounded < round_up_bias * val:
        # Rounding down lost more than (1 - bias) of the value: go up instead.
        rounded += divisor
    return rounded
88
+
89
+
90
def _get_depths(alpha: float) -> List[int]:
    """Scales tensor depths as in reference MobileNet code, prefers rounding up
    rather than down."""
    # Base (alpha == 1.0) channel counts for each stage of the network.
    base_depths = (32, 16, 24, 40, 80, 96, 192, 320)
    return [_round_to_multiple_of(alpha * d, 8) for d in base_depths]
95
+
96
+
97
class MNASNet(torch.nn.Module):
    """MNASNet, as described in https://arxiv.org/abs/1807.11626. This
    implements the B1 variant of the model.
    >>> model = MNASNet(1.0, num_classes=1000)
    >>> x = torch.rand(1, 3, 224, 224)
    >>> y = model(x)
    >>> y.dim()
    2
    >>> y.nelement()
    1000
    """

    # Version 2 adds depth scaling in the initial stages of the network.
    _version = 2

    def __init__(self, alpha: float, num_classes: int = 1000, dropout: float = 0.2) -> None:
        """
        Args:
            alpha (float): Depth multiplier applied to every stage's channel count.
            num_classes (int): Size of the classifier output.
            dropout (float): Dropout probability applied before the final linear layer.

        Raises:
            ValueError: If ``alpha`` is not strictly positive.
        """
        super().__init__()
        _log_api_usage_once(self)
        if alpha <= 0.0:
            raise ValueError(f"alpha should be greater than 0.0 instead of {alpha}")
        self.alpha = alpha
        self.num_classes = num_classes
        depths = _get_depths(alpha)
        layers = [
            # First layer: regular conv.
            nn.Conv2d(3, depths[0], 3, padding=1, stride=2, bias=False),
            nn.BatchNorm2d(depths[0], momentum=_BN_MOMENTUM),
            nn.ReLU(inplace=True),
            # Depthwise separable, no skip.
            nn.Conv2d(depths[0], depths[0], 3, padding=1, stride=1, groups=depths[0], bias=False),
            nn.BatchNorm2d(depths[0], momentum=_BN_MOMENTUM),
            nn.ReLU(inplace=True),
            nn.Conv2d(depths[0], depths[1], 1, padding=0, stride=1, bias=False),
            nn.BatchNorm2d(depths[1], momentum=_BN_MOMENTUM),
            # MNASNet blocks: stacks of inverted residuals.
            _stack(depths[1], depths[2], 3, 2, 3, 3, _BN_MOMENTUM),
            _stack(depths[2], depths[3], 5, 2, 3, 3, _BN_MOMENTUM),
            _stack(depths[3], depths[4], 5, 2, 6, 3, _BN_MOMENTUM),
            _stack(depths[4], depths[5], 3, 1, 6, 2, _BN_MOMENTUM),
            _stack(depths[5], depths[6], 5, 2, 6, 4, _BN_MOMENTUM),
            _stack(depths[6], depths[7], 3, 1, 6, 1, _BN_MOMENTUM),
            # Final mapping to classifier input.
            nn.Conv2d(depths[7], 1280, 1, padding=0, stride=1, bias=False),
            nn.BatchNorm2d(1280, momentum=_BN_MOMENTUM),
            nn.ReLU(inplace=True),
        ]
        self.layers = nn.Sequential(*layers)
        self.classifier = nn.Sequential(nn.Dropout(p=dropout, inplace=True), nn.Linear(1280, num_classes))

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.kaiming_uniform_(m.weight, mode="fan_out", nonlinearity="sigmoid")
                nn.init.zeros_(m.bias)

    def forward(self, x: Tensor) -> Tensor:
        x = self.layers(x)
        # Equivalent to global avgpool and removing H and W dimensions.
        x = x.mean([2, 3])
        return self.classifier(x)

    def _load_from_state_dict(
        self,
        state_dict: Dict,
        prefix: str,
        local_metadata: Dict,
        strict: bool,
        missing_keys: List[str],
        unexpected_keys: List[str],
        error_msgs: List[str],
    ) -> None:
        # Supports loading checkpoints saved by the v1 layout by patching the
        # stem back to the fixed-size configuration v1 used.
        version = local_metadata.get("version", None)
        if version not in [1, 2]:
            # BUGFIX: error message previously read "shluld".
            raise ValueError(f"version should be set to 1 or 2 instead of {version}")

        if version == 1 and not self.alpha == 1.0:
            # In the initial version of the model (v1), stem was fixed-size.
            # All other layer configurations were the same. This will patch
            # the model so that it's identical to v1. Model with alpha 1.0 is
            # unaffected.
            depths = _get_depths(self.alpha)
            v1_stem = [
                nn.Conv2d(3, 32, 3, padding=1, stride=2, bias=False),
                nn.BatchNorm2d(32, momentum=_BN_MOMENTUM),
                nn.ReLU(inplace=True),
                nn.Conv2d(32, 32, 3, padding=1, stride=1, groups=32, bias=False),
                nn.BatchNorm2d(32, momentum=_BN_MOMENTUM),
                nn.ReLU(inplace=True),
                nn.Conv2d(32, 16, 1, padding=0, stride=1, bias=False),
                nn.BatchNorm2d(16, momentum=_BN_MOMENTUM),
                _stack(16, depths[2], 3, 2, 3, 3, _BN_MOMENTUM),
            ]
            for idx, layer in enumerate(v1_stem):
                self.layers[idx] = layer

            # The model is now identical to v1, and must be saved as such.
            self._version = 1
            warnings.warn(
                "A new version of MNASNet model has been implemented. "
                "Your checkpoint was saved using the previous version. "
                "This checkpoint will load and work as before, but "
                "you may want to upgrade by training a newer model or "
                "transfer learning from an updated ImageNet checkpoint.",
                UserWarning,
            )

        super()._load_from_state_dict(
            state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
        )
212
+
213
+
214
# Metadata shared by every MNASNet weight entry below; per-entry meta dicts
# spread this first and may override individual keys (e.g. "recipe").
_COMMON_META = {
    "min_size": (1, 1),
    "categories": _IMAGENET_CATEGORIES,
    "recipe": "https://github.com/1e100/mnasnet_trainer",
}
219
+
220
+
221
class MNASNet0_5_Weights(WeightsEnum):
    # ImageNet-1K checkpoint for the alpha=0.5 variant; metrics and provenance
    # are recorded in ``meta``.
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/mnasnet0.5_top1_67.823-3ffadce67e.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 2218512,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 67.734,
                    "acc@5": 87.490,
                }
            },
            "_ops": 0.104,
            "_file_size": 8.591,
            "_docs": """These weights reproduce closely the results of the paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1
240
+
241
+
242
class MNASNet0_75_Weights(WeightsEnum):
    # ImageNet-1K checkpoint for the alpha=0.75 variant; trained later than the
    # 0.5/1.0 checkpoints, hence the different recipe and resize_size=232.
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/mnasnet0_75-7090bc5f.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "recipe": "https://github.com/pytorch/vision/pull/6019",
            "num_params": 3170208,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 71.180,
                    "acc@5": 90.496,
                }
            },
            "_ops": 0.215,
            "_file_size": 12.303,
            "_docs": """
                These weights were trained from scratch by using TorchVision's `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V1
265
+
266
+
267
class MNASNet1_0_Weights(WeightsEnum):
    # ImageNet-1K checkpoint for the alpha=1.0 variant; metrics and provenance
    # are recorded in ``meta``.
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/mnasnet1.0_top1_73.512-f206786ef8.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 4383312,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 73.456,
                    "acc@5": 91.510,
                }
            },
            "_ops": 0.314,
            "_file_size": 16.915,
            "_docs": """These weights reproduce closely the results of the paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1
286
+
287
+
288
class MNASNet1_3_Weights(WeightsEnum):
    # ImageNet-1K checkpoint for the alpha=1.3 variant; trained later than the
    # 0.5/1.0 checkpoints, hence the different recipe and resize_size=232.
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/mnasnet1_3-a4c69d6f.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "recipe": "https://github.com/pytorch/vision/pull/6019",
            "num_params": 6282256,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 76.506,
                    "acc@5": 93.522,
                }
            },
            "_ops": 0.526,
            "_file_size": 24.246,
            "_docs": """
                These weights were trained from scratch by using TorchVision's `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V1
311
+
312
+
313
def _mnasnet(alpha: float, weights: Optional[WeightsEnum], progress: bool, **kwargs: Any) -> MNASNet:
    """Shared builder for all MNASNet variants: constructs the model for the
    given depth multiplier and optionally loads a pretrained checkpoint."""
    if weights is not None:
        # A checkpoint fixes the classifier size, so force num_classes to match it.
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))

    model = MNASNet(alpha, **kwargs)

    if weights:
        state_dict = weights.get_state_dict(progress=progress, check_hash=True)
        model.load_state_dict(state_dict)

    return model
323
+
324
+
325
@register_model()
@handle_legacy_interface(weights=("pretrained", MNASNet0_5_Weights.IMAGENET1K_V1))
def mnasnet0_5(*, weights: Optional[MNASNet0_5_Weights] = None, progress: bool = True, **kwargs: Any) -> MNASNet:
    """MNASNet with depth multiplier of 0.5 from
    `MnasNet: Platform-Aware Neural Architecture Search for Mobile
    <https://arxiv.org/abs/1807.11626>`_ paper.

    Args:
        weights (:class:`~torchvision.models.MNASNet0_5_Weights`, optional): Pretrained
            weights to use; ``None`` (the default) means no pre-trained weights.
            See :class:`~torchvision.models.MNASNet0_5_Weights` for the
            available values.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters forwarded to the ``torchvision.models.mnasnet.MNASNet``
            base class (`source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/mnasnet.py>`_).

    .. autoclass:: torchvision.models.MNASNet0_5_Weights
        :members:
    """
    return _mnasnet(0.5, MNASNet0_5_Weights.verify(weights), progress, **kwargs)
351
+
352
+
353
@register_model()
@handle_legacy_interface(weights=("pretrained", MNASNet0_75_Weights.IMAGENET1K_V1))
def mnasnet0_75(*, weights: Optional[MNASNet0_75_Weights] = None, progress: bool = True, **kwargs: Any) -> MNASNet:
    """MNASNet with depth multiplier of 0.75 from
    `MnasNet: Platform-Aware Neural Architecture Search for Mobile
    <https://arxiv.org/abs/1807.11626>`_ paper.

    Args:
        weights (:class:`~torchvision.models.MNASNet0_75_Weights`, optional): Pretrained
            weights to use; ``None`` (the default) means no pre-trained weights.
            See :class:`~torchvision.models.MNASNet0_75_Weights` for the
            available values.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters forwarded to the ``torchvision.models.mnasnet.MNASNet``
            base class (`source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/mnasnet.py>`_).

    .. autoclass:: torchvision.models.MNASNet0_75_Weights
        :members:
    """
    return _mnasnet(0.75, MNASNet0_75_Weights.verify(weights), progress, **kwargs)
379
+
380
+
381
@register_model()
@handle_legacy_interface(weights=("pretrained", MNASNet1_0_Weights.IMAGENET1K_V1))
def mnasnet1_0(*, weights: Optional[MNASNet1_0_Weights] = None, progress: bool = True, **kwargs: Any) -> MNASNet:
    """MNASNet with depth multiplier of 1.0 from
    `MnasNet: Platform-Aware Neural Architecture Search for Mobile
    <https://arxiv.org/abs/1807.11626>`_ paper.

    Args:
        weights (:class:`~torchvision.models.MNASNet1_0_Weights`, optional): Pretrained
            weights to use; ``None`` (the default) means no pre-trained weights.
            See :class:`~torchvision.models.MNASNet1_0_Weights` for the
            available values.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters forwarded to the ``torchvision.models.mnasnet.MNASNet``
            base class (`source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/mnasnet.py>`_).

    .. autoclass:: torchvision.models.MNASNet1_0_Weights
        :members:
    """
    return _mnasnet(1.0, MNASNet1_0_Weights.verify(weights), progress, **kwargs)
407
+
408
+
409
@register_model()
@handle_legacy_interface(weights=("pretrained", MNASNet1_3_Weights.IMAGENET1K_V1))
def mnasnet1_3(*, weights: Optional[MNASNet1_3_Weights] = None, progress: bool = True, **kwargs: Any) -> MNASNet:
    """MNASNet with depth multiplier of 1.3 from
    `MnasNet: Platform-Aware Neural Architecture Search for Mobile
    <https://arxiv.org/abs/1807.11626>`_ paper.

    Args:
        weights (:class:`~torchvision.models.MNASNet1_3_Weights`, optional): Pretrained
            weights to use; ``None`` (the default) means no pre-trained weights.
            See :class:`~torchvision.models.MNASNet1_3_Weights` for the
            available values.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters forwarded to the ``torchvision.models.mnasnet.MNASNet``
            base class (`source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/mnasnet.py>`_).

    .. autoclass:: torchvision.models.MNASNet1_3_Weights
        :members:
    """
    return _mnasnet(1.3, MNASNet1_3_Weights.verify(weights), progress, **kwargs)
vllm/lib/python3.10/site-packages/torchvision/models/mobilenet.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ from .mobilenetv2 import * # noqa: F401, F403
2
+ from .mobilenetv3 import * # noqa: F401, F403
3
+ from .mobilenetv2 import __all__ as mv2_all
4
+ from .mobilenetv3 import __all__ as mv3_all
5
+
6
+ __all__ = mv2_all + mv3_all
vllm/lib/python3.10/site-packages/torchvision/models/mobilenetv2.py ADDED
@@ -0,0 +1,260 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import partial
2
+ from typing import Any, Callable, List, Optional
3
+
4
+ import torch
5
+ from torch import nn, Tensor
6
+
7
+ from ..ops.misc import Conv2dNormActivation
8
+ from ..transforms._presets import ImageClassification
9
+ from ..utils import _log_api_usage_once
10
+ from ._api import register_model, Weights, WeightsEnum
11
+ from ._meta import _IMAGENET_CATEGORIES
12
+ from ._utils import _make_divisible, _ovewrite_named_param, handle_legacy_interface
13
+
14
+
15
+ __all__ = ["MobileNetV2", "MobileNet_V2_Weights", "mobilenet_v2"]
16
+
17
+
18
+ # necessary for backwards compatibility
19
# necessary for backwards compatibility
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted residual block: optional 1x1 expand, depthwise
    conv, then a linear 1x1 projection, with a residual when shape permits."""

    def __init__(
        self, inp: int, oup: int, stride: int, expand_ratio: int, norm_layer: Optional[Callable[..., nn.Module]] = None
    ) -> None:
        super().__init__()
        self.stride = stride
        if stride not in [1, 2]:
            raise ValueError(f"stride should be 1 or 2 instead of {stride}")

        if norm_layer is None:
            norm_layer = nn.BatchNorm2d

        hidden_dim = int(round(inp * expand_ratio))
        # Residual only when spatial size and channels are preserved.
        self.use_res_connect = stride == 1 and inp == oup

        layers: List[nn.Module] = []
        if expand_ratio != 1:
            # pw: only expand when the ratio actually grows the channels.
            layers.append(
                Conv2dNormActivation(inp, hidden_dim, kernel_size=1, norm_layer=norm_layer, activation_layer=nn.ReLU6)
            )
        # dw
        layers.append(
            Conv2dNormActivation(
                hidden_dim,
                hidden_dim,
                stride=stride,
                groups=hidden_dim,
                norm_layer=norm_layer,
                activation_layer=nn.ReLU6,
            )
        )
        # pw-linear: no activation after the projection.
        layers.append(nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False))
        layers.append(norm_layer(oup))

        self.conv = nn.Sequential(*layers)
        self.out_channels = oup
        self._is_cn = stride > 1

    def forward(self, x: Tensor) -> Tensor:
        out = self.conv(x)
        return x + out if self.use_res_connect else out
65
+
66
+
67
class MobileNetV2(nn.Module):
    # NOTE: the exact order in which ``features`` is assembled below determines
    # the checkpoint key names, so it must not be reordered.
    def __init__(
        self,
        num_classes: int = 1000,
        width_mult: float = 1.0,
        inverted_residual_setting: Optional[List[List[int]]] = None,
        round_nearest: int = 8,
        block: Optional[Callable[..., nn.Module]] = None,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        dropout: float = 0.2,
    ) -> None:
        """
        MobileNet V2 main class

        Args:
            num_classes (int): Number of classes
            width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
            inverted_residual_setting: Network structure
            round_nearest (int): Round the number of channels in each layer to be a multiple of this number
                Set to 1 to turn off rounding
            block: Module specifying inverted residual building block for mobilenet
            norm_layer: Module specifying the normalization layer to use
            dropout (float): The droupout probability

        Raises:
            ValueError: If ``inverted_residual_setting`` is empty or its rows
                do not have 4 elements (t, c, n, s).
        """
        super().__init__()
        _log_api_usage_once(self)

        if block is None:
            block = InvertedResidual

        if norm_layer is None:
            norm_layer = nn.BatchNorm2d

        input_channel = 32
        last_channel = 1280

        if inverted_residual_setting is None:
            # Default architecture from Table 2 of the MobileNetV2 paper:
            # t = expand ratio, c = output channels, n = repeats, s = first stride.
            inverted_residual_setting = [
                # t, c, n, s
                [1, 16, 1, 1],
                [6, 24, 2, 2],
                [6, 32, 3, 2],
                [6, 64, 4, 2],
                [6, 96, 3, 1],
                [6, 160, 3, 2],
                [6, 320, 1, 1],
            ]

        # only check the first element, assuming user knows t,c,n,s are required
        if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
            raise ValueError(
                f"inverted_residual_setting should be non-empty or a 4-element list, got {inverted_residual_setting}"
            )

        # building first layer
        input_channel = _make_divisible(input_channel * width_mult, round_nearest)
        # last_channel is never shrunk below 1280 (max with 1.0 multiplier).
        self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
        features: List[nn.Module] = [
            Conv2dNormActivation(3, input_channel, stride=2, norm_layer=norm_layer, activation_layer=nn.ReLU6)
        ]
        # building inverted residual blocks
        for t, c, n, s in inverted_residual_setting:
            output_channel = _make_divisible(c * width_mult, round_nearest)
            for i in range(n):
                # Only the first block of a stage applies the stage stride.
                stride = s if i == 0 else 1
                features.append(block(input_channel, output_channel, stride, expand_ratio=t, norm_layer=norm_layer))
                input_channel = output_channel
        # building last several layers
        features.append(
            Conv2dNormActivation(
                input_channel, self.last_channel, kernel_size=1, norm_layer=norm_layer, activation_layer=nn.ReLU6
            )
        )
        # make it nn.Sequential
        self.features = nn.Sequential(*features)

        # building classifier
        self.classifier = nn.Sequential(
            nn.Dropout(p=dropout),
            nn.Linear(self.last_channel, num_classes),
        )

        # weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out")
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)

    def _forward_impl(self, x: Tensor) -> Tensor:
        # This exists since TorchScript doesn't support inheritance, so the superclass method
        # (this one) needs to have a name other than `forward` that can be accessed in a subclass
        x = self.features(x)
        # Cannot use "squeeze" as batch-size can be 1
        x = nn.functional.adaptive_avg_pool2d(x, (1, 1))
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x

    def forward(self, x: Tensor) -> Tensor:
        """Run a forward pass; returns (N, num_classes) logits."""
        return self._forward_impl(x)
175
+
176
+
177
# Metadata shared by both MobileNet_V2 weight entries below; spread into each
# entry's meta dict first so per-entry keys can override it.
_COMMON_META = {
    "num_params": 3504872,
    "min_size": (1, 1),
    "categories": _IMAGENET_CATEGORIES,
}
182
+
183
+
184
class MobileNet_V2_Weights(WeightsEnum):
    # Original paper-style checkpoint.
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/mobilenet_v2-b0353104.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv2",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 71.878,
                    "acc@5": 90.286,
                }
            },
            "_ops": 0.301,
            "_file_size": 13.555,
            "_docs": """These weights reproduce closely the results of the paper using a simple training recipe.""",
        },
    )
    # Improved checkpoint from the updated TorchVision training recipe; note the
    # larger resize_size=232 at evaluation time.
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/mobilenet_v2-7ebf99e0.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-reg-tuning",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 72.154,
                    "acc@5": 90.822,
                }
            },
            "_ops": 0.301,
            "_file_size": 13.598,
            "_docs": """
                These weights improve upon the results of the original paper by using a modified version of TorchVision's
                `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V2
224
+
225
+
226
@register_model()
@handle_legacy_interface(weights=("pretrained", MobileNet_V2_Weights.IMAGENET1K_V1))
def mobilenet_v2(
    *, weights: Optional[MobileNet_V2_Weights] = None, progress: bool = True, **kwargs: Any
) -> MobileNetV2:
    """MobileNetV2 architecture from the `MobileNetV2: Inverted Residuals and Linear
    Bottlenecks <https://arxiv.org/abs/1801.04381>`_ paper.

    Args:
        weights (:class:`~torchvision.models.MobileNet_V2_Weights`, optional): Pretrained
            weights to use; ``None`` (the default) means no pre-trained weights.
            See :class:`~torchvision.models.MobileNet_V2_Weights` for the
            available values.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters forwarded to the
            ``torchvision.models.mobilenetv2.MobileNetV2`` base class (`source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/mobilenetv2.py>`_).

    .. autoclass:: torchvision.models.MobileNet_V2_Weights
        :members:
    """
    weights = MobileNet_V2_Weights.verify(weights)

    if weights is not None:
        # A checkpoint fixes the classifier size, so force num_classes to match it.
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))

    model = MobileNetV2(**kwargs)

    if weights is not None:
        state_dict = weights.get_state_dict(progress=progress, check_hash=True)
        model.load_state_dict(state_dict)

    return model
vllm/lib/python3.10/site-packages/torchvision/models/mobilenetv3.py ADDED
@@ -0,0 +1,423 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import partial
2
+ from typing import Any, Callable, List, Optional, Sequence
3
+
4
+ import torch
5
+ from torch import nn, Tensor
6
+
7
+ from ..ops.misc import Conv2dNormActivation, SqueezeExcitation as SElayer
8
+ from ..transforms._presets import ImageClassification
9
+ from ..utils import _log_api_usage_once
10
+ from ._api import register_model, Weights, WeightsEnum
11
+ from ._meta import _IMAGENET_CATEGORIES
12
+ from ._utils import _make_divisible, _ovewrite_named_param, handle_legacy_interface
13
+
14
+
15
+ __all__ = [
16
+ "MobileNetV3",
17
+ "MobileNet_V3_Large_Weights",
18
+ "MobileNet_V3_Small_Weights",
19
+ "mobilenet_v3_large",
20
+ "mobilenet_v3_small",
21
+ ]
22
+
23
+
24
class InvertedResidualConfig:
    """One row of Tables 1 and 2 of the MobileNetV3 paper, with all channel
    counts already scaled by ``width_mult``."""

    def __init__(
        self,
        input_channels: int,
        kernel: int,
        expanded_channels: int,
        out_channels: int,
        use_se: bool,
        activation: str,
        stride: int,
        dilation: int,
        width_mult: float,
    ):
        adjust = self.adjust_channels
        self.input_channels = adjust(input_channels, width_mult)
        self.kernel = kernel
        self.expanded_channels = adjust(expanded_channels, width_mult)
        self.out_channels = adjust(out_channels, width_mult)
        self.use_se = use_se
        # "HS" selects hard-swish; any other value selects ReLU.
        self.use_hs = activation == "HS"
        self.stride = stride
        self.dilation = dilation

    @staticmethod
    def adjust_channels(channels: int, width_mult: float):
        # Keep channel counts divisible by 8 after width scaling.
        return _make_divisible(channels * width_mult, 8)
50
+
51
+
52
class InvertedResidual(nn.Module):
    # Implemented as described at section 5 of MobileNetV3 paper
    # NOTE: the append order below (expand, depthwise, SE, project) fixes the
    # checkpoint key names and must not be reordered.
    def __init__(
        self,
        cnf: InvertedResidualConfig,
        norm_layer: Callable[..., nn.Module],
        se_layer: Callable[..., nn.Module] = partial(SElayer, scale_activation=nn.Hardsigmoid),
    ):
        super().__init__()
        if not (1 <= cnf.stride <= 2):
            raise ValueError("illegal stride value")

        # Residual only when spatial size and channel count are preserved.
        self.use_res_connect = cnf.stride == 1 and cnf.input_channels == cnf.out_channels

        layers: List[nn.Module] = []
        activation_layer = nn.Hardswish if cnf.use_hs else nn.ReLU

        # expand
        if cnf.expanded_channels != cnf.input_channels:
            layers.append(
                Conv2dNormActivation(
                    cnf.input_channels,
                    cnf.expanded_channels,
                    kernel_size=1,
                    norm_layer=norm_layer,
                    activation_layer=activation_layer,
                )
            )

        # depthwise
        # With dilation > 1 the stride is forced to 1 (dilated blocks keep resolution).
        stride = 1 if cnf.dilation > 1 else cnf.stride
        layers.append(
            Conv2dNormActivation(
                cnf.expanded_channels,
                cnf.expanded_channels,
                kernel_size=cnf.kernel,
                stride=stride,
                dilation=cnf.dilation,
                groups=cnf.expanded_channels,
                norm_layer=norm_layer,
                activation_layer=activation_layer,
            )
        )
        if cnf.use_se:
            # Squeeze-and-excitation with a 4x channel reduction, rounded to a multiple of 8.
            squeeze_channels = _make_divisible(cnf.expanded_channels // 4, 8)
            layers.append(se_layer(cnf.expanded_channels, squeeze_channels))

        # project (linear: no activation after the 1x1 projection)
        layers.append(
            Conv2dNormActivation(
                cnf.expanded_channels, cnf.out_channels, kernel_size=1, norm_layer=norm_layer, activation_layer=None
            )
        )

        self.block = nn.Sequential(*layers)
        self.out_channels = cnf.out_channels
        self._is_cn = cnf.stride > 1

    def forward(self, input: Tensor) -> Tensor:
        result = self.block(input)
        if self.use_res_connect:
            result += input
        return result
115
+
116
+
117
class MobileNetV3(nn.Module):
    def __init__(
        self,
        inverted_residual_setting: List[InvertedResidualConfig],
        last_channel: int,
        num_classes: int = 1000,
        block: Optional[Callable[..., nn.Module]] = None,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        dropout: float = 0.2,
        **kwargs: Any,
    ) -> None:
        """
        MobileNet V3 main class

        Args:
            inverted_residual_setting (List[InvertedResidualConfig]): Network structure
            last_channel (int): The number of channels on the penultimate layer
            num_classes (int): Number of classes
            block (Optional[Callable[..., nn.Module]]): Module specifying inverted residual building block for mobilenet
            norm_layer (Optional[Callable[..., nn.Module]]): Module specifying the normalization layer to use
            dropout (float): The dropout probability
        """
        super().__init__()
        _log_api_usage_once(self)

        if not inverted_residual_setting:
            raise ValueError("The inverted_residual_setting should not be empty")
        elif not (
            isinstance(inverted_residual_setting, Sequence)
            and all(isinstance(s, InvertedResidualConfig) for s in inverted_residual_setting)
        ):
            raise TypeError("The inverted_residual_setting should be List[InvertedResidualConfig]")

        if block is None:
            block = InvertedResidual
        if norm_layer is None:
            # Defaults match the TF implementation the original weights come from.
            norm_layer = partial(nn.BatchNorm2d, eps=0.001, momentum=0.01)

        # Stem: 3x3 stride-2 conv into the first block's input width.
        first_out = inverted_residual_setting[0].input_channels
        layers: List[nn.Module] = [
            Conv2dNormActivation(
                3,
                first_out,
                kernel_size=3,
                stride=2,
                norm_layer=norm_layer,
                activation_layer=nn.Hardswish,
            )
        ]

        # Inverted residual stages.
        layers.extend(block(cnf, norm_layer) for cnf in inverted_residual_setting)

        # Head conv: expand the last block's output 6x with a 1x1 conv.
        last_in = inverted_residual_setting[-1].out_channels
        last_out = 6 * last_in
        layers.append(
            Conv2dNormActivation(
                last_in,
                last_out,
                kernel_size=1,
                norm_layer=norm_layer,
                activation_layer=nn.Hardswish,
            )
        )

        self.features = nn.Sequential(*layers)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Sequential(
            nn.Linear(last_out, last_channel),
            nn.Hardswish(inplace=True),
            nn.Dropout(p=dropout, inplace=True),
            nn.Linear(last_channel, num_classes),
        )

        # Standard MobileNet weight initialization.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out")
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)

    def _forward_impl(self, x: Tensor) -> Tensor:
        x = self.features(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        return self.classifier(x)

    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)
221
+
222
+
223
def _mobilenet_v3_conf(
    arch: str, width_mult: float = 1.0, reduced_tail: bool = False, dilated: bool = False, **kwargs: Any
):
    """Return the ``(inverted_residual_setting, last_channel)`` pair for ``arch``.

    ``reduced_tail`` halves the channel counts of the last stages; ``dilated`` swaps the
    final down-sampling for dilation (both mainly useful for dense-prediction backbones).
    """
    reduce_divider = 2 if reduced_tail else 1
    dilation = 2 if dilated else 1

    bneck_conf = partial(InvertedResidualConfig, width_mult=width_mult)
    adjust_channels = partial(InvertedResidualConfig.adjust_channels, width_mult=width_mult)

    # Row format: input_ch, kernel, expanded_ch, out_ch, use_se, activation, stride, dilation
    if arch == "mobilenet_v3_large":
        inverted_residual_setting = [
            bneck_conf(16, 3, 16, 16, False, "RE", 1, 1),
            bneck_conf(16, 3, 64, 24, False, "RE", 2, 1),  # C1
            bneck_conf(24, 3, 72, 24, False, "RE", 1, 1),
            bneck_conf(24, 5, 72, 40, True, "RE", 2, 1),  # C2
            bneck_conf(40, 5, 120, 40, True, "RE", 1, 1),
            bneck_conf(40, 5, 120, 40, True, "RE", 1, 1),
            bneck_conf(40, 3, 240, 80, False, "HS", 2, 1),  # C3
            bneck_conf(80, 3, 200, 80, False, "HS", 1, 1),
            bneck_conf(80, 3, 184, 80, False, "HS", 1, 1),
            bneck_conf(80, 3, 184, 80, False, "HS", 1, 1),
            bneck_conf(80, 3, 480, 112, True, "HS", 1, 1),
            bneck_conf(112, 3, 672, 112, True, "HS", 1, 1),
            bneck_conf(112, 5, 672, 160 // reduce_divider, True, "HS", 2, dilation),  # C4
            bneck_conf(160 // reduce_divider, 5, 960 // reduce_divider, 160 // reduce_divider, True, "HS", 1, dilation),
            bneck_conf(160 // reduce_divider, 5, 960 // reduce_divider, 160 // reduce_divider, True, "HS", 1, dilation),
        ]
        last_channel = adjust_channels(1280 // reduce_divider)  # C5
    elif arch == "mobilenet_v3_small":
        inverted_residual_setting = [
            bneck_conf(16, 3, 16, 16, True, "RE", 2, 1),  # C1
            bneck_conf(16, 3, 72, 24, False, "RE", 2, 1),  # C2
            bneck_conf(24, 3, 88, 24, False, "RE", 1, 1),
            bneck_conf(24, 5, 96, 40, True, "HS", 2, 1),  # C3
            bneck_conf(40, 5, 240, 40, True, "HS", 1, 1),
            bneck_conf(40, 5, 240, 40, True, "HS", 1, 1),
            bneck_conf(40, 5, 120, 48, True, "HS", 1, 1),
            bneck_conf(48, 5, 144, 48, True, "HS", 1, 1),
            bneck_conf(48, 5, 288, 96 // reduce_divider, True, "HS", 2, dilation),  # C4
            bneck_conf(96 // reduce_divider, 5, 576 // reduce_divider, 96 // reduce_divider, True, "HS", 1, dilation),
            bneck_conf(96 // reduce_divider, 5, 576 // reduce_divider, 96 // reduce_divider, True, "HS", 1, dilation),
        ]
        last_channel = adjust_channels(1024 // reduce_divider)  # C5
    else:
        raise ValueError(f"Unsupported model type {arch}")

    return inverted_residual_setting, last_channel
270
+
271
+
272
def _mobilenet_v3(
    inverted_residual_setting: List[InvertedResidualConfig],
    last_channel: int,
    weights: Optional[WeightsEnum],
    progress: bool,
    **kwargs: Any,
) -> MobileNetV3:
    """Instantiate a MobileNetV3 and, when ``weights`` is given, load its checkpoint."""
    if weights is not None:
        # The checkpoint dictates the number of output classes.
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))

    model = MobileNetV3(inverted_residual_setting, last_channel, **kwargs)
    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
    return model
288
+
289
+
290
# Metadata shared by every MobileNetV3 weight entry below.
_COMMON_META = {
    "min_size": (1, 1),
    "categories": _IMAGENET_CATEGORIES,
}
294
+
295
+
296
class MobileNet_V3_Large_Weights(WeightsEnum):
    # Original recipe (trained from scratch).
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/mobilenet_v3_large-8738ca79.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 5483032,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 74.042,
                    "acc@5": 91.340,
                }
            },
            "_ops": 0.217,
            "_file_size": 21.114,
            "_docs": """These weights were trained from scratch by using a simple training recipe.""",
        },
    )
    # Improved recipe (reg-tuning), slightly higher accuracy.
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/mobilenet_v3_large-5c1a4163.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 5483032,
            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-reg-tuning",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 75.274,
                    "acc@5": 92.566,
                }
            },
            "_ops": 0.217,
            "_file_size": 21.107,
            "_docs": """
                These weights improve marginally upon the results of the original paper by using a modified version of
                TorchVision's `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V2
338
+
339
+
340
class MobileNet_V3_Small_Weights(WeightsEnum):
    # Only one published checkpoint exists for the small variant.
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/mobilenet_v3_small-047dcff4.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 2542856,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#mobilenetv3-large--small",
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 67.668,
                    "acc@5": 87.402,
                }
            },
            "_ops": 0.057,
            "_file_size": 9.829,
            "_docs": """
                These weights improve upon the results of the original paper by using a simple training recipe.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V1
362
+
363
+
364
@register_model()
@handle_legacy_interface(weights=("pretrained", MobileNet_V3_Large_Weights.IMAGENET1K_V1))
def mobilenet_v3_large(
    *, weights: Optional[MobileNet_V3_Large_Weights] = None, progress: bool = True, **kwargs: Any
) -> MobileNetV3:
    """
    Constructs a large MobileNetV3 architecture from
    `Searching for MobileNetV3 <https://arxiv.org/abs/1905.02244>`__.

    Args:
        weights (:class:`~torchvision.models.MobileNet_V3_Large_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.MobileNet_V3_Large_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.mobilenet.MobileNetV3``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/mobilenetv3.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.MobileNet_V3_Large_Weights
        :members:
    """
    weights = MobileNet_V3_Large_Weights.verify(weights)

    settings, last_channel = _mobilenet_v3_conf("mobilenet_v3_large", **kwargs)
    return _mobilenet_v3(settings, last_channel, weights, progress, **kwargs)
393
+
394
+
395
@register_model()
@handle_legacy_interface(weights=("pretrained", MobileNet_V3_Small_Weights.IMAGENET1K_V1))
def mobilenet_v3_small(
    *, weights: Optional[MobileNet_V3_Small_Weights] = None, progress: bool = True, **kwargs: Any
) -> MobileNetV3:
    """
    Constructs a small MobileNetV3 architecture from
    `Searching for MobileNetV3 <https://arxiv.org/abs/1905.02244>`__.

    Args:
        weights (:class:`~torchvision.models.MobileNet_V3_Small_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.MobileNet_V3_Small_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.mobilenet.MobileNetV3``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/mobilenetv3.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.MobileNet_V3_Small_Weights
        :members:
    """
    weights = MobileNet_V3_Small_Weights.verify(weights)

    settings, last_channel = _mobilenet_v3_conf("mobilenet_v3_small", **kwargs)
    return _mobilenet_v3(settings, last_channel, weights, progress, **kwargs)
vllm/lib/python3.10/site-packages/torchvision/models/optical_flow/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .raft import *
vllm/lib/python3.10/site-packages/torchvision/models/optical_flow/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (202 Bytes). View file
 
vllm/lib/python3.10/site-packages/torchvision/models/optical_flow/__pycache__/_utils.cpython-310.pyc ADDED
Binary file (2.16 kB). View file
 
vllm/lib/python3.10/site-packages/torchvision/models/optical_flow/__pycache__/raft.cpython-310.pyc ADDED
Binary file (28.4 kB). View file
 
vllm/lib/python3.10/site-packages/torchvision/models/optical_flow/_utils.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional
2
+
3
+ import torch
4
+ import torch.nn.functional as F
5
+ from torch import Tensor
6
+
7
+
8
def grid_sample(img: Tensor, absolute_grid: Tensor, mode: str = "bilinear", align_corners: Optional[bool] = None):
    """Same as torch's grid_sample, but takes absolute pixel coordinates.

    The last dimension of ``absolute_grid`` holds (x, y) pixel positions, which are
    rescaled here to the [-1, 1] range expected by ``F.grid_sample``.
    """
    h, w = img.shape[-2:]

    x, y = absolute_grid.split([1, 1], dim=-1)
    x = 2 * x / (w - 1) - 1
    # Only normalize y when there is more than one row: raft-stereo reuses this helper
    # with h == 1, where the formula would divide by zero.
    if h > 1:
        y = 2 * y / (h - 1) - 1

    return F.grid_sample(img, torch.cat([x, y], dim=-1), mode=mode, align_corners=align_corners)
20
+
21
+
22
def make_coords_grid(batch_size: int, h: int, w: int, device: str = "cpu"):
    """Return a float grid of absolute (x, y) pixel coordinates, shape (batch_size, 2, h, w)."""
    dev = torch.device(device)
    ys, xs = torch.meshgrid(torch.arange(h, device=dev), torch.arange(w, device=dev), indexing="ij")
    # Channel 0 is the column index (x), channel 1 the row index (y).
    grid = torch.stack((xs, ys), dim=0).float()
    return grid[None].repeat(batch_size, 1, 1, 1)
27
+
28
+
29
def upsample_flow(flow, up_mask: Optional[Tensor] = None, factor: int = 8):
    """Upsample flow spatially by ``factor`` (default 8), scaling its values accordingly.

    Without ``up_mask`` this is plain bilinear interpolation. With ``up_mask`` the output
    is a convex combination of each coarse pixel's 3x3 neighborhood (see paper page 8 and
    appendix B; the appendix picture assumes a factor of 4 instead of 8).
    """
    batch_size, num_channels, h, w = flow.shape
    out_h, out_w = h * factor, w * factor

    if up_mask is None:
        return factor * F.interpolate(flow, size=(out_h, out_w), mode="bilinear", align_corners=True)

    weights = up_mask.view(batch_size, 1, 9, factor, factor, h, w)
    weights = torch.softmax(weights, dim=2)  # "convex": the 9 neighbor weights sum to 1

    neighbors = F.unfold(factor * flow, kernel_size=3, padding=1)
    neighbors = neighbors.view(batch_size, num_channels, 9, 1, 1, h, w)
    upsampled = torch.sum(weights * neighbors, dim=2)

    return upsampled.permute(0, 1, 4, 2, 5, 3).reshape(batch_size, num_channels, out_h, out_w)
vllm/lib/python3.10/site-packages/torchvision/models/optical_flow/raft.py ADDED
@@ -0,0 +1,947 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List, Optional
2
+
3
+ import torch
4
+ import torch.nn as nn
5
+ import torch.nn.functional as F
6
+ from torch import Tensor
7
+ from torch.nn.modules.batchnorm import BatchNorm2d
8
+ from torch.nn.modules.instancenorm import InstanceNorm2d
9
+ from torchvision.ops import Conv2dNormActivation
10
+
11
+ from ...transforms._presets import OpticalFlow
12
+ from ...utils import _log_api_usage_once
13
+ from .._api import register_model, Weights, WeightsEnum
14
+ from .._utils import handle_legacy_interface
15
+ from ._utils import grid_sample, make_coords_grid, upsample_flow
16
+
17
+
18
# Public API of this module.
__all__ = (
    "RAFT",
    "raft_large",
    "raft_small",
    "Raft_Large_Weights",
    "Raft_Small_Weights",
)
25
+
26
+
27
class ResidualBlock(nn.Module):
    """Slightly modified residual block: extra relu and biases."""

    def __init__(self, in_channels, out_channels, *, norm_layer, stride=1, always_project: bool = False):
        super().__init__()

        # bias=True even though a norm layer follows: in the RAFT training reference the
        # BatchNorm2d layers are frozen (eval mode) after the first dataset, so the conv
        # biases stay useful; keeping them also lets us load the original weights.
        self.convnormrelu1 = Conv2dNormActivation(
            in_channels, out_channels, norm_layer=norm_layer, kernel_size=3, stride=stride, bias=True
        )
        self.convnormrelu2 = Conv2dNormActivation(
            out_channels, out_channels, norm_layer=norm_layer, kernel_size=3, bias=True
        )

        # make mypy happy
        self.downsample: nn.Module

        if stride != 1 or always_project:
            self.downsample = Conv2dNormActivation(
                in_channels,
                out_channels,
                norm_layer=norm_layer,
                kernel_size=1,
                stride=stride,
                bias=True,
                activation_layer=None,
            )
        else:
            self.downsample = nn.Identity()

        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        shortcut = self.downsample(x)
        y = self.convnormrelu2(self.convnormrelu1(x))
        return self.relu(shortcut + y)
72
+
73
+
74
class BottleneckBlock(nn.Module):
    """Slightly modified bottleneck block (extra relu and biases)."""

    def __init__(self, in_channels, out_channels, *, norm_layer, stride=1):
        super().__init__()

        # See note in ResidualBlock for the reason behind bias=True
        mid = out_channels // 4
        self.convnormrelu1 = Conv2dNormActivation(in_channels, mid, norm_layer=norm_layer, kernel_size=1, bias=True)
        self.convnormrelu2 = Conv2dNormActivation(
            mid, mid, norm_layer=norm_layer, kernel_size=3, stride=stride, bias=True
        )
        self.convnormrelu3 = Conv2dNormActivation(mid, out_channels, norm_layer=norm_layer, kernel_size=1, bias=True)
        self.relu = nn.ReLU(inplace=True)

        if stride != 1:
            self.downsample = Conv2dNormActivation(
                in_channels,
                out_channels,
                norm_layer=norm_layer,
                kernel_size=1,
                stride=stride,
                bias=True,
                activation_layer=None,
            )
        else:
            self.downsample = nn.Identity()

    def forward(self, x):
        shortcut = self.downsample(x)
        y = self.convnormrelu3(self.convnormrelu2(self.convnormrelu1(x)))
        return self.relu(shortcut + y)
114
+
115
+
116
class FeatureEncoder(nn.Module):
    """The feature encoder, used both as the actual feature encoder, and as the context encoder.

    It must downsample its input by 8.
    """

    def __init__(
        self, *, block=ResidualBlock, layers=(64, 64, 96, 128, 256), strides=(2, 1, 2, 2), norm_layer=nn.BatchNorm2d
    ):
        super().__init__()

        if len(layers) != 5:
            raise ValueError(f"The expected number of layers is 5, instead got {len(layers)}")

        # See note in ResidualBlock for the reason behind bias=True
        self.convnormrelu = Conv2dNormActivation(
            3, layers[0], norm_layer=norm_layer, kernel_size=7, stride=strides[0], bias=True
        )

        self.layer1 = self._make_2_blocks(block, layers[0], layers[1], norm_layer=norm_layer, first_stride=strides[1])
        self.layer2 = self._make_2_blocks(block, layers[1], layers[2], norm_layer=norm_layer, first_stride=strides[2])
        self.layer3 = self._make_2_blocks(block, layers[2], layers[3], norm_layer=norm_layer, first_stride=strides[3])

        self.conv = nn.Conv2d(layers[3], layers[4], kernel_size=1)

        # He init for convs, constant init for the (optionally affine) norm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
            elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d)):
                if m.weight is not None:
                    nn.init.constant_(m.weight, 1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

        self.output_dim = layers[-1]
        # Each stride-2 stage halves the resolution.
        self.downsample_factor = 2 ** sum(1 for s in strides if s == 2)

    def _make_2_blocks(self, block, in_channels, out_channels, norm_layer, first_stride):
        # Two stacked blocks; only the first one may change resolution/width.
        return nn.Sequential(
            block(in_channels, out_channels, norm_layer=norm_layer, stride=first_stride),
            block(out_channels, out_channels, norm_layer=norm_layer, stride=1),
        )

    def forward(self, x):
        x = self.convnormrelu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        return self.conv(x)
169
+
170
+
171
class MotionEncoder(nn.Module):
    """The motion encoder, part of the update block.

    Encodes the current predicted flow together with the correlation features.
    """

    def __init__(self, *, in_channels_corr, corr_layers=(256, 192), flow_layers=(128, 64), out_channels=128):
        super().__init__()

        if len(flow_layers) != 2:
            raise ValueError(f"The expected number of flow_layers is 2, instead got {len(flow_layers)}")
        if len(corr_layers) not in (1, 2):
            raise ValueError(f"The number of corr_layers should be 1 or 2, instead got {len(corr_layers)}")

        self.convcorr1 = Conv2dNormActivation(in_channels_corr, corr_layers[0], norm_layer=None, kernel_size=1)
        # The second correlation conv is optional; fall back to a pass-through.
        self.convcorr2 = (
            Conv2dNormActivation(corr_layers[0], corr_layers[1], norm_layer=None, kernel_size=3)
            if len(corr_layers) == 2
            else nn.Identity()
        )

        self.convflow1 = Conv2dNormActivation(2, flow_layers[0], norm_layer=None, kernel_size=7)
        self.convflow2 = Conv2dNormActivation(flow_layers[0], flow_layers[1], norm_layer=None, kernel_size=3)

        # out_channels - 2 because we cat the flow (2 channels) at the end
        self.conv = Conv2dNormActivation(
            corr_layers[-1] + flow_layers[-1], out_channels - 2, norm_layer=None, kernel_size=3
        )

        self.out_channels = out_channels

    def forward(self, flow, corr_features):
        corr = self.convcorr2(self.convcorr1(corr_features))
        flow_features = self.convflow2(self.convflow1(flow))
        combined = self.conv(torch.cat([corr, flow_features], dim=1))
        # Re-attach the raw flow so downstream layers can see it directly.
        return torch.cat([combined, flow], dim=1)
212
+
213
+
214
class ConvGRU(nn.Module):
    """Convolutional GRU cell."""

    def __init__(self, *, input_size, hidden_size, kernel_size, padding):
        super().__init__()
        in_ch = hidden_size + input_size
        self.convz = nn.Conv2d(in_ch, hidden_size, kernel_size=kernel_size, padding=padding)
        self.convr = nn.Conv2d(in_ch, hidden_size, kernel_size=kernel_size, padding=padding)
        self.convq = nn.Conv2d(in_ch, hidden_size, kernel_size=kernel_size, padding=padding)

    def forward(self, h, x):
        hx = torch.cat([h, x], dim=1)
        update = torch.sigmoid(self.convz(hx))  # z: how much of the candidate to take
        reset = torch.sigmoid(self.convr(hx))  # r: how much of the old state to expose
        candidate = torch.tanh(self.convq(torch.cat([reset * h, x], dim=1)))
        return (1 - update) * h + update * candidate
230
+
231
+
232
+ def _pass_through_h(h, _):
233
+ # Declared here for torchscript
234
+ return h
235
+
236
+
237
class RecurrentBlock(nn.Module):
    """Recurrent block, part of the update block.

    Takes the current hidden state and the concatenation of (motion encoder output, context)
    as input, and returns an updated hidden state.
    """

    def __init__(self, *, input_size, hidden_size, kernel_size=((1, 5), (5, 1)), padding=((0, 2), (2, 0))):
        super().__init__()

        if len(kernel_size) != len(padding):
            raise ValueError(
                f"kernel_size should have the same length as padding, instead got len(kernel_size) = {len(kernel_size)} and len(padding) = {len(padding)}"
            )
        if len(kernel_size) not in (1, 2):
            raise ValueError(f"kernel_size should either 1 or 2, instead got {len(kernel_size)}")

        self.convgru1 = ConvGRU(
            input_size=input_size, hidden_size=hidden_size, kernel_size=kernel_size[0], padding=padding[0]
        )
        # With a single kernel, the second GRU degenerates to a pass-through on h.
        self.convgru2 = (
            ConvGRU(input_size=input_size, hidden_size=hidden_size, kernel_size=kernel_size[1], padding=padding[1])
            if len(kernel_size) == 2
            else _pass_through_h
        )

        self.hidden_size = hidden_size

    def forward(self, h, x):
        return self.convgru2(self.convgru1(h, x), x)
270
+
271
+
272
+ class FlowHead(nn.Module):
273
+ """Flow head, part of the update block.
274
+
275
+ Takes the hidden state of the recurrent unit as input, and outputs the predicted "delta flow".
276
+ """
277
+
278
+ def __init__(self, *, in_channels, hidden_size):
279
+ super().__init__()
280
+ self.conv1 = nn.Conv2d(in_channels, hidden_size, 3, padding=1)
281
+ self.conv2 = nn.Conv2d(hidden_size, 2, 3, padding=1)
282
+ self.relu = nn.ReLU(inplace=True)
283
+
284
+ def forward(self, x):
285
+ return self.conv2(self.relu(self.conv1(x)))
286
+
287
+
288
class UpdateBlock(nn.Module):
    """The update block: motion encoder + recurrent block + flow head.

    It must expose a ``hidden_state_size`` attribute, which is the hidden state size of its
    recurrent block.
    """

    def __init__(self, *, motion_encoder, recurrent_block, flow_head):
        super().__init__()
        self.motion_encoder = motion_encoder
        self.recurrent_block = recurrent_block
        self.flow_head = flow_head
        self.hidden_state_size = recurrent_block.hidden_size

    def forward(self, hidden_state, context, corr_features, flow):
        motion_features = self.motion_encoder(flow, corr_features)
        gru_input = torch.cat([context, motion_features], dim=1)
        hidden_state = self.recurrent_block(hidden_state, gru_input)
        return hidden_state, self.flow_head(hidden_state)
309
+
310
+
311
class MaskPredictor(nn.Module):
    """Mask predictor used when upsampling the predicted flow (not used in raft-small).

    Maps the recurrent unit's hidden state to the convex-upsampling mask.
    """

    def __init__(self, *, in_channels, hidden_size, multiplier=0.25):
        super().__init__()
        self.convrelu = Conv2dNormActivation(in_channels, hidden_size, norm_layer=None, kernel_size=3)
        # 8 * 8 * 9: the flow is downsampled by 8 (from the FeatureEncoder), and each fine
        # pixel interpolates over the 9 surrounding coarse neighbors. See paper appendix B.
        self.conv = nn.Conv2d(hidden_size, 8 * 8 * 9, 1, padding=0)

        # The original code uses 0.25 to "downweight the gradients" of this branch. See e.g.
        # https://github.com/princeton-vl/RAFT/issues/119#issuecomment-953950419
        # or https://github.com/princeton-vl/RAFT/issues/24.
        # It doesn't seem to affect epe significantly and can likely be set to 1.
        self.multiplier = multiplier

    def forward(self, x):
        return self.multiplier * self.conv(self.convrelu(x))
335
+
336
+
337
+ class CorrBlock(nn.Module):
338
+ """The correlation block.
339
+
340
+ Creates a correlation pyramid with ``num_levels`` levels from the outputs of the feature encoder,
341
+ and then indexes from this pyramid to create correlation features.
342
+ The "indexing" of a given centroid pixel x' is done by concatenating its surrounding neighbors that
343
+ are within a ``radius``, according to the infinity norm (see paper section 3.2).
344
+ Note: typo in the paper, it should be infinity norm, not 1-norm.
345
+ """
346
+
347
+ def __init__(self, *, num_levels: int = 4, radius: int = 4):
348
+ super().__init__()
349
+ self.num_levels = num_levels
350
+ self.radius = radius
351
+
352
+ self.corr_pyramid: List[Tensor] = [torch.tensor(0)] # useless, but torchscript is otherwise confused :')
353
+
354
+ # The neighborhood of a centroid pixel x' is {x' + delta, ||delta||_inf <= radius}
355
+ # so it's a square surrounding x', and its sides have a length of 2 * radius + 1
356
+ # The paper claims that it's ||.||_1 instead of ||.||_inf but it's a typo:
357
+ # https://github.com/princeton-vl/RAFT/issues/122
358
+ self.out_channels = num_levels * (2 * radius + 1) ** 2
359
+
360
+ def build_pyramid(self, fmap1, fmap2):
361
+ """Build the correlation pyramid from two feature maps.
362
+
363
+ The correlation volume is first computed as the dot product of each pair (pixel_in_fmap1, pixel_in_fmap2)
364
+ The last 2 dimensions of the correlation volume are then pooled num_levels times at different resolutions
365
+ to build the correlation pyramid.
366
+ """
367
+
368
+ if fmap1.shape != fmap2.shape:
369
+ raise ValueError(
370
+ f"Input feature maps should have the same shape, instead got {fmap1.shape} (fmap1.shape) != {fmap2.shape} (fmap2.shape)"
371
+ )
372
+
373
+ # Explaining min_fmap_size below: the fmaps are down-sampled (num_levels - 1) times by a factor of 2.
374
+ # The last corr_volume most have at least 2 values (hence the 2* factor), otherwise grid_sample() would
375
+ # produce nans in its output.
376
+ min_fmap_size = 2 * (2 ** (self.num_levels - 1))
377
+ if any(fmap_size < min_fmap_size for fmap_size in fmap1.shape[-2:]):
378
+ raise ValueError(
379
+ "Feature maps are too small to be down-sampled by the correlation pyramid. "
380
+ f"H and W of feature maps should be at least {min_fmap_size}; got: {fmap1.shape[-2:]}. "
381
+ "Remember that input images to the model are downsampled by 8, so that means their "
382
+ f"dimensions should be at least 8 * {min_fmap_size} = {8 * min_fmap_size}."
383
+ )
384
+
385
+ corr_volume = self._compute_corr_volume(fmap1, fmap2)
386
+
387
+ batch_size, h, w, num_channels, _, _ = corr_volume.shape # _, _ = h, w
388
+ corr_volume = corr_volume.reshape(batch_size * h * w, num_channels, h, w)
389
+ self.corr_pyramid = [corr_volume]
390
+ for _ in range(self.num_levels - 1):
391
+ corr_volume = F.avg_pool2d(corr_volume, kernel_size=2, stride=2)
392
+ self.corr_pyramid.append(corr_volume)
393
+
394
    def index_pyramid(self, centroids_coords):
        """Return correlation features by indexing from the pyramid.

        For each pyramid level, a (2*radius+1)² neighborhood around every centroid is
        sampled bilinearly from the correlation volume; the per-level results are
        concatenated channel-wise into a (batch, out_channels, h, w) tensor.
        See paper section 3.2.
        """
        neighborhood_side_len = 2 * self.radius + 1  # see note in __init__ about out_channels
        # Integer offsets [-radius, ..., +radius] along each axis, combined into a
        # (side_len, side_len, 2) grid of (di, dj) displacements around a centroid.
        di = torch.linspace(-self.radius, self.radius, neighborhood_side_len)
        dj = torch.linspace(-self.radius, self.radius, neighborhood_side_len)
        delta = torch.stack(torch.meshgrid(di, dj, indexing="ij"), dim=-1).to(centroids_coords.device)
        delta = delta.view(1, neighborhood_side_len, neighborhood_side_len, 2)

        batch_size, _, h, w = centroids_coords.shape  # _ = 2
        # One (1, 1, 2) coordinate per query pixel, matching the pyramid's
        # (batch_size * h * w, C, h', w') volume layout built in build_pyramid.
        centroids_coords = centroids_coords.permute(0, 2, 3, 1).reshape(batch_size * h * w, 1, 1, 2)

        indexed_pyramid = []
        for corr_volume in self.corr_pyramid:
            sampling_coords = centroids_coords + delta  # end shape is (batch_size * h * w, side_len, side_len, 2)
            # NOTE(review): this `grid_sample` is a module-level helper, not
            # torch.nn.functional.grid_sample directly — presumably it rescales pixel
            # coordinates to the [-1, 1] range that F.grid_sample expects; confirm in its definition.
            indexed_corr_volume = grid_sample(corr_volume, sampling_coords, align_corners=True, mode="bilinear").view(
                batch_size, h, w, -1
            )
            indexed_pyramid.append(indexed_corr_volume)
            # Each pyramid level is half the resolution of the previous one, so the
            # centroid coordinates are halved before indexing the next level.
            centroids_coords = centroids_coords / 2

        corr_features = torch.cat(indexed_pyramid, dim=-1).permute(0, 3, 1, 2).contiguous()

        # Sanity check: channel count must equal num_levels * (2 * radius + 1) ** 2.
        expected_output_shape = (batch_size, self.out_channels, h, w)
        if corr_features.shape != expected_output_shape:
            raise ValueError(
                f"Output shape of index pyramid is incorrect. Should be {expected_output_shape}, got {corr_features.shape}"
            )

        return corr_features
423
+
424
+ def _compute_corr_volume(self, fmap1, fmap2):
425
+ batch_size, num_channels, h, w = fmap1.shape
426
+ fmap1 = fmap1.view(batch_size, num_channels, h * w)
427
+ fmap2 = fmap2.view(batch_size, num_channels, h * w)
428
+
429
+ corr = torch.matmul(fmap1.transpose(1, 2), fmap2)
430
+ corr = corr.view(batch_size, h, w, 1, h, w)
431
+ return corr / torch.sqrt(torch.tensor(num_channels))
432
+
433
+
434
class RAFT(nn.Module):
    def __init__(self, *, feature_encoder, context_encoder, corr_block, update_block, mask_predictor=None):
        """RAFT model from
        `RAFT: Recurrent All Pairs Field Transforms for Optical Flow <https://arxiv.org/abs/2003.12039>`_.

        args:
            feature_encoder (nn.Module): The feature encoder. It must downsample the input by 8.
                Its input is the concatenation of ``image1`` and ``image2``.
            context_encoder (nn.Module): The context encoder. It must downsample the input by 8.
                Its input is ``image1``. As in the original implementation, its output will be split into 2 parts:

                - one part will be used as the actual "context", passed to the recurrent unit of the ``update_block``
                - one part will be used to initialize the hidden state of the recurrent unit of
                  the ``update_block``

                These 2 parts are split according to the ``hidden_state_size`` of the ``update_block``, so the output
                of the ``context_encoder`` must be strictly greater than ``hidden_state_size``.

            corr_block (nn.Module): The correlation block, which creates a correlation pyramid from the output of the
                ``feature_encoder``, and then indexes from this pyramid to create correlation features. It must expose
                2 methods:

                - a ``build_pyramid`` method that takes ``feature_map_1`` and ``feature_map_2`` as input (these are the
                  output of the ``feature_encoder``).
                - a ``index_pyramid`` method that takes the coordinates of the centroid pixels as input, and returns
                  the correlation features. See paper section 3.2.

                It must expose an ``out_channels`` attribute.

            update_block (nn.Module): The update block, which contains the motion encoder, the recurrent unit, and the
                flow head. It takes as input the hidden state of its recurrent unit, the context, the correlation
                features, and the current predicted flow. It outputs an updated hidden state, and the ``delta_flow``
                prediction (see paper appendix A). It must expose a ``hidden_state_size`` attribute.
            mask_predictor (nn.Module, optional): Predicts the mask that will be used to upsample the predicted flow.
                The output channel must be 8 * 8 * 9 - see paper section 3.3, and Appendix B.
                If ``None`` (default), the flow is upsampled using interpolation.
        """
        super().__init__()
        _log_api_usage_once(self)

        self.feature_encoder = feature_encoder
        self.context_encoder = context_encoder
        self.corr_block = corr_block
        self.update_block = update_block

        self.mask_predictor = mask_predictor

        # Fail fast: forward() needs this attribute to split the context encoder output.
        if not hasattr(self.update_block, "hidden_state_size"):
            raise ValueError("The update_block parameter should expose a 'hidden_state_size' attribute.")

    def forward(self, image1, image2, num_flow_updates: int = 12):
        """Estimate optical flow from ``image1`` to ``image2``.

        Returns a list of ``num_flow_updates`` flow predictions (full input resolution),
        one per refinement iteration; the last element is the most refined.
        """
        batch_size, _, h, w = image1.shape
        if (h, w) != image2.shape[-2:]:
            raise ValueError(f"input images should have the same shape, instead got ({h}, {w}) != {image2.shape[-2:]}")
        # BUG FIX: the original `if not (h % 8 == 0) and (w % 8 == 0):` parsed as
        # `(not (h % 8 == 0)) and (w % 8 == 0)` due to operator precedence, so inputs
        # where only one dimension is divisible by 8 skipped the check and failed later
        # with a less helpful error. Both H and W must be divisible by 8.
        if h % 8 != 0 or w % 8 != 0:
            raise ValueError(f"input image H and W should be divisible by 8, instead got {h} (h) and {w} (w)")

        # Encode both images in a single batched pass, then split the result.
        fmaps = self.feature_encoder(torch.cat([image1, image2], dim=0))
        fmap1, fmap2 = torch.chunk(fmaps, chunks=2, dim=0)
        if fmap1.shape[-2:] != (h // 8, w // 8):
            raise ValueError("The feature encoder should downsample H and W by 8")

        self.corr_block.build_pyramid(fmap1, fmap2)

        context_out = self.context_encoder(image1)
        if context_out.shape[-2:] != (h // 8, w // 8):
            raise ValueError("The context encoder should downsample H and W by 8")

        # As in the original paper, the actual output of the context encoder is split in 2 parts:
        # - one part is used to initialize the hidden state of the recurrent units of the update block
        # - the rest is the "actual" context.
        hidden_state_size = self.update_block.hidden_state_size
        out_channels_context = context_out.shape[1] - hidden_state_size
        if out_channels_context <= 0:
            raise ValueError(
                f"The context encoder outputs {context_out.shape[1]} channels, but it should have at strictly more than hidden_state={hidden_state_size} channels"
            )
        hidden_state, context = torch.split(context_out, [hidden_state_size, out_channels_context], dim=1)
        hidden_state = torch.tanh(hidden_state)
        context = F.relu(context)

        # coords0 stays fixed; coords1 is iteratively refined. flow = coords1 - coords0.
        coords0 = make_coords_grid(batch_size, h // 8, w // 8).to(fmap1.device)
        coords1 = make_coords_grid(batch_size, h // 8, w // 8).to(fmap1.device)

        flow_predictions = []
        for _ in range(num_flow_updates):
            coords1 = coords1.detach()  # Don't backpropagate gradients through this branch, see paper
            corr_features = self.corr_block.index_pyramid(centroids_coords=coords1)

            flow = coords1 - coords0
            hidden_state, delta_flow = self.update_block(hidden_state, context, corr_features, flow)

            coords1 = coords1 + delta_flow

            # Upsample 1/8-resolution flow back to input resolution, using the learned
            # convex mask when a mask predictor is available (paper section 3.3).
            up_mask = None if self.mask_predictor is None else self.mask_predictor(hidden_state)
            upsampled_flow = upsample_flow(flow=(coords1 - coords0), up_mask=up_mask)
            flow_predictions.append(upsampled_flow)

        return flow_predictions
534
+
535
+
536
# Metadata shared by every RAFT weights entry below.
_COMMON_META = {
    "min_size": (128, 128),
}
539
+
540
+
541
class Raft_Large_Weights(WeightsEnum):
    """The metrics reported here are as follows.

    ``epe`` is the "end-point-error" and indicates how far (in pixels) the
    predicted flow is from its true value. This is averaged over all pixels
    of all images. ``per_image_epe`` is similar, but the average is different:
    the epe is first computed on each image independently, and then averaged
    over all images. This corresponds to "Fl-epe" (sometimes written "F1-epe")
    in the original paper, and it's only used on Kitti. ``fl-all`` is also a
    Kitti-specific metric, defined by the author of the dataset and used for the
    Kitti leaderboard. It corresponds to the average of pixels whose epe is
    either <3px, or <5% of flow's 2-norm.
    """

    # Naming (per each entry's "_docs"): C/T = pre-trained on FlyingChairs/FlyingThings3D,
    # S/K/H = fine-tuned with Sintel/Kitti/HD1K; V1 = ported from the original repo,
    # V2 = trained from scratch with the torchvision recipe.

    C_T_V1 = Weights(
        # Weights ported from https://github.com/princeton-vl/RAFT
        url="https://download.pytorch.org/models/raft_large_C_T_V1-22a6c225.pth",
        transforms=OpticalFlow,
        meta={
            **_COMMON_META,
            "num_params": 5257536,
            "recipe": "https://github.com/princeton-vl/RAFT",
            "_metrics": {
                "Sintel-Train-Cleanpass": {"epe": 1.4411},
                "Sintel-Train-Finalpass": {"epe": 2.7894},
                "Kitti-Train": {"per_image_epe": 5.0172, "fl_all": 17.4506},
            },
            "_ops": 211.007,
            "_file_size": 20.129,
            "_docs": """These weights were ported from the original paper. They
            are trained on :class:`~torchvision.datasets.FlyingChairs` +
            :class:`~torchvision.datasets.FlyingThings3D`.""",
        },
    )

    C_T_V2 = Weights(
        url="https://download.pytorch.org/models/raft_large_C_T_V2-1bb1363a.pth",
        transforms=OpticalFlow,
        meta={
            **_COMMON_META,
            "num_params": 5257536,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/optical_flow",
            "_metrics": {
                "Sintel-Train-Cleanpass": {"epe": 1.3822},
                "Sintel-Train-Finalpass": {"epe": 2.7161},
                "Kitti-Train": {"per_image_epe": 4.5118, "fl_all": 16.0679},
            },
            "_ops": 211.007,
            "_file_size": 20.129,
            "_docs": """These weights were trained from scratch on
            :class:`~torchvision.datasets.FlyingChairs` +
            :class:`~torchvision.datasets.FlyingThings3D`.""",
        },
    )

    C_T_SKHT_V1 = Weights(
        # Weights ported from https://github.com/princeton-vl/RAFT
        url="https://download.pytorch.org/models/raft_large_C_T_SKHT_V1-0b8c9e55.pth",
        transforms=OpticalFlow,
        meta={
            **_COMMON_META,
            "num_params": 5257536,
            "recipe": "https://github.com/princeton-vl/RAFT",
            "_metrics": {
                "Sintel-Test-Cleanpass": {"epe": 1.94},
                "Sintel-Test-Finalpass": {"epe": 3.18},
            },
            "_ops": 211.007,
            "_file_size": 20.129,
            "_docs": """
                These weights were ported from the original paper. They are
                trained on :class:`~torchvision.datasets.FlyingChairs` +
                :class:`~torchvision.datasets.FlyingThings3D` and fine-tuned on
                Sintel. The Sintel fine-tuning step is a combination of
                :class:`~torchvision.datasets.Sintel`,
                :class:`~torchvision.datasets.KittiFlow`,
                :class:`~torchvision.datasets.HD1K`, and
                :class:`~torchvision.datasets.FlyingThings3D` (clean pass).
            """,
        },
    )

    C_T_SKHT_V2 = Weights(
        url="https://download.pytorch.org/models/raft_large_C_T_SKHT_V2-ff5fadd5.pth",
        transforms=OpticalFlow,
        meta={
            **_COMMON_META,
            "num_params": 5257536,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/optical_flow",
            "_metrics": {
                "Sintel-Test-Cleanpass": {"epe": 1.819},
                "Sintel-Test-Finalpass": {"epe": 3.067},
            },
            "_ops": 211.007,
            "_file_size": 20.129,
            "_docs": """
                These weights were trained from scratch. They are
                pre-trained on :class:`~torchvision.datasets.FlyingChairs` +
                :class:`~torchvision.datasets.FlyingThings3D` and then
                fine-tuned on Sintel. The Sintel fine-tuning step is a
                combination of :class:`~torchvision.datasets.Sintel`,
                :class:`~torchvision.datasets.KittiFlow`,
                :class:`~torchvision.datasets.HD1K`, and
                :class:`~torchvision.datasets.FlyingThings3D` (clean pass).
            """,
        },
    )

    C_T_SKHT_K_V1 = Weights(
        # Weights ported from https://github.com/princeton-vl/RAFT
        url="https://download.pytorch.org/models/raft_large_C_T_SKHT_K_V1-4a6a5039.pth",
        transforms=OpticalFlow,
        meta={
            **_COMMON_META,
            "num_params": 5257536,
            "recipe": "https://github.com/princeton-vl/RAFT",
            "_metrics": {
                "Kitti-Test": {"fl_all": 5.10},
            },
            "_ops": 211.007,
            "_file_size": 20.129,
            "_docs": """
                These weights were ported from the original paper. They are
                pre-trained on :class:`~torchvision.datasets.FlyingChairs` +
                :class:`~torchvision.datasets.FlyingThings3D`,
                fine-tuned on Sintel, and then fine-tuned on
                :class:`~torchvision.datasets.KittiFlow`. The Sintel fine-tuning
                step was described above.
            """,
        },
    )

    C_T_SKHT_K_V2 = Weights(
        url="https://download.pytorch.org/models/raft_large_C_T_SKHT_K_V2-b5c70766.pth",
        transforms=OpticalFlow,
        meta={
            **_COMMON_META,
            "num_params": 5257536,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/optical_flow",
            "_metrics": {
                "Kitti-Test": {"fl_all": 5.19},
            },
            "_ops": 211.007,
            "_file_size": 20.129,
            "_docs": """
                These weights were trained from scratch. They are
                pre-trained on :class:`~torchvision.datasets.FlyingChairs` +
                :class:`~torchvision.datasets.FlyingThings3D`,
                fine-tuned on Sintel, and then fine-tuned on
                :class:`~torchvision.datasets.KittiFlow`. The Sintel fine-tuning
                step was described above.
            """,
        },
    )

    DEFAULT = C_T_SKHT_V2
697
+
698
+
699
class Raft_Small_Weights(WeightsEnum):
    """The metrics reported here are as follows.

    ``epe`` is the "end-point-error" and indicates how far (in pixels) the
    predicted flow is from its true value. This is averaged over all pixels
    of all images. ``per_image_epe`` is similar, but the average is different:
    the epe is first computed on each image independently, and then averaged
    over all images. This corresponds to "Fl-epe" (sometimes written "F1-epe")
    in the original paper, and it's only used on Kitti. ``fl-all`` is also a
    Kitti-specific metric, defined by the author of the dataset and used for the
    Kitti leaderboard. It corresponds to the average of pixels whose epe is
    either <3px, or <5% of flow's 2-norm.
    """

    # V1 = ported from the original repo, V2 = trained from scratch with the
    # torchvision recipe (per each entry's "_docs").

    C_T_V1 = Weights(
        # Weights ported from https://github.com/princeton-vl/RAFT
        url="https://download.pytorch.org/models/raft_small_C_T_V1-ad48884c.pth",
        transforms=OpticalFlow,
        meta={
            **_COMMON_META,
            "num_params": 990162,
            "recipe": "https://github.com/princeton-vl/RAFT",
            "_metrics": {
                "Sintel-Train-Cleanpass": {"epe": 2.1231},
                "Sintel-Train-Finalpass": {"epe": 3.2790},
                "Kitti-Train": {"per_image_epe": 7.6557, "fl_all": 25.2801},
            },
            "_ops": 47.655,
            "_file_size": 3.821,
            "_docs": """These weights were ported from the original paper. They
            are trained on :class:`~torchvision.datasets.FlyingChairs` +
            :class:`~torchvision.datasets.FlyingThings3D`.""",
        },
    )
    C_T_V2 = Weights(
        url="https://download.pytorch.org/models/raft_small_C_T_V2-01064c6d.pth",
        transforms=OpticalFlow,
        meta={
            **_COMMON_META,
            "num_params": 990162,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/optical_flow",
            "_metrics": {
                "Sintel-Train-Cleanpass": {"epe": 1.9901},
                "Sintel-Train-Finalpass": {"epe": 3.2831},
                "Kitti-Train": {"per_image_epe": 7.5978, "fl_all": 25.2369},
            },
            "_ops": 47.655,
            "_file_size": 3.821,
            "_docs": """These weights were trained from scratch on
            :class:`~torchvision.datasets.FlyingChairs` +
            :class:`~torchvision.datasets.FlyingThings3D`.""",
        },
    )

    DEFAULT = C_T_V2
754
+
755
+
756
def _raft(
    *,
    weights=None,
    progress=False,
    # Feature encoder
    feature_encoder_layers,
    feature_encoder_block,
    feature_encoder_norm_layer,
    # Context encoder
    context_encoder_layers,
    context_encoder_block,
    context_encoder_norm_layer,
    # Correlation block
    corr_block_num_levels,
    corr_block_radius,
    # Motion encoder
    motion_encoder_corr_layers,
    motion_encoder_flow_layers,
    motion_encoder_out_channels,
    # Recurrent block
    recurrent_block_hidden_state_size,
    recurrent_block_kernel_size,
    recurrent_block_padding,
    # Flow Head
    flow_head_hidden_size,
    # Mask predictor
    use_mask_predictor,
    **kwargs,
):
    """Assemble a :class:`RAFT` model from its sub-module hyper-parameters.

    Pre-built sub-modules may be injected through ``kwargs`` (``feature_encoder``,
    ``context_encoder``, ``corr_block``, ``update_block``, ``mask_predictor``);
    each one that is absent is built here from the corresponding hyper-parameters.
    When ``weights`` is given, the matching state dict is downloaded and loaded.
    """
    feature_encoder = kwargs.pop("feature_encoder", None) or FeatureEncoder(
        block=feature_encoder_block, layers=feature_encoder_layers, norm_layer=feature_encoder_norm_layer
    )
    context_encoder = kwargs.pop("context_encoder", None) or FeatureEncoder(
        block=context_encoder_block, layers=context_encoder_layers, norm_layer=context_encoder_norm_layer
    )
    corr_block = kwargs.pop("corr_block", None) or CorrBlock(num_levels=corr_block_num_levels, radius=corr_block_radius)

    update_block = kwargs.pop("update_block", None)
    if update_block is None:
        motion_encoder = MotionEncoder(
            in_channels_corr=corr_block.out_channels,
            corr_layers=motion_encoder_corr_layers,
            flow_layers=motion_encoder_flow_layers,
            out_channels=motion_encoder_out_channels,
        )

        # The context encoder output is split between the recurrent unit's hidden state
        # and the actual context — see the comments in RAFT.forward.
        context_channels = context_encoder_layers[-1] - recurrent_block_hidden_state_size
        recurrent_block = RecurrentBlock(
            input_size=motion_encoder.out_channels + context_channels,
            hidden_size=recurrent_block_hidden_state_size,
            kernel_size=recurrent_block_kernel_size,
            padding=recurrent_block_padding,
        )
        flow_head = FlowHead(in_channels=recurrent_block_hidden_state_size, hidden_size=flow_head_hidden_size)

        update_block = UpdateBlock(motion_encoder=motion_encoder, recurrent_block=recurrent_block, flow_head=flow_head)

    mask_predictor = kwargs.pop("mask_predictor", None)
    if use_mask_predictor and mask_predictor is None:
        mask_predictor = MaskPredictor(
            in_channels=recurrent_block_hidden_state_size,
            hidden_size=256,
            multiplier=0.25,  # See comment in MaskPredictor about this
        )

    model = RAFT(
        feature_encoder=feature_encoder,
        context_encoder=context_encoder,
        corr_block=corr_block,
        update_block=update_block,
        mask_predictor=mask_predictor,
        **kwargs,  # should be empty by now; forwarded so unexpected args fail loudly
    )

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))

    return model
837
+
838
+
839
@register_model()
@handle_legacy_interface(weights=("pretrained", Raft_Large_Weights.C_T_SKHT_V2))
def raft_large(*, weights: Optional[Raft_Large_Weights] = None, progress=True, **kwargs) -> RAFT:
    """RAFT model from
    `RAFT: Recurrent All Pairs Field Transforms for Optical Flow <https://arxiv.org/abs/2003.12039>`_.

    Please see the example below for a tutorial on how to use this model.

    Args:
        weights(:class:`~torchvision.models.optical_flow.Raft_Large_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.optical_flow.Raft_Large_Weights`
            below for more details, and possible values. By default, no
            pre-trained weights are used.
        progress (bool): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.optical_flow.RAFT``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/optical_flow/raft.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.optical_flow.Raft_Large_Weights
        :members:
    """
    weights = Raft_Large_Weights.verify(weights)

    # Hyper-parameters of the "large" RAFT variant from the paper.
    config = dict(
        # Feature encoder
        feature_encoder_layers=(64, 64, 96, 128, 256),
        feature_encoder_block=ResidualBlock,
        feature_encoder_norm_layer=InstanceNorm2d,
        # Context encoder
        context_encoder_layers=(64, 64, 96, 128, 256),
        context_encoder_block=ResidualBlock,
        context_encoder_norm_layer=BatchNorm2d,
        # Correlation block
        corr_block_num_levels=4,
        corr_block_radius=4,
        # Motion encoder
        motion_encoder_corr_layers=(256, 192),
        motion_encoder_flow_layers=(128, 64),
        motion_encoder_out_channels=128,
        # Recurrent block
        recurrent_block_hidden_state_size=128,
        recurrent_block_kernel_size=((1, 5), (5, 1)),
        recurrent_block_padding=((0, 2), (2, 0)),
        # Flow head
        flow_head_hidden_size=256,
        # Mask predictor
        use_mask_predictor=True,
    )
    return _raft(weights=weights, progress=progress, **config, **kwargs)
893
+
894
+
895
@register_model()
@handle_legacy_interface(weights=("pretrained", Raft_Small_Weights.C_T_V2))
def raft_small(*, weights: Optional[Raft_Small_Weights] = None, progress=True, **kwargs) -> RAFT:
    """RAFT "small" model from
    `RAFT: Recurrent All Pairs Field Transforms for Optical Flow <https://arxiv.org/abs/2003.12039>`__.

    Please see the example below for a tutorial on how to use this model.

    Args:
        weights(:class:`~torchvision.models.optical_flow.Raft_Small_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.optical_flow.Raft_Small_Weights`
            below for more details, and possible values. By default, no
            pre-trained weights are used.
        progress (bool): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.optical_flow.RAFT``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/optical_flow/raft.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.optical_flow.Raft_Small_Weights
        :members:
    """
    weights = Raft_Small_Weights.verify(weights)

    # Hyper-parameters of the "small" RAFT variant from the paper (bottleneck blocks,
    # smaller channel counts, no learned upsampling mask).
    config = dict(
        # Feature encoder
        feature_encoder_layers=(32, 32, 64, 96, 128),
        feature_encoder_block=BottleneckBlock,
        feature_encoder_norm_layer=InstanceNorm2d,
        # Context encoder
        context_encoder_layers=(32, 32, 64, 96, 160),
        context_encoder_block=BottleneckBlock,
        context_encoder_norm_layer=None,
        # Correlation block
        corr_block_num_levels=4,
        corr_block_radius=3,
        # Motion encoder
        motion_encoder_corr_layers=(96,),
        motion_encoder_flow_layers=(64, 32),
        motion_encoder_out_channels=82,
        # Recurrent block
        recurrent_block_hidden_state_size=96,
        recurrent_block_kernel_size=(3,),
        recurrent_block_padding=(1,),
        # Flow head
        flow_head_hidden_size=128,
        # Mask predictor
        use_mask_predictor=False,
    )
    return _raft(weights=weights, progress=progress, **config, **kwargs)
vllm/lib/python3.10/site-packages/torchvision/models/quantization/__init__.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ from .googlenet import *
2
+ from .inception import *
3
+ from .mobilenet import *
4
+ from .resnet import *
5
+ from .shufflenetv2 import *
vllm/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (291 Bytes). View file
 
vllm/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/googlenet.cpython-310.pyc ADDED
Binary file (8.07 kB). View file
 
vllm/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/inception.cpython-310.pyc ADDED
Binary file (10.4 kB). View file
 
vllm/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/mobilenet.cpython-310.pyc ADDED
Binary file (305 Bytes). View file
 
vllm/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/mobilenetv2.cpython-310.pyc ADDED
Binary file (6.19 kB). View file
 
vllm/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/mobilenetv3.cpython-310.pyc ADDED
Binary file (8.58 kB). View file
 
vllm/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/resnet.cpython-310.pyc ADDED
Binary file (15.1 kB). View file
 
vllm/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/shufflenetv2.cpython-310.pyc ADDED
Binary file (14.5 kB). View file
 
vllm/lib/python3.10/site-packages/torchvision/models/quantization/__pycache__/utils.cpython-310.pyc ADDED
Binary file (1.73 kB). View file
 
vllm/lib/python3.10/site-packages/torchvision/models/quantization/googlenet.py ADDED
@@ -0,0 +1,210 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+ from functools import partial
3
+ from typing import Any, Optional, Union
4
+
5
+ import torch
6
+ import torch.nn as nn
7
+ from torch import Tensor
8
+ from torch.nn import functional as F
9
+
10
+ from ...transforms._presets import ImageClassification
11
+ from .._api import register_model, Weights, WeightsEnum
12
+ from .._meta import _IMAGENET_CATEGORIES
13
+ from .._utils import _ovewrite_named_param, handle_legacy_interface
14
+ from ..googlenet import BasicConv2d, GoogLeNet, GoogLeNet_Weights, GoogLeNetOutputs, Inception, InceptionAux
15
+ from .utils import _fuse_modules, _replace_relu, quantize_model
16
+
17
+
18
# Public API of this module: the quantizable GoogLeNet, its quantized weights enum,
# and the builder function.
__all__ = [
    "QuantizableGoogLeNet",
    "GoogLeNet_QuantizedWeights",
    "googlenet",
]
23
+
24
+
25
class QuantizableBasicConv2d(BasicConv2d):
    """``BasicConv2d`` variant whose ReLU is a standalone submodule, so the
    conv/bn/relu triplet can be fused for quantization."""

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        self.relu = nn.ReLU()

    def forward(self, x: Tensor) -> Tensor:
        # conv -> bn -> relu, each as its own module so the fuser can match them.
        return self.relu(self.bn(self.conv(x)))

    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        """Fuse conv+bn+relu in place (no numeric change; model stays in float)."""
        _fuse_modules(self, ["conv", "bn", "relu"], is_qat, inplace=True)
38
+
39
+
40
class QuantizableInception(Inception):
    """Inception block that concatenates its branch outputs through
    ``FloatFunctional`` so the cat op can be quantized."""

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
        self.cat = nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
        branch_outputs = self._forward(x)
        # Channel-wise concatenation via FloatFunctional instead of torch.cat.
        return self.cat.cat(branch_outputs, 1)
48
+
49
+
50
class QuantizableInceptionAux(InceptionAux):
    """Auxiliary classifier head with a standalone ReLU module for quantization."""

    # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
        self.relu = nn.ReLU()

    def forward(self, x: Tensor) -> Tensor:
        # Input: aux1 sees N x 512 x 14 x 14, aux2 sees N x 528 x 14 x 14.
        pooled = F.adaptive_avg_pool2d(x, (4, 4))  # -> N x C x 4 x 4
        features = self.conv(pooled)  # -> N x 128 x 4 x 4
        flat = torch.flatten(features, 1)  # -> N x 2048
        hidden = self.relu(self.fc1(flat))  # -> N x 1024
        hidden = self.dropout(hidden)  # -> N x 1024
        return self.fc2(hidden)  # -> N x num_classes
72
+
73
+
74
class QuantizableGoogLeNet(GoogLeNet):
    """GoogLeNet built from quantizable blocks, with quant/dequant stubs at the
    boundaries of the quantized region of the graph."""

    # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(  # type: ignore[misc]
            *args, blocks=[QuantizableBasicConv2d, QuantizableInception, QuantizableInceptionAux], **kwargs
        )
        self.quant = torch.ao.quantization.QuantStub()
        self.dequant = torch.ao.quantization.DeQuantStub()

    def forward(self, x: Tensor) -> GoogLeNetOutputs:
        x = self._transform_input(x)
        x = self.quant(x)
        x, aux1, aux2 = self._forward(x)
        x = self.dequant(x)
        aux_defined = self.training and self.aux_logits
        if torch.jit.is_scripting():
            if not aux_defined:
                warnings.warn("Scripted QuantizableGoogleNet always returns GoogleNetOutputs Tuple")
            return GoogLeNetOutputs(x, aux2, aux1)
        return self.eager_outputs(x, aux2, aux1)

    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        r"""Fuse conv/bn/relu modules in googlenet model

        Fuse conv+bn+relu/ conv+relu/conv+bn modules to prepare for quantization.
        Model is modified in place. Note that this operation does not change numerics
        and the model after modification is in floating point
        """
        # Exact type check (not isinstance) so only the quantizable conv blocks fuse.
        for module in self.modules():
            if type(module) is QuantizableBasicConv2d:
                module.fuse_model(is_qat)
107
+
108
+
109
class GoogLeNet_QuantizedWeights(WeightsEnum):
    # Single post-training-quantized checkpoint (fbgemm backend); "unquantized"
    # points to the float weights it was derived from.
    IMAGENET1K_FBGEMM_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/googlenet_fbgemm-c81f6644.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            "num_params": 6624904,
            "min_size": (15, 15),
            "categories": _IMAGENET_CATEGORIES,
            "backend": "fbgemm",
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#post-training-quantized-models",
            "unquantized": GoogLeNet_Weights.IMAGENET1K_V1,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 69.826,
                    "acc@5": 89.404,
                }
            },
            "_ops": 1.498,
            "_file_size": 12.618,
            "_docs": """
                These weights were produced by doing Post Training Quantization (eager mode) on top of the unquantized
                weights listed below.
            """,
        },
    )
    DEFAULT = IMAGENET1K_FBGEMM_V1
135
+
136
+
137
@register_model(name="quantized_googlenet")
@handle_legacy_interface(
    weights=(
        "pretrained",
        lambda kwargs: GoogLeNet_QuantizedWeights.IMAGENET1K_FBGEMM_V1
        if kwargs.get("quantize", False)
        else GoogLeNet_Weights.IMAGENET1K_V1,
    )
)
def googlenet(
    *,
    weights: Optional[Union[GoogLeNet_QuantizedWeights, GoogLeNet_Weights]] = None,
    progress: bool = True,
    quantize: bool = False,
    **kwargs: Any,
) -> QuantizableGoogLeNet:
    """GoogLeNet (Inception v1) model architecture from `Going Deeper with Convolutions <http://arxiv.org/abs/1409.4842>`__.

    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.

    Args:
        weights (:class:`~torchvision.models.quantization.GoogLeNet_QuantizedWeights` or :class:`~torchvision.models.GoogLeNet_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.quantization.GoogLeNet_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        quantize (bool, optional): If True, return a quantized version of the model. Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableGoogLeNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/googlenet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.quantization.GoogLeNet_QuantizedWeights
        :members:

    .. autoclass:: torchvision.models.GoogLeNet_Weights
        :members:
        :noindex:
    """
    weights = (GoogLeNet_QuantizedWeights if quantize else GoogLeNet_Weights).verify(weights)

    # Remember whether the caller asked for aux heads before they are force-enabled
    # below to match the pretrained checkpoint's layout.
    original_aux_logits = kwargs.get("aux_logits", False)
    if weights is not None:
        # Pretrained checkpoints fix the architecture: aux heads on, no re-init,
        # class count taken from the weights' categories.
        if "transform_input" not in kwargs:
            _ovewrite_named_param(kwargs, "transform_input", True)
        _ovewrite_named_param(kwargs, "aux_logits", True)
        _ovewrite_named_param(kwargs, "init_weights", False)
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
        if "backend" in weights.meta:
            _ovewrite_named_param(kwargs, "backend", weights.meta["backend"])
    backend = kwargs.pop("backend", "fbgemm")

    model = QuantizableGoogLeNet(**kwargs)
    _replace_relu(model)
    if quantize:
        # NOTE(review): quantization runs BEFORE load_state_dict — presumably the
        # quantized checkpoints store quantized parameters; confirm before reordering.
        quantize_model(model, backend)

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
        if not original_aux_logits:
            # The caller did not ask for aux heads: drop them after loading.
            model.aux_logits = False
            model.aux1 = None  # type: ignore[assignment]
            model.aux2 = None  # type: ignore[assignment]
        else:
            warnings.warn(
                "auxiliary heads in the pretrained googlenet model are NOT pretrained, so make sure to train them"
            )

    return model
vllm/lib/python3.10/site-packages/torchvision/models/quantization/inception.py ADDED
@@ -0,0 +1,273 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+ from functools import partial
3
+ from typing import Any, List, Optional, Union
4
+
5
+ import torch
6
+ import torch.nn as nn
7
+ import torch.nn.functional as F
8
+ from torch import Tensor
9
+ from torchvision.models import inception as inception_module
10
+ from torchvision.models.inception import Inception_V3_Weights, InceptionOutputs
11
+
12
+ from ...transforms._presets import ImageClassification
13
+ from .._api import register_model, Weights, WeightsEnum
14
+ from .._meta import _IMAGENET_CATEGORIES
15
+ from .._utils import _ovewrite_named_param, handle_legacy_interface
16
+ from .utils import _fuse_modules, _replace_relu, quantize_model
17
+
18
+
19
+ __all__ = [
20
+ "QuantizableInception3",
21
+ "Inception_V3_QuantizedWeights",
22
+ "inception_v3",
23
+ ]
24
+
25
+
26
class QuantizableBasicConv2d(inception_module.BasicConv2d):
    """Conv2d+BN+ReLU block whose three pieces can be fused for eager-mode quantization."""

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # A real nn.ReLU module (instead of a functional call) so that the
        # eager-mode fusion machinery can locate it by attribute name.
        self.relu = nn.ReLU()

    def forward(self, x: Tensor) -> Tensor:
        return self.relu(self.bn(self.conv(x)))

    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        """Fuse conv/bn/relu into a single module, modifying this block in place."""
        _fuse_modules(self, ["conv", "bn", "relu"], is_qat, inplace=True)
39
+
40
+
41
class QuantizableInceptionA(inception_module.InceptionA):
    # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
        # FloatFunctional lets the branch concatenation carry its own observer.
        self.myop = nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
        branch_outputs = self._forward(x)
        return self.myop.cat(branch_outputs, 1)
50
+
51
+
52
class QuantizableInceptionB(inception_module.InceptionB):
    # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
        # Quantizable concat op for joining the parallel branches.
        self.myop = nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
        branch_outputs = self._forward(x)
        return self.myop.cat(branch_outputs, 1)
61
+
62
+
63
class QuantizableInceptionC(inception_module.InceptionC):
    # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
        # Quantizable concat op for joining the parallel branches.
        self.myop = nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
        branch_outputs = self._forward(x)
        return self.myop.cat(branch_outputs, 1)
72
+
73
+
74
class QuantizableInceptionD(inception_module.InceptionD):
    # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
        # Quantizable concat op for joining the parallel branches.
        self.myop = nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
        branch_outputs = self._forward(x)
        return self.myop.cat(branch_outputs, 1)
83
+
84
+
85
class QuantizableInceptionE(inception_module.InceptionE):
    # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
        # One FloatFunctional per concat site so each gets an independent observer.
        self.myop1 = nn.quantized.FloatFunctional()
        self.myop2 = nn.quantized.FloatFunctional()
        self.myop3 = nn.quantized.FloatFunctional()

    def _forward(self, x: Tensor) -> List[Tensor]:
        b1 = self.branch1x1(x)

        b3 = self.branch3x3_1(x)
        b3 = self.myop1.cat([self.branch3x3_2a(b3), self.branch3x3_2b(b3)], 1)

        b3dbl = self.branch3x3dbl_2(self.branch3x3dbl_1(x))
        b3dbl = self.myop2.cat([self.branch3x3dbl_3a(b3dbl), self.branch3x3dbl_3b(b3dbl)], 1)

        pooled = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        pooled = self.branch_pool(pooled)

        return [b1, b3, b3dbl, pooled]

    def forward(self, x: Tensor) -> Tensor:
        return self.myop3.cat(self._forward(x), 1)
117
+
118
+
119
class QuantizableInceptionAux(inception_module.InceptionAux):
    # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
    # Auxiliary classifier head: only swaps in the quantizable conv block. It has
    # no concat/add arithmetic of its own, so no FloatFunctional is needed here.
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)  # type: ignore[misc]
123
+
124
+
125
class QuantizableInception3(inception_module.Inception3):
    """Inception v3 assembled from quantizable blocks, bracketed by quant/dequant stubs."""

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        quantizable_blocks = [
            QuantizableBasicConv2d,
            QuantizableInceptionA,
            QuantizableInceptionB,
            QuantizableInceptionC,
            QuantizableInceptionD,
            QuantizableInceptionE,
            QuantizableInceptionAux,
        ]
        super().__init__(*args, inception_blocks=quantizable_blocks, **kwargs)  # type: ignore[misc]
        # Boundaries where tensors enter/leave the quantized domain.
        self.quant = torch.ao.quantization.QuantStub()
        self.dequant = torch.ao.quantization.DeQuantStub()

    def forward(self, x: Tensor) -> InceptionOutputs:
        x = self._transform_input(x)
        x = self.quant(x)
        x, aux = self._forward(x)
        x = self.dequant(x)
        aux_defined = self.training and self.aux_logits
        if not torch.jit.is_scripting():
            # Eager mode follows the float model's train/eval return convention.
            return self.eager_outputs(x, aux)
        if not aux_defined:
            warnings.warn("Scripted QuantizableInception3 always returns QuantizableInception3 Tuple")
        return InceptionOutputs(x, aux)

    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        r"""Fuse conv/bn/relu modules in inception model

        Fuse conv+bn+relu/ conv+relu/conv+bn modules to prepare for quantization.
        Model is modified in place. Note that this operation does not change numerics
        and the model after modification is in floating point
        """
        for mod in self.modules():
            # Exact type match on purpose: fusion must not touch subclasses.
            if type(mod) is QuantizableBasicConv2d:
                mod.fuse_model(is_qat)
167
+
168
+
169
class Inception_V3_QuantizedWeights(WeightsEnum):
    # Post-training-quantized (fbgemm backend) counterpart of
    # Inception_V3_Weights.IMAGENET1K_V1; see the "unquantized" meta entry.
    IMAGENET1K_FBGEMM_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/inception_v3_google_fbgemm-a2837893.pth",
        transforms=partial(ImageClassification, crop_size=299, resize_size=342),
        meta={
            "num_params": 27161264,
            "min_size": (75, 75),
            "categories": _IMAGENET_CATEGORIES,
            "backend": "fbgemm",
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#post-training-quantized-models",
            "unquantized": Inception_V3_Weights.IMAGENET1K_V1,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 77.176,
                    "acc@5": 93.354,
                }
            },
            "_ops": 5.713,
            "_file_size": 23.146,
            "_docs": """
                These weights were produced by doing Post Training Quantization (eager mode) on top of the unquantized
                weights listed below.
            """,
        },
    )
    DEFAULT = IMAGENET1K_FBGEMM_V1
195
+
196
+
197
@register_model(name="quantized_inception_v3")
@handle_legacy_interface(
    weights=(
        "pretrained",
        lambda kwargs: Inception_V3_QuantizedWeights.IMAGENET1K_FBGEMM_V1
        if kwargs.get("quantize", False)
        else Inception_V3_Weights.IMAGENET1K_V1,
    )
)
def inception_v3(
    *,
    weights: Optional[Union[Inception_V3_QuantizedWeights, Inception_V3_Weights]] = None,
    progress: bool = True,
    quantize: bool = False,
    **kwargs: Any,
) -> QuantizableInception3:
    r"""Inception v3 model architecture from
    `Rethinking the Inception Architecture for Computer Vision <http://arxiv.org/abs/1512.00567>`__.

    .. note::
        **Important**: In contrast to the other models the inception_v3 expects tensors with a size of
        N x 3 x 299 x 299, so ensure your images are sized accordingly.

    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.

    Args:
        weights (:class:`~torchvision.models.quantization.Inception_V3_QuantizedWeights` or :class:`~torchvision.models.Inception_V3_Weights`, optional): The pretrained
            weights for the model. See
            :class:`~torchvision.models.quantization.Inception_V3_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr.
            Default is True.
        quantize (bool, optional): If True, return a quantized version of the model.
            Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableInception3``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/inception.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.quantization.Inception_V3_QuantizedWeights
        :members:

    .. autoclass:: torchvision.models.Inception_V3_Weights
        :members:
        :noindex:
    """
    weights = (Inception_V3_QuantizedWeights if quantize else Inception_V3_Weights).verify(weights)

    # Remember whether the caller asked for aux heads; the checkpoints below are
    # built with aux_logits=True, so the param is overridden before construction
    # and the heads are stripped again afterwards if the caller did not want them.
    original_aux_logits = kwargs.get("aux_logits", False)
    if weights is not None:
        if "transform_input" not in kwargs:
            _ovewrite_named_param(kwargs, "transform_input", True)
        _ovewrite_named_param(kwargs, "aux_logits", True)
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
        if "backend" in weights.meta:
            _ovewrite_named_param(kwargs, "backend", weights.meta["backend"])
    backend = kwargs.pop("backend", "fbgemm")

    model = QuantizableInception3(**kwargs)
    _replace_relu(model)
    if quantize:
        quantize_model(model, backend)

    if weights is not None:
        if quantize and not original_aux_logits:
            # Quantized path: drop the aux head BEFORE loading — presumably the
            # quantized checkpoint does not carry aux parameters (TODO confirm).
            model.aux_logits = False
            model.AuxLogits = None
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
        if not quantize and not original_aux_logits:
            # Float path: load first (the float checkpoint includes the aux
            # head), then strip it to honor the caller's aux_logits=False.
            model.aux_logits = False
            model.AuxLogits = None

    return model
vllm/lib/python3.10/site-packages/torchvision/models/quantization/mobilenet.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ from .mobilenetv2 import * # noqa: F401, F403
2
+ from .mobilenetv3 import * # noqa: F401, F403
3
+ from .mobilenetv2 import __all__ as mv2_all
4
+ from .mobilenetv3 import __all__ as mv3_all
5
+
6
+ __all__ = mv2_all + mv3_all
vllm/lib/python3.10/site-packages/torchvision/models/quantization/mobilenetv2.py ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import partial
2
+ from typing import Any, Optional, Union
3
+
4
+ from torch import nn, Tensor
5
+ from torch.ao.quantization import DeQuantStub, QuantStub
6
+ from torchvision.models.mobilenetv2 import InvertedResidual, MobileNet_V2_Weights, MobileNetV2
7
+
8
+ from ...ops.misc import Conv2dNormActivation
9
+ from ...transforms._presets import ImageClassification
10
+ from .._api import register_model, Weights, WeightsEnum
11
+ from .._meta import _IMAGENET_CATEGORIES
12
+ from .._utils import _ovewrite_named_param, handle_legacy_interface
13
+ from .utils import _fuse_modules, _replace_relu, quantize_model
14
+
15
+
16
+ __all__ = [
17
+ "QuantizableMobileNetV2",
18
+ "MobileNet_V2_QuantizedWeights",
19
+ "mobilenet_v2",
20
+ ]
21
+
22
+
23
class QuantizableInvertedResidual(InvertedResidual):
    """Inverted-residual block whose skip connection uses a quantizable add."""

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # FloatFunctional gives the residual add its own quantization observer.
        self.skip_add = nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
        if not self.use_res_connect:
            return self.conv(x)
        return self.skip_add.add(x, self.conv(x))

    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        # Fuse each Conv2d with the module that immediately follows it.
        for pos in range(len(self.conv)):
            if type(self.conv[pos]) is nn.Conv2d:
                _fuse_modules(self.conv, [str(pos), str(pos + 1)], is_qat, inplace=True)
38
+
39
+
40
class QuantizableMobileNetV2(MobileNetV2):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """
        MobileNet V2 main class

        Args:
            Inherits args from floating point MobileNetV2
        """
        super().__init__(*args, **kwargs)
        # Boundaries where tensors enter/leave the quantized domain.
        self.quant = QuantStub()
        self.dequant = DeQuantStub()

    def forward(self, x: Tensor) -> Tensor:
        return self.dequant(self._forward_impl(self.quant(x)))

    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        for mod in self.modules():
            # Exact type matches on purpose: fusion must not touch subclasses.
            if type(mod) is Conv2dNormActivation:
                _fuse_modules(mod, ["0", "1", "2"], is_qat, inplace=True)
            if type(mod) is QuantizableInvertedResidual:
                mod.fuse_model(is_qat)
64
+
65
+
66
class MobileNet_V2_QuantizedWeights(WeightsEnum):
    # QAT (qnnpack backend) counterpart of MobileNet_V2_Weights.IMAGENET1K_V1;
    # see the "unquantized" meta entry.
    IMAGENET1K_QNNPACK_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/mobilenet_v2_qnnpack_37f702c5.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            "num_params": 3504872,
            "min_size": (1, 1),
            "categories": _IMAGENET_CATEGORIES,
            "backend": "qnnpack",
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#qat-mobilenetv2",
            "unquantized": MobileNet_V2_Weights.IMAGENET1K_V1,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 71.658,
                    "acc@5": 90.150,
                }
            },
            "_ops": 0.301,
            "_file_size": 3.423,
            "_docs": """
                These weights were produced by doing Quantization Aware Training (eager mode) on top of the unquantized
                weights listed below.
            """,
        },
    )
    DEFAULT = IMAGENET1K_QNNPACK_V1
92
+
93
+
94
@register_model(name="quantized_mobilenet_v2")
@handle_legacy_interface(
    weights=(
        "pretrained",
        lambda kwargs: MobileNet_V2_QuantizedWeights.IMAGENET1K_QNNPACK_V1
        if kwargs.get("quantize", False)
        else MobileNet_V2_Weights.IMAGENET1K_V1,
    )
)
def mobilenet_v2(
    *,
    weights: Optional[Union[MobileNet_V2_QuantizedWeights, MobileNet_V2_Weights]] = None,
    progress: bool = True,
    quantize: bool = False,
    **kwargs: Any,
) -> QuantizableMobileNetV2:
    """
    Constructs a MobileNetV2 architecture from
    `MobileNetV2: Inverted Residuals and Linear Bottlenecks
    <https://arxiv.org/abs/1801.04381>`_.

    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.

    Args:
        weights (:class:`~torchvision.models.quantization.MobileNet_V2_QuantizedWeights` or :class:`~torchvision.models.MobileNet_V2_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.quantization.MobileNet_V2_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        quantize (bool, optional): If True, returns a quantized version of the model. Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableMobileNetV2``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/mobilenetv2.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.quantization.MobileNet_V2_QuantizedWeights
        :members:

    .. autoclass:: torchvision.models.MobileNet_V2_Weights
        :members:
        :noindex:
    """
    # Pick the weights enum matching the requested mode and validate the input.
    weights = (MobileNet_V2_QuantizedWeights if quantize else MobileNet_V2_Weights).verify(weights)

    if weights is not None:
        # Make the model head match the checkpoint, and use its recorded backend.
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
        if "backend" in weights.meta:
            _ovewrite_named_param(kwargs, "backend", weights.meta["backend"])
    backend = kwargs.pop("backend", "qnnpack")

    model = QuantizableMobileNetV2(block=QuantizableInvertedResidual, **kwargs)
    _replace_relu(model)
    if quantize:
        # Quantize first so the state dict keys line up with the quantized checkpoint.
        quantize_model(model, backend)

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))

    return model
vllm/lib/python3.10/site-packages/torchvision/models/quantization/mobilenetv3.py ADDED
@@ -0,0 +1,237 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import partial
2
+ from typing import Any, List, Optional, Union
3
+
4
+ import torch
5
+ from torch import nn, Tensor
6
+ from torch.ao.quantization import DeQuantStub, QuantStub
7
+
8
+ from ...ops.misc import Conv2dNormActivation, SqueezeExcitation
9
+ from ...transforms._presets import ImageClassification
10
+ from .._api import register_model, Weights, WeightsEnum
11
+ from .._meta import _IMAGENET_CATEGORIES
12
+ from .._utils import _ovewrite_named_param, handle_legacy_interface
13
+ from ..mobilenetv3 import (
14
+ _mobilenet_v3_conf,
15
+ InvertedResidual,
16
+ InvertedResidualConfig,
17
+ MobileNet_V3_Large_Weights,
18
+ MobileNetV3,
19
+ )
20
+ from .utils import _fuse_modules, _replace_relu
21
+
22
+
23
+ __all__ = [
24
+ "QuantizableMobileNetV3",
25
+ "MobileNet_V3_Large_QuantizedWeights",
26
+ "mobilenet_v3_large",
27
+ ]
28
+
29
+
30
class QuantizableSqueezeExcitation(SqueezeExcitation):
    # State-dict layout version; bumped when observer entries were added under
    # scale_activation. Used below to migrate older checkpoints on load.
    _version = 2

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Force the Hardsigmoid gate regardless of what the caller passed.
        kwargs["scale_activation"] = nn.Hardsigmoid
        super().__init__(*args, **kwargs)
        # Quantizable elementwise multiply for applying the SE scale.
        self.skip_mul = nn.quantized.FloatFunctional()

    def forward(self, input: Tensor) -> Tensor:
        return self.skip_mul.mul(self._scale(input), input)

    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        # Fuse the first FC with its activation; modifies the module in place.
        _fuse_modules(self, ["fc1", "activation"], is_qat, inplace=True)

    def _load_from_state_dict(
        self,
        state_dict,
        prefix,
        local_metadata,
        strict,
        missing_keys,
        unexpected_keys,
        error_msgs,
    ):
        version = local_metadata.get("version", None)

        # Pre-version-2 quantized checkpoints lack the scale_activation observer
        # entries; inject neutral defaults so strict loading does not fail.
        # Only relevant when the module is prepared for quantization (qconfig set).
        if hasattr(self, "qconfig") and (version is None or version < 2):
            default_state_dict = {
                "scale_activation.activation_post_process.scale": torch.tensor([1.0]),
                "scale_activation.activation_post_process.activation_post_process.scale": torch.tensor([1.0]),
                "scale_activation.activation_post_process.zero_point": torch.tensor([0], dtype=torch.int32),
                "scale_activation.activation_post_process.activation_post_process.zero_point": torch.tensor(
                    [0], dtype=torch.int32
                ),
                "scale_activation.activation_post_process.fake_quant_enabled": torch.tensor([1]),
                "scale_activation.activation_post_process.observer_enabled": torch.tensor([1]),
            }
            for k, v in default_state_dict.items():
                full_key = prefix + k
                if full_key not in state_dict:
                    state_dict[full_key] = v

        super()._load_from_state_dict(
            state_dict,
            prefix,
            local_metadata,
            strict,
            missing_keys,
            unexpected_keys,
            error_msgs,
        )
81
+
82
+
83
class QuantizableInvertedResidual(InvertedResidual):
    # TODO https://github.com/pytorch/vision/pull/4232#pullrequestreview-730461659
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, se_layer=QuantizableSqueezeExcitation, **kwargs)  # type: ignore[misc]
        # FloatFunctional gives the residual add its own quantization observer.
        self.skip_add = nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
        if not self.use_res_connect:
            return self.block(x)
        return self.skip_add.add(x, self.block(x))
94
+
95
+
96
class QuantizableMobileNetV3(MobileNetV3):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """
        MobileNet V3 main class

        Args:
            Inherits args from floating point MobileNetV3
        """
        super().__init__(*args, **kwargs)
        # Boundaries where tensors enter/leave the quantized domain.
        self.quant = QuantStub()
        self.dequant = DeQuantStub()

    def forward(self, x: Tensor) -> Tensor:
        return self.dequant(self._forward_impl(self.quant(x)))

    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        for mod in self.modules():
            if type(mod) is Conv2dNormActivation:
                # Always fuse conv+norm; pull in the activation only when it is
                # a plain ReLU (a third, non-ReLU module is left unfused).
                targets = ["0", "1"]
                if len(mod) == 3 and type(mod[2]) is nn.ReLU:
                    targets.append("2")
                _fuse_modules(mod, targets, is_qat, inplace=True)
            elif type(mod) is QuantizableSqueezeExcitation:
                mod.fuse_model(is_qat)
123
+
124
+
125
def _mobilenet_v3_model(
    inverted_residual_setting: List[InvertedResidualConfig],
    last_channel: int,
    weights: Optional[WeightsEnum],
    progress: bool,
    quantize: bool,
    **kwargs: Any,
) -> QuantizableMobileNetV3:
    """Build a quantizable MobileNetV3, optionally loading weights and quantizing.

    Unlike the other quantized builders in this package, quantization here goes
    through the QAT prepare/convert flow rather than ``quantize_model`` — see
    the inline comment below for the rationale.
    """
    if weights is not None:
        # Make the model head match the checkpoint, and use its recorded backend.
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
        if "backend" in weights.meta:
            _ovewrite_named_param(kwargs, "backend", weights.meta["backend"])
    backend = kwargs.pop("backend", "qnnpack")

    model = QuantizableMobileNetV3(inverted_residual_setting, last_channel, block=QuantizableInvertedResidual, **kwargs)
    _replace_relu(model)

    if quantize:
        # Instead of quantizing the model and then loading the quantized weights we take a different approach.
        # We prepare the QAT model, load the QAT weights from training and then convert it.
        # This is done to avoid extremely low accuracies observed on the specific model. This is rather a workaround
        # for an unresolved bug on the eager quantization API detailed at: https://github.com/pytorch/vision/issues/5890
        model.fuse_model(is_qat=True)
        model.qconfig = torch.ao.quantization.get_default_qat_qconfig(backend)
        torch.ao.quantization.prepare_qat(model, inplace=True)

    if weights is not None:
        # Note: for the quantize path this loads into the *prepared* (QAT) model.
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))

    if quantize:
        # Convert the prepared model to a true int8 model and freeze it for inference.
        torch.ao.quantization.convert(model, inplace=True)
        model.eval()

    return model
159
+
160
+
161
class MobileNet_V3_Large_QuantizedWeights(WeightsEnum):
    # QAT (qnnpack backend) counterpart of MobileNet_V3_Large_Weights.IMAGENET1K_V1;
    # see the "unquantized" meta entry.
    IMAGENET1K_QNNPACK_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/mobilenet_v3_large_qnnpack-5bcacf28.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            "num_params": 5483032,
            "min_size": (1, 1),
            "categories": _IMAGENET_CATEGORIES,
            "backend": "qnnpack",
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#qat-mobilenetv3",
            "unquantized": MobileNet_V3_Large_Weights.IMAGENET1K_V1,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 73.004,
                    "acc@5": 90.858,
                }
            },
            "_ops": 0.217,
            "_file_size": 21.554,
            "_docs": """
                These weights were produced by doing Quantization Aware Training (eager mode) on top of the unquantized
                weights listed below.
            """,
        },
    )
    DEFAULT = IMAGENET1K_QNNPACK_V1
187
+
188
+
189
@register_model(name="quantized_mobilenet_v3_large")
@handle_legacy_interface(
    weights=(
        "pretrained",
        lambda kwargs: MobileNet_V3_Large_QuantizedWeights.IMAGENET1K_QNNPACK_V1
        if kwargs.get("quantize", False)
        else MobileNet_V3_Large_Weights.IMAGENET1K_V1,
    )
)
def mobilenet_v3_large(
    *,
    weights: Optional[Union[MobileNet_V3_Large_QuantizedWeights, MobileNet_V3_Large_Weights]] = None,
    progress: bool = True,
    quantize: bool = False,
    **kwargs: Any,
) -> QuantizableMobileNetV3:
    """
    MobileNetV3 (Large) model from
    `Searching for MobileNetV3 <https://arxiv.org/abs/1905.02244>`_.

    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.

    Args:
        weights (:class:`~torchvision.models.quantization.MobileNet_V3_Large_QuantizedWeights` or :class:`~torchvision.models.MobileNet_V3_Large_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.quantization.MobileNet_V3_Large_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool): If True, displays a progress bar of the
            download to stderr. Default is True.
        quantize (bool): If True, return a quantized version of the model. Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.MobileNet_V3_Large_QuantizedWeights``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/mobilenetv3.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.quantization.MobileNet_V3_Large_QuantizedWeights
        :members:

    .. autoclass:: torchvision.models.MobileNet_V3_Large_Weights
        :members:
        :noindex:
    """
    # Pick the weights enum matching the requested mode and validate the input.
    weights = (MobileNet_V3_Large_QuantizedWeights if quantize else MobileNet_V3_Large_Weights).verify(weights)

    inverted_residual_setting, last_channel = _mobilenet_v3_conf("mobilenet_v3_large", **kwargs)
    return _mobilenet_v3_model(inverted_residual_setting, last_channel, weights, progress, quantize, **kwargs)
vllm/lib/python3.10/site-packages/torchvision/models/quantization/resnet.py ADDED
@@ -0,0 +1,484 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import partial
2
+ from typing import Any, List, Optional, Type, Union
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ from torch import Tensor
7
+ from torchvision.models.resnet import (
8
+ BasicBlock,
9
+ Bottleneck,
10
+ ResNet,
11
+ ResNet18_Weights,
12
+ ResNet50_Weights,
13
+ ResNeXt101_32X8D_Weights,
14
+ ResNeXt101_64X4D_Weights,
15
+ )
16
+
17
+ from ...transforms._presets import ImageClassification
18
+ from .._api import register_model, Weights, WeightsEnum
19
+ from .._meta import _IMAGENET_CATEGORIES
20
+ from .._utils import _ovewrite_named_param, handle_legacy_interface
21
+ from .utils import _fuse_modules, _replace_relu, quantize_model
22
+
23
+
24
+ __all__ = [
25
+ "QuantizableResNet",
26
+ "ResNet18_QuantizedWeights",
27
+ "ResNet50_QuantizedWeights",
28
+ "ResNeXt101_32X8D_QuantizedWeights",
29
+ "ResNeXt101_64X4D_QuantizedWeights",
30
+ "resnet18",
31
+ "resnet50",
32
+ "resnext101_32x8d",
33
+ "resnext101_64x4d",
34
+ ]
35
+
36
+
37
class QuantizableBasicBlock(BasicBlock):
    """BasicBlock whose residual join is a fused, quantizable add+relu."""

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # FloatFunctional gives the add+relu its own quantization observer.
        self.add_relu = torch.nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))

        shortcut = self.downsample(x) if self.downsample is not None else x
        return self.add_relu.add_relu(out, shortcut)

    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        """Fuse conv/bn(/relu) groups of this block in place."""
        _fuse_modules(self, [["conv1", "bn1", "relu"], ["conv2", "bn2"]], is_qat, inplace=True)
        if self.downsample:
            _fuse_modules(self.downsample, ["0", "1"], is_qat, inplace=True)
63
+
64
+
65
class QuantizableBottleneck(Bottleneck):
    """Bottleneck whose residual join is a fused, quantizable add+relu."""

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        self.skip_add_relu = nn.quantized.FloatFunctional()
        # Separate, non-inplace ReLUs so each conv/bn/relu triple can be fused.
        self.relu1 = nn.ReLU(inplace=False)
        self.relu2 = nn.ReLU(inplace=False)

    def forward(self, x: Tensor) -> Tensor:
        out = self.relu1(self.bn1(self.conv1(x)))
        out = self.relu2(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))

        shortcut = self.downsample(x) if self.downsample is not None else x
        return self.skip_add_relu.add_relu(out, shortcut)

    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        """Fuse conv/bn(/relu) groups of this block in place."""
        _fuse_modules(
            self, [["conv1", "bn1", "relu1"], ["conv2", "bn2", "relu2"], ["conv3", "bn3"]], is_qat, inplace=True
        )
        if self.downsample:
            _fuse_modules(self.downsample, ["0", "1"], is_qat, inplace=True)
96
+
97
+
98
class QuantizableResNet(ResNet):
    """ResNet bracketed by quant/dequant stubs, built from quantizable blocks."""

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)

        self.quant = torch.ao.quantization.QuantStub()
        self.dequant = torch.ao.quantization.DeQuantStub()

    def forward(self, x: Tensor) -> Tensor:
        x = self.quant(x)
        # Call _forward_impl directly: super().forward(x) is not scriptable here.
        x = self._forward_impl(x)
        return self.dequant(x)

    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        r"""Fuse conv/bn/relu modules in resnet models

        Fuse conv+bn+relu/ Conv+relu/conv+Bn modules to prepare for quantization.
        Model is modified in place. Note that this operation does not change numerics
        and the model after modification is in floating point
        """
        _fuse_modules(self, ["conv1", "bn1", "relu"], is_qat, inplace=True)
        for mod in self.modules():
            # Exact type matches on purpose: fusion must not touch subclasses.
            if type(mod) is QuantizableBottleneck or type(mod) is QuantizableBasicBlock:
                mod.fuse_model(is_qat)
125
+
126
+
127
def _resnet(
    block: Type[Union[QuantizableBasicBlock, QuantizableBottleneck]],
    layers: List[int],
    weights: Optional[WeightsEnum],
    progress: bool,
    quantize: bool,
    **kwargs: Any,
) -> QuantizableResNet:
    """Construct a quantizable ResNet, optionally quantizing it and loading weights.

    When ``weights`` is given, the classifier size (and, if recorded in the
    weight metadata, the quantization backend) is forced to match the weights.
    """
    if weights is not None:
        # Pretrained weights dictate the head size and the quantization backend.
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
        if "backend" in weights.meta:
            _ovewrite_named_param(kwargs, "backend", weights.meta["backend"])
    backend = kwargs.pop("backend", "fbgemm")

    net = QuantizableResNet(block, layers, **kwargs)
    _replace_relu(net)
    if quantize:
        quantize_model(net, backend)

    # State dict is loaded last so it matches the (possibly quantized) model.
    if weights is not None:
        net.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))

    return net
# Metadata shared by every quantized-ResNet weight entry below; individual
# entries spread this dict first and may override keys (e.g. "recipe").
_COMMON_META = {
    "min_size": (1, 1),
    "categories": _IMAGENET_CATEGORIES,
    "backend": "fbgemm",
    "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#post-training-quantized-models",
    "_docs": """
        These weights were produced by doing Post Training Quantization (eager mode) on top of the unquantized
        weights listed below.
    """,
}
class ResNet18_QuantizedWeights(WeightsEnum):
    """Pretrained weight entries for the quantized ResNet-18 model."""

    # Post-training-quantized (fbgemm) counterpart of ResNet18_Weights.IMAGENET1K_V1.
    IMAGENET1K_FBGEMM_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/resnet18_fbgemm_16fa66dd.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 11689512,
            "unquantized": ResNet18_Weights.IMAGENET1K_V1,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 69.494,
                    "acc@5": 88.882,
                }
            },
            "_ops": 1.814,
            "_file_size": 11.238,
        },
    )
    DEFAULT = IMAGENET1K_FBGEMM_V1
class ResNet50_QuantizedWeights(WeightsEnum):
    """Pretrained weight entries for the quantized ResNet-50 model."""

    # Quantized counterpart of ResNet50_Weights.IMAGENET1K_V1.
    IMAGENET1K_FBGEMM_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/resnet50_fbgemm_bf931d71.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 25557032,
            "unquantized": ResNet50_Weights.IMAGENET1K_V1,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 75.920,
                    "acc@5": 92.814,
                }
            },
            "_ops": 4.089,
            "_file_size": 24.759,
        },
    )
    # Quantized counterpart of the improved ResNet50_Weights.IMAGENET1K_V2 recipe
    # (note the different resize_size in its inference transform).
    IMAGENET1K_FBGEMM_V2 = Weights(
        url="https://download.pytorch.org/models/quantized/resnet50_fbgemm-23753f79.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 25557032,
            "unquantized": ResNet50_Weights.IMAGENET1K_V2,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 80.282,
                    "acc@5": 94.976,
                }
            },
            "_ops": 4.089,
            "_file_size": 24.953,
        },
    )
    DEFAULT = IMAGENET1K_FBGEMM_V2
class ResNeXt101_32X8D_QuantizedWeights(WeightsEnum):
    """Pretrained weight entries for the quantized ResNeXt-101 32x8d model."""

    # Quantized counterpart of ResNeXt101_32X8D_Weights.IMAGENET1K_V1.
    IMAGENET1K_FBGEMM_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/resnext101_32x8_fbgemm_09835ccf.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 88791336,
            "unquantized": ResNeXt101_32X8D_Weights.IMAGENET1K_V1,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 78.986,
                    "acc@5": 94.480,
                }
            },
            "_ops": 16.414,
            "_file_size": 86.034,
        },
    )
    # Quantized counterpart of the improved IMAGENET1K_V2 recipe.
    IMAGENET1K_FBGEMM_V2 = Weights(
        url="https://download.pytorch.org/models/quantized/resnext101_32x8_fbgemm-ee16d00c.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 88791336,
            "unquantized": ResNeXt101_32X8D_Weights.IMAGENET1K_V2,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 82.574,
                    "acc@5": 96.132,
                }
            },
            "_ops": 16.414,
            "_file_size": 86.645,
        },
    )
    DEFAULT = IMAGENET1K_FBGEMM_V2
class ResNeXt101_64X4D_QuantizedWeights(WeightsEnum):
    """Pretrained weight entries for the quantized ResNeXt-101 64x4d model."""

    # Quantized counterpart of ResNeXt101_64X4D_Weights.IMAGENET1K_V1; overrides
    # the shared recipe URL with the PR that introduced this variant.
    IMAGENET1K_FBGEMM_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/resnext101_64x4d_fbgemm-605a1cb3.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 83455272,
            "recipe": "https://github.com/pytorch/vision/pull/5935",
            "unquantized": ResNeXt101_64X4D_Weights.IMAGENET1K_V1,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 82.898,
                    "acc@5": 96.326,
                }
            },
            "_ops": 15.46,
            "_file_size": 81.556,
        },
    )
    DEFAULT = IMAGENET1K_FBGEMM_V1
+ @register_model(name="quantized_resnet18")
284
+ @handle_legacy_interface(
285
+ weights=(
286
+ "pretrained",
287
+ lambda kwargs: ResNet18_QuantizedWeights.IMAGENET1K_FBGEMM_V1
288
+ if kwargs.get("quantize", False)
289
+ else ResNet18_Weights.IMAGENET1K_V1,
290
+ )
291
+ )
292
+ def resnet18(
293
+ *,
294
+ weights: Optional[Union[ResNet18_QuantizedWeights, ResNet18_Weights]] = None,
295
+ progress: bool = True,
296
+ quantize: bool = False,
297
+ **kwargs: Any,
298
+ ) -> QuantizableResNet:
299
+ """ResNet-18 model from
300
+ `Deep Residual Learning for Image Recognition <https://arxiv.org/abs/1512.03385>`_
301
+
302
+ .. note::
303
+ Note that ``quantize = True`` returns a quantized model with 8 bit
304
+ weights. Quantized models only support inference and run on CPUs.
305
+ GPU inference is not yet supported.
306
+
307
+ Args:
308
+ weights (:class:`~torchvision.models.quantization.ResNet18_QuantizedWeights` or :class:`~torchvision.models.ResNet18_Weights`, optional): The
309
+ pretrained weights for the model. See
310
+ :class:`~torchvision.models.quantization.ResNet18_QuantizedWeights` below for
311
+ more details, and possible values. By default, no pre-trained
312
+ weights are used.
313
+ progress (bool, optional): If True, displays a progress bar of the
314
+ download to stderr. Default is True.
315
+ quantize (bool, optional): If True, return a quantized version of the model. Default is False.
316
+ **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableResNet``
317
+ base class. Please refer to the `source code
318
+ <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/resnet.py>`_
319
+ for more details about this class.
320
+
321
+ .. autoclass:: torchvision.models.quantization.ResNet18_QuantizedWeights
322
+ :members:
323
+
324
+ .. autoclass:: torchvision.models.ResNet18_Weights
325
+ :members:
326
+ :noindex:
327
+ """
328
+ weights = (ResNet18_QuantizedWeights if quantize else ResNet18_Weights).verify(weights)
329
+
330
+ return _resnet(QuantizableBasicBlock, [2, 2, 2, 2], weights, progress, quantize, **kwargs)
331
+
332
+
333
+ @register_model(name="quantized_resnet50")
334
+ @handle_legacy_interface(
335
+ weights=(
336
+ "pretrained",
337
+ lambda kwargs: ResNet50_QuantizedWeights.IMAGENET1K_FBGEMM_V1
338
+ if kwargs.get("quantize", False)
339
+ else ResNet50_Weights.IMAGENET1K_V1,
340
+ )
341
+ )
342
+ def resnet50(
343
+ *,
344
+ weights: Optional[Union[ResNet50_QuantizedWeights, ResNet50_Weights]] = None,
345
+ progress: bool = True,
346
+ quantize: bool = False,
347
+ **kwargs: Any,
348
+ ) -> QuantizableResNet:
349
+ """ResNet-50 model from
350
+ `Deep Residual Learning for Image Recognition <https://arxiv.org/abs/1512.03385>`_
351
+
352
+ .. note::
353
+ Note that ``quantize = True`` returns a quantized model with 8 bit
354
+ weights. Quantized models only support inference and run on CPUs.
355
+ GPU inference is not yet supported.
356
+
357
+ Args:
358
+ weights (:class:`~torchvision.models.quantization.ResNet50_QuantizedWeights` or :class:`~torchvision.models.ResNet50_Weights`, optional): The
359
+ pretrained weights for the model. See
360
+ :class:`~torchvision.models.quantization.ResNet50_QuantizedWeights` below for
361
+ more details, and possible values. By default, no pre-trained
362
+ weights are used.
363
+ progress (bool, optional): If True, displays a progress bar of the
364
+ download to stderr. Default is True.
365
+ quantize (bool, optional): If True, return a quantized version of the model. Default is False.
366
+ **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableResNet``
367
+ base class. Please refer to the `source code
368
+ <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/resnet.py>`_
369
+ for more details about this class.
370
+
371
+ .. autoclass:: torchvision.models.quantization.ResNet50_QuantizedWeights
372
+ :members:
373
+
374
+ .. autoclass:: torchvision.models.ResNet50_Weights
375
+ :members:
376
+ :noindex:
377
+ """
378
+ weights = (ResNet50_QuantizedWeights if quantize else ResNet50_Weights).verify(weights)
379
+
380
+ return _resnet(QuantizableBottleneck, [3, 4, 6, 3], weights, progress, quantize, **kwargs)
381
+
382
+
383
+ @register_model(name="quantized_resnext101_32x8d")
384
+ @handle_legacy_interface(
385
+ weights=(
386
+ "pretrained",
387
+ lambda kwargs: ResNeXt101_32X8D_QuantizedWeights.IMAGENET1K_FBGEMM_V1
388
+ if kwargs.get("quantize", False)
389
+ else ResNeXt101_32X8D_Weights.IMAGENET1K_V1,
390
+ )
391
+ )
392
+ def resnext101_32x8d(
393
+ *,
394
+ weights: Optional[Union[ResNeXt101_32X8D_QuantizedWeights, ResNeXt101_32X8D_Weights]] = None,
395
+ progress: bool = True,
396
+ quantize: bool = False,
397
+ **kwargs: Any,
398
+ ) -> QuantizableResNet:
399
+ """ResNeXt-101 32x8d model from
400
+ `Aggregated Residual Transformation for Deep Neural Networks <https://arxiv.org/abs/1611.05431>`_
401
+
402
+ .. note::
403
+ Note that ``quantize = True`` returns a quantized model with 8 bit
404
+ weights. Quantized models only support inference and run on CPUs.
405
+ GPU inference is not yet supported.
406
+
407
+ Args:
408
+ weights (:class:`~torchvision.models.quantization.ResNeXt101_32X8D_QuantizedWeights` or :class:`~torchvision.models.ResNeXt101_32X8D_Weights`, optional): The
409
+ pretrained weights for the model. See
410
+ :class:`~torchvision.models.quantization.ResNet101_32X8D_QuantizedWeights` below for
411
+ more details, and possible values. By default, no pre-trained
412
+ weights are used.
413
+ progress (bool, optional): If True, displays a progress bar of the
414
+ download to stderr. Default is True.
415
+ quantize (bool, optional): If True, return a quantized version of the model. Default is False.
416
+ **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableResNet``
417
+ base class. Please refer to the `source code
418
+ <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/resnet.py>`_
419
+ for more details about this class.
420
+
421
+ .. autoclass:: torchvision.models.quantization.ResNeXt101_32X8D_QuantizedWeights
422
+ :members:
423
+
424
+ .. autoclass:: torchvision.models.ResNeXt101_32X8D_Weights
425
+ :members:
426
+ :noindex:
427
+ """
428
+ weights = (ResNeXt101_32X8D_QuantizedWeights if quantize else ResNeXt101_32X8D_Weights).verify(weights)
429
+
430
+ _ovewrite_named_param(kwargs, "groups", 32)
431
+ _ovewrite_named_param(kwargs, "width_per_group", 8)
432
+ return _resnet(QuantizableBottleneck, [3, 4, 23, 3], weights, progress, quantize, **kwargs)
433
+
434
+
435
+ @register_model(name="quantized_resnext101_64x4d")
436
+ @handle_legacy_interface(
437
+ weights=(
438
+ "pretrained",
439
+ lambda kwargs: ResNeXt101_64X4D_QuantizedWeights.IMAGENET1K_FBGEMM_V1
440
+ if kwargs.get("quantize", False)
441
+ else ResNeXt101_64X4D_Weights.IMAGENET1K_V1,
442
+ )
443
+ )
444
+ def resnext101_64x4d(
445
+ *,
446
+ weights: Optional[Union[ResNeXt101_64X4D_QuantizedWeights, ResNeXt101_64X4D_Weights]] = None,
447
+ progress: bool = True,
448
+ quantize: bool = False,
449
+ **kwargs: Any,
450
+ ) -> QuantizableResNet:
451
+ """ResNeXt-101 64x4d model from
452
+ `Aggregated Residual Transformation for Deep Neural Networks <https://arxiv.org/abs/1611.05431>`_
453
+
454
+ .. note::
455
+ Note that ``quantize = True`` returns a quantized model with 8 bit
456
+ weights. Quantized models only support inference and run on CPUs.
457
+ GPU inference is not yet supported.
458
+
459
+ Args:
460
+ weights (:class:`~torchvision.models.quantization.ResNeXt101_64X4D_QuantizedWeights` or :class:`~torchvision.models.ResNeXt101_64X4D_Weights`, optional): The
461
+ pretrained weights for the model. See
462
+ :class:`~torchvision.models.quantization.ResNet101_64X4D_QuantizedWeights` below for
463
+ more details, and possible values. By default, no pre-trained
464
+ weights are used.
465
+ progress (bool, optional): If True, displays a progress bar of the
466
+ download to stderr. Default is True.
467
+ quantize (bool, optional): If True, return a quantized version of the model. Default is False.
468
+ **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableResNet``
469
+ base class. Please refer to the `source code
470
+ <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/resnet.py>`_
471
+ for more details about this class.
472
+
473
+ .. autoclass:: torchvision.models.quantization.ResNeXt101_64X4D_QuantizedWeights
474
+ :members:
475
+
476
+ .. autoclass:: torchvision.models.ResNeXt101_64X4D_Weights
477
+ :members:
478
+ :noindex:
479
+ """
480
+ weights = (ResNeXt101_64X4D_QuantizedWeights if quantize else ResNeXt101_64X4D_Weights).verify(weights)
481
+
482
+ _ovewrite_named_param(kwargs, "groups", 64)
483
+ _ovewrite_named_param(kwargs, "width_per_group", 4)
484
+ return _resnet(QuantizableBottleneck, [3, 4, 23, 3], weights, progress, quantize, **kwargs)